├── LICENSE ├── README.md ├── pom.xml └── src ├── main ├── java │ └── co │ │ └── solinx │ │ └── kafka │ │ └── monitor │ │ ├── Main.java │ │ ├── Start.java │ │ ├── api │ │ ├── AbstractApi.java │ │ ├── AlarmsApi.java │ │ ├── BrokerTopicServletApi.java │ │ ├── BrokersApi.java │ │ ├── ConsumerApi.java │ │ ├── HistoryMetricsApi.java │ │ ├── InitBean.java │ │ ├── MessageApi.java │ │ ├── MetricsApi.java │ │ ├── PartitionsApi.java │ │ ├── TopicsApi.java │ │ └── kafkaInfoApi.java │ │ ├── common │ │ ├── DateUtils.java │ │ ├── JettyUtils.java │ │ ├── KafkaUtils.java │ │ ├── Utils.java │ │ └── WebUI.java │ │ ├── config │ │ └── PreConfig.java │ │ ├── core │ │ ├── consumer │ │ │ ├── MonitorConsumer.java │ │ │ └── MonitorConsumerRecord.java │ │ ├── listener │ │ │ ├── BrokerListener.java │ │ │ └── TopicListener.java │ │ ├── produce │ │ │ ├── MonitorProducer.java │ │ │ └── ProduceRecord.java │ │ └── service │ │ │ ├── ConfigService.java │ │ │ ├── ConsumerService.java │ │ │ ├── CuratorService.java │ │ │ ├── CustomConsumerGroupService.java │ │ │ ├── InitService.java │ │ │ ├── JolokiaService.java │ │ │ ├── KafkaBaseInfoService.java │ │ │ ├── MessageService.java │ │ │ ├── MetricsReportService.java │ │ │ ├── PartitionService.java │ │ │ ├── ProduceService.java │ │ │ └── TopicService.java │ │ ├── db │ │ └── DBUtils.java │ │ ├── model │ │ ├── Broker.java │ │ ├── Consumer.java │ │ ├── Controller.java │ │ ├── KafkaConfig.java │ │ ├── KafkaMonitorData.java │ │ ├── Message.java │ │ ├── MonitorConfig.java │ │ ├── PageData.java │ │ ├── Partition.java │ │ ├── PartitionReplica.java │ │ ├── Topic.java │ │ └── ZooConfig.java │ │ ├── persist │ │ └── MetricsDataPersist.java │ │ └── utils │ │ ├── DateUtils.java │ │ ├── IClientListener.java │ │ ├── IDGenerator.java │ │ ├── JsonLoader.java │ │ └── zookeeper │ │ ├── WatcherEvent.java │ │ ├── WatcherEventCallback.java │ │ └── ZookeeperClient.java ├── resources │ ├── application.properties │ ├── blog_log.txt │ ├── kafkaMonitorConfig.json │ ├── 
log4j.properties │ ├── static │ │ ├── asset-manifest.json │ │ ├── favicon.ico │ │ ├── index.html │ │ ├── manifest.json │ │ ├── npm.json │ │ ├── service-worker.js │ │ ├── static │ │ │ ├── css │ │ │ │ ├── main.a5c0e99c.css │ │ │ │ └── main.a5c0e99c.css.map │ │ │ ├── js │ │ │ │ ├── Wysiwyg.0994dccd.chunk.js │ │ │ │ ├── Wysiwyg.0994dccd.chunk.js.map │ │ │ │ ├── Wysiwyg.f30d8599.chunk.js │ │ │ │ ├── Wysiwyg.f30d8599.chunk.js.map │ │ │ │ ├── charts.5b28f0c0.js │ │ │ │ ├── charts.5b28f0c0.js.map │ │ │ │ ├── main.ae3dc27e.js │ │ │ │ ├── main.ae3dc27e.js.map │ │ │ │ ├── vendor.061917d4.js │ │ │ │ ├── vendor.061917d4.js.map │ │ │ │ ├── vendor.360868ee.js │ │ │ │ └── vendor.360868ee.js.map │ │ │ └── media │ │ │ │ ├── b1.553c69e9.jpg │ │ │ │ ├── beauty.defb9858.jpg │ │ │ │ └── default-skin.b257fa9c.svg │ │ ├── temp │ │ │ ├── index.css │ │ │ ├── index.html │ │ │ ├── index.js │ │ │ └── index.jsp │ │ └── weibo.json │ └── zkConfig.json └── webapp │ ├── chart.html │ ├── index.css │ ├── index.html │ ├── index.js │ └── index.jsp └── test ├── java └── co │ └── solinx │ ├── AppTest.java │ └── kafka │ ├── BrokerDataTest.java │ ├── KafkaBaseInfoTest.java │ ├── KafkaConfigTest.java │ ├── KafkaConsumer.java │ ├── KafkaConsumerTest.java │ ├── PartitionDataTest.java │ ├── PartitionServiceTest.java │ ├── Test.java │ ├── TestKafkaConsumerGroupService.java │ ├── TopicDataTest.java │ ├── TopicServiceTest.java │ ├── alarm │ └── KafkaAlarmTest.java │ └── metrics │ └── ProduceServiceTest.java └── resources └── zkConfig.json /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Linx 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the 
Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |   Kafka Monitor为Kafka的可视化管理与监控工具,为Kafka的稳定运维提供高效、可靠、稳定的保障,这里主要简单介绍Kafka Monitor的相关功能与页面的介绍; 2 | Kafka Monitor主要功能:Kafka基本信息仪表盘、broker列表、topic列表、当前消费者列表、Topic添加删除、Topic数据查询; 3 | ## 一、仪表盘 4 |   仪表盘分三部分:状态、图表、报警;状态栏显示了Kafka当前集群状态、Topic总数、节点数、Partition总数,并通过图表显示当前Kafka的可用性信息;有可用性图表、延迟统计图表、异常统计图表;报警栏位当前kafka的异常信息; 5 | ## 二、broker页面 6 |   broker页面显示当前broker的可用总数、broker列表中显示当前Kafka集群中每个broker的基本信息、还可通过点击broker ID进入broker详情页面 7 | ## 三、Topic页面 8 |   页面显示当前kafka所有topic总数、Partition总数、topic首选副本率等,并提供添加Topic功能; 9 | ## 四、消费者页面 10 |   页面显示当前kafka中所有consumer列表,并显示所属group、topic等信息; 11 | ## 五、数据查询页面 12 |   查询页面可通过输入topic、partition、offset与Num(消息数量)查询指定的topic中某个partition的消息; 13 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | co.solinx 6 | kafka-monitor 7 | 1.0-SNAPSHOT 8 | jar 9 | 10 | kafka-monitor 11 | http://maven.apache.org 12 | 13 | 14 | UTF-8 15 | 8.5.8 16 | 17 | 1.6.2 18 | 1.1 19 | 1.2.16 20 | 1.0.6 21 | 
2.10.0 22 | 8.0.16 23 | 4.13.1 24 | 0.10.0.1 25 | 0.10.0.1 26 | 3.5 27 | 1.2.31 28 | 3.4.14 29 | 1.16.18 30 | 1.3.5 31 | 32 | 33 | 1.8 34 | 1.8 35 | UTF-8 36 | 37 | UTF-8 38 | UTF-8 39 | 40 | 41 | 42 | 43 | 44 | org.apache.kafka 45 | kafka-clients 46 | ${kafka_client.version} 47 | 48 | 49 | 50 | org.apache.commons 51 | commons-lang3 52 | ${commons-lang3.version} 53 | 54 | 55 | 56 | org.apache.kafka 57 | kafka_2.10 58 | ${kafka.version} 59 | 60 | 61 | com.alibaba 62 | fastjson 63 | ${fastjson.version} 64 | 65 | 66 | 67 | 68 | org.slf4j 69 | slf4j-api 70 | ${slf4j_version} 71 | 72 | 73 | org.slf4j 74 | slf4j-log4j12 75 | ${slf4j_version} 76 | 77 | 78 | commons-logging 79 | commons-logging-api 80 | ${jcl_version} 81 | 82 | 83 | log4j 84 | log4j 85 | ${log4j_version} 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | mysql 95 | mysql-connector-java 96 | ${mysql.version} 97 | 98 | 99 | 100 | org.apache.curator 101 | curator-framework 102 | ${curator.version} 103 | 104 | 105 | 106 | org.apache.zookeeper 107 | zookeeper 108 | ${zookeeper.version} 109 | 110 | 111 | 112 | junit 113 | junit 114 | ${junit.version} 115 | test 116 | 117 | 118 | 119 | org.apache.curator 120 | curator-recipes 121 | ${curator.version} 122 | 123 | 124 | 125 | 126 | org.jolokia 127 | jolokia-jvm 128 | ${jolokia-jvm.version} 129 | 130 | 131 | 132 | 133 | org.projectlombok 134 | lombok 135 | ${lombok.version} 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | org.springframework.boot 146 | spring-boot-starter-web 147 | 148 | 149 | ch.qos.logback 150 | logback-classic 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | org.springframework.boot 159 | spring-boot-dependencies 160 | 2.0.0.RELEASE 161 | pom 162 | import 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | src/main/resources 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | org.apache.maven.plugins 183 | maven-compiler-plugin 184 | 185 | 1.8 186 | 1.8 187 | 188 | 189 | 190 | 191 | org.apache.maven.plugins 192 | 
maven-jar-plugin 193 | 194 | 195 | 196 | true 197 | false 198 | lib/ 199 | co.solinx.kafka.monitor.Main 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | org.apache.maven.plugins 209 | maven-dependency-plugin 210 | 211 | 212 | copy-dependencies 213 | install 214 | 215 | copy-dependencies 216 | 217 | 218 | ${project.build.directory}/lib 219 | true 220 | true 221 | true 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/Main.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor; 2 | 3 | import co.solinx.kafka.monitor.core.service.ConfigService; 4 | import co.solinx.kafka.monitor.core.service.InitService; 5 | 6 | import java.util.Properties; 7 | 8 | /** 9 | * @author linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2016/12/12. 13 | */ 14 | public class Main { 15 | 16 | public static void main(String[] args) throws Exception { 17 | 18 | 19 | // WebUI webUI = new WebUI(ConfigService.monitorConfig.getHost(), ConfigService.monitorConfig.getPort(), "WebUi", "/"); 20 | // 21 | // webUI.bind(); 22 | initMonitorService(); 23 | } 24 | 25 | public static void initMonitorService() { 26 | InitService initService = new InitService(); 27 | initService.init(); 28 | } 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/Start.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor; 2 | 3 | import org.springframework.boot.SpringApplication; 4 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 5 | import org.springframework.context.annotation.ComponentScan; 6 | import org.springframework.context.annotation.Configuration; 7 | 8 | /** 9 | * @author linx 10 | * @create 2018-04 01-0:33 11 | **/ 
12 | @Configuration 13 | @ComponentScan 14 | @EnableAutoConfiguration 15 | public class Start { 16 | 17 | public static void main(String[] args) { 18 | SpringApplication.run(Start.class); 19 | } 20 | 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/AbstractApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.model.PageData; 4 | 5 | /** 6 | * @author linxin 7 | * @version v1.0 8 | * Copyright (c) 2015 by solinx 9 | * @date 2017/12/29. 10 | */ 11 | public abstract class AbstractApi { 12 | protected PageData pageData; 13 | 14 | 15 | public String formatData(String callback) { 16 | String resultStr = pageData.toString(); 17 | if (callback != null) { 18 | resultStr = String.format("%s(%s)", callback, resultStr); 19 | } 20 | return resultStr; 21 | } 22 | 23 | public String formatData(String callback,PageData pageData) { 24 | String resultStr = pageData.toString(); 25 | if (callback != null) { 26 | resultStr = String.format("%s(%s)", callback, resultStr); 27 | } 28 | return resultStr; 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/AlarmsApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.PageData; 5 | import co.solinx.kafka.monitor.model.Topic; 6 | import com.alibaba.fastjson.JSONArray; 7 | import com.alibaba.fastjson.JSONObject; 8 | import org.springframework.web.bind.annotation.RequestMapping; 9 | import org.springframework.web.bind.annotation.RestController; 10 | 11 | import java.util.List; 12 | 13 | /** 14 | * 报警信息 15 | * 16 | * @author linxin 17 | * @version v1.0 18 | * 
Copyright (c) 2015 by solinx 19 | * @date 2017/12/27. 20 | */ 21 | @RestController 22 | @RequestMapping("/data/alarmServlet") 23 | public class AlarmsApi extends AbstractApi { 24 | 25 | private static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 26 | 27 | @RequestMapping 28 | public String alarms(String callback) { 29 | pageData = new PageData(); 30 | try { 31 | List topicList = service.getTopics(); 32 | 33 | JSONArray array = new JSONArray(); 34 | for (Topic 35 | topic : topicList) { 36 | int preferred = (int) topic.getPreferred(); 37 | if (preferred != 100) { 38 | JSONObject temp = new JSONObject(); 39 | temp.put("preferred", preferred + "%"); 40 | temp.put("underReplicated", topic.getUnderReplicated()); 41 | temp.put("topic", topic.getName()); 42 | array.add(temp); 43 | } 44 | } 45 | pageData.setData(array); 46 | } catch (Exception e) { 47 | pageData.setStatus(500); 48 | pageData.setError(e.getMessage()); 49 | } 50 | return formatData(callback, pageData); 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/BrokerTopicServletApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.PageData; 5 | import co.solinx.kafka.monitor.model.Partition; 6 | import co.solinx.kafka.monitor.model.Topic; 7 | import com.alibaba.fastjson.JSONArray; 8 | import com.alibaba.fastjson.JSONObject; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | import org.springframework.web.bind.annotation.PathVariable; 12 | import org.springframework.web.bind.annotation.RequestMapping; 13 | import org.springframework.web.bind.annotation.RestController; 14 | 15 | import java.util.Arrays; 16 | import java.util.Collection; 17 | import java.util.List; 18 | import 
java.util.stream.Collectors; 19 | 20 | /** 21 | * @author linxin 22 | * @version v1.0 23 | * Copyright (c) 2015 by solinx 24 | * @date 2017/12/27. 25 | */ 26 | @RestController 27 | @RequestMapping("/data/brokerTopicServlet") 28 | public class BrokerTopicServletApi extends AbstractApi { 29 | 30 | private static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 31 | private Logger logger = LoggerFactory.getLogger(BrokerTopicServletApi.class); 32 | 33 | @RequestMapping("/{brokerID}") 34 | public String topic(@PathVariable int brokerID, String callback) { 35 | pageData = new PageData(); 36 | List topicList = service.getTopics(); 37 | JSONArray array = new JSONArray(); 38 | for (Topic topic : 39 | topicList) { 40 | Collection topicPar = topic.getLeaderPartitions(brokerID); 41 | int partitionCount = topicPar.size(); 42 | JSONObject topicObj = new JSONObject(); 43 | topicObj.put("name", topic.getName()); 44 | topicObj.put("partitionCount", topic.getPartitionMap().size()); 45 | topicObj.put("brokerPartitionCount", partitionCount); 46 | 47 | topicObj.put("PartitionIds", Arrays.toString(topicPar.stream().map(p -> p.getId() + " ").collect(Collectors.toList()).toArray())); 48 | array.add(topicObj); 49 | } 50 | pageData.setData(array); 51 | return formatData(callback, pageData); 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/BrokersApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.Broker; 5 | import co.solinx.kafka.monitor.model.PageData; 6 | import co.solinx.kafka.monitor.model.Topic; 7 | import com.alibaba.fastjson.JSONObject; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | import org.springframework.web.bind.annotation.PathVariable; 11 | import 
org.springframework.web.bind.annotation.RequestMapping; 12 | import org.springframework.web.bind.annotation.RequestMethod; 13 | import org.springframework.web.bind.annotation.RestController; 14 | 15 | import java.util.List; 16 | 17 | /** 18 | * broker信息 19 | * 20 | * @author linxin 21 | * @version v1.0 22 | * Copyright (c) 2015 by solinx 23 | * @date 2017/12/26. 24 | */ 25 | 26 | @RestController 27 | @RequestMapping("/data/brokerServlet") 28 | public class BrokersApi extends AbstractApi { 29 | 30 | private Logger logger = LoggerFactory.getLogger(BrokersApi.class); 31 | private KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 32 | 33 | @RequestMapping 34 | public String brokers(String callback) { 35 | pageData = new PageData(); 36 | List brokerList = service.getBrokers(); 37 | 38 | pageData.setData(brokerList); 39 | return formatData(callback, pageData); 40 | } 41 | 42 | @RequestMapping(value = "/{id}",method= RequestMethod.GET) 43 | public String getBrokerById(@PathVariable int id,String callback) { 44 | pageData = new PageData(); 45 | Broker broker = service.getBrokerById(id); 46 | List topicList = service.getTopics(); 47 | int partitionCount = 0; 48 | for (Topic topic : 49 | topicList) { 50 | partitionCount += topic.getLeaderPartitions(id).size(); 51 | } 52 | 53 | pageData.setData(broker); 54 | 55 | JSONObject extend = new JSONObject(); 56 | extend.put("partitionCount", partitionCount); 57 | extend.put("topicCount", topicList.size()); 58 | pageData.setExtend(extend); 59 | return formatData(callback, pageData); 60 | } 61 | 62 | @RequestMapping("/summary") 63 | public String getSummary(String callback) { 64 | pageData = new PageData(); 65 | List brokerList = service.getBrokers(); 66 | JSONObject result = new JSONObject(); 67 | result.put("brokerTotal", brokerList.size()); 68 | result.put("brokerAbleTotal", brokerList.size()); 69 | 70 | pageData.setData(result); 71 | return formatData(callback, pageData); 72 | } 73 | 74 | } 75 | 
-------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/ConsumerApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.CustomConsumerGroupService; 4 | import co.solinx.kafka.monitor.model.Consumer; 5 | import co.solinx.kafka.monitor.model.PageData; 6 | import org.springframework.web.bind.annotation.PathVariable; 7 | import org.springframework.web.bind.annotation.RequestMapping; 8 | import org.springframework.web.bind.annotation.RequestMethod; 9 | import org.springframework.web.bind.annotation.RestController; 10 | 11 | import java.util.List; 12 | 13 | /** 14 | * @author linxin 15 | * @version v1.0 16 | * Copyright (c) 2015 by solinx 17 | * @date 2017/12/27. 18 | */ 19 | @RestController 20 | @RequestMapping("/data/consumerServlet") 21 | public class ConsumerApi extends AbstractApi { 22 | 23 | @RequestMapping 24 | public String consumers(String callback) { 25 | pageData = new PageData(); 26 | CustomConsumerGroupService consumerGroupService = new CustomConsumerGroupService(); 27 | List consumerList = consumers(consumerGroupService); 28 | pageData.setData(consumerList); 29 | return formatData(callback, pageData); 30 | } 31 | 32 | @RequestMapping(value = "/{topicName}", method = RequestMethod.GET) 33 | public String consumersByTopicName(@PathVariable String topicName, String callback) { 34 | pageData = new PageData(); 35 | CustomConsumerGroupService consumerGroupService = new CustomConsumerGroupService(); 36 | List consumerList = consumers(consumerGroupService, topicName); 37 | 38 | pageData.setData(consumerList); 39 | return formatData(callback, pageData); 40 | } 41 | 42 | public List consumers(CustomConsumerGroupService consumerGroupService) { 43 | return consumerGroupService.getConsumerList(); 44 | } 45 | 46 | public List consumers(CustomConsumerGroupService 
consumerGroupService, String topicName) { 47 | return consumerGroupService.getConsumerList(topicName); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/HistoryMetricsApi.java: -------------------------------------------------------------------------------- 1 | //package co.solinx.kafka.monitor.api; 2 | // 3 | //import co.solinx.kafka.monitor.common.DateUtils; 4 | //import co.solinx.kafka.monitor.db.DBUtils; 5 | //import co.solinx.kafka.monitor.model.KafkaMonitorData; 6 | //import com.alibaba.fastjson.JSONObject; 7 | //import org.slf4j.Logger; 8 | //import org.slf4j.LoggerFactory; 9 | //import org.springframework.web.bind.annotation.PathVariable; 10 | //import org.springframework.web.bind.annotation.RequestMapping; 11 | //import org.springframework.web.bind.annotation.RestController; 12 | // 13 | //import javax.ws.rs.GET; 14 | //import javax.ws.rs.Path; 15 | //import javax.ws.rs.PathParam; 16 | //import javax.ws.rs.QueryParam; 17 | //import java.util.ArrayList; 18 | //import java.util.List; 19 | // 20 | ///** 21 | // * @author linxin 22 | // * @version v1.0 23 | // * Copyright (c) 2015 by solinx 24 | // * @date 2017/12/27. 
25 | // */ 26 | //@RestController 27 | //@RequestMapping("/historyMetricsServlet") 28 | //public class HistoryMetricsApi extends AbstractApi { 29 | // 30 | // private DBUtils dbUtils = new DBUtils<>(KafkaMonitorData.class); 31 | // private Logger logger = LoggerFactory.getLogger(HistoryMetricsApi.class); 32 | // 33 | // @RequestMapping("/{type}") 34 | // public String historyData(@PathVariable String type, @QueryParam("startTime") String startTime, 35 | // @QueryParam("endTime") String endTime, @QueryParam("callback") String callback) { 36 | // String where = " where currentTime >= '" + startTime + "' and currentTime <='" + endTime + "'"; 37 | // 38 | // JSONObject resultObject; 39 | // 40 | // try { 41 | // 42 | // List dataList = dbUtils.query("select * from kafkaMonitorData " + where); 43 | // 44 | // resultObject = getMetricsByType(dataList, type); 45 | // pageData.setData(resultObject); 46 | // return formatData(callback); 47 | // } catch (Exception e) { 48 | // e.printStackTrace(); 49 | // } 50 | // return null; 51 | // } 52 | // 53 | // public JSONObject getMetricsByType(List dataList, String type) { 54 | // 55 | // List consumerAvgList = new ArrayList<>(); 56 | // List producerAvgList = new ArrayList<>(); 57 | // List consumerErrorList = new ArrayList<>(); 58 | // List producerErrorList = new ArrayList<>(); 59 | // List delayList = new ArrayList<>(); 60 | // List delayedRateList = new ArrayList<>(); 61 | // List delayMsAvgList = new ArrayList<>(); 62 | // List delayMsMaxList = new ArrayList<>(); 63 | // List duplicatedList = new ArrayList<>(); 64 | // List duplicatedRateList = new ArrayList<>(); 65 | // List lostRateList = new ArrayList<>(); 66 | // List lostTotalList = new ArrayList<>(); 67 | // List producerTotalList = new ArrayList<>(); 68 | // List consumerTotalList = new ArrayList<>(); 69 | // 70 | // 71 | // List timeList = new ArrayList<>(); 72 | // for (KafkaMonitorData model : 73 | // dataList) { 74 | // 
consumerAvgList.add(model.getConsumeAvailabilityAvg()); 75 | // producerAvgList.add(model.getProduceAvailabilityAvg()); 76 | // consumerErrorList.add(model.getConsumerError()); 77 | // consumerTotalList.add(model.getConsumerTotal()); 78 | // delayList.add(model.getDelay()); 79 | // delayedRateList.add(model.getDelayedRate()); 80 | // delayMsAvgList.add(model.getDelayMsAvg()); 81 | // delayMsMaxList.add(model.getDelayMsMax()); 82 | // duplicatedList.add(model.getDuplicated()); 83 | // duplicatedRateList.add(model.getDuplicatedRate()); 84 | // lostRateList.add(model.getLostRate()); 85 | // lostTotalList.add(model.getLostTotal()); 86 | // producerErrorList.add(model.getProducerError()); 87 | // producerTotalList.add(model.getProducerTotal()); 88 | // 89 | // timeList.add(DateUtils.getTimeStr(model.getCurrentTime(), DateUtils.HYPHEN_DISPLAY_DATE)); 90 | // } 91 | // JSONObject resultObj = new JSONObject(); 92 | // 93 | // switch (type) { 94 | // case "total": 95 | // resultObj.put("producerTotal", producerTotalList); 96 | // resultObj.put("consumerTotal", consumerTotalList); 97 | // break; 98 | // case "avg": 99 | // resultObj.put("producerAvg", producerAvgList); 100 | // resultObj.put("consumerAvg", consumerAvgList); 101 | // break; 102 | // case "delayed": 103 | // resultObj.put("delayed", delayList); 104 | // resultObj.put("duplicated", duplicatedList); 105 | // break; 106 | // case "error": 107 | // resultObj.put("lostTotal", lostTotalList); 108 | // resultObj.put("consumerError", consumerErrorList); 109 | // resultObj.put("producerError", producerErrorList); 110 | // break; 111 | // case "rate": 112 | // resultObj.put("duplicatedRate", duplicatedRateList); 113 | // resultObj.put("lostRate", lostRateList); 114 | // resultObj.put("delayedRate", delayedRateList); 115 | // break; 116 | // case "delay": 117 | // resultObj.put("delayMsAvg", delayMsAvgList); 118 | // resultObj.put("delayMsMax", delayMsMaxList); 119 | // break; 120 | // default: 121 | // break; 122 | // } 
123 | // 124 | // resultObj.put("time", timeList); 125 | // return resultObj; 126 | // } 127 | //} 128 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/InitBean.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.InitService; 4 | import org.springframework.beans.factory.InitializingBean; 5 | 6 | /** 7 | * @auther linx 8 | * @create 2018-04-01 11:47 9 | **/ 10 | public class InitBean implements InitializingBean { 11 | 12 | @Override 13 | public void afterPropertiesSet() { 14 | InitService initService = new InitService(); 15 | initService.init(); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/MessageApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.MessageService; 4 | import co.solinx.kafka.monitor.model.Message; 5 | import co.solinx.kafka.monitor.model.PageData; 6 | import com.alibaba.fastjson.JSONArray; 7 | import org.springframework.web.bind.annotation.RequestMapping; 8 | import org.springframework.web.bind.annotation.RestController; 9 | 10 | import java.util.List; 11 | 12 | @RestController 13 | @RequestMapping("/data/messageServlet") 14 | public class MessageApi extends AbstractApi { 15 | 16 | @RequestMapping 17 | public String message( String topic, 18 | int partition, 19 | int offset, 20 | int messageSum, 21 | String callback) { 22 | pageData = new PageData(); 23 | MessageService service = new MessageService(); 24 | List messageList = service.getMesage(topic, partition, offset, messageSum); 25 | JSONArray array = new JSONArray(); 26 | for (Message message : 27 | messageList) { 28 | array.add(message); 29 | } 30 | 
pageData.setData(array); 31 | return formatData(callback, pageData); 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/MetricsApi.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.MetricsReportService; 4 | import co.solinx.kafka.monitor.model.PageData; 5 | import com.alibaba.fastjson.JSONObject; 6 | import org.springframework.web.bind.annotation.PathVariable; 7 | import org.springframework.web.bind.annotation.RequestMapping; 8 | import org.springframework.web.bind.annotation.RestController; 9 | 10 | 11 | @RestController 12 | @RequestMapping("/data/metricsServlet") 13 | public class MetricsApi extends AbstractApi { 14 | 15 | @RequestMapping("/{type}") 16 | public String metrics(@PathVariable("type") String type, 17 | String callback) { 18 | pageData = new PageData(); 19 | MetricsReportService metricsServlet = MetricsReportService.getMetricsService(); 20 | JSONObject resultList = new JSONObject(); 21 | 22 | switch (type) { 23 | case "total": 24 | resultList = metricsServlet.getTotalMetrics(); 25 | break; 26 | case "avg": 27 | resultList = metricsServlet.getAvgMetrics(); 28 | break; 29 | case "delayed": 30 | resultList = metricsServlet.getDelayedMetrics(); 31 | break; 32 | case "error": 33 | resultList = metricsServlet.getErrorMetrics(); 34 | break; 35 | case "rate": 36 | resultList = metricsServlet.getRateMetrics(); 37 | break; 38 | case "delay": 39 | resultList = metricsServlet.getDelayMetrics(); 40 | break; 41 | default: 42 | break; 43 | } 44 | 45 | pageData.setData(resultList); 46 | return formatData(callback, pageData); 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/api/PartitionsApi.java: 
-------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.api; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.PageData; 5 | import co.solinx.kafka.monitor.model.Partition; 6 | import co.solinx.kafka.monitor.model.Topic; 7 | import com.alibaba.fastjson.JSONArray; 8 | import com.alibaba.fastjson.JSONObject; 9 | import org.springframework.web.bind.annotation.RequestMapping; 10 | import org.springframework.web.bind.annotation.RestController; 11 | 12 | import java.util.Arrays; 13 | 14 | /** 15 | * @author linxin 16 | * @version v1.0 17 | * Copyright (c) 2015 by solinx 18 | * @date 2017/12/27. 19 | */ 20 | @RestController 21 | @RequestMapping("/data/partitionServlet") 22 | public class PartitionsApi extends AbstractApi { 23 | 24 | private KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 25 | 26 | 27 | @RequestMapping 28 | public String partition(String topicName, String callback) { 29 | pageData = new PageData(); 30 | Topic topic = service.getTopic(topicName); 31 | JSONArray array = new JSONArray(); 32 | for (Partition tp : 33 | topic.getPartitionMap().values()) { 34 | JSONObject part = new JSONObject(); 35 | part.put("id", tp.getId()); 36 | part.put("firstOffset", tp.getFirstOffset()); 37 | part.put("lastOffset", tp.getSize()); 38 | part.put("size", tp.getSize() - tp.getFirstOffset()); 39 | //是否leader 40 | part.put("leader", tp.getLeaderId()); 41 | //副本集 42 | part.put("replicas", Arrays.toString(tp.getReplicasArray())); 43 | //同步副本 44 | part.put("inSyncReplicas", Arrays.toString(tp.getIsr())); 45 | //首先副本是否为leader 46 | part.put("leaderPreferred", String.valueOf(tp.isLeaderPreferred())); 47 | //是否复制中 48 | part.put("underReplicated", String.valueOf(tp.isUnderReplicated())); 49 | array.add(part); 50 | } 51 | pageData.setData(array); 52 | return formatData(callback, pageData); 53 | } 54 | } 55 | 
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/api/TopicsApi.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.api;

import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService;
import co.solinx.kafka.monitor.core.service.TopicService;
import co.solinx.kafka.monitor.model.PageData;
import co.solinx.kafka.monitor.model.Topic;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;

import java.util.Arrays;
import java.util.List;

/**
 * REST endpoints for listing, inspecting, creating and deleting Kafka topics.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2017/12/27.
 */
@RestController
@RequestMapping("/data/topicServlet")
public class TopicsApi extends AbstractApi {

    private final KafkaBaseInfoService service = KafkaBaseInfoService.getInstance();
    private final TopicService topicService = new TopicService();
    private static final Logger logger = LoggerFactory.getLogger(TopicsApi.class);

    /**
     * Lists all topics with their partition count, preferred-replica rate and
     * replication state.
     */
    @RequestMapping
    public String topics(String callback) {
        // BUGFIX: local PageData - the controller is a Spring singleton, so the
        // inherited instance field was shared (raced) between concurrent requests.
        PageData pageData = new PageData();
        List<Topic> topicList = service.getTopics();
        JSONArray array = new JSONArray();
        for (Topic topic : topicList) {
            JSONObject temp = new JSONObject();
            // partition count is integral; was previously a double and rendered as "3.0",
            // inconsistent with summary() which reports an int
            int partitionSize = topic.getPartitionMap().size();

            temp.put("name", topic.getName());
            temp.put("partitionTotal", partitionSize);

            // preferred-replica rate (first replica is the leader); 100% is optimal
            temp.put("preferred", topic.getPreferred());
            // number of partitions currently replicating; should normally be 0
            temp.put("underReplicated", topic.getUnderReplicated());
            JSONObject configObj = (JSONObject) topic.getConfig().get("config");

            // collapsed redundant "cond ? true : false"
            temp.put("customConfig", !configObj.isEmpty());

            array.add(temp);
        }

        pageData.setData(array);
        return formatData(callback, pageData);
    }

    /** Aggregate topic/partition counts across the cluster. */
    @RequestMapping("/summary")
    public String summary(String callback) {
        PageData pageData = new PageData();
        List<Topic> topicList = service.getTopics();
        JSONObject result = new JSONObject();
        result.put("topicTotal", topicList.size());
        result.put("partitionTotal", topicList.stream().mapToInt(t -> t.getPartitionMap().size()).sum());

        pageData.setData(result);
        return formatData(callback, pageData);
    }

    /** Detail view for a single topic. */
    @RequestMapping(value = "/{topicName}", method = RequestMethod.GET)
    public String topic(@PathVariable String topicName, String callback) {
        PageData pageData = new PageData();

        Topic topic = service.getTopic(topicName);
        JSONObject jsonObject = new JSONObject();

        jsonObject.put("name", topic.getName());
        jsonObject.put("PartitionTotal", topic.getPartitionMap().size());
        jsonObject.put("totalSize", topic.getSize());
        jsonObject.put("availableSize", topic.getAvailableSize());
        jsonObject.put("PreferredReplicas", topic.getPreferredReplicaPercent() * 100 + "%");
        jsonObject.put("UnderReplicatedPartitions",
                Arrays.toString(topic.getUnderReplicatedPartitions().stream().mapToInt(p -> p.getId()).toArray()));

        pageData.setData(jsonObject);
        return formatData(callback, pageData);
    }

    /**
     * Creates a topic; failures are reported in the PageData status/error fields.
     */
    @RequestMapping(value = "/create", method = RequestMethod.GET)
    public String create(String topic,
                         int replicaFactor,
                         int partitions, String callback) {
        PageData pageData = new PageData();
        try {
            // was Integer.valueOf(partitions): redundant boxing of an int parameter
            topicService.createTopic(topic, partitions, replicaFactor);
        } catch (Exception e) {
            pageData.setStatus(500);
            pageData.setError(e.getMessage());
            logger.error("添加topic异常", e);
        }
        return formatData(callback, pageData);
    }

    /**
     * Deletes a topic. Now reports errors through PageData like create()
     * instead of letting them propagate unformatted.
     */
    @RequestMapping(value = "/delete", method = RequestMethod.GET)
    public String delete(String topic, String callback) {
        PageData pageData = new PageData();
        try {
            topicService.deleteTopic(topic);
        } catch (Exception e) {
            pageData.setStatus(500);
            pageData.setError(e.getMessage());
            logger.error("删除topic异常", e);
        }
        return formatData(callback, pageData);
    }
}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/api/kafkaInfoApi.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.api;

import co.solinx.kafka.monitor.core.service.ConfigService;
import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService;
import co.solinx.kafka.monitor.model.Broker;
import co.solinx.kafka.monitor.model.PageData;
import co.solinx.kafka.monitor.model.Topic;
import com.alibaba.fastjson.JSONObject;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.List;

/**
 * Cluster overview endpoint: broker/topic/partition counts, overall cluster
 * state and the ZooKeeper configuration.
 * (Class name kept lowercase for compatibility with existing registrations.)
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2017/12/27.
 */
@RestController
@RequestMapping("/data/kafkaInfoServlet")
public class kafkaInfoApi extends AbstractApi {

    private final KafkaBaseInfoService service = KafkaBaseInfoService.getInstance();

    /** Builds the cluster summary payload. */
    @RequestMapping
    public String kafkaInfo(String callback) {
        // local PageData: avoid the singleton-controller shared-field race
        PageData pageData = new PageData();
        List<Broker> brokersMap = service.getBrokers();
        List<Topic> topicList = service.getTopics();
        JSONObject zkConfigObj = ConfigService.rootObj;

        JSONObject extend = new JSONObject();

        // reuse the existing service field instead of a second getInstance() call
        extend.put("servers", service.randomBroker().getHost());
        extend.put("partitionNum", topicList.stream().mapToInt(t -> t.getPartitionMap().size()).sum());
        extend.put("clusterState", clusterState(topicList));
        extend.put("topicNum", topicList.size());
        extend.put("brokerNum", brokersMap.size());
        extend.put("zkConfig", zkConfigObj);
        pageData.setExtend(extend);
        return formatData(callback, pageData);
    }

    /**
     * A cluster is considered healthy when every topic has 100% preferred leaders.
     */
    public boolean clusterState(List<Topic> topicList) {
        for (Topic topic : topicList) {
            if ((int) topic.getPreferred() != 100) {
                return false; // one unhealthy topic decides the result
            }
        }
        return true;
    }

}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/common/JettyUtils.java:
// --------------------------------------------------------------------------------
//package co.solinx.kafka.monitor.common;
//
//import org.eclipse.jetty.servlet.DefaultServlet;
//import org.eclipse.jetty.servlet.ServletContextHandler;
//import org.eclipse.jetty.servlet.ServletHolder;
//import org.eclipse.jetty.websocket.WebSocketServlet;
//
//import javax.servlet.Servlet;
//import javax.servlet.ServletException;
//import javax.servlet.annotation.WebServlet;
//import javax.servlet.http.HttpServlet;
//import
javax.servlet.http.HttpServletRequest; 13 | //import javax.servlet.http.HttpServletResponse; 14 | //import java.io.IOException; 15 | //import java.net.URL; 16 | // 17 | ///** 18 | // * Created by xin on 2017-01-07. 19 | // * 20 | // * @author linx 21 | // */ 22 | //public class JettyUtils { 23 | // 24 | // 25 | // public static ServletContextHandler createServletHandler(Servlet servlet) { 26 | // return createServletHandler(servlet, servlet.getClass().getAnnotation(WebServlet.class).urlPatterns()[0]); 27 | // } 28 | // 29 | // /** 30 | // * ServletHandler 31 | // * 32 | // * @param path 33 | // * @return 34 | // */ 35 | // public static ServletContextHandler createServletHandler(Servlet servlet, String path) { 36 | // 37 | // ServletContextHandler handler = new ServletContextHandler(); 38 | //// ServletHolder holder = new ServletHolder(servlet); 39 | //// handler.addServlet(holder, path); 40 | //// handler.setContextPath("/data"); 41 | // 42 | // return handler; 43 | // } 44 | // 45 | // 46 | // /** 47 | // * WebSocket 48 | // * 49 | // * @param servlet 50 | // * @param path 51 | // * @return 52 | // */ 53 | // public static ServletContextHandler createWebSockethandler(WebSocketServlet servlet, String path) { 54 | // ServletContextHandler handler = new ServletContextHandler(); 55 | //// ServletHolder holder = new ServletHolder(servlet); 56 | //// handler.addServlet(holder, "/"); 57 | //// handler.setContextPath(path); 58 | // return handler; 59 | // } 60 | // 61 | // /** 62 | // * 静态资源 63 | // * 64 | // * @param resourceBase 65 | // * @param path 66 | // * @return 67 | // */ 68 | // public static ServletContextHandler createStaticHandler(String resourceBase, String path) { 69 | // ServletContextHandler handler = new ServletContextHandler(); 70 | //// handler.setInitParameter("org.eclipse.jetty.servlet.Default.gzip", "false"); 71 | //// DefaultServlet staticServlet = new DefaultServlet(); 72 | //// ServletHolder holder = new ServletHolder(staticServlet); 73 | //// 
//// URL url = JettyUtils.class.getClassLoader().getResource(resourceBase);
//// holder.setInitParameter("resourceBase", url.toString());
//// handler.setContextPath("/");
//// handler.addServlet(holder, path);
// return handler;
// }
//
//
// /**
//  * Servlet
//  *
//  * @param context
//  * @return
//  */
// public static HttpServlet createServlet(String context) {
//
//     HttpServlet servlet = new HttpServlet() {
//         @Override
//         protected void doGet(HttpServletRequest req, HttpServletResponse resp)
//                 throws ServletException, IOException {
//
//             resp.setContentType("text/html;charset=utf-8");
//             resp.setStatus(HttpServletResponse.SC_OK);
//             resp.setHeader("Cache-Control", "no-cache, no-store, must-revalidate");
//             resp.getWriter().println(context);
//         }
//
//         @Override
//         protected void doTrace(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
//             resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
//         }
//     };
//     return servlet;
// }
//}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/common/KafkaUtils.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.common;

import co.solinx.kafka.monitor.core.service.ConfigService;
import co.solinx.kafka.monitor.model.Broker;
import co.solinx.kafka.monitor.model.Partition;
import co.solinx.kafka.monitor.model.Topic;
import co.solinx.kafka.monitor.model.ZooConfig;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.network.BlockingChannel;
import kafka.utils.ZkUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Kafka helper utilities: ZkUtils construction, broker channels and offset /
 * metadata requests over the legacy blocking-channel protocol.
 *
 * @author linx
 * @create 2018-01-28 21:56
 **/
public class KafkaUtils {

    private static final Logger logger = LoggerFactory.getLogger(KafkaUtils.class);

    /** Builds a ZkUtils handle from the configured ZooKeeper host and timeouts. */
    public static ZkUtils getZkUtils() {
        ZooConfig zooConfig = ConfigService.zooConfig;
        String ip = zooConfig.getHost();
        int sessionTimeout = Integer.parseInt(zooConfig.getSessionTimeoutMs());
        // parseInt (was Integer.valueOf): avoids needless boxing, consistent with the line above
        int connTimeout = Integer.parseInt(zooConfig.getConnectionTimeoutMs());
        return ZkUtils.apply(ip, sessionTimeout, connTimeout, false);
    }

    /**
     * Opens (and connects) a blocking channel to the given broker.
     *
     * @param broker broker to connect to
     * @return a connected channel; callers are responsible for disconnecting it
     */
    public static BlockingChannel getChannel(Broker broker) {
        BlockingChannel channel = new BlockingChannel(broker.getHost(),
                broker.getPort(), BlockingChannel.UseDefaultBufferSize(),
                BlockingChannel.UseDefaultBufferSize(), 10000);

        channel.connect();
        return channel;
    }

    /**
     * Fetches topic metadata over an already-connected channel.
     * Made static for consistency with the rest of this utility class
     * (source-compatible: instance-style calls still compile).
     */
    public static TopicMetadataResponse topicMetadataRequest(BlockingChannel channel, String[] topics) {
        TopicMetadataRequest request = new TopicMetadataRequest((short) 0, 0, "kafkaMonitor", Arrays.asList(topics));
        channel.send(request);
        final kafka.api.TopicMetadataResponse underlyingResponse =
                kafka.api.TopicMetadataResponse.readFrom(channel.receive().payload());
        return new TopicMetadataResponse(underlyingResponse);
    }

    /**
     * Requests partition offsets for a topic from one broker.
     *
     * @param broker           broker to query; null yields null (kept for caller compatibility)
     * @param topic            topic whose offsets are requested
     * @param brokerPartitions partitions hosted on that broker
     * @param time             offset request timestamp (-1 latest / -2 earliest)
     * @return the offset response, or null when broker is null
     */
    public static OffsetResponse sendOffsetRequest(Broker broker, Topic topic,
                                                   List<Partition> brokerPartitions, long time) {

        PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(time, 1);

        final OffsetRequest offsetRequest = new OffsetRequest(
                brokerPartitions.stream()
                        .collect(Collectors.toMap(
                                partition -> new TopicAndPartition(topic.getName(), partition.getId()),
                                partition -> requestInfo)), (short) 0, "kafkaMonitor");

        logger.debug("Sending offset request: {}", offsetRequest);
        if (broker == null) {
            return null;
        }
        BlockingChannel channel = getChannel(broker);
        try {
            channel.send(offsetRequest.underlying());
            final kafka.api.OffsetResponse underlyingResponse =
                    kafka.api.OffsetResponse.readFrom(channel.receive().payload());
            return new OffsetResponse(underlyingResponse);
        } finally {
            // BUGFIX: the channel previously leaked when send/receive threw
            channel.disconnect();
        }
    }

}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/common/Utils.java:
// --------------------------------------------------------------------------------
// (head of Utils.java; the class body continues in the next chunk)
// package co.solinx.kafka.monitor.common;
// import kafka.utils.ZkUtils;
// import org.apache.kafka.common.security.JaasUtils;
// import org.slf4j.Logger;
// import org.slf4j.LoggerFactory;
// import scala.collection.JavaConversions;
// import java.util.Arrays;
// kafkaUtils - @author linxin, @version v1.0, Copyright (c) 2015 by solinx, @date 2016/12/23.
17 | */ 18 | public class Utils { 19 | 20 | private static final Logger logger= LoggerFactory.getLogger(Utils.class); 21 | 22 | public static final int ZK_CONNECTION_TIMEOUT_MS = 30_000; 23 | public static final int ZK_SESSION_TIMEOUT_MS = 30_000; 24 | 25 | 26 | public static int getPartitionNumByTopic(String zk,String topic){ 27 | ZkUtils zkUtils=ZkUtils.apply(zk,ZK_SESSION_TIMEOUT_MS,ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled()); 28 | 29 | try { 30 | return zkUtils.getPartitionsForTopics(JavaConversions.asScalaBuffer(Arrays.asList(topic))).apply(topic).size(); 31 | }finally { 32 | zkUtils.close(); 33 | } 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/common/WebUI.java: -------------------------------------------------------------------------------- 1 | //package co.solinx.kafka.monitor.common; 2 | // 3 | //import org.eclipse.jetty.server.Server; 4 | //import org.eclipse.jetty.server.handler.ContextHandlerCollection; 5 | //import org.eclipse.jetty.servlet.DefaultServlet; 6 | //import org.eclipse.jetty.servlet.ServletContextHandler; 7 | //import org.eclipse.jetty.servlet.ServletHolder; 8 | //import org.eclipse.jetty.websocket.WebSocketHandler; 9 | //import org.eclipse.jetty.websocket.WebSocketServlet; 10 | //import org.slf4j.Logger; 11 | //import org.slf4j.LoggerFactory; 12 | // 13 | //import javax.servlet.Servlet; 14 | //import java.net.InetSocketAddress; 15 | //import java.net.URL; 16 | // 17 | ///** 18 | // * Created by xin on 2017-01-07. 
19 | // */ 20 | //public class WebUI { 21 | // 22 | // private int port; 23 | // private String name; 24 | // private String basePath; 25 | // private Server server; 26 | // private String host; 27 | // private ContextHandlerCollection collection = new ContextHandlerCollection(); 28 | // private static Logger logger = LoggerFactory.getLogger(WebUI.class); 29 | // 30 | // public WebUI(String host, int port, String name, String basePath) { 31 | // this.port = port; 32 | // this.name = name; 33 | // this.basePath = basePath; 34 | // this.host = host; 35 | // } 36 | // 37 | // 38 | // public void attachPage(Servlet name, String path) { 39 | //// attachHandler(JettyUtils.createServletHandler(name, path)); 40 | // } 41 | // 42 | // /** 43 | // * 添加页面 44 | // */ 45 | // public void attachPage(Servlet servlet) { 46 | // 47 | // 48 | //// attachHandler(JettyUtils.createServletHandler(servlet)); 49 | // 50 | // } 51 | // 52 | // /** 53 | // * 添加WebSocket服务 54 | // * 55 | // * @param webSocketServlet WebSocket服务 56 | // * @param path 57 | // */ 58 | // public void attachWebSocket(WebSocketServlet webSocketServlet, String path) { 59 | //// attachHandler(JettyUtils.createWebSockethandler(webSocketServlet, path)); 60 | // } 61 | // 62 | // 63 | // public void attachHandler(ServletContextHandler handler) { 64 | //// collection.addHandler(handler); 65 | // 66 | // } 67 | // 68 | // public void attachHandler(WebSocketHandler handler) { 69 | // collection.addHandler(handler); 70 | // } 71 | // 72 | // public ServletContextHandler staticRsource() { 73 | // ServletContextHandler handler = new ServletContextHandler(); 74 | // handler.setInitParameter("org.eclipse.jetty.servlet.Default.gzip", "false"); 75 | // DefaultServlet staticServlet = new DefaultServlet(); 76 | // ServletHolder holder = new ServletHolder(staticServlet); 77 | // URL url = JettyUtils.class.getClassLoader().getResource("static"); 78 | // holder.setInitParameter("resourceBase", url.toString()); 79 | // 
handler.setContextPath("/"); 80 | // handler.addServlet(holder, "/"); 81 | // return handler; 82 | // } 83 | // 84 | // public ServletContextHandler jerseyHandler() { 85 | // ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); 86 | // context.setContextPath("/"); 87 | // ServletHolder servlet = context.addServlet( 88 | // org.glassfish.jersey.servlet.ServletContainer.class, "/*"); 89 | // servlet.setInitOrder(1); 90 | // servlet.setInitParameter("jersey.config.server.provider.packages", "co.solinx.kafka.monitor.api"); 91 | // context.setContextPath("/data"); 92 | // context.addServlet(servlet, "/"); 93 | // return context; 94 | // } 95 | // 96 | // 97 | // public void bind() throws Exception { 98 | // 99 | // server = new Server(new InetSocketAddress(host, port)); 100 | // 101 | // try { 102 | // 103 | // ContextHandlerCollection handlerCollection = new ContextHandlerCollection(); 104 | // handlerCollection.addHandler(staticRsource()); 105 | // handlerCollection.addHandler(jerseyHandler()); 106 | //// server.setHandler(handlerCollection); 107 | //// server.start(); 108 | // logger.debug("name : {} basePath : {}", name, basePath); 109 | // } catch (Exception e) { 110 | // throw e; 111 | // } 112 | // } 113 | // 114 | // 115 | // public static void main(String[] args) { 116 | // WebUI webUI = new WebUI("0.0.0.0", 5050, "WebUi", "/"); 117 | // 118 | // 119 | // try { 120 | // //webUI.attachHandler(JettyUtils.createStaticHandler(Constant.STATIC_RESOURCE_DIR,"/static")); 121 | // webUI.attachHandler(JettyUtils.createStaticHandler("static", "/")); 122 | //// webUI.attachPage(new BrokerServlet()); 123 | // webUI.bind(); 124 | // } catch (Exception e) { 125 | // e.printStackTrace(); 126 | // } 127 | // } 128 | //} 129 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/config/PreConfig.java: 
-------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.config; 2 | 3 | import co.solinx.kafka.monitor.api.InitBean; 4 | import org.springframework.context.annotation.Bean; 5 | import org.springframework.context.annotation.ComponentScan; 6 | import org.springframework.context.annotation.Configuration; 7 | 8 | /** 9 | * @auther linx 10 | * @create 2018-04-01 11:57 11 | **/ 12 | @Configuration 13 | @ComponentScan("co.solinx.kafka.monitor.api") 14 | public class PreConfig { 15 | 16 | @Bean 17 | InitBean initBean() { 18 | return new InitBean(); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/consumer/MonitorConsumer.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.consumer; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerRecord; 4 | import org.apache.kafka.clients.consumer.KafkaConsumer; 5 | 6 | import java.util.Arrays; 7 | import java.util.Iterator; 8 | import java.util.Properties; 9 | 10 | /** 11 | * @author linxin 12 | * @version v1.0 13 | * Copyright (c) 2015 by solinx 14 | * @date 2016/12/23. 
15 | */ 16 | public class MonitorConsumer { 17 | private KafkaConsumer consumer; 18 | private Iterator> recordIterator; 19 | 20 | 21 | public MonitorConsumer(String topic, Properties properties) { 22 | consumer = new KafkaConsumer<>(properties); 23 | consumer.subscribe(Arrays.asList(topic)); 24 | } 25 | 26 | public MonitorConsumerRecord receive() { 27 | if (recordIterator == null || !recordIterator.hasNext()) 28 | recordIterator = consumer.poll(Long.MAX_VALUE).iterator(); 29 | 30 | ConsumerRecord record = recordIterator.next(); 31 | return new MonitorConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.value()); 32 | } 33 | 34 | public void close() { 35 | consumer.close(); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/consumer/MonitorConsumerRecord.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.consumer; 2 | 3 | /** 4 | * @author linxin 5 | * @version v1.0 6 | * Copyright (c) 2015 by solinx 7 | * @date 2016/12/23. 
/**
 * Immutable value object for one consumed Kafka record:
 * topic, partition, offset, key and value.
 */
public class MonitorConsumerRecord {

    private final String topic;
    private final int partition;
    private final long offset;
    private final String key;
    private final String value;


    public MonitorConsumerRecord(String topic, int partition, long offset, String key, String value) {
        this.topic = topic;
        this.partition = partition;
        this.offset = offset;
        this.key = key;
        this.value = value;
    }


    /** Topic the record was read from. */
    public String getTopic() {
        return topic;
    }

    /** Partition the record was read from. */
    public int getPartition() {
        return partition;
    }

    /** Offset of the record within its partition. */
    public long getOffset() {
        return offset;
    }

    public String getKey() {
        return key;
    }

    public String getValue() {
        return value;
    }

    /**
     * Added for consistency with the sibling ProduceRecord, which already
     * provides a toString() in this format for logging.
     */
    @Override
    public String toString() {
        return "MonitorConsumerRecord{" +
                "topic='" + topic + '\'' +
                ", partition=" + partition +
                ", offset=" + offset +
                ", key='" + key + '\'' +
                ", value='" + value + '\'' +
                '}';
    }
}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/core/listener/BrokerListener.java:
// --------------------------------------------------------------------------------
// (head of BrokerListener.java; the class body continues in the next chunk)
// package co.solinx.kafka.monitor.core.listener;
// import co.solinx.kafka.monitor.model.Broker;
// import com.alibaba.fastjson.JSON;
// import kafka.utils.ZkUtils;
// import org.apache.commons.lang3.StringUtils;
// import org.apache.curator.framework.CuratorFramework;
// import org.apache.curator.framework.recipes.cache.ChildData;
// import org.apache.curator.framework.recipes.cache.PathChildrenCache;
// import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
// import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
// import org.slf4j.Logger;
// import org.slf4j.LoggerFactory;
// import java.util.Map;
// import java.util.TreeMap;
// @author linxin, @version v1.0, Copyright (c) 2015 by linx, @date 2018/1/26.
/**
 * Curator PathChildrenCache listener that keeps the broker cache in sync with
 * the ZooKeeper broker-ids path (original javadoc header opens in the
 * previous chunk).
 */
public class BrokerListener implements PathChildrenCacheListener {

    private final Logger logger = LoggerFactory.getLogger(BrokerListener.class);

    /**
     * Broker info cache, keyed by broker id. Kept public: other components read
     * it directly. The redundant `new TreeMap()` initializer was removed - the
     * constructor always overwrites it.
     */
    public Map<Integer, Broker> brokerCache;
    /**
     * Curator cache over the broker-ids znode children.
     */
    private final PathChildrenCache brokerPathCache;

    public BrokerListener(Map<Integer, Broker> brokerCache, PathChildrenCache brokerPathCache) {
        this.brokerCache = brokerCache;
        this.brokerPathCache = brokerPathCache;
    }

    /**
     * Adds (or replaces) a broker in the cache.
     *
     * @param broker broker to cache
     */
    public void addBroker(Broker broker) {
        brokerCache.put(broker.getId(), broker);
    }

    /**
     * Removes a broker from the cache.
     *
     * @param brokerID id of the broker that disappeared
     */
    public void removeBroker(int brokerID) {
        brokerCache.remove(brokerID);
    }

    /**
     * Extracts the numeric broker id from a znode path.
     *
     * @param childData event payload whose path ends with the broker id
     * @return the broker id
     */
    public int parseBrokerID(ChildData childData) {
        String brokerID = StringUtils.substringAfter(childData.getPath(), ZkUtils.BrokerIdsPath() + "/");
        return Integer.parseInt(brokerID);
    }

    /**
     * Deserializes a Broker from the znode data and stamps it with its id.
     *
     * @param childData event payload
     * @return the parsed broker
     */
    public Broker parseBroker(ChildData childData) {
        Broker broker = JSON.parseObject(childData.getData(), Broker.class);
        broker.setId(parseBrokerID(childData));
        return broker;
    }

    @Override
    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {

        logger.debug("BrokerListener {} {}", event.getType(), event.getData());

        switch (event.getType()) {
            case CHILD_REMOVED:
                removeBroker(parseBrokerID(event.getData()));
                break;
            case CHILD_ADDED:
            case CHILD_UPDATED:
                addBroker(parseBroker(event.getData()));
                break;
            case INITIALIZED:
                // seed the cache from the fully-populated path cache
                brokerPathCache.getCurrentData().stream()
                        .map(this::parseBroker)
                        .forEach(this::addBroker);
                break;
            default:
                break;
        }

    }
}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/core/listener/TopicListener.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.core.listener;

import co.solinx.kafka.monitor.model.Partition;
import co.solinx.kafka.monitor.model.Topic;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import kafka.utils.ZkUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.cache.TreeCacheListener;

import java.util.List;


/**
 * Curator tree listener that collects Topic objects (with their partition
 * state and replica assignment) as topic nodes appear under the broker
 * topics path.
 */
public class TopicListener implements TreeCacheListener {


    private final List<Topic> topicList;

    public TopicListener(List<Topic> topicList) {
        this.topicList = topicList;
    }

    @Override
    public void childEvent(CuratorFramework curator, TreeCacheEvent event) throws Exception {
        ChildData data = event.getData();
        if (data == null) {
            // connection-state events carry no data; the previous empty
            // NODE_ADDED branch (dead code) was removed
            return;
        }
        String path = data.getPath();
        // only the topic nodes themselves, not their partitions subtree
        if (path.contains(String.format("%s/", ZkUtils.BrokerTopicsPath())) && (!path.contains("partitions"))) {
            Topic topic = JSONObject.parseObject(data.getData(), Topic.class);
            String name = path.substring(path.lastIndexOf("/") + 1);
            topic.setName(name);

            // typo fix: local was "tPartiyions"; iterate primitive ids directly
            // instead of boxing through Object and casting back
            int[] partitionIds = topic.getPartitions().keySet().stream()
                    .mapToInt(Integer::parseInt).sorted().toArray();
            for (int id : partitionIds) {
                String partitionPath = String.format("%s/partitions/%s/state", path, id);
                String state = new String(curator.getData().forPath(partitionPath));
                Partition partition = JSONObject.parseObject(state, Partition.class);
                JSONArray replicas = topic.getPartitions().getJSONArray(String.valueOf(id));
                int[] replicasArray = new int[replicas.size()];
                for (int i = 0; i < replicas.size(); i++) {
                    replicasArray[i] = replicas.getInteger(i);
                }
                partition.setReplicasArray(replicasArray);

                topic.getPartitionMap().put(id, partition);
            }
            topicList.add(topic);
        }
    }
}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/core/produce/MonitorProducer.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.core.produce;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.Future;

/**
 * Thin synchronous wrapper around KafkaProducer.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/12/22.
 */
public class MonitorProducer {
    private final KafkaProducer<String, String> producer;


    public MonitorProducer(Properties properties) {
        producer = new KafkaProducer<>(properties);
    }

    /**
     * Sends one record and blocks until the broker acknowledges it.
     *
     * @param record what to send (topic, partition, key, value)
     * @return the broker-assigned record metadata
     * @throws Exception if the send fails or is interrupted
     */
    public RecordMetadata send(ProduceRecord record) throws Exception {
        ProducerRecord<String, String> producerRecord =
                new ProducerRecord<>(record.getTopic(), record.getPartition(), record.getKey(), record.getValue());

        Future<RecordMetadata> metadataFuture = producer.send(producerRecord);
        return metadataFuture.get(); // synchronous: wait for the ack
    }


    public void close() {
        producer.close();
    }

}
// --------------------------------------------------------------------------------
// /src/main/java/co/solinx/kafka/monitor/core/produce/ProduceRecord.java:
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.core.produce;

/**
 * Immutable description of a message to produce: topic, partition, key, value.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/12/22.
 */
public class ProduceRecord {

    private final String topic;
    private final int partition;
    private final String key;
    private final String value;

    public ProduceRecord(String topic, int partition, String key, String value) {
        this.topic = topic;
        this.partition = partition;
        this.key = key;
        this.value = value;
    }


    public String getTopic() {
        return topic;
    }

    public int getPartition() {
        return partition;
    }

    public String getKey() {
        return key;
    }

    public String getValue() {
        return value;
    }


    @Override
    public String toString() {
        return "ProduceRecord{" +
                "topic='" + topic + '\'' +
                ", partition=" + partition +
                ", key='" + key + '\'' +
                ", value='" + value + '\'' +
                '}';
    }
}
// --------------------------------------------------------------------------------
package co.solinx.kafka.monitor.core.service;

import co.solinx.kafka.monitor.model.KafkaConfig;
import co.solinx.kafka.monitor.model.MonitorConfig;
import co.solinx.kafka.monitor.model.ZooConfig;
import co.solinx.kafka.monitor.utils.JsonLoader;
import com.alibaba.fastjson.JSONObject;

import java.util.Properties;

/**
 * Loads kafkaMonitorConfig.json once (in a static initializer) and exposes
 * ready-made {@link Properties} for the Kafka producer/consumer clients and
 * for the Zookeeper connection.
 *
 * @author linxin
 * @version v1.0
 *          Copyright (c) 2015 by solinx
 * @date 2016/12/20.
 */
public class ConfigService {

    public static JSONObject rootObj;

    // Kafka client property keys. Made final: they are constants, and leaving
    // them mutable public statics invites accidental reassignment.
    public static final String bootstrapServers = "bootstrap.servers";
    public static final String keyDeserializer = "key.deserializer";
    public static final String valueSerializer = "value.serializer";
    public static final String keySerializer = "key.serializer";
    public static final String valueDeserializer = "value.deserializer";
    public static final String enableAutoCommit = "enable.auto.commit";
    public static final String autoCommitIntervalMs = "auto.commit.interval.ms";
    public static final String sessionTimeoutMs = "session.timeout.ms";
    public static final String groupId = "group.id";

    private static KafkaConfig kafkaconfig;
    public static ZooConfig zooConfig;
    public static MonitorConfig monitorConfig;

    static {
        // Parse the single JSON config file into the three typed sections.
        rootObj = JsonLoader.loadJSONFile(CuratorService.class.getClassLoader().getResourceAsStream("kafkaMonitorConfig.json"));
        kafkaconfig = JSONObject.parseObject(rootObj.getJSONObject("kafka").toJSONString(), KafkaConfig.class);
        zooConfig = JSONObject.parseObject(rootObj.getJSONObject("zookeeper").toJSONString(), ZooConfig.class);
        monitorConfig = JSONObject.parseObject(rootObj.toJSONString(), MonitorConfig.class);
    }

    /**
     * Kafka producer configuration; the bootstrap server is a randomly chosen
     * live broker.
     *
     * @return producer Properties
     */
    public static Properties getKafkaProducerConf() {
        Properties props = new Properties();
        props.setProperty(bootstrapServers, KafkaBaseInfoService.getInstance().reandomBrokerHost());
        // NOTE(review): deserializers are harmless but unnecessary for a producer.
        props.put(keyDeserializer, kafkaconfig.getKeyDeserializer());
        props.put(valueDeserializer, kafkaconfig.getValueDeserializer());
        props.put(keySerializer, kafkaconfig.getKeySerializer());
        props.put(valueSerializer, kafkaconfig.getValueSerializer());
        return props;
    }

    /**
     * Kafka consumer configuration.
     *
     * @return consumer Properties
     */
    public static Properties getKafkaConsumerConf() {
        Properties props = new Properties();
        props.setProperty(bootstrapServers, KafkaBaseInfoService.getInstance().reandomBrokerHost());
        props.put(enableAutoCommit, kafkaconfig.getEnableAutoCommit());
        props.put(autoCommitIntervalMs, kafkaconfig.getAutoCommitIntervalMs());
        props.put(sessionTimeoutMs, kafkaconfig.getSessionTimeoutMs());
        props.put(keyDeserializer, kafkaconfig.getKeyDeserializer());
        props.put(valueDeserializer, kafkaconfig.getValueDeserializer());
        props.put(keySerializer, kafkaconfig.getKeySerializer());
        props.put(valueSerializer, kafkaconfig.getValueSerializer());
        props.put(groupId, kafkaconfig.getGroupId());
        return props;
    }

    /**
     * Zookeeper connection configuration.
     *
     * @return zookeeper Properties
     */
    public static Properties getZkProper() {
        Properties props = new Properties();
        props.put(ZooConfig.HOST, zooConfig.getHost());
        props.put(ZooConfig.SESSION_TIMEOUT_MS, zooConfig.getSessionTimeoutMs());
        props.put(ZooConfig.CONNECTION_TIMEOUT_MS, zooConfig.getConnectionTimeoutMs());
        props.put(ZooConfig.RETRY_ONE_TIME, zooConfig.getRetryOneTime());
        return props;
    }

}
1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import com.alibaba.fastjson.JSON; 4 | import com.alibaba.fastjson.JSONObject; 5 | import co.solinx.kafka.monitor.core.consumer.MonitorConsumer; 6 | import co.solinx.kafka.monitor.core.consumer.MonitorConsumerRecord; 7 | import org.apache.kafka.common.MetricName; 8 | import org.apache.kafka.common.metrics.*; 9 | import org.apache.kafka.common.metrics.stats.*; 10 | import org.apache.kafka.common.utils.SystemTime; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | import java.math.BigDecimal; 15 | import java.text.DecimalFormat; 16 | import java.util.*; 17 | import java.util.concurrent.TimeUnit; 18 | 19 | /** 20 | * 监控程序consumer写入服务 21 | * 22 | * @author linxin 23 | * @version v1.0 24 | * Copyright (c) 2015 by solinx 25 | * @date 2016/12/23. 26 | */ 27 | public class ConsumerService { 28 | 29 | private static final Logger logger = LoggerFactory.getLogger(ConsumerService.class); 30 | private static final String METRIC_GROUP_NAME = "consumer-service"; 31 | 32 | private final String name = "monitor"; 33 | private final Thread thread; 34 | private final ConsumerMetrics sensor; 35 | private MonitorConsumer consumer; 36 | private String MONITOR_TOPIC; 37 | private int delayedTime = 20_000; 38 | 39 | 40 | public ConsumerService() { 41 | 42 | thread = new Thread(() -> { 43 | consumer(); 44 | }, name + "consumer-service"); 45 | 46 | Properties props = ConfigService.getKafkaConsumerConf(); 47 | MONITOR_TOPIC = ConfigService.monitorConfig.getMonitorTopic(); 48 | 49 | consumer = new MonitorConsumer(MONITOR_TOPIC, props); 50 | 51 | MetricConfig metricConfig = new MetricConfig().samples(60).timeWindow(1000, TimeUnit.MILLISECONDS); 52 | List reporterList = new ArrayList<>(); 53 | reporterList.add(new JmxReporter("kmf.services")); 54 | Metrics metrics = new Metrics(metricConfig, reporterList, new SystemTime()); 55 | Map tags = new HashMap<>(); 56 | tags.put("name", "monitor"); 57 | sensor = new 
ConsumerMetrics(metrics, tags); 58 | } 59 | 60 | public void start() { 61 | thread.start(); 62 | } 63 | 64 | //逻辑 producer生产消息时存入序号、时间 65 | //使用消息生产的时间与consumer消费的时间差作为延迟时间,延迟时间超过某个阀值时该消息延迟的 66 | //使用index判断消息的重复与失败,index为producer生产的序列, 67 | //consumer某个partition首次接收到消息时消息index=0将该partition的nextIndex设置为1, 68 | //consumer某个partition第二次收到消息时当前nextIndex为1,消息的index为1,正常情况下nextIndex==index,nextIndex=index+1 69 | //往后每次收到的消息消息正常时index与nextIndex都是相等的 70 | // indexnextIndex时,消息有丢失,丢失了index-nextIndex条消息,nextIndex=index+1 72 | public void consumer() { 73 | 74 | Map nextIndexs = new HashMap<>(); 75 | while (true) { 76 | 77 | MonitorConsumerRecord record; 78 | try { 79 | record = consumer.receive(); 80 | JSONObject messageObj = JSON.parseObject(record.getValue()); 81 | long msgTime = messageObj.getLong("time"); 82 | int msgPartition = record.getPartition(); 83 | long index = messageObj.getLong("index"); 84 | String topic = messageObj.getString("topic"); 85 | long curTime = System.currentTimeMillis(); 86 | //延迟时间 87 | long delayTime = curTime - msgTime; 88 | sensor.recordsDelay.record(delayTime); 89 | if (delayTime > delayedTime) { 90 | sensor.recordsDelayed.record(); 91 | } 92 | sensor.recordsConsume.record(); 93 | 94 | if (!nextIndexs.containsKey(msgPartition)) { 95 | nextIndexs.put(msgPartition, 1l); 96 | continue; 97 | } 98 | 99 | long nextIndex = nextIndexs.get(msgPartition); 100 | if (index == nextIndex) { 101 | nextIndexs.put(msgPartition, index + 1); 102 | } else if (index < nextIndex) { 103 | sensor.recordsDuplicated.record(); 104 | } else if (index > nextIndex) { 105 | sensor.recordsLost.record(index - nextIndex); 106 | nextIndexs.put(msgPartition, index + 1); 107 | } 108 | } catch (Exception e) { 109 | sensor.consumerError.record(); 110 | logger.warn("{}", e); 111 | 112 | continue; 113 | } 114 | } 115 | 116 | } 117 | 118 | private class ConsumerMetrics { 119 | public final Metrics metrics; 120 | private final Sensor bytesConsume; 121 | private final Sensor 
consumerError; 122 | private final Sensor recordsConsume; 123 | private final Sensor recordsDuplicated; 124 | private final Sensor recordsLost; 125 | private final Sensor recordsDelay; 126 | private final Sensor recordsDelayed; 127 | 128 | 129 | public ConsumerMetrics(Metrics metrics, final Map tags) { 130 | 131 | this.metrics = metrics; 132 | 133 | consumerError = metrics.sensor("consume-error"); 134 | consumerError.add(new MetricName("consume-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate()); 135 | consumerError.add(new MetricName("consume-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total()); 136 | 137 | recordsConsume = metrics.sensor("records-consumed"); 138 | recordsConsume.add(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, "The average number of records per second that are consumed", tags), new Rate()); 139 | recordsConsume.add(new MetricName("records-consumed-total", METRIC_GROUP_NAME, "The total number of records that are consumed", tags), new Total()); 140 | 141 | bytesConsume = metrics.sensor("bytes-consume"); 142 | recordsDuplicated = metrics.sensor("records-duplicated"); 143 | recordsDuplicated.add(new MetricName("records-duplicated-rate", METRIC_GROUP_NAME, "The average number of records per second that are duplicated", tags), new Rate()); 144 | recordsDuplicated.add(new MetricName("records-duplicated-total", METRIC_GROUP_NAME, "The total number of records that are duplicated", tags), new Total()); 145 | 146 | recordsDelay = metrics.sensor("records-delay"); 147 | recordsDelay.add(new MetricName("records-delay-ms-avg", METRIC_GROUP_NAME, "The average latency of records from producer to consumer", tags), new SampledStat(0) { 148 | @Override 149 | protected void update(Sample sample, MetricConfig config, double value, long timeMs) { 150 | sample.value += value; 151 | } 152 | 153 | @Override 154 | public double combine(List samples, MetricConfig config, long now) { 155 | 
double total = 0.0; 156 | double count = 0; 157 | for (int i = 0; i < samples.size(); i++) { 158 | Sample s = samples.get(i); 159 | total += s.value; 160 | count += s.eventCount; 161 | } 162 | BigDecimal bTotal = new BigDecimal(Double.toString(total)); 163 | BigDecimal bCount = new BigDecimal(Double.toString(count)); 164 | 165 | return count == 0 ? 0 : bTotal.divide(bCount, 3, BigDecimal.ROUND_HALF_UP).doubleValue(); 166 | } 167 | }); 168 | recordsDelay.add(new MetricName("records-delay-ms-max", METRIC_GROUP_NAME, "The maximum latency of records from producer to consumer", tags), new Max()); 169 | 170 | recordsLost = metrics.sensor("records-lost"); 171 | recordsLost.add(new MetricName("records-lost-rate", METRIC_GROUP_NAME, "The average number of records per second that are lost", tags), new Rate()); 172 | recordsLost.add(new MetricName("records-lost-total", METRIC_GROUP_NAME, "The total number of records that are lost", tags), new Total()); 173 | 174 | 175 | recordsDelayed = metrics.sensor("records-delayed"); 176 | recordsDelayed.add(new MetricName("records-delayed-rate", METRIC_GROUP_NAME, "The average number of records per second that are either lost or arrive after maximum allowed latency under SLA", tags), new Rate()); 177 | recordsDelayed.add(new MetricName("records-delayed-total", METRIC_GROUP_NAME, "The total number of records that are either lost or arrive after maximum allowed latency under SLA", tags), new Total()); 178 | 179 | 180 | metrics.addMetric(new MetricName("consume-availability-avg", METRIC_GROUP_NAME, "The average consume availability", tags), 181 | (config, now) -> { 182 | double recordsConsumedRate = sensor.metrics.metrics().get(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, tags)).value(); 183 | double recordsLostRate = sensor.metrics.metrics().get(new MetricName("records-lost-rate", METRIC_GROUP_NAME, tags)).value(); 184 | double recordsDelayedRate = sensor.metrics.metrics().get(new MetricName("records-delayed-rate", 
METRIC_GROUP_NAME, tags)).value(); 185 | 186 | if (new Double(recordsLostRate).isNaN()) { 187 | recordsLostRate = 0; 188 | } 189 | if (new Double(recordsDelayedRate).isNaN()) { 190 | recordsDelayedRate = 0; 191 | } 192 | 193 | double consumeAvailability = recordsConsumedRate + recordsLostRate > 0 ? 194 | (recordsConsumedRate - recordsDelayedRate) / (recordsConsumedRate + recordsLostRate) : 0; 195 | BigDecimal bg = new BigDecimal(consumeAvailability); 196 | return bg.setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue(); 197 | }); 198 | 199 | } 200 | } 201 | 202 | } 203 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/CuratorService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import com.alibaba.fastjson.JSONObject; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.retry.RetryOneTime; 7 | 8 | /** 9 | * @author linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2016/12/14. 
13 | */ 14 | public class CuratorService { 15 | 16 | static CuratorFramework curator = null; 17 | static JSONObject configObject = ConfigService.rootObj; 18 | 19 | private CuratorService() { 20 | 21 | } 22 | 23 | public static CuratorFramework getInstance() { 24 | if (curator == null) { 25 | JSONObject zkConfig = configObject.getJSONObject("zookeeper"); 26 | CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder(); 27 | builder.connectString(zkConfig.getString("host")); 28 | builder.sessionTimeoutMs(zkConfig.getInteger("SessionTimeoutMs")); 29 | builder.connectionTimeoutMs(zkConfig.getInteger("ConnectionTimeoutMs")); 30 | builder.retryPolicy(new RetryOneTime(zkConfig.getInteger("RetryOneTime"))); 31 | 32 | curator = builder.build(); 33 | curator.start(); 34 | } 35 | return curator; 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/CustomConsumerGroupService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import kafka.admin.AdminClient; 4 | import kafka.admin.ConsumerGroupCommand; 5 | import kafka.common.TopicAndPartition; 6 | import kafka.coordinator.GroupOverview; 7 | import org.apache.kafka.clients.consumer.Consumer; 8 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | import scala.Function1; 13 | import scala.Option; 14 | import scala.collection.JavaConversions; 15 | import scala.collection.Seq; 16 | 17 | import java.util.*; 18 | import java.util.stream.Collectors; 19 | import java.util.stream.Stream; 20 | 21 | /** 22 | * @author linxin 23 | * @version v1.0 24 | * Copyright (c) 2015 by solinx 25 | * @date 2016/12/20. 
26 | */ 27 | public class CustomConsumerGroupService { 28 | 29 | private Logger logger = LoggerFactory.getLogger(CustomConsumerGroupService.class); 30 | private AdminClient adminClient; 31 | private org.apache.kafka.clients.consumer.KafkaConsumer consumer; 32 | Properties props = new Properties(); 33 | 34 | public CustomConsumerGroupService() { 35 | props = ConfigService.getKafkaConsumerConf(); 36 | adminClient = AdminClient.create(props); 37 | 38 | logger.debug("{}", adminClient.bootstrapBrokers()); 39 | } 40 | 41 | 42 | /** 43 | * 取得所有group 44 | * 45 | * @return 46 | */ 47 | public List groupList() { 48 | return JavaConversions.asJavaList(adminClient.listAllConsumerGroupsFlattened()); 49 | } 50 | 51 | 52 | public void close() { 53 | adminClient.close(); 54 | } 55 | 56 | public ConsumerGroupCommand.ConsumerGroupCommandOptions opts() { 57 | return null; 58 | } 59 | 60 | /** 61 | * 取得group下面所有consumer 62 | * 63 | * @param group 64 | * @return 65 | */ 66 | public List getConsumerList(String group) { 67 | List consumerList = new ArrayList(); 68 | List consumerSummaryList = JavaConversions.asJavaList(adminClient.describeConsumerGroup(group)); 69 | 70 | Consumer consumer = getConsumer(); 71 | consumerSummaryList.stream().forEach(consumerSummary -> { 72 | 73 | //取得topic与partition 74 | List topicAndPartitionStream = JavaConversions.asJavaList(consumerSummary.assignment()) 75 | .parallelStream().map(tp -> new TopicAndPartition(tp.topic(), tp.partition())) 76 | .collect(Collectors.toList()); 77 | 78 | /** 79 | * partition与偏移信息 80 | */ 81 | Stream> partitionOffsets = topicAndPartitionStream.stream().flatMap(topicAndPartition -> { 82 | OffsetAndMetadata offsetAndMetadata = consumer.committed(new TopicPartition(topicAndPartition.topic(), topicAndPartition.partition())); 83 | Map offsetMap = new HashMap<>(); 84 | if (offsetAndMetadata != null) { 85 | offsetMap.put(topicAndPartition, offsetAndMetadata.offset()); 86 | } else { 87 | offsetMap.put(topicAndPartition, -1L); 88 | } 89 
| return Stream.of(offsetMap); 90 | }); 91 | 92 | final Map partitionOffsetsMap = topicAndPartitionStream.size() > 0 ? partitionOffsets.findFirst().get() : new HashMap<>(); 93 | 94 | //取得偏移信息 95 | topicAndPartitionStream.forEach(topicAndPartition -> { 96 | co.solinx.kafka.monitor.model.Consumer model = new co.solinx.kafka.monitor.model.Consumer(); 97 | long endOff = findLogEndOffset(topicAndPartition.topic(), topicAndPartition.partition()); 98 | long currentOff = 0; 99 | if (partitionOffsetsMap.size() > 0 && partitionOffsetsMap.containsKey(topicAndPartition)) { 100 | currentOff = partitionOffsetsMap.get(topicAndPartition); 101 | } 102 | 103 | model.setMemberId(consumerSummary.memberId()); 104 | model.setCurrentOffset(currentOff); 105 | model.setEndOffset(endOff); 106 | model.setPartition(topicAndPartition.partition()); 107 | model.setTopic(topicAndPartition.topic()); 108 | model.setClientId(consumerSummary.clientId()); 109 | model.setHost(consumerSummary.clientHost()); 110 | model.setGroup(group); 111 | consumerList.add(model); 112 | }); 113 | 114 | }); 115 | return consumerList; 116 | } 117 | 118 | /** 119 | * 取得所有consumer 120 | * 121 | * @return 122 | */ 123 | public List getConsumerList() { 124 | List consumerList = new ArrayList(); 125 | List groupList = groupList(); 126 | groupList.stream().forEach(group -> consumerList.addAll(getConsumerList(group.groupId()))); 127 | return consumerList; 128 | 129 | } 130 | 131 | /** 132 | * 取得topic下所有consumer 133 | * 134 | * @param topic 135 | * @return 136 | */ 137 | public List getConsumerByTopic(String topic) { 138 | 139 | return getConsumerList().stream().filter(c -> c.getTopic().equals(topic)).collect(Collectors.toList()); 140 | } 141 | 142 | /** 143 | * 取得尾部offset(LEO) 144 | * 145 | * @param topic 146 | * @param partition 147 | * @return 148 | */ 149 | private long findLogEndOffset(String topic, int partition) { 150 | Consumer consumer = getConsumer(); 151 | TopicPartition topicPartition = new TopicPartition(topic, 
partition); 152 | List tpList = new ArrayList(); 153 | tpList.add(topicPartition); 154 | consumer.assign(tpList); 155 | consumer.seekToEnd(tpList); 156 | Long longEndOffset = consumer.position(topicPartition); 157 | return longEndOffset; 158 | } 159 | 160 | 161 | private Consumer getConsumer() { 162 | if (consumer == null) { 163 | consumer = newConsumer(); 164 | } 165 | return consumer; 166 | } 167 | 168 | private org.apache.kafka.clients.consumer.KafkaConsumer newConsumer() { 169 | 170 | 171 | return new org.apache.kafka.clients.consumer.KafkaConsumer(props); 172 | } 173 | 174 | public void describeTopicPartition(String group, Seq topicPartitions, Function1> getPartitionOffset, Function1> getOwner) { 175 | 176 | } 177 | 178 | } 179 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/InitService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import javax.servlet.ServletException; 7 | import javax.servlet.annotation.WebServlet; 8 | import javax.servlet.http.HttpServlet; 9 | import javax.servlet.http.HttpServletRequest; 10 | import javax.servlet.http.HttpServletResponse; 11 | import java.io.IOException; 12 | 13 | /** 14 | * Created by xin on 2016-12-18. 
15 | */ 16 | public class InitService { 17 | 18 | private static final Logger logger = LoggerFactory.getLogger(InitService.class); 19 | 20 | public void init() { 21 | logger.debug("init-------------"); 22 | 23 | try { 24 | KafkaBaseInfoService.getInstance(); 25 | monitorStart(); 26 | 27 | Thread.sleep(2000); 28 | } catch (Exception e) { 29 | e.printStackTrace(); 30 | } 31 | } 32 | 33 | public void monitorStart() throws IOException { 34 | ProduceService produceService = new ProduceService(); 35 | ConsumerService consumerService = new ConsumerService(); 36 | 37 | produceService.start(); 38 | consumerService.start(); 39 | 40 | 41 | MetricsReportService.getMetricsService().start(); 42 | 43 | JolokiaService jolokiaService = new JolokiaService(); 44 | jolokiaService.start(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/JolokiaService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import org.jolokia.jvmagent.JolokiaServer; 4 | import org.jolokia.jvmagent.JvmAgentConfig; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.io.IOException; 9 | 10 | /** 11 | * 12 | * 13 | * @author linxin 14 | * @version v1.0 15 | * Copyright (c) 2015 by solinx 16 | * @date 2016/12/22. 
17 | */ 18 | public class JolokiaService { 19 | 20 | private Logger logger = LoggerFactory.getLogger(JolokiaService.class); 21 | private JolokiaServer jolokiaServer; 22 | 23 | public JolokiaService() throws IOException { 24 | jolokiaServer = new JolokiaServer(new JvmAgentConfig("host=*,port=8889"), false); 25 | 26 | } 27 | 28 | public void start() { 29 | jolokiaServer.start(); 30 | logger.info("Jolokia Server started at port {}", 8889); 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/KafkaBaseInfoService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import co.solinx.kafka.monitor.common.KafkaUtils; 4 | import co.solinx.kafka.monitor.core.listener.BrokerListener; 5 | import co.solinx.kafka.monitor.core.listener.TopicListener; 6 | import co.solinx.kafka.monitor.model.*; 7 | import co.solinx.kafka.monitor.utils.JsonLoader; 8 | import com.alibaba.fastjson.JSON; 9 | import com.alibaba.fastjson.JSONArray; 10 | import com.alibaba.fastjson.JSONObject; 11 | import kafka.api.PartitionMetadata; 12 | import kafka.api.PartitionOffsetRequestInfo; 13 | import kafka.client.ClientUtils; 14 | import kafka.client.ClientUtils$; 15 | import kafka.cluster.BrokerEndPoint; 16 | import kafka.cluster.Replica; 17 | import kafka.common.ErrorMapping; 18 | import kafka.common.TopicAndPartition; 19 | import kafka.javaapi.*; 20 | import kafka.network.BlockingChannel; 21 | import kafka.utils.ZkUtils; 22 | import org.apache.commons.lang3.StringUtils; 23 | import org.apache.curator.framework.CuratorFramework; 24 | import org.apache.curator.framework.recipes.cache.*; 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | import scala.collection.*; 28 | import scala.collection.convert.*; 29 | import scala.collection.mutable.ArraySeq; 30 | 31 | import java.io.InputStream; 32 | 
import java.util.*;
import java.util.Map;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

import static org.apache.curator.framework.recipes.cache.TreeCacheEvent.Type.NODE_ADDED;

/**
 * Caches basic cluster state from Zookeeper (brokers, topics, consumers,
 * controller) and answers metadata queries against that cache.
 * brokerCache — live brokers; topicCache/topicList — topic metadata.
 * Generic type parameters, lost in the HTML extraction, are restored here.
 *
 * @author linxin
 * @version v1.0
 *          Copyright (c) 2015 by solinx
 * @date 2016/12/12.
 */
public class KafkaBaseInfoService {

    private Logger logger = LoggerFactory.getLogger(KafkaBaseInfoService.class);
    private CuratorFramework curator = CuratorService.getInstance();
    /** topic node cache */
    private TreeCache topicTreeCache;
    /** consumer cache */
    private TreeCache consumerTreeCache;
    /** broker node cache */
    private PathChildrenCache brokerPathCache;
    /** topic config cache */
    private PathChildrenCache topicConfigCache;

    /** broker info cache, sorted by broker id */
    public Map<Integer, Broker> brokerCache = new TreeMap<>();

    private List<Topic> topicList = new ArrayList<>();
    /** cluster controller */
    private Controller controller;
    /** controller node cache */
    private NodeCache controllerNodeCache;

    private static KafkaBaseInfoService kafkaService = null;

    public synchronized static KafkaBaseInfoService getInstance() {
        if (kafkaService == null) {
            kafkaService = new KafkaBaseInfoService();
        }
        return kafkaService;
    }

    private KafkaBaseInfoService() {

        try {

            brokerPathCache = new PathChildrenCache(curator, ZkUtils.BrokerIdsPath(), true);
            brokerPathCache.getListenable().addListener(new BrokerListener(brokerCache, brokerPathCache));
            brokerPathCache.start();

            topicConfigCache = new PathChildrenCache(curator, ZkUtils.EntityConfigPath() + "/topics", true);
            topicConfigCache.start();

            // topic cache
            topicTreeCache = new TreeCache(curator, ZkUtils.BrokerTopicsPath());
            topicTreeCache.getListenable().addListener(new TopicListener(topicList));
            topicTreeCache.start();

            consumerTreeCache = new TreeCache(curator, ZkUtils.ConsumersPath());
            consumerTreeCache.start();

            // controller info
            controllerNodeCache = new NodeCache(curator, ZkUtils.ControllerPath());
            controllerNodeCache.getListenable().addListener(() ->
                    controller = JSON.parseObject(controllerNodeCache.getCurrentData().getData(), Controller.class));
            controllerNodeCache.start(true);
            controller = JSON.parseObject(controllerNodeCache.getCurrentData().getData(), Controller.class);

            // todo wait for the zk caches to warm up
            Thread.sleep(5000);
        } catch (Exception e) {
            // was logger.error("{}", e): pass the throwable so the stack trace is kept
            logger.error("failed to initialise kafka base info caches", e);
        }
    }

    /**
     * Back-fills replicas/isr/leader of leaderless partitions from the
     * zookeeper-driven topic cache.
     *
     * @param topic topic to enrich in place
     */
    private void mergeTopic(Topic topic) {
        // todo handle an empty topicList (findFirst().get() would throw)
        Topic tm = topicList.stream().filter(m -> m.getName().equals(topic.getName())).findFirst().get();
        topic.getPartitionMap().values().stream().forEach((t) -> {
            if (t.getLeaderId() == -1) {
                Partition partition = tm.getPartitionMap().get(t.getId());
                t.setReplicasArray(partition.getReplicasArray());
                t.setIsr(partition.getIsr());
                t.setLeader(partition.getLeaderId());
            }
        });
    }

    /**
     * All topics, sorted by name, with preferred-replica percentage and
     * under-replicated partition count computed.
     */
    public List<Topic> getTopics() {
        // todo enrich topic details
        return getTopicMetadata().values().stream().sorted(Comparator.comparing(Topic::getName)).map((e) -> {

            Map<Integer, Partition> partitionMap = e.getPartitionMap();
            mergeTopic(e);
            double preferred = 0;
            int underReplicated = 0;
            int partitionSize = partitionMap.size();
            // preferred-replica ratio
            for (Partition tPart : partitionMap.values()) {
                if (tPart.isLeaderPreferred()) {
                    preferred++;
                }
                int rSum = tPart.getReplicas().size();
                if (rSum > 0) {
                    if (tPart.getIsr().length < rSum) {
                        underReplicated++;
                    }
                } else {
                    underReplicated++;
                }
            }
            e.setPreferred(preferred / partitionSize * 100);

            e.setUnderReplicated(underReplicated);
            return e;
        }).collect(Collectors.toList());

    }

    /**
     * One topic with first/last offsets per partition filled in.
     *
     * @param topic topic name
     */
    public Topic getTopic(String topic) {
        final Topic resultTopic = getTopicMetadata(topic).get(topic);
        // lastOffset
        getTopicPartitionSizes(resultTopic, kafka.api.OffsetRequest.LatestTime()).entrySet()
                .forEach(entry -> {
                    Partition tp = resultTopic.getPartition(entry.getKey());
                    if (tp != null) {
                        tp.setSize(entry.getValue());
                    }
                });
        // firstOffset
        getTopicPartitionSizes(resultTopic, kafka.api.OffsetRequest.EarliestTime()).entrySet()
                .forEach(entry -> {
                    Partition tp = resultTopic.getPartition(entry.getKey());
                    if (tp != null) {
                        tp.setFirstOffset(entry.getValue());
                    }
                });
        mergeTopic(resultTopic);

        return resultTopic;
    }

    /**
     * Partition offsets of a topic at a given time, queried per leader broker.
     *
     * @param topic topic
     * @param time  offset time (earliest/latest)
     * @return partition id -> offset (-1 when unavailable)
     */
    private Map<Integer, Long> getTopicPartitionSizes(Topic topic, long time) {

        return topic.getPartitionMap().values().parallelStream()
                .filter(p -> p.getLeader() != null)
                .collect(Collectors.groupingBy(p -> p.getLeader().getId()))
                .entrySet().parallelStream()
                .map(entry -> {
                    final Integer brokerID = entry.getKey();
                    final List<Partition> brokerPartitions = entry.getValue();
                    try {
                        Broker broker = getBrokerById(brokerID);
                        // offset request against the leader broker
                        OffsetResponse offsetResponse = KafkaUtils.sendOffsetRequest(broker, topic, brokerPartitions, time);

                        return brokerPartitions.stream()
                                .collect(Collectors.toMap(Partition::getId,
                                        partition -> Optional.ofNullable(
                                                offsetResponse.offsets(topic.getName(), partition.getId()))
                                                .map(Arrays::stream)
                                                .orElse(LongStream.empty())
                                                .findFirst()
                                                .orElse(-1L)
                                ));
                    } catch (Exception ex) {
                        // was a separate ex.printStackTrace(): pass the throwable to the logger
                        logger.error("Unable to get partition log size for topic {} partitions {}", topic.getName(),
                                brokerPartitions.stream()
                                        .map(Partition::getId)
                                        .map(String::valueOf)
                                        .collect(Collectors.joining(",")), ex);
                        return brokerPartitions.stream().collect(Collectors.toMap(Partition::getId, tp -> -1L));
                    }
                })
                .map(Map::entrySet)
                .flatMap(Collection::stream)
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    }

    /**
     * Broker lookup by id.
     *
     * @throws RuntimeException when no broker is cached yet
     */
    public Broker getBrokerById(int brokerID) {
        if (brokerCache.size() == 0) {
            throw new RuntimeException("no broker available ");

        }
        return brokerCache.get(brokerID);
    }

    /**
     * Fetches topic metadata from a live broker and converts it into model
     * objects (partitions, replicas, isr, preferred leader).
     *
     * @param topics topic names; empty for all topics
     * @return topic name -> Topic
     */
    public Map<String, Topic> getTopicMetadata(String... topics) {

        // request topic metadata
        kafka.api.TopicMetadataResponse response = ClientUtils.fetchTopicMetadata(
                JavaConversions.asScalaIterable(Arrays.asList(topics)).toSet(),
                JavaConversions.asScalaBuffer(getBrokerEndPoints()), "test", 2000, 1);

        // extract topic info from the metadata
        Map<String, Topic> topicMap = WrapAsJava$.MODULE$.seqAsJavaList(response.topicsMetadata())
                .stream().filter(error -> error.errorCode() == ErrorMapping.NoError())
                .map((temp) -> {
                    Topic topic = new Topic(temp.topic());
                    topic.setConfig(JSONObject.parseObject(
                            topicConfigCache.getCurrentData(ZkUtils.EntityConfigPath() + "/topics/" + temp.topic()).getData(),
                            Map.class));
                    List<PartitionMetadata> pMetadata = WrapAsJava$.MODULE$.seqAsJavaList(temp.partitionsMetadata());
                    topic.setPartitionMap(
                            pMetadata.stream()
                                    .map((pMta) -> {
                                        // partition replica info
                                        Partition partition = new Partition(pMta.partitionId());
                                        BrokerEndPoint leader;
                                        int leaderId = -1;
                                        if (pMta.leader().nonEmpty()) {
                                            leader = pMta.leader().get();
                                            leaderId = leader.id();
                                        }

                                        partition.setIsr(WrapAsJava$.MODULE$.seqAsJavaList(pMta.isr()).stream().mapToInt(i -> i.id()).toArray());

                                        for (BrokerEndPoint replica : WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas())) {
                                            boolean isLeader = false;
                                            if (replica.id() == leaderId) {
                                                isLeader = true;
                                            }
                                            partition.addReplica(new PartitionReplica(replica.id(), true, isLeader));
                                        }

                                        partition.setReplicasArray(WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).stream().mapToInt(m -> m.id()).toArray());

                                        if (pMta.replicas().size() > 0) {
                                            // preferred replica = first replica in the list
                                            BrokerEndPoint preferedReplica = WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).get(0);
                                            // preferred replica is the current leader
                                            if (leaderId == preferedReplica.id()) {
                                                partition.setPreferredLeaderId(leaderId);
                                            }
                                        }
                                        return partition;
                                    }).collect(Collectors.toMap(Partition::getId, p -> p))
                    );
                    return topic;
                }).collect(Collectors.toMap(Topic::getName, t -> t));

        return topicMap;
    }

    // NOTE(review): dead stub kept for source compatibility; the commented
    // line suggests it once mapped pmd.isr() to broker ids.
    private List<Integer> getIsr(String topic, PartitionMetadata pmd) {
        // return pmd.isr().stream().map((temp) -> temp.id()).collect(Collectors.toList());
        return null;
    }

    /**
     * Broker list, with the controller flagged.
     */
    public List<Broker> getBrokers() {
        return brokerCache.values().stream().map((t) -> {
            if (t.getId() == controller.getBrokerId()) {
                t.setController(true);
            }
            return t;
        }).collect(Collectors.toList());
    }

    public List<BrokerEndPoint> getBrokerEndPoints() {
        List<BrokerEndPoint> endPointList = new ArrayList<>();
        brokerCache.values().forEach(b ->
                endPointList.add(new BrokerEndPoint(b.getId(), b.getHost(), b.getPort())));
        return endPointList;
    }

    /** A randomly chosen cached broker. */
    public Broker randomBroker() {
        int brokerId = new Random().nextInt(brokerCache.size());
        return (Broker) brokerCache.values().toArray()[brokerId];
    }

    // NOTE(review): name typo ("reandom") kept — ConfigService calls this method.
    public String reandomBrokerHost() {
        Broker broker = randomBroker();
        String host = broker.getHost();
        int port = broker.getPort();
        return String.format("%s:%d", host, port);
    }

}

// ---------------------------------------------------------------------------
// src/main/java/co/solinx/kafka/monitor/core/service/MessageService.java
// ---------------------------------------------------------------------------
package co.solinx.kafka.monitor.core.service;

import co.solinx.kafka.monitor.model.Message;
import co.solinx.kafka.monitor.model.Topic;
import co.solinx.kafka.monitor.model.Partition;
import co.solinx.kafka.monitor.model.Broker;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
10 | import kafka.message.MessageAndOffset; 11 | 12 | import java.io.UnsupportedEncodingException; 13 | import java.nio.ByteBuffer; 14 | import java.util.ArrayList; 15 | import java.util.List; 16 | import java.util.stream.StreamSupport; 17 | 18 | /** 19 | * @author linxin 20 | * @version v1.0 21 | * Copyright (c) 2015 by solinx 22 | * @date 2016/12/15. 23 | */ 24 | public class MessageService { 25 | 26 | KafkaBaseInfoService kafkaService = KafkaBaseInfoService.getInstance(); 27 | 28 | public List getMesage(String topicName, int partitionID, int offset, int count) { 29 | Topic topic = kafkaService.getTopic(topicName); 30 | Partition partition = topic.getPartition(partitionID); 31 | Broker broker = kafkaService.getBrokerById(partition.getLeader().getId()); 32 | 33 | SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 10000, ""); 34 | FetchRequestBuilder requestBuilder = new FetchRequestBuilder() 35 | .clientId("kafkaMonitor") 36 | .maxWait(5000) 37 | .minBytes(1); 38 | List messageList = new ArrayList<>(count); 39 | long currentOffset = offset; 40 | while (messageList.size() < count) { 41 | kafka.api.FetchRequest request = requestBuilder.addFetch(topicName, partitionID, currentOffset, 1024 * 1024).build(); 42 | 43 | kafka.javaapi.FetchResponse response = consumer.fetch(request); 44 | ByteBufferMessageSet messageSet = response.messageSet(topicName, partitionID); 45 | if (messageSet.validBytes() <= 0) break; 46 | 47 | int oldSize = messageList.size(); 48 | StreamSupport.stream(messageSet.spliterator(), false) 49 | .limit(count - messageList.size()) 50 | .map(MessageAndOffset::message) 51 | .map((msg) -> { 52 | Message mmsg = new Message(); 53 | if (msg.hasKey()) { 54 | mmsg.setKey(readString(msg.key())); 55 | } 56 | if (!msg.isNull()) { 57 | mmsg.setMessage(readString(msg.payload())); 58 | } 59 | mmsg.setValid(msg.isValid()); 60 | mmsg.setCompressionCodec(msg.compressionCodec().name()); 61 | mmsg.setChecksum(msg.checksum()); 62 | 
return mmsg; 63 | }).forEach(messageList::add); 64 | currentOffset += messageList.size() - oldSize; 65 | 66 | } 67 | consumer.close(); 68 | return messageList; 69 | } 70 | 71 | private String readString(ByteBuffer buffer) { 72 | try { 73 | return new String(readBytes(buffer), "UTF-8"); 74 | } catch (UnsupportedEncodingException e) { 75 | return ""; 76 | } 77 | } 78 | 79 | private byte[] readBytes(ByteBuffer buffer) { 80 | return readBytes(buffer, 0, buffer.limit()); 81 | } 82 | 83 | private byte[] readBytes(ByteBuffer buffer, int offset, int size) { 84 | byte[] dest = new byte[size]; 85 | if (buffer.hasArray()) { 86 | System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, size); 87 | } else { 88 | buffer.mark(); 89 | buffer.get(dest); 90 | buffer.reset(); 91 | } 92 | return dest; 93 | } 94 | 95 | } 96 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/PartitionService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import co.solinx.kafka.monitor.common.KafkaUtils; 4 | import kafka.admin.AdminUtils; 5 | import kafka.admin.RackAwareMode; 6 | import kafka.utils.ZkUtils; 7 | 8 | /** 9 | * @author linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2017/12/13. 
13 | */ 14 | public class PartitionService { 15 | 16 | private ZkUtils zkUtils = KafkaUtils.getZkUtils(); 17 | 18 | public void addPartition(String topic, int partitions, String replica) { 19 | AdminUtils.addPartitions(zkUtils, topic, partitions, replica, true, RackAwareMode.Enforced$.MODULE$); 20 | } 21 | 22 | /** 23 | * 添加分区 24 | * 25 | * @param topic topic名称 26 | * @param partitions 分区数 27 | */ 28 | public void addPartition(String topic, int partitions) { 29 | this.addPartition(topic, partitions, ""); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/ProduceService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import co.solinx.kafka.monitor.core.produce.MonitorProducer; 4 | import co.solinx.kafka.monitor.model.ZooConfig; 5 | import com.alibaba.fastjson.JSONObject; 6 | import co.solinx.kafka.monitor.common.Utils; 7 | import co.solinx.kafka.monitor.core.produce.ProduceRecord; 8 | import org.apache.kafka.clients.producer.RecordMetadata; 9 | import org.apache.kafka.common.MetricName; 10 | import org.apache.kafka.common.metrics.*; 11 | import org.apache.kafka.common.metrics.stats.Rate; 12 | import org.apache.kafka.common.metrics.stats.Total; 13 | import org.apache.kafka.common.utils.SystemTime; 14 | import org.slf4j.Logger; 15 | import org.slf4j.LoggerFactory; 16 | 17 | import java.util.*; 18 | import java.util.concurrent.*; 19 | import java.util.concurrent.atomic.AtomicInteger; 20 | import java.util.concurrent.atomic.AtomicLong; 21 | 22 | /** 23 | * 监控消息生产者 24 | * 25 | * @author linxin 26 | * @version v1.0 27 | * Copyright (c) 2015 by solinx 28 | * @date 2016/12/22. 
29 | */ 30 | public class ProduceService { 31 | 32 | private final Logger logger = LoggerFactory.getLogger(ProduceService.class); 33 | ProduceMetrics produceMetrics; 34 | private String METRIC_GROUP_NAME = "produce-service"; 35 | private ScheduledExecutorService produceExecutor; 36 | private ScheduledExecutorService partitionHandlerExecutor; 37 | /** 38 | * 消息生产者 39 | */ 40 | private MonitorProducer producer; 41 | private AtomicInteger partitionNum; 42 | private String MONITOR_TOPIC; 43 | private String zkConnect; 44 | private ConcurrentMap currentPartition; 45 | 46 | 47 | public ProduceService() { 48 | 49 | Properties properties = ConfigService.getZkProper(); 50 | zkConnect = properties.getProperty(ZooConfig.HOST); 51 | MONITOR_TOPIC = ConfigService.monitorConfig.getMonitorTopic(); 52 | 53 | produceExecutor = Executors.newScheduledThreadPool(4, r -> new Thread(r, "produce-service")); 54 | partitionHandlerExecutor = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "partition-change-handler")); 55 | partitionNum = new AtomicInteger(1); 56 | currentPartition = new ConcurrentHashMap<>(); 57 | 58 | MetricConfig metricConfig = new MetricConfig().samples(60).timeWindow(100, TimeUnit.MILLISECONDS); 59 | List reporters = new ArrayList<>(); 60 | reporters.add(new JmxReporter("kmf.services")); 61 | Metrics metrics = new Metrics(metricConfig, reporters, new SystemTime()); 62 | Map tags = new HashMap<>(); 63 | tags.put("name", "test"); 64 | produceMetrics = new ProduceMetrics(metrics, tags); 65 | 66 | int existingPartitionCount = Utils.getPartitionNumByTopic(zkConnect, MONITOR_TOPIC); 67 | if (existingPartitionCount > 0) { 68 | partitionNum.set(existingPartitionCount); 69 | } 70 | 71 | initialProducer(); 72 | 73 | } 74 | 75 | /** 76 | * 初始化生产者服务 77 | */ 78 | private void initialProducer() { 79 | Properties props = ConfigService.getKafkaProducerConf(); 80 | 81 | producer = new MonitorProducer(props); 82 | } 83 | 84 | /** 85 | * 初始化生产者Schedule,每个partition一个线程 86 | 
*/ 87 | private void initialProducerSchedule() { 88 | for (int partition = 0; partition < partitionNum.get(); partition++) { 89 | if (!currentPartition.containsKey(partition)) { 90 | produceMetrics.addPartitionSensor(partition); 91 | currentPartition.put(partition, new AtomicLong(0)); 92 | } 93 | produceExecutor.scheduleWithFixedDelay(new ProduceRunnable(partition), 20000, 1000, TimeUnit.MILLISECONDS); 94 | } 95 | } 96 | 97 | 98 | public void start() { 99 | 100 | initialProducerSchedule(); 101 | partitionHandlerExecutor.scheduleWithFixedDelay(new PartitionHandler(), 30000, 30000, TimeUnit.MILLISECONDS); 102 | } 103 | 104 | 105 | /** 106 | * producer度量 107 | */ 108 | private class ProduceMetrics { 109 | 110 | private final Metrics metrics; 111 | final Sensor recordsProduce; 112 | final Sensor errorProduce; 113 | private final ConcurrentMap _recordsProducedPerPartition; 114 | private final ConcurrentMap _produceErrorPerPartition; 115 | private final Map tags; 116 | 117 | public ProduceMetrics(Metrics metrics, final Map tags) { 118 | this.metrics = metrics; 119 | this.tags = tags; 120 | 121 | 122 | _recordsProducedPerPartition = new ConcurrentHashMap<>(); 123 | _produceErrorPerPartition = new ConcurrentHashMap<>(); 124 | 125 | 126 | recordsProduce = metrics.sensor("records-produced"); 127 | recordsProduce.add(new MetricName("records-produced-total", METRIC_GROUP_NAME, "The total number of records that are produced", tags), new Total()); 128 | errorProduce = metrics.sensor("error-produce"); 129 | errorProduce.add(new MetricName("error-produce-total", METRIC_GROUP_NAME, "", tags), new Total()); 130 | 131 | metrics.addMetric(new MetricName("produce-availability-avg", METRIC_GROUP_NAME, "The average produce availability", tags), 132 | (config, now) -> { 133 | double availabilitySum = 0.0; 134 | //可用性等于每个partition的可用性之和除以partition总数 135 | //partition可用性等于成功发送率除以失败率 136 | int num = partitionNum.get(); 137 | 138 | for (int partition = 0; partition < num; partition++) { 139 | 
double recordsProduced = produceMetrics.metrics.metrics().get(new MetricName("records-produced-rate-partition-" + partition, METRIC_GROUP_NAME, tags)).value(); 140 | double produceError = produceMetrics.metrics.metrics().get(new MetricName("produce-error-rate-partition-" + partition, METRIC_GROUP_NAME, tags)).value(); 141 | 142 | if (Double.isNaN(produceError) || Double.isInfinite(produceError)) { 143 | produceError = 0; 144 | } 145 | if (recordsProduced + produceError > 0) { 146 | availabilitySum += recordsProduced / (recordsProduced + produceError); 147 | } 148 | } 149 | return availabilitySum / num; 150 | //return 0; 151 | }); 152 | 153 | 154 | } 155 | 156 | /** 157 | * 为每个partition添加Sensor 158 | * 159 | * @param partition 160 | */ 161 | public void addPartitionSensor(int partition) { 162 | try { 163 | Sensor recordsProducedSensor = metrics.sensor("records-produced-partition-" + partition); 164 | recordsProducedSensor.add(new MetricName("records-produced-rate-partition-" + partition, METRIC_GROUP_NAME, 165 | "The average number of records per second that are produced to this partition", tags), new Rate()); 166 | _recordsProducedPerPartition.put(partition, recordsProducedSensor); 167 | 168 | Sensor errorsSensor = metrics.sensor("produce-error-partition-" + partition); 169 | errorsSensor.add(new MetricName("produce-error-rate-partition-" + partition, METRIC_GROUP_NAME, 170 | "The average number of errors per second when producing to this partition", tags), new Rate()); 171 | _produceErrorPerPartition.put(partition, errorsSensor); 172 | } catch (Exception e) { 173 | logger.error("addPartitionSensor exception {}", e); 174 | } 175 | } 176 | } 177 | 178 | /** 179 | * producer生产者线程 180 | */ 181 | private class ProduceRunnable implements Runnable { 182 | 183 | int partition; 184 | 185 | 186 | public ProduceRunnable(int partition) { 187 | this.partition = partition; 188 | } 189 | 190 | @Override 191 | public void run() { 192 | try { 193 | 194 | long nextIndex = 
currentPartition.get(partition).get(); 195 | 196 | //组装消息,time用于consumer判断消息的延迟,index用于判断消息的重复与丢失,index为当前消息的序号 197 | 198 | JSONObject messageObj = new JSONObject(); 199 | messageObj.put("topic", MONITOR_TOPIC); 200 | messageObj.put("time", System.currentTimeMillis()); 201 | messageObj.put("partition", partition); 202 | messageObj.put("index", nextIndex); 203 | // String message = String.format("topic:%s,partition:%d,time:%s", MONITOR_TOPIC, partition, System.currentTimeMillis()); 204 | 205 | ProduceRecord produceRecord = new ProduceRecord(MONITOR_TOPIC, partition, null, messageObj.toJSONString()); 206 | RecordMetadata metadata = producer.send(produceRecord); 207 | produceMetrics.recordsProduce.record(); 208 | produceMetrics._recordsProducedPerPartition.get(partition).record(); 209 | 210 | 211 | currentPartition.get(partition).getAndIncrement(); 212 | } catch (Exception e) { 213 | produceMetrics.errorProduce.record(); 214 | produceMetrics._produceErrorPerPartition.get(partition).record(); 215 | logger.warn("failed to send message ", e); 216 | } 217 | } 218 | } 219 | 220 | /** 221 | * Partition变更处理,如有新增partition 需要关闭当前executor,后重新初始化executor 222 | */ 223 | private class PartitionHandler implements Runnable { 224 | 225 | @Override 226 | public void run() { 227 | int currentPartitionNum = Utils.getPartitionNumByTopic(zkConnect, MONITOR_TOPIC); 228 | if (currentPartitionNum > partitionNum.get()) { 229 | produceExecutor.shutdown(); 230 | 231 | try { 232 | produceExecutor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); 233 | } catch (InterruptedException e) { 234 | throw new IllegalStateException(e); 235 | } 236 | 237 | producer.close(); 238 | partitionNum.set(currentPartitionNum); 239 | 240 | initialProducer(); 241 | produceExecutor = Executors.newScheduledThreadPool(4, r -> new Thread(r, "produce-service")); 242 | initialProducerSchedule(); 243 | 244 | } 245 | } 246 | } 247 | 248 | } 249 | 
-------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/core/service/TopicService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.core.service; 2 | 3 | import co.solinx.kafka.monitor.common.KafkaUtils; 4 | import kafka.admin.AdminUtils; 5 | import kafka.admin.RackAwareMode; 6 | import kafka.admin.RackAwareMode$; 7 | import kafka.utils.ZkUtils; 8 | 9 | import java.util.Properties; 10 | 11 | /** 12 | * @author linxin 13 | * @version v1.0 14 | * Copyright (c) 2015 by solinx 15 | * @date 2017/12/13. 16 | */ 17 | public class TopicService { 18 | 19 | private ZkUtils zkUtils = KafkaUtils.getZkUtils(); 20 | 21 | 22 | /** 23 | * 添加topic 24 | * 25 | * @param topic topic名称 26 | * @param partitions 分区数 27 | * @param replicationFactor 副本因子 28 | */ 29 | public void createTopic(String topic, int partitions, int replicationFactor) { 30 | 31 | AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor, new Properties(), RackAwareMode.Enforced$.MODULE$); 32 | } 33 | 34 | /** 35 | * 删除topic 36 | * 37 | * @param topic topic名称 38 | */ 39 | public void deleteTopic(String topic) { 40 | AdminUtils.deleteTopic(zkUtils, topic); 41 | 42 | } 43 | 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/db/DBUtils.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.db; 2 | 3 | import co.solinx.kafka.monitor.common.DateUtils; 4 | import co.solinx.kafka.monitor.model.KafkaMonitorData; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.lang.reflect.Field; 9 | import java.lang.reflect.ParameterizedType; 10 | import java.lang.reflect.Type; 11 | import java.sql.*; 12 | import java.util.*; 13 | 14 | /** 15 | * @author linxin 16 | * @version v1.0 17 | * Copyright (c) 2015 
by solinx 18 | * @date 2016/4/12. 19 | */ 20 | public class DBUtils { 21 | 22 | private static Logger logger = LoggerFactory.getLogger(DBUtils.class); 23 | private Connection conn; 24 | Class actualClass; 25 | 26 | public DBUtils(Class cla) { 27 | try { 28 | 29 | logger.debug("{}", cla); 30 | this.actualClass = cla; 31 | this.open(); 32 | } catch (SQLException e) { 33 | logger.error("open connection error {}", e); 34 | } catch (ClassNotFoundException e) { 35 | logger.error("ClassNotFoundException error {}", e); 36 | 37 | } 38 | } 39 | 40 | /** 41 | * 打开连接 42 | * 43 | * @throws SQLException 44 | */ 45 | private void open() throws SQLException, ClassNotFoundException { 46 | // URL path = DBUtils.class.getClassLoader().getResource("db"); 47 | String url = "jdbc:mysql://10.10.1.104:3306/kafkaMonitor"; //JDBC的URL 48 | String path = System.getProperty("user.dir").toString(); 49 | Class.forName("com.mysql.jdbc.Driver"); 50 | logger.debug("path : {} ", path); 51 | conn = DriverManager.getConnection(url, "root", "123456"); 52 | 53 | } 54 | 55 | /** 56 | * 更新操作 57 | * 58 | * @param sql 59 | * @throws SQLException 60 | */ 61 | public void executeUpdate(String sql) throws SQLException, ClassNotFoundException { 62 | checkConnect(); 63 | //开启WAL模式提高并发 减少SQLITE_BUSY问题的出现概率 64 | //updateJournalModel("PRAGMA journal_mode=WAL"); 65 | Statement stmt = conn.createStatement(); 66 | PreparedStatement pst = conn.prepareStatement(sql); 67 | 68 | 69 | logger.debug(" sql : {}", sql); 70 | stmt.executeUpdate(sql); 71 | stmt.close(); 72 | this.close(); 73 | 74 | } 75 | 76 | /** 77 | * 更新JournalModel模式 78 | * 79 | * @param model 80 | * @throws SQLException 81 | */ 82 | public void updateJournalModel(String model) throws SQLException, ClassNotFoundException { 83 | Statement stmt = null; 84 | try { 85 | this.checkConnect(); 86 | stmt = conn.createStatement(); 87 | stmt.executeUpdate(model); 88 | } finally { 89 | stmt.close(); 90 | } 91 | } 92 | 93 | /** 94 | * 查询 95 | * 96 | * @param sql 97 | * 
@return 98 | * @throws Exception 99 | */ 100 | public List query(String sql) throws Exception { 101 | List resultList = new ArrayList<>(); 102 | List columnList = new ArrayList<>(); 103 | 104 | this.checkConnect(); 105 | //开启WAL模式提高并发 减少SQLITE_BUSY问题的出现概率 106 | // updateJournalModel("PRAGMA journal_mode=WAL"); 107 | Statement stmt = conn.createStatement(); 108 | ResultSet set = stmt.executeQuery(sql); 109 | logger.debug(" sql : {}", sql); 110 | 111 | //获取表中的各列名称 112 | if (set != null) { 113 | ResultSetMetaData metaData = set.getMetaData(); 114 | int count = metaData.getColumnCount(); 115 | logger.debug(" columnCount: {}", count); 116 | for (int i = 1; i <= count; i++) { 117 | columnList.add(metaData.getColumnName(i)); 118 | } 119 | } 120 | 121 | 122 | while (set.next()) { 123 | Object model = Class.forName(actualClass.getName()).newInstance(); 124 | for (String column : 125 | columnList) { 126 | 127 | Field field = model.getClass().getDeclaredField(column); 128 | field.setAccessible(true); 129 | String type = field.getType().toString(); 130 | if (type.startsWith("long") || type.startsWith("int")) { 131 | field.setLong(model, set.getLong(column)); 132 | } else if (type.endsWith("String")) { 133 | field.set(model, set.getString(column)); 134 | } else if (type.endsWith("Date")) { 135 | field.set(model, DateUtils.convertFromStringToDate(set.getString(column), "yyyy-MM-dd HH:mm:ss")); 136 | } else if (type.endsWith("double")) { 137 | field.set(model, set.getDouble(column)); 138 | } else { 139 | field.set(model, set.getObject(column).toString()); 140 | } 141 | // logger.debug("{}",model); 142 | } 143 | resultList.add((T) model); 144 | } 145 | conn.close(); 146 | // logger.debug("{}",resultList); 147 | return resultList; 148 | } 149 | 150 | 151 | public static Class getSuperClassGenricType(Class clazz, int index) { 152 | 153 | Type genType = clazz.getGenericSuperclass(); 154 | if (!(genType instanceof ParameterizedType)) { 155 | return Object.class; 156 | } 157 | 
//返回表示此类型实际类型参数的 Type 对象的数组。 158 | Type[] params = ((ParameterizedType) genType).getActualTypeArguments(); 159 | 160 | if (index >= params.length || index < 0) { 161 | return Object.class; 162 | } 163 | if (!(params[index] instanceof Class)) { 164 | return Object.class; 165 | } 166 | 167 | return (Class) params[index]; 168 | 169 | } 170 | 171 | /** 172 | * 检查连接 173 | * 174 | * @throws SQLException 175 | */ 176 | public void checkConnect() throws SQLException, ClassNotFoundException { 177 | if (conn.isClosed()) { 178 | this.open(); 179 | } 180 | } 181 | 182 | public List executeQuery(String sql) throws SQLException, ClassNotFoundException { 183 | 184 | 185 | checkConnect(); 186 | //开启WAL模式提高并发 减少SQLITE_BUSY问题的出现概率 187 | updateJournalModel("PRAGMA journal_mode=WAL"); 188 | Statement stmt = conn.createStatement(); 189 | ResultSet set = stmt.executeQuery(sql); 190 | List mapList = new ArrayList<>(); 191 | List columnList = new ArrayList<>(); 192 | 193 | //获取表中的各列名称 194 | if (set != null) { 195 | ResultSetMetaData metaData = set.getMetaData(); 196 | int count = metaData.getColumnCount(); 197 | logger.debug(" columnCount: {}", count); 198 | for (int i = 1; i <= count; i++) { 199 | columnList.add(metaData.getColumnName(i)); 200 | } 201 | } 202 | 203 | logger.debug(" columnList : {}", columnList); 204 | 205 | while (set.next()) { 206 | for (String column : 207 | columnList) { 208 | Map map = new HashMap<>(); 209 | map.put(column, set.getObject(column)); 210 | mapList.add(map); 211 | } 212 | } 213 | 214 | set.close(); 215 | stmt.close(); 216 | this.close(); 217 | logger.debug("{}", mapList); 218 | return mapList; 219 | } 220 | 221 | /** 222 | * 检查表是否存在 223 | * 224 | * @param tableName 表名 225 | * @return 226 | */ 227 | public boolean tableIsExit(String tableName) { 228 | boolean result = false; 229 | String sql = "select * from sqlite_master where type='table' and name='" + tableName + "'"; 230 | 231 | try { 232 | logger.debug("sql : {}", sql); 233 | 234 | List mapList = 
executeQuery(sql); 235 | if (mapList.size() > 0) { 236 | result = true; 237 | } 238 | 239 | } catch (SQLException e) { 240 | logger.error("tableIsExit error {}", e); 241 | } catch (ClassNotFoundException e) { 242 | logger.error("ClassNotFoundException error {}", e); 243 | 244 | } 245 | return result; 246 | } 247 | 248 | private void close() throws SQLException { 249 | if (!conn.isClosed()) { 250 | conn.close(); 251 | } 252 | } 253 | 254 | 255 | public static void main(String[] args) { 256 | 257 | DBUtils dbUtils = new DBUtils<>(KafkaMonitorData.class); 258 | // boolean result = dbUtils.tableIsExit("logdata"); 259 | 260 | try { 261 | // dbUtils.executeUpdate("PRAGMA journal_mode=WAL"); 262 | 263 | String startTime = DateUtils.toDisplayStr("2016-12-30 10:00:00", DateUtils.HYPHEN_DISPLAY_DATE); 264 | String endTime = DateUtils.toDisplayStr("2016-12-30 10:50:00", DateUtils.HYPHEN_DISPLAY_DATE); 265 | 266 | String where = " where currentTime >= '" + startTime + "' and currentTime <='" + endTime + "'"; 267 | 268 | List dataList = dbUtils.query("select * from kafkaMonitorData " + where); 269 | 270 | logger.debug("{}", dataList); 271 | } catch (Exception e) { 272 | e.printStackTrace(); 273 | } 274 | 275 | //System.out.println(result); 276 | } 277 | 278 | } 279 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/Broker.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import co.solinx.kafka.monitor.common.DateUtils; 4 | import lombok.Data; 5 | 6 | import java.util.Arrays; 7 | import java.util.Date; 8 | 9 | /** 10 | * @author linxin 11 | * @version v1.0 12 | * Copyright (c) 2015 by solinx 13 | * @date 2016/12/12. 
 */
@Data
public class Broker {

    // broker id as registered in ZooKeeper
    private int id;
    // advertised endpoints, e.g. "PLAINTEXT://host:port"
    private String[] endpoints;
    private int jmx_port;
    private int port;
    private String host;
    private int version;
    // broker registration timestamp
    private Date timestamp;
    /**
     * Cluster controller: true when this broker is the active controller.
     */
    private boolean controller;


    // Formats the registration timestamp for display.
    public String getStartTime() {
        return DateUtils.getTimeStr(timestamp, DateUtils.HYPHEN_DISPLAY_DATE);
    }

    // NOTE(review): the controller flag is intentionally left out of toString()
    @Override
    public String toString() {
        return "Broker{" +
                "id=" + id +
                ", endpoints=" + Arrays.toString(endpoints) +
                ", jmx_port=" + jmx_port +
                ", port=" + port +
                ", host='" + host + '\'' +
                ", version=" + version +
                ", timestamp=" + timestamp +
                '}';
    }
}
--------------------------------------------------------------------------------
/src/main/java/co/solinx/kafka/monitor/model/Consumer.java:
--------------------------------------------------------------------------------
package co.solinx.kafka.monitor.model;

import lombok.Data;

/**
 * Consumer Model
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/12/20.
 */
@Data
public class Consumer {

    /**
     * consumerId
     */
    private String memberId;
    /**
     * consumerGroup
     */
    private String group;
    /**
     * topic
     */
    private String topic;
    /**
     * current partition
     */
    private int partition;
    /**
     * current offset
     */
    private long currentOffset;
    /**
     * offset of the last message in the partition
     */
    private long endOffset;
    // NOTE(review): presumably endOffset - currentOffset; confirm where it is populated
    private long lag;
    private String owner;
    private String clientId;
    private String host;


    @Override
    public String toString() {
        return "Consumer{" +
                "memberId='" + memberId + '\'' +
                ", group='" + group + '\'' +
                ", topic='" + topic + '\'' +
                ", partition=" + partition +
                ", currentOffset=" + currentOffset +
                ", endOffset=" + endOffset +
                ", lag=" + lag +
                ", owner='" + owner + '\'' +
                ", clientId='" + clientId + '\'' +
                ", host='" + host + '\'' +
                '}';
    }
}
--------------------------------------------------------------------------------
/src/main/java/co/solinx/kafka/monitor/model/Controller.java:
--------------------------------------------------------------------------------
package co.solinx.kafka.monitor.model;

import lombok.Data;

import java.util.Date;

/**
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by linx
 * @date 2017/12/12.
 */

@Data
public class Controller {

    /**
     * version
     */
    private int version;
    /**
     * brokerId
     */
    private int brokerId;
    // time the controller znode was created
    private Date timestamp;
}
--------------------------------------------------------------------------------
/src/main/java/co/solinx/kafka/monitor/model/KafkaConfig.java:
--------------------------------------------------------------------------------
package co.solinx.kafka.monitor.model;

import com.alibaba.fastjson.annotation.JSONField;
import lombok.Data;

/**
 * kafkaConfig
 *
 * NOTE(review): each field's default value equals its own config key name —
 * looks like a deliberate placeholder convention; confirm before relying on defaults.
 *
 * @author linx
 * @create 2018-02 06-22:17
 **/
@Data
public class KafkaConfig {

    @JSONField(name = "bootstrap.servers")
    private String bootstrapServers = "bootstrap.servers";
    @JSONField(name = "key.deserializer")
    private String keyDeserializer = "key.deserializer";
    @JSONField(name = "value.serializer")
    private String valueSerializer = "value.serializer";
    @JSONField(name = "key.serializer")
    private String keySerializer = "key.serializer";
    @JSONField(name = "value.deserializer")
    private String valueDeserializer = "value.deserializer";
    @JSONField(name = "enable.auto.commit")
    private String enableAutoCommit = "enable.auto.commit";
    @JSONField(name = "auto.commit.interval.ms")
    private String autoCommitIntervalMs = "auto.commit.interval.ms";
    @JSONField(name = "session.timeout.ms")
    private String sessionTimeoutMs = "session.timeout.ms";
    @JSONField(name = "group.id")
    private String groupId = "group.id";
}
--------------------------------------------------------------------------------
/src/main/java/co/solinx/kafka/monitor/model/KafkaMonitorData.java:
--------------------------------------------------------------------------------
package co.solinx.kafka.monitor.model;


import lombok.Data;

import java.util.Date;

/**
 * @author linxin
linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2016/12/29. 13 | */ 14 | @Data 15 | public class KafkaMonitorData { 16 | 17 | private long id; 18 | private double producerTotal; 19 | private double consumerTotal; 20 | private double delay; 21 | private double duplicated; 22 | private double lostTotal; 23 | private double consumerError; 24 | private double producerError; 25 | private double delayMsAvg; 26 | private double delayMsMax; 27 | private double duplicatedRate; 28 | private double lostRate; 29 | private double delayedRate; 30 | private double consumeAvailabilityAvg; 31 | private double produceAvailabilityAvg; 32 | private Date currentTime; 33 | 34 | 35 | 36 | 37 | public void setDelayMsAvg(double delayMsAvg) { 38 | if (Double.NEGATIVE_INFINITY == delayMsAvg || Double.POSITIVE_INFINITY == delayMsAvg) { 39 | this.delayMsAvg = 0; 40 | } else { 41 | this.delayMsAvg = delayMsAvg; 42 | } 43 | } 44 | 45 | public double getDelayMsMax() { 46 | return delayMsMax; 47 | } 48 | 49 | public void setDelayMsMax(double delayMsMax) { 50 | if (Double.NEGATIVE_INFINITY == delayMsMax || Double.POSITIVE_INFINITY == delayMsMax) { 51 | this.delayMsMax = 0; 52 | } else { 53 | this.delayMsMax = delayMsMax; 54 | } 55 | } 56 | 57 | 58 | 59 | @Override 60 | public String toString() { 61 | return "KafkaMonitorData{" + 62 | "id=" + id + 63 | ", producerTotal=" + producerTotal + 64 | ", consumerTotal=" + consumerTotal + 65 | ", delay=" + delay + 66 | ", duplicated=" + duplicated + 67 | ", lostTotal=" + lostTotal + 68 | ", consumerError=" + consumerError + 69 | ", producerError=" + producerError + 70 | ", delayMsAvg=" + delayMsAvg + 71 | ", delayMsMax=" + delayMsMax + 72 | ", duplicatedRate=" + duplicatedRate + 73 | ", lostRate=" + lostRate + 74 | ", delayedRate=" + delayedRate + 75 | ", consumeAvailabilityAvg=" + consumeAvailabilityAvg + 76 | ", produceAvailabilityAvg=" + produceAvailabilityAvg + 77 | ", currentTime=" + currentTime + 78 | '}'; 79 | } 80 | } 81 
| -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/Message.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import lombok.Data; 4 | 5 | /** 6 | * @author linxin 7 | * @version v1.0 8 | * Copyright (c) 2015 by solinx 9 | * @date 2016/12/15. 10 | */ 11 | @Data 12 | public class Message { 13 | 14 | /** 15 | * value 16 | */ 17 | private String message; 18 | /** 19 | * key 20 | */ 21 | private String key; 22 | private boolean valid; 23 | private long checksum; 24 | private String compressionCodec; 25 | 26 | 27 | @Override 28 | public String toString() { 29 | return "Message{" + 30 | "message='" + message + '\'' + 31 | ", key='" + key + '\'' + 32 | ", valid=" + valid + 33 | ", checksum=" + checksum + 34 | ", compressionCodec='" + compressionCodec + '\'' + 35 | '}'; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/MonitorConfig.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import com.alibaba.fastjson.annotation.JSONField; 4 | import lombok.Data; 5 | 6 | /** 7 | * Kafka monitor config 8 | * 9 | * @author linx 10 | * @create 2018-02 07-22:38 11 | **/ 12 | @Data 13 | public class MonitorConfig { 14 | 15 | @JSONField(name = "host") 16 | private String host; 17 | @JSONField(name = "port") 18 | private int port; 19 | @JSONField(name = "monitorTopic") 20 | private String monitorTopic; 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/PageData.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import com.alibaba.fastjson.JSONObject; 4 | import lombok.Data; 5 | 
/**
 * Envelope for JSON API responses: payload + status + optional extra fields.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by linx
 * @date 2018/1/31.
 */
@Data
public class PageData {

    // response payload
    private Object data;
    // HTTP-like status code; defaults to 200
    private int status=200;
    // error description, null on success
    private String error=null;
    // extra top-level fields merged into the serialized response
    private JSONObject extend;

    public PageData() {
        this.setStatus(200);
        this.setExtend(new JSONObject());
    }

    // Serializes to JSON; entries of `extend` are merged in at the top level.
    @Override
    public String toString() {
        JSONObject result = new JSONObject();
        result.put("data", data);
        result.put("status", status);
        result.put("error", error);

        result.putAll(extend);

        return result.toJSONString();
    }

    public static void main(String[] args) {
        PageData data = new PageData();
        JSONObject jsonObject = new JSONObject();
        jsonObject.put("total", 0);
        data.setData(jsonObject);
        data.setStatus(200);
    }
}
--------------------------------------------------------------------------------
/src/main/java/co/solinx/kafka/monitor/model/Partition.java:
--------------------------------------------------------------------------------
package co.solinx.kafka.monitor.model;

import lombok.Data;

import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * partition
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/12/14.
16 | */ 17 | @Data 18 | public class Partition { 19 | 20 | /** 21 | * partitionId 22 | */ 23 | private int id; 24 | /** 25 | * 分区副本 26 | */ 27 | private Map replicas = new HashMap<>(); 28 | 29 | private int[] replicasArray; 30 | /** 31 | * leader节点 32 | */ 33 | private Integer leader = -1; 34 | /** 35 | * 首选leaderId 36 | */ 37 | private Integer preferredLeaderId; 38 | /** 39 | * lastOffset 该partition的最后一条消息偏移量 40 | */ 41 | private long size = -1; 42 | /** 43 | * firstOffset 该partition的第一条消息偏移量 44 | */ 45 | private long firstOffset = -1; 46 | 47 | private int[] isr; 48 | 49 | public Partition() { 50 | } 51 | 52 | public Partition(int id) { 53 | this.id = id; 54 | } 55 | 56 | public Collection getReplicas() { 57 | return replicas.values(); 58 | } 59 | 60 | 61 | /** 62 | * 添加副本 63 | * 64 | * @param replica 65 | */ 66 | public void addReplica(PartitionReplica replica) { 67 | replicas.put(replica.getId(), replica); 68 | if (replica.isLeader()) { 69 | leader = replica.getId(); 70 | } 71 | } 72 | 73 | /** 74 | * 副本leader 75 | * 76 | * @return 77 | */ 78 | public PartitionReplica getLeader() { 79 | return replicas.get(leader); 80 | } 81 | 82 | public Integer getLeaderId() { 83 | return leader; 84 | } 85 | 86 | 87 | /** 88 | * ISR集合 89 | * 90 | * @return 91 | */ 92 | public List getInSyncReplicas() { 93 | return inSyncReplicaStream() 94 | .sorted(Comparator.comparingInt(PartitionReplica::getId)) 95 | .collect(Collectors.toList()); 96 | } 97 | 98 | private Stream unSyncReplicaStream() { 99 | return replicas.values().stream() 100 | .filter(p -> !p.isInService()); 101 | } 102 | 103 | private Stream inSyncReplicaStream() { 104 | return replicas.values().stream() 105 | .filter(PartitionReplica::isInService); 106 | } 107 | 108 | //todo 109 | public boolean isUnderReplicated() { 110 | // long isrCount=isr.length; 111 | int replicasCount=replicasArray.length; 112 | long isrCount=inSyncReplicaStream().count(); 113 | // int replicasCount=replicas.size(); 114 | return isrCount< 
replicasCount; 115 | } 116 | 117 | public boolean isLeaderPreferred() { 118 | 119 | return leader.equals(preferredLeaderId); 120 | } 121 | 122 | @Override 123 | public String toString() { 124 | return "TopicPartition{" + 125 | "id=" + id + 126 | ", replicas=" + replicas + 127 | ", leader=" + leader + 128 | ", preferredLeaderId=" + preferredLeaderId + 129 | ", size=" + size + 130 | ", firstOffset=" + firstOffset + 131 | '}'; 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/PartitionReplica.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import lombok.Data; 4 | 5 | /** 6 | * 分区与副本 7 | * @author linxin 8 | * @version v1.0 9 | * Copyright (c) 2015 by solinx 10 | * @date 2016/12/16. 11 | */ 12 | @Data 13 | public class PartitionReplica { 14 | 15 | /** 16 | * leader副本Id 17 | */ 18 | private Integer id; 19 | private boolean inService; 20 | private boolean leader; 21 | 22 | public PartitionReplica() { 23 | } 24 | 25 | public PartitionReplica(Integer id, boolean inService, boolean leader) { 26 | this.id = id; 27 | this.inService = inService; 28 | this.leader = leader; 29 | } 30 | 31 | 32 | @Override 33 | public String toString() { 34 | return "PartitionReplica{" + 35 | "id=" + id + 36 | ", inService=" + inService + 37 | ", leader=" + leader + 38 | '}'; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/model/Topic.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.model; 2 | 3 | import com.alibaba.fastjson.JSONObject; 4 | import lombok.Data; 5 | 6 | import java.util.Collection; 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | import java.util.stream.Collectors; 10 | 11 | /** 12 | * @author linxin 13 | * @version v1.0 14 
package co.solinx.kafka.monitor.model;

import com.alibaba.fastjson.JSONObject;
import lombok.Data;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * A Kafka topic: its partitions, config, and derived size/replication stats.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/12/13.
 */
@Data
public class Topic {


    private String brokerId;
    /** topic name */
    private String name;
    /** topic-level config entries — key/value types not visible here, kept raw */
    private Map config;
    /** partitions keyed by partition id */
    private Map<Integer, Partition> partitionMap = new HashMap<>();

    /** number of partitions */
    private int partitionSum;

    /** fraction of partitions led by their preferred replica */
    private double preferred;

    /** fraction of partitions that are under-replicated */
    private double underReplicated;

    private int version;
    /** raw partition JSON as read from ZooKeeper — TODO confirm source */
    private JSONObject partitions;

    public Topic(String name) {
        this.name = name;
    }

    public Topic() {

    }

    /**
     * Looks up a partition by id.
     *
     * @param partitionID partition id
     * @return the partition, or null if unknown
     */
    public Partition getPartition(int partitionID) {
        return this.partitionMap.get(partitionID);
    }

    /**
     * Partitions whose leader replica lives on the given broker.
     *
     * @param brokerID broker id
     * @return leader partitions for that broker
     */
    public Collection<Partition> getLeaderPartitions(int brokerID) {
        return partitionMap.values().stream()
                .filter(temp -> temp.getLeader() != null && temp.getLeader().getId() == brokerID)
                .collect(Collectors.toList());
    }

    /**
     * Total message count across all partitions (sum of last offsets),
     * skipping partitions whose offset is unknown (-1).
     *
     * @return total message count
     */
    public long getSize() {
        return partitionMap.values()
                .stream().map(Partition::getSize)
                .filter(s -> s != -1)
                .reduce(0L, Long::sum);
    }

    /**
     * Total available (non-expired) message count: sum of
     * {@code lastOffset - firstOffset} per partition.
     * <p>
     * Fix: partitions with an unknown last offset (-1) are now skipped, mirroring
     * {@link #getSize()}; previously such partitions could drag the total negative.
     *
     * @return available message count
     */
    public long getAvailableSize() {
        return partitionMap.values()
                .stream()
                .filter(p -> p.getSize() != -1)
                .map(p -> p.getSize() - p.getFirstOffset())
                .reduce(0L, Long::sum);
    }

    /**
     * Fraction of partitions whose leader is the preferred replica.
     * <p>
     * Fix: returns 0 for a topic with no partitions instead of 0/0 = NaN.
     *
     * @return preferred-leader ratio in [0, 1]
     */
    public double getPreferredReplicaPercent() {
        if (partitionMap.isEmpty()) {
            return 0d;
        }
        long preferredLeaderCount = partitionMap.values().stream()
                .filter(Partition::isLeaderPreferred)
                .count();
        return ((double) preferredLeaderCount) / ((double) partitionMap.size());
    }

    /** Partitions with fewer in-sync replicas than assigned replicas. */
    public Collection<Partition> getUnderReplicatedPartitions() {
        return partitionMap.values().stream()
                .filter(Partition::isUnderReplicated)
                .collect(Collectors.toList());
    }

    @Override
    public String toString() {
        return "Topic{" +
                "name='" + name + '\'' +
                ", config='" + config + '\'' +
                ", partitionMap=" + partitionMap +
                '}';
    }
}
String sessionTimeoutMs; 20 | private String connectionTimeoutMs; 21 | private String retryOneTime; 22 | 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/co/solinx/kafka/monitor/persist/MetricsDataPersist.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.monitor.persist; 2 | 3 | import co.solinx.kafka.monitor.common.DateUtils; 4 | import co.solinx.kafka.monitor.model.KafkaMonitorData; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | /** 9 | * @author linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2016/12/30. 13 | */ 14 | public class MetricsDataPersist { 15 | 16 | //private static DBUtils dbUtils = new DBUtils(KafkaMonitorData.class); 17 | private static Logger logger = LoggerFactory.getLogger(MetricsDataPersist.class); 18 | 19 | public void toDB(KafkaMonitorData model) { 20 | String sql = "insert into kafkaMonitorData(producerTotal,consumerTotal,delay,duplicated,lostTotal,consumerError," + 21 | "producerError,delayMsAvg,delayMsMax,duplicatedRate,lostRate,delayedRate,currentTime,consumeAvailabilityAvg,produceAvailabilityAvg)" + 22 | " values(" + model.getProducerTotal() + "," + 23 | +model.getConsumerTotal() + "," + 24 | +model.getDelay() + "," + 25 | +model.getDuplicated() + "," + 26 | +model.getLostTotal() + "," + 27 | +model.getConsumerError() + "," + 28 | +model.getProducerError() + "," + 29 | +model.getDelayMsAvg() + "," + 30 | +model.getDelayMsMax() + "," + 31 | +model.getDuplicatedRate() + "," + 32 | +model.getLostRate() + "," + 33 | +model.getDelayedRate() + "," + 34 | "'" + DateUtils.getTimeStr(model.getCurrentTime(), DateUtils.HYPHEN_DISPLAY_DATE) + "'," + 35 | +model.getConsumeAvailabilityAvg()+ "," + 36 | +model.getProduceAvailabilityAvg()+ ")"; 37 | try { 38 | //dbUtils.executeUpdate(sql); 39 | } catch (Exception e) { 40 | e.printStackTrace(); 41 | logger.error("{}", e); 42 | } 
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Cyclic integer id generator: hands out ids starting at {@code start} and
 * wraps back to 0 once the value passes {@code end}. Thread-safe.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/10/24.
 */
public class IDGenerator {

    private final int start;
    private final int end;
    private final AtomicInteger id;

    /**
     * NOTE(review): the (end, start) parameter order is surprising and already
     * caused a swapped-argument bug in {@link #main}; kept for compatibility.
     *
     * @param end   last id handed out before wrapping to 0 (inclusive)
     * @param start first id handed out
     */
    private IDGenerator(int end, int start) {
        this.end = end;
        this.start = start;
        id = new AtomicInteger(this.start);
    }


    /**
     * Returns the next id, wrapping to 0 after {@code end}.
     * <p>
     * Fix: the previous get / set(0) / getAndIncrement sequence was not atomic —
     * two threads could both observe the wrap condition or interleave between the
     * reset and the increment, producing duplicate or out-of-range ids. A single
     * CAS loop makes the wrap-and-increment one atomic step.
     *
     * @return next id in the cycle
     */
    public int getGeneratorID() {
        while (true) {
            int current = id.get();
            // wrap to 0 once the counter has moved past the end of the range
            int value = current > this.end ? 0 : current;
            if (id.compareAndSet(current, value + 1)) {
                return value;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Fix: the arguments were passed as (end=0, start=255), which made every
        // call wrap immediately and print 0 forever; the intent is ids 0..255.
        IDGenerator generator = new IDGenerator(255, 0);

        while (true) {
            Thread.sleep(1000);
            System.out.println(generator.getGeneratorID());
        }
    }

}
package co.solinx.kafka.monitor.utils;

import com.alibaba.fastjson.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;

/**
 * Created by linx on 2016-03-08.
 * Loads a JSON file into a fastjson JSONObject; returns an empty object on
 * any error or empty input instead of throwing.
 */
public class JsonLoader {

    static Logger logger = LoggerFactory.getLogger(JsonLoader.class);

    /**
     * Reads the stream to EOF.
     * <p>
     * Fix: the previous loops keyed off {@code available()}, which may return 0
     * before EOF (it only reports bytes readable without blocking) and could
     * silently truncate the input; reading until {@code read} returns -1 is the
     * reliable termination condition. Also deduplicates the copy loop that was
     * repeated in both overloads.
     *
     * @param inputStream source stream; NOT closed here (caller owns it)
     * @return all bytes of the stream
     * @throws IOException on read failure
     */
    private static byte[] readFully(InputStream inputStream) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] buf = new byte[1024];
        int readSize;
        while ((readSize = inputStream.read(buf, 0, buf.length)) > 0) {
            baos.write(buf, 0, readSize);
        }
        return baos.toByteArray();
    }

    /**
     * Parses the stream as JSON.
     *
     * @param inputStream source stream; the caller remains responsible for closing it
     * @return parsed object, or an empty JSONObject on error/empty input
     */
    public static JSONObject loadJSONFile(InputStream inputStream) {

        JSONObject confJSONObj;
        try {
            byte[] data = readFully(inputStream);
            if (data.length > 0) {
                confJSONObj = JSONObject.parseObject(new String(data, StandardCharsets.UTF_8));
            } else {
                confJSONObj = JSONObject.parseObject("{}");
            }
        } catch (Exception e) {
            // Fix: pass the throwable as the last SLF4J argument (no placeholder)
            // so the stack trace is logged instead of being consumed by "{}".
            logger.error(" load JSON file err ", e);
            confJSONObj = JSONObject.parseObject("{}");
        }
        return confJSONObj;
    }

    /**
     * Parses the file at {@code path} as JSON.
     *
     * @param path file path; empty string or missing file yields an empty object
     * @return parsed object, or an empty JSONObject on error/empty/missing input
     */
    public static JSONObject loadJSONFile(String path) {
        logger.debug(" file path {} ", path);
        JSONObject confJSONObj = new JSONObject();
        try {
            if (!path.isEmpty()) {
                Path paths = Paths.get(path);
                if (Files.exists(paths, LinkOption.NOFOLLOW_LINKS)) {
                    // Fix: try-with-resources — the stream previously leaked (never closed).
                    try (InputStream inputStream = Files.newInputStream(paths, StandardOpenOption.READ)) {
                        byte[] data = readFully(inputStream);
                        if (data.length > 0) {
                            confJSONObj = JSONObject.parseObject(new String(data, StandardCharsets.UTF_8));
                        } else {
                            confJSONObj = JSONObject.parseObject("{}");
                        }
                    }
                }
            }
        } catch (Exception e) {
            logger.error(" load JSON file err ", e);
            confJSONObj = JSONObject.parseObject("{}");
        }
        logger.debug("{}", confJSONObj);
        return confJSONObj;
    }
}
package co.solinx.kafka.monitor.utils.zookeeper;

import co.solinx.kafka.monitor.utils.JsonLoader;
import com.alibaba.fastjson.JSONObject;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.CuratorWatcher;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.RetryOneTime;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

/**
 * Thin wrapper around Curator for ZooKeeper access: node CRUD, existence and
 * connection checks, and re-registering child/data watchers.
 * Configuration is read from a JSON file ({@code host}, {@code SessionTimeoutMs},
 * {@code ConnectionTimeoutMs}, {@code RetryOneTime}) with built-in defaults.
 *
 * @author linxin
 * @version v1.0
 * Copyright (c) 2015 by solinx
 * @date 2016/10/20.
 */
public class ZookeeperClient {

    Logger logger = LoggerFactory.getLogger(ZookeeperClient.class);

    private CuratorFramework curator;
    private static ZookeeperClient zkClient = null;

    /**
     * @param confPath json格式Zk配置文件路径 (path to JSON zk config; connects asynchronously)
     */
    public ZookeeperClient(String confPath) {
        this.initZookeeper(confPath, true);
    }

    /**
     * Lazily-created shared instance backed by the classpath zkConfig.json
     * (synchronous connect).
     * <p>
     * Fix: the lazy init was not thread-safe — two threads could each build a
     * client (and leak one Curator connection); {@code synchronized} makes the
     * check-then-create atomic.
     */
    public static synchronized ZookeeperClient getInstance() {
        if (zkClient == null) {
            zkClient = new ZookeeperClient(
                    ZookeeperClient.class.getClassLoader().getResourceAsStream("zkConfig.json"), false);
        }
        return zkClient;
    }

    /**
     * @param confPath json格式Zk配置文件路径 (path to JSON zk config)
     * @param isAsync  是否异步 — when false, the constructor blocks until connected
     */
    public ZookeeperClient(String confPath, boolean isAsync) {
        this.initZookeeper(confPath, isAsync);
    }

    /**
     * Uses the built-in default config (127.0.0.1:2181) and connects synchronously.
     */
    public ZookeeperClient() {
        this.initZookeeper("", false);
    }


    public ZookeeperClient(InputStream stream, boolean isAsync) {
        this.initZookeeper(stream, isAsync);
    }

    public ZookeeperClient(InputStream stream) {
        this.initZookeeper(stream, true);
    }

    /**
     * Loads config from a file path and builds the Curator client.
     *
     * @param confPath config file path ("" falls back to defaults)
     * @param isAsync  when false, blocks until the connection is up
     */
    private void initZookeeper(String confPath, boolean isAsync) {

        JSONObject zkConfig = loadZkConf(confPath);

        buildCurator(zkConfig, isAsync);
    }

    /**
     * Loads config from a stream and builds the Curator client.
     *
     * @param inputStream config stream
     * @param isAsync     when false, blocks until the connection is up
     */
    private void initZookeeper(InputStream inputStream, boolean isAsync) {

        JSONObject zkConfig = loadZkConf(inputStream);

        buildCurator(zkConfig, isAsync);
    }

    /** Builds and starts the Curator client from the given config. */
    private void buildCurator(JSONObject zkConfig, boolean isAsync) {
        CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
        builder.connectString(zkConfig.getString("host"));
        builder.sessionTimeoutMs(zkConfig.getInteger("SessionTimeoutMs"));
        builder.connectionTimeoutMs(zkConfig.getInteger("ConnectionTimeoutMs"));
        builder.retryPolicy(new RetryOneTime(zkConfig.getInteger("RetryOneTime")));
        curator = builder.build();
        curator.start();
        if (!isAsync) {
            waitingConnect();
        }
    }

    /**
     * Busy-waits (10ms polls) until the client is connected.
     * <p>
     * Fix: on {@link InterruptedException} the interrupt status is now restored
     * and the wait aborted, instead of swallowing the interrupt and looping on —
     * previously an interrupted thread could spin here forever.
     */
    private void waitingConnect() {
        while (!checkConnect()) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.error("interrupted while waiting for zookeeper connection", e);
                return;
            }
        }
    }

    // NOTE(review): unused stub — the listener body is empty; kept in case a
    // reconnect strategy is added, otherwise it can be deleted.
    private void reconnectProcess() {
        curator.getConnectionStateListenable().addListener(new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
            }
        });
    }

    /**
     * 添加Connect状态监听器 (register a connection-state listener)
     *
     * @param listener listener to register
     */
    public void addConnectionStateListener(ConnectionStateListener listener) {
        curator.getConnectionStateListenable().addListener(listener);
    }


    /** Loads config from a path, falling back to defaults when empty/missing. */
    private JSONObject loadZkConf(String path) {
        JSONObject zkConfig = JsonLoader.loadJSONFile(path);
        if (zkConfig.size() == 0) {
            zkConfig = defaultConfig();
        }
        return zkConfig;
    }

    /** Loads config from a stream, falling back to defaults when empty/missing. */
    private JSONObject loadZkConf(InputStream inputStream) {
        JSONObject zkConfig = JsonLoader.loadJSONFile(inputStream);
        if (zkConfig.size() == 0) {
            zkConfig = defaultConfig();
        }
        return zkConfig;
    }

    /** Built-in defaults: local zookeeper, 3s timeouts, one 3s retry. */
    private JSONObject defaultConfig() {
        JSONObject zkConfig = new JSONObject();
        zkConfig.put("host", "127.0.0.1:2181");
        zkConfig.put("SessionTimeoutMs", "3000");
        zkConfig.put("ConnectionTimeoutMs", "3000");
        zkConfig.put("RetryOneTime", "3000");
        logger.debug(" load default zkConfig {}", zkConfig);
        return zkConfig;
    }

    /**
     * 检查节点是否存在 (whether a node exists; false when disconnected or on error)
     *
     * @param path node path
     * @return true if the node exists
     */
    public boolean checkExists(String path) {
        boolean exists = false;
        try {
            if (checkConnect()) {
                Stat stat = curator.checkExists().forPath(path);
                if (stat != null) {
                    exists = true;
                }
            }
        } catch (Exception e) {
            // Fix: log with a message and the throwable as the last argument so the
            // stack trace is preserved (logger.error("{}", e) drops it).
            logger.error("checkExists failed, path={}", path, e);
        }
        return exists;
    }

    /**
     * 创建节点 (create a node of the given mode)
     *
     * @param path 路径
     * @param mode 节点类型
     * @throws Exception on zookeeper error
     */
    public void createNode(String path, CreateMode mode) throws Exception {
        curator.create().withMode(mode).forPath(path);
    }

    /**
     * 创建节点 (create a node with Curator's default mode — persistent)
     *
     * @param path 路径
     * @throws Exception on zookeeper error
     */
    public void createNode(String path) throws Exception {
        curator.create().forPath(path);

    }

    /**
     * 递归创建节点 (recursively create container nodes)
     *
     * @param path 路径
     * @throws Exception on zookeeper error
     */
    public void createContainers(String path) throws Exception {
        curator.createContainers(path);
    }

    /**
     * 创建节点 (create a node holding the given data)
     *
     * @param path 路径
     * @param data 节点中数据
     * @throws Exception on zookeeper error
     */
    public void createNode(String path, String data) throws Exception {
        curator.create().forPath(path, data.getBytes());
    }

    /**
     * 删除节点 (delete the node and, recursively, all its children first)
     *
     * @param path 路径
     * @throws Exception on zookeeper error
     */
    public void deleteNode(String path) throws Exception {
        List<String> childs = getChildrenFullPath(path);
        for (int i = 0; i < childs.size(); i++) {
            deleteNode(childs.get(i));
        }
        curator.delete().forPath(path);
    }

    /**
     * 取得子节点 (children of a node; the node is created first if missing)
     *
     * @param path 路径
     * @return child node names
     * @throws Exception on zookeeper error
     */
    public List<String> getChildren(String path) throws Exception {
        if (!checkExists(path)) {
            createNode(path);
        }
        return curator.getChildren().forPath(path);
    }

    /**
     * 以绝对路径返回子节点 (children as full paths; empty when node missing or disconnected)
     *
     * @param path parent path
     * @return full child paths
     * @throws Exception on zookeeper error
     */
    public List<String> getChildrenFullPath(String path) throws Exception {
        List<String> pathList = new ArrayList<>();
        if (checkExists(path)) {
            if (checkConnect()) {
                pathList = curator.getChildren().forPath(path);
                for (int i = 0; i < pathList.size(); i++) {
                    String temp = pathList.get(i);
                    temp = path + "/" + temp;
                    pathList.set(i, temp);
                }
            }
        }
        return pathList;
    }

    /**
     * 创建节点 (create a node with mode and data)
     *
     * @param path 路径
     * @param mode 模式
     * @param data 数据
     * @throws Exception on zookeeper error
     */
    public void createNode(String path, CreateMode mode, String data) throws Exception {
        curator.create().withMode(mode).forPath(path, data.getBytes());
    }

    /**
     * 检查链接 (whether the underlying zookeeper client is connected)
     *
     * @return true if connected
     */
    public boolean checkConnect() {
        return curator.getZookeeperClient().isConnected();
    }

    /**
     * 关闭连接 (close the client if it was started)
     */
    public void close() {
        logger.debug("--------zoo close");
        if (curator.getState() == CuratorFrameworkState.STARTED) {
            curator.close();
        }
    }

    /**
     * 开启 (start the client)
     */
    public void start() {
        curator.start();
    }


    /**
     * 取得节点数据 (node data; empty array when the node does not exist)
     *
     * @param path 路径
     * @return node data bytes
     * @throws Exception on zookeeper error
     */
    public byte[] getData(String path) throws Exception {
        byte[] result = new byte[]{};
        if (checkExists(path)) {
            result = curator.getData().forPath(path);
        }
        return result;
    }

    /**
     * 设置节点data值 (set node data; no-op when disconnected)
     *
     * @param path node path
     * @param data data to store
     * @throws Exception on zookeeper error
     */
    public void setData(String path, String data) throws Exception {
        if (checkConnect()) {
            curator.setData().forPath(path, data.getBytes());
        }
    }

    /**
     * 监听该路径下得子节点 (watch children; ZooKeeper watches are one-shot, so the
     * watcher re-registers itself after each event)
     *
     * @param path     路径
     * @param callback 监听回调
     * @throws Exception on zookeeper error
     */
    public void watcherChildrenNode(final String path, final WatcherEventCallback callback) throws Exception {

        // children as seen when the watch is set, passed to the callback for diffing
        final List<String> oldChildren = this.getChildren(path);
        logger.debug("oldChildren------{}", oldChildren);
        CuratorWatcher watcher = new CuratorWatcher() {
            @Override
            public void process(WatchedEvent event) throws Exception {
                try {
                    WatcherEvent watcherEvent = new WatcherEvent();
                    watcherEvent.setState(event.getState());
                    watcherEvent.setPath(event.getPath());
                    watcherEvent.setEventType(event.getType());
                    watcherEvent.setOldChildrenNode(oldChildren);

                    callback.watchedEvent(watcherEvent);
                    if (checkExists(path)) {
                        // re-arm the one-shot watch
                        watcherChildrenNode(path, callback);
                    }
                } catch (Exception e) {
                    logger.debug("{}", e.getMessage());
                }
            }
        };
        curator.getChildren().usingWatcher(watcher).forPath(path);
    }

    /**
     * 监听多个节点下的子节点 (watch children of several nodes)
     *
     * @param pathList 路径List
     * @param callback 回调函数
     * @throws Exception on zookeeper error
     */
    public void watcherChildrenNodes(List<String> pathList, final WatcherEventCallback callback) throws Exception {

        for (final String path :
                pathList) {
            this.watcherChildrenNode(path, callback);
        }
    }

    /**
     * 监听节点数据 (watch node data; re-registers itself after each event)
     *
     * @param path     node path
     * @param callback event callback
     * @throws Exception on zookeeper error
     */
    public void watcherData(final String path, final WatcherEventCallback callback) throws Exception {

        CuratorWatcher watcherData = new CuratorWatcher() {
            @Override
            public void process(WatchedEvent event) throws Exception {
                try {
                    WatcherEvent watcherEvent = new WatcherEvent();
                    watcherEvent.setState(event.getState());
                    watcherEvent.setPath(event.getPath());
                    watcherEvent.setEventType(event.getType());


                    callback.watchedEvent(watcherEvent);
                    // re-arm the one-shot watch
                    watcherData(path, callback);
                } catch (Exception e) {
                    logger.debug("{}", e.getMessage());
                }
            }
        };
        if (checkExists(path)) {
            curator.getData().usingWatcher(watcherData).forPath(path);
        }
    }
}
-------------------------------------------------------------------------------- 1 | { 2 | "Wysiwyg.js": "static/js/Wysiwyg.f30d8599.chunk.js", 3 | "Wysiwyg.js.map": "static/js/Wysiwyg.f30d8599.chunk.js.map", 4 | "charts.js": "static/js/charts.5b28f0c0.js", 5 | "charts.js.map": "static/js/charts.5b28f0c0.js.map", 6 | "main.css": "static/css/main.a5c0e99c.css", 7 | "main.css.map": "static/css/main.a5c0e99c.css.map", 8 | "main.js": "static/js/main.ae3dc27e.js", 9 | "main.js.map": "static/js/main.ae3dc27e.js.map", 10 | "static\\media\\beauty.jpg": "static/media/beauty.defb9858.jpg", 11 | "static\\media\\default-skin.svg": "static/media/default-skin.b257fa9c.svg", 12 | "vendor.js": "static/js/vendor.061917d4.js", 13 | "vendor.js.map": "static/js/vendor.061917d4.js.map" 14 | } -------------------------------------------------------------------------------- /src/main/resources/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linxin26/kafka-monitor/5ea333cf6d5f428ac2e86fd5e3437b73ea4efc97/src/main/resources/static/favicon.ico -------------------------------------------------------------------------------- /src/main/resources/static/index.html: -------------------------------------------------------------------------------- 1 | Kafka Monitor
-------------------------------------------------------------------------------- /src/main/resources/static/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | } 10 | ], 11 | "start_url": "./index.html", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /src/main/resources/static/service-worker.js: -------------------------------------------------------------------------------- 1 | "use strict";function setOfCachedUrls(e){return e.keys().then(function(e){return e.map(function(e){return e.url})}).then(function(e){return new Set(e)})}var precacheConfig=[["./index.html","94df8773fd0e6bd5646354149335d03d"],["./static/css/main.a5c0e99c.css","6e9199dee447a87eb0be1da978553c95"],["./static/js/Wysiwyg.f30d8599.chunk.js","f5e63b9449d9dcb1f9de63182678f2a6"],["./static/js/charts.5b28f0c0.js","a7f23263380a104235ee0caff6cc38b4"],["./static/js/main.ae3dc27e.js","82b33fe8ac99e151aa528dfd5c762825"],["./static/js/vendor.061917d4.js","ecfc6986d754ca187eef66f94205307a"],["./static/media/beauty.defb9858.jpg","defb98583257610e959baa67ab0fa53b"],["./static/media/default-skin.b257fa9c.svg","b257fa9c5ac8c515ac4d77a667ce2943"]],cacheName="sw-precache-v3-sw-precache-webpack-plugin-"+(self.registration?self.registration.scope:""),ignoreUrlParametersMatching=[/^utm_/],addDirectoryIndex=function(e,t){var n=new URL(e);return"/"===n.pathname.slice(-1)&&(n.pathname+=t),n.toString()},cleanResponse=function(e){return e.redirected?("body"in e?Promise.resolve(e.body):e.blob()).then(function(t){return new Response(t,{headers:e.headers,status:e.status,statusText:e.statusText})}):Promise.resolve(e)},createCacheKey=function(e,t,n,r){var a=new 
URL(e);return r&&a.pathname.match(r)||(a.search+=(a.search?"&":"")+encodeURIComponent(t)+"="+encodeURIComponent(n)),a.toString()},isPathWhitelisted=function(e,t){if(0===e.length)return!0;var n=new URL(t).pathname;return e.some(function(e){return n.match(e)})},stripIgnoredUrlParameters=function(e,t){var n=new URL(e);return n.hash="",n.search=n.search.slice(1).split("&").map(function(e){return e.split("=")}).filter(function(e){return t.every(function(t){return!t.test(e[0])})}).map(function(e){return e.join("=")}).join("&"),n.toString()},hashParamName="_sw-precache",urlsToCacheKeys=new Map(precacheConfig.map(function(e){var t=e[0],n=e[1],r=new URL(t,self.location),a=createCacheKey(r,hashParamName,n,/\.\w{8}\./);return[r.toString(),a]}));self.addEventListener("install",function(e){e.waitUntil(caches.open(cacheName).then(function(e){return setOfCachedUrls(e).then(function(t){return Promise.all(Array.from(urlsToCacheKeys.values()).map(function(n){if(!t.has(n)){var r=new Request(n,{credentials:"same-origin"});return fetch(r).then(function(t){if(!t.ok)throw new Error("Request for "+n+" returned a response with status "+t.status);return cleanResponse(t).then(function(t){return e.put(n,t)})})}}))})}).then(function(){return self.skipWaiting()}))}),self.addEventListener("activate",function(e){var t=new Set(urlsToCacheKeys.values());e.waitUntil(caches.open(cacheName).then(function(e){return e.keys().then(function(n){return Promise.all(n.map(function(n){if(!t.has(n.url))return e.delete(n)}))})}).then(function(){return self.clients.claim()}))}),self.addEventListener("fetch",function(e){if("GET"===e.request.method){var t,n=stripIgnoredUrlParameters(e.request.url,ignoreUrlParametersMatching);(t=urlsToCacheKeys.has(n))||(n=addDirectoryIndex(n,"index.html"),t=urlsToCacheKeys.has(n));!t&&"navigate"===e.request.mode&&isPathWhitelisted(["^(?!\\/__).*"],e.request.url)&&(n=new 
URL("./index.html",self.location).toString(),t=urlsToCacheKeys.has(n)),t&&e.respondWith(caches.open(cacheName).then(function(e){return e.match(urlsToCacheKeys.get(n)).then(function(e){if(e)return e;throw Error("The cached response that was expected is missing.")})}).catch(function(t){return console.warn('Couldn\'t serve response for "%s" from cache: %O',e.request.url,t),fetch(e.request)}))}}); -------------------------------------------------------------------------------- /src/main/resources/static/static/media/b1.553c69e9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linxin26/kafka-monitor/5ea333cf6d5f428ac2e86fd5e3437b73ea4efc97/src/main/resources/static/static/media/b1.553c69e9.jpg -------------------------------------------------------------------------------- /src/main/resources/static/static/media/beauty.defb9858.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linxin26/kafka-monitor/5ea333cf6d5f428ac2e86fd5e3437b73ea4efc97/src/main/resources/static/static/media/beauty.defb9858.jpg -------------------------------------------------------------------------------- /src/main/resources/static/static/media/default-skin.b257fa9c.svg: -------------------------------------------------------------------------------- 1 | default-skin 2 -------------------------------------------------------------------------------- /src/main/resources/static/temp/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | KafkaMonitor 7 | 8 | 34 | 35 | 36 | 37 |
38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /src/main/resources/static/temp/index.jsp: -------------------------------------------------------------------------------- 1 | 2 | 3 |

Hello kafkaMonitor!

4 | 5 | -------------------------------------------------------------------------------- /src/main/resources/zkConfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "connectIp" : "127.0.0.1:12181", 3 | "SessionTimeoutMs" : "3000", 4 | "ConnectionTimeoutMs" : "3000", 5 | "RetryOneTime" : "3000" 6 | } -------------------------------------------------------------------------------- /src/main/webapp/chart.html: -------------------------------------------------------------------------------- 1 | 10 | 11 | 12 | 13 | 14 | 15 | JS Bin 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |

Kafka Monitor GUI

28 | 29 |
30 | 34 | 35 | 36 |
37 |
38 |

Search for figure:

39 |
40 |
41 |
42 | 43 |
44 |
45 |
46 |
47 | 48 | 49 | 216 | 217 | 218 | 219 | -------------------------------------------------------------------------------- /src/main/webapp/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | KafkaMonitor 7 | 8 | 34 | 35 | 36 | 37 |
38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /src/main/webapp/index.jsp: -------------------------------------------------------------------------------- 1 | 2 | 3 |

Hello kafkaMonitor!

4 | 5 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/AppTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx; 2 | 3 | import junit.framework.Test; 4 | import junit.framework.TestCase; 5 | import junit.framework.TestSuite; 6 | 7 | /** 8 | * Unit test for simple App. 9 | */ 10 | public class AppTest 11 | extends TestCase 12 | { 13 | /** 14 | * Create the test case 15 | * 16 | * @param testName name of the test case 17 | */ 18 | public AppTest( String testName ) 19 | { 20 | super( testName ); 21 | } 22 | 23 | /** 24 | * @return the suite of tests being tested 25 | */ 26 | public static Test suite() 27 | { 28 | return new TestSuite( AppTest.class ); 29 | } 30 | 31 | /** 32 | * Rigourous Test :-) 33 | */ 34 | public void testApp() 35 | { 36 | assertTrue( true ); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/BrokerDataTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import com.alibaba.fastjson.JSONArray; 5 | import com.alibaba.fastjson.JSONObject; 6 | import co.solinx.kafka.monitor.model.Broker; 7 | import co.solinx.kafka.monitor.model.Topic; 8 | import co.solinx.kafka.monitor.model.Partition; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import java.util.ArrayList; 13 | import java.util.Collection; 14 | import java.util.List; 15 | 16 | /** 17 | * @author linxin 18 | * @version v1.0 19 | * Copyright (c) 2015 by solinx 20 | * @date 2016/12/16. 
21 | */ 22 | public class BrokerDataTest { 23 | 24 | static Logger logger= LoggerFactory.getLogger(BrokerDataTest.class); 25 | static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 26 | 27 | 28 | public static void main(String[] args) throws InterruptedException { 29 | brokers(); 30 | //broker(0); 31 | // topics(0); 32 | } 33 | 34 | 35 | 36 | public static void brokers() throws InterruptedException { 37 | while (true) { 38 | Thread.sleep(2000); 39 | JSONObject pageData=new JSONObject(); 40 | List brokerList = service.getBrokers(); 41 | JSONArray array=new JSONArray(); 42 | 43 | pageData.put("data", brokerList); 44 | 45 | 46 | pageData.put("result", 200); 47 | 48 | logger.debug("---- pageData {} ",pageData); 49 | } 50 | } 51 | 52 | public static void broker(int id) throws InterruptedException { 53 | 54 | while (true) { 55 | Thread.sleep(2000); 56 | Broker broker = service.getBrokerById(id); 57 | List topicList = service.getTopics(); 58 | JSONObject pageData = new JSONObject(); 59 | int partitionCount = 0; 60 | for (Topic topic : 61 | topicList) { 62 | partitionCount += topic.getLeaderPartitions(id).size(); 63 | } 64 | pageData.put("data", broker); 65 | pageData.put("partitionCount", partitionCount); 66 | pageData.put("topicCount", topicList.size()); 67 | 68 | 69 | logger.debug("---- pageData {} ", pageData); 70 | } 71 | } 72 | 73 | public static void topics(int brokerID) throws InterruptedException { 74 | 75 | while (true) { 76 | Thread.sleep(2000); 77 | List topicList = service.getTopics(); 78 | JSONObject pageData = new JSONObject(); 79 | JSONArray array=new JSONArray(); 80 | for (Topic topic : 81 | topicList) { 82 | Collection topicPar=topic.getLeaderPartitions(brokerID); 83 | int partitionCount = topicPar.size(); 84 | JSONObject topicObj=new JSONObject(); 85 | topicObj.put("name",topic.getName()); 86 | topicObj.put("partitionCount",topic.getPartitionMap().size()); 87 | topicObj.put("brokerPartitionCount", partitionCount); 88 | List 
idArray=new ArrayList<>(); 89 | for (Partition partition : 90 | topicPar) { 91 | idArray.add(partition.getId()); 92 | } 93 | topicObj.put("PartitionIds",idArray); 94 | array.add(topicObj); 95 | } 96 | pageData.put("data", array); 97 | 98 | logger.debug("---- pageData {} ", pageData); 99 | } 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/KafkaBaseInfoTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.Topic; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | /** 9 | * @author linxin 10 | * @version v1.0 11 | * Copyright (c) 2015 by solinx 12 | * @date 2016/12/12. 13 | */ 14 | public class KafkaBaseInfoTest { 15 | 16 | static Logger logger = LoggerFactory.getLogger(KafkaBaseInfoTest.class); 17 | 18 | public static void main(String[] args) throws Exception { 19 | // KafkaBaseInfoService kafka = KafkaBaseInfoService.getInstance(); 20 | 21 | 22 | // List brokersList = kafka.getBrokers(); 23 | // 24 | // for (Broker temp : 25 | // brokersList) { 26 | // logger.debug("{} {} ", temp, DateUtils.getTimeStr(new Date(temp.getTimestamp()), DateUtils.HYPHEN_DISPLAY_DATE)); 27 | // 28 | // } 29 | // 30 | // List topicList = kafka.getTopics(); 31 | // 32 | // 33 | // for (Topic topic : 34 | // topicList) { 35 | // logger.debug("------ {}", topic); 36 | // } 37 | // 38 | // logger.debug("{}", JSONArray.toJSONString(topicList)); 39 | 40 | // CuratorFramework curator = CuratorService.getInstance(); 41 | // 42 | // TreeCache topicTreeCache = new TreeCache(curator, ZkUtils.BrokerIdsPath()); 43 | // logger.debug("---- {}", ZkUtils.BrokerIdsPath()); 44 | // topicTreeCache.start(); 45 | // Thread.sleep(2000); 46 | // 47 | // 48 | // while (true) { 49 | // Map brokerMap = 
topicTreeCache.getCurrentChildren(ZkUtils.BrokerIdsPath()); 50 | // logger.debug("====={}", brokerMap); 51 | // Thread.sleep(2000); 52 | // } 53 | 54 | 55 | KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 56 | 57 | 58 | while (true) { 59 | Thread.sleep(5000); 60 | // logger.debug("-----------brokerCache {}", service.brokerCache); 61 | // service.getTopics(); 62 | logger.debug("----------- {}", service.getBrokerById(0)); 63 | Topic topic= service.getTopic("kafka-monitor-topic"); 64 | logger.debug("-----------topic {}", topic); 65 | } 66 | 67 | // MessageService messageService = new MessageService(); 68 | // Thread.sleep(2000); 69 | // logger.debug("==== {}", messageService.getMesage("foo", 0, 0, 100)); 70 | 71 | } 72 | 73 | } 74 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/KafkaConfigTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import org.apache.kafka.clients.producer.KafkaProducer; 4 | import org.apache.kafka.clients.producer.Producer; 5 | import org.apache.kafka.clients.producer.ProducerRecord; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.util.Properties; 10 | 11 | /** 12 | * @author linxin 13 | * @version v1.0 14 | * Copyright (c) 2015 by solinx 15 | * @date 2016/12/12. 
16 | */ 17 | public class KafkaConfigTest { 18 | 19 | static Logger logger = LoggerFactory.getLogger(KafkaConfigTest.class); 20 | 21 | public static void main(String[] args) { 22 | 23 | Properties properties = new Properties(); 24 | properties.put("bootstrap.servers", "10.10.1.104:9093"); 25 | properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 26 | properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 27 | 28 | //“所有”设置将导致记录的完整提交阻塞,最慢的,但最持久的设置。 29 | //properties.put("acks", "all"); 30 | //如果请求失败,生产者也会自动重试,即使设置成0 the producer can automatically retry. 31 | properties.put("retries", 1); 32 | //The producer maintains buffers of unsent records for each partition. 33 | properties.put("batch.size", 16384); 34 | //默认立即发送,这里这是延时毫秒数 35 | properties.put("linger.ms", 1); 36 | //生产者缓冲大小,当缓冲区耗尽后,额外的发送调用将被阻塞。时间超过max.block.ms将抛出TimeoutException 37 | properties.put("buffer.memory", 34432); 38 | 39 | System.out.println("start..."); 40 | Producer producer = new KafkaProducer(properties); 41 | 42 | logger.debug("config {}", properties); 43 | 44 | 45 | 46 | 47 | 48 | while(true) { 49 | 50 | for (int i = 0; i < 1000; i++) { 51 | System.out.println("start......"); 52 | producer.send(new ProducerRecord("foo", Integer.toString(i), Integer.toString(i))); 53 | System.out.println("send " + i); 54 | 55 | } 56 | 57 | //producer.close(); 58 | producer.flush(); 59 | 60 | logger.debug("---------metrics{}", producer.metrics()); 61 | try { 62 | Thread.sleep(1000); 63 | } catch (InterruptedException e) { 64 | e.printStackTrace(); 65 | } 66 | 67 | } 68 | } 69 | 70 | 71 | } 72 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/KafkaConsumer.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.core.service.CustomConsumerGroupService; 4 | import 
kafka.admin.ConsumerGroupCommand; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.util.ArrayList; 9 | import java.util.Arrays; 10 | import java.util.List; 11 | 12 | /** 13 | * @author linxin 14 | * @version v1.0 15 | * Copyright (c) 2015 by solinx 16 | * @date 2016/12/19. 17 | */ 18 | public class KafkaConsumer { 19 | 20 | 21 | static Logger logger = LoggerFactory.getLogger(KafkaConsumer.class); 22 | 23 | public static void main(String[] args) { 24 | List optionsList = new ArrayList<>(); 25 | //optionsList.add() 26 | // ConsumerGroupCommand.ConsumerGroupCommandOptions commandOptions = new ConsumerGroupCommand.ConsumerGroupCommandOptions(new String[]{"--bootstrap-server=10.10.1.104:9093", "--group=test"}); 27 | // ConsumerGroupCommand.ConsumerGroupService f = new ConsumerGroupCommand.KafkaConsumerGroupService(commandOptions); 28 | // 29 | // 30 | // f.list(); 31 | // f.describeGroup("test2"); 32 | 33 | //TestKafkaConsumerGroupService service = new TestKafkaConsumerGroupService(); 34 | 35 | CustomConsumerGroupService service = new CustomConsumerGroupService(); 36 | 37 | 38 | 39 | //logger.debug("consumerList {}", service.getConsumerList("test3")); 40 | // logger.debug("consumerList {}", service.getConsumerByTopic("foo2")); 41 | System.out.println(Arrays.toString(service.getConsumerList().toArray())); 42 | } 43 | 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/KafkaConsumerTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerRecord; 4 | import org.apache.kafka.clients.consumer.ConsumerRecords; 5 | import org.apache.kafka.clients.consumer.KafkaConsumer; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.util.Arrays; 9 | import java.util.Properties; 10 | 11 | /** 12 | * @author linxin 13 | * @version v1.0 14 | * Copyright (c) 
2015 by solinx 15 | * @date 2016/12/12. 16 | */ 17 | public class KafkaConsumerTest { 18 | 19 | static org.slf4j.Logger logger = LoggerFactory.getLogger(KafkaConsumerTest.class); 20 | 21 | 22 | public static void main(String[] args) { 23 | Properties props = new Properties(); 24 | props.setProperty("bootstrap.servers", "10.10.1.104:9093"); 25 | props.put("group.id", "test2"); 26 | props.put("enable.auto.commit", "true"); 27 | props.put("auto.commit.interval.ms", "1000"); 28 | props.put("session.timeout.ms", "30000"); 29 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 30 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 31 | 32 | KafkaConsumer consumer = new KafkaConsumer(props); 33 | 34 | consumer.subscribe(Arrays.asList("foo")); 35 | 36 | logger.debug("config {}", props); 37 | 38 | while (true) { 39 | ConsumerRecords records = consumer.poll(2); 40 | for (ConsumerRecord record : records) { 41 | System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value()); 42 | } 43 | //logger.debug("{}",records); 44 | try { 45 | Thread.sleep(2000); 46 | } catch (InterruptedException e) { 47 | e.printStackTrace(); 48 | } 49 | } 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/PartitionDataTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.model.Topic; 4 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 5 | import com.alibaba.fastjson.JSONArray; 6 | import com.alibaba.fastjson.JSONObject; 7 | import co.solinx.kafka.monitor.model.Partition; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.stream.Collectors; 12 | 13 | /** 14 | * Created by xin on 2016-12-16. 
15 | */ 16 | public class PartitionDataTest { 17 | 18 | static Logger logger = LoggerFactory.getLogger(PartitionDataTest.class); 19 | static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 20 | 21 | 22 | public static void main(String[] args) throws InterruptedException { 23 | 24 | topics("foo"); 25 | 26 | } 27 | 28 | public static void topics(String topicName) throws InterruptedException { 29 | 30 | while (true) { 31 | Thread.sleep(2000); 32 | Topic topic = service.getTopic(topicName); 33 | JSONObject pageData = new JSONObject(); 34 | JSONArray array = new JSONArray(); 35 | //Collection topicPar = topic.getLeaderPartitions(brokerID); 36 | 37 | for (Partition tp : 38 | topic.getPartitionMap().values()) { 39 | JSONObject part = new JSONObject(); 40 | part.put("id", tp.getId()); 41 | part.put("firstOffset", tp.getFirstOffset()); 42 | part.put("lastOffset", tp.getSize()); 43 | part.put("size", tp.getSize() - tp.getFirstOffset()); 44 | part.put("Leader", tp.getLeader() != null ? tp.getLeader().getId() : ""); 45 | 46 | part.put("inSyncReplicas", tp.getInSyncReplicas().stream().map(p->p.getId()).collect(Collectors.toList())); 47 | part.put("leaderPreferred", tp.isLeaderPreferred()); 48 | part.put("underReplicated", tp.isUnderReplicated()); 49 | array.add(part); 50 | } 51 | 52 | pageData.put("data", array); 53 | 54 | logger.debug("---- pageData {} ", pageData); 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/PartitionServiceTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.core.service.PartitionService; 4 | 5 | /** 6 | * @author linxin 7 | * @version v1.0 8 | * Copyright (c) 2015 by solinx 9 | * @date 2017/12/13. 
10 | */ 11 | public class PartitionServiceTest { 12 | 13 | 14 | public static void main(String[] args) { 15 | PartitionService service = new PartitionService(); 16 | service.addPartition("2222", 3); 17 | } 18 | 19 | 20 | } 21 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/Test.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import java.sql.Time; 4 | import java.sql.Timestamp; 5 | import java.util.Date; 6 | import java.util.Random; 7 | 8 | /** 9 | * Created by xin on 2016-12-26. 10 | */ 11 | public class Test { 12 | 13 | 14 | public static void main(String[] args) { 15 | Timestamp timestamp = new Timestamp(1482759606); 16 | System.out.println(timestamp.toLocalDateTime()); 17 | System.out.println(new Time(1482759113)); 18 | 19 | System.out.println(new Random().nextInt(3)); 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/TestKafkaConsumerGroupService.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import kafka.admin.AdminClient; 4 | import kafka.admin.ConsumerGroupCommand; 5 | import kafka.common.TopicAndPartition; 6 | import kafka.coordinator.GroupOverview; 7 | import org.apache.kafka.clients.consumer.*; 8 | import org.apache.kafka.common.TopicPartition; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | import scala.Function1; 12 | import scala.Option; 13 | import scala.collection.JavaConversions; 14 | import scala.collection.Seq; 15 | 16 | import java.util.*; 17 | import java.util.stream.Stream; 18 | 19 | /** 20 | * @author linxin 21 | * @version v1.0 22 | * Copyright (c) 2015 by solinx 23 | * @date 2016/12/19. 
24 | */ 25 | public class TestKafkaConsumerGroupService implements ConsumerGroupCommand.ConsumerGroupService { 26 | 27 | AdminClient adminClient; 28 | Logger logger = LoggerFactory.getLogger(TestKafkaConsumerGroupService.class); 29 | org.apache.kafka.clients.consumer.KafkaConsumer consumer; 30 | 31 | public TestKafkaConsumerGroupService() { 32 | Properties props = new Properties(); 33 | props.setProperty("bootstrap.servers", "127.0.0.1:9092"); 34 | props.put("group.id", "test"); 35 | adminClient = AdminClient.create(props); 36 | 37 | logger.debug("{}", adminClient.bootstrapBrokers()); 38 | 39 | } 40 | 41 | @Override 42 | public void list() { 43 | 44 | } 45 | 46 | public List groupList() { 47 | return JavaConversions.asJavaList(adminClient.listAllConsumerGroupsFlattened()); 48 | } 49 | 50 | @Override 51 | public void describe() { 52 | 53 | } 54 | 55 | @Override 56 | public void close() { 57 | 58 | } 59 | 60 | @Override 61 | public ConsumerGroupCommand.ConsumerGroupCommandOptions opts() { 62 | return null; 63 | } 64 | 65 | @Override 66 | public ConsumerGroupCommand.LogEndOffsetResult getLogEndOffset(String topic, int partition) { 67 | return null; 68 | } 69 | 70 | @Override 71 | public void describeGroup(String group) { 72 | 73 | List consumerSummaryList = JavaConversions.asJavaList(adminClient.describeConsumerGroup(group)); 74 | 75 | Consumer consumer = getConsumer(); 76 | logger.debug("consumerList ----- {}", consumerSummaryList); 77 | 78 | consumerSummaryList.stream().forEach(e -> { 79 | List topicPartitions = JavaConversions.asJavaList(e.assignment()); 80 | Stream> partitionOffsets = topicPartitions.stream().flatMap(topicPartition -> { 81 | Map topic = new HashMap<>(); 82 | OffsetAndMetadata metadata = consumer.committed(new TopicPartition(topicPartition.topic(), topicPartition.partition())); 83 | if(metadata!=null) { 84 | topic.put(topicPartition.topic(), metadata.offset()); 85 | logger.debug("-------- offset {}", metadata.offset()); 86 | } 87 | 88 | return 
Stream.of(topic); 89 | }); 90 | //partitionOffsets 91 | // logger.debug("partitionOffsets {}", partitionOffsets.collect(Collectors.toList())); 92 | 93 | final Map partitionOffsetsMap = topicPartitions.size() > 0 ? partitionOffsets.findFirst().get() : new HashMap<>(); 94 | 95 | topicPartitions.forEach(tp -> { 96 | long endOff = findLogEndOffset(tp.topic(), tp.partition()); 97 | long currentOff = 0; 98 | if (partitionOffsetsMap.size() > 0) 99 | currentOff = partitionOffsetsMap.get(tp.topic()); 100 | logger.debug("{}", 101 | String.format("%s %s %s %s %s %s %s %s", 102 | group, tp.topic(), String.valueOf(tp.partition()), 103 | currentOff, endOff, endOff - currentOff, 104 | e.clientId(), e.clientHost())); 105 | }); 106 | 107 | 108 | }); 109 | 110 | } 111 | 112 | 113 | long findLogEndOffset(String topic, int partition) { 114 | Consumer consumer = getConsumer(); 115 | TopicPartition topicPartition = new TopicPartition(topic, partition); 116 | List tpList = new ArrayList(); 117 | tpList.add(topicPartition); 118 | consumer.assign(tpList); 119 | consumer.seekToEnd(tpList); 120 | Long longEndOffset = consumer.position(topicPartition); 121 | return longEndOffset; 122 | } 123 | 124 | public Consumer getConsumer() { 125 | if (consumer == null) { 126 | consumer = newConsumer(); 127 | } 128 | return consumer; 129 | } 130 | 131 | public org.apache.kafka.clients.consumer.KafkaConsumer newConsumer() { 132 | Properties props = new Properties(); 133 | props.setProperty("bootstrap.servers", "127.0.0.1:9092"); 134 | props.put("group.id", "test"); 135 | props.put("enable.auto.commit", "true"); 136 | props.put("auto.commit.interval.ms", "1000"); 137 | props.put("session.timeout.ms", "30000"); 138 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 139 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 140 | 141 | return new org.apache.kafka.clients.consumer.KafkaConsumer(props); 142 | } 143 | 144 | 
@Override 145 | public void describeTopicPartition(String group, Seq topicPartitions, Function1> getPartitionOffset, Function1> getOwner) { 146 | 147 | } 148 | 149 | @Override 150 | public void printDescribeHeader() { 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/TopicDataTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.model.Topic; 4 | import co.solinx.kafka.monitor.model.Partition; 5 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 6 | import com.alibaba.fastjson.JSONArray; 7 | import com.alibaba.fastjson.JSONObject; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.List; 12 | 13 | /** 14 | * @author linxin 15 | * @version v1.0 16 | * Copyright (c) 2015 by solinx 17 | * @date 2016/12/16. 18 | */ 19 | public class TopicDataTest { 20 | 21 | static Logger logger= LoggerFactory.getLogger(TopicDataTest.class); 22 | static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 23 | 24 | public static void main(String[] args) throws InterruptedException { 25 | 26 | Thread.sleep(3000); 27 | // logger.debug("----------- {}", service.getBrokerById(0)); 28 | 29 | // topic(); 30 | topics(); 31 | 32 | } 33 | 34 | public static void topics() throws InterruptedException { 35 | // while (true) { 36 | Thread.sleep(2000); 37 | JSONObject pageData=new JSONObject(); 38 | List topicList = service.getTopics(); 39 | JSONArray array=new JSONArray(); 40 | for (Topic 41 | topic : topicList) { 42 | JSONObject temp=new JSONObject(); 43 | double partitionSize= topic.getPartitionMap().size(); 44 | 45 | temp.put("name",topic.getName()); 46 | temp.put("partitionTotal",partitionSize); 47 | // double preferred=0; 48 | // for (Partition tPart : 49 | // topic.getPartitionMap().values()) { 50 | // if(tPart.getLeaderId()!=-1){ 51 | 
// preferred++; 52 | // } 53 | // } 54 | // temp.put("preferred",preferred/partitionSize*100+"%"); 55 | // temp.put("underReplicated",partitionSize-preferred); 56 | temp.put("preferred",topic.getPreferred()+"%"); 57 | temp.put("underReplicated",topic.getUnderReplicated()); 58 | JSONObject configObj= (JSONObject) topic.getConfig().get("config"); 59 | 60 | temp.put("customConfig",configObj.size()>0?true:false); 61 | 62 | array.add(temp); 63 | } 64 | 65 | pageData.put("data", array); 66 | 67 | 68 | pageData.put("result", 200); 69 | 70 | logger.debug("---- topics {} ",topicList); 71 | logger.debug("---- pageData {} ",pageData); 72 | // } 73 | } 74 | public static void topic() throws InterruptedException { 75 | while (true) { 76 | Thread.sleep(3000); 77 | Topic topic= service.getTopic("testest"); 78 | JSONObject jsonObject=new JSONObject(); 79 | 80 | jsonObject.put("PartitionTotal",topic.getPartitionMap().size()); 81 | jsonObject.put("totalSize",topic.getSize()); 82 | jsonObject.put("availableSize",topic.getAvailableSize()); 83 | jsonObject.put("PreferredReplicas",topic.getPreferredReplicaPercent()); 84 | jsonObject.put("UnderReplicatedPartitions",topic.getUnderReplicatedPartitions()); 85 | 86 | logger.debug("-----------jsonObject {}", jsonObject); 87 | logger.debug("-----------topic {}", topic); 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/TopicServiceTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka; 2 | 3 | import co.solinx.kafka.monitor.core.service.TopicService; 4 | 5 | /** 6 | * @author linxin 7 | * @version v1.0 8 | * Copyright (c) 2015 by solinx 9 | * @date 2017/12/13. 
10 | */ 11 | public class TopicServiceTest { 12 | 13 | public void topicCreate() { 14 | TopicService topicService = new TopicService(); 15 | topicService.createTopic("test", 2, 2); 16 | } 17 | 18 | 19 | public void deleteTopic() { 20 | TopicService topicService = new TopicService(); 21 | topicService.deleteTopic("test"); 22 | } 23 | 24 | public static void main(String[] args) { 25 | TopicServiceTest test = new TopicServiceTest(); 26 | test.topicCreate(); 27 | // test.deleteTopic(); 28 | } 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/alarm/KafkaAlarmTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.alarm; 2 | 3 | import co.solinx.kafka.monitor.core.service.KafkaBaseInfoService; 4 | import co.solinx.kafka.monitor.model.Partition; 5 | import co.solinx.kafka.monitor.model.Topic; 6 | import com.alibaba.fastjson.JSONArray; 7 | import com.alibaba.fastjson.JSONObject; 8 | 9 | import java.util.List; 10 | 11 | /** 12 | * @author linxin 13 | * @version v1.0 14 | * Copyright (c) 2015 by solinx 15 | * @date 2017/12/25. 
16 | */ 17 | public class KafkaAlarmTest { 18 | 19 | static KafkaBaseInfoService service = KafkaBaseInfoService.getInstance(); 20 | 21 | public static void main(String[] args) { 22 | List topicList = service.getTopics(); 23 | 24 | JSONArray array = new JSONArray(); 25 | for (Topic 26 | topic : topicList) { 27 | // int preferred=0; 28 | // int partitionSize= topic.getPartitionMap().size(); 29 | // for (Partition tPart : 30 | // topic.getPartitionMap().values()) { 31 | // if(tPart.getLeaderId()!=-1){ 32 | // preferred++; 33 | // } 34 | // } 35 | // if(preferred!=1) { 36 | // JSONObject temp = new JSONObject(); 37 | // temp.put("preferred", preferred / partitionSize * 100 + "%"); 38 | // temp.put("underReplicated", partitionSize - preferred); 39 | // temp.put("topic", topic.getName()); 40 | // array.add(temp); 41 | // } 42 | int preferred = (int) topic.getPreferred(); 43 | if (preferred != 100) { 44 | JSONObject temp = new JSONObject(); 45 | temp.put("preferred", preferred + "%"); 46 | temp.put("underReplicated", topic.getUnderReplicated()); 47 | temp.put("topic", topic.getName()); 48 | array.add(temp); 49 | } 50 | } 51 | System.out.println(array.toJSONString()); 52 | } 53 | 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/test/java/co/solinx/kafka/metrics/ProduceServiceTest.java: -------------------------------------------------------------------------------- 1 | package co.solinx.kafka.metrics; 2 | 3 | import co.solinx.kafka.monitor.core.service.ConsumerService; 4 | import co.solinx.kafka.monitor.core.service.ProduceService; 5 | import co.solinx.kafka.monitor.core.service.JolokiaService; 6 | 7 | import java.io.IOException; 8 | 9 | /** 10 | * @author linxin 11 | * @version v1.0 12 | * Copyright (c) 2015 by solinx 13 | * @date 2016/12/22. 
14 | */ 15 | public class ProduceServiceTest { 16 | 17 | public static void main(String[] args) throws IOException { 18 | ProduceService produceService = new ProduceService(); 19 | ConsumerService consumerService = new ConsumerService(); 20 | 21 | produceService.start(); 22 | consumerService.start(); 23 | 24 | 25 | // MetricsReportService reportService = new MetricsReportService(); 26 | // reportService.start(); 27 | 28 | JolokiaService jolokiaService = new JolokiaService(); 29 | jolokiaService.start(); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/test/resources/zkConfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "connectIp" : "127.0.0.1:2181", 3 | "SessionTimeoutMs" : "3000", 4 | "ConnectionTimeoutMs" : "3000", 5 | "RetryOneTime" : "3000" 6 | } --------------------------------------------------------------------------------