├── APIKafka.java
├── README.md
├── ClientKafka.java
└── KafkaUtil.java

/APIKafka.java:
--------------------------------------------------------------------------------
1 | package com.example.demo.controller;
2 | 
3 | 
4 | import com.alibaba.fastjson.JSON;
5 | import com.alibaba.fastjson.JSONObject;
6 | import com.example.demo.commom.RespValue;
7 | import com.example.demo.util.KafkaUtil;
8 | import com.example.demo.util.OgnlUtil;
9 | import lombok.extern.slf4j.Slf4j;
10 | import ognl.Ognl;
11 | import ognl.OgnlContext;
12 | import ognl.OgnlException;
13 | import org.springframework.web.bind.annotation.*;
14 | import java.util.*;
15 | import java.util.concurrent.ExecutionException;
16 | 
17 | @Slf4j
18 | @RestController
19 | @RequestMapping(value="/data",method = {RequestMethod.GET,RequestMethod.POST})
20 | public class APIKafka {
21 | 
22 | private Object getValue(Object expression, JSONObject keyObject) throws OgnlException {
23 | OgnlContext oc = new OgnlContext();
24 | oc.setRoot(keyObject);
25 | Object object = Ognl.getValue(expression, oc, oc.getRoot());
26 | return object;
27 | }
28 | 
29 | 
30 | @PostMapping("/sendData")
31 | public RespValue sendMessage(@RequestParam("key")String key,
32 | @RequestParam("value")String value) throws ExecutionException, InterruptedException {
33 | // synchronous send
34 | //KafkaUtil.sendToKafka("producer",key,value);
35 | // asynchronous send
36 | KafkaUtil.sendToKafkaAsync("producer",key,value);
37 | return new RespValue(0,"插入成功","");
38 | }
39 | 
40 | @PostMapping("/recvData")
41 | public RespValue recvData(@RequestParam(name="filter",required = false)String filter,
42 | @RequestParam(name="groupId",required = false)String groupId) throws ExecutionException, InterruptedException, OgnlException {
43 | if(filter == null){filter = "true";}
44 | if(groupId == null){groupId = "guest";}
45 | 
46 | ArrayList<LinkedHashMap<String, Object>> buffer = new ArrayList<>();
47 | ArrayList<LinkedHashMap<String, Object>> bufferTmp = new ArrayList<>();
48 | try {
49 | bufferTmp = KafkaUtil.recvFromKafka("producer", groupId);
50 | } catch (ConcurrentModificationException e) {
51 | return new RespValue(0, "请使用单线程消费", buffer);
52 | }
53 | Object tree = Ognl.parseExpression(filter);
54 | 
55 | long start = System.currentTimeMillis(); // start time
56 | for (int i = 0; i < bufferTmp.size(); i++) {
57 | 
58 | LinkedHashMap<String, Object> o = bufferTmp.get(i);
59 | String key = (String) o.get("key");
60 | System.out.println("key = " + key);
61 | if(key.contains("=")) continue;
62 | JSONObject keyObject = JSON.parseObject(key);
63 | 
64 | Boolean object = false;
65 | try {
66 | object = (Boolean) getValue(tree,keyObject);
67 | }catch (Exception e){
68 | System.out.println("e = " + e);
69 | System.out.println("标签不含表达式参数,跳过");
70 | continue;
71 | }
72 | System.out.println("object = " + object);
73 | System.out.println("object = " + object.getClass());
74 | if(object){
75 | buffer.add(o);
76 | }
77 | }
78 | long end=System.currentTimeMillis(); // end time
79 | System.out.println("filter程序运行时间: "+(end-start)+"ms");
80 | 
81 | return new RespValue(0,"消费成功",buffer);
82 | }
83 | 
84 | @PostMapping("/resetOffsetToEarliest")
85 | public RespValue resetOffsetToEarliest(@RequestParam("groupId")String groupId) throws ExecutionException, InterruptedException {
86 | KafkaUtil.resetOffsetToEarliest("producer", groupId);
87 | return new RespValue(0,"修改成功","");
88 | }
89 | 
90 | @PostMapping("/consumerPositions")
91 | public RespValue consumerPositions(@RequestParam("groupId")String groupId) throws ExecutionException, InterruptedException {
92 | LinkedHashMap<String, Object> oo = KafkaUtil.consumerPositions("producer", groupId);
93 | oo.remove("positions");
94 | 
95 | return new RespValue(0,"当前数据情况",oo);
96 | }
97 | 
98 | 
99 | }
100 | 
101 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SimpleKafka (a Kafka client wrapper utility)
2 | A utility that wraps the Kafka client and speeds up everyday Kafka development.
3 | 
4 | ## Features:
5 | 1. Wraps the common Kafka client operations. There is no configuration to maintain and no client to initialize, so a call really is a single line of code.
6 | 2. Client pooling is handled inside the utility class, so multi-threaded callers do not need to keep their own collection of clients either.
7 | 
8 | ## Usage:
9 | Just add the single KafkaUtil.java file to your project and change the Kafka broker address inside it.
10 | 
11 | ## Typical examples:
12 | 1. **Synchronous produce:** LinkedHashMap<String, Object> recordMeta = KafkaUtil.sendToKafka("RULEa93304e6d844000","222","aaaa");
13 | 2. **Asynchronous produce:** KafkaUtil.sendToKafkaAsync("RULEa93304e6d844000", "222", "aaaa");
14 | 3. **Consume:** ArrayList<LinkedHashMap<String, Object>> buffer = KafkaUtil.recvFromKafka("RULEa93304e6d844000", "group1");
15 | 4. **Reset offsets:** KafkaUtil.resetOffsetToEarliest("RULEa93304e6d844000", "group1");
16 | 
17 | 
18 | ## API overview:
19 | 1. **kafkaListTopics:** list all topics
20 | 2. **createTopic:** create a topic
21 | 3. **delTopic:** delete a topic
22 | 4. **partitionsTopic:** list a topic's partitions with their partition and replica counts
23 | 5. **delGroupId:** delete a consumer group
24 | 6. **descCluster:** list the nodes of the cluster
25 | 7. **kafkaConsumerGroups:** list consumer groups
26 | 8. **kafkaConsumerGroups:** list the active consumer groups of a given topic
27 | 9. **sendToKafka:** produce to a given topic, synchronous; returns e.g. {"topic":"RULEa93304e6d844000","partition":1,"offset":681}
28 | 10. **sendToKafkaAsync:** produce to a given topic, asynchronous, with the default callback
29 | 11. **sendToKafkaAsync:** produce to a given topic, asynchronous, with a custom callback
30 | 12. **recvFromKafka:** consume a given topic with a given groupId; returns e.g. [{"topic":"RULEa93304e6d844000","key":"222","value":"aaaa","partition":1,"offset":681}]
31 | 13. **recvFromKafkaByOffset:** consume a given topic partition starting from a given offset
32 | 14. **recvFromKafkaByTimestamp:** consume a given topic partition starting from a given timestamp
33 | 15. **resetOffsetToTimestamp:** reset a topic's offsets to a given timestamp
34 | 16. **resetOffsetToEarliest:** reset a topic's offsets to the earliest position
35 | 17. **resetOffsetToLatest:** reset a topic's offsets to the latest position, typically used to skip dirty test data
36 | 18. **consumerPositions:** report the current consumer offsets, e.g. {"partitionNum":2,"dataNum":1,"lagNum":0,"positions":[{"partition":0,"begin":0,"end":0,"current":0,"current1":0,"size":0,"lag":0},{"partition":1,"begin":681,"end":682,"current":682,"current1":682,"size":1,"lag":0}]}
37 | 19. **topicSize:** per-partition size details of a given topic, e.g. [{"partition": 0,"begin": 65,"end": 65,"size": 0}]
38 | 20. **topicSizeAll:** size details of all topics
39 | 21. **topicSizeStatistics:** size statistics of a given topic, e.g. {"partitionNum":5452,"dataNum":41570647}
40 | 22. **topicSizeStatisticsAll:** size statistics of all topics, e.g. {"topicNum":2550,"partitionNum":5452,"dataNum":41570647}
41 | 
42 | ## API list:
43 | 1. **kafkaListTopics:** List<String> kafkaListTopics()
44 | 2. **createTopic:** void createTopic(String topic)
45 | 3. **delTopic:** void delTopic(String topic)
46 | 4. **partitionsTopic:** List<String> partitionsTopic(String topic)
47 | 5. **delGroupId:** void delGroupId(String groupId)
48 | 6. **descCluster:** List<String> descCluster()
49 | 7. **kafkaConsumerGroups:** List<String> kafkaConsumerGroups()
50 | 8. **kafkaConsumerGroups:** List<String> kafkaConsumerGroups(String topic)
51 | 9. **sendToKafka:** LinkedHashMap<String, Object> sendToKafka(String topic, String key, Object value)
52 | 10. **sendToKafkaAsync:** void sendToKafkaAsync(String topic, String key, Object value)
53 | 11. **sendToKafkaAsync:** void sendToKafkaAsync(String topic, String key, Object value, Callback callback)
54 | 12. **recvFromKafka:** ArrayList<LinkedHashMap<String, Object>> recvFromKafka(String topic, String groupId)
55 | 13. **recvFromKafkaByOffset:** ArrayList<LinkedHashMap<String, Object>> recvFromKafkaByOffset(String topic, String groupId, int partition, long offset)
56 | 14. **recvFromKafkaByTimestamp:** ArrayList<LinkedHashMap<String, Object>> recvFromKafkaByTimestamp(String topic, String groupId, int partition, long timestamp)
57 | 15. **resetOffsetToTimestamp:** boolean resetOffsetToTimestamp(String topic, String groupId, long timestamp)
58 | 16. **resetOffsetToEarliest:** boolean resetOffsetToEarliest(String topic, String groupId)
59 | 17. **resetOffsetToLatest:** boolean resetOffsetToLatest(String topic, String groupId)
60 | 18. **consumerPositions:** LinkedHashMap<String, Object> consumerPositions(String topic, String groupId)
61 | 19. **topicSize:** List<LinkedHashMap<String, Object>> topicSize(String topic)
62 | 20. **topicSizeAll:** LinkedHashMap<String, Object> topicSizeAll()
63 | 21. **topicSizeStatistics:** LinkedHashMap<String, Object> topicSizeStatistics(String topic)
64 | 22. **topicSizeStatisticsAll:** LinkedHashMap<String, Object> topicSizeStatisticsAll()
65 | 
66 | ## Sample application:
67 | To demonstrate the utility, an HTTP message-queue service was built on top of it. In just a few dozen lines of code it implements label-based publish/subscribe; the service is in APIKafka.java and a client example is in ClientKafka.java.
68 | 
69 | The service lets producers attach arbitrary labels and lets consumers subscribe with filter expressions. Expressions support and/or/not, collection lookups, and substring matching on strings.
70 | 
71 | It also supports replaying messages (re-consuming from earlier offsets) and querying message statistics.
72 | 
73 | Together this covers the basic needs of streaming message retrieval.
74 | ![diagram](https://user-images.githubusercontent.com/40593174/227403984-f5bd167b-445f-4652-90f0-f7c065e559d3.png)
75 | 
76 | APIKafka lets producers attach arbitrary labels. Labels are open-ended: a label can be any JSON, keys do not have to be predefined, and values do not have to be enumerated. Consumers subscribe with filter expressions written in the open-source expression language OGNL, which supports and/or/not, object property access, array and collection access, and Java expressions such as contains, startsWith, endsWith and length, as well as matches for regular expressions. This covers the matching requirements of streaming message retrieval.
77 | 
78 | 
79 | ## Contact:
80 | For questions, contact: zhangchuang@iie.ac.cn
81 | 
--------------------------------------------------------------------------------
/ClientKafka.java:
--------------------------------------------------------------------------------
1 | package com.example.demo.client;
2 | 
3 | import cn.hutool.http.HttpUtil;
4 | import java.util.HashMap;
5 | 
6 | public class ClientKafka {
7 | 
8 | public static void main(String[] args) {
9 | try {
10 | 
11 | // HashMap<String, Object> paramMap1 = new HashMap<>();
12 | // paramMap1.put("key","{\"a\":1,\"b\":2}");
13 | // paramMap1.put("value","aaa");
14 | // String result1=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap1);
15 | // System.out.println("result1 = " + result1);
16 | //
17 | // HashMap<String, Object> paramMap2 = new HashMap<>();
18 | // paramMap2.put("key","{\"a\":2,\"b\":2}");
19 | // paramMap2.put("value","aaa");
20 | // String result2=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap2);
21 | // System.out.println("result2 = " + result2);
22 | //
23 | // HashMap<String, Object> paramMap3 = new HashMap<>();
24 | // paramMap3.put("key","{\"a\":3,\"b\":2}");
25 | // paramMap3.put("value","aaa");
26 | // String result3=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap3);
27 | // System.out.println("result3 = " + result3);
28 | //
29 | // HashMap<String, Object> paramMap4 = new HashMap<>();
30 | // paramMap4.put("filter","ab");
43 | // paramMap6.put("groupId","group3");
44 | // String result6=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap6);
45 | // System.out.println("result6 = " + result6);
46 | //
47 | // HashMap<String, Object> paramMap7 = new HashMap<>();
48 | // paramMap7.put("filter","a==1 && b==2");
49 | // paramMap7.put("groupId","group4");
50 | // String result7=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap7);
51 | // System.out.println("result7 = " + result7);
52 | 
53 | // HashMap<String, Object> paramMap8 = new HashMap<>();
54 | // paramMap8.put("filter","c.length()>6");
55 | // paramMap8.put("groupId","group4");
56 | // String result8=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap8);
57 | // System.out.println("result8 = " + result8);
58 | 
59 | 
60 | // HashMap<String, Object> paramMap1 = new HashMap<>();
61 | // 
paramMap1.put("key","{\"a\":1,\"b\":2,\"c\":\"aaa\"}"); 62 | // paramMap1.put("value","aaa"); 63 | // String result1=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap1); 64 | // System.out.println("result1 = " + result1); 65 | 66 | // HashMap paramMap1=new HashMap<>(); 67 | // paramMap1.put("key","{\"c\":\"aaa\"}"); 68 | // paramMap1.put("value","aaa"); 69 | // String result1=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap1); 70 | // System.out.println("result1 = " + result1); 71 | // 72 | // HashMap paramMap7=new HashMap<>(); 73 | // paramMap7.put("filter","c=='aaa'"); 74 | // paramMap7.put("groupId","group3"); 75 | // String result7=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap7); 76 | // System.out.println("result7 = " + result7); 77 | // HashMap paramMap2=new HashMap<>(); 78 | // paramMap2.put("groupId","group4"); 79 | // String result2=HttpUtil.post("http://localhost:8080/api/data/resetOffsetToEarliest",paramMap2); 80 | // System.out.println("result2 = " + result2); 81 | // 82 | // HashMap paramMap8=new HashMap<>(); 83 | // paramMap8.put("groupId","group4"); 84 | // String result8=HttpUtil.post("http://localhost:8080/api/data/consumerPositions",paramMap8); 85 | // System.out.println("result8 = " + result8); 86 | 87 | // HashMap paramMap7=new HashMap<>(); 88 | // paramMap7.put("groupId","group4"); 89 | // String result7=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap7); 90 | // System.out.println("result7 = " + result7); 91 | 92 | 93 | // HashMap paramMap2=new HashMap<>(); 94 | // paramMap2.put("key","{\"other\":\"label1_22\"}"); 95 | // paramMap2.put("value","aaa"); 96 | // String result2=HttpUtil.post("http://localhost:8080/api/data/sendData",paramMap2); 97 | // System.out.println("result2 = " + result2); 98 | 99 | 100 | // HashMap paramMap3=new HashMap<>(); 101 | // paramMap3.put("groupId","group4"); 102 | // String result3=HttpUtil.post("http://localhost:8080/api/data/resetOffsetToEarliest",paramMap3); 103 | // System.out.println("result3 = " + result3); 104 | 105 | HashMap paramMap8=new HashMap<>(); 106 | //paramMap8.put("filter","!other.contains(\"label1_\")"); 107 | //paramMap8.put("filter","1==1"); 108 | //paramMap8.put("groupId","group4"); 109 | String result8=HttpUtil.post("http://localhost:8080/api/data/recvData",paramMap8); 110 | System.out.println("result8 = " + result8); 111 | 112 | 113 | } catch (Exception e) { 114 | e.printStackTrace(); 115 | } 116 | } 117 | 118 | } 119 | -------------------------------------------------------------------------------- /KafkaUtil.java: -------------------------------------------------------------------------------- 1 | package com.example.demo.util; 2 | 3 | import com.alibaba.fastjson.JSON; 4 | import org.apache.kafka.clients.admin.*; 5 | import org.apache.kafka.clients.consumer.*; 6 | import org.apache.kafka.clients.producer.*; 7 | import org.apache.kafka.common.Node; 8 | import org.apache.kafka.common.PartitionInfo; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.apache.kafka.common.TopicPartitionInfo; 11 | import org.apache.kafka.common.config.ConfigResource; 12 | import org.apache.kafka.common.serialization.StringDeserializer; 13 | 14 | import java.time.Duration; 15 | import java.util.*; 16 | import java.util.concurrent.ExecutionException; 17 | import java.util.concurrent.Future; 18 | import java.util.concurrent.TimeUnit; 19 | import java.util.concurrent.TimeoutException; 20 | import java.util.stream.Collectors; 21 | import java.util.stream.Stream; 
22 | 23 | public class KafkaUtil { 24 | 25 | private static HashMap> kafkaConsumerMap = new HashMap<>(); 26 | private static HashMap> kafkaProducerMap = new HashMap<>(); 27 | 28 | private static String brokerList = "iZ2zehk94dstsat5cjspl6Z:9092"; 29 | 30 | 31 | //topic列表 32 | public static List kafkaListTopics() throws ExecutionException, InterruptedException { 33 | 34 | Properties props = new Properties(); 35 | // 只需要提供一个或多个 broker 的 IP 和端口 36 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 37 | // 创建 AdminClient 对象 38 | AdminClient client = KafkaAdminClient.create(props); 39 | // 获取 topic 列表 40 | Set topics = client.listTopics().names().get(); 41 | System.out.println("======================="); 42 | System.out.println(topics); 43 | System.out.println("======================="); 44 | 45 | List topicList = new ArrayList(); 46 | topicList.addAll(topics); 47 | client.close(); 48 | return topicList; 49 | } 50 | 51 | //topic创建,,,一般不用创建,开启auto.create.topics.enable=true后生产消息的时候自动创建num.partitions(默认值为1)个分区和default.replication.factor (默认值为1)个副本的对应topic。 52 | public static void createTopic(String topic) throws ExecutionException, InterruptedException { 53 | 54 | Properties props = new Properties(); 55 | // 只需要提供一个或多个 broker 的 IP 和端口 56 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 57 | // 创建 AdminClient 对象 58 | AdminClient client = KafkaAdminClient.create(props); 59 | // 获取 topic 列表 分区和副本数保持跟配置文件一致 60 | NewTopic newTopic = new NewTopic(topic,3, (short)1); 61 | client.createTopics(Arrays.asList(newTopic)).all().get(); 62 | System.out.println("======================="); 63 | 64 | //System.out.println("CreateTopicsResult : " + createTopicsResult); 65 | System.out.println("======================="); 66 | 67 | client.close(); 68 | return; 69 | } 70 | 71 | //topic创建 手工指定numPartitions, replicationFactor 72 | public static void createTopic(String topic, int numPartitions, short replicationFactor) throws ExecutionException, InterruptedException { 73 | 74 | Properties props = new Properties(); 75 | // 只需要提供一个或多个 broker 的 IP 和端口 76 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 77 | // 创建 AdminClient 对象 78 | AdminClient client = KafkaAdminClient.create(props); 79 | // 创建 topic 分区和副本数保持跟配置文件一致 80 | NewTopic newTopic = new NewTopic(topic,numPartitions, replicationFactor); 81 | client.createTopics(Arrays.asList(newTopic)).all().get(); 82 | System.out.println("======================="); 83 | 84 | //System.out.println("CreateTopicsResult : " + createTopicsResult); 85 | System.out.println("======================="); 86 | 87 | client.close(); 88 | return; 89 | } 90 | 91 | //topic删除 92 | public static void delTopic(String topic) throws ExecutionException, InterruptedException { 93 | 94 | Properties props = new Properties(); 95 | // 只需要提供一个或多个 broker 的 IP 和端口 96 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 97 | // 创建 AdminClient 对象 98 | AdminClient client = KafkaAdminClient.create(props); 99 | // 删除 topic 100 | client.deleteTopics(Arrays.asList(topic)).all().get(); 101 | 102 | System.out.println("======================="); 103 | //System.out.println("deleteTopicsResult : " + deleteTopicsResult ); 104 | System.out.println("======================="); 105 | 106 | client.close(); 107 | return; 108 | } 109 | 110 | //topic修改,,增加newPartitions 111 | public static void alterTopic(String topic, int newPartitions) throws ExecutionException, InterruptedException { 112 | 113 | Properties props = new Properties(); 114 | // 只需要提供一个或多个 broker 的 IP 和端口 115 | 
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 116 | // 创建 AdminClient 对象 117 | AdminClient client = KafkaAdminClient.create(props); 118 | 119 | Map partitionsMap = new HashMap<>(); 120 | //把Partition增加到2个 121 | NewPartitions newPartitionsObject = NewPartitions.increaseTo(newPartitions); 122 | partitionsMap.put(topic, newPartitionsObject); 123 | client.createPartitions(partitionsMap).all().get(); 124 | 125 | 126 | System.out.println("======================="); 127 | //System.out.println("alterTopicsResult : " + alterTopicsResult ); 128 | System.out.println("======================="); 129 | 130 | client.close(); 131 | return; 132 | } 133 | 134 | //topic的分区列表 135 | public static List partitionsTopic(String topic) throws ExecutionException, InterruptedException { 136 | 137 | Properties props = new Properties(); 138 | // 只需要提供一个或多个 broker 的 IP 和端口 139 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 140 | // 创建 AdminClient 对象 141 | AdminClient client = KafkaAdminClient.create(props); 142 | // 获取 topic 列表 143 | List descGroups = client.describeTopics(Arrays.asList(topic)).all().get().get(topic).partitions().stream().flatMap(partition -> Stream.of(String.valueOf(partition.partition())+":"+String.valueOf(partition.replicas().size()))).collect(Collectors.toList());; 144 | //List descGroups1 = client.describeTopics(Arrays.asList(topic)).all().get().get(topic).partitions().stream().map(TopicPartitionInfo::replicas.).collect(Collectors.toList());; 145 | //AdminClient.deleteRecords() 146 | 147 | System.out.println("======================="); 148 | System.out.println("descGroups : " + descGroups ); 149 | //System.out.println("descGroups1 : " + descGroups1 ); 150 | System.out.println("======================="); 151 | 152 | client.close(); 153 | return descGroups; 154 | } 155 | 156 | //删除groupId 157 | public static void delGroupId(String groupId) throws ExecutionException, InterruptedException { 158 | 159 | Properties props = new Properties(); 160 | // 只需要提供一个或多个 broker 的 IP 和端口 161 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 162 | // 创建 AdminClient 对象 163 | AdminClient client = KafkaAdminClient.create(props); 164 | // 获取 topic 列表 165 | client.deleteConsumerGroups(Arrays.asList(groupId)).all().get(); 166 | 167 | System.out.println("======================="); 168 | //System.out.println("deleteTopicsResult : " + deleteTopicsResult ); 169 | System.out.println("======================="); 170 | 171 | client.close(); 172 | return; 173 | } 174 | 175 | //集群的节点列表 176 | public static List descCluster() throws ExecutionException, InterruptedException { 177 | 178 | Properties props = new Properties(); 179 | // 只需要提供一个或多个 broker 的 IP 和端口 180 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 181 | // 创建 AdminClient 对象 182 | AdminClient client = KafkaAdminClient.create(props); 183 | // 获取 topic 列表 184 | List descGroups = client.describeCluster().nodes().get().stream().flatMap(node -> Stream.of(node.host()+":"+String.valueOf(node.port()))).collect(Collectors.toList()); 185 | System.out.println("======================="); 186 | System.out.println("descGroups : " + descGroups ); 187 | //System.out.println("descGroups1 : " + descGroups1 ); 188 | System.out.println("======================="); 189 | client.close(); 190 | return descGroups; 191 | } 192 | 193 | 194 | //消费者列表 195 | public static List kafkaConsumerGroups() throws ExecutionException, InterruptedException, TimeoutException { 196 | Properties props = new Properties(); 197 | // 只需要提供一个或多个 broker 
的 IP 和端口 198 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 199 | // 创建 AdminClient 对象 200 | AdminClient client = KafkaAdminClient.create(props); 201 | List allGroups = client.listConsumerGroups() 202 | .valid() 203 | .get(10, TimeUnit.SECONDS) 204 | .stream() 205 | .map(ConsumerGroupListing::groupId) 206 | .collect(Collectors.toList()); 207 | 208 | //System.out.println(collection); 209 | System.out.println("======================="); 210 | System.out.println(JSON.toJSONString(allGroups)); 211 | //System.out.println(JSON.toJSONString(filteredGroups)); 212 | System.out.println("======================="); 213 | client.close(); 214 | return allGroups; 215 | 216 | 217 | } 218 | 219 | //消费者列表,值得注意的是,上面这个函数无法获取非运行中的consumer group,即虽然一个group订阅了某topic,但是若它所有的consumer成员都关闭的话这个函数是不会返回该group的。 220 | public static List kafkaConsumerGroups(String topic) throws ExecutionException, InterruptedException, TimeoutException { 221 | Properties props = new Properties(); 222 | // 只需要提供一个或多个 broker 的 IP 和端口 223 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 224 | // 创建 AdminClient 对象 225 | AdminClient client = KafkaAdminClient.create(props); 226 | List allGroups = client.listConsumerGroups() 227 | .valid() 228 | .get(10, TimeUnit.SECONDS) 229 | .stream() 230 | .map(ConsumerGroupListing::groupId) 231 | .collect(Collectors.toList()); 232 | 233 | Map allGroupDetails = 234 | client.describeConsumerGroups(allGroups).all().get(10, TimeUnit.SECONDS); 235 | 236 | final List filteredGroups = new ArrayList<>(); 237 | allGroupDetails.entrySet().forEach(entry -> { 238 | String groupId = entry.getKey(); 239 | ConsumerGroupDescription description = entry.getValue(); 240 | boolean topicSubscribed = description.members().stream().map(MemberDescription::assignment) 241 | .map(MemberAssignment::topicPartitions) 242 | .map(tps -> tps.stream().map(TopicPartition::topic).collect(Collectors.toSet())) 243 | .anyMatch(tps -> tps.contains(topic)); 244 | if (topicSubscribed) 245 | filteredGroups.add(groupId); 246 | }); 247 | //System.out.println(collection); 248 | System.out.println("======================="); 249 | System.out.println(JSON.toJSONString(allGroups)); 250 | System.out.println(JSON.toJSONString(filteredGroups)); 251 | System.out.println("======================="); 252 | client.close(); 253 | return filteredGroups; 254 | 255 | 256 | } 257 | 258 | 259 | ///维护内部客户端池 260 | private static KafkaConsumer getKafkaConsumer(String topic, String groupId) throws ExecutionException, InterruptedException { 261 | //long ThreadId = Thread.currentThread().getId(); 262 | //System.out.println("topic+groupId+ThreadId = " + topic+groupId+ThreadId); 263 | KafkaConsumer kafkaConsumer = kafkaConsumerMap.get(topic+groupId); 264 | if (kafkaConsumer==null){ 265 | //创建 kafka 消费者实例 266 | kafkaConsumer = getNewKafkaConsumer(topic, groupId); 267 | kafkaConsumerMap.put(topic+groupId,kafkaConsumer); 268 | } 269 | 270 | return kafkaConsumer; 271 | } 272 | ///维护内部客户端池 273 | private static KafkaProducer getKafkaProducer() throws ExecutionException, InterruptedException { 274 | 275 | KafkaProducer kafkaProducer = kafkaProducerMap.get("default"); 276 | if (kafkaProducer==null){ 277 | 278 | //创建一个生产者对象kafkaProducer 279 | kafkaProducer = getNewKafkaProducer(); 280 | kafkaProducerMap.put("default",kafkaProducer); 281 | } 282 | 283 | return kafkaProducer; 284 | } 285 | ///正常不需要这个接口,本身支持多线程(不会抛出 
ConcurrentModificationException),这个接口仅在想自己在多线程内初始化多个客户端时使用,依旧要受Kafka的“一个Partition只能被该Group里的一个Consumer线程消费”规则的限制,就是如果线程小于分区,也没问题,只是负载不见得均衡,如果大于分区,就会有一些线程消费不到数据 286 | public static KafkaConsumer getNewKafkaConsumer(String topic, String groupId) throws ExecutionException, InterruptedException { 287 | 288 | //String groupId = "group1"; 289 | //String topic = "hello-kafka"; 290 | //配置消费者客户端 291 | Properties properties = new Properties(); 292 | properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 293 | properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true); 294 | properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); 295 | properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 296 | //properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "2"); 297 | properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); 298 | //可以根据需求修改序列化类型,其他代码都不需要修改,暂时不支持为特定topic指定序列化类型,只能全局使用一种序列化类型 299 | properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); 300 | //properties.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500); 301 | //properties.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1048576); 302 | //properties.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 1100000000); 303 | //消费消息-fetch.min.bytes:服务器为消费者获取请求返回的最小数据量。如果可用的数据不足,请求将等待大量的数据在回答请求之前累积。默认1B 304 | //消费消息-fetch.max.wait.ms:我们通过 fetch.min.bytes 告诉 Kafka,等到有足够的数据时才把它返回给消费者。而 fetch.max.wait.ms 则用于指定 broker 的等待时间,默认是 500ms。 305 | //消费消息-fetch.max.bytes: 306 | 307 | //一次调用poll()操作时返回的最大记录数,默认值为500 308 | properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2000); 309 | 310 | //创建 kafka 消费者实例 311 | KafkaConsumer kafkaConsumer = new KafkaConsumer(properties); 312 | //订阅主题 313 | kafkaConsumer.subscribe(Collections.singletonList(topic)); 314 | 315 | 316 | return kafkaConsumer; 317 | } 318 | ///正常不需要这个接口,本身都支持多线程,这个接口仅在想自己在多线程内初始化多个客户端时使用 319 | public static KafkaProducer getNewKafkaProducer() throws ExecutionException, InterruptedException { 320 | 321 | Properties properties = new Properties(); 322 | properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 323 | //生产消息-ACK机制。0-意味着producer不等待broker同步完成的确认,继续发送下一条(批)信息;1-意味着producer要等待leader成功收到数据并得到确认,才发送下一条message;-1--意味着producer得到follwer确认,才发送下一条数据。 324 | properties.put(ProducerConfig.ACKS_CONFIG, "all"); 325 | // 生产消息-生产者可以重发消息的次数,如果达到这个次数,生产者会放弃重试并返回错误。 326 | properties.put(ProducerConfig.RETRIES_CONFIG, 3); 327 | //生产消息-能发送的单个消息的最大值,单位为B,默认为10M 328 | properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760); 329 | properties.put(ProducerConfig.LINGER_MS_CONFIG, 1); 330 | 331 | properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); 332 | //可以根据需求修改序列化类型,其他代码都不需要修改,暂时不支持为特定topic指定序列化类型,只能全局使用一种序列化类型 333 | properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); 334 | 335 | //创建一个生产者对象kafkaProducer 336 | KafkaProducer kafkaProducer = new KafkaProducer(properties); 337 | 338 | 339 | return kafkaProducer; 340 | } 341 | 342 | 343 | //生产数据到指定的topic,同步接口 {"topic":"RULEa93304e6d844000","partition":1,"offset":681} 344 | //KafkaProducer是线程安全的,鼓励用户在多个线程中共享一个KafkaProducer实例,这样通常都要比每个线程维护一个KafkaProducer实例效率要高。 345 | public static LinkedHashMap sendToKafka(String topic, String key, Object value) throws ExecutionException, InterruptedException { 346 | long start=System.currentTimeMillis(); //获取开始时间 347 | KafkaProducer kafkaProducer = getKafkaProducer(); 348 | 
ProducerRecord producerRecord = new ProducerRecord<>(topic,key,value); 349 | RecordMetadata recordMetadata = kafkaProducer.send(producerRecord).get(); 350 | 351 | System.out.println("recordMetadata = " + recordMetadata); 352 | System.out.println("topic=" + recordMetadata.topic()); 353 | System.out.println("partition=" + recordMetadata.partition()); 354 | System.out.println("offset=" + recordMetadata.offset()); 355 | 356 | LinkedHashMap recordMeta = new LinkedHashMap(); 357 | 358 | recordMeta.put("topic",recordMetadata.topic()); 359 | recordMeta.put("partition",recordMetadata.partition()); 360 | recordMeta.put("offset",recordMetadata.offset()); 361 | long end=System.currentTimeMillis(); //获取结束时间 362 | System.out.println("程序运行时间: "+(end-start)+"ms"); 363 | return recordMeta; 364 | } 365 | 366 | 367 | //生产数据到指定的topic,异步接口,默认回调 368 | public static void sendToKafkaAsync(String topic, String key, Object value) throws ExecutionException, InterruptedException { 369 | long start=System.currentTimeMillis(); //获取开始时间 370 | KafkaProducer kafkaProducer = getKafkaProducer(); 371 | ProducerRecord producerRecord = new ProducerRecord<>(topic,key,value); 372 | 373 | Callback callback = new Callback() { 374 | long start=System.currentTimeMillis(); //获取开始时间 375 | @Override 376 | public void onCompletion(RecordMetadata recordMetadata, Exception 377 | exception) { 378 | if (exception == null) { 379 | // 消息发送成功 380 | System.out.println("消息发送成功"); 381 | long end=System.currentTimeMillis(); //获取结束时间 382 | System.out.println("回调等待时间: "+(end-start)+"ms"); 383 | System.out.println(TimeUtil.getCurrentDateStringMillisecond()); 384 | System.out.println("recordMetadata = " + recordMetadata); 385 | System.out.println("topic=" + recordMetadata.topic()); 386 | System.out.println("partition=" + recordMetadata.partition()); 387 | System.out.println("offset=" + recordMetadata.offset()); 388 | } else { 389 | System.out.println("消息发送失败"); 390 | // 消息发送失败,需要重新发送 391 | } 392 | } 393 | }; 394 | Future recordMetadata = kafkaProducer.send(producerRecord,callback); 395 | 396 | long end=System.currentTimeMillis(); //获取结束时间 397 | System.out.println("程序运行时间: "+(end-start)+"ms"); 398 | return; 399 | } 400 | 401 | 402 | //生产数据到指定的topic,异步接口,自定义回调 403 | public static void sendToKafkaAsync(String topic, String key, Object value,Callback callback) throws ExecutionException, InterruptedException { 404 | long start=System.currentTimeMillis(); //获取开始时间 405 | KafkaProducer kafkaProducer = getKafkaProducer(); 406 | ProducerRecord producerRecord = new ProducerRecord<>(topic,key,value); 407 | Future recordMetadata = kafkaProducer.send(producerRecord,callback); 408 | long end=System.currentTimeMillis(); //获取结束时间 409 | System.out.println("程序运行时间: "+(end-start)+"ms"); 410 | return; 411 | } 412 | 413 | 414 | //按groupId消费指定topic的数据 [{"topic":"RULEa93304e6d844000","key":"222","value":"aaaa","partition":1,"offset":681}] 415 | public static ArrayList> recvFromKafka(String topic, String groupId) throws ExecutionException, InterruptedException { 416 | 417 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 418 | //用于保存消息的list 419 | ArrayList buffer = new ArrayList<>(); 420 | 421 | long start=System.currentTimeMillis(); //获取开始时间 422 | ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.ofMillis(5000)); 423 | kafkaConsumer.commitAsync(); 424 | long end=System.currentTimeMillis(); //获取结束时间 425 | System.out.println("程序运行时间: "+(end-start)+"ms"); 426 | //System.out.println("i = " + i); 427 | System.out.println("consumerRecords = " + 
consumerRecords.count()); 428 | 429 | for (ConsumerRecord record : consumerRecords) { 430 | LinkedHashMap data = new LinkedHashMap(); 431 | data.put("topic", record.topic()); 432 | data.put("key", record.key()); 433 | data.put("value", record.value()); 434 | data.put("partition", record.partition()); 435 | data.put("offset", record.offset()); 436 | buffer.add(data); 437 | System.out.println("订阅主题=" + record.topic()); 438 | System.out.println("消息键值=" + record.key()); 439 | System.out.println("消息内容=" + record.value()); 440 | System.out.println("消息内容分区=" + record.partition()); 441 | System.out.println("消息内容的偏移量=" + record.offset()); 442 | } 443 | return buffer; 444 | } 445 | 446 | 447 | //消费指定topic指定partition对应的offset数据 448 | public static ArrayList> recvFromKafkaByOffset(String topic, String groupId,int partition,long offset) throws ExecutionException, InterruptedException { 449 | 450 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 451 | //用于保存消息的list 452 | ArrayList buffer = new ArrayList<>(); 453 | 454 | long start=System.currentTimeMillis(); //获取开始时间 455 | // // 指定位置开始消费 456 | // Set assignment= new HashSet<>(); 457 | // while (assignment.size() == 0) { 458 | // kafkaConsumer.poll(Duration.ofSeconds(1)); 459 | // // 获取消费者分区分配信息(有了分区分配信息才能开始消费) 460 | // assignment = kafkaConsumer.assignment(); 461 | // } 462 | // 遍历所有分区,并指定 offset 从 100 的位置开始消费 463 | TopicPartition topicPartition = new TopicPartition(topic,partition); 464 | kafkaConsumer.seek(topicPartition,offset); // 指定offset 465 | 466 | ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.ofMillis(5000)); 467 | kafkaConsumer.commitAsync(); 468 | long end=System.currentTimeMillis(); //获取结束时间 469 | System.out.println("程序运行时间: "+(end-start)+"ms"); 470 | //System.out.println("i = " + i); 471 | System.out.println("consumerRecords = " + consumerRecords.count()); 472 | 473 | for (ConsumerRecord record : consumerRecords) { 474 | LinkedHashMap data = new LinkedHashMap(); 475 | data.put("topic", record.topic()); 476 | data.put("key", record.key()); 477 | data.put("value", record.value()); 478 | data.put("partition", record.partition()); 479 | data.put("offset", record.offset()); 480 | buffer.add(data); 481 | System.out.println("订阅主题=" + record.topic()); 482 | System.out.println("消息键值=" + record.key()); 483 | System.out.println("消息内容=" + record.value()); 484 | System.out.println("消息内容分区=" + record.partition()); 485 | System.out.println("消息内容的偏移量=" + record.offset()); 486 | } 487 | return buffer; 488 | } 489 | 490 | //消费指定topic指定partition对应的timestamp以后的数据 491 | public static ArrayList> recvFromKafkaByTimestamp(String topic, String groupId,int partition,long timestamp) throws ExecutionException, InterruptedException { 492 | 493 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 494 | //用于保存消息的list 495 | ArrayList buffer = new ArrayList<>(); 496 | 497 | long start=System.currentTimeMillis(); //获取开始时间 498 | // // 指定位置开始消费 499 | // Set assignment= new HashSet<>(); 500 | // while (assignment.size() == 0) { 501 | // kafkaConsumer.poll(Duration.ofSeconds(1)); 502 | // // 获取消费者分区分配信息(有了分区分配信息才能开始消费) 503 | // assignment = kafkaConsumer.assignment(); 504 | // } 505 | // 遍历所有分区,并指定 offset 从 100 的位置开始消费 506 | TopicPartition topicPartition = new TopicPartition(topic,partition); 507 | 508 | Map query = new HashMap<>();//构造offsetsForTimes参数,通过时间戳找到offset 509 | query.put(topicPartition, timestamp); 510 | Map result = kafkaConsumer.offsetsForTimes(query); 511 | long offset = result.get(topicPartition).offset(); 512 | 513 | 
kafkaConsumer.seek(topicPartition,offset); // 指定offset 514 | 515 | ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.ofMillis(5000)); 516 | kafkaConsumer.commitAsync(); 517 | long end=System.currentTimeMillis(); //获取结束时间 518 | System.out.println("程序运行时间: "+(end-start)+"ms"); 519 | //System.out.println("i = " + i); 520 | System.out.println("consumerRecords = " + consumerRecords.count()); 521 | 522 | for (ConsumerRecord record : consumerRecords) { 523 | LinkedHashMap data = new LinkedHashMap(); 524 | data.put("topic", record.topic()); 525 | data.put("key", record.key()); 526 | data.put("value", record.value()); 527 | data.put("partition", record.partition()); 528 | data.put("offset", record.offset()); 529 | buffer.add(data); 530 | System.out.println("订阅主题=" + record.topic()); 531 | System.out.println("消息键值=" + record.key()); 532 | System.out.println("消息内容=" + record.value()); 533 | System.out.println("消息内容分区=" + record.partition()); 534 | System.out.println("消息内容的偏移量=" + record.offset()); 535 | } 536 | return buffer; 537 | } 538 | 539 | //重置指定topic的offset到对应的timestamp 540 | public static boolean resetOffsetToTimestamp(String topic, String groupId, long timestamp) throws ExecutionException, InterruptedException { 541 | 542 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 543 | //用于保存消息的list 544 | ArrayList buffer = new ArrayList<>(); 545 | 546 | long start=System.currentTimeMillis(); //获取开始时间 547 | // 指定位置开始消费 548 | Set assignment= new HashSet<>(); 549 | while (assignment.size() == 0) { 550 | kafkaConsumer.poll(Duration.ofSeconds(1)); 551 | // 获取消费者分区分配信息(有了分区分配信息才能开始消费) 552 | assignment = kafkaConsumer.assignment(); 553 | } 554 | // 遍历所有分区,并指定 offset 从 100 的位置开始消费 555 | for (TopicPartition topicPartition : assignment) { 556 | Map query = new HashMap<>();//构造offsetsForTimes参数,通过时间戳找到offset 557 | query.put(topicPartition, timestamp); 558 | Map result = kafkaConsumer.offsetsForTimes(query); 559 | if (result.get(topicPartition) !=null){ 560 | long offset = result.get(topicPartition).offset(); 561 | kafkaConsumer.seek(topicPartition,offset); // 指定offset 562 | kafkaConsumer.commitAsync(); 563 | } 564 | 565 | } 566 | long end=System.currentTimeMillis(); //获取结束时间 567 | System.out.println("程序运行时间: "+(end-start)+"ms"); 568 | 569 | return true; 570 | } 571 | //重置指定topic的offset到最早 572 | public static boolean resetOffsetToEarliest(String topic, String groupId) throws ExecutionException, InterruptedException { 573 | 574 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 575 | 576 | long start=System.currentTimeMillis(); //获取开始时间 577 | // 指定位置开始消费 578 | Set assignment= new HashSet<>(); 579 | while (assignment.size() == 0) { 580 | kafkaConsumer.poll(Duration.ofSeconds(1)); 581 | // 获取消费者分区分配信息(有了分区分配信息才能开始消费) 582 | assignment = kafkaConsumer.assignment(); 583 | } 584 | // 遍历所有分区,并指定 offset 从 100 的位置开始消费 585 | for (TopicPartition topicPartition : assignment) { 586 | Map query = new HashMap<>();//构造offsetsForTimes参数,通过时间戳找到offset 587 | long begin = kafkaConsumer.beginningOffsets(Arrays.asList(topicPartition)).get(topicPartition); 588 | kafkaConsumer.seek(topicPartition,begin); // 指定offset 589 | kafkaConsumer.commitAsync(); 590 | } 591 | long end=System.currentTimeMillis(); //获取结束时间 592 | System.out.println("程序运行时间: "+(end-start)+"ms"); 593 | 594 | return true; 595 | } 596 | //重置指定topic的offset到最晚,一般在跳过测试脏数据时候使用 597 | public static boolean resetOffsetToLatest(String topic, String groupId) throws ExecutionException, InterruptedException { 598 | 599 | KafkaConsumer 
kafkaConsumer = getKafkaConsumer(topic, groupId); 600 | 601 | long start=System.currentTimeMillis(); //获取开始时间 602 | // 指定位置开始消费 603 | Set assignment= new HashSet<>(); 604 | while (assignment.size() == 0) { 605 | kafkaConsumer.poll(Duration.ofSeconds(1)); 606 | // 获取消费者分区分配信息(有了分区分配信息才能开始消费) 607 | assignment = kafkaConsumer.assignment(); 608 | } 609 | // 遍历所有分区,并指定 offset 从 100 的位置开始消费 610 | for (TopicPartition topicPartition : assignment) { 611 | Map query = new HashMap<>();//构造offsetsForTimes参数,通过时间戳找到offset 612 | long end = kafkaConsumer.endOffsets(Arrays.asList(topicPartition)).get(topicPartition); 613 | kafkaConsumer.seek(topicPartition,end); // 指定offset 614 | kafkaConsumer.commitAsync(); 615 | } 616 | long end=System.currentTimeMillis(); //获取结束时间 617 | System.out.println("程序运行时间: "+(end-start)+"ms"); 618 | 619 | return true; 620 | } 621 | 622 | ///获取当前消费偏移量情况 623 | // { 624 | // "partitionNum": 5, 625 | // "dataNum": 1, 626 | // "lagNum": 0, 627 | // "positions": [{ 628 | // "partition": 0, 629 | // "begin": 0, 630 | // "end": 0, 631 | // "current": 0, 632 | // "current1": 0, 633 | // "size": 0, 634 | // "lag": 0 635 | // }, { 636 | // "partition": 4, 637 | // "begin": 0, 638 | // "end": 0, 639 | // "current": 0, 640 | // "current1": 0, 641 | // "size": 0, 642 | // "lag": 0 643 | // }, { 644 | // "partition": 3, 645 | // "begin": 0, 646 | // "end": 0, 647 | // "current": 0, 648 | // "current1": 0, 649 | // "size": 0, 650 | // "lag": 0 651 | // }, { 652 | // "partition": 2, 653 | // "begin": 72, 654 | // "end": 72, 655 | // "current": 72, 656 | // "current1": 72, 657 | // "size": 0, 658 | // "lag": 0 659 | // }, { 660 | // "partition": 1, 661 | // "begin": 681, 662 | // "end": 682, 663 | // "current": 682, 664 | // "current1": 682, 665 | // "size": 1, 666 | // "lag": 0 667 | // }] 668 | // } 669 | public static LinkedHashMap consumerPositions(String topic, String groupId) throws ExecutionException, InterruptedException { 670 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 671 | 672 | Properties props = new Properties(); 673 | // 只需要提供一个或多个 broker 的 IP 和端口 674 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 675 | // 创建 AdminClient 对象 676 | AdminClient client = KafkaAdminClient.create(props); 677 | 678 | 679 | List partitionInfos = kafkaConsumer.partitionsFor(topic); 680 | System.out.println("Get the partition info as below:"); 681 | LinkedHashMap result = new LinkedHashMap(); 682 | List> positions = new ArrayList<>(); 683 | long lagNum = 0L; 684 | long dataNum = 0L; 685 | for (PartitionInfo partitionInfo : partitionInfos) { 686 | LinkedHashMap offsetMeta = new LinkedHashMap(); 687 | TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition()); 688 | try { 689 | // 5.通过Consumer.endOffsets(Collections)方法获取 690 | // 指定TopicPartition对应的lastOffset 691 | long begin = kafkaConsumer.beginningOffsets(Arrays.asList(topicPartition)).get(topicPartition); 692 | long end = kafkaConsumer.endOffsets(Arrays.asList(topicPartition)).get(topicPartition); 693 | Set partitions = new HashSet(); 694 | partitions.add(topicPartition); 695 | long current = kafkaConsumer.committed(partitions).get(topicPartition).offset(); 696 | 697 | long current1 = client.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get().get(topicPartition).offset(); 698 | long partition = topicPartition.partition(); 699 | 700 | long lag = end - current; 701 | long size = end - begin; 702 | lagNum+=lag; 703 | dataNum+=size; 704 | System.out.println("partition = " + 
partition); 705 | System.out.println("begin = " + begin); 706 | System.out.println("end = " + end); 707 | System.out.println("current = " + current); 708 | System.out.println("current1 = " + current1); 709 | System.out.println("size = " + size); 710 | System.out.println("lag = " + lag); 711 | offsetMeta.put("partition", partition); 712 | offsetMeta.put("begin", begin); 713 | offsetMeta.put("end", end); 714 | offsetMeta.put("current", current); 715 | offsetMeta.put("current1", current1); 716 | offsetMeta.put("size", size); 717 | offsetMeta.put("lag", lag); 718 | positions.add(offsetMeta); 719 | } catch (Exception e) { 720 | e.printStackTrace(); 721 | } 722 | 723 | } 724 | 725 | result.put("partitionNum",partitionInfos.size()); 726 | result.put("dataNum",dataNum); 727 | result.put("lagNum",lagNum); 728 | result.put("positions",positions); 729 | 730 | return result; 731 | } 732 | 733 | ///获取指定topic数据量详情情况 734 | //[{ 735 | // "partition": 0, 736 | // "begin": 0, 737 | // "end": 0, 738 | // "size": 0 739 | // }] 740 | public static List> topicSize(String topic) throws ExecutionException, InterruptedException{ 741 | 742 | String groupId = "guest"; 743 | KafkaConsumer kafkaConsumer = getKafkaConsumer(topic, groupId); 744 | 745 | List partitionInfos = kafkaConsumer.partitionsFor(topic); 746 | System.out.println("Get the partition info as below:"); 747 | List> result = new ArrayList<>(); 748 | for (PartitionInfo partitionInfo : partitionInfos) { 749 | LinkedHashMap offsetMeta = new LinkedHashMap(); 750 | TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition()); 751 | try { 752 | // 5.通过Consumer.endOffsets(Collections)方法获取 753 | // 指定TopicPartition对应的lastOffset 754 | long begin = kafkaConsumer.beginningOffsets(Arrays.asList(topicPartition)).get(topicPartition); 755 | long end = kafkaConsumer.endOffsets(Arrays.asList(topicPartition)).get(topicPartition); 756 | //Set partitions = new HashSet(); 757 | //partitions.add(topicPartition); 758 | long partition = topicPartition.partition(); 759 | 760 | long size = end - begin; 761 | System.out.println("partition = " + partition); 762 | System.out.println("begin = " + begin); 763 | System.out.println("end = " + end); 764 | System.out.println("size = " + size); 765 | offsetMeta.put("partition",partition); 766 | offsetMeta.put("begin",begin); 767 | offsetMeta.put("end",end); 768 | offsetMeta.put("size",size); 769 | result.add(offsetMeta); 770 | } catch (Exception e) { 771 | e.printStackTrace(); 772 | } 773 | 774 | } 775 | 776 | return result; 777 | } 778 | 779 | 780 | 781 | 782 | 783 | ///获取所有topic数据量详情情况 784 | // { 785 | // "TAG6f6c4f162844000": [{ 786 | // "partition": 0, 787 | // "begin": 0, 788 | // "end": 0, 789 | // "size": 0 790 | // }], 791 | // "TAG77362004b844000": [{ 792 | // "partition": 0, 793 | // "begin": 65, 794 | // "end": 65, 795 | // "size": 0 796 | // }] 797 | // } 798 | public static LinkedHashMap topicSizeAll() throws ExecutionException, InterruptedException{ 799 | List topicList = kafkaListTopics(); 800 | LinkedHashMap result = new LinkedHashMap(); 801 | for (String topic : topicList) { 802 | List> list= topicSize(topic); 803 | System.out.println("list = " + list);; 804 | result.put(topic,list); 805 | } 806 | return result; 807 | } 808 | 809 | 810 | ///获取指定topic数据量统计{"partitionNum":5452,"dataNum":41570647} 811 | public static LinkedHashMap topicSizeStatistics(String topic) throws ExecutionException, InterruptedException { 812 | 813 | LinkedHashMap result = new LinkedHashMap(); 814 | long partitionNum = 0L; 815 
| long dataNum = 0L; 816 | 817 | List> list = topicSize(topic); 818 | partitionNum = list.size(); 819 | //list.forEach(item->dataNum=dataNum+item.get("size")); 820 | dataNum = list.stream().mapToLong(item -> (long) item.get("size")).sum(); 821 | 822 | 823 | result.put("partitionNum", partitionNum); 824 | result.put("dataNum", dataNum); 825 | 826 | return result; 827 | } 828 | 829 | ///获取所有topic数据量统计{"topicNum":2550,"partitionNum":5452,"dataNum":41570647} 830 | public static LinkedHashMap topicSizeStatisticsAll() throws ExecutionException, InterruptedException{ 831 | List topicList = kafkaListTopics(); 832 | LinkedHashMap result = new LinkedHashMap(); 833 | long partitionNum = 0L; 834 | long dataNum = 0L; 835 | for (String topic : topicList) { 836 | List> list= topicSize(topic); 837 | partitionNum = partitionNum + list.size(); 838 | //list.forEach(item->dataNum=dataNum+item.get("size")); 839 | dataNum += list.stream().mapToLong(item -> (long) item.get("size")).sum(); 840 | } 841 | result.put("topicNum",topicList.size()); 842 | result.put("partitionNum",partitionNum); 843 | result.put("dataNum",dataNum); 844 | 845 | return result; 846 | } 847 | 848 | 849 | public static void main(String[] args) throws ExecutionException, InterruptedException,TimeoutException { 850 | long start=System.currentTimeMillis(); //获取开始时间 851 | 852 | // List list = KafkaUtil.kafkaListTopics(); 853 | // 854 | // for (String topic : list) { 855 | // 856 | // System.out.println("list = " + topicSize(topic,"group1"));; 857 | // 858 | // //TimeUnit.DAYS.sleep(1); 859 | // } 860 | //System.out.println("map = " + JSON.toJSONString(topicSizeAll())); 861 | // System.out.println("map = " + JSON.toJSONString(topicSizeStatistics("RULEa93304e6d844000")));; 862 | 863 | 864 | //resetOffsetToEarliest("RULEa93304e6d844000", "group1"); 865 | //LinkedHashMap recordMeta = sendToKafka("RULEa93304e6d844000","222","aaaa"); 866 | //JSONObject object = JSONUtil.parseObj(recordMeta); 867 | //System.out.println("object.toJSONString(4) = " + object.toJSONString(4)); 868 | // sendToKafka("RULEa93304e6d844000","333","aaaa"); 869 | //delGroupId("group1"); 870 | //System.out.println("kafkaListTopics() = " + kafkaListTopics()); 871 | //LinkedHashMap result = sendToKafka("RULEa93304e6d844000", "222", "aaaa"); 872 | //System.out.println("result = " + JSON.toJSONString(result)); 873 | //ArrayList> buffer = recvFromKafka("RULEa93304e6d844000", "group1"); 874 | //System.out.println("buffer = " + JSON.toJSONString(buffer)); 875 | // LinkedHashMap consumerPosition= consumerPositions("RULEa93304e6d844000", "group1"); 876 | // System.out.println("consumerPosition = " + JSON.toJSONString(consumerPosition)); 877 | 878 | 879 | LinkedHashMap o= topicSizeAll(); 880 | System.out.println("o = " + o); 881 | 882 | 883 | LinkedHashMap oo= topicSizeStatisticsAll(); 884 | System.out.println("oo = " + oo); 885 | 886 | //delTopic("producer"); 887 | 888 | 889 | //alterTopic("RULEa93304e6d844000", 3); 890 | 891 | 892 | //createTopic("aaa",3,(short)1); 893 | 894 | // recvFromKafka("RULEa93304e6d844000", "group1"); 895 | //recvFromKafka("RULEa93304e6d844000", "group1"); 896 | //recvFromKafka("RULEa93304e6d844000", "group3"); 897 | 898 | //kafkaConsumerGroups("RULEa93304e6d844000"); 899 | //kafkaConsumerGroups(); 900 | // sendToKafka("RULEa93304e6d844000", "222", "aaaa"); 901 | // 902 | // for (int i = 0; i < 100; i++) { 903 | // sendToKafkaAsync("RULEa93304e6d844000", "222", "aaaa"); 904 | // //sendToKafka("RULEa93304e6d844000", "333", "aaaa"); 905 | // } 906 | 907 | 
//createTopic("atest0908"); 908 | //partitionsTopic("atest0908"); 909 | //delTopic("atest0908"); 910 | //partitionsTopic("RULE735e0e863c44000");//50分区 911 | //descCluster(); 912 | //consumerPositions("RULEa93304e6d844000","group1"); 913 | //delGroupId("group1"); 914 | //recvFromKafka("RULE735e0e863c44000", "group1"); 915 | 916 | TimeUnit.DAYS.sleep(1); 917 | 918 | /* for (int i = 0; i < 100; i++) { 919 | 920 | try { 921 | sendToKafka("RULEa93304e6d844000","222","aaaa"); 922 | sendToKafka("RULEa93304e6d844000","333","aaaa"); 923 | // sendToKafka("RULEa93304e6d844000","222","aaaa"); 924 | // sendToKafka("RULEa93304e6d844000","333","aaaa"); 925 | // ArrayList buffer1 = recvFromKafka("RULEa93304e6d844000", "group1"); 926 | // System.out.println("buffer1.size() = " + buffer1.size()); 927 | // TimeUnit.SECONDS.sleep(30); // 休眠 1s 928 | //resetOffsetToTimestamp("RULEa93304e6d844000", "group1",1661853600000L); 929 | resetOffsetToEarliest("RULEa93304e6d844000", "group1"); 930 | 931 | //ArrayList buffer1 = recvFromKafkaByTimestamp("RULEa93304e6d844000", "group1",1,1661853600000L); 932 | //recvFromKafkaByOffset("RULEa93304e6d844000", "group1",1,10); 933 | //System.out.println("buffer1.size() = " + buffer1.size()); 934 | TimeUnit.SECONDS.sleep(30); // 休眠 1s 935 | 936 | List> result = consumerPositions("RULEa93304e6d844000", "group1"); 937 | JSONArray array = JSONUtil.parseArray(result); 938 | System.out.println("array.toJSONString(4) = " + array.toJSONString(4)); 939 | //TimeUnit.DAYS.sleep(1); // 休眠 1 天 940 | } catch (Exception e) { 941 | e.printStackTrace(); 942 | } 943 | 944 | }*/ 945 | 946 | 947 | 948 | 949 | 950 | 951 | //long end=System.currentTimeMillis(); //获取结束时间 952 | //System.out.println("程序运行时间: "+(end-start)+"ms"); 953 | 954 | } 955 | } 956 | --------------------------------------------------------------------------------