├── Readme.md ├── pom.xml └── src └── main ├── java └── com │ └── spnotes │ └── kafka │ ├── offset │ ├── Consumer.java │ └── Producer.java │ ├── partition │ ├── Consumer.java │ ├── CountryPartitioner.java │ └── Producer.java │ └── simple │ ├── Consumer.java │ └── Producer.java └── resources └── log4j.properties /Readme.md: -------------------------------------------------------------------------------- 1 | 2 | Compile the code using 3 | 4 | mvn compile assembly:single -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.spnotes.kafka 8 | KafkaAPIClient 9 | 1.0-SNAPSHOT 10 | 11 | 12 | 13 | org.apache.kafka 14 | kafka-clients 15 | 0.9.0.0 16 | 17 | 18 | org.slf4j 19 | slf4j-api 20 | 1.7.12 21 | 22 | 23 | org.slf4j 24 | slf4j-log4j12 25 | 1.7.12 26 | 27 | 28 | log4j 29 | log4j 30 | 1.2.17 31 | 32 | 33 | 34 | 35 | 36 | 37 | maven-assembly-plugin 38 | 39 | 40 | 41 | com.spnotes.kafka.simple.Producer 42 | 43 | 44 | 45 | jar-with-dependencies 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /src/main/java/com/spnotes/kafka/offset/Consumer.java: -------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.offset; 2 | 3 | import org.apache.kafka.clients.consumer.*; 4 | import org.apache.kafka.common.TopicPartition; 5 | import org.apache.kafka.common.errors.WakeupException; 6 | 7 | import java.util.*; 8 | 9 | /** 10 | * Created by sunilpatil on 1/2/16. 
11 | */ 12 | public class Consumer { 13 | private static Scanner in; 14 | 15 | public static void main(String[] argv)throws Exception{ 16 | if (argv.length != 3) { 17 | System.err.printf("Usage: %s \n", 18 | Consumer.class.getSimpleName()); 19 | System.exit(-1); 20 | } 21 | in = new Scanner(System.in); 22 | 23 | String topicName = argv[0]; 24 | String groupId = argv[1]; 25 | final long startingOffset = Long.parseLong(argv[2]); 26 | 27 | ConsumerThread consumerThread = new ConsumerThread(topicName,groupId,startingOffset); 28 | consumerThread.start(); 29 | String line = ""; 30 | while (!line.equals("exit")) { 31 | line = in.next(); 32 | } 33 | consumerThread.getKafkaConsumer().wakeup(); 34 | System.out.println("Stopping consumer ....."); 35 | consumerThread.join(); 36 | 37 | } 38 | 39 | private static class ConsumerThread extends Thread{ 40 | private String topicName; 41 | private String groupId; 42 | private long startingOffset; 43 | private KafkaConsumer kafkaConsumer; 44 | 45 | public ConsumerThread(String topicName, String groupId, long startingOffset){ 46 | this.topicName = topicName; 47 | this.groupId = groupId; 48 | this.startingOffset=startingOffset; 49 | } 50 | public void run() { 51 | Properties configProperties = new Properties(); 52 | configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); 53 | configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); 54 | configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); 55 | configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); 56 | configProperties.put(ConsumerConfig.CLIENT_ID_CONFIG, "offset123"); 57 | configProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false); 58 | configProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest"); 59 | 60 | //Figure out where to start processing messages from 61 | kafkaConsumer = new 
KafkaConsumer(configProperties); 62 | kafkaConsumer.subscribe(Arrays.asList(topicName), new ConsumerRebalanceListener() { 63 | public void onPartitionsRevoked(Collection partitions) { 64 | System.out.printf("%s topic-partitions are revoked from this consumer\n", Arrays.toString(partitions.toArray())); 65 | } 66 | public void onPartitionsAssigned(Collection partitions) { 67 | System.out.printf("%s topic-partitions are assigned to this consumer\n", Arrays.toString(partitions.toArray())); 68 | Iterator topicPartitionIterator = partitions.iterator(); 69 | while(topicPartitionIterator.hasNext()){ 70 | TopicPartition topicPartition = topicPartitionIterator.next(); 71 | System.out.println("Current offset is " + kafkaConsumer.position(topicPartition) + " committed offset is ->" + kafkaConsumer.committed(topicPartition) ); 72 | if(startingOffset == -2) { 73 | System.out.println("Leaving it alone"); 74 | }else if(startingOffset ==0){ 75 | System.out.println("Setting offset to begining"); 76 | 77 | kafkaConsumer.seekToBeginning(topicPartition); 78 | }else if(startingOffset == -1){ 79 | System.out.println("Setting it to the end "); 80 | 81 | kafkaConsumer.seekToEnd(topicPartition); 82 | }else { 83 | System.out.println("Resetting offset to " + startingOffset); 84 | kafkaConsumer.seek(topicPartition, startingOffset); 85 | } 86 | } 87 | } 88 | }); 89 | //Start processing messages 90 | try { 91 | while (true) { 92 | ConsumerRecords records = kafkaConsumer.poll(100); 93 | for (ConsumerRecord record : records) { 94 | System.out.println(record.value()); 95 | } 96 | if(startingOffset == -2) 97 | kafkaConsumer.commitSync(); 98 | } 99 | }catch(WakeupException ex){ 100 | System.out.println("Exception caught " + ex.getMessage()); 101 | }finally{ 102 | kafkaConsumer.close(); 103 | System.out.println("After closing KafkaConsumer"); 104 | } 105 | } 106 | public KafkaConsumer getKafkaConsumer(){ 107 | return this.kafkaConsumer; 108 | } 109 | } 110 | } 111 | 
--------------------------------------------------------------------------------
/src/main/java/com/spnotes/kafka/offset/Producer.java:
--------------------------------------------------------------------------------
package com.spnotes.kafka.offset;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.Scanner;

/**
 * Console producer: reads lines from stdin and publishes each one to the given
 * topic, printing the offset each message was stored at.
 *
 * Created by sunilpatil on 1/2/16.
 */
public class Producer {
    private static Scanner in;

    public static void main(String[] argv) throws Exception {
        if (argv.length != 1) {
            System.err.println("Please specify 1 parameter (the topic name)");
            System.exit(-1);
        }
        String topicName = argv[0];
        in = new Scanner(System.in);
        System.out.println("Enter message(type exit to quit)");

        // Configure the Producer
        Properties configProperties = new Properties();
        configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        org.apache.kafka.clients.producer.Producer<byte[], String> producer =
                new KafkaProducer<byte[], String>(configProperties);
        String line = in.nextLine();
        while (!line.equals("exit")) {
            // Null key: the broker-side default partitioner distributes messages.
            ProducerRecord<byte[], String> rec = new ProducerRecord<byte[], String>(topicName, null, line);
            producer.send(rec, new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // metadata is null when the send fails, so check the exception first
                    // to avoid an NPE inside the callback.
                    if (exception != null) {
                        System.out.println("Failed to send message: " + exception.getMessage());
                    } else {
                        System.out.println("Message sent to topic ->" + metadata.topic() + " stored at offset->" + metadata.offset());
                    }
                }
            });
            line = in.nextLine();
        }
        in.close();
        producer.close();
    }
}
--------------------------------------------------------------------------------
/src/main/java/com/spnotes/kafka/partition/Consumer.java:
-------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.partition; 2 | 3 | import org.apache.kafka.clients.consumer.*; 4 | import org.apache.kafka.common.TopicPartition; 5 | import org.apache.kafka.common.errors.WakeupException; 6 | 7 | import java.lang.reflect.Array; 8 | import java.util.Arrays; 9 | import java.util.Collection; 10 | import java.util.Properties; 11 | import java.util.Scanner; 12 | 13 | /** 14 | * Created by sunilpatil on 12/28/15. 15 | */ 16 | public class Consumer { 17 | private static Scanner in; 18 | private static boolean stop = false; 19 | 20 | public static void main(String[] argv) throws Exception { 21 | if (argv.length != 2) { 22 | System.err.printf("Usage: %s \n", 23 | Consumer.class.getSimpleName()); 24 | System.exit(-1); 25 | } 26 | in = new Scanner(System.in); 27 | String topicName = argv[0]; 28 | String groupId = argv[1]; 29 | 30 | ConsumerThread consumerThread = new ConsumerThread(topicName, groupId); 31 | consumerThread.start(); 32 | String line = ""; 33 | while (!line.equals("exit")) { 34 | line = in.next(); 35 | } 36 | consumerThread.getKafkaConsumer().wakeup(); 37 | System.out.println("Stopping consumer ....."); 38 | consumerThread.join(); 39 | } 40 | 41 | private static class ConsumerThread extends Thread { 42 | private String topicName; 43 | private String groupId; 44 | private KafkaConsumer kafkaConsumer; 45 | 46 | public ConsumerThread(String topicName, String groupId) { 47 | this.topicName = topicName; 48 | this.groupId = groupId; 49 | } 50 | 51 | public void run() { 52 | Properties configProperties = new Properties(); 53 | configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); 54 | configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); 55 | configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); 56 | 
configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); 57 | 58 | //Figure out where to start processing messages from 59 | kafkaConsumer = new KafkaConsumer(configProperties); 60 | kafkaConsumer.subscribe(Arrays.asList(topicName), new ConsumerRebalanceListener() { 61 | public void onPartitionsRevoked(Collection partitions) { 62 | System.out.printf("%s topic-partitions are revoked from this consumer\n", Arrays.toString(partitions.toArray())); 63 | } 64 | public void onPartitionsAssigned(Collection partitions) { 65 | System.out.printf("%s topic-partitions are assigned to this consumer\n", Arrays.toString(partitions.toArray())); 66 | } 67 | }); 68 | //Start processing messages 69 | try { 70 | while (true) { 71 | ConsumerRecords records = kafkaConsumer.poll(100); 72 | for (ConsumerRecord record : records) 73 | System.out.println(record.value()); 74 | } 75 | } catch (WakeupException ex) { 76 | System.out.println("Exception caught " + ex.getMessage()); 77 | } finally { 78 | kafkaConsumer.close(); 79 | System.out.println("After closing KafkaConsumer"); 80 | } 81 | } 82 | 83 | public KafkaConsumer getKafkaConsumer() { 84 | return this.kafkaConsumer; 85 | } 86 | } 87 | } 88 | 89 | 90 | -------------------------------------------------------------------------------- /src/main/java/com/spnotes/kafka/partition/CountryPartitioner.java: -------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.partition; 2 | 3 | import org.apache.kafka.clients.producer.Partitioner; 4 | import org.apache.kafka.common.Cluster; 5 | 6 | import java.util.HashMap; 7 | import java.util.List; 8 | import java.util.Map; 9 | 10 | public class CountryPartitioner implements Partitioner { 11 | private static Map countryToPartitionMap; 12 | 13 | // This method will gets called at the start, you should use it to do one time startup activity 14 | public void configure(Map configs) { 15 | System.out.println("Inside CountryPartitioner.configure " + 
configs); 16 | countryToPartitionMap = new HashMap(); 17 | for(Map.Entry entry: configs.entrySet()){ 18 | if(entry.getKey().startsWith("partitions.")){ 19 | String keyName = entry.getKey(); 20 | String value = (String)entry.getValue(); 21 | System.out.println( keyName.substring(11)); 22 | int paritionId = Integer.parseInt(keyName.substring(11)); 23 | countryToPartitionMap.put(value,paritionId); 24 | } 25 | } 26 | } 27 | 28 | //This method will get called once for each message 29 | public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, 30 | Cluster cluster) { 31 | List partitions = cluster.availablePartitionsForTopic(topic); 32 | String valueStr = (String)value; 33 | String countryName = ((String) value).split(":")[0]; 34 | if(countryToPartitionMap.containsKey(countryName)){ 35 | //If the country is mapped to particular partition return it 36 | return countryToPartitionMap.get(countryName); 37 | }else { 38 | //If no country is mapped to particular partition distribute between remaining partitions 39 | int noOfPartitions = cluster.topics().size(); 40 | return value.hashCode()%noOfPartitions + countryToPartitionMap.size() ; 41 | } 42 | } 43 | 44 | // This method will get called at the end and gives your partitioner class chance to cleanup 45 | public void close() {} 46 | } 47 | -------------------------------------------------------------------------------- /src/main/java/com/spnotes/kafka/partition/Producer.java: -------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.partition; 2 | 3 | import org.apache.kafka.clients.producer.*; 4 | 5 | import java.util.Properties; 6 | import java.util.Scanner; 7 | 8 | /** 9 | * Created by sunilpatil on 1/2/16. 
10 | */ 11 | public class Producer { 12 | private static Scanner in; 13 | public static void main(String[] argv)throws Exception { 14 | if (argv.length != 1) { 15 | System.err.println("Please specify 1 parameters "); 16 | System.exit(-1); 17 | } 18 | String topicName = argv[0]; 19 | in = new Scanner(System.in); 20 | System.out.println("Enter message(type exit to quit)"); 21 | 22 | //Configure the Producer 23 | Properties configProperties = new Properties(); 24 | configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092"); 25 | configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.ByteArraySerializer"); 26 | configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer"); 27 | 28 | configProperties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,CountryPartitioner.class.getCanonicalName()); 29 | configProperties.put("partitions.0","USA"); 30 | configProperties.put("partitions.1","India"); 31 | 32 | org.apache.kafka.clients.producer.Producer producer = new KafkaProducer(configProperties); 33 | String line = in.nextLine(); 34 | while(!line.equals("exit")) { 35 | ProducerRecord rec = new ProducerRecord(topicName, line); 36 | 37 | producer.send(rec, new Callback() { 38 | public void onCompletion(RecordMetadata metadata, Exception exception) { 39 | System.out.println("Message sent to topic ->" + metadata.topic()+ " ,parition->" + metadata.partition() +" stored at offset->" + metadata.offset()); 40 | } 41 | }); 42 | line = in.nextLine(); 43 | } 44 | in.close(); 45 | producer.close(); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/com/spnotes/kafka/simple/Consumer.java: -------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.simple; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerConfig; 4 | import 
org.apache.kafka.clients.consumer.ConsumerRecord; 5 | import org.apache.kafka.clients.consumer.ConsumerRecords; 6 | import org.apache.kafka.clients.consumer.KafkaConsumer; 7 | import org.apache.kafka.common.errors.WakeupException; 8 | 9 | import java.util.Arrays; 10 | import java.util.Properties; 11 | import java.util.Scanner; 12 | 13 | /** 14 | * Created by sunilpatil on 12/28/15. 15 | */ 16 | public class Consumer { 17 | private static Scanner in; 18 | 19 | public static void main(String[] argv)throws Exception{ 20 | if (argv.length != 2) { 21 | System.err.printf("Usage: %s \n", 22 | Consumer.class.getSimpleName()); 23 | System.exit(-1); 24 | } 25 | in = new Scanner(System.in); 26 | String topicName = argv[0]; 27 | String groupId = argv[1]; 28 | 29 | ConsumerThread consumerRunnable = new ConsumerThread(topicName,groupId); 30 | consumerRunnable.start(); 31 | String line = ""; 32 | while (!line.equals("exit")) { 33 | line = in.next(); 34 | } 35 | consumerRunnable.getKafkaConsumer().wakeup(); 36 | System.out.println("Stopping consumer ....."); 37 | consumerRunnable.join(); 38 | } 39 | 40 | private static class ConsumerThread extends Thread{ 41 | private String topicName; 42 | private String groupId; 43 | private KafkaConsumer kafkaConsumer; 44 | 45 | public ConsumerThread(String topicName, String groupId){ 46 | this.topicName = topicName; 47 | this.groupId = groupId; 48 | } 49 | public void run() { 50 | Properties configProperties = new Properties(); 51 | configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); 52 | configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); 53 | configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); 54 | configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); 55 | configProperties.put(ConsumerConfig.CLIENT_ID_CONFIG, "simple"); 56 | 57 | //Figure out where 
to start processing messages from 58 | kafkaConsumer = new KafkaConsumer(configProperties); 59 | kafkaConsumer.subscribe(Arrays.asList(topicName)); 60 | //Start processing messages 61 | try { 62 | while (true) { 63 | ConsumerRecords records = kafkaConsumer.poll(100); 64 | for (ConsumerRecord record : records) 65 | System.out.println(record.value()); 66 | } 67 | }catch(WakeupException ex){ 68 | System.out.println("Exception caught " + ex.getMessage()); 69 | }finally{ 70 | kafkaConsumer.close(); 71 | System.out.println("After closing KafkaConsumer"); 72 | } 73 | } 74 | public KafkaConsumer getKafkaConsumer(){ 75 | return this.kafkaConsumer; 76 | } 77 | } 78 | } 79 | 80 | -------------------------------------------------------------------------------- /src/main/java/com/spnotes/kafka/simple/Producer.java: -------------------------------------------------------------------------------- 1 | package com.spnotes.kafka.simple; 2 | 3 | import org.apache.kafka.clients.producer.KafkaProducer; 4 | import org.apache.kafka.clients.producer.ProducerConfig; 5 | import org.apache.kafka.clients.producer.ProducerRecord; 6 | 7 | import java.util.Properties; 8 | import java.util.Scanner; 9 | 10 | /** 11 | * Created by sunilpatil on 12/28/15. 
12 | */ 13 | public class Producer { 14 | private static Scanner in; 15 | public static void main(String[] argv)throws Exception { 16 | if (argv.length != 1) { 17 | System.err.println("Please specify 1 parameters "); 18 | System.exit(-1); 19 | } 20 | String topicName = argv[0]; 21 | in = new Scanner(System.in); 22 | System.out.println("Enter message(type exit to quit)"); 23 | 24 | //Configure the Producer 25 | Properties configProperties = new Properties(); 26 | configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092"); 27 | configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.ByteArraySerializer"); 28 | configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer"); 29 | 30 | org.apache.kafka.clients.producer.Producer producer = new KafkaProducer(configProperties); 31 | String line = in.nextLine(); 32 | while(!line.equals("exit")) { 33 | //TODO: Make sure to use the ProducerRecord constructor that does not take parition Id 34 | ProducerRecord rec = new ProducerRecord(topicName,line); 35 | producer.send(rec); 36 | line = in.nextLine(); 37 | } 38 | in.close(); 39 | producer.close(); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 9 | --------------------------------------------------------------------------------