├── .travis.yml
├── gradle
│   ├── wrapper
│   │   ├── gradle-wrapper.jar
│   │   └── gradle-wrapper.properties
│   └── sona.gradle
├── .gitreview
├── .gitignore
├── producer_example_conf.json
├── consumer_example_conf.json
├── LICENSE.txt
├── src
│   ├── test
│   │   └── java
│   │       └── com
│   │           └── cyngn
│   │               └── kafka
│   │                   ├── MetricPublisherTest.java
│   │                   ├── MessageProducerTest.java
│   │                   └── SimpleConsumerTest.java
│   └── main
│       └── java
│           └── com
│               └── cyngn
│                   └── kafka
│                       ├── consume
│                       │   ├── KafkaEvent.java
│                       │   └── SimpleConsumer.java
│                       ├── config
│                       │   └── ConfigConstants.java
│                       └── produce
│                           ├── KafkaPublisher.java
│                           └── MessageProducer.java
├── gradlew
└── README.md

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
language: java
jdk:
  - oraclejdk8

--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyngn/vertx-kafka/HEAD/gradle/wrapper/gradle-wrapper.jar

--------------------------------------------------------------------------------
/.gitreview:
--------------------------------------------------------------------------------
[gerrit]
host=review.cyanogenmod.org
port=29418
project=cyngn/vertx-kafka
defaultbranch=master

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.gradle
out
atlassian-ide-plugin.xml
build
*.iml
*.ipr
*.iws
*.DS_Store
/bin/
.classpath
.project
.settings/
.idea
gradle.properties

--------------------------------------------------------------------------------
/producer_example_conf.json:
--------------------------------------------------------------------------------
// Global list of all configuration properties at http://kafka.apache.org/documentation.html
{
  "serializer.class": "org.apache.kafka.common.serialization.StringSerializer",
  "bootstrap.servers": "localhost:9092"
}

--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
#Sat Nov 21 13:01:18 PST 2015
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-2.9-bin.zip

--------------------------------------------------------------------------------
/consumer_example_conf.json:
--------------------------------------------------------------------------------
// Global list of all configuration properties at http://kafka.apache.org/documentation.html
{
  "zookeeper.connect": "localhost:2181",
  "bootstrap.servers": "localhost:9092",
  "group.id": "testGroup",
  "backoff.increment.ms": "100",
  "autooffset.reset": "smallest",
  "topics": [
    "testTopic"
  ]
}
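Both example configs are meant to be handed to a verticle as its deployment config. A minimal sketch of loading one at runtime, assuming the file sits in the working directory and the leading `//` comment line has been stripped (strict JSON parsers reject comments); the `Launcher` class name is illustrative, not part of the repo:

```java
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;

// illustrative launcher, not part of the repo
public class Launcher {
    public static void main(String[] args) {
        Vertx vertx = Vertx.vertx();
        // read the example config off disk and hand it to the verticle as config()
        vertx.fileSystem().readFile("consumer_example_conf.json", read -> {
            if (read.failed()) {
                System.err.println("could not read config: " + read.cause());
                return;
            }
            JsonObject config = new JsonObject(read.result().toString());
            vertx.deployVerticle("com.cyngn.kafka.consume.SimpleConsumer",
                    new DeploymentOptions().setConfig(config));
        });
    }
}
```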
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
Copyright 2016 Cyanogen Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------
/gradle/sona.gradle:
--------------------------------------------------------------------------------
apply plugin: 'signing'
apply plugin: 'maven'

signing {
    required {
        gradle.taskGraph.hasTask("uploadArchives")
    }
    sign configurations.archives
}

uploadArchives {
    repositories {
        mavenDeployer {
            beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }

            repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2/") {
                authentication(userName: sonaUsername, password: sonaPassword)
            }

            snapshotRepository(url: "https://oss.sonatype.org/content/repositories/snapshots/") {
                authentication(userName: sonaUsername, password: sonaPassword)
            }

            pom.project {
                name 'vertx-kafka'
                packaging 'jar'
                description 'A library for using kafka with vert.x'
                url 'https://github.com/cyngn/vertx-kafka'

                scm {
                    url 'https://github.com/cyngn/vertx-kafka'
                }

                licenses {
                    license {
                        name 'The Apache License, Version 2.0'
                        url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
                    }
                }

                developers {
                    developer {
                        id 'jtruelove'
                        name 'Jeremy Truelove'
                    }
                }
            }
        }
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/cyngn/kafka/MetricPublisherTest.java:
--------------------------------------------------------------------------------
package com.cyngn.kafka;

import com.cyngn.kafka.config.ConfigConstants;
import com.cyngn.kafka.produce.KafkaPublisher;
import com.cyngn.kafka.produce.MessageProducer;
import io.vertx.core.Vertx;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;

/**
 * Basic tests for the publisher helper.
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 11/21/15
 */
@RunWith(VertxUnitRunner.class)
public class MetricPublisherTest {

    private static Vertx vertx;

    @Test
    public void testFailure(TestContext context) {
        Async async = context.async();
        vertx = Vertx.vertx();

        // fake verticle that rejects every message sent to the producer address
        vertx.eventBus().consumer(MessageProducer.EVENTBUS_DEFAULT_ADDRESS, msg -> {
            msg.fail(-1, "I fail everything");
        });

        // listen for the error message; registered before sending so the reply can't be missed
        vertx.eventBus().consumer(ConfigConstants.PRODUCER_ERROR_TOPIC, msg -> {
            try {
                Assert.assertTrue(msg.body().toString().contains("I fail everything"));
            } finally {
                async.complete();
                vertx.close();
            }
        });

        KafkaPublisher pub = new KafkaPublisher(vertx.eventBus());
        pub.send("foo", "an empty message");
    }
}

--------------------------------------------------------------------------------
/src/main/java/com/cyngn/kafka/consume/KafkaEvent.java:
--------------------------------------------------------------------------------
package com.cyngn.kafka.consume;

import com.cyngn.kafka.config.ConfigConstants;
import io.vertx.core.json.JsonObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Represents a Kafka message event from the event bus.
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 11/30/15
 */
public class KafkaEvent {
    public final String topic;
    public final String key;
    public final String value;
    public final int partition;

    public KafkaEvent(JsonObject event) {
        topic = event.getString(ConfigConstants.TOPIC_FIELD);
        key = event.getString(ConfigConstants.KEY_FIELD);
        value = event.getString(ConfigConstants.VALUE_FIELD);
        partition = event.getInteger(ConfigConstants.PARTITION_FIELD);
    }

    @Override
    public String toString() {
        return "KafkaEvent{" +
                "topic='" + topic + '\'' +
                ", key='" + key + '\'' +
                ", value='" + value + '\'' +
                ", partition=" + partition +
                '}';
    }

    /**
     * Convert a Kafka ConsumerRecord into an event bus event.
     *
     * @param record the Kafka record
     * @return the record to send over the event bus
     */
    public static JsonObject createEventForBus(ConsumerRecord<String, String> record) {
        return new JsonObject()
                .put(ConfigConstants.TOPIC_FIELD, record.topic())
                .put(ConfigConstants.KEY_FIELD, record.key())
                .put(ConfigConstants.VALUE_FIELD, record.value())
                .put(ConfigConstants.PARTITION_FIELD, record.partition());
    }
}
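A quick round trip through the mapping above, record in, typed event out. This is a sketch; the five-argument ConsumerRecord constructor shape matches the 0.9 client this project builds against, and the topic/key/value strings are illustrative:

```java
import com.cyngn.kafka.consume.KafkaEvent;
import io.vertx.core.json.JsonObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// illustrative round-trip check, not part of the repo
public class KafkaEventRoundTrip {
    public static void main(String[] args) {
        // offset 0 in partition 3 of testTopic, per the 0.9 client's constructor
        ConsumerRecord<String, String> record =
                new ConsumerRecord<>("testTopic", 3, 0L, "aKey", "aValue");
        JsonObject event = KafkaEvent.createEventForBus(record);
        KafkaEvent typed = new KafkaEvent(event);
        System.out.println(typed); // KafkaEvent{topic='testTopic', key='aKey', value='aValue', partition=3}
    }
}
```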
--------------------------------------------------------------------------------
/src/main/java/com/cyngn/kafka/config/ConfigConstants.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 Cyanogen Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cyngn.kafka.config;

/**
 * Constants to pull from config.
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 1/27/15
 */
public class ConfigConstants {
    // consumer constants
    public static final String TOPICS = "topics";
    public static final String GROUP_ID = "group.id";
    public static final String BACKOFF_INCREMENT_MS = "backoff.increment.ms";
    public static final String AUTO_OFFSET_RESET = "autooffset.reset";
    public static final String EVENTBUS_ADDRESS = "eventbus.address";
    public static final String CONSUMER_POLL_INTERVAL_MS = "consumer.poll.interval.ms";
    public static final String ZK_CONNECT = "zookeeper.connect";
    public static final String KEY_DESERIALIZER_CLASS = "key.deserializer";
    public static final String VALUE_DESERIALIZER_CLASS = "value.deserializer";
    public static final String DEFAULT_DESERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringDeserializer";
    public static final String CONSUMER_ERROR_TOPIC = "kafka.consumer.error";

    // common
    public static final String BOOTSTRAP_SERVERS = "bootstrap.servers";

    // producer constants
    public static final String PRODUCER_ERROR_TOPIC = "kafka.producer.error";
    public static final String DEFAULT_TOPIC = "default.topic";
    public static final String KEY_SERIALIZER_CLASS = "key.serializer";
    public static final String PRODUCER_TYPE = "producer.type";
    public static final String SERIALIZER_CLASS = "serializer.class";
    public static final String VALUE_SERIALIZER_CLASS = "value.serializer";
    public static final String DEFAULT_SERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringSerializer";
    public static final String MAX_BLOCK_MS = "max.block.ms";

    // event bus fields
    public static final String TOPIC_FIELD = "topic";
    public static final String KEY_FIELD = "key";
    public static final String VALUE_FIELD = "value";
    public static final String PARTITION_FIELD = "partition";
}
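These constants are the keys the two verticles actually read. A sketch of the smallest configs they will accept; the `Configs` holder class is illustrative, and the required keys follow from the getRequiredConfig calls in SimpleConsumer and MessageProducer:

```java
import com.cyngn.kafka.config.ConfigConstants;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;

// illustrative helper, not part of the repo
public class Configs {
    // smallest producer config MessageProducer will accept:
    // bootstrap.servers is fetched via getRequiredConfig and throws if absent
    static JsonObject minimalProducer() {
        return new JsonObject()
                .put(ConfigConstants.BOOTSTRAP_SERVERS, "localhost:9092")
                .put(ConfigConstants.DEFAULT_TOPIC, "testTopic"); // needed for SIMPLE sends
    }

    // smallest consumer config: bootstrap.servers and group.id are required,
    // topics is read as a JsonArray of topic names
    static JsonObject minimalConsumer() {
        return new JsonObject()
                .put(ConfigConstants.BOOTSTRAP_SERVERS, "localhost:9092")
                .put(ConfigConstants.GROUP_ID, "testGroup")
                .put(ConfigConstants.TOPICS, new JsonArray().add("testTopic"));
    }
}
```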
--------------------------------------------------------------------------------
/src/test/java/com/cyngn/kafka/MessageProducerTest.java:
--------------------------------------------------------------------------------
package com.cyngn.kafka;

import com.cyngn.kafka.config.ConfigConstants;
import com.cyngn.kafka.produce.KafkaPublisher;
import com.cyngn.kafka.produce.MessageProducer;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Basic test showing how to consume messages from the vert.x event bus and send them to a Kafka topic.
 *
 * To run this test you need to do the following:
 *
 * 1) start zookeeper, i.e. from a local kafka install dir
 *    run: 'bin/zookeeper-server-start.sh config/zookeeper.properties'
 * 2) start a message broker, i.e. from a kafka install dir
 *    run: 'bin/kafka-server-start.sh config/server.properties'
 * 3) set up a topic in kafka, i.e. from the kafka install dir
 *    run: 'bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 8 --topic testTopic'
 * 4) remove the @Ignore annotation and start the test; it publishes one message and completes after 10 seconds
 * 5) watch the topic from the kafka CLI, i.e. from the kafka install dir
 *    run: 'bin/kafka-console-consumer.sh --topic testTopic --zookeeper localhost:2181'
 *    You should see: This is the message you should see in your consumer
 *
 * @author asarda@cyngn.com (Ajay Sarda) on 8/14/15.
 */
@RunWith(VertxUnitRunner.class)
public class MessageProducerTest {
    private static final Logger logger = LoggerFactory.getLogger(MessageProducerTest.class);

    private static Vertx vertx;

    @Ignore("This is an integration test; comment this annotation out to actually run it")
    @Test
    public void testMessageSend(TestContext testContext) {
        Async async = testContext.async();
        vertx = Vertx.vertx();

        JsonObject producerConfig = new JsonObject();
        producerConfig.put(ConfigConstants.BOOTSTRAP_SERVERS, "localhost:9092");
        // note: the config key is 'serializer.class'; the DEFAULT_SERIALIZER_CLASS constant is just its default value
        producerConfig.put(ConfigConstants.SERIALIZER_CLASS, ConfigConstants.DEFAULT_SERIALIZER_CLASS);
        producerConfig.put(ConfigConstants.MAX_BLOCK_MS, 500L);
        producerConfig.put(ConfigConstants.DEFAULT_TOPIC, "testTopic");

        KafkaPublisher publisher = new KafkaPublisher(vertx.eventBus());

        vertx.deployVerticle(MessageProducer.class.getName(),
            new DeploymentOptions().setConfig(producerConfig), deploy -> {
                if (deploy.failed()) {
                    logger.error("", deploy.cause());
                    testContext.fail("Could not deploy verticle");
                    async.complete();
                    vertx.close();
                } else {
                    // give vert.x some time to get the message off
                    long timerId = vertx.setTimer(10000, timer -> {
                        async.complete();
                        vertx.close();
                    });

                    // register the error listener before sending so a failure can't slip past us
                    vertx.eventBus().consumer(ConfigConstants.PRODUCER_ERROR_TOPIC, reply -> {
                        logger.error("error: " + reply.body());
                        vertx.cancelTimer(timerId);
                        testContext.fail("test has failed");
                        vertx.close();
                    });

                    publisher.send("This is the message you should see in your consumer");
                }
            });
    }
}
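KafkaPublisher, used above, is a thin wrapper: each send is an event bus request that MessageProducer answers with an empty JsonObject on success or a failure code of -1. A sketch of talking to the verticle directly over the bus, assuming a deployed MessageProducer on the default address (the `DirectSend` class is illustrative):

```java
import com.cyngn.kafka.config.ConfigConstants;
import com.cyngn.kafka.produce.KafkaPublisher;
import com.cyngn.kafka.produce.MessageProducer;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;

// illustrative helper, not part of the repo
public class DirectSend {
    public static void sendRaw(Vertx vertx) {
        // the same envelope KafkaPublisher.send(String) builds: a SIMPLE message for the default topic
        JsonObject envelope = new JsonObject()
                .put(ConfigConstants.VALUE_FIELD, "hello kafka")
                .put(KafkaPublisher.TYPE_FIELD, KafkaPublisher.MessageType.SIMPLE.value);

        vertx.eventBus().send(MessageProducer.EVENTBUS_DEFAULT_ADDRESS, envelope, result -> {
            if (result.succeeded()) {
                System.out.println("kafka accepted the message");
            } else {
                // a message.fail(-1, ...) from the verticle surfaces here as a failed reply
                System.err.println("send failed: " + result.cause());
            }
        });
    }
}
```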
--------------------------------------------------------------------------------
/src/test/java/com/cyngn/kafka/SimpleConsumerTest.java:
--------------------------------------------------------------------------------
package com.cyngn.kafka;

import com.cyngn.kafka.config.ConfigConstants;
import com.cyngn.kafka.consume.KafkaEvent;
import com.cyngn.kafka.consume.SimpleConsumer;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.eventbus.Message;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertTrue;

/**
 * Basic test showing how to consume messages from kafka and then publish them on the event bus.
 *
 * To run this test you need to do the following:
 *
 * 1) start zookeeper, i.e. from a local kafka install dir
 *    run: 'bin/zookeeper-server-start.sh config/zookeeper.properties'
 * 2) start a message broker, i.e. from a kafka install dir
 *    run: 'bin/kafka-server-start.sh config/server.properties'
 * 3) set up a topic in kafka, i.e. from the kafka install dir
 *    run: 'bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 8 --topic testTopic'
 * 4) remove the @Ignore annotation and start the test (note it will time out and fail if it doesn't receive a message in 20 seconds)
 * 5) publish a message on the topic from the kafka CLI, i.e. from the kafka install dir
 *    run: 'bin/kafka-console-producer.sh --broker-list localhost:9092 --topic testTopic'
 *    then type some text and press enter
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 2/19/15
 */
@RunWith(VertxUnitRunner.class)
public class SimpleConsumerTest {
    private static final Logger logger = LoggerFactory.getLogger(SimpleConsumerTest.class);

    private static Vertx vertx;

    @Ignore("This is an integration test; comment this annotation out to actually run it")
    @Test
    public void testMessageReceipt(TestContext testContext) {
        Async async = testContext.async();

        vertx = Vertx.vertx();

        JsonObject consumerConfig = new JsonObject();
        consumerConfig.put(ConfigConstants.GROUP_ID, "testGroup");
        consumerConfig.put(ConfigConstants.ZK_CONNECT, "localhost:2181");
        consumerConfig.put(ConfigConstants.BOOTSTRAP_SERVERS, "localhost:9092");
        List<String> topics = new ArrayList<>();
        // subscribe to the same topic the setup instructions create
        topics.add("testTopic");
        consumerConfig.put("topics", new JsonArray(topics));

        vertx.deployVerticle(SimpleConsumer.class.getName(),
            new DeploymentOptions().setConfig(consumerConfig), deploy -> {
                if (deploy.failed()) {
                    logger.error("", deploy.cause());
                    testContext.fail("Could not deploy verticle");
                    async.complete();
                    vertx.close();
                } else {
                    // have the test run for 20 seconds to give you enough time to get a message off
                    long timerId = vertx.setTimer(20000, theTimerId -> {
                        logger.info("Failed to get any messages");
                        testContext.fail("Test did not complete in 20 seconds");
                        async.complete();
                        vertx.close();
                    });

                    logger.info("Registering listener on event bus for kafka messages");

                    vertx.eventBus().<JsonObject>consumer(SimpleConsumer.EVENTBUS_DEFAULT_ADDRESS, (Message<JsonObject> message) -> {
                        assertTrue(message.body().toString().length() > 0);
                        logger.info("got message: " + message.body());
                        KafkaEvent event = new KafkaEvent(message.body());
                        logger.info("parsed event: " + event);
                        vertx.cancelTimer(timerId);
                        async.complete();
                        vertx.close();
                    });
                }
            }
        );
    }
}
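When the consumer subscribes to several topics everything arrives on a single bus address, so handlers typically route on the event's topic field. A sketch, with illustrative class and topic names:

```java
import com.cyngn.kafka.consume.KafkaEvent;
import com.cyngn.kafka.consume.SimpleConsumer;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;

// illustrative router, not part of the repo
public class TopicRouter {
    public static void route(Vertx vertx) {
        vertx.eventBus().<JsonObject>consumer(SimpleConsumer.EVENTBUS_DEFAULT_ADDRESS, message -> {
            KafkaEvent event = new KafkaEvent(message.body());
            // fan out by origin topic
            switch (event.topic) {
                case "testTopic":
                    System.out.println("test traffic: " + event.value);
                    break;
                default:
                    System.out.println("unhandled topic " + event.topic);
            }
        });
    }
}
```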
--------------------------------------------------------------------------------
/src/main/java/com/cyngn/kafka/produce/KafkaPublisher.java:
--------------------------------------------------------------------------------
package com.cyngn.kafka.produce;

import com.cyngn.kafka.config.ConfigConstants;
import io.vertx.core.eventbus.EventBus;
import io.vertx.core.json.JsonObject;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;

/**
 * Helper class for publishing kafka messages.
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 11/19/15
 */
public class KafkaPublisher {
    private static final Logger logger = LoggerFactory.getLogger(KafkaPublisher.class);
    private final EventBus bus;
    private final String address;

    public static final String TYPE_FIELD = "type";

    public KafkaPublisher(EventBus bus) {
        this(bus, MessageProducer.EVENTBUS_DEFAULT_ADDRESS);
    }

    public KafkaPublisher(EventBus bus, String address) {
        this.bus = bus;
        this.address = address;
    }

    /**
     * Send a message to the default topic.
     *
     * @param value the string value to send
     */
    public void send(String value) {
        JsonObject obj = new JsonObject()
                .put(ConfigConstants.VALUE_FIELD, value)
                .put(TYPE_FIELD, MessageType.SIMPLE.value);
        send(obj);
    }

    /**
     * Send a message to a specific topic.
     *
     * @param kafkaTopic the kafka topic to send to
     * @param value the string value to send
     */
    public void send(String kafkaTopic, String value) {
        JsonObject obj = new JsonObject()
                .put(ConfigConstants.VALUE_FIELD, value)
                .put(ConfigConstants.TOPIC_FIELD, kafkaTopic)
                .put(TYPE_FIELD, MessageType.CUSTOM_TOPIC.value);
        send(obj);
    }

    /**
     * Send a message to a specific topic with a custom key.
     *
     * @param kafkaTopic the kafka topic to send to
     * @param msgKey the custom key to assist in determining the partition
     * @param value the string value to send
     */
    public void send(String kafkaTopic, String msgKey, String value) {
        JsonObject obj = new JsonObject()
                .put(ConfigConstants.VALUE_FIELD, value)
                .put(ConfigConstants.TOPIC_FIELD, kafkaTopic)
                .put(ConfigConstants.KEY_FIELD, msgKey)
                .put(TYPE_FIELD, MessageType.CUSTOM_KEY.value);
        send(obj);
    }

    /**
     * Send a message to a specific topic and partition.
     *
     * @param kafkaTopic the kafka topic to send to
     * @param msgKey the custom key to assist in determining the partition
     * @param partitionKey the specific partition to use
     * @param value the string value to send
     */
    public void send(String kafkaTopic, String msgKey, Integer partitionKey, String value) {
        JsonObject obj = new JsonObject()
                .put(ConfigConstants.VALUE_FIELD, value)
                .put(ConfigConstants.TOPIC_FIELD, kafkaTopic)
                .put(ConfigConstants.PARTITION_FIELD, partitionKey)
                .put(ConfigConstants.KEY_FIELD, msgKey)
                .put(TYPE_FIELD, MessageType.CUSTOM_PARTITION.value);
        send(obj);
    }

    private void send(JsonObject message) {
        bus.send(address, message, result -> {
            if (result.failed()) {
                logger.error(result.cause());
                bus.send(ConfigConstants.PRODUCER_ERROR_TOPIC, result.cause().toString());
            }
        });
    }

    /**
     * Used to allow us to easily determine how to parse a message published.
     */
    public enum MessageType {
        INVALID(0),
        SIMPLE(1),
        CUSTOM_TOPIC(2),
        CUSTOM_KEY(3),
        CUSTOM_PARTITION(4);

        public final int value;

        MessageType(int value) {
            this.value = value;
        }

        public static MessageType fromInt(int value) {
            switch (value) {
                case 1:
                    return SIMPLE;
                case 2:
                    return CUSTOM_TOPIC;
                case 3:
                    return CUSTOM_KEY;
                case 4:
                    return CUSTOM_PARTITION;
                default:
                    return INVALID;
            }
        }
    }
}
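The two-argument constructor exists so a publisher can target a MessageProducer deployed on a custom bus address; a sketch of wiring the two together, assuming the address string matches on both sides (the address and class name here are illustrative):

```java
import com.cyngn.kafka.produce.KafkaPublisher;
import com.cyngn.kafka.produce.MessageProducer;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;

// illustrative wiring, not part of the repo
public class CustomAddress {
    public static void main(String[] args) {
        Vertx vertx = Vertx.vertx();
        String address = "my.app.kafka.out"; // illustrative address

        JsonObject config = new JsonObject()
                .put("bootstrap.servers", "localhost:9092")
                .put("default.topic", "testTopic")
                .put("eventbus.address", address); // MessageProducer listens here instead of the default

        vertx.deployVerticle(MessageProducer.class.getName(),
                new DeploymentOptions().setConfig(config), deploy -> {
                    if (deploy.succeeded()) {
                        // the publisher must be built with the same address
                        new KafkaPublisher(vertx.eventBus(), address).send("routed via a custom address");
                    }
                });
    }
}
```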
-------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 10 | DEFAULT_JVM_OPTS="" 11 | 12 | APP_NAME="Gradle" 13 | APP_BASE_NAME=`basename "$0"` 14 | 15 | # Use the maximum available, or set MAX_FD != -1 to use that value. 16 | MAX_FD="maximum" 17 | 18 | warn ( ) { 19 | echo "$*" 20 | } 21 | 22 | die ( ) { 23 | echo 24 | echo "$*" 25 | echo 26 | exit 1 27 | } 28 | 29 | # OS specific support (must be 'true' or 'false'). 30 | cygwin=false 31 | msys=false 32 | darwin=false 33 | case "`uname`" in 34 | CYGWIN* ) 35 | cygwin=true 36 | ;; 37 | Darwin* ) 38 | darwin=true 39 | ;; 40 | MINGW* ) 41 | msys=true 42 | ;; 43 | esac 44 | 45 | # For Cygwin, ensure paths are in UNIX format before anything is touched. 46 | if $cygwin ; then 47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 48 | fi 49 | 50 | # Attempt to set APP_HOME 51 | # Resolve links: $0 may be a link 52 | PRG="$0" 53 | # Need this for relative symlinks. 54 | while [ -h "$PRG" ] ; do 55 | ls=`ls -ld "$PRG"` 56 | link=`expr "$ls" : '.*-> \(.*\)$'` 57 | if expr "$link" : '/.*' > /dev/null; then 58 | PRG="$link" 59 | else 60 | PRG=`dirname "$PRG"`"/$link" 61 | fi 62 | done 63 | SAVED="`pwd`" 64 | cd "`dirname \"$PRG\"`/" >&- 65 | APP_HOME="`pwd -P`" 66 | cd "$SAVED" >&- 67 | 68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 69 | 70 | # Determine the Java command to use to start the JVM. 71 | if [ -n "$JAVA_HOME" ] ; then 72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 73 | # IBM's JDK on AIX uses strange locations for the executables 74 | JAVACMD="$JAVA_HOME/jre/sh/java" 75 | else 76 | JAVACMD="$JAVA_HOME/bin/java" 77 | fi 78 | if [ ! -x "$JAVACMD" ] ; then 79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 80 | 81 | Please set the JAVA_HOME variable in your environment to match the 82 | location of your Java installation." 83 | fi 84 | else 85 | JAVACMD="java" 86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 87 | 88 | Please set the JAVA_HOME variable in your environment to match the 89 | location of your Java installation." 90 | fi 91 | 92 | # Increase the maximum file descriptors if we can. 93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then 94 | MAX_FD_LIMIT=`ulimit -H -n` 95 | if [ $? -eq 0 ] ; then 96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 97 | MAX_FD="$MAX_FD_LIMIT" 98 | fi 99 | ulimit -n $MAX_FD 100 | if [ $? 
-ne 0 ] ; then 101 | warn "Could not set maximum file descriptor limit: $MAX_FD" 102 | fi 103 | else 104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 105 | fi 106 | fi 107 | 108 | # For Darwin, add options to specify how the application appears in the dock 109 | if $darwin; then 110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 111 | fi 112 | 113 | # For Cygwin, switch paths to Windows format before running java 114 | if $cygwin ; then 115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules 158 | function splitJvmOpts() { 159 | JVM_OPTS=("$@") 160 | } 161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS 162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" 163 | 164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" 165 | -------------------------------------------------------------------------------- /src/main/java/com/cyngn/kafka/produce/MessageProducer.java: -------------------------------------------------------------------------------- 1 | package com.cyngn.kafka.produce; 2 | 3 | import com.cyngn.kafka.config.ConfigConstants; 4 | import io.vertx.core.AbstractVerticle; 5 | import io.vertx.core.Future; 6 | import io.vertx.core.eventbus.Message; 7 | import io.vertx.core.json.JsonObject; 8 | import org.apache.kafka.clients.producer.KafkaProducer; 9 | import org.apache.kafka.clients.producer.ProducerRecord; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | import java.util.Properties; 14 | import java.util.concurrent.ExecutorService; 15 | import java.util.concurrent.Executors; 16 | 17 | /** 18 | * Module to listen for messages from vertx event bus and send them to Kafka defaultTopic. 
 *
 * @author asarda@cyngn.com (Ajay Sarda) on 8/14/15.
 */
public class MessageProducer extends AbstractVerticle {
    private KafkaProducer<String, String> producer;
    public static final String EVENTBUS_DEFAULT_ADDRESS = "kafka.message.publisher";
    private String busAddress;
    private static final Logger logger = LoggerFactory.getLogger(MessageProducer.class);
    private String defaultTopic;
    private JsonObject producerConfig;
    private ExecutorService sender;

    @Override
    public void start(final Future<Void> startedResult) {
        try {
            producerConfig = config();

            Properties properties = populateKafkaConfig();

            busAddress = producerConfig.getString(ConfigConstants.EVENTBUS_ADDRESS, EVENTBUS_DEFAULT_ADDRESS);
            defaultTopic = producerConfig.getString(ConfigConstants.DEFAULT_TOPIC);
            sender = Executors.newSingleThreadExecutor();

            producer = new KafkaProducer<>(properties);

            vertx.eventBus().consumer(busAddress, (Message<JsonObject> message) -> sendMessage(message));

            Runtime.getRuntime().addShutdownHook(new Thread() {
                // try to close the producer as gracefully as possible
                public void run() {
                    shutdown();
                }
            });

            startedResult.complete();
        } catch (Exception ex) {
            logger.error("Message producer initialization failed", ex);
            startedResult.fail(ex);
        }
    }

    /**
     * Send a message to kafka, defaulting to the pre-configured topic when none is supplied.
     *
     * @param message the message to send
     */
    public void sendMessage(Message<JsonObject> message) {
        JsonObject payload = message.body();
        ProducerRecord<String, String> record;

        if (!payload.containsKey(KafkaPublisher.TYPE_FIELD)) {
            logger.error("Invalid message sent missing {} field, msg: {}", KafkaPublisher.TYPE_FIELD, payload);
            return;
        }

        KafkaPublisher.MessageType type = KafkaPublisher.MessageType.fromInt(payload.getInteger(KafkaPublisher.TYPE_FIELD));
        String value = payload.getString(ConfigConstants.VALUE_FIELD);
        switch (type) {
            case SIMPLE:
                record = new ProducerRecord<>(defaultTopic, value);
                break;
            case CUSTOM_TOPIC:
                record = new ProducerRecord<>(payload.getString(ConfigConstants.TOPIC_FIELD), value);
                break;
            case CUSTOM_KEY:
                record = new ProducerRecord<>(payload.getString(ConfigConstants.TOPIC_FIELD),
                        payload.getString(ConfigConstants.KEY_FIELD),
                        value);
                break;
            case CUSTOM_PARTITION:
                record = new ProducerRecord<>(payload.getString(ConfigConstants.TOPIC_FIELD),
                        payload.getInteger(ConfigConstants.PARTITION_FIELD),
                        payload.getString(ConfigConstants.KEY_FIELD),
                        value);
                break;
            default:
                String error = String.format("Invalid type submitted: %s, message being thrown away: %s",
                        type, value);
                logger.error(error);
                message.fail(-1, error);
                return;
        }

        sender.submit(() ->
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    logger.error("Failed to send message to kafka ex: ", exception);
                    vertx.runOnContext(aVoid -> message.fail(-1, exception.getMessage()));
                } else {
                    // reply with nothing for now
                    vertx.runOnContext(aVoid -> message.reply(new JsonObject()));
                }
            })
        );
    }

    private Properties populateKafkaConfig() {
        Properties properties = new Properties();

        properties.put(ConfigConstants.BOOTSTRAP_SERVERS, getRequiredConfig(ConfigConstants.BOOTSTRAP_SERVERS));

        // default serializer to the String one
        String defaultSerializer = producerConfig.getString(ConfigConstants.SERIALIZER_CLASS,
                ConfigConstants.DEFAULT_SERIALIZER_CLASS);

        properties.put(ConfigConstants.SERIALIZER_CLASS, defaultSerializer);
        properties.put(ConfigConstants.KEY_SERIALIZER_CLASS,
                producerConfig.getString(ConfigConstants.KEY_SERIALIZER_CLASS, defaultSerializer));
        properties.put(ConfigConstants.VALUE_SERIALIZER_CLASS,
                producerConfig.getString(ConfigConstants.VALUE_SERIALIZER_CLASS, defaultSerializer));
        properties.put(ConfigConstants.PRODUCER_TYPE, producerConfig.getString(ConfigConstants.PRODUCER_TYPE, "async"));
        properties.put(ConfigConstants.MAX_BLOCK_MS, producerConfig.getLong(ConfigConstants.MAX_BLOCK_MS, 60000L));
        return properties;
    }

    private String getRequiredConfig(String key) {
        String value = producerConfig.getString(key, null);

        if (null == value) {
            throw new IllegalArgumentException(String.format("Required config value not found key: %s", key));
        }
        return value;
    }

    private void shutdown() {
        try {
            if (producer != null) {
                producer.close();
                producer = null;
            }

            if (sender != null) {
                sender.shutdown();
                sender = null;
            }
        } catch (Exception ex) {
            logger.error("Failed to close producer", ex);
        }
    }
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
[![Build Status](https://travis-ci.org/cyngn/vertx-kafka.svg?branch=master)](https://travis-ci.org/cyngn/vertx-kafka)

# Vert.x Kafka

The Vert.x Kafka library allows asynchronous publishing and receiving of messages on Kafka topics through the vert.x event bus.

#### To use this library you must have kafka and zookeeper up and running. Follow the instructions in the [Kafka quick start guide](http://kafka.apache.org/documentation.html#quickstart)

This is a multi-threaded worker library that consumes kafka messages and then re-broadcasts them on an address on the vert.x event bus.

## Getting Started

Add a dependency to vertx-kafka:

```xml
<dependency>
    <groupId>com.cyngn.vertx</groupId>
    <artifactId>vertx-kafka</artifactId>
    <version>3.3.0-SNAPSHOT</version>
</dependency>
```

| vertx-kafka | vert.x | kafka |
| ------- | --------------:| --------: |
| 3.3.0-SNAPSHOT | 3.3.0-SNAPSHOT | 0.9.0 |
| 0.4.1 | 3.1.0 | 0.9.0 |
## Consumer

Listening for messages coming from a kafka broker.

### Configuration

```json
{
    "zookeeper.connect" : "<zookeeper connect string>",
    "group.id" : "<consumer group name>",
    "bootstrap.servers" : "<kafka broker list>",
    "backoff.increment.ms" : "<backoff in ms>",
    "autooffset.reset" : "<offset reset strategy>",
    "topics" : ["<topic1>", "<topic2>"],
    "eventbus.address" : "<event bus address to publish on>",
    "consumer.poll.interval.ms" : <poll interval in ms>
}
```

For example:

```json
{
    "zookeeper.connect" : "localhost:2181",
    "group.id" : "testGroup",
    "bootstrap.servers" : "localhost:9092",
    "backoff.increment.ms" : "100",
    "autooffset.reset" : "smallest",
    "topics" : ["testTopic"],
    "eventbus.address" : "kafka.to.vertx.bridge",
    "consumer.poll.interval.ms" : 1000
}
```

Field breakdown:

* `zookeeper.connect` a zookeeper connection string of the form hostname1:port1,hostname2:port2,hostname3:port3/chroot/path used with your kafka clusters
* `group.id` the kafka consumer group this consumer will join
* `bootstrap.servers` the list of initial kafka hosts to connect to
* `backoff.increment.ms` backoff interval, in milliseconds, for contacting the broker when no messages are available
* `autooffset.reset` how to reset the offset
* `topics` the kafka topics to listen to
* `eventbus.address` the vert.x address to publish messages onto when received from kafka
* `consumer.poll.interval.ms` how often to try and consume messages

For a deeper look at kafka configuration parameters check [this](http://kafka.apache.org/documentation.html) page out.

### Usage

You should only need one consumer per application.

#### Deploy the verticle in your server

```java
vertx = Vertx.vertx();

// sample config
JsonObject consumerConfig = new JsonObject();
consumerConfig.put(ConfigConstants.GROUP_ID, "testGroup");
consumerConfig.put(ConfigConstants.BOOTSTRAP_SERVERS, "localhost:9092");
List<String> topics = new ArrayList<>();
topics.add("testTopic");
consumerConfig.put("topics", new JsonArray(topics));

deployKafka(consumerConfig);

public void deployKafka(JsonObject config) {
    // use your vert.x reference to deploy the consumer verticle
    vertx.deployVerticle(SimpleConsumer.class.getName(),
        new DeploymentOptions().setConfig(config),
        deploy -> {
            if (deploy.failed()) {
                System.err.println(String.format("Failed to start kafka consumer verticle, ex: %s", deploy.cause()));
                vertx.close();
                return;
            }
            System.out.println("kafka consumer verticle started");
        }
    );
}
```
#### Listen for messages

```java
vertx.eventBus().<JsonObject>consumer(SimpleConsumer.EVENTBUS_DEFAULT_ADDRESS,
    message -> {
        System.out.println(String.format("got message: %s", message.body()));
        // message handling code
        KafkaEvent event = new KafkaEvent(message.body());
    });
```

#### Consumer Errors

You can listen on the address `kafka.consumer.error` for errors from the kafka consumer.
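A sketch of watching that address (the constant `ConfigConstants.CONSUMER_ERROR_TOPIC` resolves to `kafka.consumer.error`; the handler body is illustrative):

```java
vertx.eventBus().consumer(ConfigConstants.CONSUMER_ERROR_TOPIC, error -> {
    // errors arrive as plain strings of the form "<context> - error: <details>"
    System.err.println("kafka consumer error: " + error.body());
});
```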
## Producer

Send a message to a kafka cluster on a predefined topic.

### Configuration

```json
{
    "serializer.class" : "<serializer class>",
    "key.serializer" : "<key serializer class>",
    "value.serializer" : "<value serializer class>",
    "bootstrap.servers" : "<kafka broker list>",
    "default.topic" : "<default topic to send to>",
    "eventbus.address" : "<event bus address to listen on>",
    "max.block.ms" : <max blocking time in ms>
}
```

For example:

```json
{
    "serializer.class" : "org.apache.kafka.common.serialization.StringSerializer",
    "bootstrap.servers" : "localhost:9092",
    "default.topic" : "testTopic"
}
```

* `serializer.class` The serializer class for messages
* `key.serializer` The serializer class for keys, defaults to the serializer.class if not set
* `value.serializer` The serializer class for values, defaults to the serializer.class if not set
* `bootstrap.servers` The socket connections for sending the actual data will be established based on the broker information returned in the metadata. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers.
* `default.topic` The default topic in kafka to send to
* `eventbus.address` The address to listen to on the event bus, defaults to 'kafka.message.publisher'
* `max.block.ms` How long the sender will block waiting for metadata before timing out, in milliseconds

For a deeper look at kafka configuration parameters check [this](https://kafka.apache.org/08/configuration.html) page out.

### Usage

You should only need one producer per application.

#### Deploy the verticle in your server

```java
vertx = Vertx.vertx();

// sample config
JsonObject producerConfig = new JsonObject();
producerConfig.put("bootstrap.servers", "localhost:9092");
producerConfig.put("serializer.class", "org.apache.kafka.common.serialization.StringSerializer");
producerConfig.put("default.topic", "testTopic");

deployKafka(producerConfig);

public void deployKafka(JsonObject config) {
    // use your vert.x reference to deploy the producer verticle
    vertx.deployVerticle(MessageProducer.class.getName(),
        new DeploymentOptions().setConfig(config),
        deploy -> {
            if (deploy.failed()) {
                System.err.println(String.format("Failed to start kafka producer verticle, ex: %s", deploy.cause()));
                vertx.close();
                return;
            }
            System.out.println("kafka producer verticle started");
        });
}
```
#### Send message to kafka topic

```java
KafkaPublisher publisher = new KafkaPublisher(vertx.eventBus());

// send to the default topic
publisher.send("a test message on the default topic");
// send to a specific topic
publisher.send("SomeSpecialTopic", "a test message on a specific topic");
// send to a specific topic with a custom key
publisher.send("SomeSpecialTopic", "aUserId", "a test message with a custom key");
// send to a specific topic and partition
publisher.send("SomeSpecialTopic", "", 5, "a test message to a specific partition");
```
#### Producer Errors

You can listen on the address `kafka.producer.error` for errors from the kafka producer.
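One note on the partition-pinning overload shown above: the partition index must exist on the topic. A sketch of deriving a stable partition from a key, assuming the 8-partition topic created in the test setup below:

```java
KafkaPublisher publisher = new KafkaPublisher(vertx.eventBus());

String userId = "aUserId";
int partitions = 8; // must match the partition count the topic was created with
int partition = Math.abs(userId.hashCode() % partitions);

// every message for this user lands on the same partition, preserving per-user ordering
publisher.send("SomeSpecialTopic", userId, partition, "pinned to partition " + partition);
```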
213 | 214 | 215 | ### Test setup 216 | 217 | * cd [yourKafkaInstallDir] 218 | * bin/zookeeper-server-start.sh config/zookeeper.properties 219 | * bin/kafka-server-start.sh config/server.properties 220 | * bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 8 --topic [yourTestTopic] 221 | * bin/kafka-console-producer.sh --broker-list localhost:9092 --topic [yourTestTopic] 222 | * bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic [yourTestTopic] 223 | -------------------------------------------------------------------------------- /src/main/java/com/cyngn/kafka/consume/SimpleConsumer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015 Cyanogen Inc. 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | package com.cyngn.kafka.consume; 16 | 17 | import com.cyngn.kafka.config.ConfigConstants; 18 | import io.vertx.core.AbstractVerticle; 19 | import io.vertx.core.Future; 20 | import io.vertx.core.eventbus.EventBus; 21 | import io.vertx.core.json.JsonArray; 22 | import io.vertx.core.json.JsonObject; 23 | import org.apache.kafka.clients.consumer.ConsumerRecord; 24 | import org.apache.kafka.clients.consumer.ConsumerRecords; 25 | import org.apache.kafka.clients.consumer.KafkaConsumer; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | import java.util.ArrayList; 30 | import java.util.Iterator; 31 | import java.util.List; 32 | import java.util.Properties; 33 | import java.util.concurrent.ExecutorService; 34 | import java.util.concurrent.Executors; 35 | import java.util.concurrent.atomic.AtomicBoolean; 36 | 37 | /** 38 | * Verticle to listen for kafka messages and republish them on the vertx event bus. Uses the default auto commit 39 | * functionality to commit offsets in the background. 
 *
 * to build:
 * ./gradlew clean build
 *
 * @author truelove@cyngn.com (Jeremy Truelove) 1/28/15
 */
public class SimpleConsumer extends AbstractVerticle {
    public static final String EVENTBUS_DEFAULT_ADDRESS = "kafka.message.consumer";
    public static final int DEFAULT_POLL_MS = 100;
    private String busAddress;
    private static final Logger logger = LoggerFactory.getLogger(SimpleConsumer.class);
    private EventBus bus;

    private AtomicBoolean running;
    private KafkaConsumer<String, String> consumer;
    private List<String> topics;
    private JsonObject verticleConfig;
    private ExecutorService backgroundConsumer;
    private int pollIntervalMs;

    @Override
    public void start(final Future<Void> startedResult) {

        try {
            bus = vertx.eventBus();
            running = new AtomicBoolean(true);

            verticleConfig = config();
            Properties kafkaConfig = populateKafkaConfig(verticleConfig);
            JsonArray topicConfig = verticleConfig.getJsonArray(ConfigConstants.TOPICS);

            busAddress = verticleConfig.getString(ConfigConstants.EVENTBUS_ADDRESS, EVENTBUS_DEFAULT_ADDRESS);
            pollIntervalMs = verticleConfig.getInteger(ConfigConstants.CONSUMER_POLL_INTERVAL_MS, DEFAULT_POLL_MS);

            Runtime.getRuntime().addShutdownHook(new Thread() {
                // try to shut the consumer down as gracefully as possible
                public void run() {
                    shutdown();
                }
            });

            backgroundConsumer = Executors.newSingleThreadExecutor();
            backgroundConsumer.submit(() -> {
                try {
                    consumer = new KafkaConsumer<>(kafkaConfig);

                    topics = new ArrayList<>();
                    for (int i = 0; i < topicConfig.size(); i++) {
                        String topic = topicConfig.getString(i);
                        topics.add(topic);
                        logger.info("Subscribing to topic " + topic);
                    }

                    // signal success before we enter the read loop
                    startedResult.complete();
                    consume();
                } catch (Exception ex) {
                    String error = "Failed to startup";
                    logger.error(error, ex);
                    bus.send(ConfigConstants.CONSUMER_ERROR_TOPIC, getErrorString(error, ex.getMessage()));
                    startedResult.fail(ex);
                }
            });
        } catch (Exception ex) {
            String error = "Failed to startup";
            logger.error(error, ex);
            bus.send(ConfigConstants.CONSUMER_ERROR_TOPIC, getErrorString(error, ex.getMessage()));
            startedResult.fail(ex);
        }
    }

    private String getErrorString(String error, String errorMessage) {
        return String.format("%s - error: %s", error, errorMessage);
    }

    /**
     * Handles looping and consuming
     */
    private void consume() {
        consumer.subscribe(topics);
        while (running.get()) {
            try {
                ConsumerRecords<String, String> records = consumer.poll(pollIntervalMs);

                // there were no messages
                if (records == null) { continue; }

                Iterator<ConsumerRecord<String, String>> iterator = records.iterator();

                // roll through and put each kafka message on the event bus
                while (iterator.hasNext()) { sendMessage(iterator.next()); }

            } catch (Exception ex) {
                String error = "Error consuming messages from kafka";
                logger.error(error, ex);
                bus.send(ConfigConstants.CONSUMER_ERROR_TOPIC, getErrorString(error, ex.getMessage()));
            }
        }
    }

    @Override
    public void stop() { running.compareAndSet(true, false); }
    /**
     * Send the inbound message to the event bus consumer.
     *
     * @param record the kafka event
     */
    private void sendMessage(ConsumerRecord<String, String> record) {
        try { bus.send(busAddress, KafkaEvent.createEventForBus(record)); }
        catch (Exception ex) {
            String error = String.format("Error sending messages on event bus - record: %s", record.toString());
            logger.error(error, ex);
            bus.send(ConfigConstants.CONSUMER_ERROR_TOPIC, getErrorString(error, ex.getMessage()));
        }
    }

    private Properties populateKafkaConfig(JsonObject config) {
        Properties consumerConfig = new Properties();
        consumerConfig.put(ConfigConstants.ZK_CONNECT, config.getString(ConfigConstants.ZK_CONNECT, "localhost:2181"));
        consumerConfig.put(ConfigConstants.BACKOFF_INCREMENT_MS,
                config.getString(ConfigConstants.BACKOFF_INCREMENT_MS, "100"));
        consumerConfig.put(ConfigConstants.AUTO_OFFSET_RESET,
                config.getString(ConfigConstants.AUTO_OFFSET_RESET, "smallest"));

        consumerConfig.put(ConfigConstants.BOOTSTRAP_SERVERS, getRequiredConfig(ConfigConstants.BOOTSTRAP_SERVERS));

        consumerConfig.put(ConfigConstants.KEY_DESERIALIZER_CLASS,
                config.getString(ConfigConstants.KEY_DESERIALIZER_CLASS, ConfigConstants.DEFAULT_DESERIALIZER_CLASS));
        consumerConfig.put(ConfigConstants.VALUE_DESERIALIZER_CLASS,
                config.getString(ConfigConstants.VALUE_DESERIALIZER_CLASS, ConfigConstants.DEFAULT_DESERIALIZER_CLASS));
        consumerConfig.put(ConfigConstants.GROUP_ID, getRequiredConfig(ConfigConstants.GROUP_ID));
        return consumerConfig;
    }

    private String getRequiredConfig(String key) {
        String value = verticleConfig.getString(key, null);

        if (null == value) {
            throw new IllegalArgumentException(String.format("Required config value not found key: %s", key));
        }
        return value;
    }

    /**
     * Handle stopping the consumer.
     */
    private void shutdown() {
        running.compareAndSet(true, false);
        try {
            if (consumer != null) {
                try {
                    consumer.unsubscribe();
                    consumer.close();
                    consumer = null;
                } catch (Exception ex) {
                    // ignore failures while tearing the consumer down
                }
            }

            if (backgroundConsumer != null) {
                backgroundConsumer.shutdown();
                backgroundConsumer = null;
            }
        } catch (Exception ex) {
            logger.error("Failed to close consumer", ex);
        }
    }
}
--------------------------------------------------------------------------------