├── .gitignore ├── project └── build.properties ├── version.sbt ├── rx-kafka-core └── src │ ├── main │ ├── scala │ │ └── io │ │ │ └── bfil │ │ │ └── rx │ │ │ └── kafka │ │ │ ├── serialization │ │ │ ├── Serialization.scala │ │ │ └── JavaSerialization.scala │ │ │ ├── messaging │ │ │ ├── Topic.scala │ │ │ └── SerializableTopic.scala │ │ │ ├── KafkaObserver.scala │ │ │ ├── config │ │ │ ├── ConfigProvider.scala │ │ │ ├── PropertiesBuilder.scala │ │ │ ├── ConsumerConfig.scala │ │ │ └── ProducerConfig.scala │ │ │ ├── producer │ │ │ ├── KafkaTopicProducer.scala │ │ │ ├── AbstractKafkaProducer.scala │ │ │ └── KafkaProducer.scala │ │ │ ├── consumer │ │ │ ├── AbstractKafkaConsumer.scala │ │ │ ├── KafkaTopicConsumer.scala │ │ │ └── KafkaConsumer.scala │ │ │ └── KafkaObservable.scala │ └── resources │ │ └── reference.conf │ └── test │ ├── scala │ └── io │ │ └── bfil │ │ └── rx │ │ └── kafka │ │ ├── testkit │ │ └── KafkaSpec.scala │ │ └── RxKafkaSpec.scala │ └── resources │ └── log4j.properties ├── CHANGELOG ├── rx-kafka-json4s └── src │ ├── main │ └── scala │ │ └── io │ │ └── bfil │ │ └── rx │ │ └── kafka │ │ ├── messaging │ │ └── JsonTopic.scala │ │ └── serialization │ │ └── Json4sSerialization.scala │ └── test │ ├── scala │ └── io │ │ └── bfil │ │ └── rx │ │ └── kafka │ │ ├── testkit │ │ └── KafkaSpec.scala │ │ └── RxKafkaJson4sSpec.scala │ └── resources │ └── log4j.properties └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | logs 3 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.15 -------------------------------------------------------------------------------- /version.sbt: -------------------------------------------------------------------------------- 1 | version in ThisBuild := "0.2.0" -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/serialization/Serialization.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.serialization 2 | 3 | trait Serializer[-T] { 4 | def toBytes(obj: T): Array[Byte] 5 | } 6 | 7 | trait Deserializer[+T] { 8 | def fromBytes(bytes: Array[Byte]): T 9 | } -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | Version 0.2.0 (2017-12-26) 2 | -------------------------- 3 | - renamed organization and packages from com.bfil to io.bfil 4 | - published the library to Bintray 5 | - updated Kafka to 0.10.2.1 6 | 7 | 8 | Version 0.1.0 (2015-03-15) 9 | -------------------------- 10 | first public release 11 | -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/messaging/Topic.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.messaging 2 | 3 | import io.bfil.rx.kafka.serialization.{Deserializer, Serializer} 4 | 5 | trait Topic[T] { 6 | val name: String 7 | type Message = T 8 | val serializer: Serializer[T] 9 | val deserializer: Deserializer[T] 10 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/KafkaObserver.scala: 
-------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka 2 | 3 | import org.apache.kafka.clients.producer.KafkaProducer 4 | import rx.lang.scala.Observer 5 | 6 | object KafkaObserver { 7 | def apply[T](producer: KafkaProducer[String, Array[Byte]])(f: T => Unit) = 8 | Observer(f, ex => throw ex, () => producer.close) 9 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/messaging/SerializableTopic.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.messaging 2 | 3 | import io.bfil.rx.kafka.serialization.{JavaDeserializer, JavaSerializer} 4 | 5 | abstract class SerializableTopic[T](val name: String) extends Topic[T] { 6 | val serializer = new JavaSerializer 7 | val deserializer = new JavaDeserializer 8 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | consumer { 3 | group.id = "default" 4 | zookeeper.connect = "localhost:2181" 5 | } 6 | producer { 7 | bootstrap.servers = "localhost:9092" 8 | key.serializer = "org.apache.kafka.common.serialization.StringSerializer" 9 | value.serializer = "org.apache.kafka.common.serialization.ByteArraySerializer" 10 | } 11 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/config/ConfigProvider.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.config 2 | 3 | import com.typesafe.config.ConfigFactory 4 | 5 | trait ConfigProvider { 6 | protected val config = ConfigFactory.load 7 | protected lazy val defaultProducerConfig = config.getConfig("kafka.producer") 8 | protected lazy val defaultConsumerConfig = config.getConfig("kafka.consumer") 9 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/producer/KafkaTopicProducer.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.producer 2 | 3 | import io.bfil.rx.kafka.messaging.Topic 4 | 5 | import rx.lang.scala.Observer 6 | 7 | class KafkaTopicProducer[T](topic: Topic[T], protected val config: java.util.Properties) extends AbstractKafkaProducer { 8 | def publish(message: T) = super.publish(topic, message) 9 | def toObserver: Observer[T] = super.toObserver(t => publish(t)) 10 | } -------------------------------------------------------------------------------- /rx-kafka-json4s/src/main/scala/io/bfil/rx/kafka/messaging/JsonTopic.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.messaging 2 | 3 | import org.json4s.Formats 4 | 5 | import io.bfil.rx.kafka.serialization.{Json4sDeserializer, Json4sSerializer} 6 | 7 | abstract class JsonTopic[T <: AnyRef : Manifest](val name: String)(implicit formats: Formats) extends Topic[T] { 8 | val messageManifest = manifest[T] 9 | val serializer = new Json4sSerializer[T] 10 | val deserializer = new Json4sDeserializer[T] 11 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/consumer/AbstractKafkaConsumer.scala: 
-------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.consumer 2 | 3 | import kafka.consumer.Consumer 4 | import rx.lang.scala.Observable 5 | 6 | abstract class AbstractKafkaConsumer[T] { 7 | protected val config: kafka.consumer.ConsumerConfig 8 | protected val connector = Consumer.create(config) 9 | def toObservable(): Observable[T] 10 | def subscribe(f: T => Unit) = toObservable.subscribe(f) 11 | def close = connector.shutdown 12 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/KafkaObservable.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka 2 | 3 | import rx.lang.scala.Observable 4 | import rx.lang.scala.schedulers.NewThreadScheduler 5 | 6 | object KafkaObservable { 7 | def apply[T](iterator: Iterator[T]): Observable[T] = 8 | Observable[T] { subscriber => 9 | NewThreadScheduler().createWorker.scheduleRec { 10 | if (iterator.hasNext && !subscriber.isUnsubscribed) subscriber.onNext(iterator.next) 11 | else subscriber.onCompleted 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/consumer/KafkaTopicConsumer.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.consumer 2 | 3 | import io.bfil.rx.kafka.KafkaObservable 4 | import io.bfil.rx.kafka.messaging.Topic 5 | 6 | class KafkaTopicConsumer[T](topic: Topic[T], protected val config: kafka.consumer.ConsumerConfig) extends AbstractKafkaConsumer[T] { 7 | private val stream = connector.createMessageStreams(Map(topic.name -> 1)).get(topic.name).get(0) 8 | val iterator = stream.iterator.map(m => topic.deserializer.fromBytes(m.message)) 9 | def toObservable() = KafkaObservable(iterator) 10 | } -------------------------------------------------------------------------------- /rx-kafka-json4s/src/main/scala/io/bfil/rx/kafka/serialization/Json4sSerialization.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.serialization 2 | 3 | import org.json4s.{DefaultFormats, Formats} 4 | import org.json4s.native.Serialization 5 | 6 | class Json4sSerializer[T <: AnyRef](implicit formats: Formats) extends Serializer[T] { 7 | def toBytes(obj: T): Array[Byte] = Serialization.write(obj).getBytes 8 | } 9 | 10 | class Json4sDeserializer[T: Manifest](implicit formats: Formats) extends Deserializer[T] { 11 | def fromBytes(bytes: Array[Byte]): T = Serialization.read[T](new String(bytes)) 12 | } 13 | 14 | trait JsonSupport { 15 | implicit val json4sFormats = DefaultFormats.lossless 16 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/producer/AbstractKafkaProducer.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.producer 2 | 3 | import io.bfil.rx.kafka.KafkaObserver 4 | import io.bfil.rx.kafka.messaging.Topic 5 | 6 | import org.apache.kafka.clients.producer.ProducerRecord 7 | 8 | abstract class AbstractKafkaProducer { 9 | protected val config: java.util.Properties 10 | protected val producer = new org.apache.kafka.clients.producer.KafkaProducer[String, Array[Byte]](config) 11 | protected def publish[T](topic: Topic[T], message: T) = producer.send(new ProducerRecord[String, Array[Byte]](topic.name, 
topic.serializer.toBytes(message))) 12 | protected def toObserver[T](f: T => Unit) = KafkaObserver(producer)(f) 13 | def close = producer.close 14 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/serialization/JavaSerialization.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.serialization 2 | 3 | import java.io.{ByteArrayInputStream, ObjectInputStream, ObjectOutputStream} 4 | 5 | import org.apache.commons.io.output.ByteArrayOutputStream 6 | 7 | class JavaSerializer extends Serializer[Any] { 8 | def toBytes(obj: Any): Array[Byte] = { 9 | val bos = new ByteArrayOutputStream 10 | val out = new ObjectOutputStream(bos) 11 | out.writeObject(obj) 12 | out.close() 13 | bos.toByteArray 14 | } 15 | } 16 | 17 | class JavaDeserializer[T <: Any] extends Deserializer[T] { 18 | def fromBytes(bytes: Array[Byte]): T = { 19 | val bais = new ByteArrayInputStream(bytes) 20 | val ois = new ObjectInputStream(bais) 21 | val result = ois.readObject 22 | result.asInstanceOf[T] 23 | } 24 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/producer/KafkaProducer.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.producer 2 | 3 | import io.bfil.rx.kafka.config.{ConfigProvider, ProducerConfig} 4 | import io.bfil.rx.kafka.messaging.Topic 5 | import com.typesafe.config.Config 6 | 7 | import rx.lang.scala.Observer 8 | 9 | class KafkaProducer(protected val config: java.util.Properties) extends AbstractKafkaProducer { 10 | override def publish[T](topic: Topic[T], message: T) = super.publish(topic, message) 11 | def toObserver[T]: Observer[(Topic[T], T)] = super.toObserver({ case (topic, message) => publish(topic, message) }) 12 | } 13 | 14 | object KafkaProducer extends ConfigProvider { 15 | def apply(config: Config = defaultProducerConfig) = new KafkaProducer(ProducerConfig(config)) 16 | def apply[T](topic: Topic[T]) = new KafkaTopicProducer(topic, ProducerConfig(defaultProducerConfig)) 17 | def apply[T](topic: Topic[T], config: Config) = new KafkaTopicProducer(topic, ProducerConfig(config)) 18 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/test/scala/io/bfil/rx/kafka/testkit/KafkaSpec.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.testkit 2 | 3 | import scala.concurrent.{Await, Future} 4 | import scala.concurrent.duration.DurationInt 5 | import scala.concurrent.ExecutionContext.Implicits.global 6 | 7 | import org.apache.log4j.Logger 8 | import org.specs2.mutable.{BeforeAfter, Specification} 9 | 10 | import rx.lang.scala.Observable 11 | 12 | trait KafkaSpec extends Specification { 13 | 14 | implicit val log = Logger.getRootLogger 15 | 16 | def logMessage[T](t: T): T = { 17 | log.info(t) 18 | t 19 | } 20 | 21 | val receiveTimeout = 2 seconds 22 | 23 | def waitForMessages[T](messages: => List[T]) = Await.result(Future(messages), receiveTimeout) 24 | 25 | def receiveMessages[T](iterator: Iterator[T], numMessages: Int): List[T] = 26 | waitForMessages(iterator.map(logMessage).take(numMessages).toList) 27 | def receiveMessages[T](observable: Observable[T], numMessages: Int): List[T] = 28 | waitForMessages(observable.map(logMessage).take(numMessages).toBlocking.toList) 29 | 30 | } 
-------------------------------------------------------------------------------- /rx-kafka-json4s/src/test/scala/io/bfil/rx/kafka/testkit/KafkaSpec.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.testkit 2 | 3 | import scala.concurrent.{Await, Future} 4 | import scala.concurrent.duration.DurationInt 5 | import scala.concurrent.ExecutionContext.Implicits.global 6 | 7 | import org.apache.log4j.Logger 8 | import org.specs2.mutable.{BeforeAfter, Specification} 9 | 10 | import rx.lang.scala.Observable 11 | 12 | trait KafkaSpec extends Specification { 13 | 14 | implicit val log = Logger.getRootLogger 15 | 16 | def logMessage[T](t: T): T = { 17 | log.info(t) 18 | t 19 | } 20 | 21 | val receiveTimeout = 2 seconds 22 | 23 | def waitForMessages[T](messages: => List[T]) = Await.result(Future(messages), receiveTimeout) 24 | 25 | def receiveMessages[T](iterator: Iterator[T], numMessages: Int): List[T] = 26 | waitForMessages(iterator.map(logMessage).take(numMessages).toList) 27 | def receiveMessages[T](observable: Observable[T], numMessages: Int): List[T] = 28 | waitForMessages(observable.map(logMessage).take(numMessages).toBlocking.toList) 29 | 30 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/consumer/KafkaConsumer.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.consumer 2 | 3 | import io.bfil.rx.kafka.KafkaObservable 4 | import io.bfil.rx.kafka.config.{ConfigProvider, ConsumerConfig} 5 | import io.bfil.rx.kafka.messaging.Topic 6 | import com.typesafe.config.Config 7 | 8 | import rx.lang.scala.Observable 9 | 10 | class KafkaConsumer(topics: List[Topic[_]], protected val config: kafka.consumer.ConsumerConfig) extends AbstractKafkaConsumer[Any] { 11 | private val topicsMap: Map[String, Topic[_]] = topics.map(t => t.name -> t).toMap 12 | private val streams = connector.createMessageStreams(topics.map(_.name -> 1).toMap) 13 | val iterators = streams.map { 14 | case (topicName, stream) => 15 | val topic = topicsMap.get(topicName).get 16 | val iterator = stream(0).iterator.map { m => 17 | topic.deserializer.fromBytes(m.message) 18 | } 19 | (topicName, iterator) 20 | } 21 | def toObservable(): Observable[Any] = Observable.from(iterators.values.map(KafkaObservable.apply)).flatten 22 | } 23 | 24 | object KafkaConsumer extends ConfigProvider { 25 | def apply[T](topic: Topic[T], config: Config = defaultConsumerConfig) = new KafkaTopicConsumer(topic, ConsumerConfig(config)) 26 | def apply(topics: List[Topic[_]]) = new KafkaConsumer(topics, ConsumerConfig(defaultConsumerConfig)) 27 | def apply(topics: List[Topic[_]], config: Config) = new KafkaConsumer(topics, ConsumerConfig(config)) 28 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/config/PropertiesBuilder.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.config 2 | 3 | import java.util.Properties 4 | 5 | import scala.concurrent.duration.{Duration, FiniteDuration} 6 | 7 | import com.typesafe.config.Config 8 | 9 | sealed trait ConfigType 10 | object ConfigTypes { 11 | case object Int extends ConfigType 12 | case object Bytes extends ConfigType 13 | case object String extends ConfigType 14 | case object Boolean extends ConfigType 15 | case object Duration extends ConfigType 16 | case object 
FiniteDuration extends ConfigType 17 | } 18 | 19 | class PropertiesBuilder(config: Config) { 20 | 21 | implicit class RichConfig(config: Config) { 22 | def getDuration(key: String): Duration = { 23 | val stringifiedDuration = config.getString(key) 24 | val duration = Duration.create(if(stringifiedDuration == "infinite") "Inf" else stringifiedDuration) 25 | if (!duration.isFinite || duration.toMillis <= 0) Duration.Inf 26 | else duration 27 | } 28 | def getFiniteDuration(key: String): FiniteDuration = { 29 | val duration = getDuration(key) 30 | if (duration.isFinite) FiniteDuration(duration.length, duration.unit) 31 | else throw new IllegalArgumentException(s"$key must be finite") 32 | } 33 | } 34 | 35 | def build(configs: Map[String, (String, ConfigType)]): Properties = { 36 | val props = new Properties() 37 | configs.map { 38 | case (propKey, (configKey, configType)) => 39 | if (config.hasPath(configKey)) { 40 | configType match { 41 | case ConfigTypes.Int => props.put(propKey, config.getInt(configKey).toString) 42 | case ConfigTypes.Bytes => props.put(propKey, config.getBytes(configKey).toString) 43 | case ConfigTypes.String => props.put(propKey, config.getString(configKey)) 44 | case ConfigTypes.Boolean => props.put(propKey, config.getBoolean(configKey).toString) 45 | case ConfigTypes.Duration => { 46 | val d = config.getDuration(configKey) 47 | props.put(propKey, if (d.isFinite) d.toMillis.toString else "-1") 48 | } 49 | case ConfigTypes.FiniteDuration => props.put(propKey, config.getFiniteDuration(configKey).toMillis.toString) 50 | } 51 | } 52 | } 53 | props 54 | } 55 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/config/ConsumerConfig.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.config 2 | 3 | import com.typesafe.config.Config 4 | 5 | import kafka.consumer.{ConsumerConfig => KafkaConsumerConfig} 6 | 7 | object ConsumerConfig { 8 | def apply(config: Config): KafkaConsumerConfig = { 9 | 10 | val props = new PropertiesBuilder(config).build(Map( 11 | "group.id" -> ("group.id", ConfigTypes.String), 12 | "zookeeper.connect" -> ("zookeeper.connect", ConfigTypes.String), 13 | "consumer.id" -> ("consumer.id", ConfigTypes.String), 14 | "socket.timeout.ms" -> ("socket.timeout", ConfigTypes.FiniteDuration), 15 | "socket.receive.buffer.bytes" -> ("socket.receive.buffer", ConfigTypes.Bytes), 16 | "fetch.message.max.bytes" -> ("fetch.message.max", ConfigTypes.Bytes), 17 | "num.consumer.fetchers" -> ("num.consumer.fetchers", ConfigTypes.Int), 18 | "auto.commit.enable" -> ("auto.commit.enable", ConfigTypes.Boolean), 19 | "auto.commit.interval.ms" -> ("auto.commit.interval", ConfigTypes.FiniteDuration), 20 | "queued.max.message.chunks" -> ("queued.max.message.chunks", ConfigTypes.Int), 21 | "rebalance.max.retries" -> ("rebalance.max.retries", ConfigTypes.Int), 22 | "fetch.min.bytes" -> ("fetch.min", ConfigTypes.Bytes), 23 | "fetch.wait.max.ms" -> ("fetch.wait.max", ConfigTypes.FiniteDuration), 24 | "rebalance.backoff.ms" -> ("rebalance.backoff", ConfigTypes.FiniteDuration), 25 | "refresh.leader.backoff.ms" -> ("refresh.leader.backoff", ConfigTypes.FiniteDuration), 26 | "auto.offset.reset" -> ("auto.offset.reset", ConfigTypes.String), 27 | "consumer.timeout.ms" -> ("consumer.timeout", ConfigTypes.Duration), 28 | "exclude.internal.topics" -> ("exclude.internal.topics", ConfigTypes.Boolean), 29 | "client.id" -> ("client.id", ConfigTypes.String), 30 | 
"zookeeper.session.timeout.ms" -> ("zookeeper.session.timeout", ConfigTypes.FiniteDuration), 31 | "zookeeper.connection.timeout.ms" -> ("zookeeper.connection.timeout", ConfigTypes.FiniteDuration), 32 | "zookeeper.sync.time.ms" -> ("zookeeper.sync.time", ConfigTypes.FiniteDuration), 33 | "offsets.storage" -> ("offsets.storage", ConfigTypes.String), 34 | "offsets.channel.backoff.ms" -> ("offsets.channel.backoff", ConfigTypes.FiniteDuration), 35 | "offsets.channel.socket.timeout.ms" -> ("offsets.channel.socket.timeout", ConfigTypes.FiniteDuration), 36 | "offsets.commit.max.retries" -> ("offsets.commit.max.retries", ConfigTypes.Int), 37 | "dual.commit.enabled" -> ("dual.commit.enabled", ConfigTypes.Boolean), 38 | "partition.assignment.strategy" -> ("partition.assignment.strategy", ConfigTypes.String))) 39 | 40 | new KafkaConsumerConfig(props) 41 | } 42 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/test/scala/io/bfil/rx/kafka/RxKafkaSpec.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka 2 | 3 | import scala.concurrent.Future 4 | import scala.concurrent.ExecutionContext.Implicits.global 5 | 6 | import org.apache.log4j.Level 7 | 8 | import io.bfil.kafka.specs2.{DefaultKafkaPorts, EmbeddedKafkaContext} 9 | import io.bfil.rx.kafka.consumer.KafkaConsumer 10 | import io.bfil.rx.kafka.messaging.SerializableTopic 11 | import io.bfil.rx.kafka.producer.KafkaProducer 12 | import io.bfil.rx.kafka.testkit.KafkaSpec 13 | 14 | import rx.lang.scala.Observable 15 | 16 | case class Test(value: String) 17 | case class AnotherTest(count: Int) 18 | 19 | class RxKafkaSpec extends KafkaSpec { 20 | 21 | log.setLevel(Level.INFO) 22 | 23 | case object TestTopic extends SerializableTopic[Test]("test") 24 | case object AnotherTestTopic extends SerializableTopic[AnotherTest]("another-test") 25 | 26 | trait WithEmbeddedKafkaTopics extends EmbeddedKafkaContext with DefaultKafkaPorts { 27 | val kafkaTopics = Set("test", "another-test") 28 | } 29 | 30 | val numMessages = 10 31 | val messages = (1 to numMessages).map(_.toString).toList 32 | 33 | def sendMessages(f: String => Unit) = Future { 34 | Thread.sleep(200) 35 | for (i <- messages) f(i) 36 | } 37 | 38 | def receiveMessages[T](iterator: Iterator[T]): List[T] = receiveMessages(iterator, numMessages) 39 | def receiveMessages[T](observable: Observable[T]): List[T] = receiveMessages(observable, numMessages) 40 | 41 | sequential 42 | 43 | "A consumer" should { 44 | 45 | "consume all messages published by a producer" in new WithEmbeddedKafkaTopics { 46 | 47 | log.info("Producer -> Consumer") 48 | 49 | val consumer = KafkaConsumer(TestTopic) 50 | val producer = KafkaProducer(TestTopic) 51 | 52 | sendMessages { i => 53 | producer.publish(Test(i)) 54 | } 55 | 56 | val receivedMessages = receiveMessages(consumer.iterator) 57 | 58 | consumer.close 59 | producer.close 60 | 61 | receivedMessages.length === numMessages 62 | receivedMessages.map(_.value) === messages 63 | } 64 | } 65 | 66 | "An rx observable" should { 67 | 68 | "consume all messages published by an rx observer" in new WithEmbeddedKafkaTopics { 69 | 70 | log.info("Rx Producer -> Rx Consumer") 71 | 72 | val consumer = KafkaConsumer(TestTopic) 73 | 74 | val observable = consumer.toObservable 75 | val observer = KafkaProducer(TestTopic).toObserver 76 | 77 | sendMessages { i => 78 | observer.onNext(Test(i)) 79 | } 80 | 81 | val receivedMessages = receiveMessages(observable) 82 | 83 | consumer.close 84 
| observer.onCompleted 85 | 86 | receivedMessages.length === numMessages 87 | receivedMessages.map(_.value) === messages 88 | } 89 | } 90 | 91 | "A consumer of multiple topics" should { 92 | 93 | "consume all messages published by a producer" in new WithEmbeddedKafkaTopics { 94 | 95 | log.info("Multiple Topics Producer -> Multiple Topics Consumer") 96 | 97 | val consumer = KafkaConsumer(List(TestTopic, AnotherTestTopic)) 98 | val producer = KafkaProducer() 99 | 100 | sendMessages { i => 101 | if (i.toInt % 2 == 0) producer.publish(TestTopic, Test(i)) 102 | else producer.publish(AnotherTestTopic, AnotherTest(i.toInt)) 103 | } 104 | 105 | val receivedMessages = receiveMessages(consumer.toObservable) 106 | 107 | consumer.close 108 | producer.close 109 | 110 | receivedMessages.length === numMessages 111 | receivedMessages.map { 112 | case Test(value) => value 113 | case AnotherTest(count) => count.toString 114 | }.toSet === messages.toSet 115 | } 116 | } 117 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | kafka.logs.dir=logs 17 | 18 | log4j.rootLogger=INFO, stdout 19 | 20 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 21 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 23 | 24 | log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender 25 | log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH 26 | log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log 27 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout 28 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 29 | 30 | log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender 31 | log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH 32 | log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log 33 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout 34 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 35 | 36 | log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender 37 | log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH 38 | log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log 39 | log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout 40 | log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 41 | 42 | log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender 43 | log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH 44 | log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log 45 | log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout 46 | log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 47 | 48 | log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender 49 | log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH 50 | log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log 51 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout 52 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 53 | 54 | # Turn on all our debugging info 55 | #log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender 56 | #log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender 57 | #log4j.logger.kafka.perf=DEBUG, kafkaAppender 58 | #log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender 59 | #log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG 60 | log4j.logger.kafka=INFO, kafkaAppender 61 | 62 | log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender 63 | log4j.additivity.kafka.network.RequestChannel$=false 64 | 65 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender 66 | #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender 67 | #log4j.additivity.kafka.server.KafkaApis=false 68 | log4j.logger.kafka.request.logger=WARN, requestAppender 69 | log4j.additivity.kafka.request.logger=false 70 | 71 | log4j.logger.kafka.controller=TRACE, controllerAppender 72 | log4j.additivity.kafka.controller=false 73 | 74 | log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender 75 | log4j.additivity.kafka.log.LogCleaner=false 76 | 77 | log4j.logger.state.change.logger=TRACE, stateChangeAppender 78 | log4j.additivity.state.change.logger=false 79 | -------------------------------------------------------------------------------- /rx-kafka-json4s/src/test/resources/log4j.properties: 
-------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | kafka.logs.dir=logs 17 | 18 | log4j.rootLogger=INFO, stdout 19 | 20 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 21 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 23 | 24 | log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender 25 | log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH 26 | log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log 27 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout 28 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 29 | 30 | log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender 31 | log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH 32 | log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log 33 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout 34 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 35 | 36 | log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender 37 | log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH 38 | log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log 39 | log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout 40 | log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 41 | 42 | log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender 43 | log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH 44 | log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log 45 | log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout 46 | log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 47 | 48 | log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender 49 | log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH 50 | log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log 51 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout 52 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 53 | 54 | # Turn on all our debugging info 55 | #log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender 56 | #log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender 57 | #log4j.logger.kafka.perf=DEBUG, kafkaAppender 58 | #log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender 59 | #log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG 60 | log4j.logger.kafka=INFO, kafkaAppender 61 | 62 | 
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender 63 | log4j.additivity.kafka.network.RequestChannel$=false 64 | 65 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender 66 | #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender 67 | #log4j.additivity.kafka.server.KafkaApis=false 68 | log4j.logger.kafka.request.logger=WARN, requestAppender 69 | log4j.additivity.kafka.request.logger=false 70 | 71 | log4j.logger.kafka.controller=TRACE, controllerAppender 72 | log4j.additivity.kafka.controller=false 73 | 74 | log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender 75 | log4j.additivity.kafka.log.LogCleaner=false 76 | 77 | log4j.logger.state.change.logger=TRACE, stateChangeAppender 78 | log4j.additivity.state.change.logger=false 79 | -------------------------------------------------------------------------------- /rx-kafka-json4s/src/test/scala/io/bfil/rx/kafka/RxKafkaJson4sSpec.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka 2 | 3 | import scala.concurrent.Future 4 | import scala.concurrent.ExecutionContext.Implicits.global 5 | 6 | import org.apache.log4j.Level 7 | import org.json4s.DefaultFormats 8 | 9 | import io.bfil.kafka.specs2.{DefaultKafkaPorts, EmbeddedKafkaContext} 10 | import io.bfil.rx.kafka.consumer.KafkaConsumer 11 | import io.bfil.rx.kafka.messaging.JsonTopic 12 | import io.bfil.rx.kafka.producer.KafkaProducer 13 | import io.bfil.rx.kafka.serialization.JsonSupport 14 | import io.bfil.rx.kafka.testkit.KafkaSpec 15 | 16 | import rx.lang.scala.Observable 17 | 18 | case class Test(value: String) 19 | case class AnotherTest(count: Int) 20 | 21 | class RxKafkaJson4sSpec extends KafkaSpec with JsonSupport { 22 | 23 | log.setLevel(Level.INFO) 24 | 25 | case object JsonTestTopic extends JsonTopic[Test]("test") 26 | case object AnotherJsonTestTopic extends JsonTopic[AnotherTest]("another-test") 27 | 28 | trait WithEmbeddedKafkaTopics extends EmbeddedKafkaContext with DefaultKafkaPorts { 29 | val kafkaTopics = Set("test", "another-test") 30 | } 31 | 32 | val numMessages = 10 33 | val messages = (1 to numMessages).map(_.toString).toList 34 | 35 | def sendMessages(f: String => Unit) = Future { 36 | Thread.sleep(200) 37 | for (i <- messages) f(i) 38 | } 39 | 40 | def receiveMessages[T](iterator: Iterator[T]): List[T] = receiveMessages(iterator, numMessages) 41 | def receiveMessages[T](observable: Observable[T]): List[T] = receiveMessages(observable, numMessages) 42 | 43 | sequential 44 | 45 | "A consumer" should { 46 | 47 | "consume all messages published by a producer" in new WithEmbeddedKafkaTopics { 48 | 49 | log.info("JSON Producer -> JSON Consumer") 50 | 51 | implicit val json4sFormats = DefaultFormats 52 | 53 | val consumer = KafkaConsumer(JsonTestTopic) 54 | val producer = KafkaProducer(JsonTestTopic) 55 | 56 | sendMessages { i => 57 | producer.publish(Test(i)) 58 | } 59 | 60 | val receivedMessages = receiveMessages(consumer.iterator) 61 | 62 | consumer.close 63 | producer.close 64 | 65 | receivedMessages.length === numMessages 66 | receivedMessages.map(_.value) === messages 67 | } 68 | } 69 | 70 | "An rx observable" should { 71 | 72 | "consume all messages published by an rx observer" in new WithEmbeddedKafkaTopics { 73 | 74 | log.info("JSON Rx Producer -> JSON Rx Consumer") 75 | 76 | implicit val json4sFormats = DefaultFormats 77 | 78 | val consumer = KafkaConsumer(JsonTestTopic) 79 | 80 | val observable = consumer.toObservable 81 | val observer = KafkaProducer(JsonTestTopic).toObserver 
82 | 83 | sendMessages { i => 84 | observer.onNext(Test(i)) 85 | } 86 | 87 | val receivedMessages = receiveMessages(observable) 88 | 89 | consumer.close 90 | observer.onCompleted 91 | 92 | receivedMessages.length === numMessages 93 | receivedMessages.map(_.value) === messages 94 | } 95 | } 96 | 97 | "A consumer of multiple topics" should { 98 | 99 | "consume all messages published by a producer" in new WithEmbeddedKafkaTopics { 100 | 101 | log.info("Multiple JSON Topics Producer -> Multiple JSON Topics Consumer") 102 | 103 | implicit val json4sFormats = DefaultFormats 104 | 105 | val consumer = KafkaConsumer(List(JsonTestTopic, AnotherJsonTestTopic)) 106 | val producer = KafkaProducer() 107 | 108 | sendMessages { i => 109 | if (i.toInt % 2 == 0) producer.publish(JsonTestTopic, Test(i)) 110 | else producer.publish(AnotherJsonTestTopic, AnotherTest(i.toInt)) 111 | } 112 | 113 | val receivedMessages = receiveMessages(consumer.toObservable) 114 | 115 | consumer.close 116 | producer.close 117 | 118 | receivedMessages.length === numMessages 119 | receivedMessages.map { 120 | case Test(value) => value 121 | case AnotherTest(count) => count.toString 122 | }.toSet === messages.toSet 123 | } 124 | } 125 | } -------------------------------------------------------------------------------- /rx-kafka-core/src/main/scala/io/bfil/rx/kafka/config/ProducerConfig.scala: -------------------------------------------------------------------------------- 1 | package io.bfil.rx.kafka.config 2 | 3 | import com.typesafe.config.Config 4 | 5 | object ProducerConfig { 6 | def apply(config: Config): java.util.Properties = 7 | new PropertiesBuilder(config).build(Map( 8 | "bootstrap.servers" -> ("bootstrap.servers", ConfigTypes.String), 9 | "key.serializer" -> ("key.serializer", ConfigTypes.String), 10 | "value.serializer" -> ("value.serializer", ConfigTypes.String), 11 | "acks" -> ("acks", ConfigTypes.String), // Kafka accepts "all" as well as numeric values, so this must be read as a string 12 | "buffer.memory" -> ("buffer.memory", ConfigTypes.Int), 13 | "compression.type" -> ("compression.type", ConfigTypes.String), 14 | "retries" -> ("retries", ConfigTypes.Int), 15 | "ssl.key.password" -> ("ssl.key.password", ConfigTypes.String), 16 | "ssl.keystore.location" -> ("ssl.keystore.location", ConfigTypes.String), 17 | "ssl.keystore.password" -> ("ssl.keystore.password", ConfigTypes.String), 18 | "ssl.truststore.location" -> ("ssl.truststore.location", ConfigTypes.String), 19 | "ssl.truststore.password" -> ("ssl.truststore.password", ConfigTypes.String), 20 | "batch.size" -> ("batch.size", ConfigTypes.Int), 21 | "client.id" -> ("client.id", ConfigTypes.String), 22 | "connections.max.idle.ms" -> ("connections.max.idle", ConfigTypes.FiniteDuration), 23 | "linger.ms" -> ("linger", ConfigTypes.FiniteDuration), 24 | "max.block.ms" -> ("max.block", ConfigTypes.FiniteDuration), 25 | "max.request.size" -> ("max.request.size", ConfigTypes.Int), 26 | "partitioner.class" -> ("partitioner.class", ConfigTypes.String), 27 | "receive.buffer.bytes" -> ("receive.buffer", ConfigTypes.Bytes), 28 | "request.timeout.ms" -> ("request.timeout", ConfigTypes.FiniteDuration), 29 | "sasl.jaas.config" -> ("sasl.jaas.config", ConfigTypes.String), 30 | "sasl.kerberos.service.name" -> ("sasl.kerberos.service.name", ConfigTypes.String), 31 | "sasl.mechanism" -> ("sasl.mechanism", ConfigTypes.String), 32 | "security.protocol" -> ("security.protocol", ConfigTypes.String), 33 | "send.buffer.bytes" -> ("send.buffer", ConfigTypes.Bytes), 34 | "ssl.enabled.protocols" -> ("ssl.enabled.protocols", ConfigTypes.String), 35 | "ssl.keystore.type"
-> ("ssl.keystore.type", ConfigTypes.String), 36 | "ssl.protocol" -> ("ssl.protocol", ConfigTypes.String), 37 | "ssl.provider" -> ("ssl.provider", ConfigTypes.String), 38 | "ssl.truststore.type" -> ("ssl.truststore.type", ConfigTypes.String), 39 | "timeout.ms" -> ("timeout", ConfigTypes.FiniteDuration), 40 | "block.on.buffer.full" -> ("block.on.buffer.full", ConfigTypes.Boolean), 41 | "interceptor.classes" -> ("interceptor.classes", ConfigTypes.String), 42 | "max.in.flight.requests.per.connection" -> ("max.in.flight.requests.per.connection", ConfigTypes.Int), 43 | "metadata.fetch.timeout.ms" -> ("metadata.fetch.timeout", ConfigTypes.FiniteDuration), 44 | "metadata.max.age.ms" -> ("metadata.max.age", ConfigTypes.FiniteDuration), 45 | "metric.reporters" -> ("metric.reporters", ConfigTypes.String), 46 | "metrics.num.samples" -> ("metrics.num.samples", ConfigTypes.Int), 47 | "metrics.sample.window.ms" -> ("metrics.sample.window", ConfigTypes.FiniteDuration), 48 | "reconnect.backoff.ms" -> ("reconnect.backoff", ConfigTypes.FiniteDuration), 49 | "retry.backoff.ms" -> ("retry.backoff", ConfigTypes.FiniteDuration), 50 | "sasl.kerberos.kinit.cmd" -> ("sasl.kerberos.kinit.cmd", ConfigTypes.String), 51 | "sasl.kerberos.min.time.before.relogin" -> ("sasl.kerberos.min.time.before.relogin", ConfigTypes.Int), 52 | "sasl.kerberos.ticket.renew.jitter" -> ("sasl.kerberos.ticket.renew.jitter", ConfigTypes.String), 53 | "sasl.kerberos.ticket.renew.window.factor" -> ("sasl.kerberos.ticket.renew.window.factor", ConfigTypes.String), 54 | "ssl.cipher.suites" -> ("ssl.cipher.suites", ConfigTypes.String), 55 | "ssl.endpoint.identification.algorithm" -> ("ssl.endpoint.identification.algorithm", ConfigTypes.String), 56 | "ssl.keymanager.algorithm" -> ("ssl.keymanager.algorithm", ConfigTypes.String), 57 | "ssl.secure.random.implementation" -> ("ssl.secure.random.implementation", ConfigTypes.String), 58 | "ssl.trustmanager.algorithm" -> ("ssl.trustmanager.algorithm", ConfigTypes.String) 59 | )) 60 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | RxKafka 2 | ======= 3 | 4 | [![Codacy Badge](https://www.codacy.com/project/badge/78b35cfad9f44d209ed009fe6d7988a9)](https://www.codacy.com/app/bfil/rx-kafka) 5 | 6 | This library provides a simple way to create Kafka consumers and producers. It uses [RxScala](https://github.com/ReactiveX/RxScala) under the hood and provides the ability to turn consumers and producers into `RxScala` observables and observers. 7 | 8 | Setting up the dependencies 9 | --------------------------- 10 | 11 | __RxKafka__ is available on `Maven Central` (since version `0.2.0`), and it is cross-compiled and published for Scala 2.12 and 2.11.
12 | 13 | *Older artifact versions are not available anymore due to the shutdown of my self-hosted Nexus Repository in favour of Bintray* 14 | 15 | Using SBT, add the following dependency to your build file: 16 | 17 | ```scala 18 | libraryDependencies ++= Seq( 19 | "io.bfil" %% "rx-kafka-core" % "0.2.0" 20 | ) 21 | ``` 22 | 23 | Include the following module if you need JSON serialization (it uses [Json4s](https://github.com/json4s/json4s)): 24 | 25 | ```scala 26 | libraryDependencies ++= Seq( 27 | "io.bfil" %% "rx-kafka-json4s" % "0.2.0" 28 | ) 29 | ``` 30 | 31 | If you have issues resolving the dependency, you can add the following resolver: 32 | 33 | ```scala 34 | resolvers += Resolver.bintrayRepo("bfil", "maven") 35 | ``` 36 | 37 | Usage 38 | ----- 39 | 40 | ### Topics 41 | 42 | The topics in `RxKafka` are typed. The following is the `Topic` trait: 43 | 44 | ```scala 45 | trait Topic[T] { 46 | val name: String 47 | type Message = T 48 | val serializer: Serializer[T] 49 | val deserializer: Deserializer[T] 50 | } 51 | ``` 52 | 53 | Topics can be defined by extending the trait: each topic has its own name (used by Kafka) and the type of the message it contains (often needed by the serialization process). 54 | 55 | The `core` module supports basic Java serialization and exposes an abstract `SerializableTopic` class that can be extended to define new topics serialized using Java serialization: 56 | 57 | ```scala 58 | case object TestTopic extends SerializableTopic[Test]("test") 59 | case object AnotherTestTopic extends SerializableTopic[AnotherTest]("another-test") 60 | ``` 61 | 62 | `Test` and `AnotherTest` are simple case classes representing the message. 63 | 64 | The `json4s` module provides a similar class, called `JsonTopic`, that can be used as follows: 65 | 66 | ```scala 67 | case object JsonTestTopic extends JsonTopic[Test]("test") 68 | case object AnotherJsonTestTopic extends JsonTopic[AnotherTest]("another-test") 69 | ``` 70 | 71 | These topics can then be used to create consumers and producers, and their self-contained serialization simplifies the rest of the APIs. 72 | 73 | ### Consumers 74 | 75 | Two types of consumers can be created: consumers of a single topic, or of multiple topics. 76 | 77 | #### Single-Topic Consumer 78 | 79 | To create and use an RxKafka consumer tied to a single topic: 80 | 81 | ```scala 82 | val consumer = KafkaConsumer(TestTopic) 83 | 84 | // an iterator can be accessed on the consumer 85 | val iterator = consumer.iterator 86 | 87 | // or the consumer can be turned into an Observable[T] where T is the type of the message (in this case it will be Test) 88 | val observable = consumer.toObservable 89 | 90 | // you can also call subscribe directly on the Consumer (it's just a proxy to the Observable's subscribe method) 91 | consumer.subscribe { message => 92 | // ...
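// the message here has already been turned from raw bytes into a Test instance by the topic's Deserializer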
93 | } 94 | 95 | // to close the Kafka connector 96 | consumer.close 97 | ``` 98 | 99 | #### Multiple-Topics Consumer 100 | 101 | To create and use an RxKafka consumer tied to multiple topics: 102 | 103 | ```scala 104 | val consumer = KafkaConsumer(List(TestTopic, AnotherTestTopic)) 105 | 106 | // a map of iterators can be accessed on the consumer as a Map[String, Iterator[Any]] 107 | val iterators = consumer.iterators 108 | 109 | // or the consumer can be turned into an Observable[Any] which will merge all topics together: 110 | val observable = consumer.toObservable 111 | 112 | // you can also call subscribe directly on the `Consumer` 113 | consumer.subscribe { 114 | case test: Test => // ... 115 | case anotherTest: AnotherTest => // ... 116 | } 117 | 118 | // to close the Kafka connector 119 | consumer.close 120 | ``` 121 | 122 | ### Producers 123 | 124 | Two types of producers can be created: producers publishing to a single topic, or to multiple topics. 125 | 126 | #### Single-Topic Producer 127 | 128 | To create and use an RxKafka producer for a single topic: 129 | 130 | ```scala 131 | val producer = KafkaProducer(TestTopic) 132 | 133 | // publish a message 134 | producer.publish(Test("test")) 135 | 136 | // or the producer can be turned into an Observer[T] where T is the type of the message (in this case it will be Test) 137 | val observer = producer.toObserver 138 | 139 | // then you can use the Observer API 140 | observer.onNext(Test("test")) 141 | observer.onCompleted() 142 | 143 | // stop the Kafka producer 144 | producer.close 145 | ``` 146 | 147 | #### Multiple-Topics Producer 148 | 149 | ```scala 150 | val producer = KafkaProducer() 151 | 152 | // publish a message to a particular topic 153 | producer.publish(TestTopic, Test("test")) 154 | producer.publish(AnotherTestTopic, AnotherTest(1)) 155 | 156 | // or the producer can be turned into an Observer[(Topic[T], T)] 157 | val observer = producer.toObserver 158 | 159 | // then you can use the Observer API 160 | observer.onNext((TestTopic, Test("test"))) 161 | observer.onNext((AnotherTestTopic, AnotherTest(1))) 162 | observer.onCompleted() 163 | 164 | // stop the Kafka producer 165 | producer.close 166 | ``` 167 | 168 | ### Configuration 169 | 170 | Configuration is supported using [Typesafe Config](https://github.com/typesafehub/config). 171 | 172 | To customize the default consumer and producer configuration, create an `application.conf` file under your `src/main/resources` folder. 173 | 174 | The following is an example configuration file: 175 | 176 | ``` 177 | kafka { 178 | consumer { 179 | group.id = "default" 180 | zookeeper.connect = "localhost:2181" 181 | } 182 | producer { 183 | bootstrap.servers = "localhost:9092" 184 | } 185 | } 186 | ``` 187 | 188 | **Please Note**: configuration values have different types depending on the setting; for durations or buffer sizes, for example, the `.ms` and `.bytes` suffixes are not needed and the value specifies its own unit.
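Under the hood, `PropertiesBuilder` (in `rx-kafka-core`) converts each typed value into the string property Kafka expects. The following is a minimal sketch of the duration case, using a standalone `Config` instance built here purely for illustration, mirroring the logic in `PropertiesBuilder`:

```scala
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.Duration

// a typed duration value, as it would appear in application.conf
val config = ConfigFactory.parseString("""socket.timeout = 30 seconds""")

// parse it into a typed Duration, then emit the milliseconds string
// that the corresponding Kafka property ("socket.timeout.ms") expects
val duration = Duration.create(config.getString("socket.timeout"))
val props = new java.util.Properties()
props.put("socket.timeout.ms", duration.toMillis.toString) // "30000"
```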
189 | 190 | For example, in a consumer you should specify configuration values like so: 191 | 192 | ``` 193 | group.id = "default" 194 | zookeeper.connect = "localhost:2181" 195 | socket.timeout = 30 seconds 196 | socket.receive.buffer = 64B 197 | fetch.message.max = 1M 198 | num.consumer.fetchers = 1 199 | auto.commit.enable = true 200 | ``` 201 | 202 | For the full list of configurations refer to the Kafka documentation for the [Old Consumer](https://kafka.apache.org/0102/documentation.html#oldconsumerconfigs) and the [Producer](https://kafka.apache.org/0102/documentation.html#producerconfigs). 203 | 204 | #### Custom Consumers / Producers Configuration 205 | 206 | A Typesafe `Config` object can be passed into the `KafkaConsumer` and `KafkaProducer` constructors. 207 | 208 | You can define separate blocks in your `application.conf`: 209 | 210 | ``` 211 | my-consumer-1 { 212 | group.id = "group-1" 213 | } 214 | my-consumer-2 { 215 | group.id = "group-2" 216 | } 217 | 218 | async-producer { 219 | producer.type = "async" 220 | } 221 | ``` 222 | 223 | Then these configurations can be loaded like this: 224 | 225 | ```scala 226 | import com.typesafe.config.ConfigFactory 227 | val config = ConfigFactory.load 228 | 229 | val consumerConfig1 = config.getConfig("my-consumer-1") 230 | val consumerConfig2 = config.getConfig("my-consumer-2") 231 | 232 | val asyncProducerConfig = config.getConfig("async-producer") 233 | ``` 234 | 235 | And they can be specified in the constructor: 236 | 237 | ```scala 238 | val consumer1 = KafkaConsumer(TestTopic, consumerConfig1) 239 | val consumer2 = KafkaConsumer(List(TestTopic, AnotherTestTopic), consumerConfig2) 240 | 241 | val asyncProducer = KafkaProducer(asyncProducerConfig) 242 | val asyncTestProducer = KafkaProducer(TestTopic, asyncProducerConfig) 243 | ``` 244 | 245 | ### Custom Serialization 246 | 247 | Custom serializers are supported and easy to create; the following are the serialization interfaces: 248 | 249 | ```scala 250 | trait Serializer[-T] { 251 | def toBytes(obj: T): Array[Byte] 252 | } 253 | 254 | trait Deserializer[+T] { 255 | def fromBytes(bytes: Array[Byte]): T 256 | } 257 | ``` 258 | 259 | As an example, take a look at the implementation of the Json4s serialization in `rx-kafka-json4s`: 260 | 261 | ```scala 262 | import org.json4s.native.Serialization 263 | 264 | class Json4sSerializer[T <: AnyRef](implicit formats: Formats) extends Serializer[T] { 265 | def toBytes(obj: T): Array[Byte] = Serialization.write(obj).getBytes 266 | } 267 | 268 | class Json4sDeserializer[T: Manifest](implicit formats: Formats) extends Deserializer[T] { 269 | def fromBytes(bytes: Array[Byte]): T = Serialization.read[T](new String(bytes)) 270 | } 271 | ``` 272 | 273 | License 274 | ------- 275 | 276 | This software is licensed under the Apache 2 license, quoted below. 277 | 278 | Copyright © 2015-2017 Bruno Filippone 279 | 280 | Licensed under the Apache License, Version 2.0 (the "License"); you may not 281 | use this file except in compliance with the License. You may obtain a copy of 282 | the License at 283 | 284 | [http://www.apache.org/licenses/LICENSE-2.0] 285 | 286 | Unless required by applicable law or agreed to in writing, software 287 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 288 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 289 | License for the specific language governing permissions and limitations under 290 | the License.
291 | --------------------------------------------------------------------------------