├── .gitignore ├── project └── build.properties ├── .travis.yml ├── src ├── test │ ├── resources │ │ ├── log4j.properties │ │ └── example.conf │ └── scala │ │ └── akka │ │ └── persistence │ │ └── kafka │ │ ├── KafkaCleanup.scala │ │ ├── journal │ │ ├── KafkaJournalSpec.scala │ │ └── KafkaLoadSpec.scala │ │ ├── server │ │ └── TestServer.scala │ │ ├── snapshot │ │ └── KafkaSnapshotStoreSpec.scala │ │ ├── example │ │ └── Example.scala │ │ ├── BrokerWatcherSpec.scala │ │ └── integration │ │ └── KafkaIntegrationSpec.scala └── main │ ├── protobuf │ ├── SnapshotFormats.proto │ └── EventFormats.proto │ ├── scala │ └── akka │ │ └── persistence │ │ └── kafka │ │ ├── StickyPartitioner.scala │ │ ├── MetadataConsumerConfig.scala │ │ ├── package.scala │ │ ├── snapshot │ │ ├── KafkaSnapshotStoreConfig.scala │ │ ├── KafkaSnapshotSerializer.scala │ │ └── KafkaSnapshotStore.scala │ │ ├── BrokerWatcher.scala │ │ ├── journal │ │ ├── KafkaJournalConfig.scala │ │ ├── KafkaEventSerializer.scala │ │ └── KafkaJournal.scala │ │ ├── Event.scala │ │ ├── MessageIterator.scala │ │ ├── ChildDataWatcher.scala │ │ └── MetadataConsumer.scala │ ├── resources │ └── reference.conf │ └── java │ └── akka │ └── persistence │ └── kafka │ ├── snapshot │ └── SnapshotFormats.java │ └── journal │ └── EventFormats.java ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .idea* 2 | target/ 3 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.2 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: scala 2 | scala: 3 | - "2.10.4" 4 | - "2.11.2" 5 | jdk: 6 | - oraclejdk8 7 | - openjdk7 8 | branches: 9 | except: 10 | - /^wip-.*$/ 11 | -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=ERROR, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout 4 | -------------------------------------------------------------------------------- /src/main/protobuf/SnapshotFormats.proto: -------------------------------------------------------------------------------- 1 | option java_package = "akka.persistence.kafka.snapshot"; 2 | option optimize_for = SPEED; 3 | 4 | message SnapshotMetadataFormat { 5 | optional string persistenceId = 1; 6 | optional int64 sequenceNr = 2; 7 | optional int64 timestamp = 3; 8 | } 9 | -------------------------------------------------------------------------------- /src/test/resources/example.conf: -------------------------------------------------------------------------------- 1 | akka.persistence.journal.plugin = "kafka-journal" 2 | akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 3 | 4 | kafka-journal.event.producer.topic.mapper.class = "akka.persistence.kafka.example.ExampleEventTopicMapper" 5 | 6 | test-server.zookeeper.dir = target/example/zookeeper 7 | test-server.kafka.log.dirs = target/example/kafka 8 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/StickyPartitioner.scala: 
-------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import kafka.producer.Partitioner 4 | import kafka.utils.VerifiableProperties 5 | 6 | class StickyPartitioner(props: VerifiableProperties) extends Partitioner { 7 | val partition = props.getInt("partition") 8 | def partition(key: Any, numPartitions: Int): Int = partition 9 | } 10 | -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/KafkaCleanup.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import java.io.File 4 | 5 | import org.apache.commons.io.FileUtils 6 | import org.scalatest._ 7 | 8 | trait KafkaCleanup extends BeforeAndAfterAll { this: Suite => 9 | override protected def afterAll(): Unit = { 10 | FileUtils.deleteDirectory(new File("target/test")) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/main/protobuf/EventFormats.proto: -------------------------------------------------------------------------------- 1 | option java_package = "akka.persistence.kafka.journal"; 2 | option optimize_for = SPEED; 3 | 4 | message EventFormat { 5 | optional string persistenceId = 1; 6 | optional int64 sequenceNr = 2; 7 | optional EventDataFormat data = 3; 8 | } 9 | 10 | message EventDataFormat { 11 | optional int32 serializerId = 1; 12 | optional bytes data = 2; 13 | optional bytes dataManifest = 3; 14 | } 15 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/MetadataConsumerConfig.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import com.typesafe.config.Config 4 | 5 | import kafka.consumer.ConsumerConfig 6 | import kafka.utils._ 7 | 8 | class MetadataConsumerConfig(config: Config) { 9 | val partition: Int = 10 | config.getInt("partition") 11 | 12 | val zookeeperConfig: ZKConfig = 13 | new ZKConfig(new VerifiableProperties(configToProperties(config))) 14 | 15 | val consumerConfig: ConsumerConfig = 16 | new ConsumerConfig(configToProperties(config.getConfig("consumer"), 17 | Map("zookeeper.connect" -> zookeeperConfig.zkConnect, "group.id" -> "snapshot"))) 18 | } 19 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/package.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence 2 | 3 | import scala.collection.JavaConverters._ 4 | 5 | import java.util.Properties 6 | 7 | import com.typesafe.config.Config 8 | 9 | package object kafka { 10 | def journalTopic(persistenceId: String): String = 11 | persistenceId.replaceAll("[^\\w\\._-]", "_") 12 | 13 | def configToProperties(config: Config, extra: Map[String, String] = Map.empty): Properties = { 14 | val properties = new Properties() 15 | 16 | config.entrySet.asScala.foreach { entry => 17 | properties.put(entry.getKey, entry.getValue.unwrapped.toString) 18 | } 19 | 20 | extra.foreach { 21 | case (k, v) => properties.put(k, v) 22 | } 23 | 24 | properties 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/snapshot/KafkaSnapshotStoreConfig.scala: -------------------------------------------------------------------------------- 1 | package 
akka.persistence.kafka.snapshot 2 | 3 | import akka.persistence.kafka._ 4 | import akka.persistence.kafka.MetadataConsumer.Broker 5 | 6 | import com.typesafe.config.Config 7 | 8 | import kafka.producer.ProducerConfig 9 | 10 | class KafkaSnapshotStoreConfig(config: Config) extends MetadataConsumerConfig(config) { 11 | val prefix: String = 12 | config.getString("prefix") 13 | 14 | val ignoreOrphan: Boolean = 15 | config.getBoolean("ignore-orphan") 16 | 17 | def producerConfig(brokers: List[Broker]): ProducerConfig = 18 | new ProducerConfig(configToProperties(config.getConfig("producer"), 19 | Map("metadata.broker.list" -> Broker.toString(brokers), "partition" -> config.getString("partition")))) 20 | } 21 | -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/journal/KafkaJournalSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.journal 2 | 3 | import com.typesafe.config.ConfigFactory 4 | 5 | import akka.persistence.journal.JournalSpec 6 | import akka.persistence.kafka.KafkaCleanup 7 | import akka.persistence.kafka.server._ 8 | 9 | class KafkaJournalSpec extends JournalSpec with KafkaCleanup { 10 | lazy val config = ConfigFactory.parseString( 11 | """ 12 | |akka.persistence.journal.plugin = "kafka-journal" 13 | |akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 14 | |akka.test.single-expect-default = 10s 15 | |kafka-journal.event.producer.request.required.acks = 1 16 | |test-server.zookeeper.dir = target/test/zookeeper 17 | |test-server.kafka.log.dirs = target/test/kafka 18 | """.stripMargin) 19 | 20 | val systemConfig = system.settings.config 21 | val serverConfig = new TestServerConfig(systemConfig.getConfig("test-server")) 22 | val server = new TestServer(serverConfig) 23 | 24 | override protected def afterAll(): Unit = { 25 | server.stop() 26 | super.afterAll() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/BrokerWatcher.scala: -------------------------------------------------------------------------------- 1 | 2 | package akka.persistence.kafka 3 | 4 | import akka.actor.ActorRef 5 | import akka.persistence.kafka.BrokerWatcher.BrokersUpdated 6 | import akka.persistence.kafka.MetadataConsumer.Broker 7 | import kafka.utils.{ZkUtils, ZKStringSerializer, ZKConfig} 8 | import org.I0Itec.zkclient.ZkClient 9 | 10 | object BrokerWatcher { 11 | 12 | case class BrokersUpdated(brokers: List[Broker]) 13 | 14 | } 15 | 16 | class BrokerWatcher(zkConfig: ZKConfig, listener: ActorRef) { 17 | 18 | lazy val zkClient = new ZkClient( 19 | zkConfig.zkConnect, 20 | zkConfig.zkSessionTimeoutMs, 21 | zkConfig.zkConnectionTimeoutMs, 22 | ZKStringSerializer) 23 | 24 | lazy val childWatcher = new ChildDataWatcher[String](zkClient, ZkUtils.BrokerIdsPath, { d => 25 | listener ! 
BrokersUpdated(buildBrokers(d)) 26 | }) 27 | 28 | def start(): List[Broker] = { 29 | buildBrokers(childWatcher.start()) 30 | } 31 | 32 | def stop(): Unit = { 33 | childWatcher.stop() 34 | zkClient.close() 35 | } 36 | 37 | private def buildBrokers(d: Map[String, String]): List[Broker] = { 38 | d.values.map(Broker.fromString).flatMap(x => x).toList 39 | } 40 | } -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/journal/KafkaJournalConfig.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.journal 2 | 3 | import akka.persistence.kafka._ 4 | import akka.persistence.kafka.MetadataConsumer.Broker 5 | 6 | import com.typesafe.config.Config 7 | 8 | import kafka.producer.ProducerConfig 9 | import kafka.utils._ 10 | 11 | class KafkaJournalConfig(config: Config) extends MetadataConsumerConfig(config) { 12 | val pluginDispatcher: String = 13 | config.getString("plugin-dispatcher") 14 | 15 | val writeConcurrency: Int = 16 | config.getInt("write-concurrency") 17 | 18 | val eventTopicMapper: EventTopicMapper = 19 | Utils.createObject[EventTopicMapper](config.getString("event.producer.topic.mapper.class")) 20 | 21 | def journalProducerConfig(brokers: List[Broker]): ProducerConfig = 22 | new ProducerConfig(configToProperties(config.getConfig("producer"), 23 | Map("metadata.broker.list" -> Broker.toString(brokers), "partition" -> config.getString("partition")))) 24 | 25 | def eventProducerConfig(brokers: List[Broker]): ProducerConfig = 26 | new ProducerConfig(configToProperties(config.getConfig("event.producer"), 27 | Map("metadata.broker.list" -> Broker.toString(brokers)))) 28 | } 29 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/Event.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import akka.actor.ActorSystem 4 | import akka.serialization.SerializationExtension 5 | 6 | import kafka.serializer._ 7 | 8 | import scala.collection.immutable.Seq 9 | 10 | /** 11 | * Event published to user-defined topics. 12 | * 13 | * @param persistenceId Id of the persistent actor that generates event `data`. 14 | * @param sequenceNr Sequence number of the event. 15 | * @param data Event data generated by a persistent actor. 16 | */ 17 | case class Event(persistenceId: String, sequenceNr: Long, data: Any) 18 | 19 | /** 20 | * Defines a mapping of events to user-defined topics. 21 | */ 22 | trait EventTopicMapper { 23 | /** 24 | * Maps an event to zero or more topics. 25 | * 26 | * @param event event to be mapped. 27 | * @return a sequence of topic names. 
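   *         (an empty sequence means the event is not published to any user-defined
   *         topic, which is what EmptyEventTopicMapper below does).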
28 | */ 29 | def topicsFor(event: Event): Seq[String] 30 | } 31 | 32 | class DefaultEventTopicMapper extends EventTopicMapper { 33 | def topicsFor(event: Event): Seq[String] = List("events") 34 | } 35 | 36 | class EmptyEventTopicMapper extends EventTopicMapper { 37 | def topicsFor(event: Event): Seq[String] = Nil 38 | } 39 | 40 | class EventDecoder(system: ActorSystem) extends Decoder[Event] { 41 | val serialization = SerializationExtension(system) 42 | 43 | def fromBytes(bytes: Array[Byte]): Event = 44 | serialization.deserialize(bytes, classOf[Event]).get 45 | } 46 | -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/server/TestServer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.server 2 | 3 | import java.io.File 4 | 5 | import akka.persistence.kafka._ 6 | 7 | import com.typesafe.config._ 8 | 9 | import kafka.server._ 10 | 11 | import org.apache.curator.test.TestingServer 12 | 13 | object TestServerConfig { 14 | def load(): TestServerConfig = 15 | load("application") 16 | 17 | def load(resource: String): TestServerConfig = 18 | new TestServerConfig(ConfigFactory.load(resource).getConfig("test-server")) 19 | } 20 | 21 | class TestServerConfig(config: Config) { 22 | object zookeeper { 23 | val port: Int = 24 | config.getInt("zookeeper.port") 25 | 26 | val dir: String = 27 | config.getString("zookeeper.dir") 28 | } 29 | 30 | val kafka: KafkaConfig = 31 | new KafkaConfig(configToProperties(config.getConfig("kafka"), 32 | Map("zookeeper.connect" -> s"localhost:${zookeeper.port}", "host.name" -> "localhost"))) 33 | } 34 | 35 | class TestServer(config: TestServerConfig = TestServerConfig.load()) { 36 | val zookeeper = new TestZookeeperServer(config) 37 | val kafka = new TestKafkaServer(config) 38 | 39 | def stop(): Unit = { 40 | kafka.stop() 41 | zookeeper.stop() 42 | } 43 | } 44 | 45 | class TestZookeeperServer(config: TestServerConfig) { 46 | import config._ 47 | 48 | private val server: TestingServer = 49 | new TestingServer(zookeeper.port, new File(zookeeper.dir)) 50 | 51 | def stop(): Unit = server.stop() 52 | } 53 | 54 | class TestKafkaServer(config: TestServerConfig) { 55 | private val server: KafkaServer = 56 | new KafkaServer(config.kafka) 57 | 58 | start() 59 | 60 | def start(): Unit = { 61 | server.startup() 62 | } 63 | 64 | def stop(): Unit = { 65 | server.shutdown() 66 | server.awaitShutdown() 67 | } 68 | } 69 | 70 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/MessageIterator.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import kafka.api.FetchRequestBuilder 4 | import kafka.common.ErrorMapping 5 | import kafka.consumer._ 6 | import kafka.message._ 7 | 8 | object MessageUtil { 9 | def payloadBytes(m: Message): Array[Byte] = { 10 | val payload = m.payload 11 | val payloadBytes = Array.ofDim[Byte](payload.limit()) 12 | 13 | payload.get(payloadBytes) 14 | payloadBytes 15 | } 16 | } 17 | 18 | class MessageIterator(host: String, port: Int, topic: String, partition: Int, offset: Long, consumerConfig: ConsumerConfig) extends Iterator[Message] { 19 | import consumerConfig._ 20 | import ErrorMapping._ 21 | 22 | val consumer = new SimpleConsumer(host, port, socketTimeoutMs, socketReceiveBufferBytes, clientId) 23 | var iter = iterator(offset) 24 | var readMessages = 0 25 | var nextOffset = 
offset 26 | 27 | def iterator(offset: Long): Iterator[MessageAndOffset] = { 28 | val request = new FetchRequestBuilder().addFetch(topic, partition, offset, fetchMessageMaxBytes).build() 29 | val response = consumer.fetch(request) 30 | 31 | response.errorCode(topic, partition) match { 32 | case NoError => response.messageSet(topic, partition).iterator 33 | case anError => throw exceptionFor(anError) 34 | } 35 | } 36 | 37 | def next(): Message = { 38 | val mo = iter.next() 39 | readMessages += 1 40 | nextOffset = mo.nextOffset 41 | mo.message 42 | } 43 | 44 | @annotation.tailrec 45 | final def hasNext: Boolean = 46 | if (iter.hasNext) { 47 | true 48 | } else if (readMessages == 0) { 49 | close() 50 | false 51 | } else { 52 | iter = iterator(nextOffset) 53 | readMessages = 0 54 | hasNext 55 | } 56 | 57 | def close(): Unit = { 58 | consumer.close() 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/snapshot/KafkaSnapshotStoreSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.snapshot 2 | 3 | import com.typesafe.config.ConfigFactory 4 | 5 | import akka.persistence._ 6 | import akka.persistence.SnapshotProtocol._ 7 | import akka.persistence.snapshot.SnapshotStoreSpec 8 | import akka.persistence.kafka.KafkaCleanup 9 | import akka.persistence.kafka.server._ 10 | import akka.testkit.TestProbe 11 | 12 | class KafkaSnapshotStoreSpec extends SnapshotStoreSpec with KafkaCleanup { 13 | lazy val maxMessageSize = 1000 * 1000 * 11 14 | lazy val config = ConfigFactory.parseString( 15 | s""" 16 | |akka.persistence.journal.plugin = "kafka-journal" 17 | |akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 18 | |akka.test.single-expect-default = 10s 19 | |kafka-snapshot-store.consumer.fetch.message.max.bytes = ${maxMessageSize} 20 | |kafka-snapshot-store.ignore-orphan = false 21 | |test-server.kafka.message.max.bytes = ${maxMessageSize} 22 | |test-server.kafka.replica.fetch.max.bytes = ${maxMessageSize} 23 | |test-server.zookeeper.dir = target/test/zookeeper 24 | |test-server.kafka.log.dirs = target/test/kafka 25 | """.stripMargin) 26 | 27 | val systemConfig = system.settings.config 28 | val serverConfig = new TestServerConfig(systemConfig.getConfig("test-server")) 29 | val server = new TestServer(serverConfig) 30 | 31 | override protected def afterAll(): Unit = { 32 | server.stop() 33 | super.afterAll() 34 | } 35 | 36 | "A Kafka snapshot store" must { 37 | "support large snapshots" in { 38 | val senderProbe = TestProbe() 39 | val snapshot = Array.ofDim[Byte](1000 * 1000 * 2).toList 40 | 41 | snapshotStore.tell(SaveSnapshot(SnapshotMetadata("large", 100), snapshot), senderProbe.ref) 42 | val metadata = senderProbe.expectMsgPF() { case SaveSnapshotSuccess(md) => md } 43 | 44 | snapshotStore.tell(LoadSnapshot("large", SnapshotSelectionCriteria.Latest, Long.MaxValue), senderProbe.ref) 45 | senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata, snapshot)), Long.MaxValue)) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/journal/KafkaEventSerializer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.journal 2 | 3 | import akka.actor._ 4 | import akka.persistence.kafka.Event 5 | import akka.persistence.kafka.journal.EventFormats.{EventDataFormat, EventFormat} 
6 | import akka.serialization._ 7 | 8 | import com.google.protobuf.ByteString 9 | 10 | class KafkaEventSerializer(system: ExtendedActorSystem) extends Serializer { 11 | def identifier: Int = 15443 12 | def includeManifest: Boolean = false 13 | 14 | def toBinary(o: AnyRef): Array[Byte] = o match { 15 | case e: Event => eventFormatBuilder(e).build().toByteArray 16 | case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") 17 | } 18 | 19 | def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = 20 | event(EventFormat.parseFrom(bytes)) 21 | 22 | def eventFormatBuilder(event: Event): EventFormat.Builder = { 23 | val builder = EventFormat.newBuilder 24 | builder.setPersistenceId(event.persistenceId) 25 | builder.setSequenceNr(event.sequenceNr) 26 | builder.setData(eventDataFormatBuilder(event.data.asInstanceOf[AnyRef])) 27 | builder 28 | } 29 | 30 | def eventDataFormatBuilder(payload: AnyRef): EventDataFormat.Builder = { 31 | val serializer = SerializationExtension(system).findSerializerFor(payload) 32 | val builder = EventDataFormat.newBuilder() 33 | 34 | if (serializer.includeManifest) 35 | builder.setDataManifest(ByteString.copyFromUtf8(payload.getClass.getName)) 36 | 37 | builder.setData(ByteString.copyFrom(serializer.toBinary(payload))) 38 | builder.setSerializerId(serializer.identifier) 39 | builder 40 | } 41 | 42 | def event(eventFormat: EventFormat): Event = { 43 | Event( 44 | eventFormat.getPersistenceId, 45 | eventFormat.getSequenceNr, 46 | eventData(eventFormat.getData)) 47 | } 48 | 49 | def eventData(eventDataFormat: EventDataFormat): Any = { 50 | val eventDataClass = if (eventDataFormat.hasDataManifest) 51 | Some(system.dynamicAccess.getClassFor[AnyRef](eventDataFormat.getDataManifest.toStringUtf8).get) else None 52 | 53 | SerializationExtension(system).deserialize( 54 | eventDataFormat.getData.toByteArray, 55 | eventDataFormat.getSerializerId, 56 | eventDataClass).get 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/snapshot/KafkaSnapshotSerializer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.snapshot 2 | 3 | import java.io._ 4 | 5 | import akka.actor._ 6 | import akka.persistence._ 7 | import akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat 8 | import akka.persistence.serialization.Snapshot 9 | import akka.serialization._ 10 | 11 | case class KafkaSnapshot(metadata: SnapshotMetadata, snapshot: Any) { 12 | def matches(criteria: SnapshotSelectionCriteria): Boolean = 13 | metadata.sequenceNr <= criteria.maxSequenceNr && 14 | metadata.timestamp <= criteria.maxTimestamp 15 | } 16 | 17 | class KafkaSnapshotSerializer(system: ExtendedActorSystem) extends Serializer { 18 | def identifier: Int = 15442 19 | def includeManifest: Boolean = false 20 | 21 | def toBinary(o: AnyRef): Array[Byte] = o match { 22 | case ks: KafkaSnapshot => snapshotToBinary(ks) 23 | case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") 24 | } 25 | 26 | def snapshotToBinary(ks: KafkaSnapshot): Array[Byte] = { 27 | val extension = SerializationExtension(system) 28 | val snapshot = Snapshot(ks.snapshot) 29 | val snapshotSerializer = extension.findSerializerFor(snapshot) 30 | 31 | val snapshotBytes = snapshotSerializer.toBinary(snapshot) 32 | val metadataBytes = snapshotMetadataToBinary(ks.metadata) 33 | 34 | val out = new 
ByteArrayOutputStream 35 | 36 | writeInt(out, metadataBytes.length) 37 | out.write(metadataBytes) 38 | out.write(snapshotBytes) 39 | out.toByteArray 40 | } 41 | 42 | def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): KafkaSnapshot = { 43 | val extension = SerializationExtension(system) 44 | val metadataLength = readInt(new ByteArrayInputStream(bytes)) 45 | val metadataBytes = bytes.slice(4, metadataLength + 4) 46 | val snapshotBytes = bytes.drop(metadataLength + 4) 47 | 48 | val metadata = snapshotMetadataFromBinary(metadataBytes) 49 | val snapshot = extension.deserialize(snapshotBytes, classOf[Snapshot]).get 50 | 51 | KafkaSnapshot(metadata, snapshot.data) 52 | } 53 | 54 | def snapshotMetadataToBinary(metadata: SnapshotMetadata): Array[Byte] = { 55 | SnapshotMetadataFormat.newBuilder() 56 | .setPersistenceId(metadata.persistenceId) 57 | .setSequenceNr(metadata.sequenceNr) 58 | .setTimestamp(metadata.timestamp) 59 | .build() 60 | .toByteArray 61 | } 62 | 63 | def snapshotMetadataFromBinary(metadataBytes: Array[Byte]): SnapshotMetadata = { 64 | val md = SnapshotMetadataFormat.parseFrom(metadataBytes) 65 | SnapshotMetadata( 66 | md.getPersistenceId, 67 | md.getSequenceNr, 68 | md.getTimestamp) 69 | } 70 | 71 | private def writeInt(outputStream: OutputStream, i: Int) = 72 | 0 to 24 by 8 foreach { shift ⇒ outputStream.write(i >> shift) } 73 | 74 | private def readInt(inputStream: InputStream) = 75 | (0 to 24 by 8).foldLeft(0) { (id, shift) ⇒ (id | (inputStream.read() << shift)) } 76 | } 77 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/ChildDataWatcher.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import java.util 4 | 5 | import org.I0Itec.zkclient.exception.ZkNoNodeException 6 | import org.I0Itec.zkclient.{IZkDataListener, ZkClient, IZkChildListener} 7 | 8 | private [kafka] class ChildDataWatcher[T]( 9 | zkClient: ZkClient, 10 | path: String, 11 | cb: (Map[String, T]) => Unit) 12 | extends IZkChildListener 13 | with IZkDataListener { 14 | 15 | import scala.collection.JavaConverters._ 16 | 17 | private var childDataMap = Map[String, T]() 18 | 19 | def start(): Map[String, T] = { 20 | val eventLock = zkClient.getEventLock 21 | try { 22 | eventLock.lock() 23 | val children = zkClient.subscribeChildChanges(path, this) 24 | if (children != null) { 25 | handleChildChange(path, children, invokeCallback = false) 26 | } 27 | childDataMap 28 | } finally { 29 | eventLock.unlock() 30 | } 31 | } 32 | 33 | def stop(): Unit = { 34 | val eventLock = zkClient.getEventLock 35 | try { 36 | eventLock.lock() 37 | zkClient.unsubscribeChildChanges(path, this) 38 | childDataMap.foreach { 39 | case (c, _) => zkClient.unsubscribeDataChanges(childPath(c), this) 40 | } 41 | } finally { 42 | eventLock.unlock() 43 | } 44 | } 45 | 46 | def handleChildChange(parentPath: String, currentChilds: util.List[String]): Unit = { 47 | handleChildChange(parentPath, currentChilds, invokeCallback = true) 48 | } 49 | 50 | def handleChildChange(parentPath: String, currentChilds: util.List[String], invokeCallback: Boolean): Unit = { 51 | val oldChildren = childDataMap.keySet 52 | // WARNING: if the currentChilds is null we can treat it as an empty set, however, we are never going to be notified 53 | // if the parentPath is re-created. 
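    // Diff the new child set against the children we already track: data-change subscriptions
    // are removed for deleted children and added for newly appeared ones, and the callback is
    // then invoked with the updated child-data map (unless invokeCallback is false, as on start()).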
54 | val newChildren = if (currentChilds != null) currentChilds.asScala.toSet else Set.empty[String] 55 | val childrenAdded = newChildren.diff(oldChildren) 56 | val childrenRemoved = oldChildren.diff(newChildren) 57 | 58 | childrenRemoved.foreach { c => 59 | zkClient.unsubscribeDataChanges(childPath(c), this) 60 | childDataMap = childDataMap - c 61 | } 62 | 63 | childrenAdded.foreach { c => 64 | zkClient.subscribeDataChanges(childPath(c), this) 65 | try { 66 | val data = zkClient.readData[T](childPath(c)) 67 | childDataMap = childDataMap + (c -> data) 68 | } catch { 69 | case e: ZkNoNodeException => 70 | zkClient.unsubscribeDataChanges(childPath(c), this) 71 | } 72 | } 73 | 74 | if (invokeCallback) 75 | cb(childDataMap) 76 | } 77 | 78 | def handleDataChange(path: String, data: Object): Unit = { 79 | childDataMap = childDataMap + (childId(path) -> data.asInstanceOf[T]) 80 | cb(childDataMap) 81 | } 82 | 83 | def handleDataDeleted(path: String): Unit = { 84 | // znode deletion is handled by handleChildChange 85 | } 86 | 87 | private def childId(path: String): String = path.substring(this.path.size + 1) 88 | 89 | private def childPath(child: String): String = path + "/" + child 90 | } 91 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/MetadataConsumer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import scala.util._ 4 | 5 | import kafka.api._ 6 | import kafka.common._ 7 | import kafka.consumer._ 8 | import kafka.utils._ 9 | 10 | import org.I0Itec.zkclient.ZkClient 11 | 12 | object MetadataConsumer { 13 | object Broker { 14 | def toString(brokers: List[Broker]) = brokers.mkString(",") 15 | 16 | def fromString(asString: String): Option[Broker] = { 17 | Json.parseFull(asString) match { 18 | case Some(m) => 19 | val brokerInfo = m.asInstanceOf[Map[String, Any]] 20 | val host = brokerInfo.get("host").get.asInstanceOf[String] 21 | val port = brokerInfo.get("port").get.asInstanceOf[Int] 22 | Some(Broker.apply(host, port)) 23 | case None => None 24 | } 25 | } 26 | } 27 | 28 | case class Broker(host: String, port: Int) { 29 | override def toString = s"${host}:${port}" 30 | } 31 | } 32 | 33 | trait MetadataConsumer { 34 | import MetadataConsumer._ 35 | 36 | val config: MetadataConsumerConfig 37 | 38 | def leaderFor(topic: String, brokers: List[Broker]): Option[Broker] = brokers match { 39 | case Nil => 40 | throw new IllegalArgumentException("empty broker list") 41 | case Broker(host, port) :: Nil => 42 | leaderFor(host, port, topic) 43 | case Broker(host, port) :: brokers => 44 | Try(leaderFor(host, port, topic)) match { 45 | case Failure(e) => leaderFor(topic, brokers) // failover 46 | case Success(l) => l 47 | } 48 | } 49 | 50 | def leaderFor(host: String, port: Int, topic: String): Option[Broker] = { 51 | import config.consumerConfig._ 52 | import ErrorMapping._ 53 | 54 | val consumer = new SimpleConsumer(host, port, socketTimeoutMs, socketReceiveBufferBytes, clientId) 55 | val request = new TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, 0, clientId, List(topic)) 56 | val response = try { consumer.send(request) } finally { consumer.close() } 57 | val topicMetadata = response.topicsMetadata(0) 58 | 59 | try { 60 | topicMetadata.errorCode match { 61 | case LeaderNotAvailableCode => None 62 | case NoError => topicMetadata.partitionsMetadata.filter(_.partitionId == config.partition)(0).leader.map(leader => Broker(leader.host, 
leader.port)) 63 | case anError => throw exceptionFor(anError) 64 | } 65 | } finally { 66 | consumer.close() 67 | } 68 | } 69 | 70 | def offsetFor(host: String, port: Int, topic: String, partition: Int): Long = { 71 | import config.consumerConfig._ 72 | import ErrorMapping._ 73 | 74 | val consumer = new SimpleConsumer(host, port, socketTimeoutMs, socketReceiveBufferBytes, clientId) 75 | val offsetRequest = OffsetRequest(Map(TopicAndPartition(topic, partition) -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 1))) 76 | val offsetResponse = try { consumer.getOffsetsBefore(offsetRequest) } finally { consumer.close() } 77 | val offsetPartitionResponse = offsetResponse.partitionErrorAndOffsets(TopicAndPartition(topic, partition)) 78 | 79 | try { 80 | offsetPartitionResponse.error match { 81 | case NoError => offsetPartitionResponse.offsets.head 82 | case anError => throw exceptionFor(anError) 83 | } 84 | } finally { 85 | consumer.close() 86 | } 87 | } 88 | } -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/journal/KafkaLoadSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.journal 2 | 3 | import scala.concurrent.duration._ 4 | 5 | import akka.actor._ 6 | import akka.persistence.PersistentActor 7 | import akka.persistence.kafka._ 8 | import akka.persistence.kafka.server._ 9 | import akka.testkit._ 10 | 11 | import com.typesafe.config.ConfigFactory 12 | 13 | import org.scalatest._ 14 | 15 | object KafkaLoadSpec { 16 | val config = ConfigFactory.parseString( 17 | """ 18 | |akka.persistence.journal.plugin = "kafka-journal" 19 | |akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 20 | |akka.test.single-expect-default = 10s 21 | |kafka-journal.event.producer.request.required.acks = 1 22 | |kafka-journal.event.producer.topic.mapper.class = "akka.persistence.kafka.EmptyEventTopicMapper" 23 | |kafka-journal.zookeeper.connection.timeout.ms = 10000 24 | |kafka-journal.zookeeper.session.timeout.ms = 10000 25 | |test-server.zookeeper.dir = target/test/zookeeper 26 | |test-server.kafka.log.dirs = target/test/kafka 27 | """.stripMargin) 28 | 29 | trait Measure extends { this: Actor ⇒ 30 | val NanoToSecond = 1000.0 * 1000 * 1000 31 | 32 | var startTime: Long = 0L 33 | var stopTime: Long = 0L 34 | 35 | var startSequenceNr = 0L; 36 | var stopSequenceNr = 0L; 37 | 38 | def startMeasure(): Unit = { 39 | startSequenceNr = lastSequenceNr 40 | startTime = System.nanoTime 41 | } 42 | 43 | def stopMeasure(): Unit = { 44 | stopSequenceNr = lastSequenceNr 45 | stopTime = System.nanoTime 46 | sender ! (NanoToSecond * (stopSequenceNr - startSequenceNr) / (stopTime - startTime)) 47 | } 48 | 49 | def lastSequenceNr: Long 50 | } 51 | 52 | class TestPersistentActor(val persistenceId: String) extends PersistentActor with Measure { 53 | def receiveRecover: Receive = handle 54 | 55 | def receiveCommand: Receive = { 56 | case c @ "start" => 57 | defer(c) { _ => startMeasure(); sender ! 
"started" } 58 | case c @ "stop" => 59 | defer(c) { _ => stopMeasure() } 60 | case payload: String => 61 | persistAsync(payload)(handle) 62 | } 63 | 64 | def handle: Receive = { 65 | case payload: String => 66 | } 67 | } 68 | } 69 | 70 | class KafkaLoadSpec extends TestKit(ActorSystem("test", KafkaLoadSpec.config)) with ImplicitSender with WordSpecLike with Matchers with KafkaCleanup { 71 | import KafkaLoadSpec._ 72 | 73 | val systemConfig = system.settings.config 74 | val journalConfig = new KafkaJournalConfig(systemConfig.getConfig("kafka-journal")) 75 | val serverConfig = new TestServerConfig(systemConfig.getConfig("test-server")) 76 | val server = new TestServer(serverConfig) 77 | 78 | override def afterAll(): Unit = { 79 | server.stop() 80 | system.shutdown() 81 | super.afterAll() 82 | } 83 | 84 | "A Kafka Journal" must { 85 | "have some reasonable throughput" in { 86 | val warmCycles = 100L // set to 10000L to get reasonable results 87 | val loadCycles = 1000L // set to 300000L to get reasonable results 88 | 89 | val processor1 = system.actorOf(Props(classOf[TestPersistentActor], "test")) 90 | 1L to warmCycles foreach { i => processor1 ! "a" } 91 | processor1 ! "start" 92 | expectMsg("started") 93 | 1L to loadCycles foreach { i => processor1 ! "a" } 94 | processor1 ! "stop" 95 | expectMsgPF(100.seconds) { case throughput: Double ⇒ println(f"\nthroughput = $throughput%.2f persistent commands per second") } 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/example/Example.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.example 2 | 3 | import java.util.Properties 4 | 5 | import scala.collection.immutable.Seq 6 | 7 | import akka.actor._ 8 | import akka.persistence.{PersistenceFailure, PersistentActor, PersistentRepr, RecoveryFailure, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotOffer} 9 | import akka.persistence.kafka.{EventDecoder, Event, EventTopicMapper} 10 | import akka.persistence.kafka.server.{TestServerConfig, TestServer} 11 | import akka.serialization.SerializationExtension 12 | 13 | import com.typesafe.config.ConfigFactory 14 | 15 | import kafka.consumer.{Consumer, ConsumerConfig} 16 | import kafka.serializer.{DefaultDecoder, StringDecoder} 17 | 18 | class ExampleProcessor(val persistenceId: String) extends PersistentActor { 19 | import ExampleProcessor.Increment 20 | 21 | var state: Int = 0 22 | def receiveCommand: Receive = { 23 | case i: Increment => 24 | persist(i)(update) 25 | case "snap" => 26 | saveSnapshot(state) 27 | case SaveSnapshotSuccess(md) => 28 | println(s"snapshot saved (metadata = ${md})") 29 | case SaveSnapshotFailure(md, e) => 30 | println(s"snapshot saving failed (metadata = ${md}, error = ${e.getMessage})") 31 | case PersistenceFailure(payload, snr, e) => 32 | println(s"persistence failed (payload = ${payload}, sequenceNr = ${snr}, error = ${e.getMessage})") 33 | } 34 | 35 | def receiveRecover: Receive = { 36 | case i: Increment => 37 | update(i) 38 | case SnapshotOffer(md, snapshot: Int) => 39 | state = snapshot 40 | println(s"state initialized: ${state} (metadata = ${md})") 41 | case RecoveryFailure(e) => 42 | println(s"recovery failed (error = ${e.getMessage})") 43 | } 44 | 45 | def update(i: Increment): Unit = { 46 | state += i.value 47 | println(s"state updated: ${state} (last sequence nr = ${lastSequenceNr})") 48 | } 49 | } 50 | 51 | class ExampleEventTopicMapper extends 
EventTopicMapper { 52 | def topicsFor(event: Event): Seq[String] = event.persistenceId match { 53 | case "a" => List("topic-a-1", "topic-a-2") 54 | case "b" => List("topic-b") 55 | case _ => Nil 56 | } 57 | } 58 | 59 | object ExampleProcessor extends App { 60 | case class Increment(value: Int) 61 | 62 | val system = ActorSystem("example", ConfigFactory.load("example")) 63 | val actorA = system.actorOf(Props(new ExampleProcessor("a"))) 64 | 65 | actorA ! Increment(2) 66 | actorA ! Increment(3) 67 | actorA ! "snap" 68 | } 69 | 70 | object ExampleConsumer extends App { 71 | val props = new Properties() 72 | props.put("group.id", "consumer-1") 73 | props.put("zookeeper.connect", "localhost:2181") 74 | props.put("auto.offset.reset", "smallest") 75 | props.put("auto.commit.enable", "false") 76 | 77 | val system = ActorSystem("consumer") 78 | 79 | val consConn = Consumer.create(new ConsumerConfig(props)) 80 | val streams = consConn.createMessageStreams(Map("topic-a-2" -> 1), 81 | keyDecoder = new StringDecoder, valueDecoder = new EventDecoder(system)) 82 | 83 | streams("topic-a-2")(0).foreach { mm => 84 | val event: Event = mm.message 85 | println(s"consumed ${event}") 86 | } 87 | } 88 | 89 | object ExampleJournalConsumer extends App { 90 | val props = new Properties() 91 | props.put("group.id", "consumer-2") 92 | props.put("zookeeper.connect", "localhost:2181") 93 | props.put("auto.offset.reset", "smallest") 94 | props.put("auto.commit.enable", "false") 95 | 96 | val system = ActorSystem("example") 97 | val extension = SerializationExtension(system) 98 | 99 | val consConn = Consumer.create(new ConsumerConfig(props)) 100 | val streams = consConn.createMessageStreams(Map("a" -> 1), 101 | keyDecoder = new StringDecoder, valueDecoder = new DefaultDecoder) 102 | 103 | streams("a")(0).foreach { mm => 104 | val persistent: PersistentRepr = extension.deserialize(mm.message, classOf[PersistentRepr]).get 105 | println(s"consumed ${persistent}") 106 | } 107 | } 108 | 109 | object ExampleServer extends App { 110 | new TestServer(TestServerConfig.load("example")) 111 | } -------------------------------------------------------------------------------- /src/test/scala/akka/persistence/kafka/BrokerWatcherSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka 2 | 3 | import java.io.File 4 | 5 | import akka.actor.ActorSystem 6 | import akka.persistence.kafka.BrokerWatcher.BrokersUpdated 7 | import akka.persistence.kafka.MetadataConsumer.Broker 8 | import akka.persistence.kafka.server.{TestKafkaServer, TestServerConfig, TestZookeeperServer} 9 | import akka.testkit.{ImplicitSender, TestKit} 10 | import com.typesafe.config.{ConfigValueFactory, ConfigFactory} 11 | import kafka.utils.{VerifiableProperties, ZKConfig} 12 | import org.apache.commons.io.FileUtils 13 | import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, Matchers, WordSpecLike} 14 | 15 | object BrokerWatcherSpec { 16 | 17 | val dataDir = "target/test" 18 | 19 | val config = ConfigFactory.parseString( 20 | s""" 21 | |akka.persistence.journal.plugin = "kafka-journal" 22 | |akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 23 | |akka.test.single-expect-default = 10s 24 | |kafka-journal.event.producer.request.required.acks = 1 25 | |kafka-journal.zookeeper.connection.timeout.ms = 10000 26 | |kafka-journal.zookeeper.session.timeout.ms = 10000 27 | |test-server.zookeeper.dir = "$dataDir/zookeeper" 28 | """.stripMargin).withFallback(ConfigFactory.load("reference")) 29 | 
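  // Standalone ZooKeeper client settings pointing at the in-process test server that the spec below starts.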
30 | val zkClientConfig = new ZKConfig(new VerifiableProperties(configToProperties(ConfigFactory.parseString( 31 | s""" 32 | |zookeeper.connect = "localhost:${config.getInt("test-server.zookeeper.port")}" 33 | |zookeeper.session.timeout.ms = 6000 34 | |zookeeper.connection.timeout.ms = 6000 35 | |zookeeper.sync.time.ms = 2000 36 | """.stripMargin)))) 37 | 38 | val basePort = 6667 39 | 40 | } 41 | 42 | abstract class BrokerWatcherSpec 43 | extends TestKit(ActorSystem("test", BrokerWatcherSpec.config)) 44 | with ImplicitSender 45 | with WordSpecLike 46 | with Matchers 47 | with BeforeAndAfterAll { 48 | 49 | import BrokerWatcherSpec._ 50 | 51 | FileUtils.deleteDirectory(new File(dataDir)) 52 | val zookeeper = new TestZookeeperServer(new TestServerConfig(config.getConfig("test-server"))) 53 | 54 | override def afterAll(): Unit = { 55 | zookeeper.stop() 56 | super.afterAll() 57 | } 58 | 59 | def spawnKafka(brokerId: Int): TestKafkaServer = { 60 | new TestKafkaServer(new TestServerConfig(config.getConfig("test-server") 61 | .withValue("kafka.broker.id", ConfigValueFactory.fromAnyRef(brokerId)) 62 | .withValue("kafka.port", ConfigValueFactory.fromAnyRef(basePort + (brokerId - 1))) 63 | .withValue("kafka.log.dirs", ConfigValueFactory.fromAnyRef(dataDir + "/" + s"kafka-$brokerId")))) 64 | } 65 | 66 | def withWatcher(body: BrokerWatcher => Unit) = { 67 | val watcher = new BrokerWatcher(zkClientConfig, testActor) 68 | try { 69 | body(watcher) 70 | } finally { 71 | watcher.stop() 72 | } 73 | } 74 | 75 | def withKafka(brokerId: Int)(body: TestKafkaServer => Unit) = { 76 | val kafka = spawnKafka(brokerId) 77 | try { 78 | body(kafka) 79 | } finally { 80 | kafka.stop() 81 | } 82 | } 83 | 84 | def expectBrokers(r: Range): Unit = { 85 | val msg = expectMsgClass(classOf[BrokersUpdated]) 86 | assertBrokers(msg.brokers, r) 87 | } 88 | 89 | def assertBrokers(brokers: List[Broker], r: Range): Unit = { 90 | assert(brokers.toSet === r.map(id => Broker("localhost", basePort + (id - 1))).toSet) 91 | } 92 | 93 | } 94 | 95 | class BrokerWatcherSpecWithKafkas extends BrokerWatcherSpec { 96 | 97 | val kafkas = (1 to 3).map(id => 98 | spawnKafka(id) 99 | ) 100 | 101 | override def afterAll(): Unit = { 102 | kafkas.foreach(_.stop()) 103 | super.afterAll() 104 | } 105 | 106 | "A BrokerWatcher" must { 107 | "return current list of brokers on start" in { 108 | withWatcher { watcher => 109 | val brokers = watcher.start() 110 | assertBrokers(brokers, 1 to 3) 111 | } 112 | } 113 | 114 | "notify listener when brokers are added and removed" in { 115 | withWatcher { watcher => 116 | watcher.start() 117 | withKafka(4) { newBroker => 118 | expectBrokers(1 to 4) 119 | kafkas(0).stop() 120 | expectBrokers(2 to 4) 121 | } 122 | } 123 | } 124 | } 125 | 126 | } 127 | 128 | class BrokerWatcherSpecWithoutKafkas extends BrokerWatcherSpec { 129 | 130 | "A BrokerWatcher" must { 131 | "return an empty list if no brokers are available on start" in { 132 | withWatcher { watcher => 133 | val brokers = watcher.start() 134 | assert(brokers === List.empty[Broker]) 135 | } 136 | } 137 | "notify listener when brokers eventually become available" in { 138 | withWatcher { watcher => 139 | val brokers = watcher.start() 140 | assert(brokers === List.empty[Broker]) 141 | withKafka(0) { newBroker => 142 | expectBrokers(0 until 0) 143 | expectBrokers(0 until 1) 144 | } 145 | } 146 | } 147 | } 148 | 149 | } 150 | -------------------------------------------------------------------------------- 
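(Note: a minimal usage sketch for the BrokerWatcher exercised by the spec above, using a hypothetical
application actor named BrokerAwareActor; it mirrors how KafkaSnapshotStore wires the watcher in:
start() returns the brokers currently registered in ZooKeeper, and later changes arrive as
BrokersUpdated messages sent to the listener ActorRef.)

import akka.actor.Actor
import akka.persistence.kafka.BrokerWatcher
import akka.persistence.kafka.BrokerWatcher.BrokersUpdated
import akka.persistence.kafka.MetadataConsumer.Broker
import kafka.utils.ZKConfig

// Hypothetical actor, shown for illustration only.
class BrokerAwareActor(zkConfig: ZKConfig) extends Actor {
  val brokerWatcher = new BrokerWatcher(zkConfig, self)
  var brokers: List[Broker] = brokerWatcher.start()  // brokers known at startup

  def receive = {
    case BrokersUpdated(newBrokers) => brokers = newBrokers  // keep the local broker view current
  }

  override def postStop(): Unit = {
    brokerWatcher.stop()  // unsubscribes the watcher and closes the ZkClient
    super.postStop()
  }
}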
/src/test/scala/akka/persistence/kafka/integration/KafkaIntegrationSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.integration 2 | 3 | import scala.collection.immutable.Seq 4 | import scala.concurrent.duration._ 5 | 6 | import akka.actor._ 7 | import akka.persistence._ 8 | import akka.persistence.SnapshotProtocol._ 9 | import akka.persistence.kafka._ 10 | import akka.persistence.kafka.journal.KafkaJournalConfig 11 | import akka.persistence.kafka.server._ 12 | import akka.serialization.SerializationExtension 13 | import akka.testkit._ 14 | 15 | import com.typesafe.config.ConfigFactory 16 | 17 | import org.scalatest._ 18 | 19 | import _root_.kafka.message.Message 20 | 21 | object KafkaIntegrationSpec { 22 | val config = ConfigFactory.parseString( 23 | """ 24 | |akka.persistence.journal.plugin = "kafka-journal" 25 | |akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 26 | |akka.test.single-expect-default = 10s 27 | |kafka-journal.event.producer.request.required.acks = 1 28 | |kafka-journal.zookeeper.connection.timeout.ms = 10000 29 | |kafka-journal.zookeeper.session.timeout.ms = 10000 30 | |test-server.zookeeper.dir = target/test/zookeeper 31 | |test-server.kafka.log.dirs = target/test/kafka 32 | """.stripMargin) 33 | 34 | class TestPersistentActor(val persistenceId: String, probe: ActorRef) extends PersistentActor { 35 | def receiveCommand = { 36 | case s: String => 37 | persist(s)(s => probe ! s) 38 | } 39 | 40 | def receiveRecover = { 41 | case s: SnapshotOffer => 42 | probe ! s 43 | case s: String => 44 | probe ! s 45 | } 46 | } 47 | 48 | class TestPersistentView(val persistenceId: String, val viewId: String, probe: ActorRef) extends PersistentView { 49 | def receive = { 50 | case s: SnapshotOffer => 51 | probe ! s 52 | case s: String => 53 | probe ! 
s 54 | } 55 | } 56 | } 57 | 58 | class KafkaIntegrationSpec extends TestKit(ActorSystem("test", KafkaIntegrationSpec.config)) with ImplicitSender with WordSpecLike with Matchers with KafkaCleanup { 59 | import KafkaIntegrationSpec._ 60 | import MessageUtil._ 61 | 62 | val systemConfig = system.settings.config 63 | val journalConfig = new KafkaJournalConfig(systemConfig.getConfig("kafka-journal")) 64 | val serverConfig = new TestServerConfig(systemConfig.getConfig("test-server")) 65 | val server = new TestServer(serverConfig) 66 | 67 | val serialization = SerializationExtension(system) 68 | val eventDecoder = new EventDecoder(system) 69 | 70 | val persistence = Persistence(system) 71 | val journal = persistence.journalFor(null) 72 | val store = persistence.snapshotStoreFor(null) 73 | 74 | override def beforeAll(): Unit = { 75 | super.beforeAll() 76 | writeJournal("pa", 1 to 3 map { i => s"a-${i}" }) 77 | writeJournal("pb", 1 to 3 map { i => s"b-${i}" }) 78 | writeJournal("pc", 1 to 3 map { i => s"c-${i}" }) 79 | } 80 | 81 | override def afterAll(): Unit = { 82 | server.stop() 83 | system.shutdown() 84 | super.afterAll() 85 | } 86 | 87 | import serverConfig._ 88 | 89 | def withPersistentView(persistenceId: String, viewId: String)(body: ActorRef => Unit) = { 90 | val actor = system.actorOf(Props(new TestPersistentView(persistenceId, viewId, testActor))) 91 | try { body(actor) } finally { system.stop(actor) } 92 | } 93 | 94 | def withPersistentActor(persistenceId: String)(body: ActorRef => Unit) = { 95 | val actor = system.actorOf(Props(new TestPersistentActor(persistenceId, testActor))) 96 | try { body(actor) } finally { system.stop(actor) } 97 | } 98 | 99 | def writeJournal(persistenceId: String, events: Seq[String]): Unit = withPersistentActor(persistenceId) { actor => 100 | events.foreach { event => actor ! 
event; expectMsg(event) } 101 | } 102 | 103 | def readJournal(journalTopic: String): Seq[PersistentRepr] = 104 | readMessages(journalTopic, 0).map(m => serialization.deserialize(payloadBytes(m), classOf[PersistentRepr]).get) 105 | 106 | def readEvents(partition: Int): Seq[Event] = 107 | readMessages("events", partition).map(m => eventDecoder.fromBytes(payloadBytes(m))) 108 | 109 | def readMessages(topic: String, partition: Int): Seq[Message] = 110 | new MessageIterator(kafka.hostName, kafka.port, topic, partition, 0, journalConfig.consumerConfig).toVector 111 | 112 | "A Kafka journal" must { 113 | "publish all events to the events topic by default" in { 114 | val eventSeq = for { 115 | partition <- 0 until kafka.numPartitions 116 | event <- readEvents(partition) 117 | } yield event 118 | 119 | val eventMap = eventSeq.groupBy(_.persistenceId) 120 | 121 | eventMap("pa") should be(Seq(Event("pa", 1L, "a-1"), Event("pa", 2L, "a-2"), Event("pa", 3L, "a-3"))) 122 | eventMap("pb") should be(Seq(Event("pb", 1L, "b-1"), Event("pb", 2L, "b-2"), Event("pb", 3L, "b-3"))) 123 | eventMap("pc") should be(Seq(Event("pc", 1L, "c-1"), Event("pc", 2L, "c-2"), Event("pc", 3L, "c-3"))) 124 | } 125 | "publish events for each persistent actor to a separate topic" in { 126 | readJournal("pa").map(_.payload) should be(Seq("a-1", "a-2", "a-3")) 127 | readJournal("pb").map(_.payload) should be(Seq("b-1", "b-2", "b-3")) 128 | readJournal("pc").map(_.payload) should be(Seq("c-1", "c-2", "c-3")) 129 | } 130 | "properly encode topic names" in { 131 | val persistenceId = "x/y/z" // not a valid topic name 132 | writeJournal(persistenceId, Seq("a", "b", "c")) 133 | readJournal(journalTopic(persistenceId)).map(_.payload) should be(Seq("a", "b", "c")) 134 | } 135 | } 136 | 137 | "A Kafka snapshot store" when { 138 | "configured with ignore-orphan = true" must { 139 | "ignore orphan snapshots (snapshot sequence nr > highest journal sequence nr)" in { 140 | val persistenceId = "pa" 141 | 142 | store ! SaveSnapshot(SnapshotMetadata(persistenceId, 4), "test") 143 | expectMsgPF() { case SaveSnapshotSuccess(md) => md.sequenceNr should be(4L) } 144 | 145 | withPersistentActor(persistenceId) { _ => 146 | expectMsg("a-1") 147 | expectMsg("a-2") 148 | expectMsg("a-3") 149 | } 150 | } 151 | "fallback to non-orphan snapshots" in { 152 | val persistenceId = "pa" 153 | 154 | store ! SaveSnapshot(SnapshotMetadata(persistenceId, 2), "test") 155 | expectMsgPF() { case SaveSnapshotSuccess(md) => md.sequenceNr should be(2L) } 156 | 157 | store ! SaveSnapshot(SnapshotMetadata(persistenceId, 4), "test") 158 | expectMsgPF() { case SaveSnapshotSuccess(md) => md.sequenceNr should be(4L) } 159 | 160 | withPersistentActor(persistenceId) { _ => 161 | expectMsgPF() { case SnapshotOffer(SnapshotMetadata(_, snr, _), _) => snr should be(2) } 162 | expectMsg("a-3") 163 | } 164 | } 165 | "not ignore view snapshots (for which no corresponding journal topic exists)" in { 166 | val persistenceId = "pa" 167 | val viewId = "va" 168 | 169 | store ! 
SaveSnapshot(SnapshotMetadata(viewId, 2), "test") 170 | expectMsgPF() { case SaveSnapshotSuccess(md) => md.sequenceNr should be(2L) } 171 | 172 | withPersistentView(persistenceId, viewId) { _ => 173 | expectMsgPF() { case SnapshotOffer(SnapshotMetadata(_, snr, _), _) => snr should be(2) } 174 | expectMsg("a-3") 175 | } 176 | } 177 | } 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | actor { 3 | serializers { 4 | kafka-event = "akka.persistence.kafka.journal.KafkaEventSerializer" 5 | kafka-snapshot = "akka.persistence.kafka.snapshot.KafkaSnapshotSerializer" 6 | } 7 | 8 | serialization-bindings { 9 | "akka.persistence.kafka.Event" = kafka-event 10 | "akka.persistence.kafka.snapshot.KafkaSnapshot" = kafka-snapshot 11 | } 12 | } 13 | } 14 | 15 | kafka-journal { 16 | 17 | # FQCN of the Kafka journal plugin 18 | class = "akka.persistence.kafka.journal.KafkaJournal" 19 | 20 | # Dispatcher for the plugin actor 21 | plugin-dispatcher = "kafka-journal.default-dispatcher" 22 | 23 | # Number of concurrent writers (should be <= number of available threads in 24 | # dispatcher). 25 | write-concurrency = 8 26 | 27 | # The partition to use when publishing to and consuming from journal topics. 28 | partition = 0 29 | 30 | # Default dispatcher for plugin actor. 31 | default-dispatcher { 32 | type = Dispatcher 33 | executor = "fork-join-executor" 34 | fork-join-executor { 35 | parallelism-min = 2 36 | parallelism-max = 8 37 | } 38 | } 39 | 40 | consumer { 41 | # ------------------------------------------------------------------- 42 | # Simple consumer configuration (used for message replay and reading 43 | # metadata). 44 | # 45 | # See http://kafka.apache.org/documentation.html#consumerconfigs 46 | # See http://kafka.apache.org/documentation.html#simpleconsumerapi 47 | # ------------------------------------------------------------------- 48 | 49 | socket.timeout.ms = 30000 50 | 51 | socket.receive.buffer.bytes = 65536 52 | 53 | fetch.message.max.bytes = 1048576 54 | } 55 | 56 | producer { 57 | # ------------------------------------------------------------------- 58 | # PersistentRepr producer (to journal topics) configuration. 59 | # 60 | # See http://kafka.apache.org/documentation.html#producerconfigs 61 | # 62 | # The metadata.broker.list property is set dynamically by the journal. 63 | # No need to set it here. 64 | # ------------------------------------------------------------------- 65 | 66 | request.required.acks = 1 67 | 68 | # DO NOT CHANGE! 69 | producer.type = "sync" 70 | 71 | # DO NOT CHANGE! 72 | partitioner.class = "akka.persistence.kafka.StickyPartitioner" 73 | 74 | # DO NOT CHANGE! 75 | key.serializer.class = "kafka.serializer.StringEncoder" 76 | 77 | # Increase if hundreds of topics are created during initialization. 78 | message.send.max.retries = 5 79 | 80 | # Increase if hundreds of topics are created during initialization. 81 | retry.backoff.ms = 100 82 | 83 | # Add further Kafka producer settings here, if needed. 84 | # ... 85 | } 86 | 87 | event.producer { 88 | # ------------------------------------------------------------------- 89 | # Event producer (to user-defined topics) configuration. 
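    # Events are published only to the topics returned by the configured topic mapper:
    # DefaultEventTopicMapper sends every event to the single "events" topic, while
    # EmptyEventTopicMapper disables event publishing altogether.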
90 | # 91 | # See http://kafka.apache.org/documentation.html#producerconfigs 92 | # ------------------------------------------------------------------- 93 | 94 | producer.type = "sync" 95 | 96 | request.required.acks = 0 97 | 98 | topic.mapper.class = "akka.persistence.kafka.DefaultEventTopicMapper" 99 | 100 | key.serializer.class = "kafka.serializer.StringEncoder" 101 | 102 | # Add further Kafka producer settings here, if needed. 103 | # ... 104 | } 105 | 106 | zookeeper { 107 | # ------------------------------------------------------------------- 108 | # Zookeeper client configuration 109 | # ------------------------------------------------------------------- 110 | 111 | connect = "localhost:2181" 112 | 113 | session.timeout.ms = 6000 114 | 115 | connection.timeout.ms = 6000 116 | 117 | sync.time.ms = 2000 118 | } 119 | } 120 | 121 | kafka-snapshot-store { 122 | 123 | # FQCN of the Kafka snapshot store plugin 124 | class = "akka.persistence.kafka.snapshot.KafkaSnapshotStore" 125 | 126 | # Dispatcher for the plugin actor. 127 | plugin-dispatcher = "kafka-snapshot-store.default-dispatcher" 128 | 129 | # The partition to use when publishing to and consuming from snapshot topics. 130 | partition = 0 131 | 132 | # Topic name prefix (which prepended to persistenceId) 133 | prefix = "snapshot-" 134 | 135 | # If set to true snapshots with sequence numbers higher than the sequence number 136 | # of the latest entry in their corresponding journal topic are ignored. This is 137 | # necessary to recover from certain Kafka failure scenarios. Should only be set 138 | # to false for isolated snapshot store tests. 139 | ignore-orphan = true 140 | 141 | # Default dispatcher for plugin actor. 142 | default-dispatcher { 143 | type = Dispatcher 144 | executor = "fork-join-executor" 145 | fork-join-executor { 146 | parallelism-min = 2 147 | parallelism-max = 8 148 | } 149 | } 150 | 151 | consumer { 152 | # ------------------------------------------------------------------- 153 | # Simple consumer configuration (used for loading snapshots and 154 | # reading metadata). 155 | # 156 | # See http://kafka.apache.org/documentation.html#consumerconfigs 157 | # See http://kafka.apache.org/documentation.html#simpleconsumerapi 158 | # ------------------------------------------------------------------- 159 | 160 | socket.timeout.ms = 30000 161 | 162 | socket.receive.buffer.bytes = 65536 163 | 164 | fetch.message.max.bytes = 1048576 165 | } 166 | 167 | producer { 168 | # ------------------------------------------------------------------- 169 | # Snapshot producer configuration. 170 | # 171 | # See http://kafka.apache.org/documentation.html#producerconfigs 172 | # 173 | # The metadata.broker.list property is set dynamically by the journal. 174 | # No need to set it here. 175 | # ------------------------------------------------------------------- 176 | 177 | request.required.acks = 1 178 | 179 | producer.type = "sync" 180 | 181 | # DO NOT CHANGE! 182 | partitioner.class = "akka.persistence.kafka.StickyPartitioner" 183 | 184 | # DO NOT CHANGE! 185 | key.serializer.class = "kafka.serializer.StringEncoder" 186 | 187 | # Increase if hundreds of topics are created during initialization. 188 | message.send.max.retries = 5 189 | 190 | # Increase if hundreds of topics are created during initialization. 191 | retry.backoff.ms = 500 192 | 193 | # Add further Kafka producer settings here, if needed. 194 | # ... 
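    # For illustration only, e.g.: compression.codec = "gzip"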
195 | } 196 | 197 | zookeeper { 198 | # ------------------------------------------------------------------- 199 | # Zookeeper client configuration 200 | # ------------------------------------------------------------------- 201 | 202 | connect = "localhost:2181" 203 | 204 | session.timeout.ms = 6000 205 | 206 | connection.timeout.ms = 6000 207 | 208 | sync.time.ms = 2000 209 | } 210 | } 211 | 212 | test-server { 213 | # ------------------------------------------------------------------- 214 | # Test Kafka and Zookeeper server configuration. 215 | # 216 | # See http://kafka.apache.org/documentation.html#brokerconfigs 217 | # ------------------------------------------------------------------- 218 | 219 | zookeeper { 220 | 221 | port = 2181 222 | 223 | dir = "data/zookeeper" 224 | } 225 | 226 | kafka { 227 | 228 | broker.id = 1 229 | 230 | port = 6667 231 | 232 | num.partitions = 2 233 | 234 | log.cleanup.policy = "compact" 235 | 236 | log.dirs = data/kafka 237 | 238 | log.index.size.max.bytes = 1024 239 | } 240 | } -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/snapshot/KafkaSnapshotStore.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.snapshot 2 | 3 | import scala.concurrent.{Promise, Future} 4 | import scala.concurrent.duration._ 5 | import scala.util._ 6 | 7 | import akka.actor._ 8 | import akka.pattern.{PromiseActorRef, pipe} 9 | import akka.persistence._ 10 | import akka.persistence.JournalProtocol._ 11 | import akka.persistence.kafka._ 12 | import akka.persistence.kafka.MetadataConsumer.Broker 13 | import akka.persistence.kafka.BrokerWatcher.BrokersUpdated 14 | import akka.serialization.SerializationExtension 15 | import akka.util.Timeout 16 | 17 | import _root_.kafka.producer.{Producer, KeyedMessage} 18 | 19 | /** 20 | * Optimized and fully async version of [[akka.persistence.snapshot.SnapshotStore]]. 
21 | */ 22 | trait KafkaSnapshotStoreEndpoint extends Actor { 23 | import SnapshotProtocol._ 24 | import context.dispatcher 25 | 26 | val extension = Persistence(context.system) 27 | val publish = extension.settings.internal.publishPluginCommands 28 | 29 | def receive = { 30 | case LoadSnapshot(persistenceId, criteria, toSequenceNr) ⇒ 31 | val p = sender 32 | loadAsync(persistenceId, criteria.limit(toSequenceNr)) map { 33 | sso ⇒ LoadSnapshotResult(sso, toSequenceNr) 34 | } recover { 35 | case e ⇒ LoadSnapshotResult(None, toSequenceNr) 36 | } pipeTo (p) 37 | case SaveSnapshot(metadata, snapshot) ⇒ 38 | val p = sender 39 | val md = metadata.copy(timestamp = System.currentTimeMillis) 40 | saveAsync(md, snapshot) map { 41 | _ ⇒ SaveSnapshotSuccess(md) 42 | } recover { 43 | case e ⇒ SaveSnapshotFailure(metadata, e) 44 | } pipeTo (p) 45 | case d @ DeleteSnapshot(metadata) ⇒ 46 | deleteAsync(metadata) onComplete { 47 | case Success(_) => if (publish) context.system.eventStream.publish(d) 48 | case Failure(_) => 49 | } 50 | case d @ DeleteSnapshots(persistenceId, criteria) ⇒ 51 | deleteAsync(persistenceId, criteria) onComplete { 52 | case Success(_) => if (publish) context.system.eventStream.publish(d) 53 | case Failure(_) => 54 | } 55 | } 56 | 57 | def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] 58 | def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] 59 | def deleteAsync(metadata: SnapshotMetadata): Future[Unit] 60 | def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] 61 | } 62 | 63 | class KafkaSnapshotStore extends KafkaSnapshotStoreEndpoint with MetadataConsumer with ActorLogging { 64 | import context.dispatcher 65 | 66 | type RangeDeletions = Map[String, SnapshotSelectionCriteria] 67 | type SingleDeletions = Map[String, List[SnapshotMetadata]] 68 | 69 | val serialization = SerializationExtension(context.system) 70 | val config = new KafkaSnapshotStoreConfig(context.system.settings.config.getConfig("kafka-snapshot-store")) 71 | val brokerWatcher = new BrokerWatcher(config.zookeeperConfig, self) 72 | var brokers: List[Broker] = brokerWatcher.start() 73 | 74 | override def postStop(): Unit = { 75 | brokerWatcher.stop() 76 | super.postStop() 77 | } 78 | 79 | def localReceive: Receive = { 80 | case BrokersUpdated(newBrokers) => 81 | brokers = newBrokers 82 | } 83 | 84 | override def receive: Receive = localReceive.orElse(super.receive) 85 | 86 | // Transient deletions only to pass TCK (persistent not supported) 87 | var rangeDeletions: RangeDeletions = Map.empty.withDefaultValue(SnapshotSelectionCriteria.None) 88 | var singleDeletions: SingleDeletions = Map.empty.withDefaultValue(Nil) 89 | 90 | def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = Future.successful { 91 | rangeDeletions += (persistenceId -> criteria) 92 | } 93 | 94 | def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = Future.successful { 95 | singleDeletions.get(metadata.persistenceId) match { 96 | case Some(dels) => singleDeletions += (metadata.persistenceId -> (metadata :: dels)) 97 | case None => singleDeletions += (metadata.persistenceId -> List(metadata)) 98 | } 99 | } 100 | 101 | def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = Future { 102 | val snapshotBytes = serialization.serialize(KafkaSnapshot(metadata, snapshot)).get 103 | val snapshotMessage = new KeyedMessage[String, Array[Byte]](snapshotTopic(metadata.persistenceId), "static", 
snapshotBytes) 104 | val snapshotProducer = new Producer[String, Array[Byte]](config.producerConfig(brokers)) 105 | try { 106 | // TODO: take a producer from a pool 107 | snapshotProducer.send(snapshotMessage) 108 | } finally { 109 | snapshotProducer.close() 110 | } 111 | } 112 | 113 | def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { 114 | val singleDeletions = this.singleDeletions 115 | val rangeDeletions = this.rangeDeletions 116 | for { 117 | highest <- if (config.ignoreOrphan) highestJournalSequenceNr(persistenceId) else Future.successful(Long.MaxValue) 118 | adjusted = if (config.ignoreOrphan && 119 | highest < criteria.maxSequenceNr && 120 | highest > 0L) criteria.copy(maxSequenceNr = highest) else criteria 121 | snapshot <- Future { 122 | val topic = snapshotTopic(persistenceId) 123 | 124 | def matcher(snapshot: KafkaSnapshot): Boolean = snapshot.matches(adjusted) && 125 | !snapshot.matches(rangeDeletions(persistenceId)) && 126 | !singleDeletions(persistenceId).contains(snapshot.metadata) 127 | 128 | leaderFor(topic, brokers) match { 129 | case Some(Broker(host, port)) => load(host, port, topic, matcher).map(s => SelectedSnapshot(s.metadata, s.snapshot)) 130 | case None => None 131 | } 132 | } 133 | } yield snapshot 134 | } 135 | 136 | def load(host: String, port: Int, topic: String, matcher: KafkaSnapshot => Boolean): Option[KafkaSnapshot] = { 137 | val offset = offsetFor(host, port, topic, config.partition) 138 | 139 | @annotation.tailrec 140 | def load(host: String, port: Int, topic: String, offset: Long): Option[KafkaSnapshot] = 141 | if (offset < 0) None else { 142 | val s = snapshot(host, port, topic, offset) 143 | if (matcher(s)) Some(s) else load(host, port, topic, offset - 1) 144 | } 145 | 146 | load(host, port, topic, offset - 1) 147 | } 148 | 149 | /** 150 | * Fetches the highest sequence number for `persistenceId` from the journal actor. 151 | */ 152 | private def highestJournalSequenceNr(persistenceId: String): Future[Long] = { 153 | val journal = extension.journalFor(persistenceId) 154 | val promise = Promise[Any]() 155 | 156 | // We need to use a PromiseActorRef here instead of ask because the journal doesn't reply to ReadHighestSequenceNr requests 157 | val ref = PromiseActorRef(extension.system.provider, Timeout(config.consumerConfig.socketTimeoutMs.millis), journal.toString) 158 | 159 | journal ! 
ReadHighestSequenceNr(0L, persistenceId, ref) 160 | 161 | ref.result.future.flatMap { 162 | case ReadHighestSequenceNrSuccess(snr) => Future.successful(snr) 163 | case ReadHighestSequenceNrFailure(err) => Future.failed(err) 164 | } 165 | } 166 | 167 | private def snapshot(host: String, port: Int, topic: String, offset: Long): KafkaSnapshot = { 168 | val iter = new MessageIterator(host, port, topic, config.partition, offset, config.consumerConfig) 169 | try { serialization.deserialize(MessageUtil.payloadBytes(iter.next()), classOf[KafkaSnapshot]).get } finally { iter.close() } 170 | } 171 | 172 | private def snapshotTopic(persistenceId: String): String = 173 | s"${config.prefix}${journalTopic(persistenceId)}" 174 | } 175 | 176 | -------------------------------------------------------------------------------- /src/main/scala/akka/persistence/kafka/journal/KafkaJournal.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.kafka.journal 2 | 3 | import scala.collection.immutable.Seq 4 | 5 | import akka.persistence.{PersistentId, PersistentConfirmation} 6 | import akka.persistence.journal.AsyncWriteJournal 7 | import akka.serialization.{Serialization, SerializationExtension} 8 | 9 | import kafka.producer._ 10 | 11 | import scala.concurrent.Future 12 | import scala.concurrent.duration._ 13 | 14 | import akka.actor._ 15 | import akka.pattern.ask 16 | import akka.persistence.PersistentRepr 17 | import akka.persistence.kafka._ 18 | import akka.persistence.kafka.MetadataConsumer.Broker 19 | import akka.persistence.kafka.BrokerWatcher.BrokersUpdated 20 | import akka.util.Timeout 21 | 22 | class KafkaJournal extends AsyncWriteJournal with MetadataConsumer { 23 | import context.dispatcher 24 | 25 | type Deletions = Map[String, (Long, Boolean)] 26 | 27 | val serialization = SerializationExtension(context.system) 28 | val config = new KafkaJournalConfig(context.system.settings.config.getConfig("kafka-journal")) 29 | 30 | val brokerWatcher = new BrokerWatcher(config.zookeeperConfig, self) 31 | var brokers = brokerWatcher.start() 32 | 33 | override def postStop(): Unit = { 34 | brokerWatcher.stop() 35 | super.postStop() 36 | } 37 | 38 | override def receive: Receive = localReceive.orElse(super.receive) 39 | 40 | private def localReceive: Receive = { 41 | case BrokersUpdated(newBrokers) if newBrokers != brokers => 42 | brokers = newBrokers 43 | journalProducerConfig = config.journalProducerConfig(brokers) 44 | eventProducerConfig = config.eventProducerConfig(brokers) 45 | writers.foreach(_ ! 
UpdateKafkaJournalWriterConfig(writerConfig)) 46 | } 47 | 48 | // -------------------------------------------------------------------------------------- 49 | // Journal writes 50 | // -------------------------------------------------------------------------------------- 51 | 52 | var journalProducerConfig = config.journalProducerConfig(brokers) 53 | var eventProducerConfig = config.eventProducerConfig(brokers) 54 | 55 | var writers: Vector[ActorRef] = Vector.fill(config.writeConcurrency)(writer()) 56 | val writeTimeout = Timeout(journalProducerConfig.requestTimeoutMs.millis) 57 | 58 | // Transient deletions only to pass TCK (persistent not supported) 59 | var deletions: Deletions = Map.empty 60 | 61 | def asyncWriteMessages(messages: Seq[PersistentRepr]): Future[Unit] = { 62 | val sends = messages.groupBy(_.persistenceId).map { 63 | case (pid, msgs) => writerFor(pid).ask(msgs)(writeTimeout) 64 | } 65 | Future.sequence(sends).map(_ => ()) 66 | } 67 | 68 | def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = 69 | Future.successful(deleteMessagesTo(persistenceId, toSequenceNr, permanent)) 70 | 71 | def asyncDeleteMessages(messageIds: Seq[PersistentId], permanent: Boolean): Future[Unit] = 72 | Future.failed(new UnsupportedOperationException("Individual deletions not supported")) 73 | 74 | def asyncWriteConfirmations(confirmations: Seq[PersistentConfirmation]): Future[Unit] = 75 | Future.failed(new UnsupportedOperationException("Channels not supported")) 76 | 77 | def deleteMessagesTo(persistenceId: String, toSequenceNr: Long, permanent: Boolean): Unit = 78 | deletions = deletions + (persistenceId -> (toSequenceNr, permanent)) 79 | 80 | def deleteMessages(messageIds: Seq[PersistentId], permanent: Boolean): Unit = 81 | throw new UnsupportedOperationException("Individual deletions not supported") 82 | 83 | def writeConfirmations(confirmations: Seq[PersistentConfirmation]): Unit = 84 | throw new UnsupportedOperationException("Channels not supported") 85 | 86 | private def writerFor(persistenceId: String): ActorRef = 87 | writers(math.abs(persistenceId.hashCode) % config.writeConcurrency) 88 | 89 | private def writer(): ActorRef = { 90 | context.actorOf(Props(new KafkaJournalWriter(writerConfig)).withDispatcher(config.pluginDispatcher)) 91 | } 92 | 93 | private def writerConfig = { 94 | KafkaJournalWriterConfig(journalProducerConfig, eventProducerConfig, config.eventTopicMapper, serialization) 95 | } 96 | 97 | 98 | // -------------------------------------------------------------------------------------- 99 | // Journal reads 100 | // -------------------------------------------------------------------------------------- 101 | 102 | def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = 103 | Future(readHighestSequenceNr(persistenceId, fromSequenceNr)) 104 | 105 | def readHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Long = { 106 | val topic = journalTopic(persistenceId) 107 | leaderFor(topic, brokers) match { 108 | case Some(Broker(host, port)) => offsetFor(host, port, topic, config.partition) 109 | case None => fromSequenceNr 110 | } 111 | } 112 | 113 | def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr => Unit): Future[Unit] = { 114 | val deletions = this.deletions 115 | Future(replayMessages(persistenceId, fromSequenceNr, toSequenceNr, max, deletions, replayCallback)) 116 | } 117 | 118 | def 
replayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long, deletions: Deletions, callback: PersistentRepr => Unit): Unit = { 119 | val (deletedTo, permanent) = deletions.getOrElse(persistenceId, (0L, false)) 120 | 121 | val adjustedFrom = if (permanent) math.max(deletedTo + 1L, fromSequenceNr) else fromSequenceNr 122 | val adjustedNum = toSequenceNr - adjustedFrom + 1L 123 | val adjustedTo = if (max < adjustedNum) adjustedFrom + max - 1L else toSequenceNr 124 | 125 | val lastSequenceNr = leaderFor(journalTopic(persistenceId), brokers) match { 126 | case None => 0L // topic for persistenceId doesn't exist yet 127 | case Some(MetadataConsumer.Broker(host, port)) => 128 | val iter = persistentIterator(host, port, journalTopic(persistenceId), adjustedFrom - 1L) 129 | iter.map(p => if (!permanent && p.sequenceNr <= deletedTo) p.update(deleted = true) else p).foldLeft(adjustedFrom) { 130 | case (snr, p) => if (p.sequenceNr >= adjustedFrom && p.sequenceNr <= adjustedTo) callback(p); p.sequenceNr 131 | } 132 | } 133 | } 134 | 135 | def persistentIterator(host: String, port: Int, topic: String, offset: Long): Iterator[PersistentRepr] = { 136 | new MessageIterator(host, port, topic, config.partition, offset, config.consumerConfig).map { m => 137 | serialization.deserialize(MessageUtil.payloadBytes(m), classOf[PersistentRepr]).get 138 | } 139 | } 140 | } 141 | 142 | private case class KafkaJournalWriterConfig( 143 | journalProducerConfig: ProducerConfig, 144 | eventProducerConfig: ProducerConfig, 145 | evtTopicMapper: EventTopicMapper, 146 | serialization: Serialization) 147 | 148 | private case class UpdateKafkaJournalWriterConfig(config: KafkaJournalWriterConfig) 149 | 150 | private class KafkaJournalWriter(var config: KafkaJournalWriterConfig) extends Actor { 151 | var msgProducer = createMessageProducer() 152 | var evtProducer = createEventProducer() 153 | 154 | def receive = { 155 | case UpdateKafkaJournalWriterConfig(newConfig) => 156 | msgProducer.close() 157 | evtProducer.close() 158 | config = newConfig 159 | msgProducer = createMessageProducer() 160 | evtProducer = createEventProducer() 161 | 162 | case messages: Seq[PersistentRepr] => 163 | writeMessages(messages) 164 | sender ! 
() 165 | } 166 | 167 | def writeMessages(messages: Seq[PersistentRepr]): Unit = { 168 | val keyedMsgs = for { 169 | m <- messages 170 | } yield new KeyedMessage[String, Array[Byte]](journalTopic(m.persistenceId), "static", config.serialization.serialize(m).get) 171 | 172 | val keyedEvents = for { 173 | m <- messages 174 | e = Event(m.persistenceId, m.sequenceNr, m.payload) 175 | t <- config.evtTopicMapper.topicsFor(e) 176 | } yield new KeyedMessage(t, e.persistenceId, config.serialization.serialize(e).get) 177 | 178 | msgProducer.send(keyedMsgs: _*) 179 | evtProducer.send(keyedEvents: _*) 180 | } 181 | 182 | override def postStop(): Unit = { 183 | msgProducer.close() 184 | evtProducer.close() 185 | super.postStop() 186 | } 187 | 188 | private def createMessageProducer() = new Producer[String, Array[Byte]](config.journalProducerConfig) 189 | 190 | private def createEventProducer() = new Producer[String, Array[Byte]](config.eventProducerConfig) 191 | } 192 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Kafka Plugins for Akka Persistence 2 | ================================== 3 | 4 | Replicated [Akka Persistence](http://doc.akka.io/docs/akka/2.3.11/scala/persistence.html) journal and snapshot store backed by [Apache Kafka](http://kafka.apache.org/). 
5 | 6 | [![Build Status](https://travis-ci.org/krasserm/akka-persistence-kafka.svg?branch=travis)](https://travis-ci.org/krasserm/akka-persistence-kafka) 7 | 8 | Dependency 9 | ---------- 10 | 11 | To include the Kafka plugins in your `sbt` project, add the following lines to your `build.sbt` file: 12 | 13 | resolvers += "krasserm at bintray" at "http://dl.bintray.com/krasserm/maven" 14 | 15 | libraryDependencies += "com.github.krasserm" %% "akka-persistence-kafka" % "0.4" 16 | 17 | This version of `akka-persistence-kafka` depends on Kafka 0.8.2.1, Akka 2.3.11 and is cross-built against Scala 2.10.4 and 2.11.6. A complete list of released versions is [here](https://github.com/krasserm/akka-persistence-kafka/wiki/Releases). 18 | 19 | Usage hints 20 | ----------- 21 | 22 | Kafka does not permanently store log entries but rather deletes them after a configurable _retention time_, which defaults to 7 days in Kafka 0.8.x. Therefore, applications need to take snapshots of their persistent actors at intervals that are smaller than the configured retention time (for example, every 3 days). This ensures that persistent actors can always be recovered successfully. 23 | 24 | Alternatively, the retention time can be set to a maximum value so that Kafka will never delete old entries. In this case, all events written by a single persistent actor must fit on a single node. This is a limitation of the current implementation, which may be removed in later versions. However, this limitation is likely not relevant when running Kafka with default (or comparable) retention times and taking snapshots. 25 | 26 | The latest snapshot of a persistent actor is never deleted if [log compaction](http://kafka.apache.org/documentation.html#compaction) is enabled. See also section [Configuration hints](#configuration-hints) for details on how to properly configure Kafka for use with the storage plugins. 27 | 28 | Journal plugin 29 | -------------- 30 | 31 | ### Activation 32 | 33 | To activate the journal plugin, add the following line to `application.conf`: 34 | 35 | akka.persistence.journal.plugin = "kafka-journal" 36 | 37 | This will run the journal plugin with default settings and connect to a Zookeeper instance running on `localhost:2181`. The Zookeeper connect string can be customized with the `kafka-journal.zookeeper.connect` configuration key (see also section [Kafka cluster](#kafka-cluster)). Recommended Kafka broker configurations are given in section [Configuration hints](#configuration-hints). 38 | 39 | ### Use cases 40 | 41 | - Akka Persistence [journal plugin](http://doc.akka.io/docs/akka/2.3.9/scala/persistence.html#journal-plugin-api) (obvious). 42 | - Event publishing to [user-defined topics](#user-defined-topics). 43 | - Event consumption from user-defined topics by [external consumers](#external-consumers). 44 | 45 | ### Journal topics 46 | 47 | For each persistent actor, the plugin creates a Kafka topic whose name equals the actor's `persistenceId` (any character other than alphanumerics, `.`, `-` and `_` is replaced by `_`). Events published to these topics are serialized `akka.persistence.PersistentRepr` objects (see [journal plugin API](http://doc.akka.io/docs/akka/2.3.9/scala/persistence.html#journal-plugin-api)). Serialization of `PersistentRepr` objects can be [customized](http://doc.akka.io/docs/akka/2.3.11/scala/persistence.html#custom-serialization). Journal topics are mainly intended for internal use (for recovery of persistent actors) but can also be [consumed externally](#external-consumers).
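For illustration, the naming rule can be sketched as a one-liner (the helper name below is made up for this example and is not part of the plugin API):

```scala
// Sketch of the topic-name rule described above: every character that is not
// alphanumeric, '.', '-' or '_' is replaced by '_'.
def sketchedJournalTopicName(persistenceId: String): String =
  persistenceId.replaceAll("[^\\w\\._-]", "_")

sketchedJournalTopicName("user-12")         // "user-12" (used as-is)
sketchedJournalTopicName("shopping cart/7") // "shopping_cart_7"
```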
48 | 49 | ### User-defined topics 50 | 51 | The journal plugin can also publish events to user-defined topics. By default, all events generated by all persistent actors are published to a single `events` topic. This topic is intended for [external consumption](#external-consumers) only. Events published to user-defined topics are serialized `Event` objects: 52 | 53 | ```scala 54 | package akka.persistence.kafka 55 | 56 | /** 57 | * Event published to user-defined topics. 58 | * 59 | * @param persistenceId Id of the persistent actor that generates event `data`. 60 | * @param sequenceNr Sequence number of the event. 61 | * @param data Event data generated by a persistent actor. 62 | */ 63 | case class Event(persistenceId: String, sequenceNr: Long, data: Any) 64 | ``` 65 | 66 | where `data` is the actual event written by a persistent actor (by calling `persist` or `persistAsync`), `sequenceNr` is the event's sequence number and `persistenceId` the id of the persistent actor. `Event` objects are serialized with a [protobuf](https://github.com/google/protobuf) serializer and event `data` serialization can be customized with a [user-defined serializer](http://doc.akka.io/docs/akka/2.3.11/scala/persistence.html#custom-serialization) in the same way as for [journal topics](#journal-topics). Custom serializer configurations always apply to both journal topics and user-defined topics. 67 | 68 | For publishing events to user-defined topics, the journal plugin uses an `EventTopicMapper`: 69 | 70 | ```scala 71 | package akka.persistence.kafka 72 | 73 | /** 74 | * Defines a mapping of events to user-defined topics. 75 | */ 76 | trait EventTopicMapper { 77 | /** 78 | * Maps an event to zero or more topics. 79 | * 80 | * @param event event to be mapped. 81 | * @return a sequence of topic names. 82 | */ 83 | def topicsFor(event: Event): immutable.Seq[String] 84 | } 85 | ``` 86 | 87 | The default mapper is `DefaultEventTopicMapper`, which maps all events to the `events` topic. It is configured in the [reference configuration](#reference-configuration) as follows: 88 | 89 | kafka-journal.event.producer.topic.mapper.class = "akka.persistence.kafka.DefaultEventTopicMapper" 90 | 91 | To customize the mapping of events to user-defined topics, applications can implement and configure a custom `EventTopicMapper`. For example, in order to publish 92 | 93 | - events from persistent actor `a` to topics `topic-a-1` and `topic-a-2` and 94 | - events from persistent actor `b` to topic `topic-b` 95 | 96 | and to turn off publishing of events from all other actors, one would implement the following `ExampleEventTopicMapper` (imports omitted, see [Example source code](#example-source-code) for the complete class): 97 | 98 | ```scala 99 | package akka.persistence.kafka.example 100 | 101 | class ExampleEventTopicMapper extends EventTopicMapper { 102 | def topicsFor(event: Event): Seq[String] = event.persistenceId match { 103 | case "a" => List("topic-a-1", "topic-a-2") 104 | case "b" => List("topic-b") 105 | case _ => Nil 106 | } 107 | } 108 | ``` 109 | and configure it in `application.conf`: 110 | 111 | kafka-journal.event.producer.topic.mapper.class = "akka.persistence.kafka.example.ExampleEventTopicMapper"
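The effect of this mapping can be checked directly (a minimal sketch; the event payloads are made up):

```scala
import akka.persistence.kafka.Event
import akka.persistence.kafka.example.ExampleEventTopicMapper

val mapper = new ExampleEventTopicMapper

mapper.topicsFor(Event("a", 1L, "event-from-a")) // List("topic-a-1", "topic-a-2")
mapper.topicsFor(Event("b", 2L, "event-from-b")) // List("topic-b")
mapper.topicsFor(Event("c", 3L, "event-from-c")) // Nil: not published to any user-defined topic
```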
112 | 113 | To turn off publishing events to user-defined topics, the `EmptyEventTopicMapper` should be configured: 114 | 115 | kafka-journal.event.producer.topic.mapper.class = "akka.persistence.kafka.EmptyEventTopicMapper" 116 | 117 | ### External consumers 118 | 119 | The following example shows how to consume `Event`s from a user-defined topic with name `topic-a-2` (see [previous](#user-defined-topics) example) using Kafka's [high-level consumer API](http://kafka.apache.org/documentation.html#highlevelconsumerapi): 120 | 121 | ```scala 122 | import java.util.Properties 123 | import akka.actor.ActorSystem 124 | import akka.persistence.kafka.{EventDecoder, Event} 125 | 126 | import kafka.consumer.{Consumer, ConsumerConfig} 127 | import kafka.serializer.StringDecoder 128 | 129 | val props = new Properties() 130 | props.put("group.id", "consumer-1") 131 | props.put("zookeeper.connect", "localhost:2181") 132 | // ... 133 | 134 | val system = ActorSystem("consumer") 135 | 136 | val consConn = Consumer.create(new ConsumerConfig(props)) 137 | val streams = consConn.createMessageStreams(Map("topic-a-2" -> 1), 138 | keyDecoder = new StringDecoder, valueDecoder = new EventDecoder(system)) 139 | 140 | streams("topic-a-2")(0).foreach { mm => 141 | val event: Event = mm.message 142 | println(s"consumed ${event}") 143 | } 144 | ``` 145 | 146 | Applications may also consume serialized `PersistentRepr` objects from journal topics and deserialize them with Akka's serialization extension: 147 | 148 | ```scala 149 | import java.util.Properties 150 | 151 | import akka.actor._ 152 | import akka.persistence.PersistentRepr 153 | import akka.serialization.SerializationExtension 154 | 155 | import com.typesafe.config.ConfigFactory 156 | 157 | import kafka.consumer.{Consumer, ConsumerConfig} 158 | import kafka.serializer.{DefaultDecoder, StringDecoder} 159 | 160 | val props = new Properties() 161 | props.put("group.id", "consumer-2") 162 | props.put("zookeeper.connect", "localhost:2181") 163 | // ... 164 | 165 | val system = ActorSystem("example") 166 | val extension = SerializationExtension(system) 167 | 168 | val consConn = Consumer.create(new ConsumerConfig(props)) 169 | val streams = consConn.createMessageStreams(Map("a" -> 1), 170 | keyDecoder = new StringDecoder, valueDecoder = new DefaultDecoder) 171 | 172 | streams("a")(0).foreach { mm => 173 | val persistent: PersistentRepr = extension.deserialize(mm.message, classOf[PersistentRepr]).get 174 | println(s"consumed ${persistent}") 175 | } 176 | ``` 177 | 178 | There are many other libraries that can be used to consume (event) streams from Kafka topics, such as [Spark Streaming](http://spark.apache.org/docs/latest/streaming-programming-guide.html), to mention only one example. 179 | 180 | ### Implementation notes 181 | 182 | - During initialization, the journal plugin fetches cluster metadata from Zookeeper, which may take up to a few seconds. 183 | - The journal plugin always writes `PersistentRepr` entries to partition 0 of journal topics. This ensures that all events written by a single persistent actor are stored in correct order. Later versions of the plugin may switch to a higher partition after having written a configurable number of events to the current partition. 184 | - The journal plugin distributes `Event` entries to all available partitions of user-defined topics. The partition key is the event's `persistenceId` so that a partial ordering of events is preserved when consuming events from user-defined topics. In other words, events written by a single persistent actor are always consumed in correct order, but the relative ordering of events from different persistent actors is not defined (see the sketch below).
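The last note can be made concrete with a small, illustrative check (the helper below is not part of the plugin; `consumed` stands for the `Event`s read from a user-defined topic, in consumption order):

```scala
import akka.persistence.kafka.Event

// Per persistenceId, sequence numbers arrive in order; across different
// persistenceIds no ordering is guaranteed.
def perActorOrderingHolds(consumed: Seq[Event]): Boolean =
  consumed.groupBy(_.persistenceId).values.forall { events =>
    val nrs = events.map(_.sequenceNr)
    nrs == nrs.sorted
  }
```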
185 | 186 | ### Current limitations 187 | 188 | - The journal plugin does not support features that have been deprecated in Akka 2.3.4 (channels and single event deletions). 189 | - Range deletions are not persistent (which may not be relevant for applications that configure Kafka with reasonably small retention times). 190 | 191 | ### Example source code 192 | 193 | The complete source code of all examples from previous sections is in [Example.scala](https://github.com/krasserm/akka-persistence-kafka/blob/master/src/test/scala/akka/persistence/kafka/example/Example.scala), the corresponding configuration in [example.conf](https://github.com/krasserm/akka-persistence-kafka/blob/master/src/test/resources/example.conf). 194 | 195 | Snapshot store plugin 196 | --------------------- 197 | 198 | ### Activation 199 | 200 | To activate the snapshot store plugin, add the following line to `application.conf`: 201 | 202 | akka.persistence.snapshot-store.plugin = "kafka-snapshot-store" 203 | 204 | This will run the snapshot store plugin with default settings and connect to a Zookeeper instance running on `localhost:2181`. The Zookeeper connect string can be customized with the `kafka-snapshot-store.zookeeper.connect` configuration key (see also section [Kafka cluster](#kafka-cluster)). Recommended Kafka broker configurations are given in section [Configuration hints](#configuration-hints). 205 | 206 | ### Snapshot topics 207 | 208 | For each persistent actor, the plugin creates a Kafka topic where the topic name equals the actor's `persistenceId`, prefixed by the value of the `kafka-snapshot-store.prefix` configuration key, which defaults to `snapshot-`. 209 | For example, if an actor's `persistenceId` is `example`, its snapshots are published to topic `snapshot-example`. For persistent views, the `viewId` is taken instead of the `persistenceId`. 210 | 211 | ### Implementation notes 212 | 213 | - During initialization, the snapshot store plugin fetches cluster metadata from Zookeeper, which may take up to a few seconds. 214 | - The snapshot store plugin always writes snapshots to partition 0 of snapshot topics. 215 | 216 | ### Current limitations 217 | 218 | - Deletions are not persistent (which may not be relevant for applications that configure Kafka with reasonably small retention times). 219 | 220 | Kafka 221 | ----- 222 | 223 | ### Kafka cluster 224 | 225 | To connect to an existing Kafka cluster, an application must set a value for the `kafka-journal.zookeeper.connect` key (and, if the snapshot store plugin is used, the `kafka-snapshot-store.zookeeper.connect` key) in its `application.conf`: 226 | 227 | kafka-journal.zookeeper.connect = "<host1>:<port1>,<host2>:<port2>,..." 228 | 229 | If you want to run a Kafka cluster on a single node, you may find [this article](http://www.michael-noll.com/blog/2013/03/13/running-a-multi-broker-apache-kafka-cluster-on-a-single-node/) useful. 230 | 231 | ### Test server 232 | 233 | To use the test server, the following additional dependencies must be added to `build.sbt`: 234 | 235 | libraryDependencies ++= Seq( 236 | "com.github.krasserm" %% "akka-persistence-kafka" % "0.4" % "test" classifier "tests", 237 | "org.apache.curator" % "curator-test" % "2.7.1" % "test" 238 | ) 239 | 240 | This makes the `TestServer` class available, which can be used to start a single Kafka and Zookeeper instance: 241 | 242 | ```scala 243 | import akka.persistence.kafka.server.TestServer 244 | 245 | // start a local Kafka and Zookeeper instance 246 | val server = new TestServer() 247 | 248 | // use the local instance 249 | // ... 250 | 251 | // and stop it 252 | server.stop() 253 | ``` 254 | 255 | The `TestServer` configuration can be customized with the `test-server.*` configuration keys (see [reference configuration](#reference-configuration) for details). 256 | 257 | ### Configuration hints 258 | 259 | The following [broker configurations](http://kafka.apache.org/documentation.html#brokerconfigs) are recommended for use with the storage plugins: 260 | 261 | - `num.partitions` should be set to `1` by default because the plugins only write to partition 0 of [journal topics](#journal-topics) and [snapshot topics](#snapshot-topics). If a higher number of partitions is needed for [user-defined topics](#user-defined-topics) (e.g. for scalability or throughput reasons) then this should be configured manually with the `kafka-topics` command line tool. 262 | - `default.replication.factor` should be set to at least `2` for high-availability of topics created by the plugins. 263 | - `message.max.bytes` and `replica.fetch.max.bytes` should be set to a value that is larger than the largest snapshot size. The default value is `1024 * 1024`, which may be large enough for journal entries but is likely too small for snapshots. When changing these settings, make sure to also set `kafka-snapshot-store.consumer.fetch.message.max.bytes` and `kafka-journal.consumer.fetch.message.max.bytes` to this value (see the example after this list). 264 | - `log.cleanup.policy` must be set to `"compact"`, otherwise the most recent snapshot may be deleted once the retention time is exceeded, and complete state recovery of persistent actors is then no longer possible. 265 | 266 | See also section [Usage hints](#usage-hints).
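For example, assuming snapshots of up to 10 MB, a broker configuration (`server.properties`) that follows these hints could look like this (the 10 MB value is a placeholder, use the size of your largest snapshot):

    num.partitions=1
    default.replication.factor=2
    message.max.bytes=10485760
    replica.fetch.max.bytes=10485760
    log.cleanup.policy=compact

with the matching consumer settings in the application's `application.conf`:

    kafka-journal.consumer.fetch.message.max.bytes = 10485760
    kafka-snapshot-store.consumer.fetch.message.max.bytes = 10485760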
267 | 268 | Reference configuration 269 | ----------------------- 270 | 271 | kafka-journal { 272 | 273 | # FQCN of the Kafka journal plugin 274 | class = "akka.persistence.kafka.journal.KafkaJournal" 275 | 276 | # Dispatcher for the plugin actor 277 | plugin-dispatcher = "kafka-journal.default-dispatcher" 278 | 279 | # Number of concurrent writers (should be <= number of available threads in 280 | # dispatcher). 281 | write-concurrency = 8 282 | 283 | # The partition to use when publishing to and consuming from journal topics. 284 | partition = 0 285 | 286 | # Default dispatcher for plugin actor. 287 | default-dispatcher { 288 | type = Dispatcher 289 | executor = "fork-join-executor" 290 | fork-join-executor { 291 | parallelism-min = 2 292 | parallelism-max = 8 293 | } 294 | } 295 | 296 | consumer { 297 | # ------------------------------------------------------------------- 298 | # Simple consumer configuration (used for message replay and reading 299 | # metadata). 300 | # 301 | # See http://kafka.apache.org/documentation.html#consumerconfigs 302 | # See http://kafka.apache.org/documentation.html#simpleconsumerapi 303 | # ------------------------------------------------------------------- 304 | 305 | socket.timeout.ms = 30000 306 | 307 | socket.receive.buffer.bytes = 65536 308 | 309 | fetch.message.max.bytes = 1048576 310 | } 311 | 312 | producer { 313 | # ------------------------------------------------------------------- 314 | # PersistentRepr producer (to journal topics) configuration. 315 | # 316 | # See http://kafka.apache.org/documentation.html#producerconfigs 317 | # 318 | # The metadata.broker.list property is set dynamically by the journal. 319 | # No need to set it here. 320 | # ------------------------------------------------------------------- 321 | 322 | request.required.acks = 1 323 | 324 | # DO NOT CHANGE!
325 | producer.type = "sync" 326 | 327 | # DO NOT CHANGE! 328 | partitioner.class = "akka.persistence.kafka.StickyPartitioner" 329 | 330 | # DO NOT CHANGE! 331 | key.serializer.class = "kafka.serializer.StringEncoder" 332 | 333 | # Increase if hundreds of topics are created during initialization. 334 | message.send.max.retries = 5 335 | 336 | # Increase if hundreds of topics are created during initialization. 337 | retry.backoff.ms = 100 338 | 339 | # Add further Kafka producer settings here, if needed. 340 | # ... 341 | } 342 | 343 | event.producer { 344 | # ------------------------------------------------------------------- 345 | # Event producer (to user-defined topics) configuration. 346 | # 347 | # See http://kafka.apache.org/documentation.html#producerconfigs 348 | # ------------------------------------------------------------------- 349 | 350 | producer.type = "sync" 351 | 352 | request.required.acks = 0 353 | 354 | topic.mapper.class = "akka.persistence.kafka.DefaultEventTopicMapper" 355 | 356 | key.serializer.class = "kafka.serializer.StringEncoder" 357 | 358 | # Add further Kafka producer settings here, if needed. 359 | # ... 360 | } 361 | 362 | zookeeper { 363 | # ------------------------------------------------------------------- 364 | # Zookeeper client configuration 365 | # ------------------------------------------------------------------- 366 | 367 | connect = "localhost:2181" 368 | 369 | session.timeout.ms = 6000 370 | 371 | connection.timeout.ms = 6000 372 | 373 | sync.time.ms = 2000 374 | } 375 | } 376 | 377 | kafka-snapshot-store { 378 | 379 | # FQCN of the Kafka snapshot store plugin 380 | class = "akka.persistence.kafka.snapshot.KafkaSnapshotStore" 381 | 382 | # Dispatcher for the plugin actor. 383 | plugin-dispatcher = "kafka-snapshot-store.default-dispatcher" 384 | 385 | # The partition to use when publishing to and consuming from snapshot topics. 386 | partition = 0 387 | 388 | # Topic name prefix (which prepended to persistenceId) 389 | prefix = "snapshot-" 390 | 391 | # Default dispatcher for plugin actor. 392 | default-dispatcher { 393 | type = Dispatcher 394 | executor = "fork-join-executor" 395 | fork-join-executor { 396 | parallelism-min = 2 397 | parallelism-max = 8 398 | } 399 | } 400 | 401 | consumer { 402 | # ------------------------------------------------------------------- 403 | # Simple consumer configuration (used for loading snapshots and 404 | # reading metadata). 405 | # 406 | # See http://kafka.apache.org/documentation.html#consumerconfigs 407 | # See http://kafka.apache.org/documentation.html#simpleconsumerapi 408 | # ------------------------------------------------------------------- 409 | 410 | socket.timeout.ms = 30000 411 | 412 | socket.receive.buffer.bytes = 65536 413 | 414 | fetch.message.max.bytes = 1048576 415 | } 416 | 417 | producer { 418 | # ------------------------------------------------------------------- 419 | # Snapshot producer configuration. 420 | # 421 | # See http://kafka.apache.org/documentation.html#producerconfigs 422 | # 423 | # The metadata.broker.list property is set dynamically by the journal. 424 | # No need to set it here. 425 | # ------------------------------------------------------------------- 426 | 427 | request.required.acks = 1 428 | 429 | producer.type = "sync" 430 | 431 | # DO NOT CHANGE! 432 | partitioner.class = "akka.persistence.kafka.StickyPartitioner" 433 | 434 | # DO NOT CHANGE! 
435 | key.serializer.class = "kafka.serializer.StringEncoder" 436 | 437 | # Add further Kafka producer settings here, if needed. 438 | # ... 439 | } 440 | 441 | zookeeper { 442 | # ------------------------------------------------------------------- 443 | # Zookeeper client configuration 444 | # ------------------------------------------------------------------- 445 | 446 | connect = "localhost:2181" 447 | 448 | session.timeout.ms = 6000 449 | 450 | connection.timeout.ms = 6000 451 | 452 | sync.time.ms = 2000 453 | } 454 | } 455 | 456 | test-server { 457 | # ------------------------------------------------------------------- 458 | # Test Kafka and Zookeeper server configuration. 459 | # 460 | # See http://kafka.apache.org/documentation.html#brokerconfigs 461 | # ------------------------------------------------------------------- 462 | 463 | zookeeper { 464 | 465 | port = 2181 466 | 467 | dir = "data/zookeeper" 468 | } 469 | 470 | kafka { 471 | 472 | broker.id = 1 473 | 474 | port = 6667 475 | 476 | num.partitions = 2 477 | 478 | log.cleanup.policy = "compact" 479 | 480 | log.dirs = data/kafka 481 | 482 | log.index.size.max.bytes = 1024 483 | } 484 | } 485 | 486 | akka { 487 | actor { 488 | serializers { 489 | kafka-snapshot = "akka.persistence.kafka.snapshot.KafkaSnapshotSerializer" 490 | } 491 | 492 | serialization-bindings { 493 | "akka.persistence.kafka.snapshot.KafkaSnapshot" = kafka-snapshot 494 | } 495 | } 496 | } 497 | -------------------------------------------------------------------------------- /src/main/java/akka/persistence/kafka/snapshot/SnapshotFormats.java: -------------------------------------------------------------------------------- 1 | // Generated by the protocol buffer compiler. DO NOT EDIT! 2 | // source: src/main/protobuf/SnapshotFormats.proto 3 | 4 | package akka.persistence.kafka.snapshot; 5 | 6 | public final class SnapshotFormats { 7 | private SnapshotFormats() {} 8 | public static void registerAllExtensions( 9 | com.google.protobuf.ExtensionRegistry registry) { 10 | } 11 | public interface SnapshotMetadataFormatOrBuilder 12 | extends com.google.protobuf.MessageOrBuilder { 13 | 14 | // optional string persistenceId = 1; 15 | /** 16 | * optional string persistenceId = 1; 17 | */ 18 | boolean hasPersistenceId(); 19 | /** 20 | * optional string persistenceId = 1; 21 | */ 22 | java.lang.String getPersistenceId(); 23 | /** 24 | * optional string persistenceId = 1; 25 | */ 26 | com.google.protobuf.ByteString 27 | getPersistenceIdBytes(); 28 | 29 | // optional int64 sequenceNr = 2; 30 | /** 31 | * optional int64 sequenceNr = 2; 32 | */ 33 | boolean hasSequenceNr(); 34 | /** 35 | * optional int64 sequenceNr = 2; 36 | */ 37 | long getSequenceNr(); 38 | 39 | // optional int64 timestamp = 3; 40 | /** 41 | * optional int64 timestamp = 3; 42 | */ 43 | boolean hasTimestamp(); 44 | /** 45 | * optional int64 timestamp = 3; 46 | */ 47 | long getTimestamp(); 48 | } 49 | /** 50 | * Protobuf type {@code SnapshotMetadataFormat} 51 | */ 52 | public static final class SnapshotMetadataFormat extends 53 | com.google.protobuf.GeneratedMessage 54 | implements SnapshotMetadataFormatOrBuilder { 55 | // Use SnapshotMetadataFormat.newBuilder() to construct. 
56 | private SnapshotMetadataFormat(com.google.protobuf.GeneratedMessage.Builder builder) { 57 | super(builder); 58 | this.unknownFields = builder.getUnknownFields(); 59 | } 60 | private SnapshotMetadataFormat(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } 61 | 62 | private static final SnapshotMetadataFormat defaultInstance; 63 | public static SnapshotMetadataFormat getDefaultInstance() { 64 | return defaultInstance; 65 | } 66 | 67 | public SnapshotMetadataFormat getDefaultInstanceForType() { 68 | return defaultInstance; 69 | } 70 | 71 | private final com.google.protobuf.UnknownFieldSet unknownFields; 72 | @java.lang.Override 73 | public final com.google.protobuf.UnknownFieldSet 74 | getUnknownFields() { 75 | return this.unknownFields; 76 | } 77 | private SnapshotMetadataFormat( 78 | com.google.protobuf.CodedInputStream input, 79 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 80 | throws com.google.protobuf.InvalidProtocolBufferException { 81 | initFields(); 82 | int mutable_bitField0_ = 0; 83 | com.google.protobuf.UnknownFieldSet.Builder unknownFields = 84 | com.google.protobuf.UnknownFieldSet.newBuilder(); 85 | try { 86 | boolean done = false; 87 | while (!done) { 88 | int tag = input.readTag(); 89 | switch (tag) { 90 | case 0: 91 | done = true; 92 | break; 93 | default: { 94 | if (!parseUnknownField(input, unknownFields, 95 | extensionRegistry, tag)) { 96 | done = true; 97 | } 98 | break; 99 | } 100 | case 10: { 101 | bitField0_ |= 0x00000001; 102 | persistenceId_ = input.readBytes(); 103 | break; 104 | } 105 | case 16: { 106 | bitField0_ |= 0x00000002; 107 | sequenceNr_ = input.readInt64(); 108 | break; 109 | } 110 | case 24: { 111 | bitField0_ |= 0x00000004; 112 | timestamp_ = input.readInt64(); 113 | break; 114 | } 115 | } 116 | } 117 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 118 | throw e.setUnfinishedMessage(this); 119 | } catch (java.io.IOException e) { 120 | throw new com.google.protobuf.InvalidProtocolBufferException( 121 | e.getMessage()).setUnfinishedMessage(this); 122 | } finally { 123 | this.unknownFields = unknownFields.build(); 124 | makeExtensionsImmutable(); 125 | } 126 | } 127 | public static final com.google.protobuf.Descriptors.Descriptor 128 | getDescriptor() { 129 | return akka.persistence.kafka.snapshot.SnapshotFormats.internal_static_SnapshotMetadataFormat_descriptor; 130 | } 131 | 132 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 133 | internalGetFieldAccessorTable() { 134 | return akka.persistence.kafka.snapshot.SnapshotFormats.internal_static_SnapshotMetadataFormat_fieldAccessorTable 135 | .ensureFieldAccessorsInitialized( 136 | akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.class, akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.Builder.class); 137 | } 138 | 139 | public static com.google.protobuf.Parser PARSER = 140 | new com.google.protobuf.AbstractParser() { 141 | public SnapshotMetadataFormat parsePartialFrom( 142 | com.google.protobuf.CodedInputStream input, 143 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 144 | throws com.google.protobuf.InvalidProtocolBufferException { 145 | return new SnapshotMetadataFormat(input, extensionRegistry); 146 | } 147 | }; 148 | 149 | @java.lang.Override 150 | public com.google.protobuf.Parser getParserForType() { 151 | return PARSER; 152 | } 153 | 154 | private int bitField0_; 155 | // optional string persistenceId = 1; 156 | public static final 
int PERSISTENCEID_FIELD_NUMBER = 1; 157 | private java.lang.Object persistenceId_; 158 | /** 159 | * optional string persistenceId = 1; 160 | */ 161 | public boolean hasPersistenceId() { 162 | return ((bitField0_ & 0x00000001) == 0x00000001); 163 | } 164 | /** 165 | * optional string persistenceId = 1; 166 | */ 167 | public java.lang.String getPersistenceId() { 168 | java.lang.Object ref = persistenceId_; 169 | if (ref instanceof java.lang.String) { 170 | return (java.lang.String) ref; 171 | } else { 172 | com.google.protobuf.ByteString bs = 173 | (com.google.protobuf.ByteString) ref; 174 | java.lang.String s = bs.toStringUtf8(); 175 | if (bs.isValidUtf8()) { 176 | persistenceId_ = s; 177 | } 178 | return s; 179 | } 180 | } 181 | /** 182 | * optional string persistenceId = 1; 183 | */ 184 | public com.google.protobuf.ByteString 185 | getPersistenceIdBytes() { 186 | java.lang.Object ref = persistenceId_; 187 | if (ref instanceof java.lang.String) { 188 | com.google.protobuf.ByteString b = 189 | com.google.protobuf.ByteString.copyFromUtf8( 190 | (java.lang.String) ref); 191 | persistenceId_ = b; 192 | return b; 193 | } else { 194 | return (com.google.protobuf.ByteString) ref; 195 | } 196 | } 197 | 198 | // optional int64 sequenceNr = 2; 199 | public static final int SEQUENCENR_FIELD_NUMBER = 2; 200 | private long sequenceNr_; 201 | /** 202 | * optional int64 sequenceNr = 2; 203 | */ 204 | public boolean hasSequenceNr() { 205 | return ((bitField0_ & 0x00000002) == 0x00000002); 206 | } 207 | /** 208 | * optional int64 sequenceNr = 2; 209 | */ 210 | public long getSequenceNr() { 211 | return sequenceNr_; 212 | } 213 | 214 | // optional int64 timestamp = 3; 215 | public static final int TIMESTAMP_FIELD_NUMBER = 3; 216 | private long timestamp_; 217 | /** 218 | * optional int64 timestamp = 3; 219 | */ 220 | public boolean hasTimestamp() { 221 | return ((bitField0_ & 0x00000004) == 0x00000004); 222 | } 223 | /** 224 | * optional int64 timestamp = 3; 225 | */ 226 | public long getTimestamp() { 227 | return timestamp_; 228 | } 229 | 230 | private void initFields() { 231 | persistenceId_ = ""; 232 | sequenceNr_ = 0L; 233 | timestamp_ = 0L; 234 | } 235 | private byte memoizedIsInitialized = -1; 236 | public final boolean isInitialized() { 237 | byte isInitialized = memoizedIsInitialized; 238 | if (isInitialized != -1) return isInitialized == 1; 239 | 240 | memoizedIsInitialized = 1; 241 | return true; 242 | } 243 | 244 | public void writeTo(com.google.protobuf.CodedOutputStream output) 245 | throws java.io.IOException { 246 | getSerializedSize(); 247 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 248 | output.writeBytes(1, getPersistenceIdBytes()); 249 | } 250 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 251 | output.writeInt64(2, sequenceNr_); 252 | } 253 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 254 | output.writeInt64(3, timestamp_); 255 | } 256 | getUnknownFields().writeTo(output); 257 | } 258 | 259 | private int memoizedSerializedSize = -1; 260 | public int getSerializedSize() { 261 | int size = memoizedSerializedSize; 262 | if (size != -1) return size; 263 | 264 | size = 0; 265 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 266 | size += com.google.protobuf.CodedOutputStream 267 | .computeBytesSize(1, getPersistenceIdBytes()); 268 | } 269 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 270 | size += com.google.protobuf.CodedOutputStream 271 | .computeInt64Size(2, sequenceNr_); 272 | } 273 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 274 | size += 
com.google.protobuf.CodedOutputStream 275 | .computeInt64Size(3, timestamp_); 276 | } 277 | size += getUnknownFields().getSerializedSize(); 278 | memoizedSerializedSize = size; 279 | return size; 280 | } 281 | 282 | private static final long serialVersionUID = 0L; 283 | @java.lang.Override 284 | protected java.lang.Object writeReplace() 285 | throws java.io.ObjectStreamException { 286 | return super.writeReplace(); 287 | } 288 | 289 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 290 | com.google.protobuf.ByteString data) 291 | throws com.google.protobuf.InvalidProtocolBufferException { 292 | return PARSER.parseFrom(data); 293 | } 294 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 295 | com.google.protobuf.ByteString data, 296 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 297 | throws com.google.protobuf.InvalidProtocolBufferException { 298 | return PARSER.parseFrom(data, extensionRegistry); 299 | } 300 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom(byte[] data) 301 | throws com.google.protobuf.InvalidProtocolBufferException { 302 | return PARSER.parseFrom(data); 303 | } 304 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 305 | byte[] data, 306 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 307 | throws com.google.protobuf.InvalidProtocolBufferException { 308 | return PARSER.parseFrom(data, extensionRegistry); 309 | } 310 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom(java.io.InputStream input) 311 | throws java.io.IOException { 312 | return PARSER.parseFrom(input); 313 | } 314 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 315 | java.io.InputStream input, 316 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 317 | throws java.io.IOException { 318 | return PARSER.parseFrom(input, extensionRegistry); 319 | } 320 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseDelimitedFrom(java.io.InputStream input) 321 | throws java.io.IOException { 322 | return PARSER.parseDelimitedFrom(input); 323 | } 324 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseDelimitedFrom( 325 | java.io.InputStream input, 326 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 327 | throws java.io.IOException { 328 | return PARSER.parseDelimitedFrom(input, extensionRegistry); 329 | } 330 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 331 | com.google.protobuf.CodedInputStream input) 332 | throws java.io.IOException { 333 | return PARSER.parseFrom(input); 334 | } 335 | public static akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parseFrom( 336 | com.google.protobuf.CodedInputStream input, 337 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 338 | throws java.io.IOException { 339 | return PARSER.parseFrom(input, extensionRegistry); 340 | } 341 | 342 | public static Builder newBuilder() { return Builder.create(); } 343 | public Builder newBuilderForType() { return newBuilder(); } 344 | public static Builder newBuilder(akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat prototype) { 345 | return newBuilder().mergeFrom(prototype); 346 | } 347 | public Builder toBuilder() { return 
newBuilder(this); } 348 | 349 | @java.lang.Override 350 | protected Builder newBuilderForType( 351 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 352 | Builder builder = new Builder(parent); 353 | return builder; 354 | } 355 | /** 356 | * Protobuf type {@code SnapshotMetadataFormat} 357 | */ 358 | public static final class Builder extends 359 | com.google.protobuf.GeneratedMessage.Builder 360 | implements akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormatOrBuilder { 361 | public static final com.google.protobuf.Descriptors.Descriptor 362 | getDescriptor() { 363 | return akka.persistence.kafka.snapshot.SnapshotFormats.internal_static_SnapshotMetadataFormat_descriptor; 364 | } 365 | 366 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 367 | internalGetFieldAccessorTable() { 368 | return akka.persistence.kafka.snapshot.SnapshotFormats.internal_static_SnapshotMetadataFormat_fieldAccessorTable 369 | .ensureFieldAccessorsInitialized( 370 | akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.class, akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.Builder.class); 371 | } 372 | 373 | // Construct using akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.newBuilder() 374 | private Builder() { 375 | maybeForceBuilderInitialization(); 376 | } 377 | 378 | private Builder( 379 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 380 | super(parent); 381 | maybeForceBuilderInitialization(); 382 | } 383 | private void maybeForceBuilderInitialization() { 384 | if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 385 | } 386 | } 387 | private static Builder create() { 388 | return new Builder(); 389 | } 390 | 391 | public Builder clear() { 392 | super.clear(); 393 | persistenceId_ = ""; 394 | bitField0_ = (bitField0_ & ~0x00000001); 395 | sequenceNr_ = 0L; 396 | bitField0_ = (bitField0_ & ~0x00000002); 397 | timestamp_ = 0L; 398 | bitField0_ = (bitField0_ & ~0x00000004); 399 | return this; 400 | } 401 | 402 | public Builder clone() { 403 | return create().mergeFrom(buildPartial()); 404 | } 405 | 406 | public com.google.protobuf.Descriptors.Descriptor 407 | getDescriptorForType() { 408 | return akka.persistence.kafka.snapshot.SnapshotFormats.internal_static_SnapshotMetadataFormat_descriptor; 409 | } 410 | 411 | public akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat getDefaultInstanceForType() { 412 | return akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.getDefaultInstance(); 413 | } 414 | 415 | public akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat build() { 416 | akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat result = buildPartial(); 417 | if (!result.isInitialized()) { 418 | throw newUninitializedMessageException(result); 419 | } 420 | return result; 421 | } 422 | 423 | public akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat buildPartial() { 424 | akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat result = new akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat(this); 425 | int from_bitField0_ = bitField0_; 426 | int to_bitField0_ = 0; 427 | if (((from_bitField0_ & 0x00000001) == 0x00000001)) { 428 | to_bitField0_ |= 0x00000001; 429 | } 430 | result.persistenceId_ = persistenceId_; 431 | if (((from_bitField0_ & 0x00000002) == 0x00000002)) { 432 | to_bitField0_ |= 0x00000002; 433 | } 434 | result.sequenceNr_ = 
sequenceNr_; 435 | if (((from_bitField0_ & 0x00000004) == 0x00000004)) { 436 | to_bitField0_ |= 0x00000004; 437 | } 438 | result.timestamp_ = timestamp_; 439 | result.bitField0_ = to_bitField0_; 440 | onBuilt(); 441 | return result; 442 | } 443 | 444 | public Builder mergeFrom(com.google.protobuf.Message other) { 445 | if (other instanceof akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat) { 446 | return mergeFrom((akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat)other); 447 | } else { 448 | super.mergeFrom(other); 449 | return this; 450 | } 451 | } 452 | 453 | public Builder mergeFrom(akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat other) { 454 | if (other == akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat.getDefaultInstance()) return this; 455 | if (other.hasPersistenceId()) { 456 | bitField0_ |= 0x00000001; 457 | persistenceId_ = other.persistenceId_; 458 | onChanged(); 459 | } 460 | if (other.hasSequenceNr()) { 461 | setSequenceNr(other.getSequenceNr()); 462 | } 463 | if (other.hasTimestamp()) { 464 | setTimestamp(other.getTimestamp()); 465 | } 466 | this.mergeUnknownFields(other.getUnknownFields()); 467 | return this; 468 | } 469 | 470 | public final boolean isInitialized() { 471 | return true; 472 | } 473 | 474 | public Builder mergeFrom( 475 | com.google.protobuf.CodedInputStream input, 476 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 477 | throws java.io.IOException { 478 | akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat parsedMessage = null; 479 | try { 480 | parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); 481 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 482 | parsedMessage = (akka.persistence.kafka.snapshot.SnapshotFormats.SnapshotMetadataFormat) e.getUnfinishedMessage(); 483 | throw e; 484 | } finally { 485 | if (parsedMessage != null) { 486 | mergeFrom(parsedMessage); 487 | } 488 | } 489 | return this; 490 | } 491 | private int bitField0_; 492 | 493 | // optional string persistenceId = 1; 494 | private java.lang.Object persistenceId_ = ""; 495 | /** 496 | * optional string persistenceId = 1; 497 | */ 498 | public boolean hasPersistenceId() { 499 | return ((bitField0_ & 0x00000001) == 0x00000001); 500 | } 501 | /** 502 | * optional string persistenceId = 1; 503 | */ 504 | public java.lang.String getPersistenceId() { 505 | java.lang.Object ref = persistenceId_; 506 | if (!(ref instanceof java.lang.String)) { 507 | java.lang.String s = ((com.google.protobuf.ByteString) ref) 508 | .toStringUtf8(); 509 | persistenceId_ = s; 510 | return s; 511 | } else { 512 | return (java.lang.String) ref; 513 | } 514 | } 515 | /** 516 | * optional string persistenceId = 1; 517 | */ 518 | public com.google.protobuf.ByteString 519 | getPersistenceIdBytes() { 520 | java.lang.Object ref = persistenceId_; 521 | if (ref instanceof String) { 522 | com.google.protobuf.ByteString b = 523 | com.google.protobuf.ByteString.copyFromUtf8( 524 | (java.lang.String) ref); 525 | persistenceId_ = b; 526 | return b; 527 | } else { 528 | return (com.google.protobuf.ByteString) ref; 529 | } 530 | } 531 | /** 532 | * optional string persistenceId = 1; 533 | */ 534 | public Builder setPersistenceId( 535 | java.lang.String value) { 536 | if (value == null) { 537 | throw new NullPointerException(); 538 | } 539 | bitField0_ |= 0x00000001; 540 | persistenceId_ = value; 541 | onChanged(); 542 | return this; 543 | } 544 | /** 545 | * optional string 
persistenceId = 1; 546 | */ 547 | public Builder clearPersistenceId() { 548 | bitField0_ = (bitField0_ & ~0x00000001); 549 | persistenceId_ = getDefaultInstance().getPersistenceId(); 550 | onChanged(); 551 | return this; 552 | } 553 | /** 554 | * optional string persistenceId = 1; 555 | */ 556 | public Builder setPersistenceIdBytes( 557 | com.google.protobuf.ByteString value) { 558 | if (value == null) { 559 | throw new NullPointerException(); 560 | } 561 | bitField0_ |= 0x00000001; 562 | persistenceId_ = value; 563 | onChanged(); 564 | return this; 565 | } 566 | 567 | // optional int64 sequenceNr = 2; 568 | private long sequenceNr_ ; 569 | /** 570 | * optional int64 sequenceNr = 2; 571 | */ 572 | public boolean hasSequenceNr() { 573 | return ((bitField0_ & 0x00000002) == 0x00000002); 574 | } 575 | /** 576 | * optional int64 sequenceNr = 2; 577 | */ 578 | public long getSequenceNr() { 579 | return sequenceNr_; 580 | } 581 | /** 582 | * optional int64 sequenceNr = 2; 583 | */ 584 | public Builder setSequenceNr(long value) { 585 | bitField0_ |= 0x00000002; 586 | sequenceNr_ = value; 587 | onChanged(); 588 | return this; 589 | } 590 | /** 591 | * optional int64 sequenceNr = 2; 592 | */ 593 | public Builder clearSequenceNr() { 594 | bitField0_ = (bitField0_ & ~0x00000002); 595 | sequenceNr_ = 0L; 596 | onChanged(); 597 | return this; 598 | } 599 | 600 | // optional int64 timestamp = 3; 601 | private long timestamp_ ; 602 | /** 603 | * optional int64 timestamp = 3; 604 | */ 605 | public boolean hasTimestamp() { 606 | return ((bitField0_ & 0x00000004) == 0x00000004); 607 | } 608 | /** 609 | * optional int64 timestamp = 3; 610 | */ 611 | public long getTimestamp() { 612 | return timestamp_; 613 | } 614 | /** 615 | * optional int64 timestamp = 3; 616 | */ 617 | public Builder setTimestamp(long value) { 618 | bitField0_ |= 0x00000004; 619 | timestamp_ = value; 620 | onChanged(); 621 | return this; 622 | } 623 | /** 624 | * optional int64 timestamp = 3; 625 | */ 626 | public Builder clearTimestamp() { 627 | bitField0_ = (bitField0_ & ~0x00000004); 628 | timestamp_ = 0L; 629 | onChanged(); 630 | return this; 631 | } 632 | 633 | // @@protoc_insertion_point(builder_scope:SnapshotMetadataFormat) 634 | } 635 | 636 | static { 637 | defaultInstance = new SnapshotMetadataFormat(true); 638 | defaultInstance.initFields(); 639 | } 640 | 641 | // @@protoc_insertion_point(class_scope:SnapshotMetadataFormat) 642 | } 643 | 644 | private static com.google.protobuf.Descriptors.Descriptor 645 | internal_static_SnapshotMetadataFormat_descriptor; 646 | private static 647 | com.google.protobuf.GeneratedMessage.FieldAccessorTable 648 | internal_static_SnapshotMetadataFormat_fieldAccessorTable; 649 | 650 | public static com.google.protobuf.Descriptors.FileDescriptor 651 | getDescriptor() { 652 | return descriptor; 653 | } 654 | private static com.google.protobuf.Descriptors.FileDescriptor 655 | descriptor; 656 | static { 657 | java.lang.String[] descriptorData = { 658 | "\n\'src/main/protobuf/SnapshotFormats.prot" + 659 | "o\"V\n\026SnapshotMetadataFormat\022\025\n\rpersisten" + 660 | "ceId\030\001 \001(\t\022\022\n\nsequenceNr\030\002 \001(\003\022\021\n\ttimest" + 661 | "amp\030\003 \001(\003B#\n\037akka.persistence.kafka.snap" + 662 | "shotH\001" 663 | }; 664 | com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = 665 | new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { 666 | public com.google.protobuf.ExtensionRegistry assignDescriptors( 667 | 
com.google.protobuf.Descriptors.FileDescriptor root) { 668 | descriptor = root; 669 | internal_static_SnapshotMetadataFormat_descriptor = 670 | getDescriptor().getMessageTypes().get(0); 671 | internal_static_SnapshotMetadataFormat_fieldAccessorTable = new 672 | com.google.protobuf.GeneratedMessage.FieldAccessorTable( 673 | internal_static_SnapshotMetadataFormat_descriptor, 674 | new java.lang.String[] { "PersistenceId", "SequenceNr", "Timestamp", }); 675 | return null; 676 | } 677 | }; 678 | com.google.protobuf.Descriptors.FileDescriptor 679 | .internalBuildGeneratedFileFrom(descriptorData, 680 | new com.google.protobuf.Descriptors.FileDescriptor[] { 681 | }, assigner); 682 | } 683 | 684 | // @@protoc_insertion_point(outer_class_scope) 685 | } 686 | -------------------------------------------------------------------------------- /src/main/java/akka/persistence/kafka/journal/EventFormats.java: -------------------------------------------------------------------------------- 1 | // Generated by the protocol buffer compiler. DO NOT EDIT! 2 | // source: src/main/protobuf/EventFormats.proto 3 | 4 | package akka.persistence.kafka.journal; 5 | 6 | public final class EventFormats { 7 | private EventFormats() {} 8 | public static void registerAllExtensions( 9 | com.google.protobuf.ExtensionRegistry registry) { 10 | } 11 | public interface EventFormatOrBuilder 12 | extends com.google.protobuf.MessageOrBuilder { 13 | 14 | // optional string persistenceId = 1; 15 | /** 16 | * optional string persistenceId = 1; 17 | */ 18 | boolean hasPersistenceId(); 19 | /** 20 | * optional string persistenceId = 1; 21 | */ 22 | java.lang.String getPersistenceId(); 23 | /** 24 | * optional string persistenceId = 1; 25 | */ 26 | com.google.protobuf.ByteString 27 | getPersistenceIdBytes(); 28 | 29 | // optional int64 sequenceNr = 2; 30 | /** 31 | * optional int64 sequenceNr = 2; 32 | */ 33 | boolean hasSequenceNr(); 34 | /** 35 | * optional int64 sequenceNr = 2; 36 | */ 37 | long getSequenceNr(); 38 | 39 | // optional .EventDataFormat data = 3; 40 | /** 41 | * optional .EventDataFormat data = 3; 42 | */ 43 | boolean hasData(); 44 | /** 45 | * optional .EventDataFormat data = 3; 46 | */ 47 | akka.persistence.kafka.journal.EventFormats.EventDataFormat getData(); 48 | /** 49 | * optional .EventDataFormat data = 3; 50 | */ 51 | akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder getDataOrBuilder(); 52 | } 53 | /** 54 | * Protobuf type {@code EventFormat} 55 | */ 56 | public static final class EventFormat extends 57 | com.google.protobuf.GeneratedMessage 58 | implements EventFormatOrBuilder { 59 | // Use EventFormat.newBuilder() to construct. 
60 | private EventFormat(com.google.protobuf.GeneratedMessage.Builder builder) { 61 | super(builder); 62 | this.unknownFields = builder.getUnknownFields(); 63 | } 64 | private EventFormat(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } 65 | 66 | private static final EventFormat defaultInstance; 67 | public static EventFormat getDefaultInstance() { 68 | return defaultInstance; 69 | } 70 | 71 | public EventFormat getDefaultInstanceForType() { 72 | return defaultInstance; 73 | } 74 | 75 | private final com.google.protobuf.UnknownFieldSet unknownFields; 76 | @java.lang.Override 77 | public final com.google.protobuf.UnknownFieldSet 78 | getUnknownFields() { 79 | return this.unknownFields; 80 | } 81 | private EventFormat( 82 | com.google.protobuf.CodedInputStream input, 83 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 84 | throws com.google.protobuf.InvalidProtocolBufferException { 85 | initFields(); 86 | int mutable_bitField0_ = 0; 87 | com.google.protobuf.UnknownFieldSet.Builder unknownFields = 88 | com.google.protobuf.UnknownFieldSet.newBuilder(); 89 | try { 90 | boolean done = false; 91 | while (!done) { 92 | int tag = input.readTag(); 93 | switch (tag) { 94 | case 0: 95 | done = true; 96 | break; 97 | default: { 98 | if (!parseUnknownField(input, unknownFields, 99 | extensionRegistry, tag)) { 100 | done = true; 101 | } 102 | break; 103 | } 104 | case 10: { 105 | bitField0_ |= 0x00000001; 106 | persistenceId_ = input.readBytes(); 107 | break; 108 | } 109 | case 16: { 110 | bitField0_ |= 0x00000002; 111 | sequenceNr_ = input.readInt64(); 112 | break; 113 | } 114 | case 26: { 115 | akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder subBuilder = null; 116 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 117 | subBuilder = data_.toBuilder(); 118 | } 119 | data_ = input.readMessage(akka.persistence.kafka.journal.EventFormats.EventDataFormat.PARSER, extensionRegistry); 120 | if (subBuilder != null) { 121 | subBuilder.mergeFrom(data_); 122 | data_ = subBuilder.buildPartial(); 123 | } 124 | bitField0_ |= 0x00000004; 125 | break; 126 | } 127 | } 128 | } 129 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 130 | throw e.setUnfinishedMessage(this); 131 | } catch (java.io.IOException e) { 132 | throw new com.google.protobuf.InvalidProtocolBufferException( 133 | e.getMessage()).setUnfinishedMessage(this); 134 | } finally { 135 | this.unknownFields = unknownFields.build(); 136 | makeExtensionsImmutable(); 137 | } 138 | } 139 | public static final com.google.protobuf.Descriptors.Descriptor 140 | getDescriptor() { 141 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventFormat_descriptor; 142 | } 143 | 144 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 145 | internalGetFieldAccessorTable() { 146 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventFormat_fieldAccessorTable 147 | .ensureFieldAccessorsInitialized( 148 | akka.persistence.kafka.journal.EventFormats.EventFormat.class, akka.persistence.kafka.journal.EventFormats.EventFormat.Builder.class); 149 | } 150 | 151 | public static com.google.protobuf.Parser PARSER = 152 | new com.google.protobuf.AbstractParser() { 153 | public EventFormat parsePartialFrom( 154 | com.google.protobuf.CodedInputStream input, 155 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 156 | throws com.google.protobuf.InvalidProtocolBufferException { 157 | return new EventFormat(input, 
extensionRegistry); 158 | } 159 | }; 160 | 161 | @java.lang.Override 162 | public com.google.protobuf.Parser getParserForType() { 163 | return PARSER; 164 | } 165 | 166 | private int bitField0_; 167 | // optional string persistenceId = 1; 168 | public static final int PERSISTENCEID_FIELD_NUMBER = 1; 169 | private java.lang.Object persistenceId_; 170 | /** 171 | * optional string persistenceId = 1; 172 | */ 173 | public boolean hasPersistenceId() { 174 | return ((bitField0_ & 0x00000001) == 0x00000001); 175 | } 176 | /** 177 | * optional string persistenceId = 1; 178 | */ 179 | public java.lang.String getPersistenceId() { 180 | java.lang.Object ref = persistenceId_; 181 | if (ref instanceof java.lang.String) { 182 | return (java.lang.String) ref; 183 | } else { 184 | com.google.protobuf.ByteString bs = 185 | (com.google.protobuf.ByteString) ref; 186 | java.lang.String s = bs.toStringUtf8(); 187 | if (bs.isValidUtf8()) { 188 | persistenceId_ = s; 189 | } 190 | return s; 191 | } 192 | } 193 | /** 194 | * optional string persistenceId = 1; 195 | */ 196 | public com.google.protobuf.ByteString 197 | getPersistenceIdBytes() { 198 | java.lang.Object ref = persistenceId_; 199 | if (ref instanceof java.lang.String) { 200 | com.google.protobuf.ByteString b = 201 | com.google.protobuf.ByteString.copyFromUtf8( 202 | (java.lang.String) ref); 203 | persistenceId_ = b; 204 | return b; 205 | } else { 206 | return (com.google.protobuf.ByteString) ref; 207 | } 208 | } 209 | 210 | // optional int64 sequenceNr = 2; 211 | public static final int SEQUENCENR_FIELD_NUMBER = 2; 212 | private long sequenceNr_; 213 | /** 214 | * optional int64 sequenceNr = 2; 215 | */ 216 | public boolean hasSequenceNr() { 217 | return ((bitField0_ & 0x00000002) == 0x00000002); 218 | } 219 | /** 220 | * optional int64 sequenceNr = 2; 221 | */ 222 | public long getSequenceNr() { 223 | return sequenceNr_; 224 | } 225 | 226 | // optional .EventDataFormat data = 3; 227 | public static final int DATA_FIELD_NUMBER = 3; 228 | private akka.persistence.kafka.journal.EventFormats.EventDataFormat data_; 229 | /** 230 | * optional .EventDataFormat data = 3; 231 | */ 232 | public boolean hasData() { 233 | return ((bitField0_ & 0x00000004) == 0x00000004); 234 | } 235 | /** 236 | * optional .EventDataFormat data = 3; 237 | */ 238 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat getData() { 239 | return data_; 240 | } 241 | /** 242 | * optional .EventDataFormat data = 3; 243 | */ 244 | public akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder getDataOrBuilder() { 245 | return data_; 246 | } 247 | 248 | private void initFields() { 249 | persistenceId_ = ""; 250 | sequenceNr_ = 0L; 251 | data_ = akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance(); 252 | } 253 | private byte memoizedIsInitialized = -1; 254 | public final boolean isInitialized() { 255 | byte isInitialized = memoizedIsInitialized; 256 | if (isInitialized != -1) return isInitialized == 1; 257 | 258 | memoizedIsInitialized = 1; 259 | return true; 260 | } 261 | 262 | public void writeTo(com.google.protobuf.CodedOutputStream output) 263 | throws java.io.IOException { 264 | getSerializedSize(); 265 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 266 | output.writeBytes(1, getPersistenceIdBytes()); 267 | } 268 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 269 | output.writeInt64(2, sequenceNr_); 270 | } 271 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 272 | output.writeMessage(3, data_); 273 | } 274 | 
getUnknownFields().writeTo(output); 275 | } 276 | 277 | private int memoizedSerializedSize = -1; 278 | public int getSerializedSize() { 279 | int size = memoizedSerializedSize; 280 | if (size != -1) return size; 281 | 282 | size = 0; 283 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 284 | size += com.google.protobuf.CodedOutputStream 285 | .computeBytesSize(1, getPersistenceIdBytes()); 286 | } 287 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 288 | size += com.google.protobuf.CodedOutputStream 289 | .computeInt64Size(2, sequenceNr_); 290 | } 291 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 292 | size += com.google.protobuf.CodedOutputStream 293 | .computeMessageSize(3, data_); 294 | } 295 | size += getUnknownFields().getSerializedSize(); 296 | memoizedSerializedSize = size; 297 | return size; 298 | } 299 | 300 | private static final long serialVersionUID = 0L; 301 | @java.lang.Override 302 | protected java.lang.Object writeReplace() 303 | throws java.io.ObjectStreamException { 304 | return super.writeReplace(); 305 | } 306 | 307 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 308 | com.google.protobuf.ByteString data) 309 | throws com.google.protobuf.InvalidProtocolBufferException { 310 | return PARSER.parseFrom(data); 311 | } 312 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 313 | com.google.protobuf.ByteString data, 314 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 315 | throws com.google.protobuf.InvalidProtocolBufferException { 316 | return PARSER.parseFrom(data, extensionRegistry); 317 | } 318 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom(byte[] data) 319 | throws com.google.protobuf.InvalidProtocolBufferException { 320 | return PARSER.parseFrom(data); 321 | } 322 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 323 | byte[] data, 324 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 325 | throws com.google.protobuf.InvalidProtocolBufferException { 326 | return PARSER.parseFrom(data, extensionRegistry); 327 | } 328 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom(java.io.InputStream input) 329 | throws java.io.IOException { 330 | return PARSER.parseFrom(input); 331 | } 332 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 333 | java.io.InputStream input, 334 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 335 | throws java.io.IOException { 336 | return PARSER.parseFrom(input, extensionRegistry); 337 | } 338 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseDelimitedFrom(java.io.InputStream input) 339 | throws java.io.IOException { 340 | return PARSER.parseDelimitedFrom(input); 341 | } 342 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseDelimitedFrom( 343 | java.io.InputStream input, 344 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 345 | throws java.io.IOException { 346 | return PARSER.parseDelimitedFrom(input, extensionRegistry); 347 | } 348 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 349 | com.google.protobuf.CodedInputStream input) 350 | throws java.io.IOException { 351 | return PARSER.parseFrom(input); 352 | } 353 | public static akka.persistence.kafka.journal.EventFormats.EventFormat parseFrom( 354 | com.google.protobuf.CodedInputStream input, 355 | com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) 356 | throws java.io.IOException { 357 | return PARSER.parseFrom(input, extensionRegistry); 358 | } 359 | 360 | public static Builder newBuilder() { return Builder.create(); } 361 | public Builder newBuilderForType() { return newBuilder(); } 362 | public static Builder newBuilder(akka.persistence.kafka.journal.EventFormats.EventFormat prototype) { 363 | return newBuilder().mergeFrom(prototype); 364 | } 365 | public Builder toBuilder() { return newBuilder(this); } 366 | 367 | @java.lang.Override 368 | protected Builder newBuilderForType( 369 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 370 | Builder builder = new Builder(parent); 371 | return builder; 372 | } 373 | /** 374 | * Protobuf type {@code EventFormat} 375 | */ 376 | public static final class Builder extends 377 | com.google.protobuf.GeneratedMessage.Builder 378 | implements akka.persistence.kafka.journal.EventFormats.EventFormatOrBuilder { 379 | public static final com.google.protobuf.Descriptors.Descriptor 380 | getDescriptor() { 381 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventFormat_descriptor; 382 | } 383 | 384 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 385 | internalGetFieldAccessorTable() { 386 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventFormat_fieldAccessorTable 387 | .ensureFieldAccessorsInitialized( 388 | akka.persistence.kafka.journal.EventFormats.EventFormat.class, akka.persistence.kafka.journal.EventFormats.EventFormat.Builder.class); 389 | } 390 | 391 | // Construct using akka.persistence.kafka.journal.EventFormats.EventFormat.newBuilder() 392 | private Builder() { 393 | maybeForceBuilderInitialization(); 394 | } 395 | 396 | private Builder( 397 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 398 | super(parent); 399 | maybeForceBuilderInitialization(); 400 | } 401 | private void maybeForceBuilderInitialization() { 402 | if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 403 | getDataFieldBuilder(); 404 | } 405 | } 406 | private static Builder create() { 407 | return new Builder(); 408 | } 409 | 410 | public Builder clear() { 411 | super.clear(); 412 | persistenceId_ = ""; 413 | bitField0_ = (bitField0_ & ~0x00000001); 414 | sequenceNr_ = 0L; 415 | bitField0_ = (bitField0_ & ~0x00000002); 416 | if (dataBuilder_ == null) { 417 | data_ = akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance(); 418 | } else { 419 | dataBuilder_.clear(); 420 | } 421 | bitField0_ = (bitField0_ & ~0x00000004); 422 | return this; 423 | } 424 | 425 | public Builder clone() { 426 | return create().mergeFrom(buildPartial()); 427 | } 428 | 429 | public com.google.protobuf.Descriptors.Descriptor 430 | getDescriptorForType() { 431 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventFormat_descriptor; 432 | } 433 | 434 | public akka.persistence.kafka.journal.EventFormats.EventFormat getDefaultInstanceForType() { 435 | return akka.persistence.kafka.journal.EventFormats.EventFormat.getDefaultInstance(); 436 | } 437 | 438 | public akka.persistence.kafka.journal.EventFormats.EventFormat build() { 439 | akka.persistence.kafka.journal.EventFormats.EventFormat result = buildPartial(); 440 | if (!result.isInitialized()) { 441 | throw newUninitializedMessageException(result); 442 | } 443 | return result; 444 | } 445 | 446 | public akka.persistence.kafka.journal.EventFormats.EventFormat buildPartial() { 447 | akka.persistence.kafka.journal.EventFormats.EventFormat 
result = new akka.persistence.kafka.journal.EventFormats.EventFormat(this); 448 | int from_bitField0_ = bitField0_; 449 | int to_bitField0_ = 0; 450 | if (((from_bitField0_ & 0x00000001) == 0x00000001)) { 451 | to_bitField0_ |= 0x00000001; 452 | } 453 | result.persistenceId_ = persistenceId_; 454 | if (((from_bitField0_ & 0x00000002) == 0x00000002)) { 455 | to_bitField0_ |= 0x00000002; 456 | } 457 | result.sequenceNr_ = sequenceNr_; 458 | if (((from_bitField0_ & 0x00000004) == 0x00000004)) { 459 | to_bitField0_ |= 0x00000004; 460 | } 461 | if (dataBuilder_ == null) { 462 | result.data_ = data_; 463 | } else { 464 | result.data_ = dataBuilder_.build(); 465 | } 466 | result.bitField0_ = to_bitField0_; 467 | onBuilt(); 468 | return result; 469 | } 470 | 471 | public Builder mergeFrom(com.google.protobuf.Message other) { 472 | if (other instanceof akka.persistence.kafka.journal.EventFormats.EventFormat) { 473 | return mergeFrom((akka.persistence.kafka.journal.EventFormats.EventFormat)other); 474 | } else { 475 | super.mergeFrom(other); 476 | return this; 477 | } 478 | } 479 | 480 | public Builder mergeFrom(akka.persistence.kafka.journal.EventFormats.EventFormat other) { 481 | if (other == akka.persistence.kafka.journal.EventFormats.EventFormat.getDefaultInstance()) return this; 482 | if (other.hasPersistenceId()) { 483 | bitField0_ |= 0x00000001; 484 | persistenceId_ = other.persistenceId_; 485 | onChanged(); 486 | } 487 | if (other.hasSequenceNr()) { 488 | setSequenceNr(other.getSequenceNr()); 489 | } 490 | if (other.hasData()) { 491 | mergeData(other.getData()); 492 | } 493 | this.mergeUnknownFields(other.getUnknownFields()); 494 | return this; 495 | } 496 | 497 | public final boolean isInitialized() { 498 | return true; 499 | } 500 | 501 | public Builder mergeFrom( 502 | com.google.protobuf.CodedInputStream input, 503 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 504 | throws java.io.IOException { 505 | akka.persistence.kafka.journal.EventFormats.EventFormat parsedMessage = null; 506 | try { 507 | parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); 508 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 509 | parsedMessage = (akka.persistence.kafka.journal.EventFormats.EventFormat) e.getUnfinishedMessage(); 510 | throw e; 511 | } finally { 512 | if (parsedMessage != null) { 513 | mergeFrom(parsedMessage); 514 | } 515 | } 516 | return this; 517 | } 518 | private int bitField0_; 519 | 520 | // optional string persistenceId = 1; 521 | private java.lang.Object persistenceId_ = ""; 522 | /** 523 | * optional string persistenceId = 1; 524 | */ 525 | public boolean hasPersistenceId() { 526 | return ((bitField0_ & 0x00000001) == 0x00000001); 527 | } 528 | /** 529 | * optional string persistenceId = 1; 530 | */ 531 | public java.lang.String getPersistenceId() { 532 | java.lang.Object ref = persistenceId_; 533 | if (!(ref instanceof java.lang.String)) { 534 | java.lang.String s = ((com.google.protobuf.ByteString) ref) 535 | .toStringUtf8(); 536 | persistenceId_ = s; 537 | return s; 538 | } else { 539 | return (java.lang.String) ref; 540 | } 541 | } 542 | /** 543 | * optional string persistenceId = 1; 544 | */ 545 | public com.google.protobuf.ByteString 546 | getPersistenceIdBytes() { 547 | java.lang.Object ref = persistenceId_; 548 | if (ref instanceof String) { 549 | com.google.protobuf.ByteString b = 550 | com.google.protobuf.ByteString.copyFromUtf8( 551 | (java.lang.String) ref); 552 | persistenceId_ = b; 553 | return b; 554 | } else { 555 | return 
(com.google.protobuf.ByteString) ref; 556 | } 557 | } 558 | /** 559 | * optional string persistenceId = 1; 560 | */ 561 | public Builder setPersistenceId( 562 | java.lang.String value) { 563 | if (value == null) { 564 | throw new NullPointerException(); 565 | } 566 | bitField0_ |= 0x00000001; 567 | persistenceId_ = value; 568 | onChanged(); 569 | return this; 570 | } 571 | /** 572 | * optional string persistenceId = 1; 573 | */ 574 | public Builder clearPersistenceId() { 575 | bitField0_ = (bitField0_ & ~0x00000001); 576 | persistenceId_ = getDefaultInstance().getPersistenceId(); 577 | onChanged(); 578 | return this; 579 | } 580 | /** 581 | * optional string persistenceId = 1; 582 | */ 583 | public Builder setPersistenceIdBytes( 584 | com.google.protobuf.ByteString value) { 585 | if (value == null) { 586 | throw new NullPointerException(); 587 | } 588 | bitField0_ |= 0x00000001; 589 | persistenceId_ = value; 590 | onChanged(); 591 | return this; 592 | } 593 | 594 | // optional int64 sequenceNr = 2; 595 | private long sequenceNr_ ; 596 | /** 597 | * optional int64 sequenceNr = 2; 598 | */ 599 | public boolean hasSequenceNr() { 600 | return ((bitField0_ & 0x00000002) == 0x00000002); 601 | } 602 | /** 603 | * optional int64 sequenceNr = 2; 604 | */ 605 | public long getSequenceNr() { 606 | return sequenceNr_; 607 | } 608 | /** 609 | * optional int64 sequenceNr = 2; 610 | */ 611 | public Builder setSequenceNr(long value) { 612 | bitField0_ |= 0x00000002; 613 | sequenceNr_ = value; 614 | onChanged(); 615 | return this; 616 | } 617 | /** 618 | * optional int64 sequenceNr = 2; 619 | */ 620 | public Builder clearSequenceNr() { 621 | bitField0_ = (bitField0_ & ~0x00000002); 622 | sequenceNr_ = 0L; 623 | onChanged(); 624 | return this; 625 | } 626 | 627 | // optional .EventDataFormat data = 3; 628 | private akka.persistence.kafka.journal.EventFormats.EventDataFormat data_ = akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance(); 629 | private com.google.protobuf.SingleFieldBuilder< 630 | akka.persistence.kafka.journal.EventFormats.EventDataFormat, akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder, akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder> dataBuilder_; 631 | /** 632 | * optional .EventDataFormat data = 3; 633 | */ 634 | public boolean hasData() { 635 | return ((bitField0_ & 0x00000004) == 0x00000004); 636 | } 637 | /** 638 | * optional .EventDataFormat data = 3; 639 | */ 640 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat getData() { 641 | if (dataBuilder_ == null) { 642 | return data_; 643 | } else { 644 | return dataBuilder_.getMessage(); 645 | } 646 | } 647 | /** 648 | * optional .EventDataFormat data = 3; 649 | */ 650 | public Builder setData(akka.persistence.kafka.journal.EventFormats.EventDataFormat value) { 651 | if (dataBuilder_ == null) { 652 | if (value == null) { 653 | throw new NullPointerException(); 654 | } 655 | data_ = value; 656 | onChanged(); 657 | } else { 658 | dataBuilder_.setMessage(value); 659 | } 660 | bitField0_ |= 0x00000004; 661 | return this; 662 | } 663 | /** 664 | * optional .EventDataFormat data = 3; 665 | */ 666 | public Builder setData( 667 | akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder builderForValue) { 668 | if (dataBuilder_ == null) { 669 | data_ = builderForValue.build(); 670 | onChanged(); 671 | } else { 672 | dataBuilder_.setMessage(builderForValue.build()); 673 | } 674 | bitField0_ |= 0x00000004; 675 | return this; 676 | } 677 | /** 678 | * 
optional .EventDataFormat data = 3; 679 | */ 680 | public Builder mergeData(akka.persistence.kafka.journal.EventFormats.EventDataFormat value) { 681 | if (dataBuilder_ == null) { 682 | if (((bitField0_ & 0x00000004) == 0x00000004) && 683 | data_ != akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance()) { 684 | data_ = 685 | akka.persistence.kafka.journal.EventFormats.EventDataFormat.newBuilder(data_).mergeFrom(value).buildPartial(); 686 | } else { 687 | data_ = value; 688 | } 689 | onChanged(); 690 | } else { 691 | dataBuilder_.mergeFrom(value); 692 | } 693 | bitField0_ |= 0x00000004; 694 | return this; 695 | } 696 | /** 697 | * optional .EventDataFormat data = 3; 698 | */ 699 | public Builder clearData() { 700 | if (dataBuilder_ == null) { 701 | data_ = akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance(); 702 | onChanged(); 703 | } else { 704 | dataBuilder_.clear(); 705 | } 706 | bitField0_ = (bitField0_ & ~0x00000004); 707 | return this; 708 | } 709 | /** 710 | * optional .EventDataFormat data = 3; 711 | */ 712 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder getDataBuilder() { 713 | bitField0_ |= 0x00000004; 714 | onChanged(); 715 | return getDataFieldBuilder().getBuilder(); 716 | } 717 | /** 718 | * optional .EventDataFormat data = 3; 719 | */ 720 | public akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder getDataOrBuilder() { 721 | if (dataBuilder_ != null) { 722 | return dataBuilder_.getMessageOrBuilder(); 723 | } else { 724 | return data_; 725 | } 726 | } 727 | /** 728 | * optional .EventDataFormat data = 3; 729 | */ 730 | private com.google.protobuf.SingleFieldBuilder< 731 | akka.persistence.kafka.journal.EventFormats.EventDataFormat, akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder, akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder> 732 | getDataFieldBuilder() { 733 | if (dataBuilder_ == null) { 734 | dataBuilder_ = new com.google.protobuf.SingleFieldBuilder< 735 | akka.persistence.kafka.journal.EventFormats.EventDataFormat, akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder, akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder>( 736 | data_, 737 | getParentForChildren(), 738 | isClean()); 739 | data_ = null; 740 | } 741 | return dataBuilder_; 742 | } 743 | 744 | // @@protoc_insertion_point(builder_scope:EventFormat) 745 | } 746 | 747 | static { 748 | defaultInstance = new EventFormat(true); 749 | defaultInstance.initFields(); 750 | } 751 | 752 | // @@protoc_insertion_point(class_scope:EventFormat) 753 | } 754 | 755 | public interface EventDataFormatOrBuilder 756 | extends com.google.protobuf.MessageOrBuilder { 757 | 758 | // optional int32 serializerId = 1; 759 | /** 760 | * optional int32 serializerId = 1; 761 | */ 762 | boolean hasSerializerId(); 763 | /** 764 | * optional int32 serializerId = 1; 765 | */ 766 | int getSerializerId(); 767 | 768 | // optional bytes data = 2; 769 | /** 770 | * optional bytes data = 2; 771 | */ 772 | boolean hasData(); 773 | /** 774 | * optional bytes data = 2; 775 | */ 776 | com.google.protobuf.ByteString getData(); 777 | 778 | // optional bytes dataManifest = 3; 779 | /** 780 | * optional bytes dataManifest = 3; 781 | */ 782 | boolean hasDataManifest(); 783 | /** 784 | * optional bytes dataManifest = 3; 785 | */ 786 | com.google.protobuf.ByteString getDataManifest(); 787 | } 788 | /** 789 | * Protobuf type {@code EventDataFormat} 790 | */ 791 | public static final class 
EventDataFormat extends 792 | com.google.protobuf.GeneratedMessage 793 | implements EventDataFormatOrBuilder { 794 | // Use EventDataFormat.newBuilder() to construct. 795 | private EventDataFormat(com.google.protobuf.GeneratedMessage.Builder builder) { 796 | super(builder); 797 | this.unknownFields = builder.getUnknownFields(); 798 | } 799 | private EventDataFormat(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } 800 | 801 | private static final EventDataFormat defaultInstance; 802 | public static EventDataFormat getDefaultInstance() { 803 | return defaultInstance; 804 | } 805 | 806 | public EventDataFormat getDefaultInstanceForType() { 807 | return defaultInstance; 808 | } 809 | 810 | private final com.google.protobuf.UnknownFieldSet unknownFields; 811 | @java.lang.Override 812 | public final com.google.protobuf.UnknownFieldSet 813 | getUnknownFields() { 814 | return this.unknownFields; 815 | } 816 | private EventDataFormat( 817 | com.google.protobuf.CodedInputStream input, 818 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 819 | throws com.google.protobuf.InvalidProtocolBufferException { 820 | initFields(); 821 | int mutable_bitField0_ = 0; 822 | com.google.protobuf.UnknownFieldSet.Builder unknownFields = 823 | com.google.protobuf.UnknownFieldSet.newBuilder(); 824 | try { 825 | boolean done = false; 826 | while (!done) { 827 | int tag = input.readTag(); 828 | switch (tag) { 829 | case 0: 830 | done = true; 831 | break; 832 | default: { 833 | if (!parseUnknownField(input, unknownFields, 834 | extensionRegistry, tag)) { 835 | done = true; 836 | } 837 | break; 838 | } 839 | case 8: { 840 | bitField0_ |= 0x00000001; 841 | serializerId_ = input.readInt32(); 842 | break; 843 | } 844 | case 18: { 845 | bitField0_ |= 0x00000002; 846 | data_ = input.readBytes(); 847 | break; 848 | } 849 | case 26: { 850 | bitField0_ |= 0x00000004; 851 | dataManifest_ = input.readBytes(); 852 | break; 853 | } 854 | } 855 | } 856 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 857 | throw e.setUnfinishedMessage(this); 858 | } catch (java.io.IOException e) { 859 | throw new com.google.protobuf.InvalidProtocolBufferException( 860 | e.getMessage()).setUnfinishedMessage(this); 861 | } finally { 862 | this.unknownFields = unknownFields.build(); 863 | makeExtensionsImmutable(); 864 | } 865 | } 866 | public static final com.google.protobuf.Descriptors.Descriptor 867 | getDescriptor() { 868 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventDataFormat_descriptor; 869 | } 870 | 871 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 872 | internalGetFieldAccessorTable() { 873 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventDataFormat_fieldAccessorTable 874 | .ensureFieldAccessorsInitialized( 875 | akka.persistence.kafka.journal.EventFormats.EventDataFormat.class, akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder.class); 876 | } 877 | 878 | public static com.google.protobuf.Parser PARSER = 879 | new com.google.protobuf.AbstractParser() { 880 | public EventDataFormat parsePartialFrom( 881 | com.google.protobuf.CodedInputStream input, 882 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 883 | throws com.google.protobuf.InvalidProtocolBufferException { 884 | return new EventDataFormat(input, extensionRegistry); 885 | } 886 | }; 887 | 888 | @java.lang.Override 889 | public com.google.protobuf.Parser getParserForType() { 890 | return PARSER; 891 | } 
892 | 893 | private int bitField0_; 894 | // optional int32 serializerId = 1; 895 | public static final int SERIALIZERID_FIELD_NUMBER = 1; 896 | private int serializerId_; 897 | /** 898 | * optional int32 serializerId = 1; 899 | */ 900 | public boolean hasSerializerId() { 901 | return ((bitField0_ & 0x00000001) == 0x00000001); 902 | } 903 | /** 904 | * optional int32 serializerId = 1; 905 | */ 906 | public int getSerializerId() { 907 | return serializerId_; 908 | } 909 | 910 | // optional bytes data = 2; 911 | public static final int DATA_FIELD_NUMBER = 2; 912 | private com.google.protobuf.ByteString data_; 913 | /** 914 | * optional bytes data = 2; 915 | */ 916 | public boolean hasData() { 917 | return ((bitField0_ & 0x00000002) == 0x00000002); 918 | } 919 | /** 920 | * optional bytes data = 2; 921 | */ 922 | public com.google.protobuf.ByteString getData() { 923 | return data_; 924 | } 925 | 926 | // optional bytes dataManifest = 3; 927 | public static final int DATAMANIFEST_FIELD_NUMBER = 3; 928 | private com.google.protobuf.ByteString dataManifest_; 929 | /** 930 | * optional bytes dataManifest = 3; 931 | */ 932 | public boolean hasDataManifest() { 933 | return ((bitField0_ & 0x00000004) == 0x00000004); 934 | } 935 | /** 936 | * optional bytes dataManifest = 3; 937 | */ 938 | public com.google.protobuf.ByteString getDataManifest() { 939 | return dataManifest_; 940 | } 941 | 942 | private void initFields() { 943 | serializerId_ = 0; 944 | data_ = com.google.protobuf.ByteString.EMPTY; 945 | dataManifest_ = com.google.protobuf.ByteString.EMPTY; 946 | } 947 | private byte memoizedIsInitialized = -1; 948 | public final boolean isInitialized() { 949 | byte isInitialized = memoizedIsInitialized; 950 | if (isInitialized != -1) return isInitialized == 1; 951 | 952 | memoizedIsInitialized = 1; 953 | return true; 954 | } 955 | 956 | public void writeTo(com.google.protobuf.CodedOutputStream output) 957 | throws java.io.IOException { 958 | getSerializedSize(); 959 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 960 | output.writeInt32(1, serializerId_); 961 | } 962 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 963 | output.writeBytes(2, data_); 964 | } 965 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 966 | output.writeBytes(3, dataManifest_); 967 | } 968 | getUnknownFields().writeTo(output); 969 | } 970 | 971 | private int memoizedSerializedSize = -1; 972 | public int getSerializedSize() { 973 | int size = memoizedSerializedSize; 974 | if (size != -1) return size; 975 | 976 | size = 0; 977 | if (((bitField0_ & 0x00000001) == 0x00000001)) { 978 | size += com.google.protobuf.CodedOutputStream 979 | .computeInt32Size(1, serializerId_); 980 | } 981 | if (((bitField0_ & 0x00000002) == 0x00000002)) { 982 | size += com.google.protobuf.CodedOutputStream 983 | .computeBytesSize(2, data_); 984 | } 985 | if (((bitField0_ & 0x00000004) == 0x00000004)) { 986 | size += com.google.protobuf.CodedOutputStream 987 | .computeBytesSize(3, dataManifest_); 988 | } 989 | size += getUnknownFields().getSerializedSize(); 990 | memoizedSerializedSize = size; 991 | return size; 992 | } 993 | 994 | private static final long serialVersionUID = 0L; 995 | @java.lang.Override 996 | protected java.lang.Object writeReplace() 997 | throws java.io.ObjectStreamException { 998 | return super.writeReplace(); 999 | } 1000 | 1001 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1002 | com.google.protobuf.ByteString data) 1003 | throws com.google.protobuf.InvalidProtocolBufferException { 
1004 | return PARSER.parseFrom(data); 1005 | } 1006 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1007 | com.google.protobuf.ByteString data, 1008 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1009 | throws com.google.protobuf.InvalidProtocolBufferException { 1010 | return PARSER.parseFrom(data, extensionRegistry); 1011 | } 1012 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom(byte[] data) 1013 | throws com.google.protobuf.InvalidProtocolBufferException { 1014 | return PARSER.parseFrom(data); 1015 | } 1016 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1017 | byte[] data, 1018 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1019 | throws com.google.protobuf.InvalidProtocolBufferException { 1020 | return PARSER.parseFrom(data, extensionRegistry); 1021 | } 1022 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom(java.io.InputStream input) 1023 | throws java.io.IOException { 1024 | return PARSER.parseFrom(input); 1025 | } 1026 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1027 | java.io.InputStream input, 1028 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1029 | throws java.io.IOException { 1030 | return PARSER.parseFrom(input, extensionRegistry); 1031 | } 1032 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseDelimitedFrom(java.io.InputStream input) 1033 | throws java.io.IOException { 1034 | return PARSER.parseDelimitedFrom(input); 1035 | } 1036 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseDelimitedFrom( 1037 | java.io.InputStream input, 1038 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1039 | throws java.io.IOException { 1040 | return PARSER.parseDelimitedFrom(input, extensionRegistry); 1041 | } 1042 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1043 | com.google.protobuf.CodedInputStream input) 1044 | throws java.io.IOException { 1045 | return PARSER.parseFrom(input); 1046 | } 1047 | public static akka.persistence.kafka.journal.EventFormats.EventDataFormat parseFrom( 1048 | com.google.protobuf.CodedInputStream input, 1049 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1050 | throws java.io.IOException { 1051 | return PARSER.parseFrom(input, extensionRegistry); 1052 | } 1053 | 1054 | public static Builder newBuilder() { return Builder.create(); } 1055 | public Builder newBuilderForType() { return newBuilder(); } 1056 | public static Builder newBuilder(akka.persistence.kafka.journal.EventFormats.EventDataFormat prototype) { 1057 | return newBuilder().mergeFrom(prototype); 1058 | } 1059 | public Builder toBuilder() { return newBuilder(this); } 1060 | 1061 | @java.lang.Override 1062 | protected Builder newBuilderForType( 1063 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 1064 | Builder builder = new Builder(parent); 1065 | return builder; 1066 | } 1067 | /** 1068 | * Protobuf type {@code EventDataFormat} 1069 | */ 1070 | public static final class Builder extends 1071 | com.google.protobuf.GeneratedMessage.Builder 1072 | implements akka.persistence.kafka.journal.EventFormats.EventDataFormatOrBuilder { 1073 | public static final com.google.protobuf.Descriptors.Descriptor 1074 | getDescriptor() { 1075 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventDataFormat_descriptor; 1076 | 
} 1077 | 1078 | protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 1079 | internalGetFieldAccessorTable() { 1080 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventDataFormat_fieldAccessorTable 1081 | .ensureFieldAccessorsInitialized( 1082 | akka.persistence.kafka.journal.EventFormats.EventDataFormat.class, akka.persistence.kafka.journal.EventFormats.EventDataFormat.Builder.class); 1083 | } 1084 | 1085 | // Construct using akka.persistence.kafka.journal.EventFormats.EventDataFormat.newBuilder() 1086 | private Builder() { 1087 | maybeForceBuilderInitialization(); 1088 | } 1089 | 1090 | private Builder( 1091 | com.google.protobuf.GeneratedMessage.BuilderParent parent) { 1092 | super(parent); 1093 | maybeForceBuilderInitialization(); 1094 | } 1095 | private void maybeForceBuilderInitialization() { 1096 | if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 1097 | } 1098 | } 1099 | private static Builder create() { 1100 | return new Builder(); 1101 | } 1102 | 1103 | public Builder clear() { 1104 | super.clear(); 1105 | serializerId_ = 0; 1106 | bitField0_ = (bitField0_ & ~0x00000001); 1107 | data_ = com.google.protobuf.ByteString.EMPTY; 1108 | bitField0_ = (bitField0_ & ~0x00000002); 1109 | dataManifest_ = com.google.protobuf.ByteString.EMPTY; 1110 | bitField0_ = (bitField0_ & ~0x00000004); 1111 | return this; 1112 | } 1113 | 1114 | public Builder clone() { 1115 | return create().mergeFrom(buildPartial()); 1116 | } 1117 | 1118 | public com.google.protobuf.Descriptors.Descriptor 1119 | getDescriptorForType() { 1120 | return akka.persistence.kafka.journal.EventFormats.internal_static_EventDataFormat_descriptor; 1121 | } 1122 | 1123 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat getDefaultInstanceForType() { 1124 | return akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance(); 1125 | } 1126 | 1127 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat build() { 1128 | akka.persistence.kafka.journal.EventFormats.EventDataFormat result = buildPartial(); 1129 | if (!result.isInitialized()) { 1130 | throw newUninitializedMessageException(result); 1131 | } 1132 | return result; 1133 | } 1134 | 1135 | public akka.persistence.kafka.journal.EventFormats.EventDataFormat buildPartial() { 1136 | akka.persistence.kafka.journal.EventFormats.EventDataFormat result = new akka.persistence.kafka.journal.EventFormats.EventDataFormat(this); 1137 | int from_bitField0_ = bitField0_; 1138 | int to_bitField0_ = 0; 1139 | if (((from_bitField0_ & 0x00000001) == 0x00000001)) { 1140 | to_bitField0_ |= 0x00000001; 1141 | } 1142 | result.serializerId_ = serializerId_; 1143 | if (((from_bitField0_ & 0x00000002) == 0x00000002)) { 1144 | to_bitField0_ |= 0x00000002; 1145 | } 1146 | result.data_ = data_; 1147 | if (((from_bitField0_ & 0x00000004) == 0x00000004)) { 1148 | to_bitField0_ |= 0x00000004; 1149 | } 1150 | result.dataManifest_ = dataManifest_; 1151 | result.bitField0_ = to_bitField0_; 1152 | onBuilt(); 1153 | return result; 1154 | } 1155 | 1156 | public Builder mergeFrom(com.google.protobuf.Message other) { 1157 | if (other instanceof akka.persistence.kafka.journal.EventFormats.EventDataFormat) { 1158 | return mergeFrom((akka.persistence.kafka.journal.EventFormats.EventDataFormat)other); 1159 | } else { 1160 | super.mergeFrom(other); 1161 | return this; 1162 | } 1163 | } 1164 | 1165 | public Builder mergeFrom(akka.persistence.kafka.journal.EventFormats.EventDataFormat other) { 1166 | if (other == 
akka.persistence.kafka.journal.EventFormats.EventDataFormat.getDefaultInstance()) return this; 1167 | if (other.hasSerializerId()) { 1168 | setSerializerId(other.getSerializerId()); 1169 | } 1170 | if (other.hasData()) { 1171 | setData(other.getData()); 1172 | } 1173 | if (other.hasDataManifest()) { 1174 | setDataManifest(other.getDataManifest()); 1175 | } 1176 | this.mergeUnknownFields(other.getUnknownFields()); 1177 | return this; 1178 | } 1179 | 1180 | public final boolean isInitialized() { 1181 | return true; 1182 | } 1183 | 1184 | public Builder mergeFrom( 1185 | com.google.protobuf.CodedInputStream input, 1186 | com.google.protobuf.ExtensionRegistryLite extensionRegistry) 1187 | throws java.io.IOException { 1188 | akka.persistence.kafka.journal.EventFormats.EventDataFormat parsedMessage = null; 1189 | try { 1190 | parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); 1191 | } catch (com.google.protobuf.InvalidProtocolBufferException e) { 1192 | parsedMessage = (akka.persistence.kafka.journal.EventFormats.EventDataFormat) e.getUnfinishedMessage(); 1193 | throw e; 1194 | } finally { 1195 | if (parsedMessage != null) { 1196 | mergeFrom(parsedMessage); 1197 | } 1198 | } 1199 | return this; 1200 | } 1201 | private int bitField0_; 1202 | 1203 | // optional int32 serializerId = 1; 1204 | private int serializerId_ ; 1205 | /** 1206 | * optional int32 serializerId = 1; 1207 | */ 1208 | public boolean hasSerializerId() { 1209 | return ((bitField0_ & 0x00000001) == 0x00000001); 1210 | } 1211 | /** 1212 | * optional int32 serializerId = 1; 1213 | */ 1214 | public int getSerializerId() { 1215 | return serializerId_; 1216 | } 1217 | /** 1218 | * optional int32 serializerId = 1; 1219 | */ 1220 | public Builder setSerializerId(int value) { 1221 | bitField0_ |= 0x00000001; 1222 | serializerId_ = value; 1223 | onChanged(); 1224 | return this; 1225 | } 1226 | /** 1227 | * optional int32 serializerId = 1; 1228 | */ 1229 | public Builder clearSerializerId() { 1230 | bitField0_ = (bitField0_ & ~0x00000001); 1231 | serializerId_ = 0; 1232 | onChanged(); 1233 | return this; 1234 | } 1235 | 1236 | // optional bytes data = 2; 1237 | private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; 1238 | /** 1239 | * optional bytes data = 2; 1240 | */ 1241 | public boolean hasData() { 1242 | return ((bitField0_ & 0x00000002) == 0x00000002); 1243 | } 1244 | /** 1245 | * optional bytes data = 2; 1246 | */ 1247 | public com.google.protobuf.ByteString getData() { 1248 | return data_; 1249 | } 1250 | /** 1251 | * optional bytes data = 2; 1252 | */ 1253 | public Builder setData(com.google.protobuf.ByteString value) { 1254 | if (value == null) { 1255 | throw new NullPointerException(); 1256 | } 1257 | bitField0_ |= 0x00000002; 1258 | data_ = value; 1259 | onChanged(); 1260 | return this; 1261 | } 1262 | /** 1263 | * optional bytes data = 2; 1264 | */ 1265 | public Builder clearData() { 1266 | bitField0_ = (bitField0_ & ~0x00000002); 1267 | data_ = getDefaultInstance().getData(); 1268 | onChanged(); 1269 | return this; 1270 | } 1271 | 1272 | // optional bytes dataManifest = 3; 1273 | private com.google.protobuf.ByteString dataManifest_ = com.google.protobuf.ByteString.EMPTY; 1274 | /** 1275 | * optional bytes dataManifest = 3; 1276 | */ 1277 | public boolean hasDataManifest() { 1278 | return ((bitField0_ & 0x00000004) == 0x00000004); 1279 | } 1280 | /** 1281 | * optional bytes dataManifest = 3; 1282 | */ 1283 | public com.google.protobuf.ByteString getDataManifest() { 1284 | return 
dataManifest_; 1285 | } 1286 | /** 1287 | * optional bytes dataManifest = 3; 1288 | */ 1289 | public Builder setDataManifest(com.google.protobuf.ByteString value) { 1290 | if (value == null) { 1291 | throw new NullPointerException(); 1292 | } 1293 | bitField0_ |= 0x00000004; 1294 | dataManifest_ = value; 1295 | onChanged(); 1296 | return this; 1297 | } 1298 | /** 1299 | * optional bytes dataManifest = 3; 1300 | */ 1301 | public Builder clearDataManifest() { 1302 | bitField0_ = (bitField0_ & ~0x00000004); 1303 | dataManifest_ = getDefaultInstance().getDataManifest(); 1304 | onChanged(); 1305 | return this; 1306 | } 1307 | 1308 | // @@protoc_insertion_point(builder_scope:EventDataFormat) 1309 | } 1310 | 1311 | static { 1312 | defaultInstance = new EventDataFormat(true); 1313 | defaultInstance.initFields(); 1314 | } 1315 | 1316 | // @@protoc_insertion_point(class_scope:EventDataFormat) 1317 | } 1318 | 1319 | private static com.google.protobuf.Descriptors.Descriptor 1320 | internal_static_EventFormat_descriptor; 1321 | private static 1322 | com.google.protobuf.GeneratedMessage.FieldAccessorTable 1323 | internal_static_EventFormat_fieldAccessorTable; 1324 | private static com.google.protobuf.Descriptors.Descriptor 1325 | internal_static_EventDataFormat_descriptor; 1326 | private static 1327 | com.google.protobuf.GeneratedMessage.FieldAccessorTable 1328 | internal_static_EventDataFormat_fieldAccessorTable; 1329 | 1330 | public static com.google.protobuf.Descriptors.FileDescriptor 1331 | getDescriptor() { 1332 | return descriptor; 1333 | } 1334 | private static com.google.protobuf.Descriptors.FileDescriptor 1335 | descriptor; 1336 | static { 1337 | java.lang.String[] descriptorData = { 1338 | "\n$src/main/protobuf/EventFormats.proto\"X" + 1339 | "\n\013EventFormat\022\025\n\rpersistenceId\030\001 \001(\t\022\022\n\n" + 1340 | "sequenceNr\030\002 \001(\003\022\036\n\004data\030\003 \001(\0132\020.EventDa" + 1341 | "taFormat\"K\n\017EventDataFormat\022\024\n\014serialize" + 1342 | "rId\030\001 \001(\005\022\014\n\004data\030\002 \001(\014\022\024\n\014dataManifest\030" + 1343 | "\003 \001(\014B\"\n\036akka.persistence.kafka.journalH" + 1344 | "\001" 1345 | }; 1346 | com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = 1347 | new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { 1348 | public com.google.protobuf.ExtensionRegistry assignDescriptors( 1349 | com.google.protobuf.Descriptors.FileDescriptor root) { 1350 | descriptor = root; 1351 | internal_static_EventFormat_descriptor = 1352 | getDescriptor().getMessageTypes().get(0); 1353 | internal_static_EventFormat_fieldAccessorTable = new 1354 | com.google.protobuf.GeneratedMessage.FieldAccessorTable( 1355 | internal_static_EventFormat_descriptor, 1356 | new java.lang.String[] { "PersistenceId", "SequenceNr", "Data", }); 1357 | internal_static_EventDataFormat_descriptor = 1358 | getDescriptor().getMessageTypes().get(1); 1359 | internal_static_EventDataFormat_fieldAccessorTable = new 1360 | com.google.protobuf.GeneratedMessage.FieldAccessorTable( 1361 | internal_static_EventDataFormat_descriptor, 1362 | new java.lang.String[] { "SerializerId", "Data", "DataManifest", }); 1363 | return null; 1364 | } 1365 | }; 1366 | com.google.protobuf.Descriptors.FileDescriptor 1367 | .internalBuildGeneratedFileFrom(descriptorData, 1368 | new com.google.protobuf.Descriptors.FileDescriptor[] { 1369 | }, assigner); 1370 | } 1371 | 1372 | // @@protoc_insertion_point(outer_class_scope) 1373 | } 1374 | 
--------------------------------------------------------------------------------
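Note (illustrative, not part of the repository): the generated EventDataFormat builder and parser shown above are the kind of API the journal's event serializer would call into. A minimal Scala sketch of a round trip through that generated API follows; the payload bytes, serializer id, and manifest string are hypothetical placeholders chosen only for the example.

// Sketch: build an EventDataFormat with the generated builder, serialize it,
// and parse it back. Assumes the generated classes above are on the classpath.
import com.google.protobuf.ByteString
import akka.persistence.kafka.journal.EventFormats

object EventDataFormatRoundTrip extends App {
  // Hypothetical event payload; in the plugin this would come from Akka's serialization layer.
  val payload: Array[Byte] = "example-event".getBytes("UTF-8")

  // Build an EventDataFormat via the generated builder (newBuilder/set*/build as shown above).
  val format = EventFormats.EventDataFormat.newBuilder()
    .setSerializerId(1)                                            // hypothetical serializer id
    .setData(ByteString.copyFrom(payload))
    .setDataManifest(ByteString.copyFromUtf8("example.Manifest"))  // hypothetical manifest
    .build()

  // Serialize to bytes and parse back with the generated parser.
  val bytes  = format.toByteArray
  val parsed = EventFormats.EventDataFormat.parseFrom(bytes)

  assert(parsed.getSerializerId == 1)
  assert(parsed.getData.toByteArray.sameElements(payload))
}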