├── benchmarks
├── project
│ └── build.properties
├── src
│ ├── main
│ │ ├── scala
│ │ │ └── monix
│ │ │ │ └── kafka
│ │ │ │ └── benchmarks
│ │ │ │ ├── BaseFixture.scala
│ │ │ │ ├── MonixFixture.scala
│ │ │ │ ├── ProducerBenchmark.scala
│ │ │ │ └── ConsumerBenchmark.scala
│ │ └── resources
│ │ │ └── logback.xml
│ └── test
│ │ └── scala
│ │ └── monix
│ │ └── kafka
│ │ └── benchmarks
│ │ ├── ProducerSpec.scala
│ │ └── ConsumerSpec.scala
├── start-kafka-cluster.sh
├── results
│ ├── producer.md
│ └── consumer.md
└── docker-compose.yml
├── kafka-0.10.x
└── src
│ ├── test
│ ├── scala
│ │ └── monix
│ │ │ └── kafka
│ │ │ ├── KafkaTestKit.scala
│ │ │ ├── package.scala
│ │ │ ├── ConfigTest.scala
│ │ │ ├── MergeByCommitCallbackTest.scala
│ │ │ ├── MonixKafkaTopicRegexTest.scala
│ │ │ └── SerializationTest.scala
│ └── resources
│ │ └── logback.xml
│ └── main
│ ├── scala
│ └── monix
│ │ └── kafka
│ │ ├── CommittableMessage.scala
│ │ ├── Commit.scala
│ │ ├── config
│ │ ├── PartitionerName.scala
│ │ ├── ClassName.scala
│ │ ├── ObservableCommitType.scala
│ │ ├── SecurityProtocol.scala
│ │ ├── AutoOffsetReset.scala
│ │ ├── CompressionType.scala
│ │ ├── ObservableSeekOnStart.scala
│ │ ├── ObservableCommitOrder.scala
│ │ ├── Acks.scala
│ │ └── SSLProtocol.scala
│ │ ├── CommittableOffset.scala
│ │ ├── Serializer.scala
│ │ ├── CommittableOffsetBatch.scala
│ │ └── Deserializer.scala
│ └── resources
│ └── monix
│ └── kafka
│ └── default.conf
├── kafka-1.0.x
└── src
│ ├── test
│ ├── scala
│ │ └── monix
│ │ │ └── kafka
│ │ │ ├── KafkaTestKit.scala
│ │ │ ├── package.scala
│ │ │ ├── ConfigTest.scala
│ │ │ ├── MergeByCommitCallbackTest.scala
│ │ │ ├── MonixKafkaTopicRegexTest.scala
│ │ │ └── SerializationTest.scala
│ └── resources
│ │ └── logback-test.xml
│ └── main
│ ├── scala
│ └── monix
│ │ └── kafka
│ │ ├── CommittableMessage.scala
│ │ ├── Commit.scala
│ │ ├── config
│ │ ├── PartitionerName.scala
│ │ ├── ClassName.scala
│ │ ├── ObservableCommitType.scala
│ │ ├── SecurityProtocol.scala
│ │ ├── AutoOffsetReset.scala
│ │ ├── CompressionType.scala
│ │ ├── ObservableSeekOnStart.scala
│ │ ├── ObservableCommitOrder.scala
│ │ ├── Acks.scala
│ │ └── SSLProtocol.scala
│ │ ├── CommittableOffset.scala
│ │ ├── Serializer.scala
│ │ ├── CommittableOffsetBatch.scala
│ │ ├── Deserializer.scala
│ │ └── KafkaConsumerObservableAutoCommit.scala
│ └── resources
│ └── monix
│ └── kafka
│ └── default.conf
├── kafka-0.11.x
└── src
│ ├── test
│ ├── scala
│ │ └── monix
│ │ │ └── kafka
│ │ │ ├── KafkaTestKit.scala
│ │ │ ├── package.scala
│ │ │ ├── ConfigTest.scala
│ │ │ ├── MergeByCommitCallbackTest.scala
│ │ │ ├── MonixKafkaTopicRegexTest.scala
│ │ │ └── SerializationTest.scala
│ └── resources
│ │ └── logback.xml
│ └── main
│ ├── scala
│ └── monix
│ │ └── kafka
│ │ ├── CommittableMessage.scala
│ │ ├── Commit.scala
│ │ ├── config
│ │ ├── PartitionerName.scala
│ │ ├── ClassName.scala
│ │ ├── ObservableCommitType.scala
│ │ ├── SecurityProtocol.scala
│ │ ├── AutoOffsetReset.scala
│ │ ├── CompressionType.scala
│ │ ├── ObservableSeekOnStart.scala
│ │ ├── ObservableCommitOrder.scala
│ │ ├── Acks.scala
│ │ └── SSLProtocol.scala
│ │ ├── CommittableOffset.scala
│ │ ├── Serializer.scala
│ │ ├── Deserializer.scala
│ │ └── CommittableOffsetBatch.scala
│ └── resources
│ └── monix
│ └── kafka
│ └── default.conf
├── .gitignore
├── project
├── plugins.sbt
└── build.properties
├── .github
└── workflows
│ ├── release.yml
│ └── build.yml
├── AUTHORS
├── .scalafmt.conf
├── CONTRIBUTING.md
├── CHANGES.md
└── RELEASES.md
/benchmarks/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.3.3
2 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/KafkaTestKit.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import net.manub.embeddedkafka.EmbeddedKafka
4 | import org.scalatest.Suite
5 |
6 | trait KafkaTestKit extends EmbeddedKafka { self: Suite =>
7 |
8 | sys.addShutdownHook {
9 | EmbeddedKafka.stop()
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/KafkaTestKit.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import net.manub.embeddedkafka.EmbeddedKafka
4 | import org.scalatest.Suite
5 |
6 | trait KafkaTestKit extends EmbeddedKafka { self: Suite =>
7 |
8 | sys.addShutdownHook {
9 | EmbeddedKafka.stop()
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/KafkaTestKit.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import net.manub.embeddedkafka.EmbeddedKafka
4 | import org.scalatest.Suite
5 |
6 | trait KafkaTestKit extends EmbeddedKafka { self: Suite =>
7 |
8 | sys.addShutdownHook {
9 | EmbeddedKafka.stop()
10 | }
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | *.log
3 |
4 | # sbt specific
5 | .cache
6 | .history
7 | .lib/
8 | dist/*
9 | target/
10 | lib_managed/
11 | src_managed/
12 | project/boot/
13 | project/plugins/project/
14 |
15 | # Scala-IDE specific
16 | .scala_dependencies
17 | .worksheet
18 | .idea
19 |
20 | .bsp/sbt.json
21 |
22 | .DS_Store
23 |
--------------------------------------------------------------------------------
/benchmarks/src/main/scala/monix/kafka/benchmarks/BaseFixture.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import monix.eval.Coeval
4 |
5 | import scala.util.Random
6 |
7 | trait BaseFixture {
8 | protected val brokerUrl = "127.0.0.1:9092"
9 | protected val randomId: Coeval[String] = Coeval(Random.alphanumeric.filter(_.isLetter).take(20).mkString)
10 | protected val monixTopic = "monix_topic"
11 | }
12 |
--------------------------------------------------------------------------------
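
The `Coeval`-wrapped `randomId` above is what lets the benchmark fixtures obtain a fresh value on every evaluation (MonixFixture later uses it to generate a new consumer group id per run). A minimal, self-contained sketch of that behaviour, assuming only Monix and the standard library:

```scala
import monix.eval.Coeval
import scala.util.Random

object RandomIdDemo extends App {
  // Same expression as BaseFixture.randomId: Coeval defers evaluation,
  // so each .value() call yields a fresh 20-letter identifier.
  val randomId: Coeval[String] =
    Coeval(Random.alphanumeric.filter(_.isLetter).take(20).mkString)

  val first = randomId.value()
  val second = randomId.value()
  println(s"$first != $second") // two evaluations, two (almost surely) different ids
}
```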
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.1.2")
2 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.6.5")
3 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.9.12")
4 | addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.2")
5 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.0.1")
6 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")
7 | addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.3.7")
8 | addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.5.10")
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on:
3 | push:
4 | tags: ["*"]
5 | jobs:
6 | publish:
7 | runs-on: ubuntu-20.04
8 | steps:
9 | - uses: actions/checkout@v2.3.4
10 | with:
11 | fetch-depth: 0
12 | - uses: olafurpg/setup-scala@v13
13 | - run: sbt ci-release
14 | env:
15 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}
16 | PGP_SECRET: ${{ secrets.PGP_SECRET }}
17 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
18 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }}
--------------------------------------------------------------------------------
/benchmarks/src/test/scala/monix/kafka/benchmarks/ProducerSpec.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import monix.kafka.KafkaProducer
4 | import monix.execution.Scheduler.Implicits.global
5 | import org.scalatest.{FlatSpec, Matchers}
6 |
7 | class ProducerSpec extends FlatSpec with MonixFixture with Matchers {
8 |
9 | val producer = KafkaProducer[String, String](producerConf, global)
10 |
11 | s"Monix ${monixTopic}" should "exist befor running Producer Benchmark" in {
12 | val t = producer.send(topic = monixTopic, "test")
13 |
14 | t.runSyncUnsafe().isDefined shouldBe true
15 | }
16 |
17 | }
18 |
--------------------------------------------------------------------------------
/AUTHORS:
--------------------------------------------------------------------------------
1 | Authors that have contributed code, documentation, tests and feedback
2 | to project Monix and that own the copyright of their contributions:
3 |
4 | Alexandru Nedelcu
5 | https://github.com/alexandru
6 |
7 | Alex Gryzlov
8 | https://github.com/clayrat
9 |
10 | Piotr Gawryś
11 | https://github.com/Avasil
12 |
13 | Kacper Gunia
14 | https://github.com/cakper
15 |
16 | Sherwin Chiu
17 | https://github.com/sherwinschiu
18 |
19 | Mikhail Chugunkov
20 | https://github.com/poslegm
21 |
22 | Vasiliy Efimov
23 | https://github.com/voidconductor
24 |
25 | Adriel Velazquez
26 | https://github.com/AdrielVelazquez
27 |
28 | Arun Gopalpuri
29 | https://github.com/arun0009
30 |
31 | Pau Alarcón
32 | https://github.com/paualarco
--------------------------------------------------------------------------------
/benchmarks/start-kafka-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | function create_topic {
6 | TOPIC_NAME=$1
7 | PARTITIONS=$2
8 | REPLICATION_FACTOR=$3
9 | echo "Creating topic ${TOPIC_NAME} with ${PARTITIONS} partitions and replication factor of ${REPLICATION_FACTOR}."
10 | docker-compose -f ./docker-compose.yml exec -T broker kafka-topics --create --topic ${TOPIC_NAME} --partitions ${PARTITIONS} --replication-factor ${REPLICATION_FACTOR} --if-not-exists --zookeeper zookeeper:2181
11 | }
12 |
13 | echo "Starting Kafka cluster..."
14 | docker-compose -f ./docker-compose.yml up -d zookeeper broker
15 |
16 | echo -e "Docker ps..."
17 | docker ps
18 |
19 | sleep 15
20 |
21 | create_topic monix_topic 2 1
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | # See the project homepage at: https://github.com/monix/monix-kafka
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | sbt.version=1.5.8
19 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | [XML markup not preserved in this export; only the appender's log pattern remains:]
2 | %d{yyyyMMdd-HH:mm:ss.SSSZ} [%thread] %-5level %logger - %msg%n
--------------------------------------------------------------------------------
/benchmarks/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | [XML markup not preserved in this export; only the appender's log pattern remains:]
2 | %d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | [XML markup not preserved in this export; only the appender's log pattern remains:]
2 | %d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n
--------------------------------------------------------------------------------
/benchmarks/results/producer.md:
--------------------------------------------------------------------------------
1 | ## Producer benchmarks
2 |
3 | This section includes benchmarks for single and sink producers.
4 |
5 | ## RC7
6 |
7 | ### 10 iterations, 1 fork, 1 thread
8 | Benchmark                                 Mode  Cnt  Score   Error  Units
9 | ProducerBenchmark.monix_single_producer  thrpt    3  0.504 ± 0.668  ops/s
10 | ProducerBenchmark.monix_sink_producer    thrpt    3  1.861 ± 5.475  ops/s
11 |
12 | ## RC8
13 |
14 | ### 10 iterations, 1 fork, 1 thread
15 | Benchmark                                 Mode  Cnt  Score   Error  Units
16 | ProducerBenchmark.monix_single_producer  thrpt   10  0.473 ± 0.070  ops/s
17 | ProducerBenchmark.monix_sink_producer    thrpt   10  2.009 ± 0.277  ops/s
18 |
19 | ### 10 iterations, 1 fork, 3 threads
20 | Benchmark                                 Mode  Cnt  Score   Error  Units
21 | ProducerBenchmark.monix_single_producer  thrpt   10  0.981 ± 0.202  ops/s
22 | ProducerBenchmark.monix_sink_producer    thrpt   10  3.241 ± 0.191  ops/s
23 |
--------------------------------------------------------------------------------
/benchmarks/src/main/scala/monix/kafka/benchmarks/MonixFixture.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import monix.eval.Coeval
4 | import monix.execution.Scheduler
5 | import monix.kafka.config.{AutoOffsetReset => MonixAutoOffsetReset}
6 | import monix.kafka.{KafkaConsumerConfig, KafkaProducerConfig}
7 |
8 | trait MonixFixture extends BaseFixture {
9 |
10 | implicit lazy val io = Scheduler.io("monix-kafka-benchmark")
11 |
12 | val producerConf = KafkaProducerConfig.default.copy(
13 | bootstrapServers = List(brokerUrl),
14 | clientId = monixTopic,
15 | )
16 |
17 | // a different group id is generated on every evaluation, so the consumer
18 | // always reads the topic from the beginning
19 | val consumerConf = Coeval(KafkaConsumerConfig.default.copy(
20 | bootstrapServers = List(brokerUrl),
21 | clientId = monixTopic,
22 | groupId = randomId.value(),
23 | enableAutoCommit = false,
24 | autoOffsetReset = MonixAutoOffsetReset.Earliest
25 | ))
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix
19 |
20 | import monix.execution.Scheduler
21 |
22 | package object kafka {
23 |
24 | /** I/O scheduler meant for tests. */
25 | lazy val io = Scheduler.io("monix-kafka-tests")
26 | }
27 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix
19 |
20 | import monix.execution.Scheduler
21 |
22 | package object kafka {
23 |
24 | /** I/O scheduler meant for tests. */
25 | lazy val io = Scheduler.io("monix-kafka-tests")
26 | }
27 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix
19 |
20 | import monix.execution.Scheduler
21 |
22 | package object kafka {
23 |
24 | /** I/O scheduler meant for tests. */
25 | lazy val io = Scheduler.io("monix-kafka-tests")
26 | }
27 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/CommittableMessage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import org.apache.kafka.clients.consumer.ConsumerRecord
20 |
21 | /** Represents data consumed from Kafka and [[CommittableOffset]] built from it
22 | */
23 | final case class CommittableMessage[K, V](record: ConsumerRecord[K, V], committableOffset: CommittableOffset)
24 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyyMMdd-HH:mm:ss.SSSZ} [%thread] %-5level %logger - %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/CommittableMessage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import org.apache.kafka.clients.consumer.ConsumerRecord
20 |
21 | /** Represents data consumed from Kafka and [[CommittableOffset]] built from it
22 | */
23 | final case class CommittableMessage[K, V](record: ConsumerRecord[K, V], committableOffset: CommittableOffset)
24 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/CommittableMessage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import org.apache.kafka.clients.consumer.ConsumerRecord
20 |
21 | /** Represents data consumed from Kafka and [[CommittableOffset]] built from it
22 | */
23 | final case class CommittableMessage[K, V](record: ConsumerRecord[K, V], committableOffset: CommittableOffset)
24 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/ConfigTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import org.scalatest.FunSuite
4 |
5 | class ConfigTest extends FunSuite {
6 | test("overwrite properties with values from producer config") {
7 | val config =
8 | KafkaProducerConfig.default
9 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
10 |
11 | assert(
12 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
13 | )
14 | }
15 |
16 | test("overwrite properties with values from consumer config") {
17 | val config =
18 | KafkaConsumerConfig.default
19 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
20 |
21 | assert(
22 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
23 | )
24 | }
25 |
26 | test("convert to Java map from producer config and filter null values") {
27 | val config = KafkaProducerConfig.default.toJavaMap
28 |
29 | assert(!config.containsValue(null))
30 | }
31 |
32 | test("convert to Java map from consumer config and filter null values") {
33 | val config = KafkaConsumerConfig.default.toJavaMap
34 |
35 | assert(!config.containsValue(null))
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/ConfigTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import org.scalatest.FunSuite
4 |
5 | class ConfigTest extends FunSuite {
6 | test("overwrite properties with values from producer config") {
7 | val config =
8 | KafkaProducerConfig.default
9 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
10 |
11 | assert(
12 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
13 | )
14 | }
15 |
16 | test("overwrite properties with values from consumer config") {
17 | val config =
18 | KafkaConsumerConfig.default
19 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
20 |
21 | assert(
22 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
23 | )
24 | }
25 |
26 | test("convert to Java map from producer config and filter null values") {
27 | val config = KafkaProducerConfig.default.toJavaMap
28 |
29 | assert(!config.containsValue(null))
30 | }
31 |
32 | test("convert to Java map from consumer config and filter null values") {
33 | val config = KafkaConsumerConfig.default.toJavaMap
34 |
35 | assert(!config.containsValue(null))
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/ConfigTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import org.scalatest.FunSuite
4 |
5 | class ConfigTest extends FunSuite {
6 | test("overwrite properties with values from producer config") {
7 | val config =
8 | KafkaProducerConfig.default
9 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
10 |
11 | assert(
12 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
13 | )
14 | }
15 |
16 | test("overwrite properties with values from consumer config") {
17 | val config =
18 | KafkaConsumerConfig.default
19 | .copy(bootstrapServers = List("localhost:9092"), properties = Map("bootstrap.servers" -> "127.0.0.1:9092"))
20 |
21 | assert(
22 | config.toProperties.getProperty("bootstrap.servers") == "localhost:9092"
23 | )
24 | }
25 |
26 | test("convert to Java map from producer config and filter null values") {
27 | val config = KafkaProducerConfig.default.toJavaMap
28 |
29 | assert(!config.containsValue(null))
30 | }
31 |
32 | test("convert to Java map from consumer config and filter null values") {
33 | val config = KafkaConsumerConfig.default.toJavaMap
34 |
35 | assert(!config.containsValue(null))
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/Commit.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Callback for batched commit realized as closure in [[KafkaConsumerObservable]] context.
23 | */
24 | trait Commit {
25 | def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit]
26 | def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit]
27 | }
28 |
29 | private[kafka] object Commit {
30 |
31 | val empty: Commit = new Commit {
32 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
33 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
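
`Commit` is only an interface here; the library creates instances internally (see the `KafkaConsumerObservable` reference in the scaladoc) and exposes them through `CommittableOffset`. A hedged sketch of what implementing and invoking it looks like, with purely illustrative logging behaviour and hypothetical offset values:

```scala
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.kafka.Commit
import org.apache.kafka.common.TopicPartition

object CommitSketch extends App {
  // Illustrative only: a Commit that just prints the offsets it is asked to commit.
  val loggingCommit: Commit = new Commit {
    def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] =
      Task(println(s"sync commit: $batch"))
    def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] =
      Task(println(s"async commit: $batch"))
  }

  // Commit offset 42 for partition 0 of the benchmark topic (hypothetical values).
  loggingCommit
    .commitBatchSync(Map(new TopicPartition("monix_topic", 0) -> 42L))
    .runSyncUnsafe()
}
```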
/kafka-0.11.x/src/main/scala/monix/kafka/Commit.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Callback for batched commit realized as closure in [[KafkaConsumerObservable]] context.
23 | */
24 | trait Commit {
25 | def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit]
26 | def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit]
27 | }
28 |
29 | private[kafka] object Commit {
30 |
31 | val empty: Commit = new Commit {
32 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
33 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/Commit.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Callback for batched commit realized as closure in [[KafkaConsumerObservable]] context.
23 | */
24 | trait Commit {
25 | def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit]
26 | def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit]
27 | }
28 |
29 | private[kafka] object Commit {
30 |
31 | val empty: Commit = new Commit {
32 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
33 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/benchmarks/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '2'
3 | services:
4 |
5 | zookeeper:
6 | image: confluentinc/cp-zookeeper:4.1.1
7 | hostname: zookeeper
8 | container_name: zookeeper
9 | ports:
10 | - "2181:2181"
11 | environment:
12 | ZOOKEEPER_CLIENT_PORT: 2181
13 | ZOOKEEPER_TICK_TIME: 2000
14 |
15 | broker:
16 | image: confluentinc/cp-server:5.5.0
17 | hostname: broker
18 | container_name: broker
19 | depends_on:
20 | - zookeeper
21 | ports:
22 | - "9092:9092"
23 | environment:
24 | KAFKA_BROKER_ID: 1
25 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
26 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
27 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
28 | KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
29 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
30 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
31 | KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
32 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
33 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
34 | CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
35 | CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
36 | CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
37 | CONFLUENT_METRICS_ENABLE: 'true'
38 | CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
39 |
40 |
--------------------------------------------------------------------------------
/benchmarks/src/main/scala/monix/kafka/benchmarks/ProducerBenchmark.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import org.openjdk.jmh.annotations.{BenchmarkMode, Fork, Measurement, Mode, OutputTimeUnit, Scope, State, Threads, Warmup}
4 |
5 | import java.util.concurrent.TimeUnit
6 | import monix.eval.Task
7 | import monix.kafka.{KafkaProducer, KafkaProducerSink}
8 | import monix.reactive.Observable
9 | import org.apache.kafka.clients.producer.ProducerRecord
10 | import org.openjdk.jmh.annotations._
11 |
12 | import scala.concurrent.duration._
13 | import scala.concurrent.Await
14 |
15 | @State(Scope.Thread)
16 | @BenchmarkMode(Array(Mode.Throughput))
17 | @OutputTimeUnit(TimeUnit.SECONDS)
18 | @Measurement(iterations = 3)
19 | @Warmup(iterations = 1)
20 | @Fork(1)
21 | @Threads(1)
22 | class ProducerBenchmark extends MonixFixture {
23 |
24 | var size: Int = 1000
25 | val producer = KafkaProducer[Integer, Integer](producerConf, io)
26 |
27 | @Benchmark
28 | def monix_single_producer(): Unit = {
29 | val f = Task.traverse((0 until size).toList)(i => producer.send(topic = monixTopic, i)).runToFuture(io)
30 | Await.ready(f, Duration.Inf)
31 | }
32 |
33 | @Benchmark
34 | def monix_sink_producer(): Unit = {
35 | val f = Observable
36 | .from(1 to size)
37 | .map(i => new ProducerRecord[Integer, Integer](monixTopic, i))
38 | .bufferTumbling(50)
39 | .consumeWith(KafkaProducerSink(producerConf.copy(monixSinkParallelism = 10), io))
40 | .runToFuture(io)
41 | Await.ready(f, Duration.Inf)
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/PartitionerName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import org.apache.kafka.clients.producer.Partitioner
20 | import scala.reflect.ClassTag
21 |
22 | final case class PartitionerName(className: String) extends ClassName[Partitioner] {
23 |
24 | /** Creates a new instance of the referenced `Serializer`. */
25 | def createInstance(): Partitioner =
26 | classType.getDeclaredConstructor().newInstance()
27 | }
28 |
29 | object PartitionerName {
30 |
31 | /** Builds a [[PartitionerName]], given a class. */
32 | def apply[C <: Partitioner](implicit C: ClassTag[C]): PartitionerName =
33 | PartitionerName(C.runtimeClass.getCanonicalName)
34 |
35 | /** Returns the default `Partitioner` instance. */
36 | val default: PartitionerName =
37 | PartitionerName("org.apache.kafka.clients.producer.internals.DefaultPartitioner")
38 | }
39 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/PartitionerName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import org.apache.kafka.clients.producer.Partitioner
20 | import scala.reflect.ClassTag
21 |
22 | final case class PartitionerName(className: String) extends ClassName[Partitioner] {
23 |
24 | /** Creates a new instance of the referenced `Serializer`. */
25 | def createInstance(): Partitioner =
26 | classType.getDeclaredConstructor().newInstance()
27 | }
28 |
29 | object PartitionerName {
30 |
31 | /** Builds a [[PartitionerName]], given a class. */
32 | def apply[C <: Partitioner](implicit C: ClassTag[C]): PartitionerName =
33 | PartitionerName(C.runtimeClass.getCanonicalName)
34 |
35 | /** Returns the default `Partitioner` instance. */
36 | val default: PartitionerName =
37 | PartitionerName("org.apache.kafka.clients.producer.internals.DefaultPartitioner")
38 | }
39 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/PartitionerName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import org.apache.kafka.clients.producer.Partitioner
20 | import scala.reflect.ClassTag
21 |
22 | final case class PartitionerName(className: String) extends ClassName[Partitioner] {
23 |
24 | /** Creates a new instance of the referenced `Serializer`. */
25 | def createInstance(): Partitioner =
26 | classType.getDeclaredConstructor().newInstance()
27 | }
28 |
29 | object PartitionerName {
30 |
31 | /** Builds a [[PartitionerName]], given a class. */
32 | def apply[C <: Partitioner](implicit C: ClassTag[C]): PartitionerName =
33 | PartitionerName(C.runtimeClass.getCanonicalName)
34 |
35 | /** Returns the default `Partitioner` instance. */
36 | val default: PartitionerName =
37 | PartitionerName("org.apache.kafka.clients.producer.internals.DefaultPartitioner")
38 | }
39 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/ClassName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import scala.reflect.ClassTag
20 |
21 | abstract class ClassName[T](implicit T: ClassTag[T]) extends Serializable {
22 | def className: String
23 |
24 | val classType: Class[_ <: T] =
25 | Class.forName(className).asInstanceOf[Class[_ <: T]]
26 |
27 | require(
28 | findClass(classType :: Nil, T.runtimeClass),
29 | s"Given type $className does not implement ${T.runtimeClass}"
30 | )
31 |
32 | private def findClass(stack: List[Class[_]], searched: Class[_]): Boolean =
33 | stack match {
34 | case Nil => false
35 | case x :: xs =>
36 | if (x == searched) true
37 | else {
38 | val superClass: List[Class[_]] = Option(x.getSuperclass).toList
39 | val rest = superClass ::: x.getInterfaces.toList ::: xs
40 | findClass(rest, searched)
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
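
`ClassName` is the machinery behind config values such as `PartitionerName` shown earlier: the `require` call walks the superclass and interface graph to confirm the configured class really implements the expected type. A small sketch of both construction styles, assuming Kafka's bundled `DefaultPartitioner` is on the classpath (it ships with kafka-clients):

```scala
import monix.kafka.config.PartitionerName
import org.apache.kafka.clients.producer.internals.DefaultPartitioner

object PartitionerNameSketch extends App {
  // By fully-qualified class name; ClassName's require() rejects any class
  // that does not implement org.apache.kafka.clients.producer.Partitioner.
  val byName =
    PartitionerName("org.apache.kafka.clients.producer.internals.DefaultPartitioner")

  // By ClassTag, via the apply[C <: Partitioner] overload in the companion.
  val byTag = PartitionerName[DefaultPartitioner]

  println(byName.createInstance().getClass == byTag.createInstance().getClass) // true
}
```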
/kafka-0.10.x/src/main/scala/monix/kafka/config/ClassName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import scala.reflect.ClassTag
20 |
21 | abstract class ClassName[T](implicit T: ClassTag[T]) extends Serializable {
22 | def className: String
23 |
24 | val classType: Class[_ <: T] =
25 | Class.forName(className).asInstanceOf[Class[_ <: T]]
26 |
27 | require(
28 | findClass(classType :: Nil, T.runtimeClass),
29 | s"Given type $className does not implement ${T.runtimeClass}"
30 | )
31 |
32 | private def findClass(stack: List[Class[_]], searched: Class[_]): Boolean =
33 | stack match {
34 | case Nil => false
35 | case x :: xs =>
36 | if (x == searched) true
37 | else {
38 | val superClass: List[Class[_]] = Option(x.getSuperclass).toList
39 | val rest = superClass ::: x.getInterfaces.toList ::: xs
40 | findClass(rest, searched)
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/ClassName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import scala.reflect.ClassTag
20 |
21 | abstract class ClassName[T](implicit T: ClassTag[T]) extends Serializable {
22 | def className: String
23 |
24 | val classType: Class[_ <: T] =
25 | Class.forName(className).asInstanceOf[Class[_ <: T]]
26 |
27 | require(
28 | findClass(classType :: Nil, T.runtimeClass),
29 | s"Given type $className does not implement ${T.runtimeClass}"
30 | )
31 |
32 | private def findClass(stack: List[Class[_]], searched: Class[_]): Boolean =
33 | stack match {
34 | case Nil => false
35 | case x :: xs =>
36 | if (x == searched) true
37 | else {
38 | val superClass: List[Class[_]] = Option(x.getSuperclass).toList
39 | val rest = superClass ::: x.getInterfaces.toList ::: xs
40 | findClass(rest, searched)
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/benchmarks/src/test/scala/monix/kafka/benchmarks/ConsumerSpec.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import monix.execution.Scheduler.Implicits.global
4 | import monix.kafka.{KafkaConsumerObservable, KafkaProducer, KafkaProducerSink}
5 | import monix.reactive.Observable
6 | import org.apache.kafka.clients.producer.ProducerRecord
7 | import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
8 |
9 | import scala.concurrent.Await
10 | import scala.concurrent.duration._
11 |
12 | class ConsumerSpec extends FlatSpec with MonixFixture with Matchers with BeforeAndAfterAll {
13 |
14 |
15 | override def beforeAll(): Unit = {
16 | super.beforeAll()
17 | Observable
18 | .from(1 to 1000)
19 | .map(i => new ProducerRecord[Integer, Integer](monixTopic, i))
20 | .bufferTumbling(100)
21 | .consumeWith(KafkaProducerSink(producerConf.copy(monixSinkParallelism = 10), io))
22 | .runSyncUnsafe()
23 | }
24 |
25 | s"Monix ${monixTopic}" should "exist" in {
26 | val producer = KafkaProducer[String, String](producerConf, global)
27 | val t = producer.send(topic = monixTopic, "test")
28 |
29 | t.runSyncUnsafe().isDefined shouldBe true
30 | }
31 |
32 | it should "consume with autocommit" in {
33 | val conf = consumerConf.value()
34 |
35 | val f2 = KafkaConsumerObservable[Integer, Integer](conf, List(monixTopic)).take(1000).toListL.runToFuture(io)
36 |
37 | val elements = Await.result(f2, 10.seconds)
38 | elements.size shouldBe 1000
39 | }
40 |
41 | it should "consume with manual commit" in {
42 | val conf = consumerConf.value()
43 |
44 | val f2 = KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
45 | .take(1000).toListL.runToFuture(io)
46 |
47 | val elements = Await.result(f2, 10.seconds)
48 | elements.size shouldBe 1000
49 | }
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/ObservableCommitType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit type, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitType.Sync]]
28 | * - [[ObservableCommitType.Async]]
29 | */
30 | sealed trait ObservableCommitType extends Serializable {
31 | def id: String
32 | }
33 |
34 | object ObservableCommitType {
35 |
36 | @throws(classOf[BadValue])
37 | def apply(id: String): ObservableCommitType =
38 | id match {
39 | case Sync.id => Sync
40 | case Async.id => Async
41 | case _ =>
42 | throw new BadValue("kafka.monix.observable.commit.type", s"Invalid value: $id")
43 | }
44 |
45 | /** Uses `consumer.commitSync()` after each batch
46 | * if `enable.auto.commit` is `false`.
47 | */
48 | case object Sync extends ObservableCommitType {
49 | val id = "sync"
50 | }
51 |
52 | /** Uses `consumer.commitAsync()` after each batch
53 | * if `enable.auto.commit` is `false`.
54 | */
55 | case object Async extends ObservableCommitType {
56 | val id = "async"
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
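
These config enums are built from string ids (the scaladoc points at the `kafka.monix.observable.commit.type` setting); any unknown id fails fast with typesafe-config's `BadValue`. A quick sketch of the round-trip:

```scala
import monix.kafka.config.ObservableCommitType

object CommitTypeSketch extends App {
  assert(ObservableCommitType("sync") == ObservableCommitType.Sync)
  assert(ObservableCommitType("async") == ObservableCommitType.Async)

  // Any other id throws com.typesafe.config.ConfigException.BadValue:
  // ObservableCommitType("manual")
  println(List(ObservableCommitType.Sync, ObservableCommitType.Async).map(_.id))
}
```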
/kafka-0.11.x/src/main/scala/monix/kafka/config/ObservableCommitType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit type, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitType.Sync]]
28 | * - [[ObservableCommitType.Async]]
29 | */
30 | sealed trait ObservableCommitType extends Serializable {
31 | def id: String
32 | }
33 |
34 | object ObservableCommitType {
35 |
36 | @throws(classOf[BadValue])
37 | def apply(id: String): ObservableCommitType =
38 | id match {
39 | case Sync.id => Sync
40 | case Async.id => Async
41 | case _ =>
42 | throw new BadValue("kafka.monix.observable.commit.type", s"Invalid value: $id")
43 | }
44 |
45 | /** Uses `consumer.commitSync()` after each batch
46 | * if `enable.auto.commit` is `false`.
47 | */
48 | case object Sync extends ObservableCommitType {
49 | val id = "sync"
50 | }
51 |
52 | /** Uses `consumer.commitAsync()` after each batch
53 | * if `enable.auto.commit` is `false`.
54 | */
55 | case object Async extends ObservableCommitType {
56 | val id = "async"
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/ObservableCommitType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit type, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitType.Sync]]
28 | * - [[ObservableCommitType.Async]]
29 | */
30 | sealed trait ObservableCommitType extends Serializable {
31 | def id: String
32 | }
33 |
34 | object ObservableCommitType {
35 |
36 | @throws(classOf[BadValue])
37 | def apply(id: String): ObservableCommitType =
38 | id match {
39 | case Sync.id => Sync
40 | case Async.id => Async
41 | case _ =>
42 | throw new BadValue("kafka.monix.observable.commit.type", s"Invalid value: $id")
43 | }
44 |
45 | /** Uses `consumer.commitSync()` after each batch
46 | * if `enable.auto.commit` is `false`.
47 | */
48 | case object Sync extends ObservableCommitType {
49 | val id = "sync"
50 | }
51 |
52 | /** Uses `consumer.commitAsync()` after each batch
53 | * if `enable.auto.commit` is `false`.
54 | */
55 | case object Async extends ObservableCommitType {
56 | val id = "async"
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/SecurityProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The `security.protocol` setting for the Kafka Producer.
22 | *
23 | * Represents the protocol used to communicate with brokers.
24 | *
25 | * Valid values are:
26 | *
27 | * - [[SecurityProtocol.PLAINTEXT]]
28 | * - [[SecurityProtocol.SSL]]
29 | * - [[SecurityProtocol.SASL_PLAINTEXT]]
30 | * - [[SecurityProtocol.SASL_SSL]]
31 | */
32 | sealed trait SecurityProtocol extends Serializable {
33 | def id: String
34 | }
35 |
36 | object SecurityProtocol {
37 |
38 | @throws(classOf[BadValue])
39 | def apply(id: String): SecurityProtocol =
40 | id match {
41 | case PLAINTEXT.id => PLAINTEXT
42 | case SSL.id => SSL
43 | case SASL_PLAINTEXT.id => SASL_PLAINTEXT
44 | case SASL_SSL.id => SASL_SSL
45 | case _ =>
46 | throw new BadValue("kafka.security.protocol", s"Invalid value: $id")
47 | }
48 |
49 | case object PLAINTEXT extends SecurityProtocol {
50 | val id = "PLAINTEXT"
51 | }
52 |
53 | case object SSL extends SecurityProtocol {
54 | val id = "SSL"
55 | }
56 |
57 | case object SASL_PLAINTEXT extends SecurityProtocol {
58 | val id = "SASL_PLAINTEXT"
59 | }
60 |
61 | case object SASL_SSL extends SecurityProtocol {
62 | val id = "SASL_SSL"
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/SecurityProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The `security.protocol` setting for the Kafka Producer.
22 | *
23 | * Represents the protocol used to communicate with brokers.
24 | *
25 | * Valid values are:
26 | *
27 | * - [[SecurityProtocol.PLAINTEXT]]
28 | * - [[SecurityProtocol.SSL]]
29 | * - [[SecurityProtocol.SASL_PLAINTEXT]]
30 | * - [[SecurityProtocol.SASL_SSL]]
31 | */
32 | sealed trait SecurityProtocol extends Serializable {
33 | def id: String
34 | }
35 |
36 | object SecurityProtocol {
37 |
38 | @throws(classOf[BadValue])
39 | def apply(id: String): SecurityProtocol =
40 | id match {
41 | case PLAINTEXT.id => PLAINTEXT
42 | case SSL.id => SSL
43 | case SASL_PLAINTEXT.id => SASL_PLAINTEXT
44 | case SASL_SSL.id => SASL_SSL
45 | case _ =>
46 | throw new BadValue("kafka.security.protocol", s"Invalid value: $id")
47 | }
48 |
49 | case object PLAINTEXT extends SecurityProtocol {
50 | val id = "PLAINTEXT"
51 | }
52 |
53 | case object SSL extends SecurityProtocol {
54 | val id = "SSL"
55 | }
56 |
57 | case object SASL_PLAINTEXT extends SecurityProtocol {
58 | val id = "SASL_PLAINTEXT"
59 | }
60 |
61 | case object SASL_SSL extends SecurityProtocol {
62 | val id = "SASL_SSL"
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/SecurityProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The `security.protocol` setting for the Kafka Producer.
22 | *
23 | * Represents the protocol used to communicate with brokers.
24 | *
25 | * Valid values are:
26 | *
27 | * - [[SecurityProtocol.PLAINTEXT]]
28 | * - [[SecurityProtocol.SSL]]
29 | * - [[SecurityProtocol.SASL_PLAINTEXT]]
30 | * - [[SecurityProtocol.SASL_SSL]]
31 | */
32 | sealed trait SecurityProtocol extends Serializable {
33 | def id: String
34 | }
35 |
36 | object SecurityProtocol {
37 |
38 | @throws(classOf[BadValue])
39 | def apply(id: String): SecurityProtocol =
40 | id match {
41 | case PLAINTEXT.id => PLAINTEXT
42 | case SSL.id => SSL
43 | case SASL_PLAINTEXT.id => SASL_PLAINTEXT
44 | case SASL_SSL.id => SASL_SSL
45 | case _ =>
46 | throw new BadValue("kafka.security.protocol", s"Invalid value: $id")
47 | }
48 |
49 | case object PLAINTEXT extends SecurityProtocol {
50 | val id = "PLAINTEXT"
51 | }
52 |
53 | case object SSL extends SecurityProtocol {
54 | val id = "SSL"
55 | }
56 |
57 | case object SASL_PLAINTEXT extends SecurityProtocol {
58 | val id = "SASL_PLAINTEXT"
59 | }
60 |
61 | case object SASL_SSL extends SecurityProtocol {
62 | val id = "SASL_SSL"
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/AutoOffsetReset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** What to do when there is no initial offset in Kafka or if the
22 | * current offset does not exist any more on the server
23 | * (e.g. because that data has been deleted).
24 | *
25 | * Available choices:
26 | *
27 | * - [[AutoOffsetReset.Earliest]]
28 | * - [[AutoOffsetReset.Latest]]
29 | * - [[AutoOffsetReset.Throw]]
30 | */
31 | sealed trait AutoOffsetReset extends Serializable {
32 | def id: String
33 | }
34 |
35 | object AutoOffsetReset {
36 |
37 | @throws(classOf[BadValue])
38 | def apply(id: String): AutoOffsetReset =
39 | id.trim.toLowerCase match {
40 | case Earliest.id => Earliest
41 | case Latest.id => Latest
42 | case Throw.id => Throw
43 | case _ =>
44 | throw new BadValue("kafka.auto.offset.reset", s"Invalid value: $id")
45 | }
46 |
47 | /** Automatically reset the offset to the earliest offset. */
48 | case object Earliest extends AutoOffsetReset {
49 | val id = "earliest"
50 | }
51 |
52 | /** Automatically reset the offset to the latest offset. */
53 | case object Latest extends AutoOffsetReset {
54 | val id = "latest"
55 | }
56 |
57 | /** Throw exception to the consumer if no previous offset
58 | * is found for the consumer's group.
59 | */
60 | case object Throw extends AutoOffsetReset {
61 | val id = "none"
62 | }
63 | }
64 |
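A small sketch (editor's addition) showing that `AutoOffsetReset.apply` trims and lower-cases its input before matching, and that Kafka's `"none"` value surfaces as the `Throw` case object:

```scala
import monix.kafka.config.AutoOffsetReset

object AutoOffsetResetExample extends App {
  // Parsing is whitespace- and case-insensitive
  assert(AutoOffsetReset(" EARLIEST ") == AutoOffsetReset.Earliest)
  assert(AutoOffsetReset("Latest") == AutoOffsetReset.Latest)
  // Kafka's "none" value maps to the Throw case object
  assert(AutoOffsetReset("none") == AutoOffsetReset.Throw)
}
```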
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/AutoOffsetReset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** What to do when there is no initial offset in Kafka or if the
22 | * current offset does not exist any more on the server
23 | * (e.g. because that data has been deleted).
24 | *
25 | * Available choices:
26 | *
27 | * - [[AutoOffsetReset.Earliest]]
28 | * - [[AutoOffsetReset.Latest]]
29 | * - [[AutoOffsetReset.Throw]]
30 | */
31 | sealed trait AutoOffsetReset extends Serializable {
32 | def id: String
33 | }
34 |
35 | object AutoOffsetReset {
36 |
37 | @throws(classOf[BadValue])
38 | def apply(id: String): AutoOffsetReset =
39 | id.trim.toLowerCase match {
40 | case Earliest.id => Earliest
41 | case Latest.id => Latest
42 | case Throw.id => Throw
43 | case _ =>
44 | throw new BadValue("kafka.auto.offset.reset", s"Invalid value: $id")
45 | }
46 |
47 | /** Automatically reset the offset to the earliest offset. */
48 | case object Earliest extends AutoOffsetReset {
49 | val id = "earliest"
50 | }
51 |
52 | /** Automatically reset the offset to the latest offset. */
53 | case object Latest extends AutoOffsetReset {
54 | val id = "latest"
55 | }
56 |
57 | /** Throw exception to the consumer if no previous offset
58 | * is found for the consumer's group.
59 | */
60 | case object Throw extends AutoOffsetReset {
61 | val id = "none"
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/AutoOffsetReset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** What to do when there is no initial offset in Kafka or if the
22 | * current offset does not exist any more on the server
23 | * (e.g. because that data has been deleted).
24 | *
25 | * Available choices:
26 | *
27 | * - [[AutoOffsetReset.Earliest]]
28 | * - [[AutoOffsetReset.Latest]]
29 | * - [[AutoOffsetReset.Throw]]
30 | */
31 | sealed trait AutoOffsetReset extends Serializable {
32 | def id: String
33 | }
34 |
35 | object AutoOffsetReset {
36 |
37 | @throws(classOf[BadValue])
38 | def apply(id: String): AutoOffsetReset =
39 | id.trim.toLowerCase match {
40 | case Earliest.id => Earliest
41 | case Latest.id => Latest
42 | case Throw.id => Throw
43 | case _ =>
44 | throw new BadValue("kafka.auto.offset.reset", s"Invalid value: $id")
45 | }
46 |
47 | /** Automatically reset the offset to the earliest offset. */
48 | case object Earliest extends AutoOffsetReset {
49 | val id = "earliest"
50 | }
51 |
52 | /** Automatically reset the offset to the latest offset. */
53 | case object Latest extends AutoOffsetReset {
54 | val id = "latest"
55 | }
56 |
57 | /** Throw exception to the consumer if no previous offset
58 | * is found for the consumer's group.
59 | */
60 | case object Throw extends AutoOffsetReset {
61 | val id = "none"
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = "3.4.3"
2 | #
3 | # See http://scalameta.org/scalafmt/#Configuration for details
4 | #
5 | project {
6 | includeFilters = [
7 | ".*.\\.scala$"
8 | ".*\\..sbt$"
9 | ]
10 | }
11 |
12 | maxColumn = 120
13 |
14 | # Vertical alignment, options: none, some, more
15 | #
16 | # This awkward, self-contradictory configuration ensures that only
17 | # the common sbt tokens get aligned, and not "production" code.
18 | #
19 | align = none
20 | align {
21 | openParenCallSite = false
22 | openParenDefnSite = false
23 | tokens = ["%", ":=", "~="]
24 | }
25 |
26 | # If true, the margin character | is aligned with the opening triple quote string literals
27 | assumeStandardLibraryStripMargin = true
28 |
29 | #From scalafmt website:
30 | #see: http://scalameta.org/scalafmt/#includeCurlyBraceInSelectChains
31 | includeCurlyBraceInSelectChains = false
32 |
33 | continuationIndent {
34 | callSite = 2
35 | defnSite = 2
36 | extendSite = 4
37 | }
38 |
39 | danglingParentheses = false
40 |
41 |
42 | newlines {
43 | alwaysBeforeTopLevelStatements = true
44 | sometimesBeforeColonInMethodReturnType = true
45 | penalizeSingleSelectMultiArgList = false
46 | alwaysBeforeElseAfterCurlyIf = false
47 | neverInResultType = false
48 | }
49 |
50 | spaces {
51 | afterKeywordBeforeParen = true
52 | }
53 |
54 | binPack {
55 | parentConstructors = true
56 | literalArgumentLists = true
57 | }
58 |
59 | optIn {
60 | breaksInsideChains = false
61 | breakChainOnFirstMethodDot = true
62 | configStyleArguments = true
63 | }
64 |
65 | runner {
66 | optimizer {
67 | # Set to -1 to disable. Number of characters needed to trigger "config-style" formatting
68 | # see: http://scalameta.org/scalafmt/#runner.optimizer.forceConfigStyleOnOffset
69 | forceConfigStyleOnOffset = 150
70 |
71 | # minimum number of func arguments before config-style (look at top of file) is enabled
72 | forceConfigStyleMinArgCount = 2
73 | }
74 | }
75 |
76 | rewrite {
77 | rules = [
78 | SortImports
79 |     # if your for-comprehension has more than a single <- then it gets transformed into a multi-line curly brace one
80 | # PreferCurlyFors
81 | ]
82 | }
83 |
84 | lineEndings=preserve
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/CompressionType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The compression type for all data generated by the producer,
22 | * the `compression.type` from the Kafka Producer configuration.
23 | *
24 | * The default is none (i.e. no compression). Compression is of full
25 | * batches of data, so the efficacy of batching will also impact
26 | * the compression ratio (more batching means better compression).
27 | *
28 | * Valid values:
29 | *
30 | * - [[CompressionType.Uncompressed]]
31 | * - [[CompressionType.Gzip]]
32 | * - [[CompressionType.Snappy]]
33 | * - [[CompressionType.Lz4]]
34 | */
35 | sealed trait CompressionType extends Serializable {
36 | def id: String
37 | }
38 |
39 | object CompressionType {
40 |
41 | @throws(classOf[BadValue])
42 | def apply(id: String): CompressionType =
43 | id match {
44 | case Uncompressed.id => Uncompressed
45 | case Gzip.id => Gzip
46 | case Snappy.id => Snappy
47 | case Lz4.id => Lz4
48 | case _ =>
49 | throw new BadValue("kafka.compression.type", s"Invalid value: $id")
50 | }
51 |
52 | case object Uncompressed extends CompressionType {
53 | val id = "none"
54 | }
55 |
56 | case object Gzip extends CompressionType {
57 | val id = "gzip"
58 | }
59 |
60 | case object Snappy extends CompressionType {
61 | val id = "snappy"
62 | }
63 |
64 | case object Lz4 extends CompressionType {
65 | val id = "lz4"
66 | }
67 | }
68 |
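A quick sketch (editor's addition): the ids mirror the Kafka `compression.type` values, with `"none"` mapping to `Uncompressed`. Matching is exact here, so anything else throws a `BadValue` for the `kafka.compression.type` path.

```scala
import monix.kafka.config.CompressionType

object CompressionTypeExample extends App {
  // "none" maps to Uncompressed; the remaining ids match the Kafka values verbatim
  assert(CompressionType("none") == CompressionType.Uncompressed)
  assert(CompressionType("gzip") == CompressionType.Gzip)
  assert(CompressionType("lz4").id == "lz4")
}
```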
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/CompressionType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The compression type for all data generated by the producer,
22 | * the `compression.type` from the Kafka Producer configuration.
23 | *
24 | * The default is none (i.e. no compression). Compression is of full
25 | * batches of data, so the efficacy of batching will also impact
26 | * the compression ratio (more batching means better compression).
27 | *
28 | * Valid values:
29 | *
30 | * - [[CompressionType.Uncompressed]]
31 | * - [[CompressionType.Gzip]]
32 | * - [[CompressionType.Snappy]]
33 | * - [[CompressionType.Lz4]]
34 | */
35 | sealed trait CompressionType extends Serializable {
36 | def id: String
37 | }
38 |
39 | object CompressionType {
40 |
41 | @throws(classOf[BadValue])
42 | def apply(id: String): CompressionType =
43 | id match {
44 | case Uncompressed.id => Uncompressed
45 | case Gzip.id => Gzip
46 | case Snappy.id => Snappy
47 | case Lz4.id => Lz4
48 | case _ =>
49 | throw new BadValue("kafka.compression.type", s"Invalid value: $id")
50 | }
51 |
52 | case object Uncompressed extends CompressionType {
53 | val id = "none"
54 | }
55 |
56 | case object Gzip extends CompressionType {
57 | val id = "gzip"
58 | }
59 |
60 | case object Snappy extends CompressionType {
61 | val id = "snappy"
62 | }
63 |
64 | case object Lz4 extends CompressionType {
65 | val id = "lz4"
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/CompressionType.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** The compression type for all data generated by the producer,
22 | * the `compression.type` from the Kafka Producer configuration.
23 | *
24 | * The default is none (i.e. no compression). Compression is of full
25 | * batches of data, so the efficacy of batching will also impact
26 | * the compression ratio (more batching means better compression).
27 | *
28 | * Valid values:
29 | *
30 | * - [[CompressionType.Uncompressed]]
31 | * - [[CompressionType.Gzip]]
32 | * - [[CompressionType.Snappy]]
33 | * - [[CompressionType.Lz4]]
34 | */
35 | sealed trait CompressionType extends Serializable {
36 | def id: String
37 | }
38 |
39 | object CompressionType {
40 |
41 | @throws(classOf[BadValue])
42 | def apply(id: String): CompressionType =
43 | id match {
44 | case Uncompressed.id => Uncompressed
45 | case Gzip.id => Gzip
46 | case Snappy.id => Snappy
47 | case Lz4.id => Lz4
48 | case _ =>
49 | throw new BadValue("kafka.compression.type", s"Invalid value: $id")
50 | }
51 |
52 | case object Uncompressed extends CompressionType {
53 | val id = "none"
54 | }
55 |
56 | case object Gzip extends CompressionType {
57 | val id = "gzip"
58 | }
59 |
60 | case object Snappy extends CompressionType {
61 | val id = "snappy"
62 | }
63 |
64 | case object Lz4 extends CompressionType {
65 | val id = "lz4"
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/CommittableOffset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Represents the offset for a specified topic and partition that can be
23 |   * committed synchronously via the [[commitSync]] method or asynchronously by one of the commitAsync methods.
24 | * To achieve good performance it is recommended to use batched commit with
25 | * [[CommittableOffsetBatch]] class.
26 | *
27 | * @param topicPartition is the topic and partition identifier
28 | *
29 | * @param offset is the offset to be committed
30 | *
31 | * @param commitCallback is the set of callbacks for batched commit realized as closures
32 | * in [[KafkaConsumerObservable]] context.
33 | */
34 | final class CommittableOffset private[kafka] (
35 | val topicPartition: TopicPartition,
36 | val offset: Long,
37 | private[kafka] val commitCallback: Commit) {
38 |
39 | /** Synchronously commits [[offset]] for the [[topicPartition]] to Kafka. It is recommended
40 | * to use batched commit with [[CommittableOffsetBatch]] class.
41 | */
42 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(Map(topicPartition -> offset))
43 |
44 | /** Asynchronously commits [[offset]] to Kafka. It is recommended
45 | * to use batched commit with [[CommittableOffsetBatch]] class.
46 | */
47 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(Map(topicPartition -> offset))
48 | }
49 |
50 | object CommittableOffset {
51 |
52 | private[kafka] def apply(topicPartition: TopicPartition, offset: Long, commitCallback: Commit): CommittableOffset =
53 | new CommittableOffset(topicPartition, offset, commitCallback)
54 | }
55 |
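A hedged consumption sketch (editor's addition): the broker address, the topic name, the `record` accessor on `CommittableMessage`, and blocking with `runSyncUnsafe` are illustrative assumptions; the `KafkaConsumerConfig` and `KafkaConsumerObservable.manualCommit` calls mirror the tests elsewhere in this repository. Each record is processed and its offset is then committed asynchronously; for higher throughput the scaladoc above recommends batching commits via `CommittableOffsetBatch` instead.

```scala
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.kafka.{KafkaConsumerConfig, KafkaConsumerObservable}
import monix.kafka.config.AutoOffsetReset

object ManualCommitSketch extends App {
  // Placeholder connection settings, for illustration only
  val consumerCfg = KafkaConsumerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:9092"),
    groupId = "manual-commit-example",
    autoOffsetReset = AutoOffsetReset.Earliest
  )

  val consume: Task[Unit] =
    KafkaConsumerObservable
      .manualCommit[String, String](consumerCfg, List("my-topic"))
      .mapEval { msg =>
        // Process the record first, then commit its offset asynchronously
        Task(println(msg.record.value())) >> msg.committableOffset.commitAsync()
      }
      .take(10)
      .completedL

  consume.runSyncUnsafe()
}
```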
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/CommittableOffset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Represents the offset for a specified topic and partition that can be
23 |   * committed synchronously via the [[commitSync]] method or asynchronously by one of the commitAsync methods.
24 | * To achieve good performance it is recommended to use batched commit with
25 | * [[CommittableOffsetBatch]] class.
26 | *
27 | * @param topicPartition is the topic and partition identifier
28 | *
29 | * @param offset is the offset to be committed
30 | *
31 | * @param commitCallback is the set of callbacks for batched commit realized as closures
32 | * in [[KafkaConsumerObservable]] context.
33 | */
34 | final class CommittableOffset private[kafka] (
35 | val topicPartition: TopicPartition,
36 | val offset: Long,
37 | private[kafka] val commitCallback: Commit) {
38 |
39 | /** Synchronously commits [[offset]] for the [[topicPartition]] to Kafka. It is recommended
40 | * to use batched commit with [[CommittableOffsetBatch]] class.
41 | */
42 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(Map(topicPartition -> offset))
43 |
44 | /** Asynchronously commits [[offset]] to Kafka. It is recommended
45 | * to use batched commit with [[CommittableOffsetBatch]] class.
46 | */
47 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(Map(topicPartition -> offset))
48 | }
49 |
50 | object CommittableOffset {
51 |
52 | private[kafka] def apply(topicPartition: TopicPartition, offset: Long, commitCallback: Commit): CommittableOffset =
53 | new CommittableOffset(topicPartition, offset, commitCallback)
54 | }
55 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/CommittableOffset.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Represents the offset for a specified topic and partition that can be
23 |   * committed synchronously via the [[commitSync]] method or asynchronously by one of the commitAsync methods.
24 | * To achieve good performance it is recommended to use batched commit with
25 | * [[CommittableOffsetBatch]] class.
26 | *
27 | * @param topicPartition is the topic and partition identifier
28 | *
29 | * @param offset is the offset to be committed
30 | *
31 | * @param commitCallback is the set of callbacks for batched commit realized as closures
32 | * in [[KafkaConsumerObservable]] context.
33 | */
34 | final class CommittableOffset private[kafka] (
35 | val topicPartition: TopicPartition,
36 | val offset: Long,
37 | private[kafka] val commitCallback: Commit) {
38 |
39 | /** Synchronously commits [[offset]] for the [[topicPartition]] to Kafka. It is recommended
40 | * to use batched commit with [[CommittableOffsetBatch]] class.
41 | */
42 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(Map(topicPartition -> offset))
43 |
44 | /** Asynchronously commits [[offset]] to Kafka. It is recommended
45 | * to use batched commit with [[CommittableOffsetBatch]] class.
46 | */
47 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(Map(topicPartition -> offset))
48 | }
49 |
50 | object CommittableOffset {
51 |
52 | private[kafka] def apply(topicPartition: TopicPartition, offset: Long, commitCallback: Commit): CommittableOffset =
53 | new CommittableOffset(topicPartition, offset, commitCallback)
54 | }
55 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/ObservableSeekOnStart.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies whether to call `seekToEnd` or `seekToBeginning` when starting
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | *
24 | * Available options:
25 | *
26 | * - [[ObservableSeekOnStart.End]]
27 | * - [[ObservableSeekOnStart.Beginning]]
28 | * - [[ObservableSeekOnStart.NoSeek]]
29 | */
30 | sealed trait ObservableSeekOnStart extends Serializable {
31 | def id: String
32 |
33 | def isSeekBeginning: Boolean =
34 | this match {
35 | case ObservableSeekOnStart.Beginning => true
36 | case _ => false
37 | }
38 |
39 | def isSeekEnd: Boolean =
40 | this match {
41 | case ObservableSeekOnStart.End => true
42 | case _ => false
43 | }
44 | }
45 |
46 | object ObservableSeekOnStart {
47 |
48 | @throws(classOf[BadValue])
49 | def apply(id: String): ObservableSeekOnStart =
50 | id match {
51 | case End.id => End
52 | case Beginning.id => Beginning
53 | case NoSeek.id => NoSeek
54 | case _ =>
55 | throw new BadValue("kafka.monix.observable.seek.onStart", s"Invalid value: $id")
56 | }
57 |
58 | /** Calls `consumer.seekToEnd()` when starting consumer.
59 | */
60 | case object End extends ObservableSeekOnStart {
61 | val id = "end"
62 | }
63 |
64 | /** Calls `consumer.seekToBeginning()` when starting consumer.
65 | */
66 | case object Beginning extends ObservableSeekOnStart {
67 | val id = "beginning"
68 | }
69 |
70 |   /** Calls neither `consumer.seekToEnd()` nor `consumer.seekToBeginning()`
71 |     * when starting the consumer.
72 | */
73 | case object NoSeek extends ObservableSeekOnStart {
74 | val id = "no-seek"
75 | }
76 | }
77 |
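A small sketch (editor's addition) of the parsing and the helper predicates used when the `kafka.monix.observable.seek.onStart` setting is read:

```scala
import monix.kafka.config.ObservableSeekOnStart

object SeekOnStartExample extends App {
  val beginning = ObservableSeekOnStart("beginning")
  assert(beginning.isSeekBeginning && !beginning.isSeekEnd)

  // "no-seek" performs no seek when the consumer starts
  assert(ObservableSeekOnStart("no-seek") == ObservableSeekOnStart.NoSeek)
}
```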
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/ObservableSeekOnStart.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies whether to call `seekToEnd` or `seekToBeginning` when starting
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | *
24 | * Available options:
25 | *
26 | * - [[ObservableSeekOnStart.End]]
27 | * - [[ObservableSeekOnStart.Beginning]]
28 | * - [[ObservableSeekOnStart.NoSeek]]
29 | */
30 | sealed trait ObservableSeekOnStart extends Serializable {
31 | def id: String
32 |
33 | def isSeekBeginning: Boolean =
34 | this match {
35 | case ObservableSeekOnStart.Beginning => true
36 | case _ => false
37 | }
38 |
39 | def isSeekEnd: Boolean =
40 | this match {
41 | case ObservableSeekOnStart.End => true
42 | case _ => false
43 | }
44 | }
45 |
46 | object ObservableSeekOnStart {
47 |
48 | @throws(classOf[BadValue])
49 | def apply(id: String): ObservableSeekOnStart =
50 | id match {
51 | case End.id => End
52 | case Beginning.id => Beginning
53 | case NoSeek.id => NoSeek
54 | case _ =>
55 | throw new BadValue("kafka.monix.observable.seek.onStart", s"Invalid value: $id")
56 | }
57 |
58 | /** Calls `consumer.seekToEnd()` when starting consumer.
59 | */
60 | case object End extends ObservableSeekOnStart {
61 | val id = "end"
62 | }
63 |
64 | /** Calls `consumer.seekToBeginning()` when starting consumer.
65 | */
66 | case object Beginning extends ObservableSeekOnStart {
67 | val id = "beginning"
68 | }
69 |
70 |   /** Calls neither `consumer.seekToEnd()` nor `consumer.seekToBeginning()`
71 |     * when starting the consumer.
72 | */
73 | case object NoSeek extends ObservableSeekOnStart {
74 | val id = "no-seek"
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/ObservableSeekOnStart.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies whether to call `seekToEnd` or `seekToBeginning` when starting
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | *
24 | * Available options:
25 | *
26 | * - [[ObservableSeekOnStart.End]]
27 | * - [[ObservableSeekOnStart.Beginning]]
28 | * - [[ObservableSeekOnStart.NoSeek]]
29 | */
30 | sealed trait ObservableSeekOnStart extends Serializable {
31 | def id: String
32 |
33 | def isSeekBeginning: Boolean =
34 | this match {
35 | case ObservableSeekOnStart.Beginning => true
36 | case _ => false
37 | }
38 |
39 | def isSeekEnd: Boolean =
40 | this match {
41 | case ObservableSeekOnStart.End => true
42 | case _ => false
43 | }
44 | }
45 |
46 | object ObservableSeekOnStart {
47 |
48 | @throws(classOf[BadValue])
49 | def apply(id: String): ObservableSeekOnStart =
50 | id match {
51 | case End.id => End
52 | case Beginning.id => Beginning
53 | case NoSeek.id => NoSeek
54 | case _ =>
55 | throw new BadValue("kafka.monix.observable.seek.onStart", s"Invalid value: $id")
56 | }
57 |
58 | /** Calls `consumer.seekToEnd()` when starting consumer.
59 | */
60 | case object End extends ObservableSeekOnStart {
61 | val id = "end"
62 | }
63 |
64 | /** Calls `consumer.seekToBeginning()` when starting consumer.
65 | */
66 | case object Beginning extends ObservableSeekOnStart {
67 | val id = "beginning"
68 | }
69 |
70 |   /** Calls neither `consumer.seekToEnd()` nor `consumer.seekToBeginning()`
71 |     * when starting the consumer.
72 | */
73 | case object NoSeek extends ObservableSeekOnStart {
74 | val id = "no-seek"
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/resources/monix/kafka/default.conf:
--------------------------------------------------------------------------------
1 | kafka {
2 | bootstrap.servers = "localhost:9092"
3 | client.id = ""
4 |
5 | # E.g. "org.apache.kafka.clients.producer.internals.DefaultPartitioner"
6 | partitioner.class = null
7 |
8 | acks = "1"
9 | buffer.memory = 33554432
10 | compression.type = "none"
11 | retries = 0
12 | max.in.flight.requests.per.connection = 5
13 |
14 | ssl.key.password = null
15 | ssl.keystore.password = null
16 | ssl.keystore.location = null
17 | ssl.truststore.password = null
18 | ssl.truststore.location = null
19 |
20 | batch.size = 16384
21 | connections.max.idle.ms = 540000
22 | linger.ms = 0
23 | max.block.ms = 60000
24 | max.request.size = 1048576
25 |
26 | receive.buffer.bytes = 32768
27 | request.timeout.ms = 40000
28 |
29 | sasl.kerberos.service.name = null
30 | sasl.mechanism = "GSSAPI"
31 |
32 | security.protocol = "PLAINTEXT"
33 | send.buffer.bytes = 131072
34 | ssl.enabled.protocols = "TLSv1.2,TLSv1.1,TLSv1"
35 | ssl.keystore.type = "JKS"
36 | ssl.protocol = "TLS"
37 | ssl.provider = null
38 | ssl.truststore.type = "JKS"
39 |
40 | reconnect.backoff.ms = 50
41 | retry.backoff.ms = 100
42 |
43 | metadata.max.age.ms = 300000
44 |
45 | metric.reporters = ""
46 | metrics.num.samples = 2
47 | metrics.sample.window.ms = 30000
48 |
49 | # Consumer specific settings
50 |
51 | fetch.min.bytes = 1
52 | group.id = ""
53 | heartbeat.interval.ms = 3000
54 | max.partition.fetch.bytes = 1048576
55 | auto.offset.reset = "latest"
56 | # Disabled to use back-pressure or manual commits instead
57 | enable.auto.commit = false
58 | exclude.internal.topics = true
59 | receive.buffer.bytes = 65536
60 | check.crcs = true
61 | fetch.max.wait.ms = 500
62 | # Default values for polling
63 | # See https://cwiki.apache.org/confluence/display/KAFKA/KIP-62%3A+Allow+consumer+to+send+heartbeats+from+a+background+thread
64 | session.timeout.ms = 10000
65 | max.poll.records = 500
66 | max.poll.interval.ms = 300000
67 |
68 | # Monix specific settings
69 |
70 | # Number of requests that KafkaProducerSink
71 | # can push in parallel
72 | monix.producer.sink.parallelism = 100
73 | # Triggers either seekToEnd or seektoBeginning when the observable starts
74 | # Possible values: end, beginning, no-seek
75 | monix.observable.seek.onStart = "no-seek"
76 | # Possible values: sync, async
77 | monix.observable.commit.type = "sync"
78 | # Possible values: before-ack, after-ack or no-ack
79 | monix.observable.commit.order = "after-ack"
80 | }
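These defaults are loaded into `KafkaProducerConfig` and `KafkaConsumerConfig` and can be overridden per instance with `copy`, as the tests in this repository do. A brief sketch follows (editor's addition; the `observableCommitOrder` field name is an assumption mirroring the `monix.observable.commit.order` key, and the broker address is a placeholder):

```scala
import monix.kafka.{KafkaConsumerConfig, KafkaProducerConfig}
import monix.kafka.config.{AutoOffsetReset, ObservableCommitOrder}

object DefaultConfigOverrides extends App {
  // Producer settings start from the defaults above and are overridden per instance
  val producerCfg = KafkaProducerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:9092"),
    clientId = "example-producer"
  )

  // Consumer settings work the same way, including the Monix-specific ones
  // (observableCommitOrder is assumed to mirror monix.observable.commit.order)
  val consumerCfg = KafkaConsumerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:9092"),
    groupId = "example-group",
    autoOffsetReset = AutoOffsetReset.Earliest,
    observableCommitOrder = ObservableCommitOrder.BeforeAck
  )

  println(producerCfg.clientId)
  println(consumerCfg.autoOffsetReset.id)
}
```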
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/resources/monix/kafka/default.conf:
--------------------------------------------------------------------------------
1 | kafka {
2 | bootstrap.servers = "localhost:9092"
3 | client.id = ""
4 |
5 | # E.g. "org.apache.kafka.clients.producer.internals.DefaultPartitioner"
6 | partitioner.class = null
7 |
8 | acks = "1"
9 | buffer.memory = 33554432
10 | compression.type = "none"
11 | retries = 0
12 | max.in.flight.requests.per.connection = 5
13 |
14 | ssl.key.password = null
15 | ssl.keystore.password = null
16 | ssl.keystore.location = null
17 | ssl.truststore.password = null
18 | ssl.truststore.location = null
19 |
20 | batch.size = 16384
21 | connections.max.idle.ms = 540000
22 | linger.ms = 0
23 | max.block.ms = 60000
24 | max.request.size = 1048576
25 |
26 | receive.buffer.bytes = 32768
27 | request.timeout.ms = 40000
28 |
29 | sasl.kerberos.service.name = null
30 | sasl.mechanism = "GSSAPI"
31 |
32 | security.protocol = "PLAINTEXT"
33 | send.buffer.bytes = 131072
34 | ssl.enabled.protocols = "TLSv1.2,TLSv1.1,TLSv1"
35 | ssl.keystore.type = "JKS"
36 | ssl.protocol = "TLS"
37 | ssl.provider = null
38 | ssl.truststore.type = "JKS"
39 |
40 | reconnect.backoff.ms = 50
41 | retry.backoff.ms = 100
42 |
43 | metadata.max.age.ms = 300000
44 |
45 | metric.reporters = ""
46 | metrics.num.samples = 2
47 | metrics.sample.window.ms = 30000
48 |
49 | # Consumer specific settings
50 | client.rack = ""
51 | fetch.min.bytes = 1
52 | fetch.max.bytes = 52428800
53 | group.id = ""
54 | heartbeat.interval.ms = 3000
55 | max.partition.fetch.bytes = 1048576
56 | auto.offset.reset = "latest"
57 | # Disabled to use back-pressure or manual commits instead
58 | enable.auto.commit = false
59 | exclude.internal.topics = true
60 | receive.buffer.bytes = 65536
61 | check.crcs = true
62 | fetch.max.wait.ms = 500
63 | # Default values for polling
64 | # See https://cwiki.apache.org/confluence/display/KAFKA/KIP-62%3A+Allow+consumer+to+send+heartbeats+from+a+background+thread
65 | session.timeout.ms = 10000
66 | max.poll.records = 500
67 | max.poll.interval.ms = 300000
68 |
69 | # Monix specific settings
70 |
71 | # Number of requests that KafkaProducerSink
72 | # can push in parallel
73 | monix.producer.sink.parallelism = 100
74 | # Triggers either seekToEnd or seektoBeginning when the observable starts
75 | # Possible values: end, beginning, no-seek
76 | monix.observable.seek.onStart = "no-seek"
77 | # Possible values: sync, async
78 | monix.observable.commit.type = "sync"
79 | # Possible values: before-ack, after-ack or no-ack
80 | monix.observable.commit.order = "after-ack"
81 | }
82 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/resources/monix/kafka/default.conf:
--------------------------------------------------------------------------------
1 | kafka {
2 | bootstrap.servers = "localhost:9092"
3 | client.id = ""
4 |
5 | # E.g. "org.apache.kafka.clients.producer.internals.DefaultPartitioner"
6 | partitioner.class = null
7 |
8 | acks = "1"
9 | buffer.memory = 33554432
10 | compression.type = "none"
11 | retries = 0
12 | max.in.flight.requests.per.connection = 5
13 |
14 | ssl.key.password = null
15 | ssl.keystore.password = null
16 | ssl.keystore.location = null
17 | ssl.truststore.password = null
18 | ssl.truststore.location = null
19 |
20 | batch.size = 16384
21 | connections.max.idle.ms = 540000
22 | linger.ms = 0
23 | max.block.ms = 60000
24 | max.request.size = 1048576
25 |
26 | receive.buffer.bytes = 32768
27 | request.timeout.ms = 40000
28 |
29 | sasl.kerberos.service.name = null
30 | sasl.mechanism = "GSSAPI"
31 |
32 | security.protocol = "PLAINTEXT"
33 | send.buffer.bytes = 131072
34 | ssl.enabled.protocols = "TLSv1.2,TLSv1.1,TLSv1"
35 | ssl.keystore.type = "JKS"
36 | ssl.protocol = "TLS"
37 | ssl.provider = null
38 | ssl.truststore.type = "JKS"
39 |
40 | reconnect.backoff.ms = 50
41 | retry.backoff.ms = 100
42 |
43 | metadata.max.age.ms = 300000
44 |
45 | metric.reporters = ""
46 | metrics.num.samples = 2
47 | metrics.sample.window.ms = 30000
48 |
49 | # Consumer specific settings
50 |
51 | fetch.min.bytes = 1
52 | fetch.max.bytes = 52428800
53 | group.id = ""
54 | heartbeat.interval.ms = 3000
55 | max.partition.fetch.bytes = 1048576
56 | auto.offset.reset = "latest"
57 | # Disabled to use back-pressure or manual commits instead
58 | enable.auto.commit = false
59 | exclude.internal.topics = true
60 |   # max.poll.records is set with the other polling defaults below
61 | receive.buffer.bytes = 65536
62 | check.crcs = true
63 | fetch.max.wait.ms = 500
64 | # Default values for polling
65 | # See https://cwiki.apache.org/confluence/display/KAFKA/KIP-62%3A+Allow+consumer+to+send+heartbeats+from+a+background+thread
66 | session.timeout.ms = 10000
67 | max.poll.records = 500
68 | max.poll.interval.ms = 300000
69 |
70 | # Monix specific settings
71 |
72 | # Number of requests that KafkaProducerSink
73 | # can push in parallel
74 | monix.producer.sink.parallelism = 100
75 | # Triggers either seekToEnd or seektoBeginning when the observable starts
76 | # Possible values: end, beginning, no-seek
77 | monix.observable.seek.onStart = "no-seek"
78 | # Possible values: sync, async
79 | monix.observable.commit.type = "sync"
80 | # Possible values: before-ack, after-ack or no-ack
81 | monix.observable.commit.order = "after-ack"
82 | }
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/ObservableCommitOrder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit order, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitOrder.BeforeAck]] specifies to do a commit
28 | * before acknowledgement is received from downstream
29 | * - [[ObservableCommitOrder.AfterAck]] specifies to do a commit
30 | * after acknowledgement is received from downstream
31 | * - [[ObservableCommitOrder.NoAck]] specifies to skip committing
32 | */
33 | sealed trait ObservableCommitOrder extends Serializable {
34 | def id: String
35 |
36 | def isBefore: Boolean =
37 | this match {
38 | case ObservableCommitOrder.BeforeAck => true
39 | case _ => false
40 | }
41 |
42 | def isAfter: Boolean =
43 | this match {
44 | case ObservableCommitOrder.AfterAck => true
45 | case _ => false
46 | }
47 | }
48 |
49 | object ObservableCommitOrder {
50 |
51 | @throws(classOf[BadValue])
52 | def apply(id: String): ObservableCommitOrder =
53 | id match {
54 | case BeforeAck.id => BeforeAck
55 | case AfterAck.id => AfterAck
56 | case NoAck.id => NoAck
57 | case _ =>
58 | throw new BadValue("kafka.monix.observable.commit.order", s"Invalid value: $id")
59 | }
60 |
61 | /** Do a `commit` in the Kafka Consumer before
62 | * receiving an acknowledgement from downstream.
63 | */
64 | case object BeforeAck extends ObservableCommitOrder {
65 | val id = "before-ack"
66 | }
67 |
68 | /** Do a `commit` in the Kafka Consumer after
69 | * receiving an acknowledgement from downstream.
70 | */
71 | case object AfterAck extends ObservableCommitOrder {
72 | val id = "after-ack"
73 | }
74 |
75 | /** Do not `commit` in the Kafka Consumer.
76 | */
77 | case object NoAck extends ObservableCommitOrder {
78 | val id = "no-ack"
79 | }
80 | }
81 |
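A short sketch (editor's addition) of how the `kafka.monix.observable.commit.order` values map to the case objects and their predicates:

```scala
import monix.kafka.config.ObservableCommitOrder

object CommitOrderExample extends App {
  assert(ObservableCommitOrder("before-ack").isBefore)
  assert(ObservableCommitOrder("after-ack").isAfter)

  // "no-ack" disables committing, so neither predicate holds
  val noAck = ObservableCommitOrder("no-ack")
  assert(!noAck.isBefore && !noAck.isAfter)
}
```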
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/ObservableCommitOrder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit order, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitOrder.BeforeAck]] specifies to do a commit
28 | * before acknowledgement is received from downstream
29 | * - [[ObservableCommitOrder.AfterAck]] specifies to do a commit
30 | * after acknowledgement is received from downstream
31 | * - [[ObservableCommitOrder.NoAck]] specifies to skip committing
32 | */
33 | sealed trait ObservableCommitOrder extends Serializable {
34 | def id: String
35 |
36 | def isBefore: Boolean =
37 | this match {
38 | case ObservableCommitOrder.BeforeAck => true
39 | case _ => false
40 | }
41 |
42 | def isAfter: Boolean =
43 | this match {
44 | case ObservableCommitOrder.AfterAck => true
45 | case _ => false
46 | }
47 | }
48 |
49 | object ObservableCommitOrder {
50 |
51 | @throws(classOf[BadValue])
52 | def apply(id: String): ObservableCommitOrder =
53 | id match {
54 | case BeforeAck.id => BeforeAck
55 | case AfterAck.id => AfterAck
56 | case NoAck.id => NoAck
57 | case _ =>
58 | throw new BadValue("kafka.monix.observable.commit.order", s"Invalid value: $id")
59 | }
60 |
61 | /** Do a `commit` in the Kafka Consumer before
62 | * receiving an acknowledgement from downstream.
63 | */
64 | case object BeforeAck extends ObservableCommitOrder {
65 | val id = "before-ack"
66 | }
67 |
68 | /** Do a `commit` in the Kafka Consumer after
69 | * receiving an acknowledgement from downstream.
70 | */
71 | case object AfterAck extends ObservableCommitOrder {
72 | val id = "after-ack"
73 | }
74 |
75 | /** Do not `commit` in the Kafka Consumer.
76 | */
77 | case object NoAck extends ObservableCommitOrder {
78 | val id = "no-ack"
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/ObservableCommitOrder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Specifies the consumer commit order, to use by the
22 | * [[monix.kafka.KafkaConsumerObservable KafkaConsumerObservable]]
23 | * in case `kafka.enable.auto.commit` is set to `false`.
24 | *
25 | * Available options:
26 | *
27 | * - [[ObservableCommitOrder.BeforeAck]] specifies to do a commit
28 | * before acknowledgement is received from downstream
29 | * - [[ObservableCommitOrder.AfterAck]] specifies to do a commit
30 | * after acknowledgement is received from downstream
31 | * - [[ObservableCommitOrder.NoAck]] specifies to skip committing
32 | */
33 | sealed trait ObservableCommitOrder extends Serializable {
34 | def id: String
35 |
36 | def isBefore: Boolean =
37 | this match {
38 | case ObservableCommitOrder.BeforeAck => true
39 | case _ => false
40 | }
41 |
42 | def isAfter: Boolean =
43 | this match {
44 | case ObservableCommitOrder.AfterAck => true
45 | case _ => false
46 | }
47 | }
48 |
49 | object ObservableCommitOrder {
50 |
51 | @throws(classOf[BadValue])
52 | def apply(id: String): ObservableCommitOrder =
53 | id match {
54 | case BeforeAck.id => BeforeAck
55 | case AfterAck.id => AfterAck
56 | case NoAck.id => NoAck
57 | case _ =>
58 | throw new BadValue("kafka.monix.observable.commit.order", s"Invalid value: $id")
59 | }
60 |
61 | /** Do a `commit` in the Kafka Consumer before
62 | * receiving an acknowledgement from downstream.
63 | */
64 | case object BeforeAck extends ObservableCommitOrder {
65 | val id = "before-ack"
66 | }
67 |
68 | /** Do a `commit` in the Kafka Consumer after
69 | * receiving an acknowledgement from downstream.
70 | */
71 | case object AfterAck extends ObservableCommitOrder {
72 | val id = "after-ack"
73 | }
74 |
75 | /** Do not `commit` in the Kafka Consumer.
76 | */
77 | case object NoAck extends ObservableCommitOrder {
78 | val id = "no-ack"
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/benchmarks/results/consumer.md:
--------------------------------------------------------------------------------
1 |
2 | ## Consumer Benchmarks
3 |
4 | The consumer benchmark covers the *manual* and *auto commit* consumer implementations of the different libraries.
5 | The manual commit benchmark also covers committing the consumed offsets back to Kafka.
6 | It also runs across a range of `pollHeartbeatRate` values [1, 10, 15, 100, 1000], an important configuration
7 | implemented in this `monix-kafka` library.
8 |
9 | ## RC7
10 | ### 1 fork 1 thread
11 | Benchmark Mode Cnt Score Error Units
12 | ConsumerBenchmark.monix_auto_commit_async thrpt 7 13.097 ± 0.827 ops/s
13 | ConsumerBenchmark.monix_auto_commit_sync thrpt 7 12.486 ± 1.087 ops/s
14 | ConsumerBenchmark.monix_manual_commit thrpt 10 11.519 ± 2.247 ops/s
15 |
16 | ### 1 fork 3 threads
17 | Benchmark Mode Cnt Score Error Units
18 | ConsumerBenchmark.monix_auto_commit thrpt 10 16.186 ± 0.920 ops/s
19 | ConsumerBenchmark.monix_manual_commit thrpt 10 16.319 ± 1.465 ops/s
20 |
21 | ## RC8 - (Introduces PollHeartbeatRate)
22 | ### 1 fork 1 thread
23 | ---
24 | Benchmark Mode Cnt Score Error Units
25 | ConsumerBenchmark.monixAsyncAutoCommitHeartbeat15ms thrpt 7 13.126 ± 1.737 ops/s
26 | ConsumerBenchmark.monixSyncAutoCommitHeartbeat15ms thrpt 7 12.102 ± 1.214 ops/s
27 | ConsumerBenchmark.monixManualCommitHeartbeat1000ms thrpt 7 2.978 ± 0.006 ops/s
28 | ConsumerBenchmark.monixManualCommitHeartbeat100ms thrpt 7 9.961 ± 1.317 ops/s
29 | ConsumerBenchmark.monixManualCommitHeartbeat10ms thrpt 7 13.346 ± 0.716 ops/s
30 | ConsumerBenchmark.monixManualCommitHeartbeat15ms thrpt 7 13.454 ± 2.680 ops/s
31 | ConsumerBenchmark.monixManualCommitHeartbeat1ms thrpt 7 14.281 ± 1.591 ops/s
32 | ConsumerBenchmark.monixManualCommitHeartbeat50ms thrpt 7 11.900 ± 0.698 ops/s
33 | ---
34 | ### 1 fork 3 threads
35 | Benchmark Mode Cnt Score Error Units
36 | ConsumerBenchmark.monixAsyncAutoCommitHeartbeat15ms thrpt 7 16.966 ± 2.659 ops/s
37 | ConsumerBenchmark.monixSyncAutoCommitHeartbeat15ms thrpt 7 15.083 ± 4.242 ops/s
38 | ConsumerBenchmark.monixManualCommitHeartbeat1000ms thrpt 7 2.978 ± 0.006 ops/s
39 | ConsumerBenchmark.monixManualCommitHeartbeat100ms thrpt 7 9.961 ± 1.317 ops/s
40 | ConsumerBenchmark.monixManualCommitHeartbeat10ms thrpt 7 13.346 ± 0.716 ops/s
41 | ConsumerBenchmark.monixManualCommitHeartbeat15ms thrpt 7 13.454 ± 2.680 ops/s
42 | ConsumerBenchmark.monixManualCommitHeartbeat1ms thrpt 7 14.281 ± 1.591 ops/s
43 | ConsumerBenchmark.monixManualCommitHeartbeat50ms thrpt 7 11.900 ± 0.698 ops/s
44 |
45 | ```sbt
46 | sbt 'benchmarks/jmh:run -i 5 -wi 1 -f1 -t1 monix.kafka.benchmarks.ConsumerBenchmark.*'
47 | ```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Monix-Kafka
2 |
3 | The Monix project welcomes contributions from anybody wishing to participate.
4 | All code or documentation that is provided must be licensed with the same
5 | license that Monix is licensed with (Apache 2.0, see LICENSE.txt).
6 |
7 | ## Code of Conduct
8 |
9 | People are expected to follow the [Typelevel Code of Conduct](http://typelevel.org/conduct.html)
10 | when discussing Monix on the Github page, Gitter channel, or other venues.
11 |
12 | We hope that our community will be respectful, helpful, and kind. If you find
13 | yourself embroiled in a situation that becomes heated, or that fails to live up
14 | to our expectations, you should disengage and contact one of the project maintainers
15 | in private. We hope to avoid letting minor aggressions and misunderstandings
16 | escalate into larger problems.
17 |
18 | ## General Workflow
19 |
20 | 1. Make sure you can license your work under Apache 2.0
21 |
22 | 2. Before starting to work, make sure there is a ticket in the issue tracker
23 | or create one first. It can help accelerate the acceptance process
24 | if the change is agreed upon
25 |
26 | 3. If you don't have write access to the repository, you should do
27 | your work in a local branch of your own fork and then submit a pull
28 | request. If you do have write access to the repository, never work
29 | directly on master.
30 |
31 | 4. When the work is completed, submit a Pull Request.
32 |
33 | 5. Anyone can comment on a pull request and you are expected to
34 | answer questions or to incorporate feedback.
35 |
36 | 6. It is not allowed to force push to the branch on which the pull
37 | request is based.
38 |
39 | ## General Guidelines
40 |
41 | 1. It is recommended that the work is accompanied by unit tests.
42 |
43 | 2. The commit messages should be clear, short one-liners; if more
44 |    details are needed, specify a body.
45 |
46 | 3. New source files should be accompanied by the copyright header.
47 |
48 | 4. Follow the structure of the code in this repository and the
49 | indentation rules used.
50 |
51 | 5. Your first commit request should be accompanied with a change to
52 | the AUTHORS file, adding yourself to the authors list.
53 |
54 | ## License
55 |
56 | All code must be licensed under the Apache 2.0 license and all files
57 | must include the following copyright header:
58 |
59 | ```
60 | Copyright (c) 2014-2019 by its authors. Some rights reserved.
61 | See the project homepage at: https://github.com/monix/monix-kafka
62 |
63 | Licensed under the Apache License, Version 2.0 (the "License");
64 | you may not use this file except in compliance with the License.
65 | You may obtain a copy of the License at
66 |
67 | http://www.apache.org/licenses/LICENSE-2.0
68 |
69 | Unless required by applicable law or agreed to in writing, software
70 | distributed under the License is distributed on an "AS IS" BASIS,
71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
72 | See the License for the specific language governing permissions and
73 | limitations under the License.
74 | ```
75 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/MergeByCommitCallbackTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import monix.eval.Task
4 | import monix.kafka.config.AutoOffsetReset
5 | import monix.reactive.Observable
6 | import org.apache.kafka.clients.producer.ProducerRecord
7 | import org.scalatest.{FunSuite, Matchers}
8 |
9 | import scala.concurrent.duration._
10 | import scala.concurrent.Await
11 | import monix.execution.Scheduler.Implicits.global
12 | import org.apache.kafka.common.TopicPartition
13 | import org.scalacheck.Gen
14 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
15 |
16 | class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers {
17 |
18 | val commitCallbacks: List[Commit] = List.fill(4)(new Commit {
19 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
20 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
21 | })
22 |
23 | val committableOffsetsGen: Gen[CommittableOffset] = for {
24 | partition <- Gen.posNum[Int]
25 | offset <- Gen.posNum[Long]
26 | commit <- Gen.oneOf(commitCallbacks)
27 | } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit)
28 |
29 | test("merge by commit callback works") {
30 | forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets =>
31 | val partitions = offsets.map(_.topicPartition)
32 | val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets)
33 |
34 | received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys }
35 |
36 | received.size should be <= 4
37 | }
38 | }
39 |
40 | test("merge by commit callback for multiple consumers") {
41 | withRunningKafka {
42 | val count = 10000
43 | val topicName = "monix-kafka-merge-by-commit"
44 |
45 | val producerCfg = KafkaProducerConfig.default.copy(
46 | bootstrapServers = List("127.0.0.1:6001"),
47 | clientId = "monix-kafka-1-0-producer-test"
48 | )
49 |
50 | val producer = KafkaProducerSink[String, String](producerCfg, io)
51 |
52 | val pushT = Observable
53 | .range(0, count)
54 | .map(msg => new ProducerRecord(topicName, "obs", msg.toString))
55 | .bufferIntrospective(1024)
56 | .consumeWith(producer)
57 |
58 | val listT = Observable
59 | .range(0, 4)
60 | .mergeMap(i => createConsumer(i.toInt, topicName).take(500))
61 | .bufferTumbling(2000)
62 | .map(CommittableOffsetBatch.mergeByCommitCallback)
63 | .map { offsetBatches => assert(offsetBatches.length == 4) }
64 | .completedL
65 |
66 | Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
67 | }
68 | }
69 |
70 | private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = {
71 | val cfg = KafkaConsumerConfig.default.copy(
72 | bootstrapServers = List("127.0.0.1:6001"),
73 | groupId = s"kafka-tests-$i",
74 | autoOffsetReset = AutoOffsetReset.Earliest
75 | )
76 |
77 | KafkaConsumerObservable
78 | .manualCommit[String, String](cfg, List(topicName))
79 | .executeOn(io)
80 | .map(_.committableOffset)
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/MergeByCommitCallbackTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import monix.eval.Task
4 | import monix.kafka.config.AutoOffsetReset
5 | import monix.reactive.Observable
6 | import org.apache.kafka.clients.producer.ProducerRecord
7 | import org.scalatest.{FunSuite, Matchers}
8 |
9 | import scala.concurrent.duration._
10 | import scala.concurrent.Await
11 | import monix.execution.Scheduler.Implicits.global
12 | import org.apache.kafka.common.TopicPartition
13 | import org.scalacheck.Gen
14 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
15 |
16 | class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers {
17 |
18 | val commitCallbacks: List[Commit] = List.fill(4)(new Commit {
19 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
20 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
21 | })
22 |
23 | val committableOffsetsGen: Gen[CommittableOffset] = for {
24 | partition <- Gen.posNum[Int]
25 | offset <- Gen.posNum[Long]
26 | commit <- Gen.oneOf(commitCallbacks)
27 | } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit)
28 |
29 | test("merge by commit callback works") {
30 | forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets =>
31 | val partitions = offsets.map(_.topicPartition)
32 | val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets)
33 |
34 | received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys }
35 |
36 | received.size should be <= 4
37 | }
38 | }
39 |
40 | test("merge by commit callback for multiple consumers") {
41 | withRunningKafka {
42 | val count = 10000
43 | val topicName = "monix-kafka-merge-by-commit"
44 |
45 | val producerCfg = KafkaProducerConfig.default.copy(
46 | bootstrapServers = List("127.0.0.1:6001"),
47 | clientId = "monix-kafka-1-0-producer-test"
48 | )
49 |
50 | val producer = KafkaProducerSink[String, String](producerCfg, io)
51 |
52 | val pushT = Observable
53 | .range(0, count)
54 | .map(msg => new ProducerRecord(topicName, "obs", msg.toString))
55 | .bufferIntrospective(1024)
56 | .consumeWith(producer)
57 |
58 | val listT = Observable
59 | .range(0, 4)
60 | .mergeMap(i => createConsumer(i.toInt, topicName).take(500))
61 | .bufferTumbling(2000)
62 | .map(CommittableOffsetBatch.mergeByCommitCallback)
63 | .map { offsetBatches => assert(offsetBatches.length == 4) }
64 | .completedL
65 |
66 | Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
67 | }
68 | }
69 |
70 | private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = {
71 | val cfg = KafkaConsumerConfig.default.copy(
72 | bootstrapServers = List("127.0.0.1:6001"),
73 | groupId = s"kafka-tests-$i",
74 | autoOffsetReset = AutoOffsetReset.Earliest
75 | )
76 |
77 | KafkaConsumerObservable
78 | .manualCommit[String, String](cfg, List(topicName))
79 | .executeOn(io)
80 | .map(_.committableOffset)
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/MergeByCommitCallbackTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import monix.eval.Task
4 | import monix.kafka.config.AutoOffsetReset
5 | import monix.reactive.Observable
6 | import org.apache.kafka.clients.producer.ProducerRecord
7 | import org.scalatest.{FunSuite, Matchers}
8 |
9 | import scala.concurrent.duration._
10 | import scala.concurrent.Await
11 | import monix.execution.Scheduler.Implicits.global
12 | import org.apache.kafka.common.TopicPartition
13 | import org.scalacheck.Gen
14 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
15 |
16 | class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers {
17 |
18 | val commitCallbacks: List[Commit] = List.fill(4)(new Commit {
19 | override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit
20 |
21 | override def commitBatchAsync(batch: Map[TopicPartition, Long]): Task[Unit] =
22 | Task.unit
23 | })
24 |
25 | val committableOffsetsGen: Gen[CommittableOffset] = for {
26 | partition <- Gen.posNum[Int]
27 | offset <- Gen.posNum[Long]
28 | commit <- Gen.oneOf(commitCallbacks)
29 | } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit)
30 |
31 | test("merge by commit callback works") {
32 | forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets =>
33 | val partitions = offsets.map(_.topicPartition)
34 | val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets)
35 |
36 | received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys }
37 |
38 | received.size should be <= 4
39 | }
40 | }
41 |
42 | test("merge by commit callback for multiple consumers") {
43 | withRunningKafka {
44 | val count = 10000
45 | val topicName = "monix-kafka-merge-by-commit"
46 |
47 | val producerCfg = KafkaProducerConfig.default.copy(
48 | bootstrapServers = List("127.0.0.1:6001"),
49 | clientId = "monix-kafka-1-0-producer-test"
50 | )
51 |
52 | val producer = KafkaProducerSink[String, String](producerCfg, io)
53 |
54 | val pushT = Observable
55 | .range(0, count)
56 | .map(msg => new ProducerRecord(topicName, "obs", msg.toString))
57 | .bufferIntrospective(1024)
58 | .consumeWith(producer)
59 |
60 | val listT = Observable
61 | .range(0, 4)
62 | .mergeMap(i => createConsumer(i.toInt, topicName).take(500))
63 | .bufferTumbling(2000)
64 | .map(CommittableOffsetBatch.mergeByCommitCallback)
65 | .map { offsetBatches => assert(offsetBatches.length == 4) }
66 | .completedL
67 |
68 | Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
69 | }
70 | }
71 |
72 | private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = {
73 | val cfg = KafkaConsumerConfig.default.copy(
74 | bootstrapServers = List("127.0.0.1:6001"),
75 | groupId = s"kafka-tests-$i",
76 | autoOffsetReset = AutoOffsetReset.Earliest
77 | )
78 |
79 | KafkaConsumerObservable
80 | .manualCommit[String, String](cfg, List(topicName))
81 | .executeOn(io)
82 | .map(_.committableOffset)
83 | }
84 | }
85 |
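The test above only asserts on the number of merged batches; below is a minimal sketch of how such merged batches could then be committed, reusing the same `manualCommit` and `mergeByCommitCallback` APIs shown in the test (the topic name and group id are illustrative placeholders):

```scala
import monix.eval.Task
import monix.kafka._
import monix.kafka.config.AutoOffsetReset

val consumerCfg = KafkaConsumerConfig.default.copy(
  bootstrapServers = List("127.0.0.1:6001"),
  groupId = "merge-and-commit-example", // placeholder group id
  autoOffsetReset = AutoOffsetReset.Earliest
)

// Consume, merge offsets that share a commit callback, then commit each batch.
val commitAll: Task[Unit] =
  KafkaConsumerObservable
    .manualCommit[String, String](consumerCfg, List("my-topic")) // placeholder topic
    .map(_.committableOffset)
    .bufferTumbling(2000)
    .map(CommittableOffsetBatch.mergeByCommitCallback)
    .mapEval(batches => Task.traverse(batches)(_.commitAsync()))
    .completedL
```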
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/Acks.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Enumeration for specifying the `acks` setting in
22 | * [[monix.kafka.KafkaProducerConfig KafkaProducerConfig]].
23 | *
24 | * Represents the number of acknowledgments the producer requires
25 | * the leader to have received before considering a request complete.
26 | * This controls the durability of records that are sent.
27 | *
28 | * For the available options see:
29 | *
30 | * - [[Acks.Zero]]
31 | * - [[Acks.NonZero]]
32 | * - [[Acks.All]]
33 | */
34 | sealed trait Acks extends Product with Serializable {
35 | def id: String
36 | }
37 |
38 | object Acks {
39 |
40 | @throws(classOf[BadValue])
41 | def apply(id: String): Acks =
42 | id match {
43 | case All.id => All
44 | case Number(nrStr) =>
45 | val nr = nrStr.toInt
46 | if (nr == 0) Zero else NonZero(nr)
47 | case _ =>
48 | throw new BadValue("kafka.acks", s"Invalid value: $id")
49 | }
50 |
51 | /** If set to zero then the producer will not wait
52 | * for any acknowledgment from the server at all.
53 | *
54 | * The record will be immediately added to the socket buffer and
55 | * considered sent. No guarantee can be made that the server has received
56 | * the record in this case, and the retries configuration will not
57 | * take effect (as the client won't generally know of any failures).
58 | * The offset given back for each record will always be set to -1.
59 | */
60 | case object Zero extends Acks {
61 | val id = "0"
62 | }
63 |
64 | /** This will mean the leader will write the record to its local
65 | * log but will respond without awaiting full acknowledgement
66 | * from all followers.
67 | *
68 | * In this case should the leader fail immediately after acknowledging
69 | * the record but before the followers have replicated it then the
70 | * record will be lost.
71 | */
72 | final case class NonZero(nr: Int) extends Acks {
73 | require(nr > 0, "nr > 0")
74 | val id = nr.toString
75 | }
76 |
77 | /** This means the leader will wait for the
78 | * full set of in-sync replicas to acknowledge the record.
79 | *
80 | * This guarantees that the record will not be lost as long as
81 | * at least one in-sync replica remains alive. This is the strongest
82 | * available guarantee.
83 | */
84 | case object All extends Acks {
85 | val id = "all"
86 | }
87 |
88 | // Regular expression for parsing IDs
89 | private val Number = """^(\d+)$""".r
90 | }
91 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/Acks.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Enumeration for specifying the `acks` setting in
22 | * [[monix.kafka.KafkaProducerConfig KafkaProducerConfig]].
23 | *
24 | * Represents the number of acknowledgments the producer requires
25 | * the leader to have received before considering a request complete.
26 | * This controls the durability of records that are sent.
27 | *
28 | * For the available options see:
29 | *
30 | * - [[Acks.Zero]]
31 | * - [[Acks.NonZero]]
32 | * - [[Acks.All]]
33 | */
34 | sealed trait Acks extends Product with Serializable {
35 | def id: String
36 | }
37 |
38 | object Acks {
39 |
40 | @throws(classOf[BadValue])
41 | def apply(id: String): Acks =
42 | id match {
43 | case All.id => All
44 | case Number(nrStr) =>
45 | val nr = nrStr.toInt
46 | if (nr == 0) Zero else NonZero(nr)
47 | case _ =>
48 | throw new BadValue("kafka.acks", s"Invalid value: $id")
49 | }
50 |
51 | /** If set to zero then the producer will not wait
52 | * for any acknowledgment from the server at all.
53 | *
54 | * The record will be immediately added to the socket buffer and
55 | * considered sent. No guarantee can be made that the server has received
56 | * the record in this case, and the retries configuration will not
57 | * take effect (as the client won't generally know of any failures).
58 | * The offset given back for each record will always be set to -1.
59 | */
60 | case object Zero extends Acks {
61 | val id = "0"
62 | }
63 |
64 | /** This will mean the leader will write the record to its local
65 | * log but will respond without awaiting full acknowledgement
66 | * from all followers.
67 | *
68 | * In this case should the leader fail immediately after acknowledging
69 | * the record but before the followers have replicated it then the
70 | * record will be lost.
71 | */
72 | final case class NonZero(nr: Int) extends Acks {
73 | require(nr > 0, "nr > 0")
74 | val id = nr.toString
75 | }
76 |
77 | /** This means the leader will wait for the
78 | * full set of in-sync replicas to acknowledge the record.
79 | *
80 | * This guarantees that the record will not be lost as long as
81 | * at least one in-sync replica remains alive. This is the strongest
82 | * available guarantee.
83 | */
84 | case object All extends Acks {
85 | val id = "all"
86 | }
87 |
88 | // Regular expression for parsing IDs
89 | private val Number = """^(\d+)$""".r
90 | }
91 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/Acks.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import com.typesafe.config.ConfigException.BadValue
20 |
21 | /** Enumeration for specifying the `acks` setting in
22 | * [[monix.kafka.KafkaProducerConfig KafkaProducerConfig]].
23 | *
24 | * Represents the number of acknowledgments the producer requires
25 | * the leader to have received before considering a request complete.
26 | * This controls the durability of records that are sent.
27 | *
28 | * For the available options see:
29 | *
30 | * - [[Acks.Zero]]
31 | * - [[Acks.NonZero]]
32 | * - [[Acks.All]]
33 | */
34 | sealed trait Acks extends Product with Serializable {
35 | def id: String
36 | }
37 |
38 | object Acks {
39 |
40 | @throws(classOf[BadValue])
41 | def apply(id: String): Acks =
42 | id match {
43 | case All.id => All
44 | case Number(nrStr) =>
45 | val nr = nrStr.toInt
46 | if (nr == 0) Zero else NonZero(nr)
47 | case _ =>
48 | throw new BadValue("kafka.acks", s"Invalid value: $id")
49 | }
50 |
51 | /** If set to zero then the producer will not wait
52 | * for any acknowledgment from the server at all.
53 | *
54 | * The record will be immediately added to the socket buffer and
55 | * considered sent. No guarantee can be made that the server has received
56 | * the record in this case, and the retries configuration will not
57 | * take effect (as the client won't generally know of any failures).
58 | * The offset given back for each record will always be set to -1.
59 | */
60 | case object Zero extends Acks {
61 | val id = "0"
62 | }
63 |
64 | /** This will mean the leader will write the record to its local
65 | * log but will respond without awaiting full acknowledgement
66 | * from all followers.
67 | *
68 | * In this case should the leader fail immediately after acknowledging
69 | * the record but before the followers have replicated it then the
70 | * record will be lost.
71 | */
72 | final case class NonZero(nr: Int) extends Acks {
73 | require(nr > 0, "nr > 0")
74 | val id = nr.toString
75 | }
76 |
77 | /** This means the leader will wait for the
78 | * full set of in-sync replicas to acknowledge the record.
79 | *
80 | * This guarantees that the record will not be lost as long as
81 | * at least one in-sync replica remains alive. This is the strongest
82 | * available guarantee.
83 | */
84 | case object All extends Acks {
85 | val id = "all"
86 | }
87 |
88 | // Regular expression for parsing IDs
89 | private val Number = """^(\d+)$""".r
90 | }
91 |
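A minimal sketch of how the `Acks.apply` parser above maps configuration strings to the enumeration values (illustrative only):

```scala
import monix.kafka.config.Acks

assert(Acks("all") == Acks.All)      // wait for the full in-sync replica set
assert(Acks("0") == Acks.Zero)       // no acknowledgments at all
assert(Acks("2") == Acks.NonZero(2)) // any positive integer parses to NonZero
// Anything else throws com.typesafe.config.ConfigException.BadValue,
// e.g. Acks("maybe")
```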
--------------------------------------------------------------------------------
/CHANGES.md:
--------------------------------------------------------------------------------
1 |
2 | ## Version 1.0.0-RC7 (January 23, 2021)
3 |
4 | Depends on Monix 3.3.0
5 |
6 | Main changes:
7 |
8 | - PR #248: Enable '-Xfatal-warnings' flag (internal)
9 | - PR #245: Use Kafka Producer Consumer interfaces
10 |
11 | ## Version 1.0.0-RC6 (April 18, 2020)
12 |
13 | Depends on Monix 3.x
14 |
15 | Main changes:
16 |
17 | - PR #133: Configure serializers in the Kafka producer after creation
18 | - PR #150: Allow customization of error handling in ProducerSink
19 | - PR #179: Add seekToBeginning option and reuse instances of kafka (de)serializers
20 | - PR #180: Add client.rack option
21 |
22 | Thanks to all the contributors!
23 |
24 | ## Version 1.0.0-RC5 (September 15, 2019)
25 |
26 | Depends on Monix 3.0.0
27 |
28 | Main changes:
29 |
30 | - PR #89: Added fetch.max.bytes setting
31 | - PR #95: Improve Producer performance
32 | - PR #100: Add method `mergeByCommitCallback`
33 | - PR #115: Allow using Kafka Serializers
34 |
35 | Thanks to all the contributors!
36 |
37 | ## Version 1.0.0-RC4 (June 16, 2019)
38 |
39 | Depends on Monix `3.0.0-RC3`.
40 |
41 | ## Version 1.0.0-RC3 (May 1, 2019)
42 |
43 | Changes:
44 | - [PR #64](https://github.com/monix/monix-kafka/pull/64) Manual commits support
45 | - [PR #68](https://github.com/monix/monix-kafka/pull/68) Scalafmt
46 | - Updates of dependencies
47 |
48 | Depends on Monix `3.0.0-RC2`.
49 |
50 |
51 | ## Version 1.0.0-RC2 (Nov 15, 2018)
52 |
53 | - Updates Monix to 3.0.0-RC2
54 | - [PR #44](https://github.com/monix/monix-kafka/pull/44) - Add possibility of adding custom configs
55 | - [PR #48](https://github.com/monix/monix-kafka/pull/48) - Fix Kafka's Config classloader conflicts
56 |
57 | ## Version 0.17 (Nov 8, 2018)
58 |
59 | - [PR #45](https://github.com/monix/monix-kafka/pull/45)
60 | - This version uses Monix 2.3.x
61 |
62 | ## Version 1.0.0-RC1 (Mar 23, 2018)
63 |
64 | - Updates Monix to 3.0.0-RC1
65 | - Updates Kafka to 1.0.1
66 | - Adds tests using Embedded Kafka
67 |
68 | ## Version 1.0.0-M3 (Feb 8, 2018)
69 |
70 | - Updates Monix to 3.0.0-M3
71 |
72 | ## Version 0.16 (Feb 6, 2018)
73 |
74 | - Adds [KIP-62](https://cwiki.apache.org/confluence/display/KAFKA/KIP-62%3A+Allow+consumer+to+send+heartbeats+from+a+background+thread) config parameters and default values to Kafka 1.0.0 `default.conf`.
75 | - Adds configs for metrics
76 |
77 | ## Version 0.15 (Jan 31, 2018)
78 |
79 | - Updates Monix to 2.3.3
80 | - Updates Scala to 2.11.12 and 2.12.4
81 | - Added support for Kafka 0.11.0.1 and 1.0.0
82 |
83 | ## Version 0.14 (May 3, 2017)
84 |
85 | - Updates Monix to 2.3.0
86 | - Updates Kafka 10 to 0.10.2.1
87 | - Updates Scala to 2.11.11 and 2.12.2
88 |
89 | ## Version 0.13 (Mar 27, 2017)
90 |
91 | - Updates Monix to 2.2.4
92 | - Activate Scala 2.10 and 2.12 cross-builds
93 |
94 | ## Version 0.12 (Mar 10, 2017)
95 |
96 | - Updates Monix to 2.2.3
97 |
98 | ## Version 0.11 (Feb 22, 2017)
99 |
100 | - Updates Monix to 2.2.2
101 |
102 | ## Version 0.9 (Jan 25, 2017)
103 |
104 | - Updates Monix to 2.2.0
105 | - [Issue #5](https://github.com/monix/monix-kafka/pull/5):
106 | added new optional param `classInstance` to `Serializer` and `Deserializer`
107 |
108 | ## Version 0.8 (Dec 19, 2016)
109 |
110 | - Updates Monix to 2.1.2
111 | - Updates the Kafka 10 client to 0.10.1.0
112 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/config/SSLProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import java.security.NoSuchAlgorithmException
20 | import javax.net.ssl.SSLContext
21 |
22 | import com.typesafe.config.ConfigException.BadValue
23 |
24 | /** Represents the available protocols to use for
25 | * SSL connections.
26 | *
27 | * Available values:
28 | *
29 | * - [[SSLProtocol.TLSv12]]
30 | * - [[SSLProtocol.TLSv11]]
31 | * - [[SSLProtocol.TLSv1]]
32 | * - [[SSLProtocol.TLS]]
33 | * - [[SSLProtocol.SSLv3]] (prefer only for older JVMs)
34 | * - [[SSLProtocol.SSLv2]] (prefer only for older JVMs, no longer available for Java 8)
35 | * - [[SSLProtocol.SSL]] (prefer only for older JVMs)
36 | */
37 | sealed trait SSLProtocol extends Serializable {
38 | def id: String
39 |
40 | def getInstance(): Option[SSLContext] =
41 | try Some(SSLContext.getInstance(id))
42 | catch {
43 | case _: NoSuchAlgorithmException =>
44 | None
45 | }
46 | }
47 |
48 | object SSLProtocol {
49 |
50 | @throws(classOf[BadValue])
51 | def apply(id: String): SSLProtocol = {
52 | val algorithm = id match {
53 | case TLSv12.id => TLSv12
54 | case TLSv11.id => TLSv11
55 | case TLSv1.id => TLSv1
56 | case TLS.id => TLS
57 | case SSLv3.id => SSLv3
58 | case SSLv2.id => SSLv2
59 | case SSL.id => SSL
60 | case _ =>
61 | throw new BadValue("kafka.ssl.enabled.protocols", s"Invalid value: $id")
62 | }
63 |
64 | algorithm.getInstance() match {
65 | case Some(_) => algorithm
66 | case None =>
67 | throw new BadValue("kafka.ssl.enabled.protocols", s"Unsupported SSL protocol: $id")
68 | }
69 | }
70 |
71 | case object TLSv12 extends SSLProtocol {
72 | val id = "TLSv1.2"
73 | }
74 |
75 | case object TLSv11 extends SSLProtocol {
76 | val id = "TLSv1.1"
77 | }
78 |
79 | case object TLSv1 extends SSLProtocol {
80 | val id = "TLSv1"
81 | }
82 |
83 | case object TLS extends SSLProtocol {
84 | val id = "TLS"
85 | }
86 |
87 | /** WARNING: deprecated, might not work on recent versions
88 | * of the JVM. Prefer TLS.
89 | */
90 | case object SSLv3 extends SSLProtocol {
91 | val id = "SSLv3"
92 | }
93 |
94 | /** WARNING: deprecated, might not work on recent versions
95 | * of the JVM. Prefer TLS.
96 | */
97 | case object SSLv2 extends SSLProtocol {
98 | val id = "SSLv2"
99 | }
100 |
101 | /** WARNING: deprecated, might not work on recent versions
102 | * of the JVM. Prefer TLS.
103 | */
104 | case object SSL extends SSLProtocol {
105 | val id = "SSL"
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/config/SSLProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import java.security.NoSuchAlgorithmException
20 | import javax.net.ssl.SSLContext
21 |
22 | import com.typesafe.config.ConfigException.BadValue
23 |
24 | /** Represents the available protocols to use for
25 | * SSL connections.
26 | *
27 | * Available values:
28 | *
29 | * - [[SSLProtocol.TLSv12]]
30 | * - [[SSLProtocol.TLSv11]]
31 | * - [[SSLProtocol.TLSv1]]
32 | * - [[SSLProtocol.TLS]]
33 | * - [[SSLProtocol.SSLv3]] (prefer only for older JVMs)
34 | * - [[SSLProtocol.SSLv2]] (prefer only for older JVMs, no longer available for Java 8)
35 | * - [[SSLProtocol.SSL]] (prefer only for older JVMs)
36 | */
37 | sealed trait SSLProtocol extends Serializable {
38 | def id: String
39 |
40 | def getInstance(): Option[SSLContext] =
41 | try Some(SSLContext.getInstance(id))
42 | catch {
43 | case _: NoSuchAlgorithmException =>
44 | None
45 | }
46 | }
47 |
48 | object SSLProtocol {
49 |
50 | @throws(classOf[BadValue])
51 | def apply(id: String): SSLProtocol = {
52 | val algorithm = id match {
53 | case TLSv12.id => TLSv12
54 | case TLSv11.id => TLSv11
55 | case TLSv1.id => TLSv1
56 | case TLS.id => TLS
57 | case SSLv3.id => SSLv3
58 | case SSLv2.id => SSLv2
59 | case SSL.id => SSL
60 | case _ =>
61 | throw new BadValue("kafka.ssl.enabled.protocols", s"Invalid value: $id")
62 | }
63 |
64 | algorithm.getInstance() match {
65 | case Some(_) => algorithm
66 | case None =>
67 | throw new BadValue("kafka.ssl.enabled.protocols", s"Unsupported SSL protocol: $id")
68 | }
69 | }
70 |
71 | case object TLSv12 extends SSLProtocol {
72 | val id = "TLSv1.2"
73 | }
74 |
75 | case object TLSv11 extends SSLProtocol {
76 | val id = "TLSv1.1"
77 | }
78 |
79 | case object TLSv1 extends SSLProtocol {
80 | val id = "TLSv1"
81 | }
82 |
83 | case object TLS extends SSLProtocol {
84 | val id = "TLS"
85 | }
86 |
87 | /** WARNING: deprecated, might not work on recent versions
88 | * of the JVM. Prefer TLS.
89 | */
90 | case object SSLv3 extends SSLProtocol {
91 | val id = "SSLv3"
92 | }
93 |
94 | /** WARNING: deprecated, might not work on recent versions
95 | * of the JVM. Prefer TLS.
96 | */
97 | case object SSLv2 extends SSLProtocol {
98 | val id = "SSLv2"
99 | }
100 |
101 | /** WARNING: deprecated, might not work on recent versions
102 | * of the JVM. Prefer TLS.
103 | */
104 | case object SSL extends SSLProtocol {
105 | val id = "SSL"
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/config/SSLProtocol.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka.config
18 |
19 | import java.security.NoSuchAlgorithmException
20 | import javax.net.ssl.SSLContext
21 |
22 | import com.typesafe.config.ConfigException.BadValue
23 |
24 | /** Represents the available protocols to use for
25 | * SSL connections.
26 | *
27 | * Available values:
28 | *
29 | * - [[SSLProtocol.TLSv12]]
30 | * - [[SSLProtocol.TLSv11]]
31 | * - [[SSLProtocol.TLSv1]]
32 | * - [[SSLProtocol.TLS]]
33 | * - [[SSLProtocol.SSLv3]] (prefer only for older JVMs)
34 | * - [[SSLProtocol.SSLv2]] (prefer only for older JVMs, no longer available for Java 8)
35 | * - [[SSLProtocol.SSL]] (prefer only for older JVMs)
36 | */
37 | sealed trait SSLProtocol extends Serializable {
38 | def id: String
39 |
40 | def getInstance(): Option[SSLContext] =
41 | try Some(SSLContext.getInstance(id))
42 | catch {
43 | case _: NoSuchAlgorithmException =>
44 | None
45 | }
46 | }
47 |
48 | object SSLProtocol {
49 |
50 | @throws(classOf[BadValue])
51 | def apply(id: String): SSLProtocol = {
52 | val algorithm = id match {
53 | case TLSv12.id => TLSv12
54 | case TLSv11.id => TLSv11
55 | case TLSv1.id => TLSv1
56 | case TLS.id => TLS
57 | case SSLv3.id => SSLv3
58 | case SSLv2.id => SSLv2
59 | case SSL.id => SSL
60 | case _ =>
61 | throw new BadValue("kafka.ssl.enabled.protocols", s"Invalid value: $id")
62 | }
63 |
64 | algorithm.getInstance() match {
65 | case Some(_) => algorithm
66 | case None =>
67 | throw new BadValue("kafka.ssl.enabled.protocols", s"Unsupported SSL protocol: $id")
68 | }
69 | }
70 |
71 | case object TLSv12 extends SSLProtocol {
72 | val id = "TLSv1.2"
73 | }
74 |
75 | case object TLSv11 extends SSLProtocol {
76 | val id = "TLSv1.1"
77 | }
78 |
79 | case object TLSv1 extends SSLProtocol {
80 | val id = "TLSv1"
81 | }
82 |
83 | case object TLS extends SSLProtocol {
84 | val id = "TLS"
85 | }
86 |
87 | /** WARNING: deprecated, might not work on recent versions
88 | * of the JVM. Prefer TLS.
89 | */
90 | case object SSLv3 extends SSLProtocol {
91 | val id = "SSLv3"
92 | }
93 |
94 | /** WARNING: deprecated, might not work on recent versions
95 | * of the JVM. Prefer TLS.
96 | */
97 | case object SSLv2 extends SSLProtocol {
98 | val id = "SSLv2"
99 | }
100 |
101 | /** WARNING: deprecated, might not work on recent versions
102 | * of the JVM. Prefer TLS.
103 | */
104 | case object SSL extends SSLProtocol {
105 | val id = "SSL"
106 | }
107 | }
108 |
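The `apply` method above both parses the identifier and checks that the running JVM can actually provide an `SSLContext` for it; a minimal sketch of the expected behaviour (illustrative only):

```scala
import monix.kafka.config.SSLProtocol

// Recognized identifiers map to their case objects, provided
// SSLContext.getInstance succeeds for them on this JVM.
assert(SSLProtocol("TLSv1.2") == SSLProtocol.TLSv12)
assert(SSLProtocol("TLS") == SSLProtocol.TLS)

// Unknown identifiers, or protocols the JVM no longer supports, throw
// com.typesafe.config.ConfigException.BadValue, e.g. SSLProtocol("TLSv9").
```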
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 |
7 | compile:
8 | name: scala-${{ matrix.scala }} jdk-${{ matrix.java }} compile
9 | runs-on: ubuntu-latest
10 |
11 | strategy:
12 | fail-fast: true
13 | matrix:
14 | java: [ 8 ]
15 | scala: [ 2.12.15, 2.13.8 ]
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | - uses: olafurpg/setup-scala@v13
20 | with:
21 | java-version: "adopt@1.${{ matrix.java }}"
22 |
23 | - name: Cache SBT Coursier directory
24 | uses: actions/cache@v2
25 | with:
26 | path: ~/.cache/coursier/v1
27 | key: ${{ runner.os }}-coursier-${{ hashFiles('**/*.sbt') }}
28 | restore-keys: |
29 | ${{ runner.os }}-coursier-
30 | - name: Cache SBT directory
31 | uses: actions/cache@v2
32 | with:
33 | path: ~/.sbt
34 | key: |
35 | ${{ runner.os }}-sbt-${{ hashFiles('project/build.properties') }}-${{ hashFiles('project/plugins.sbt') }}
36 | restore-keys: ${{ runner.os }}-sbt-
37 |
38 | - name: Run Compile for Kafka 0.10.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
39 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka10/compile"
40 |
41 | - name: Run Compile for Kafka 0.11.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
42 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka11/compile"
43 |
44 | - name: Run Compile for Kafka 1.x.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
45 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka1x/compile"
46 |
47 | tests:
48 | name: scala-${{ matrix.scala }} jdk-${{ matrix.java }} tests
49 | runs-on: ubuntu-latest
50 |
51 | strategy:
52 | fail-fast: true
53 | matrix:
54 | java: [8]
55 | scala: [2.12.15]
56 |
57 | steps:
58 | - uses: actions/checkout@v2
59 | - uses: olafurpg/setup-scala@v13
60 | with:
61 | java-version: "adopt@1.${{ matrix.java }}"
62 |
63 | - name: Cache SBT Coursier directory
64 | uses: actions/cache@v2
65 | with:
66 | path: ~/.cache/coursier/v1
67 | key: ${{ runner.os }}-coursier-${{ hashFiles('**/*.sbt') }}
68 | restore-keys: |
69 | ${{ runner.os }}-coursier-
70 | - name: Cache SBT directory
71 | uses: actions/cache@v2
72 | with:
73 | path: ~/.sbt
74 | key: |
75 | ${{ runner.os }}-sbt-${{ hashFiles('project/build.properties') }}-${{ hashFiles('project/plugins.sbt') }}
76 | restore-keys: ${{ runner.os }}-sbt-
77 |
78 | - name: Run Tests for Kafka 0.10.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
79 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka10/test"
80 |
81 | - name: Run Tests for Kafka 0.11.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
82 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka11/test"
83 |
84 | - name: Run Tests for Kafka 1.x.x, Java ${{ matrix.java }} and Scala ${{ matrix.scala }}
85 | run: sbt -J-Xmx6144m ";++${{ matrix.scala }} ;clean ;kafka1x/test"
86 |
87 | publish:
88 | needs: [ compile, tests ]
89 | if: github.event_name == 'push' && github.ref == 'refs/heads/master'
90 | runs-on: ubuntu-latest
91 | steps:
92 | - uses: actions/checkout@v2.3.4
93 | with:
94 | fetch-depth: 0
95 | - uses: olafurpg/setup-scala@v13
96 | - name: Publish release ${{ github.ref }}
97 | run: sbt ci-release
98 | env:
99 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}
100 | PGP_SECRET: ${{ secrets.PGP_SECRET }}
101 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
102 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }}
103 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/Serializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Serializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the full package path to the Kafka `Serializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Serializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Serializer`.
36 | */
37 | final case class Serializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaSerializer[A]],
40 | constructor: Serializer.Constructor[A] = (s: Serializer[A]) => s.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaSerializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Serializer {
48 |
49 | implicit def fromKafkaSerializer[A](implicit ser: KafkaSerializer[A]): Serializer[A] =
50 | Serializer[A](
51 | className = ser.getClass.getName,
52 | classType = ser.getClass,
53 | constructor = _ => ser
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Serializer`.
58 | */
59 | type Constructor[A] = (Serializer[A]) => KafkaSerializer[A]
60 |
61 | implicit val forStrings: Serializer[String] =
62 | Serializer[String](
63 | className = "org.apache.kafka.common.serialization.StringSerializer",
64 | classType = classOf[StringSerializer]
65 | )
66 |
67 | implicit val forByteArray: Serializer[Array[Byte]] =
68 | Serializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArraySerializer",
70 | classType = classOf[ByteArraySerializer]
71 | )
72 |
73 | implicit val forByteBuffer: Serializer[ByteBuffer] =
74 | Serializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferSerializer",
76 | classType = classOf[ByteBufferSerializer]
77 | )
78 |
79 | implicit val forBytes: Serializer[Bytes] =
80 | Serializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesSerializer",
82 | classType = classOf[BytesSerializer]
83 | )
84 |
85 | implicit val forJavaDouble: Serializer[java.lang.Double] =
86 | Serializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleSerializer",
88 | classType = classOf[DoubleSerializer]
89 | )
90 |
91 | implicit val forJavaInteger: Serializer[java.lang.Integer] =
92 | Serializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerSerializer",
94 | classType = classOf[IntegerSerializer]
95 | )
96 |
97 | implicit val forJavaLong: Serializer[java.lang.Long] =
98 | Serializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongSerializer",
100 | classType = classOf[LongSerializer]
101 | )
102 | }
103 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/Serializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Serializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the full package path to the Kafka `Serializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Serializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Serializer`.
36 | */
37 | final case class Serializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaSerializer[A]],
40 | constructor: Serializer.Constructor[A] = (s: Serializer[A]) => s.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaSerializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Serializer {
48 |
49 | implicit def fromKafkaSerializer[A](implicit ser: KafkaSerializer[A]): Serializer[A] =
50 | Serializer[A](
51 | className = ser.getClass.getName,
52 | classType = ser.getClass,
53 | constructor = _ => ser
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Serializer`.
58 | */
59 | type Constructor[A] = (Serializer[A]) => KafkaSerializer[A]
60 |
61 | implicit val forStrings: Serializer[String] =
62 | Serializer[String](
63 | className = "org.apache.kafka.common.serialization.StringSerializer",
64 | classType = classOf[StringSerializer]
65 | )
66 |
67 | implicit val forByteArray: Serializer[Array[Byte]] =
68 | Serializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArraySerializer",
70 | classType = classOf[ByteArraySerializer]
71 | )
72 |
73 | implicit val forByteBuffer: Serializer[ByteBuffer] =
74 | Serializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferSerializer",
76 | classType = classOf[ByteBufferSerializer]
77 | )
78 |
79 | implicit val forBytes: Serializer[Bytes] =
80 | Serializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesSerializer",
82 | classType = classOf[BytesSerializer]
83 | )
84 |
85 | implicit val forJavaDouble: Serializer[java.lang.Double] =
86 | Serializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleSerializer",
88 | classType = classOf[DoubleSerializer]
89 | )
90 |
91 | implicit val forJavaInteger: Serializer[java.lang.Integer] =
92 | Serializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerSerializer",
94 | classType = classOf[IntegerSerializer]
95 | )
96 |
97 | implicit val forJavaLong: Serializer[java.lang.Long] =
98 | Serializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongSerializer",
100 | classType = classOf[LongSerializer]
101 | )
102 | }
103 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/Serializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Serializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the full package path to the Kafka `Serializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Serializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Serializer`.
36 | */
37 | final case class Serializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaSerializer[A]],
40 | constructor: Serializer.Constructor[A] = (s: Serializer[A]) => s.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaSerializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Serializer {
48 |
49 | implicit def fromKafkaSerializer[A](implicit ser: KafkaSerializer[A]): Serializer[A] =
50 | Serializer[A](
51 | className = ser.getClass.getName,
52 | classType = ser.getClass,
53 | constructor = _ => ser
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Serializer`.
58 | */
59 | type Constructor[A] = (Serializer[A]) => KafkaSerializer[A]
60 |
61 | implicit val forStrings: Serializer[String] =
62 | Serializer[String](
63 | className = "org.apache.kafka.common.serialization.StringSerializer",
64 | classType = classOf[StringSerializer]
65 | )
66 |
67 | implicit val forByteArray: Serializer[Array[Byte]] =
68 | Serializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArraySerializer",
70 | classType = classOf[ByteArraySerializer]
71 | )
72 |
73 | implicit val forByteBuffer: Serializer[ByteBuffer] =
74 | Serializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferSerializer",
76 | classType = classOf[ByteBufferSerializer]
77 | )
78 |
79 | implicit val forBytes: Serializer[Bytes] =
80 | Serializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesSerializer",
82 | classType = classOf[BytesSerializer]
83 | )
84 |
85 | implicit val forJavaDouble: Serializer[java.lang.Double] =
86 | Serializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleSerializer",
88 | classType = classOf[DoubleSerializer]
89 | )
90 |
91 | implicit val forJavaInteger: Serializer[java.lang.Integer] =
92 | Serializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerSerializer",
94 | classType = classOf[IntegerSerializer]
95 | )
96 |
97 | implicit val forJavaLong: Serializer[java.lang.Long] =
98 | Serializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongSerializer",
100 | classType = classOf[LongSerializer]
101 | )
102 | }
103 |
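A minimal sketch of providing a `Serializer` for a custom type, either by pointing at a serializer class with an empty constructor or by wrapping an existing instance through `fromKafkaSerializer` (the `User` type and `UserKafkaSerializer` class are hypothetical):

```scala
import monix.kafka.Serializer
import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}

final case class User(name: String)

// Hypothetical Kafka serializer with an empty constructor.
final class UserKafkaSerializer extends KafkaSerializer[User] {
  override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
  override def serialize(topic: String, data: User): Array[Byte] =
    if (data == null) null else data.name.getBytes("UTF-8")
  override def close(): Unit = ()
}

// Option 1: reference the class; the default `constructor` argument
// instantiates it reflectively through its empty constructor.
implicit val userSerializer: Serializer[User] =
  Serializer[User](
    className = classOf[UserKafkaSerializer].getName,
    classType = classOf[UserKafkaSerializer]
  )

// Option 2: wrap an already-constructed instance; when an implicit
// KafkaSerializer[User] is in scope, fromKafkaSerializer derives the wrapper.
val wrapped: Serializer[User] =
  Serializer.fromKafkaSerializer(new UserKafkaSerializer)
```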
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/CommittableOffsetBatch.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Batch of Kafka offsets which can be committed together.
23 | * Can be built from an offsets sequence with the [[CommittableOffsetBatch#apply]] method.
24 | * You can also use the [[CommittableOffsetBatch#empty]] method to create an empty batch and
25 | * add offsets to it using the [[updated]] method.
26 | *
27 | * WARNING: Order of the offsets is important. Only the last added offset
28 | * for topic and partition will be committed to Kafka.
29 | *
30 | * @param offsets is the offsets batch for the provided topics and partitions.
31 | * Make sure that each of them was received from one [[KafkaConsumerObservable]].
32 | *
33 | * @param commitCallback is the callback used for the batched commit, realized as a closure
34 | * in the [[KafkaConsumerObservable]] context. This parameter is obtained from the last [[CommittableOffset]]
35 | * added to the batch.
36 | */
37 | final class CommittableOffsetBatch private[kafka] (val offsets: Map[TopicPartition, Long], commitCallback: Commit) {
38 |
39 | /** Synchronously commits [[offsets]] to Kafka
40 | */
41 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(offsets)
42 |
43 | /** Asynchronously commits [[offsets]] to Kafka
44 | */
45 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(offsets)
46 |
47 | /** Adds new [[CommittableOffset]] to batch. Added offset replaces previous one specified
48 | * for same topic and partition.
49 | */
50 | def updated(committableOffset: CommittableOffset): CommittableOffsetBatch =
51 | new CommittableOffsetBatch(
52 | offsets.updated(committableOffset.topicPartition, committableOffset.offset),
53 | committableOffset.commitCallback
54 | )
55 | }
56 |
57 | object CommittableOffsetBatch {
58 |
59 | /** Creates empty [[CommittableOffsetBatch]]. Can be used as neutral element in fold:
60 | * {{{
61 | * offsets.foldLeft(CommittableOffsetBatch.empty)(_ updated _)
62 | * }}}
63 | */
64 | val empty: CommittableOffsetBatch = new CommittableOffsetBatch(Map.empty, Commit.empty)
65 |
66 | /** Builds [[CommittableOffsetBatch]] from offsets sequence. Be careful with
67 | * sequence order. If there is more than one offset for a topic and partition in the
68 | * sequence then the last one will remain.
69 | */
70 | def apply(offsets: Seq[CommittableOffset]): CommittableOffsetBatch =
71 | if (offsets.nonEmpty) {
72 | val aggregatedOffsets = offsets.foldLeft(Map.empty[TopicPartition, Long]) { (acc, o) =>
73 | acc.updated(o.topicPartition, o.offset)
74 | }
75 | new CommittableOffsetBatch(aggregatedOffsets, offsets.head.commitCallback)
76 | } else {
77 | empty
78 | }
79 |
80 | /** Builds [[CommittableOffsetBatch]] list from offsets sequence by merging the offsets
81 | * that have the same commit callback. This will help when the committable offsets are
82 | * from different consumers.
83 | * {{{
84 | * CommittableOffsetBatch.mergeByCommitCallback(offsets)
85 | * }}}
86 | */
87 | def mergeByCommitCallback(committableOffsets: Seq[CommittableOffset]): List[CommittableOffsetBatch] = {
88 | if (committableOffsets.nonEmpty) {
89 | committableOffsets
90 | .groupBy(_.commitCallback)
91 | .values
92 | .map(CommittableOffsetBatch(_))
93 | .toList
94 | } else {
95 | List.empty
96 | }
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/CommittableOffsetBatch.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.common.TopicPartition
21 |
22 | /** Batch of Kafka offsets which can be committed together.
23 | * Can be built from an offsets sequence with the [[CommittableOffsetBatch#apply]] method.
24 | * You can also use the [[CommittableOffsetBatch#empty]] method to create an empty batch and
25 | * add offsets to it using the [[updated]] method.
26 | *
27 | * WARNING: Order of the offsets is important. Only the last added offset
28 | * for topic and partition will be committed to Kafka.
29 | *
30 | * @param offsets is the offsets batch for the provided topics and partitions.
31 | * Make sure that each of them was received from one [[KafkaConsumerObservable]].
32 | *
33 | * @param commitCallback is the callback used for the batched commit, realized as a closure
34 | * in the [[KafkaConsumerObservable]] context. This parameter is obtained from the last [[CommittableOffset]]
35 | * added to the batch.
36 | */
37 | final class CommittableOffsetBatch private[kafka] (val offsets: Map[TopicPartition, Long], commitCallback: Commit) {
38 |
39 | /** Synchronously commits [[offsets]] to Kafka
40 | */
41 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(offsets)
42 |
43 | /** Asynchronously commits [[offsets]] to Kafka
44 | */
45 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(offsets)
46 |
47 | /** Adds new [[CommittableOffset]] to batch. Added offset replaces previous one specified
48 | * for same topic and partition.
49 | */
50 | def updated(committableOffset: CommittableOffset): CommittableOffsetBatch =
51 | new CommittableOffsetBatch(
52 | offsets.updated(committableOffset.topicPartition, committableOffset.offset),
53 | committableOffset.commitCallback
54 | )
55 | }
56 |
57 | object CommittableOffsetBatch {
58 |
59 | /** Creates empty [[CommittableOffsetBatch]]. Can be used as neutral element in fold:
60 | * {{{
61 | * offsets.foldLeft(CommittableOffsetBatch.empty)(_ updated _)
62 | * }}}
63 | */
64 | val empty: CommittableOffsetBatch = new CommittableOffsetBatch(Map.empty, Commit.empty)
65 |
66 | /** Builds [[CommittableOffsetBatch]] from offsets sequence. Be careful with
67 | * sequence order. If there is more than one offset for a topic and partition in the
68 | * sequence then the last one will remain.
69 | */
70 | def apply(offsets: Seq[CommittableOffset]): CommittableOffsetBatch =
71 | if (offsets.nonEmpty) {
72 | val aggregatedOffsets = offsets.foldLeft(Map.empty[TopicPartition, Long]) { (acc, o) =>
73 | acc.updated(o.topicPartition, o.offset)
74 | }
75 | new CommittableOffsetBatch(aggregatedOffsets, offsets.head.commitCallback)
76 | } else {
77 | empty
78 | }
79 |
80 | /** Builds [[CommittableOffsetBatch]] list from offsets sequence by merging the offsets
81 | * that have the same commit callback. This will help when the committable offsets are
82 | * from different consumers.
83 | * {{{
84 | * CommittableOffsetBatch.mergeByCommitCallback(offsets)
85 | * }}}
86 | */
87 | def mergeByCommitCallback(committableOffsets: Seq[CommittableOffset]): List[CommittableOffsetBatch] = {
88 | if (committableOffsets.nonEmpty) {
89 | committableOffsets
90 | .groupBy(_.commitCallback)
91 | .values
92 | .map(CommittableOffsetBatch(_))
93 | .toList
94 | } else {
95 | List.empty
96 | }
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/main/scala/monix/kafka/Deserializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Deserializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the fully qualified class name of the Kafka `Deserializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Deserializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Deserializer`.
36 | */
37 | final case class Deserializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaDeserializer[A]],
40 | constructor: Deserializer.Constructor[A] = (d: Deserializer[A]) => d.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaDeserializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Deserializer {
48 |
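  /** Derives a [[Deserializer]] from any Kafka `Deserializer` available in
    * implicit scope. A hypothetical usage sketch (`MyType` and
    * `MyTypeDeserializer` are assumed names, not defined in this file):
    * {{{
    *   implicit val underlying: KafkaDeserializer[MyType] = new MyTypeDeserializer
    *   val derived: Deserializer[MyType] = implicitly[Deserializer[MyType]]
    * }}}
    */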
49 | implicit def fromKafkaDeserializer[A](implicit des: KafkaDeserializer[A]): Deserializer[A] =
50 | Deserializer[A](
51 | className = des.getClass.getName,
52 | classType = des.getClass,
53 | constructor = _ => des
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Deserializer`.
58 | */
59 | type Constructor[A] = (Deserializer[A]) => KafkaDeserializer[A]
60 |
61 | implicit val forStrings: Deserializer[String] =
62 | Deserializer[String](
63 | className = "org.apache.kafka.common.serialization.StringDeserializer",
64 | classType = classOf[StringDeserializer]
65 | )
66 |
67 | implicit val forByteArray: Deserializer[Array[Byte]] =
68 | Deserializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArrayDeserializer",
70 | classType = classOf[ByteArrayDeserializer]
71 | )
72 |
73 | implicit val forByteBuffer: Deserializer[ByteBuffer] =
74 | Deserializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferDeserializer",
76 | classType = classOf[ByteBufferDeserializer]
77 | )
78 |
79 | implicit val forBytes: Deserializer[Bytes] =
80 | Deserializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesDeserializer",
82 | classType = classOf[BytesDeserializer]
83 | )
84 |
85 | implicit val forJavaDouble: Deserializer[java.lang.Double] =
86 | Deserializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleDeserializer",
88 | classType = classOf[DoubleDeserializer]
89 | )
90 |
91 | implicit val forJavaInteger: Deserializer[java.lang.Integer] =
92 | Deserializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerDeserializer",
94 | classType = classOf[IntegerDeserializer]
95 | )
96 |
97 | implicit val forJavaLong: Deserializer[java.lang.Long] =
98 | Deserializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongDeserializer",
100 | classType = classOf[LongDeserializer]
101 | )
102 | }
103 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/Deserializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Deserializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the fully qualified class name of the Kafka `Deserializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Deserializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Deserializer`.
36 | */
37 | final case class Deserializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaDeserializer[A]],
40 | constructor: Deserializer.Constructor[A] = (d: Deserializer[A]) => d.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaDeserializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Deserializer {
48 |
49 | implicit def fromKafkaDeserializer[A](implicit des: KafkaDeserializer[A]): Deserializer[A] =
50 | Deserializer[A](
51 | className = des.getClass.getName,
52 | classType = des.getClass,
53 | constructor = _ => des
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Deserializer`.
58 | */
59 | type Constructor[A] = (Deserializer[A]) => KafkaDeserializer[A]
60 |
61 | implicit val forStrings: Deserializer[String] =
62 | Deserializer[String](
63 | className = "org.apache.kafka.common.serialization.StringDeserializer",
64 | classType = classOf[StringDeserializer]
65 | )
66 |
67 | implicit val forByteArray: Deserializer[Array[Byte]] =
68 | Deserializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArrayDeserializer",
70 | classType = classOf[ByteArrayDeserializer]
71 | )
72 |
73 | implicit val forByteBuffer: Deserializer[ByteBuffer] =
74 | Deserializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferDeserializer",
76 | classType = classOf[ByteBufferDeserializer]
77 | )
78 |
79 | implicit val forBytes: Deserializer[Bytes] =
80 | Deserializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesDeserializer",
82 | classType = classOf[BytesDeserializer]
83 | )
84 |
85 | implicit val forJavaDouble: Deserializer[java.lang.Double] =
86 | Deserializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleDeserializer",
88 | classType = classOf[DoubleDeserializer]
89 | )
90 |
91 | implicit val forJavaInteger: Deserializer[java.lang.Integer] =
92 | Deserializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerDeserializer",
94 | classType = classOf[IntegerDeserializer]
95 | )
96 |
97 | implicit val forJavaLong: Deserializer[java.lang.Long] =
98 | Deserializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongDeserializer",
100 | classType = classOf[LongDeserializer]
101 | )
102 | }
103 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/Deserializer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import java.nio.ByteBuffer
20 | import org.apache.kafka.common.serialization._
21 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
22 | import org.apache.kafka.common.utils.Bytes
23 |
24 | /** Wraps a Kafka `Deserializer`, provided for
25 | * convenience, since it can be implicitly fetched
26 | * from the context.
27 | *
28 | * @param className is the fully qualified class name of the Kafka `Deserializer`
29 | *
30 | * @param classType is the `java.lang.Class` for [[className]]
31 | *
32 | * @param constructor creates an instance of [[classType]].
33 | * This is defaulted with a `Deserializer.Constructor[A]` function that creates a
34 | * new instance using an assumed empty constructor.
35 | * Supplying this parameter allows for manual provision of the `Deserializer`.
36 | */
37 | final case class Deserializer[A](
38 | className: String,
39 | classType: Class[_ <: KafkaDeserializer[A]],
40 | constructor: Deserializer.Constructor[A] = (d: Deserializer[A]) => d.classType.getDeclaredConstructor().newInstance()) {
41 |
42 | /** Creates a new instance. */
43 | def create(): KafkaDeserializer[A] =
44 | constructor(this)
45 | }
46 |
47 | object Deserializer {
48 |
49 | implicit def fromKafkaDeserializer[A](implicit des: KafkaDeserializer[A]): Deserializer[A] =
50 | Deserializer[A](
51 | className = des.getClass.getName,
52 | classType = des.getClass,
53 | constructor = _ => des
54 | )
55 |
56 | /** Alias for the function that provides an instance of
57 | * the Kafka `Deserializer`.
58 | */
59 | type Constructor[A] = (Deserializer[A]) => KafkaDeserializer[A]
60 |
61 | implicit val forStrings: Deserializer[String] =
62 | Deserializer[String](
63 | className = "org.apache.kafka.common.serialization.StringDeserializer",
64 | classType = classOf[StringDeserializer]
65 | )
66 |
67 | implicit val forByteArray: Deserializer[Array[Byte]] =
68 | Deserializer[Array[Byte]](
69 | className = "org.apache.kafka.common.serialization.ByteArrayDeserializer",
70 | classType = classOf[ByteArrayDeserializer]
71 | )
72 |
73 | implicit val forByteBuffer: Deserializer[ByteBuffer] =
74 | Deserializer[ByteBuffer](
75 | className = "org.apache.kafka.common.serialization.ByteBufferDeserializer",
76 | classType = classOf[ByteBufferDeserializer]
77 | )
78 |
79 | implicit val forBytes: Deserializer[Bytes] =
80 | Deserializer[Bytes](
81 | className = "org.apache.kafka.common.serialization.BytesDeserializer",
82 | classType = classOf[BytesDeserializer]
83 | )
84 |
85 | implicit val forJavaDouble: Deserializer[java.lang.Double] =
86 | Deserializer[java.lang.Double](
87 | className = "org.apache.kafka.common.serialization.DoubleDeserializer",
88 | classType = classOf[DoubleDeserializer]
89 | )
90 |
91 | implicit val forJavaInteger: Deserializer[java.lang.Integer] =
92 | Deserializer[java.lang.Integer](
93 | className = "org.apache.kafka.common.serialization.IntegerDeserializer",
94 | classType = classOf[IntegerDeserializer]
95 | )
96 |
97 | implicit val forJavaLong: Deserializer[java.lang.Long] =
98 | Deserializer[java.lang.Long](
99 | className = "org.apache.kafka.common.serialization.LongDeserializer",
100 | classType = classOf[LongDeserializer]
101 | )
102 | }
103 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/MonixKafkaTopicRegexTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix.kafka
19 |
20 | import monix.eval.Task
21 | import monix.execution.Scheduler.Implicits.global
22 | import monix.kafka.config.AutoOffsetReset
23 | import monix.reactive.Observable
24 | import org.apache.kafka.clients.producer.ProducerRecord
25 | import org.scalatest.FunSuite
26 |
27 | import scala.collection.JavaConverters._
28 | import scala.concurrent.Await
29 | import scala.concurrent.duration._
30 |
31 | class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
32 | val topicsRegex = "monix-kafka-tests-.*".r
33 | val topicMatchingRegex = "monix-kafka-tests-anything"
34 |
35 | val producerCfg = KafkaProducerConfig.default.copy(
36 | bootstrapServers = List("127.0.0.1:6001"),
37 | clientId = "monix-kafka-1-0-producer-test"
38 | )
39 |
40 | val consumerCfg = KafkaConsumerConfig.default.copy(
41 | bootstrapServers = List("127.0.0.1:6001"),
42 | groupId = "kafka-tests",
43 | clientId = "monix-kafka-1-0-consumer-test",
44 | autoOffsetReset = AutoOffsetReset.Earliest
45 | )
46 |
47 | test("publish one message when subscribed to topics regex") {
48 | withRunningKafka {
49 |
50 | val producer = KafkaProducer[String, String](producerCfg, io)
51 | val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
52 | val consumer = Await.result(consumerTask.runToFuture, 60.seconds)
53 |
54 | try {
55 | // Publishing one message
56 | val send = producer.send(topicMatchingRegex, "my-message")
57 | Await.result(send.runToFuture, 30.seconds)
58 |
59 | val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
60 | assert(records === List("my-message"))
61 | } finally {
62 | Await.result(producer.close().runToFuture, Duration.Inf)
63 | consumer.close()
64 | }
65 | }
66 | }
67 |
68 | test("listen for one message when subscribed to topics regex") {
69 | withRunningKafka {
70 | val producer = KafkaProducer[String, String](producerCfg, io)
71 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)
72 | try {
73 | // Publishing one message
74 | val send = producer.send(topicMatchingRegex, "test-message")
75 | Await.result(send.runToFuture, 30.seconds)
76 |
77 | val first = consumer.take(1).map(_.value()).firstL
78 | val result = Await.result(first.runToFuture, 30.seconds)
79 | assert(result === "test-message")
80 | } finally {
81 | Await.result(producer.close().runToFuture, Duration.Inf)
82 | }
83 | }
84 | }
85 |
86 | test("full producer/consumer test when subscribed to topics regex") {
87 | withRunningKafka {
88 | val count = 10000
89 |
90 | val producer = KafkaProducerSink[String, String](producerCfg, io)
91 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count)
92 |
93 | val pushT = Observable
94 | .range(0, count)
95 | .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
96 | .bufferIntrospective(1024)
97 | .consumeWith(producer)
98 |
99 | val listT = consumer
100 | .map(_.value())
101 | .toListL
102 |
103 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
104 | assert(result.map(_.toInt).sum === (0 until count).sum)
105 | }
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/MonixKafkaTopicRegexTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix.kafka
19 |
20 | import monix.eval.Task
21 | import monix.execution.Scheduler.Implicits.global
22 | import monix.kafka.config.AutoOffsetReset
23 | import monix.reactive.Observable
24 | import org.apache.kafka.clients.producer.ProducerRecord
25 | import org.scalatest.FunSuite
26 |
27 | import scala.collection.JavaConverters._
28 | import scala.concurrent.Await
29 | import scala.concurrent.duration._
30 |
31 | class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
32 | val topicsRegex = "monix-kafka-tests-.*".r
33 | val topicMatchingRegex = "monix-kafka-tests-anything"
34 |
35 | val producerCfg = KafkaProducerConfig.default.copy(
36 | bootstrapServers = List("127.0.0.1:6001"),
37 | clientId = "monix-kafka-1-0-producer-test"
38 | )
39 |
40 | val consumerCfg = KafkaConsumerConfig.default.copy(
41 | bootstrapServers = List("127.0.0.1:6001"),
42 | groupId = "kafka-tests",
43 | clientId = "monix-kafka-1-0-consumer-test",
44 | autoOffsetReset = AutoOffsetReset.Earliest
45 | )
46 |
47 | test("publish one message when subscribed to topics regex") {
48 | withRunningKafka {
49 |
50 | val producer = KafkaProducer[String, String](producerCfg, io)
51 | val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
52 | val consumer = Await.result(consumerTask.runToFuture, 60.seconds)
53 |
54 | try {
55 | // Publishing one message
56 | val send = producer.send(topicMatchingRegex, "my-message")
57 | Await.result(send.runToFuture, 30.seconds)
58 |
59 | val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
60 | assert(records === List("my-message"))
61 | } finally {
62 | Await.result(producer.close().runToFuture, Duration.Inf)
63 | consumer.close()
64 | }
65 | }
66 | }
67 |
68 | test("listen for one message when subscribed to topics regex") {
69 |
70 | withRunningKafka {
71 | val producer = KafkaProducer[String, String](producerCfg, io)
72 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)
73 | try {
74 | // Publishing one message
75 | val send = producer.send(topicMatchingRegex, "test-message")
76 | Await.result(send.runToFuture, 30.seconds)
77 |
78 | val first = consumer.take(1).map(_.value()).firstL
79 | val result = Await.result(first.runToFuture, 30.seconds)
80 | assert(result === "test-message")
81 | } finally {
82 | Await.result(producer.close().runToFuture, Duration.Inf)
83 | }
84 | }
85 | }
86 |
87 | test("full producer/consumer test when subscribed to topics regex") {
88 | withRunningKafka {
89 | val count = 10000
90 |
91 | val producer = KafkaProducerSink[String, String](producerCfg, io)
92 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count)
93 |
94 | val pushT = Observable
95 | .range(0, count)
96 | .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
97 | .bufferIntrospective(1024)
98 | .consumeWith(producer)
99 |
100 | val listT = consumer
101 | .map(_.value())
102 | .toListL
103 |
104 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
105 | assert(result.map(_.toInt).sum === (0 until count).sum)
106 | }
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/MonixKafkaTopicRegexTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2019 by its authors. Some rights reserved.
3 | * See the project homepage at: https://github.com/monix/monix-kafka
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package monix.kafka
19 |
20 | import monix.eval.Task
21 | import monix.execution.Scheduler.Implicits.global
22 | import monix.kafka.config.AutoOffsetReset
23 | import monix.reactive.Observable
24 | import org.apache.kafka.clients.producer.ProducerRecord
25 | import org.scalatest.FunSuite
26 |
27 | import scala.collection.JavaConverters._
28 | import scala.concurrent.Await
29 | import scala.concurrent.duration._
30 |
31 | class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
32 | val topicsRegex = "monix-kafka-tests-.*".r
33 | val topicMatchingRegex = "monix-kafka-tests-anything"
34 |
35 | val producerCfg = KafkaProducerConfig.default.copy(
36 | bootstrapServers = List("127.0.0.1:6001"),
37 | clientId = "monix-kafka-1-0-producer-test"
38 | )
39 |
40 | val consumerCfg = KafkaConsumerConfig.default.copy(
41 | bootstrapServers = List("127.0.0.1:6001"),
42 | groupId = "kafka-tests",
43 | clientId = "monix-kafka-1-0-consumer-test",
44 | autoOffsetReset = AutoOffsetReset.Earliest
45 | )
46 |
47 | test("publish one message when subscribed to topics regex") {
48 |
49 | withRunningKafka {
50 | val producer = KafkaProducer[String, String](producerCfg, io)
51 | val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
52 | val consumer = Await.result(consumerTask.runToFuture, 60.seconds)
53 |
54 | try {
55 | // Publishing one message
56 | val send = producer.send(topicMatchingRegex, "my-message")
57 | Await.result(send.runToFuture, 30.seconds)
58 |
59 | val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
60 | assert(records === List("my-message"))
61 | } finally {
62 | Await.result(producer.close().runToFuture, Duration.Inf)
63 | consumer.close()
64 | }
65 | }
66 | }
67 |
68 | test("listen for one message when subscribed to topics regex") {
69 |
70 | withRunningKafka {
71 | val producer = KafkaProducer[String, String](producerCfg, io)
72 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)
73 | try {
74 | // Publishing one message
75 | val send = producer.send(topicMatchingRegex, "test-message")
76 | Await.result(send.runToFuture, 30.seconds)
77 |
78 | val first = consumer.take(1).map(_.value()).firstL
79 | val result = Await.result(first.runToFuture, 30.seconds)
80 | assert(result === "test-message")
81 | } finally {
82 | Await.result(producer.close().runToFuture, Duration.Inf)
83 | }
84 | }
85 | }
86 |
87 | test("full producer/consumer test when subscribed to topics regex") {
88 | withRunningKafka {
89 | val count = 10000
90 |
91 | val producer = KafkaProducerSink[String, String](producerCfg, io)
92 | val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex)
93 | .executeOn(io)
94 | .take(count)
95 |
96 | val pushT = Observable
97 | .range(0, count)
98 | .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
99 | .bufferIntrospective(1024)
100 | .consumeWith(producer)
101 |
102 | val listT = consumer
103 | .map(_.value())
104 | .toListL
105 |
106 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
107 | assert(result.map(_.toInt).sum === (0 until count).sum)
108 | }
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/main/scala/monix/kafka/CommittableOffsetBatch.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import org.apache.kafka.clients.consumer.OffsetCommitCallback
21 | import org.apache.kafka.common.TopicPartition
22 |
23 | /** Batch of Kafka offsets which can be committed together.
24 | * Can be built from a sequence of offsets with the [[CommittableOffsetBatch#apply]] method.
25 | * You can also use the [[CommittableOffsetBatch#empty]] method to create an empty batch and
26 | * add offsets to it using the [[updated]] method.
27 | *
28 | * WARNING: The order of the offsets is important. Only the last offset added
29 | * for each topic and partition will be committed to Kafka.
30 | *
31 | * @param offsets is the batch of offsets for the provided topics and partitions.
32 | * Make sure that each of them was received from one [[KafkaConsumerObservable]].
33 | *
34 | * @param commitCallback is the set of callbacks for batched commit realized as closure
35 | * in [[KafkaConsumerObservable]] context. This parameter is obtained from the last [[CommittableOffset]]
36 | * added to batch.
37 | */
38 | final class CommittableOffsetBatch private[kafka] (val offsets: Map[TopicPartition, Long], commitCallback: Commit) {
39 |
40 | /** Synchronously commits [[offsets]] to Kafka
41 | */
42 | def commitSync(): Task[Unit] = commitCallback.commitBatchSync(offsets)
43 |
44 | /** Asynchronously commits [[offsets]] to Kafka
45 | */
46 | def commitAsync(): Task[Unit] = commitCallback.commitBatchAsync(offsets)
47 |
48 | /** Asynchronously commits [[offsets]] to Kafka (note: the provided callback is not used by this overload).
49 | */
50 | def commitAsync(callback: OffsetCommitCallback): Task[Unit] = commitCallback.commitBatchAsync(offsets)
51 |
52 | /** Adds new [[CommittableOffset]] to batch. Added offset replaces previous one specified
53 | * for same topic and partition.
54 | */
55 | def updated(committableOffset: CommittableOffset): CommittableOffsetBatch =
56 | new CommittableOffsetBatch(
57 | offsets.updated(committableOffset.topicPartition, committableOffset.offset),
58 | committableOffset.commitCallback
59 | )
60 | }
61 |
62 | object CommittableOffsetBatch {
63 |
64 | /** Creates empty [[CommittableOffsetBatch]]. Can be used as neutral element in fold:
65 | * {{{
66 | * offsets.foldLeft(CommittableOffsetBatch.empty)(_ updated _)
67 | * }}}
68 | */
69 | val empty: CommittableOffsetBatch = new CommittableOffsetBatch(Map.empty, Commit.empty)
70 |
71 | /** Builds [[CommittableOffsetBatch]] from offsets sequence. Be careful with
72 | * sequence order. If there is more than one offset for a topic and partition in the
73 | * sequence then the last one will remain.
74 | */
75 | def apply(offsets: Seq[CommittableOffset]): CommittableOffsetBatch =
76 | if (offsets.nonEmpty) {
77 | val aggregatedOffsets = offsets.foldLeft(Map.empty[TopicPartition, Long]) { (acc, o) =>
78 | acc.updated(o.topicPartition, o.offset)
79 | }
80 | new CommittableOffsetBatch(aggregatedOffsets, offsets.head.commitCallback)
81 | } else {
82 | empty
83 | }
84 |
85 | /** Builds [[CommittableOffsetBatch]] list from offsets sequence by merging the offsets
86 | * that have the same commit callback. This will help when the committable offsets are
87 | * from different consumers.
88 | * {{{
89 | * CommittableOffsetBatch.mergeByCommitCallback(offsets)
90 | * }}}
91 | */
92 | def mergeByCommitCallback(committableOffsets: Seq[CommittableOffset]): List[CommittableOffsetBatch] = {
93 | if (committableOffsets.nonEmpty) {
94 | committableOffsets
95 | .groupBy(_.commitCallback)
96 | .values
97 | .map(CommittableOffsetBatch(_))
98 | .toList
99 | } else {
100 | List.empty
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/RELEASES.md:
--------------------------------------------------------------------------------
1 | ## Release Process
2 |
3 | This section is for maintainers.
4 |
5 | For the release process you need to activate Git signing, see
6 | [the tutorial on GitHub](https://help.github.com/articles/signing-commits-using-gpg/).
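
For example, assuming you already have a GPG key, you can point Git at it and
enable commit signing with:

```bash
git config --global user.signingkey "YOUR-KEY-ID"
git config --global commit.gpgsign true
```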
7 |
8 | Also add these to your `$HOME/.sbt/1.0/build.sbt`:
9 |
10 | ```scala
11 | credentials += Credentials(
12 | "Sonatype Nexus Repository Manager",
13 | "oss.sonatype.org",
14 | "USERNAME",
15 | "PASSWORD"
16 | )
17 | ```
18 |
19 | You might also need to configure the [sbt-pgp](http://www.scala-sbt.org/sbt-pgp/)
20 | plugin. Even if it's included in `plugins.sbt`, you might want to tune it according
21 | to your local setup. So if it doesn't work out, you can try playing with its settings.
22 |
23 | For example you could also try adding these in `$HOME/.sbt/1.0/build.sbt`:
24 |
25 | ```scala
26 | useGpg := true
27 | useGpgAgent := true
28 | ```
29 |
30 | Plus if you do that, you'd need to add the plugin globally as well, so
31 | according to the official docs you need to edit
32 | `$HOME/.sbt/1.0/plugins/gpg.sbt` and add something like:
33 |
34 | ```scala
35 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.1.0")
36 | ```
37 |
38 | You can then test that you can sign packages locally with:
39 |
40 | ```
41 | sbt +publishLocalSigned
42 | ```
43 |
44 | In order to release a new version, these commands need to be executed:
45 |
46 | ```
47 | VERSION="v0.15"
48 |
49 | git tag -s -m "Tagging $VERSION" $VERSION
50 |
51 | git verify-tag $VERSION
52 |
53 | git checkout $VERSION
54 |
55 | sbt release
56 |
57 | git push upstream $VERSION
58 | ```
59 |
60 | Please execute them one by one, and at each step verify that there haven't been any errors.
61 |
62 | ## Cryptographically Verifying Releases
63 |
64 | All release artifacts must be cryptographically signed by a GPG key from one of [the maintainers](https://github.com/monix/monix-kafka/graphs/contributors). Every release corresponds to a tag of the form `/v(\d+)\.(\d+)(\.(\d+))?` which is pushed to [the upstream Git repository](https://github.com/monix/monix-kafka), and that tag is always signed by the *same* key.
65 |
66 | To locally cryptographically verify the integrity of a release, you should start by verifying the tag itself:
67 |
68 | ```bash
69 | $ git verify-tag v0.15
70 | ```
71 |
72 | (replace `v0.15` with the version you're checking)
73 |
74 | The output should be something like this:
75 |
76 | ```
77 | gpg: Signature made Mon Jan 29 13:19:48 2018 EET
78 | gpg: using RSA key 971E5587E7EDA30AE2F0C230397C67E28DFD7BB4
79 | gpg: Good signature from "Alexandru Nedelcu " [ultimate]
80 | gpg: aka "[jpeg image of size 4363]" [ultimate]
81 | ```
82 |
83 | Note the "*using RSA key*" line, which shows the fingerprint of the signing key. You can also look at this tag on GitHub and, if you trust their servers, verify that it is linked to a profile you trust. An even better way of doing this is to visit [Keybase](https://keybase.io) and search for the last 8 characters of that fingerprint (the short key ID), since this can be done without trusting any third parties (or rather, without trusting any single third party).
84 |
85 | Once you've verified that the key matches someone you would expect to be releasing `monix-kafka` artifacts, you should import the key to pin it for subsequent verifications; note that only the last 8 characters (the short key ID) are needed:
86 |
87 | ```bash
88 | $ gpg --recv-keys 8DFD7BB4
89 | ```
90 |
91 | (replace those eight characters with the key ID from above)
92 |
93 | It's always a good exercise to take that primary key fingerprint (all 40 characters) and ensure that it matches the other key sources (e.g. Keybase). It is relatively easy to generate keys whose fingerprints collide on the final eight characters.
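
For example, to print the full fingerprint of the key you just imported (using
the key ID from above):

```bash
$ gpg --fingerprint 8DFD7BB4
```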
94 |
95 | Now that you've verified the signature of the tag and confirmed that it corresponds to an individual you would *expect* to be pushing `monix-kafka` releases, you can move on to verifying the artifacts themselves.
96 |
97 | ```bash
98 | sbt check-pgp-signatures
99 | ```
100 |
101 | You will need the [sbt-pgp](http://www.scala-sbt.org/sbt-pgp/index.html) plugin to run this command. It will grab all of the signatures for all of your dependencies and verify them. Each one should indicate either `[OK]` or `[UNTRUSTED(...)]`. Each `UNTRUSTED` artifact will list the key ID of the signing key, just as with the tag verification. Since we have already imported the key of the developer who signed the release tag, we should *definitely* see `[OK]` for the `monix-kafka` artifact:
102 |
103 | ```
104 | [info] io.monix : monix-kafka-11_2.12 : 0.15 : jar [OK]
105 | ```
106 |
107 | If you do see `UNTRUSTED` (which will happen if you don't import the key), it should look like the following:
108 |
109 | ```
110 | [info] io.monix : monix-kafka-11_2.12 : 0.14 : jar [UNTRUSTED(0x2bae5960)]
111 | ```
112 |
--------------------------------------------------------------------------------
/benchmarks/src/main/scala/monix/kafka/benchmarks/ConsumerBenchmark.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka.benchmarks
2 |
3 | import monix.kafka.config.ObservableCommitOrder.BeforeAck
4 | import monix.kafka.config.ObservableCommitType
5 |
6 | import java.util.concurrent.TimeUnit
7 | import monix.kafka.{KafkaConsumerObservable, KafkaProducerSink}
8 | import monix.reactive.Observable
9 | import org.apache.kafka.clients.producer.ProducerRecord
10 | import org.openjdk.jmh.annotations._
11 | import scala.concurrent.duration._
12 |
13 | @State(Scope.Thread)
14 | @BenchmarkMode(Array(Mode.Throughput))
15 | @OutputTimeUnit(TimeUnit.SECONDS)
16 | @Measurement(iterations = 1)
17 | @Warmup(iterations = 1)
18 | @Fork(1)
19 | @Threads(3)
20 | class ConsumerBenchmark extends MonixFixture {
21 |
22 | var totalRecords: Int = 1100
23 | val consumedRecords = 1000
24 | var maxPollRecords: Int = 1
25 |
26 | @Setup
27 | def setup(): Unit = {
28 | // preparing test data
29 | Observable
30 | .from(0 to totalRecords)
31 | .map(i => new ProducerRecord[Integer, Integer](monixTopic, i))
32 | .bufferTumbling(totalRecords)
33 | .consumeWith(KafkaProducerSink(producerConf.copy(monixSinkParallelism = 10), io))
34 | .runSyncUnsafe()
35 | }
36 |
37 | @Benchmark
38 | def monixAsyncAutoCommitHeartbeat15ms(): Unit = {
39 | val conf = consumerConf.value().copy(
40 | maxPollRecords = maxPollRecords,
41 | observableCommitType = ObservableCommitType.Async,
42 | observableCommitOrder = BeforeAck)
43 | .withPollHeartBeatRate(15.millis)
44 |
45 | KafkaConsumerObservable[Integer, Integer](conf, List(monixTopic))
46 | .take(consumedRecords)
47 | .headL
48 | .runSyncUnsafe()
49 | }
50 |
51 | @Benchmark
52 | def monixSyncAutoCommitHeartbeat15ms(): Unit = {
53 | val conf = consumerConf.value().copy(
54 | maxPollRecords = maxPollRecords,
55 | observableCommitType = ObservableCommitType.Sync,
56 | observableCommitOrder = BeforeAck)
57 | .withPollHeartBeatRate(15.millis)
58 |
59 | KafkaConsumerObservable[Integer, Integer](conf, List(monixTopic))
60 | .take(consumedRecords)
61 | .headL
62 | .runSyncUnsafe()
63 | }
64 |
65 | @Benchmark
66 | def monixManualCommitHeartbeat1ms(): Unit = {
67 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
68 | .withPollHeartBeatRate(1.millis)
69 |
70 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
71 | .mapEvalF(_.committableOffset.commitAsync())
72 | .take(consumedRecords)
73 | .headL
74 | .runSyncUnsafe()
75 | }
76 |
77 | @Benchmark
78 | def monixManualCommitHeartbeat10ms(): Unit = {
79 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
80 | .withPollHeartBeatRate(10.millis)
81 |
82 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
83 | .mapEvalF(_.committableOffset.commitAsync())
84 | .take(consumedRecords)
85 | .headL
86 | .runSyncUnsafe()
87 | }
88 |
89 | @Benchmark
90 | def monixManualCommitHeartbeat15ms(): Unit = {
91 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
92 | .withPollHeartBeatRate(15.millis)
93 |
94 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
95 | .mapEvalF(_.committableOffset.commitAsync())
96 | .take(consumedRecords)
97 | .headL
98 | .runSyncUnsafe()
99 | }
100 |
101 | @Benchmark
102 | def monixManualCommitHeartbeat50ms(): Unit = {
103 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
104 | .withPollHeartBeatRate(50.millis)
105 |
106 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
107 | .mapEvalF(_.committableOffset.commitAsync())
108 | .take(consumedRecords)
109 | .headL
110 | .runSyncUnsafe()
111 | }
112 |
113 | @Benchmark
114 | def monixManualCommitHeartbeat100ms(): Unit = {
115 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
116 | .withPollHeartBeatRate(100.millis)
117 |
118 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
119 | .mapEvalF(_.committableOffset.commitAsync())
120 | .take(consumedRecords)
121 | .headL
122 | .runSyncUnsafe()
123 | }
124 |
125 | @Benchmark
126 | def monixManualCommitHeartbeat1000ms(): Unit = {
127 | val conf = consumerConf.value().copy(maxPollRecords = maxPollRecords)
128 | .withPollHeartBeatRate(1000.millis)
129 |
130 | KafkaConsumerObservable.manualCommit[Integer, Integer](conf, List(monixTopic))
131 | .mapEvalF(_.committableOffset.commitAsync())
132 | .take(consumedRecords)
133 | .headL
134 | .runSyncUnsafe()
135 | }
136 |
137 |
138 | }
139 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/test/scala/monix/kafka/SerializationTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import java.util
4 |
5 | import monix.eval.Task
6 | import monix.kafka.config.AutoOffsetReset
7 | import monix.reactive.Observable
8 | import org.apache.kafka.clients.producer.ProducerRecord
9 | import org.scalatest.FunSuite
10 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
11 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
12 |
13 | import scala.concurrent.duration._
14 | import scala.concurrent.Await
15 | import monix.execution.Scheduler.Implicits.global
16 | import monix.execution.exceptions.DummyException
17 |
18 | class SerializationTest extends FunSuite with KafkaTestKit {
19 |
20 | val producerCfg = KafkaProducerConfig.default.copy(
21 | bootstrapServers = List("127.0.0.1:6001"),
22 | clientId = "monix-kafka-1-0-serialization-test"
23 | )
24 |
25 | val consumerCfg = KafkaConsumerConfig.default.copy(
26 | bootstrapServers = List("127.0.0.1:6001"),
27 | groupId = "kafka-tests",
28 | clientId = "monix-kafka-1-0-serialization-test",
29 | autoOffsetReset = AutoOffsetReset.Earliest
30 | )
31 |
32 | test("serialization/deserialization using kafka.common.serialization") {
33 | withRunningKafka {
34 | val topicName = "monix-kafka-serialization-tests"
35 | val count = 10000
36 |
37 | implicit val serializer: KafkaSerializer[A] = new ASerializer
38 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
39 |
40 | val producer = KafkaProducerSink[String, A](producerCfg, io)
41 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count)
42 |
43 | val pushT = Observable
44 | .range(0, count)
45 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
46 | .bufferIntrospective(1024)
47 | .consumeWith(producer)
48 |
49 | val listT = consumer
50 | .map(_.value())
51 | .toListL
52 |
53 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
54 | assert(result.map(_.value.toInt).sum === (0 until count).sum)
55 | }
56 | }
57 |
58 | test("allow to fail the stream if serialization throws") {
59 | withRunningKafka {
60 | val topicName = "monix-kafka-serialization-failing-tests"
61 | val dummy = DummyException("boom")
62 |
63 | implicit val serializer: KafkaSerializer[A] = new AFailingSerializer
64 |
65 | val producer = KafkaProducerSink[String, A](producerCfg, io, (_: Throwable) => Task.raiseError(dummy))
66 |
67 | val pushT = Observable
68 | .evalOnce(new ProducerRecord(topicName, "obs", A(1.toString)))
69 | .bufferIntrospective(1024)
70 | .consumeWith(producer)
71 |
72 | assertThrows[DummyException] {
73 | Await.result(pushT.runToFuture, 60.seconds)
74 | }
75 | }
76 | }
77 |
78 | test("allow to recover from serialization errors") {
79 | withRunningKafka {
80 | val topicName = "monix-kafka-serialization-continuing-tests"
81 | val count = 100
82 |
83 | implicit val serializer: KafkaSerializer[A] = new AHalfFailingSerializer
84 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
85 |
86 | val producer = KafkaProducerSink[String, A](producerCfg, io)
87 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count / 2)
88 |
89 | val pushT = Observable
90 | .range(0, count)
91 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
92 | .bufferIntrospective(1024)
93 | .consumeWith(producer)
94 |
95 | val listT = consumer
96 | .map(_.value())
97 | .toListL
98 |
99 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 70.seconds)
100 | assert(result.map(_.value.toInt).sum === (0 until count).filter(_ % 2 == 0).sum)
101 | }
102 | }
103 |
104 | }
105 |
106 | case class A(value: String)
107 |
108 | class ASerializer extends KafkaSerializer[A] {
109 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
110 |
111 | override def serialize(topic: String, data: A): Array[Byte] =
112 | if (data == null) null else data.value.getBytes
113 |
114 | override def close(): Unit = ()
115 | }
116 |
117 | class ADeserializer extends KafkaDeserializer[A] {
118 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
119 |
120 | override def deserialize(topic: String, data: Array[Byte]): A = if (data == null) null else A(new String(data))
121 |
122 | override def close(): Unit = ()
123 | }
124 |
125 | class AFailingSerializer extends ASerializer {
126 | override def serialize(topic: String, data: A): Array[Byte] = throw new RuntimeException("fail")
127 | }
128 |
129 | class AHalfFailingSerializer extends ASerializer {
130 |
131 | override def serialize(topic: String, data: A): Array[Byte] = {
132 | if (data.value.toInt % 2 == 0) super.serialize(topic, data)
133 | else throw new RuntimeException("fail")
134 | }
135 | }
136 |
--------------------------------------------------------------------------------
/kafka-0.10.x/src/test/scala/monix/kafka/SerializationTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import java.util
4 |
5 | import monix.eval.Task
6 | import monix.kafka.config.AutoOffsetReset
7 | import monix.reactive.Observable
8 | import org.apache.kafka.clients.producer.ProducerRecord
9 | import org.scalatest.FunSuite
10 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
11 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
12 |
13 | import scala.concurrent.duration._
14 | import scala.concurrent.Await
15 | import monix.execution.Scheduler.Implicits.global
16 | import monix.execution.exceptions.DummyException
17 |
18 | class SerializationTest extends FunSuite with KafkaTestKit {
19 |
20 | val producerCfg = KafkaProducerConfig.default.copy(
21 | bootstrapServers = List("127.0.0.1:6001"),
22 | clientId = "monix-kafka-1-0-serialization-test"
23 | )
24 |
25 | val consumerCfg = KafkaConsumerConfig.default.copy(
26 | bootstrapServers = List("127.0.0.1:6001"),
27 | groupId = "kafka-tests",
28 | clientId = "monix-kafka-1-0-serialization-test",
29 | autoOffsetReset = AutoOffsetReset.Earliest
30 | )
31 |
32 | test("serialization/deserialization using kafka.common.serialization") {
33 | withRunningKafka {
34 | val topicName = "monix-kafka-serialization-tests"
35 | val count = 10000
36 |
37 | implicit val serializer: KafkaSerializer[A] = new ASerializer
38 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
39 |
40 | val producer = KafkaProducerSink[String, A](producerCfg, io)
41 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count)
42 |
43 | val pushT = Observable
44 | .range(0, count)
45 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
46 | .bufferIntrospective(1024)
47 | .consumeWith(producer)
48 |
49 | val listT = consumer
50 | .map(_.value())
51 | .toListL
52 |
53 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
54 | assert(result.map(_.value.toInt).sum === (0 until count).sum)
55 | }
56 | }
57 |
58 | test("allow to fail the stream if serialization throws") {
59 | withRunningKafka {
60 | val topicName = "monix-kafka-serialization-failing-tests"
61 | val dummy = DummyException("boom")
62 |
63 | implicit val serializer: KafkaSerializer[A] = new AFailingSerializer
64 |
65 | val producer = KafkaProducerSink[String, A](producerCfg, io, (_: Throwable) => Task.raiseError(dummy))
66 |
67 | val pushT = Observable
68 | .evalOnce(new ProducerRecord(topicName, "obs", A(1.toString)))
69 | .bufferIntrospective(1024)
70 | .consumeWith(producer)
71 |
72 | assertThrows[DummyException] {
73 | Await.result(pushT.runToFuture, 60.seconds)
74 | }
75 | }
76 | }
77 |
78 | test("allow to recover from serialization errors") {
79 | withRunningKafka {
80 | val topicName = "monix-kafka-serialization-continuing-tests"
81 | val count = 100
82 |
83 | implicit val serializer: KafkaSerializer[A] = new AHalfFailingSerializer
84 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
85 |
86 | val producer = KafkaProducerSink[String, A](producerCfg, io)
87 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count / 2)
88 |
89 | val pushT = Observable
90 | .range(0, count)
91 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
92 | .bufferIntrospective(1024)
93 | .consumeWith(producer)
94 |
95 | val listT = consumer
96 | .map(_.value())
97 | .toListL
98 |
99 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 70.seconds)
100 | assert(result.map(_.value.toInt).sum === (0 until count).filter(_ % 2 == 0).sum)
101 | }
102 | }
103 |
104 | }
105 |
106 | case class A(value: String)
107 |
108 | class ASerializer extends KafkaSerializer[A] {
109 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
110 |
111 | override def serialize(topic: String, data: A): Array[Byte] =
112 | if (data == null) null else data.value.getBytes
113 |
114 | override def close(): Unit = ()
115 | }
116 |
117 | class ADeserializer extends KafkaDeserializer[A] {
118 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
119 |
120 | override def deserialize(topic: String, data: Array[Byte]): A = if (data == null) null else A(new String(data))
121 |
122 | override def close(): Unit = ()
123 | }
124 |
125 | class AFailingSerializer extends ASerializer {
126 | override def serialize(topic: String, data: A): Array[Byte] = throw new RuntimeException("fail")
127 | }
128 |
129 | class AHalfFailingSerializer extends ASerializer {
130 |
131 | override def serialize(topic: String, data: A): Array[Byte] = {
132 | if (data.value.toInt % 2 == 0) super.serialize(topic, data)
133 | else throw new RuntimeException("fail")
134 | }
135 | }
136 |
--------------------------------------------------------------------------------
/kafka-0.11.x/src/test/scala/monix/kafka/SerializationTest.scala:
--------------------------------------------------------------------------------
1 | package monix.kafka
2 |
3 | import java.util
4 |
5 | import monix.eval.Task
6 | import monix.kafka.config.AutoOffsetReset
7 | import monix.reactive.Observable
8 | import org.apache.kafka.clients.producer.ProducerRecord
9 | import org.scalatest.FunSuite
10 | import org.apache.kafka.common.serialization.{Serializer => KafkaSerializer}
11 | import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer}
12 |
13 | import scala.concurrent.duration._
14 | import scala.concurrent.Await
15 | import monix.execution.Scheduler.Implicits.global
16 | import monix.execution.exceptions.DummyException
17 |
18 | class SerializationTest extends FunSuite with KafkaTestKit {
19 |
20 | val producerCfg = KafkaProducerConfig.default.copy(
21 | bootstrapServers = List("127.0.0.1:6001"),
22 | clientId = "monix-kafka-1-0-serialization-test"
23 | )
24 |
25 | val consumerCfg = KafkaConsumerConfig.default.copy(
26 | bootstrapServers = List("127.0.0.1:6001"),
27 | groupId = "kafka-tests",
28 | clientId = "monix-kafka-1-0-serialization-test",
29 | autoOffsetReset = AutoOffsetReset.Earliest
30 | )
31 |
32 | test("serialization/deserialization using kafka.common.serialization") {
33 | withRunningKafka {
34 | val topicName = "monix-kafka-serialization-tests"
35 | val count = 10000
36 |
37 | implicit val serializer: KafkaSerializer[A] = new ASerializer
38 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
39 |
40 | val producer = KafkaProducerSink[String, A](producerCfg, io)
41 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count)
42 |
43 | val pushT = Observable
44 | .range(0, count)
45 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
46 | .bufferIntrospective(1024)
47 | .consumeWith(producer)
48 |
49 | val listT = consumer
50 | .map(_.value())
51 | .toListL
52 |
53 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
54 | assert(result.map(_.value.toInt).sum === (0 until count).sum)
55 | }
56 | }
57 |
58 | test("allow to fail the stream if serialization throws") {
59 | withRunningKafka {
60 | val topicName = "monix-kafka-serialization-failing-tests"
61 | val dummy = DummyException("boom")
62 |
63 | implicit val serializer: KafkaSerializer[A] = new AFailingSerializer
64 |
65 | val producer = KafkaProducerSink[String, A](producerCfg, io, (_: Throwable) => Task.raiseError(dummy))
66 |
67 | val pushT = Observable
68 | .evalOnce(new ProducerRecord(topicName, "obs", A(1.toString)))
69 | .bufferIntrospective(1024)
70 | .consumeWith(producer)
71 |
72 | assertThrows[DummyException] {
73 | Await.result(pushT.runToFuture, 60.seconds)
74 | }
75 | }
76 | }
77 |
78 | test("allow to recover from serialization errors") {
79 | withRunningKafka {
80 | val topicName = "monix-kafka-serialization-continuing-tests"
81 | val count = 100
82 |
83 | implicit val serializer: KafkaSerializer[A] = new AHalfFailingSerializer
84 | implicit val deserializer: KafkaDeserializer[A] = new ADeserializer
85 |
86 | val producer = KafkaProducerSink[String, A](producerCfg, io)
87 | val consumer = KafkaConsumerObservable[String, A](consumerCfg, List(topicName)).executeOn(io).take(count / 2)
88 |
89 | val pushT = Observable
90 | .range(0, count)
91 | .map(msg => new ProducerRecord(topicName, "obs", A(msg.toString)))
92 | .bufferIntrospective(1024)
93 | .consumeWith(producer)
94 |
95 | val listT = consumer
96 | .map(_.value())
97 | .toListL
98 |
99 | val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 70.seconds)
100 | assert(result.map(_.value.toInt).sum === (0 until count).filter(_ % 2 == 0).sum)
101 | }
102 | }
103 |
104 | }
105 |
106 | case class A(value: String)
107 |
108 | class ASerializer extends KafkaSerializer[A] {
109 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
110 |
111 | override def serialize(topic: String, data: A): Array[Byte] =
112 | if (data == null) null else data.value.getBytes
113 |
114 | override def close(): Unit = ()
115 | }
116 |
117 | class ADeserializer extends KafkaDeserializer[A] {
118 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
119 |
120 | override def deserialize(topic: String, data: Array[Byte]): A = if (data == null) null else A(new String(data))
121 |
122 | override def close(): Unit = ()
123 | }
124 |
125 | class AFailingSerializer extends ASerializer {
126 | override def serialize(topic: String, data: A): Array[Byte] = throw new RuntimeException("fail")
127 | }
128 |
129 | class AHalfFailingSerializer extends ASerializer {
130 |
131 | override def serialize(topic: String, data: A): Array[Byte] = {
132 | if (data.value.toInt % 2 == 0) super.serialize(topic, data)
133 | else throw new RuntimeException("fail")
134 | }
135 | }
136 |
--------------------------------------------------------------------------------
/kafka-1.0.x/src/main/scala/monix/kafka/KafkaConsumerObservableAutoCommit.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014-2022 by The Monix Project Developers.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package monix.kafka
18 |
19 | import monix.eval.Task
20 | import monix.execution.Ack.Stop
21 | import monix.execution.cancelables.BooleanCancelable
22 | import monix.execution.{Ack, Callback}
23 | import monix.kafka.config.ObservableCommitType
24 | import monix.reactive.Observer
25 | import monix.reactive.observers.Subscriber
26 | import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecord}
27 |
28 | import scala.jdk.CollectionConverters._
29 | import scala.concurrent.{blocking, Future}
30 | import scala.util.control.NonFatal
31 | import scala.util.{Failure, Success}
32 |
33 | /** KafkaConsumerObservable implementation which commits offsets itself.
34 | */
35 | final class KafkaConsumerObservableAutoCommit[K, V] private[kafka] (
36 | override protected val config: KafkaConsumerConfig,
37 | override protected val consumerT: Task[Consumer[K, V]])
38 | extends KafkaConsumerObservable[K, V, ConsumerRecord[K, V]] {
39 |
40 | /* Based on the [[KafkaConsumerConfig.observableCommitType]] it
41 | * chooses to commit the offsets in the consumer, by doing either
42 | * a `commitSync` or a `commitAsync`.
43 | *
44 | * MUST BE synchronized by `consumer`.
45 | */
46 | private def consumerCommit(consumer: Consumer[K, V]): Unit =
47 | config.observableCommitType match {
48 | case ObservableCommitType.Sync =>
49 | blocking(consumer.commitSync())
50 | case ObservableCommitType.Async =>
51 | blocking(consumer.commitAsync())
52 | }
53 |
54 | // Caching value to save CPU cycles
55 | private val pollTimeoutMillis = config.fetchMaxWaitTime.toMillis
56 | // Boolean value indicating that we should trigger a commit before downstream ack
57 | private val shouldCommitBefore = !config.enableAutoCommit && config.observableCommitOrder.isBefore
58 | // Boolean value indicating that we should trigger a commit after downstream ack
59 | private val shouldCommitAfter = !config.enableAutoCommit && config.observableCommitOrder.isAfter
60 |
61 | override protected def ackTask(consumer: Consumer[K, V], out: Subscriber[ConsumerRecord[K, V]]): Task[Ack] =
62 | Task.create { (scheduler, cb) =>
63 | implicit val s = scheduler
64 | val asyncCb = Callback.forked(cb)
65 | val cancelable = BooleanCancelable()
66 |
67 | // Forced asynchronous boundary (on the I/O scheduler)
68 | s.execute { () =>
69 | val ackFuture =
70 | try consumer.synchronized {
71 | val assignment = consumer.assignment()
72 | if (cancelable.isCanceled) Stop
73 | else {
74 | consumer.resume(assignment)
75 | val next = blocking(consumer.poll(pollTimeoutMillis))
76 | consumer.pause(assignment)
77 | if (shouldCommitBefore) consumerCommit(consumer)
78 | // Feeding the observer happens on the Subscriber's scheduler
79 | // if any asynchronous boundaries happen
80 | isAcked = false
81 | Observer.feed(out, next.asScala)(out.scheduler)
82 | }
83 | } catch {
84 | case NonFatal(ex) =>
85 | Future.failed(ex)
86 | }
87 |
88 | ackFuture.syncOnComplete {
89 | case Success(ack) =>
90 | isAcked = true
91 | // The `streamErrors` flag protects against contract violations
92 | // (i.e. onSuccess/onError should happen only once).
93 | // Not really required, but we don't want to depend on the
94 | // scheduler implementation.
95 | var streamErrors = true
96 | try consumer.synchronized {
97 | // In case the task has been cancelled, there's no point
98 | // in continuing to do anything else
99 | if (cancelable.isCanceled) {
100 | streamErrors = false
101 | asyncCb.onSuccess(Stop)
102 | } else {
103 | if (shouldCommitAfter) consumerCommit(consumer)
104 | streamErrors = false
105 | asyncCb.onSuccess(ack)
106 | }
107 | } catch {
108 | case NonFatal(ex) =>
109 | if (streamErrors) asyncCb.onError(ex)
110 | else s.reportFailure(ex)
111 | }
112 |
113 | case Failure(ex) =>
114 | isAcked = true
115 | asyncCb.onError(ex)
116 | }
117 | }
118 | cancelable
119 | }
120 | }
121 |
--------------------------------------------------------------------------------