├── .fossa.yml ├── .git-blame-ignore-revs ├── .github └── workflows │ ├── check-build-test.yml │ ├── dependency-submission.yml │ ├── fossa.yml │ ├── link-validator.yml │ └── release.yml ├── .gitignore ├── .scala-steward.conf ├── .scalafmt.conf ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── RELEASING.md ├── benchmarks ├── LICENSE └── src │ ├── it │ └── scala │ │ └── akka │ │ └── kafka │ │ └── benchmarks │ │ ├── AlpakkaCommittableProducer.scala │ │ ├── BatchedConsumer.scala │ │ ├── Benchmarks.scala │ │ ├── NoCommitBackpressure.scala │ │ ├── Producer.scala │ │ ├── SpecBase.scala │ │ └── Transactions.scala │ └── main │ ├── resources │ ├── logback.xml │ └── reference.conf │ └── scala │ └── akka │ └── kafka │ └── benchmarks │ ├── AlpakkaCommittableSinkFixtures.scala │ ├── FixtureGen.scala │ ├── InflightMetrics.scala │ ├── KafkaConsumerBenchmarks.scala │ ├── KafkaConsumerFixtureGen.scala │ ├── KafkaProducerBenchmarks.scala │ ├── KafkaProducerFixtureGen.scala │ ├── KafkaTransactionBenchmarks.scala │ ├── KafkaTransactionFixtureGen.scala │ ├── PerfFixtureHelpers.scala │ ├── ReactiveKafkaConsumerBenchmarks.scala │ ├── ReactiveKafkaConsumerFixtures.scala │ ├── ReactiveKafkaProducerBenchmarks.scala │ ├── ReactiveKafkaProducerFixtures.scala │ ├── ReactiveKafkaTransactionBenchmarks.scala │ ├── ReactiveKafkaTransactionFixtures.scala │ ├── Timed.scala │ └── app │ └── RunTestCommand.scala ├── build.sbt ├── cluster-sharding └── src │ └── main │ └── scala │ └── akka │ └── kafka │ └── cluster │ └── sharding │ └── KafkaClusterSharding.scala ├── core └── src │ └── main │ ├── mima-filters │ ├── 0.20.backwards.excludes │ ├── 0.22.backwards.excludes │ ├── 1.0-M1.backwards.excludes │ ├── 1.0-RC1.backwards.excludes │ ├── 1.0.4.backwards.excludes │ ├── 1.0.x.backwards.excludes │ ├── 1.1.0.backwards.excludes │ │ ├── PR-986-partittionAssignmentHandler-onLost.excludes │ │ ├── PR930-partitioned-transactions-source.excludes │ │ ├── PR931-committer-type-bounds.excludes │ │ ├── PR945-remove-deprecations.excludes │ │ ├── PR949-partition-assignment-handler.excludes │ │ └── PR959-committable-deprecations.excludes │ ├── 1.1.x.backwards.excludes │ ├── 2.0.1.backwards.excludes │ │ └── PR1033-widen-type-for-committablebatch-factory-method.excludes │ ├── 2.0.2.backwards.excludes │ ├── 2.1.0-M1.backwards.excludes │ │ ├── PR1262-committables-used-by-commit-when.excludes │ │ └── PR1299-consumer-settings-constructor.excludes │ ├── 4.0.0.backwards.excludes │ └── 5.0.0.backwards.excludes │ ├── resources │ └── reference.conf │ └── scala │ └── akka │ └── kafka │ ├── CommitTimeoutException.scala │ ├── CommitterSettings.scala │ ├── ConnectionCheckerSettings.scala │ ├── ConsumerFailed.scala │ ├── ConsumerMessage.scala │ ├── ConsumerSettings.scala │ ├── KafkaConnectionFailed.scala │ ├── KafkaConsumerActor.scala │ ├── Metadata.scala │ ├── OffsetResetProtectionSettings.scala │ ├── ProducerMessage.scala │ ├── ProducerSettings.scala │ ├── RestrictedConsumer.scala │ ├── Subscriptions.scala │ ├── internal │ ├── BaseSingleSourceLogic.scala │ ├── CommitCollectorStage.scala │ ├── CommitObservationLogic.scala │ ├── CommitTrigger.scala │ ├── CommittableSources.scala │ ├── CommittingProducerSinkStage.scala │ ├── ConfigSettings.scala │ ├── ConnectionChecker.scala │ ├── ConsumerProgressTracking.scala │ ├── ConsumerResetProtection.scala │ ├── ControlImplementations.scala │ ├── DefaultProducerStage.scala │ ├── DeferredProducer.scala │ ├── ExternalSingleSourceLogic.scala │ ├── KafkaConsumerActor.scala │ ├── KafkaSourceStage.scala │ ├── LoggingWithId.scala │ 
├── MessageBuilder.scala │ ├── PartitionAssignmentHelpers.scala │ ├── PlainSources.scala │ ├── ProducerStage.scala │ ├── SingleSourceLogic.scala │ ├── SourceLogicBuffer.scala │ ├── SourceLogicSubscription.scala │ ├── SubSourceLogic.scala │ ├── TransactionalProducerStage.scala │ └── TransactionalSources.scala │ ├── javadsl │ ├── Committer.scala │ ├── Consumer.scala │ ├── DiscoverySupport.scala │ ├── MetadataClient.scala │ ├── PartitionAssignmentHandler.scala │ ├── Producer.scala │ ├── SendProducer.scala │ └── Transactional.scala │ └── scaladsl │ ├── Committer.scala │ ├── Consumer.scala │ ├── DiscoverySupport.scala │ ├── MetadataClient.scala │ ├── PartitionAssignmentHandler.scala │ ├── Producer.scala │ ├── SendProducer.scala │ └── Transactional.scala ├── docs ├── LICENSE ├── release-train-issue-template.md └── src │ └── main │ └── paradox │ ├── .htaccess │ ├── _template │ └── projectSpecificFooter.st │ ├── assets │ └── js │ │ └── warnOldVersion.js │ ├── atleastonce.md │ ├── cluster-sharding.md │ ├── consumer-metadata.md │ ├── consumer-rebalance.md │ ├── consumer.md │ ├── debugging.md │ ├── discovery.md │ ├── errorhandling.md │ ├── home.md │ ├── images │ ├── akka-alpakka-reverse.svg │ └── alpakka-kafka-stream-trace.png │ ├── index.md │ ├── producer.md │ ├── production.md │ ├── release-notes │ ├── 1.0.x.md │ ├── 1.1.x.md │ ├── 2.0.x.md │ ├── 2.1.x.md │ ├── 3.0.x.md │ ├── 4.0.x.md │ └── index.md │ ├── send-producer.md │ ├── serialization.md │ ├── snapshots.md │ ├── subscription.md │ ├── testing-testcontainers.md │ ├── testing.md │ └── transactions.md ├── project ├── AutomaticModuleName.scala ├── PackagingTypePlugin.scala ├── Publish.scala ├── VersionGenerator.scala ├── build.properties ├── plugins.sbt └── project-info.conf ├── scripts ├── cat-log.sh ├── create-release-issue.sh └── link-validator.conf ├── testkit └── src │ └── main │ ├── java │ └── akka │ │ └── kafka │ │ └── testkit │ │ ├── internal │ │ ├── AlpakkaKafkaContainer.java │ │ ├── KafkaContainerCluster.java │ │ └── SchemaRegistryContainer.java │ │ └── javadsl │ │ ├── BaseKafkaTest.java │ │ ├── KafkaJunit4Test.java │ │ ├── KafkaTest.java │ │ ├── TestcontainersKafkaJunit4Test.java │ │ └── TestcontainersKafkaTest.java │ ├── mima-filters │ ├── 2.0.5.backwards.excludes │ │ ├── PR1229-drop-embedded-kafka.excludes │ │ └── PR1235-start_stop_kafka_process.excludes │ ├── 2.0.x.backwards.excludes │ ├── 2.1.0-M1.backwards.excludes │ │ ├── PR1281-testcontainers-logs.excludes │ │ └── PR1287-testcontainer-docker-images.excludes │ └── 3.0.1.backwards.excludes │ │ └── PR1489.excludes │ ├── resources │ └── reference.conf │ └── scala │ └── akka │ └── kafka │ └── testkit │ ├── ConsumerResultFactory.scala │ ├── KafkaTestkitSettings.scala │ ├── KafkaTestkitTestcontainersSettings.scala │ ├── ProducerResultFactory.scala │ ├── internal │ ├── KafkaTestKit.scala │ ├── KafkaTestKitChecks.scala │ ├── TestFrameworkInterface.scala │ └── TestcontainersKafka.scala │ ├── javadsl │ └── ConsumerControlFactory.scala │ └── scaladsl │ ├── ConsumerControlFactory.scala │ ├── KafkaSpec.scala │ ├── ScalatestKafkaSpec.scala │ ├── TestcontainersKafkaLike.scala │ └── TestcontainersKafkaPerClassLike.scala └── tests ├── LICENSE └── src ├── it ├── resources │ ├── logback-test.xml │ └── reference.conf └── scala │ └── akka │ └── kafka │ ├── IntegrationTests.scala │ ├── PartitionedSourceFailoverSpec.scala │ ├── PlainSourceFailoverSpec.scala │ ├── TransactionsPartitionedSourceSpec.scala │ └── TransactionsSourceSpec.scala ├── main ├── proto │ └── order.proto └── scala │ └── akka │ └── 
kafka │ └── KafkaPorts.scala └── test ├── java └── docs │ └── javadsl │ ├── AssignmentTest.java │ ├── AtLeastOnceTest.java │ ├── ClusterShardingExample.java │ ├── ConsumerExampleTest.java │ ├── ConsumerSettingsTest.java │ ├── FetchMetadataTest.java │ ├── MetadataClientTest.java │ ├── ProducerSettingsTest.java │ ├── ProducerTest.java │ ├── SampleData.java │ ├── SchemaRegistrySerializationTest.java │ ├── SendProducerTest.java │ ├── SerializationTest.java │ ├── TestkitSamplesTest.java │ ├── TestkitTestcontainersTest.java │ ├── TransactionsExampleTest.java │ └── proto │ └── OrderMessages.java ├── resources ├── application.conf └── logback-test.xml └── scala ├── akka └── kafka │ ├── ConfigSettingsSpec.scala │ ├── ConsumerSettingsSpec.scala │ ├── ProducerSettingsSpec.scala │ ├── Repeated.scala │ ├── TransactionsOps.scala │ ├── internal │ ├── CommitCollectorStageSpec.scala │ ├── CommittingProducerSinkSpec.scala │ ├── CommittingWithMockSpec.scala │ ├── ConnectionCheckerSpec.scala │ ├── ConsumerDummy.scala │ ├── ConsumerMock.scala │ ├── ConsumerProgressTrackingSpec.scala │ ├── ConsumerResetProtectionSpec.scala │ ├── ConsumerSpec.scala │ ├── EnhancedConfigSpec.scala │ ├── OffsetAggregationSpec.scala │ ├── PartitionedSourceSpec.scala │ ├── ProducerSpec.scala │ └── SubscriptionsSpec.scala │ ├── javadsl │ └── ControlSpec.scala │ ├── scaladsl │ ├── CommittableSinkSpec.scala │ ├── CommittingSpec.scala │ ├── ConnectionCheckerSpec.scala │ ├── ControlSpec.scala │ ├── IntegrationSpec.scala │ ├── MetadataClientSpec.scala │ ├── MisconfiguredConsumerSpec.scala │ ├── MisconfiguredProducerSpec.scala │ ├── MultiConsumerSpec.scala │ ├── PartitionedSourcesSpec.scala │ ├── RebalanceExtSpec.scala │ ├── RebalanceSpec.scala │ ├── ReconnectSpec.scala │ ├── RetentionPeriodSpec.scala │ ├── SpecBase.scala │ ├── TimestampSpec.scala │ └── TransactionsSpec.scala │ └── tests │ ├── CapturingAppender.scala │ ├── LogbackUtil.scala │ ├── javadsl │ ├── LogCapturingExtension.scala │ └── LogCapturingJunit4.scala │ └── scaladsl │ └── LogCapturing.scala └── docs ├── sample └── order │ ├── Order.scala │ └── OrderProto.scala └── scaladsl ├── AssignmentSpec.scala ├── AtLeastOnce.scala ├── ClusterShardingExample.scala ├── ConsumerExample.scala ├── DocsSpecBase.scala ├── FetchMetadata.scala ├── PartitionExamples.scala ├── ProducerExample.scala ├── SchemaRegistrySerializationSpec.scala ├── SendProducerSpec.scala ├── SerializationSpec.scala ├── TestkitSamplesSpec.scala └── TransactionsExample.scala /.fossa.yml: -------------------------------------------------------------------------------- 1 | version: 3 2 | 3 | # https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md 4 | # detect targets and paths with: `fossa list-targets` 5 | 6 | targets: 7 | exclude: 8 | - type: scala 9 | path: tests 10 | - type: scala 11 | path: integration-tests 12 | - type: scala 13 | path: benchmarks 14 | - type: scala 15 | path: docs 16 | -------------------------------------------------------------------------------- /.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | # Scala Steward: Reformat with sbt-java-formatter 0.8.0 2 | 4ccee08177d1795422c2831ac89ae877627efb45 3 | -------------------------------------------------------------------------------- /.github/workflows/dependency-submission.yml: -------------------------------------------------------------------------------- 1 | name: Dependency Graph submission 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 
8 | tags: ["*"] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | dependency-graph: 15 | name: Update Dependency Graph 16 | if: github.event.repository.fork == false 17 | runs-on: ubuntu-22.04 18 | permissions: 19 | contents: write 20 | steps: 21 | - name: Checkout 22 | # https://github.com/actions/checkout/releases 23 | # v4.1.1 24 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 25 | with: 26 | fetch-depth: 0 27 | - name: Submit dependencies to GitHub 28 | # https://github.com/scalacenter/sbt-dependency-submission/releases 29 | # v2.3.1 30 | uses: scalacenter/sbt-dependency-submission@f3c0455a87097de07b66c3dc1b8619b5976c1c89 31 | with: 32 | modules-ignore: akka-stream-kafka-benchmarks_2.13 akka-stream-kafka-benchmarks_3 akka-stream-kafka-tests_2.13 akka-stream-kafka-tests_3 33 | configs-ignore: test It 34 | -------------------------------------------------------------------------------- /.github/workflows/fossa.yml: -------------------------------------------------------------------------------- 1 | name: Dependency License Scanning 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 0 * * 0' # At 00:00 on Sunday 7 | 8 | permissions: 9 | contents: read # allow actions/checkout 10 | 11 | jobs: 12 | fossa: 13 | name: Fossa 14 | runs-on: ubuntu-22.04 15 | if: github.repository == 'akka/alpakka-kafka' 16 | steps: 17 | - name: Checkout 18 | # https://github.com/actions/checkout/releases 19 | # v4.1.1 20 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 21 | with: 22 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 23 | fetch-depth: 0 24 | 25 | - name: Cache Coursier cache 26 | # https://github.com/coursier/cache-action/releases 27 | # v6.4.5 28 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 29 | 30 | - name: Set up JDK 17 31 | # https://github.com/coursier/setup-action/releases 32 | # v1.3.5 33 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 34 | with: 35 | jvm: temurin:1.17 36 | 37 | - name: FOSSA policy check 38 | run: |- 39 | curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash 40 | fossa analyze && fossa test 41 | env: 42 | FOSSA_API_KEY: "${{secrets.FOSSA_API_KEY}}" 43 | -------------------------------------------------------------------------------- /.github/workflows/link-validator.yml: -------------------------------------------------------------------------------- 1 | name: Link Validator 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | schedule: 7 | - cron: '10 6 1 * *' 8 | 9 | permissions: 10 | contents: read # allow actions/checkout 11 | 12 | jobs: 13 | validate-links: 14 | runs-on: ubuntu-22.04 15 | if: github.event.repository.fork == false 16 | steps: 17 | - name: Checkout 18 | # https://github.com/actions/checkout/releases 19 | # v4.1.1 20 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 21 | with: 22 | # See https://github.com/actions/checkout/issues/299#issuecomment-677674415 23 | ref: ${{ github.event.pull_request.head.sha }} 24 | fetch-depth: 100 25 | 26 | - name: Fetch tags 27 | run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* 28 | 29 | - name: Cache Coursier cache 30 | # https://github.com/coursier/cache-action/releases 31 | # v6.4.5 32 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 33 | 34 | - name: Set up JDK 17 35 | # https://github.com/coursier/setup-action/releases 36 | # v1.3.5 37 
| uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 38 | with: 39 | jvm: temurin:1.17.0.5 40 | apps: cs 41 | 42 | - name: sbt site 43 | run: sbt docs/makeSite 44 | 45 | - name: Run Link Validator 46 | run: cs launch net.runne::site-link-validator:0.2.5 -- scripts/link-validator.conf 47 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - release-* 8 | tags: ["*"] 9 | 10 | permissions: 11 | contents: read # allow actions/checkout 12 | 13 | jobs: 14 | release: 15 | # runs on main repo only 16 | if: github.event.repository.fork == false 17 | name: Release 18 | # the release environment provides access to secrets required in the release process 19 | # https://github.com/akka/alpakka-kafka/settings/environments/84275678/edit 20 | environment: release 21 | runs-on: ubuntu-22.04 22 | steps: 23 | - name: Checkout 24 | # https://github.com/actions/checkout/releases 25 | # v4.1.1 26 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 27 | with: 28 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 29 | fetch-depth: 0 30 | 31 | - name: Checkout GitHub merge 32 | if: github.event.pull_request 33 | run: |- 34 | git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch 35 | git checkout scratch 36 | 37 | - name: Cache Coursier cache 38 | # https://github.com/coursier/cache-action/releases 39 | # v6.4.5 40 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 41 | 42 | - name: Set up JDK 11 43 | # https://github.com/coursier/setup-action/releases 44 | # v1.3.5 45 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 46 | with: 47 | jvm: temurin:1.11.0.17 48 | 49 | - name: Publish artifacts for all Scala versions 50 | env: 51 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} 52 | PGP_SECRET: ${{ secrets.PGP_SECRET }} 53 | PUBLISH_USER: ${{ secrets.PUBLISH_USER }} 54 | PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }} 55 | run: sbt +publishSigned 56 | 57 | documentation: 58 | name: Documentation 59 | runs-on: ubuntu-22.04 60 | if: github.event.repository.fork == false 61 | steps: 62 | - name: Checkout 63 | # https://github.com/actions/checkout/releases 64 | # v4.1.1 65 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 66 | with: 67 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 68 | fetch-depth: 0 69 | 70 | - name: Set up JDK 17 71 | # https://github.com/coursier/setup-action/releases 72 | # v1.3.5 73 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 74 | with: 75 | jvm: temurin:1.17.0.5 76 | 77 | - name: Publish 78 | run: |- 79 | eval "$(ssh-agent -s)" 80 | echo $AKKA_RSYNC_GUSTAV | base64 -d > .github/id_rsa 81 | chmod 600 .github/id_rsa 82 | ssh-add .github/id_rsa 83 | sbt publishRsync 84 | env: 85 | AKKA_RSYNC_GUSTAV: ${{ secrets.AKKA_RSYNC_GUSTAV }} 86 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | project/project 3 | project/target 4 | target 5 | tmp 6 | .history 7 | dist 8 | /.idea 9 | /*.iml 10 | /out 11 | /.idea_modules 12 | /.classpath 13 | /.project 14 | /RUNNING_PID 15 | /.settings 16 | .cache 17 | 
.target/ 18 | /bin 19 | /nbproject 20 | node_modules/ 21 | .DS_Store 22 | test-output 23 | .classpath 24 | .project 25 | .settings/ 26 | .tmpBin 27 | .bsp/ 28 |
-------------------------------------------------------------------------------- /.scala-steward.conf: --------------------------------------------------------------------------------
1 | pullRequests.frequency = "@monthly" 2 | 3 | updates.ignore = [ 4 | { groupId = "com.typesafe.akka" } 5 | { groupId = "org.scalameta", artifactId = "scalafmt-core" } 6 | ] 7 | updates.pin = [ 8 | { groupId = "org.apache.kafka", artifactId="kafka-clients", version="3.9." } 9 | # To be updated in tandem with upstream Akka 10 | { groupId = "org.scalatest", artifactId = "scalatest", version = "3.1." } 11 | { groupId = "org.slf4j", artifactId = "log4j-over-slf4j", version = "1." } 12 | { groupId = "org.slf4j", artifactId = "jul-to-slf4j", version = "1." } 13 | ] 14 | 15 | commits.message = "bump: ${artifactName} ${nextVersion} (was ${currentVersion})" 16 |
-------------------------------------------------------------------------------- /.scalafmt.conf: --------------------------------------------------------------------------------
1 | version = 2.1.0 2 | 3 | style = defaultWithAlign 4 | 5 | align.tokens = [off] 6 | align.openParenDefnSite = true 7 | align.openParenCallSite = true 8 | danglingParentheses = true 9 | docstrings = JavaDoc 10 | indentOperator = spray 11 | maxColumn = 120 12 | rewrite.rules = [RedundantParens, SortImports] 13 | unindentTopLevelOperators = true 14 |
-------------------------------------------------------------------------------- /RELEASING.md: --------------------------------------------------------------------------------
1 | # Releasing 2 | 3 | Create a new issue from the [Alpakka Kafka Release Train Issue Template](docs/release-train-issue-template.md) and follow the steps. 4 | 5 | ```bash 6 | ~/alpakka> scripts/create-release-issue.sh `version-to-be-released` 7 | ``` 8 | 9 | ### Releasing only updated docs 10 | 11 | It is possible to release revised documentation for an already existing release. 12 | 13 | 1. Create a new branch from a release tag. If the revised documentation is for the `v1.1.1` release, name the new branch `docs/v1.1.1`. 14 | 1. Add and commit a `version.sbt` file that pins the version to the one being revised. Also set `isSnapshot` to `false` so that the documentation links point to the stable URLs. For example: 15 | ```scala 16 | ThisBuild / version := "1.1.1" 17 | ThisBuild / isSnapshot := false 18 | ``` 19 | 1. Make all of the required changes to the documentation. 20 | 1. Check the documentation update locally: 21 | ```sh 22 | sbt docs/previewSite 23 | ``` 24 | 1. If the generated documentation looks good, send it to Gustav: 25 | ```sh 26 | sbt docs/publishRsync 27 | ``` 28 | 1. Do not forget to push the new branch back to GitHub. 29 |
-------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/AlpakkaCommittableProducer.scala: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc.
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.BenchmarksBase.{topic_100_100, topic_100_5000} 9 | import akka.kafka.benchmarks.Timed.runPerfTest 10 | import akka.kafka.benchmarks.app.RunTestCommand 11 | 12 | /** 13 | * Compares the `CommittingProducerSinkStage` with the composed implementation of `Producer.flexiFlow` and `Committer.sink`. 14 | */ 15 | class AlpakkaCommittableProducer extends BenchmarksBase() { 16 | it should "bench composed sink with 100b messages" in { 17 | val cmd = RunTestCommand("alpakka-committable-producer-composed", bootstrapServers, topic_100_100) 18 | runPerfTest( 19 | cmd, 20 | AlpakkaCommittableSinkFixtures.composedSink(cmd), 21 | AlpakkaCommittableSinkBenchmarks.run 22 | ) 23 | } 24 | 25 | it should "bench composed sink with 5000b messages" in { 26 | val cmd = RunTestCommand("alpakka-committable-producer-composed-5000b", bootstrapServers, topic_100_5000) 27 | runPerfTest( 28 | cmd, 29 | AlpakkaCommittableSinkFixtures.composedSink(cmd), 30 | AlpakkaCommittableSinkBenchmarks.run 31 | ) 32 | } 33 | 34 | it should "bench `Producer.committableSink` with 100b messages" in { 35 | val cmd = RunTestCommand("alpakka-committable-producer", bootstrapServers, topic_100_100) 36 | runPerfTest( 37 | cmd, 38 | AlpakkaCommittableSinkFixtures.producerSink(cmd), 39 | AlpakkaCommittableSinkBenchmarks.run 40 | ) 41 | } 42 | 43 | it should "bench `Producer.committableSink` with 5000b messages" in { 44 | val cmd = RunTestCommand("alpakka-committable-producer-5000b", bootstrapServers, topic_100_5000) 45 | runPerfTest( 46 | cmd, 47 | AlpakkaCommittableSinkFixtures.producerSink(cmd), 48 | AlpakkaCommittableSinkBenchmarks.run 49 | ) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/BatchedConsumer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.BenchmarksBase.{topic_1000_100, topic_1000_5000, topic_1000_5000_8} 9 | import akka.kafka.benchmarks.Timed.runPerfTest 10 | import akka.kafka.benchmarks.app.RunTestCommand 11 | 12 | class ApacheKafkaBatchedConsumer extends BenchmarksBase() { 13 | it should "bench with small messages" in { 14 | val cmd = RunTestCommand("apache-kafka-batched-consumer", bootstrapServers, topic_1000_100.freshTopic) 15 | runPerfTest(cmd, 16 | KafkaConsumerFixtures.filledTopics(cmd), 17 | KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 18 | } 19 | 20 | it should "bench with normal messages" in { 21 | val cmd = 22 | RunTestCommand("apache-kafka-batched-consumer-normal-msg", bootstrapServers, topic_1000_5000.freshTopic) 23 | runPerfTest(cmd, 24 | KafkaConsumerFixtures.filledTopics(cmd), 25 | KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 26 | } 27 | 28 | it should "bench with normal messages and eight partitions" in { 29 | val cmd = 30 | RunTestCommand("apache-kafka-batched-consumer-normal-msg-8-partitions", 31 | bootstrapServers, 32 | topic_1000_5000_8.freshTopic) 33 | runPerfTest(cmd, 34 | KafkaConsumerFixtures.filledTopics(cmd), 35 | KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 36 | } 37 | } 38 | 39 | class AlpakkaKafkaBatchedConsumer extends BenchmarksBase() { 40 | 41 | it should "bench with small messages" in { 42 | val cmd = RunTestCommand("alpakka-kafka-batched-consumer", bootstrapServers, topic_1000_100) 43 | runPerfTest(cmd, 44 | ReactiveKafkaConsumerFixtures.committableSources(cmd), 45 | ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 46 | } 47 | 48 | it should "bench with normal messages" in { 49 | val cmd = RunTestCommand("alpakka-kafka-batched-consumer-normal-msg", bootstrapServers, topic_1000_5000) 50 | runPerfTest(cmd, 51 | ReactiveKafkaConsumerFixtures.committableSources(cmd), 52 | ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 53 | } 54 | 55 | it should "bench with normal messages and eight partitions" in { 56 | val cmd = 57 | RunTestCommand("alpakka-kafka-batched-consumer-normal-msg-8-partitions", bootstrapServers, topic_1000_5000_8) 58 | runPerfTest(cmd, 59 | ReactiveKafkaConsumerFixtures.committableSources(cmd), 60 | ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000)) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/NoCommitBackpressure.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.Timed.runPerfTest 9 | import akka.kafka.benchmarks.app.RunTestCommand 10 | 11 | import BenchmarksBase._ 12 | 13 | class RawKafkaCommitEveryPollConsumer extends BenchmarksBase() { 14 | private val prefix = "apache-kafka-batched-no-pausing-" 15 | 16 | it should "bench with small messages" in { 17 | val cmd = RunTestCommand(prefix + "consumer", bootstrapServers, topic_1000_100) 18 | runPerfTest(cmd, 19 | KafkaConsumerFixtures.filledTopics(cmd), 20 | KafkaConsumerBenchmarks.consumerAtLeastOnceCommitEveryPoll()) 21 | } 22 | 23 | // These are not plotted anyway 24 | // it should "bench with normal messages" in { 25 | // val cmd = RunTestCommand(prefix + "consumer-normal-msg", bootstrapServers, topic_1000_5000) 26 | // runPerfTest(cmd, 27 | // KafkaConsumerFixtures.filledTopics(cmd), 28 | // KafkaConsumerBenchmarks.consumerAtLeastOnceCommitEveryPoll()) 29 | // } 30 | // 31 | // it should "bench with normal messages and eight partitions" in { 32 | // val cmd = RunTestCommand(prefix + "consumer-normal-msg-8-partitions", 33 | // bootstrapServers, 34 | // topic_1000_5000_8) 35 | // runPerfTest(cmd, 36 | // KafkaConsumerFixtures.filledTopics(cmd), 37 | // KafkaConsumerBenchmarks.consumerAtLeastOnceCommitEveryPoll()) 38 | // } 39 | } 40 | 41 | class AlpakkaCommitAndForgetConsumer extends BenchmarksBase() { 42 | val prefix = "alpakka-kafka-commit-and-forget-" 43 | 44 | it should "bench with small messages" in { 45 | val cmd = RunTestCommand(prefix + "consumer", bootstrapServers, topic_1000_100) 46 | runPerfTest(cmd, 47 | ReactiveKafkaConsumerFixtures.committableSources(cmd), 48 | ReactiveKafkaConsumerBenchmarks.consumerCommitAndForget(commitBatchSize = 1000)) 49 | } 50 | 51 | // These are not plotted anyway 52 | // it should "bench with normal messages" in { 53 | // val cmd = RunTestCommand(prefix + "normal-msg", bootstrapServers, topic_1000_5000) 54 | // runPerfTest(cmd, 55 | // ReactiveKafkaConsumerFixtures.committableSources(cmd), 56 | // ReactiveKafkaConsumerBenchmarks.consumerCommitAndForget(commitBatchSize = 1000)) 57 | // } 58 | // 59 | // it should "bench with normal messages and eight partitions" in { 60 | // val cmd = RunTestCommand(prefix + "normal-msg-8-partitions", 61 | // bootstrapServers, 62 | // topic_1000_5000_8) 63 | // runPerfTest(cmd, 64 | // ReactiveKafkaConsumerFixtures.committableSources(cmd), 65 | // ReactiveKafkaConsumerBenchmarks.consumerCommitAndForget(commitBatchSize = 1000)) 66 | // } 67 | } 68 | -------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/Producer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.BenchmarksBase.{topic_2000_100, topic_2000_500, topic_2000_5000, topic_2000_5000_8} 9 | import akka.kafka.benchmarks.Timed.runPerfTest 10 | import akka.kafka.benchmarks.app.RunTestCommand 11 | 12 | class ApacheKafkaPlainProducer extends BenchmarksBase() { 13 | private val prefix = "apache-kafka-plain-producer" 14 | 15 | it should "bench with small messages" in { 16 | val cmd = RunTestCommand(prefix, bootstrapServers, topic_2000_100.freshTopic) 17 | runPerfTest(cmd, KafkaProducerFixtures.initializedProducer(cmd), KafkaProducerBenchmarks.plainFlow) 18 | } 19 | 20 | it should "bench with 500b messages" in { 21 | val cmd = RunTestCommand(prefix + "-500b", bootstrapServers, topic_2000_500.freshTopic) 22 | runPerfTest(cmd, KafkaProducerFixtures.initializedProducer(cmd), KafkaProducerBenchmarks.plainFlow) 23 | } 24 | 25 | it should "bench with normal messages" in { 26 | val cmd = RunTestCommand(prefix + "-normal-msg", bootstrapServers, topic_2000_5000.freshTopic) 27 | runPerfTest(cmd, KafkaProducerFixtures.initializedProducer(cmd), KafkaProducerBenchmarks.plainFlow) 28 | } 29 | 30 | it should "bench with normal messages written to 8 partitions" in { 31 | val cmd = 32 | RunTestCommand(prefix + "-normal-msg-8-partitions", bootstrapServers, topic_2000_5000_8.freshTopic) 33 | runPerfTest(cmd, KafkaProducerFixtures.initializedProducer(cmd), KafkaProducerBenchmarks.plainFlow) 34 | } 35 | } 36 | 37 | class AlpakkaKafkaPlainProducer extends BenchmarksBase() { 38 | private val prefix = "alpakka-kafka-plain-producer" 39 | 40 | it should "bench with small messages" in { 41 | val cmd = RunTestCommand(prefix, bootstrapServers, topic_2000_100.freshTopic) 42 | runPerfTest(cmd, ReactiveKafkaProducerFixtures.flowFixture(cmd), ReactiveKafkaProducerBenchmarks.plainFlow) 43 | } 44 | 45 | it should "bench with 500b messages" in { 46 | val cmd = RunTestCommand(prefix + "-500b", bootstrapServers, topic_2000_500.freshTopic) 47 | runPerfTest(cmd, ReactiveKafkaProducerFixtures.flowFixture(cmd), ReactiveKafkaProducerBenchmarks.plainFlow) 48 | } 49 | 50 | it should "bench with normal messages" in { 51 | val cmd = RunTestCommand(prefix + "-normal-msg", bootstrapServers, topic_2000_5000.freshTopic) 52 | runPerfTest(cmd, ReactiveKafkaProducerFixtures.flowFixture(cmd), ReactiveKafkaProducerBenchmarks.plainFlow) 53 | } 54 | 55 | it should "bench with normal messages written to 8 partitions" in { 56 | val cmd = 57 | RunTestCommand(prefix + "-normal-msg-8-partitions", bootstrapServers, topic_2000_5000_8.freshTopic) 58 | runPerfTest(cmd, ReactiveKafkaProducerFixtures.flowFixture(cmd), ReactiveKafkaProducerBenchmarks.plainFlow) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/SpecBase.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.testkit.scaladsl.ScalatestKafkaSpec 9 | import org.scalatest.concurrent.{Eventually, ScalaFutures} 10 | import org.scalatest.flatspec.AnyFlatSpecLike 11 | import org.scalatest.matchers.should.Matchers 12 | 13 | abstract class SpecBase(kafkaPort: Int) 14 | extends ScalatestKafkaSpec(kafkaPort) 15 | with AnyFlatSpecLike 16 | with Matchers 17 | with ScalaFutures 18 | with Eventually { 19 | 20 | protected def this() = this(kafkaPort = -1) 21 | } 22 |
-------------------------------------------------------------------------------- /benchmarks/src/it/scala/akka/kafka/benchmarks/Transactions.scala: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.BenchmarksBase.{topic_100_100, topic_100_5000} 9 | import akka.kafka.benchmarks.Timed.runPerfTest 10 | import akka.kafka.benchmarks.app.RunTestCommand 11 | import scala.concurrent.duration._ 12 | 13 | class ApacheKafkaTransactions extends BenchmarksBase() { 14 | it should "bench with small messages" in { 15 | val cmd = RunTestCommand("apache-kafka-transactions", bootstrapServers, topic_100_100) 16 | runPerfTest(cmd, 17 | KafkaTransactionFixtures.initialize(cmd), 18 | KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds)) 19 | } 20 | 21 | it should "bench with normal messages" in { 22 | val cmd = RunTestCommand("apache-kafka-transactions-normal-msg", bootstrapServers, topic_100_5000) 23 | runPerfTest(cmd, 24 | KafkaTransactionFixtures.initialize(cmd), 25 | KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds)) 26 | } 27 | } 28 | 29 | class AlpakkaKafkaTransactions extends BenchmarksBase() { 30 | it should "bench with small messages" in { 31 | val cmd = RunTestCommand("alpakka-kafka-transactions", bootstrapServers, topic_100_100) 32 | runPerfTest( 33 | cmd, 34 | ReactiveKafkaTransactionFixtures.transactionalSourceAndSink(cmd, commitInterval = 100.milliseconds), 35 | ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction 36 | ) 37 | } 38 | 39 | it should "bench with normal messages" in { 40 | val cmd = RunTestCommand("alpakka-kafka-transactions-normal-msg", bootstrapServers, topic_100_5000) 41 | runPerfTest( 42 | cmd, 43 | ReactiveKafkaTransactionFixtures.transactionalSourceAndSink(cmd, commitInterval = 100.milliseconds), 44 | ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction 45 | ) 46 | } 47 | } 48 |
-------------------------------------------------------------------------------- /benchmarks/src/main/resources/logback.xml: --------------------------------------------------------------------------------
1 | <configuration> 2 | <appender name="FILE" class="ch.qos.logback.core.FileAppender"> 3 | <file>target/benchmarks.log</file> 4 | <append>false</append> 5 | <encoder> 6 | <pattern>%d{ISO8601} %-5level [%-20.20thread] [%-36.36logger{36}] %msg%n%rEx</pattern> 7 | </encoder> 8 | </appender> 9 | <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> 10 | <encoder> 11 | <pattern>%d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} %msg%n%rEx</pattern> 12 | </encoder> 13 | </appender> 14 | <!-- logger level overrides --> 15 | <root level="INFO"> 16 | <appender-ref ref="FILE"/> 17 | <appender-ref ref="STDOUT"/> 18 | </root> 19 | </configuration>
-------------------------------------------------------------------------------- /benchmarks/src/main/resources/reference.conf: --------------------------------------------------------------------------------
1 | akka { 2 | loggers = ["akka.event.slf4j.Slf4jLogger"] 3 | loglevel = "INFO" 4 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 5 | }
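The integration specs above all follow the same shape: a `RunTestCommand` names the benchmark and the pre-filled topic, a `FixtureGen` (defined in `FixtureGen.scala` below) turns the message count into a fixture, and `Timed.runPerfTest` drives a measured body against that fixture. `Timed.scala` itself is not reproduced in this excerpt, so the signature sketched below is inferred from the call sites, and the fixture and benchmark body are hypothetical illustrations of how a new benchmark would plug into the same machinery.

```scala
package akka.kafka.benchmarks

import akka.kafka.benchmarks.app.RunTestCommand
import com.codahale.metrics.Meter

// Shape of Timed.runPerfTest as inferred from the specs above:
//   def runPerfTest[F](command: RunTestCommand, fixture: FixtureGen[F], test: (F, Meter) => Unit): Unit

// Hypothetical fixture: the FixtureGen simply carries the message count through.
final case class CountingFixture(msgCount: Int)

object CountingFixtures {
  def plain(c: RunTestCommand): FixtureGen[CountingFixture] =
    FixtureGen(c, msgCount => CountingFixture(msgCount))
}

object CountingBenchmarks {
  // Hypothetical measured body: marks the meter once per simulated message.
  def countOnly(fixture: CountingFixture, meter: Meter): Unit =
    (1 to fixture.msgCount).foreach(_ => meter.mark())
}
```

A spec would then combine the two exactly as `ApacheKafkaTransactions` does above, for example `runPerfTest(cmd, CountingFixtures.plain(cmd), CountingBenchmarks.countOnly)`.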
-------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/FixtureGen.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.app.RunTestCommand 9 | 10 | case class FixtureGen[F](command: RunTestCommand, generate: Int => F) 11 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerFixtureGen.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.app.RunTestCommand 9 | import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer} 10 | import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer} 11 | 12 | import scala.jdk.CollectionConverters._ 13 | 14 | case class KafkaConsumerTestFixture(topic: String, msgCount: Int, consumer: KafkaConsumer[Array[Byte], String]) { 15 | def close(): Unit = consumer.close() 16 | } 17 | 18 | object KafkaConsumerFixtures extends PerfFixtureHelpers { 19 | 20 | def noopFixtureGen(c: RunTestCommand) = FixtureGen[KafkaConsumerTestFixture]( 21 | c, 22 | msgCount => { 23 | KafkaConsumerTestFixture("topic", msgCount, null) 24 | } 25 | ) 26 | 27 | def filledTopics(c: RunTestCommand) = FixtureGen[KafkaConsumerTestFixture]( 28 | c, 29 | msgCount => { 30 | fillTopic(c.filledTopic, c.kafkaHost) 31 | val consumerJavaProps = new java.util.Properties 32 | consumerJavaProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, c.kafkaHost) 33 | consumerJavaProps.put(ConsumerConfig.CLIENT_ID_CONFIG, randomId()) 34 | consumerJavaProps.put(ConsumerConfig.GROUP_ID_CONFIG, randomId()) 35 | consumerJavaProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") 36 | 37 | val consumer = 38 | new KafkaConsumer[Array[Byte], String](consumerJavaProps, new ByteArrayDeserializer, new StringDeserializer) 39 | consumer.subscribe(Set(c.filledTopic.topic).asJava) 40 | KafkaConsumerTestFixture(c.filledTopic.topic, msgCount, consumer) 41 | } 42 | ) 43 | } 44 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerBenchmarks.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import com.codahale.metrics.Meter 9 | import com.typesafe.scalalogging.LazyLogging 10 | import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata} 11 | 12 | import scala.concurrent.duration._ 13 | 14 | object KafkaProducerBenchmarks extends LazyLogging { 15 | 16 | val logStep = 100000 17 | 18 | /** 19 | * Streams generated numbers to a Kafka producer. Does not commit. 
20 | */ 21 | def plainFlow(fixture: KafkaProducerTestFixture, meter: Meter): Unit = { 22 | val producer = fixture.producer 23 | var lastPartStart = System.nanoTime() 24 | 25 | val msg = PerfFixtureHelpers.stringOfSize(fixture.msgSize) 26 | 27 | for (i <- 1 to fixture.msgCount) { 28 | val partition: Int = (i % fixture.numberOfPartitions).toInt 29 | producer.send( 30 | new ProducerRecord[Array[Byte], String](fixture.topic, partition, null, msg), 31 | new Callback { 32 | override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = meter.mark() 33 | } 34 | ) 35 | 36 | if (i % logStep == 0) { 37 | val lastPartEnd = System.nanoTime() 38 | val took = (lastPartEnd - lastPartStart).nanos 39 | logger.info(s"Sent $i, took ${took.toMillis} ms to send last $logStep") 40 | lastPartStart = lastPartEnd 41 | } 42 | } 43 | fixture.close() 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerFixtureGen.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.benchmarks.PerfFixtureHelpers.FilledTopic 9 | import akka.kafka.benchmarks.app.RunTestCommand 10 | import org.apache.kafka.clients.producer.KafkaProducer 11 | 12 | case class KafkaProducerTestFixture(topic: String, 13 | msgCount: Int, 14 | msgSize: Int, 15 | producer: KafkaProducer[Array[Byte], String], 16 | numberOfPartitions: Int) { 17 | def close(): Unit = producer.close() 18 | } 19 | 20 | object KafkaProducerFixtures extends PerfFixtureHelpers { 21 | 22 | def noopFixtureGen(c: RunTestCommand) = FixtureGen[KafkaProducerTestFixture]( 23 | c, 24 | msgCount => { 25 | KafkaProducerTestFixture("topic", msgCount, c.msgSize, null, c.numberOfPartitions) 26 | } 27 | ) 28 | 29 | def initializedProducer(c: RunTestCommand) = FixtureGen[KafkaProducerTestFixture]( 30 | c, 31 | msgCount => { 32 | val ft = FilledTopic(msgCount = 1, msgSize = c.msgSize, numberOfPartitions = c.numberOfPartitions) 33 | val rawProducer = createTopic(ft, c.kafkaHost) 34 | KafkaProducerTestFixture(ft.topic, msgCount, c.msgSize, rawProducer, c.numberOfPartitions) 35 | } 36 | ) 37 | } 38 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerFixtures.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.actor.ActorSystem 9 | import akka.kafka.ConsumerMessage.CommittableMessage 10 | import akka.kafka.benchmarks.app.RunTestCommand 11 | import akka.kafka.scaladsl.Consumer 12 | import akka.kafka.scaladsl.Consumer.Control 13 | import akka.kafka.{ConsumerSettings, Subscriptions} 14 | import akka.stream.scaladsl.Source 15 | import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord} 16 | import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer} 17 | 18 | case class ReactiveKafkaConsumerTestFixture[T](topic: String, 19 | msgCount: Int, 20 | source: Source[T, Control], 21 | numberOfPartitions: Int) 22 | 23 | object ReactiveKafkaConsumerFixtures extends PerfFixtureHelpers { 24 | 25 | private def createConsumerSettings(kafkaHost: String)(implicit actorSystem: ActorSystem) = 26 | ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer) 27 | .withBootstrapServers(kafkaHost) 28 | .withGroupId(randomId()) 29 | .withClientId(randomId()) 30 | .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") 31 | 32 | def plainSources(c: RunTestCommand)(implicit actorSystem: ActorSystem) = 33 | FixtureGen[ReactiveKafkaConsumerTestFixture[ConsumerRecord[Array[Byte], String]]]( 34 | c, 35 | msgCount => { 36 | fillTopic(c.filledTopic, c.kafkaHost) 37 | val settings = createConsumerSettings(c.kafkaHost) 38 | val source = Consumer.plainSource(settings, Subscriptions.topics(c.filledTopic.topic)) 39 | ReactiveKafkaConsumerTestFixture(c.filledTopic.topic, msgCount, source, c.numberOfPartitions) 40 | } 41 | ) 42 | 43 | def committableSources(c: RunTestCommand)(implicit actorSystem: ActorSystem) = 44 | FixtureGen[ReactiveKafkaConsumerTestFixture[CommittableMessage[Array[Byte], String]]]( 45 | c, 46 | msgCount => { 47 | fillTopic(c.filledTopic, c.kafkaHost) 48 | val settings = createConsumerSettings(c.kafkaHost) 49 | val source = Consumer.committableSource(settings, Subscriptions.topics(c.filledTopic.topic)) 50 | ReactiveKafkaConsumerTestFixture(c.filledTopic.topic, msgCount, source, c.numberOfPartitions) 51 | } 52 | ) 53 | 54 | def noopFixtureGen(c: RunTestCommand) = 55 | FixtureGen[ReactiveKafkaConsumerTestFixture[ConsumerRecord[Array[Byte], String]]]( 56 | c, 57 | msgCount => { 58 | ReactiveKafkaConsumerTestFixture("topic", msgCount, null, c.numberOfPartitions) 59 | } 60 | ) 61 | 62 | } 63 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerBenchmarks.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.ConsumerMessage.CommittableMessage 9 | import akka.kafka.ProducerMessage 10 | import akka.kafka.ProducerMessage.{Result, Results} 11 | import akka.kafka.benchmarks.ReactiveKafkaProducerFixtures.ReactiveKafkaProducerTestFixture 12 | import akka.stream.Materializer 13 | import akka.stream.scaladsl.{Sink, Source} 14 | import com.codahale.metrics.Meter 15 | import com.typesafe.scalalogging.LazyLogging 16 | import org.apache.kafka.clients.producer.ProducerRecord 17 | 18 | import scala.concurrent.Await 19 | import scala.concurrent.duration._ 20 | import scala.language.postfixOps 21 | 22 | object ReactiveKafkaProducerBenchmarks extends LazyLogging { 23 | val streamingTimeout = 30 minutes 24 | val logStep = 100000 25 | 26 | type Fixture = ReactiveKafkaConsumerTestFixture[CommittableMessage[Array[Byte], String]] 27 | 28 | /** 29 | * Iterates over N lazily-generated elements and passes them through a Kafka flow. 30 | */ 31 | def plainFlow(fixture: ReactiveKafkaProducerTestFixture[Int], meter: Meter)(implicit mat: Materializer): Unit = { 32 | logger.debug("Creating and starting a stream") 33 | @volatile var lastPartStart = System.nanoTime() 34 | 35 | val msg = PerfFixtureHelpers.stringOfSize(fixture.msgSize) 36 | 37 | val future = Source(0 to fixture.msgCount) 38 | .map { number => 39 | val partition: Int = (number % fixture.numberOfPartitions).toInt 40 | ProducerMessage.single(new ProducerRecord[Array[Byte], String](fixture.topic, partition, null, msg), number) 41 | } 42 | .via(fixture.flow) 43 | .map { 44 | case msg: Result[Array[Byte], String, Int] => 45 | meter.mark() 46 | if (msg.offset % logStep == 0) { 47 | val lastPartEnd = System.nanoTime() 48 | val took = (lastPartEnd - lastPartStart).nanos 49 | logger.info(s"Sent ${msg.offset}, took ${took.toMillis} ms to send last $logStep") 50 | lastPartStart = lastPartEnd 51 | } 52 | msg 53 | 54 | case other: Results[Array[Byte], String, Int] => 55 | meter.mark() 56 | other 57 | } 58 | .runWith(Sink.ignore) 59 | Await.result(future, atMost = streamingTimeout) 60 | logger.info("Stream finished") 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerFixtures.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.NotUsed 9 | import akka.actor.ActorSystem 10 | import akka.kafka.ProducerMessage.{Envelope, Results} 11 | import akka.kafka.ProducerSettings 12 | import akka.kafka.benchmarks.app.RunTestCommand 13 | import akka.kafka.scaladsl.Producer 14 | import akka.stream.scaladsl.Flow 15 | import org.apache.kafka.clients.consumer.ConsumerRecord 16 | import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer} 17 | 18 | object ReactiveKafkaProducerFixtures extends PerfFixtureHelpers { 19 | 20 | val Parallelism = 10000 21 | 22 | type K = Array[Byte] 23 | type V = String 24 | type In[PassThrough] = Envelope[K, V, PassThrough] 25 | type Out[PassThrough] = Results[K, V, PassThrough] 26 | type FlowType[PassThrough] = Flow[In[PassThrough], Out[PassThrough], NotUsed] 27 | 28 | case class ReactiveKafkaProducerTestFixture[PassThrough](topic: String, 29 | msgCount: Int, 30 | msgSize: Int, 31 | flow: FlowType[PassThrough], 32 | numberOfPartitions: Int) 33 | 34 | private def createProducerSettings(kafkaHost: String)(implicit actorSystem: ActorSystem): ProducerSettings[K, V] = 35 | ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer) 36 | .withBootstrapServers(kafkaHost) 37 | .withParallelism(Parallelism) 38 | 39 | def flowFixture(c: RunTestCommand)(implicit actorSystem: ActorSystem) = 40 | FixtureGen[ReactiveKafkaProducerTestFixture[Int]]( 41 | c, 42 | msgCount => { 43 | val flow: FlowType[Int] = Producer.flexiFlow(createProducerSettings(c.kafkaHost)) 44 | fillTopic(c.filledTopic.copy(msgCount = 1), c.kafkaHost) 45 | ReactiveKafkaProducerTestFixture(c.filledTopic.topic, msgCount, c.msgSize, flow, c.numberOfPartitions) 46 | } 47 | ) 48 | 49 | def noopFixtureGen(c: RunTestCommand) = 50 | FixtureGen[ReactiveKafkaConsumerTestFixture[ConsumerRecord[Array[Byte], String]]]( 51 | c, 52 | msgCount => { 53 | ReactiveKafkaConsumerTestFixture("topic", msgCount, null, c.numberOfPartitions) 54 | } 55 | ) 56 | 57 | } 58 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionBenchmarks.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks 7 | 8 | import akka.kafka.ProducerMessage 9 | import akka.kafka.ProducerMessage.{Result, Results} 10 | import akka.kafka.benchmarks.ReactiveKafkaTransactionFixtures._ 11 | import akka.stream.Materializer 12 | import akka.stream.scaladsl.{Keep, Sink} 13 | import com.codahale.metrics.Meter 14 | import com.typesafe.scalalogging.LazyLogging 15 | import org.apache.kafka.clients.producer.ProducerRecord 16 | 17 | import scala.concurrent.duration._ 18 | import scala.concurrent.{Await, Promise} 19 | import scala.language.postfixOps 20 | import scala.util.Success 21 | 22 | object ReactiveKafkaTransactionBenchmarks extends LazyLogging { 23 | val streamingTimeout: FiniteDuration = 30 minutes 24 | type TransactionFixture = ReactiveKafkaTransactionTestFixture[KTransactionMessage, KProducerMessage, KResult] 25 | 26 | /** 27 | * Process records in a consume-transform-produce transactional workflow and commit every interval. 
28 | */ 29 | def consumeTransformProduceTransaction(fixture: TransactionFixture, 30 | meter: Meter)(implicit mat: Materializer): Unit = { 31 | logger.debug("Creating and starting a stream") 32 | val msgCount = fixture.msgCount 33 | val sinkTopic = fixture.sinkTopic 34 | val source = fixture.source 35 | 36 | val promise = Promise[Unit]() 37 | val logPercentStep = 1 38 | val loggedStep = if (msgCount > logPercentStep) 100 else 1 39 | 40 | val control = source 41 | .map { msg => 42 | ProducerMessage.single(new ProducerRecord[Array[Byte], String](sinkTopic, msg.record.value()), 43 | msg.partitionOffset) 44 | } 45 | .via(fixture.flow) 46 | .toMat(Sink.foreach { 47 | case result: Result[Key, Val, PassThrough] => 48 | meter.mark() 49 | val offset = result.offset 50 | if (result.offset % loggedStep == 0) 51 | logger.info(s"Transformed $offset elements to Kafka (${100 * offset / msgCount}%)") 52 | if (result.offset >= fixture.msgCount - 1) 53 | promise.complete(Success(())) 54 | case other: Results[Key, Val, PassThrough] => 55 | meter.mark() 56 | promise.complete(Success(())) 57 | })(Keep.left) 58 | .run() 59 | 60 | Await.result(promise.future, streamingTimeout) 61 | control.shutdown() 62 | logger.debug("Stream finished") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /benchmarks/src/main/scala/akka/kafka/benchmarks/app/RunTestCommand.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.benchmarks.app 7 | 8 | import akka.kafka.benchmarks.PerfFixtureHelpers.FilledTopic 9 | 10 | case class RunTestCommand(testName: String, kafkaHost: String, filledTopic: FilledTopic) { 11 | 12 | val msgCount = filledTopic.msgCount 13 | val msgSize = filledTopic.msgSize 14 | val numberOfPartitions = filledTopic.numberOfPartitions 15 | val replicationFactor = filledTopic.replicationFactor 16 | 17 | } 18 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/0.22.backwards.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[Problem]("akka.kafka.internal.*") 2 | 3 | # Committer sink 4 | # https://github.com/akka/alpakka-kafka/pull/622 5 | ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.updated") 6 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#Committable.batchSize") 7 | 8 | # Use named classes for consumer stages and set a name attribute 9 | # https://github.com/akka/alpakka-kafka/pull/571 10 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.Subscription.renderStageAttribute") 11 | 12 | # Add support for merging commit batches 13 | # https://github.com/akka/alpakka-kafka/pull/584 14 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.updated") 15 | 16 | # revised settings classes 17 | # https://github.com/akka/alpakka-kafka/pull/616 18 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ProducerSettings.lazyProducer") 19 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.$default$13") 20 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.$default$14") 21 | -------------------------------------------------------------------------------- 
/core/src/main/mima-filters/1.0-M1.backwards.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[Problem]("akka.kafka.internal.*") 2 | 3 | # Committer parallelism 4 | # https://github.com/akka/alpakka-kafka/pull/647 5 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.CommitterSettings.this") 6 | 7 | # Include requested partition in CommittedOffset 8 | # https://github.com/akka/alpakka-kafka/pull/626 9 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Metadata#CommittedOffset.*") 10 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Metadata$CommittedOffset$") 11 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Metadata#CommittedOffset.apply") 12 | 13 | # PR #639 Remove rebalance listener for manual subscriptions 14 | # https://github.com/akka/alpakka-kafka/pull/639 15 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#Assignment.copy$default$2") 16 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#Assignment.copy") 17 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#Assignment.this") 18 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#Assignment.apply") 19 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Subscriptions$Assignment$") 20 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentWithOffset.copy$default$2") 21 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentWithOffset.copy") 22 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentWithOffset.this") 23 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentWithOffset.apply") 24 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Subscriptions$AssignmentWithOffset$") 25 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentOffsetsForTimes.copy$default$2") 26 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentOffsetsForTimes.copy") 27 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentOffsetsForTimes.this") 28 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Subscriptions#AssignmentOffsetsForTimes.apply") 29 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Subscriptions$AssignmentOffsetsForTimes$") 30 | 31 | # PR #686 Remove wakeup settings 32 | # https://github.com/akka/alpakka-kafka/pull/686 33 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.maxWakeups") 34 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.wakeupTimeout") 35 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.wakeupDebug") 36 | # the public, deprecated constructor is unchanged 37 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.this") 38 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.0-RC1.backwards.excludes: -------------------------------------------------------------------------------- 1 | # PR #710 Generalize API for ProducerSettings 2 | # https://github.com/akka/alpakka-kafka/pull/710 3 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.kafka.ProducerSettings.createKafkaProducer") 4 | 
-------------------------------------------------------------------------------- /core/src/main/mima-filters/1.0.4.backwards.excludes: -------------------------------------------------------------------------------- 1 | # PR #780 2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ProducerMessage#Envelope.withPassThrough") 3 | 4 | # PR #831 5 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.this") 6 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.0.x.backwards.excludes: -------------------------------------------------------------------------------- 1 | # always exclude changes to the internal APIs 2 | ProblemFilters.exclude[Problem]("akka.kafka.internal.*") 3 | 4 | # PR #883 committing without backpressure 5 | # https://github.com/akka/alpakka-kafka/pull/883 6 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.tellCommit") 7 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.CommitterSettings.this") 8 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR-986-partittionAssignmentHandler-onLost.excludes: -------------------------------------------------------------------------------- 1 | # The `PartitionAssignmentHandler` was not public API in 1.1.0 2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.scaladsl.PartitionAssignmentHandler.onLost") 3 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR930-partitioned-transactions-source.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.ConsumerMessage$PartitionOffsetCommittedMarker$") 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffsetCommittedMarker.apply") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffsetCommittedMarker.this") 4 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffset.withCommittedMarker") 5 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.ConsumerMessage#PartitionOffsetCommittedMarker.unapply") 6 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR931-committer-type-bounds.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.javadsl.Committer.flowWithOffsetContext") 2 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.javadsl.Committer.sinkWithOffsetContext") 3 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.scaladsl.Committer.flowWithOffsetContext") 4 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.scaladsl.Committer.sinkWithOffsetContext") 5 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR945-remove-deprecations.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.Metadata.createGetCommitedOffset") 2 | 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.withWakeupTimeout") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.withMaxWakeups") 4 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.withWakeupDebug") 5 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.javadsl.Producer.commitableSink") 6 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.scaladsl.Producer.commitableSink") 7 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR949-partition-assignment-handler.excludes: -------------------------------------------------------------------------------- 1 | # Internal API 2 | ProblemFilters.exclude[Problem]("akka.kafka.Subscriptions#TopicSubscription.*") 3 | ProblemFilters.exclude[Problem]("akka.kafka.Subscriptions#TopicSubscriptionPattern.*") 4 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Subscriptions$TopicSubscription$") 5 | ProblemFilters.exclude[MissingTypesProblem]("akka.kafka.Subscriptions$TopicSubscriptionPattern$") 6 | # Sealed trait 7 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.AutoSubscription.partitionAssignmentHandler") 8 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.AutoSubscription.withPartitionAssignmentHandler") 9 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.0.backwards.excludes/PR959-committable-deprecations.excludes: -------------------------------------------------------------------------------- 1 | # The trait had been marked "do not inherit" for a while 2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#Committable.commitInternal") 3 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/1.1.x.backwards.excludes: -------------------------------------------------------------------------------- 1 | # allow more parameters to private constructors 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ProducerSettings.this") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.this") 4 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/2.0.1.backwards.excludes/PR1033-widen-type-for-committablebatch-factory-method.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.kafka.ConsumerMessage.createCommittableOffsetBatch") 2 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/2.0.2.backwards.excludes: -------------------------------------------------------------------------------- 1 | # Internal API 2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.isEmpty") 3 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.tellCommitEmergency") 4 | 5 | # private API, triggered by Mima upgrade https://github.com/akka/alpakka-kafka/pull/1112 6 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.CommitterSettings.this") 7 | -------------------------------------------------------------------------------- 
/core/src/main/mima-filters/2.1.0-M1.backwards.excludes/PR1262-committables-used-by-commit-when.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.kafka.ConsumerMessage#CommittableOffsetBatch.filter") 2 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/2.1.0-M1.backwards.excludes/PR1299-consumer-settings-constructor.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.this") 2 | -------------------------------------------------------------------------------- /core/src/main/mima-filters/4.0.0.backwards.excludes: -------------------------------------------------------------------------------- 1 | # Scala3 requirement 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffset.copy") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffset.copy$default$1") 4 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerMessage#PartitionOffset.copy$default$2") -------------------------------------------------------------------------------- /core/src/main/mima-filters/5.0.0.backwards.excludes: -------------------------------------------------------------------------------- 1 | # ApiMayChange 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.javadsl.Transactional.flowWithOffsetContext") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.javadsl.Transactional.sinkWithOffsetContext") 4 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.scaladsl.Transactional.flowWithOffsetContext") 5 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.scaladsl.Transactional.sinkWithOffsetContext") 6 | 7 | # internal 8 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ProducerSettings.this") 9 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.ConsumerSettings.this") -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/CommitTimeoutException.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import java.util.concurrent.TimeoutException 9 | 10 | /** 11 | * Commits will be failed with this exception if Kafka doesn't respond within `commit-timeout` 12 | */ 13 | class CommitTimeoutException(msg: String) extends TimeoutException(msg) {} 14 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/ConnectionCheckerSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka 7 | 8 | import com.typesafe.config.Config 9 | 10 | import scala.concurrent.duration._ 11 | import scala.jdk.DurationConverters._ 12 | 13 | import java.time.{Duration => JDuration} 14 | 15 | class ConnectionCheckerSettings private[kafka] (val enable: Boolean, 16 | val maxRetries: Int, 17 | val checkInterval: FiniteDuration, 18 | val factor: Double) { 19 | 20 | require(factor > 0, "Backoff factor for connection checker must be finite positive number") 21 | require(maxRetries >= 0, "retries for connection checker must be not negative number") 22 | 23 | private def copy(enable: Boolean = enable, 24 | maxRetries: Int = maxRetries, 25 | checkInterval: FiniteDuration = checkInterval, 26 | factor: Double = factor): ConnectionCheckerSettings = 27 | new ConnectionCheckerSettings( 28 | enable, 29 | maxRetries, 30 | checkInterval, 31 | factor 32 | ) 33 | 34 | def withEnable(enable: Boolean): ConnectionCheckerSettings = copy(enable = enable) 35 | def withMaxRetries(maxRetries: Int): ConnectionCheckerSettings = copy(maxRetries = maxRetries) 36 | def withFactor(factor: Double): ConnectionCheckerSettings = copy(factor = factor) 37 | 38 | /** Scala API */ 39 | def withCheckInterval(checkInterval: FiniteDuration): ConnectionCheckerSettings = copy(checkInterval = checkInterval) 40 | 41 | /** Java API */ 42 | def withCheckInterval(checkInterval: JDuration): ConnectionCheckerSettings = 43 | copy(checkInterval = checkInterval.toScala) 44 | 45 | override def toString: String = 46 | s"akka.kafka.ConnectionCheckerSettings(" + 47 | s"enable=$enable," + 48 | s"maxRetries=$maxRetries," + 49 | s"checkInterval=${checkInterval.toCoarsest}," + 50 | s"factor=$factor" + 51 | ")" 52 | } 53 | 54 | object ConnectionCheckerSettings { 55 | 56 | val configPath: String = "connection-checker" 57 | val fullConfigPath: String = ConsumerSettings.configPath + "." + configPath 58 | 59 | def apply(maxRetries: Int, checkInterval: FiniteDuration, factor: Double): ConnectionCheckerSettings = 60 | new ConnectionCheckerSettings(true, maxRetries, checkInterval, factor) 61 | 62 | def create(maxRetries: Int, checkInterval: FiniteDuration, factor: Double): ConnectionCheckerSettings = 63 | apply(maxRetries, checkInterval, factor) 64 | 65 | /** 66 | * Create settings from a configuration with layout `connection-checker`. 67 | */ 68 | def apply(config: Config): ConnectionCheckerSettings = { 69 | val enable = config.getBoolean("enable") 70 | if (enable) { 71 | val retries = config.getInt("max-retries") 72 | val factor = config.getDouble("backoff-factor") 73 | val checkInterval = config.getDuration("check-interval").toScala 74 | apply(retries, checkInterval, factor) 75 | } else Disabled 76 | } 77 | 78 | /** 79 | * Java API: Create settings from a configuration with layout `connection-checker`. 80 | */ 81 | def create(config: Config): ConnectionCheckerSettings = apply(config) 82 | 83 | val Disabled: ConnectionCheckerSettings = new ConnectionCheckerSettings(false, 3, 15.seconds, 2d) 84 | 85 | } 86 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/ConsumerFailed.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import scala.concurrent.duration.FiniteDuration 9 | 10 | /** 11 | * Kafka consumer stages fail with this exception. 
12 | */ 13 | class ConsumerFailed(msg: String) extends RuntimeException(msg) { 14 | def this() = this("Consumer actor terminated") // for backwards compatibility 15 | def this(cause: Throwable) = { 16 | this() 17 | initCause(cause) 18 | } 19 | def this(msg: String, cause: Throwable) = { 20 | this(msg) 21 | initCause(cause) 22 | } 23 | } 24 | 25 | class InitialPollFailed(val timeout: Long, val bootstrapServers: String) 26 | extends ConsumerFailed( 27 | s"Initial consumer poll($timeout) with bootstrap servers " + 28 | s"$bootstrapServers did not succeed, correct address?" 29 | ) 30 | 31 | class WakeupsExceeded(val timeout: Long, val maxWakeups: Int, val wakeupTimeout: FiniteDuration) 32 | extends ConsumerFailed( 33 | s"WakeupException limit exceeded during poll($timeout), stopping (max-wakeups = $maxWakeups, wakeup-timeout = ${wakeupTimeout.toCoarsest})." 34 | ) 35 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/KafkaConnectionFailed.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import org.apache.kafka.common.errors.TimeoutException 9 | 10 | final case class KafkaConnectionFailed(te: TimeoutException, attempts: Int) 11 | extends Exception(s"Can't establish connection with kafkaBroker after $attempts attempts", te) 12 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/KafkaConsumerActor.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import akka.actor.{ActorRef, NoSerializationVerificationNeeded, Props} 9 | import akka.annotation.InternalApi 10 | import akka.kafka.internal.{KafkaConsumerActor => InternalKafkaConsumerActor} 11 | 12 | object KafkaConsumerActor { 13 | 14 | @InternalApi 15 | private[kafka] trait StopLike 16 | 17 | /** 18 | * Message to send for stopping the Kafka consumer actor. 19 | */ 20 | case object Stop extends NoSerializationVerificationNeeded with StopLike 21 | 22 | /** 23 | * Java API: 24 | * Message to send for stopping the Kafka consumer actor. 25 | */ 26 | val stop = Stop 27 | 28 | case class StoppingException() extends RuntimeException("Kafka consumer is stopping") 29 | 30 | /** 31 | * Creates Props for the Kafka Consumer Actor. 32 | */ 33 | def props[K, V](settings: ConsumerSettings[K, V]): Props = 34 | Props(new InternalKafkaConsumerActor(None, settings)).withDispatcher(settings.dispatcher) 35 | 36 | /** 37 | * Creates Props for the Kafka Consumer Actor with a reference back to the owner of it 38 | * which will be signalled with [[akka.actor.Status.Failure Failure(exception)]], in case the 39 | * Kafka client instance can't be created. 40 | */ 41 | def props[K, V](owner: ActorRef, settings: ConsumerSettings[K, V]): Props = 42 | Props(new InternalKafkaConsumerActor(Some(owner), settings)).withDispatcher(settings.dispatcher) 43 | } 44 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/CommitTrigger.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | private[akka] object CommitTrigger { 9 | sealed trait TriggerdBy 10 | case object BatchSize extends TriggerdBy { 11 | override def toString: String = "batch size" 12 | } 13 | case object Interval extends TriggerdBy { 14 | override def toString: String = "interval" 15 | } 16 | case object UpstreamClosed extends TriggerdBy { 17 | override def toString: String = "upstream closed" 18 | } 19 | case object UpstreamFinish extends TriggerdBy { 20 | override def toString: String = "upstream finish" 21 | } 22 | case object UpstreamFailure extends TriggerdBy { 23 | override def toString: String = "upstream failure" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/ConfigSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import java.util 9 | 10 | import akka.annotation.InternalApi 11 | import com.typesafe.config.{Config, ConfigObject} 12 | 13 | import scala.annotation.tailrec 14 | import scala.concurrent.duration.Duration 15 | import scala.jdk.CollectionConverters._ 16 | import scala.jdk.DurationConverters._ 17 | 18 | /** 19 | * INTERNAL API 20 | * 21 | * Converts a [[com.typesafe.config.Config]] section to a Map for use with Kafka Consumer or Producer. 22 | */ 23 | @InternalApi private[kafka] object ConfigSettings { 24 | 25 | def parseKafkaClientsProperties(config: Config): Map[String, String] = { 26 | @tailrec 27 | def collectKeys(c: ConfigObject, processedKeys: Set[String], unprocessedKeys: List[String]): Set[String] = 28 | if (unprocessedKeys.isEmpty) processedKeys 29 | else { 30 | c.toConfig.getAnyRef(unprocessedKeys.head) match { 31 | case o: util.Map[_, _] => 32 | collectKeys(c, 33 | processedKeys, 34 | unprocessedKeys.tail ::: o.keySet().asScala.toList.map(unprocessedKeys.head + "." + _)) 35 | case _ => 36 | collectKeys(c, processedKeys + unprocessedKeys.head, unprocessedKeys.tail) 37 | } 38 | } 39 | 40 | val keys = collectKeys(config.root, Set.empty[String], config.root().keySet().asScala.toList) 41 | keys.map(key => key -> config.getString(key)).toMap 42 | } 43 | 44 | def getPotentiallyInfiniteDuration(underlying: Config, path: String): Duration = underlying.getString(path) match { 45 | case "infinite" => Duration.Inf 46 | case _ => underlying.getDuration(path).toScala 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/ConnectionChecker.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.actor.{Actor, ActorLogging, Props, Timers} 9 | import akka.annotation.InternalApi 10 | import akka.event.LoggingReceive 11 | import akka.kafka.{ConnectionCheckerSettings, KafkaConnectionFailed, Metadata} 12 | import org.apache.kafka.common.errors.TimeoutException 13 | 14 | import scala.concurrent.duration.FiniteDuration 15 | import scala.util.{Failure, Success} 16 | 17 | @InternalApi private class ConnectionChecker(config: ConnectionCheckerSettings) 18 | extends Actor 19 | with ActorLogging 20 | with Timers { 21 | 22 | import ConnectionChecker.Internal._ 23 | import config.{enable => _, _} 24 | 25 | override def preStart(): Unit = { 26 | super.preStart() 27 | startTimer() 28 | } 29 | 30 | override def receive: Receive = regular 31 | 32 | def regular: Receive = 33 | LoggingReceive.withLabel("regular")(behaviour(0, checkInterval)) 34 | 35 | def backoff(failedAttempts: Int = 1, backoffCheckInterval: FiniteDuration): Receive = 36 | LoggingReceive.withLabel(s"backoff($failedAttempts, $backoffCheckInterval)")( 37 | behaviour(failedAttempts, backoffCheckInterval) 38 | ) 39 | 40 | def behaviour(failedAttempts: Int, interval: FiniteDuration): Receive = { 41 | case CheckConnection => 42 | context.parent ! Metadata.ListTopics 43 | 44 | case Metadata.Topics(Failure(te: TimeoutException)) => 45 | //failedAttempts is a sum of first triggered failure and retries (retries + 1) 46 | if (failedAttempts == maxRetries) { 47 | context.parent ! KafkaConnectionFailed(te, maxRetries) 48 | context.stop(self) 49 | } else context.become(backoff(failedAttempts + 1, startBackoffTimer(interval))) 50 | 51 | case Metadata.Topics(Success(_)) => 52 | startTimer() 53 | context.become(regular) 54 | } 55 | 56 | def startTimer(): Unit = timers.startSingleTimer(RegularCheck, CheckConnection, checkInterval) 57 | 58 | /** start single timer and return it's interval 59 | * 60 | * @param previousInterval previous CheckConnection interval 61 | * @return new backoff interval (previousInterval * factor) 62 | */ 63 | def startBackoffTimer(previousInterval: FiniteDuration): FiniteDuration = { 64 | val backoffCheckInterval = (previousInterval * factor).asInstanceOf[FiniteDuration] 65 | timers.startSingleTimer(BackoffCheck, CheckConnection, backoffCheckInterval) 66 | backoffCheckInterval 67 | } 68 | 69 | } 70 | 71 | @InternalApi object ConnectionChecker { 72 | 73 | def props(config: ConnectionCheckerSettings): Props = Props(new ConnectionChecker(config)) 74 | 75 | private object Internal { 76 | //Timer labels 77 | case object RegularCheck 78 | case object BackoffCheck 79 | 80 | //Commands 81 | case object CheckConnection 82 | } 83 | 84 | } 85 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.actor.ActorRef 9 | import akka.annotation.InternalApi 10 | import akka.kafka.ManualSubscription 11 | import akka.stream.SourceShape 12 | 13 | import scala.concurrent.Future 14 | 15 | /** 16 | * Internal API. 17 | * 18 | * Single source logic for externally provided [[KafkaConsumerActor]]. 
19 | */ 20 | @InternalApi private abstract class ExternalSingleSourceLogic[K, V, Msg]( 21 | shape: SourceShape[Msg], 22 | _consumerActor: ActorRef, 23 | val subscription: ManualSubscription 24 | ) extends BaseSingleSourceLogic[K, V, Msg](shape) { 25 | 26 | final override protected def logSource: Class[_] = classOf[ExternalSingleSourceLogic[K, V, Msg]] 27 | 28 | final val consumerFuture: Future[ActorRef] = Future.successful(_consumerActor) 29 | 30 | final def createConsumerActor(): ActorRef = _consumerActor 31 | 32 | final def configureSubscription(): Unit = 33 | configureManualSubscription(subscription) 34 | 35 | final override def performShutdown(): Unit = { 36 | super.performShutdown() 37 | completeStage() 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/KafkaSourceStage.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.annotation.InternalApi 9 | import akka.kafka.scaladsl.Consumer._ 10 | import akka.stream._ 11 | import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue} 12 | 13 | /** 14 | * INTERNAL API 15 | */ 16 | @InternalApi 17 | private[kafka] abstract class KafkaSourceStage[K, V, Msg](stageName: String) 18 | extends GraphStageWithMaterializedValue[SourceShape[Msg], Control] { 19 | protected val out = Outlet[Msg]("out") 20 | val shape = new SourceShape(out) 21 | 22 | override protected def initialAttributes: Attributes = Attributes.name(stageName) 23 | 24 | protected def logic(shape: SourceShape[Msg]): GraphStageLogic with Control 25 | override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { 26 | val result = logic(shape) 27 | (result, result) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/LoggingWithId.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.actor.{Actor, ActorLogging} 9 | import akka.event.LoggingAdapter 10 | import akka.stream.stage.{GraphStageLogic, StageLogging} 11 | 12 | /** 13 | * Generate a short random UID for something. 14 | */ 15 | private[internal] trait InstanceId { 16 | private val instanceId = java.util.UUID.randomUUID().toString.take(5) 17 | def id: String = instanceId 18 | } 19 | 20 | /** 21 | * Override akka streams [[StageLogging]] to include an ID from [[InstanceId]] as a prefix to each logging statement. 22 | */ 23 | private[internal] trait StageIdLogging extends StageLogging with InstanceId { self: GraphStageLogic => 24 | private[this] var _log: LoggingAdapter = _ 25 | protected def idLogPrefix: String = s"[$id] " 26 | override def log: LoggingAdapter = { 27 | if (_log eq null) { 28 | _log = new LoggingAdapterWithPrefix(super.log, idLogPrefix) 29 | } 30 | _log 31 | } 32 | } 33 | 34 | /** 35 | * Override akka classic [[ActorLogging]] to include an ID from [[InstanceId]] as a prefix to each logging statement. 
36 | */ 37 | private[internal] trait ActorIdLogging extends ActorLogging with InstanceId { this: Actor => 38 | private[this] var _log: LoggingAdapter = _ 39 | protected def idLogPrefix: String = s"[$id] " 40 | override def log: LoggingAdapter = { 41 | if (_log eq null) { 42 | _log = new LoggingAdapterWithPrefix(super.log, idLogPrefix) 43 | } 44 | _log 45 | } 46 | } 47 | 48 | private[internal] final class LoggingAdapterWithPrefix(logger: LoggingAdapter, prefix: String) extends LoggingAdapter { 49 | private def msgWithId(message: String): String = prefix + message 50 | 51 | override protected def notifyError(message: String): Unit = logger.error(msgWithId(message)) 52 | override protected def notifyError(cause: Throwable, message: String): Unit = logger.error(cause, msgWithId(message)) 53 | override protected def notifyWarning(message: String): Unit = logger.warning(msgWithId(message)) 54 | override protected def notifyInfo(message: String): Unit = logger.info(msgWithId(message)) 55 | override protected def notifyDebug(message: String): Unit = logger.debug(msgWithId(message)) 56 | 57 | override def isErrorEnabled: Boolean = logger.isErrorEnabled 58 | override def isWarningEnabled: Boolean = logger.isWarningEnabled 59 | override def isInfoEnabled: Boolean = logger.isInfoEnabled 60 | override def isDebugEnabled: Boolean = logger.isDebugEnabled 61 | } 62 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/ProducerStage.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.annotation.InternalApi 9 | import akka.kafka.ProducerMessage._ 10 | import akka.kafka.ProducerSettings 11 | import akka.stream._ 12 | 13 | import scala.concurrent.Future 14 | 15 | /** 16 | * INTERNAL API 17 | * 18 | * Implemented by [[DefaultProducerStage]] and [[TransactionalProducerStage]]. 19 | */ 20 | @InternalApi 21 | private[internal] trait ProducerStage[K, V, P, IN <: Envelope[K, V, P], OUT <: Results[K, V, P]] { 22 | val settings: ProducerSettings[K, V] 23 | 24 | val in: Inlet[IN] = Inlet[IN]("messages") 25 | val out: Outlet[Future[OUT]] = Outlet[Future[OUT]]("result") 26 | val shape: FlowShape[IN, Future[OUT]] = FlowShape(in, out) 27 | } 28 | 29 | /** 30 | * INTERNAL API 31 | */ 32 | @InternalApi 33 | private[internal] object ProducerStage { 34 | 35 | trait ProducerCompletionState { 36 | def onCompletionSuccess(): Unit 37 | def onCompletionFailure(ex: Throwable): Unit 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/SourceLogicBuffer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | import akka.annotation.InternalApi 8 | import akka.stream.stage.{AsyncCallback, GraphStageLogic} 9 | import org.apache.kafka.clients.consumer.ConsumerRecord 10 | import org.apache.kafka.common.TopicPartition 11 | 12 | /** 13 | * A buffer of messages provided by the [[KafkaConsumerActor]] for a Source Logic. When partitions are rebalanced 14 | * away from this Source Logic preemptively filter out messages for those partitions. 
15 | * 16 | * NOTE: Due to the asynchronous nature of Akka Streams, it's not possible to guarantee that a message has not 17 | * already been sent downstream for a revoked partition before the rebalance handler invokes 18 | * `filterRevokedPartitionsCB`. The best we can do is filter as many messages as possible to reduce the amount of 19 | * duplicate messages sent downstream. 20 | */ 21 | @InternalApi 22 | private[kafka] trait SourceLogicBuffer[K, V, Msg] { 23 | self: GraphStageLogic with StageIdLogging => 24 | 25 | protected var buffer: Iterator[ConsumerRecord[K, V]] = Iterator.empty 26 | 27 | protected val filterRevokedPartitionsCB: AsyncCallback[Set[TopicPartition]] = 28 | getAsyncCallback[Set[TopicPartition]](filterRevokedPartitions) 29 | 30 | private def filterRevokedPartitions(topicPartitions: Set[TopicPartition]): Unit = { 31 | if (topicPartitions.nonEmpty) { 32 | log.debug("filtering out messages from revoked partitions {}", topicPartitions) 33 | // as buffer is an Iterator the filtering will be applied during `pump` 34 | buffer = buffer.filterNot { record => 35 | val tp = new TopicPartition(record.topic, record.partition) 36 | topicPartitions.contains(tp) 37 | } 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/internal/SourceLogicSubscription.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | import akka.actor.ActorRef 8 | import akka.annotation.InternalApi 9 | import akka.kafka.{AutoSubscription, ManualSubscription, Subscription} 10 | import akka.kafka.Subscriptions._ 11 | import akka.kafka.scaladsl.PartitionAssignmentHandler 12 | import akka.stream.stage.GraphStageLogic.StageActor 13 | import akka.stream.stage.{AsyncCallback, GraphStageLogic} 14 | import org.apache.kafka.common.TopicPartition 15 | 16 | /** 17 | * Common subscription logic that's shared across sources. 18 | * 19 | * The implementation can inject its own behaviour in two ways: 20 | * 21 | * 1. Asynchronously by providing [[AsyncCallback]]s for rebalance events 22 | * 2. 
Synchronously by overriding `addToPartitionAssignmentHandler` 23 | */ 24 | @InternalApi 25 | private[kafka] trait SourceLogicSubscription { 26 | self: GraphStageLogic => 27 | 28 | protected def subscription: Subscription 29 | 30 | protected def consumerActor: ActorRef 31 | protected def sourceActor: StageActor 32 | 33 | protected def configureSubscription(partitionAssignedCB: AsyncCallback[Set[TopicPartition]], 34 | partitionRevokedCB: AsyncCallback[Set[TopicPartition]]): Unit = { 35 | 36 | def rebalanceListener(autoSubscription: AutoSubscription): PartitionAssignmentHandler = { 37 | PartitionAssignmentHelpers.chain( 38 | addToPartitionAssignmentHandler(autoSubscription.partitionAssignmentHandler), 39 | new PartitionAssignmentHelpers.AsyncCallbacks(autoSubscription, 40 | sourceActor.ref, 41 | partitionAssignedCB, 42 | partitionRevokedCB) 43 | ) 44 | } 45 | 46 | subscription match { 47 | case sub @ TopicSubscription(topics, _, _) => 48 | consumerActor.tell( 49 | KafkaConsumerActor.Internal.Subscribe( 50 | topics, 51 | addToPartitionAssignmentHandler(rebalanceListener(sub)) 52 | ), 53 | sourceActor.ref 54 | ) 55 | case sub @ TopicSubscriptionPattern(topics, _, _) => 56 | consumerActor.tell( 57 | KafkaConsumerActor.Internal.SubscribePattern( 58 | topics, 59 | addToPartitionAssignmentHandler(rebalanceListener(sub)) 60 | ), 61 | sourceActor.ref 62 | ) 63 | case s: ManualSubscription => configureManualSubscription(s) 64 | } 65 | } 66 | 67 | protected def configureManualSubscription(subscription: ManualSubscription): Unit = () 68 | 69 | /** 70 | * Opportunity for subclasses to add a different logic to the partition assignment callbacks. 71 | */ 72 | protected def addToPartitionAssignmentHandler(handler: PartitionAssignmentHandler): PartitionAssignmentHandler = 73 | handler 74 | } 75 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/javadsl/Committer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.javadsl 7 | import java.util.concurrent.CompletionStage 8 | 9 | import akka.annotation.ApiMayChange 10 | import akka.japi.Pair 11 | import akka.{Done, NotUsed} 12 | import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch} 13 | import akka.kafka.{scaladsl, CommitterSettings} 14 | import akka.stream.javadsl.{Flow, FlowWithContext, Sink} 15 | 16 | import scala.jdk.FutureConverters._ 17 | 18 | object Committer { 19 | 20 | /** 21 | * Batches offsets and commits them to Kafka, emits `Done` for every committed batch. 22 | */ 23 | def flow[C <: Committable](settings: CommitterSettings): Flow[C, Done, NotUsed] = 24 | scaladsl.Committer.flow(settings).asJava 25 | 26 | /** 27 | * Batches offsets and commits them to Kafka, emits `CommittableOffsetBatch` for every committed batch. 28 | */ 29 | def batchFlow[C <: Committable](settings: CommitterSettings): Flow[C, CommittableOffsetBatch, NotUsed] = 30 | scaladsl.Committer.batchFlow(settings).asJava 31 | 32 | /** 33 | * API MAY CHANGE 34 | * 35 | * Batches offsets from context and commits them to Kafka, emits no useful value, but keeps the committed 36 | * `CommittableOffsetBatch` as context. 
37 | */ 38 | @ApiMayChange 39 | def flowWithOffsetContext[E, C <: Committable]( 40 | settings: CommitterSettings 41 | ): FlowWithContext[E, C, NotUsed, CommittableOffsetBatch, NotUsed] = 42 | scaladsl.Committer.flowWithOffsetContext[E](settings).asJava 43 | 44 | /** 45 | * Batches offsets and commits them to Kafka. 46 | */ 47 | def sink[C <: Committable](settings: CommitterSettings): Sink[C, CompletionStage[Done]] = 48 | scaladsl.Committer.sink(settings).mapMaterializedValue(_.asJava).asJava 49 | 50 | /** 51 | * API MAY CHANGE 52 | * 53 | * Batches offsets from context and commits them to Kafka. 54 | */ 55 | @ApiMayChange 56 | def sinkWithOffsetContext[E, C <: Committable]( 57 | settings: CommitterSettings 58 | ): Sink[Pair[E, C], CompletionStage[Done]] = 59 | akka.stream.scaladsl 60 | .Flow[Pair[E, C]] 61 | .map(_.toScala) 62 | .toMat(scaladsl.Committer.sinkWithOffsetContext(settings))(akka.stream.scaladsl.Keep.right) 63 | .mapMaterializedValue[CompletionStage[Done]](_.asJava) 64 | .asJava[Pair[E, C]] 65 | } 66 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/javadsl/DiscoverySupport.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.javadsl 7 | 8 | import java.util.concurrent.CompletionStage 9 | 10 | import akka.actor.{ActorSystem, ClassicActorSystemProvider} 11 | import akka.kafka.{scaladsl, ConsumerSettings, ProducerSettings} 12 | import com.typesafe.config.Config 13 | 14 | import scala.concurrent.Future 15 | import scala.jdk.FunctionConverters._ 16 | import scala.jdk.FutureConverters._ 17 | 18 | /** 19 | * Scala API. 20 | * 21 | * Reads Kafka bootstrap servers from configured sources via [[akka.discovery.Discovery]] configuration. 22 | */ 23 | object DiscoverySupport { 24 | 25 | /** 26 | * Expects a `service` section in the given Config and reads the given service name's address 27 | * to be used as `bootstrapServers`. 28 | */ 29 | def consumerBootstrapServers[K, V]( 30 | config: Config, 31 | system: ClassicActorSystemProvider 32 | ): java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = { 33 | implicit val sys: ClassicActorSystemProvider = system 34 | val function: ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] = 35 | scaladsl.DiscoverySupport.consumerBootstrapServers(config) 36 | function.andThen(_.asJava).asJava 37 | } 38 | 39 | // kept for bin-compatibility 40 | def consumerBootstrapServers[K, V]( 41 | config: Config, 42 | system: ActorSystem 43 | ): java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = { 44 | val sys: ClassicActorSystemProvider = system 45 | consumerBootstrapServers(config, sys) 46 | } 47 | 48 | /** 49 | * Expects a `service` section in the given Config and reads the given service name's address 50 | * to be used as `bootstrapServers`. 
51 | */ 52 | def producerBootstrapServers[K, V]( 53 | config: Config, 54 | system: ClassicActorSystemProvider 55 | ): java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = { 56 | implicit val sys: ClassicActorSystemProvider = system 57 | val function: ProducerSettings[K, V] => Future[ProducerSettings[K, V]] = 58 | scaladsl.DiscoverySupport.producerBootstrapServers(config) 59 | function.andThen(_.asJava).asJava 60 | } 61 | 62 | // kept for bin-compatibility 63 | def producerBootstrapServers[K, V]( 64 | config: Config, 65 | system: ActorSystem 66 | ): java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = { 67 | val sys: ClassicActorSystemProvider = system 68 | producerBootstrapServers(config, sys) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/javadsl/PartitionAssignmentHandler.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.javadsl 7 | 8 | import akka.kafka.RestrictedConsumer 9 | import org.apache.kafka.common.TopicPartition 10 | 11 | /** 12 | * The API is new and may change in further releases. 13 | * 14 | * Allows the user to execute user code when Kafka rebalances partitions between consumers, or an Alpakka Kafka consumer is stopped. 15 | * Use with care: These callbacks are called synchronously on the same thread Kafka's `poll()` is called. 16 | * A warning will be logged if a callback takes longer than the configured `partition-handler-warning`. 17 | * 18 | * There is no point in calling `Committable`'s commit methods as their committing won't be executed as long as any of 19 | * the callbacks in this class are called. Calling `commitSync` on the passed [[akka.kafka.RestrictedConsumer]] is available. 20 | * 21 | * This complements the methods of Kafka's [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener ConsumerRebalanceListener]] with 22 | * an `onStop` callback which is called before `Consumer.close`. 23 | */ 24 | trait PartitionAssignmentHandler { 25 | 26 | /** 27 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked]] 28 | * 29 | * @param revokedTps The list of partitions that were revoked from the consumer 30 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 31 | */ 32 | def onRevoke(revokedTps: java.util.Set[TopicPartition], consumer: RestrictedConsumer): Unit 33 | 34 | /** 35 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsAssigned]] 36 | * 37 | * @param assignedTps The list of partitions that are now assigned to the consumer (may include partitions previously assigned to the consumer) 38 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 39 | */ 40 | def onAssign(assignedTps: java.util.Set[TopicPartition], consumer: RestrictedConsumer): Unit 41 | 42 | /** 43 | * Called when partition metadata has changed and partitions no longer exist. This can occur if a topic is deleted or if the leader's metadata is stale. 
44 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsLost]] 45 | * 46 | * @param lostTps The list of partitions that are no longer valid 47 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 48 | */ 49 | def onLost(lostTps: java.util.Set[TopicPartition], consumer: RestrictedConsumer): Unit 50 | 51 | /** 52 | * Called before a consumer is closed. 53 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked]] 54 | * 55 | * @param currentTps The list of partitions that are currently assigned to the consumer 56 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 57 | */ 58 | def onStop(currentTps: java.util.Set[TopicPartition], consumer: RestrictedConsumer): Unit 59 | 60 | } 61 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/javadsl/SendProducer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.javadsl 7 | 8 | import java.util.concurrent.CompletionStage 9 | 10 | import akka.Done 11 | import akka.actor.{ActorSystem, ClassicActorSystemProvider} 12 | import akka.kafka.ProducerMessage._ 13 | import akka.kafka.{scaladsl, ProducerSettings} 14 | import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata} 15 | 16 | import scala.jdk.FutureConverters._ 17 | 18 | /** 19 | * Utility class for producing to Kafka without using Akka Streams. 20 | */ 21 | final class SendProducer[K, V] private (underlying: scaladsl.SendProducer[K, V]) { 22 | 23 | // kept for bin-compatibility 24 | @deprecated("use the variant with ClassicActorSystemProvider instead", "2.0.5") 25 | private[kafka] def this(settings: ProducerSettings[K, V], system: ActorSystem) = 26 | this(scaladsl.SendProducer(settings)(system)) 27 | 28 | /** 29 | * Utility class for producing to Kafka without using Akka Streams. 30 | * @param settings producer settings used to create or access the [[org.apache.kafka.clients.producer.Producer]] 31 | * 32 | * The internal asynchronous operations run on the provided `Executor` (which may be an `ActorSystem`'s dispatcher). 33 | */ 34 | def this(settings: ProducerSettings[K, V], system: ClassicActorSystemProvider) = 35 | this(scaladsl.SendProducer(settings)(system.classicSystem)) 36 | 37 | /** 38 | * Send records to Kafka topics and complete a future with the result. 39 | * 40 | * It publishes records to Kafka topics conditionally: 41 | * 42 | * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and completes the future with [[akka.kafka.ProducerMessage.Result Result]] 43 | * 44 | * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and completes the future with [[akka.kafka.ProducerMessage.MultiResult MultiResult]] 45 | * 46 | * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and completes the future with [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]] 47 | * 48 | * The messages support passing through arbitrary data. 
49 | */ 50 | def sendEnvelope[PT](envelope: Envelope[K, V, PT]): CompletionStage[Results[K, V, PT]] = 51 | underlying.sendEnvelope(envelope).asJava 52 | 53 | /** 54 | * Send a raw Kafka [[org.apache.kafka.clients.producer.ProducerRecord]] and complete a future with the resulting metadata. 55 | */ 56 | def send(record: ProducerRecord[K, V]): CompletionStage[RecordMetadata] = 57 | underlying.send(record).asJava 58 | 59 | /** 60 | * Close the underlying producer (depending on the "close producer on stop" setting). 61 | */ 62 | def close(): CompletionStage[Done] = underlying.close().asJava 63 | 64 | override def toString: String = s"SendProducer(${underlying.settings})" 65 | } 66 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/scaladsl/Committer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import akka.annotation.ApiMayChange 9 | import akka.kafka.CommitterSettings 10 | import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch} 11 | import akka.kafka.internal.CommitCollectorStage 12 | import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink} 13 | import akka.{Done, NotUsed} 14 | 15 | import scala.concurrent.ExecutionContext 16 | import scala.concurrent.Future 17 | 18 | object Committer { 19 | 20 | /** 21 | * Batches offsets and commits them to Kafka, emits `Done` for every committed batch. 22 | */ 23 | def flow(settings: CommitterSettings): Flow[Committable, Done, NotUsed] = 24 | batchFlow(settings).map(_ => Done) 25 | 26 | /** 27 | * Batches offsets and commits them to Kafka, emits `CommittableOffsetBatch` for every committed batch. 28 | */ 29 | def batchFlow(settings: CommitterSettings): Flow[Committable, CommittableOffsetBatch, NotUsed] = { 30 | val offsetBatches: Flow[Committable, CommittableOffsetBatch, NotUsed] = 31 | Flow 32 | .fromGraph(new CommitCollectorStage(settings)) 33 | 34 | // See https://github.com/akka/alpakka-kafka/issues/882 35 | import akka.kafka.CommitDelivery._ 36 | settings.delivery match { 37 | case WaitForAck => 38 | offsetBatches 39 | .mapAsyncUnordered(settings.parallelism) { batch => 40 | batch.commitInternal().map(_ => batch)(ExecutionContext.parasitic) 41 | } 42 | case SendAndForget => 43 | offsetBatches.map(_.tellCommit()) 44 | } 45 | } 46 | 47 | /** 48 | * API MAY CHANGE 49 | * 50 | * Batches offsets from context and commits them to Kafka, emits no useful value, but keeps the committed 51 | * `CommittableOffsetBatch` as context. 52 | */ 53 | @ApiMayChange 54 | def flowWithOffsetContext[E]( 55 | settings: CommitterSettings 56 | ): FlowWithContext[E, Committable, NotUsed, CommittableOffsetBatch, NotUsed] = { 57 | val value = Flow[(E, Committable)] 58 | .map(_._2) 59 | .via(batchFlow(settings)) 60 | .map(b => (NotUsed, b)) 61 | new FlowWithContext(value) 62 | } 63 | 64 | /** 65 | * Batches offsets and commits them to Kafka. 66 | */ 67 | def sink(settings: CommitterSettings): Sink[Committable, Future[Done]] = 68 | flow(settings) 69 | .toMat(Sink.ignore)(Keep.right) 70 | 71 | /** 72 | * API MAY CHANGE 73 | * 74 | * Batches offsets from context and commits them to Kafka. 
75 | */ 76 | @ApiMayChange 77 | def sinkWithOffsetContext[E](settings: CommitterSettings): Sink[(E, Committable), Future[Done]] = 78 | Flow[(E, Committable)] 79 | .via(flowWithOffsetContext(settings)) 80 | .toMat(Sink.ignore)(Keep.right) 81 | 82 | } 83 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/kafka/scaladsl/PartitionAssignmentHandler.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import akka.kafka.RestrictedConsumer 9 | import org.apache.kafka.common.TopicPartition 10 | 11 | /** 12 | * The API is new and may change in further releases. 13 | * 14 | * Allows the user to execute user code when Kafka rebalances partitions between consumers, or an Alpakka Kafka consumer is stopped. 15 | * Use with care: These callbacks are called synchronously on the same thread Kafka's `poll()` is called. 16 | * A warning will be logged if a callback takes longer than the configured `partition-handler-warning`. 17 | * 18 | * There is no point in calling `Committable`'s commit methods as their committing won't be executed as long as any of 19 | * the callbacks in this class are called. Calling `commitSync` on the passed [[akka.kafka.RestrictedConsumer]] is available. 20 | * 21 | * This complements the methods of Kafka's [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener ConsumerRebalanceListener]] with 22 | * an `onStop` callback which is called before `Consumer.close`. 23 | */ 24 | trait PartitionAssignmentHandler { 25 | 26 | /** 27 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked]] 28 | * 29 | * @param revokedTps The list of partitions that were revoked from the consumer 30 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 31 | */ 32 | def onRevoke(revokedTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit 33 | 34 | /** 35 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsAssigned]] 36 | * 37 | * @param assignedTps The list of partitions that are now assigned to the consumer (may include partitions previously assigned to the consumer) 38 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 39 | */ 40 | def onAssign(assignedTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit 41 | 42 | /** 43 | * Called when partition metadata has changed and partitions no longer exist. This can occur if a topic is deleted or if the leader's metadata is stale. 44 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsLost]] 45 | * 46 | * @param lostTps The list of partitions that are no longer valid 47 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 48 | */ 49 | def onLost(lostTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit 50 | 51 | /** 52 | * Called before a consumer is closed. 
53 | * See [[org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked]] 54 | * 55 | * @param currentTps The list of partitions that are currently assigned to the consumer 56 | * @param consumer The [[akka.kafka.RestrictedConsumer]] gives some access to the internally used [[org.apache.kafka.clients.consumer.Consumer Consumer]] 57 | */ 58 | def onStop(currentTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit 59 | } 60 | -------------------------------------------------------------------------------- /docs/src/main/paradox/.htaccess: -------------------------------------------------------------------------------- 1 | # No patch version redirect 2 | RedirectMatch 301 ^/docs/alpakka-kafka/([^/]+)/release-notes/1.0.html https://doc.akka.io/libraries/alpakka-kafka/$1/release-notes/1.0.x.html 3 | -------------------------------------------------------------------------------- /docs/src/main/paradox/_template/projectSpecificFooter.st: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /docs/src/main/paradox/assets/js/warnOldVersion.js: -------------------------------------------------------------------------------- 1 | function initOldVersionWarnings($, thisVersion, projectUrl) { 2 | if (projectUrl && projectUrl !== "") { 3 | var schemeLessUrl = projectUrl; 4 | if (projectUrl.startsWith("http://")) projectUrl = schemeLessUrl.substring(5); 5 | else if (projectUrl.startsWith("https://")) projectUrl = schemeLessUrl.substring(6); 6 | const url = schemeLessUrl + (schemeLessUrl.endsWith("\/") ? "" : "/") + "paradox.json"; 7 | $.get(url, function (versionData) { 8 | const currentVersion = versionData.version; 9 | if (thisVersion !== currentVersion) { 10 | showVersionWarning(thisVersion, currentVersion, projectUrl); 11 | } 12 | }); 13 | } 14 | } 15 | 16 | function showVersionWarning(thisVersion, currentVersion, projectUrl) { 17 | $('#docs').prepend( 18 | '
<div>' +
19 | '<p>This documentation regards version ' + thisVersion + ', ' +
20 | 'however the current version is <a href="' + projectUrl + '">' + currentVersion + '</a>.</p>' +
21 | '</div>
' 22 | ); 23 | } 24 | -------------------------------------------------------------------------------- /docs/src/main/paradox/debugging.md: -------------------------------------------------------------------------------- 1 | # Debugging 2 | 3 | Debugging setups with the Alpakka Kafka Connector will be required at times. This page collects a few ideas to start out with in case the connector does not behave as you expected. 4 | 5 | ## Logging with SLF4J 6 | 7 | Akka, Akka Streams and thus the Alpakka Kafka Connector support [SLF4J logging API](https://www.slf4j.org/) by adding Akka's SLF4J module and an SLF4J compatible logging framework, eg. [Logback](https://logback.qos.ch/). 8 | 9 | The Kafka client library used by the Alpakka Kafka connector uses SLF4J, as well. 10 | 11 | @@dependency [Maven,sbt,Gradle] { 12 | symbol=AkkaVersion 13 | value="$akka.version$" 14 | group=com.typesafe.akka 15 | artifact=akka-slf4j_$scala.binary.version$ 16 | version=AkkaVersion 17 | group2=ch.qos.logback 18 | artifact2=logback-classic 19 | version2=1.2.3 20 | } 21 | 22 | To enable Akka SLF4J logging, configure Akka in `application.conf` as below. Refer to the @extref[Akka documentation](akka:logging.html#slf4j) for details. 23 | 24 | ```hocon 25 | akka { 26 | loggers = ["akka.event.slf4j.Slf4jLogger"] 27 | loglevel = "DEBUG" 28 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 29 | } 30 | ``` 31 | 32 | ## Receive logging 33 | 34 | In case you're debugging the internals in the Kafka Consumer actor, you might want to enable receive logging to see all messages it receives. To lower the log message volume, change the Kafka poll interval to something larger, eg. 300 ms. 35 | 36 | ```hocon 37 | akka { 38 | actor { 39 | debug.receive = true 40 | } 41 | kafka.consumer { 42 | poll-interval = 300ms 43 | } 44 | } 45 | ``` 46 | -------------------------------------------------------------------------------- /docs/src/main/paradox/images/alpakka-kafka-stream-trace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akka/alpakka-kafka/73a2880f1b5063fc5822780ddfb922d75d7a5e2b/docs/src/main/paradox/images/alpakka-kafka-stream-trace.png -------------------------------------------------------------------------------- /docs/src/main/paradox/index.md: -------------------------------------------------------------------------------- 1 | # Alpakka Kafka Documentation 2 | 3 | The [Alpakka project](https://doc.akka.io/libraries/alpakka/current/) is an open source initiative to implement stream-aware and reactive integration pipelines for Java and Scala. It is built on top of @extref[Akka Streams](akka:stream/index.html), and has been designed from the ground up to understand streaming natively and provide a DSL for reactive and stream-oriented programming, with built-in support for backpressure. Akka Streams is a [Reactive Streams](https://www.reactive-streams.org/) and JDK 9+ @extref[java.util.concurrent.Flow](java-docs:docs/api/java.base/java/util/concurrent/Flow.html)-compliant implementation and therefore @extref[fully interoperable](akka:general/stream/stream-design.html#interoperation-with-other-reactive-streams-implementations) with other implementations. 4 | 5 | This **Alpakka Kafka connector** lets you connect [Apache Kafka](https://kafka.apache.org/) to Akka Streams. It was formerly known as **Akka Streams Kafka** and even **Reactive Kafka**. 
6 | 7 | @@toc { .main depth=2 } 8 | 9 | @@@ index 10 | 11 | * [overview](home.md) 12 | * [Producer](producer.md) 13 | * [Consumer](consumer.md) 14 | * [Discovery](discovery.md) 15 | * [Cluster Sharding](cluster-sharding.md) 16 | * [Error Handling](errorhandling.md) 17 | * [At-Least-Once Delivery](atleastonce.md) 18 | * [Transactions](transactions.md) 19 | * [deser](serialization.md) 20 | * [debug](debugging.md) 21 | * [test](testing.md) 22 | * [test-testcontainers](testing-testcontainers.md) 23 | * [in prod](production.md) 24 | * [Snapshots](snapshots.md) 25 | 26 | @@@ 27 | -------------------------------------------------------------------------------- /docs/src/main/paradox/production.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: Consider these areas when using Alpakka Kafka in production. 3 | --- 4 | # Production considerations 5 | 6 | 7 | ## Alpakka Kafka API 8 | 9 | 1. Do not use `Consumer.atMostOnceSource` in production as it internally commits the offset after every element. 10 | 1. If you create `Producer` sinks in "inner flows", be sure to @ref:[share the `Producer` instance](producer.md#sharing-the-kafkaproducer-instance). This avoids the expensive creation of `KafkaProducer` instances. 11 | 12 | @@@ note 13 | 14 | This is just a start, please add your experiences to this list by [opening a Pull Request](https://github.com/akka/alpakka-kafka/pulls). 15 | 16 | @@@ 17 | 18 | 19 | ## Monitoring and Tracing 20 | 21 | For performance monitoring consider [Lightbend Telemetry](https://developer.lightbend.com/docs/telemetry/current/home.html) which gives insights into Akka and Akka Streams. 22 | 23 | Lightbend Telemetry supports OpenTracing context propagation so that you can follow individual messages through Kafka producers and consumers. 24 | 25 | ![OpenTracing with Alpakka Kafka](.../alpakka-kafka-stream-trace.png) 26 | 27 | See [Enabling OpenTracing in your app](https://developer.lightbend.com/docs/telemetry/current/extensions/opentracing/enabling.html#alpakka-kafka-configuration). 28 | 29 | ## Security setup 30 | 31 | The different security setups offered by Kafka brokers are described in the @extref[Apache Kafka documentation](kafka:/documentation.html#security). 32 | 33 | 34 | ### SSL 35 | 36 | The properties described in Kafka's @extref[Configuring Kafka Clients for SSL](kafka:/documentation.html#security_configclients) go in the 37 | `akka.kafka.consumer.kafka-clients` and `akka.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via 38 | `ProducerSettings.withProperties` and `ConsumerSettings.withProperties`. The necessary property name constants are available in @javadoc[SslConfigs](org.apache.kafka.common.config.SslConfigs). 39 | 40 | ```hocon 41 | akka.kafka.producer { # and akka.kafka.consumer respectively 42 | kafka-clients { 43 | security.protocol=SSL 44 | ssl.truststore.location=/var/private/ssl/kafka.client.truststore.jks 45 | ssl.truststore.password=test1234 46 | ssl.keystore.location=/var/private/ssl/kafka.client.keystore.jks 47 | ssl.keystore.password=test1234 48 | ssl.key.password=test1234 49 | } 50 | } 51 | ``` 52 | 53 | The truststore and keystore locations may specify URLs, absolute paths or relative paths (starting with `./`). 54 | 55 | You have the option to pass the passwords as command line parameters or environment values via the support in [Config](https://github.com/lightbend/config#optional-system-or-env-variable-overrides). 
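As an example, the Config library's optional environment variable overrides could be used like this (a minimal sketch; the environment variable names are chosen for illustration and are not predefined by Alpakka Kafka):

```hocon
akka.kafka.producer { # and akka.kafka.consumer respectively
  kafka-clients {
    # each password is only overridden when the corresponding environment variable is set
    ssl.truststore.password = ${?KAFKA_SSL_TRUSTSTORE_PASSWORD}
    ssl.keystore.password = ${?KAFKA_SSL_KEYSTORE_PASSWORD}
    ssl.key.password = ${?KAFKA_SSL_KEY_PASSWORD}
  }
}
```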
56 | 57 | 58 | ### Kerberos 59 | 60 | The properties described in Kafka's @extref[Configuring Kafka Clients for Kerberos](kafka:/documentation.html#security_sasl_kerberos_clientconfig) go in the 61 | `akka.kafka.consumer.kafka-clients` and `akka.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via 62 | `ProducerSettings.withProperties` and `ConsumerSettings.withProperties`. 63 | 64 | ```hocon 65 | akka.kafka.producer { # and akka.kafka.consumer respectively 66 | kafka-clients { 67 | security.protocol=SASL_PLAINTEXT # (or SASL_SSL) 68 | sasl.mechanism=GSSAPI 69 | sasl.kerberos.service.name=kafka 70 | } 71 | } 72 | ``` 73 | -------------------------------------------------------------------------------- /docs/src/main/paradox/release-notes/3.0.x.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: Release notes for all Alpakka Kafka 3.0.x releases. 3 | --- 4 | # Alpakka Kafka 3.0.x 5 | 6 | @@@ note 7 | In case you are browsing a specific version's documentation: check out the [latest release notes](https://github.com/akka/alpakka-kafka/releases) 8 | @@@ 9 | 10 | # 3.0.1 11 | 12 | ## Noteworthy 13 | 14 | This version [bumped the Kafka client](https://github.com/akka/alpakka-kafka/pull/1490) which changes a default config as mentioned [here](https://cwiki.apache.org/confluence/display/KAFKA/KIP-679%3A+Producer+will+enable+the+strongest+delivery+guarantee+by+default). The following error can show up in the logs: 15 | 16 | ``` 17 | org.apache.kafka.common.errors.ClusterAuthorizationException: Cluster authorization failed 18 | 19 | ``` 20 | 21 | In that case the following config needs to be provided in order to restore the previous default: 22 | 23 | ``` 24 | akka.kafka.producer.kafka-clients.enable.idempotence=false 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/src/main/paradox/release-notes/4.0.x.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: Release notes for all Alpakka Kafka 4.0.x releases. 3 | --- 4 | # Alpakka Kafka 4.0.x 5 | 6 | See the [GitHub releases page](https://github.com/akka/alpakka-kafka/releases). 7 | -------------------------------------------------------------------------------- /docs/src/main/paradox/release-notes/index.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | * All [GitHub releases](https://github.com/akka/alpakka-kafka/releases) 4 | * [2.2.0](https://github.com/akka/alpakka-kafka/releases/tag/v2.2.0) 5 | 6 | @@toc { depth=2 } 7 | 8 | @@@ index 9 | 10 | * [4.0.x](4.0.x.md) 11 | * [3.0.x](3.0.x.md) 12 | * [2.1.x](2.1.x.md) 13 | * [2.0.x](2.0.x.md) 14 | * [1.1.x](1.1.x.md) 15 | * [1.0.x](1.0.x.md) 16 | 17 | @@@ 18 | -------------------------------------------------------------------------------- /docs/src/main/paradox/send-producer.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: Produce messages to Apache Kafka topics with a Java or Scala future API. 3 | --- 4 | # Send Producer 5 | 6 | A producer publishes messages to Kafka topics. The message itself contains information about what topic and partition to publish to so you can publish to different topics with the same producer. 7 | 8 | The Alpakka Kafka @apidoc[SendProducer] does not integrate with Akka Streams.
Instead, it offers a wrapper of the Apache Kafka @javadoc[KafkaProducer](org.apache.kafka.clients.producer.KafkaProducer) to send data to Kafka topics in a per-element fashion with a @scala[`Future`-based]@java[`CompletionStage`-based] API. 9 | 10 | It supports the same @ref[settings](producer.md#settings) as Alpakka @apidoc[Producer$] flows and sinks and supports @ref[service discovery](discovery.md). 11 | 12 | After use, the `Producer` needs to be properly closed via the asynchronous `close()` method. 13 | 14 | ## Producing 15 | 16 | The Send Producer offers methods for sending 17 | 18 | * @javadoc[ProducerRecord](org.apache.kafka.clients.producer.ProducerRecord) with `send` 19 | * @apidoc[ProducerMessage.Envelope] with `sendEnvelope` (similar to `Producer.flexiFlow`) 20 | 21 | After use, the Send Producer should be closed with `close()`. 22 | 23 | ### ProducerRecord 24 | 25 | Produce a @javadoc[ProducerRecord](org.apache.kafka.clients.producer.ProducerRecord) to a topic. 26 | 27 | Scala 28 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/SendProducerSpec.scala) { #record } 29 | 30 | Java 31 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/SendProducerTest.java) { #record } 32 | 33 | 34 | ### Envelope 35 | 36 | The @apidoc[ProducerMessage.Envelope] can be used to send one record, or a list of @javadoc[ProducerRecord](org.apache.kafka.clients.producer.ProducerRecord)s to produce a single or multiple messages to Kafka topics. The envelope can be used to pass through an arbitrary value which will be attached to the result. 37 | 38 | Scala 39 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/SendProducerSpec.scala) { #multiMessage } 40 | 41 | Java 42 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/SendProducerTest.java) { #multiMessage } 43 | 44 | After successful sending, a @apidoc[ProducerMessage.Message] will return a @apidoc[akka.kafka.ProducerMessage.Result] element containing: 45 | 46 | 1. the original input message, 47 | 1. the record metadata (Kafka @javadoc[RecordMetadata](org.apache.kafka.clients.producer.RecordMetadata) API), and 48 | 1. access to the `passThrough` within the message. 49 | 50 | A @apidoc[ProducerMessage.MultiMessage] will return a @apidoc[akka.kafka.ProducerMessage.MultiResult] containing: 51 | 52 | 1. a list of @apidoc[ProducerMessage.MultiResultPart] with 53 | 1. the original input message, 54 | 1. the record metadata (Kafka @javadoc[RecordMetadata](org.apache.kafka.clients.producer.RecordMetadata) API), and 55 | 1. the `passThrough` data. 56 | -------------------------------------------------------------------------------- /docs/src/main/paradox/snapshots.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: Snapshot builds of Alpakka Kafka. 3 | --- 4 | # Snapshots 5 | 6 | Snapshots are published to the https://repo.akka.io/snapshots repository after every successful build on main. 7 | Add the following to your project build definition to resolve Alpakka Kafka connector snapshots: 8 | 9 | ## Configure repository 10 | 11 | Maven 12 | : ```xml 13 | <project> 14 | ... 15 |   <repositories> 16 |     <repository> 17 |       <id>akka-repository</id> 18 |       <name>Akka library snapshot repository</name> 19 |       <url>https://repo.akka.io/snapshots</url> 20 |     </repository> 21 |   </repositories> 22 | ... 23 | </project> 24 |
25 | 26 | ``` 27 | 28 | sbt 29 | : ```scala 30 | resolvers += "Akka library snapshot repository".at("https://repo.akka.io/snapshots") 31 | ``` 32 | 33 | Gradle 34 | : ```gradle 35 | repositories { 36 | maven { 37 | url "https://repo.akka.io/snapshots" 38 | } 39 | } 40 | ``` 41 | 42 | ## Documentation 43 | 44 | The [snapshot documentation](https://doc.akka.io/libraries/alpakka-kafka/snapshot/) is updated with every snapshot build. 45 | 46 | -------------------------------------------------------------------------------- /docs/src/main/paradox/subscription.md: -------------------------------------------------------------------------------- 1 | --- 2 | project.description: An Alpakka Kafka consumer source can subscribe to Kafka topics within a consumer group, or to specific partitions. 3 | --- 4 | # Subscription 5 | 6 | Consumer Sources are created with different types of subscriptions, which control from which topics, partitions and offsets data is to be consumed. 7 | 8 | Subscriptions are grouped into two categories: with automatic partition assignment and with manual control of partition assignment. 9 | 10 | Factory methods for all subscriptions can be found in the @apidoc[Subscriptions$] factory. 11 | 12 | ## Automatic Partition Assignment 13 | 14 | ### Topic 15 | 16 | Subscribes to one or more topics. Partitions will be assigned automatically by the Kafka Client. 17 | 18 | Scala 19 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/AssignmentSpec.scala) { #single-topic } 20 | 21 | Java 22 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/AssignmentTest.java) { #single-topic } 23 | 24 | 25 | ### Topic Pattern 26 | 27 | Subscribes to one or more topics which match the given pattern. Take a look at the @javadoc[subscribe​(java.util.regex.Pattern pattern, 28 | ...)](org.apache.kafka.clients.consumer.KafkaConsumer) method documentation for more information on topic pattern matching. 29 | 30 | Scala 31 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/AssignmentSpec.scala) { #topic-pattern } 32 | 33 | Java 34 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/AssignmentTest.java) { #topic-pattern } 35 | 36 | 37 | ## Manual Partition Assignment 38 | 39 | ### Partition Assignment 40 | 41 | Subscribes to given topics and their given partitions. 42 | 43 | Scala 44 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/AssignmentSpec.scala) { #assingment-single-partition } 45 | 46 | Java 47 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/AssignmentTest.java) { #assingment-single-partition } 48 | 49 | 50 | ### Partition Assignment with Offset 51 | 52 | Subscribes to given topics and their partitions allowing to also set an offset from which messages will be read. 53 | 54 | Scala 55 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/AssignmentSpec.scala) { #assingment-single-partition-offset } 56 | 57 | Java 58 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/AssignmentTest.java) { #assingment-single-partition-offset } 59 | 60 | 61 | This subscription can be used when offsets are stored in Kafka or on external storage. For more information, take a look at the @ref[Offset Storage external to Kafka](consumer.md#offset-storage-external-to-kafka) documentation page. 62 | 63 | ### Partition Assignment with Timestamp 64 | 65 | Subscribes to given topics and their partitions allowing to also set a timestamp which will be used to find the offset from which messages will be read. 
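Besides the bundled snippets below, a minimal sketch of such a subscription could look like the following (the topic name, partition and one-hour look-back are made-up example values):

```scala
import akka.kafka.Subscriptions
import org.apache.kafka.common.TopicPartition

// start reading partition 0 of "topic1" at the first offset whose timestamp is at or after one hour ago
val oneHourAgo = System.currentTimeMillis() - 60 * 60 * 1000L
val subscription = Subscriptions.assignmentOffsetsForTimes(new TopicPartition("topic1", 0) -> oneHourAgo)
```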
66 | 67 | Scala 68 | : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/AssignmentSpec.scala) { #assingment-single-partition-timestamp } 69 | 70 | Java 71 | : @@ snip [snip](/tests/src/test/java/docs/javadsl/AssignmentTest.java) { #assingment-single-partition-timestamp } 72 | -------------------------------------------------------------------------------- /project/AutomaticModuleName.scala: -------------------------------------------------------------------------------- 1 | import sbt.{Def, _} 2 | import sbt.Keys._ 3 | 4 | /** 5 | * Helper to set Automatic-Module-Name in projects. 6 | * 7 | * !! DO NOT BE TEMPTED INTO AUTOMATICALLY DERIVING THE NAMES FROM PROJECT NAMES !! 8 | * 9 | * The names carry a lot of implications and DO NOT have to always align 1:1 with the group ids or package names, 10 | * though there should be of course a strong relationship between them. 11 | */ 12 | object AutomaticModuleName { 13 | private val AutomaticModuleName = "Automatic-Module-Name" 14 | 15 | def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] = Seq( 16 | Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName → name) 17 | ) 18 | } 19 | -------------------------------------------------------------------------------- /project/PackagingTypePlugin.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | // See https://github.com/sbt/sbt/issues/3618#issuecomment-424924293 4 | // for "javax.ws.rs" % "javax.ws.rs-api" % "2.1" artifacts Artifact("javax.ws.rs-api", "jar", "jar"), 5 | object PackagingTypePlugin extends AutoPlugin { 6 | override val buildSettings = { 7 | sys.props += "packaging.type" -> "jar" 8 | Nil 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /project/Publish.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 - 2024 Lightbend Inc. 3 | */ 4 | package akka 5 | 6 | import java.util.concurrent.atomic.AtomicBoolean 7 | 8 | import scala.language.postfixOps 9 | 10 | import sbt.{Def, _} 11 | import Keys._ 12 | import com.geirsson.CiReleasePlugin 13 | import com.jsuereth.sbtpgp.PgpKeys.publishSigned 14 | import xerial.sbt.Sonatype.autoImport.sonatypeProfileName 15 | 16 | /** 17 | * For projects that are not published. 
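 * A build-definition sketch of attaching it to a subproject (the subproject name is
 * hypothetical and not taken from this build):
 * {{{
 * lazy val internalTools = project
 *   .enablePlugins(NoPublish)
 * }}}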
18 | */ 19 | object NoPublish extends AutoPlugin { 20 | override def requires = plugins.JvmPlugin 21 | 22 | override def projectSettings = Seq( 23 | publish / skip := true, 24 | publishArtifact := false, 25 | publish := {}, 26 | publishLocal := {} 27 | ) 28 | } 29 | 30 | object Publish extends AutoPlugin { 31 | override def requires = plugins.JvmPlugin 32 | override def trigger = AllRequirements 33 | 34 | lazy val beforePublishTask = taskKey[Unit]("setup before publish") 35 | 36 | lazy val beforePublishDone = new AtomicBoolean(false) 37 | 38 | def beforePublish(snapshot: Boolean) = { 39 | if (beforePublishDone.compareAndSet(false, true)) { 40 | CiReleasePlugin.setupGpg() 41 | if (!snapshot) 42 | cloudsmithCredentials(validate = true) 43 | } 44 | } 45 | 46 | override def projectSettings: Seq[Def.Setting[_]] = 47 | Seq( 48 | sonatypeProfileName := "com.typesafe", 49 | beforePublishTask := beforePublish(isSnapshot.value), 50 | publishSigned := publishSigned.dependsOn(beforePublishTask).value, 51 | publishTo := 52 | (if (isSnapshot.value) 53 | Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka-snapshots/")) 54 | else 55 | Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka/"))), 56 | credentials ++= cloudsmithCredentials(validate = false) 57 | ) 58 | 59 | def cloudsmithCredentials(validate: Boolean): Seq[Credentials] = { 60 | (sys.env.get("PUBLISH_USER"), sys.env.get("PUBLISH_PASSWORD")) match { 61 | case (Some(user), Some(password)) => 62 | Seq(Credentials("Cloudsmith API", "maven.cloudsmith.io", user, password)) 63 | case _ => 64 | if (validate) 65 | throw new Exception("Publishing credentials expected in `PUBLISH_USER` and `PUBLISH_PASSWORD`.") 66 | else 67 | Nil 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /project/VersionGenerator.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import sbt.Keys._ 3 | 4 | /** 5 | * Generate version.conf and akka/kafka/Version.scala files based on the version setting. 
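 * For example, with `version := "5.0.0"` the generated files would contain (a sketch derived
 * from the templates below; the version number is only an example):
 * {{{
 * // version.conf
 * akka.kafka.version = "5.0.0"
 *
 * // akka/kafka/Version.scala
 * package akka.kafka
 *
 * object Version {
 *   val current: String = "5.0.0"
 * }
 * }}}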
6 | * 7 | * This was adapted from https://github.com/akka/akka/blob/v2.6.8/project/VersionGenerator.scala 8 | */ 9 | object VersionGenerator { 10 | 11 | val settings: Seq[Setting[_]] = inConfig(Compile)( 12 | Seq( 13 | resourceGenerators += generateVersion(resourceManaged, _ / "version.conf", """|akka.kafka.version = "%s" 14 | |"""), 15 | sourceGenerators += generateVersion( 16 | sourceManaged, 17 | _ / "akka" / "kafka" / "Version.scala", 18 | """|package akka.kafka 19 | | 20 | |object Version { 21 | | val current: String = "%s" 22 | |} 23 | |""" 24 | ) 25 | ) 26 | ) 27 | 28 | def generateVersion(dir: SettingKey[File], locate: File => File, template: String) = Def.task[Seq[File]] { 29 | val file = locate(dir.value) 30 | val content = template.stripMargin.format(version.value) 31 | if (!file.exists || IO.read(file) != content) IO.write(file, content) 32 | Seq(file) 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.10.11 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | resolvers += "Akka library repository".at("https://repo.akka.io/maven") 2 | 3 | addSbtPlugin("com.github.sbt" % "sbt-dynver" % "5.1.0") 4 | addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.6.1") 5 | addSbtPlugin("net.aichler" % "sbt-jupiter-interface" % "0.11.1") 6 | // discipline 7 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.10.0") 8 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.4") 9 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.4") 10 | addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.8.0") 11 | // docs 12 | addSbtPlugin("io.akka" % "sbt-paradox-akka" % "25.10.0") 13 | addSbtPlugin("com.lightbend.paradox" % "sbt-paradox-dependencies" % "0.2.4") 14 | addSbtPlugin("com.lightbend.sbt" % "sbt-publish-rsync" % "0.3") 15 | addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") 16 | addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") 17 | addSbtPlugin("com.github.sbt" % "sbt-site-paradox" % "1.5.0") 18 | 19 | resolvers += Resolver.jcenterRepo 20 | -------------------------------------------------------------------------------- /project/project-info.conf: -------------------------------------------------------------------------------- 1 | project-info { 2 | version: "current" 3 | shared-info { 4 | jdk-versions: ["Eclipse Temurin JDK 11", "Eclipse Temurin JDK 17", "Eclipse Temurin JDK 21"] 5 | snapshots: { 6 | url: "snapshots.html" 7 | text: "Snapshots are available" 8 | new-tab: false 9 | } 10 | issues: { 11 | url: "https://github.com/akka/alpakka-kafka/issues" 12 | text: "Github issues" 13 | } 14 | release-notes: { 15 | url: "https://doc.akka.io/libraries/alpakka-kafka/current/release-notes/index.html" 16 | text: "In the documentation" 17 | new-tab: false 18 | } 19 | forums: [ 20 | { 21 | text: "Lightbend Discuss" 22 | url: "https://discuss.akka.io" 23 | } 24 | ] 25 | } 26 | core: ${project-info.shared-info} { 27 | title: "Alpakka Kafka" 28 | jpms-name: "akka.stream.alpakka.kafka" 29 | levels: [ 30 | { 31 | readiness: Supported 32 | since: "2017-05-02" 33 | since-version: "0.16" 34 | } 35 | { 36 | readiness: Incubating 37 | since: "2015-01-07" 38 | since-version: "0.1.0" 39 | } 40 | ] 41 | api-docs: [ 42 | { 43 | url: 
"https://doc.akka.io/api/alpakka-kafka/"${project-info.version}"/akka/kafka/index.html" 44 | text: "API (Scaladoc)" 45 | } 46 | ] 47 | } 48 | testkit: ${project-info.shared-info} { 49 | title: "Alpakka Kafka testkit" 50 | jpms-name: "akka.stream.alpakka.kafka.testkit" 51 | levels: [ 52 | { 53 | readiness: Incubating 54 | since: "2018-11-06" 55 | since-version: "1.0-M1" 56 | note: "The API of the testkit may change even for minor versions." 57 | } 58 | ] 59 | } 60 | clusterSharding: ${project-info.shared-info} { 61 | title: "Alpakka Kafka Cluster Sharding" 62 | jpms-name: "akka.stream.alpakka.kafka.cluster.sharding" 63 | levels: [ 64 | { 65 | readiness: Incubating 66 | since: "2020-03-05" 67 | since-version: "2.0.3" 68 | note: "The API of the cluster sharding is experimental and may change even for minor versions." 69 | } 70 | ] 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /scripts/cat-log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ---------- helper script to separate log files in build 3 | printf "\n\n\n" 4 | ls -lh $1 5 | printf "\n\n" 6 | cat $1 7 | -------------------------------------------------------------------------------- /scripts/create-release-issue.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VERSION=$1 4 | if [ -z $VERSION ] 5 | then 6 | echo specify the version name to be released, eg. 2.2.0 7 | else 8 | sed -e 's/\$VERSION\$/'$VERSION'/g' docs/release-train-issue-template.md > /tmp/release-$VERSION.md 9 | echo Created $(gh issue create --title 'Release Alpakka Kafka' --body-file /tmp/release-$VERSION.md --milestone $VERSION --web) 10 | fi 11 | -------------------------------------------------------------------------------- /scripts/link-validator.conf: -------------------------------------------------------------------------------- 1 | // config for https://github.com/ennru/site-link-validator/ 2 | site-link-validator { 3 | root-dir = "./docs/target/site/" 4 | # relative to `root-dir` 5 | start-file = "libraries/alpakka-kafka/snapshot/index.html" 6 | 7 | # Resolves URLs with the given prefix as local files instead 8 | link-mappings = [ 9 | { 10 | prefix = "https://doc.akka.io/libraries/alpakka-kafka/snapshot/" 11 | replace = "/libraries/alpakka-kafka/snapshot/" 12 | } 13 | { 14 | prefix = "https://doc.akka.io/api/alpakka-kafka/snapshot/" 15 | replace = "/api/alpakka-kafka/snapshot/" 16 | } 17 | ] 18 | 19 | ignore-missing-local-files-regex = "" 20 | 21 | ignore-files = [] 22 | 23 | ignore-prefixes = [ 24 | # runtime is NOT part of the published Scaladoc 25 | "https://www.scala-lang.org/api/2.13.14/scala/runtime/AbstractFunction2.html" 26 | # GitHub will block with "429 Too Many Requests" 27 | "https://github.com/" 28 | # MVN repository forbids access after a few requests 29 | "https://mvnrepository.com/artifact/" 30 | # gives: javax.net.ssl.SSLHandshakeException: Received fatal alert: handshake_failure requests 31 | "https://javadoc.io/static/", 32 | # The address is hit too often and blocked 33 | "https://opensource.org/licenses/Apache-2.0", 34 | "https://repo.akka.io/" 35 | ] 36 | 37 | non-https-whitelist = [ 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /testkit/src/main/java/akka/kafka/testkit/internal/SchemaRegistryContainer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 
2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.internal; 7 | 8 | import org.testcontainers.containers.GenericContainer; 9 | import org.testcontainers.containers.Network; 10 | import org.testcontainers.utility.DockerImageName; 11 | 12 | public class SchemaRegistryContainer extends GenericContainer { 13 | // Align these confluent platform constants with testkit/src/main/resources/reference.conf 14 | public static final DockerImageName DEFAULT_SCHEMA_REGISTRY_IMAGE_NAME = 15 | DockerImageName.parse("confluentinc/cp-schema-registry") 16 | .withTag(AlpakkaKafkaContainer.DEFAULT_CONFLUENT_PLATFORM_VERSION); 17 | 18 | public static int SCHEMA_REGISTRY_PORT = 8081; 19 | 20 | public SchemaRegistryContainer() { 21 | this(DEFAULT_SCHEMA_REGISTRY_IMAGE_NAME); 22 | } 23 | 24 | public SchemaRegistryContainer(final DockerImageName schemaRegistryImage) { 25 | super(schemaRegistryImage); 26 | 27 | withNetwork(Network.SHARED); 28 | withExposedPorts(SCHEMA_REGISTRY_PORT); 29 | withEnv("SCHEMA_REGISTRY_HOST_NAME", "schema-registry"); 30 | withEnv("SCHEMA_REGISTRY_LISTENERS", "http://0.0.0.0:" + SCHEMA_REGISTRY_PORT); 31 | } 32 | 33 | public SchemaRegistryContainer withCluster(KafkaContainerCluster cluster) { 34 | withNetwork(cluster.getNetwork()); 35 | withEnv( 36 | "SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", 37 | "PLAINTEXT://" + cluster.getInternalNetworkBootstrapServers()); 38 | return self(); 39 | } 40 | 41 | public String getSchemaRegistryUrl() { 42 | return String.format("http://%s:%s", getHost(), getMappedPort(SCHEMA_REGISTRY_PORT)); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /testkit/src/main/java/akka/kafka/testkit/javadsl/KafkaJunit4Test.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.actor.ClassicActorSystemProvider; 10 | import akka.stream.Materializer; 11 | import akka.stream.testkit.javadsl.StreamTestKit; 12 | import org.junit.After; 13 | import org.junit.Before; 14 | 15 | /** JUnit 4 base-class with some convenience for accessing a Kafka broker. 
*/ 16 | public abstract class KafkaJunit4Test extends BaseKafkaTest { 17 | 18 | /** 19 | * @deprecated Materializer no longer necessary in Akka 2.6, use 20 | * `KafkaJunit4Test(ClassicActorSystemProvider, String)` instead, since 2.1.0 21 | */ 22 | @Deprecated 23 | protected KafkaJunit4Test(ActorSystem system, Materializer mat, String bootstrapServers) { 24 | super(system, mat, bootstrapServers); 25 | } 26 | 27 | protected KafkaJunit4Test(ClassicActorSystemProvider system, String bootstrapServers) { 28 | super(system, bootstrapServers); 29 | } 30 | 31 | @Before 32 | public void setUpAdmin() { 33 | setUpAdminClient(); 34 | } 35 | 36 | @After 37 | public void cleanUpAdmin() { 38 | cleanUpAdminClient(); 39 | } 40 | 41 | @After 42 | public void checkForStageLeaks() { 43 | // you might need to configure `stop-timeout` in your `application.conf` 44 | // as the default of 30s will fail this 45 | StreamTestKit.assertAllStagesStopped(materializer); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /testkit/src/main/java/akka/kafka/testkit/javadsl/KafkaTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.actor.ClassicActorSystemProvider; 10 | import akka.stream.Materializer; 11 | import akka.stream.testkit.javadsl.StreamTestKit; 12 | import org.junit.jupiter.api.AfterAll; 13 | import org.junit.jupiter.api.AfterEach; 14 | import org.junit.jupiter.api.BeforeAll; 15 | 16 | /** 17 | * JUnit 5 aka Jupiter base-class with some convenience for accessing a Kafka broker. Extending 18 | * classes must be annotated with `@TestInstance(Lifecycle.PER_CLASS)` to create a single instance 19 | * of the test class with `@BeforeAll` and `@AfterAll` annotated methods called by the test 20 | * framework. 21 | */ 22 | public abstract class KafkaTest extends BaseKafkaTest { 23 | 24 | /** 25 | * @deprecated Materializer no longer necessary in Akka 2.6, use 26 | * `KafkaTest(ClassicActorSystemProvider, String)` instead, since 2.1.0 27 | */ 28 | @Deprecated 29 | protected KafkaTest(ActorSystem system, Materializer mat, String bootstrapServers) { 30 | super(system, mat, bootstrapServers); 31 | } 32 | 33 | protected KafkaTest(ClassicActorSystemProvider system, String bootstrapServers) { 34 | super(system, bootstrapServers); 35 | } 36 | 37 | @BeforeAll 38 | public void setupAdmin() { 39 | setUpAdminClient(); 40 | } 41 | 42 | @AfterAll 43 | public void cleanUpAdmin() { 44 | cleanUpAdminClient(); 45 | } 46 | 47 | @AfterEach 48 | public void checkForStageLeaks() { 49 | // you might need to configure `stop-timeout` in your `application.conf` 50 | // as the default of 30s will fail this 51 | StreamTestKit.assertAllStagesStopped(materializer); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /testkit/src/main/java/akka/kafka/testkit/javadsl/TestcontainersKafkaJunit4Test.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.testkit.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.actor.ClassicActorSystemProvider; 10 | import akka.kafka.testkit.KafkaTestkitTestcontainersSettings; 11 | import akka.kafka.testkit.internal.TestcontainersKafka; 12 | import akka.stream.Materializer; 13 | import org.junit.After; 14 | import org.junit.Before; 15 | 16 | /** 17 | * JUnit 4 base class using [[https://java.testcontainers.org/ Testcontainers]] to start a Kafka 18 | * broker in a Docker container. The Kafka broker will be kept around across multiple test classes, 19 | * unless `stopKafka()` is called. 20 | * 21 | *

The Testcontainers dependency has to be added explicitly. 22 | */ 23 | public abstract class TestcontainersKafkaJunit4Test extends KafkaJunit4Test { 24 | 25 | private static final KafkaTestkitTestcontainersSettings settings = 26 | TestcontainersKafka.Singleton().testcontainersSettings(); 27 | 28 | /** 29 | * @deprecated Materializer no longer necessary in Akka 2.6, use 30 | * `TestcontainersKafkaJunit4Test(ClassicActorSystemProvider)` instead, since 2.1.0 31 | */ 32 | @Deprecated 33 | protected TestcontainersKafkaJunit4Test(ActorSystem system, Materializer mat) { 34 | super(system, mat, startKafka(settings)); 35 | } 36 | 37 | protected TestcontainersKafkaJunit4Test(ClassicActorSystemProvider system) { 38 | super(system, startKafka(settings)); 39 | } 40 | 41 | protected TestcontainersKafkaJunit4Test( 42 | ActorSystem system, KafkaTestkitTestcontainersSettings settings) { 43 | super(system, startKafka(settings)); 44 | } 45 | 46 | protected static String startKafka(KafkaTestkitTestcontainersSettings settings) { 47 | return TestcontainersKafka.Singleton().startCluster(settings); 48 | } 49 | 50 | protected static void stopKafka() { 51 | TestcontainersKafka.Singleton().stopCluster(); 52 | } 53 | 54 | @Before 55 | @Override 56 | public void setUpAdminClient() { 57 | super.setUpAdminClient(); 58 | } 59 | 60 | @After 61 | @Override 62 | public void cleanUpAdminClient() { 63 | super.cleanUpAdminClient(); 64 | } 65 | 66 | protected String getSchemaRegistryUrl() { 67 | return TestcontainersKafka.Singleton().getSchemaRegistryUrl(); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /testkit/src/main/java/akka/kafka/testkit/javadsl/TestcontainersKafkaTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.actor.ClassicActorSystemProvider; 10 | import akka.kafka.testkit.KafkaTestkitTestcontainersSettings; 11 | import akka.kafka.testkit.internal.TestcontainersKafka; 12 | import akka.stream.Materializer; 13 | 14 | /** 15 | * JUnit 5 base class using [[https://java.testcontainers.org/ Testcontainers]] to start a Kafka 16 | * broker in a Docker container. The Kafka broker will be kept around across multiple test classes, 17 | * unless `stopKafka()` is called (eg. from an `@AfterAll`-annotated method. 18 | * 19 | *

Extending classes must be annotated with `@TestInstance(Lifecycle.PER_CLASS)` to create a 20 | * single instance of the test class with `@BeforeAll` and `@AfterAll` annotated methods called by 21 | * the test framework. 22 | * 23 | *

The Testcontainers dependency has to be added explicitly. 24 | */ 25 | public abstract class TestcontainersKafkaTest extends KafkaTest { 26 | 27 | public static final KafkaTestkitTestcontainersSettings settings = 28 | TestcontainersKafka.Singleton().testcontainersSettings(); 29 | 30 | /** 31 | * @deprecated Materializer no longer necessary in Akka 2.6, use 32 | * `TestcontainersKafkaTest(ClassicActorSystemProvider)` instead, since 2.1.0 33 | */ 34 | @Deprecated 35 | protected TestcontainersKafkaTest(ActorSystem system, Materializer mat) { 36 | super(system, mat, startKafka(settings)); 37 | } 38 | 39 | protected TestcontainersKafkaTest(ClassicActorSystemProvider system) { 40 | super(system, startKafka(settings)); 41 | } 42 | 43 | protected TestcontainersKafkaTest( 44 | ActorSystem system, KafkaTestkitTestcontainersSettings settings) { 45 | super(system, startKafka(settings)); 46 | } 47 | 48 | protected static String startKafka(KafkaTestkitTestcontainersSettings settings) { 49 | return TestcontainersKafka.Singleton().startCluster(settings); 50 | } 51 | 52 | protected static void stopKafka() { 53 | TestcontainersKafka.Singleton().stopCluster(); 54 | } 55 | 56 | protected String getSchemaRegistryUrl() { 57 | return TestcontainersKafka.Singleton().getSchemaRegistryUrl(); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/2.0.5.backwards.excludes/PR1229-drop-embedded-kafka.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[MissingClassProblem]("akka.kafka.testkit.javadsl.EmbeddedKafkaJunit4Test") 2 | ProblemFilters.exclude[MissingClassProblem]("akka.kafka.testkit.javadsl.EmbeddedKafkaTest") 3 | ProblemFilters.exclude[MissingClassProblem]("akka.kafka.testkit.scaladsl.EmbeddedKafkaLike") 4 | -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/2.0.5.backwards.excludes/PR1235-start_stop_kafka_process.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.kafka.testkit.scaladsl.TestcontainersKafkaLike.startKafka") 2 | ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.kafka.testkit.scaladsl.TestcontainersKafkaLike.startKafka") -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/2.0.x.backwards.excludes: -------------------------------------------------------------------------------- 1 | # Inherited methods removed in ScalaTest 3.1 (#1207) 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.scaladsl.ScalatestKafkaSpec.execute") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.scaladsl.ScalatestKafkaSpec.newAssertionFailedException") 4 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.scaladsl.ScalatestKafkaSpec.trap") 5 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.scaladsl.ScalatestKafkaSpec.assertionsHelper") 6 | 7 | # Rollback Confluent Platform version; add Jackson Databind #1226 8 | ProblemFilters.exclude[MissingFieldProblem]("akka.kafka.testkit.internal.AlpakkaKafkaContainer.DEFAULT_CP_PLATFORM_VERSION") 9 | 10 | # Changed signature, but EmbeddedKafka will be removed with #1229 11 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.javadsl.EmbeddedKafkaJunit4Test.this") 12 | 13 | # Changed to return the Admin 
interface instead of the AdminClient class #1183 14 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.kafka.testkit.scaladsl.KafkaSpec.adminClient") 15 | -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/2.1.0-M1.backwards.excludes/PR1281-testcontainers-logs.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.KafkaTestkitTestcontainersSettings.this") 2 | -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/2.1.0-M1.backwards.excludes/PR1287-testcontainer-docker-images.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.KafkaTestkitTestcontainersSettings.confluentPlatformVersion") 2 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.KafkaTestkitTestcontainersSettings.getConfluentPlatformVersion") 3 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.KafkaTestkitTestcontainersSettings.withConfluentPlatformVersion") 4 | -------------------------------------------------------------------------------- /testkit/src/main/mima-filters/3.0.1.backwards.excludes/PR1489.excludes: -------------------------------------------------------------------------------- 1 | ProblemFilters.exclude[DirectMissingMethodProblem]("akka.kafka.testkit.KafkaTestkitTestcontainersSettings.this") -------------------------------------------------------------------------------- /testkit/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | # // #testkit-settings 2 | akka.kafka.testkit { 3 | 4 | # amount of time to wait until the desired cluster state is reached 5 | cluster-timeout = 10 seconds 6 | 7 | # amount of time to wait until the desired consumer group state is reached 8 | consumer-group-timeout = 10 seconds 9 | 10 | # amount of time to wait in-between state checks 11 | check-interval = 100 ms 12 | } 13 | # // #testkit-settings 14 | 15 | # // #testkit-testcontainers-settings 16 | akka.kafka.testkit.testcontainers { 17 | 18 | # define these settings to select a different Kafka/ZooKeeper docker image 19 | # we recommend using Confluent Platform docker images and using the same version across all images 20 | # Confluent publishes images on DockerHub: https://hub.docker.com/r/confluentinc/cp-kafka/tags 21 | # Kafka versions in Confluent Platform: https://docs.confluent.io/current/installation/versions-interoperability.html 22 | zookeeper-image = "confluentinc/cp-zookeeper" 23 | zookeeper-image-tag = ${akka.kafka.testkit.testcontainers.confluent-platform-version} 24 | kafka-image = "confluentinc/cp-kafka" 25 | kafka-image-tag = ${akka.kafka.testkit.testcontainers.confluent-platform-version} 26 | schema-registry-image = "confluentinc/cp-schema-registry" 27 | schema-registry-image-tag = ${akka.kafka.testkit.testcontainers.confluent-platform-version} 28 | # See https://docs.confluent.io/platform/current/installation/versions-interoperability.html 29 | confluent-platform-version = "7.0.0" 30 | 31 | # the number of Kafka brokers to include in a test cluster 32 | num-brokers = 1 33 | 34 | # set this to use a replication factor for internal Kafka topics such as Consumer Offsets and Transaction log. 
35 | # this replication factor must be less than or equal to `num-brokers` 36 | internal-topics-replication-factor = 1 37 | 38 | # set this to true to launch a testcontainer for Confluent Schema Registry 39 | use-schema-registry = false 40 | 41 | # set this to true to stream the STDOUT and STDERR of containers to SLF4J loggers 42 | # this requires the SLF4J dependency to be on the classpath and the loggers enabled in your logging configuration 43 | container-logging = false 44 | 45 | # set this to the total length of time to wait for a Kafka container cluster to start. this includes all brokers 46 | # zookeeper, and schema registry nodes. note that this can take a considerable time in limited resource environments. 47 | cluster-start-timeout = 360 s 48 | 49 | # set this to the total length of time to wait for a Kafka container readiness check to complete. note that this can 50 | # take a considerable time in limited resource environments. 51 | readiness-check-timeout = 360 s 52 | } 53 | # // #testkit-testcontainers-settings 54 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/ConsumerResultFactory.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit 7 | 8 | import akka.Done 9 | import akka.annotation.ApiMayChange 10 | import akka.kafka.ConsumerMessage 11 | import akka.kafka.ConsumerMessage.{CommittableOffset, GroupTopicPartition, PartitionOffsetCommittedMarker} 12 | import akka.kafka.internal.{CommittableOffsetImpl, KafkaAsyncConsumerCommitterRef} 13 | import org.apache.kafka.clients.consumer.{ConsumerRecord, OffsetAndMetadata} 14 | import org.apache.kafka.common.TopicPartition 15 | 16 | import scala.concurrent.Future 17 | 18 | /** 19 | * Factory methods to create instances that normally are emitted by [[akka.kafka.scaladsl.Consumer]] and [[akka.kafka.javadsl.Consumer]] flows. 
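 * A minimal sketch of fabricating a committable message for a broker-less test (topic, group,
 * offset and payload values are made up for illustration):
 * {{{
 * import akka.kafka.testkit.ConsumerResultFactory
 * import org.apache.kafka.clients.consumer.ConsumerRecord
 *
 * val record = new ConsumerRecord("topic1", 0, 115L, "key", "value")
 * val offset = ConsumerResultFactory.committableOffset("group1", "topic1", partition = 0, offset = 115L, metadata = "")
 * val message = ConsumerResultFactory.committableMessage(record, offset)
 * }}}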
20 | */ 21 | @ApiMayChange 22 | object ConsumerResultFactory { 23 | 24 | val fakeCommitter: KafkaAsyncConsumerCommitterRef = new KafkaAsyncConsumerCommitterRef(null, null)( 25 | ec = scala.concurrent.ExecutionContext.global 26 | ) { 27 | private val done = Future.successful(Done) 28 | 29 | override def commitSingle(topicPartition: TopicPartition, offset: OffsetAndMetadata): Future[Done] = done 30 | 31 | override def commitOneOfMulti(topicPartition: TopicPartition, offset: OffsetAndMetadata): Future[Done] = done 32 | 33 | override def tellCommit(topicPartition: TopicPartition, offset: OffsetAndMetadata, emergency: Boolean): Unit = () 34 | } 35 | 36 | def partitionOffset(groupId: String, topic: String, partition: Int, offset: Long): ConsumerMessage.PartitionOffset = 37 | new ConsumerMessage.PartitionOffset(ConsumerMessage.GroupTopicPartition(groupId, topic, partition), offset) 38 | 39 | def partitionOffset(key: GroupTopicPartition, offset: Long) = ConsumerMessage.PartitionOffset(key, offset) 40 | 41 | def committableOffset(groupId: String, 42 | topic: String, 43 | partition: Int, 44 | offset: Long, 45 | metadata: String): ConsumerMessage.CommittableOffset = 46 | committableOffset(partitionOffset(groupId, topic, partition, offset), metadata) 47 | 48 | def committableOffset(partitionOffset: ConsumerMessage.PartitionOffset, 49 | metadata: String): ConsumerMessage.CommittableOffset = 50 | CommittableOffsetImpl(partitionOffset, metadata)(fakeCommitter) 51 | 52 | def committableMessage[K, V]( 53 | record: ConsumerRecord[K, V], 54 | committableOffset: CommittableOffset 55 | ): ConsumerMessage.CommittableMessage[K, V] = ConsumerMessage.CommittableMessage(record, committableOffset) 56 | 57 | def transactionalMessage[K, V]( 58 | record: ConsumerRecord[K, V], 59 | partitionOffset: PartitionOffsetCommittedMarker 60 | ): ConsumerMessage.TransactionalMessage[K, V] = ConsumerMessage.TransactionalMessage(record, partitionOffset) 61 | 62 | } 63 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit 7 | 8 | import akka.actor.ActorSystem 9 | import com.typesafe.config.Config 10 | 11 | import scala.concurrent.duration._ 12 | 13 | class KafkaTestkitSettings private (val clusterTimeout: FiniteDuration, 14 | val consumerGroupTimeout: FiniteDuration, 15 | val checkInterval: FiniteDuration) { 16 | 17 | /** 18 | * Java Api 19 | */ 20 | def getClusterTimeout(): java.time.Duration = java.time.Duration.ofMillis(clusterTimeout.toMillis) 21 | 22 | /** 23 | * Java Api 24 | */ 25 | def getConsumerGroupTimeout(): java.time.Duration = java.time.Duration.ofMillis(consumerGroupTimeout.toMillis) 26 | 27 | /** 28 | * Java Api 29 | */ 30 | def getCheckInterval(): java.time.Duration = java.time.Duration.ofMillis(checkInterval.toMillis) 31 | } 32 | 33 | object KafkaTestkitSettings { 34 | final val ConfigPath = "akka.kafka.testkit" 35 | 36 | /** 37 | * Create testkit settings from ActorSystem settings. 38 | */ 39 | def apply(system: ActorSystem): KafkaTestkitSettings = 40 | KafkaTestkitSettings(system.settings.config.getConfig(ConfigPath)) 41 | 42 | /** 43 | * Java Api 44 | * 45 | * Create testkit settings from ActorSystem settings. 
46 | */ 47 | def create(system: ActorSystem): KafkaTestkitSettings = KafkaTestkitSettings(system) 48 | 49 | /** 50 | * Create testkit settings from a Config. 51 | */ 52 | def apply(config: Config): KafkaTestkitSettings = { 53 | val clusterTimeout = config.getDuration("cluster-timeout").toMillis.millis 54 | val consumerGroupTimeout = config.getDuration("consumer-group-timeout").toMillis.millis 55 | val checkInterval = config.getDuration("check-interval").toMillis.millis 56 | 57 | new KafkaTestkitSettings(clusterTimeout, consumerGroupTimeout, checkInterval) 58 | } 59 | 60 | /** 61 | * Java Api 62 | * 63 | * Create testkit settings from a Config. 64 | */ 65 | def create(config: Config): KafkaTestkitSettings = KafkaTestkitSettings(config) 66 | } 67 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/ProducerResultFactory.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit 7 | 8 | import akka.annotation.ApiMayChange 9 | import akka.kafka.ProducerMessage 10 | import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata} 11 | import org.apache.kafka.common.TopicPartition 12 | 13 | import scala.jdk.CollectionConverters._ 14 | import scala.collection.immutable 15 | 16 | /** 17 | * Factory methods to create instances that normally are emitted by [[akka.kafka.scaladsl.Producer]] and [[akka.kafka.javadsl.Producer]] flows. 18 | */ 19 | @ApiMayChange 20 | object ProducerResultFactory { 21 | 22 | def recordMetadata(msg: ProducerRecord[_, _]): RecordMetadata = { 23 | // null checks are required on Scala 2.11 24 | val partition = if (msg.partition == null) 0 else msg.partition.toInt 25 | val timestamp = if (msg.timestamp == null) 0L else msg.timestamp.toLong 26 | new RecordMetadata(new TopicPartition(msg.topic, partition), -1, 1, timestamp, 2, 2) 27 | } 28 | 29 | def recordMetadata(topic: String, partition: Int, offset: Long): RecordMetadata = 30 | new RecordMetadata(new TopicPartition(topic, partition), offset, 0, 12345L, 2, 2) 31 | 32 | def result[K, V, PassThrough]( 33 | message: ProducerMessage.Message[K, V, PassThrough] 34 | ): ProducerMessage.Result[K, V, PassThrough] = ProducerMessage.Result(recordMetadata(message.record), message) 35 | 36 | def result[K, V, PassThrough]( 37 | metadata: RecordMetadata, 38 | message: ProducerMessage.Message[K, V, PassThrough] 39 | ): ProducerMessage.Result[K, V, PassThrough] = ProducerMessage.Result(metadata, message) 40 | 41 | def multiResultPart[K, V]( 42 | metadata: RecordMetadata, 43 | record: ProducerRecord[K, V] 44 | ): ProducerMessage.MultiResultPart[K, V] = ProducerMessage.MultiResultPart(metadata, record) 45 | 46 | def multiResult[K, V, PassThrough]( 47 | parts: immutable.Seq[ProducerMessage.MultiResultPart[K, V]], 48 | passThrough: PassThrough 49 | ): ProducerMessage.MultiResult[K, V, PassThrough] = ProducerMessage.MultiResult(parts, passThrough) 50 | 51 | def multiResult[K, V, PassThrough]( 52 | message: ProducerMessage.MultiMessage[K, V, PassThrough] 53 | ): ProducerMessage.MultiResult[K, V, PassThrough] = 54 | ProducerResultFactory.multiResult( 55 | message.records.map(r => ProducerResultFactory.multiResultPart(recordMetadata(r), r)), 56 | message.passThrough 57 | ) 58 | 59 | /** Java API */ 60 | def multiResult[K, V, PassThrough]( 61 | parts: 
java.util.Collection[ProducerMessage.MultiResultPart[K, V]], 62 | passThrough: PassThrough 63 | ): ProducerMessage.MultiResult[K, V, PassThrough] = 64 | ProducerMessage.MultiResult(parts.asScala.toList, passThrough) 65 | 66 | def passThroughResult[K, V, PassThrough]( 67 | passThrough: PassThrough 68 | ): ProducerMessage.PassThroughResult[K, V, PassThrough] = ProducerMessage.PassThroughResult(passThrough) 69 | } 70 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKitChecks.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.internal 7 | 8 | import java.util.Collections 9 | import java.util.concurrent.TimeUnit 10 | 11 | import org.apache.kafka.clients.admin.{ 12 | Admin, 13 | ConsumerGroupDescription, 14 | DescribeClusterResult, 15 | DescribeConsumerGroupsOptions 16 | } 17 | import org.slf4j.Logger 18 | 19 | import scala.annotation.tailrec 20 | import scala.concurrent.duration.FiniteDuration 21 | import scala.util.{Failure, Success, Try} 22 | 23 | object KafkaTestKitChecks { 24 | def waitUntilCluster(timeout: FiniteDuration, 25 | sleepInBetween: FiniteDuration, 26 | adminClient: Admin, 27 | predicate: DescribeClusterResult => Boolean, 28 | log: Logger): Unit = 29 | periodicalCheck("cluster state", timeout, sleepInBetween)(() => adminClient.describeCluster())(predicate)(log) 30 | 31 | def waitUntilConsumerGroup(groupId: String, 32 | timeout: FiniteDuration, 33 | sleepInBetween: FiniteDuration, 34 | adminClient: Admin, 35 | predicate: ConsumerGroupDescription => Boolean, 36 | log: Logger): Unit = 37 | periodicalCheck("consumer group state", timeout, sleepInBetween)( 38 | () => 39 | adminClient 40 | .describeConsumerGroups( 41 | Collections.singleton(groupId), 42 | new DescribeConsumerGroupsOptions().timeoutMs(timeout.toMillis.toInt) 43 | ) 44 | .describedGroups() 45 | .get(groupId) 46 | .get(timeout.toMillis, TimeUnit.MILLISECONDS) 47 | )(predicate)(log) 48 | 49 | def periodicalCheck[T](description: String, timeout: FiniteDuration, sleepInBetween: FiniteDuration)( 50 | data: () => T 51 | )(predicate: T => Boolean)(log: Logger): Unit = { 52 | val maxTries = (timeout / sleepInBetween).toInt 53 | 54 | @tailrec def check(triesLeft: Int): Unit = 55 | Try(predicate(data())).recover { 56 | case ex => 57 | log.debug(s"Ignoring [${ex.getClass.getName}: ${ex.getMessage}] while waiting for desired state") 58 | false 59 | } match { 60 | case Success(false) if triesLeft > 0 => 61 | Thread.sleep(sleepInBetween.toMillis) 62 | check(triesLeft - 1) 63 | case Success(false) => 64 | throw new Error( 65 | s"Timeout while waiting for desired $description. Tried [$maxTries] times, slept [$sleepInBetween] in between." 66 | ) 67 | case Failure(ex) => 68 | throw ex 69 | case Success(true) => // predicate has been fulfilled, stop checking 70 | } 71 | 72 | check(maxTries) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/internal/TestFrameworkInterface.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.testkit.internal 7 | 8 | import org.scalatest.{BeforeAndAfterAll, Suite} 9 | 10 | trait TestFrameworkInterface { 11 | def setUp(): Unit 12 | def cleanUp(): Unit 13 | } 14 | 15 | object TestFrameworkInterface { 16 | 17 | trait Scalatest extends TestFrameworkInterface with BeforeAndAfterAll { 18 | this: Suite => 19 | 20 | abstract override protected def beforeAll(): Unit = { 21 | setUp() 22 | super.beforeAll() 23 | } 24 | 25 | abstract override protected def afterAll(): Unit = { 26 | cleanUp() 27 | super.afterAll() 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/javadsl/ConsumerControlFactory.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.javadsl 7 | 8 | import java.util.concurrent.{CompletableFuture, CompletionStage, Executor} 9 | 10 | import akka.Done 11 | import akka.annotation.ApiMayChange 12 | import akka.kafka.javadsl.Consumer 13 | import akka.stream.javadsl.{Flow, Keep, Source} 14 | import akka.stream.{scaladsl, KillSwitch, KillSwitches} 15 | import org.apache.kafka.common.{Metric, MetricName} 16 | 17 | /** 18 | * Helper factory to create [[akka.kafka.javadsl.Consumer.Control]] instances when 19 | * testing without a Kafka broker. 20 | */ 21 | @ApiMayChange 22 | object ConsumerControlFactory { 23 | 24 | def attachControl[A, B](source: Source[A, B]): Source[A, Consumer.Control] = 25 | source 26 | .viaMat(controlFlow(), Keep.right[B, Consumer.Control]) 27 | 28 | def controlFlow[A](): Flow[A, A, Consumer.Control] = 29 | scaladsl 30 | .Flow[A] 31 | .viaMat(KillSwitches.single[A])(scaladsl.Keep.right) 32 | .mapMaterializedValue(killSwitch => control(killSwitch)) 33 | .asJava 34 | 35 | def control(killSwitch: KillSwitch): Consumer.Control = new FakeControl(killSwitch) 36 | 37 | class FakeControl(val killSwitch: KillSwitch) extends Consumer.Control { 38 | 39 | val shutdownPromise: CompletableFuture[Done] = new CompletableFuture[Done]() 40 | 41 | override def stop(): CompletionStage[Done] = { 42 | killSwitch.shutdown() 43 | shutdownPromise.complete(Done) 44 | shutdownPromise 45 | } 46 | 47 | override def shutdown(): CompletionStage[Done] = stop() 48 | 49 | override def isShutdown: CompletionStage[Done] = shutdownPromise 50 | 51 | override def getMetrics: CompletionStage[java.util.Map[MetricName, Metric]] = ??? 52 | 53 | override def drainAndShutdown[T]( 54 | streamCompletion: CompletionStage[T], 55 | ec: Executor 56 | ): CompletionStage[T] = 57 | stop().thenCompose(new java.util.function.Function[Done, CompletionStage[T]] { 58 | override def apply(t: Done): CompletionStage[T] = streamCompletion 59 | }) 60 | 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/scaladsl/ConsumerControlFactory.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.testkit.scaladsl 7 | 8 | import akka.Done 9 | import akka.annotation.ApiMayChange 10 | import akka.kafka.scaladsl.Consumer 11 | import akka.stream.scaladsl.{Flow, Keep, Source} 12 | import akka.stream.{KillSwitch, KillSwitches} 13 | import org.apache.kafka.common.{Metric, MetricName} 14 | 15 | import scala.concurrent.{Future, Promise} 16 | 17 | /** 18 | * Helper factory to create [[akka.kafka.scaladsl.Consumer.Control]] instances when 19 | * testing without a Kafka broker. 20 | */ 21 | @ApiMayChange 22 | object ConsumerControlFactory { 23 | 24 | def attachControl[A, B](source: Source[A, B]): Source[A, Consumer.Control] = 25 | source 26 | .viaMat(controlFlow())(Keep.right) 27 | 28 | def controlFlow[A](): Flow[A, A, Consumer.Control] = 29 | Flow[A] 30 | .viaMat(KillSwitches.single[A])(Keep.right) 31 | .mapMaterializedValue(killSwitch => control(killSwitch)) 32 | 33 | def control(killSwitch: KillSwitch): Consumer.Control = new FakeControl(killSwitch) 34 | 35 | class FakeControl(val killSwitch: KillSwitch) extends Consumer.Control { 36 | 37 | val shutdownPromise = Promise[Done]() 38 | 39 | override def stop(): Future[Done] = { 40 | killSwitch.shutdown() 41 | shutdownPromise.trySuccess(Done) 42 | shutdownPromise.future 43 | } 44 | 45 | override def shutdown(): Future[Done] = { 46 | killSwitch.shutdown() 47 | shutdownPromise.trySuccess(Done) 48 | shutdownPromise.future 49 | } 50 | 51 | override def isShutdown: Future[Done] = shutdownPromise.future 52 | 53 | override def metrics: Future[Map[MetricName, Metric]] = ??? 54 | } 55 | 56 | } 57 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/scaladsl/ScalatestKafkaSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.scaladsl 7 | 8 | import akka.kafka.testkit.internal.TestFrameworkInterface 9 | import org.scalatest.Suite 10 | 11 | abstract class ScalatestKafkaSpec(kafkaPort: Int) 12 | extends KafkaSpec(kafkaPort) 13 | with Suite 14 | with TestFrameworkInterface.Scalatest { this: Suite => 15 | } 16 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaLike.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.scaladsl 7 | 8 | import akka.kafka.testkit.KafkaTestkitTestcontainersSettings 9 | import akka.kafka.testkit.internal.{AlpakkaKafkaContainer, SchemaRegistryContainer, TestcontainersKafka} 10 | import org.testcontainers.containers.GenericContainer 11 | 12 | /** 13 | * Uses [[https://java.testcontainers.org/ Testcontainers]] to start a Kafka cluster in a Docker container. 14 | * This trait will start Kafka only once per test session. To create a Kafka cluster per test class see 15 | * [[TestcontainersKafkaPerClassLike]]. 16 | * 17 | * The Testcontainers dependency has to be added explicitly. 
18 | */ 19 | trait TestcontainersKafkaLike extends TestcontainersKafka.Spec { 20 | override def kafkaPort: Int = TestcontainersKafka.Singleton.kafkaPort 21 | override def bootstrapServers: String = TestcontainersKafka.Singleton.bootstrapServers 22 | override def brokerContainers: Vector[AlpakkaKafkaContainer] = TestcontainersKafka.Singleton.brokerContainers 23 | override def zookeeperContainer: GenericContainer[_] = TestcontainersKafka.Singleton.zookeeperContainer 24 | override def schemaRegistryContainer: Option[SchemaRegistryContainer] = 25 | TestcontainersKafka.Singleton.schemaRegistryContainer 26 | override def schemaRegistryUrl: String = TestcontainersKafka.Singleton.schemaRegistryUrl 27 | override def startCluster(): String = TestcontainersKafka.Singleton.startCluster() 28 | override def startCluster(settings: KafkaTestkitTestcontainersSettings): String = 29 | TestcontainersKafka.Singleton.startCluster(settings) 30 | override def stopCluster(): Unit = TestcontainersKafka.Singleton.stopCluster() 31 | override def startKafka(): Unit = TestcontainersKafka.Singleton.startKafka() 32 | override def stopKafka(): Unit = TestcontainersKafka.Singleton.stopKafka() 33 | 34 | override def setUp(): Unit = { 35 | startCluster(testcontainersSettings) 36 | super.setUp() 37 | } 38 | 39 | override def cleanUp(): Unit = { 40 | // do nothing to keep everything running. testcontainers runs as a daemon and will shut all containers down 41 | // when the sbt session terminates. 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaPerClassLike.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.testkit.scaladsl 7 | 8 | import akka.kafka.testkit.internal.TestcontainersKafka 9 | 10 | /** 11 | * Uses [[https://java.testcontainers.org/ Testcontainers]] to start a Kafka broker in a Docker container once per class. 12 | * The Testcontainers dependency has to be added explicitly. 
13 | */ 14 | trait TestcontainersKafkaPerClassLike extends TestcontainersKafka.Spec { 15 | override def setUp(): Unit = { 16 | startCluster() 17 | super.setUp() 18 | } 19 | 20 | override def cleanUp(): Unit = { 21 | super.cleanUp() 22 | stopCluster() 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /tests/src/it/resources/logback-test.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} %msg%n%rEx 5 | 6 | 7 | 8 | target/tests-it.log 9 | false 10 | 11 | %d{ISO8601} %-5level [%-20.20thread] [%-36.36logger{36}] %msg%n%rEx 12 | 13 | 14 | 15 | 16 | 17 | %d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} %msg%n%rEx 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /tests/src/it/resources/reference.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | loggers = ["akka.event.slf4j.Slf4jLogger"] 3 | loglevel = "DEBUG" 4 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 5 | logger-startup-timeout = 15s 6 | 7 | actor { 8 | debug { 9 | lifecycle = off 10 | receive = off 11 | } 12 | } 13 | 14 | test { 15 | # https://github.com/akka/alpakka-kafka/pull/994 16 | timefactor = 3.0 17 | timefactor = ${?AKKA_TEST_TIMEFACTOR} 18 | single-expect-default = 10s 19 | } 20 | 21 | kafka.testkit { 22 | # waitUntilConsumerSummary timeout 23 | consumer-group-timeout = 60 seconds 24 | # amount of time to wait in-between state checks 25 | check-interval = 500 ms 26 | testcontainers.container-logging = true 27 | } 28 | 29 | kafka.consumer { 30 | stop-timeout = 3 s 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /tests/src/it/scala/akka/kafka/IntegrationTests.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka 7 | 8 | import akka.NotUsed 9 | import akka.stream.scaladsl.Flow 10 | import org.apache.kafka.common.TopicPartition 11 | import org.slf4j.Logger 12 | import org.testcontainers.containers.GenericContainer 13 | 14 | import scala.jdk.CollectionConverters._ 15 | 16 | object IntegrationTests { 17 | val MessageLogInterval = 500L 18 | 19 | def logSentMessages()(implicit log: Logger): Flow[Long, Long, NotUsed] = Flow[Long].map { i => 20 | if (i % MessageLogInterval == 0) log.info(s"Sent [$i] messages so far.") 21 | i 22 | } 23 | 24 | def logReceivedMessages()(implicit log: Logger): Flow[Long, Long, NotUsed] = Flow[Long].map { i => 25 | if (i % MessageLogInterval == 0) log.info(s"Received [$i] messages so far.") 26 | i 27 | } 28 | 29 | def logReceivedMessages(tp: TopicPartition)(implicit log: Logger): Flow[Long, Long, NotUsed] = Flow[Long].map { i => 30 | if (i % MessageLogInterval == 0) log.info(s"$tp: Received [$i] messages so far.") 31 | i 32 | } 33 | 34 | def stopRandomBroker(brokers: Vector[GenericContainer[_]], msgCount: Long)(implicit log: Logger): Unit = { 35 | val broker: GenericContainer[_] = brokers(scala.util.Random.nextInt(brokers.length)) 36 | val id = broker.getContainerId 37 | val networkAliases = broker.getNetworkAliases.asScala.mkString(",") 38 | log.warn( 39 | s"Stopping one Kafka container with network aliases [$networkAliases], container id [$id], after [$msgCount] messages" 40 | ) 41 | broker.stop() 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tests/src/main/proto/order.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "docs.sample"; 4 | option java_outer_classname = "OrderMessages"; 5 | 6 | message Order { 7 | string id = 1; 8 | } 9 | -------------------------------------------------------------------------------- /tests/src/main/scala/akka/kafka/KafkaPorts.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | /** 9 | * Ports to use for Kafka and Zookeeper throughout integration tests. 10 | * Zookeeper is Kafka port + 1 if nothing else specified. 11 | */ 12 | object KafkaPorts { 13 | 14 | val AssignmentTest = 9082 15 | val ProducerExamplesTest = 9112 16 | 17 | } 18 | -------------------------------------------------------------------------------- /tests/src/test/java/docs/javadsl/ConsumerSettingsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package docs.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.kafka.ConsumerSettings; 10 | import akka.kafka.ConsumerSettingsSpec$; 11 | // #discovery-settings 12 | import akka.kafka.javadsl.DiscoverySupport; 13 | // #discovery-settings 14 | import akka.testkit.javadsl.TestKit; 15 | import com.typesafe.config.Config; 16 | import com.typesafe.config.ConfigFactory; 17 | import org.apache.kafka.clients.consumer.ConsumerConfig; 18 | import org.apache.kafka.common.serialization.StringDeserializer; 19 | import org.junit.jupiter.api.Test; 20 | 21 | import static org.junit.jupiter.api.Assertions.*; 22 | 23 | public class ConsumerSettingsTest { 24 | 25 | @Test 26 | public void discoverySetup() throws Exception { 27 | Config config = 28 | ConfigFactory.parseString(ConsumerSettingsSpec$.MODULE$.DiscoveryConfigSection()) 29 | .withFallback(ConfigFactory.load()) 30 | .resolve(); 31 | ActorSystem system = ActorSystem.create("ConsumerSettingsTest", config); 32 | 33 | // #discovery-settings 34 | 35 | Config consumerConfig = system.settings().config().getConfig("discovery-consumer"); 36 | ConsumerSettings settings = 37 | ConsumerSettings.create(consumerConfig, new StringDeserializer(), new StringDeserializer()) 38 | .withEnrichCompletionStage( 39 | DiscoverySupport.consumerBootstrapServers(consumerConfig, system)); 40 | // #discovery-settings 41 | TestKit.shutdownActorSystem(system); 42 | } 43 | 44 | @Test 45 | public void setAssignor() throws Exception { 46 | ActorSystem system = ActorSystem.create("ConsumerSettingsTest"); 47 | ConsumerSettings settings = ConsumerSettings.create(system, new StringDeserializer(), new StringDeserializer()) 48 | .withPartitionAssignmentStrategies(new String[] { 49 | org.apache.kafka.clients.consumer.CooperativeStickyAssignor.class.getName(), 50 | org.apache.kafka.clients.consumer.StickyAssignor.class.getName() 51 | }); 52 | assertEquals( 53 | settings.getProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), 54 | "org.apache.kafka.clients.consumer.CooperativeStickyAssignor,org.apache.kafka.clients.consumer.StickyAssignor"); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /tests/src/test/java/docs/javadsl/FetchMetadataTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */
5 |
6 | package docs.javadsl;
7 |
8 | // #metadata
9 | import akka.actor.ActorRef;
10 | import akka.kafka.ConsumerSettings;
11 | import akka.kafka.KafkaConsumerActor;
12 | import akka.kafka.Metadata;
13 | import akka.kafka.testkit.javadsl.TestcontainersKafkaJunit4Test;
14 | import akka.kafka.tests.javadsl.LogCapturingJunit4;
15 | import akka.pattern.Patterns;
16 | import java.time.Duration;
17 | import java.util.List;
18 | import java.util.Optional;
19 | import java.util.concurrent.CompletionStage;
20 | import java.util.stream.Collectors;
21 | import org.apache.kafka.common.PartitionInfo;
22 |
23 | // #metadata
24 | import akka.actor.ActorSystem;
25 | import java.util.concurrent.TimeUnit;
26 | import akka.testkit.javadsl.TestKit;
27 | import org.junit.AfterClass;
28 | import org.junit.Rule;
29 | import org.junit.Test;
30 |
31 | import static junit.framework.TestCase.assertTrue;
32 |
33 | public class FetchMetadataTest extends TestcontainersKafkaJunit4Test {
34 |
35 |   @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4();
36 |
37 |   private static final ActorSystem sys = ActorSystem.create("FetchMetadataTest");
38 |
39 |   public FetchMetadataTest() {
40 |     super(sys);
41 |   }
42 |
43 |   @AfterClass
44 |   public static void afterClass() {
45 |     TestKit.shutdownActorSystem(sys);
46 |   }
47 |
48 |   @Test
49 |   public void demo() throws Exception {
50 |     ConsumerSettings<String, String> consumerSettings =
51 |         consumerDefaults().withGroupId(createGroupId());
52 |     // #metadata
53 |     Duration timeout = Duration.ofSeconds(2);
54 |     ConsumerSettings<String, String> settings =
55 |         consumerSettings.withMetadataRequestTimeout(timeout);
56 |
57 |     ActorRef consumer = system().actorOf((KafkaConsumerActor.props(settings)));
58 |
59 |     CompletionStage<Metadata.Topics> topicsStage =
60 |         Patterns.ask(consumer, Metadata.createListTopics(), timeout)
61 |             .thenApply(reply -> ((Metadata.Topics) reply));
62 |
63 |     // convert response
64 |     CompletionStage<Optional<List<String>>> response =
65 |         topicsStage
66 |             .thenApply(Metadata.Topics::getResponse)
67 |             .thenApply(
68 |                 responseOptional ->
69 |                     responseOptional.map(
70 |                         map ->
71 |                             map.entrySet().stream()
72 |                                 .flatMap(
73 |                                     entry -> {
74 |                                       String topic = entry.getKey();
75 |                                       List<PartitionInfo> partitionInfos = entry.getValue();
76 |                                       return partitionInfos.stream()
77 |                                           .map(info -> topic + ": " + info.toString());
78 |                                     })
79 |                                 .collect(Collectors.toList())));
80 |
81 |     // #metadata
82 |     Optional<List<String>> optionalStrings =
83 |         response.toCompletableFuture().get(5, TimeUnit.SECONDS);
84 |     assertTrue(optionalStrings.isPresent());
85 |   }
86 | }
87 |
-------------------------------------------------------------------------------- /tests/src/test/java/docs/javadsl/ProducerSettingsTest.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2014 - 2016 Softwaremill
3 |  * Copyright (C) 2016 - 2024 Lightbend Inc.
4 | */ 5 | 6 | package docs.javadsl; 7 | 8 | import akka.actor.ActorSystem; 9 | import akka.kafka.ProducerSettings; 10 | import akka.kafka.ProducerSettingsSpec$; 11 | // #discovery-settings 12 | import akka.kafka.javadsl.DiscoverySupport; 13 | // #discovery-settings 14 | import akka.testkit.javadsl.TestKit; 15 | import com.typesafe.config.Config; 16 | import com.typesafe.config.ConfigFactory; 17 | import org.apache.kafka.common.serialization.StringSerializer; 18 | import org.junit.jupiter.api.Test; 19 | 20 | public class ProducerSettingsTest { 21 | 22 | @Test 23 | public void discoverySetup() throws Exception { 24 | Config config = 25 | ConfigFactory.parseString(ProducerSettingsSpec$.MODULE$.DiscoveryConfigSection()) 26 | .withFallback(ConfigFactory.load()) 27 | .resolve(); 28 | ActorSystem system = ActorSystem.create("ProducerSettingsTest", config); 29 | 30 | // #discovery-settings 31 | 32 | Config producerConfig = system.settings().config().getConfig("discovery-producer"); 33 | ProducerSettings settings = 34 | ProducerSettings.create(producerConfig, new StringSerializer(), new StringSerializer()) 35 | .withEnrichCompletionStage( 36 | DiscoverySupport.producerBootstrapServers(producerConfig, system)); 37 | // #discovery-settings 38 | TestKit.shutdownActorSystem(system); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /tests/src/test/java/docs/javadsl/SampleData.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package docs.javadsl; 7 | 8 | import com.fasterxml.jackson.annotation.JsonCreator; 9 | import com.fasterxml.jackson.annotation.JsonProperty; 10 | import java.util.Objects; 11 | 12 | public class SampleData { 13 | 14 | public final String name; 15 | public final int value; 16 | 17 | @JsonCreator 18 | public SampleData(@JsonProperty("name") String name, @JsonProperty("value") int value) { 19 | this.name = name; 20 | this.value = value; 21 | } 22 | 23 | @Override 24 | public String toString() { 25 | return "SampleData(name=" + name + ",value=" + value + ")"; 26 | } 27 | 28 | @Override 29 | public boolean equals(Object o) { 30 | if (this == o) return true; 31 | if (o == null || getClass() != o.getClass()) return false; 32 | SampleData that = (SampleData) o; 33 | return value == that.value && Objects.equals(name, that.name); 34 | } 35 | 36 | @Override 37 | public int hashCode() { 38 | return Objects.hash(name, value); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /tests/src/test/java/docs/javadsl/TestkitTestcontainersTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package docs.javadsl; 7 | 8 | // #testcontainers-settings 9 | import akka.actor.ActorSystem; 10 | import akka.kafka.testkit.KafkaTestkitTestcontainersSettings; 11 | import akka.kafka.testkit.javadsl.TestcontainersKafkaTest; 12 | import org.junit.jupiter.api.AfterAll; 13 | import org.junit.jupiter.api.TestInstance; 14 | 15 | @TestInstance(TestInstance.Lifecycle.PER_CLASS) 16 | class TestkitTestcontainersTest extends TestcontainersKafkaTest { 17 | 18 | private static final ActorSystem system = ActorSystem.create("TestkitTestcontainersTest"); 19 | 20 | private static KafkaTestkitTestcontainersSettings testcontainersSettings = 21 | KafkaTestkitTestcontainersSettings.create(system) 22 | .withNumBrokers(3) 23 | .withInternalTopicsReplicationFactor(2) 24 | .withConfigureKafkaConsumer( 25 | brokerContainers -> 26 | brokerContainers.forEach( 27 | b -> b.withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"))); 28 | 29 | TestkitTestcontainersTest() { 30 | // this will only start a new cluster if it has not already been started. 31 | // 32 | // you must stop the cluster in the afterClass implementation if you want to create a cluster 33 | // per test class 34 | // using (TestInstance.Lifecycle.PER_CLASS) 35 | super(system, testcontainersSettings); 36 | } 37 | 38 | // ... 39 | 40 | // omit this implementation if you want the cluster to stay up for all your tests 41 | @AfterAll 42 | void afterClass() { 43 | TestcontainersKafkaTest.stopKafka(); 44 | } 45 | } 46 | // #testcontainers-settings 47 | -------------------------------------------------------------------------------- /tests/src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | loggers = ["akka.event.slf4j.Slf4jLogger"] 3 | loglevel = "DEBUG" 4 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 5 | logger-startup-timeout = 15s 6 | 7 | actor { 8 | # debug.receive = true 9 | } 10 | 11 | test { 12 | # https://github.com/akka/alpakka-kafka/pull/994 13 | timefactor = 3.0 14 | timefactor = ${?AKKA_TEST_TIMEFACTOR} 15 | single-expect-default = 10s 16 | } 17 | 18 | kafka.consumer { 19 | stop-timeout = 10ms 20 | } 21 | } 22 | 23 | # default is 10 seconds 24 | # akka.kafka.testkit.consumer-group-timeout = 20 seconds 25 | 26 | # #consumer-config-inheritance 27 | our-kafka-consumer: ${akka.kafka.consumer} { 28 | kafka-clients { 29 | bootstrap.servers = "kafka-host:9092" 30 | } 31 | } 32 | # #consumer-config-inheritance 33 | 34 | akka.kafka.testkit.testcontainers { 35 | # enabled for all tests because the cluster is only started once per test run 36 | use-schema-registry = true 37 | container-logging = true 38 | } 39 | -------------------------------------------------------------------------------- /tests/src/test/resources/logback-test.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | target/kafka.log 4 | false 5 | 6 | %d{ISO8601} %-5level [%-20.20thread] [%-36.36logger{36}] %msg%n%rEx 7 | 8 | 9 | 10 | 11 | 12 | %d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} %msg%n%rEx 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/ConfigSettingsSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 
Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import akka.kafka.internal.ConfigSettings 9 | import akka.kafka.tests.scaladsl.LogCapturing 10 | import com.typesafe.config.ConfigFactory 11 | import org.scalatest.matchers.should.Matchers 12 | import org.scalatest.wordspec.AnyWordSpec 13 | 14 | class ConfigSettingsSpec extends AnyWordSpec with Matchers with LogCapturing { 15 | 16 | "ConfigSettings" must { 17 | 18 | "handle nested properties" in { 19 | val conf = ConfigFactory 20 | .parseString( 21 | """ 22 | kafka-client.bootstrap.servers = "localhost:9092" 23 | kafka-client.bootstrap.foo = baz 24 | kafka-client.foo = bar 25 | kafka-client.client.id = client1 26 | """ 27 | ) 28 | .withFallback(ConfigFactory.load()) 29 | .getConfig("kafka-client") 30 | val settings = ConfigSettings.parseKafkaClientsProperties(conf) 31 | settings("bootstrap.servers") should ===("localhost:9092") 32 | settings("client.id") should ===("client1") 33 | settings("foo") should ===("bar") 34 | settings("bootstrap.foo") should ===("baz") 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/Repeated.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka 7 | 8 | import org.scalatest._ 9 | 10 | /** 11 | * Repeat test suite n times. Default: 1. 12 | * Define number of times to repeat by overriding `timesToRepeat` or passing `-DtimesToRepeat=n` 13 | * 14 | * Ex) To run a single test 10 times from the terminal 15 | * 16 | * {{{ 17 | * sbt "tests/testOnly *.TransactionsSpec -- -z \"must support copy stream with merging and multi message\" -DtimesToRepeat=2" 18 | * }}} 19 | */ 20 | trait Repeated extends TestSuiteMixin { this: TestSuite => 21 | def timesToRepeat: Int = 1 22 | 23 | protected abstract override def runTest(testName: String, args: Args): Status = { 24 | def run0(times: Int): Status = { 25 | val status = super.runTest(testName, args) 26 | if (times <= 1) status else status.thenRun(run0(times - 1)) 27 | } 28 | 29 | run0(args.configMap.getWithDefault("timesToRepeat", timesToRepeat.toString).toInt) 30 | } 31 | 32 | /** 33 | * Retry a code block n times or until Success 34 | */ 35 | @annotation.tailrec 36 | final def retry[T](n: Int)(fn: Int => T): T = 37 | util.Try { fn(n + 1) } match { 38 | case util.Success(x) => x 39 | case _ if n > 1 => retry(n - 1)(fn) 40 | case util.Failure(e) => throw e 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/internal/ConnectionCheckerSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.actor.{ActorRef, ActorSystem} 9 | import akka.kafka.Metadata 10 | import akka.kafka.ConnectionCheckerSettings 11 | import akka.kafka.KafkaConnectionFailed 12 | import akka.kafka.tests.scaladsl.LogCapturing 13 | import akka.testkit.TestKit 14 | import com.typesafe.config.ConfigFactory 15 | import org.apache.kafka.common.errors.TimeoutException 16 | import org.scalatest.wordspec.AnyWordSpecLike 17 | import org.scalatest.matchers.should.Matchers 18 | 19 | import scala.concurrent.duration._ 20 | import scala.util.{Failure, Success} 21 | 22 | class ConnectionCheckerSpec 23 | extends TestKit(ActorSystem("KafkaConnectionCheckerSpec", ConfigFactory.load())) 24 | with AnyWordSpecLike 25 | with Matchers 26 | with LogCapturing { 27 | 28 | "KafkaConnectionChecker" must { 29 | 30 | val retryInterval = 100.millis 31 | implicit val config: ConnectionCheckerSettings = 32 | ConnectionCheckerSettings(3, retryInterval, 2d) 33 | 34 | "wait for response and retryInterval before perform new ask" in withCheckerActorRef { checker => 35 | expectListTopicsRequest(retryInterval) 36 | 37 | Thread.sleep(retryInterval.toMillis) 38 | checker ! Metadata.Topics(Success(Map.empty)) 39 | 40 | expectListTopicsRequest(retryInterval) 41 | } 42 | 43 | "exponentially retry on failure and failed after max retries exceeded" in withCheckerActorRef { checker => 44 | var interval = retryInterval 45 | for (_ <- 1 to (config.maxRetries + 1)) { 46 | expectListTopicsRequest(interval) 47 | checker ! Metadata.Topics(Failure(new TimeoutException())) 48 | interval = newExponentialInterval(interval, config.factor) 49 | } 50 | 51 | watch(checker) 52 | expectMsgType[KafkaConnectionFailed] 53 | expectTerminated(checker) 54 | } 55 | 56 | "return to normal mode if in backoff mode receive Metadata.Topics(success)" in withCheckerActorRef { checker => 57 | expectListTopicsRequest(retryInterval) 58 | checker ! Metadata.Topics(Failure(new TimeoutException())) 59 | 60 | expectListTopicsRequest(newExponentialInterval(retryInterval, config.factor)) 61 | checker ! Metadata.Topics(Success(Map.empty)) 62 | 63 | expectListTopicsRequest(retryInterval) 64 | } 65 | } 66 | 67 | def newExponentialInterval(previousInterval: FiniteDuration, factor: Double): FiniteDuration = 68 | (previousInterval * factor).asInstanceOf[FiniteDuration] 69 | 70 | def expectListTopicsRequest(interval: FiniteDuration): Unit = { 71 | expectNoMessage(interval - 20.millis) 72 | expectMsg(Metadata.ListTopics) 73 | } 74 | 75 | def withCheckerActorRef[T](block: ActorRef => T)(implicit config: ConnectionCheckerSettings): T = 76 | withCheckerActorRef(config)(block) 77 | def withCheckerActorRef[T](config: ConnectionCheckerSettings)(block: ActorRef => T): T = { 78 | val checker = childActorOf(ConnectionChecker.props(config)) 79 | val res = block(checker) 80 | system.stop(watch(checker)) 81 | expectTerminated(checker) 82 | res 83 | } 84 | 85 | } 86 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/internal/EnhancedConfigSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.kafka.tests.scaladsl.LogCapturing 9 | import com.typesafe.config.ConfigFactory 10 | import org.scalatest.matchers.should.Matchers 11 | import org.scalatest.wordspec.AnyWordSpec 12 | 13 | import scala.concurrent.duration._ 14 | 15 | class EnhancedConfigSpec extends AnyWordSpec with Matchers with LogCapturing { 16 | 17 | "EnhancedConfig" must { 18 | 19 | "parse infinite durations" in { 20 | val conf = ConfigFactory.parseString("foo-interval = infinite") 21 | val interval = ConfigSettings.getPotentiallyInfiniteDuration(conf, "foo-interval") 22 | interval should ===(Duration.Inf) 23 | } 24 | 25 | "parse finite durations" in { 26 | val conf = ConfigFactory.parseString("foo-interval = 1m") 27 | val interval = ConfigSettings.getPotentiallyInfiniteDuration(conf, "foo-interval") 28 | interval should ===(1.minute) 29 | } 30 | 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/internal/OffsetAggregationSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import akka.kafka.tests.scaladsl.LogCapturing 9 | import org.apache.kafka.clients.consumer.OffsetAndMetadata 10 | import org.apache.kafka.common.TopicPartition 11 | import org.apache.kafka.common.requests.OffsetFetchResponse 12 | import org.scalatest.matchers.should.Matchers 13 | import org.scalatest.wordspec.AnyWordSpec 14 | 15 | class OffsetAggregationSpec extends AnyWordSpec with Matchers with LogCapturing { 16 | 17 | val topicA = "topicA" 18 | val topicB = "topicB" 19 | 20 | "aggregateOffsets" should { 21 | "give all offsets for one element" in { 22 | val in = new TopicPartition(topicA, 1) -> new OffsetAndMetadata(12, OffsetFetchResponse.NO_METADATA) 23 | KafkaConsumerActor.aggregateOffsets(List(in)) shouldBe Map(in) 24 | } 25 | 26 | "give the highest offsets" in { 27 | val in1 = new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA) 28 | val in2 = new TopicPartition(topicA, 1) -> new OffsetAndMetadata(12, OffsetFetchResponse.NO_METADATA) 29 | KafkaConsumerActor.aggregateOffsets(List(in1, in2)) shouldBe Map(in1) 30 | } 31 | 32 | "give the highest offsets (other order)" in { 33 | val in1 = new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA) 34 | val in2 = new TopicPartition(topicA, 1) -> new OffsetAndMetadata(12, OffsetFetchResponse.NO_METADATA) 35 | KafkaConsumerActor.aggregateOffsets(List(in2, in1)) shouldBe Map(in1) 36 | } 37 | 38 | "give the highest offsets (when mixed)" in { 39 | val in1 = List( 40 | new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA), 41 | new TopicPartition(topicB, 1) -> new OffsetAndMetadata(11, OffsetFetchResponse.NO_METADATA) 42 | ) 43 | val in2 = List( 44 | new TopicPartition(topicA, 1) -> new OffsetAndMetadata(12, OffsetFetchResponse.NO_METADATA), 45 | new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA) 46 | ) 47 | KafkaConsumerActor.aggregateOffsets(in1 ++ in2) shouldBe Map( 48 | new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA), 49 | new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA) 50 | ) 51 | } 52 | } 53 | 54 | } 55 | 
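The spec above pins down the contract of KafkaConsumerActor.aggregateOffsets: when the same TopicPartition appears more than once, the entry with the highest offset wins. A stand-alone Scala sketch of that folding logic (illustrative only, under the assumption sketched here, not the library's internal implementation) could look like this:

import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition

object AggregateOffsetsSketch {
  // For each partition keep the entry with the highest offset, matching the spec's expectations.
  def aggregateOffsets(
      offsets: List[(TopicPartition, OffsetAndMetadata)]
  ): Map[TopicPartition, OffsetAndMetadata] =
    offsets.foldLeft(Map.empty[TopicPartition, OffsetAndMetadata]) {
      case (acc, (tp, offset)) =>
        if (acc.get(tp).exists(_.offset() >= offset.offset())) acc
        else acc.updated(tp, offset) // first occurrence, or a higher offset than seen so far
    }
}
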
-------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/internal/SubscriptionsSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.internal 7 | 8 | import java.net.URLEncoder 9 | 10 | import akka.kafka.tests.scaladsl.LogCapturing 11 | import akka.kafka.{Subscription, Subscriptions} 12 | import akka.util.ByteString 13 | import org.apache.kafka.common.TopicPartition 14 | import org.scalatest.matchers.should.Matchers 15 | import org.scalatest.wordspec.AnyWordSpec 16 | 17 | class SubscriptionsSpec extends AnyWordSpec with Matchers with LogCapturing { 18 | 19 | "URL encoded subscription" should { 20 | "be readable for topics" in { 21 | encode(Subscriptions.topics(Set("topic1", "topic2"))) should be( 22 | "topic1+topic2" 23 | ) 24 | } 25 | 26 | "be readable for patterns" in { 27 | encode(Subscriptions.topicPattern("topic.*")) should be("pattern+topic.*") 28 | } 29 | 30 | "be readable for assignments" in { 31 | encode(Subscriptions.assignment(Set(new TopicPartition("topic1", 1)))) should be("topic1-1") 32 | } 33 | 34 | "be readable for assignments with offset" in { 35 | encode(Subscriptions.assignmentWithOffset(Map(new TopicPartition("topic1", 1) -> 123L))) should be( 36 | "topic1-1+offset123" 37 | ) 38 | } 39 | 40 | "be readable for multiple assignments with offset" in { 41 | encode( 42 | Subscriptions.assignmentWithOffset( 43 | Map(new TopicPartition("topic1", 1) -> 123L, new TopicPartition("A-Topic-Name", 2) -> 456L) 44 | ) 45 | ) should be( 46 | "topic1-1+offset123+A-Topic-Name-2+offset456" 47 | ) 48 | } 49 | 50 | "be readable for multiple assignments with timestamp" in { 51 | encode( 52 | Subscriptions.assignmentOffsetsForTimes( 53 | Map(new TopicPartition("topic1", 1) -> 12345L, new TopicPartition("Another0Topic", 1) -> 998822L) 54 | ) 55 | ) should be( 56 | "topic1-1+timestamp12345+Another0Topic-1+timestamp998822" 57 | ) 58 | } 59 | } 60 | 61 | private def encode(subscription: Subscription) = 62 | URLEncoder.encode(subscription.renderStageAttribute, ByteString.UTF_8) 63 | 64 | } 65 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/scaladsl/ControlSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import java.util.concurrent.atomic.AtomicBoolean 9 | 10 | import akka.Done 11 | import akka.kafka.scaladsl.Consumer.DrainingControl 12 | import akka.kafka.tests.scaladsl.LogCapturing 13 | import org.apache.kafka.common.{Metric, MetricName} 14 | import org.scalatest.concurrent.ScalaFutures 15 | import org.scalatest.wordspec.AnyWordSpec 16 | import org.scalatest.matchers.should.Matchers 17 | 18 | import scala.concurrent.ExecutionContext.Implicits.global 19 | import scala.concurrent.Future 20 | 21 | object ControlSpec { 22 | class ControlImpl(stopFuture: Future[Done] = Future.successful(Done), 23 | shutdownFuture: Future[Done] = Future.successful(Done)) 24 | extends Consumer.Control { 25 | val shutdownCalled = new AtomicBoolean(false) 26 | 27 | override def stop(): Future[Done] = stopFuture 28 | override def shutdown(): Future[Done] = { 29 | shutdownCalled.set(true) 30 | shutdownFuture 31 | } 32 | override def isShutdown: Future[Done] = Future.failed(new NotImplementedError("")) 33 | override def metrics: Future[Map[MetricName, Metric]] = ??? 34 | } 35 | } 36 | 37 | class ControlSpec extends AnyWordSpec with ScalaFutures with Matchers with LogCapturing { 38 | import ControlSpec._ 39 | 40 | "Control" should { 41 | "drain to stream result" in { 42 | val control = new ControlImpl 43 | val drainingControl = DrainingControl.apply(control, Future.successful("expected")) 44 | drainingControl.drainAndShutdown().futureValue should be("expected") 45 | control.shutdownCalled.get() should be(true) 46 | } 47 | 48 | "drain to stream failure" in { 49 | val control = new ControlImpl 50 | 51 | val drainingControl = DrainingControl.apply(control, Future.failed(new RuntimeException("expected"))) 52 | val value = drainingControl.drainAndShutdown().failed.futureValue 53 | value shouldBe a[RuntimeException] 54 | value.getMessage should be("expected") 55 | control.shutdownCalled.get() should be(true) 56 | } 57 | 58 | "drain to stream failure even if shutdown fails" in { 59 | val control = new ControlImpl(shutdownFuture = Future.failed(new RuntimeException("not this"))) 60 | 61 | val drainingControl = DrainingControl.apply(control, Future.failed(new RuntimeException("expected"))) 62 | val value = drainingControl.drainAndShutdown().failed.futureValue 63 | value shouldBe a[RuntimeException] 64 | value.getMessage should be("expected") 65 | control.shutdownCalled.get() should be(true) 66 | } 67 | 68 | "drain to shutdown failure when stream succeeds" in { 69 | val control = new ControlImpl(shutdownFuture = Future.failed(new RuntimeException("expected"))) 70 | 71 | val drainingControl = DrainingControl.apply(control, Future.successful(Done)) 72 | val value = drainingControl.drainAndShutdown().failed.futureValue 73 | value shouldBe a[RuntimeException] 74 | value.getMessage should be("expected") 75 | control.shutdownCalled.get() should be(true) 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/scaladsl/MisconfiguredConsumerSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import akka.actor.ActorSystem 9 | import akka.kafka.tests.scaladsl.LogCapturing 10 | import akka.kafka.{ConsumerSettings, Subscriptions} 11 | import akka.stream.scaladsl.Sink 12 | import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped 13 | import akka.testkit.TestKit 14 | import org.apache.kafka.common.TopicPartition 15 | import org.apache.kafka.common.serialization.StringDeserializer 16 | import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures} 17 | import org.scalatest.matchers.should.Matchers 18 | import org.scalatest.wordspec.AnyWordSpecLike 19 | 20 | class MisconfiguredConsumerSpec 21 | extends TestKit(ActorSystem()) 22 | with AnyWordSpecLike 23 | with Matchers 24 | with ScalaFutures 25 | with Eventually 26 | with IntegrationPatience 27 | with LogCapturing { 28 | 29 | def bootstrapServers = "nowhere:6666" 30 | 31 | "Failing consumer construction" must { 32 | "be signalled to the stream by single sources" in assertAllStagesStopped { 33 | val consumerSettings = 34 | ConsumerSettings(system, new StringDeserializer, new StringDeserializer).withGroupId("group") 35 | val result = Consumer 36 | .plainSource(consumerSettings, Subscriptions.topics("topic")) 37 | .runWith(Sink.head) 38 | 39 | result.failed.futureValue shouldBe a[org.apache.kafka.common.KafkaException] 40 | } 41 | 42 | "be signalled to the stream by single sources with external offset" in assertAllStagesStopped { 43 | val consumerSettings = 44 | ConsumerSettings(system, new StringDeserializer, new StringDeserializer).withGroupId("group") 45 | val result = Consumer 46 | .plainSource(consumerSettings, Subscriptions.assignmentWithOffset(new TopicPartition("topic", 0) -> 3123L)) 47 | .runWith(Sink.ignore) 48 | 49 | result.failed.futureValue shouldBe a[org.apache.kafka.common.KafkaException] 50 | } 51 | 52 | "be signalled to the stream by partitioned sources" in assertAllStagesStopped { 53 | val consumerSettings = 54 | ConsumerSettings(system, new StringDeserializer, new StringDeserializer).withGroupId("group") 55 | val result = Consumer 56 | .plainPartitionedSource(consumerSettings, Subscriptions.topics("topic")) 57 | .runWith(Sink.head) 58 | 59 | result.failed.futureValue shouldBe a[org.apache.kafka.common.KafkaException] 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/scaladsl/MisconfiguredProducerSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import akka.actor.ActorSystem 9 | import akka.kafka.ProducerSettings 10 | import akka.kafka.tests.scaladsl.LogCapturing 11 | import akka.stream.scaladsl.Source 12 | import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped 13 | import akka.testkit.TestKit 14 | import org.apache.kafka.clients.producer.ProducerRecord 15 | import org.apache.kafka.common.serialization.StringSerializer 16 | import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures} 17 | import org.scalatest.matchers.should.Matchers 18 | import org.scalatest.wordspec.AnyWordSpecLike 19 | 20 | class MisconfiguredProducerSpec 21 | extends TestKit(ActorSystem()) 22 | with AnyWordSpecLike 23 | with Matchers 24 | with ScalaFutures 25 | with Eventually 26 | with IntegrationPatience 27 | with LogCapturing { 28 | 29 | "Failing producer construction" must { 30 | "fail stream appropriately" in assertAllStagesStopped { 31 | val producerSettings = 32 | ProducerSettings(system, new StringSerializer, new StringSerializer) 33 | .withBootstrapServers("invalid-bootstrap-server") 34 | 35 | val completion = Source 36 | .single(new ProducerRecord[String, String]("topic", "key", "value")) 37 | .runWith(Producer.plainSink(producerSettings)) 38 | 39 | val exception = completion.failed.futureValue 40 | exception shouldBe a[org.apache.kafka.common.KafkaException] 41 | exception.getMessage shouldBe "Failed to construct kafka producer" 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/scaladsl/SpecBase.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.scaladsl 7 | 8 | import akka.kafka.Repeated 9 | import akka.kafka.tests.scaladsl.LogCapturing 10 | // #testkit 11 | import akka.kafka.testkit.scaladsl.ScalatestKafkaSpec 12 | import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures} 13 | import org.scalatest.wordspec.AnyWordSpecLike 14 | import org.scalatest.matchers.should.Matchers 15 | 16 | abstract class SpecBase(kafkaPort: Int) 17 | extends ScalatestKafkaSpec(kafkaPort) 18 | with AnyWordSpecLike 19 | with Matchers 20 | with ScalaFutures 21 | // #testkit 22 | with LogCapturing 23 | with IntegrationPatience 24 | with Repeated 25 | // #testkit 26 | with Eventually { 27 | 28 | protected def this() = this(kafkaPort = -1) 29 | } 30 | 31 | // #testkit 32 | 33 | // #testcontainers 34 | import akka.kafka.testkit.scaladsl.TestcontainersKafkaLike 35 | 36 | class TestcontainersSampleSpec extends SpecBase with TestcontainersKafkaLike { 37 | // ... 38 | } 39 | // #testcontainers 40 | 41 | // #testcontainers-settings 42 | import akka.kafka.testkit.KafkaTestkitTestcontainersSettings 43 | import akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike 44 | 45 | class TestcontainersNewSettingsSampleSpec extends SpecBase with TestcontainersKafkaPerClassLike { 46 | 47 | override val testcontainersSettings = KafkaTestkitTestcontainersSettings(system) 48 | .withNumBrokers(3) 49 | .withInternalTopicsReplicationFactor(2) 50 | .withConfigureKafka { brokerContainers => 51 | brokerContainers.foreach(_.withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false")) 52 | } 53 | 54 | // ... 
55 | } 56 | // #testcontainers-settings 57 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/tests/CapturingAppender.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.tests 7 | 8 | import akka.annotation.InternalApi 9 | import ch.qos.logback.classic.spi.ILoggingEvent 10 | import ch.qos.logback.core.AppenderBase 11 | 12 | /** 13 | * See https://doc.akka.io/libraries/akka-core/current/typed/testing-async.html#silence-logging-output-from-tests 14 | * 15 | * INTERNAL API 16 | */ 17 | @InternalApi private[akka] object CapturingAppender { 18 | import LogbackUtil._ 19 | 20 | private val CapturingAppenderName = "CapturingAppender" 21 | 22 | def get(loggerName: String): CapturingAppender = { 23 | val logbackLogger = getLogbackLogger(loggerName) 24 | logbackLogger.getAppender(CapturingAppenderName) match { 25 | case null => 26 | throw new IllegalStateException( 27 | s"$CapturingAppenderName not defined for [${loggerNameOrRoot(loggerName)}] in logback-test.xml" 28 | ) 29 | case appender: CapturingAppender => appender 30 | case other => 31 | throw new IllegalStateException(s"Unexpected $CapturingAppender: $other") 32 | } 33 | } 34 | 35 | } 36 | 37 | /** 38 | * See https://doc.akka.io/libraries/akka-core/current/typed/testing-async.html#silence-logging-output-from-tests 39 | * 40 | * INTERNAL API 41 | * 42 | * Logging from tests can be silenced by this appender. When there is a test failure 43 | * the captured logging events are flushed to the appenders defined for the 44 | * akka.actor.testkit.typed.internal.CapturingAppenderDelegate logger. 45 | * 46 | * The flushing on test failure is handled by [[akka.actor.testkit.typed.scaladsl.LogCapturing]] 47 | * for ScalaTest and [[akka.actor.testkit.typed.javadsl.LogCapturing]] for JUnit. 48 | * 49 | * Use configuration like the following the logback-test.xml: 50 | * 51 | * {{{ 52 | * 53 | * 54 | * 55 | * 56 | * 57 | * 58 | * 59 | * 60 | * 61 | * }}} 62 | */ 63 | @InternalApi private[akka] class CapturingAppender extends AppenderBase[ILoggingEvent] { 64 | import LogbackUtil._ 65 | 66 | private var buffer: Vector[ILoggingEvent] = Vector.empty 67 | 68 | // invocations are synchronized via doAppend in AppenderBase 69 | override def append(event: ILoggingEvent): Unit = { 70 | event.prepareForDeferredProcessing() 71 | buffer :+= event 72 | } 73 | 74 | /** 75 | * Flush buffered logging events to the output appenders 76 | * Also clears the buffer.. 77 | */ 78 | def flush(): Unit = synchronized { 79 | import scala.jdk.CollectionConverters._ 80 | val logbackLogger = getLogbackLogger(classOf[CapturingAppender].getName + "Delegate") 81 | val appenders = logbackLogger.iteratorForAppenders().asScala.filterNot(_ == this).toList 82 | for (event <- buffer; appender <- appenders) { 83 | appender.doAppend(event) 84 | } 85 | clear() 86 | } 87 | 88 | /** 89 | * Discards the buffered logging events without output. 90 | */ 91 | def clear(): Unit = synchronized { 92 | buffer = Vector.empty 93 | } 94 | 95 | } 96 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/tests/LogbackUtil.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.tests 7 | 8 | import akka.annotation.InternalApi 9 | import ch.qos.logback.classic.Level 10 | import org.slf4j.LoggerFactory 11 | 12 | /** 13 | * See https://doc.akka.io/libraries/akka-core/current/typed/testing-async.html#silence-logging-output-from-tests 14 | * 15 | * INTERNAL API 16 | */ 17 | @InternalApi private[akka] object LogbackUtil { 18 | def loggerNameOrRoot(loggerName: String): String = 19 | if (loggerName == "") org.slf4j.Logger.ROOT_LOGGER_NAME else loggerName 20 | 21 | def getLogbackLogger(loggerName: String): ch.qos.logback.classic.Logger = { 22 | LoggerFactory.getLogger(loggerNameOrRoot(loggerName)) match { 23 | case logger: ch.qos.logback.classic.Logger => logger 24 | case null => 25 | throw new IllegalArgumentException(s"Couldn't find logger for [$loggerName].") 26 | case other => 27 | throw new IllegalArgumentException( 28 | s"Requires Logback logger for [$loggerName], it was a [${other.getClass.getName}]" 29 | ) 30 | } 31 | } 32 | 33 | def convertLevel(level: ch.qos.logback.classic.Level): Level = { 34 | level.levelInt match { 35 | case ch.qos.logback.classic.Level.TRACE_INT => Level.TRACE 36 | case ch.qos.logback.classic.Level.DEBUG_INT => Level.DEBUG 37 | case ch.qos.logback.classic.Level.INFO_INT => Level.INFO 38 | case ch.qos.logback.classic.Level.WARN_INT => Level.WARN 39 | case ch.qos.logback.classic.Level.ERROR_INT => Level.ERROR 40 | case _ => 41 | throw new IllegalArgumentException("Level " + level.levelStr + ", " + level.levelInt + " is unknown.") 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/tests/javadsl/LogCapturingExtension.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.tests.javadsl 7 | 8 | import akka.kafka.tests.CapturingAppender 9 | import org.junit.jupiter.api.extension.{AfterTestExecutionCallback, BeforeTestExecutionCallback, ExtensionContext} 10 | 11 | class LogCapturingExtension extends BeforeTestExecutionCallback with AfterTestExecutionCallback { 12 | 13 | // eager access of CapturingAppender to fail fast if misconfigured 14 | private val capturingAppender = CapturingAppender.get("") 15 | 16 | override def beforeTestExecution(context: ExtensionContext): Unit = { 17 | capturingAppender.clear() 18 | } 19 | 20 | override def afterTestExecution(context: ExtensionContext): Unit = { 21 | if (context.getExecutionException.isPresent) { 22 | val error = context.getExecutionException.get().toString 23 | val method = 24 | s"[${Console.BLUE}${context.getRequiredTestClass.getName}: ${context.getRequiredTestMethod.getName}${Console.RESET}]" 25 | System.out.println( 26 | s"--> $method Start of log messages of test that failed with $error" 27 | ) 28 | capturingAppender.flush() 29 | System.out.println( 30 | s"<-- $method End of log messages of test that failed with $error" 31 | ) 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/tests/javadsl/LogCapturingJunit4.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 
4 | */ 5 | 6 | package akka.kafka.tests.javadsl 7 | 8 | import akka.kafka.tests.CapturingAppender 9 | 10 | import scala.util.control.NonFatal 11 | import org.junit.rules.TestRule 12 | import org.junit.runner.Description 13 | import org.junit.runners.model.Statement 14 | import org.slf4j.LoggerFactory 15 | 16 | /** 17 | * See https://doc.akka.io/libraries/akka-core/current/typed/testing-async.html#silence-logging-output-from-tests 18 | * 19 | * JUnit `TestRule` to make log lines appear only when the test failed. 20 | * 21 | * Use this in test by adding a public field annotated with `@TestRule`: 22 | * {{{ 23 | * @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); 24 | * }}} 25 | * 26 | * Requires Logback and configuration like the following the logback-test.xml: 27 | * 28 | * {{{ 29 | * 30 | * 31 | * 32 | * 33 | * 34 | * 35 | * 36 | * 37 | * 38 | * }}} 39 | */ 40 | final class LogCapturingJunit4 extends TestRule { 41 | // eager access of CapturingAppender to fail fast if misconfigured 42 | private val capturingAppender = CapturingAppender.get("") 43 | 44 | private val myLogger = LoggerFactory.getLogger(classOf[LogCapturingJunit4]) 45 | 46 | override def apply(base: Statement, description: Description): Statement = { 47 | new Statement { 48 | override def evaluate(): Unit = { 49 | try { 50 | myLogger.info(s"Logging started for test [${description.getClassName}: ${description.getMethodName}]") 51 | base.evaluate() 52 | myLogger.info( 53 | s"Logging finished for test [${description.getClassName}: ${description.getMethodName}] that was successful" 54 | ) 55 | } catch { 56 | case NonFatal(e) => 57 | val method = s"[${Console.BLUE}${description.getClassName}: ${description.getMethodName}${Console.RESET}]" 58 | val error = e.toString 59 | System.out.println(s"--> $method Start of log messages of test that failed with $error") 60 | capturingAppender.flush() 61 | System.out.println(s"<-- $method End of log messages of test that failed with $error") 62 | throw e 63 | } finally { 64 | capturingAppender.clear() 65 | } 66 | } 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /tests/src/test/scala/akka/kafka/tests/scaladsl/LogCapturing.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package akka.kafka.tests.scaladsl 7 | 8 | import akka.kafka.tests.CapturingAppender 9 | 10 | import scala.util.control.NonFatal 11 | 12 | import org.scalatest.BeforeAndAfterAll 13 | import org.scalatest.Outcome 14 | import org.scalatest.TestSuite 15 | import org.slf4j.LoggerFactory 16 | 17 | /** 18 | * See https://doc.akka.io/libraries/akka-core/current/typed/testing-async.html#silence-logging-output-from-tests 19 | * 20 | * Mixin this trait to a ScalaTest test to make log lines appear only when the test failed. 
21 | * 22 | * Requires Logback and configuration like the following the logback-test.xml: 23 | * 24 | * {{{ 25 | * 26 | * 27 | * 28 | * 29 | * 30 | * 31 | * 32 | * 33 | * 34 | * }}} 35 | */ 36 | trait LogCapturing extends BeforeAndAfterAll { self: TestSuite => 37 | 38 | // eager access of CapturingAppender to fail fast if misconfigured 39 | private val capturingAppender = CapturingAppender.get("") 40 | 41 | private val myLogger = LoggerFactory.getLogger(classOf[LogCapturing]) 42 | 43 | override protected def afterAll(): Unit = { 44 | try { 45 | super.afterAll() 46 | } catch { 47 | case NonFatal(e) => 48 | myLogger.error("Exception from afterAll", e) 49 | capturingAppender.flush() 50 | } finally { 51 | capturingAppender.clear() 52 | } 53 | } 54 | 55 | abstract override def withFixture(test: NoArgTest): Outcome = { 56 | myLogger.info(s"Logging started for test [${self.getClass.getName}: ${test.name}]") 57 | val res = test() 58 | myLogger.info(s"Logging finished for test [${self.getClass.getName}: ${test.name}] that [$res]") 59 | 60 | if (!(res.isSucceeded || res.isPending)) { 61 | println( 62 | s"--> [${Console.BLUE}${self.getClass.getName}: ${test.name}${Console.RESET}] Start of log messages of test that [$res]" 63 | ) 64 | capturingAppender.flush() 65 | println( 66 | s"<-- [${Console.BLUE}${self.getClass.getName}: ${test.name}${Console.RESET}] End of log messages of test that [$res]" 67 | ) 68 | } 69 | 70 | res 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /tests/src/test/scala/docs/sample/order/OrderProto.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | // Generated by the Scala Plugin for the Protocol Buffer Compiler. 7 | // Do not edit! 8 | // 9 | // Protofile syntax: PROTO3 10 | 11 | package docs.sample.order 12 | 13 | object OrderProto extends _root_.scalapb.GeneratedFileObject { 14 | lazy val dependencies: Seq[_root_.scalapb.GeneratedFileObject] = Seq.empty 15 | lazy val messagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = 16 | Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]]( 17 | docs.sample.order.Order 18 | ) 19 | private lazy val ProtoBytes: _root_.scala.Array[Byte] = 20 | scalapb.Encoding.fromBase64( 21 | scala.collection.immutable 22 | .Seq( 23 | """CgtvcmRlci5wcm90byIgCgVPcmRlchIXCgJpZBgBIAEoCUIH4j8EEgJpZFICaWRCHAoLZG9jcy5zYW1wbGVCDU9yZGVyTWVzc 24 | 2FnZXNiBnByb3RvMw==""" 25 | ) 26 | .mkString 27 | ) 28 | lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = { 29 | val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes) 30 | _root_.scalapb.descriptors.FileDescriptor.buildFrom(scalaProto, dependencies.map(_.scalaDescriptor)) 31 | } 32 | lazy val javaDescriptor: com.google.protobuf.Descriptors.FileDescriptor = { 33 | val javaProto = com.google.protobuf.DescriptorProtos.FileDescriptorProto.parseFrom(ProtoBytes) 34 | com.google.protobuf.Descriptors.FileDescriptor.buildFrom(javaProto, 35 | _root_.scala.Array( 36 | )) 37 | } 38 | @deprecated("Use javaDescriptor instead. 
In a future version this will refer to scalaDescriptor.", "ScalaPB 0.5.47") 39 | def descriptor: com.google.protobuf.Descriptors.FileDescriptor = javaDescriptor 40 | } 41 | -------------------------------------------------------------------------------- /tests/src/test/scala/docs/scaladsl/DocsSpecBase.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2016 Softwaremill 3 | * Copyright (C) 2016 - 2024 Lightbend Inc. 4 | */ 5 | 6 | package docs.scaladsl 7 | 8 | import akka.NotUsed 9 | import akka.kafka.testkit.scaladsl.KafkaSpec 10 | import akka.kafka.testkit.internal.TestFrameworkInterface 11 | import akka.stream.scaladsl.Flow 12 | import org.scalatest.Suite 13 | import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures} 14 | import org.scalatest.flatspec.AnyFlatSpecLike 15 | import org.scalatest.matchers.should.Matchers 16 | 17 | abstract class DocsSpecBase(kafkaPort: Int) 18 | extends KafkaSpec(kafkaPort) 19 | with AnyFlatSpecLike 20 | with TestFrameworkInterface.Scalatest 21 | with Matchers 22 | with ScalaFutures 23 | with IntegrationPatience 24 | with Eventually { 25 | 26 | this: Suite => 27 | 28 | protected def this() = this(kafkaPort = -1) 29 | 30 | def businessFlow[T]: Flow[T, T, NotUsed] = Flow[T] 31 | 32 | } 33 | --------------------------------------------------------------------------------
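As a closing illustration of how the testkit pieces above can be combined, here is a hypothetical, broker-less usage sketch of the scaladsl ConsumerControlFactory shown earlier. It is not part of the repository sources; the object name and the plain Source standing in for a real consumer source are assumptions.

import akka.actor.ActorSystem
import akka.kafka.testkit.scaladsl.ConsumerControlFactory
import akka.stream.scaladsl.{Keep, Sink, Source}

object ControlFactoryExample extends App {
  implicit val system: ActorSystem = ActorSystem("ControlFactoryExample")
  import system.dispatcher

  // Any Source can stand in for a real Consumer source; attachControl materializes a Consumer.Control.
  val (control, done) = ConsumerControlFactory
    .attachControl(Source(1 to 100))
    .toMat(Sink.foreach(println))(Keep.both)
    .run()

  // FakeControl.shutdown() flips the kill switch and completes the shutdown future.
  done.flatMap(_ => control.shutdown()).onComplete(_ => system.terminate())
}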