├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── ISSUE_TEMPLATE │ ├── 1_feature_request.md │ ├── 2_enhancement_request.md │ └── 3_bug_report.md ├── close-label.yml ├── dependabot.yml ├── release-notes.yml └── workflows │ ├── add-to-project.yml │ ├── dependabot-automation.yml │ ├── docs.yml │ ├── main.yml │ ├── pullrequest.yml │ └── release-notes.yml ├── .gitignore ├── .mvn └── wrapper │ ├── maven-wrapper.jar │ └── maven-wrapper.properties ├── .run ├── Kafka Axon Example - PooledStreaming Producer and Consumer using Cloud Events.run.xml ├── Kafka Axon Example - PooledStreaming Producer and Consumer.run.xml ├── Kafka Axon Example - PooledStreaming Producer and Subscribing Consumer.run.xml ├── Kafka Axon Example - PooledStreaming Producer and Tracking Consumer.run.xml ├── Kafka Axon Example - Subscribing Producer and Consumer.run.xml ├── Kafka Axon Example - Subscribing Producer and PooledStreaming Consumer.run.xml ├── Kafka Axon Example - Subscribing Producer and Tracking Consumer.run.xml ├── Kafka Axon Example - Tracking Producer and Consumer.run.xml ├── Kafka Axon Example - Tracking Producer and PooledStreaming Consumer.run.xml └── Kafka Axon Example - Tracking Producer and Subscribing Consumer.run.xml ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── coverage-report └── pom.xml ├── docs ├── README.md ├── _playbook │ ├── .gitignore │ ├── .vale.ini │ ├── package.json │ └── playbook.yaml └── reference │ ├── antora.yml │ └── modules │ ├── ROOT │ └── pages │ │ ├── consuming.adoc │ │ ├── index.adoc │ │ ├── message-format.adoc │ │ ├── publishing.adoc │ │ ├── release-notes.adoc │ │ └── springboot-configuration.adoc │ └── nav.adoc ├── kafka-axon-example ├── README.md ├── docker-compose.yaml ├── pom.xml └── src │ └── main │ ├── kotlin │ └── org │ │ └── axonframework │ │ └── extensions │ │ └── kafka │ │ └── example │ │ ├── KafkaAxonExampleApplication.kt │ │ ├── api │ │ ├── Commands.kt │ │ └── Events.kt │ │ ├── client │ │ └── BankClient.kt │ │ ├── core │ │ └── BankAccount.kt │ │ └── handler │ │ └── BankEventHandler.kt │ └── resources │ ├── application-cloud-event.yml │ ├── application-pooled-streaming-consumer.yml │ ├── application-pooled-streaming-producer.yml │ ├── application-subscribing-consumer.yml │ ├── application-subscribing-producer.yml │ ├── application-tracking-consumer.yml │ ├── application-tracking-producer.yml │ └── application.yml ├── kafka-spring-boot-3-integrationtests ├── pom.xml └── src │ └── test │ ├── java │ └── org │ │ └── axonframework │ │ └── extensions │ │ └── kafka │ │ ├── eventhandling │ │ └── util │ │ │ └── ProducerConfigUtil.java │ │ └── integration │ │ ├── StreamableKafkaSourceIntegrationTest.java │ │ └── TokenReplayIntegrationTest.java │ └── resources │ └── logback-test.xml ├── kafka-spring-boot-autoconfigure ├── pom.xml └── src │ ├── main │ ├── java │ │ └── org │ │ │ └── axonframework │ │ │ └── extensions │ │ │ └── kafka │ │ │ ├── KafkaProperties.java │ │ │ └── autoconfig │ │ │ └── KafkaAutoConfiguration.java │ └── resources │ │ └── META-INF │ │ ├── spring.factories │ │ └── spring │ │ └── org.springframework.boot.autoconfigure.AutoConfiguration.imports │ └── test │ ├── java │ └── org │ │ └── axonframework │ │ └── extensions │ │ └── kafka │ │ ├── KafkaMessageSourceConfigurerIntegrationTest.java │ │ ├── KafkaPropertiesIntegrationTest.java │ │ ├── SubscribingProducerIntegrationTest.java │ │ └── autoconfig │ │ └── KafkaAutoConfigurationIntegrationTest.java │ └── resources │ ├── application-map-style.properties │ ├── 
application-source-configurer.properties │ ├── application-subscribing.properties │ ├── ksLoc │ ├── ksLocP │ ├── log4j2.properties │ ├── tsLoc │ └── tsLocP ├── kafka-spring-boot-starter └── pom.xml ├── kafka ├── pom.xml └── src │ ├── main │ ├── java │ │ └── org │ │ │ └── axonframework │ │ │ └── extensions │ │ │ └── kafka │ │ │ ├── configuration │ │ │ └── KafkaMessageSourceConfigurer.java │ │ │ └── eventhandling │ │ │ ├── DefaultKafkaMessageConverter.java │ │ │ ├── HeaderUtils.java │ │ │ ├── KafkaMessageConverter.java │ │ │ ├── cloudevent │ │ │ ├── CloudEventKafkaMessageConverter.java │ │ │ ├── ExtensionUtils.java │ │ │ ├── InvalidMetaDataException.java │ │ │ └── MetadataUtils.java │ │ │ ├── consumer │ │ │ ├── AsyncFetcher.java │ │ │ ├── ConsumerFactory.java │ │ │ ├── ConsumerSeekUtil.java │ │ │ ├── DefaultConsumerFactory.java │ │ │ ├── EventConsumer.java │ │ │ ├── FetchEventException.java │ │ │ ├── FetchEventsTask.java │ │ │ ├── Fetcher.java │ │ │ ├── OffsetCommitType.java │ │ │ ├── RecordConverter.java │ │ │ ├── RuntimeErrorHandler.java │ │ │ ├── TopicListSubscriber.java │ │ │ ├── TopicPatternSubscriber.java │ │ │ ├── TopicSubscriber.java │ │ │ ├── TopicSubscriberBuilder.java │ │ │ ├── streamable │ │ │ │ ├── Buffer.java │ │ │ │ ├── ConsumerPositionsUtil.java │ │ │ │ ├── KafkaEventMessage.java │ │ │ │ ├── KafkaMessageStream.java │ │ │ │ ├── KafkaRecordMetaData.java │ │ │ │ ├── KafkaTrackingToken.java │ │ │ │ ├── SortedKafkaMessageBuffer.java │ │ │ │ ├── StreamableKafkaMessageSource.java │ │ │ │ ├── TopicPartitionDeserializer.java │ │ │ │ ├── TrackingRecordConverter.java │ │ │ │ └── TrackingTokenConsumerRebalanceListener.java │ │ │ └── subscribable │ │ │ │ └── SubscribableKafkaMessageSource.java │ │ │ ├── producer │ │ │ ├── ConfirmationMode.java │ │ │ ├── DefaultProducerFactory.java │ │ │ ├── KafkaEventPublisher.java │ │ │ ├── KafkaPublisher.java │ │ │ ├── ProducerFactory.java │ │ │ └── TopicResolver.java │ │ │ └── tokenstore │ │ │ ├── KafkaTokenStore.java │ │ │ ├── TokenStoreInitializationException.java │ │ │ ├── TokenStoreState.java │ │ │ ├── TokenUpdate.java │ │ │ ├── TokenUpdateDeserializer.java │ │ │ └── TokenUpdateSerializer.java │ └── resources │ │ └── META-INF │ │ └── spring-devtools.properties │ └── test │ ├── java │ └── org │ │ └── axonframework │ │ └── extensions │ │ └── kafka │ │ ├── configuration │ │ └── KafkaMessageSourceConfigurerTest.java │ │ ├── eventhandling │ │ ├── DefaultKafkaMessageConverterTest.java │ │ ├── HeaderUtilsTest.java │ │ ├── KafkaIntegrationTest.java │ │ ├── benchmark │ │ │ ├── MessageBufferBenchmarks_PutPeekPoll.java │ │ │ ├── MessageBufferBenchmarks_PutPeekTake.java │ │ │ └── SimpleRandom.java │ │ ├── cloudevent │ │ │ └── CloudEventKafkaMessageConverterTest.java │ │ ├── consumer │ │ │ ├── AsyncFetcherIntegrationTest.java │ │ │ ├── ConsumerRecordConverter.java │ │ │ ├── ConsumerSeekUtilIntegrationTest.java │ │ │ ├── ConsumerSeekUtilTest.java │ │ │ ├── DefaultConsumerFactoryIntegrationTest.java │ │ │ ├── FetchEventsTaskTest.java │ │ │ ├── streamable │ │ │ │ ├── ConsumerPositionsUtilIntegrationTest.java │ │ │ │ ├── FailingConsumerErrorThroughBufferTest.java │ │ │ │ ├── JSR166TestCase.java │ │ │ │ ├── KafkaMessageStreamTest.java │ │ │ │ ├── KafkaTrackingTokenSerializationTest.java │ │ │ │ ├── KafkaTrackingTokenTest.java │ │ │ │ ├── SortedKafkaMessageBufferTest.java │ │ │ │ ├── StreamableKafkaMessageSourceTest.java │ │ │ │ ├── TopicPartitionDeserializerTest.java │ │ │ │ └── TrackingRecordConverterTest.java │ │ │ └── subscribable │ │ │ │ └── 
SubscribableKafkaMessageSourceTest.java │ │ ├── producer │ │ │ ├── DefaultProducerFactoryClusteringIntegrationTest.java │ │ │ ├── DefaultProducerFactoryIntegrationTest.java │ │ │ ├── KafkaPublisherBuilderTest.java │ │ │ └── KafkaPublisherIntegrationTest.java │ │ ├── tokenstore │ │ │ ├── KafkaTokenStoreBuilderTest.java │ │ │ ├── KafkaTokenStoreConcurrentIntegrationTest.java │ │ │ ├── KafkaTokenStoreIntegrationTest.java │ │ │ ├── TokenUpdateDeserializerTest.java │ │ │ └── TokenUpdateSerializerTest.java │ │ └── util │ │ │ ├── AssertUtils.java │ │ │ ├── ConsumerConfigUtil.java │ │ │ ├── HeaderAssertUtil.java │ │ │ ├── KafkaAdminUtils.java │ │ │ ├── KafkaContainerCluster.java │ │ │ ├── KafkaContainerClusterTest.java │ │ │ ├── KafkaContainerTest.java │ │ │ ├── KafkaTestUtils.java │ │ │ └── ProducerConfigUtil.java │ │ └── utils │ │ └── TestSerializer.java │ └── resources │ └── log4j2.properties ├── mvnw ├── mvnw.cmd └── pom.xml /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Explicitly declare text files we want to always be normalized and converted 5 | # to native line endings on checkout. 6 | *.java text diff=java 7 | *.html text diff=html 8 | *.properties text 9 | *.xml text 10 | *.txt text 11 | *.md text 12 | *.css text 13 | *.js text 14 | *.sql text 15 | core/src/main/resources/META-INF/services/org.axonframework.serialization.ContentTypeConverter text -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @AxonFramework/framework-developers -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/1_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 'Feature request' 3 | about: 'Suggest a feature for the Kafka Extension' 4 | title: 5 | labels: 'Type: Feature' 6 | --- 7 | 8 | 9 | 10 | ### Feature Description 11 | 12 | 16 | 17 | ### Current Behaviour 18 | 19 | 20 | 21 | ### Wanted Behaviour 22 | 23 | 24 | 25 | ### Possible Workarounds 26 | 27 | 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/2_enhancement_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 'Enhancement request' 3 | about: 'Suggest an enhancement/change to an existing feature for the Kafka Extension' 4 | title: 5 | labels: 'Type: Enhancement' 6 | --- 7 | 8 | 9 | 10 | ### Enhancement Description 11 | 12 | 13 | 14 | ### Current Behaviour 15 | 16 | 17 | 18 | ### Wanted Behaviour 19 | 20 | 21 | 22 | ### Possible Workarounds 23 | 24 | 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/3_bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 'Bug report' 3 | about: 'Report a bug for the Kafka Extension' 4 | title: 5 | labels: 'Type: Bug' 6 | --- 7 | 8 | 9 | 10 | ### Basic information 11 | 12 | * Axon Framework version: 13 | * JDK version: 14 | * Kafka Extension version: 15 | * Complete executable reproducer if available (e.g. 
GitHub Repo): 16 | 17 | ### Steps to reproduce 18 | 19 | 23 | 24 | ### Expected behaviour 25 | 26 | 27 | 28 | ### Actual behaviour 29 | 30 | 34 | -------------------------------------------------------------------------------- /.github/close-label.yml: -------------------------------------------------------------------------------- 1 | "Type: Bug": "Status: Resolved" 2 | "Type: Enhancement": "Status: Resolved" 3 | "Type: Feature": "Status: Resolved" 4 | "Type: Dependency Upgrade": "Status: Resolved" -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: github-actions 5 | directory: "/" 6 | schedule: 7 | interval: weekly 8 | day: "sunday" 9 | open-pull-requests-limit: 5 10 | labels: 11 | - "Type: Dependency Upgrade" 12 | - "Priority 1: Must" 13 | milestone: 18 14 | groups: 15 | github-dependencies: 16 | update-types: 17 | - "patch" 18 | - "minor" 19 | - "major" 20 | 21 | - package-ecosystem: maven 22 | directory: "/" 23 | schedule: 24 | interval: weekly 25 | day: "sunday" 26 | open-pull-requests-limit: 5 27 | labels: 28 | - "Type: Dependency Upgrade" 29 | - "Priority 1: Must" 30 | milestone: 18 31 | groups: 32 | maven-dependencies: 33 | update-types: 34 | - "patch" 35 | - "minor" 36 | - "major" 37 | ignore: 38 | - dependency-name: "org.mockito:*" 39 | versions: [ "[5.0.0,)" ] 40 | - dependency-name: "org.springframework.boot:*" 41 | versions: [ "[3.0.0,)" ] 42 | - dependency-name: "org.apache.kafka:kafka-clients" 43 | versions: [ "[3.9.0,)" ] -------------------------------------------------------------------------------- /.github/release-notes.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | sections: 3 | - title: ":star: Features" 4 | labels: [ "Type: Feature" ] 5 | - title: ":chart_with_upwards_trend: Enhancements" 6 | labels: [ "Type: Enhancement" ] 7 | - title: ":beetle: Bug Fixes" 8 | labels: [ "Type: Bug" ] 9 | - title: ":hammer_and_wrench: Dependency Upgrade" 10 | labels: [ "Type: Dependency Upgrade" ] 11 | issues: 12 | exclude: 13 | labels: [ "Type: Incorrect Repository", "Type: Question" ] 14 | contributors: 15 | exclude: 16 | names: [ "dependabot", "dependabot[bot]" ] 17 | -------------------------------------------------------------------------------- /.github/workflows/add-to-project.yml: -------------------------------------------------------------------------------- 1 | name: Kafka Extension 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - labeled 7 | 8 | jobs: 9 | add-to-project: 10 | name: Add Dependency Upgrade PR to project 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/add-to-project@main 14 | with: 15 | project-url: https://github.com/orgs/AxonFramework/projects/2 16 | github-token: ${{ secrets.ADD_PROJECT_TOKEN }} 17 | labeled: 'Type: Dependency Upgrade' 18 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-automation.yml: -------------------------------------------------------------------------------- 1 | name: Dependabot Automation 2 | on: pull_request 3 | 4 | permissions: 5 | contents: write 6 | pull-requests: write 7 | 8 | jobs: 9 | dependabot-approve: 10 | name: Dependabot PR Automation 11 | 12 | runs-on: ubuntu-latest 13 | if: ${{ github.actor == 'dependabot[bot]' }} 14 | steps: 15 | - name: Retrieve Dependabot metadata 16 | id: metadata 17 | uses: 
dependabot/fetch-metadata@v2 18 | with: 19 | github-token: "${{ secrets.GITHUB_TOKEN }}" 20 | 21 | - name: Approve Pull Request 22 | run: gh pr review --approve "$PR_URL" 23 | env: 24 | PR_URL: ${{github.event.pull_request.html_url}} 25 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 26 | 27 | dependabot-auto-merge: 28 | runs-on: ubuntu-latest 29 | if: ${{ github.actor == 'dependabot[bot]' }} 30 | steps: 31 | - name: Retrieve Dependabot metadata 32 | id: metadata 33 | uses: dependabot/fetch-metadata@v2 34 | with: 35 | github-token: "${{ secrets.GITHUB_TOKEN }}" 36 | 37 | - name: Auto-merge Pull Request 38 | if: ${{steps.metadata.outputs.update-type != 'version-update:semver-major'}} 39 | run: gh pr merge --auto --merge "$PR_URL" 40 | env: 41 | PR_URL: ${{github.event.pull_request.html_url}} 42 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Trigger documentation build 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | paths: 8 | - 'docs/**' 9 | pull_request: 10 | branches: 11 | - 'main' 12 | paths: 13 | - 'docs/**' 14 | 15 | env: 16 | VALE_VERSION: ${{ vars.LIBRARY_VALE_VERSION || '3.3.0' }} 17 | 18 | jobs: 19 | build: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Checkout repository 23 | uses: actions/checkout@v4 24 | 25 | - name: Install Node.js 26 | uses: actions/setup-node@v4 27 | with: 28 | node-version: '20' 29 | 30 | - name: Install vale 31 | run: | 32 | wget "https://github.com/errata-ai/vale/releases/download/v${VALE_VERSION}/vale_${VALE_VERSION}_Linux_64-bit.tar.gz" 33 | sudo tar -xvzf vale_${VALE_VERSION}_Linux_64-bit.tar.gz -C /usr/local/bin vale 34 | 35 | - name: Generate Site 36 | run: | 37 | cd docs/_playbook/ 38 | npm install 39 | export GIT_CREDENTIALS='https://axoniq-devops:${{ secrets.LIBRARY_DEVBOT_TOKEN }}@github.com' 40 | echo 'Using' `vale -v` 41 | npx antora playbook.yaml 42 | 43 | - name: Notify AxonIQ Library (if a push to a tracked branch) 44 | if: ${{ github.event_name == 'push'}} 45 | uses: actions/github-script@v7 46 | with: 47 | github-token: ${{ secrets.LIBRARY_DEVBOT_TOKEN }} 48 | script: | 49 | await github.rest.actions.createWorkflowDispatch({ 50 | owner: 'AxonIQ', 51 | repo: 'axoniq-library-site', 52 | workflow_id: 'publish.yml', 53 | ref: 'main' 54 | }) -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Kafka Extension 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | - axon-kafka-*.*.x 9 | 10 | jobs: 11 | build: 12 | name: Test and Build on JDK ${{ matrix.java-version }} 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | include: 18 | - java-version: 8 19 | sonar-enabled: false 20 | deploy-enabled: true 21 | - java-version: 11 22 | sonar-enabled: false 23 | deploy-enabled: false 24 | - java-version: 17 25 | sonar-enabled: true 26 | deploy-enabled: false 27 | 28 | steps: 29 | - name: Checkout code 30 | uses: actions/checkout@v4 31 | 32 | - name: Set up JDK ${{ matrix.java-version }} 33 | uses: actions/setup-java@v4.7.1 34 | with: 35 | distribution: 'zulu' 36 | java-version: ${{ matrix.java-version }} 37 | cache: "maven" 38 | server-id: central 39 | server-username: MAVEN_USERNAME 40 | server-password: MAVEN_PASSWORD 41 | 42 | - name: Run regular build 43 | run: | 44 
| ./mvnw -B -U -Dstyle.color=always clean verify 45 | 46 | - name: Run integration tests 47 | run: | # no clean 48 | ./mvnw -B -U -Pintegration-test -DskipExamples 49 | 50 | - name: Build coverage report 51 | if: matrix.sonar-enabled 52 | run: | # no clean 53 | ./mvnw -B -U -Pcoverage-aggregate -DskipExamples 54 | 55 | - name: Sonar analysis 56 | if: matrix.sonar-enabled 57 | run: | # no clean 58 | ./mvnw -B -Dstyle.color=always sonar:sonar \ 59 | -Dsonar.projectKey=AxonFramework_extension-kafka \ 60 | -Dsonar.organization=axonframework \ 61 | -Dsonar.host.url=https://sonarcloud.io \ 62 | -Dsonar.login=${{ secrets.SONAR_TOKEN }} 63 | env: 64 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 65 | 66 | - name: Deploy to Sonatype 67 | if: success() && matrix.deploy-enabled 68 | run: | 69 | ./mvnw -B -U -Dstyle.color=always deploy -DskipTests=true -DskipExamples=true 70 | env: 71 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 72 | MAVEN_USERNAME: ${{ secrets.SONATYPE_TOKEN_ID }} 73 | MAVEN_PASSWORD: ${{ secrets.SONATYPE_TOKEN_PASS }} 74 | 75 | - name: Notify success to Slack 76 | if: success() 77 | env: 78 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} 79 | uses: voxmedia/github-action-slack-notify-build@v2 80 | with: 81 | channel_id: CAGSEC92A 82 | status: SUCCESS 83 | color: good 84 | 85 | - name: Notify failure to Slack 86 | if: failure() 87 | env: 88 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} 89 | uses: voxmedia/github-action-slack-notify-build@v2 90 | with: 91 | channel_id: CAGSEC92A 92 | status: FAILED 93 | color: danger 94 | -------------------------------------------------------------------------------- /.github/workflows/pullrequest.yml: -------------------------------------------------------------------------------- 1 | name: Kafka Extension 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | build: 8 | name: Test and Build on JDK ${{ matrix.java-version }} 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | include: 14 | - java-version: 8 15 | sonar-enabled: false 16 | - java-version: 11 17 | sonar-enabled: false 18 | - java-version: 17 19 | sonar-enabled: true 20 | 21 | steps: 22 | - name: Checkout code 23 | uses: actions/checkout@v4 24 | 25 | - name: Set up JDK ${{ matrix.java-version }} 26 | uses: actions/setup-java@v4.7.1 27 | with: 28 | distribution: 'zulu' 29 | java-version: ${{ matrix.java-version }} 30 | cache: "maven" 31 | server-id: central 32 | server-username: MAVEN_USERNAME 33 | server-password: MAVEN_PASSWORD 34 | 35 | - name: Run regular build 36 | run: | 37 | ./mvnw -B -U -Dstyle.color=always clean verify 38 | 39 | - name: Run integration tests 40 | run: | # no clean 41 | ./mvnw -B -U -Pintegration-test -DskipExamples 42 | 43 | - name: Build coverage report 44 | if: matrix.sonar-enabled 45 | run: | # no clean 46 | ./mvnw -B -U -Pcoverage-aggregate -DskipExamples 47 | 48 | - name: Sonar Analysis 49 | if: ${{ success() && matrix.sonar-enabled && github.event.pull_request.head.repo.full_name == github.repository }} 50 | run: | 51 | ./mvnw -B -Dstyle.color=always sonar:sonar \ 52 | -Dsonar.projectKey=AxonFramework_extension-kafka \ 53 | -Dsonar.organization=axonframework \ 54 | -Dsonar.host.url=https://sonarcloud.io \ 55 | -Dsonar.login=${{ secrets.SONAR_TOKEN }} 56 | env: 57 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 58 | -------------------------------------------------------------------------------- /.github/workflows/release-notes.yml: -------------------------------------------------------------------------------- 1 | # Trigger the 
workflow on milestone events 2 | on: 3 | milestone: 4 | types: [closed] 5 | name: Milestone Closure 6 | jobs: 7 | create-release-notes: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v4 12 | - name: Create Release Notes Markdown 13 | uses: docker://decathlon/release-notes-generator-action:3.1.5 14 | env: 15 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 16 | OUTPUT_FOLDER: temp_release_notes 17 | USE_MILESTONE_TITLE: "true" 18 | - name: Get the name of the created Release Notes file and extract Version 19 | run: | 20 | RELEASE_NOTES_FILE=$(ls temp_release_notes/*.md | head -n 1) 21 | echo "RELEASE_NOTES_FILE=$RELEASE_NOTES_FILE" >> $GITHUB_ENV 22 | VERSION=$(echo ${{ github.event.milestone.title }} | cut -d' ' -f2) 23 | echo "VERSION=$VERSION" >> $GITHUB_ENV 24 | - name: Create a Draft Release Notes on GitHub 25 | id: create_release 26 | uses: actions/create-release@v1 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 29 | with: 30 | tag_name: axon-kafka-${{ env.VERSION }} 31 | release_name: Axon Kafka Extension v${{ env.VERSION }} 32 | body_path: ${{ env.RELEASE_NOTES_FILE }} 33 | draft: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Maven build artifacts can be ignored 2 | target/ 3 | # ...but keep the wrapper jar 4 | !.mvn/wrapper/maven-wrapper.jar 5 | 6 | *.iml 7 | axon.ipr 8 | axon.iws 9 | .idea/ 10 | **/*.iml 11 | .classpath 12 | .project 13 | .settings/ 14 | events/ 15 | .DS_Store 16 | -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AxonFramework/extension-kafka/944c2b039445c41d7582ded25414ed9e652b0e62/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 
17 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip 18 | wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar 19 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - PooledStreaming Producer and Consumer using Cloud Events.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 20 | 30 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - PooledStreaming Producer and Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - PooledStreaming Producer and Subscribing Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - PooledStreaming Producer and Tracking Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Subscribing Producer and Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Subscribing Producer and PooledStreaming Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Subscribing Producer and Tracking Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Tracking Producer and Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Tracking Producer and PooledStreaming Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /.run/Kafka Axon Example - Tracking Producer and Subscribing Consumer.run.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 27 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Thank you for your interest in contributing to the Axon Framework Kafka Extension. To make sure using Axon is a smooth 4 | experience for everybody, we've set up a number of guidelines to follow. 5 | 6 | There are different ways in which you can contribute to the framework: 7 | 8 | 1. 
You can report any bugs, feature requests or ideas about improvements on 9 | our [issue page](https://github.com/AxonFramework/extension-kafka/issues/new/choose). All ideas are welcome. Please 10 | be as exact as possible when reporting bugs. This will help us reproduce and thus solve the problem faster. 11 | 2. If you have created a component for your own application that you think might be useful to include in the framework, 12 | send us a pull request (or a patch / zip containing the source code). We will evaluate it and try to fit it in the 13 | framework. Please make sure code is properly documented using JavaDoc. This helps us to understand what is going on. 14 | 3. If you know of any other way you think you can help us, do not hesitate to send a message to 15 | the [AxonIQ's discussion platform](https://discuss.axoniq.io/). 16 | 17 | ## Code Contributions 18 | 19 | If you're contributing code, please take care of the following: 20 | 21 | ### Contributor Licence Agreement 22 | 23 | To keep everyone out of trouble (both you and us), we require that all contributors (digitally) sign a Contributor 24 | License Agreement. Basically, the agreement says that we may freely use the code you contribute to the Axon Framework 25 | Kafka Extension, and that we won't hold you liable for any unfortunate side effects that the code may cause. 26 | 27 | To sign the CLA, visit: https://cla-assistant.io/AxonFramework/extension-kafka 28 | 29 | ### Code Style 30 | 31 | We're trying very hard to maintain a consistent style of coding throughout the code base. Think of things like indenting 32 | using 4 spaces, putting opening brackets (the '{') on the same line and putting proper JavaDoc on all non-private 33 | members. 34 | 35 | If you're using IntelliJ IDEA, you can download the code style 36 | definition [here](https://github.com/AxonFramework/AxonFramework/blob/master/axon_code_style.xml). Simply import the XML 37 | file in under "Settings -> Code Style -> Scheme -> Import Scheme". Doing so should make the code style selectable 38 | immediately. 39 | 40 | ### Project Build 41 | 42 | The project is built with Apache Maven, supplied by the Maven Wrapper `mvnw`. For separate aspects of the build Maven 43 | profiles are used. 44 | 45 | For a **regular** build, execute from your command line: `./mvnw`. This operation will run the build and execute JUnit 46 | tests of all modules and package the resulting artifacts. 47 | 48 | This repository contains an example project. You can skip its build by adding `-DskipExamples` to your build command. 49 | 50 | There are long-running integration tests present (starting Spring Boot Application and/or running Kafka in a 51 | TestContainer), which **ARE NOT** executed by default. A unique `integration-test` build is needed to run those 52 | long-running tests. If you want to run them, please call `./mvnw -Pintegration-test` from your command line. When 53 | introducing additional integration tests, make sure the class name ends with `IntegrationTest`. 54 | 55 | The project uses JaCoCo to measure test coverage of the code and automatically generate coverage reports on regular 56 | and `integration-test` builds. If you are interested in the overall test coverage, please run `./mvnw -Pcoverage` after 57 | running both without clean. 
and check the resulting aggregated report 58 | in `./coverage-report/target/site/jacoco-aggregate/index.html` 59 | 60 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Axon Framework - Kafka Extension 2 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.axonframework.extensions.kafka/axon-kafka/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.axonframework.extensions.kafka/axon-kafka/) 3 | ![Build Status](https://github.com/AxonFramework/extension-kafka/workflows/Kafka%20Extension/badge.svg?branch=master) 4 | [![SonarCloud Status](https://sonarcloud.io/api/project_badges/measure?project=AxonFramework_extension-kafka&metric=alert_status)](https://sonarcloud.io/dashboard?id=AxonFramework_extension-kafka) 5 | 6 | Axon Framework is a framework for building evolutionary, event-driven microservice systems, 7 | based on the principles of Domain-Driven Design, Command-Query Responsibility Separation (CQRS), and Event Sourcing. 8 | 9 | As such, it provides the necessary building blocks to follow these principles. 10 | Examples of these building blocks are Aggregate factories and Repositories, Command, Event and Query Buses, and an Event Store. 11 | The framework provides sensible defaults for all of these components out of the box. 12 | 13 | This setup helps to create a well-structured application without having to bother with the infrastructure. 14 | The main focus can thus become the business functionality. 15 | 16 | This repository provides an extension to the Axon Framework: [Kafka](https://kafka.apache.org/). 17 | It provides functionality to leverage Kafka to send and receive Events from one (micro)service to another. 18 | Thus, it does not include command or query distribution, nor event store specifics required for event sourcing. 19 | 20 | For more information on anything Axon, please visit our website, [http://axoniq.io](http://axoniq.io). 21 | 22 | ## Getting started 23 | 24 | The [AxonIQ Docs](https://docs.axoniq.io/home/) contain a section for the guides of all the Axon Framework extensions. 25 | The Kafka extension guide can be found [here](https://docs.axoniq.io/kafka-extension-reference/latest/). 26 | 27 | This extension should be regarded as a partial replacement of [Axon Server](https://axoniq.io/product-overview/axon-server), 28 | since it only covers the event routing part. 29 | 30 | ## Receiving help 31 | 32 | Are you having trouble using the extension? 33 | We'd like to help you out the best we can! 34 | There are a couple of things to consider when you're traversing anything Axon: 35 | 36 | * Checking the [documentation](https://docs.axoniq.io/home/) should be your first stop, 37 | as the majority of possible scenarios you might encounter when using Axon should be covered there. 38 | * If the Reference Guide does not cover a specific topic you would've expected, 39 | we'd appreciate it if you could post a [new thread/topic on our library forums describing the problem](https://discuss.axoniq.io/c/26). 40 | * There is a [forum](https://discuss.axoniq.io/) to support you in case the reference guide did not sufficiently answer your question. 41 | Axon Framework and Server developers will help out on a best-effort basis. 42 | Know that any support from contributors on posted questions is very much appreciated on the forum. 
43 | * Next to the forum we also monitor Stack Overflow for any questions which are tagged with `axon`. 44 | 45 | ## Feature requests and issue reporting 46 | 47 | We use GitHub's [issue tracking system](https://github.com/AxonFramework/extension-kafka/issues) for new feature 48 | request, extension enhancements and bugs. 49 | Prior to filing an issue, please verify that it's not already reported by someone else. 50 | 51 | When filing bugs: 52 | * A description of your setup and what's happening helps us figuring out what the issue might be 53 | * Do not forget to provide version you're using 54 | * If possible, share a stack trace, using the Markdown semantic ``` 55 | 56 | When filing features: 57 | * A description of the envisioned addition or enhancement should be provided 58 | * (Pseudo-)Code snippets showing what it might look like help us understand your suggestion better 59 | * If you have any thoughts on where to plug this into the framework, that would be very helpful too 60 | * Lastly, we value contributions to the framework highly. So please provide a Pull Request as well! 61 | 62 | -------------------------------------------------------------------------------- /coverage-report/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 4.0.0 20 | 21 | 22 | org.axonframework.extensions.kafka 23 | axon-kafka-parent 24 | 4.11.2-SNAPSHOT 25 | 26 | 27 | axon-kafka-coverage-report 28 | 29 | Axon Framework Kafka Extension - Coverage Report Generator 30 | Coverage Report Generator for the Axon Kafka Extension 31 | 32 | 33 | AxonIQ B.V. 34 | https://axoniq.io 35 | 36 | 37 | 38 | 39 | org.axonframework.extensions.kafka 40 | axon-kafka 41 | ${project.version} 42 | runtime 43 | 44 | 45 | org.axonframework.extensions.kafka 46 | axon-kafka-spring-boot-autoconfigure 47 | ${project.version} 48 | runtime 49 | 50 | 51 | org.axonframework.extensions.kafka 52 | axon-kafka-spring-boot-3-integrationtests 53 | ${project.version} 54 | runtime 55 | 56 | 57 | 58 | 59 | 60 | 61 | org.jacoco 62 | jacoco-maven-plugin 63 | 64 | 65 | report-aggregate 66 | verify 67 | 68 | report-aggregate 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation For Axon Framework - Kafka Extension. 2 | 3 | This folder contains the docs related to the Kafka Extension for Axon Framework. The docs in this folder are written as part of the [AxonIQ Library](https://library.axoniq.io), and are [written in Ascii and built with Antora.](https://library.axoniq.io/contibution-guide/overview/platform.html) 4 | 5 | The following are the current documentation sources (folders): 6 | 7 | - `extension-guide` : [The Kafka Extension Guide](https://library.axoniq.io/kafka-extension-reference/index.html) 8 | 9 | ## Contributing to the docs. 10 | 11 | You are welcome to contribute to these docs. Whether you want to fix a typo, or you find something missing, something that is not clear or can be improved, or even if you want to write an entire piece of docs to illustrate something that could help others to understand the use of the Bike Rental App, you are more than welcome to send a Pull Request to this GitHub repository. 
Just make sure you follow the guidelines explained in [AxonIQ Library Contribution Guide](https://library.axoniq.io/contibution-guide/index.html) 12 | 13 | ## Building and testing these docs locally. 14 | 15 | If you want to build and explore the docs locally (because you have made changes or before contributing), you can use the Antora's build file in `docs/_playbook` folder. 16 | 17 | You can check the [detailed information on how the process to build the docs works](https://library.axoniq.io/contibution-guide/overview/build.html), but in short, all you have to do is: 18 | 19 | 1. Make sure you have Node (a LTS version is preferred), Antora and Vale installed in your system. 20 | 2. CD to the `docs/_playbook` folder. 21 | 3. Run `npx antora playbook.yaml`. Antora will generate the set of static html files under `docs/_playbook/build/site` 22 | 4. Move to `docs/_playbook/build/site` and execute some local http server to serve files in that directory. For example by executing `python3 -m http.server 8070` 23 | 5. Open your browser and go to `http://localhost:8070`. You should be able to navigate the local version of the docs. 24 | -------------------------------------------------------------------------------- /docs/_playbook/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | node_modules 3 | .vscode 4 | vale 5 | package-lock.json 6 | -------------------------------------------------------------------------------- /docs/_playbook/.vale.ini: -------------------------------------------------------------------------------- 1 | StylesPath = vale 2 | 3 | MinAlertLevel = suggestion 4 | 5 | Packages = http://github.com/AxonIQ/axoniq-vale-package/releases/latest/download/axoniq-vale-package.zip 6 | 7 | Vocab = general, AxonIQ, Java, Names_Terms, misc 8 | 9 | [*.{adoc,html}] 10 | BasedOnStyles = AxonIQ, proselint, Google 11 | 12 | Google.Headings = NO # Diasable in favor od AxonIQ one 13 | Google.Parens = NO # Disable warning about using parens 14 | Google.Quotes = NO # Diasable "commas and periods go inside quotation marks" 15 | Google.WordList = NO # Disable Google's word list 16 | Google.Passive = NO # Allow the use of Passive voice 17 | Google.Colons = NO # Allow the use of Colons 18 | Google.Will = NO # Allow use will 19 | Google.Contractions = NO 20 | Google.We = NO 21 | 22 | 23 | AxonIQ.AcronymCase = NO 24 | AxonIQ.HeadingTitle = NO 25 | -------------------------------------------------------------------------------- /docs/_playbook/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "@antora/atlas-extension": "^1.0.0-alpha.2", 4 | "@antora/cli": "^3.2.0-alpha.2", 5 | "@antora/lunr-extension": "^1.0.0-alpha.8", 6 | "@antora/site-generator": "^3.2.0-alpha.2", 7 | "@asciidoctor/tabs": "^1.0.0-beta.6", 8 | "@axoniq/antora-vale-extension": "^0.1.1", 9 | "asciidoctor-kroki": "^0.17.0" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /docs/_playbook/playbook.yaml: -------------------------------------------------------------------------------- 1 | site: 2 | title: Axon Kafka Extension docs PREVIEW 3 | start_page: kafka-extension-reference::index.adoc 4 | 5 | content: 6 | sources: 7 | - url: ../.. 
8 | start_paths: ['docs/*', '!docs/_*'] 9 | 10 | asciidoc: 11 | attributes: 12 | experimental: true 13 | page-pagination: true 14 | kroki-fetch-diagram: true 15 | # primary-site-manifest-url: https://library.axoniq.io/site-manifest.json 16 | extensions: 17 | - asciidoctor-kroki 18 | - '@asciidoctor/tabs' 19 | 20 | antora: 21 | extensions: 22 | - id: prose-linting 23 | require: '@axoniq/antora-vale-extension' 24 | enabled: true 25 | vale_config: .vale.ini 26 | update_styles: true 27 | - id: lunr 28 | require: '@antora/lunr-extension' 29 | enabled: true 30 | index_latest_only: true 31 | - id: atlas 32 | require: '@antora/atlas-extension' 33 | 34 | runtime: 35 | fetch: true # fetch remote repos 36 | log: 37 | level: info 38 | failure_level: error 39 | 40 | ui: 41 | bundle: 42 | url: https://github.com/AxonIQ/axoniq-library-ui/releases/download/v.0.1.10/ui-bundle.zip 43 | -------------------------------------------------------------------------------- /docs/reference/antora.yml: -------------------------------------------------------------------------------- 1 | name: kafka-extension-reference 2 | title: Kafka Extension Reference 3 | version: 4 | axon-kafka-(?+({0..9}).+({0..9})).*: $ 5 | master: development 6 | prerelease: true 7 | start_page: ROOT:index.adoc 8 | 9 | asciidoc: 10 | attributes: 11 | component_description: Extension adding Kafka integration for event streaming 12 | type: extension-reference 13 | group: axon-framework 14 | 15 | nav: 16 | - modules/nav.adoc -------------------------------------------------------------------------------- /docs/reference/modules/ROOT/pages/index.adoc: -------------------------------------------------------------------------------- 1 | :navtitle: Kafka Extension Guide 2 | = Kafka Extension 3 | 4 | Apache Kafka is a popular system for publishing and consuming events. Its architecture is fundamentally different from most messaging systems and combines speed with reliability. 5 | 6 | Axon provides an extension dedicated to _publishing_ and _receiving_ event messages from Kafka. The Kafka Extension should be regarded as an alternative approach to distributing events, besides (the default) Axon Server. It's also possible to use the extension to stream events from Kafka to Axon server, or the other way around. 7 | 8 | The implementation of the extension can be found link:https://github.com/AxonFramework/extension-kafka[here,window=_blank,role=extenral]. The shared repository also contains a link:https://github.com/AxonFramework/extension-kafka/tree/master/kafka-axon-example[sample project,window=_blank,role=extenral] using the extension. 9 | 10 | To use the Kafka Extension components from Axon, make sure the `axon-kafka` module is available on the classpath. Using the extension requires setting up and configuring Kafka following your project's requirements. How this is achieved is outside of the scope of this reference guide and should be found in link:https://kafka.apache.org/[Kafka's documentation,window=_blank,role=extenral]. 11 | 12 | NOTE: Note that Kafka is a perfectly fine event distribution mechanism, but it is not an event store. Along those lines this extension only provides the means to distributed Axon's events through Kafka. Due to this the extension cannot be used to event source aggregates, as this requires an event store implementation. 
We recommend using a built-for-purpose event store like link:https://www.axoniq.io/products/axon-server[Axon Server,window=_blank,role=external], or alternatively an RDBMS-based one (the JPA or JDBC implementations, for example). 13 | 14 | 15 | -------------------------------------------------------------------------------- /docs/reference/modules/ROOT/pages/message-format.adoc: -------------------------------------------------------------------------------- 1 | :navtitle: Customizing Event Message Format 2 | = Customizing Event Message Format 3 | 4 | In the previous sections, the `KafkaMessageConverter` has been shown as a requirement for event production and consumption. The `K` is the format of the message's key, where the `V` stands for the message's value. The extension provides a `DefaultKafkaMessageConverter` which converts an Axon `EventMessage` to a Kafka `ProducerRecord`, and a `ConsumerRecord` back into an `EventMessage`. This `DefaultKafkaMessageConverter` uses `String` as the key and `byte[]` as the value of the message to de-/serialize. 5 | 6 | Although it is the default, this implementation allows for some customization, such as how the `EventMessage` `MetaData` is mapped to Kafka headers. This is achieved by adjusting the "header value mapper" in the `DefaultKafkaMessageConverter` builder. 7 | 8 | The `SequencingPolicy` can be adjusted to change how the record key is determined. The default sequencing policy is the `SequentialPerAggregatePolicy`, which leads to the aggregate identifier of an event being the key of a `ProducerRecord` and `ConsumerRecord`. 9 | 10 | The format of an event message defines an API between the producer and the consumer of the message. This API may change over time, leading to incompatibility between the event class' structure on the receiving side and the event structure of a message containing the old format. Axon addresses the topic of xref:axon-framework-reference:events:event-versioning.adoc[Event Versioning] by introducing Event Upcasters. The `DefaultKafkaMessageConverter` supports this by provisioning an `EventUpcasterChain` and running the upcasting process on the `MetaData` and `Payload` of individual messages converted from `ConsumerRecord` before those are passed to the `Serializer` and converted into `Event` instances. 11 | 12 | Note that the `KafkaMessageConverter` feeds the upcasters with messages one-by-one, limiting it to one-to-one or one-to-many upcasting only. Upcasters performing a many-to-one or many-to-many operation thus won't be able to operate inside the extension (yet). 13 | 14 | Lastly, the `Serializer` used by the converter can be adjusted. See the xref:axon-framework-reference:ROOT:serialization.adoc[Serializer] section for more details on this. 15 | 16 | [source,java] 17 | ---- 18 | public class KafkaMessageConversationConfiguration { 19 | // ... 20 | public KafkaMessageConverter<String, byte[]> kafkaMessageConverter(Serializer serializer, 21 | SequencingPolicy<? super EventMessage<?>> sequencingPolicy, 22 | BiFunction<String, Object, RecordHeader> headerValueMapper, 23 | EventUpcasterChain upcasterChain) { 24 | return DefaultKafkaMessageConverter.builder() 25 | .serializer(serializer) // Hard requirement 26 | .sequencingPolicy(sequencingPolicy) // Defaults to a "SequentialPerAggregatePolicy" 27 | .upcasterChain(upcasterChain) // Defaults to empty upcaster chain 28 | .headerValueMapper(headerValueMapper) // Defaults to "HeaderUtils#byteMapper()" 29 | .build(); 30 | } 31 | // ... 
32 | } 33 | ---- 34 | 35 | Make sure to use an identical `KafkaMessageConverter` on both the producing and consuming end, as otherwise exceptions upon deserialization are to be expected. A `CloudEventKafkaMessageConverter` is also available using the link:https://cloudevents.io/[Cloud Events spec,window=_blank,role=external]. 36 | 37 | -------------------------------------------------------------------------------- /docs/reference/modules/ROOT/pages/release-notes.adoc: -------------------------------------------------------------------------------- 1 | = Release Notes Kafka Extension 2 | :navtitle: Release notes 3 | 4 | You can find the release notes for the Kafka Extension of version 4.10.0 and up, below. 5 | For earlier releases, please go to the link:https://legacydocs.axoniq.io/reference-guide/release-notes/rn-extensions/rn-kafka[legacy documentation]. 6 | 7 | == Release 4.11.0 8 | 9 | === _Features_ 10 | 11 | - KafkaPublisher Ack timeout can be set via properties and autoconfiguration link:https://github.com/AxonFramework/extension-kafka/pull/528[#528] 12 | 13 | === _Contributors_ 14 | 15 | We'd like to thank all the contributors who worked on this release! 16 | 17 | - link:https://github.com/aupodogov[@aupodogov] 18 | 19 | == Release 4.10.0 20 | 21 | . Upgrades the Kafka Extension to be compatible with Axon Framework 4.10.0 22 | . Updates various dependencies 23 | . Adds the ability to ignore metadata keys 24 | 25 | See the link:https://github.com/AxonFramework/extension-kafka/releases/tag/axon-kafka-4.10.0[GitHub release notes] for an exhaustive list of all changes. -------------------------------------------------------------------------------- /docs/reference/modules/ROOT/pages/springboot-configuration.adoc: -------------------------------------------------------------------------------- 1 | :navtitle: Configuration in SpringBoot 2 | = Configuration in SpringBoot 3 | 4 | This extension can be added as a Spring Boot starter dependency to your project using group id `org.axonframework.extensions.kafka` and artifact id `axon-kafka-spring-boot-starter`. When using the auto-configuration, the following components will be created for you automatically: 5 | 6 | == Generic components 7 | 8 | === `DefaultKafkaMessageConverter` 9 | A `DefaultKafkaMessageConverter` using the configured `eventSerializer` (which defaults to `XStreamSerializer`), which is used by default to convert between Axon Event messages and Kafka records. 10 | 11 | Uses a `String` for the keys and a `byte[]` for the record's values. 12 | 13 | When the property `axon.kafka.message-converter-mode` is set to `cloud_event`, a `CloudEventKafkaMessageConverter` will be used instead. This will use `String` for the keys and `CloudEvent` for the record's values. 14 | 15 | For each, the matching Kafka (de)serializers will also be set as defaults. 16 | 17 | == Producer components 18 | 19 | === `DefaultProducerFactory` 20 | 21 | A `DefaultProducerFactory` using a `String` for the keys and a `byte[]` for the record's values. 22 | 23 | This creates a `ProducerFactory` in confirmation mode "NONE", as is specified xref:publishing.adoc[here]. 24 | 25 | The `axon.kafka.publisher.confirmation-mode` should be adjusted to change this mode, 26 | where the "TRANSACTIONAL" mode requires the `axon.kafka.producer.transaction-id-prefix` property to be provided. 27 | 28 | If the `axon.kafka.producer.transaction-id-prefix` is non-null and non-empty, it is assumed a "TRANSACTIONAL" confirmation mode is desired. 
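For example, a minimal `application.yml` fragment enabling transactional publishing could look like the sketch below. The property names are the ones described above; the prefix value is purely illustrative.

[source,yaml]
----
axon:
  kafka:
    publisher:
      # Explicitly switch from the default "NONE" confirmation mode
      confirmation-mode: transactional
    producer:
      # A non-null, non-empty prefix on its own is also taken as a request for TRANSACTIONAL mode
      transaction-id-prefix: my-service-tx
----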
29 | 30 | === `KafkaPublisher` 31 | 32 | Uses a `Producer` instance from the `ProducerFactory` to publish events to the configured Kafka topic. 33 | 34 | === `KafkaEventPublisher` 35 | 36 | Used to provide events to the `KafkaPublisher` and to assign a processor name and processing group called `__axon-kafka-event-publishing-group` to it. Defaults to a `SubscribingEventProcessor`. 37 | 38 | If a `TrackingEventProcessor` is desired, the `axon.kafka.producer.event-processor-mode` should be set to `tracking`. 39 | 40 | == Consumer components 41 | 42 | === `DefaultConsumerFactory` 43 | 44 | A `DefaultConsumerFactory` using a `String` for the keys and a `byte[]` for the record's values 45 | 46 | === `AsyncFetcher` 47 | 48 | An `AsyncFetcher`. To adjust the `Fetcher` poll timeout, the `axon.kafka.fetcher.poll-timeout` can be set. 49 | 50 | === `StreamableKafkaMessageSource` 51 | 52 | A `StreamableKafkaMessageSource` which can be used for `TrackingEventProcessor` instances 53 | 54 | == Properties file configuration 55 | 56 | When using the Spring Boot auto-configuration be mindful to provide an `application.properties` file. The Kafka extension configuration specifics should be placed under prefix `axon.kafka`. On this level, the `bootstrapServers` (defaults to `localhost:9092`) and `default-topic` used by the producing and consuming side can be defined. 57 | 58 | The `DefaultProducerFactory` and `DefaultConsumerFactory` expects a `Map` of configuration properties, which correspond to Kafka `Producer` and `Consumer` specific properties respectively. As such, Axon itself passes along these properties without using them directly itself. The `application.properties` file provides a number of named properties under the `axon.kafka.producer.` and `axon.kafka.consumer.` prefixes. If the property you are looking for is not predefined in Axon `KafkaProperties` file, you are always able to introduce properties in a map style. 59 | 60 | [source,yaml] 61 | ---- 62 | # This is a sample properties file to configure the Kafka Extension 63 | axon: 64 | kafka: 65 | bootstrap-servers: localhost:9092 66 | client-id: kafka-axon-example 67 | default-topic: local.event 68 | properties: 69 | security.protocol: PLAINTEXT 70 | 71 | publisher: 72 | confirmation-mode: transactional 73 | 74 | producer: 75 | transaction-id-prefix: kafka-sample 76 | retries: 0 77 | event-processor-mode: subscribing 78 | # For additional unnamed properties, add them to the `properties` map like so 79 | properties: 80 | some-key: [some-value] 81 | 82 | fetcher: 83 | poll-timeout: 3000 84 | 85 | consumer: 86 | enable-auto-commit: true 87 | auto-commit-interval: 3000 88 | event-processor-mode: tracking 89 | # For additional unnamed properties, add them to the `properties` map like so 90 | properties: 91 | some-key: [some-value] 92 | ---- 93 | 94 | [NOTE] 95 | .Auto configuring a `SubscribableKafkaMessageSource` 96 | ==== 97 | 98 | The auto configured `StreamableKafkaMessageSource` can be toggled off by setting the `axon.kafka.consumer.event-processing-mode` to `subscribing`. 99 | 100 | Note that this *does not* create a `SubscribableKafkaMessageSource` for you out of the box. To set up a subscribable message, we recommend to read xref:consuming.adoc#subscribable-message-source[this] section. 
101 | ==== 102 | -------------------------------------------------------------------------------- /docs/reference/modules/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:ROOT:publishing.adoc[] 2 | * xref:ROOT:consuming.adoc[] 3 | * xref:ROOT:message-format.adoc[] 4 | * xref:ROOT:springboot-configuration.adoc[] 5 | * xref:ROOT:release-notes.adoc[] -------------------------------------------------------------------------------- /kafka-axon-example/README.md: -------------------------------------------------------------------------------- 1 | # Kafka Axon Springboot Example 2 | 3 | This is an example SpringBoot application using the Kafka Axon extension. 4 | It configures a simple Kafka message publishing using Kafka infrastructure run locally. 5 | 6 | ## How to run 7 | 8 | ### Preparation 9 | 10 | You will need `docker` and `docker-compose` to run this example. 11 | 12 | Please run: 13 | 14 | ```bash 15 | docker-compose -f ./kafka-axon-example/docker-compose.yaml up -d 16 | ``` 17 | 18 | This will start [Zookeeper](https://zookeeper.apache.org/), [Kafka](https://github.com/wurstmeister/kafka-docker), 19 | [KafkaCat](https://github.com/edenhill/kafkacat), [Kafka Rest](https://github.com/nodefluent/kafka-rest) 20 | and [Kafka Rest UI](https://github.com/nodefluent/kafka-rest-ui). 21 | KafkaCat can be used to investigate the setup, whereas the UI (accessed through localhost:8000, user `admin` and 22 | password `admin`) provides visualization of the internals. 23 | 24 | If you use IntelliJ the run configuration from ./run can be used, otherwise build the application using: 25 | 26 | ```bash 27 | mvn clean package -f ./kafka-axon-example 28 | ``` 29 | 30 | ### Running example application 31 | 32 | You can start the application by running `java -jar ./kafka-axon-example/target/kafka-axon-example.jar`. 33 | 34 | From a Kafka Message Source perspective, there are several options you have, as both consumption and production of 35 | event messages can be Subscribing or Streaming (aka push or pull). 36 | Thus, the application can run in six different modes due to the possibility to define a producer 37 | and consumer event processing mode. 38 | At this stage the following profiles can be used: 39 | 40 | * `subscribing-producer` 41 | * `tracking-producer` 42 | * `pooled-streaming-producer` 43 | * `subscribing-consumer` 44 | * `tracking-consumer` 45 | * `pooled-streaming-consumer` 46 | * `cloud-events` 47 | 48 | If not specified, a `subscribing` producer and `tracking` consumer will be used. 49 | If `cloud-events` is not used, the format on the wire will be Axon Framework specific, using the 50 | [DefaultKafkaMessageConverter.java](https://github.com/AxonFramework/extension-kafka/blob/master/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/DefaultKafkaMessageConverter.java) 51 | To activate these modes, please use Spring profiles in the run configuration like so: 52 | `--spring.profiles.active=tracking-producer,subscribing-consumer` 53 | 54 | ### Checking the format on the wire 55 | 56 | To check the format on the wire, including the headers you can get inside the Kafka container with: 57 | 58 | ```bash 59 | docker exec -it kafka-axon-example_kafka_1 bash 60 | ``` 61 | 62 | You can use the `kafka-console-consumer.sh` script located in the Kafka container. The folder is located in a path 63 | similar to `/opt/kafka_2.13-2.8.1/bin`. 
To consume all events from the default topic and also print the headers use: 64 | 65 | ```bash 66 | ./kafka-console-consumer.sh --topic Axon.Events --from-beginning --bootstrap-server localhost:9092 --property print.headers=true 67 | ``` 68 | -------------------------------------------------------------------------------- /kafka-axon-example/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | services: 3 | zookeeper: 4 | image: wurstmeister/zookeeper 5 | ports: 6 | - 2181:2181 7 | networks: 8 | - network1 9 | 10 | kafka: 11 | image: wurstmeister/kafka 12 | ports: 13 | - 9092:9092 14 | - 29092:29092 15 | links: 16 | - zookeeper 17 | depends_on: 18 | - zookeeper 19 | environment: 20 | KAFKA_BROKER_ID: 1 21 | KAFKA_PORT: 9092 22 | KAFKA_LISTENERS: 23 | INTERNAL://:9092, 24 | EXTERNAL://:29092 25 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 26 | INTERNAL:PLAINTEXT, 27 | EXTERNAL:PLAINTEXT 28 | KAFKA_ADVERTISED_HOST_NAME: kafka 29 | KAFKA_ADVERTISED_LISTENERS: 30 | INTERNAL://kafka:9092, 31 | EXTERNAL://localhost:29092 32 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 33 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 34 | KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' 35 | networks: 36 | - network1 37 | 38 | kafkacat: 39 | image: confluentinc/cp-kafkacat 40 | command: sleep infinity 41 | 42 | kafka-rest: 43 | image: nodefluent/kafka-rest 44 | ports: 45 | - 8083:8083 46 | links: 47 | - kafka 48 | - zookeeper 49 | depends_on: 50 | - kafka 51 | - zookeeper 52 | environment: 53 | DEBUG: "*" 54 | KAFKA_REST_DEBUG: "all" 55 | KAFKA_REST_HTTP_PORT: 8083 56 | KAFKA_REST_CONSUMER_METADATA_BROKER_LIST: "kafka:9092" 57 | KAFKA_REST_PRODUCER_METADATA_BROKER_LIST: "kafka:9092" 58 | networks: 59 | - network1 60 | 61 | kafka-rest-ui: 62 | image: nodefluent/kafka-rest-ui 63 | ports: 64 | - 8000:8000 65 | links: 66 | - kafka-rest 67 | depends_on: 68 | - kafka-rest 69 | environment: 70 | DEBUG: "*" 71 | REACT_APP_KAFKA_REST_URL: "http://kafka-rest:8083/" 72 | REACT_APP_TIMEOUT: "3000" 73 | PROXY: "yes" 74 | BASIC_AUTH_USER: "admin" 75 | BASIC_AUTH_PASSWORD: "admin" 76 | networks: 77 | - network1 78 | 79 | networks: 80 | network1: -------------------------------------------------------------------------------- /kafka-axon-example/src/main/kotlin/org/axonframework/extensions/kafka/example/api/Commands.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.example.api 18 | 19 | import org.axonframework.modelling.command.TargetAggregateIdentifier 20 | import javax.validation.constraints.Min 21 | 22 | /** 23 | * Create account. 
24 | */ 25 | data class CreateBankAccountCommand( 26 | @TargetAggregateIdentifier 27 | val bankAccountId: String, 28 | @Min(value = 0, message = "Overdraft limit must not be less than zero") 29 | val overdraftLimit: Long 30 | ) 31 | 32 | /** 33 | * Deposit money. 34 | */ 35 | data class DepositMoneyCommand( 36 | @TargetAggregateIdentifier 37 | val bankAccountId: String, 38 | val amountOfMoney: Long 39 | ) 40 | 41 | /** 42 | * Withdraw money. 43 | */ 44 | data class WithdrawMoneyCommand( 45 | @TargetAggregateIdentifier 46 | val bankAccountId: String, 47 | val amountOfMoney: Long 48 | ) 49 | 50 | /** 51 | * Return money if transfer is not possible. 52 | */ 53 | data class ReturnMoneyOfFailedBankTransferCommand( 54 | @TargetAggregateIdentifier 55 | val bankAccountId: String, 56 | val amount: Long 57 | ) -------------------------------------------------------------------------------- /kafka-axon-example/src/main/kotlin/org/axonframework/extensions/kafka/example/api/Events.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.example.api 18 | 19 | /** 20 | * Account created. 21 | */ 22 | data class BankAccountCreatedEvent( 23 | val id: String, 24 | val overdraftLimit: Long 25 | ) 26 | 27 | /** 28 | * Collecting event for increasing amount. 29 | */ 30 | sealed class MoneyAddedEvent( 31 | open val bankAccountId: String, 32 | open val amount: Long 33 | ) 34 | 35 | /** 36 | * Money deposited. 37 | */ 38 | data class MoneyDepositedEvent(override val bankAccountId: String, override val amount: Long) : MoneyAddedEvent(bankAccountId, amount) 39 | 40 | /** 41 | * Money returned. 42 | */ 43 | data class MoneyOfFailedBankTransferReturnedEvent(override val bankAccountId: String, override val amount: Long) : MoneyAddedEvent(bankAccountId, amount) 44 | 45 | /** 46 | * Money received via transfer. 47 | */ 48 | data class DestinationBankAccountCreditedEvent(override val bankAccountId: String, override val amount: Long, val bankTransferId: String) : MoneyAddedEvent(bankAccountId, amount) 49 | 50 | /** 51 | * Collecting event for decreasing amount. 52 | */ 53 | sealed class MoneySubtractedEvent( 54 | open val bankAccountId: String, 55 | open val amount: Long 56 | ) 57 | 58 | /** 59 | * Money withdrawn. 60 | */ 61 | data class MoneyWithdrawnEvent(override val bankAccountId: String, override val amount: Long) : MoneySubtractedEvent(bankAccountId, amount) 62 | 63 | /** 64 | * Money transferred. 65 | */ 66 | data class SourceBankAccountDebitedEvent(override val bankAccountId: String, override val amount: Long, val bankTransferId: String) : MoneySubtractedEvent(bankAccountId, amount) 67 | 68 | /** 69 | * Money transfer rejected. 
70 | */ 71 | data class SourceBankAccountDebitRejectedEvent(val bankTransferId: String) -------------------------------------------------------------------------------- /kafka-axon-example/src/main/kotlin/org/axonframework/extensions/kafka/example/client/BankClient.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.example.client 18 | 19 | import mu.KLogging 20 | import org.axonframework.commandhandling.gateway.CommandGateway 21 | import org.axonframework.extensions.kafka.example.api.CreateBankAccountCommand 22 | import org.axonframework.extensions.kafka.example.api.DepositMoneyCommand 23 | import org.springframework.scheduling.annotation.Scheduled 24 | import org.springframework.stereotype.Component 25 | import java.util.* 26 | import java.util.concurrent.CompletableFuture 27 | 28 | /** 29 | * Bank client sending scheduled commands. 30 | */ 31 | @Component 32 | class BankClient(private val commandGateway: CommandGateway) { 33 | 34 | companion object : KLogging() 35 | 36 | private val accountId = UUID.randomUUID().toString() 37 | private var amount = 100 38 | 39 | /** 40 | * Creates account once. 41 | */ 42 | @Scheduled(initialDelay = 5_000, fixedDelay = 1000_000_000) 43 | fun createAccount() { 44 | logger.debug { "creating account $accountId" } 45 | commandGateway.send>(CreateBankAccountCommand(bankAccountId = accountId, overdraftLimit = 1000)) 46 | } 47 | 48 | /** 49 | * Deposit some money every 20 seconds. 50 | */ 51 | @Scheduled(initialDelay = 10_000, fixedDelay = 20_000) 52 | fun deposit() { 53 | logger.debug { "depositing $amount from account $accountId" } 54 | commandGateway.send>(DepositMoneyCommand(bankAccountId = accountId, amountOfMoney = amount.toLong())) 55 | amount = amount.inc() 56 | } 57 | } -------------------------------------------------------------------------------- /kafka-axon-example/src/main/kotlin/org/axonframework/extensions/kafka/example/core/BankAccount.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.example.core 18 | 19 | import org.axonframework.commandhandling.CommandHandler 20 | import org.axonframework.eventsourcing.EventSourcingHandler 21 | import org.axonframework.extensions.kafka.example.api.* 22 | import org.axonframework.modelling.command.AggregateIdentifier 23 | import org.axonframework.modelling.command.AggregateLifecycle.apply 24 | import org.axonframework.spring.stereotype.Aggregate 25 | 26 | /** 27 | * Represent account. 28 | */ 29 | @Suppress("unused") 30 | @Aggregate 31 | class BankAccount() { 32 | 33 | @AggregateIdentifier 34 | private lateinit var id: String 35 | private var overdraftLimit: Long = 0 36 | private var balanceInCents: Long = 0 37 | 38 | /** 39 | * Creates a new bank account. 40 | */ 41 | @CommandHandler 42 | constructor(command: CreateBankAccountCommand) : this() { 43 | apply(BankAccountCreatedEvent(command.bankAccountId, command.overdraftLimit)) 44 | } 45 | 46 | /** 47 | * Deposits money to account. 48 | */ 49 | @CommandHandler 50 | fun deposit(command: DepositMoneyCommand) { 51 | apply(MoneyDepositedEvent(id, command.amountOfMoney)) 52 | } 53 | 54 | /** 55 | * Withdraw money from account. 56 | */ 57 | @CommandHandler 58 | fun withdraw(command: WithdrawMoneyCommand) { 59 | if (command.amountOfMoney <= balanceInCents + overdraftLimit) { 60 | apply(MoneyWithdrawnEvent(id, command.amountOfMoney)) 61 | } 62 | } 63 | 64 | /** 65 | * Return money from account. 66 | */ 67 | @CommandHandler 68 | fun returnMoney(command: ReturnMoneyOfFailedBankTransferCommand) { 69 | apply(MoneyOfFailedBankTransferReturnedEvent(id, command.amount)) 70 | } 71 | 72 | /** 73 | * Handler to initialize bank accounts attributes. 74 | */ 75 | @EventSourcingHandler 76 | fun on(event: BankAccountCreatedEvent) { 77 | id = event.id 78 | overdraftLimit = event.overdraftLimit 79 | balanceInCents = 0 80 | } 81 | 82 | /** 83 | * Handler adjusting balance. 84 | */ 85 | @EventSourcingHandler 86 | fun on(event: MoneyAddedEvent) { 87 | balanceInCents += event.amount 88 | } 89 | 90 | /** 91 | * Handler adjusting balance. 92 | */ 93 | @EventSourcingHandler 94 | fun on(event: MoneySubtractedEvent) { 95 | balanceInCents -= event.amount 96 | } 97 | } -------------------------------------------------------------------------------- /kafka-axon-example/src/main/kotlin/org/axonframework/extensions/kafka/example/handler/BankEventHandler.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.example.handler 18 | 19 | import mu.KLogging 20 | import org.axonframework.config.ProcessingGroup 21 | import org.axonframework.eventhandling.EventHandler 22 | import org.axonframework.eventhandling.EventMessage 23 | import org.springframework.stereotype.Component 24 | 25 | /** 26 | * Collecting event handler for logging, connected to a Kafka consumer with the processing group name "kafka-group". 27 | * This processing group is further configured in the [org.axonframework.extensions.kafka.example.KafkaAxonExampleApplication]. 28 | */ 29 | @Component 30 | @ProcessingGroup("kafka-group") 31 | class BankEventHandler { 32 | 33 | companion object : KLogging() 34 | 35 | /** 36 | * Receive all events and log them. 37 | */ 38 | @EventHandler 39 | fun > on(event: T) { 40 | logger.info { "received event ${event.payload}" } 41 | } 42 | } -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-cloud-event.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | message-converter-mode: cloud_event -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-pooled-streaming-consumer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | consumer: 4 | event-processor-mode: pooled_streaming -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-pooled-streaming-producer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | producer: 4 | event-processor-mode: pooled_streaming -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-subscribing-consumer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | consumer: 4 | event-processor-mode: subscribing -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-subscribing-producer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | producer: 4 | event-processor-mode: subscribing -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-tracking-consumer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | consumer: 4 | event-processor-mode: tracking -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application-tracking-producer.yml: -------------------------------------------------------------------------------- 1 | axon: 2 | kafka: 3 | producer: 4 | event-processor-mode: tracking -------------------------------------------------------------------------------- /kafka-axon-example/src/main/resources/application.yml: -------------------------------------------------------------------------------- 1 | spring: 2 | application: 3 | name: KafkaAxonExample 4 | 5 | axon: 6 | axonserver: 7 | enabled: false 8 | serializer: 9 | events: jackson 10 | kafka: 11 | clientid: kafka-axon-example 12 | producer: 13 | retries: 0 14 | bootstrap-servers: localhost:29092 15 | 
consumer: 16 | bootstrap-servers: localhost:29092 17 | 18 | properties: 19 | security.protocol: PLAINTEXT 20 | -------------------------------------------------------------------------------- /kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/ProducerConfigUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.util; 18 | 19 | import io.cloudevents.CloudEvent; 20 | import io.cloudevents.kafka.CloudEventSerializer; 21 | import org.apache.kafka.clients.producer.KafkaProducer; 22 | import org.apache.kafka.clients.producer.ProducerConfig; 23 | import org.apache.kafka.common.serialization.StringSerializer; 24 | 25 | import java.util.HashMap; 26 | import java.util.Map; 27 | 28 | /** 29 | * Test utility for generating a {@link ProducerConfig}. 30 | * 31 | * @author Nakul Mishra 32 | * @author Steven van Beelen 33 | */ 34 | public abstract class ProducerConfigUtil { 35 | 36 | private ProducerConfigUtil() { 37 | // Utility class 38 | } 39 | 40 | /** 41 | * Minimal configuration required for creating a {@link KafkaProducer}. 42 | *
43 | * <ul><li>key.serializer - {@link StringSerializer}.</li>
44 | * <li>value.serializer - {@link StringSerializer}.</li>
45 | * </ul>
46 | * 47 | * @param bootstrapServer the Kafka Container address 48 | * @return the configuration. 49 | */ 50 | public static KafkaProducer newProducer(String bootstrapServer) { 51 | Map configs = new HashMap<>(); 52 | configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); 53 | configs.put(ProducerConfig.RETRIES_CONFIG, 10); 54 | configs.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); 55 | configs.put(ProducerConfig.LINGER_MS_CONFIG, 1); 56 | configs.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); 57 | configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 58 | configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CloudEventSerializer.class); 59 | return new KafkaProducer<>(configs);} 60 | } 61 | -------------------------------------------------------------------------------- /kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/integration/StreamableKafkaSourceIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.integration; 18 | 19 | import org.axonframework.common.stream.BlockingStream; 20 | import org.axonframework.eventhandling.DomainEventMessage; 21 | import org.axonframework.eventhandling.TrackedEventMessage; 22 | import org.axonframework.eventhandling.gateway.EventGateway; 23 | import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.StreamableKafkaMessageSource; 24 | import org.junit.jupiter.api.*; 25 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 26 | import org.springframework.boot.test.context.runner.ApplicationContextRunner; 27 | import org.springframework.context.annotation.EnableMBeanExport; 28 | import org.springframework.jmx.support.RegistrationPolicy; 29 | import org.springframework.test.context.ContextConfiguration; 30 | import org.testcontainers.junit.jupiter.Container; 31 | import org.testcontainers.junit.jupiter.Testcontainers; 32 | import org.testcontainers.kafka.KafkaContainer; 33 | 34 | import java.time.Duration; 35 | 36 | import static org.awaitility.Awaitility.await; 37 | import static org.axonframework.eventsourcing.utils.EventStoreTestUtils.createEvent; 38 | import static org.junit.jupiter.api.Assertions.*; 39 | 40 | @Testcontainers 41 | class StreamableKafkaSourceIntegrationTest { 42 | 43 | @Container 44 | private static final KafkaContainer KAFKA_CONTAINER = new KafkaContainer("apache/kafka-native") 45 | .withEnv("KAFKA_LISTENERS", "PLAINTEXT://:9092,BROKER://:9093,CONTROLLER://:9094"); 46 | 47 | private ApplicationContextRunner testApplicationContext; 48 | 49 | @BeforeEach 50 | void setUp() { 51 | testApplicationContext = new ApplicationContextRunner(); 52 | } 53 | 54 | @Test 55 | void messageSendViaKafkaShouldBeReceived() { 56 | testApplicationContext 57 | 
.withPropertyValues("axon.axonserver.enabled=false") 58 | .withPropertyValues("axon.kafka.fetcher.enabled=true") 59 | .withPropertyValues("axon.kafka.consumer.event-processor-mode=tracking") 60 | .withPropertyValues("axon.kafka.producer.bootstrap-servers=" + KAFKA_CONTAINER.getBootstrapServers()) 61 | .withPropertyValues("axon.kafka.consumer.bootstrap-servers=" + KAFKA_CONTAINER.getBootstrapServers()) 62 | .withUserConfiguration(DefaultContext.class) 63 | .run(context -> { 64 | EventGateway eventGateway = context.getBean(EventGateway.class); 65 | assertNotNull(eventGateway); 66 | publishEvent(eventGateway); 67 | 68 | //noinspection unchecked 69 | StreamableKafkaMessageSource messageSource = 70 | context.getBean(StreamableKafkaMessageSource.class); 71 | 72 | assertNotNull(messageSource); 73 | receiveMessage(messageSource); 74 | }); 75 | } 76 | 77 | private void publishEvent(EventGateway eventGateway) { 78 | DomainEventMessage event = createEvent(); 79 | eventGateway.publish(event); 80 | } 81 | 82 | private void receiveMessage( 83 | StreamableKafkaMessageSource messageSource 84 | ) throws InterruptedException { 85 | //noinspection resource 86 | BlockingStream> stream = messageSource.openStream(null); 87 | await().atMost(Duration.ofSeconds(5L)).until(stream::hasNextAvailable); 88 | TrackedEventMessage message = stream.nextAvailable(); 89 | assertNotNull(message); 90 | assertEquals("payload", message.getPayload()); 91 | } 92 | 93 | @ContextConfiguration 94 | @EnableAutoConfiguration 95 | @EnableMBeanExport(registration = RegistrationPolicy.IGNORE_EXISTING) 96 | public static class DefaultContext { 97 | 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /kafka-spring-boot-3-integrationtests/src/test/resources/logback-test.xml: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | 20 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 4.0.0 20 | 21 | org.axonframework.extensions.kafka 22 | axon-kafka-parent 23 | 4.11.2-SNAPSHOT 24 | 25 | 26 | axon-kafka-spring-boot-autoconfigure 27 | 28 | Axon Framework Kafka Extension - Spring Boot Support 29 | 30 | Module providing support for auto configuration of the Axon Kafka Extension through Spring Boot 31 | 32 | 33 | 34 | 35 | org.axonframework.extensions.kafka 36 | axon-kafka 37 | ${project.version} 38 | 39 | 40 | 41 | org.axonframework 42 | axon-spring-boot-autoconfigure 43 | ${axon.version} 44 | true 45 | 46 | 47 | org.springframework 48 | spring-web 49 | 50 | 51 | 52 | 53 | 54 | org.springframework.boot 55 | spring-boot-configuration-processor 56 | ${spring.boot.version} 57 | true 58 | 59 | 60 | 61 | org.springframework.boot 62 | spring-boot-starter 63 | ${spring.boot.version} 64 | true 65 | 66 | 67 | ch.qos.logback 68 | logback-classic 69 | 70 | 71 | org.apache.logging.log4j 72 | log4j-to-slf4j 73 | 74 | 75 | 76 | 77 | 78 | org.apache.kafka 79 | kafka-clients 80 | ${kafka.version} 81 | true 82 | 83 | 84 | 85 | org.springframework.boot 86 | spring-boot-starter-test 87 | ${spring.boot.version} 88 | test 89 | 90 | 91 | 92 | 93 | 94 | 95 | src/main/resources 96 | true 97 | 98 | 99 | 100 | 101 | org.apache.maven.plugins 102 | maven-jar-plugin 103 | ${maven-jar.version} 
104 | 105 | 106 | 107 | org.axonframework.extensions.kafka.springboot 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/main/resources/META-INF/spring.factories: -------------------------------------------------------------------------------- 1 | org.springframework.boot.autoconfigure.EnableAutoConfiguration=\ 2 | org.axonframework.extensions.kafka.autoconfig.KafkaAutoConfiguration 3 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports: -------------------------------------------------------------------------------- 1 | org.axonframework.extensions.kafka.autoconfig.KafkaAutoConfiguration -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/java/org/axonframework/extensions/kafka/KafkaPropertiesIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka; 18 | 19 | import org.junit.jupiter.api.*; 20 | import org.junit.jupiter.api.extension.*; 21 | import org.springframework.beans.factory.annotation.Autowired; 22 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 23 | import org.springframework.test.context.TestPropertySource; 24 | import org.springframework.test.context.junit.jupiter.SpringExtension; 25 | 26 | import java.util.Map; 27 | 28 | import static org.junit.jupiter.api.Assertions.*; 29 | 30 | /** 31 | * Test class to verify property injection in the {@link KafkaProperties} class through Spring Boot. 
32 | * 33 | * @author Steven van Beelen 34 | */ 35 | @EnableAutoConfiguration 36 | @ExtendWith(SpringExtension.class) 37 | @TestPropertySource("classpath:application-map-style.properties") 38 | class KafkaPropertiesIntegrationTest { 39 | 40 | private static final String PROPERTY_KEY_ONE = "keyOne"; 41 | private static final String PROPERTY_VALUE_ONE = "valueOne"; 42 | private static final String PROPERTY_KEY_TWO = "keyTwo"; 43 | private static final String PROPERTY_VALUE_TWO = "valueTwo"; 44 | private static final String PROPERTY_KEY_THREE = "keyThree"; 45 | private static final String PROPERTY_VALUE_THREE = "valueThree"; 46 | private static final String PROPERTY_KEY_FOUR = "keyFour"; 47 | private static final String PROPERTY_VALUE_FOUR = "valueFour"; 48 | private static final String PROPERTY_KEY_FIVE = "keyFive"; 49 | private static final String PROPERTY_VALUE_FIVE = "valueFive"; 50 | 51 | @Autowired 52 | private KafkaProperties testSubject; 53 | 54 | @Test 55 | void testPropertyMapIsInjectedAsExpected() { 56 | assertPropertyMap(testSubject.getProperties()); 57 | assertPropertyMap(testSubject.getProducer().getProperties()); 58 | assertPropertyMap(testSubject.getConsumer().getProperties()); 59 | } 60 | 61 | private static void assertPropertyMap(Map resultProperties) { 62 | assertTrue(resultProperties.containsKey(PROPERTY_KEY_ONE)); 63 | assertEquals(PROPERTY_VALUE_ONE, resultProperties.get(PROPERTY_KEY_ONE)); 64 | assertTrue(resultProperties.containsKey(PROPERTY_KEY_TWO)); 65 | assertEquals(PROPERTY_VALUE_TWO, resultProperties.get(PROPERTY_KEY_TWO)); 66 | assertTrue(resultProperties.containsKey(PROPERTY_KEY_THREE)); 67 | assertEquals(PROPERTY_VALUE_THREE, resultProperties.get(PROPERTY_KEY_THREE)); 68 | assertTrue(resultProperties.containsKey(PROPERTY_KEY_FOUR)); 69 | assertEquals(PROPERTY_VALUE_FOUR, resultProperties.get(PROPERTY_KEY_FOUR)); 70 | assertTrue(resultProperties.containsKey(PROPERTY_KEY_FIVE)); 71 | assertEquals(PROPERTY_VALUE_FIVE, resultProperties.get(PROPERTY_KEY_FIVE)); 72 | } 73 | } -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/java/org/axonframework/extensions/kafka/SubscribingProducerIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka; 18 | 19 | import org.axonframework.eventhandling.EventBus; 20 | import org.axonframework.eventhandling.GenericEventMessage; 21 | import org.axonframework.eventhandling.tokenstore.TokenStore; 22 | import org.axonframework.eventhandling.tokenstore.inmemory.InMemoryTokenStore; 23 | import org.axonframework.eventsourcing.eventstore.EmbeddedEventStore; 24 | import org.axonframework.eventsourcing.eventstore.EventStore; 25 | import org.axonframework.eventsourcing.eventstore.inmemory.InMemoryEventStorageEngine; 26 | import org.axonframework.extensions.kafka.eventhandling.producer.KafkaPublisher; 27 | import org.axonframework.springboot.autoconfig.AxonServerAutoConfiguration; 28 | import org.junit.jupiter.api.*; 29 | import org.mockito.invocation.*; 30 | import org.mockito.stubbing.*; 31 | import org.springframework.beans.factory.annotation.Autowired; 32 | import org.springframework.boot.autoconfigure.SpringBootApplication; 33 | import org.springframework.boot.test.context.SpringBootTest; 34 | import org.springframework.boot.test.mock.mockito.MockBean; 35 | import org.springframework.context.annotation.Bean; 36 | import org.springframework.test.context.TestPropertySource; 37 | 38 | import java.util.concurrent.CountDownLatch; 39 | import java.util.concurrent.TimeUnit; 40 | 41 | import static org.junit.jupiter.api.Assertions.*; 42 | import static org.mockito.ArgumentMatchers.*; 43 | import static org.mockito.Mockito.*; 44 | 45 | @SpringBootTest 46 | @TestPropertySource("classpath:application-subscribing.properties") 47 | class SubscribingProducerIntegrationTest { 48 | 49 | @MockBean 50 | private KafkaPublisher kafkaPublisher; 51 | 52 | @Autowired 53 | private EventBus eventBus; 54 | 55 | @Test 56 | void shouldPublishMessagesSynchronously() throws Exception { 57 | // given 58 | ThreadIdCaptor threadIdCaptor = new ThreadIdCaptor(); 59 | doAnswer(threadIdCaptor).when(kafkaPublisher).send(any()); 60 | 61 | // when 62 | eventBus.publish(new GenericEventMessage<>("test")); 63 | 64 | // then 65 | assertEquals(threadIdCaptor.getThreadId(), Thread.currentThread().getId()); 66 | } 67 | 68 | private static class ThreadIdCaptor implements Answer { 69 | 70 | private static final int TIMEOUT = 10; 71 | 72 | private Long threadId; 73 | private final CountDownLatch latch = new CountDownLatch(1); 74 | 75 | @Override 76 | public Void answer(InvocationOnMock invocationOnMock) { 77 | threadId = Thread.currentThread().getId(); 78 | latch.countDown(); 79 | return null; 80 | } 81 | 82 | public Long getThreadId() throws InterruptedException { 83 | //noinspection ResultOfMethodCallIgnored 84 | latch.await(TIMEOUT, TimeUnit.SECONDS); 85 | if (threadId == null) { 86 | throw new IllegalStateException("Unable to capture thread id in " + TIMEOUT + " minutes."); 87 | } 88 | return threadId; 89 | } 90 | } 91 | 92 | @SpringBootApplication(exclude = AxonServerAutoConfiguration.class) 93 | static class Application { 94 | 95 | @Bean 96 | public EventStore eventStore() { 97 | return EmbeddedEventStore.builder().storageEngine(new InMemoryEventStorageEngine()).build(); 98 | } 99 | 100 | @Bean 101 | public TokenStore tokenStore() { 102 | return new InMemoryTokenStore(); 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/application-map-style.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2022. 
Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | axon.kafka.default-topic=some-topic 17 | axon.kafka.properties.keyOne=valueOne 18 | axon.kafka.properties.keyTwo=valueTwo 19 | axon.kafka.properties.keyThree=valueThree 20 | axon.kafka.properties.keyFour=valueFour 21 | axon.kafka.properties.keyFive=valueFive 22 | axon.kafka.producer.properties.keyOne=valueOne 23 | axon.kafka.producer.properties.keyTwo=valueTwo 24 | axon.kafka.producer.properties.keyThree=valueThree 25 | axon.kafka.producer.properties.keyFour=valueFour 26 | axon.kafka.producer.properties.keyFive=valueFive 27 | axon.kafka.consumer.properties.keyOne=valueOne 28 | axon.kafka.consumer.properties.keyTwo=valueTwo 29 | axon.kafka.consumer.properties.keyThree=valueThree 30 | axon.kafka.consumer.properties.keyFour=valueFour 31 | axon.kafka.consumer.properties.keyFive=valueFive 32 | axon.axonserver.enabled=false -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/application-source-configurer.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2022. Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | axon.kafka.fetcher.enabled=true 17 | axon.kafka.publisher.enabled=false 18 | axon.axonserver.enabled=false 19 | spring.main.allow-circular-references=true -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/application-subscribing.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2022. Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | axon.kafka.producer.event-processor-mode=SUBSCRIBING 17 | axon.axonserver.enabled=false -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/ksLoc: -------------------------------------------------------------------------------- 1 | Test file for Kafka. 2 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/ksLocP: -------------------------------------------------------------------------------- 1 | Test file for Kafka. 2 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/log4j2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2020. Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | name=AxonTestConfiguration 18 | appenders = console 19 | 20 | appender.console.type = Console 21 | appender.console.name = STDOUT 22 | appender.console.layout.type = PatternLayout 23 | appender.console.layout.pattern = %d [%t] %-5p %-30.30c{1} %x - %m%n 24 | 25 | rootLogger.level = info 26 | rootLogger.appenderRefs = stdout 27 | rootLogger.appenderRef.stdout.ref = STDOUT 28 | 29 | logger.axon.name = org.axonframework 30 | logger.axon.level = info 31 | logger.axon.additivity = false 32 | logger.axon.appenderRefs = stdout 33 | logger.axon.appenderRef.stdout.ref = STDOUT 34 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/tsLoc: -------------------------------------------------------------------------------- 1 | Test file for Kafka. 2 | -------------------------------------------------------------------------------- /kafka-spring-boot-autoconfigure/src/test/resources/tsLocP: -------------------------------------------------------------------------------- 1 | Test file for Kafka. 2 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/configuration/KafkaMessageSourceConfigurer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.configuration; 18 | 19 | import org.axonframework.config.Component; 20 | import org.axonframework.config.Configuration; 21 | import org.axonframework.config.ModuleConfiguration; 22 | import org.axonframework.extensions.kafka.eventhandling.consumer.subscribable.SubscribableKafkaMessageSource; 23 | import org.axonframework.lifecycle.Phase; 24 | 25 | import java.util.ArrayList; 26 | import java.util.List; 27 | import java.util.function.Function; 28 | 29 | /** 30 | * A {@link ModuleConfiguration} to configure Kafka as a message source for {@link 31 | * org.axonframework.eventhandling.EventProcessor} instances. This ModuleConfiguration should be registered towards the 32 | * {@link org.axonframework.config.Configurer} for it to start amy Kafka sources for an EventProcessor. 33 | * 34 | * @author Steven van Beelen 35 | * @since 4.0 36 | */ 37 | public class KafkaMessageSourceConfigurer implements ModuleConfiguration { 38 | 39 | private Configuration configuration; 40 | private final List>> subscribableKafkaMessageSources = new ArrayList<>(); 41 | 42 | @Override 43 | public void initialize(Configuration config) { 44 | this.configuration = config; 45 | 46 | if (!subscribableKafkaMessageSources.isEmpty()) { 47 | this.configuration.onStart( 48 | Phase.INBOUND_EVENT_CONNECTORS, 49 | () -> subscribableKafkaMessageSources.stream().map(Component::get) 50 | .forEach(SubscribableKafkaMessageSource::start) 51 | ); 52 | this.configuration.onShutdown( 53 | Phase.INBOUND_EVENT_CONNECTORS, 54 | () -> subscribableKafkaMessageSources.stream().map(Component::get) 55 | .forEach(SubscribableKafkaMessageSource::close) 56 | ); 57 | } 58 | } 59 | 60 | /** 61 | * Register a {@link Function} which uses the provided {@link Configuration} to build a {@link 62 | * SubscribableKafkaMessageSource}. 63 | * 64 | * @param subscribableKafkaMessageSource the {@link Function} which will build a {@link SubscribableKafkaMessageSource} 65 | */ 66 | public void configureSubscribableSource( 67 | Function> subscribableKafkaMessageSource 68 | ) { 69 | subscribableKafkaMessageSources.add(new Component<>( 70 | () -> configuration, "subscribableKafkaMessageSource", subscribableKafkaMessageSource 71 | )); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/KafkaMessageConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling; 18 | 19 | import org.apache.kafka.clients.consumer.ConsumerRecord; 20 | import org.apache.kafka.clients.producer.ProducerRecord; 21 | import org.axonframework.eventhandling.EventMessage; 22 | 23 | import java.util.Optional; 24 | 25 | /** 26 | * Converts Kafka records from Axon {@link EventMessage}s and vice versa. 27 | * 28 | * @param the key type of the Kafka record 29 | * @param the value type of the Kafka record 30 | * @author Nakul Mishra 31 | * @author Steven van Beelen 32 | * @since 4.0 33 | */ 34 | public interface KafkaMessageConverter { 35 | 36 | /** 37 | * Creates a {@link ProducerRecord} for a given {@link EventMessage} to be published on a Kafka Producer. 38 | * 39 | * @param eventMessage the event message to convert into a {@link ProducerRecord} for Kafka 40 | * @param topic the Kafka topic to publish the message on 41 | * @return the converted {@code eventMessage} as a {@link ProducerRecord} 42 | */ 43 | ProducerRecord createKafkaMessage(EventMessage eventMessage, String topic); 44 | 45 | /** 46 | * Reconstruct an {@link EventMessage} from the given {@link ConsumerRecord}. The returned optional resolves to a 47 | * message if the given input parameters represented a correct EventMessage. 48 | * 49 | * @param consumerRecord the Event Message represented inside Kafka 50 | * @return the converted {@code consumerRecord} as an {@link EventMessage} 51 | */ 52 | @SuppressWarnings("squid:S1452") 53 | //needs wildcard to be generic 54 | Optional> readKafkaMessage(ConsumerRecord consumerRecord); 55 | } 56 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/cloudevent/InvalidMetaDataException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.cloudevent; 18 | 19 | import org.axonframework.common.AxonException; 20 | 21 | /** 22 | * Exception thrown when failing to add metadata to a cloud event. 23 | * 24 | * @author Gerard Klijs 25 | * @since 4.6.0 26 | */ 27 | public class InvalidMetaDataException extends AxonException { 28 | 29 | /** 30 | * Initializes the exception using the given {@code message}. 31 | * 32 | * @param message The message describing the exception. 33 | */ 34 | public InvalidMetaDataException(String message) { 35 | super(message); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. 
Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | 21 | /** 22 | * A functional interface towards building {@link Consumer} instances. 23 | * 24 | * @param the key type of a build {@link Consumer} instance 25 | * @param the value type of a build {@link Consumer} instance 26 | * @author Nakul Mishra 27 | * @author Steven van Beelen 28 | * @author Gerard Klijs 29 | * @since 4.0 30 | */ 31 | @FunctionalInterface 32 | public interface ConsumerFactory { 33 | 34 | /** 35 | * Create a {@link Consumer} that should be part of the Consumer Group with the given {@code groupId}, or without a 36 | * consumer group if called with {@code null}. 37 | * 38 | * @param groupId a {@link String} defining the group the constructed {@link Consumer} will be a part of, this can 39 | * be {@code null} to not add it to a group. 40 | * @return a {@link Consumer} which is part of Consumer Group with the given {@code groupId}, or without a groupId 41 | * when called with {@code null}. 42 | */ 43 | Consumer createConsumer(String groupId); 44 | } 45 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/DefaultConsumerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.apache.kafka.clients.consumer.KafkaConsumer; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | import java.util.Collections; 25 | import java.util.HashMap; 26 | import java.util.Map; 27 | 28 | import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; 29 | import static org.axonframework.common.BuilderUtils.assertNonNull; 30 | 31 | /** 32 | * The {@link ConsumerFactory} implementation to produce a new {@link Consumer} instance. On each invocation of {@link 33 | * #createConsumer(String)} a new instance will be created based on the supplied {@code configuration} properties. 
34 | * 35 | * @param the key type of a build {@link Consumer} instance 36 | * @param the value type of a build {@link Consumer} instance 37 | * @author Nakul Mishra 38 | * @author Steven van Beelen 39 | * @author Gerard Klijs 40 | * @since 4.0 41 | */ 42 | public class DefaultConsumerFactory implements ConsumerFactory { 43 | 44 | private static final Logger logger = LoggerFactory.getLogger(DefaultConsumerFactory.class); 45 | 46 | private final Map consumerConfiguration; 47 | 48 | /** 49 | * Build a default {@link ConsumerFactory} which uses the provided {@code configuration} to build it's {@link 50 | * Consumer}s. 51 | * 52 | * @param consumerConfiguration a {@link Map} containing the configuration for the {@link Consumer}s this factory 53 | * builds 54 | */ 55 | public DefaultConsumerFactory(Map consumerConfiguration) { 56 | assertNonNull(consumerConfiguration, "The configuration may not be null"); 57 | this.consumerConfiguration = new HashMap<>(consumerConfiguration); 58 | } 59 | 60 | @Override 61 | public Consumer createConsumer(String groupId) { 62 | Map configuration = new HashMap<>(this.consumerConfiguration); 63 | if (configuration.remove(GROUP_ID_CONFIG) != null) { 64 | logger.warn("Found a global {} whilst it is required to be provided consciously", GROUP_ID_CONFIG); 65 | } 66 | if (groupId != null) { 67 | configuration.put(GROUP_ID_CONFIG, groupId); 68 | } 69 | return new KafkaConsumer<>(configuration); 70 | } 71 | 72 | /** 73 | * Return an unmodifiable reference to the configuration map for this factory. Useful for cloning to make a similar 74 | * factory. 75 | * 76 | * @return a configuration {@link Map} used by this {@link ConsumerFactory} to build {@link Consumer}s 77 | */ 78 | public Map configurationProperties() { 79 | return Collections.unmodifiableMap(this.consumerConfiguration); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/EventConsumer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import java.util.List; 20 | 21 | /** 22 | * A functional interface towards consuming a {@link List} of records of type {@code E}. Provides added functionality 23 | * over the regular {@link java.util.function.Consumer} functional interface by specifying that it might throw an {@link 24 | * InterruptedException}. 25 | * 26 | * @param the element type of the records to consume 27 | * @author Steven van Beelen 28 | * @since 4.0 29 | */ 30 | @FunctionalInterface 31 | public interface EventConsumer { 32 | 33 | /** 34 | * Consume a {@link List} of records of type {@code E}. 
35 | * 36 | * @param records the {@link List} of type {@code E} to consume 37 | * @throws InterruptedException if consumption is interrupted 38 | */ 39 | void consume(List records) throws InterruptedException; 40 | } 41 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/FetchEventException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.axonframework.common.AxonException; 20 | 21 | /** 22 | * Exception thrown when there is an error while either fetching records from Kafka, or processing them. 23 | * 24 | * @author Gerard Klijs 25 | * @since 4.5.4 26 | */ 27 | public class FetchEventException extends AxonException { 28 | 29 | /** 30 | * Creates a new {@link FetchEventException}. 31 | * 32 | * @param message some info about the exception 33 | */ 34 | public FetchEventException(String message) { 35 | super(message); 36 | } 37 | 38 | /** 39 | * Creates a new {@link FetchEventException}. 40 | * 41 | * @param message some info about the exception 42 | * @param cause the {@link Throwable} that is the cause of the exception 43 | */ 44 | public FetchEventException(String message, Throwable cause) { 45 | super(message, cause); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/Fetcher.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.axonframework.common.Registration; 21 | 22 | /** 23 | * Interface describing the component responsible for fetching messages from a Kafka topic through a {@link Consumer}. 
24 | * 25 | * @param the key of the {@link org.apache.kafka.clients.consumer.ConsumerRecords} produced in the {@link Consumer} 26 | * and used in the {@link RecordConverter} 27 | * @param the value type of {@link org.apache.kafka.clients.consumer.ConsumerRecords} produced in the {@link 28 | * Consumer} and used in the {@link RecordConverter} 29 | * @param the element type the {@link org.apache.kafka.clients.consumer.ConsumerRecords} will be converted in to by 30 | * the {@link RecordConverter} and consumed by the {@link EventConsumer} 31 | * @author Nakul Mishra 32 | * @author Steven van Beelen 33 | * @since 4.0 34 | */ 35 | public interface Fetcher { 36 | 37 | /** 38 | * Instruct this Fetcher to start polling message through the provided {@link Consumer}. After retrieval, the {@link 39 | * org.apache.kafka.clients.consumer.ConsumerRecords} will be converted by the given {@code recordConverter} and 40 | * there after consumed by the given {@code recordConsumer}. A {@link Registration} will be returned to cancel 41 | * message consumption and conversion. 42 | * 43 | * @param consumer the {@link Consumer} used to consume message from a Kafka topic 44 | * @param recordConverter a {@link RecordConverter} instance which will convert the "consumed" {@link 45 | * org.apache.kafka.clients.consumer.ConsumerRecords} in to a List of {@code E} 46 | * @param eventConsumer a {@link EventConsumer} instance which will consume the converted records 47 | * @return a close handler of type {@link org.axonframework.common.Registration} to stop the {@link Fetcher} 48 | * operation 49 | * @deprecated instead {@link #poll(Consumer, RecordConverter, EventConsumer, RuntimeErrorHandler)} should be used, 50 | * so including an error handler. 51 | */ 52 | @Deprecated 53 | @SuppressWarnings("squid:S1133") 54 | //Removal will break the API, so can only be done in a new major version. 55 | Registration poll(Consumer consumer, 56 | RecordConverter recordConverter, 57 | EventConsumer eventConsumer); 58 | 59 | /** 60 | * Instruct this Fetcher to start polling message through the provided {@link Consumer}. After retrieval, the {@link 61 | * org.apache.kafka.clients.consumer.ConsumerRecords} will be converted by the given {@code recordConverter} and 62 | * there after consumed by the given {@code recordConsumer}. A {@link Registration} will be returned to cancel 63 | * message consumption and conversion. 64 | * 65 | * @param consumer the {@link Consumer} used to consume message from a Kafka topic 66 | * @param recordConverter a {@link RecordConverter} instance which will convert the "consumed" {@link 67 | * org.apache.kafka.clients.consumer.ConsumerRecords} in to a List of {@code E} 68 | * @param eventConsumer a {@link EventConsumer} instance which will consume the converted records 69 | * @param runtimeErrorHandler a {@link RuntimeErrorHandler} function used to handle errors 70 | * @return a close handler of type {@link org.axonframework.common.Registration} to stop the {@link Fetcher} 71 | * operation 72 | */ 73 | Registration poll(Consumer consumer, 74 | RecordConverter recordConverter, 75 | EventConsumer eventConsumer, 76 | RuntimeErrorHandler runtimeErrorHandler); 77 | 78 | /** 79 | * Shuts the fetcher down, closing any resources used by this fetcher. 
80 | */ 81 | void shutdown(); 82 | } 83 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/OffsetCommitType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2025. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.KafkaConsumer; 20 | 21 | /** 22 | * Enum to define how the consumer will handle committing offsets. 23 | * 24 | * @author Bradley Skuse 25 | * @since 4.11.1 26 | */ 27 | public enum OffsetCommitType { 28 | 29 | /** 30 | * Kafka consumer will commit offsets automatically in the background. 31 | */ 32 | AUTO, 33 | 34 | /** 35 | * Kafka consumer will commit offsets asynchronously after processing 36 | * 37 | * @see KafkaConsumer#commitAsync() 38 | */ 39 | COMMIT_ASYNC, 40 | 41 | /** 42 | * Kafka consumer will commit offsets synchronously after processing 43 | * 44 | * @see KafkaConsumer#commitSync() 45 | */ 46 | COMMIT_SYNC 47 | } 48 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/RecordConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.ConsumerRecords; 20 | 21 | import java.util.List; 22 | 23 | /** 24 | * A functional interface towards converting the {@link org.apache.kafka.clients.consumer.ConsumerRecord} instances in 25 | * to a {@link List} of {@code E}. 26 | * 27 | * @param the key of the {@link ConsumerRecords} 28 | * @param the value type of {@link ConsumerRecords} 29 | * @param the element type each {@link org.apache.kafka.clients.consumer.ConsumerRecord} instance is converted to 30 | * @author Steven van Beelen 31 | * @since 4.0 32 | */ 33 | @FunctionalInterface 34 | public interface RecordConverter { 35 | 36 | /** 37 | * Covert the provided {@code records} in to a {@link List} of elements of type {@code E}. 
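
A sketch of wiring the non-deprecated poll variant, assuming a Fetcher instance is already available (for example the extension's AsyncFetcher) along with a Consumer built by a ConsumerFactory. The converter here simply extracts record keys, which is an illustrative choice only:

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.axonframework.common.Registration;
import org.axonframework.extensions.kafka.eventhandling.consumer.EventConsumer;
import org.axonframework.extensions.kafka.eventhandling.consumer.Fetcher;
import org.axonframework.extensions.kafka.eventhandling.consumer.RecordConverter;
import org.axonframework.extensions.kafka.eventhandling.consumer.RuntimeErrorHandler;

import java.util.ArrayList;
import java.util.List;

class FetcherExample {

    Registration startPolling(Fetcher<String, byte[], String> fetcher, Consumer<String, byte[]> consumer) {
        // Converts every ConsumerRecord into its String key; purely illustrative.
        RecordConverter<String, byte[], String> converter = records -> {
            List<String> keys = new ArrayList<>();
            for (ConsumerRecord<String, byte[]> record : records) {
                keys.add(record.key());
            }
            return keys;
        };
        // Consumes the converted elements.
        EventConsumer<String> eventConsumer = keys -> keys.forEach(System.out::println);
        // Receives RuntimeExceptions thrown on the fetcher's polling thread.
        RuntimeErrorHandler errorHandler = exception -> System.err.println("Fetch failed: " + exception);

        // The returned Registration can later be cancelled to stop consumption and conversion.
        return fetcher.poll(consumer, converter, eventConsumer, errorHandler);
    }
}
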
38 | * 39 | * @param records a {@link ConsumerRecords} instance to convert in to a {@link List} of {@code E} 40 | * @return the {@link List} of elements of type {@code E} 41 | */ 42 | List convert(ConsumerRecords records); 43 | } 44 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/RuntimeErrorHandler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | /** 20 | * Used to implement an error handler such that exceptions from one thread can be passed to another thread. 21 | * 22 | * @author Gerard Klijs 23 | * @since 4.5.4 24 | */ 25 | @FunctionalInterface 26 | public interface RuntimeErrorHandler { 27 | 28 | /** 29 | * Error that is passed to another thread, to be handled there. 30 | * 31 | * @param exception, the {@link RuntimeException} that's need to be handled. 32 | */ 33 | void handle(RuntimeException exception); 34 | } 35 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/TopicListSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | 21 | import java.util.Collection; 22 | 23 | /** 24 | * Implementation of {@link TopicSubscriber} that subscribes a {@link Consumer} to a list of topics. 25 | * Using the {@link Consumer#subscribe(Collection)} method. This was standard behavior prior to 4.8.0 26 | * 27 | * @author Ben Kornmeier 28 | * @since 4.8.0 29 | */ 30 | public class TopicListSubscriber implements TopicSubscriber { 31 | private final Collection topics; 32 | 33 | /** 34 | * Instantiate a {@link TopicListSubscriber} that is backed by a list of specific topics. 35 | * 36 | * @param topics the topics to subscribe to 37 | */ 38 | public TopicListSubscriber(Collection topics) { 39 | this.topics = topics; 40 | } 41 | 42 | /** 43 | * Adds a topic to the list of topics. 
44 | * 45 | * @param topic the topic to add 46 | */ 47 | public void addTopic(String topic) { 48 | if(topic != null && !topic.isEmpty()) 49 | this.topics.add(topic); 50 | } 51 | 52 | @Override 53 | public void subscribeTopics(Consumer consumer) { 54 | consumer.subscribe(topics); 55 | } 56 | 57 | @Override 58 | public boolean subscribesToTopicName(String topic) { 59 | return topics.contains(topic); 60 | } 61 | 62 | @Override 63 | public String describe() { 64 | return "topics=[" + String.join(", ", topics) + "]"; 65 | } 66 | } -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/TopicPatternSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package org.axonframework.extensions.kafka.eventhandling.consumer; 17 | 18 | import org.apache.kafka.clients.consumer.Consumer; 19 | 20 | import java.util.regex.Pattern; 21 | 22 | /** 23 | * Implementation of {@link TopicSubscriber} that subscribes a {@link Consumer} to a pattern of topics. 24 | * Using the {@link Consumer#subscribe(Pattern)} method. 25 | * 26 | * @author Ben Kornmeier 27 | * @since 4.8.0 28 | */ 29 | public class TopicPatternSubscriber implements TopicSubscriber { 30 | private final Pattern pattern; 31 | 32 | /** 33 | * Instantiate a {@link TopicPatternSubscriber} that uses {@link Pattern} to subscribe to topics as well as check if it is responsible for a given topic. 34 | * 35 | * @param pattern {@link Pattern} to use to subscribe to topics with and check if it is responsible for a given topic. 36 | */ 37 | public TopicPatternSubscriber(Pattern pattern) { 38 | this.pattern = pattern; 39 | } 40 | 41 | @Override 42 | public void subscribeTopics(Consumer consumer) { 43 | consumer.subscribe(pattern); 44 | } 45 | 46 | @Override 47 | public boolean subscribesToTopicName(String topic) { 48 | return pattern.matcher(topic).matches(); 49 | } 50 | 51 | @Override 52 | public String describe() { 53 | return "pattern=[" + pattern + "]"; 54 | } 55 | } -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/TopicSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | 21 | /** 22 | * Used by {@link org.axonframework.extensions.kafka.eventhandling.consumer.streamable.StreamableKafkaMessageSource} and {@link org.axonframework.extensions.kafka.eventhandling.consumer.subscribable.SubscribableKafkaMessageSource} to subscribe a {@link Consumer} 23 | * to topic(s). 24 | * 25 | * @author Ben Kornmeier 26 | * @since 4.8.0 27 | */ 28 | public interface TopicSubscriber { 29 | /** 30 | * Subscribes the given {@link Consumer} to the topic(s) this {@link TopicSubscriber} is responsible for. 31 | * 32 | * @param consumer The {@link Consumer} to subscribe to the topic(s) this {@link TopicSubscriber} is responsible for. 33 | */ 34 | void subscribeTopics(Consumer consumer); 35 | 36 | /** 37 | * Checks if this {@link TopicSubscriber} is responsible for the given topic. 38 | * 39 | * @param topic The topic to check if this {@link TopicSubscriber} is responsible for. 40 | * @return true if this {@link TopicSubscriber} is responsible for the given topic, false otherwise. 41 | */ 42 | boolean subscribesToTopicName(String topic); 43 | 44 | /** 45 | * Returns a description of the topic(s) this {@link TopicSubscriber} is responsible for. 46 | * @return The description of the topic(s) this {@link TopicSubscriber} is responsible for. 47 | */ 48 | String describe(); 49 | } -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/TopicSubscriberBuilder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | 21 | import java.util.Collections; 22 | import java.util.List; 23 | import java.util.Objects; 24 | import java.util.regex.Pattern; 25 | 26 | import static org.axonframework.common.BuilderUtils.assertNonNull; 27 | import static org.axonframework.common.BuilderUtils.assertThat; 28 | 29 | /** 30 | * Used by {@link org.axonframework.extensions.kafka.eventhandling.consumer.streamable.StreamableKafkaMessageSource.Builder} and {@link org.axonframework.extensions.kafka.eventhandling.consumer.subscribable.SubscribableKafkaMessageSource.Builder} 31 | * to provide a {@link TopicSubscriber} to subscribe a {@link Consumer} to topic(s). 32 | * 33 | * @param The actual type of the builder. Mainly used to return the correct type in fluent interfaces. 
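
Both subscriber implementations shown above can be exercised directly; the topic names and pattern in this sketch are assumptions for illustration:

import org.axonframework.extensions.kafka.eventhandling.consumer.TopicListSubscriber;
import org.axonframework.extensions.kafka.eventhandling.consumer.TopicPatternSubscriber;
import org.axonframework.extensions.kafka.eventhandling.consumer.TopicSubscriber;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.regex.Pattern;

class TopicSubscriberExample {

    void demonstrate() {
        // List-based subscription: the pre-4.8.0 behaviour, extended with one extra topic.
        // A mutable list is used so addTopic can grow it.
        TopicListSubscriber byList = new TopicListSubscriber(new ArrayList<>(Arrays.asList("Axon.Events")));
        byList.addTopic("Audit.Events");

        // Pattern-based subscription: matches any topic ending in ".Events".
        TopicSubscriber byPattern = new TopicPatternSubscriber(Pattern.compile(".*\\.Events"));

        System.out.println(byList.describe());                            // topics=[Axon.Events, Audit.Events]
        System.out.println(byPattern.subscribesToTopicName("Foo.Events")); // true
    }
}
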
34 | * @author Ben Kornmeier 35 | * @since 4.8.0 36 | */ 37 | public abstract class TopicSubscriberBuilder> { 38 | protected TopicSubscriber subscriber = new TopicListSubscriber(Collections.singletonList("Axon.Events")); 39 | 40 | /** 41 | * Allows methods defined in this class to return the concrete class for fluent api usage. 42 | * 43 | * @return the current instance of the T 44 | */ 45 | protected abstract T self(); 46 | 47 | /** 48 | * Returns the {@link TopicSubscriber} that is used to subscribe a {@link Consumer} to topic(s). 49 | * 50 | * @return the {@link TopicSubscriber} 51 | */ 52 | public TopicSubscriber getSubscriber() { 53 | return subscriber; 54 | } 55 | 56 | /** 57 | * Set the Kafka {@code topics} to read {@link org.axonframework.eventhandling.EventMessage}s from. Defaults to 58 | * {@code Axon.Events}. 59 | * 60 | * @param topics the Kafka {@code topics} to read {@link org.axonframework.eventhandling.EventMessage}s from 61 | * @return the current Builder instance, for fluent interfacing 62 | */ 63 | public T topics(List topics) { 64 | assertThat(topics, topicList -> Objects.nonNull(topicList) && !topicList.isEmpty(), 65 | "The topics may not be null or empty"); 66 | this.subscriber = new TopicListSubscriber(topics); 67 | return self(); 68 | } 69 | 70 | /** 71 | * Add a Kafka {@code topic} to read {@link org.axonframework.eventhandling.EventMessage}s from. 72 | * 73 | * @param topic the Kafka {@code topic} to add to the list of topics 74 | * @return the current Builder instance, for fluent interfacing 75 | */ 76 | public T addTopic(String topic) { 77 | assertThat(topic, name -> Objects.nonNull(name) && !"".equals(name), "The topic may not be null or empty"); 78 | if (isListBasedSubscription()) { 79 | ((TopicListSubscriber) subscriber).addTopic(topic); 80 | } else { 81 | throw new IllegalStateException("Cannot add topic to a pattern subscriber"); 82 | } 83 | return self(); 84 | } 85 | 86 | /** 87 | * Set the Kafka {@code pattern} to read {@link org.axonframework.eventhandling.EventMessage}s from. 88 | * 89 | * @param pattern the Kafka {@code pattern} to read {@link org.axonframework.eventhandling.EventMessage}s from 90 | * @return the current Builder instance, for fluent interfacing 91 | */ 92 | public T topicPattern(Pattern pattern) { 93 | assertNonNull(pattern, "The pattern may not be null"); 94 | this.subscriber = new TopicPatternSubscriber(pattern); 95 | return self(); 96 | } 97 | 98 | private boolean isListBasedSubscription() { 99 | return subscriber instanceof TopicListSubscriber; 100 | } 101 | } -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/Buffer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
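
TopicSubscriberBuilder is abstract and in practice is extended by the message source builders. The hypothetical subclass below exists only to make the fluent topic configuration visible; its name and generic binding are assumptions, not part of this repository:

import org.axonframework.extensions.kafka.eventhandling.consumer.TopicSubscriberBuilder;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.regex.Pattern;

// Hypothetical concrete builder, only used to illustrate the fluent API above.
class ExampleSourceBuilder extends TopicSubscriberBuilder<ExampleSourceBuilder> {

    @Override
    protected ExampleSourceBuilder self() {
        return this;
    }

    static void demonstrate() {
        ExampleSourceBuilder builder = new ExampleSourceBuilder()
                .topics(new ArrayList<>(Arrays.asList("Axon.Events")))
                .addTopic("Audit.Events");
        System.out.println(builder.getSubscriber().describe());

        // Switching to a pattern replaces the list-based subscriber; addTopic would now throw.
        builder.topicPattern(Pattern.compile(".*\\.Events"));
    }
}
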
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.slf4j.Logger; 20 | import org.slf4j.LoggerFactory; 21 | 22 | import java.util.Collection; 23 | import java.util.concurrent.TimeUnit; 24 | 25 | /** 26 | * Defines a buffer that waits for the space to become non-empty when retrieving an element, and wait for space to 27 | * become available in the buffer when storing an element. 28 | * 29 | * @param the type of the element contained in the buffer 30 | * @author Nakul Mishra 31 | * @author Steven van Beelen 32 | * @author Gerard Klijs 33 | * @since 4.0 34 | */ 35 | public interface Buffer { 36 | 37 | Logger logger = LoggerFactory.getLogger(Buffer.class); 38 | 39 | /** 40 | * Inserts the provided element in this buffer, waiting for space to become available if the buffer is full. 41 | * 42 | * @param e the element to insert in this buffer 43 | * @throws InterruptedException if interrupted while waiting to put the element 44 | */ 45 | void put(E e) throws InterruptedException; 46 | 47 | /** 48 | * Inserts the provided elements in this buffer, waiting for space to become available if the buffer is full. 49 | * 50 | * @param c the {@link Collection} of elements to be inserted in this buffer 51 | * @throws InterruptedException if interrupted while waiting to put any of the elements 52 | */ 53 | void putAll(Collection c) throws InterruptedException; 54 | 55 | /** 56 | * Retrieves and removes the first message of this buffer, waiting up to the specified wait time if necessary for a 57 | * message to become available. 58 | * 59 | * @param timeout how long to wait before giving up, in units of {@code unit} 60 | * @param unit a {@link TimeUnit} determining how to interpret the {@code timeout} parameter 61 | * @return the first message of this buffer, or {@code null} if the specified waiting time elapses before a message 62 | * is available 63 | * @throws InterruptedException if interrupted while waiting to poll an element 64 | */ 65 | E poll(long timeout, TimeUnit unit) throws InterruptedException; 66 | 67 | /** 68 | * Retrieves and removes the first messages of this buffer, waiting if necessary until a message becomes available. 69 | * 70 | * @return the first message of this buffer 71 | * @throws InterruptedException if interrupted while waiting to take the first element 72 | */ 73 | E take() throws InterruptedException; 74 | 75 | /** 76 | * Retrieves, but does not remove, the first message of this buffer, or returns {@code null} if this buffer is 77 | * empty. 78 | * 79 | * @return the first message in this buffer or {@code null} if the buffer is empty 80 | */ 81 | E peek(); 82 | 83 | /** 84 | * Returns the number of elements in this buffer. 85 | * 86 | * @return the number of elements in this buffer 87 | */ 88 | int size(); 89 | 90 | /** 91 | * Verify whether this buffer is empty or not. 92 | * 93 | * @return {@code true} if the buffer is empty and {@code false} if it isn't 94 | */ 95 | boolean isEmpty(); 96 | 97 | /** 98 | * Returns the number of additional elements that this buffer can ideally (in the absence of memory or resource 99 | * constraints) accept without blocking. This is always equal to the initial capacity of this buffer less the 100 | * current {@code size} of this buffer. 101 | *

102 | * Note that you cannot always tell if an attempt to insert an element will succeed by inspecting {@code 103 | * remainingCapacity} because it may be the case that another thread is about to insert or remove an element. 104 | * 105 | * @return the remaining capacity of this buffer 106 | */ 107 | int remainingCapacity(); 108 | 109 | /** 110 | * Removes all of the messages from this buffer. 111 | */ 112 | void clear(); 113 | 114 | /** 115 | * Can be used to set some exception originating from another thread, that should pop up using the buffer. 116 | * 117 | * @param exception the exception thrown from a thread that fills the buffer. 118 | */ 119 | default void setException(RuntimeException exception) { 120 | logger.warn("setException was called, but is not implemented to do something with it", exception); 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.apache.kafka.clients.consumer.OffsetAndTimestamp; 21 | import org.apache.kafka.common.TopicPartition; 22 | import org.axonframework.extensions.kafka.eventhandling.consumer.TopicSubscriber; 23 | 24 | import java.time.Instant; 25 | import java.util.HashMap; 26 | import java.util.List; 27 | import java.util.Map; 28 | import java.util.Optional; 29 | import javax.annotation.Nonnull; 30 | 31 | import static org.axonframework.extensions.kafka.eventhandling.consumer.ConsumerSeekUtil.topicPartitions; 32 | 33 | /** 34 | * Contains static util functions related to the Kafka consumer to find the correct positions. 
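
A sketch of the Buffer blocking semantics described above, written against the interface with a hypothetical Buffer<String> instance (the extension's own implementation is SortedKafkaMessageBuffer, which holds KafkaEventMessage elements):

import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.Buffer;

import java.util.concurrent.TimeUnit;

class BufferExample {

    void transfer(Buffer<String> buffer) throws InterruptedException {
        // put(...) blocks while the buffer is full.
        buffer.put("first-record");

        // poll(...) waits up to the given timeout and returns null when nothing arrived in time.
        String next = buffer.poll(100, TimeUnit.MILLISECONDS);
        if (next != null) {
            System.out.println("received " + next);
        }

        // take() would block until an element is available; peek() inspects without removing.
        System.out.println("remaining capacity: " + buffer.remainingCapacity());
    }
}
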
35 | * 36 | * @author Gerard Klijs 37 | * @since 4.8.0 38 | */ 39 | class ConsumerPositionsUtil { 40 | 41 | private ConsumerPositionsUtil() { 42 | //prevent instantiation 43 | } 44 | 45 | static Map getPositionsBasedOnTime( 46 | @Nonnull Consumer consumer, 47 | @Nonnull TopicSubscriber subscriber, 48 | @Nonnull Instant rawDefaultAt 49 | ) { 50 | List all = topicPartitions(consumer, subscriber); 51 | Map positions = new HashMap<>(); 52 | OffsetSupplier offsetSupplier = new OffsetSupplier(consumer, rawDefaultAt, all); 53 | all.forEach(assignedPartition -> { 54 | Long offset = offsetSupplier.getOffset(assignedPartition); 55 | //if it's 0, we otherwise miss the first event 56 | if (offset > 1) { 57 | positions.put(assignedPartition, offset - 1); 58 | } 59 | }); 60 | return positions; 61 | } 62 | 63 | static Map getHeadPositions( 64 | @Nonnull Consumer consumer, 65 | @Nonnull TopicSubscriber subscriber 66 | ) { 67 | List all = topicPartitions(consumer, subscriber); 68 | Map positions = new HashMap<>(); 69 | Map endOffsets = consumer.endOffsets(all); 70 | endOffsets.forEach((assignedPartition, offset) -> { 71 | //if it's 0, we otherwise miss the first event 72 | if (offset > 1) { 73 | positions.put(assignedPartition, offset - 1); 74 | } 75 | }); 76 | return positions; 77 | } 78 | 79 | private static class OffsetSupplier { 80 | 81 | private final Map partitionOffsetMap; 82 | private final Map endOffsets; 83 | 84 | private OffsetSupplier(Consumer consumer, Instant rawDefaultAt, List all) { 85 | long defaultAt = rawDefaultAt.toEpochMilli(); 86 | Map timestampsToSearch = new HashMap<>(); 87 | all.forEach(tp -> timestampsToSearch.put(tp, defaultAt)); 88 | partitionOffsetMap = consumer.offsetsForTimes(timestampsToSearch); 89 | endOffsets = consumer.endOffsets(all); 90 | } 91 | 92 | private Optional getDefaultOffset(TopicPartition assignedPartition) { 93 | return Optional.ofNullable(partitionOffsetMap.get(assignedPartition)) 94 | .map(OffsetAndTimestamp::offset); 95 | } 96 | 97 | private long getEndOffset(TopicPartition assignedPartition) { 98 | return endOffsets.get(assignedPartition); 99 | } 100 | 101 | private Long getOffset(TopicPartition assignedPartition) { 102 | return getDefaultOffset(assignedPartition).orElseGet(() -> getEndOffset(assignedPartition)); 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/KafkaMessageStream.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.axonframework.common.Registration; 20 | import org.axonframework.eventhandling.TrackedEventMessage; 21 | import org.axonframework.eventhandling.TrackingEventStream; 22 | import org.slf4j.Logger; 23 | import org.slf4j.LoggerFactory; 24 | 25 | import java.util.Optional; 26 | import java.util.concurrent.TimeUnit; 27 | 28 | import static org.axonframework.common.BuilderUtils.assertNonNull; 29 | 30 | /** 31 | * Create message stream from a specific Kafka topic. Messages are fetch in bulk and stored in an in-memory buffer. We 32 | * try to introduce some sort and stored them in a local buffer. Consumer position is tracked via {@link 33 | * KafkaTrackingToken}. Records are fetched from Kafka and stored in-memory buffer. 34 | *

35 | * This is not thread safe. 36 | * 37 | * @author Allard Buijze 38 | * @author Nakul Mishra 39 | * @since 4.0 40 | */ 41 | public class KafkaMessageStream implements TrackingEventStream { 42 | 43 | private static final Logger logger = LoggerFactory.getLogger(KafkaMessageStream.class); 44 | 45 | private final Buffer buffer; 46 | private final Registration closeHandler; 47 | private KafkaEventMessage peekedEvent; 48 | 49 | /** 50 | * Create a {@link TrackingEventStream} dedicated to {@link KafkaEventMessage}s. Uses the provided {@code buffer} to 51 | * retrieve event messages from. 52 | * 53 | * @param buffer the {@link KafkaEventMessage} {@link Buffer} containing the fetched messages 54 | * @param closeHandler the service {@link Registration} which fills the buffer. Will be canceled upon executing a 55 | * {@link #close()} 56 | */ 57 | @SuppressWarnings("WeakerAccess") 58 | public KafkaMessageStream(Buffer buffer, Registration closeHandler) { 59 | assertNonNull(buffer, "Buffer may not be null"); 60 | this.buffer = buffer; 61 | this.closeHandler = closeHandler; 62 | } 63 | 64 | @Override 65 | public Optional> peek() { 66 | return Optional.ofNullable( 67 | peekedEvent == null && !hasNextAvailable(0, TimeUnit.NANOSECONDS) ? null : peekedEvent.value() 68 | ); 69 | } 70 | 71 | @Override 72 | public boolean hasNextAvailable(int timeout, TimeUnit unit) { 73 | try { 74 | return peekedEvent != null || (peekedEvent = buffer.poll(timeout, unit)) != null; 75 | } catch (InterruptedException e) { 76 | logger.warn("Consumer thread was interrupted. Returning thread to event processor.", e); 77 | Thread.currentThread().interrupt(); 78 | return false; 79 | } 80 | } 81 | 82 | @Override 83 | public TrackedEventMessage nextAvailable() { 84 | try { 85 | return peekedEvent == null ? buffer.take().value() : peekedEvent.value(); 86 | } catch (InterruptedException e) { 87 | logger.warn("Consumer thread was interrupted. Returning thread to event processor.", e); 88 | Thread.currentThread().interrupt(); 89 | return null; 90 | } finally { 91 | peekedEvent = null; 92 | } 93 | } 94 | 95 | @Override 96 | public void close() { 97 | if (closeHandler != null) { 98 | closeHandler.close(); 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/KafkaRecordMetaData.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | /** 20 | * An interface for messages originating from Kafka capable of providing information about their source. 
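
Consuming from a KafkaMessageStream follows the regular TrackingEventStream contract. A minimal sketch, in which the one-second timeout is an arbitrary assumption:

import org.axonframework.eventhandling.TrackedEventMessage;
import org.axonframework.eventhandling.TrackingEventStream;

import java.util.concurrent.TimeUnit;

class StreamConsumptionExample {

    void drain(TrackingEventStream stream) throws InterruptedException {
        try {
            // Wait up to one second for the fetcher to fill the underlying buffer.
            while (stream.hasNextAvailable(1, TimeUnit.SECONDS)) {
                TrackedEventMessage<?> event = stream.nextAvailable();
                System.out.println("event payload: " + event.getPayload());
            }
        } finally {
            // Cancels the registration that feeds the buffer.
            stream.close();
        }
    }
}
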
21 | * 22 | * @param the type of body used for this record 23 | * @author Nakul Mishra 24 | * @author Steven van Beelen 25 | * @since 4.0 26 | */ 27 | public interface KafkaRecordMetaData { 28 | 29 | /** 30 | * The partition from which this record is received. 31 | * 32 | * @return an {@code int} defining the partition from which this record is received 33 | */ 34 | int partition(); 35 | 36 | /** 37 | * The position of the record in the corresponding Kafka {@code partition}. 38 | * 39 | * @return a {@code long} defining the position of the record in the corresponding Kafka {@code partition} 40 | */ 41 | long offset(); 42 | 43 | /** 44 | * The timestamp of the record. 45 | * 46 | * @return a {@code long} defining the timestamp of this record 47 | */ 48 | long timestamp(); 49 | 50 | /** 51 | * The value of this record. 52 | * 53 | * @return the value of this record of type {@code V} 54 | */ 55 | V value(); 56 | } 57 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/TopicPartitionDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import com.fasterxml.jackson.databind.DeserializationContext; 20 | import com.fasterxml.jackson.databind.KeyDeserializer; 21 | import org.apache.kafka.common.TopicPartition; 22 | import org.axonframework.serialization.SerializationException; 23 | 24 | /** 25 | * Custom {@link KeyDeserializer} used to deserialize the {@link TopicPartition}. 
26 | * 27 | * @author leechedan 28 | * @since 4.0 29 | */ 30 | public class TopicPartitionDeserializer extends KeyDeserializer { 31 | 32 | private static final char HYPHEN = '-'; 33 | 34 | @Override 35 | public TopicPartition deserializeKey(String key, DeserializationContext context) { 36 | if (null == key || key.lastIndexOf(HYPHEN) < 1) { 37 | return null; 38 | } 39 | 40 | int hyphenIndex = key.lastIndexOf(HYPHEN); 41 | String positionString = key.substring(hyphenIndex + 1); 42 | 43 | int position; 44 | try { 45 | position = Integer.parseInt(positionString); 46 | } catch (NumberFormatException e) { 47 | throw new SerializationException(String.format( 48 | "Cannot parse the position of TopicPartition from json:[%s].", key 49 | )); 50 | } 51 | 52 | if (position < 0) { 53 | throw new SerializationException("The position of the TopicPartition should be greater than zero."); 54 | } 55 | 56 | return new TopicPartition(key.substring(0, hyphenIndex), position); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/TrackingRecordConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.apache.kafka.clients.consumer.ConsumerRecord; 20 | import org.apache.kafka.clients.consumer.ConsumerRecords; 21 | import org.axonframework.extensions.kafka.eventhandling.KafkaMessageConverter; 22 | import org.axonframework.extensions.kafka.eventhandling.consumer.RecordConverter; 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | import java.util.ArrayList; 27 | import java.util.List; 28 | 29 | import static org.axonframework.common.Assert.nonNull; 30 | 31 | /** 32 | * {@link RecordConverter} instances which keeps track of the converted {@link ConsumerRecords} through a {@link 33 | * KafkaTrackingToken}. Consequently it converts the ConsumerRecords in to {@link KafkaEventMessage} instances. 34 | * 35 | * @param the key of the Kafka {@link ConsumerRecords} to be converted 36 | * @param the value type of Kafka {@link ConsumerRecords} to be converted 37 | * @author Steven van Beelen 38 | * @since 4.0 39 | */ 40 | public class TrackingRecordConverter implements RecordConverter { 41 | 42 | private static final Logger logger = LoggerFactory.getLogger(TrackingRecordConverter.class); 43 | 44 | private final KafkaMessageConverter messageConverter; 45 | private KafkaTrackingToken currentToken; 46 | 47 | /** 48 | * Instantiates a {@link TrackingRecordConverter}, using the {@link KafkaMessageConverter} to convert {@link 49 | * ConsumerRecord} instances in to an {@link org.axonframework.eventhandling.EventMessage} instances. 
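
Since TopicPartitionDeserializer is a Jackson KeyDeserializer, it is registered on an ObjectMapper wherever TopicPartition occurs as a map key, for instance when reading back a serialized tracking token. A sketch of that registration:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import org.apache.kafka.common.TopicPartition;
import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.TopicPartitionDeserializer;

class TopicPartitionJacksonExample {

    ObjectMapper configuredMapper() {
        SimpleModule module = new SimpleModule();
        // Lets Jackson turn map keys such as "Axon.Events-0" back into a TopicPartition.
        module.addKeyDeserializer(TopicPartition.class, new TopicPartitionDeserializer());
        return new ObjectMapper().registerModule(module);
    }
}
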
As it 50 | * traverses the {@link ConsumerRecords} it will advance the provided {@code token}. An {@link 51 | * IllegalArgumentException} will be thrown if the provided {@code token} is {@code null}. 52 | * 53 | * @param messageConverter the {@link KafkaMessageConverter} used to convert a {@link ConsumerRecord} in to an 54 | * {@link org.axonframework.eventhandling.EventMessage} 55 | * @param token the {@link KafkaTrackingToken} to advance for every fetched {@link ConsumerRecord} 56 | */ 57 | public TrackingRecordConverter(KafkaMessageConverter messageConverter, KafkaTrackingToken token) { 58 | this.messageConverter = messageConverter; 59 | this.currentToken = nonNull(token, () -> "Token may not be null"); 60 | } 61 | 62 | /** 63 | * {@inheritDoc} 64 | *

65 | * {@code E} is defined as a {@link KafkaEventMessage} for this implementation. Every {@link ConsumerRecord} will 66 | * advance the defined {@code token}'s position further with the ConsumerRecord's {@link ConsumerRecord#partition()} 67 | * and {@link ConsumerRecord#offset()}. 68 | */ 69 | @Override 70 | public List convert(ConsumerRecords records) { 71 | List eventMessages = new ArrayList<>(records.count()); 72 | for (ConsumerRecord consumerRecord : records) { 73 | messageConverter.readKafkaMessage(consumerRecord).ifPresent(eventMessage -> { 74 | KafkaTrackingToken nextToken = 75 | currentToken.advancedTo(consumerRecord.topic(), 76 | consumerRecord.partition(), 77 | consumerRecord.offset()); 78 | logger.debug("Advancing token from [{}] to [{}]", currentToken, nextToken); 79 | 80 | currentToken = nextToken; 81 | eventMessages.add(KafkaEventMessage.from(eventMessage, consumerRecord, currentToken)); 82 | }); 83 | } 84 | return eventMessages; 85 | } 86 | 87 | /** 88 | * Return the current state of the {@link KafkaTrackingToken} this converter updates 89 | * 90 | * @return the current state of the {@link KafkaTrackingToken} this converter updates 91 | */ 92 | public KafkaTrackingToken currentToken() { 93 | return currentToken; 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/TrackingTokenConsumerRebalanceListener.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 21 | import org.apache.kafka.common.TopicPartition; 22 | import org.axonframework.extensions.kafka.eventhandling.consumer.ConsumerSeekUtil; 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | import java.lang.invoke.MethodHandles; 27 | import java.util.Collection; 28 | import java.util.List; 29 | import java.util.Map; 30 | import java.util.function.Supplier; 31 | 32 | /** 33 | * A {@link ConsumerRebalanceListener} which upon {@link ConsumerRebalanceListener#onPartitionsAssigned(Collection)} 34 | * will perform a {@link Consumer#seek(TopicPartition, long)} using the partition offsets in the given {@link 35 | * KafkaTrackingToken}. 36 | *
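
A sketch of how the TrackingRecordConverter advances its token while converting, assuming a KafkaMessageConverter instance is available (for example the extension's DefaultKafkaMessageConverter) and that starting from an empty token is acceptable:

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.axonframework.extensions.kafka.eventhandling.KafkaMessageConverter;
import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.KafkaEventMessage;
import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.KafkaTrackingToken;
import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.TrackingRecordConverter;

import java.util.List;

class TrackingConversionExample {

    void convert(KafkaMessageConverter<String, byte[]> messageConverter,
                 ConsumerRecords<String, byte[]> records) {
        // Start from an empty token; real code would typically resume from a stored token instead.
        TrackingRecordConverter<String, byte[]> converter =
                new TrackingRecordConverter<>(messageConverter, KafkaTrackingToken.emptyToken());

        List<KafkaEventMessage> messages = converter.convert(records);

        // The token now reflects the partition and offset of every converted record.
        System.out.println(messages.size() + " events, token: " + converter.currentToken());
    }
}
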

37 | * This implementation ensures that Axon is in charge of event handling progress by always starting from the beginning 38 | * of the stream for unknown partitions. This approach follows how a {@link org.axonframework.eventhandling.TrackingEventProcessor} 39 | * deals with unknown progress in any {@link org.axonframework.eventhandling.TrackingToken}. 40 | * 41 | * @param the key of the records the {@link Consumer} polls 42 | * @param the value type of the records the {@link Consumer} polls 43 | * @author Steven van Beelen 44 | * @since 4.0 45 | * @deprecated functionality moved to {@link ConsumerSeekUtil#seekToCurrentPositions(Consumer, 46 | * Supplier, List)} when group id was removed from the consumer. 47 | */ 48 | @Deprecated 49 | @SuppressWarnings("squid:S1133") //removing would be a breaking change and can only be done in a major release 50 | public class TrackingTokenConsumerRebalanceListener implements ConsumerRebalanceListener { 51 | 52 | private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); 53 | 54 | private final Consumer consumer; 55 | private final Supplier tokenSupplier; 56 | 57 | /** 58 | * Create a {@link ConsumerRebalanceListener} which uses a {@link KafkaTrackingToken} through the given {@code 59 | * tokenSupplier} to consciously {@link Consumer#seek(TopicPartition, long)} the offsets of the partitions assigned 60 | * to the given {@code consumer}. 61 | * 62 | * @param consumer the {@link Consumer} used to call {@link Consumer#seek(TopicPartition, long)} on 63 | * @param tokenSupplier the {@link Supplier} of a {@link KafkaTrackingToken}. This should provide the most recent 64 | * progress of the given {@code consumer} 65 | */ 66 | public TrackingTokenConsumerRebalanceListener(Consumer consumer, Supplier tokenSupplier) { 67 | this.consumer = consumer; 68 | this.tokenSupplier = tokenSupplier; 69 | } 70 | 71 | @Override 72 | public void onPartitionsRevoked(Collection partitions) { 73 | // Not implemented 74 | } 75 | 76 | /** 77 | * {@inheritDoc} 78 | *

79 | * This implementation will use {@link Consumer#seek(TopicPartition, long)} for all given {@code assignedPartitions} 80 | * using the offsets known in the {@link KafkaTrackingToken}, retrieved through the {@code tokenSupplier}. 81 | *

82 | * If no offset is known for a given {@link TopicPartition} then we enforce the offset to {@code 0} to ensure all 83 | * known records for the new partition are processed. This could occur if polling is started for the first time or 84 | * if the number of partitions for the {@link Consumer}s topic is administratively adjusted. 85 | */ 86 | @Override 87 | public void onPartitionsAssigned(Collection assignedPartitions) { 88 | KafkaTrackingToken currentToken = tokenSupplier.get(); 89 | assignedPartitions.forEach(assignedPartition -> { 90 | Map tokenPartitionPositions = currentToken.getPositions(); 91 | 92 | long offset = 0L; 93 | if (tokenPartitionPositions.containsKey(assignedPartition)) { 94 | offset = tokenPartitionPositions.get(assignedPartition) + 1; 95 | } 96 | 97 | logger.info("Seeking topic-partition [{}] with offset [{}]", assignedPartition, offset); 98 | consumer.seek(assignedPartition, offset); 99 | }); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/producer/ConfirmationMode.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.producer; 18 | 19 | /** 20 | * Modes for publishing Axon Event Messages to Kafka. 21 | *

<ul> 22 | * <li>TRANSACTIONAL: use Kafka transactions while sending messages</li> 23 | * <li>WAIT_FOR_ACK: send messages and wait for acknowledgment</li> 24 | * <li>NONE: Fire and forget</li> 25 | * </ul>
26 | * 27 | * @author Nakul Mishra 28 | * @since 4.0 29 | */ 30 | public enum ConfirmationMode { 31 | 32 | /** 33 | * Indicates a confirmation mode which uses Kafka transactions whilst sending messages. 34 | */ 35 | TRANSACTIONAL, 36 | 37 | /** 38 | * Indicates a confirmation mode which sends messages and waits for consumption acknowledgements. 39 | */ 40 | WAIT_FOR_ACK, 41 | 42 | /** 43 | * Indicates a confirmation mode resembling fire and forget. 44 | */ 45 | NONE; 46 | 47 | /** 48 | * Verify whether {@code this} confirmation mode is of type {@link #TRANSACTIONAL}. 49 | * 50 | * @return {@code true} if {@code this} confirmation mode matches {@link #TRANSACTIONAL}, {@code false} if it 51 | * doesn't 52 | */ 53 | public boolean isTransactional() { 54 | return this == TRANSACTIONAL; 55 | } 56 | 57 | /** 58 | * Verify whether {@code this} confirmation mode is of type {@link #WAIT_FOR_ACK}. 59 | * 60 | * @return {@code true} if {@code this} confirmation mode matches {@link #WAIT_FOR_ACK}, {@code false} if it doesn't 61 | */ 62 | public boolean isWaitForAck() { 63 | return this == WAIT_FOR_ACK; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/producer/ProducerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.producer; 18 | 19 | import org.apache.kafka.clients.producer.Producer; 20 | 21 | /** 22 | * A functional interface towards building {@link Producer} instances. 23 | * 24 | * @param the key type of a build {@link Producer} instance 25 | * @param the value type of a build {@link Producer} instance 26 | * @author Nakul Mishra 27 | * @author Steven van Beelen 28 | * @since 4.0 29 | */ 30 | public interface ProducerFactory { 31 | 32 | /** 33 | * Create a {@link Producer}. 34 | * 35 | * @return a {@link Producer} 36 | */ 37 | Producer createProducer(); 38 | 39 | /** 40 | * The {@link ConfirmationMode} all created {@link Producer} instances should comply to. Defaults to {@link 41 | * ConfirmationMode#NONE}. 42 | * 43 | * @return the configured confirmation mode 44 | */ 45 | default ConfirmationMode confirmationMode() { 46 | return ConfirmationMode.NONE; 47 | } 48 | 49 | /** 50 | * Closes all {@link Producer} instances created by this factory. 51 | */ 52 | void shutDown(); 53 | } 54 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/producer/TopicResolver.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. 
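
A bare-bones ProducerFactory implementation is enough to satisfy the contract above; the extension itself ships DefaultProducerFactory for real use. This sketch wraps the plain Kafka producer, keeps the default ConfirmationMode.NONE, and uses illustrative configuration values:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.axonframework.extensions.kafka.eventhandling.producer.ProducerFactory;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class SimpleProducerFactory implements ProducerFactory<String, byte[]> {

    private final List<Producer<String, byte[]>> producers = new ArrayList<>();

    @Override
    public Producer<String, byte[]> createProducer() {
        // Plain Kafka producer properties; broker address and serializers are assumptions.
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        Producer<String, byte[]> producer = new KafkaProducer<>(config);
        producers.add(producer);
        return producer;
    }

    @Override
    public void shutDown() {
        // Closes every Producer this factory handed out; confirmationMode() stays at NONE.
        producers.forEach(Producer::close);
    }
}
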
Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.producer; 18 | 19 | import org.axonframework.eventhandling.EventMessage; 20 | 21 | import java.util.Optional; 22 | import java.util.function.Function; 23 | 24 | /** 25 | * Interface to determine if an {@code EventMessage} should be published to Kafka, and if so to which topic. If the 26 | * result from the call is {@code Optional.empty()} is will not be published, else the result will be used for the 27 | * topic. 28 | * 29 | * @author Gerard Klijs 30 | * @since 4.6.0 31 | */ 32 | @FunctionalInterface 33 | public interface TopicResolver extends Function, Optional> { 34 | 35 | /** 36 | * resolve an {@code EventMessage} to an optional topic to publish the event to 37 | * 38 | * @param event an {@code EventMessage} 39 | * @return the optional topic, when empty the event message will not be published 40 | */ 41 | default Optional resolve(EventMessage event) { 42 | return this.apply(event); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/tokenstore/TokenStoreInitializationException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.tokenstore; 18 | 19 | import org.axonframework.common.AxonException; 20 | 21 | /** 22 | * Exception thrown when failing to initialize the token store. 23 | * 24 | * @author Gerard Klijs 25 | * @since 4.6.0 26 | */ 27 | public class TokenStoreInitializationException extends AxonException { 28 | 29 | /** 30 | * Initializes the exception using the given {@code message}. 31 | * 32 | * @param message The message describing the exception 33 | */ 34 | public TokenStoreInitializationException(String message) { 35 | super(message); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/tokenstore/TokenUpdateDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. 
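
Because TopicResolver is a functional interface, routing rules can be expressed as a lambda. The package prefix and topic name below are assumptions chosen only to illustrate the Optional contract:

import org.axonframework.eventhandling.EventMessage;
import org.axonframework.extensions.kafka.eventhandling.producer.TopicResolver;

import java.util.Optional;

class TopicRoutingExample {

    TopicResolver exampleResolver() {
        return event -> {
            // Returning Optional.empty() suppresses publication of this event entirely.
            if (event.getPayloadType().getName().startsWith("com.example.internal")) {
                return Optional.empty();
            }
            // Any other event is routed to a fixed topic.
            return Optional.of("Axon.Events");
        };
    }
}
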
Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.tokenstore; 18 | 19 | import org.apache.kafka.common.header.Headers; 20 | import org.apache.kafka.common.serialization.Deserializer; 21 | 22 | /** 23 | * Kafka deserializer used for the {@link KafkaTokenStore}. 24 | * 25 | * @author Gerard Klijs 26 | * @since 4.6.0 27 | */ 28 | public class TokenUpdateDeserializer implements Deserializer { 29 | 30 | /** 31 | * This method should not be used, instead {@link #deserialize(String, Headers, byte[]) deserialize}, with headers 32 | * should be used. 33 | * 34 | * @param topic the topic the bytes are read from, part of the interface, currently only used for logging. 35 | * @param bytes the bytes received from the Kafka broker. 36 | * @return a {@link UnsupportedOperationException} exception 37 | */ 38 | @Override 39 | public TokenUpdate deserialize(String topic, byte[] bytes) { 40 | throw new UnsupportedOperationException("deserialize should be called also using the headers"); 41 | } 42 | 43 | /** 44 | * Deserializes the bytes to a {@link TokenUpdate} object 45 | * 46 | * @param topic the topic the bytes are read from, part of the interface, currently only used for logging. 47 | * @param headers the headers received from the Kafka broker. 48 | * @param data the bytes received from the Kafka broker. 49 | * @return a {@link TokenUpdate} object 50 | */ 51 | @Override 52 | public TokenUpdate deserialize(String topic, Headers headers, byte[] data) { 53 | return new TokenUpdate(headers, data); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/tokenstore/TokenUpdateSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.tokenstore; 18 | 19 | import org.apache.kafka.common.header.Headers; 20 | import org.apache.kafka.common.serialization.Serializer; 21 | 22 | /** 23 | * Kafka serializer used for the {@link TokenUpdate}. 
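
Because the single-argument overloads throw, the token-update (de)serializers are only useful through their header-aware variants, which Kafka invokes when they are configured on a consumer or producer. A sketch of calling the deserializer directly with the headers of a consumed record:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.axonframework.extensions.kafka.eventhandling.tokenstore.TokenUpdate;
import org.axonframework.extensions.kafka.eventhandling.tokenstore.TokenUpdateDeserializer;

class TokenUpdateReadExample {

    TokenUpdate read(ConsumerRecord<String, byte[]> record) {
        // The headers carry the update's metadata, the value carries the serialized token bytes.
        return new TokenUpdateDeserializer().deserialize(record.topic(), record.headers(), record.value());
    }
}
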
24 | * 25 | * @author Gerard Klijs 26 | * @since 4.6.0 27 | */ 28 | public class TokenUpdateSerializer implements Serializer<TokenUpdate> { 29 | 30 | /** 31 | * This method should not be used; {@link #serialize(String, Headers, TokenUpdate) serialize} with 32 | * headers should be used instead. 33 | * 34 | * @param topic topic the bytes are written to, part of the interface, currently not used 35 | * @param tokenUpdate the token update object to send to Kafka 36 | * @throws UnsupportedOperationException always, since serializing without the headers is not supported 37 | */ 38 | @Override 39 | @SuppressWarnings("squid:S1168") //needs to return null to work as tombstone 40 | public byte[] serialize(String topic, TokenUpdate tokenUpdate) { 41 | throw new UnsupportedOperationException("serialize should be called also using the headers"); 42 | } 43 | 44 | /** 45 | * Serializes the {@code tokenUpdate} to bytes. 46 | * 47 | * @param topic topic the bytes are written to, part of the interface, currently not used 48 | * @param headers the Kafka headers, to which the token's metadata is added 49 | * @param tokenUpdate the token update object to send to Kafka 50 | * @return the bytes to add to the Kafka record 51 | */ 52 | @Override 53 | public byte[] serialize(String topic, Headers headers, TokenUpdate tokenUpdate) { 54 | tokenUpdate.setHeaders(headers); 55 | return tokenUpdate.getToken(); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /kafka/src/main/resources/META-INF/spring-devtools.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2021. Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | restart.include.axon-kafka=axon-kafka-${project.version}.jar 17 | restart.include.axon-kafka-spring-boot-autoconfigure=axon-kafka-spring-boot-autoconfigure-${project.version}.jar 18 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/configuration/KafkaMessageSourceConfigurerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.configuration; 18 | 19 | import org.axonframework.config.Configuration; 20 | import org.axonframework.config.DefaultConfigurer; 21 | import org.axonframework.extensions.kafka.eventhandling.consumer.subscribable.SubscribableKafkaMessageSource; 22 | import org.junit.jupiter.api.*; 23 | import org.junit.jupiter.api.extension.*; 24 | import org.mockito.*; 25 | import org.mockito.junit.jupiter.*; 26 | 27 | import static org.mockito.Mockito.*; 28 | 29 | /** 30 | * Test classes verifying registered message sources are started and closed through the {@link 31 | * org.axonframework.config.ModuleConfiguration} API. 32 | * 33 | * @author Steven van Beelen 34 | */ 35 | @ExtendWith(MockitoExtension.class) 36 | class KafkaMessageSourceConfigurerTest { 37 | 38 | private Configuration configuration; 39 | 40 | private final KafkaMessageSourceConfigurer testSubject = new KafkaMessageSourceConfigurer(); 41 | 42 | @BeforeEach 43 | void setUp() { 44 | configuration = DefaultConfigurer.defaultConfiguration() 45 | .buildConfiguration(); 46 | } 47 | 48 | @Test 49 | void testStartInitiatesRegisteredSubscribableSources( 50 | @Mock SubscribableKafkaMessageSource sourceOne, 51 | @Mock SubscribableKafkaMessageSource sourceTwo 52 | ) { 53 | testSubject.configureSubscribableSource(conf -> sourceOne); 54 | testSubject.configureSubscribableSource(conf -> sourceTwo); 55 | 56 | testSubject.initialize(configuration); 57 | configuration.start(); 58 | 59 | verify(sourceOne).start(); 60 | verify(sourceTwo).start(); 61 | } 62 | 63 | @Test 64 | void testShutdownClosesRegisteredSubscribableSources( 65 | @Mock SubscribableKafkaMessageSource sourceOne, 66 | @Mock SubscribableKafkaMessageSource sourceTwo 67 | ) { 68 | testSubject.configureSubscribableSource(conf -> sourceOne); 69 | testSubject.configureSubscribableSource(conf -> sourceTwo); 70 | 71 | testSubject.initialize(configuration); 72 | configuration.shutdown(); 73 | 74 | verify(sourceOne).close(); 75 | verify(sourceTwo).close(); 76 | } 77 | } -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/benchmark/SimpleRandom.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.benchmark; 18 | 19 | import java.util.concurrent.atomic.AtomicLong; 20 | 21 | /** 22 | * @author Pavel Tcholakov. 
23 | * @see JCTools 24 | */ 25 | final class SimpleRandom { 26 | 27 | private final static long multiplier = 0x5DEECE66DL; 28 | private final static long addend = 0xBL; 29 | private final static long mask = (1L << 48) - 1; 30 | private static final AtomicLong seq = new AtomicLong(-715159705); 31 | private long seed; 32 | 33 | SimpleRandom() { 34 | seed = System.nanoTime() + seq.getAndAdd(129); 35 | } 36 | 37 | public int next() { 38 | long nextSeed = (seed * multiplier + addend) & mask; 39 | seed = nextSeed; 40 | return ((int) (nextSeed >>> 17)) & 0x7FFFFFFF; 41 | } 42 | } -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerRecordConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.ConsumerRecord; 20 | import org.apache.kafka.clients.producer.ProducerRecord; 21 | import org.axonframework.eventhandling.EventMessage; 22 | import org.axonframework.extensions.kafka.eventhandling.KafkaMessageConverter; 23 | 24 | import java.util.Optional; 25 | 26 | import static org.axonframework.eventhandling.GenericEventMessage.asEventMessage; 27 | 28 | /** 29 | * A {@link KafkaMessageConverter} implementation solely intended to test message consumption. 30 | * 31 | * @author Steven van Beelen 32 | */ 33 | class ConsumerRecordConverter implements KafkaMessageConverter { 34 | 35 | @Override 36 | public ProducerRecord createKafkaMessage(EventMessage eventMessage, String topic) { 37 | throw new UnsupportedOperationException(); 38 | } 39 | 40 | @Override 41 | public Optional> readKafkaMessage(ConsumerRecord consumerRecord) { 42 | return Optional.of(asEventMessage(consumerRecord.value())); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/DefaultConsumerFactoryIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.apache.kafka.clients.producer.Producer; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | import org.axonframework.common.AxonConfigurationException; 23 | import org.axonframework.extensions.kafka.eventhandling.producer.ProducerFactory; 24 | import org.axonframework.extensions.kafka.eventhandling.util.KafkaAdminUtils; 25 | import org.axonframework.extensions.kafka.eventhandling.util.KafkaContainerTest; 26 | import org.junit.jupiter.api.*; 27 | 28 | import java.util.Collections; 29 | 30 | import static org.axonframework.extensions.kafka.eventhandling.util.ConsumerConfigUtil.DEFAULT_GROUP_ID; 31 | import static org.axonframework.extensions.kafka.eventhandling.util.ConsumerConfigUtil.minimal; 32 | import static org.axonframework.extensions.kafka.eventhandling.util.KafkaTestUtils.getRecords; 33 | import static org.axonframework.extensions.kafka.eventhandling.util.ProducerConfigUtil.producerFactory; 34 | import static org.junit.jupiter.api.Assertions.*; 35 | import static org.mockito.Mockito.*; 36 | 37 | /** 38 | * Tests for the {@link DefaultConsumerFactory}, asserting construction and utilization of the class. 39 | * 40 | * @author Nakul Mishra 41 | * @author Steven van Beelen 42 | */ 43 | class DefaultConsumerFactoryIntegrationTest extends KafkaContainerTest { 44 | 45 | private static final String TEST_TOPIC = "testCreatedConsumer_ValidConfig_CanCommunicateToKafka"; 46 | 47 | private ProducerFactory producerFactory; 48 | private Consumer testConsumer; 49 | 50 | @BeforeAll 51 | static void before() { 52 | KafkaAdminUtils.createTopics(getBootstrapServers(), TEST_TOPIC); 53 | } 54 | 55 | @AfterAll 56 | public static void after() { 57 | KafkaAdminUtils.deleteTopics(getBootstrapServers(), TEST_TOPIC); 58 | } 59 | 60 | @BeforeEach 61 | void setUp() { 62 | producerFactory = producerFactory(getBootstrapServers()); 63 | testConsumer = mock(Consumer.class); 64 | } 65 | 66 | @AfterEach 67 | void tearDown() { 68 | producerFactory.shutDown(); 69 | testConsumer.close(); 70 | } 71 | 72 | @Test 73 | void testCreateConsumerInvalidConfig() { 74 | assertThrows(AxonConfigurationException.class, () -> new DefaultConsumerFactory<>(null)); 75 | } 76 | 77 | @Test 78 | void testCreatedConsumerValidConfigCanCommunicateToKafka() { 79 | String testTopic = "testCreatedConsumer_ValidConfig_CanCommunicateToKafka"; 80 | 81 | Producer testProducer = producerFactory.createProducer(); 82 | testProducer.send(new ProducerRecord<>(testTopic, 0, null, null, "foo")); 83 | testProducer.flush(); 84 | 85 | ConsumerFactory testSubject = new DefaultConsumerFactory<>(minimal(KAFKA_CONTAINER 86 | .getBootstrapServers())); 87 | testConsumer = testSubject.createConsumer(DEFAULT_GROUP_ID); 88 | testConsumer.subscribe(Collections.singleton(testTopic)); 89 | 90 | assertEquals(1, getRecords(testConsumer).count()); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/KafkaTrackingTokenSerializationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.apache.kafka.common.TopicPartition; 20 | import org.axonframework.eventhandling.ReplayToken; 21 | import org.axonframework.eventhandling.TrackingToken; 22 | import org.axonframework.extensions.kafka.utils.TestSerializer; 23 | import org.junit.*; 24 | import org.junit.runner.*; 25 | import org.junit.runners.*; 26 | 27 | import java.util.Collection; 28 | 29 | import static java.util.Collections.singletonMap; 30 | import static org.axonframework.extensions.kafka.eventhandling.consumer.streamable.KafkaTrackingToken.newInstance; 31 | import static org.junit.jupiter.api.Assertions.*; 32 | 33 | /** 34 | * Test class validating the {@link KafkaTrackingToken} can be serialized and deserialized with a {@link 35 | * org.axonframework.serialization.Serializer}. 36 | * 37 | * @author leechedan 38 | */ 39 | @RunWith(Parameterized.class) 40 | public class KafkaTrackingTokenSerializationTest { 41 | 42 | private static final String TEST_TOPIC = "topic"; 43 | private static final int TEST_PARTITION = 0; 44 | private static final TopicPartition TEST_TOPIC_PARTITION = new TopicPartition(TEST_TOPIC, TEST_PARTITION); 45 | 46 | private final TestSerializer serializer; 47 | 48 | public KafkaTrackingTokenSerializationTest(TestSerializer serializer) { 49 | this.serializer = serializer; 50 | } 51 | 52 | @Parameterized.Parameters(name = "{index} {0}") 53 | public static Collection serializers() { 54 | return TestSerializer.all(); 55 | } 56 | 57 | @Test 58 | public void testReplayTokenShouldBeSerializable() { 59 | ReplayToken tokenReset = new ReplayToken(nonEmptyToken(TEST_TOPIC_PARTITION, 0L)); 60 | KafkaTrackingToken tokenStart = nonEmptyToken(TEST_TOPIC_PARTITION, 1L); 61 | ReplayToken replayToken = new ReplayToken(tokenReset, tokenStart); 62 | String serializeReplayToken = serializer.serialize(replayToken); 63 | TrackingToken deserializeReplayToken = serializer.deserialize(serializeReplayToken, ReplayToken.class); 64 | assertEquals(deserializeReplayToken, replayToken); 65 | } 66 | 67 | @Test 68 | public void testTokenShouldBeSerializable() { 69 | KafkaTrackingToken token = nonEmptyToken(TEST_TOPIC_PARTITION, 0L); 70 | String serializeCopy = serializer.serialize(token); 71 | KafkaTrackingToken deserializeCopy = serializer.deserialize(serializeCopy, KafkaTrackingToken.class); 72 | assertEquals(deserializeCopy, token); 73 | } 74 | 75 | @SuppressWarnings("SameParameterValue") 76 | private static KafkaTrackingToken nonEmptyToken(TopicPartition topic, Long pos) { 77 | return newInstance(singletonMap(topic, pos)); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/TopicPartitionDeserializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. 
Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.consumer.streamable; 18 | 19 | import org.apache.kafka.common.TopicPartition; 20 | import org.junit.*; 21 | 22 | import java.util.Arrays; 23 | import java.util.List; 24 | 25 | import static org.junit.jupiter.api.Assertions.*; 26 | 27 | /** 28 | * Tests for the {@link TopicPartitionDeserializer}. 29 | * 30 | * @author leechedan 31 | */ 32 | public class TopicPartitionDeserializerTest { 33 | 34 | private static final List TOPIC_PARTITIONS = Arrays.asList( 35 | new TopicPartition("local", 0), 36 | new TopicPartition("local-", 1), 37 | new TopicPartition("local-event", 100) 38 | ); 39 | 40 | private final TopicPartitionDeserializer testSubject = new TopicPartitionDeserializer(); 41 | 42 | @Test 43 | public void testDeserializeShouldSuccess() { 44 | TOPIC_PARTITIONS.forEach( 45 | item -> assertEquals(item, testSubject.deserializeKey(item.toString(), null), item + " fail") 46 | ); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/producer/KafkaPublisherBuilderTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.producer; 18 | 19 | import org.junit.jupiter.api.*; 20 | import org.junit.jupiter.api.extension.*; 21 | import org.mockito.*; 22 | import org.mockito.junit.jupiter.*; 23 | 24 | import static org.junit.jupiter.api.Assertions.*; 25 | 26 | @ExtendWith(MockitoExtension.class) 27 | class KafkaPublisherBuilderTest { 28 | 29 | @Mock 30 | private ProducerFactory producerFactory; 31 | 32 | @Test 33 | void testKafkaPublisherInitialisationShouldNotThrowException() { 34 | assertDoesNotThrow(() -> KafkaPublisher.builder() 35 | .producerFactory(producerFactory) 36 | .build()); 37 | } 38 | } -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/tokenstore/TokenUpdateDeserializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. 
Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.tokenstore; 18 | 19 | import org.apache.kafka.common.header.Headers; 20 | import org.apache.kafka.common.header.internals.RecordHeaders; 21 | import org.axonframework.extensions.kafka.eventhandling.HeaderUtils; 22 | import org.junit.jupiter.api.*; 23 | 24 | import java.time.Instant; 25 | import java.util.UUID; 26 | 27 | import static org.junit.jupiter.api.Assertions.*; 28 | 29 | /** 30 | * Test class validating the {@link TokenUpdateSerializer}. 31 | * 32 | * @author Gerard Klijs 33 | */ 34 | class TokenUpdateDeserializerTest { 35 | 36 | @Test 37 | void testDeserializeUnsupportedOperation() { 38 | TokenUpdateDeserializer deserializer = new TokenUpdateDeserializer(); 39 | byte[] bytes = new byte[0]; 40 | assertThrows(UnsupportedOperationException.class, () -> deserializer.deserialize("topic", bytes)); 41 | } 42 | 43 | @Test 44 | void testDeserializerMostlyEmpty() { 45 | TokenUpdateDeserializer deserializer = new TokenUpdateDeserializer(); 46 | byte[] bytes = new byte[0]; 47 | Headers headers = new RecordHeaders(); 48 | HeaderUtils.addHeader(headers, "id", UUID.randomUUID()); 49 | Instant now = Instant.now(); 50 | HeaderUtils.addHeader(headers, "timestamp", now.toEpochMilli()); 51 | TokenUpdate update = deserializer.deserialize("topic", headers, bytes); 52 | assertNotNull(update); 53 | assertEquals(now.toEpochMilli(), update.getTimestamp().toEpochMilli()); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/tokenstore/TokenUpdateSerializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2022. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.tokenstore; 18 | 19 | import org.apache.kafka.common.header.Headers; 20 | import org.apache.kafka.common.header.internals.RecordHeaders; 21 | import org.axonframework.eventhandling.GlobalSequenceTrackingToken; 22 | import org.axonframework.eventhandling.TrackingToken; 23 | import org.axonframework.eventhandling.tokenstore.AbstractTokenEntry; 24 | import org.axonframework.eventhandling.tokenstore.GenericTokenEntry; 25 | import org.axonframework.extensions.kafka.eventhandling.HeaderUtils; 26 | import org.axonframework.serialization.json.JacksonSerializer; 27 | import org.junit.jupiter.api.*; 28 | import org.junit.jupiter.api.extension.*; 29 | import org.mockito.junit.jupiter.*; 30 | 31 | import static org.junit.jupiter.api.Assertions.*; 32 | 33 | /** 34 | * Test class validating the {@link TokenUpdateDeserializer}. 35 | * 36 | * @author Gerard Klijs 37 | */ 38 | @ExtendWith(MockitoExtension.class) 39 | class TokenUpdateSerializerTest { 40 | 41 | @Test 42 | void testSerializeUnsupportedOperation() { 43 | TokenUpdateSerializer serializer = new TokenUpdateSerializer(); 44 | assertThrows(UnsupportedOperationException.class, () -> serializer.serialize("topic", null)); 45 | } 46 | 47 | @Test 48 | void testSerializeHappyFlow() { 49 | TokenUpdateSerializer serializer = new TokenUpdateSerializer(); 50 | TrackingToken someToken = new GlobalSequenceTrackingToken(42); 51 | AbstractTokenEntry tokenEntry = 52 | new GenericTokenEntry<>(someToken, 53 | JacksonSerializer.defaultSerializer(), 54 | byte[].class, 55 | "processorName", 56 | 0); 57 | TokenUpdate update = new TokenUpdate(tokenEntry, 0); 58 | Headers headers = new RecordHeaders(); 59 | byte[] bytes = serializer.serialize("topic", headers, update); 60 | assertNotNull(bytes); 61 | assertNotEquals(0, bytes.length); 62 | assertEquals(GlobalSequenceTrackingToken.class.getCanonicalName(), 63 | HeaderUtils.valueAsString(headers, "tokenType")); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/AssertUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2019. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.util; 18 | 19 | import java.time.Duration; 20 | 21 | /** 22 | * Utility class for special assertions. 23 | */ 24 | public class AssertUtils { 25 | 26 | private AssertUtils() { 27 | // Utility class 28 | } 29 | 30 | /** 31 | * Assert that the given {@code assertion} succeeds with the given {@code time} and {@code unit}. 
32 | * 33 | * @param duration The time in which the assertion must pass 34 | * @param assertion the assertion to succeed within the deadline 35 | */ 36 | public static void assertWithin(Duration duration, Runnable assertion) { 37 | long now = System.currentTimeMillis(); 38 | long deadline = now + duration.toMillis(); 39 | do { 40 | try { 41 | assertion.run(); 42 | break; 43 | } catch (AssertionError e) { 44 | Thread.yield(); 45 | if (now >= deadline) { 46 | throw e; 47 | } 48 | } 49 | now = System.currentTimeMillis(); 50 | } while (true); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/HeaderAssertUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2018. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.util; 18 | 19 | import org.apache.kafka.common.header.Headers; 20 | import org.axonframework.eventhandling.DomainEventMessage; 21 | import org.axonframework.eventhandling.EventMessage; 22 | import org.axonframework.serialization.SerializedObject; 23 | 24 | import static org.axonframework.extensions.kafka.eventhandling.HeaderUtils.*; 25 | import static org.axonframework.messaging.Headers.*; 26 | import static org.junit.jupiter.api.Assertions.*; 27 | 28 | /** 29 | * Utility for asserting Kafka headers sent via Axon. 
30 | * 31 | * @author Nakul Mishra 32 | * @author Steven van Beelen 33 | */ 34 | public abstract class HeaderAssertUtil { 35 | 36 | private HeaderAssertUtil() { 37 | // Utility class 38 | } 39 | 40 | public static void assertEventHeaders(String metaDataKey, 41 | EventMessage eventMessage, 42 | SerializedObject so, 43 | Headers headers) { 44 | assertTrue(headers.toArray().length >= 5); 45 | assertEquals(eventMessage.getIdentifier(), valueAsString(headers, MESSAGE_ID)); 46 | assertEquals(eventMessage.getTimestamp().toEpochMilli(), valueAsLong(headers, MESSAGE_TIMESTAMP)); 47 | assertEquals(so.getType().getName(), valueAsString(headers, MESSAGE_TYPE)); 48 | assertEquals(so.getType().getRevision(), valueAsString(headers, MESSAGE_REVISION)); 49 | assertEquals(eventMessage.getMetaData().get(metaDataKey), 50 | valueAsString(headers, generateMetadataKey(metaDataKey))); 51 | } 52 | 53 | public static void assertDomainHeaders(DomainEventMessage eventMessage, Headers headers) { 54 | assertTrue(headers.toArray().length >= 8); 55 | assertEquals(eventMessage.getSequenceNumber(), valueAsLong(headers, AGGREGATE_SEQ)); 56 | assertEquals(eventMessage.getAggregateIdentifier(), valueAsString(headers, AGGREGATE_ID)); 57 | assertEquals(eventMessage.getType(), valueAsString(headers, AGGREGATE_TYPE)); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/KafkaContainerClusterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2023. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.util; 18 | 19 | import org.testcontainers.junit.jupiter.Container; 20 | import org.testcontainers.junit.jupiter.Testcontainers; 21 | 22 | /** 23 | * A {@link KafkaContainerCluster} set up by using {@link Testcontainers}. 24 | * 25 | * @author Lucas Campos 26 | */ 27 | @Testcontainers 28 | public abstract class KafkaContainerClusterTest { 29 | 30 | @Container 31 | protected static final KafkaContainerCluster KAFKA_CLUSTER = new KafkaContainerCluster("5.4.3", 3, 1); 32 | 33 | protected static String getBootstrapServers() { 34 | return KAFKA_CLUSTER.getBootstrapServers(); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/KafkaContainerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.eventhandling.util; 18 | 19 | import org.testcontainers.containers.KafkaContainer; 20 | import org.testcontainers.junit.jupiter.Container; 21 | import org.testcontainers.junit.jupiter.Testcontainers; 22 | import org.testcontainers.utility.DockerImageName; 23 | 24 | /** 25 | * A single {@link KafkaContainer} set up by using {@link Testcontainers}. 26 | * 27 | * @author Lucas Campos 28 | */ 29 | @Testcontainers 30 | public abstract class KafkaContainerTest { 31 | 32 | @Container 33 | protected static final KafkaContainer KAFKA_CONTAINER = 34 | new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka")); 35 | 36 | protected static String getBootstrapServers() { 37 | return KAFKA_CONTAINER.getBootstrapServers(); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /kafka/src/test/java/org/axonframework/extensions/kafka/utils/TestSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2010-2021. Axon Framework 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package org.axonframework.extensions.kafka.utils; 18 | 19 | import com.thoughtworks.xstream.XStream; 20 | import org.axonframework.serialization.JavaSerializer; 21 | import org.axonframework.serialization.SerializedObject; 22 | import org.axonframework.serialization.Serializer; 23 | import org.axonframework.serialization.SimpleSerializedObject; 24 | import org.axonframework.serialization.SimpleSerializedType; 25 | import org.axonframework.serialization.json.JacksonSerializer; 26 | import org.axonframework.serialization.xml.CompactDriver; 27 | import org.axonframework.serialization.xml.XStreamSerializer; 28 | import org.junit.*; 29 | 30 | import java.util.Base64; 31 | import java.util.Collection; 32 | import java.util.EnumSet; 33 | 34 | /** 35 | * Enumeration of serializers for testing purposes. 
36 | * 37 | * @author JohT 38 | */ 39 | @Ignore 40 | public enum TestSerializer { 41 | 42 | JAVA { 43 | @SuppressWarnings("deprecation") 44 | private final Serializer serializer = JavaSerializer.builder().build(); 45 | 46 | @Override 47 | public Serializer getSerializer() { 48 | return serializer; 49 | } 50 | 51 | @Override 52 | public String serialize(Object object) { 53 | return Base64.getEncoder().encodeToString(getSerializer().serialize(object, byte[].class).getData()); 54 | } 55 | 56 | @Override 57 | public T deserialize(String serialized, Class type) { 58 | return getSerializer().deserialize(asSerializedData(Base64.getDecoder().decode(serialized), type)); 59 | } 60 | }, 61 | XSTREAM { 62 | private final Serializer serializer = createSerializer(); 63 | 64 | private XStreamSerializer createSerializer() { 65 | XStream xStream = new XStream(new CompactDriver()); 66 | xStream.allowTypesByWildcard(new String[]{"org.apache.kafka.**"}); 67 | return XStreamSerializer.builder() 68 | .xStream(xStream) 69 | .classLoader(this.getClass().getClassLoader()) 70 | .build(); 71 | } 72 | 73 | @Override 74 | public Serializer getSerializer() { 75 | return serializer; 76 | } 77 | }, 78 | JACKSON { 79 | private final Serializer serializer = JacksonSerializer.builder().build(); 80 | 81 | @Override 82 | public Serializer getSerializer() { 83 | return serializer; 84 | } 85 | }; 86 | 87 | public String serialize(Object object) { 88 | return new String(getSerializer().serialize(object, byte[].class).getData()); 89 | } 90 | 91 | public T deserialize(String serialized, Class type) { 92 | return getSerializer().deserialize(asSerializedData(serialized.getBytes(), type)); 93 | } 94 | 95 | public abstract Serializer getSerializer(); 96 | 97 | public static Collection all() { 98 | return EnumSet.allOf(TestSerializer.class); 99 | } 100 | 101 | static SerializedObject asSerializedData(byte[] serialized, Class type) { 102 | SimpleSerializedType serializedType = new SimpleSerializedType(type.getName(), null); 103 | return new SimpleSerializedObject<>(serialized, byte[].class, serializedType); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /kafka/src/test/resources/log4j2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010-2020. Axon Framework 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | name=AxonTestConfiguration 17 | appenders = console 18 | 19 | appender.console.type = Console 20 | appender.console.name = STDOUT 21 | appender.console.layout.type = PatternLayout 22 | appender.console.layout.pattern = %d [%t] %-5p %-30.30c{1} %x - %m%n 23 | 24 | rootLogger.level = warn 25 | rootLogger.appenderRefs = stdout 26 | rootLogger.appenderRef.stdout.ref = STDOUT 27 | 28 | logger.axon.name = org.axonframework 29 | logger.axon.level = info 30 | logger.axon.additivity = false 31 | logger.axon.appenderRefs = stdout 32 | logger.axon.appenderRef.stdout.ref = STDOUT 33 | 34 | logger.chaining-converter.name = org.axonframework.serialization.ChainingConverter 35 | logger.chaining-converter.level = OFF 36 | 37 | logger.kafka.name = org.apache.kafka 38 | logger.kafka.level = off 39 | 40 | logger.kafka-clients.name = org.apache.kafka.clients 41 | logger.kafka-clients.level = off 42 | 43 | logger.kafka-server.name = kafka.server 44 | logger.kafka-server.level = off 45 | 46 | logger.kafka-common.name = kafka.common 47 | logger.kafka-common.level = off 48 | 49 | logger.kafka-spring.name = org.springframework.kafka 50 | logger.kafka-spring.level = off 51 | 52 | logger.zookeeper.name = org.apache.zookeeper 53 | logger.zookeeper.level = off 54 | 55 | logger.metrics-reporting.name = com.yammer.metrics.reporting 56 | logger.metrics-reporting.level = off 57 | --------------------------------------------------------------------------------
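Editor's note: as a brief usage illustration for the AssertUtils#assertWithin helper shown earlier, here is a sketch of how a test might poll for an asynchronous result. It is not a file from the repository; the consumed list and the expected count are made up for the example.

import static org.axonframework.extensions.kafka.eventhandling.util.AssertUtils.assertWithin;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.time.Duration;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

class AssertWithinExample {

    private final List<String> consumed = new CopyOnWriteArrayList<>();

    void awaitThreeRecords() {
        // Re-runs the assertion until it passes or the five-second deadline expires;
        // on timeout the last AssertionError is rethrown.
        assertWithin(Duration.ofSeconds(5), () -> assertEquals(3, consumed.size()));
    }
}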