├── .editorconfig
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── type_bug.md
│   │   ├── type_docs.md
│   │   ├── type_feature.md
│   │   ├── type_maintenance.md
│   │   ├── type_project.md
│   │   └── type_test.md
│   ├── dependabot.yml
│   ├── pull_request_template.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── backport.yml
│       ├── ci.yml
│       ├── codeql-analysis.yml
│       └── release.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── SECURITY.md
├── docker-compose.yml
├── exporter
│   ├── exporter.yml
│   ├── pom.xml
│   ├── revapi.json
│   └── src
│       ├── main
│       │   ├── java
│       │   │   └── io/zeebe/exporters/kafka
│       │   │       ├── KafkaExporter.java
│       │   │       ├── config
│       │   │       │   ├── Config.java
│       │   │       │   ├── ProducerConfig.java
│       │   │       │   ├── RecordConfig.java
│       │   │       │   ├── RecordsConfig.java
│       │   │       │   ├── parser
│       │   │       │   │   ├── AllowedType.java
│       │   │       │   │   ├── ConfigParser.java
│       │   │       │   │   ├── ConfigParserUtil.java
│       │   │       │   │   ├── RawConfigParser.java
│       │   │       │   │   ├── RawProducerConfigParser.java
│       │   │       │   │   ├── RawRecordConfigParser.java
│       │   │       │   │   └── RawRecordsConfigParser.java
│       │   │       │   └── raw
│       │   │       │       ├── RawConfig.java
│       │   │       │       ├── RawProducerConfig.java
│       │   │       │       ├── RawRecordConfig.java
│       │   │       │       └── RawRecordsConfig.java
│       │   │       ├── producer
│       │   │       │   ├── BoundedTransactionalRecordBatch.java
│       │   │       │   ├── DefaultKafkaProducerFactory.java
│       │   │       │   ├── KafkaProducerFactory.java
│       │   │       │   ├── RecordBatch.java
│       │   │       │   ├── RecordBatchFactory.java
│       │   │       │   └── RecordIdPartitioner.java
│       │   │       └── record
│       │   │           ├── FullRecordBatchException.java
│       │   │           ├── KafkaRecordFilter.java
│       │   │           ├── RecordHandler.java
│       │   │           └── RecordSerializer.java
│       │   └── resources
│       │       └── META-INF/services
│       │           └── io.camunda.zeebe.exporter.api.Exporter
│       └── test
│           ├── java
│           │   └── io/zeebe/exporters/kafka
│           │       ├── KafkaExporterTest.java
│           │       ├── config/parser
│           │       │   ├── MockConfigParser.java
│           │       │   ├── RawConfigParserTest.java
│           │       │   ├── RawProducerConfigParserTest.java
│           │       │   ├── RawRecordConfigParserTest.java
│           │       │   └── RawRecordsConfigParserTest.java
│           │       ├── producer
│           │       │   ├── BoundedTransactionRecordBatchTest.java
│           │       │   ├── MockKafkaProducerFactory.java
│           │       │   └── RecordBatchStub.java
│           │       └── record
│           │           └── RecordHandlerTest.java
│           └── resources
│               └── simplelogger.properties
├── pom.xml
├── qa
│   ├── pom.xml
│   └── src
│       └── test
│           ├── java
│           │   └── io/zeebe/exporters/kafka/qa
│           │       ├── DebugHttpExporterClient.java
│           │       ├── KafkaExporterIT.java
│           │       └── SampleWorkload.java
│           └── resources
│               ├── exporters.yml
│               ├── log4j2.xml
│               └── simplelogger.properties
├── revapi.json
└── serde
    ├── pom.xml
    ├── revapi.json
    └── src
        ├── main
        │   └── java
        │       └── io/zeebe/exporters/kafka/serde
        │           ├── JacksonDeserializer.java
        │           ├── JacksonSerializer.java
        │           ├── RecordDeserializer.java
        │           ├── RecordId.java
        │           ├── RecordIdDeserializer.java
        │           ├── RecordIdSerializer.java
        │           └── RecordSerializer.java
        └── test
            └── java
                └── io/zeebe/exporters/kafka/serde
                    ├── RecordIdTest.java
                    └── RecordTest.java

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 |
2 | [*]
3 | indent_style = space
4 | charset = utf-8
5 | end_of_line = lf
6 | trim_trailing_whitespace = true
7 | insert_final_newline = true
8 |
9 | [*.java]
10 | indent_size = 2
11 |
12 | [*.xml]
13 | indent_size = 2
14 |
15 | [*.html]
16 | indent_size = 2
17 |
18 | [*.md]
19 | indent_size = 2
20 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_bug.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Report a problem or unexpected behavior
4 | title: ''
5 | labels: 'type/bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 |
12 | A clear and concise description of what the bug is, how to reproduce it, and what you
13 | expected to happen instead.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_docs.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Documentation issue
3 | about: Improvements, additions, or modifications of the documentation
4 | title: ''
5 | labels: 'type/docs'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Description**
11 |
12 | A clear and concise description of what this issue is about.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_feature.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Request user-facing changes, e.g. API changes
4 | title: ''
5 | labels: 'type/feature'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_maintenance.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: General issue
3 | about: General, non-user-facing changes, e.g. refactoring, cleanups, etc.
4 | title: ''
5 | labels: 'type/maintenance'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Description**
11 |
12 | A clear and concise description of what this issue is about.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_project.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Project management issue
3 | about: Changes or improvements to the project, e.g. bot configurations, code style, etc.
4 | title: ''
5 | labels: 'type/project'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Description**
11 |
12 | A clear and concise description of what this issue is about.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/type_test.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Test issue
3 | about: Improvements or changes to the existing tests
4 | title: ''
5 | labels: 'type/test'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Description**
11 |
12 | A clear and concise description of what this issue is about.
13 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Enable version updates for main 4 | - package-ecosystem: "maven" 5 | # Look for the root pom 6 | directory: "/" 7 | schedule: 8 | interval: "daily" 9 | commit-message: 10 | prefix: "build: " 11 | labels: 12 | - "dependencies" 13 | open-pull-requests-limit: 10 14 | # Enable version updates for the github actions 15 | - package-ecosystem: "github-actions" 16 | directory: "/" 17 | schedule: 18 | interval: "daily" 19 | commit-message: 20 | prefix: "deps: " 21 | labels: 22 | - "dependencies" 23 | open-pull-requests-limit: 5 24 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Related issues 6 | 7 | 8 | 9 | closes # 10 | 11 | ## Pull Request Checklist 12 | 13 | - [ ] All commit messages match our [commit message guidelines](https://github.com/zeebe-io/zeebe/blob/develop/CONTRIBUTING.md#commit-message-guidelines) 14 | - [ ] The submitting code follows our [code style](https://github.com/zeebe-io/zeebe/wiki/Code-Style) 15 | - [ ] If submitting code, please run `mvn clean install -DskipTests` locally before committing 16 | - [ ] Ensure all PR checks are green 17 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: $NEXT_PATCH_VERSION 2 | tag-template: $NEXT_PATCH_VERSION 3 | template: | 4 | # What's Changed 5 | 6 | $CHANGES 7 | 8 | # Contributors 9 | 10 | $CONTRIBUTORS 11 | 12 | categories: 13 | - title: 🚀 Features 14 | label: type/feature 15 | - title: 🐛 Bug Fixes 16 | label: type/bug 17 | - title: 📖 Documentation 18 | label: type/docs 19 | - title: 🧹 Maintenance 20 | labels: 21 | - type/maintenance 22 | - type/test 23 | - type/project 24 | - title: 📦 Dependency updates 25 | label: dependencies 26 | 27 | -------------------------------------------------------------------------------- /.github/workflows/backport.yml: -------------------------------------------------------------------------------- 1 | name: Backport labeled PRs after merge 2 | on: 3 | pull_request: 4 | types: [closed] 5 | jobs: 6 | build: 7 | name: Create backport PRs 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | with: 12 | fetch-depth: 0 13 | - name: Create backport PRs 14 | uses: zeebe-io/backport-action@master 15 | with: 16 | github_token: ${{ secrets.GITHUB_TOKEN }} 17 | github_workspace: ${{ github.workspace }} 18 | version: main 19 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Java CI 2 | on: 3 | pull_request: 4 | paths: 5 | - '**/*.java' 6 | - '**/*.xml' 7 | - '**/*.json' 8 | - '**/*.properties' 9 | - '**/*.yml' 10 | push: 11 | branches: [ main ] 12 | paths: 13 | - '**/*.java' 14 | - '**/*.xml' 15 | - '**/*.json' 16 | - '**/*.properties' 17 | - '**/*.yml' 18 | workflow_dispatch: { } 19 | jobs: 20 | build: 21 | name: Build & Verify 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up JDK 11 26 | uses: actions/setup-java@v2 27 | with: 28 | 
java-version: '11' 29 | distribution: 'temurin' 30 | cache: 'maven' 31 | - name: Build 32 | id: build 33 | run: mvn -B -DskipTests -T1C clean verify 34 | - name: Test 35 | id: test 36 | timeout-minutes: 20 37 | run: > 38 | mvn -B verify 39 | - name: Archive Test Results on Failure 40 | uses: actions/upload-artifact@v3 41 | if: failure() 42 | with: 43 | name: test-results 44 | path: target/surefire-reports/ 45 | retention-days: 7 46 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '15 15 * * 2' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | language: [ 'java' ] 32 | 33 | steps: 34 | - name: Checkout repository 35 | uses: actions/checkout@v3 36 | 37 | - uses: actions/setup-java@v1 38 | with: 39 | java-version: '11.0.7' 40 | 41 | # Initializes the CodeQL tools for scanning. 42 | - name: Initialize CodeQL 43 | uses: github/codeql-action/init@v2 44 | with: 45 | languages: ${{ matrix.language }} 46 | # If you wish to specify custom queries, you can do so here or in a config file. 47 | # By default, queries listed here will override any specified in a config file. 48 | # Prefix the list here with "+" to use these queries and those in the config file. 49 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 50 | 51 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 52 | # If this step fails, then you should remove it and run the build manually (see below) 53 | # - name: Autobuild 54 | # uses: github/codeql-action/autobuild@v1 55 | 56 | # ℹ️ Command-line programs to run using the OS shell. 57 | # 📚 https://git.io/JvXDl 58 | 59 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 60 | # and modify them (or add more) to build your code if your project 61 | # uses a compiled language 62 | 63 | - run: | 64 | mvn package -DskipTests 65 | - name: Perform CodeQL Analysis 66 | uses: github/codeql-action/analyze@v2 67 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # If this workflow is triggered by a push to main, it 2 | # deploys a SNAPSHOT 3 | # If this workflow is triggered by publishing a Release, it 4 | # deploys a RELEASE with the selected version 5 | # updates the project version by incrementing the patch version 6 | # commits the version update change to the repository's default branch. 
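#
# An illustrative note (assuming the GitHub CLI is installed): the workflow_dispatch trigger
# below can also be exercised manually with `gh workflow run "Deploy artifacts with Maven"`.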
7 | name: Deploy artifacts with Maven 8 | on: 9 | push: 10 | branches: [ main ] 11 | paths: 12 | - '**/*.java' 13 | - '**/*.xml' 14 | - '**/*.json' 15 | - '**/*.properties' 16 | - '**/*.yml' 17 | release: 18 | types: [ published ] 19 | workflow_dispatch: { } 20 | jobs: 21 | publish: 22 | runs-on: ubuntu-20.04 23 | steps: 24 | - uses: actions/checkout@v3 # pin@v2 25 | - name: Cache 26 | uses: actions/cache@ac8075791e805656e71b4ba23325ace9e3421120 # pin@v2 27 | with: 28 | path: ~/.m2/repository 29 | key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} 30 | restore-keys: | 31 | ${{ runner.os }}-maven- 32 | - name: Set up Java environment 33 | uses: actions/setup-java@e54a62b3df9364d4b4c1c29c7225e57fe605d7dd # pin@v1 34 | with: 35 | java-version: 11 36 | gpg-private-key: ${{ secrets.MAVEN_CENTRAL_GPG_SIGNING_KEY_SEC }} 37 | gpg-passphrase: MAVEN_CENTRAL_GPG_PASSPHRASE 38 | - name: Deploy SNAPSHOT / Release 39 | uses: camunda-community-hub/community-action-maven-release@a9e964bf56978eef9bca81551cecceebb246a8e5 # pin@v1 40 | with: 41 | release-version: ${{ github.event.release.tag_name }} 42 | release-profile: community-action-maven-release 43 | nexus-usr: ${{ secrets.NEXUS_USR }} 44 | nexus-psw: ${{ secrets.NEXUS_PSW }} 45 | maven-usr: ${{ secrets.MAVEN_CENTRAL_DEPLOYMENT_USR }} 46 | maven-psw: ${{ secrets.MAVEN_CENTRAL_DEPLOYMENT_PSW }} 47 | maven-gpg-passphrase: ${{ secrets.MAVEN_CENTRAL_GPG_SIGNING_KEY_PASSPHRASE }} 48 | github-token: ${{ secrets.GITHUB_TOKEN }} 49 | id: release 50 | - if: github.event.release 51 | name: Attach artifacts to GitHub Release (Release only) 52 | uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # pin@v1 53 | env: 54 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 55 | with: 56 | upload_url: ${{ github.event.release.upload_url }} 57 | asset_path: ${{ steps.release.outputs.artifacts_archive_path }} 58 | asset_name: ${{ steps.release.outputs.artifacts_archive_path }} 59 | asset_content_type: application/zip 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target/ 3 | *.iml 4 | *.jar 5 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at code-of-conduct@zeebe.io. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | These are the versions currently maintained and supported. As there's only a single maintainer, time is limited, so only one version is supported. 6 | 7 | | Version | Supported | 8 | | ------- | ------------------ | 9 | | 3.x.x | :white_check_mark: | 10 | | < 3.0 | :x: | 11 | 12 | ## Reporting a Vulnerability 13 | 14 | If you detect any security vulnerabilities, please open a new issue about it. 15 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | zeebe: 5 | image: ${ZEEBE_IMAGE:-camunda/zeebe}:${ZEEBE_VERSION:-1.0.0} 6 | container_name: zeebe 7 | hostname: zeebe 8 | depends_on: 9 | - kafka 10 | ports: 11 | - "26500:26500" 12 | restart: always 13 | environment: 14 | SPRING_CONFIG_ADDITIONAL_LOCATION: /usr/local/zeebe/config/exporter.yml 15 | ZEEBE_BROKER_EXPORTERS_KAFKA_ARGS_PRODUCER_SERVERS: "kafka:9092" 16 | volumes: 17 | - ./exporter/exporter.yml:/usr/local/zeebe/config/exporter.yml 18 | - ./exporter/target/zeebe-kafka-exporter-jar-with-dependencies.jar:/usr/local/zeebe/lib/zeebe-kafka-exporter.jar 19 | 20 | consumer: 21 | image: edenhill/kafkacat:${KAFKACAT_VERSION:-1.5.0} 22 | container_name: consumer 23 | hostname: consumer 24 | depends_on: 25 | - kafka 26 | restart: always 27 | command: [ "kafkacat", "-b", "kafka:9092", "-C", "-J", "-X", "metadata.max.age.ms=1000", "-X", "topic.metadata.refresh.interval.ms=1000", "-G", "zeebe", "^zeebe.*$$" ] 28 | 29 | zookeeper: 30 | image: confluentinc/cp-zookeeper:${KAFKA_VERSION:-5.5.1} 31 | container_name: zookeeper 32 | hostname: zookeeper 33 | ports: 34 | - "2181:2181" 35 | environment: 36 | ZOOKEEPER_CLIENT_PORT: 2181 37 | ZOOKEEPER_TICK_TIME: 2000 38 | restart: always 39 | 40 | kafka: 41 | image: confluentinc/cp-kafka:${KAFKA_VERSION:-5.5.1} 42 | hostname: kafka 43 | container_name: kafka 44 | depends_on: 45 | - zookeeper 46 | ports: 47 | - '29092:29092' 48 | environment: 49 | KAFKA_BROKER_ID: 1 50 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' 51 | KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9092,OUTSIDE://localhost:29092 52 | KAFKA_LISTENERS: INSIDE://0.0.0.0:9092,OUTSIDE://0.0.0.0:29092 53 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT 54 | KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE 55 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 56 | restart: always 57 | -------------------------------------------------------------------------------- /exporter/exporter.yml: -------------------------------------------------------------------------------- 1 | zeebe: 2 | broker: 3 | exporters: 4 | kafka: 5 | className: io.zeebe.exporters.kafka.KafkaExporter 6 | # Update this path to the location of the JAR 7 | # Note that this must be visible to the broker process 8 | jarPath: 
/path/to/zeebe-kafka-exporter-3.0.0-jar-with-dependencies.jar
9 | args:
10 | # Controls the number of records to buffer in a single record batch before forcing a flush. Note
11 | # that a flush may occur earlier anyway due to periodic flushing. This setting should help you
12 | # estimate a soft upper bound on the memory consumption of the exporter. If you assume a worst-
13 | # case scenario where every record is the size of your zeebe.broker.network.maxMessageSize, then
14 | # the memory required by the exporter would be at least:
15 | # (maxBatchSize * zeebe.broker.network.maxMessageSize * 2)
16 | #
17 | # We multiply by 2 as the records are buffered twice - once in the exporter itself, and once
18 | # in the producer's network buffers (but serialized at that point). There's some additional
19 | # memory overhead used by the producer as well for compression/encryption/etc., so you have to
20 | # add a bit, but that part is not proportional to the number of records and is more or less
21 | # constant.
22 | #
23 | # Once the batch has reached this size, a flush is automatically triggered. Too small a number
24 | # here causes frequent flushes, which is bad for performance, but means you will see
25 | # your records sooner.
26 | #
27 | # Default is 100
28 | maxBatchSize: 100
29 | # The maximum time to block when the batch is full. If the batch is full and a new
30 | # record comes in, the exporter will block until there is space in the batch, or until
31 | # maxBlockingTimeoutMs milliseconds elapse.
32 | maxBlockingTimeoutMs: 1000
33 | # How often pending batches should be flushed to the Kafka broker. Too low a value will
34 | # cause more load on the broker, but means your records will be visible faster.
35 | flushIntervalMs: 1000
36 |
37 | # Producer-specific configuration
38 | producer:
39 | # The list of initial Kafka broker contact points, in the same format that
40 | # ProducerConfig expects, i.e. "host:port"
41 | # Maps to ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
42 | # For example:
43 | # servers: "kafka:9092,localhost:29092"
44 | servers: ""
45 | # Controls how long the producer will wait for a request to be acknowledged by
46 | # the Kafka broker before retrying it
47 | # Maps to ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG
48 | requestTimeoutMs: 5000
49 | # Grace period when shutting down the producer, in milliseconds
50 | closeTimeoutMs: 5000
51 | # Producer client identifier
52 | clientId: zeebe
53 |
54 | # Any setting under the following section will be passed verbatim to
55 | # ProducerConfig; you can use this to configure authentication, compression,
56 | # etc. Note that you can overwrite some important settings, so avoid changing
57 | # idempotency, delivery timeout, and retries, unless you know what you're doing
58 | config: |
59 | linger.ms=5
60 | buffer.memory=8388608
61 | batch.size=32768
62 | max.block.ms=5000
63 |
64 | # Controls which records are pushed to Kafka and to which topic
65 | # Each entry is a sub-map which can contain two entries:
66 | # type => string
67 | # topic => string
68 | #
69 | # Topic is the topic to which records with the given value type
70 | # should be sent, e.g. for a deployment record below we would
71 | # send the record to the "zeebe-deployment" topic.
72 | #
73 | # Type is a comma-separated string of accepted record types, allowing you to filter if you
74 | # want nothing (""), commands ("command"), events ("event"), or rejections ("rejection"),
75 | # or a combination of the three, e.g. "command,event".
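#
# For example (an illustrative sketch, not one of the shipped defaults below), to keep only
# job events and rejections you could write:
# records:
#   job: { type: "event,rejection", topic: zeebe-job }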
76 | # 77 | # To omit certain records entirely, set type to an empty string. For example, 78 | # records: 79 | # deployment: { type: "" } 80 | records: 81 | # If a record value type is omitted in your configuration file, 82 | # it will fall back to whatever is configured in the defaults 83 | defaults: { type: "event", topic: zeebe } 84 | # For records with a value of type DEPLOYMENT 85 | deployment: { topic: zeebe-deployment } 86 | # For records with a value of type DEPLOYMENT_DISTRIBUTION 87 | deploymentDistribution: { topic: zeebe-deployment-distribution } 88 | # For records with a value of type ERROR 89 | error: { topic: zeebe-error } 90 | # For records with a value of type INCIDENT 91 | incident: { topic: zeebe-incident } 92 | # For records with a value of type JOB_BATCH 93 | jobBatch: { topic: zeebe-job-batch } 94 | # For records with a value of type JOB 95 | job: { topic: zeebe-job } 96 | # For records with a value of type MESSAGE 97 | message: { topic: zeebe-message } 98 | # For records with a value of type MESSAGE_SUBSCRIPTION 99 | messageSubscription: { topic: zeebe-message-subscription } 100 | # For records with a value of type MESSAGE_START_EVENT_SUBSCRIPTION 101 | messageStartEventSubscription: { topic: zeebe-message-subscription-start-event } 102 | # For records with a value of type PROCESS 103 | process: { topic: zeebe-process } 104 | # For records with a value of type PROCESS_EVENT 105 | processEvent: { topic: zeebe-process-event } 106 | # For records with a value of type PROCESS_INSTANCE 107 | processInstance: { topic: zeebe-process-instance } 108 | # For records with a value of type PROCESS_INSTANCE_RESULT 109 | processInstanceResult: { topic: zeebe-process-instance-result } 110 | # For records with a value of type PROCESS_MESSAGE_SUBSCRIPTION 111 | processMessageSubscription: { topic: zeebe-process-message-subscription } 112 | # For records with a value of type TIMER 113 | timer: { topic: zeebe-timer } 114 | # For records with a value of type VARIABLE 115 | variable: { topic: zeebe-variable } 116 | -------------------------------------------------------------------------------- /exporter/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | Zeebe Kafka Exporter 6 | zeebe-kafka-exporter 7 | jar 8 | https://github.com/zeebe-io/zeebe-kafka-exporter/exporter 9 | 10 | 11 | zeebe-kafka-exporter-root 12 | io.zeebe 13 | ../pom.xml 14 | 3.1.2-SNAPSHOT 15 | 16 | 17 | 18 | ${project.artifactId}-${project.version} 19 | 20 | 21 | 22 | 23 | 24 | io.zeebe 25 | zeebe-kafka-exporter-serde 26 | 27 | 28 | 29 | org.apache.kafka 30 | kafka-clients 31 | 32 | 33 | 34 | 35 | io.camunda 36 | zeebe-protocol 37 | provided 38 | 39 | 40 | 41 | io.camunda 42 | zeebe-exporter-api 43 | provided 44 | 45 | 46 | 47 | slf4j-api 48 | org.slf4j 49 | provided 50 | 51 | 52 | 53 | org.agrona 54 | agrona 55 | provided 56 | 57 | 58 | 59 | 60 | io.camunda 61 | zeebe-protocol-jackson 62 | test 63 | 64 | 65 | 66 | io.camunda 67 | zeebe-test 68 | test 69 | 70 | 71 | 72 | org.slf4j 73 | slf4j-simple 74 | test 75 | 76 | 77 | 78 | org.junit.jupiter 79 | junit-jupiter-api 80 | test 81 | 82 | 83 | 84 | org.junit.jupiter 85 | junit-jupiter-params 86 | test 87 | 88 | 89 | 90 | org.assertj 91 | assertj-core 92 | test 93 | 94 | 95 | 96 | 97 | ${exporter.finalName} 98 | 99 | 100 | org.apache.maven.plugins 101 | maven-dependency-plugin 102 | 103 | 104 | 105 | org.slf4j:slf4j-simple 106 | 107 | 108 | 109 | 110 | 111 | 112 | org.revapi 113 | revapi-maven-plugin 114 | 
115 | 116 | 117 | revapi.json 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | org.apache.maven.plugins 126 | maven-assembly-plugin 127 | 128 | 129 | jar-with-dependencies 130 | 131 | 132 | 133 | 134 | standalone 135 | 136 | single 137 | 138 | package 139 | 140 | 141 | 142 | 143 | 144 | 145 | -------------------------------------------------------------------------------- /exporter/revapi.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "extension": "revapi.filter", 4 | "configuration": { 5 | "enabled": true, 6 | "elements": { 7 | "include": [ 8 | { 9 | "matcher": "java-package", 10 | "match": "io.zeebe.exporters.kafka.config.raw" 11 | } 12 | ] 13 | } 14 | } 15 | } 16 | ] 17 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/KafkaExporter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka; 17 | 18 | import io.camunda.zeebe.exporter.api.Exporter; 19 | import io.camunda.zeebe.exporter.api.context.Context; 20 | import io.camunda.zeebe.exporter.api.context.Controller; 21 | import io.camunda.zeebe.exporter.api.context.ScheduledTask; 22 | import io.camunda.zeebe.protocol.record.Record; 23 | import io.zeebe.exporters.kafka.config.Config; 24 | import io.zeebe.exporters.kafka.config.parser.ConfigParser; 25 | import io.zeebe.exporters.kafka.config.parser.RawConfigParser; 26 | import io.zeebe.exporters.kafka.config.raw.RawConfig; 27 | import io.zeebe.exporters.kafka.producer.RecordBatch; 28 | import io.zeebe.exporters.kafka.producer.RecordBatchFactory; 29 | import io.zeebe.exporters.kafka.record.KafkaRecordFilter; 30 | import io.zeebe.exporters.kafka.record.RecordHandler; 31 | import io.zeebe.exporters.kafka.record.RecordSerializer; 32 | import java.util.Objects; 33 | import org.slf4j.Logger; 34 | 35 | /** Implementation of a Zeebe exporter producing serialized records to a given Kafka topic. 
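*
* <p>A minimal consumer-side sketch (assumptions: the zeebe-kafka-exporter-serde module is on the
* classpath, its deserializers yield {@code RecordId} keys and {@code Record<?>} values, and a
* broker is reachable at localhost:29092):
*
* <pre>{@code
* final Map<String, Object> props =
*     Map.of(
*         ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:29092",
*         ConsumerConfig.GROUP_ID_CONFIG, "zeebe");
* try (Consumer<RecordId, Record<?>> consumer =
*     new KafkaConsumer<>(props, new RecordIdDeserializer(), new RecordDeserializer())) {
*   consumer.subscribe(Pattern.compile("^zeebe.*$"));
*   consumer.poll(Duration.ofSeconds(5)).forEach(record -> System.out.println(record.value()));
* }
* }</pre>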
*/
36 | public final class KafkaExporter implements Exporter {
37 | private final RecordBatchFactory recordBatchFactory;
38 | private final ConfigParser<RawConfig, Config> configParser;
39 |
40 | private Controller controller;
41 | private Logger logger;
42 | private Config config;
43 | private RecordHandler recordHandler;
44 | private ScheduledTask flushTask;
45 | private RecordBatch recordBatch;
46 |
47 | // the constructor is used by the Zeebe broker to instantiate it
48 | @SuppressWarnings("unused")
49 | public KafkaExporter() {
50 | this(RecordBatchFactory.defaultFactory(), new RawConfigParser());
51 | }
52 |
53 | public KafkaExporter(
54 | final RecordBatchFactory recordBatchFactory,
55 | final ConfigParser<RawConfig, Config> configParser) {
56 | this.recordBatchFactory = Objects.requireNonNull(recordBatchFactory);
57 | this.configParser = Objects.requireNonNull(configParser);
58 | }
59 |
60 | @Override
61 | public void configure(final Context context) {
62 | logger = Objects.requireNonNull(context.getLogger());
63 |
64 | final var rawConfig =
65 | Objects.requireNonNull(context.getConfiguration().instantiate(RawConfig.class));
66 | config = configParser.parse(rawConfig);
67 |
68 | final var serializer = new RecordSerializer();
69 | serializer.configure(config.getProducer().getConfig(), false);
70 | recordHandler = new RecordHandler(config.getRecords(), serializer);
71 |
72 | context.setFilter(new KafkaRecordFilter(config.getRecords()));
73 |
74 | if (logger.isDebugEnabled()) {
75 | logger.debug("Configured Kafka exporter: {}", config);
76 | } else {
77 | logger.info("Configured Kafka exporter");
78 | }
79 | }
80 |
81 | @Override
82 | public void open(final Controller controller) {
83 | this.controller = controller;
84 | recordBatch =
85 | recordBatchFactory.newRecordBatch(
86 | config.getProducer(), config.getMaxBatchSize(), this::updatePosition, logger);
87 |
88 | scheduleFlushBatchTask();
89 |
90 | if (logger.isDebugEnabled()) {
91 | logger.debug("Opened Kafka exporter with configuration: {}", config);
92 | } else {
93 | logger.info("Opened Kafka exporter");
94 | }
95 | }
96 |
97 | @Override
98 | public void close() {
99 | if (flushTask != null) {
100 | flushTask.cancel();
101 | }
102 |
103 | if (recordBatch != null) {
104 | recordBatch.flush();
105 | recordBatch.close();
106 | }
107 |
108 | if (logger != null) {
109 | logger.info("Closed Kafka exporter");
110 | }
111 | }
112 |
113 | @Override
114 | public void export(final Record<?> record) {
115 | if (!recordHandler.isAllowed(record)) {
116 | logger.trace("Ignoring record {}", record);
117 | return;
118 | }
119 |
120 | final var producerRecord = recordHandler.transform(record);
121 | recordBatch.add(producerRecord);
122 | logger.trace("Added {} to the batch", producerRecord);
123 | }
124 |
125 | private void scheduleFlushBatchTask() {
126 | logger.trace("Rescheduling flush task in {}", config.getFlushInterval());
127 | flushTask = controller.scheduleCancellableTask(config.getFlushInterval(), this::flushBatchTask);
128 | }
129 |
130 | private void flushBatchTask() {
131 | try {
132 | recordBatch.flush();
133 | } finally {
134 | scheduleFlushBatchTask();
135 | }
136 | }
137 |
138 | private void updatePosition(final long position) {
139 | controller.updateLastExportedRecordPosition(position);
140 | logger.trace("Flushed batch and updated last exported record position to {}", position);
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/Config.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import java.time.Duration;
19 | import java.util.Objects;
20 |
21 | /**
22 | * Entrypoint for the effective {@link io.zeebe.exporters.kafka.KafkaExporter} configuration. This
23 | * is what the exporter will use as final configuration. See {@link
24 | * io.zeebe.exporters.kafka.config.raw.RawConfig} and {@link
25 | * io.zeebe.exporters.kafka.config.parser.RawConfigParser} for more on how the external
26 | * configuration is parsed into an instance of this class.
27 | */
28 | public final class Config {
29 | private final ProducerConfig producer;
30 | private final RecordsConfig records;
31 | private final int maxBatchSize;
32 | private final Duration flushInterval;
33 |
34 | public Config(
35 | final ProducerConfig producer,
36 | final RecordsConfig records,
37 | final int maxBatchSize,
38 | final Duration flushInterval) {
39 | this.producer = Objects.requireNonNull(producer);
40 | this.records = Objects.requireNonNull(records);
41 | this.maxBatchSize = maxBatchSize;
42 | this.flushInterval = Objects.requireNonNull(flushInterval);
43 | }
44 |
45 | public ProducerConfig getProducer() {
46 | return producer;
47 | }
48 |
49 | public RecordsConfig getRecords() {
50 | return records;
51 | }
52 |
53 | public int getMaxBatchSize() {
54 | return maxBatchSize;
55 | }
56 |
57 | public Duration getFlushInterval() {
58 | return flushInterval;
59 | }
60 |
61 | @Override
62 | public int hashCode() {
63 | return Objects.hash(producer, records, maxBatchSize, flushInterval);
64 | }
65 |
66 | @Override
67 | public boolean equals(final Object o) {
68 | if (this == o) {
69 | return true;
70 | }
71 | if (o == null || getClass() != o.getClass()) {
72 | return false;
73 | }
74 | final Config config = (Config) o;
75 | return getMaxBatchSize() == config.getMaxBatchSize()
76 | && Objects.equals(getProducer(), config.getProducer())
77 | && Objects.equals(getRecords(), config.getRecords())
78 | && Objects.equals(getFlushInterval(), config.getFlushInterval());
79 | }
80 |
81 | @Override
82 | public String toString() {
83 | return "Config{"
84 | + "producer="
85 | + producer
86 | + ", records="
87 | + records
88 | + ", maxBatchSize="
89 | + maxBatchSize
90 | + ", flushInterval="
91 | + flushInterval
92 | + '}';
93 | }
94 | }
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/ProducerConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in
compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config; 17 | 18 | import java.time.Duration; 19 | import java.util.List; 20 | import java.util.Map; 21 | import java.util.Objects; 22 | 23 | /** 24 | * {@link ProducerConfig} is used by instances of {@link 25 | * io.zeebe.exporters.kafka.producer.KafkaProducerFactory} to configure a producer. A few standard 26 | * configuration options were extracted as options (e.g. {@code clientId}, {@code servers}) as they 27 | * were common - everything else can be configured via the free-form {@code config} map. 28 | * 29 | *

* <p>NOTE: be aware that when configuring a producer using the {@code config} map, Kafka expects the
30 | * values to either be strings OR very specific data types. While these are well documented, if
31 | * you're unsure of the expected data type (e.g. Integer, Long, Boolean), then just pass a string
32 | * representation of what you want to use.
33 | */
34 | public final class ProducerConfig {
35 | private final String clientId;
36 | private final Duration closeTimeout;
37 | private final Map<String, Object> config;
38 | private final Duration requestTimeout;
39 | private final Duration maxBlockingTimeout;
40 | private final List<String> servers;
41 |
42 | public ProducerConfig(
43 | final String clientId,
44 | final Duration closeTimeout,
45 | final Map<String, Object> config,
46 | final Duration requestTimeout,
47 | final Duration maxBlockingTimeout,
48 | final List<String> servers) {
49 | this.clientId = Objects.requireNonNull(clientId);
50 | this.closeTimeout = Objects.requireNonNull(closeTimeout);
51 | this.config = Objects.requireNonNull(config);
52 | this.requestTimeout = Objects.requireNonNull(requestTimeout);
53 | this.maxBlockingTimeout = Objects.requireNonNull(maxBlockingTimeout);
54 | this.servers = Objects.requireNonNull(servers);
55 | }
56 |
57 | public String getClientId() {
58 | return clientId;
59 | }
60 |
61 | public Duration getCloseTimeout() {
62 | return closeTimeout;
63 | }
64 |
65 | public Map<String, Object> getConfig() {
66 | return config;
67 | }
68 |
69 | public Duration getRequestTimeout() {
70 | return requestTimeout;
71 | }
72 |
73 | public Duration getMaxBlockingTimeout() {
74 | return maxBlockingTimeout;
75 | }
76 |
77 | public List<String> getServers() {
78 | return servers;
79 | }
80 |
81 | @Override
82 | public int hashCode() {
83 | return Objects.hash(
84 | clientId, closeTimeout, config, requestTimeout, maxBlockingTimeout, servers);
85 | }
86 |
87 | @Override
88 | public boolean equals(final Object o) {
89 | if (this == o) {
90 | return true;
91 | }
92 | if (o == null || getClass() != o.getClass()) {
93 | return false;
94 | }
95 | final ProducerConfig that = (ProducerConfig) o;
96 | return Objects.equals(getClientId(), that.getClientId())
97 | && Objects.equals(getCloseTimeout(), that.getCloseTimeout())
98 | && Objects.equals(getConfig(), that.getConfig())
99 | && Objects.equals(getRequestTimeout(), that.getRequestTimeout())
100 | && Objects.equals(getMaxBlockingTimeout(), that.getMaxBlockingTimeout())
101 | && Objects.equals(getServers(), that.getServers());
102 | }
103 |
104 | @Override
105 | public String toString() {
106 | return "ProducerConfig{"
107 | + "clientId='"
108 | + clientId
109 | + '\''
110 | + ", closeTimeout="
111 | + closeTimeout
112 | + ", config="
113 | + config
114 | + ", requestTimeout="
115 | + requestTimeout
116 | + ", maxBlockingTimeout="
117 | + maxBlockingTimeout
118 | + ", servers="
119 | + servers
120 | + '}';
121 | }
122 | }
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/RecordConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config; 17 | 18 | import io.camunda.zeebe.protocol.record.Record; 19 | import io.camunda.zeebe.protocol.record.RecordType; 20 | import java.util.Objects; 21 | import java.util.Set; 22 | 23 | /** 24 | * {@link RecordConfig} describes what the exporter should do with a record of a given {@link 25 | * io.camunda.zeebe.protocol.record.ValueType} - this is mapped via {@link RecordsConfig}, which 26 | * holds a map of {@link io.camunda.zeebe.protocol.record.ValueType} to {@link RecordConfig}. 27 | * 28 | *

* <p>For the {@link io.camunda.zeebe.protocol.record.ValueType} associated with this instance, only
29 | * records with a {@link Record#getRecordType()} which is included in {@code allowedTypes} will be
30 | * exported. An empty set of {@code allowedTypes} means nothing gets exported.
31 | */
32 | public final class RecordConfig {
33 | private final Set<RecordType> allowedTypes;
34 | private final String topic;
35 |
36 | public RecordConfig(final Set<RecordType> allowedTypes, final String topic) {
37 | this.allowedTypes = Objects.requireNonNull(allowedTypes);
38 | this.topic = Objects.requireNonNull(topic);
39 | }
40 |
41 | public Set<RecordType> getAllowedTypes() {
42 | return allowedTypes;
43 | }
44 |
45 | public String getTopic() {
46 | return topic;
47 | }
48 |
49 | @Override
50 | public int hashCode() {
51 | return Objects.hash(allowedTypes, topic);
52 | }
53 |
54 | @Override
55 | public boolean equals(final Object o) {
56 | if (this == o) {
57 | return true;
58 | }
59 | if (o == null || getClass() != o.getClass()) {
60 | return false;
61 | }
62 | final RecordConfig that = (RecordConfig) o;
63 | return Objects.equals(getAllowedTypes(), that.getAllowedTypes())
64 | && Objects.equals(getTopic(), that.getTopic());
65 | }
66 |
67 | @Override
68 | public String toString() {
69 | return "RecordConfig{" + "allowedTypes=" + allowedTypes + ", topic='" + topic + '\'' + '}';
70 | }
71 | }
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/RecordsConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import io.camunda.zeebe.protocol.record.ValueType;
19 | import java.util.Map;
20 | import java.util.Objects;
21 | import java.util.Optional;
22 |
23 | /**
24 | * {@link RecordsConfig} provides a default {@link RecordConfig} for every {@link ValueType}, with
25 | * the possibility of setting a specific {@link RecordConfig} for a given {@link ValueType}.
26 | */
27 | public final class RecordsConfig {
28 | private final Map<ValueType, RecordConfig> typeMap;
29 | private final RecordConfig defaults;
30 |
31 | public RecordsConfig(final Map<ValueType, RecordConfig> typeMap, final RecordConfig defaults) {
32 | this.typeMap = Objects.requireNonNull(typeMap);
33 | this.defaults = Objects.requireNonNull(defaults);
34 | }
35 |
36 | public Map<ValueType, RecordConfig> getTypeMap() {
37 | return typeMap;
38 | }
39 |
40 | public RecordConfig getDefaults() {
41 | return defaults;
42 | }
43 |
44 | /**
45 | * Returns the correct {@link RecordConfig} for this type, or {@link #getDefaults()} if none is
46 | * defined for the given type.
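*
* <p>For example, if {@code typeMap} only has an entry for {@code ValueType.DEPLOYMENT}, then
* {@code forType(ValueType.JOB)} returns {@link #getDefaults()}, while
* {@code forType(ValueType.DEPLOYMENT)} returns the configured override.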
47 | * 48 | * @param type the value type to get the {@link RecordConfig} of 49 | * @return the configured {@link RecordConfig} for this type, or {@link #getDefaults()} 50 | */ 51 | public RecordConfig forType(final ValueType type) { 52 | return Optional.ofNullable(typeMap.get(type)).orElse(defaults); 53 | } 54 | 55 | @Override 56 | public int hashCode() { 57 | return Objects.hash(defaults, typeMap); 58 | } 59 | 60 | @Override 61 | public boolean equals(final Object o) { 62 | if (this == o) { 63 | return true; 64 | } 65 | if (o == null || getClass() != o.getClass()) { 66 | return false; 67 | } 68 | final RecordsConfig that = (RecordsConfig) o; 69 | return Objects.equals(getTypeMap(), that.getTypeMap()) 70 | && Objects.equals(getDefaults(), that.getDefaults()); 71 | } 72 | 73 | @Override 74 | public String toString() { 75 | return "RecordsConfig{" + "typeMap=" + typeMap + ", defaults=" + defaults + '}'; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/AllowedType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import io.camunda.zeebe.protocol.record.RecordType; 19 | import java.util.Objects; 20 | 21 | /** 22 | * {@link AllowedType} maps string values to {@link RecordType} values, and is used purely for 23 | * parsing purposes. {@link RecordType} is not used directly as not all types are supported. 
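 *
 * <p>For example, the mapping behaves as follows (taken straight from the enum below):
 *
 * <pre>{@code
 * AllowedType.forName("rejection").getRecordType(); // RecordType.COMMAND_REJECTION
 * AllowedType.forName("event").getRecordType();     // RecordType.EVENT
 * AllowedType.forName("unknown");                   // throws IllegalArgumentException
 * }</pre>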
24 | */ 25 | public enum AllowedType { 26 | COMMAND("command", RecordType.COMMAND), 27 | EVENT("event", RecordType.EVENT), 28 | REJECTION("rejection", RecordType.COMMAND_REJECTION); 29 | 30 | private final String typeName; 31 | private final RecordType recordType; 32 | 33 | AllowedType(final String typeName, final RecordType recordType) { 34 | this.typeName = Objects.requireNonNull(typeName); 35 | this.recordType = Objects.requireNonNull(recordType); 36 | } 37 | 38 | public String getTypeName() { 39 | return typeName; 40 | } 41 | 42 | public RecordType getRecordType() { 43 | return recordType; 44 | } 45 | 46 | public static AllowedType forName(final String name) { 47 | if (COMMAND.typeName.equals(name)) { 48 | return COMMAND; 49 | } else if (EVENT.typeName.equals(name)) { 50 | return EVENT; 51 | } else if (REJECTION.typeName.equals(name)) { 52 | return REJECTION; 53 | } else { 54 | throw new IllegalArgumentException("Unknown record type name: " + name); 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/ConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import java.util.function.Supplier; 19 | 20 | /** 21 | * {@link ConfigParser} is a single-responsibility interface which should parse any given instance 22 | * of type {@code T} into a valid instance of type {@code R}. 23 | * 24 | * @param <T> the raw configuration type to be parsed 25 | * @param <R> the parsed configuration type 26 | */ 27 | @FunctionalInterface 28 | public interface ConfigParser<T, R> { 29 | 30 | R parse(T config); 31 | 32 | default R parse(T config, final Supplier<T> defaultValue) { 33 | if (config == null) { 34 | config = defaultValue.get(); 35 | } 36 | 37 | return parse(config); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/ConfigParserUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import java.util.Arrays; 19 | import java.util.List; 20 | import java.util.Optional; 21 | import java.util.function.Function; 22 | import java.util.stream.Collectors; 23 | 24 | /** 25 | * Utility tool belt to parse configuration. Only add methods here if they are used in more than one 26 | * class. 27 | */ 28 | final class ConfigParserUtil { 29 | private ConfigParserUtil() {} 30 | 31 | static <T> T get(final T property, final T fallback) { 32 | return Optional.ofNullable(property).orElse(fallback); 33 | } 34 | 35 | static <T, R> R get(final T property, final R fallback, final Function<T, R> transformer) { 36 | return Optional.ofNullable(property).map(transformer).orElse(fallback); 37 | } 38 | 39 | static List<String> splitCommaSeparatedString(final String value) { 40 | return Arrays.stream(value.split(",")).map(String::trim).collect(Collectors.toList()); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get; 19 | 20 | import io.zeebe.exporters.kafka.config.Config; 21 | import io.zeebe.exporters.kafka.config.ProducerConfig; 22 | import io.zeebe.exporters.kafka.config.RecordsConfig; 23 | import io.zeebe.exporters.kafka.config.raw.RawConfig; 24 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig; 25 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig; 26 | import java.time.Duration; 27 | import java.util.Objects; 28 | 29 | /** 30 | * {@link RawConfigParser} parses a given {@link RawConfig} into a valid {@link Config} instance, 31 | * substituting sane defaults for missing properties. 32 | * 33 | *
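 * <p>A minimal usage sketch (parsing an empty raw config simply yields the documented defaults):
 *
 * <pre>{@code
 * // produces a Config with DEFAULT_MAX_BATCH_SIZE (100) and a 1 second flush interval
 * Config config = new RawConfigParser().parse(new RawConfig());
 * }</pre>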

You can inject your own {@code recordsConfigParser} and {@code producerConfigParser} 34 | * implementations to override the parsing of the nested types. 35 | */ 36 | public final class RawConfigParser implements ConfigParser<RawConfig, Config> { 37 | static final int DEFAULT_MAX_BATCH_SIZE = 100; 38 | static final Duration DEFAULT_FLUSH_INTERVAL_MS = Duration.ofSeconds(1); 39 | 40 | private final ConfigParser<RawRecordsConfig, RecordsConfig> recordsConfigParser; 41 | private final ConfigParser<RawProducerConfig, ProducerConfig> producerConfigParser; 42 | 43 | public RawConfigParser() { 44 | this(new RawRecordsConfigParser(), new RawProducerConfigParser()); 45 | } 46 | 47 | RawConfigParser( 48 | final ConfigParser<RawRecordsConfig, RecordsConfig> recordsConfigParser, 49 | final ConfigParser<RawProducerConfig, ProducerConfig> producerConfigParser) { 50 | this.recordsConfigParser = Objects.requireNonNull(recordsConfigParser); 51 | this.producerConfigParser = Objects.requireNonNull(producerConfigParser); 52 | } 53 | 54 | @Override 55 | public Config parse(final RawConfig config) { 56 | Objects.requireNonNull(config); 57 | 58 | final ProducerConfig producerConfig = 59 | producerConfigParser.parse(config.producer, RawProducerConfig::new); 60 | final RecordsConfig recordsConfig = 61 | recordsConfigParser.parse(config.records, RawRecordsConfig::new); 62 | final Integer maxBatchSize = get(config.maxBatchSize, DEFAULT_MAX_BATCH_SIZE); 63 | final Duration flushInterval = 64 | get(config.flushIntervalMs, DEFAULT_FLUSH_INTERVAL_MS, Duration::ofMillis); 65 | 66 | return new Config(producerConfig, recordsConfig, maxBatchSize, flushInterval); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawProducerConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get; 19 | 20 | import io.zeebe.exporters.kafka.config.ProducerConfig; 21 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig; 22 | import java.io.IOException; 23 | import java.io.Reader; 24 | import java.io.StringReader; 25 | import java.io.UncheckedIOException; 26 | import java.time.Duration; 27 | import java.util.Collections; 28 | import java.util.HashMap; 29 | import java.util.List; 30 | import java.util.Map; 31 | import java.util.Objects; 32 | import java.util.Properties; 33 | 34 | /** 35 | * {@link RawProducerConfigParser} parses instances of {@link RawProducerConfig} into valid 36 | * instances of {@link ProducerConfig}, substituting sane defaults for missing properties. 37 | * 38 | *
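 * <p>A usage sketch; the property keys below are standard Kafka producer settings, shown purely
 * for illustration:
 *
 * <pre>{@code
 * RawProducerConfig raw = new RawProducerConfig();
 * raw.servers = "broker-1:9092,broker-2:9092";
 * raw.config = "security.protocol=SSL\nssl.truststore.location=/tmp/truststore.jks";
 * ProducerConfig parsed = new RawProducerConfigParser().parse(raw);
 * }</pre>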

One thing to note: it parses the {@link RawProducerConfig#config} string as if it were 39 | * a properties file, delegating this to {@link Properties#load(Reader)}. 40 | */ 41 | public class RawProducerConfigParser implements ConfigParser<RawProducerConfig, ProducerConfig> { 42 | 43 | public static final Duration DEFAULT_MAX_BLOCKING_TIMEOUT = Duration.ofSeconds(2); 44 | static final List<String> DEFAULT_SERVERS = Collections.singletonList("localhost:9092"); 45 | static final String DEFAULT_CLIENT_ID = "zeebe"; 46 | static final Duration DEFAULT_CLOSE_TIMEOUT = Duration.ofSeconds(20); 47 | static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5); 48 | 49 | @Override 50 | public ProducerConfig parse(final RawProducerConfig config) { 51 | Objects.requireNonNull(config); 52 | 53 | final List<String> servers = 54 | get(config.servers, DEFAULT_SERVERS, ConfigParserUtil::splitCommaSeparatedString); 55 | final String clientId = get(config.clientId, DEFAULT_CLIENT_ID); 56 | final Duration closeTimeout = 57 | get(config.closeTimeoutMs, DEFAULT_CLOSE_TIMEOUT, Duration::ofMillis); 58 | final Duration requestTimeout = 59 | get(config.requestTimeoutMs, DEFAULT_REQUEST_TIMEOUT, Duration::ofMillis); 60 | final Duration maxBlockingTimeout = 61 | get(config.maxBlockingTimeoutMs, DEFAULT_MAX_BLOCKING_TIMEOUT, Duration::ofMillis); 62 | final Map<String, Object> producerConfig = 63 | get(config.config, new HashMap<>(), this::parseProperties); 64 | 65 | return new ProducerConfig( 66 | clientId, closeTimeout, producerConfig, requestTimeout, maxBlockingTimeout, servers); 67 | } 68 | 69 | private Map<String, Object> parseProperties(final String propertiesString) { 70 | final Properties properties = new Properties(); 71 | final Map<String, Object> parsed = new HashMap<>(); 72 | 73 | try { 74 | properties.load(new StringReader(propertiesString)); 75 | } catch (final IOException e) { 76 | throw new UncheckedIOException(e); 77 | } 78 | 79 | for (final String property : properties.stringPropertyNames()) { 80 | parsed.put(property, properties.get(property)); 81 | } 82 | 83 | return parsed; 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawRecordConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get; 19 | 20 | import io.camunda.zeebe.protocol.record.RecordType; 21 | import io.zeebe.exporters.kafka.config.RecordConfig; 22 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig; 23 | import java.util.Collections; 24 | import java.util.EnumSet; 25 | import java.util.Objects; 26 | import java.util.Optional; 27 | import java.util.Set; 28 | import java.util.function.Predicate; 29 | 30 | /** 31 | * {@link RawRecordConfigParser} parses instances of {@link RawRecordConfig} into valid instances of 32 | * {@link RecordConfig}, substituting defaults for missing properties. 33 | * 34 | *
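 * <p>For example, a sketch of how the type string is parsed (default topic shown for context):
 *
 * <pre>{@code
 * RawRecordConfig raw = new RawRecordConfig();
 * raw.type = "event,rejection";
 * RecordConfig parsed = new RawRecordConfigParser().parse(raw);
 * parsed.getAllowedTypes(); // [EVENT, COMMAND_REJECTION]
 * parsed.getTopic();        // "zeebe" (DEFAULT_TOPIC_NAME, since no topic was given)
 * }</pre>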

The defaults can be overridden by passing an instance of {@link RecordConfig}, from which 35 | * default properties will be taken. This is used notably in {@link RawRecordsConfigParser}, where 36 | * it first parses {@link io.zeebe.exporters.kafka.config.raw.RawRecordsConfig#defaults} and passes 37 | * the parsed value as the defaults here for all subsequent record configurations. 38 | */ 39 | public class RawRecordConfigParser implements ConfigParser<RawRecordConfig, RecordConfig> { 40 | static final String DEFAULT_TOPIC_NAME = "zeebe"; 41 | static final EnumSet<RecordType> DEFAULT_ALLOWED_TYPES = 42 | EnumSet.complementOf(EnumSet.of(RecordType.NULL_VAL, RecordType.SBE_UNKNOWN)); 43 | 44 | private final RecordConfig defaults; 45 | 46 | public RawRecordConfigParser() { 47 | this(new RecordConfig(DEFAULT_ALLOWED_TYPES, DEFAULT_TOPIC_NAME)); 48 | } 49 | 50 | public RawRecordConfigParser(final RecordConfig defaults) { 51 | this.defaults = defaults; 52 | } 53 | 54 | @Override 55 | public RecordConfig parse(final RawRecordConfig config) { 56 | Objects.requireNonNull(config); 57 | 58 | final Set<RecordType> allowedTypes; 59 | final String topic = Optional.ofNullable(config.topic).orElse(defaults.getTopic()); 60 | 61 | if (config.type != null) { 62 | allowedTypes = EnumSet.noneOf(RecordType.class); 63 | get(config.type, Collections.emptyList(), ConfigParserUtil::splitCommaSeparatedString) 64 | .stream() 65 | .filter(Predicate.not(String::isBlank)) 66 | .forEach(t -> allowedTypes.add(AllowedType.forName(t).getRecordType())); 67 | } else { 68 | allowedTypes = defaults.getAllowedTypes(); 69 | } 70 | 71 | return new RecordConfig(allowedTypes, topic); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawRecordsConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import io.camunda.zeebe.protocol.record.ValueType; 19 | import io.zeebe.exporters.kafka.config.RecordConfig; 20 | import io.zeebe.exporters.kafka.config.RecordsConfig; 21 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig; 22 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig; 23 | import java.util.EnumMap; 24 | import java.util.Map; 25 | import java.util.Objects; 26 | import java.util.Optional; 27 | 28 | /** 29 | * {@link RawRecordsConfigParser} parses instances of {@link RawRecordsConfig} into valid instances 30 | * of {@link RecordsConfig}. 31 | * 32 | *

You'll note that it's not possible to pass your own implementation of {@code 33 | * ConfigParser} - this is because after parsing {@link 34 | * RawRecordsConfig#defaults}, the result is passed as defaults to a new instance of {@link 35 | * RawRecordConfigParser}. This breaks the usual design and usage of DI, and should be refactored. 36 | */ 37 | public class RawRecordsConfigParser implements ConfigParser { 38 | private static final ConfigParser DEFAULTS_RECORD_CONFIG_PARSER = 39 | new RawRecordConfigParser(); 40 | 41 | @SuppressWarnings("java:S138") 42 | @Override 43 | public RecordsConfig parse(final RawRecordsConfig config) { 44 | Objects.requireNonNull(config); 45 | 46 | final Map typeMap = new EnumMap<>(ValueType.class); 47 | final RecordConfig defaults = 48 | DEFAULTS_RECORD_CONFIG_PARSER.parse(config.defaults, RawRecordConfig::new); 49 | final ConfigParser recordConfigParser = 50 | new RawRecordConfigParser(defaults); 51 | 52 | Optional.ofNullable(config.deployment) 53 | .map(recordConfigParser::parse) 54 | .ifPresent(c -> typeMap.put(ValueType.DEPLOYMENT, c)); 55 | Optional.ofNullable(config.deploymentDistribution) 56 | .map(recordConfigParser::parse) 57 | .ifPresent(c -> typeMap.put(ValueType.DEPLOYMENT_DISTRIBUTION, c)); 58 | Optional.ofNullable(config.error) 59 | .map(recordConfigParser::parse) 60 | .ifPresent(c -> typeMap.put(ValueType.ERROR, c)); 61 | Optional.ofNullable(config.incident) 62 | .map(recordConfigParser::parse) 63 | .ifPresent(c -> typeMap.put(ValueType.INCIDENT, c)); 64 | Optional.ofNullable(config.job) 65 | .map(recordConfigParser::parse) 66 | .ifPresent(c -> typeMap.put(ValueType.JOB, c)); 67 | Optional.ofNullable(config.jobBatch) 68 | .map(recordConfigParser::parse) 69 | .ifPresent(c -> typeMap.put(ValueType.JOB_BATCH, c)); 70 | Optional.ofNullable(config.message) 71 | .map(recordConfigParser::parse) 72 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE, c)); 73 | Optional.ofNullable(config.messageSubscription) 74 | .map(recordConfigParser::parse) 75 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE_SUBSCRIPTION, c)); 76 | Optional.ofNullable(config.messageStartEventSubscription) 77 | .map(recordConfigParser::parse) 78 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE_START_EVENT_SUBSCRIPTION, c)); 79 | Optional.ofNullable(config.processInstance) 80 | .map(recordConfigParser::parse) 81 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE, c)); 82 | Optional.ofNullable(config.processInstanceCreation) 83 | .map(recordConfigParser::parse) 84 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE_CREATION, c)); 85 | Optional.ofNullable(config.processInstanceResult) 86 | .map(recordConfigParser::parse) 87 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE_RESULT, c)); 88 | Optional.ofNullable(config.processMessageSubscription) 89 | .map(recordConfigParser::parse) 90 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_MESSAGE_SUBSCRIPTION, c)); 91 | Optional.ofNullable(config.process) 92 | .map(recordConfigParser::parse) 93 | .ifPresent(c -> typeMap.put(ValueType.PROCESS, c)); 94 | Optional.ofNullable(config.processEvent) 95 | .map(recordConfigParser::parse) 96 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_EVENT, c)); 97 | Optional.ofNullable(config.timer) 98 | .map(recordConfigParser::parse) 99 | .ifPresent(c -> typeMap.put(ValueType.TIMER, c)); 100 | Optional.ofNullable(config.variable) 101 | .map(recordConfigParser::parse) 102 | .ifPresent(c -> typeMap.put(ValueType.VARIABLE, c)); 103 | Optional.ofNullable(config.variableDocument) 104 | 
.map(recordConfigParser::parse) 105 | .ifPresent(c -> typeMap.put(ValueType.VARIABLE_DOCUMENT, c)); 106 | 107 | return new RecordsConfig(typeMap, defaults); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.raw; 17 | 18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck") 19 | public final class RawConfig { 20 | /** 21 | * Controls the number of records to buffer in a single record batch before forcing a flush. Note 22 | * that a flush may occur before anyway due to periodic flushing. This setting should help you 23 | * estimate a soft upper bound to the memory consumption of the exporter. If you assume a worst 24 | * case scenario where every record is the size of your zeebe.broker.network.maxMessageSize, then 25 | * the memory required by the exporter would be at least: (maxBatchSize * 26 | * zeebe.broker.network.maxMessageSize * 2) 27 | * 28 | *

We multiply by 2 as the records are buffered twice - once in the exporter itself, and once 29 | * in the producer's network buffers (serialized at that point). There's some additional memory 30 | * overhead in the producer as well for compression, encryption, etc., so you should add a bit of 31 | * headroom, but that overhead is not proportional to the number of records and is more or less 32 | * constant. 33 | * 34 | *
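 * <p>As a rough worked example, assuming the defaults mentioned in this project (a maxBatchSize
 * of 100 and a 4MB maxMessageSize): 100 * 4MB * 2 = 800MB in the absolute worst case. Records are
 * normally far smaller than maxMessageSize, so the real footprint is typically much lower.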

Once the batch has reached this size, a flush is automatically triggered. Too small a number 35 | * here would cause many flushes, which is not good for performance, but would mean you see your 36 | * records sooner. 37 | */ 38 | public Integer maxBatchSize; 39 | 40 | /** 41 | * How often the current batch should be flushed to Kafka, regardless of whether it's full or not. 42 | */ 43 | public Long flushIntervalMs; 44 | 45 | /** Producer specific configuration; see {@link RawProducerConfig}. */ 46 | public RawProducerConfig producer; 47 | 48 | /** Records specific configuration; see {@link RawRecordsConfig}. */ 49 | public RawRecordsConfig records; 50 | } 51 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawProducerConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.raw; 17 | 18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck") 19 | public final class RawProducerConfig { 20 | 21 | /** 22 | * Producer client identifier. 23 | * 24 | * @see org.apache.kafka.clients.producer.ProducerConfig#CLIENT_ID_CONFIG 25 | */ 26 | public String clientId; 27 | 28 | /** 29 | * Grace period when shutting down the producer in milliseconds. A period which is too short could 30 | * result in resource leaks, but generally it should be fine. 31 | */ 32 | public Long closeTimeoutMs; 33 | 34 | /** 35 | * Line-separated list of Java properties, e.g. the contents of a properties file. The resulting 36 | * map is passed verbatim as part of the {@link org.apache.kafka.clients.producer.ProducerConfig}. 37 | * You can use any of the properties defined there. This allows you to configure OAuth, SSL, SASL, 38 | * etc. 39 | * 40 | *
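 * <p>For example, a value like the following sketch (all keys are standard Kafka producer
 * properties):
 *
 * <pre>{@code
 * security.protocol=SSL
 * ssl.keystore.location=/path/to/keystore.jks
 * max.request.size=1048576
 * }</pre>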

Be careful, as this allows you to overwrite anything - e.g. key and value serializers - which 41 | * can break the exporter behaviour, so make sure to properly test your settings before deploying. 42 | */ 43 | public String config; 44 | 45 | /** 46 | * Controls how long the producer will wait for a request to be acknowledged by the Kafka broker 47 | * before retrying it. 48 | * 49 | * @see org.apache.kafka.clients.producer.ProducerConfig#REQUEST_TIMEOUT_MS_CONFIG 50 | */ 51 | public Long requestTimeoutMs; 52 | 53 | /** 54 | * The maximum time to block for all blocking requests, e.g. beginTransaction, commitTransaction. 55 | * It's recommended to keep this low, around a second, as it's also the time the exporter will 56 | * block if the batch is full when trying to commit/flush it. Keeping it low isn't a big issue, as 57 | * even if it times out the first time, Kafka will still commit the transaction in the background, 58 | * and on the next try the transaction will commit much faster (e.g. if it's already committed as 59 | * far as the brokers are concerned, then it should be really fast). 60 | * 61 | * @see org.apache.kafka.clients.producer.ProducerConfig#MAX_BLOCK_MS_CONFIG 62 | */ 63 | public Long maxBlockingTimeoutMs; 64 | 65 | /** 66 | * The comma separated list of initial Kafka broker contact points. The format should be the same 67 | * one as the {@link org.apache.kafka.clients.producer.ProducerConfig} expects, i.e. "host:port". 68 | * 69 | * @see org.apache.kafka.clients.producer.ProducerConfig#BOOTSTRAP_SERVERS_CONFIG 70 | */ 71 | public String servers; 72 | } 73 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawRecordConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.raw; 17 | 18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck") 19 | public final class RawRecordConfig { 20 | 21 | /** 22 | * Type is a comma separated string of accepted record types, allowing you to filter if you want 23 | * nothing (""), commands ("command"), events ("event"), or rejections ("rejection"), or a 24 | * combination of the three, e.g. "command,event". 25 | */ 26 | public String type; 27 | 28 | /** 29 | * Topic is the topic to which records with the given value type should be sent, e.g. deployment 30 | * records could be sent to a "zeebe-deployment" topic, as sketched below.
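 *
 * <p>For example, a hypothetical override in code form:
 *
 * <pre>{@code
 * RawRecordConfig deployment = new RawRecordConfig();
 * deployment.topic = "zeebe-deployment"; // deployment records are then routed to this topic
 * }</pre>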
31 | */ 32 | public String topic; 33 | } 34 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawRecordsConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.raw; 17 | 18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck") 19 | public final class RawRecordsConfig { 20 | 21 | /** 22 | * If a record value type is omitted in your configuration file, it will fall back to whatever is 23 | * configured in the defaults. 24 | */ 25 | public RawRecordConfig defaults; 26 | 27 | /** 28 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#DEPLOYMENT} 29 | */ 30 | public RawRecordConfig deployment; 31 | 32 | /** 33 | * For records with a value of type {@link 34 | * io.camunda.zeebe.protocol.record.ValueType#DEPLOYMENT_DISTRIBUTION} 35 | */ 36 | public RawRecordConfig deploymentDistribution; 37 | 38 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#ERROR} */ 39 | public RawRecordConfig error; 40 | 41 | /** 42 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#INCIDENT} 43 | */ 44 | public RawRecordConfig incident; 45 | 46 | /** 47 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#JOB_BATCH} 48 | */ 49 | public RawRecordConfig jobBatch; 50 | 51 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#JOB} */ 52 | public RawRecordConfig job; 53 | 54 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#MESSAGE} */ 55 | public RawRecordConfig message; 56 | 57 | /** 58 | * For records with a value of type {@link 59 | * io.camunda.zeebe.protocol.record.ValueType#MESSAGE_SUBSCRIPTION} 60 | */ 61 | public RawRecordConfig messageSubscription; 62 | 63 | /** 64 | * For records with a value of type {@link 65 | * io.camunda.zeebe.protocol.record.ValueType#MESSAGE_START_EVENT_SUBSCRIPTION} 66 | */ 67 | public RawRecordConfig messageStartEventSubscription; 68 | 69 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#PROCESS} */ 70 | public RawRecordConfig process; 71 | 72 | /** 73 | * For records with a value of type {@link 74 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_EVENT} 75 | */ 76 | public RawRecordConfig processEvent; 77 | 78 | /** 79 | * For records with a value of type {@link 80 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE} 81 | */ 82 | public RawRecordConfig processInstance; 83 | 84 | /** 85 | * For records with a value of type {@link 86 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE_CREATION} 87 | */ 88 | public RawRecordConfig processInstanceCreation; 89 | 90 | /** 91 | * 
For records with a value of type {@link 92 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE_RESULT} 93 | */ 94 | public RawRecordConfig processInstanceResult; 95 | 96 | /** 97 | * For records with a value of type {@link 98 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_MESSAGE_SUBSCRIPTION} 99 | */ 100 | public RawRecordConfig processMessageSubscription; 101 | 102 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#TIMER} */ 103 | public RawRecordConfig timer; 104 | 105 | /** 106 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#VARIABLE} 107 | */ 108 | public RawRecordConfig variable; 109 | 110 | /** 111 | * For records with a value of type {@link 112 | * io.camunda.zeebe.protocol.record.ValueType#VARIABLE_DOCUMENT} 113 | */ 114 | public RawRecordConfig variableDocument; 115 | } 116 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/BoundedTransactionalRecordBatch.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.ProducerConfig; 19 | import io.zeebe.exporters.kafka.record.FullRecordBatchException; 20 | import io.zeebe.exporters.kafka.serde.RecordId; 21 | import java.util.LinkedList; 22 | import java.util.Objects; 23 | import java.util.UUID; 24 | import java.util.function.LongConsumer; 25 | import org.apache.kafka.clients.producer.Producer; 26 | import org.apache.kafka.clients.producer.ProducerRecord; 27 | import org.apache.kafka.common.KafkaException; 28 | import org.apache.kafka.common.errors.InterruptException; 29 | import org.apache.kafka.common.errors.TimeoutException; 30 | import org.slf4j.Logger; 31 | 32 | /** 33 | * An implementation of {@link RecordBatch} which uses Kafka transactions to guarantee the atomicity 34 | * of the flush operation. When a record is added, it is first appended to a linked list and then 35 | * immediately forwarded to the producer; if no transaction is in flight yet, one is started 36 | * first. On flush, the transaction is committed. 37 | * 38 | *
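 * <p>In Kafka producer terms, the lifecycle is roughly the following (a sketch of what this class
 * does internally):
 *
 * <pre>{@code
 * producer.initTransactions();  // once, when the producer is first used
 * producer.beginTransaction();  // lazily, before the first record of a batch is sent
 * producer.send(record);        // for every record added to the batch
 * producer.commitTransaction(); // on flush; the batch is cleared afterwards
 * }</pre>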

NOTE: while atomicity could still be guaranteed without transactions, they make the whole 39 | * error handling much simpler. Exactly-once semantics are out of reach anyway because of Zeebe's 40 | * own at-least-once semantics, but the simplified error handling alone makes transactions 41 | * worthwhile. 42 | *

NOTE: whenever an error occurs, if it is recoverable, it will be logged and the batch remains 43 | * as is - the operation will be retried either by adding a new record or by attempting to flush the 44 | * batch externally. If it's unrecoverable, the current producer is closed, the state is reset 45 | * (minus the linked list which remains the same so we can retry the records), and on the next add 46 | * or flush operation, the whole batch is retried. 47 | * 48 | *

NOTE: when adding a record to a full batch, it will attempt to flush the batch, blocking up to 49 | * {@link io.zeebe.exporters.kafka.config.raw.RawProducerConfig#maxBlockingTimeoutMs} milliseconds. 50 | * If it flushed successfully, then the record will be added and operations will resume as normal. 51 | * If it failed to flush, then the error will bubble up wrapped in a {@link 52 | * FullRecordBatchException}. 53 | * 54 | *

NOTE: when using this type of batch, make sure your consumers use "read_committed" as 55 | * isolation level, otherwise they may see uncommitted records. This isn't too big of a deal as 56 | * these records are anyway committed on the Zeebe side, but they may show up as duplicates. 57 | */ 58 | final class BoundedTransactionalRecordBatch implements RecordBatch { 59 | private final LinkedList> records = new LinkedList<>(); 60 | 61 | private final KafkaProducerFactory producerFactory; 62 | private final ProducerConfig config; 63 | private final String producerId; 64 | private final int maxBatchSize; 65 | private final LongConsumer onFlushCallback; 66 | private final Logger logger; 67 | 68 | private Producer producer; 69 | private boolean producerInitialized = false; 70 | private boolean transactionBegan = false; 71 | private int nextSendIndex = 0; 72 | 73 | public BoundedTransactionalRecordBatch( 74 | final ProducerConfig config, 75 | final int maxBatchSize, 76 | final LongConsumer onFlushCallback, 77 | final Logger logger, 78 | final KafkaProducerFactory producerFactory) { 79 | this( 80 | config, 81 | maxBatchSize, 82 | onFlushCallback, 83 | logger, 84 | producerFactory, 85 | UUID.randomUUID().toString()); 86 | } 87 | 88 | public BoundedTransactionalRecordBatch( 89 | final ProducerConfig config, 90 | final int maxBatchSize, 91 | final LongConsumer onFlushCallback, 92 | final Logger logger, 93 | final KafkaProducerFactory producerFactory, 94 | final String producerId) { 95 | this.config = Objects.requireNonNull(config); 96 | this.maxBatchSize = maxBatchSize; 97 | this.onFlushCallback = Objects.requireNonNull(onFlushCallback); 98 | this.logger = Objects.requireNonNull(logger); 99 | this.producerFactory = Objects.requireNonNull(producerFactory); 100 | this.producerId = Objects.requireNonNull(producerId); 101 | } 102 | 103 | @Override 104 | public void add(final ProducerRecord record) throws FullRecordBatchException { 105 | if (records.size() >= maxBatchSize) { 106 | try { 107 | flushBatch(); 108 | } catch (final TimeoutException | InterruptException e) { 109 | throw new FullRecordBatchException(maxBatchSize, e); 110 | } catch (final Exception e) { 111 | close(); 112 | throw new FullRecordBatchException(maxBatchSize, e); 113 | } 114 | } 115 | 116 | records.add(record); 117 | 118 | try { 119 | sendUnsentRecords(); 120 | } catch (final TimeoutException | InterruptException e) { 121 | logger.debug( 122 | "Timed out or interrupted while sending unsent records, will be retried later", e); 123 | } catch (final Exception e) { 124 | logger.warn("Failed to send unsent record, will be retried later with a new producer", e); 125 | close(); 126 | } 127 | } 128 | 129 | @Override 130 | public void flush() { 131 | if (records.isEmpty()) { 132 | logger.trace("Skipping batch commit as there are no records in the batch"); 133 | return; 134 | } 135 | 136 | logger.trace( 137 | "Committing {} from the current batch, up to position {}", 138 | records.size(), 139 | records.getLast().key().getPosition()); 140 | 141 | try { 142 | flushBatch(); 143 | } catch (final TimeoutException | InterruptException e) { 144 | logger.debug("Timed out or interrupted while committing, will be retried later", e); 145 | } catch (final Exception e) { 146 | logger.warn("Non-recoverable error occurred while committing, retrying with new producer", e); 147 | close(); 148 | } 149 | } 150 | 151 | @Override 152 | public void close() { 153 | if (producer == null) { 154 | return; 155 | } 156 | 157 | final var closeTimeout = 
config.getCloseTimeout(); 158 | logger.debug("Closing producer with timeout {}", closeTimeout); 159 | 160 | try { 161 | producer.close(closeTimeout); 162 | } catch (final Exception e) { 163 | logger.warn( 164 | "Failed to gracefully close Kafka exporter; this is most likely fine, but may cause " 165 | + "resource to leaks. Investigate if it keeps repeating itself.", 166 | e); 167 | } 168 | 169 | producer = null; 170 | producerInitialized = false; 171 | transactionBegan = false; 172 | nextSendIndex = 0; 173 | // the records' list is not cleared on purpose, so that we can later try it 174 | } 175 | 176 | private void flushBatch() throws KafkaException, IllegalStateException { 177 | sendUnsentRecords(); 178 | 179 | final var commitPosition = records.getLast().key().getPosition(); 180 | commitTransaction(); 181 | onFlushCallback.accept(commitPosition); 182 | } 183 | 184 | private void commitTransaction() { 185 | if (!transactionBegan) { 186 | throw new IllegalStateException( 187 | "Expected to be in transaction, but no transaction is in flight"); 188 | } 189 | 190 | producer.commitTransaction(); 191 | transactionBegan = false; 192 | records.clear(); 193 | nextSendIndex = 0; 194 | } 195 | 196 | private void sendUnsentRecords() { 197 | final var unsentRecords = Math.max(0, records.size() - nextSendIndex); 198 | logger.trace("Sending {} remaining unsent records from the current batch", unsentRecords); 199 | 200 | ensureWithinTransaction(); 201 | 202 | while (nextSendIndex < records.size()) { 203 | final var record = records.get(nextSendIndex); 204 | producer.send(record); 205 | logger.trace("Sent record {}", record); 206 | nextSendIndex++; 207 | } 208 | } 209 | 210 | private void ensureProducer() { 211 | if (producer != null) { 212 | return; 213 | } 214 | 215 | producer = producerFactory.newProducer(config, producerId); 216 | logger.trace("Created new producer"); 217 | } 218 | 219 | private void ensureProducerInitialized() { 220 | ensureProducer(); 221 | 222 | if (!producerInitialized) { 223 | producer.initTransactions(); 224 | producerInitialized = true; 225 | logger.trace("Initialized producer for transactions"); 226 | } 227 | } 228 | 229 | private void ensureWithinTransaction() { 230 | ensureProducerInitialized(); 231 | 232 | if (!transactionBegan) { 233 | producer.beginTransaction(); 234 | transactionBegan = true; 235 | logger.trace("Began new producer transaction"); 236 | } 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/DefaultKafkaProducerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.Config; 19 | import io.zeebe.exporters.kafka.serde.RecordId; 20 | import io.zeebe.exporters.kafka.serde.RecordIdSerializer; 21 | import java.util.HashMap; 22 | import org.apache.kafka.clients.producer.KafkaProducer; 23 | import org.apache.kafka.clients.producer.Producer; 24 | import org.apache.kafka.clients.producer.ProducerConfig; 25 | import org.apache.kafka.common.serialization.ByteArraySerializer; 26 | 27 | /** 28 | * {@link DefaultKafkaProducerFactory} is the default implementation of {@link KafkaProducerFactory} 29 | * used by {@link io.zeebe.exporters.kafka.KafkaExporter}. It creates a new {@link Producer} based 30 | * on the given {@link Config}, and adds a few default properties. 31 | * 32 | *

It's tuned for small, fast batching, and low memory consumption. By default, it will wait up 33 | * to 10ms or until it has batched 4MB (the default maxMessageSize of Zeebe) in memory before 34 | * sending a request. This is to lessen the load on Kafka while remaining fairly responsive. 35 | * 36 | *

The memory usage of the producer is soft capped to 40Mb - if you produce much faster than it 37 | * can export, then you may run into exceptions. In this case, you can increase the memory to 38 | * something you feel more comfortable with via {@link 39 | * io.zeebe.exporters.kafka.config.raw.RawProducerConfig#config}. 40 | */ 41 | final class DefaultKafkaProducerFactory implements KafkaProducerFactory { 42 | @Override 43 | public Producer newProducer( 44 | final io.zeebe.exporters.kafka.config.ProducerConfig config, final String producerId) { 45 | final var options = new HashMap(); 46 | final var clientId = String.format("%s-%s", config.getClientId(), producerId); 47 | 48 | options.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, producerId); 49 | options.put(ProducerConfig.CLIENT_ID_CONFIG, clientId); 50 | options.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); 51 | 52 | // disable concurrent connections to ensure order is preserved 53 | options.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1); 54 | options.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE); 55 | options.put( 56 | ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, (int) config.getRequestTimeout().toMillis()); 57 | options.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getServers()); 58 | 59 | // provides a soft memory bound - there's some memory overhead used by SSL, compression, etc., 60 | // but this gives us a good idea of how much memory will be used by the exporter 61 | options.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40 * 1024 * 1024L); 62 | 63 | // wait up to 10ms or until the batch is full before sending 64 | options.put(ProducerConfig.LINGER_MS_CONFIG, 10L); 65 | options.put(ProducerConfig.BATCH_SIZE_CONFIG, 4 * 1024 * 1024L); 66 | options.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, config.getMaxBlockingTimeout().toMillis()); 67 | 68 | // leave always close to the last step to allow user configuration to override producer options 69 | options.putAll(config.getConfig()); 70 | 71 | options.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, RecordIdSerializer.class); 72 | options.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); 73 | options.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RecordIdPartitioner.class); 74 | 75 | return new KafkaProducer<>(options); 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/KafkaProducerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.ProducerConfig; 19 | import io.zeebe.exporters.kafka.serde.RecordId; 20 | import org.apache.kafka.clients.producer.Producer; 21 | 22 | /** 23 | * Implementations may or may not make use of the given configuration, but must always return a 24 | * valid producer. 25 | */ 26 | @FunctionalInterface 27 | public interface KafkaProducerFactory { 28 | 29 | Producer newProducer(final ProducerConfig config, final String producerId); 30 | 31 | static KafkaProducerFactory defaultFactory() { 32 | return new DefaultKafkaProducerFactory(); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordBatch.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.record.FullRecordBatchException; 19 | import io.zeebe.exporters.kafka.serde.RecordId; 20 | import org.apache.kafka.clients.producer.ProducerRecord; 21 | 22 | /** 23 | * Represents a batch of producer records which can be committed at will. Implementations can decide 24 | * whether to bound the batch, or the semantics of it, as long as they respect this contract. 25 | * 26 | *

NOTE: while it may seem like overhead to create this abstraction, it lets us swap in different 27 | * batching implementations and makes unit testing much easier (see {@link RecordBatchFactory}). 28 | *

35 | */ 36 | public interface RecordBatch extends AutoCloseable { 37 | 38 | /** 39 | * Adds the record to the batch. May throw {@link FullRecordBatchException} if the batch is 40 | * bounded. Unbounded implementations are free to omit the throws portion of the signature. 41 | * 42 | * @param record the record to add 43 | * @throws FullRecordBatchException if the batch is full 44 | */ 45 | void add(final ProducerRecord<RecordId, byte[]> record) throws FullRecordBatchException; 46 | 47 | /** 48 | * Commits the batch, reporting the highest guaranteed exported position via the flush callback. 49 | * This is expected to be a blocking operation - if it returns normally, it is guaranteed that 50 | * ALL records up to that position have been committed. On success, the batch should be cleared 51 | * and new records can be added to it. 52 | * 53 | *

NOTE: this method should not throw any errors, as it is typically called from a path where 54 | * errors cannot be safely handled, e.g. from a scheduled task. 55 | * 56 | *

NOTE: this is expected to be an atomic operation. Either ALL records were flushed, or none 57 | * of them were. 58 | */ 59 | void flush(); 60 | 61 | /** 62 | * Should release any resources belonging to the batch. It's not expected that other operations 63 | * are called after this. 64 | */ 65 | void close(); 66 | } 67 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordBatchFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.ProducerConfig; 19 | import java.util.function.LongConsumer; 20 | import org.slf4j.Logger; 21 | 22 | /** 23 | * While this seems like overhead, it's the only way to inject the record batch type into the 24 | * exporter instance, as the exporter instance is created by the Zeebe broker using the 25 | * argument-less constructor. The other option would be via configuration, which would be more 26 | * overhead, but the right approach in the future if multiple types are available. 27 | * 28 | *
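 * <p>For example, a test could inject a stubbed batch via a lambda (a sketch; the stub type and
 * its constructor are hypothetical):
 *
 * <pre>{@code
 * RecordBatchFactory factory =
 *     (config, maxBatchSize, onFlushCallback, logger) ->
 *         new RecordBatchStub(maxBatchSize, onFlushCallback); // hypothetical test double
 * }</pre>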

The primary goal of this and the {@link RecordBatch} interface is to ease unit testing. 29 | */ 30 | @FunctionalInterface 31 | public interface RecordBatchFactory { 32 | 33 | RecordBatch newRecordBatch( 34 | final ProducerConfig config, 35 | final int maxBatchSize, 36 | final LongConsumer onFlushCallback, 37 | final Logger logger); 38 | 39 | static RecordBatchFactory defaultFactory() { 40 | return (config, maxBatchSize, onFlushCallback, logger) -> 41 | new BoundedTransactionalRecordBatch( 42 | config, maxBatchSize, onFlushCallback, logger, KafkaProducerFactory.defaultFactory()); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordIdPartitioner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.serde.RecordId; 19 | import java.util.List; 20 | import java.util.Map; 21 | import org.apache.kafka.clients.producer.Partitioner; 22 | import org.apache.kafka.clients.producer.internals.DefaultPartitioner; 23 | import org.apache.kafka.common.Cluster; 24 | import org.apache.kafka.common.PartitionInfo; 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | 28 | /** 29 | * A {@link Partitioner} implementation which expects only {@link RecordId} objects as keys. 30 | * 31 | *

It will partition the records using {@link RecordId#getPartitionId()}, ensuring that all Zeebe 32 | * records on the same Zeebe partition will also be on the same Kafka partition, preserving the 33 | * ordering. It does so by taking the Zeebe partition ID (which starts at 1), and applying a modulo 34 | * against the number of Kafka partitions for the given topic, e.g. {@code zeebePartitionId % 35 | * kafkaPartitionsCount}. 36 | * 37 | *

One downside is that if you have more Kafka partitions than Zeebe partitions, some of your 38 | * partitions will be unused: partition 0, and any partition whose number is greater than the count 39 | * of Zeebe partitions. 40 | * 41 | *

For example, if you have 3 Zeebe partitions, and 2 Kafka partitions: 42 | * 43 | *

    44 | *
  • RecordId{partitionId=1, position=1} to Kafka partition 1 45 | *
  • RecordId{partitionId=2, position=1} to Kafka partition 0 46 | *
  • RecordId{partitionId=3, position=1} to Kafka partition 1 47 | *
  • RecordId{partitionId=3, position=2} to Kafka partition 1 48 | *
  • RecordId{partitionId=2, position=2} to Kafka partition 0 49 | *
50 | * 51 | *

With more Kafka partitions, for example, 4 Kafka partitions, and 3 Zeebe partitions: 52 | * 53 | *

    54 | *
  • RecordId{partitionId=1, position=1} to Kafka partition 1 55 | *
  • RecordId{partitionId=2, position=1} to Kafka partition 2 56 | *
  • RecordId{partitionId=3, position=1} to Kafka partition 3 57 | *
  • RecordId{partitionId=3, position=2} to Kafka partition 3 58 | *
  • RecordId{partitionId=2, position=2} to Kafka partition 2 59 | *
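 *
 * <p>As a quick sanity check of the tables above, the mapping is plain modulo arithmetic. The
 * following is a hypothetical, self-contained sketch (the variable names are illustrative, not
 * part of this class):
 *
 * <pre>{@code
 * int zeebePartitionId = 3;     // Zeebe partition IDs start at 1
 * int kafkaPartitionsCount = 4; // i.e. cluster.partitionsForTopic(topic).size()
 * int kafkaPartition = zeebePartitionId % kafkaPartitionsCount; // -> 3, matching the table
 * }</pre>
 *
 * <p>To register this partitioner on a plain Kafka producer (a sketch of standard Kafka client
 * configuration; the exporter itself may wire this up differently), set the
 * {@code partitioner.class} property:
 *
 * <pre>{@code
 * Properties props = new Properties();
 * // plus bootstrap.servers, key/value serializers, etc.
 * props.put("partitioner.class", RecordIdPartitioner.class.getName());
 * }</pre>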
60 | */ 61 | public final class RecordIdPartitioner implements Partitioner { 62 | private static final Logger LOGGER = LoggerFactory.getLogger(RecordIdPartitioner.class); 63 | 64 | private final DefaultPartitioner defaultPartitioner = new DefaultPartitioner(); 65 | 66 | @Override 67 | public int partition( 68 | final String topic, 69 | final Object key, 70 | final byte[] keyBytes, 71 | final Object value, 72 | final byte[] valueBytes, 73 | final Cluster cluster) { 74 | if (!(key instanceof RecordId)) { 75 | LOGGER.warn( 76 | "Expected to partition a RecordId object, but got {}; falling back to default partitioner", 77 | key.getClass()); 78 | return defaultPartitioner.partition(topic, key, keyBytes, value, valueBytes, cluster); 79 | } 80 | 81 | final List partitions = cluster.partitionsForTopic(topic); 82 | final int numPartitions = partitions.size(); 83 | final RecordId recordId = (RecordId) key; 84 | final int partitionId = recordId.getPartitionId() % numPartitions; 85 | 86 | LOGGER.trace("Assigning partition {} to record ID {}", partitionId, recordId); 87 | 88 | return partitionId; 89 | } 90 | 91 | @Override 92 | public void close() { 93 | // do nothing 94 | } 95 | 96 | @Override 97 | public void configure(final Map configs) { 98 | // not configurable yet 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/record/FullRecordBatchException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.record; 17 | 18 | @SuppressWarnings("unused") 19 | public final class FullRecordBatchException extends RuntimeException { 20 | private static final String MESSAGE_FORMAT = 21 | "No new records can be added to the record batch with a maximum size of %d"; 22 | 23 | private final int maxBatchSize; 24 | 25 | public FullRecordBatchException(final int maxBatchSize, final Throwable cause) { 26 | super(String.format(MESSAGE_FORMAT, maxBatchSize), cause); 27 | this.maxBatchSize = maxBatchSize; 28 | } 29 | 30 | public int getMaxBatchSize() { 31 | return maxBatchSize; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/record/KafkaRecordFilter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.record; 17 | 18 | import io.camunda.zeebe.exporter.api.context.Context.RecordFilter; 19 | import io.camunda.zeebe.protocol.record.RecordType; 20 | import io.camunda.zeebe.protocol.record.ValueType; 21 | import io.zeebe.exporters.kafka.config.RecordsConfig; 22 | import java.util.Objects; 23 | import java.util.Optional; 24 | 25 | /** 26 | * {@link KafkaRecordFilter} is an implementation of {@link RecordFilter} which uses the {@link 27 | * RecordsConfig} to build the filter. 28 | */ 29 | public final class KafkaRecordFilter implements RecordFilter { 30 | private final RecordsConfig config; 31 | 32 | public KafkaRecordFilter(final RecordsConfig config) { 33 | this.config = Objects.requireNonNull(config); 34 | } 35 | 36 | /** 37 | * If any of the {@link RecordsConfig#getTypeMap()} accept the given record type, the {@code 38 | * recordType} is accepted. 39 | * 40 | * @param recordType {@inheritDoc} 41 | * @return {@inheritDoc} 42 | */ 43 | @Override 44 | public boolean acceptType(final RecordType recordType) { 45 | return config.getDefaults().getAllowedTypes().contains(recordType) 46 | || config.getTypeMap().values().stream() 47 | .anyMatch(c -> c.getAllowedTypes().contains(recordType)); 48 | } 49 | 50 | /** 51 | * If the {@link io.zeebe.exporters.kafka.config.RecordConfig} instance stored in {@link 52 | * RecordsConfig#getTypeMap()} for {@code valueType} has any allowed type at all, the {@code 53 | * valueType} is accepted. 54 | * 55 | * @param valueType {@inheritDoc} 56 | * @return {@inheritDoc} 57 | */ 58 | @Override 59 | public boolean acceptValue(final ValueType valueType) { 60 | return !Optional.ofNullable(config.getTypeMap().get(valueType)) 61 | .orElse(config.getDefaults()) 62 | .getAllowedTypes() 63 | .isEmpty(); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/record/RecordHandler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka.record; 17 | 18 | import io.camunda.zeebe.protocol.record.Record; 19 | import io.zeebe.exporters.kafka.config.RecordConfig; 20 | import io.zeebe.exporters.kafka.config.RecordsConfig; 21 | import io.zeebe.exporters.kafka.serde.RecordId; 22 | import java.util.Objects; 23 | import org.apache.kafka.clients.producer.ProducerRecord; 24 | import org.apache.kafka.common.serialization.Serializer; 25 | 26 | /** 27 | * {@link RecordHandler} is responsible for testing if certain records are allowed, and if so, 28 | * transforming them. 29 | * 30 | *

Should be refactored into two classes so that each keeps a single responsibility. 31 | */ 32 | public final class RecordHandler { 33 | private final RecordsConfig configuration; 34 | private final Serializer<Record<?>> serializer; 35 | 36 | public RecordHandler(final RecordsConfig configuration) { 37 | this(configuration, new RecordSerializer()); 38 | } 39 | 40 | public RecordHandler(final RecordsConfig configuration, final Serializer<Record<?>> serializer) { 41 | this.configuration = Objects.requireNonNull(configuration); 42 | this.serializer = Objects.requireNonNull(serializer); 43 | } 44 | 45 | /** 46 | * Transforms the given {@link Record} into a Kafka {@link ProducerRecord}. 47 | * 48 | * @param record the record to transform 49 | * @return the transformed record 50 | */ 51 | public ProducerRecord<RecordId, byte[]> transform(final Record<?> record) { 52 | final RecordConfig config = getRecordConfig(record); 53 | final byte[] serializedRecord = serializer.serialize(config.getTopic(), record); 54 | return new ProducerRecord<>( 55 | config.getTopic(), 56 | new RecordId(record.getPartitionId(), record.getPosition()), 57 | serializedRecord); 58 | } 59 | 60 | /** 61 | * Tests whether or not the given record is allowed, as specified by the configuration. 62 | * 63 | * @param record the record to test 64 | * @return true if allowed, false otherwise 65 | */ 66 | public boolean isAllowed(final Record<?> record) { 67 | final RecordConfig config = getRecordConfig(record); 68 | return config.getAllowedTypes().contains(record.getRecordType()); 69 | } 70 | 71 | private RecordConfig getRecordConfig(final Record<?> record) { 72 | return configuration.forType(Objects.requireNonNull(record).getValueType()); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /exporter/src/main/java/io/zeebe/exporters/kafka/record/RecordSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.record; 17 | 18 | import io.camunda.zeebe.protocol.record.Record; 19 | import java.util.Map; 20 | import org.apache.kafka.common.serialization.Serializer; 21 | import org.apache.kafka.common.serialization.StringSerializer; 22 | 23 | /** 24 | * A {@link Serializer} implementation for {@link Record} objects, which first uses a wrapped 25 | * {@link StringSerializer} to serialize {@link Record} to JSON. You can specify your preferred 26 | * encoding via the {@link StringSerializer} configuration. Any configuration given to this serializer 27 | * is also passed to the wrapped {@link StringSerializer}.
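 *
 * <p>A minimal usage sketch (hedged: the topic name and the {@code record} variable are
 * hypothetical, and in practice the Kafka producer invokes this serializer rather than user code):
 *
 * <pre>{@code
 * Serializer<Record<?>> serializer = new RecordSerializer();
 * serializer.configure(Map.of(), false);               // configure as a value serializer
 * byte[] json = serializer.serialize("zeebe", record); // the bytes of record.toJson(), UTF-8 by default
 * }</pre>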
28 | */ 29 | public final class RecordSerializer implements Serializer> { 30 | private final StringSerializer delegate; 31 | 32 | public RecordSerializer() { 33 | this(new StringSerializer()); 34 | } 35 | 36 | public RecordSerializer(final StringSerializer delegate) { 37 | this.delegate = delegate; 38 | } 39 | 40 | @Override 41 | public void configure(final Map configs, final boolean isKey) { 42 | delegate.configure(configs, isKey); 43 | } 44 | 45 | @Override 46 | public byte[] serialize(final String topic, final Record data) { 47 | return delegate.serialize(topic, data.toJson()); 48 | } 49 | 50 | @Override 51 | public void close() { 52 | delegate.close(); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /exporter/src/main/resources/META-INF/services/io.camunda.zeebe.exporter.api.Exporter: -------------------------------------------------------------------------------- 1 | io.zeebe.exporters.kafka.KafkaExporter 2 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/KafkaExporterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | import static org.assertj.core.api.Assertions.assertThatThrownBy; 20 | import static org.assertj.core.api.Assertions.tuple; 21 | 22 | import io.camunda.zeebe.protocol.record.ValueType; 23 | import io.camunda.zeebe.test.exporter.ExporterTestHarness; 24 | import io.camunda.zeebe.test.exporter.record.MockRecordMetadata; 25 | import io.zeebe.exporters.kafka.config.Config; 26 | import io.zeebe.exporters.kafka.config.parser.MockConfigParser; 27 | import io.zeebe.exporters.kafka.config.parser.RawConfigParser; 28 | import io.zeebe.exporters.kafka.config.raw.RawConfig; 29 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig; 30 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig; 31 | import io.zeebe.exporters.kafka.producer.RecordBatchStub; 32 | import io.zeebe.exporters.kafka.record.RecordHandler; 33 | import io.zeebe.exporters.kafka.serde.RecordId; 34 | import java.util.stream.Collectors; 35 | import org.apache.kafka.clients.producer.ProducerRecord; 36 | import org.junit.jupiter.api.Test; 37 | import org.junit.jupiter.api.parallel.Execution; 38 | import org.junit.jupiter.api.parallel.ExecutionMode; 39 | 40 | @SuppressWarnings("rawtypes") 41 | @Execution(ExecutionMode.CONCURRENT) 42 | final class KafkaExporterTest { 43 | private static final String EXPORTER_ID = "kafka"; 44 | 45 | private final RawConfig rawConfig = new RawConfig(); 46 | private final MockConfigParser mockConfigParser = 47 | new MockConfigParser<>(new RawConfigParser()); 48 | private final RecordBatchStub.Factory batchStubFactory = new RecordBatchStub.Factory(); 49 | private final KafkaExporter exporter = new KafkaExporter(batchStubFactory, mockConfigParser); 50 | private final ExporterTestHarness testHarness = new ExporterTestHarness(exporter); 51 | 52 | @Test 53 | void shouldAddRecordToBatchOnExport() throws Exception { 54 | // given 55 | rawConfig.maxBatchSize = 5; 56 | testHarness.configure(EXPORTER_ID, rawConfig); 57 | testHarness.open(); 58 | 59 | // when 60 | final var records = testHarness.stream().export(5); 61 | 62 | // then 63 | final var expectedIds = 64 | records.stream() 65 | .map(r -> new RecordId(r.getPartitionId(), r.getPosition())) 66 | .collect(Collectors.toList()); 67 | assertThat(batchStubFactory.stub.getPendingRecords()) 68 | .as("the records were added to the batch in order") 69 | .extracting(ProducerRecord::key) 70 | .containsExactlyElementsOf(expectedIds); 71 | assertThat(batchStubFactory.stub.getFlushedRecords()) 72 | .as("no records were flushed yet") 73 | .isEmpty(); 74 | } 75 | 76 | @Test 77 | void shouldUseCorrectSerializer() throws Exception { 78 | // given 79 | testHarness.configure(EXPORTER_ID, rawConfig); 80 | testHarness.open(); 81 | final var recordHandler = new RecordHandler(mockConfigParser.config.getRecords()); 82 | 83 | // when 84 | final var json = "{\"a\": 1}"; 85 | final var record = testHarness.export(r -> r.setJson(json)); 86 | 87 | // then 88 | final var expectedRecord = recordHandler.transform(record); 89 | assertThat(batchStubFactory.stub.getPendingRecords()) 90 | .as("the serialized record was added to the batch") 91 | .extracting("topic", "key", "value") 92 | .containsExactly( 93 | tuple(expectedRecord.topic(), expectedRecord.key(), expectedRecord.value())); 94 | } 95 | 96 | @Test 97 | void shouldSkipDisallowedRecords() throws Exception { 98 | // given 99 | rawConfig.records = new RawRecordsConfig(); 100 | rawConfig.records.deployment = new 
RawRecordConfig(); 101 | rawConfig.records.deployment.type = ""; 102 | testHarness.configure(EXPORTER_ID, rawConfig); 103 | testHarness.open(); 104 | 105 | // when 106 | testHarness.export( 107 | r -> r.setMetadata(new MockRecordMetadata().setValueType(ValueType.DEPLOYMENT))); 108 | 109 | // then 110 | assertThat(batchStubFactory.stub.getPendingRecords()) 111 | .as("disallowed record should not be added to the batch") 112 | .isEmpty(); 113 | } 114 | 115 | @Test 116 | void shouldFlushOnScheduledTask() throws Exception { 117 | // given 118 | rawConfig.maxBatchSize = 5; 119 | testHarness.configure(EXPORTER_ID, rawConfig); 120 | testHarness.open(); 121 | 122 | // when 123 | final var records = testHarness.stream().export(5); 124 | triggerFlushTask(); 125 | 126 | // then 127 | final var expectedIds = 128 | records.stream() 129 | .map(r -> new RecordId(r.getPartitionId(), r.getPosition())) 130 | .collect(Collectors.toList()); 131 | assertThat(batchStubFactory.stub.getFlushedRecords()) 132 | .as("the records were added to the batch in order") 133 | .extracting(ProducerRecord::key) 134 | .containsExactlyElementsOf(expectedIds); 135 | assertThat(batchStubFactory.stub.getPendingRecords()) 136 | .as("no pending records after flush") 137 | .isEmpty(); 138 | } 139 | 140 | @Test 141 | void shouldUpdatePositionOnFlush() throws Exception { 142 | // given 143 | testHarness.configure(EXPORTER_ID, rawConfig); 144 | testHarness.open(); 145 | 146 | // when 147 | final var records = testHarness.stream().export(5); 148 | triggerFlushTask(); 149 | 150 | // then 151 | assertThat(testHarness.getLastUpdatedPosition()) 152 | .as("position should be updated since after flush") 153 | .isEqualTo(records.get(4).getPosition()); 154 | } 155 | 156 | @Test 157 | void shouldRescheduleFlushTaskEvenOnException() throws Exception { 158 | // given 159 | testHarness.configure(EXPORTER_ID, rawConfig); 160 | testHarness.open(); 161 | 162 | // when 163 | final var records = testHarness.stream().export(2); 164 | batchStubFactory.stub.flushException = new RuntimeException("failed to flush"); 165 | assertThatThrownBy(this::triggerFlushTask).isEqualTo(batchStubFactory.stub.flushException); 166 | batchStubFactory.stub.flushException = null; 167 | triggerFlushTask(); 168 | 169 | // then 170 | assertThat(testHarness.getLastUpdatedPosition()) 171 | .as("position should be updated since we managed to flush after the second try") 172 | .isEqualTo(records.get(1).getPosition()); 173 | } 174 | 175 | @Test 176 | void shouldFlushBatchOnClose() throws Exception { 177 | // given 178 | testHarness.configure(EXPORTER_ID, rawConfig); 179 | testHarness.open(); 180 | 181 | // when 182 | final var records = testHarness.stream().export(2); 183 | testHarness.close(); 184 | 185 | // then 186 | assertThat(testHarness.getLastUpdatedPosition()) 187 | .as("position should be updated since we managed to flush after the second try") 188 | .isEqualTo(records.get(1).getPosition()); 189 | assertThat(batchStubFactory.stub.isClosed()) 190 | .as("batch should be closed on exporter close") 191 | .isTrue(); 192 | } 193 | 194 | @Test 195 | void shouldRescheduleFlush() throws Exception { 196 | // given 197 | testHarness.configure(EXPORTER_ID, rawConfig); 198 | testHarness.open(); 199 | 200 | // when 201 | triggerFlushTask(); 202 | final var records = testHarness.stream().export(2); 203 | triggerFlushTask(); 204 | 205 | // then 206 | assertThat(testHarness.getLastUpdatedPosition()) 207 | .as("position should be updated after triggering the second flush task") 208 | 
.isEqualTo(records.get(1).getPosition()); 209 | } 210 | 211 | private void triggerFlushTask() { 212 | mockConfigParser.parse(rawConfig); 213 | testHarness.runScheduledTasks(mockConfigParser.config.getFlushInterval()); 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/MockConfigParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import java.util.Objects; 19 | 20 | /** 21 | * {@link MockConfigParser} allows setting a predefined parsed value for any given value. If not 22 | * set, it will delegate to an underlying parser of the same types, and memoize the value, such that 23 | * every subsequent {@link #parse(Object)} call will return the same object. 24 | * 25 | *

You can override this by calling {@link #forceParse(Object)} if you need. 26 | * 27 | * @param {@inheritDoc} 28 | * @param {@inheritDoc} 29 | */ 30 | public final class MockConfigParser implements ConfigParser { 31 | public R config; 32 | 33 | private final ConfigParser delegate; 34 | 35 | public MockConfigParser(final ConfigParser delegate) { 36 | this.delegate = Objects.requireNonNull(delegate); 37 | } 38 | 39 | @Override 40 | public R parse(final T config) { 41 | if (this.config == null) { 42 | this.config = delegate.parse(config); 43 | } 44 | 45 | return this.config; 46 | } 47 | 48 | /** A helper method in tests to force re-parsing an updated configuration. */ 49 | public void forceParse(final T config) { 50 | this.config = null; 51 | parse(config); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawConfigParserTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.zeebe.exporters.kafka.config.Config; 21 | import io.zeebe.exporters.kafka.config.ProducerConfig; 22 | import io.zeebe.exporters.kafka.config.RecordsConfig; 23 | import io.zeebe.exporters.kafka.config.raw.RawConfig; 24 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig; 25 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig; 26 | import java.time.Duration; 27 | import org.junit.jupiter.api.Test; 28 | import org.junit.jupiter.api.parallel.Execution; 29 | import org.junit.jupiter.api.parallel.ExecutionMode; 30 | 31 | @Execution(ExecutionMode.CONCURRENT) 32 | final class RawConfigParserTest { 33 | private final MockConfigParser recordsConfigParser = 34 | new MockConfigParser<>(new RawRecordsConfigParser()); 35 | private final MockConfigParser producerConfigParser = 36 | new MockConfigParser<>(new RawProducerConfigParser()); 37 | private final RawConfigParser parser = 38 | new RawConfigParser(recordsConfigParser, producerConfigParser); 39 | 40 | @Test 41 | void shouldUseDefaultValues() { 42 | // given 43 | final RawConfig config = new RawConfig(); 44 | 45 | // when 46 | final Config parsed = parser.parse(config); 47 | 48 | // then 49 | assertThat(parsed.getRecords()).isEqualTo(recordsConfigParser.parse(new RawRecordsConfig())); 50 | assertThat(parsed.getProducer()).isEqualTo(producerConfigParser.parse(new RawProducerConfig())); 51 | assertThat(parsed.getMaxBatchSize()).isEqualTo(RawConfigParser.DEFAULT_MAX_BATCH_SIZE); 52 | assertThat(parsed.getFlushInterval()).isEqualTo(RawConfigParser.DEFAULT_FLUSH_INTERVAL_MS); 53 | } 54 | 55 | @Test 56 | void shouldParse() { 57 | // given 58 | final RawConfig config = new RawConfig(); 59 | final ProducerConfig 
producerConfig = producerConfigParser.parse(new RawProducerConfig()); 60 | final RecordsConfig recordsConfig = recordsConfigParser.parse(new RawRecordsConfig()); 61 | config.maxBatchSize = 2; 62 | config.flushIntervalMs = 500L; 63 | 64 | // when 65 | final Config parsed = parser.parse(config); 66 | 67 | // then 68 | assertThat(parsed.getProducer()).isEqualTo(producerConfig); 69 | assertThat(parsed.getRecords()).isEqualTo(recordsConfig); 70 | assertThat(parsed.getMaxBatchSize()).isEqualTo(2); 71 | assertThat(parsed.getFlushInterval()).isEqualTo(Duration.ofMillis(500)); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawProducerConfigParserTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.zeebe.exporters.kafka.config.ProducerConfig; 21 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig; 22 | import java.time.Duration; 23 | import java.util.Collections; 24 | import java.util.HashMap; 25 | import java.util.Map; 26 | import org.junit.jupiter.api.Test; 27 | import org.junit.jupiter.api.parallel.Execution; 28 | import org.junit.jupiter.api.parallel.ExecutionMode; 29 | 30 | @Execution(ExecutionMode.CONCURRENT) 31 | final class RawProducerConfigParserTest { 32 | private final RawProducerConfigParser parser = new RawProducerConfigParser(); 33 | 34 | @Test 35 | void shouldUseDefaultValuesForMissingProperties() { 36 | // given 37 | final RawProducerConfig config = new RawProducerConfig(); 38 | 39 | // when 40 | final ProducerConfig parsed = parser.parse(config); 41 | 42 | // then 43 | assertThat(parsed) 44 | .extracting( 45 | "servers", "clientId", "closeTimeout", "requestTimeout", "maxBlockingTimeout", "config") 46 | .containsExactly( 47 | RawProducerConfigParser.DEFAULT_SERVERS, 48 | RawProducerConfigParser.DEFAULT_CLIENT_ID, 49 | RawProducerConfigParser.DEFAULT_CLOSE_TIMEOUT, 50 | RawProducerConfigParser.DEFAULT_REQUEST_TIMEOUT, 51 | RawProducerConfigParser.DEFAULT_MAX_BLOCKING_TIMEOUT, 52 | new HashMap<>()); 53 | } 54 | 55 | @Test 56 | void shouldParse() { 57 | // given 58 | final RawProducerConfig config = new RawProducerConfig(); 59 | config.servers = "localhost:3000"; 60 | config.clientId = "client"; 61 | config.closeTimeoutMs = 3000L; 62 | config.requestTimeoutMs = 3000L; 63 | config.maxBlockingTimeoutMs = 5000L; 64 | config.config = "linger.ms=5\nmax.buffer.count=2"; 65 | 66 | // when 67 | final ProducerConfig parsed = parser.parse(config); 68 | 69 | // then 70 | assertThat(parsed) 71 | .extracting( 72 | "servers", "clientId", "closeTimeout", "requestTimeout", "maxBlockingTimeout", "config") 73 | .containsExactly( 74 | 
Collections.singletonList("localhost:3000"), 75 | "client", 76 | Duration.ofSeconds(3), 77 | Duration.ofSeconds(3), 78 | Duration.ofSeconds(5), 79 | Map.of("linger.ms", "5", "max.buffer.count", "2")); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawRecordConfigParserTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | import static org.assertj.core.api.Assertions.assertThatThrownBy; 20 | 21 | import io.camunda.zeebe.protocol.record.RecordType; 22 | import io.zeebe.exporters.kafka.config.RecordConfig; 23 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig; 24 | import java.util.EnumSet; 25 | import org.junit.jupiter.api.Test; 26 | import org.junit.jupiter.api.parallel.Execution; 27 | import org.junit.jupiter.api.parallel.ExecutionMode; 28 | 29 | @Execution(ExecutionMode.CONCURRENT) 30 | final class RawRecordConfigParserTest { 31 | private final RawRecordConfigParser parser = new RawRecordConfigParser(); 32 | 33 | @Test 34 | void shouldParseAllowedTypes() { 35 | // given 36 | final RawRecordConfig config = new RawRecordConfig(); 37 | config.type = 38 | String.format("%s,%s", AllowedType.COMMAND.getTypeName(), AllowedType.EVENT.getTypeName()); 39 | 40 | // when 41 | final RecordConfig parsed = parser.parse(config); 42 | 43 | // then 44 | assertThat(parsed.getAllowedTypes()) 45 | .containsExactlyInAnyOrder(RecordType.COMMAND, RecordType.EVENT); 46 | } 47 | 48 | @Test 49 | void shouldParseTopic() { 50 | // given 51 | final RawRecordConfig config = new RawRecordConfig(); 52 | config.topic = "something"; 53 | 54 | // when 55 | final RecordConfig parsed = parser.parse(config); 56 | 57 | // then 58 | assertThat(parsed.getTopic()).isEqualTo("something"); 59 | } 60 | 61 | @Test 62 | void shouldSetDefaultsIfNull() { 63 | // given 64 | final RawRecordConfig config = new RawRecordConfig(); 65 | 66 | // when 67 | final RecordConfig parsed = parser.parse(config); 68 | 69 | // then 70 | assertThat(parsed.getTopic()).isEqualTo(RawRecordConfigParser.DEFAULT_TOPIC_NAME); 71 | assertThat(parsed.getAllowedTypes()).isEqualTo(RawRecordConfigParser.DEFAULT_ALLOWED_TYPES); 72 | } 73 | 74 | @Test 75 | void shouldSetExplicitDefaultsIfNull() { 76 | // given 77 | final RecordConfig defaults = new RecordConfig(EnumSet.allOf(RecordType.class), "topic"); 78 | final RawRecordConfigParser explicitParser = new RawRecordConfigParser(defaults); 79 | final RawRecordConfig config = new RawRecordConfig(); 80 | 81 | // when 82 | final RecordConfig parsed = explicitParser.parse(config); 83 | 84 | // then 85 | assertThat(parsed.getTopic()).isEqualTo(defaults.getTopic()); 86 | 
assertThat(parsed.getAllowedTypes()).isEqualTo(defaults.getAllowedTypes()); 87 | } 88 | 89 | @Test 90 | void shouldThrowExceptionIfAllowedTypeIsUnknown() { 91 | // given 92 | final RawRecordConfig config = new RawRecordConfig(); 93 | config.type = "something unlikely"; 94 | 95 | // when - then 96 | assertThatThrownBy(() -> parser.parse(config)).isInstanceOf(IllegalArgumentException.class); 97 | } 98 | 99 | @Test 100 | void shouldDisallowOnEmptyString() { 101 | // given 102 | final RawRecordConfigParser explicitParser = new RawRecordConfigParser(); 103 | final RawRecordConfig config = new RawRecordConfig(); 104 | config.type = ""; 105 | 106 | // when 107 | final RecordConfig parsed = explicitParser.parse(config); 108 | 109 | // then 110 | assertThat(parsed.getAllowedTypes()).isEmpty(); 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawRecordsConfigParserTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.config.parser; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.camunda.zeebe.protocol.record.RecordType; 21 | import io.camunda.zeebe.protocol.record.ValueType; 22 | import io.zeebe.exporters.kafka.config.RecordsConfig; 23 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig; 24 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig; 25 | import java.util.EnumSet; 26 | import java.util.Set; 27 | import org.junit.jupiter.api.Test; 28 | import org.junit.jupiter.api.parallel.Execution; 29 | import org.junit.jupiter.api.parallel.ExecutionMode; 30 | 31 | @Execution(ExecutionMode.CONCURRENT) 32 | final class RawRecordsConfigParserTest { 33 | private static final Set EXPECTED_VALUE_TYPES = 34 | EnumSet.complementOf(EnumSet.of(ValueType.NULL_VAL, ValueType.SBE_UNKNOWN)); 35 | 36 | private final RawRecordsConfigParser parser = new RawRecordsConfigParser(); 37 | 38 | @Test 39 | void shouldParseDefaultsWithDefaultValue() { 40 | // given 41 | final RawRecordsConfig config = new RawRecordsConfig(); 42 | 43 | // when 44 | final RecordsConfig parsed = parser.parse(config); 45 | 46 | // then 47 | assertThat(parsed.getDefaults().getAllowedTypes()) 48 | .isEqualTo(RawRecordConfigParser.DEFAULT_ALLOWED_TYPES); 49 | assertThat(parsed.getDefaults().getTopic()).isEqualTo(RawRecordConfigParser.DEFAULT_TOPIC_NAME); 50 | } 51 | 52 | @Test 53 | void shouldParseRecordConfigUnderCorrectValueType() { 54 | // given 55 | final RawRecordsConfig config = new RawRecordsConfig(); 56 | config.deployment = newConfigFromType(ValueType.DEPLOYMENT); 57 | config.deploymentDistribution = newConfigFromType(ValueType.DEPLOYMENT_DISTRIBUTION); 58 | config.error = newConfigFromType(ValueType.ERROR); 59 | config.incident = 
newConfigFromType(ValueType.INCIDENT); 60 | config.job = newConfigFromType(ValueType.JOB); 61 | config.jobBatch = newConfigFromType(ValueType.JOB_BATCH); 62 | config.message = newConfigFromType(ValueType.MESSAGE); 63 | config.messageSubscription = newConfigFromType(ValueType.MESSAGE_SUBSCRIPTION); 64 | config.messageStartEventSubscription = 65 | newConfigFromType(ValueType.MESSAGE_START_EVENT_SUBSCRIPTION); 66 | config.process = newConfigFromType(ValueType.PROCESS); 67 | config.processEvent = newConfigFromType(ValueType.PROCESS_EVENT); 68 | config.processInstance = newConfigFromType(ValueType.PROCESS_INSTANCE); 69 | config.processInstanceCreation = newConfigFromType(ValueType.PROCESS_INSTANCE_CREATION); 70 | config.processInstanceResult = newConfigFromType(ValueType.PROCESS_INSTANCE_RESULT); 71 | config.processMessageSubscription = newConfigFromType(ValueType.PROCESS_MESSAGE_SUBSCRIPTION); 72 | config.timer = newConfigFromType(ValueType.TIMER); 73 | config.variable = newConfigFromType(ValueType.VARIABLE); 74 | config.variableDocument = newConfigFromType(ValueType.VARIABLE_DOCUMENT); 75 | 76 | // when 77 | final RecordsConfig parsed = parser.parse(config); 78 | 79 | // then 80 | for (final ValueType type : EXPECTED_VALUE_TYPES) { 81 | assertThat(parsed.forType(type).getTopic()).isEqualTo(type.name()); 82 | } 83 | } 84 | 85 | @Test 86 | void shouldUseDefaultsOnMissingProperties() { 87 | // given 88 | final RawRecordsConfig config = new RawRecordsConfig(); 89 | config.defaults = new RawRecordConfig(); 90 | config.defaults.topic = "default"; 91 | config.defaults.type = 92 | String.format( 93 | "%s,%s", AllowedType.COMMAND.getTypeName(), AllowedType.REJECTION.getTypeName()); 94 | 95 | // when 96 | final RecordsConfig parsed = parser.parse(config); 97 | 98 | // then 99 | parsed 100 | .getTypeMap() 101 | .forEach( 102 | (t, c) -> { 103 | assertThat(c.getTopic()).isEqualTo(config.defaults.topic); 104 | assertThat(c.getAllowedTypes()) 105 | .containsExactly(RecordType.COMMAND, RecordType.COMMAND_REJECTION); 106 | }); 107 | } 108 | 109 | private RawRecordConfig newConfigFromType(final ValueType type) { 110 | final RawRecordConfig recordConfig = new RawRecordConfig(); 111 | recordConfig.topic = type.name(); 112 | 113 | return recordConfig; 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/producer/MockKafkaProducerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.ProducerConfig; 19 | import io.zeebe.exporters.kafka.serde.RecordId; 20 | import java.util.Objects; 21 | import java.util.function.Supplier; 22 | import org.apache.kafka.clients.producer.MockProducer; 23 | import org.apache.kafka.clients.producer.Producer; 24 | 25 | /** 26 | * A utility implementation to allow more control of the execution of the {@link 27 | * io.zeebe.exporters.kafka.KafkaExporter} in tests. Allows overriding the producer which will be 28 | * given to the exporter - if none given, it will create a {@link MockProducer} and memoize the 29 | * value. 30 | */ 31 | public class MockKafkaProducerFactory implements KafkaProducerFactory { 32 | public Supplier> mockProducerSupplier; 33 | public MockProducer mockProducer; 34 | public String producerId; 35 | 36 | public MockKafkaProducerFactory( 37 | final Supplier> mockProducerSupplier) { 38 | this.mockProducerSupplier = Objects.requireNonNull(mockProducerSupplier); 39 | } 40 | 41 | @Override 42 | public Producer newProducer( 43 | final ProducerConfig config, final String producerId) { 44 | this.producerId = Objects.requireNonNull(producerId); 45 | if (mockProducer == null || mockProducer.closed()) { 46 | mockProducer = mockProducerSupplier.get(); 47 | } 48 | 49 | return mockProducer; 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/producer/RecordBatchStub.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package io.zeebe.exporters.kafka.producer; 17 | 18 | import io.zeebe.exporters.kafka.config.ProducerConfig; 19 | import io.zeebe.exporters.kafka.record.FullRecordBatchException; 20 | import io.zeebe.exporters.kafka.serde.RecordId; 21 | import java.nio.BufferOverflowException; 22 | import java.util.LinkedList; 23 | import java.util.List; 24 | import java.util.Objects; 25 | import java.util.function.LongConsumer; 26 | import org.apache.kafka.clients.producer.ProducerRecord; 27 | import org.slf4j.Logger; 28 | 29 | public final class RecordBatchStub implements RecordBatch { 30 | public RuntimeException flushException; 31 | 32 | private final ProducerConfig config; 33 | private final int maxBatchSize; 34 | private final LongConsumer onFlushCallback; 35 | private final Logger logger; 36 | 37 | private final LinkedList> flushedRecords = new LinkedList<>(); 38 | 39 | private final LinkedList> pendingRecords = new LinkedList<>(); 40 | 41 | private boolean closed = false; 42 | 43 | public RecordBatchStub( 44 | final ProducerConfig config, 45 | final int maxBatchSize, 46 | final LongConsumer onFlushCallback, 47 | final Logger logger) { 48 | this.config = Objects.requireNonNull(config); 49 | this.maxBatchSize = maxBatchSize; 50 | this.onFlushCallback = Objects.requireNonNull(onFlushCallback); 51 | this.logger = Objects.requireNonNull(logger); 52 | } 53 | 54 | @Override 55 | public void add(final ProducerRecord record) throws FullRecordBatchException { 56 | if (pendingRecords.size() >= maxBatchSize) { 57 | throw new FullRecordBatchException(maxBatchSize, new BufferOverflowException()); 58 | } 59 | 60 | pendingRecords.add(record); 61 | } 62 | 63 | @Override 64 | public void flush() { 65 | if (flushException != null) { 66 | throw flushException; 67 | } 68 | 69 | flushedRecords.addAll(pendingRecords); 70 | pendingRecords.clear(); 71 | 72 | if (!flushedRecords.isEmpty()) { 73 | onFlushCallback.accept(flushedRecords.getLast().key().getPosition()); 74 | } 75 | } 76 | 77 | @Override 78 | public void close() { 79 | closed = true; 80 | } 81 | 82 | public List> getFlushedRecords() { 83 | return flushedRecords; 84 | } 85 | 86 | public List> getPendingRecords() { 87 | return pendingRecords; 88 | } 89 | 90 | public boolean isClosed() { 91 | return closed; 92 | } 93 | 94 | public static class Factory implements RecordBatchFactory { 95 | public RecordBatchStub stub; 96 | 97 | @Override 98 | public RecordBatch newRecordBatch( 99 | final ProducerConfig config, 100 | final int maxBatchSize, 101 | final LongConsumer onFlushCallback, 102 | final Logger logger) { 103 | if (stub == null) { 104 | stub = new RecordBatchStub(config, maxBatchSize, onFlushCallback, logger); 105 | } 106 | 107 | return stub; 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /exporter/src/test/java/io/zeebe/exporters/kafka/record/RecordHandlerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.record; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.camunda.zeebe.protocol.jackson.record.DeploymentRecordValueBuilder; 21 | import io.camunda.zeebe.protocol.jackson.record.RecordBuilder; 22 | import io.camunda.zeebe.protocol.record.Record; 23 | import io.camunda.zeebe.protocol.record.RecordType; 24 | import io.camunda.zeebe.protocol.record.ValueType; 25 | import io.camunda.zeebe.protocol.record.intent.DeploymentIntent; 26 | import io.camunda.zeebe.protocol.record.value.DeploymentRecordValue; 27 | import io.zeebe.exporters.kafka.config.RecordConfig; 28 | import io.zeebe.exporters.kafka.config.RecordsConfig; 29 | import io.zeebe.exporters.kafka.serde.RecordId; 30 | import java.nio.charset.StandardCharsets; 31 | import java.util.EnumSet; 32 | import java.util.Map; 33 | import org.apache.kafka.clients.producer.ProducerRecord; 34 | import org.junit.jupiter.api.Test; 35 | import org.junit.jupiter.api.parallel.Execution; 36 | import org.junit.jupiter.api.parallel.ExecutionMode; 37 | 38 | @Execution(ExecutionMode.CONCURRENT) 39 | final class RecordHandlerTest { 40 | 41 | private static final RecordConfig DEFAULT_RECORD_CONFIG = 42 | new RecordConfig(EnumSet.allOf(RecordType.class), "zeebe"); 43 | 44 | @Test 45 | void shouldTransformRecord() { 46 | // given 47 | final Record record = 48 | buildDeploymentRecord().recordType(RecordType.COMMAND).build(); 49 | final RecordConfig deploymentRecordConfig = 50 | new RecordConfig(EnumSet.allOf(RecordType.class), "topic"); 51 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.COMMAND)); 52 | 53 | // when 54 | final ProducerRecord transformed = recordHandler.transform(record); 55 | 56 | // then 57 | assertThat(transformed.topic()).isEqualTo(deploymentRecordConfig.getTopic()); 58 | assertThat(transformed.key()) 59 | .isEqualTo(new RecordId(record.getPartitionId(), record.getPosition())); 60 | assertThat(transformed.value()).isEqualTo(record.toJson().getBytes(StandardCharsets.UTF_8)); 61 | } 62 | 63 | @Test 64 | void shouldTestRecordAsNotAllowed() { 65 | // given 66 | final Record record = 67 | buildDeploymentRecord().recordType(RecordType.COMMAND).build(); 68 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.EVENT)); 69 | 70 | // when - then 71 | assertThat(recordHandler.isAllowed(record)).isFalse(); 72 | } 73 | 74 | @Test 75 | void shouldTestRecordAsAllowed() { 76 | // given 77 | final Record record = 78 | buildDeploymentRecord().recordType(RecordType.EVENT).build(); 79 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.EVENT)); 80 | 81 | // when - then 82 | assertThat(recordHandler.isAllowed(record)).isTrue(); 83 | } 84 | 85 | private RecordsConfig newRecordsConfig(final RecordType allowedType) { 86 | final RecordConfig recordConfig = new RecordConfig(EnumSet.of(allowedType), "topic"); 87 | return new RecordsConfig(Map.of(ValueType.DEPLOYMENT, recordConfig), DEFAULT_RECORD_CONFIG); 88 | } 89 | 90 | private RecordBuilder buildDeploymentRecord() { 91 | return new RecordBuilder() 92 | .valueType(ValueType.DEPLOYMENT) 93 | .recordType(RecordType.EVENT) 94 | .timestamp(System.currentTimeMillis()) 95 | .intent(DeploymentIntent.CREATE) 96 | .value(new DeploymentRecordValueBuilder().build()) 97 | .partitionId(1) 98 | .position(1); 99 | } 100 | } 101 | 
-------------------------------------------------------------------------------- /exporter/src/test/resources/simplelogger.properties: -------------------------------------------------------------------------------- 1 | org.slf4j.simpleLogger.logFile=System.out 2 | org.slf4j.simplerLogger.showShortLogName=true 3 | org.slf4j.simpleLogger.defaultLogLevel=info 4 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka=debug 5 | org.slf4j.simpleLogger.log.io.zeebe.broker.exporter=debug 6 | org.slf4j.simpleLogger.log.org.apache=warn 7 | org.slf4j.simpleLogger.log.kafka=warn 8 | -------------------------------------------------------------------------------- /qa/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 4.0.0 6 | Zeebe Kafka Exporter QA 7 | zeebe-kafka-exporter-qa 8 | jar 9 | https://github.com/zeebe-io/zeebe-kafka-exporter/qa 10 | 11 | 12 | zeebe-kafka-exporter-root 13 | io.zeebe 14 | 3.1.2-SNAPSHOT 15 | ../pom.xml 16 | 17 | 18 | 19 | 20 | 21 | io.zeebe 22 | zeebe-kafka-exporter 23 | test 24 | 25 | 26 | 27 | io.zeebe 28 | zeebe-kafka-exporter-serde 29 | test 30 | 31 | 32 | 33 | io.camunda 34 | zeebe-protocol-jackson 35 | test 36 | 37 | 38 | 39 | com.fasterxml.jackson.core 40 | jackson-core 41 | test 42 | 43 | 44 | 45 | com.fasterxml.jackson.core 46 | jackson-databind 47 | test 48 | 49 | 50 | 51 | io.camunda 52 | zeebe-client-java 53 | test 54 | 55 | 56 | 57 | io.camunda 58 | zeebe-bpmn-model 59 | test 60 | 61 | 62 | 63 | io.camunda 64 | zeebe-protocol 65 | test 66 | 67 | 68 | 69 | io.camunda 70 | zeebe-protocol-asserts 71 | test 72 | 73 | 74 | 75 | org.agrona 76 | agrona 77 | test 78 | 79 | 80 | 81 | org.apache.kafka 82 | kafka-clients 83 | test 84 | 85 | 86 | 87 | org.slf4j 88 | slf4j-api 89 | test 90 | 91 | 92 | 93 | org.slf4j 94 | slf4j-simple 95 | test 96 | 97 | 98 | 99 | org.testcontainers 100 | testcontainers 101 | test 102 | 103 | 104 | 105 | org.testcontainers 106 | kafka 107 | test 108 | 109 | 110 | 111 | org.testcontainers 112 | junit-jupiter 113 | test 114 | 115 | 116 | 117 | io.zeebe 118 | zeebe-test-container 119 | test 120 | 121 | 122 | 123 | org.junit.jupiter 124 | junit-jupiter-api 125 | test 126 | 127 | 128 | 129 | org.assertj 130 | assertj-core 131 | test 132 | 133 | 134 | 135 | org.awaitility 136 | awaitility 137 | test 138 | 139 | 140 | 141 | 142 | 143 | 145 | 146 | org.apache.maven.plugins 147 | maven-dependency-plugin 148 | 149 | 150 | copy 151 | pre-integration-test 152 | 153 | copy 154 | 155 | 156 | 157 | 158 | io.zeebe 159 | zeebe-kafka-exporter 160 | ${project.version} 161 | jar 162 | jar-with-dependencies 163 | ${project.basedir}/src/test/resources 164 | zeebe-kafka-exporter.jar 165 | 166 | 167 | false 168 | true 169 | 170 | 171 | 172 | 173 | 174 | 178 | io.zeebe:zeebe-kafka-exporter 179 | 180 | org.slf4j:slf4j-simple 181 | 182 | 183 | 184 | 185 | 186 | org.apache.maven.plugins 187 | maven-failsafe-plugin 188 | 189 | 190 | 191 | 192 | -------------------------------------------------------------------------------- /qa/src/test/java/io/zeebe/exporters/kafka/qa/DebugHttpExporterClient.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.qa; 17 | 18 | import com.fasterxml.jackson.core.type.TypeReference; 19 | import com.fasterxml.jackson.databind.ObjectMapper; 20 | import com.fasterxml.jackson.databind.ObjectReader; 21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord; 22 | import io.camunda.zeebe.protocol.record.Record; 23 | import java.io.IOException; 24 | import java.io.UncheckedIOException; 25 | import java.net.URL; 26 | import java.util.Collections; 27 | import java.util.List; 28 | import java.util.stream.Stream; 29 | 30 | /** 31 | * A dumb client for the DebugHttpExporter. This exporter starts a server on a single broker for all 32 | * known partitions (of that broker), and simply exposes a poll mechanism for the records. 33 | * 34 | *

NOTE: the server returns records in reverse order, from newest to oldest, which is the 35 | * opposite of what we typically want, i.e. causal (oldest-first) order. The {@link #streamRecords()} 36 | * method therefore reverses them before returning. 37 | * 38 | *

NOTE: the streaming is "dumb", and really only returns the records from the server as-is, as a 39 | * stream. This is fine for now since we typically don't have a lot of records, but it means you may 40 | * have to call the method multiple times. 41 | */ 42 | final class DebugHttpExporterClient { 43 | 44 | private static final ObjectReader READER = 45 | new ObjectMapper().readerFor(new TypeReference<List<AbstractRecord<?>>>() {}); 46 | 47 | private final URL serverUrl; 48 | 49 | DebugHttpExporterClient(final URL serverUrl) { 50 | this.serverUrl = serverUrl; 51 | } 52 | 53 | Stream<Record<?>> streamRecords() { 54 | try { 55 | // the HTTP exporter returns records in reversed order, so flip them before returning 56 | final List<AbstractRecord<?>> records = READER.readValue(serverUrl); 57 | Collections.reverse(records); 58 | 59 | return records.stream().map(r -> r); 60 | } catch (final IOException e) { 61 | throw new UncheckedIOException(e); 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /qa/src/test/java/io/zeebe/exporters/kafka/qa/KafkaExporterIT.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package io.zeebe.exporters.kafka.qa; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.camunda.zeebe.client.ZeebeClient; 21 | import io.camunda.zeebe.protocol.record.Record; 22 | import io.zeebe.containers.ZeebeContainer; 23 | import io.zeebe.exporters.kafka.serde.RecordDeserializer; 24 | import io.zeebe.exporters.kafka.serde.RecordId; 25 | import io.zeebe.exporters.kafka.serde.RecordIdDeserializer; 26 | import java.net.MalformedURLException; 27 | import java.net.URL; 28 | import java.time.Duration; 29 | import java.util.ArrayList; 30 | import java.util.Comparator; 31 | import java.util.HashMap; 32 | import java.util.List; 33 | import java.util.Map; 34 | import java.util.concurrent.CompletableFuture; 35 | import java.util.concurrent.CountDownLatch; 36 | import java.util.concurrent.TimeUnit; 37 | import java.util.regex.Pattern; 38 | import java.util.stream.Collectors; 39 | import org.agrona.CloseHelper; 40 | import org.apache.kafka.clients.admin.AdminClient; 41 | import org.apache.kafka.clients.admin.AdminClientConfig; 42 | import org.apache.kafka.clients.admin.NewTopic; 43 | import org.apache.kafka.clients.consumer.Consumer; 44 | import org.apache.kafka.clients.consumer.ConsumerConfig; 45 | import org.apache.kafka.clients.consumer.ConsumerRecord; 46 | import org.apache.kafka.clients.consumer.KafkaConsumer; 47 | import org.awaitility.Awaitility; 48 | import org.junit.jupiter.api.AfterEach; 49 | import org.junit.jupiter.api.Test; 50 | import org.junit.jupiter.api.Timeout; 51 | import org.junit.jupiter.api.parallel.Execution; 52 | import org.junit.jupiter.api.parallel.ExecutionMode; 53 | import org.slf4j.Logger; 54 | import org.slf4j.LoggerFactory; 55 | import org.testcontainers.containers.KafkaContainer; 56 | import org.testcontainers.containers.Network; 57 | import org.testcontainers.containers.output.Slf4jLogConsumer; 58 | import org.testcontainers.junit.jupiter.Testcontainers; 59 | import org.testcontainers.utility.DockerImageName; 60 | import org.testcontainers.utility.MountableFile; 61 | 62 | /** 63 | * This tests the deployment of the exporter into a Zeebe broker in an as-close-to-production way as 64 | * possible, by starting a Zeebe container and deploying the exporter as one normally would. 65 | * 66 | *

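 * <p>Deploying "as one normally would" here concretely means (see {@code newZeebeContainer()}
 * below): copy the exporter's fat jar into the broker's {@code /usr/local/zeebe/exporters}
 * directory, mount an {@code exporters.yml} with the exporter's configuration, and point the
 * broker at it via {@code SPRING_CONFIG_ADDITIONAL_LOCATION}.
 *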
In order to verify certain properties - i.e. all records were exported correctly, order was 67 | * maintained on a per partition basis, etc. - we use an exporter deemed "reliable", the 68 | * DebugHttpExporter, to compare results. 69 | */ 70 | @Testcontainers 71 | @Timeout(value = 5, unit = TimeUnit.MINUTES) 72 | @Execution(ExecutionMode.CONCURRENT) 73 | final class KafkaExporterIT { 74 | private static final Pattern TOPIC_SUBSCRIPTION_PATTERN = Pattern.compile("zeebe.*"); 75 | 76 | private final Network network = Network.newNetwork(); 77 | private KafkaContainer kafkaContainer = newKafkaContainer(); 78 | private final ZeebeContainer zeebeContainer = newZeebeContainer(); 79 | 80 | private ZeebeClient zeebeClient; 81 | private DebugHttpExporterClient debugExporter; 82 | 83 | @AfterEach 84 | void tearDown() { 85 | CloseHelper.quietCloseAll(zeebeClient, zeebeContainer, kafkaContainer, network); 86 | } 87 | 88 | @Test 89 | void shouldExportToKafka() throws MalformedURLException { 90 | // given 91 | startKafka(); 92 | zeebeContainer.start(); 93 | final var sampleWorkload = newSampleWorkload(); 94 | 95 | // when 96 | sampleWorkload.execute(); 97 | 98 | // then 99 | assertRecordsExported(sampleWorkload); 100 | } 101 | 102 | @Test 103 | void shouldExportEvenIfKafkaStartedLater() throws MalformedURLException { 104 | // given 105 | zeebeContainer.start(); 106 | final var sampleWorkload = newSampleWorkload(); 107 | 108 | // when 109 | sampleWorkload.execute(); 110 | startKafka(); 111 | 112 | // then 113 | assertRecordsExported(sampleWorkload); 114 | } 115 | 116 | @Test 117 | void shouldExportEvenIfKafkaRestartedInTheMiddle() 118 | throws MalformedURLException, InterruptedException { 119 | // given 120 | startKafka(); 121 | zeebeContainer.start(); 122 | final var sampleWorkload = newSampleWorkload(); 123 | 124 | // when 125 | final var latch = new CountDownLatch(1); 126 | final var workloadFinished = 127 | CompletableFuture.runAsync(() -> sampleWorkload.execute(latch::countDown)); 128 | 129 | assertThat(latch.await(15, TimeUnit.SECONDS)) 130 | .as("midpoint hook was called to stop kafka") 131 | .isTrue(); 132 | kafkaContainer.stop(); 133 | kafkaContainer = newKafkaContainer(); 134 | startKafka(); 135 | workloadFinished.join(); 136 | 137 | // then 138 | assertRecordsExported(sampleWorkload); 139 | } 140 | 141 | private SampleWorkload newSampleWorkload() throws MalformedURLException { 142 | return new SampleWorkload(getLazyZeebeClient(), getLazyDebugExporter()); 143 | } 144 | 145 | /** 146 | * Asserts that the expected records have been correctly exported. 147 | * 148 | *

The properties asserted are the following for every partition: 149 |  * 150 |  *
 151 |  *   1. every record for partition X was exported to Kafka 152 |  *   2. every record for partition X was exported to the same Kafka partition Y 153 |  *   3. every record for partition X is consumed in the order in which they were written (i.e. by 154 |  *      position) 155 |  *
 156 |  * 157 |  * The first property is self-explanatory - just ensure all records can be consumed from the 158 |  * expected Kafka topic. 159 |  * 160 |  *

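 * <p>For the first property, the comparison is made tractable by grouping both sides by Zeebe
 * partition before matching them up, i.e.
 * {@code records.stream().collect(Collectors.groupingBy(Record::getPartitionId))} - the same
 * grouping {@code assertRecordsExported} performs below.
 *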
The second property checks the partitioning logic - Zeebe records are causally linked, and 161 | * exporting them to different partitions will result in them being consumed out of order. So this 162 | * ensures that all records from a given Zeebe partition are exported to the same Kafka partition 163 | * in order to preserve ordering. 164 | * 165 | *

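 * <p>Note that no particular Zeebe-to-Kafka partition assignment is asserted - only its
 * stability. Any deterministic scheme keyed on the Zeebe partition id (for illustration, think
 * {@code zeebePartitionId % kafkaPartitionCount}) satisfies this property; the concrete choice is
 * an implementation detail of the exporter's partitioner.
 *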
The third property is an extension of this, and checks that they are indeed ordered by 166 |  * position. 167 |  */ 168 |   private void assertRecordsExported(final SampleWorkload workload) { 169 |     final var expectedRecords = workload.getExpectedRecords(Duration.ofSeconds(5)); 170 |     final var expectedRecordsPerPartition = 171 |         expectedRecords.stream().collect(Collectors.groupingBy(Record::getPartitionId)); 172 |     final var actualRecords = awaitAllExportedRecords(expectedRecordsPerPartition); 173 | 174 |     assertThat(expectedRecords).as("there should have been some records exported").isNotEmpty(); 175 |     assertThat(actualRecords) 176 |         .allSatisfy( 177 |             (partitionId, records) -> { 178 |               assertExportedRecordsPerPartition( 179 |                   partitionId, records, expectedRecordsPerPartition.get(partitionId)); 180 |             }); 181 |   } 182 | 183 |   @SuppressWarnings("rawtypes") 184 |   private void assertExportedRecordsPerPartition( 185 |       final Integer partitionId, 186 |       final List<ConsumerRecord<RecordId, Record<?>>> exportedRecords, 187 |       final List<Record<?>> expectedRecords) { 188 |     final var expectedKafkaPartition = exportedRecords.get(0).partition(); 189 |     assertThat(exportedRecords) 190 |         .as( 191 |             "all exported records from Zeebe partition %d were exported to the same Kafka partition %d", 192 |             partitionId, expectedKafkaPartition) 193 |         .allMatch(r -> r.partition() == expectedKafkaPartition) 194 |         // cast to raw type to be able to compare the containers 195 |         .map(r -> (Record) r.value()) 196 |         .as( 197 |             "the records for partition %d are the same as those reported by the DebugHttpExporter", 198 |             partitionId) 199 |         .containsExactlyInAnyOrderElementsOf(expectedRecords) 200 |         .as("the records for partition %d are sorted by position", partitionId) 201 |         .isSortedAccordingTo(Comparator.comparing(Record::getPosition)); 202 |   } 203 | 204 |   /** 205 |    * A wrapper around {@link #consumeExportedRecords(Map)} to avoid race conditions where we poll 206 |    * too early and receive fewer records. Doing this avoids any potential flakiness at the cost of a 207 |    * bit more complexity/unreadability.
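   * <p>Reduced to its bones, the pattern is {@code await().untilAsserted(() -> { consumeMore(); assertSizesMatch(); })},
   * where consumption accumulates into the same map across polls ({@code consumeMore} and
   * {@code assertSizesMatch} are illustrative names, not methods of this class). An early, short
   * poll therefore only delays the final assertion rather than failing the test.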
208 |    */ 209 |   private Map<Integer, List<ConsumerRecord<RecordId, Record<?>>>> awaitAllExportedRecords( 210 |       final Map<Integer, List<Record<?>>> expectedRecords) { 211 |     final var records = new HashMap<Integer, List<ConsumerRecord<RecordId, Record<?>>>>(); 212 | 213 |     Awaitility.await("until the expected number of records has been consumed") 214 |         .atMost(Duration.ofSeconds(30)) 215 |         .pollDelay(Duration.ZERO) 216 |         .pollInterval(Duration.ofMillis(100)) 217 |         .pollInSameThread() 218 |         .untilAsserted( 219 |             () -> { 220 |               consumeExportedRecords(records); 221 |               assertThat(records) 222 |                   .allSatisfy( 223 |                       (partitionId, list) -> { 224 |                         assertThat(list) 225 |                             .as("records consumed for partition %d", partitionId) 226 |                             .hasSameSizeAs(expectedRecords.get(partitionId)); 227 |                       }); 228 |             }); 229 | 230 |     return records; 231 |   } 232 | 233 |   private void consumeExportedRecords( 234 |       final Map<Integer, List<ConsumerRecord<RecordId, Record<?>>>> records) { 235 |     final var timeout = Duration.ofSeconds(5); 236 | 237 |     try (final Consumer<RecordId, Record<?>> consumer = newConsumer()) { 238 |       final var consumedRecords = consumer.poll(timeout); 239 |       for (final var consumedRecord : consumedRecords) { 240 |         final var perPartitionRecords = 241 |             records.computeIfAbsent( 242 |                 consumedRecord.value().getPartitionId(), ignored -> new ArrayList<>()); 243 | 244 |         perPartitionRecords.add(consumedRecord); 245 |         perPartitionRecords.sort(Comparator.comparing(ConsumerRecord::offset, Long::compareTo)); 246 |       } 247 |     } 248 |   } 249 | 250 |   private ZeebeClient getLazyZeebeClient() { 251 |     if (zeebeClient == null) { 252 |       zeebeClient = 253 |           ZeebeClient.newClientBuilder() 254 |               .gatewayAddress(zeebeContainer.getExternalGatewayAddress()) 255 |               .usePlaintext() 256 |               .build(); 257 |     } 258 | 259 |     return zeebeClient; 260 |   } 261 | 262 |   private DebugHttpExporterClient getLazyDebugExporter() throws MalformedURLException { 263 |     if (debugExporter == null) { 264 |       final var exporterServerUrl = 265 |           new URL(String.format("http://%s/records.json", zeebeContainer.getExternalAddress(8000))); 266 |       debugExporter = new DebugHttpExporterClient(exporterServerUrl); 267 |     } 268 | 269 |     return debugExporter; 270 |   } 271 | 272 |   @SuppressWarnings("OctalInteger") 273 |   private ZeebeContainer newZeebeContainer() { 274 |     final var container = new ZeebeContainer(); 275 |     final var exporterJar = MountableFile.forClasspathResource("zeebe-kafka-exporter.jar", 0775); 276 |     final var exporterConfig = MountableFile.forClasspathResource("exporters.yml", 0775); 277 |     final var loggingConfig = MountableFile.forClasspathResource("log4j2.xml", 0775); 278 |     final var networkAlias = "zeebe"; 279 |     final var logConsumer = new Slf4jLogConsumer(newContainerLogger("zeebeContainer"), true); 280 | 281 |     container.addExposedPort(8000); 282 |     return container 283 |         .withNetwork(network) 284 |         .withNetworkAliases(networkAlias) 285 |         .withEnv("ZEEBE_BROKER_NETWORK_ADVERTISEDHOST", networkAlias) 286 |         .withEnv("ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT", "3") 287 |         .withEnv("ZEEBE_BROKER_EXPORTERS_KAFKA_ARGS_PRODUCER_SERVERS", "kafka:9092") 288 |         .withEnv("ZEEBE_LOG_LEVEL", "info") 289 |         .withEnv( 290 |             "LOG4J_CONFIGURATION_FILE", 291 |             "/usr/local/zeebe/config/log4j2.xml,/usr/local/zeebe/config/log4j2-exporter.xml") 292 |         .withCopyFileToContainer(exporterJar, "/usr/local/zeebe/exporters/zeebe-kafka-exporter.jar") 293 |         .withCopyFileToContainer(exporterConfig, "/usr/local/zeebe/config/exporters.yml") 294 |         .withCopyFileToContainer(loggingConfig, "/usr/local/zeebe/config/log4j2-exporter.xml") 295 |         .withEnv("SPRING_CONFIG_ADDITIONAL_LOCATION", "file:/usr/local/zeebe/config/exporters.yml") 296 |         .withLogConsumer(logConsumer); 297 |   } 298 | 299 |   private
Consumer<RecordId, Record<?>> newConsumer() { 300 |     final var config = new HashMap<String, Object>(); 301 |     config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 302 |     config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaContainer.getBootstrapServers()); 303 |     config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true); 304 |     config.put(ConsumerConfig.GROUP_ID_CONFIG, this.getClass().getName()); 305 |     config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.MAX_VALUE); 306 |     config.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, 500); 307 |     config.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); 308 | 309 |     final var consumer = 310 |         new KafkaConsumer<>(config, new RecordIdDeserializer(), new RecordDeserializer()); 311 |     consumer.subscribe(TOPIC_SUBSCRIPTION_PATTERN); 312 | 313 |     return consumer; 314 |   } 315 | 316 |   private KafkaContainer newKafkaContainer() { 317 |     final var kafkaImage = DockerImageName.parse("confluentinc/cp-kafka").withTag("5.5.1"); 318 |     final var container = new KafkaContainer(kafkaImage); 319 |     final var logConsumer = new Slf4jLogConsumer(newContainerLogger("kafkaContainer"), true); 320 | 321 |     return container 322 |         .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", "1") 323 |         .withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", "1") 324 |         .withEmbeddedZookeeper() 325 |         .withNetwork(network) 326 |         .withNetworkAliases("kafka") 327 |         .withLogConsumer(logConsumer); 328 |   } 329 | 330 |   private void startKafka() { 331 |     kafkaContainer.start(); 332 | 333 |     // provision Kafka topics - this is difficult at the moment to achieve purely via 334 |     // configuration, so we do it as a pre-step 335 |     final NewTopic topic = new NewTopic("zeebe", 3, (short) 1); 336 |     try (final AdminClient admin = 337 |         AdminClient.create( 338 |             Map.of( 339 |                 AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, 340 |                 kafkaContainer.getBootstrapServers()))) { 341 |       admin.createTopics(List.of(topic)); 342 |     } 343 |   } 344 | 345 |   private static Logger newContainerLogger(final String containerName) { 346 |     return LoggerFactory.getLogger(KafkaExporterIT.class.getName() + "." + containerName); 347 |   } 348 | } 349 | -------------------------------------------------------------------------------- /qa/src/test/java/io/zeebe/exporters/kafka/qa/SampleWorkload.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License.
15 | */ 16 | package io.zeebe.exporters.kafka.qa; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import io.camunda.zeebe.client.ZeebeClient; 21 | import io.camunda.zeebe.client.api.response.ActivatedJob; 22 | import io.camunda.zeebe.client.api.worker.JobClient; 23 | import io.camunda.zeebe.client.api.worker.JobHandler; 24 | import io.camunda.zeebe.client.api.worker.JobWorker; 25 | import io.camunda.zeebe.model.bpmn.Bpmn; 26 | import io.camunda.zeebe.model.bpmn.BpmnModelInstance; 27 | import io.camunda.zeebe.protocol.record.Record; 28 | import io.camunda.zeebe.protocol.record.RecordAssert; 29 | import io.camunda.zeebe.protocol.record.intent.IncidentIntent; 30 | import io.camunda.zeebe.protocol.record.intent.MessageIntent; 31 | import io.camunda.zeebe.protocol.record.intent.ProcessInstanceIntent; 32 | import io.camunda.zeebe.protocol.record.value.BpmnElementType; 33 | import io.camunda.zeebe.protocol.record.value.IncidentRecordValue; 34 | import io.camunda.zeebe.protocol.record.value.ProcessInstanceRecordValue; 35 | import java.time.Duration; 36 | import java.util.ArrayList; 37 | import java.util.HashMap; 38 | import java.util.List; 39 | import java.util.Map; 40 | import java.util.Objects; 41 | import java.util.Optional; 42 | import java.util.concurrent.atomic.AtomicBoolean; 43 | import java.util.stream.Collectors; 44 | import org.awaitility.Awaitility; 45 | 46 | public final class SampleWorkload { 47 | private static final String JOB_TYPE = "work"; 48 | private static final String MESSAGE_NAME = "catch"; 49 | private static final String CORRELATION_KEY = "foo-bar-123"; 50 | private static final String PROCESS_NAME = "testProcess"; 51 | private static final String PROCESS_FILE_NAME = "sample_workflow.bpmn"; 52 | private static final String TASK_NAME = "task"; 53 | private static final BpmnModelInstance SAMPLE_PROCESS = 54 | Bpmn.createExecutableProcess(PROCESS_NAME) 55 | .startEvent() 56 | .intermediateCatchEvent( 57 | "message", 58 | e -> e.message(m -> m.name(MESSAGE_NAME).zeebeCorrelationKeyExpression("orderId"))) 59 | .serviceTask(TASK_NAME, t -> t.zeebeJobType(JOB_TYPE).zeebeTaskHeader("foo", "bar")) 60 | .endEvent() 61 | .done(); 62 | 63 | private final ZeebeClient client; 64 | private final DebugHttpExporterClient exporterClient; 65 | 66 | private long endMarkerKey; 67 | 68 | public SampleWorkload(final ZeebeClient client, final DebugHttpExporterClient exporterClient) { 69 | this.client = Objects.requireNonNull(client); 70 | this.exporterClient = Objects.requireNonNull(exporterClient); 71 | } 72 | 73 | public void execute() { 74 | execute(() -> {}); 75 | } 76 | 77 | /** Runs a sample workload on the broker, exporting several records of different types. 
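   * The sequence is: deploy the sample process; create an instance that waits at the message catch
   * event; start a worker that fails the first job it sees (raising an incident); run the caller's
   * midpoint hook; publish the correlated message so the task can run; resolve the resulting
   * incident with fresh retries; await process completion; then close the worker and publish the
   * end-marker message that {@link #getExpectedRecords(Duration)} later uses to detect completeness.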
*/ 78 |   public void execute(final Runnable midpointHook) { 79 |     deployWorkflow(); 80 | 81 |     final Map<String, Object> variables = new HashMap<>(); 82 |     variables.put("orderId", CORRELATION_KEY); 83 |     variables.put("largeValue", "x".repeat(8192)); 84 |     variables.put("unicode", "Á"); 85 | 86 |     final long workflowInstanceKey = createWorkflowInstance(variables); 87 |     final AtomicBoolean fail = new AtomicBoolean(true); 88 |     final JobWorker worker = createJobWorker((jobClient, job) -> handleJob(fail, jobClient, job)); 89 | 90 |     midpointHook.run(); 91 |     publishMessage(); 92 | 93 |     final Record<IncidentRecordValue> incident = awaitIncidentRaised(workflowInstanceKey); 94 |     client.newUpdateRetriesCommand(incident.getValue().getJobKey()).retries(3).send().join(); 95 |     client.newResolveIncidentCommand(incident.getKey()).send().join(); 96 | 97 |     // wrap up 98 |     awaitWorkflowCompletion(workflowInstanceKey); 99 |     worker.close(); 100 |     publishEndMarker(); 101 |   } 102 | 103 |   public List<Record<?>> getExpectedRecords(final Duration timeout) { 104 |     final var records = new ArrayList<Record<?>>(); 105 |     assertThat(endMarkerKey).as("the end marker was published so it can be looked up").isPositive(); 106 | 107 |     Awaitility.await("until all expected records have been exported") 108 |         .atMost(timeout) 109 |         .pollInterval(Duration.ofMillis(250)) 110 |         .pollDelay(Duration.ZERO) 111 |         .pollInSameThread() 112 |         .untilAsserted( 113 |             () -> { 114 |               records.clear(); 115 |               records.addAll(exporterClient.streamRecords().collect(Collectors.toList())); 116 |               assertEndMarkerExported(records); 117 |             }); 118 | 119 |     return records; 120 |   } 121 | 122 |   private void assertEndMarkerExported(final ArrayList<Record<?>> records) { 123 |     assertThat(records) 124 |         .last() 125 |         .as("exported records contain the last expected record") 126 |         .satisfies( 127 |             r -> RecordAssert.assertThat(r).hasKey(endMarkerKey).hasIntent(MessageIntent.EXPIRED)); 128 |   } 129 | 130 |   private void publishEndMarker() { 131 |     final var response = 132 |         client 133 |             .newPublishMessageCommand() 134 |             .messageName("endMarker") 135 |             .correlationKey("endMarker") 136 |             .messageId("endMarker") 137 |             .timeToLive(Duration.ZERO) 138 |             .send() 139 |             .join(); 140 | 141 |     endMarkerKey = response.getMessageKey(); 142 |   } 143 | 144 |   private Record<IncidentRecordValue> awaitIncidentRaised(final long workflowInstanceKey) { 145 |     return Awaitility.await("await incident to be raised") 146 |         .pollInterval(Duration.ofMillis(200)) 147 |         .atMost(Duration.ofSeconds(30)) 148 |         .until(() -> findIncident(workflowInstanceKey), Optional::isPresent) 149 |         .orElseThrow(); 150 |   } 151 | 152 |   @SuppressWarnings({"unchecked", "java:S1905"}) 153 |   private Optional<Record<IncidentRecordValue>> findIncident(final long processInstanceKey) { 154 |     return exporterClient 155 |         .streamRecords() 156 |         .filter(r -> r.getIntent() == IncidentIntent.CREATED) 157 |         .map(r -> (Record<IncidentRecordValue>) r) 158 |         .filter(r -> r.getValue().getProcessInstanceKey() == processInstanceKey) 159 |         .filter(r -> r.getValue().getElementId().equals(TASK_NAME)) 160 |         .findFirst(); 161 |   } 162 | 163 |   private void handleJob( 164 |       final AtomicBoolean fail, final JobClient jobClient, final ActivatedJob job) { 165 |     if (fail.getAndSet(false)) { 166 |       jobClient.newFailCommand(job.getKey()).retries(0).errorMessage("failed").send().join(); 167 |     } else { 168 |       jobClient.newCompleteCommand(job.getKey()).send().join(); 169 |     } 170 |   } 171 | 172 |   private void deployWorkflow() { 173 |     client.newDeployCommand().addProcessModel(SAMPLE_PROCESS, PROCESS_FILE_NAME).send().join(); 174 |   } 175 | 176 |   private long createWorkflowInstance(final Map<String, Object> variables) { 177 |
return client 178 |         .newCreateInstanceCommand() 179 |         .bpmnProcessId(PROCESS_NAME) 180 |         .latestVersion() 181 |         .variables(variables) 182 |         .send() 183 |         .join() 184 |         .getProcessInstanceKey(); 185 |   } 186 | 187 |   private JobWorker createJobWorker(final JobHandler handler) { 188 |     return client.newWorker().jobType(JOB_TYPE).handler(handler).open(); 189 |   } 190 | 191 |   private void publishMessage() { 192 |     client 193 |         .newPublishMessageCommand() 194 |         .messageName(MESSAGE_NAME) 195 |         .correlationKey(CORRELATION_KEY) 196 |         .send() 197 |         .join(); 198 |   } 199 | 200 |   private void awaitWorkflowCompletion(final long workflowInstanceKey) { 201 |     Awaitility.await("await workflow " + workflowInstanceKey + " completion") 202 |         .pollInterval(Duration.ofMillis(200)) 203 |         .atMost(Duration.ofSeconds(30)) 204 |         .untilAsserted(() -> assertThat(getProcessCompleted(workflowInstanceKey)).isPresent()); 205 |   } 206 | 207 |   @SuppressWarnings({"unchecked", "java:S1905"}) 208 |   private Optional<Record<ProcessInstanceRecordValue>> getProcessCompleted( 209 |       final long workflowInstanceKey) { 210 |     return exporterClient 211 |         .streamRecords() 212 |         .filter(r -> r.getIntent() == ProcessInstanceIntent.ELEMENT_COMPLETED) 213 |         .filter(r -> r.getKey() == workflowInstanceKey) 214 |         .map(r -> (Record<ProcessInstanceRecordValue>) r) 215 |         .filter(r -> r.getValue().getBpmnElementType() == BpmnElementType.PROCESS) 216 |         .findFirst(); 217 |   } 218 | } 219 | -------------------------------------------------------------------------------- /qa/src/test/resources/exporters.yml: -------------------------------------------------------------------------------- 1 | zeebe: 2 |   broker: 3 |     exporters: 4 |       debug: 5 |         className: io.camunda.zeebe.broker.exporter.debug.DebugHttpExporter 6 |         args: 7 |           port: 8000 8 |       kafka: 9 |         className: io.zeebe.exporters.kafka.KafkaExporter 10 |         jarPath: /usr/local/zeebe/exporters/zeebe-kafka-exporter.jar 11 |         args: 12 |           maxBatchSize: 100 13 |           maxBlockingTimeoutMs: 1000 14 |           inFlightRecordCheckIntervalMs: 1000 15 | 16 |           producer: 17 |             requestTimeoutMs: 5000 18 |             closeTimeoutMs: 5000 19 |             clientId: zeebe 20 |             maxConcurrentRequests: 3 21 | 22 |             config: | 23 |               linger.ms=5 24 |               buffer.memory=8388608 25 |               batch.size=32768 26 |               max.block.ms=5000 27 | 28 |           records: 29 |             defaults: { type: "command,event,rejection", topic: zeebe } -------------------------------------------------------------------------------- /qa/src/test/resources/log4j2.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /qa/src/test/resources/simplelogger.properties: -------------------------------------------------------------------------------- 1 | org.slf4j.simpleLogger.logFile=System.out 2 | org.slf4j.simpleLogger.showShortLogName=true 3 | org.slf4j.simpleLogger.defaultLogLevel=info 4 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT=debug 5 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT.zeebeContainer=warn 6 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT.kafkaContainer=warn 7 | org.slf4j.simpleLogger.log.org.apache=warn 8 | org.slf4j.simpleLogger.log.kafka=warn 9 | -------------------------------------------------------------------------------- /revapi.json: -------------------------------------------------------------------------------- 1 | [ 2 |   { 3 |     "extension": "revapi.java", 4 |     "configuration": { 5 |       "reportUsesFor": "all-differences", 6 |       "missing-classes": { 7 |
"behavior": "ignore", 8 | "ignoreMissingAnnotations": true 9 | }, 10 | "matchOverloads": false 11 | } 12 | }, 13 | { 14 | "extension": "revapi.versions", 15 | "configuration": { 16 | "enabled": true, 17 | "passThroughDifferences": [ 18 | "java.class.nonPublicPartOfAPI" 19 | ], 20 | "versionIncreaseAllows": { 21 | "major": { 22 | "severity": "BREAKING" 23 | }, 24 | "minor": { 25 | "classification": { 26 | "BINARY": "NON_BREAKING", 27 | "SOURCE": "BREAKING", 28 | "SEMANTIC": "BREAKING", 29 | "OTHER": "BREAKING" 30 | } 31 | }, 32 | "patch": { 33 | "classification": { 34 | "BINARY": "NON_BREAKING", 35 | "SOURCE": "BREAKING", 36 | "SEMANTIC": "BREAKING", 37 | "OTHER": "BREAKING" 38 | } 39 | } 40 | } 41 | } 42 | }, 43 | { 44 | "extension": "revapi.filter", 45 | "justification": "Ignore everything not included in the module itself", 46 | "configuration": { 47 | "archives": { 48 | "include": [ 49 | "io\\.zeebe:zeebe-kafka-exporter:.*" 50 | ] 51 | }, 52 | "exclude": [ 53 | { 54 | "matcher": "java", 55 | "match": "@org.apiguardian.api.API(status != org.apiguardian.api.API.Status.STABLE) ^*;" 56 | } 57 | ] 58 | } 59 | }, 60 | { 61 | "extension": "revapi.differences", 62 | "id": "intentional-api-changes", 63 | "configuration": { 64 | "differences": [] 65 | } 66 | } 67 | ] 68 | -------------------------------------------------------------------------------- /serde/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 4.0.0 6 | Zeebe Kafka Exporter Serialization 7 | zeebe-kafka-exporter-serde 8 | jar 9 | https://github.com/zeebe-io/zeebe-kafka-exporter/serde 10 | 11 | 12 | zeebe-kafka-exporter-root 13 | io.zeebe 14 | 3.1.2-SNAPSHOT 15 | ../pom.xml 16 | 17 | 18 | 19 | 20 | 8 21 | 22 | 23 | 24 | 25 | 26 | io.camunda 27 | zeebe-protocol-jackson 28 | 29 | 30 | 31 | com.fasterxml.jackson.core 32 | jackson-databind 33 | 34 | 35 | 36 | com.fasterxml.jackson.core 37 | jackson-annotations 38 | 39 | 40 | 41 | com.fasterxml.jackson.core 42 | jackson-core 43 | 44 | 45 | 46 | 47 | org.apache.kafka 48 | kafka-clients 49 | 50 | 51 | 52 | 53 | io.camunda 54 | zeebe-protocol 55 | 56 | 57 | 58 | org.junit.jupiter 59 | junit-jupiter-api 60 | test 61 | 62 | 63 | 64 | org.assertj 65 | assertj-core 66 | test 67 | 68 | 69 | 70 | com.fasterxml.jackson.dataformat 71 | jackson-dataformat-cbor 72 | test 73 | 74 | 75 | 76 | 77 | 78 | 79 | org.revapi 80 | revapi-maven-plugin 81 | 82 | 83 | 84 | revapi.json 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /serde/revapi.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "extension": "revapi.differences", 4 | "configuration": { 5 | "justification": "The serializers depend on Jackson, so exposing its types is fine", 6 | "ignore": true, 7 | "differences": [ 8 | { 9 | "code": "java.class.externalClassExposedInAPI", 10 | "oldArchive": "com.fasterxml.jackson.core:*:jar" 11 | }, 12 | { 13 | "code": "java.class.nonPublicPartOfAPI", 14 | "oldArchive": "com.fasterxml.jackson.core:*:jar" 15 | } 16 | ] 17 | } 18 | } 19 | ] 20 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/JacksonDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you 
may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License. 15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.databind.ObjectReader; 19 | import java.io.IOException; 20 | import java.util.Map; 21 | import org.apache.kafka.common.errors.SerializationException; 22 | import org.apache.kafka.common.serialization.Deserializer; 23 | 24 | /** 25 |  * Deserializer implementation which reads an object using a pre-configured {@link ObjectReader}. 26 |  * 27 |  * @param <T> the concrete type to deserialize 28 |  */ 29 | public abstract class JacksonDeserializer<T> implements Deserializer<T> { 30 |   protected final ObjectReader reader; 31 | 32 |   protected JacksonDeserializer(final ObjectReader reader) { 33 |     this.reader = reader; 34 |   } 35 | 36 |   @Override 37 |   public void configure(final Map<String, ?> configs, final boolean isKey) {} 38 | 39 |   @Override 40 |   public T deserialize(final String topic, final byte[] data) { 41 |     try { 42 |       return reader.readValue(data); 43 |     } catch (final IOException e) { 44 |       throw new SerializationException( 45 |           String.format("Expected to deserialize data from topic [%s], but failed", topic), e); 46 |     } 47 |   } 48 | } 49 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/JacksonSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License. 15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.core.JsonProcessingException; 19 | import com.fasterxml.jackson.databind.ObjectWriter; 20 | import java.util.Map; 21 | import org.apache.kafka.common.errors.SerializationException; 22 | import org.apache.kafka.common.serialization.Serializer; 23 | 24 | /** 25 |  * Serializer implementation which writes an object using a pre-configured {@link ObjectWriter}.
26 |  * 27 |  * @param <T> the concrete type to serialize 28 |  */ 29 | public abstract class JacksonSerializer<T> implements Serializer<T> { 30 |   protected final ObjectWriter writer; 31 | 32 |   protected JacksonSerializer(final ObjectWriter writer) { 33 |     this.writer = writer; 34 |   } 35 | 36 |   @Override 37 |   public void configure(final Map<String, ?> configs, final boolean isKey) {} 38 | 39 |   @Override 40 |   public byte[] serialize(final String topic, final T data) { 41 |     try { 42 |       return writer.writeValueAsBytes(data); 43 |     } catch (final JsonProcessingException e) { 44 |       throw new SerializationException( 45 |           String.format("Expected to serialize data for topic [%s], but failed", topic), e); 46 |     } 47 |   } 48 | 49 |   @Override 50 |   public void close() {} 51 | } 52 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License. 15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.core.type.TypeReference; 19 | import com.fasterxml.jackson.databind.ObjectMapper; 20 | import com.fasterxml.jackson.databind.ObjectReader; 21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord; 22 | import io.camunda.zeebe.protocol.record.Record; 23 | import org.apache.kafka.common.serialization.Deserializer; 24 | 25 | /** 26 |  * A {@link Deserializer} implementation for {@link Record} objects, which uses a pre-configured 27 |  * {@link ObjectReader} for that type, and {@link 28 |  * io.camunda.zeebe.protocol.jackson.record.AbstractRecord} as the concrete {@link Record} 29 |  * implementation. 30 |  */ 31 | public final class RecordDeserializer extends JacksonDeserializer<Record<?>> { 32 | 33 |   public RecordDeserializer() { 34 |     this(new ObjectMapper()); 35 |   } 36 | 37 |   public RecordDeserializer(final ObjectMapper objectMapper) { 38 |     this(objectMapper.readerFor(new TypeReference<AbstractRecord<?>>() {})); 39 |   } 40 | 41 |   public RecordDeserializer(final ObjectReader objectReader) { 42 |     super(objectReader); 43 |   } 44 | } 45 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordId.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.annotation.JsonCreator; 19 | import com.fasterxml.jackson.annotation.JsonGetter; 20 | import com.fasterxml.jackson.annotation.JsonProperty; 21 | import io.camunda.zeebe.protocol.record.Record; 22 | import java.util.Objects; 23 | 24 | /** 25 | * {@link RecordId} represents a unique identifier for a given Zeebe {@link 26 | * io.camunda.zeebe.protocol.record.Record}. On a single partition (identified via {@link 27 | * Record#getPartitionId()}), every record has a unique position (identified via {@link 28 | * Record#getPosition()}). 29 | */ 30 | public final class RecordId { 31 | @JsonProperty("partitionId") 32 | private final int partitionId; 33 | 34 | @JsonProperty("position") 35 | private final long position; 36 | 37 | @JsonCreator 38 | public RecordId( 39 | final @JsonProperty("partitionId") int partitionId, 40 | final @JsonProperty("position") long position) { 41 | this.partitionId = partitionId; 42 | this.position = position; 43 | } 44 | 45 | @JsonGetter 46 | public int getPartitionId() { 47 | return partitionId; 48 | } 49 | 50 | @JsonGetter 51 | public long getPosition() { 52 | return position; 53 | } 54 | 55 | @Override 56 | public int hashCode() { 57 | return Objects.hash(getPartitionId(), getPosition()); 58 | } 59 | 60 | @Override 61 | public boolean equals(final Object o) { 62 | if (this == o) { 63 | return true; 64 | } 65 | if (o == null || getClass() != o.getClass()) { 66 | return false; 67 | } 68 | final RecordId recordId = (RecordId) o; 69 | return getPartitionId() == recordId.getPartitionId() && getPosition() == recordId.getPosition(); 70 | } 71 | 72 | @Override 73 | public String toString() { 74 | return "RecordId{" + "partitionId=" + partitionId + ", position=" + position + '}'; 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordIdDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import com.fasterxml.jackson.databind.ObjectReader; 20 | import org.apache.kafka.common.serialization.Deserializer; 21 | 22 | /** 23 |  * A {@link Deserializer} implementation for {@link RecordId} objects, which uses a pre-configured 24 |  * {@link ObjectReader} for that type. 25 |  */ 26 | public final class RecordIdDeserializer extends JacksonDeserializer<RecordId> { 27 |   public RecordIdDeserializer() { 28 |     this(new ObjectMapper()); 29 |   } 30 | 31 |   public RecordIdDeserializer(final ObjectMapper objectMapper) { 32 |     this(objectMapper.readerFor(RecordId.class)); 33 |   } 34 | 35 |   public RecordIdDeserializer(final ObjectReader objectReader) { 36 |     super(objectReader); 37 |   } 38 | } 39 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordIdSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License. 15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import com.fasterxml.jackson.databind.ObjectWriter; 20 | import org.apache.kafka.common.serialization.Serializer; 21 | 22 | /** 23 |  * A {@link Serializer} implementation for {@link RecordId} objects which uses a pre-configured 24 |  * {@link ObjectWriter} for that type. 25 |  */ 26 | public final class RecordIdSerializer extends JacksonSerializer<RecordId> { 27 | 28 |   public RecordIdSerializer() { 29 |     this(new ObjectMapper()); 30 |   } 31 | 32 |   protected RecordIdSerializer(final ObjectMapper objectMapper) { 33 |     this(objectMapper.writerFor(RecordId.class)); 34 |   } 35 | 36 |   protected RecordIdSerializer(final ObjectWriter writer) { 37 |     super(writer); 38 |   } 39 | } 40 | -------------------------------------------------------------------------------- /serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License.
15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import com.fasterxml.jackson.core.type.TypeReference; 19 | import com.fasterxml.jackson.databind.ObjectMapper; 20 | import com.fasterxml.jackson.databind.ObjectWriter; 21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord; 22 | import io.camunda.zeebe.protocol.record.Record; 23 | import org.apache.kafka.common.serialization.Serializer; 24 | 25 | /** 26 |  * A {@link Serializer} implementation for {@link Record} objects which uses a pre-configured 27 |  * {@link ObjectWriter} for that type. 28 |  * 29 |  *

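 * <p>A minimal production sketch (illustrative only - it assumes a reachable broker and reuses
 * the serializers from this package): {@code new KafkaProducer<RecordId, Record<?>>(config, new
 * RecordIdSerializer(), new RecordSerializer()).send(new ProducerRecord<>("zeebe", id, record))}.
 *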
NOTE: this serializer is not used by the exporter itself. The exporter uses a custom 30 |  * serializer which piggybacks on Zeebe's built-in {@link Record#toJson()} method, which does not 31 |  * allow customization of the underlying {@link ObjectWriter}. It's provided here for testing 32 |  * purposes, and potentially for users who would like to produce records to the same topics but 33 |  * separately. 34 |  */ 35 | public final class RecordSerializer extends JacksonSerializer<Record<?>> { 36 |   public RecordSerializer() { 37 |     this(new ObjectMapper()); 38 |   } 39 | 40 |   protected RecordSerializer(final ObjectMapper objectMapper) { 41 |     this(objectMapper.writerFor(new TypeReference<Record<?>>() {})); 42 |   } 43 | 44 |   protected RecordSerializer(final ObjectWriter writer) { 45 |     super(writer); 46 |   } 47 | } 48 | -------------------------------------------------------------------------------- /serde/src/test/java/io/zeebe/exporters/kafka/serde/RecordIdTest.java: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 |  * 4 |  * Licensed under the Apache License, Version 2.0 (the "License"); 5 |  * you may not use this file except in compliance with the License. 6 |  * You may obtain a copy of the License at 7 |  * 8 |  *     http://www.apache.org/licenses/LICENSE-2.0 9 |  * 10 |  * Unless required by applicable law or agreed to in writing, software 11 |  * distributed under the License is distributed on an "AS IS" BASIS, 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 |  * See the License for the specific language governing permissions and 14 |  * limitations under the License. 15 |  */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import com.fasterxml.jackson.databind.ObjectMapper; 21 | import com.fasterxml.jackson.dataformat.cbor.databind.CBORMapper; 22 | import org.junit.jupiter.api.Test; 23 | import org.junit.jupiter.api.parallel.Execution; 24 | import org.junit.jupiter.api.parallel.ExecutionMode; 25 | 26 | @Execution(ExecutionMode.CONCURRENT) 27 | final class RecordIdTest { 28 |   private static final String TOPIC = "zeebe"; 29 | 30 |   @Test 31 |   void shouldSerialize() { 32 |     // given 33 |     final RecordId id = new RecordId(1, 1); 34 |     final RecordIdSerializer serializer = new RecordIdSerializer(); 35 |     final RecordIdDeserializer deserializer = new RecordIdDeserializer(); 36 | 37 |     // when 38 |     final byte[] serialized = serializer.serialize(TOPIC, id); 39 |     final RecordId deserialized = deserializer.deserialize(TOPIC, serialized); 40 | 41 |     // then 42 |     assertThat(deserialized).as("the deserialized ID is the same as the original").isEqualTo(id); 43 |   } 44 | 45 |   @Test 46 |   void shouldSerializeOtherFormat() { 47 |     // given 48 |     final ObjectMapper cborMapper = new CBORMapper(); 49 |     final RecordId id = new RecordId(1, 1); 50 |     final RecordIdSerializer serializer = new RecordIdSerializer(cborMapper); 51 |     final RecordIdDeserializer deserializer = new RecordIdDeserializer(cborMapper); 52 | 53 |     // when 54 |     final byte[] serialized = serializer.serialize(TOPIC, id); 55 |     final RecordId deserialized = deserializer.deserialize(TOPIC, serialized); 56 | 57 |     // then 58 |     assertThat(deserialized).as("the deserialized ID is the same as the original").isEqualTo(id); 59 |   } 60 | } 61 | -------------------------------------------------------------------------------- /serde/src/test/java/io/zeebe/exporters/kafka/serde/RecordTest.java:
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2019 camunda services GmbH (info@camunda.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package io.zeebe.exporters.kafka.serde; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import com.fasterxml.jackson.databind.ObjectMapper; 21 | import com.fasterxml.jackson.dataformat.cbor.databind.CBORMapper; 22 | import io.camunda.zeebe.protocol.jackson.record.DeploymentRecordValueBuilder; 23 | import io.camunda.zeebe.protocol.jackson.record.RecordBuilder; 24 | import io.camunda.zeebe.protocol.record.Record; 25 | import io.camunda.zeebe.protocol.record.RecordType; 26 | import io.camunda.zeebe.protocol.record.ValueType; 27 | import io.camunda.zeebe.protocol.record.intent.DeploymentIntent; 28 | import io.camunda.zeebe.protocol.record.value.DeploymentRecordValue; 29 | import org.junit.jupiter.api.Test; 30 | import org.junit.jupiter.api.parallel.Execution; 31 | import org.junit.jupiter.api.parallel.ExecutionMode; 32 | 33 | @Execution(ExecutionMode.CONCURRENT) 34 | final class RecordTest { 35 | private static final String TOPIC = "zeebe"; 36 | 37 | @Test 38 | void shouldSerialize() { 39 | // given 40 | final Record record = 41 | new RecordBuilder() 42 | .intent(DeploymentIntent.CREATED) 43 | .recordType(RecordType.EVENT) 44 | .valueType(ValueType.DEPLOYMENT) 45 | .value(new DeploymentRecordValueBuilder().build()) 46 | .build(); 47 | final RecordSerializer serializer = new RecordSerializer(); 48 | final RecordDeserializer deserializer = new RecordDeserializer(); 49 | 50 | // when 51 | final byte[] serialized = serializer.serialize(TOPIC, record); 52 | final Record deserialized = deserializer.deserialize(TOPIC, serialized); 53 | 54 | // then 55 | assertThat(deserialized) 56 | .as("the deserialized record is the same as the original") 57 | .isEqualTo(record); 58 | } 59 | 60 | @Test 61 | void shouldSerializeOtherFormat() { 62 | // given 63 | final ObjectMapper cborMapper = new CBORMapper(); 64 | final Record record = 65 | new RecordBuilder() 66 | .intent(DeploymentIntent.CREATED) 67 | .recordType(RecordType.EVENT) 68 | .valueType(ValueType.DEPLOYMENT) 69 | .value(new DeploymentRecordValueBuilder().build()) 70 | .build(); 71 | final RecordSerializer serializer = new RecordSerializer(cborMapper); 72 | final RecordDeserializer deserializer = new RecordDeserializer(cborMapper); 73 | 74 | // when 75 | final byte[] serialized = serializer.serialize(TOPIC, record); 76 | final Record deserialized = deserializer.deserialize(TOPIC, serialized); 77 | 78 | // then 79 | assertThat(deserialized) 80 | .as("the deserialized record is the same as the original") 81 | .isEqualTo(record); 82 | } 83 | } 84 | --------------------------------------------------------------------------------
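For completeness, here is a minimal standalone consumer built on the serde classes above. This is a sketch only: it assumes a broker reachable at localhost:9092 and the "zeebe" topic configured in qa/src/test/resources/exporters.yml; the group id and the endless poll loop are illustrative.

import io.camunda.zeebe.protocol.record.Record;
import io.zeebe.exporters.kafka.serde.RecordDeserializer;
import io.zeebe.exporters.kafka.serde.RecordId;
import io.zeebe.exporters.kafka.serde.RecordIdDeserializer;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public final class ZeebeRecordConsumerExample {
  public static void main(final String[] args) {
    // assumption: a broker on localhost:9092 - adjust to your environment
    final Map<String, Object> config =
        Map.of(
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092",
            ConsumerConfig.GROUP_ID_CONFIG, "zeebe-records-example", // illustrative group id
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
            // mirrors newConsumer() in KafkaExporterIT above
            ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

    // keys are RecordId (partitionId + position), values are the full Zeebe records
    try (final KafkaConsumer<RecordId, Record<?>> consumer =
        new KafkaConsumer<>(config, new RecordIdDeserializer(), new RecordDeserializer())) {
      consumer.subscribe(List.of("zeebe")); // default topic from exporters.yml
      while (!Thread.currentThread().isInterrupted()) {
        consumer
            .poll(Duration.ofSeconds(1))
            .forEach(
                r -> System.out.printf("%s -> position %d%n", r.key(), r.value().getPosition()));
      }
    }
  }
}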