├── .build ├── check-kafka-tag.sh ├── cut-release.sh ├── local-build-test.sh └── set-kafka-version.sh ├── .dockerignore ├── .github ├── ci-maven-settings.xml ├── dependabot.yml └── workflows │ ├── build-kafka-images.yml │ ├── build-kafka-versions.yml │ ├── build.yml │ ├── deploy-release.yml │ ├── pr.yml │ └── prepare-release.yml ├── .gitignore ├── .mvn └── wrapper │ ├── MavenWrapperDownloader.java │ ├── maven-wrapper.jar │ └── maven-wrapper.properties ├── LICENSE ├── README.md ├── docker-compose.yml ├── kafka-native-test-container ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── ozangunalp │ │ └── kafka │ │ └── test │ │ └── container │ │ ├── KafkaNativeContainer.java │ │ └── ToFileConsumer.java │ └── test │ ├── java │ └── com │ │ └── ozangunalp │ │ └── kafka │ │ └── test │ │ └── container │ │ ├── KafkaNativeContainerIT.java │ │ ├── KerberosContainer.java │ │ └── KeycloakContainer.java │ └── resources │ ├── kafka-keystore.p12 │ ├── kafka-truststore.p12 │ ├── kerberos │ ├── kafkaServer.properties │ ├── krb5ClientTemplate.conf │ └── krb5KafkaBroker.conf │ ├── keycloak │ └── realms │ │ └── kafka-authz-realm.json │ ├── oauth.properties │ ├── sasl_plain_plaintext.properties │ ├── sasl_scram_plaintext.properties │ └── ssl.properties ├── kafka-server ├── pom.xml └── src │ ├── main │ ├── docker │ │ ├── Dockerfile.jvm │ │ ├── Dockerfile.legacy-jar │ │ ├── Dockerfile.native │ │ └── Dockerfile.native-micro │ ├── java │ │ └── com │ │ │ └── ozangunalp │ │ │ └── kafka │ │ │ └── server │ │ │ ├── BrokerConfig.java │ │ │ ├── EmbeddedKafkaBroker.java │ │ │ ├── Endpoints.java │ │ │ ├── ScramUtils.java │ │ │ ├── ServerConfig.java │ │ │ ├── Startup.java │ │ │ ├── Storage.java │ │ │ └── metrics │ │ │ └── Reporter.java │ └── resources │ │ └── application.properties │ └── test │ └── java │ └── com │ └── ozangunalp │ └── kafka │ └── server │ ├── BrokerConfigTest.java │ ├── ScramUtilsTest.java │ └── SmokeTest.java ├── mvnw ├── mvnw.cmd ├── pom.xml └── quarkus-kafka-server-extension ├── deployment ├── pom.xml └── src │ └── main │ └── java │ └── com │ └── ozangunalp │ └── kafka │ └── server │ └── extension │ └── deployment │ └── KafkaServerExtensionProcessor.java ├── pom.xml └── runtime ├── pom.xml └── src └── main ├── java └── com │ └── ozangunalp │ └── kafka │ └── server │ └── extension │ └── runtime │ ├── JsonPathConfigRecorder.java │ ├── KafkaServerSubstitutions.java │ └── StrimziSubstitutions.java └── resources └── META-INF └── quarkus-extension.yaml /.build/check-kafka-tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | echo "Checking branch ${RELEASE_VERSION} against ${KAFKA_VERSION}" 5 | 6 | if [[ ! "${RELEASE_VERSION}" =~ ^.*-kafka-${KAFKA_VERSION} ]]; then 7 | echo "::error Branch name does not contain the kafka version" 8 | exit 1 9 | fi -------------------------------------------------------------------------------- /.build/cut-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | init_git() { 5 | git config --global user.name "${GITHUB_ACTOR}" 6 | git config --global user.email "ozangunalp@gmail.com" 7 | } 8 | 9 | # -------- SCRIPT START HERE ----------------- 10 | 11 | init_git 12 | 13 | export BRANCH="HEAD" 14 | export GPG_CONFIG="" 15 | 16 | if [[ ${DRY_RUN} == "true" ]]; then 17 | echo "[DRY RUN] - Dry Run Enabled - Git push will be skipped." 18 | fi 19 | 20 | if [[ ! 
-z "${GPG_KEYNAME}" ]]; then 21 | echo "[GPG] - Seting gpg.keyname to '${GPG_KEYNAME}'" 22 | GPG_CONFIG="-Dgpg.keyname=${GPG_KEYNAME}" 23 | fi 24 | 25 | echo "Cutting release ${RELEASE_VERSION}" 26 | ./mvnw -s .github/ci-maven-settings.xml -B -fn clean 27 | git checkout ${BRANCH} 28 | HASH=$(git rev-parse --verify $BRANCH) 29 | echo "Last commit is ${HASH} - creating detached branch" 30 | git checkout -b "r${RELEASE_VERSION}" "${HASH}" 31 | 32 | echo "Update version to ${RELEASE_VERSION}" 33 | ./mvnw -B versions:set -DnewVersion="${RELEASE_VERSION}" -DgenerateBackupPoms=false -s .github/ci-maven-settings.xml 34 | 35 | if [[ ${SKIP_TESTS} == "true" ]]; then 36 | ./mvnw -B clean verify -Prelease ${GPG_CONFIG} -DskipTests -s .github/ci-maven-settings.xml 37 | else 38 | ./mvnw -B clean verify -Prelease ${GPG_CONFIG} -s .github/ci-maven-settings.xml 39 | fi 40 | 41 | git commit -am "[RELEASE] - Bump version to ${RELEASE_VERSION}" 42 | git tag "${RELEASE_VERSION}" 43 | echo "Pushing tag to origin" 44 | if [[ ${DRY_RUN} == "true" ]]; then 45 | echo "[DRY RUN] - Skipping push: git push origin ${RELEASE_VERSION}" 46 | else 47 | git push origin "${RELEASE_VERSION}" 48 | fi 49 | -------------------------------------------------------------------------------- /.build/local-build-test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | mvn -B clean package -Dnative \ 5 | -Dquarkus.native.container-build=true \ 6 | -Dquarkus.container-image.build=true \ 7 | -Dquarkus.container-image.additional-tags=latest 8 | 9 | mvn -B clean verify -Dtest-container 10 | -------------------------------------------------------------------------------- /.build/set-kafka-version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | echo Setting kafka version to $1 5 | KAFKA_VERSION=$1 6 | 7 | ./mvnw -N versions:set-property -Dproperty=kafka.version -DnewVersion=${KAFKA_VERSION} -DgenerateBackupPoms=false 8 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !target/*-runner 3 | !target/*-runner.jar 4 | !target/lib/* 5 | !target/quarkus-app/* 6 | !target/app-cds.jsa -------------------------------------------------------------------------------- /.github/ci-maven-settings.xml: -------------------------------------------------------------------------------- 1 | 4 | 5 | 6 | ossrh 7 | ${env.OSSRH_USERNAME} 8 | ${env.OSSRH_TOKEN} 9 | 10 | 11 | gpg.passphrase 12 | ${env.MAVEN_GPG_PASSPHRASE} 13 | 14 | 15 | 16 | 17 | google-mirror 18 | 19 | true 20 | 21 | 22 | 23 | google-maven-central 24 | GCS Maven Central mirror EU 25 | https://maven-central.storage-download.googleapis.com/maven2/ 26 | 27 | true 28 | 29 | 30 | false 31 | 32 | 33 | 34 | 35 | 36 | google-maven-central 37 | GCS Maven Central mirror 38 | https://maven-central.storage-download.googleapis.com/maven2/ 39 | 40 | true 41 | 42 | 43 | false 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: maven 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | target-branch: main 9 | reviewers: 10 | - ozangunalp 11 | - k-wall 12 | - 
package-ecosystem: "github-actions" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /.github/workflows/build-kafka-images.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Kafka Images 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | branch: 8 | description: 'The branch from which to deploy the release, must follow [project-version]-kafka-[kafka-version]' 9 | required: true 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | arch: 18 | - amd64 19 | - arm64 20 | permissions: 21 | contents: read 22 | packages: write 23 | name: Build Docker Images for ${{ matrix.arch }} 24 | steps: 25 | - uses: actions/checkout@v4 26 | with: 27 | ref: ${{ github.event.inputs.branch }} 28 | token: ${{ secrets.RELEASE_TOKEN }} 29 | - uses: actions/setup-java@v4 30 | with: 31 | java-version: 21 32 | distribution: temurin 33 | cache: maven 34 | - name: Set Release Version & Image Tags 35 | run: | 36 | RELEASE_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) 37 | KAFKA_VERSION=$(mvn help:evaluate -Dexpression=kafka.version -q -DforceStdout) 38 | echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV 39 | echo "KAFKA_VERSION=${KAFKA_VERSION}" >> $GITHUB_ENV 40 | echo "BUILD_IMAGE_TAG=${RELEASE_VERSION}" >> $GITHUB_ENV 41 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 42 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 43 | echo "LATEST_IMAGE_TAG=latest-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 44 | - name: Check Tag name 45 | run: .build/check-kafka-tag.sh 46 | - name: Set up QEMU 47 | if: matrix.arch == 'arm64' 48 | uses: docker/setup-qemu-action@v3 49 | with: 50 | platforms: ${{ matrix.arch }} 51 | - name: Set up Docker Buildx 52 | uses: docker/setup-buildx-action@v3 53 | with: 54 | install: true 55 | - name: Build with Maven 56 | run: | 57 | mvn -s .github/ci-maven-settings.xml -B clean package -Dnative \ 58 | -Dquarkus.native.additional-build-args=${{ matrix.arch == 'arm64' && '-H:PageSize=65536' || '-march=compatibility' }} \ 59 | -Dquarkus.container-image.registry=${{ env.CONTAINER_REGISTRY }} \ 60 | -Dquarkus.container-image.group=${{ env.CONTAINER_GROUP }} \ 61 | -Dquarkus.native.container-build=true \ 62 | -Dquarkus.native.container-runtime-options=--platform=linux/${{ matrix.arch }} \ 63 | -Dquarkus.docker.buildx.platform=linux/${{ matrix.arch }} \ 64 | -Dquarkus.container-image.build=true \ 65 | -Dquarkus.container-image.push=false \ 66 | -Dquarkus.container-image.tag=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} \ 67 | -Dquarkus.container-image.additional-tags=${{ env.LATEST_IMAGE_TAG }}-${{ matrix.arch }} 68 | - name: Integration Tests with Maven 69 | run: | 70 | mvn -s .github/ci-maven-settings.xml -B clean verify -Dtest-container \ 71 | -Dkafka-native-container-image=${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native \ 72 | -Dkafka-native-container-version=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} 73 | - name: Log in to the Container registry 74 | uses: docker/login-action@v3 75 | with: 76 | registry: ${{ env.CONTAINER_REGISTRY }} 77 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 78 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 79 | - name: Push kafka-native images 80 | working-directory: ${{ matrix.project.name }} 81 | run: "docker push -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP 
}}/kafka-native" 82 | 83 | create-multiarch-manifest: 84 | needs: build 85 | runs-on: ubuntu-latest 86 | permissions: 87 | contents: read 88 | packages: write 89 | name: Create Multiarch Docker Images for kafka-native 90 | steps: 91 | - uses: actions/checkout@v4 92 | with: 93 | ref: ${{ github.event.inputs.branch }} 94 | token: ${{ secrets.RELEASE_TOKEN }} 95 | - uses: actions/setup-java@v4 96 | with: 97 | java-version: 21 98 | distribution: temurin 99 | cache: maven 100 | - name: Set Release Version & Image Tags 101 | run: | 102 | RELEASE_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) 103 | KAFKA_VERSION=$(mvn help:evaluate -Dexpression=kafka.version -q -DforceStdout) 104 | echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV 105 | echo "KAFKA_VERSION=${KAFKA_VERSION}" >> $GITHUB_ENV 106 | echo "BUILD_IMAGE_TAG=${RELEASE_VERSION}" >> $GITHUB_ENV 107 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 108 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 109 | echo "LATEST_IMAGE_TAG=latest-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 110 | - name: Log in to the Container registry 111 | uses: docker/login-action@v3 112 | with: 113 | registry: ${{ env.CONTAINER_REGISTRY }} 114 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 115 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 116 | - name: Create and push multi-arch manifests kafka-native 117 | shell: bash 118 | run: | 119 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} \ 120 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-amd64 \ 121 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-arm64 122 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} 123 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }} \ 124 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }}-amd64 \ 125 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }}-arm64 126 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }} 127 | -------------------------------------------------------------------------------- /.github/workflows/build-kafka-versions.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Continuous Integration for Kafka versions 3 | 4 | on: 5 | push: 6 | branches: 7 | - 'kafka/**' 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: actions/setup-java@v4 15 | with: 16 | java-version: 21 17 | distribution: temurin 18 | cache: maven 19 | - name: Build with Maven 20 | run: | 21 | mvn -s .github/ci-maven-settings.xml -B \ 22 | clean package -Dnative \ 23 | -Dquarkus.native.container-build=true \ 24 | -Dquarkus.container-image.build=true \ 25 | -Dquarkus.container-image.additional-tags=latest 26 | - name: Integration Tests with Maven 27 | run: | 28 | mvn -s .github/ci-maven-settings.xml -B \ 29 | clean verify -Dtest-container 30 | - name: Upload artifacts 31 | if: failure() 32 | uses: actions/upload-artifact@v4 33 | with: 34 | name: integration-test-logs 35 | retention-days: 3 36 | path: 
kafka-native-test-container/target/container-logs/ 37 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Continuous Integration Build 3 | 4 | on: 5 | push: 6 | branches: [ "main" ] 7 | workflow_dispatch: 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.ref }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | arch: 20 | - amd64 21 | - arm64 22 | permissions: 23 | contents: read 24 | packages: write 25 | name: Build Docker Images for ${{ matrix.arch }} 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: actions/setup-java@v4 29 | with: 30 | java-version: 21 31 | distribution: temurin 32 | cache: maven 33 | - name: Set Image Tag 34 | run: | 35 | echo "BUILD_IMAGE_TAG=${GITHUB_REF_NAME}-${GITHUB_SHA}" >> $GITHUB_ENV 36 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 37 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 38 | echo "ADDITIONAL_TAG=latest-snapshot" >> $GITHUB_ENV 39 | - name: Set up QEMU 40 | if: matrix.arch == 'arm64' 41 | uses: docker/setup-qemu-action@v3 42 | with: 43 | platforms: ${{ matrix.arch }} 44 | - name: Set up Docker Buildx 45 | uses: docker/setup-buildx-action@v3 46 | with: 47 | install: true 48 | - name: Build with Maven 49 | run: | 50 | mvn -s .github/ci-maven-settings.xml -B clean package -Dnative \ 51 | -Dquarkus.native.additional-build-args=${{ matrix.arch == 'arm64' && '-H:PageSize=65536' || '-march=compatibility' }} \ 52 | -Dquarkus.container-image.registry=${{ env.CONTAINER_REGISTRY }} \ 53 | -Dquarkus.container-image.group=${{ env.CONTAINER_GROUP }} \ 54 | -Dquarkus.native.container-build=true \ 55 | -Dquarkus.native.container-runtime-options=--platform=linux/${{ matrix.arch }} \ 56 | -Dquarkus.docker.buildx.platform=linux/${{ matrix.arch }} \ 57 | -Dquarkus.container-image.build=true \ 58 | -Dquarkus.container-image.push=false \ 59 | -Dquarkus.container-image.tag=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} \ 60 | -Dquarkus.container-image.additional-tags=${{ env.ADDITIONAL_TAG }}-${{ matrix.arch }} 61 | - name: Integration Tests with Maven 62 | run: | 63 | mvn -s .github/ci-maven-settings.xml -B clean verify -Dtest-container \ 64 | -Dkafka-native-container-image=${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native \ 65 | -Dkafka-native-container-version=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} 66 | - name: Log in to the Container registry 67 | uses: docker/login-action@v3 68 | with: 69 | registry: ${{ env.CONTAINER_REGISTRY }} 70 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 71 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 72 | - name: Push kafka-native images 73 | working-directory: ${{ matrix.project.name }} 74 | run: "docker push -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native" 75 | - name: Upload artifacts 76 | if: failure() 77 | uses: actions/upload-artifact@v4 78 | with: 79 | name: integration-test-logs 80 | retention-days: 3 81 | path: kafka-native-test-container/target/container-logs/ 82 | 83 | create-multiarch-manifest: 84 | needs: build 85 | runs-on: ubuntu-latest 86 | permissions: 87 | contents: read 88 | packages: write 89 | name: Create Multiarch Docker Images for kafka-native 90 | steps: 91 | - uses: actions/checkout@v4 92 | - uses: actions/setup-java@v4 93 | with: 94 | 
java-version: 21 95 | distribution: temurin 96 | cache: maven 97 | - name: Set Image Tag 98 | run: | 99 | echo "BUILD_IMAGE_TAG=${GITHUB_REF_NAME}-${GITHUB_SHA}" >> $GITHUB_ENV 100 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 101 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 102 | echo "ADDITIONAL_TAG=latest-snapshot" >> $GITHUB_ENV 103 | - name: Log in to the Container registry 104 | uses: docker/login-action@v3 105 | with: 106 | registry: ${{ env.CONTAINER_REGISTRY }} 107 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 108 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 109 | - name: Create and push multi-arch manifests kafka-native 110 | shell: bash 111 | run: | 112 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} \ 113 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-amd64 \ 114 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-arm64 115 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} 116 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }} \ 117 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }}-amd64 \ 118 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }}-arm64 119 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }} 120 | 121 | release-maven: 122 | needs: create-multiarch-manifest 123 | runs-on: ubuntu-latest 124 | name: Release Snapshots to Maven Central 125 | steps: 126 | - uses: actions/checkout@v4 127 | - uses: actions/setup-java@v4 128 | with: 129 | java-version: 21 130 | distribution: temurin 131 | cache: maven 132 | server-id: ossrh 133 | server-username: OSSRH_USERNAME # env variable for username in deploy 134 | server-password: OSSRH_TOKEN # env variable for token in deploy 135 | gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import 136 | gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase 137 | overwrite-settings: false 138 | - name: Release Snapshots 139 | run: | 140 | mvn -s .github/ci-maven-settings.xml -B \ 141 | clean deploy -DskipTests -Prelease 142 | env: 143 | OSSRH_USERNAME: ${{ secrets.OSSRH_USERNAME }} 144 | OSSRH_TOKEN: ${{ secrets.OSSRH_TOKEN }} 145 | MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} 146 | -------------------------------------------------------------------------------- /.github/workflows/deploy-release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Deploy Release 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | branch: 8 | description: 'The branch from which to deploy the release' 9 | required: true 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | arch: 18 | - amd64 19 | - arm64 20 | permissions: 21 | contents: read 22 | packages: write 23 | name: Build Docker Images for ${{ matrix.arch }} 24 | steps: 25 | - uses: actions/checkout@v4 26 | with: 27 | ref: ${{ github.event.inputs.branch }} 28 | token: ${{ secrets.RELEASE_TOKEN }} 29 | - uses: actions/setup-java@v4 30 | with: 31 | java-version: 21 32 | distribution: temurin 33 | cache: 
maven 34 | - name: Set Release Version & Image Tags 35 | run: | 36 | RELEASE_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) 37 | KAFKA_VERSION=$(mvn help:evaluate -Dexpression=kafka.version -q -DforceStdout) 38 | echo "BUILD_IMAGE_TAG=${RELEASE_VERSION}-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 39 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 40 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 41 | echo "LATEST_IMAGE_TAG=latest-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 42 | echo "ADDITIONAL_TAG=latest" >> $GITHUB_ENV 43 | - name: Set up QEMU 44 | if: matrix.arch == 'arm64' 45 | uses: docker/setup-qemu-action@v3 46 | with: 47 | platforms: ${{ matrix.arch }} 48 | - name: Set up Docker Buildx 49 | uses: docker/setup-buildx-action@v3 50 | with: 51 | install: true 52 | - name: Build with Maven 53 | run: | 54 | mvn -s .github/ci-maven-settings.xml -B clean package -Dnative \ 55 | -Dquarkus.native.additional-build-args=${{ matrix.arch == 'arm64' && '-H:PageSize=65536' || '-march=compatibility' }} \ 56 | -Dquarkus.container-image.registry=${{ env.CONTAINER_REGISTRY }} \ 57 | -Dquarkus.container-image.group=${{ env.CONTAINER_GROUP }} \ 58 | -Dquarkus.native.container-build=true \ 59 | -Dquarkus.native.container-runtime-options=--platform=linux/${{ matrix.arch }} \ 60 | -Dquarkus.docker.buildx.platform=linux/${{ matrix.arch }} \ 61 | -Dquarkus.container-image.build=true \ 62 | -Dquarkus.container-image.push=false \ 63 | -Dquarkus.container-image.tag=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} \ 64 | -Dquarkus.container-image.additional-tags=${{ env.ADDITIONAL_TAG }}-${{ matrix.arch }},${{ env.LATEST_IMAGE_TAG }}-${{ matrix.arch }} 65 | - name: Integration Tests with Maven 66 | run: | 67 | mvn -s .github/ci-maven-settings.xml -B clean verify -Dtest-container \ 68 | -Dkafka-native-container-image=${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native \ 69 | -Dkafka-native-container-version=${{ env.BUILD_IMAGE_TAG }}-${{ matrix.arch }} 70 | - name: Log in to the Container registry 71 | uses: docker/login-action@v3 72 | with: 73 | registry: ${{ env.CONTAINER_REGISTRY }} 74 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 75 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 76 | - name: Push kafka-native images 77 | working-directory: ${{ matrix.project.name }} 78 | run: "docker push -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native" 79 | 80 | create-multiarch-manifest: 81 | needs: build 82 | runs-on: ubuntu-latest 83 | permissions: 84 | contents: read 85 | packages: write 86 | name: Create Multiarch Docker Images for kafka-native 87 | steps: 88 | - uses: actions/checkout@v4 89 | with: 90 | ref: ${{ github.event.inputs.branch }} 91 | token: ${{ secrets.RELEASE_TOKEN }} 92 | - uses: actions/setup-java@v4 93 | with: 94 | java-version: 21 95 | distribution: temurin 96 | cache: maven 97 | - name: Set Release Version & Image Tags 98 | run: | 99 | RELEASE_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) 100 | KAFKA_VERSION=$(mvn help:evaluate -Dexpression=kafka.version -q -DforceStdout) 101 | echo "BUILD_IMAGE_TAG=${RELEASE_VERSION}-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 102 | echo "CONTAINER_REGISTRY=quay.io" >> $GITHUB_ENV 103 | echo "CONTAINER_GROUP=ogunalp" >> $GITHUB_ENV 104 | echo "LATEST_IMAGE_TAG=latest-kafka-${KAFKA_VERSION}" >> $GITHUB_ENV 105 | echo "ADDITIONAL_TAG=latest" >> $GITHUB_ENV 106 | - name: Log in to the Container registry 107 | uses: docker/login-action@v3 108 
| with: 109 | registry: ${{ env.CONTAINER_REGISTRY }} 110 | username: ${{ secrets.QUAY_USERNAME }} # ${{ github.actor }} 111 | password: ${{ secrets.QUAY_ROBOT_TOKEN }} # ${{ secrets.GITHUB_TOKEN }} 112 | - name: Create and push multi-arch manifests kafka-native 113 | shell: bash 114 | run: | 115 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} \ 116 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-amd64 \ 117 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }}-arm64 118 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.BUILD_IMAGE_TAG }} 119 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }} \ 120 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }}-amd64 \ 121 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }}-arm64 122 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.ADDITIONAL_TAG }} 123 | docker manifest create ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }} \ 124 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }}-amd64 \ 125 | -a ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }}-arm64 126 | docker manifest push ${{ env.CONTAINER_REGISTRY }}/${{ env.CONTAINER_GROUP }}/kafka-native:${{ env.LATEST_IMAGE_TAG }} 127 | 128 | release-maven: 129 | needs: create-multiarch-manifest 130 | runs-on: ubuntu-latest 131 | name: Release to Maven Central 132 | steps: 133 | - uses: actions/checkout@v4 134 | with: 135 | ref: ${{ github.event.inputs.branch }} 136 | token: ${{ secrets.RELEASE_TOKEN }} 137 | - uses: actions/setup-java@v4 138 | with: 139 | java-version: 21 140 | distribution: temurin 141 | cache: maven 142 | server-id: ossrh 143 | server-username: OSSRH_USERNAME # env variable for username in deploy 144 | server-password: OSSRH_TOKEN # env variable for token in deploy 145 | gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import 146 | gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase 147 | overwrite-settings: false 148 | - name: Release 149 | run: | 150 | mvn -s .github/ci-maven-settings.xml -B \ 151 | clean deploy -DskipTests -Prelease 152 | env: 153 | OSSRH_USERNAME: ${{ secrets.OSSRH_USERNAME }} 154 | OSSRH_TOKEN: ${{ secrets.OSSRH_TOKEN }} 155 | MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} 156 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Continuous Integration Pull Request 3 | 4 | on: 5 | pull_request: 6 | branches: [ "main" ] 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: actions/setup-java@v4 14 | with: 15 | java-version: 21 16 | distribution: temurin 17 | cache: maven 18 | - name: Build with Maven 19 | run: | 20 | mvn -s .github/ci-maven-settings.xml -B \ 21 | clean package -Dnative \ 22 | -Dquarkus.native.container-build=true \ 23 | -Dquarkus.container-image.build=true \ 24 | 
-Dquarkus.container-image.additional-tags=latest 25 | - name: Integration Tests with Maven 26 | run: | 27 | mvn -s .github/ci-maven-settings.xml -B \ 28 | clean verify -Dtest-container 29 | - name: Upload artifacts 30 | if: failure() 31 | uses: actions/upload-artifact@v4 32 | with: 33 | name: integration-test-logs 34 | retention-days: 3 35 | path: kafka-native-test-container/target/container-logs/ 36 | -------------------------------------------------------------------------------- /.github/workflows/prepare-release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Prepare Release 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | release-version: 8 | description: 'The release version, if not set it computes the version automatically' 9 | required: false 10 | skip-tests: 11 | description: 'Whether to skip the tests before pushing the tag' 12 | required: false 13 | default: true 14 | branch: 15 | description: 'The branch from which the release is cut' 16 | required: false 17 | default: 'main' 18 | dry-run: 19 | description: 'Skip Git push' 20 | required: false 21 | default: true 22 | 23 | jobs: 24 | prepare-release: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | with: 29 | ref: ${{ github.event.inputs.branch }} 30 | token: ${{ secrets.RELEASE_TOKEN }} 31 | - uses: actions/setup-java@v4 32 | with: 33 | java-version: 21 34 | distribution: temurin 35 | cache: maven 36 | gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import 37 | gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase 38 | overwrite-settings: false 39 | - name: Prepare release branch 40 | env: 41 | GITHUB_CONTEXT: ${{ toJson(github) }} 42 | RELEASE_VERSION: ${{ github.event.inputs.release-version }} 43 | SKIP_TESTS: ${{ github.event.inputs.skip-tests }} 44 | DRY_RUN: ${{ github.event.inputs.dry-run }} 45 | MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} 46 | run: .build/cut-release.sh 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #Maven 2 | target/ 3 | pom.xml.tag 4 | pom.xml.releaseBackup 5 | pom.xml.versionsBackup 6 | release.properties 7 | 8 | # Eclipse 9 | .project 10 | .classpath 11 | .settings/ 12 | bin/ 13 | 14 | # IntelliJ 15 | .idea 16 | *.ipr 17 | *.iml 18 | *.iws 19 | 20 | # NetBeans 21 | nb-configuration.xml 22 | 23 | # Visual Studio Code 24 | .vscode 25 | .factorypath 26 | 27 | # OSX 28 | .DS_Store 29 | 30 | # Vim 31 | *.swp 32 | *.swo 33 | 34 | # patch 35 | *.orig 36 | *.rej 37 | 38 | # Local environment 39 | .env 40 | -------------------------------------------------------------------------------- /.mvn/wrapper/MavenWrapperDownloader.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2007-present the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | import java.net.*; 17 | import java.io.*; 18 | import java.nio.channels.*; 19 | import java.util.Properties; 20 | 21 | public class MavenWrapperDownloader { 22 | 23 | private static final String WRAPPER_VERSION = "0.5.6"; 24 | /** 25 | * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. 26 | */ 27 | private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" 28 | + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; 29 | 30 | /** 31 | * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to 32 | * use instead of the default one. 33 | */ 34 | private static final String MAVEN_WRAPPER_PROPERTIES_PATH = 35 | ".mvn/wrapper/maven-wrapper.properties"; 36 | 37 | /** 38 | * Path where the maven-wrapper.jar will be saved to. 39 | */ 40 | private static final String MAVEN_WRAPPER_JAR_PATH = 41 | ".mvn/wrapper/maven-wrapper.jar"; 42 | 43 | /** 44 | * Name of the property which should be used to override the default download url for the wrapper. 45 | */ 46 | private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; 47 | 48 | public static void main(String args[]) { 49 | System.out.println("- Downloader started"); 50 | File baseDirectory = new File(args[0]); 51 | System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); 52 | 53 | // If the maven-wrapper.properties exists, read it and check if it contains a custom 54 | // wrapperUrl parameter. 55 | File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); 56 | String url = DEFAULT_DOWNLOAD_URL; 57 | if(mavenWrapperPropertyFile.exists()) { 58 | FileInputStream mavenWrapperPropertyFileInputStream = null; 59 | try { 60 | mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); 61 | Properties mavenWrapperProperties = new Properties(); 62 | mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); 63 | url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); 64 | } catch (IOException e) { 65 | System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); 66 | } finally { 67 | try { 68 | if(mavenWrapperPropertyFileInputStream != null) { 69 | mavenWrapperPropertyFileInputStream.close(); 70 | } 71 | } catch (IOException e) { 72 | // Ignore ... 
73 | } 74 | } 75 | } 76 | System.out.println("- Downloading from: " + url); 77 | 78 | File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); 79 | if(!outputFile.getParentFile().exists()) { 80 | if(!outputFile.getParentFile().mkdirs()) { 81 | System.out.println( 82 | "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); 83 | } 84 | } 85 | System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); 86 | try { 87 | downloadFileFromURL(url, outputFile); 88 | System.out.println("Done"); 89 | System.exit(0); 90 | } catch (Throwable e) { 91 | System.out.println("- Error downloading"); 92 | e.printStackTrace(); 93 | System.exit(1); 94 | } 95 | } 96 | 97 | private static void downloadFileFromURL(String urlString, File destination) throws Exception { 98 | if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { 99 | String username = System.getenv("MVNW_USERNAME"); 100 | char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); 101 | Authenticator.setDefault(new Authenticator() { 102 | @Override 103 | protected PasswordAuthentication getPasswordAuthentication() { 104 | return new PasswordAuthentication(username, password); 105 | } 106 | }); 107 | } 108 | URL website = new URL(urlString); 109 | ReadableByteChannel rbc; 110 | rbc = Channels.newChannel(website.openStream()); 111 | FileOutputStream fos = new FileOutputStream(destination); 112 | fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); 113 | fos.close(); 114 | rbc.close(); 115 | } 116 | 117 | } 118 | -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ozangunalp/kafka-native/5ea1ad35dfd0985d9844e8e8e7cd1fbcd41c4319/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.1/apache-maven-3.8.1-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka Native 2 | 3 | Kafka broker compiled to native using Quarkus and GraalVM. 4 | 5 | ## Project Structure 6 | 7 | - `quarkus-kafka-server-extension`: Quarkus extension for compiling the Kafka server to native using GraalVM. 8 | - `kafka-server`: Quarkus application starting a Kafka server using the kafka-server-extension. Compiles to both JVM and native executables. 9 | - `kafka-native-test-container`: Testcontainers-based container starting a single-node Kafka broker using the native-compiled kafka-server. Includes integration tests. 10 | 11 | ## Building the project 12 | 13 | ```shell script 14 | mvn install 15 | ``` 16 | 17 | ## Running Kafka in dev mode 18 | 19 | You can run Kafka in dev mode, which enables live coding, using: 20 | ```shell script 21 | cd kafka-server 22 | mvn compile quarkus:dev 23 | ``` 24 | 25 | This starts a single-node Kafka broker listening on `PLAINTEXT://9092`. 26 | It uses `./target/log-dir` as the log directory. 27 | 28 | ## Packaging and running the application 29 | 30 | The application can be packaged by running the following in the `kafka-server` directory: 31 | ```shell script 32 | mvn package 33 | ``` 34 | It produces the `quarkus-run.jar` file in the `target/quarkus-app/` directory. 35 | Be aware that it’s not an _über-jar_ as the dependencies are copied into the `target/quarkus-app/lib/` directory. 36 | 37 | The application is now runnable using `java -jar target/quarkus-app/quarkus-run.jar`. 38 | 39 | ## Creating native executables 40 | 41 | You can create a native executable by running the following in the `kafka-server` directory: 42 | ```shell script 43 | mvn package -Pnative 44 | ``` 45 | 46 | Or, if you don't have GraalVM installed, you can run the native executable build in a container using: 47 | ```shell script 48 | mvn package -Pnative -Dquarkus.native.container-build=true 49 | ``` 50 | 51 | You can then execute your native executable with: `./target/kafka-server-1.0.0-SNAPSHOT-runner` 52 | 53 | ## Creating a container from the native executable 54 | 55 | You can create a container from the native executable using: 56 | ```shell script 57 | mvn package -Dnative -Dquarkus.native.container-build=true -Dquarkus.container-image.build=true 58 | ``` 59 | 60 | The container image will be built with the tag `quay.io/ogunalp/kafka-native:1.0.0-SNAPSHOT`. 61 | 62 | If you want to reuse the existing native executable: 63 | 64 | ```shell script 65 | mvn package -Dnative -Dquarkus.native.reuse-existing=true -Dquarkus.container-image.build=true 66 | ``` 67 | 68 | In case your container runtime is not running locally, use the parameter `-Dquarkus.native.remote-container-build=true` instead of `-Dquarkus.native.container-build=true`. 69 | 70 | Then you can run the Docker image using: 71 | 72 | ```shell script 73 | docker run -p 19092:9092 -it --rm -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:19092 quay.io/ogunalp/kafka-native:1.0.0-SNAPSHOT 74 | ``` 75 | 76 | ## Configuring the Kafka broker 77 | 78 | By default, the `kafka-server` application configures an embedded Kafka KRaft server as a single-node cluster.
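A broker started with this default configuration can also be reached from a test through the `KafkaNativeContainer` class in the `kafka-native-test-container` module. The sketch below is a minimal, illustrative JUnit 5 test; the class name, the use of Kafka's `AdminClient`, and the printed output are assumptions made for the example rather than code taken from this repository:

```java
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.junit.jupiter.api.Test;

import com.ozangunalp.kafka.test.container.KafkaNativeContainer;

// Hypothetical test class, shown only to illustrate the container API.
class SingleNodeBrokerSketchTest {

    @Test
    void startsSingleNodeKraftBroker() throws Exception {
        // Start the native image; the container waits for the "Kafka broker started" log line.
        try (KafkaNativeContainer kafka = new KafkaNativeContainer()) {
            kafka.start();

            // getBootstrapServers() returns the advertised listener, e.g. PLAINTEXT://localhost:<mapped-port>.
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());

            try (AdminClient admin = AdminClient.create(props)) {
                // With the default single-node configuration, exactly one broker node is expected.
                System.out.println("Broker count: " + admin.describeCluster().nodes().get().size());
            }
        }
    }
}
```

With no arguments, the container uses the `latest` image tag unless the `kafka-native-container-image` and `kafka-native-container-version` system properties are set (see `KafkaNativeContainer.DEFAULT_REPOSITORY` and `DEFAULT_VERSION` later in this repository).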
79 | 80 | The following configuration options are available: 81 | 82 | | Key | Description | Default | 83 | |-------------------------------|-------------------------------------------------------------------------------------------|--------------------| 84 | | `server.kafka-port` | External listener port | 9092 | 85 | | `server.internal-port` | Internal listener port | 9093 | 86 | | `server.controller-port` | Controller listener port | 9094 | 87 | | `server.delete-dirs-on-close` | Whether to delete `log-dir` on application close | false | 88 | | `server.host` | Hostname of listeners | `` (empty string) | 89 | | `server.cluster-id` | Provide the `cluster-id`; generated if empty | | 90 | | `server.properties-file` | Path to `server.properties` file | | 91 | | `server.auto-configure` | Automatically configure server properties; if false, only `server.properties` is respected | true | 92 | | `kafka.log.dir` | Path to the `log-dir` directory; created if it does not exist | `./target/log-dir` | 93 | | `kafka.advertised.listeners` | Override `advertised.listeners` | | 94 | | `kafka.*` | Override broker properties | | 95 | 96 | 97 | You can set configuration options using Java system properties, e.g. 98 | 99 | ```shell script 100 | java -Dserver.delete-dirs-on-close=true \ 101 | -Dserver.properties-file=server.properties \ 102 | -Dkafka.advertised.listeners=SSL://localhost:9092 -jar ./target/quarkus-app/quarkus-run.jar 103 | ``` 104 | 105 | Or environment variables, e.g. 106 | 107 | ```shell script 108 | docker run -it --rm -p 19092:9092 \ 109 | -v $(pwd):/conf \ 110 | -e SERVER_PROPERTIES_FILE=/conf/server.properties \ 111 | -e KAFKA_ADVERTISED_LISTENERS=SASL_PLAINTEXT://localhost:19092 \ 112 | quay.io/ogunalp/kafka-native:1.0.0-SNAPSHOT 113 | ``` 114 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | name: kafka-native 4 | services: 5 | broker1: 6 | image: quay.io/ogunalp/kafka-native:latest 7 | ports: 8 | - "9095:9092" 9 | environment: 10 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9095 11 | SERVER_HOST: broker1 12 | KAFKA_BROKER_ID: 1 13 | SERVER_CLUSTER_ID: BmMQZPq6Qoua1-yvgGJFbA 14 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker1:9094,2@broker2:9094 15 | networks: 16 | - kafka-network 17 | broker2: 18 | image: quay.io/ogunalp/kafka-native:latest 19 | ports: 20 | - "9096:9092" 21 | environment: 22 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9096 23 | SERVER_HOST: broker2 24 | KAFKA_BROKER_ID: 2 25 | SERVER_CLUSTER_ID: BmMQZPq6Qoua1-yvgGJFbA 26 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker1:9094,2@broker2:9094 27 | networks: 28 | - kafka-network 29 | broker3: 30 | image: quay.io/ogunalp/kafka-native:latest 31 | ports: 32 | - "9097:9092" 33 | environment: 34 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9097 35 | SERVER_HOST: broker3 36 | KAFKA_BROKER_ID: 3 37 | SERVER_CLUSTER_ID: BmMQZPq6Qoua1-yvgGJFbA 38 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker1:9094,2@broker2:9094 39 | KAFKA_PROCESS_ROLES: broker 40 | networks: 41 | - kafka-network 42 | 43 | networks: 44 | kafka-network: 45 | name: kafka-native -------------------------------------------------------------------------------- /kafka-native-test-container/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | kafka-native-parent 7 | com.ozangunalp 8 | 999-SNAPSHOT 9 | 10 | 4.0.0 11 | 12 | kafka-native-test-container 13 | Kafka
Native Test Container 14 | 15 | 16 | 11 17 | 11 18 | UTF-8 19 | 20 | 21 | 22 | org.testcontainers 23 | testcontainers 24 | 25 | 26 | io.quarkus 27 | quarkus-junit5 28 | test 29 | 30 | 31 | com.ozangunalp 32 | kafka-server 33 | test 34 | 35 | 36 | io.smallrye.reactive 37 | smallrye-reactive-messaging-kafka-test-companion 38 | test 39 | 40 | 41 | io.strimzi 42 | kafka-oauth-client 43 | test 44 | 45 | 46 | 47 | 48 | 49 | org.apache.maven.plugins 50 | maven-surefire-plugin 51 | 52 | true 53 | 54 | target/hosts 55 | target/krb5.conf 56 | ${project.build.directory}/container-logs/ 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | test-container 65 | 66 | 67 | test-container 68 | 69 | 70 | 71 | 72 | 73 | org.apache.maven.plugins 74 | maven-failsafe-plugin 75 | ${failsafe-plugin.version} 76 | 77 | 78 | 79 | integration-test 80 | verify 81 | 82 | 83 | 84 | target/hosts 85 | target/krb5.conf 86 | ${project.build.directory}/container-logs/ 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/main/java/com/ozangunalp/kafka/test/container/KafkaNativeContainer.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.test.container; 2 | 3 | import java.nio.charset.StandardCharsets; 4 | import java.util.function.Consumer; 5 | import java.util.function.Function; 6 | 7 | import org.testcontainers.containers.GenericContainer; 8 | import org.testcontainers.containers.output.OutputFrame; 9 | import org.testcontainers.containers.wait.strategy.Wait; 10 | import org.testcontainers.images.builder.Transferable; 11 | import org.testcontainers.utility.DockerImageName; 12 | 13 | import com.github.dockerjava.api.command.InspectContainerResponse; 14 | 15 | public class KafkaNativeContainer extends GenericContainer { 16 | 17 | public static final String DEFAULT_REPOSITORY = System.getProperty("kafka-native-container-image", "quay.io/ogunalp/kafka-native"); 18 | public static final String DEFAULT_VERSION = System.getProperty("kafka-native-container-version", "latest"); 19 | private static final String STARTER_SCRIPT = "/work/run.sh"; 20 | private static final String SERVER_PROPERTIES = "/work/server.properties"; 21 | private static final int KAFKA_PORT = 9092; 22 | 23 | // dynamic config 24 | private boolean hasServerProperties = false; 25 | private Function advertisedListenersProvider = KafkaNativeContainer::defaultAdvertisedAddresses; 26 | private String additionalArgs = null; 27 | private int exposedPort = -1; 28 | private Function, Consumer> outputFrameConsumer; 29 | private boolean autoConfigure = true; 30 | 31 | public static DockerImageName imageName(String version) { 32 | return DockerImageName.parse(DEFAULT_REPOSITORY + ":" + version); 33 | } 34 | 35 | public static DockerImageName imageName() { 36 | return DockerImageName.parse(DEFAULT_REPOSITORY + ":" + DEFAULT_VERSION); 37 | } 38 | 39 | public KafkaNativeContainer() { 40 | this(imageName()); 41 | } 42 | 43 | public KafkaNativeContainer(DockerImageName dockerImageName) { 44 | super(dockerImageName); 45 | super.addExposedPort(9092); 46 | String cmd = String.format("while [ ! 
-f %s ]; do sleep 0.1; done; sleep 0.1; %s", STARTER_SCRIPT, STARTER_SCRIPT); 47 | super.withCommand("sh", "-c", cmd); 48 | super.waitingFor(Wait.forLogMessage(".*Kafka broker started.*", 1)); 49 | } 50 | 51 | public KafkaNativeContainer withPort(int fixedPort) { 52 | assertNotRunning(); 53 | if (fixedPort <= 0) { 54 | throw new IllegalArgumentException("The fixed Kafka port must be greater than 0"); 55 | } 56 | addFixedExposedPort(fixedPort, KAFKA_PORT); 57 | return self(); 58 | } 59 | 60 | public KafkaNativeContainer withServerProperties(Transferable transferable) { 61 | assertNotRunning(); 62 | super.withCopyToContainer(transferable, SERVER_PROPERTIES); 63 | this.hasServerProperties = true; 64 | return self(); 65 | } 66 | 67 | public KafkaNativeContainer withAutoConfigure(boolean autoConfigure) { 68 | assertNotRunning(); 69 | this.autoConfigure = autoConfigure; 70 | return self(); 71 | } 72 | 73 | public KafkaNativeContainer withAdvertisedListeners(final Function provider) { 74 | assertNotRunning(); 75 | this.advertisedListenersProvider = provider; 76 | return self(); 77 | } 78 | 79 | public KafkaNativeContainer withArgs(String args) { 80 | assertNotRunning(); 81 | this.additionalArgs = args; 82 | return self(); 83 | } 84 | 85 | public KafkaNativeContainer withFollowOutput(Function, Consumer> outputFrameConsumer) { 86 | this.outputFrameConsumer = outputFrameConsumer; 87 | return self(); 88 | } 89 | 90 | @Override 91 | protected void containerIsStarting(InspectContainerResponse containerInfo, boolean reused) { 92 | super.containerIsStarting(containerInfo, reused); 93 | // Set exposed port 94 | this.exposedPort = getMappedPort(KAFKA_PORT); 95 | // follow output 96 | if (outputFrameConsumer != null) { 97 | followOutput(outputFrameConsumer.apply(this)); 98 | } 99 | // Start and configure the advertised address 100 | String cmd = "#!/bin/bash\n/work/kafka"; 101 | cmd += " -Dkafka.advertised.listeners=" + getBootstrapServers(); 102 | if (hasServerProperties) { 103 | cmd += " -Dserver.properties-file=" + SERVER_PROPERTIES; 104 | } 105 | cmd += " -Dserver.auto-configure=" + autoConfigure; 106 | if (additionalArgs != null) { 107 | cmd += " " + additionalArgs; 108 | } 109 | 110 | //noinspection OctalInteger 111 | copyFileToContainer( 112 | Transferable.of(cmd.getBytes(StandardCharsets.UTF_8), 0777), 113 | STARTER_SCRIPT); 114 | } 115 | 116 | public static String defaultAdvertisedAddresses(KafkaNativeContainer container) { 117 | return String.format("PLAINTEXT://%s:%d", container.getHost(), container.getExposedKafkaPort()); 118 | } 119 | 120 | public int getExposedKafkaPort() { 121 | return exposedPort; 122 | } 123 | 124 | public String getBootstrapServers() { 125 | return advertisedListenersProvider.apply(this); 126 | } 127 | 128 | private void assertNotRunning() { 129 | if (isRunning()) { 130 | throw new IllegalStateException("Configuration of the running broker is not permitted."); 131 | } 132 | } 133 | 134 | } 135 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/main/java/com/ozangunalp/kafka/test/container/ToFileConsumer.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.test.container; 2 | 3 | import java.io.FileWriter; 4 | import java.io.IOException; 5 | import java.nio.file.Files; 6 | import java.nio.file.Path; 7 | import java.util.Optional; 8 | 9 | import org.testcontainers.containers.GenericContainer; 10 | import 
org.testcontainers.containers.output.BaseConsumer; 11 | import org.testcontainers.containers.output.OutputFrame; 12 | import org.testcontainers.utility.DockerImageName; 13 | 14 | public class ToFileConsumer extends BaseConsumer { 15 | 16 | private static final String CONTAINER_LOGS_DIR = "container.logs.dir"; 17 | 18 | private static Path getContainerLogsDir() { 19 | return Optional.ofNullable(System.getProperty(CONTAINER_LOGS_DIR)) 20 | .map(Path::of) 21 | .orElseGet(() -> { 22 | try { 23 | return Files.createTempDirectory(CONTAINER_LOGS_DIR); 24 | } catch (IOException e) { 25 | throw new RuntimeException(e); 26 | } 27 | }); 28 | } 29 | 30 | private final FileWriter writer; 31 | 32 | public ToFileConsumer(String testName, GenericContainer container) { 33 | this(testName, container, container.getContainerId().substring(0, 12)); 34 | } 35 | 36 | public ToFileConsumer(String testName, GenericContainer container, String identifier) { 37 | this(testName, DockerImageName.parse(container.getDockerImageName()).getRepository().replace("/", ".") 38 | + "." + identifier); 39 | } 40 | 41 | public ToFileConsumer(String testName, String fileName) { 42 | this(getContainerLogsDir(), testName, fileName); 43 | } 44 | 45 | public ToFileConsumer(Path parent, String testName, String fileName) { 46 | this(parent.resolve(testName).resolve(fileName + ".log")); 47 | } 48 | 49 | public ToFileConsumer(Path logFile) { 50 | try { 51 | Files.createDirectories(logFile.getParent()); 52 | this.writer = new FileWriter(logFile.toFile()); 53 | } catch (IOException e) { 54 | throw new RuntimeException(e); 55 | } 56 | } 57 | 58 | 59 | @Override 60 | public void accept(OutputFrame outputFrame) { 61 | try { 62 | if (outputFrame.getType() == OutputFrame.OutputType.END) { 63 | writer.close(); 64 | } else { 65 | writer.write(outputFrame.getUtf8String()); 66 | } 67 | } catch (IOException e) { 68 | // ignore 69 | } 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/java/com/ozangunalp/kafka/test/container/KafkaNativeContainerIT.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.test.container; 2 | 3 | import com.ozangunalp.kafka.server.Endpoints; 4 | import io.smallrye.reactive.messaging.kafka.companion.KafkaCompanion; 5 | import org.apache.kafka.clients.CommonClientConfigs; 6 | import org.apache.kafka.clients.producer.ProducerRecord; 7 | import org.apache.kafka.common.Uuid; 8 | import org.apache.kafka.common.config.SaslConfigs; 9 | import org.apache.kafka.common.config.SslConfigs; 10 | import org.apache.kafka.server.common.MetadataVersion; 11 | import org.jetbrains.annotations.NotNull; 12 | import org.junit.jupiter.api.BeforeEach; 13 | import org.junit.jupiter.api.Test; 14 | import org.junit.jupiter.api.TestInfo; 15 | import org.testcontainers.containers.ContainerLaunchException; 16 | import org.testcontainers.containers.Network; 17 | import org.testcontainers.images.builder.Transferable; 18 | import org.testcontainers.lifecycle.Startables; 19 | import org.testcontainers.utility.MountableFile; 20 | 21 | import java.io.File; 22 | import java.io.IOException; 23 | import java.io.StringWriter; 24 | import java.lang.reflect.Method; 25 | import java.time.Duration; 26 | import java.time.OffsetDateTime; 27 | import java.time.format.DateTimeFormatter; 28 | import java.util.Map; 29 | import java.util.Properties; 30 | import java.util.UUID; 31 | import 
java.util.concurrent.ExecutionException; 32 | import java.util.concurrent.TimeUnit; 33 | import java.util.concurrent.TimeoutException; 34 | 35 | import static org.assertj.core.api.Assertions.assertThat; 36 | import static org.assertj.core.api.Assertions.assertThatThrownBy; 37 | import static org.awaitility.Awaitility.await; 38 | 39 | public class KafkaNativeContainerIT { 40 | 41 | public String topic; 42 | private String testOutputName; 43 | 44 | @NotNull 45 | private KafkaNativeContainer createKafkaNativeContainer() { 46 | return new KafkaNativeContainer() 47 | .withFollowOutput(c -> new ToFileConsumer(testOutputName, c)); 48 | } 49 | 50 | @NotNull 51 | private KafkaNativeContainer createKafkaNativeContainer(String containerName) { 52 | return new KafkaNativeContainer() 53 | .withFollowOutput(c -> new ToFileConsumer(testOutputName, c, containerName)); 54 | } 55 | 56 | @BeforeEach 57 | public void initTopic(TestInfo testInfo) { 58 | String cn = testInfo.getTestClass().map(Class::getSimpleName).orElse(UUID.randomUUID().toString()); 59 | String mn = testInfo.getTestMethod().map(Method::getName).orElse(UUID.randomUUID().toString()); 60 | testOutputName = String.format("%s.%s", testInfo.getDisplayName().replaceAll("\\(\\)$", ""), 61 | OffsetDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd-HH.mm.ss"))); 62 | topic = cn + "-" + mn + "-" + UUID.randomUUID().getMostSignificantBits(); 63 | } 64 | 65 | void checkProduceConsume(KafkaNativeContainer container) { 66 | checkProduceConsume(container, Map.of()); 67 | } 68 | 69 | void checkProduceConsume(KafkaNativeContainer container, Map configs) { 70 | try (KafkaCompanion companion = new KafkaCompanion(container.getBootstrapServers())) { 71 | companion.setCommonClientConfig(configs); 72 | companion.produceStrings() 73 | .usingGenerator(i -> new ProducerRecord<>(topic, "k" + i, "v" + i), 100) 74 | .awaitCompletion(); 75 | 76 | assertThat(companion.consumeStrings() 77 | .fromTopics(topic, 100) 78 | .awaitCompletion() 79 | .count()).isEqualTo(100L); 80 | } catch (Throwable throwable) { 81 | throw new AssertionError("Kafka container is not in good health, logs : " + container.getLogs(), throwable); 82 | } 83 | } 84 | 85 | void verifyClusterMembers(KafkaNativeContainer container, Map configs, int expected) { 86 | try (KafkaCompanion companion = new KafkaCompanion(container.getBootstrapServers())) { 87 | companion.setCommonClientConfig(configs); 88 | await().atMost(30, TimeUnit.SECONDS) 89 | .untilAsserted(() -> assertThat(companion.cluster().nodes().size()).isEqualTo(expected)); 90 | } 91 | } 92 | 93 | @Test 94 | void testSimpleContainer() { 95 | try (var container = createKafkaNativeContainer()) { 96 | container.start(); 97 | checkProduceConsume(container); 98 | } 99 | } 100 | 101 | @Test 102 | void testFixedPortContainer() { 103 | int unusedPort = Endpoints.getUnusedPort(0); 104 | try (var container = createKafkaNativeContainer().withPort(unusedPort)) { 105 | container.start(); 106 | assertThat(container.getBootstrapServers()).contains("" + unusedPort); 107 | checkProduceConsume(container); 108 | } 109 | } 110 | 111 | @Test 112 | void testSaslPlainContainer() { 113 | try (var container = createKafkaNativeContainer() 114 | .withServerProperties(MountableFile.forClasspathResource("sasl_plain_plaintext.properties")) 115 | .withAdvertisedListeners(c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getExposedKafkaPort()))) { 116 | container.start(); 117 | checkProduceConsume(container, Map.of( 118 | 
CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT", 119 | SaslConfigs.SASL_MECHANISM, "PLAIN", 120 | SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"client\" password=\"client-secret\";")); 121 | } 122 | } 123 | 124 | @Test 125 | void testMinimumMetadataVersion() { 126 | try (var container = createKafkaNativeContainer() 127 | .withEnv("SERVER_STORAGE_METADATA_VERSION", MetadataVersion.MINIMUM_VERSION.version())) { 128 | container.start(); 129 | checkProduceConsume(container); 130 | } 131 | } 132 | 133 | @Test 134 | void testSaslScramContainer() { 135 | try (var container = createKafkaNativeContainer() 136 | .withEnv("SERVER_SCRAM_CREDENTIALS", "SCRAM-SHA-512=[name=client,password=client-secret]") 137 | .withServerProperties(MountableFile.forClasspathResource("sasl_scram_plaintext.properties")) 138 | .withAdvertisedListeners(c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getExposedKafkaPort()))) { 139 | container.start(); 140 | checkProduceConsume(container, Map.of( 141 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT", 142 | SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512", 143 | SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"client\" password=\"client-secret\";")); 144 | } 145 | } 146 | 147 | @Test 148 | void testSaslScramContainerCluster() throws ExecutionException, InterruptedException, TimeoutException { 149 | String clusterId = Uuid.randomUuid().toString(); 150 | String broker1 = "broker1"; 151 | String broker2 = "broker2"; 152 | String quorumVotes = String.format("1@%s:9094,2@%s:9094", broker1, broker2); 153 | try (var network = Network.newNetwork(); 154 | var b1 = createKafkaNativeContainer(broker1) 155 | .withServerProperties(MountableFile.forClasspathResource("sasl_scram_plaintext.properties")) 156 | .withAdvertisedListeners(c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getExposedKafkaPort())); 157 | var b2 = createKafkaNativeContainer(broker2) 158 | .withServerProperties(MountableFile.forClasspathResource("sasl_scram_plaintext.properties")) 159 | .withAdvertisedListeners(c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getExposedKafkaPort()))) { 160 | 161 | var common = Map.of( 162 | "SERVER_CLUSTER_ID", clusterId, 163 | "SERVER_SCRAM_CREDENTIALS", 164 | "SCRAM-SHA-512=[name=client,password=client-secret];SCRAM-SHA-512=[name=broker,password=broker-secret]", 165 | "KAFKA_CONTROLLER_QUORUM_VOTERS", quorumVotes); 166 | 167 | common.forEach(b1::addEnv); 168 | b1.withNetworkAliases(broker1); 169 | b1.withNetwork(network); 170 | b1.addEnv("SERVER_HOST", broker1); 171 | b1.addEnv("KAFKA_BROKER_ID", "1"); 172 | 173 | common.forEach(b2::addEnv); 174 | b2.withNetworkAliases(broker2); 175 | b2.withNetwork(network); 176 | b2.addEnv("SERVER_HOST", broker2); 177 | b2.addEnv("KAFKA_BROKER_ID", "2"); 178 | 179 | Startables.deepStart(b1, b2).get(30, TimeUnit.SECONDS); 180 | 181 | Map clientOptions = Map.of( 182 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT", 183 | SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512", 184 | SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"client\" password=\"client-secret\";"); 185 | verifyClusterMembers(b1, clientOptions, 2); 186 | checkProduceConsume(b1, clientOptions); 187 | checkProduceConsume(b2, clientOptions); 188 | } 189 | } 190 | 191 | @Test 192 | void testSslContainer() { 193 | try (var container = 
createKafkaNativeContainer() 194 | .withServerProperties(MountableFile.forClasspathResource("ssl.properties")) 195 | .withAdvertisedListeners(c -> String.format("SSL://%s:%s", c.getHost(), c.getExposedKafkaPort())) 196 | .withCopyFileToContainer(MountableFile.forClasspathResource("kafka-keystore.p12"), "/dir/kafka-keystore.p12") 197 | .withCopyFileToContainer(MountableFile.forClasspathResource("kafka-truststore.p12"), "/dir/kafka-truststore.p12")) { 198 | container.start(); 199 | File tsFile = new File("src/test/resources/kafka-truststore.p12"); 200 | checkProduceConsume(container, Map.of( 201 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL", 202 | SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsFile.getAbsolutePath(), 203 | SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L", 204 | SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12", 205 | SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "")); 206 | } 207 | } 208 | 209 | @Test 210 | void testOAuthContainer() { 211 | try (KeycloakContainer keycloak = new KeycloakContainer()) { 212 | keycloak.start(); 213 | keycloak.followOutput(new ToFileConsumer(testOutputName, keycloak)); 214 | keycloak.createHostsFile(); 215 | try (var container = createKafkaNativeContainer() 216 | .withNetworkAliases("kafka") 217 | .withNetwork(Network.SHARED) 218 | .withServerProperties(MountableFile.forClasspathResource("oauth.properties")) 219 | .withArgs("-Dquarkus.log.level=debug") 220 | .withAdvertisedListeners(c -> String.format("JWT://%s:%s", c.getHost(), c.getExposedKafkaPort()))) { 221 | container.start(); 222 | checkProduceConsume(container, Map.of( 223 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT", 224 | SaslConfigs.SASL_MECHANISM, "OAUTHBEARER", 225 | SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler", 226 | SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required" + 227 | " oauth.client.id=\"kafka-client\"" + 228 | " oauth.client.secret=\"kafka-client-secret\"" + 229 | " oauth.token.endpoint.uri=\"http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/token\";")); 230 | } 231 | } 232 | } 233 | 234 | @Test 235 | void testKerberosContainer() { 236 | try (KerberosContainer kerberos = new KerberosContainer("gcavalcante8808/krb5-server")) { 237 | kerberos.start(); 238 | kerberos.followOutput(new ToFileConsumer(testOutputName, kerberos)); 239 | kerberos.createTestPrincipals(); 240 | kerberos.createKrb5File(); 241 | try (var container = createKafkaNativeContainer() 242 | .withNetworkAliases("kafka") 243 | .withNetwork(Network.SHARED) 244 | .withServerProperties(MountableFile.forClasspathResource("kerberos/kafkaServer.properties")) 245 | .withAdvertisedListeners( 246 | c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getExposedKafkaPort())) 247 | .withFileSystemBind("target/test-classes/kerberos/krb5KafkaBroker.conf", "/etc/krb5.conf") 248 | .withFileSystemBind("target/test-classes/kerberos/kafkabroker.keytab", "/opt/kafka/config/kafkabroker.keytab") 249 | ) { 250 | container.start(); 251 | checkProduceConsume(container, Map.of( 252 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT", 253 | SaslConfigs.SASL_MECHANISM, "GSSAPI", 254 | SaslConfigs.SASL_JAAS_CONFIG, "com.sun.security.auth.module.Krb5LoginModule required " + 255 | "useKeyTab=true " + 256 | "storeKey=true " + 257 | "debug=true " + 258 | "serviceName=\"kafka\" " + 259 | 
"keyTab=\"target/test-classes/kerberos/client.keytab\" " + 260 | "principal=\"client/localhost@EXAMPLE.COM\";", 261 | SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka", 262 | SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "https")); 263 | } 264 | } 265 | } 266 | 267 | @Test 268 | void testKraftClusterBothControllers() throws Exception { 269 | String clusterId = Uuid.randomUuid().toString(); 270 | String broker1 = "broker1"; 271 | String broker2 = "broker2"; 272 | String quorumVotes = String.format("1@%s:9094,2@%s:9094", broker1, broker2); 273 | try (var network = Network.newNetwork(); 274 | var b1 = createKafkaNativeContainer(broker1); 275 | var b2 = createKafkaNativeContainer(broker2)) { 276 | 277 | var common = Map.of( 278 | "SERVER_CLUSTER_ID", clusterId, 279 | "KAFKA_CONTROLLER_QUORUM_VOTERS", quorumVotes); 280 | 281 | common.forEach(b1::addEnv); 282 | b1.withNetworkAliases(broker1); 283 | b1.withNetwork(network); 284 | b1.addEnv("SERVER_HOST", broker1); 285 | b1.addEnv("KAFKA_BROKER_ID", "1"); 286 | 287 | common.forEach(b2::addEnv); 288 | b2.withNetworkAliases(broker2); 289 | b2.withNetwork(network); 290 | b2.addEnv("SERVER_HOST", broker2); 291 | b2.addEnv("KAFKA_BROKER_ID", "2"); 292 | 293 | Startables.deepStart(b1, b2).get(1, TimeUnit.MINUTES); 294 | 295 | verifyClusterMembers(b1, Map.of(), 2); 296 | checkProduceConsume(b1); 297 | } 298 | } 299 | 300 | @Test 301 | void testKraftClusterWithOneControllerOnlyNode() throws Exception { 302 | String clusterId = Uuid.randomUuid().toString(); 303 | String brokerController = "broker-controller"; 304 | String controllerOnly = "controller"; 305 | String quorumVotes = String.format("1@%s:9094,2@%s:9094", brokerController, controllerOnly); 306 | 307 | try (var network = Network.newNetwork(); 308 | var b1 = createKafkaNativeContainer(brokerController); 309 | var b2 = createKafkaNativeContainer(controllerOnly)) { 310 | 311 | b1.addEnv("SERVER_CLUSTER_ID", clusterId); 312 | b1.addEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", quorumVotes); 313 | b1.withNetworkAliases(brokerController); 314 | b1.withNetwork(network); 315 | b1.addEnv("SERVER_HOST", brokerController); 316 | b1.addEnv("KAFKA_BROKER_ID", "1"); 317 | 318 | b2.addEnv("SERVER_CLUSTER_ID", clusterId); 319 | b2.withNetworkAliases(controllerOnly); 320 | b2.withNetwork(network); 321 | b2.withAutoConfigure(false); 322 | Transferable controllerProps = controllerOnlyProperties(quorumVotes, "2"); 323 | b2.withServerProperties(controllerProps); 324 | 325 | Startables.deepStart(b1, b2).get(30, TimeUnit.SECONDS); 326 | 327 | verifyClusterMembers(b1, Map.of(), 1); 328 | checkProduceConsume(b1); 329 | } 330 | } 331 | 332 | private Transferable controllerOnlyProperties(String quorumVotes, String nodeId) { 333 | Properties properties = new Properties(); 334 | properties.put("process.roles", "controller"); 335 | properties.put("node.id", nodeId); 336 | properties.put("controller.quorum.voters", quorumVotes); 337 | properties.put("listeners", "CONTROLLER://:9094"); 338 | properties.put("controller.listener.names", "CONTROLLER"); 339 | properties.put("num.network.threads", "1"); 340 | properties.put("num.io.threads", "1"); 341 | properties.put("socket.send.buffer.bytes", "102400"); 342 | properties.put("socket.receive.buffer.bytes", "102400"); 343 | properties.put("socket.request.max.bytes", "104857600"); 344 | properties.put("log.dirs", "/tmp/kraft-controller-logs"); 345 | properties.put("num.partitions", "1"); 346 | properties.put("num.num.recovery.threads.per.data.dir", "1"); 347 | 
properties.put("offsets.topic.replication.factor", "1"); 348 | properties.put("transaction.state.log.replication.factor", "1"); 349 | properties.put("transaction.state.log.min.isr", "1"); 350 | properties.put("log.retention.hours", "168"); 351 | properties.put("log.segment.bytes", "1073741824"); 352 | properties.put("log.retention.check.interval.ms", "300000"); 353 | try (StringWriter writer = new StringWriter()) { 354 | properties.store(writer, null); 355 | return Transferable.of(writer.toString()); 356 | } catch (IOException e) { 357 | throw new RuntimeException(e); 358 | } 359 | } 360 | 361 | @Test 362 | void testKraftClusterOneController() throws Exception { 363 | String clusterId = Uuid.randomUuid().toString(); 364 | String broker1 = "broker1"; 365 | String broker2 = "broker2"; 366 | String quorumVotes = String.format("1@%s:9094", broker1); 367 | try (var network = Network.newNetwork(); 368 | var controllerAndBroker = createKafkaNativeContainer(broker1); 369 | var brokerOnly = createKafkaNativeContainer(broker2)) { 370 | 371 | var common = Map.of( 372 | "SERVER_CLUSTER_ID", clusterId, 373 | "KAFKA_CONTROLLER_QUORUM_VOTERS", quorumVotes); 374 | 375 | common.forEach(controllerAndBroker::addEnv); 376 | controllerAndBroker.withNetworkAliases(broker1); 377 | controllerAndBroker.withNetwork(network); 378 | controllerAndBroker.addEnv("SERVER_HOST", broker1); 379 | controllerAndBroker.addEnv("KAFKA_BROKER_ID", "1"); 380 | 381 | common.forEach(brokerOnly::addEnv); 382 | brokerOnly.withNetworkAliases(broker2); 383 | brokerOnly.withNetwork(network); 384 | brokerOnly.addEnv("SERVER_HOST", broker2); 385 | brokerOnly.addEnv("KAFKA_BROKER_ID", "2"); 386 | brokerOnly.addEnv("KAFKA_PROCESS_ROLES", "broker"); 387 | 388 | Startables.deepStart(controllerAndBroker, brokerOnly).get(30, TimeUnit.SECONDS); 389 | 390 | verifyClusterMembers(controllerAndBroker, Map.of(), 2); 391 | checkProduceConsume(controllerAndBroker); 392 | } 393 | } 394 | 395 | } -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/java/com/ozangunalp/kafka/test/container/KerberosContainer.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.test.container; 2 | 3 | import java.io.BufferedReader; 4 | import java.io.FileInputStream; 5 | import java.io.FileOutputStream; 6 | import java.io.InputStreamReader; 7 | import java.time.Duration; 8 | import java.util.stream.Collectors; 9 | 10 | import org.testcontainers.containers.GenericContainer; 11 | import org.testcontainers.containers.Network; 12 | import org.testcontainers.containers.wait.strategy.Wait; 13 | 14 | public class KerberosContainer extends GenericContainer { 15 | 16 | public KerberosContainer(String dockerImageName) { 17 | super(dockerImageName); 18 | withStartupTimeout(Duration.ofMillis(20000)); 19 | withEnv("KRB5_REALM", "EXAMPLE.COM"); 20 | withEnv("KRB5_KDC", "localhost"); 21 | withEnv("KRB5_PASS", "mypass"); 22 | withExposedPorts(749, 464, 88); 23 | waitingFor(Wait.forListeningPorts(88)); 24 | withNetwork(Network.SHARED); 25 | withNetworkAliases("kerberos"); 26 | } 27 | 28 | public void createTestPrincipals() { 29 | try { 30 | ExecResult lsResult = execInContainer("kadmin.local", "-q", 31 | "addprinc -randkey kafka/localhost@EXAMPLE.COM"); 32 | lsResult = execInContainer("kadmin.local", "-q", 33 | "ktadd -norandkey -k /kafkabroker.keytab kafka/localhost@EXAMPLE.COM"); 34 | lsResult = execInContainer("kadmin.local", "-q", 35 | "addprinc -randkey 
client/localhost@EXAMPLE.COM"); 36 | lsResult = execInContainer("kadmin.local", "-q", 37 | "ktadd -norandkey -k /client.keytab client/localhost@EXAMPLE.COM"); 38 | copyFileFromContainer("/kafkabroker.keytab", "target/test-classes/kerberos/kafkabroker.keytab"); 39 | copyFileFromContainer("/client.keytab", "target/test-classes/kerberos/client.keytab"); 40 | } catch (Exception e) { 41 | e.printStackTrace(); 42 | } 43 | } 44 | 45 | public void createKrb5File() { 46 | try (FileInputStream fis = new FileInputStream("target/test-classes/kerberos/krb5ClientTemplate.conf"); 47 | BufferedReader reader = new BufferedReader(new InputStreamReader(fis)); 48 | FileOutputStream file = new FileOutputStream("target/krb5.conf")) { 49 | String content = reader.lines().collect(Collectors.joining(System.lineSeparator())); 50 | content = content.replaceAll("", getHost()); 51 | content = content.replaceAll("", getMappedPort(88).toString()); 52 | content = content.replaceAll("", getMappedPort(749).toString()); 53 | file.write(content.getBytes()); 54 | } catch (Exception e) { 55 | e.printStackTrace(); 56 | } 57 | 58 | } 59 | 60 | } 61 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/java/com/ozangunalp/kafka/test/container/KeycloakContainer.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.test.container; 2 | 3 | import java.io.FileWriter; 4 | import java.io.IOException; 5 | import java.io.UncheckedIOException; 6 | 7 | import org.testcontainers.containers.FixedHostPortGenericContainer; 8 | import org.testcontainers.containers.GenericContainer; 9 | import org.testcontainers.containers.Network; 10 | import org.testcontainers.containers.wait.strategy.Wait; 11 | import org.testcontainers.utility.MountableFile; 12 | 13 | public class KeycloakContainer extends GenericContainer { 14 | 15 | public KeycloakContainer() { 16 | super("quay.io/keycloak/keycloak:20.0.0"); 17 | withExposedPorts(8080, 8443); 18 | addFixedExposedPort(8080, 8080); 19 | withEnv("KEYCLOAK_ADMIN", "admin"); 20 | withEnv("KEYCLOAK_ADMIN_PASSWORD", "admin"); 21 | withEnv("KC_PROXY", "passthrough"); 22 | withEnv("KC_HOSTNAME", "keycloak:8080"); 23 | withEnv("KC_HTTP_ENABLED", "true"); 24 | waitingFor(Wait.forLogMessage(".*Listening.*", 1)); 25 | withNetwork(Network.SHARED); 26 | withNetworkAliases("keycloak"); 27 | withCopyFileToContainer(MountableFile.forClasspathResource("keycloak/realms/kafka-authz-realm.json"), 28 | "/opt/keycloak/data/import/kafka-authz-realm.json"); 29 | withCommand("start-dev", "--import-realm"); 30 | } 31 | 32 | public void createHostsFile() { 33 | try (FileWriter fileWriter = new FileWriter("target/hosts")) { 34 | String dockerHost = this.getHost(); 35 | if ("localhost".equals(dockerHost)) { 36 | fileWriter.write("127.0.0.1 keycloak"); 37 | } else { 38 | fileWriter.write(dockerHost + " keycloak"); 39 | } 40 | fileWriter.flush(); 41 | } catch (IOException e) { 42 | throw new UncheckedIOException(e); 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/kafka-keystore.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ozangunalp/kafka-native/5ea1ad35dfd0985d9844e8e8e7cd1fbcd41c4319/kafka-native-test-container/src/test/resources/kafka-keystore.p12 -------------------------------------------------------------------------------- 
/kafka-native-test-container/src/test/resources/kafka-truststore.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ozangunalp/kafka-native/5ea1ad35dfd0985d9844e8e8e7cd1fbcd41c4319/kafka-native-test-container/src/test/resources/kafka-truststore.p12 -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/kerberos/kafkaServer.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker. 21 | broker.id=0 22 | process.roles=broker,controller 23 | controller.quorum.voters=0@localhost:9093 24 | 25 | ############################# Socket Server Settings ############################# 26 | 27 | # The address the socket server listens on. It will get the value returned from 28 | # java.net.InetAddress.getCanonicalHostName() if not configured. 29 | # FORMAT: 30 | # listeners = listener_name://host_name:port 31 | # EXAMPLE: 32 | # listeners = PLAINTEXT://your.host.name:9092 33 | #listeners=PLAINTEXT://:9092 34 | listeners=SASL_PLAINTEXT://:9092,PLAINTEXT://:9093 35 | #advertised.listeners=SASL_PLAINTEXT://localhost:9092 36 | controller.listener.names=PLAINTEXT 37 | 38 | 39 | # Hostname and port the broker will advertise to producers and consumers. If not set, 40 | # it uses the value for "listeners" if configured. Otherwise, it will use the value 41 | # returned from java.net.InetAddress.getCanonicalHostName(). 42 | #advertised.listeners=PLAINTEXT://your.host.name:9092 43 | #advertised.listeners=SASL_PLAINTEXT://localhost:9092 44 | 45 | # Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details 46 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL 47 | listener.security.protocol.map=SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT:PLAINTEXT 48 | 49 | 50 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network 51 | num.network.threads=3 52 | 53 | # The number of threads that the server uses for processing requests, which may include disk I/O 54 | num.io.threads=8 55 | 56 | # The send buffer (SO_SNDBUF) used by the socket server 57 | socket.send.buffer.bytes=102400 58 | 59 | # The receive buffer (SO_RCVBUF) used by the socket server 60 | socket.receive.buffer.bytes=102400 61 | 62 | # The maximum size of a request that the socket server will accept (protection against OOM) 63 | socket.request.max.bytes=104857600 64 | 65 | 66 | inter.broker.listener.name=SASL_PLAINTEXT 67 | 68 | 69 | #### SASL #### 70 | 71 | sasl.enabled.mechanisms=GSSAPI 72 | 73 | sasl.mechanism.inter.broker.protocol=GSSAPI 74 | 75 | #listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 76 | # username="broker" \ 77 | # password="broker-secret" \ 78 | # user_broker="broker-secret" \ 79 | # user_client="client-secret"; 80 | 81 | listener.name.sasl_plaintext.gssapi.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 82 | useKeyTab=true \ 83 | storeKey=true \ 84 | debug=true \ 85 | refreshKrb5Config=true \ 86 | serviceName="kafka" \ 87 | keyTab="/opt/kafka/config/kafkabroker.keytab" \ 88 | principal="kafka/localhost@EXAMPLE.COM"; 89 | 90 | sasl.kerberos.service.name=kafka 91 | 92 | #ssl.endpoint.identification.algortigm=https://localhost 93 | ssl.endpoint.identification.algorithm=https 94 | ssl.client.auth=none 95 | 96 | ############################# Log Basics ############################# 97 | 98 | # A comma separated list of directories under which to store log files 99 | log.dirs=/tmp/kafka-logs 100 | 101 | # The default number of log partitions per topic. More partitions allow greater 102 | # parallelism for consumption, but this will also result in more files across 103 | # the brokers. 104 | num.partitions=1 105 | 106 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. 107 | # This value is recommended to be increased for installations with data dirs located in RAID array. 108 | num.recovery.threads.per.data.dir=1 109 | 110 | ############################# Internal Topic Settings ############################# 111 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 112 | # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 113 | offsets.topic.replication.factor=1 114 | transaction.state.log.replication.factor=1 115 | transaction.state.log.min.isr=1 116 | 117 | ############################# Log Flush Policy ############################# 118 | 119 | # Messages are immediately written to the filesystem but by default we only fsync() to sync 120 | # the OS cache lazily. The following configurations control the flush of data to disk. 121 | # There are a few important trade-offs here: 122 | # 1. Durability: Unflushed data may be lost if you are not using replication. 123 | # 2. 
Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. 124 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. 125 | # The settings below allow one to configure the flush policy to flush data after a period of time or 126 | # every N messages (or both). This can be done globally and overridden on a per-topic basis. 127 | 128 | # The number of messages to accept before forcing a flush of data to disk 129 | #log.flush.interval.messages=10000 130 | 131 | # The maximum amount of time a message can sit in a log before we force a flush 132 | #log.flush.interval.ms=1000 133 | 134 | ############################# Log Retention Policy ############################# 135 | 136 | # The following configurations control the disposal of log segments. The policy can 137 | # be set to delete segments after a period of time, or after a given size has accumulated. 138 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens 139 | # from the end of the log. 140 | 141 | # The minimum age of a log file to be eligible for deletion due to age 142 | log.retention.hours=168 143 | 144 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining 145 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours. 146 | #log.retention.bytes=1073741824 147 | 148 | # The maximum size of a log segment file. When this size is reached a new log segment will be created. 149 | log.segment.bytes=1073741824 150 | 151 | # The interval at which log segments are checked to see if they can be deleted according 152 | # to the retention policies 153 | log.retention.check.interval.ms=300000 154 | 155 | ############################# Group Coordinator Settings ############################# 156 | 157 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. 158 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. 159 | # The default value for this is 3 seconds. 160 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. 161 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 
162 | group.initial.rebalance.delay.ms=0 -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/kerberos/krb5ClientTemplate.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = EXAMPLE.COM 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | rdns = false 14 | udp_preference_limit = 1 15 | 16 | [realms] 17 | EXAMPLE.COM = { 18 | kdc = : 19 | admin_server = : 20 | } 21 | 22 | [domain_realm] 23 | .EXAMPLE.COM = EXAMPLE.COM 24 | EXAMPLE.COM = EXAMPLE.COM 25 | .localhost = EXAMPLE.COM 26 | localhost = EXAMPLE.COM -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/kerberos/krb5KafkaBroker.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = EXAMPLE.COM 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | rdns = false 14 | 15 | [realms] 16 | EXAMPLE.COM = { 17 | kdc = kerberos:88 18 | admin_server = kerberos:749 19 | } 20 | 21 | [domain_realm] 22 | .EXAMPLE.COM = EXAMPLE.COM 23 | EXAMPLE.COM = EXAMPLE.COM 24 | .localhost = EXAMPLE.COM 25 | localhost = EXAMPLE.COM -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/keycloak/realms/kafka-authz-realm.json: -------------------------------------------------------------------------------- 1 | { 2 | "realm": "kafka-authz", 3 | "accessTokenLifespan": 300, 4 | "ssoSessionIdleTimeout": 864000, 5 | "ssoSessionMaxLifespan": 864000, 6 | "enabled": true, 7 | "sslRequired": "external", 8 | "roles": { 9 | "realm": [], 10 | "client": { 11 | "kafka-cli": [], 12 | "kafka": [ 13 | { 14 | "name": "uma_protection", 15 | "clientRole": true 16 | }, 17 | { 18 | "name": "kafka-user", 19 | "clientRole": true 20 | } 21 | ] 22 | } 23 | }, 24 | "groups" : [], 25 | "users": [ 26 | { 27 | "username": "service-account-kafka-producer-client", 28 | "enabled": true, 29 | "realmRoles" : [ "offline_access" ], 30 | "email": "service-account-kafka-producer-client@placeholder.org", 31 | "serviceAccountClientId": "kafka-producer-client" 32 | }, 33 | { 34 | "username": "service-account-kafka-consumer-client", 35 | "enabled": true, 36 | "realmRoles" : [ "offline_access" ], 37 | "email": "service-account-kafka-consumer-client@placeholder.org", 38 | "serviceAccountClientId": "kafka-consumer-client" 39 | } 40 | ], 41 | "clients": [ 42 | { 43 | "clientId": "kafka", 44 | "enabled": true, 45 | "clientAuthenticatorType": "client-secret", 46 | "secret": "kafka-secret", 47 | "bearerOnly": false, 48 | "consentRequired": false, 49 | "standardFlowEnabled": false, 50 | "implicitFlowEnabled": false, 51 | "directAccessGrantsEnabled": true, 52 | "serviceAccountsEnabled": true, 53 | "authorizationServicesEnabled": true, 54 | "publicClient": false, 55 | "fullScopeAllowed": true, 56 | "protocolMappers": [ 57 | { 58 | "name": "kafka audience", 59 | 
"protocol": "openid-connect", 60 | "protocolMapper": "oidc-audience-mapper", 61 | "consentRequired": false, 62 | "config": { 63 | "included.client.audience": "kafka", 64 | "id.token.claim": "false", 65 | "access.token.claim": "true" 66 | } 67 | } 68 | ], 69 | "authorizationSettings": { 70 | "allowRemoteResourceManagement": true, 71 | "policyEnforcementMode": "ENFORCING", 72 | "resources": [ 73 | { 74 | "name": "Group:*", 75 | "type": "Group", 76 | "ownerManagedAccess": false, 77 | "displayName": "Any group", 78 | "attributes": {}, 79 | "uris": [], 80 | "scopes": [ 81 | { 82 | "name": "Describe" 83 | }, 84 | { 85 | "name": "Read" 86 | }, 87 | { 88 | "name": "DescribeConfigs" 89 | }, 90 | { 91 | "name": "AlterConfigs" 92 | } 93 | ] 94 | }, 95 | { 96 | "name": "Topic:*", 97 | "type": "Topic", 98 | "ownerManagedAccess": false, 99 | "displayName": "Any topic", 100 | "attributes": {}, 101 | "uris": [], 102 | "scopes": [ 103 | { 104 | "name": "Create" 105 | }, 106 | { 107 | "name": "Delete" 108 | }, 109 | { 110 | "name": "Describe" 111 | }, 112 | { 113 | "name": "Write" 114 | }, 115 | { 116 | "name": "Read" 117 | }, 118 | { 119 | "name": "Alter" 120 | }, 121 | { 122 | "name": "DescribeConfigs" 123 | }, 124 | { 125 | "name": "AlterConfigs" 126 | } 127 | ] 128 | }, 129 | { 130 | "name" : "Cluster:*", 131 | "type" : "Cluster", 132 | "ownerManagedAccess" : false, 133 | "attributes" : { }, 134 | "uris" : [ ] 135 | }, 136 | { 137 | "name": "Topic:messages", 138 | "type": "topic", 139 | "scopes": [ 140 | { 141 | "name": "Delete" 142 | }, 143 | { 144 | "name": "Describe" 145 | }, 146 | { 147 | "name": "Create" 148 | }, 149 | { 150 | "name": "Write" 151 | }, 152 | { 153 | "name": "Alter" 154 | }, 155 | { 156 | "name": "Read" 157 | }, 158 | { 159 | "name": "DescribeConfigs" 160 | }, 161 | { 162 | "name": "AlterConfigs" 163 | } 164 | ] 165 | } 166 | ], 167 | "policies": [ 168 | { 169 | "name": "Producer Client", 170 | "type": "client", 171 | "logic": "POSITIVE", 172 | "decisionStrategy": "UNANIMOUS", 173 | "config": { 174 | "clients": "[\"kafka-producer-client\", \"kafka-client\"]" 175 | } 176 | }, 177 | { 178 | "name": "Consumer Client", 179 | "type": "client", 180 | "logic": "POSITIVE", 181 | "decisionStrategy": "UNANIMOUS", 182 | "config": { 183 | "clients": "[\"kafka-consumer-client\", \"kafka-client\"]" 184 | } 185 | }, 186 | { 187 | "name": "Producer Client can write to topic 'messages'", 188 | "type": "scope", 189 | "logic": "POSITIVE", 190 | "decisionStrategy": "UNANIMOUS", 191 | "config": { 192 | "resources": "[\"Topic:messages\"]", 193 | "scopes": "[\"Delete\",\"Describe\",\"Create\",\"Write\"]", 194 | "applyPolicies": "[\"Producer Client\"]" 195 | } 196 | }, 197 | { 198 | "name": "Consumer Client can read from topic 'messages'", 199 | "type": "scope", 200 | "logic": "POSITIVE", 201 | "decisionStrategy": "UNANIMOUS", 202 | "config": { 203 | "resources": "[\"Topic:messages\"]", 204 | "scopes": "[\"Describe\",\"Read\"]", 205 | "applyPolicies": "[\"Consumer Client\"]" 206 | } 207 | }, 208 | { 209 | "name": "Consumer Client can use any group", 210 | "type": "scope", 211 | "logic": "POSITIVE", 212 | "decisionStrategy": "UNANIMOUS", 213 | "config": { 214 | "resources": "[\"Group:*\"]", 215 | "scopes": "[\"Describe\",\"Write\",\"Read\"]", 216 | "applyPolicies": "[\"Consumer Client\"]" 217 | } 218 | } 219 | ], 220 | "scopes": [ 221 | { 222 | "name": "Create" 223 | }, 224 | { 225 | "name": "Read" 226 | }, 227 | { 228 | "name": "Write" 229 | }, 230 | { 231 | "name": "Delete" 232 | }, 233 | { 234 | "name": 
"Alter" 235 | }, 236 | { 237 | "name": "Describe" 238 | }, 239 | { 240 | "name": "ClusterAction" 241 | }, 242 | { 243 | "name": "DescribeConfigs" 244 | }, 245 | { 246 | "name": "AlterConfigs" 247 | }, 248 | { 249 | "name": "IdempotentWrite" 250 | } 251 | ], 252 | "decisionStrategy": "AFFIRMATIVE" 253 | } 254 | }, 255 | { 256 | "clientId": "kafka-cli", 257 | "enabled": true, 258 | "clientAuthenticatorType": "client-secret", 259 | "secret": "kafka-cli-secret", 260 | "bearerOnly": false, 261 | "consentRequired": false, 262 | "standardFlowEnabled": false, 263 | "implicitFlowEnabled": false, 264 | "directAccessGrantsEnabled": true, 265 | "serviceAccountsEnabled": false, 266 | "publicClient": true, 267 | "fullScopeAllowed": true 268 | }, 269 | { 270 | "clientId": "kafka-producer-client", 271 | "enabled": true, 272 | "clientAuthenticatorType": "client-secret", 273 | "secret": "kafka-producer-client-secret", 274 | "publicClient": false, 275 | "bearerOnly": false, 276 | "standardFlowEnabled": false, 277 | "implicitFlowEnabled": false, 278 | "directAccessGrantsEnabled": true, 279 | "serviceAccountsEnabled": true, 280 | "consentRequired" : false, 281 | "fullScopeAllowed" : false, 282 | "attributes": { 283 | "access.token.lifespan": "36000" 284 | } 285 | }, 286 | { 287 | "clientId": "kafka-consumer-client", 288 | "enabled": true, 289 | "clientAuthenticatorType": "client-secret", 290 | "secret": "kafka-consumer-client-secret", 291 | "publicClient": false, 292 | "bearerOnly": false, 293 | "standardFlowEnabled": false, 294 | "implicitFlowEnabled": false, 295 | "directAccessGrantsEnabled": true, 296 | "serviceAccountsEnabled": true, 297 | "consentRequired" : false, 298 | "fullScopeAllowed" : false, 299 | "attributes": { 300 | "access.token.lifespan": "36000" 301 | } 302 | }, 303 | { 304 | "clientId": "kafka-client", 305 | "enabled": true, 306 | "clientAuthenticatorType": "client-secret", 307 | "secret": "kafka-client-secret", 308 | "publicClient": false, 309 | "bearerOnly": false, 310 | "standardFlowEnabled": false, 311 | "implicitFlowEnabled": false, 312 | "directAccessGrantsEnabled": true, 313 | "serviceAccountsEnabled": true, 314 | "consentRequired" : false, 315 | "fullScopeAllowed" : false, 316 | "attributes": { 317 | "access.token.lifespan": "36000" 318 | } 319 | } 320 | ] 321 | } -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/oauth.properties: -------------------------------------------------------------------------------- 1 | listener.security.protocol.map=JWT:SASL_PLAINTEXT 2 | 3 | sasl.enabled.mechanisms=OAUTHBEARER 4 | 5 | #sasl.mechanism.inter.broker.protocol=OAUTHBEARER 6 | 7 | oauth.username.claim=preferred_username 8 | principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder 9 | 10 | listener.name.jwt.sasl.enabled.mechanisms=OAUTHBEARER,PLAIN 11 | listener.name.jwt.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \ 12 | oauth.jwks.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/certs" \ 13 | oauth.valid.issuer.uri="http://keycloak:8080/realms/kafka-authz" \ 14 | oauth.token.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/token" \ 15 | oauth.client.id="kafka" \ 16 | oauth.client.secret="kafka-secret"; 17 | 18 | listener.name.jwt.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler 19 | 
listener.name.jwt.oauthbearer.sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler 20 | listener.name.jwt.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 21 | oauth.jwks.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/certs" \ 22 | oauth.valid.issuer.uri="http://keycloak:8080/realms/kafka-authz" \ 23 | oauth.token.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/token" \ 24 | oauth.client.id="kafka" \ 25 | oauth.client.secret="kafka-secret" \ 26 | unsecuredLoginStringClaim_sub="admin"; 27 | 28 | listener.name.jwt.plain.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler 29 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/sasl_plain_plaintext.properties: -------------------------------------------------------------------------------- 1 | sasl.enabled.mechanisms=PLAIN 2 | 3 | listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="broker" \ 5 | password="broker-secret" \ 6 | user_broker="broker-secret" \ 7 | user_client="client-secret"; 8 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/sasl_scram_plaintext.properties: -------------------------------------------------------------------------------- 1 | sasl.enabled.mechanisms=SCRAM-SHA-512 2 | 3 | listener.name.sasl_plaintext.scram-sha-512.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="broker" \ 5 | password="broker-secret"; 6 | -------------------------------------------------------------------------------- /kafka-native-test-container/src/test/resources/ssl.properties: -------------------------------------------------------------------------------- 1 | ssl.keystore.location=/dir/kafka-keystore.p12 2 | ssl.keystore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L 3 | ssl.keystore.type=PKCS12 4 | ssl.key.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L 5 | ssl.truststore.location=/dir/kafka-truststore.p12 6 | ssl.truststore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L 7 | ssl.truststore.type=PKCS12 8 | ssl.endpoint.identification.algorithm= -------------------------------------------------------------------------------- /kafka-server/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.ozangunalp 8 | kafka-native-parent 9 | 999-SNAPSHOT 10 | 11 | kafka-server 12 | Kafka Server 13 | 14 | 15 | 16 | io.quarkus 17 | quarkus-arc 18 | 19 | 20 | io.quarkus 21 | quarkus-netty 22 | 23 | 24 | com.ozangunalp 25 | quarkus-kafka-server 26 | 27 | 28 | io.quarkus 29 | quarkus-container-image-docker 30 | 31 | 32 | 33 | org.apache.kafka 34 | kafka_2.13 35 | 36 | 37 | org.apache.kafka 38 | kafka-metadata 39 | 40 | 41 | org.apache.kafka 42 | kafka-server-common 43 | 44 | 45 | org.apache.kafka 46 | kafka-transaction-coordinator 47 | 48 | 49 | org.apache.kafka 50 | kafka-server 51 | 52 | 53 | org.apache.kafka 54 | kafka-raft 55 | 56 | 57 | org.apache.kafka 58 | kafka-clients 59 | 60 | 61 | org.scala-lang 62 | scala-library 63 | 64 | 65 | org.jboss.logmanager 66 | log4j2-jboss-logmanager 67 | 68 | 69 | io.quarkus 70 | quarkus-junit5 71 | test 72 | 73 | 74 | io.smallrye.reactive 75 | 
smallrye-reactive-messaging-kafka-test-companion 76 | 77 | 78 | net.sourceforge.argparse4j 79 | argparse4j 80 | 81 | 82 | 83 | org.apache.logging.log4j 84 | log4j-core 85 | 2.24.3 86 | 87 | 88 | 89 | 90 | 91 | ${quarkus.platform.group-id} 92 | quarkus-maven-plugin 93 | ${quarkus.platform.version} 94 | true 95 | 96 | 97 | 98 | build 99 | generate-code 100 | generate-code-tests 101 | 102 | 103 | 104 | 105 | 106 | org.apache.maven.plugins 107 | maven-compiler-plugin 108 | ${compiler-plugin.version} 109 | 110 | ${maven.compiler.parameters} 111 | 112 | 113 | 114 | org.apache.maven.plugins 115 | maven-surefire-plugin 116 | ${surefire-plugin.version} 117 | 118 | 119 | org.jboss.logmanager.LogManager 120 | ${maven.home} 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | native 129 | 130 | 131 | native 132 | 133 | 134 | 135 | 136 | 137 | org.apache.maven.plugins 138 | maven-failsafe-plugin 139 | ${surefire-plugin.version} 140 | 141 | 142 | 143 | integration-test 144 | verify 145 | 146 | 147 | 148 | ${project.build.directory}/${project.build.finalName}-runner 149 | org.jboss.logmanager.LogManager 150 | ${maven.home} 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | true 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /kafka-server/src/main/docker/Dockerfile.jvm: -------------------------------------------------------------------------------- 1 | #### 2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode 3 | # 4 | # Before building the container image run: 5 | # 6 | # ./mvnw package 7 | # 8 | # Then, build the image with: 9 | # 10 | # docker build -f src/main/docker/Dockerfile.jvm -t quarkus/embedded-kafka-jvm . 11 | # 12 | # Then run the container using: 13 | # 14 | # docker run -i --rm -p 8080:8080 quarkus/embedded-kafka-jvm 15 | # 16 | # If you want to include the debug port into your docker image 17 | # you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 18 | # 19 | # Then run the container using : 20 | # 21 | # docker run -i --rm -p 8080:8080 quarkus/embedded-kafka-jvm 22 | # 23 | # This image uses the `run-java.sh` script to run the application. 24 | # This scripts computes the command line to execute your Java application, and 25 | # includes memory/GC tuning. 26 | # You can configure the behavior using the following environment properties: 27 | # - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") 28 | # - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options 29 | # in JAVA_OPTS (example: "-Dsome.property=foo") 30 | # - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is 31 | # used to calculate a default maximal heap memory based on a containers restriction. 32 | # If used in a container without any memory constraints for the container then this 33 | # option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio 34 | # of the container available memory as set here. The default is `50` which means 50% 35 | # of the available memory is used as an upper boundary. You can skip this mechanism by 36 | # setting this value to `0` in which case no `-Xmx` option is added. 37 | # - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This 38 | # is used to calculate a default initial heap memory based on the maximum heap memory. 
39 | # If used in a container without any memory constraints for the container then this 40 | # option has no effect. If there is a memory constraint then `-Xms` is set to a ratio 41 | # of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` 42 | # is used as the initial heap size. You can skip this mechanism by setting this value 43 | # to `0` in which case no `-Xms` option is added (example: "25") 44 | # - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. 45 | # This is used to calculate the maximum value of the initial heap memory. If used in 46 | # a container without any memory constraints for the container then this option has 47 | # no effect. If there is a memory constraint then `-Xms` is limited to the value set 48 | # here. The default is 4096MB which means the calculated value of `-Xms` never will 49 | # be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") 50 | # - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output 51 | # when things are happening. This option, if set to true, will set 52 | # `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). 53 | # - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: 54 | # true"). 55 | # - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). 56 | # - CONTAINER_CORE_LIMIT: A calculated core limit as described in 57 | # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") 58 | # - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). 59 | # - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. 60 | # (example: "20") 61 | # - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. 62 | # (example: "40") 63 | # - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. 64 | # (example: "4") 65 | # - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus 66 | # previous GC times. (example: "90") 67 | # - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") 68 | # - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") 69 | # - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should 70 | # contain the necessary JRE command-line options to specify the required GC, which 71 | # will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). 72 | # - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") 73 | # - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") 74 | # - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be 75 | # accessed directly. 
(example: "foo.example.com,bar.example.com") 76 | # 77 | ### 78 | FROM registry.access.redhat.com/ubi8/openjdk-17:1.11 79 | 80 | ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' 81 | 82 | 83 | # We make four distinct layers so if there are application changes the library layers can be re-used 84 | COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ 85 | COPY --chown=185 target/quarkus-app/*.jar /deployments/ 86 | COPY --chown=185 target/quarkus-app/app/ /deployments/app/ 87 | COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ 88 | RUN mkdir -m 777 -p /deployments/target/log-dir 89 | 90 | EXPOSE 9092 91 | USER 185 92 | ENV JAVA_OPTS="-Djava.util.logging.manager=org.jboss.logmanager.LogManager" 93 | ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" 94 | 95 | -------------------------------------------------------------------------------- /kafka-server/src/main/docker/Dockerfile.legacy-jar: -------------------------------------------------------------------------------- 1 | #### 2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode 3 | # 4 | # Before building the container image run: 5 | # 6 | # ./mvnw package -Dquarkus.package.jar.type=legacy-jar 7 | # 8 | # Then, build the image with: 9 | # 10 | # docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/code-with-quarkus-legacy-jar . 11 | # 12 | # Then run the container using: 13 | # 14 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus-legacy-jar 15 | # 16 | # If you want to include the debug port into your docker image 17 | # you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 18 | # 19 | # Then run the container using : 20 | # 21 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus-legacy-jar 22 | # 23 | # This image uses the `run-java.sh` script to run the application. 24 | # This scripts computes the command line to execute your Java application, and 25 | # includes memory/GC tuning. 26 | # You can configure the behavior using the following environment properties: 27 | # - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") 28 | # - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options 29 | # in JAVA_OPTS (example: "-Dsome.property=foo") 30 | # - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is 31 | # used to calculate a default maximal heap memory based on a containers restriction. 32 | # If used in a container without any memory constraints for the container then this 33 | # option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio 34 | # of the container available memory as set here. The default is `50` which means 50% 35 | # of the available memory is used as an upper boundary. You can skip this mechanism by 36 | # setting this value to `0` in which case no `-Xmx` option is added. 37 | # - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This 38 | # is used to calculate a default initial heap memory based on the maximum heap memory. 39 | # If used in a container without any memory constraints for the container then this 40 | # option has no effect. If there is a memory constraint then `-Xms` is set to a ratio 41 | # of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` 42 | # is used as the initial heap size. 
You can skip this mechanism by setting this value 43 | # to `0` in which case no `-Xms` option is added (example: "25") 44 | # - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. 45 | # This is used to calculate the maximum value of the initial heap memory. If used in 46 | # a container without any memory constraints for the container then this option has 47 | # no effect. If there is a memory constraint then `-Xms` is limited to the value set 48 | # here. The default is 4096MB which means the calculated value of `-Xms` never will 49 | # be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") 50 | # - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output 51 | # when things are happening. This option, if set to true, will set 52 | # `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). 53 | # - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: 54 | # true"). 55 | # - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). 56 | # - CONTAINER_CORE_LIMIT: A calculated core limit as described in 57 | # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") 58 | # - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). 59 | # - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. 60 | # (example: "20") 61 | # - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. 62 | # (example: "40") 63 | # - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. 64 | # (example: "4") 65 | # - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus 66 | # previous GC times. (example: "90") 67 | # - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") 68 | # - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") 69 | # - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should 70 | # contain the necessary JRE command-line options to specify the required GC, which 71 | # will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). 72 | # - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") 73 | # - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") 74 | # - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be 75 | # accessed directly. (example: "foo.example.com,bar.example.com") 76 | # 77 | ### 78 | FROM registry.access.redhat.com/ubi8/openjdk-17:1.11 79 | 80 | ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' 81 | 82 | 83 | COPY target/lib/* /deployments/lib/ 84 | COPY target/*-runner.jar /deployments/quarkus-run.jar 85 | RUN mkdir -m 777 -p /deployments/target/log-dir 86 | 87 | EXPOSE 9092 88 | USER 185 89 | ENV JAVA_OPTS="-Djava.util.logging.manager=org.jboss.logmanager.LogManager" 90 | ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" 91 | -------------------------------------------------------------------------------- /kafka-server/src/main/docker/Dockerfile.native: -------------------------------------------------------------------------------- 1 | #### 2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. 
3 | # 4 | # Before building the container image run: 5 | # 6 | # ./mvnw package -Pnative 7 | # 8 | # Then, build the image with: 9 | # 10 | # docker build -f src/main/docker/Dockerfile.native -t quarkus/code-with-quarkus . 11 | # 12 | # Then run the container using: 13 | # 14 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus 15 | # 16 | ### 17 | FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5 18 | WORKDIR /work/ 19 | RUN chown 1001 /work \ 20 | && chmod "g+rwX" /work \ 21 | && chown 1001:root /work 22 | COPY --chown=1001:root target/*-runner /work/kafka 23 | 24 | EXPOSE 9092 25 | USER 1001 26 | RUN mkdir -m 777 -p /work/target/log-dir 27 | 28 | CMD ["./kafka"] 29 | -------------------------------------------------------------------------------- /kafka-server/src/main/docker/Dockerfile.native-micro: -------------------------------------------------------------------------------- 1 | #### 2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. 3 | # It uses a micro base image, tuned for Quarkus native executables. 4 | # It reduces the size of the resulting container image. 5 | # Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. 6 | # 7 | # Before building the container image run: 8 | # 9 | # ./mvnw package -Pnative 10 | # 11 | # Then, build the image with: 12 | # 13 | # docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/code-with-quarkus . 14 | # 15 | # Then run the container using: 16 | # 17 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus 18 | # 19 | ### 20 | FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0 21 | WORKDIR /work/ 22 | RUN chown 1001 /work \ 23 | && chmod "g+rwX" /work \ 24 | && chown 1001:root /work 25 | COPY --chown=1001:root target/*-runner /work/kafka 26 | 27 | EXPOSE 9092 28 | USER 1001 29 | RUN mkdir -m 777 -p /work/target/log-dir 30 | 31 | CMD ["./kafka"] 32 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/BrokerConfig.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import org.apache.kafka.common.Endpoint; 4 | import org.apache.kafka.common.config.TopicConfig; 5 | import org.apache.kafka.common.security.auth.SecurityProtocol; 6 | import org.apache.kafka.common.utils.Utils; 7 | import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; 8 | import org.apache.kafka.coordinator.transaction.TransactionLogConfig; 9 | import org.apache.kafka.network.SocketServerConfigs; 10 | import org.apache.kafka.raft.QuorumConfig; 11 | import org.apache.kafka.server.config.KRaftConfigs; 12 | import org.apache.kafka.server.config.ReplicationConfigs; 13 | import org.apache.kafka.server.config.ServerConfigs; 14 | import org.apache.kafka.server.config.ServerLogConfigs; 15 | import org.apache.kafka.storage.internals.log.CleanerConfig; 16 | import org.eclipse.microprofile.config.Config; 17 | import org.eclipse.microprofile.config.ConfigProvider; 18 | import org.jboss.logging.Logger; 19 | 20 | import java.util.ArrayList; 21 | import java.util.Arrays; 22 | import java.util.Collection; 23 | import java.util.List; 24 | import java.util.Map; 25 | import java.util.Properties; 26 | import java.util.TreeMap; 27 | import java.util.function.Function; 28 | import java.util.stream.Collectors; 29 | import java.util.stream.Stream; 30 | 31 | public final class 
BrokerConfig { 32 | 33 | static final Logger LOGGER = Logger.getLogger(BrokerConfig.class.getName()); 34 | 35 | final static String CONFIG_PREFIX = "kafka"; 36 | 37 | private BrokerConfig() { 38 | } 39 | 40 | 41 | /** 42 | * broker.id 43 | * process.roles 44 | * quorum.voters 45 | *

46 | * controller.listener.names 47 | * inter.broker.listener.name 48 | * listeners 49 | * advertised.listeners 50 | * listener.security.protocol.map 51 | *

52 | * early.start.listeners 53 | *

54 | * 55 | * @param host 56 | * @param kafkaPort 57 | * @param internalPort 58 | * @param controllerPort 59 | * @param defaultProtocol 60 | * @return 61 | */ 62 | public static Properties defaultCoreConfig(Properties props, String host, int kafkaPort, 63 | int internalPort, int controllerPort, SecurityProtocol defaultProtocol) { 64 | Endpoint internal = Endpoints.internal(host, internalPort); 65 | Endpoint controller = Endpoints.controller(host, controllerPort); 66 | List advertised = new ArrayList<>(); 67 | String advertisedListenersStr = props.getProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG); 68 | if (!Utils.isBlank(advertisedListenersStr)) { 69 | advertised.addAll(Endpoints.parseEndpoints(advertisedListenersStr, defaultProtocol)); 70 | } 71 | if (advertised.isEmpty()) { 72 | advertised.add(Endpoints.endpoint(defaultProtocol, kafkaPort)); 73 | } 74 | 75 | // Configure node id 76 | String brokerId = props.getProperty(ServerConfigs.BROKER_ID_CONFIG); 77 | if (brokerId == null) { 78 | brokerId = "1"; 79 | props.put(ServerConfigs.BROKER_ID_CONFIG, brokerId); 80 | } 81 | 82 | boolean kraftController = !props.containsKey(KRaftConfigs.PROCESS_ROLES_CONFIG) || 83 | Arrays.asList(props.getProperty(KRaftConfigs.PROCESS_ROLES_CONFIG).split(",")).contains("controller"); 84 | // Configure kraft 85 | props.putIfAbsent(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker,controller"); 86 | if (kraftController) { 87 | props.putIfAbsent(QuorumConfig.QUORUM_VOTERS_CONFIG, brokerId + "@" + controller.host() + ":" + controller.port()); 88 | } 89 | 90 | // auto-configure listeners if 91 | // - no controller.listener.names config 92 | // - no inter.broker.listener.name config 93 | // - no listeners config 94 | if (!props.containsKey(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG) 95 | && !props.containsKey(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG) 96 | && !props.containsKey(SocketServerConfigs.LISTENERS_CONFIG)) { 97 | // Configure listeners 98 | List earlyStartListeners = new ArrayList<>(); 99 | earlyStartListeners.add(Endpoints.BROKER_PROTOCOL_NAME); 100 | 101 | Map listeners = advertised.stream() 102 | .map(l -> new Endpoint(l.listenerName().orElse(null), l.securityProtocol(), "", kafkaPort)) 103 | .collect(Collectors.toMap(Endpoints::listenerName, Function.identity())); 104 | listeners.put(Endpoints.listenerName(internal), internal); 105 | 106 | Map securityProtocolMapListeners = new TreeMap<>(listeners); 107 | if (kraftController) { 108 | earlyStartListeners.add(Endpoints.CONTROLLER_PROTOCOL_NAME); 109 | listeners.put(Endpoints.listenerName(controller), controller); 110 | } 111 | securityProtocolMapListeners.put(Endpoints.listenerName(controller), controller); 112 | props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, Endpoints.listenerName(controller)); 113 | 114 | props.put(SocketServerConfigs.LISTENERS_CONFIG, joinListeners(listeners.values())); 115 | 116 | // Configure internal listener 117 | props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, Endpoints.listenerName(internal)); 118 | advertised.add(internal); 119 | 120 | // Configure security protocol map, by respecting existing map 121 | props.compute(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, (k, v) -> 122 | mergeSecurityProtocolMap(securityProtocolMapListeners, (String) v)); 123 | 124 | // Configure early start listeners 125 | props.put(ServerConfigs.EARLY_START_LISTENERS_CONFIG, String.join(",", earlyStartListeners)); 126 | } else { 127 | LOGGER.warnf("Broker configs %s, %s, %s, %s will not be 
configured automatically, " + 128 | "make sure to provide necessary configuration manually.", 129 | KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, 130 | SocketServerConfigs.LISTENERS_CONFIG, 131 | ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, 132 | SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG); 133 | } 134 | 135 | // Configure advertised listeners 136 | props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, joinListeners(advertised)); 137 | 138 | return props; 139 | } 140 | 141 | private static String joinListeners(Collection endpoints) { 142 | return endpoints.stream() 143 | .map(Endpoints::toListenerString) 144 | .distinct() 145 | .collect(Collectors.joining(",")); 146 | } 147 | 148 | private static String mergeSecurityProtocolMap(Map listeners, String current) { 149 | Map existing = Stream.ofNullable(current).flatMap(m -> Arrays.stream(m.split(","))) 150 | .collect(Collectors.toMap(s -> s.split(":")[0], s -> s)); 151 | String toAdd = listeners.entrySet().stream() 152 | .filter(e -> !existing.containsKey(e.getKey())) 153 | .map(Map.Entry::getValue) 154 | .map(Endpoints::toProtocolMap) 155 | .collect(Collectors.joining(",")); 156 | return current == null ? toAdd : current + "," + toAdd; 157 | } 158 | 159 | public static Properties defaultStaticConfig(Properties props) { 160 | // Configure static default props 161 | props.putIfAbsent(ReplicationConfigs.REPLICA_SOCKET_TIMEOUT_MS_CONFIG, "1000"); 162 | props.putIfAbsent(ReplicationConfigs.REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS_CONFIG, String.valueOf(Long.MAX_VALUE)); 163 | props.putIfAbsent(ReplicationConfigs.CONTROLLER_SOCKET_TIMEOUT_MS_CONFIG, "1000"); 164 | props.putIfAbsent(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, Boolean.toString(false)); 165 | props.putIfAbsent(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, Boolean.toString(true)); 166 | props.putIfAbsent(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, Boolean.toString(true)); 167 | props.putIfAbsent(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG, "1000"); 168 | props.putIfAbsent(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "2097152"); 169 | props.putIfAbsent(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, String.valueOf(Long.MAX_VALUE)); 170 | props.putIfAbsent(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, String.valueOf(Long.MAX_VALUE)); 171 | props.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1"); 172 | props.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "5"); 173 | props.putIfAbsent(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0"); 174 | props.putIfAbsent(ServerLogConfigs.NUM_PARTITIONS_CONFIG, "1"); 175 | props.putIfAbsent(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, "1"); 176 | props.putIfAbsent(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1"); 177 | props.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1"); 178 | props.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1"); 179 | return props; 180 | } 181 | 182 | public static Properties providedConfig(Properties props) { 183 | Config config = ConfigProvider.getConfig(); 184 | for (String propertyName : config.getPropertyNames()) { 185 | String propertyNameLowerCase = propertyName.toLowerCase(); 186 | if (!propertyNameLowerCase.startsWith(CONFIG_PREFIX)) { 187 | continue; 188 | } 189 | // Replace _ by . - This is because Kafka properties tend to use . and env variables use _ for every special 190 | // character. So, replace _ with . 
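// Illustrative example (not from the source): a provided config entry or environment variable
// such as KAFKA_AUTO_CREATE_TOPICS_ENABLE is lower-cased, stripped of the "kafka" prefix, and its
// underscores are replaced by dots, yielding the broker property auto.create.topics.enable.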
191 | String effectivePropertyName = propertyNameLowerCase.substring(CONFIG_PREFIX.length() + 1).toLowerCase() 192 | .replace("_", "."); 193 | String value = config.getValue(propertyName, String.class); 194 | props.put(effectivePropertyName, value); 195 | } 196 | return props; 197 | } 198 | 199 | } 200 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/EmbeddedKafkaBroker.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import kafka.cluster.EndPoint; 4 | import kafka.server.KafkaConfig; 5 | import kafka.server.KafkaRaftServer; 6 | import kafka.server.Server; 7 | import org.apache.kafka.common.Endpoint; 8 | import org.apache.kafka.common.Uuid; 9 | import org.apache.kafka.common.security.auth.SecurityProtocol; 10 | import org.apache.kafka.common.utils.Time; 11 | import org.apache.kafka.common.utils.Utils; 12 | import org.apache.kafka.network.SocketServerConfigs; 13 | import org.apache.kafka.server.common.MetadataVersion; 14 | import org.jboss.logging.Logger; 15 | import scala.jdk.javaapi.StreamConverters; 16 | 17 | import java.io.Closeable; 18 | import java.io.File; 19 | import java.util.Arrays; 20 | import java.util.List; 21 | import java.util.Properties; 22 | import java.util.function.Consumer; 23 | import java.util.stream.Collectors; 24 | 25 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT; 26 | 27 | /** 28 | * Embedded Kafka Broker, by default listens on localhost with random broker and controller ports. 29 | *

30 | */ 31 | public class EmbeddedKafkaBroker implements Closeable { 32 | 33 | static final Logger LOGGER = Logger.getLogger(EmbeddedKafkaBroker.class.getName()); 34 | 35 | static final String KAFKA_PREFIX = "kafka-server-"; 36 | 37 | private Server kafkaServer; 38 | private KafkaConfig config; 39 | 40 | private String host = "localhost"; 41 | private int kafkaPort = 0; 42 | private int internalPort = 0; 43 | private int controllerPort = 0; 44 | private boolean deleteDirsOnClose = true; 45 | private String clusterId = Uuid.randomUuid().toString(); 46 | private String storageMetadataVersion = MetadataVersion.LATEST_PRODUCTION.version(); 47 | private final Properties brokerConfig = new Properties(); 48 | public SecurityProtocol defaultProtocol = PLAINTEXT; 49 | private boolean autoConfigure = true; 50 | private List scramCredentials = List.of(); 51 | 52 | /** 53 | * Configure properties for the broker. 54 | * 55 | * @param function the config modifier function. 56 | * @return this {@link EmbeddedKafkaBroker} 57 | */ 58 | public EmbeddedKafkaBroker withConfig(Consumer function) { 59 | assertNotRunning(); 60 | function.accept(this.brokerConfig); 61 | return this; 62 | } 63 | 64 | /** 65 | * Automatically configure broker for embedded testing, exposing relevant listeners, configuring broker to run 66 | * in KRaft mode if required, tuning timeouts. See {@link BrokerConfig} for details. Disabling autoConfigure should 67 | * be used in combination with user supplied configuration. 68 | * 69 | * @param autoConfigure autoConfigure 70 | * @return this {@link EmbeddedKafkaBroker} 71 | */ 72 | public EmbeddedKafkaBroker withAutoConfigure(boolean autoConfigure) { 73 | this.autoConfigure = autoConfigure; 74 | return this; 75 | } 76 | 77 | /** 78 | * Configure the port on which the broker will listen. 79 | * 80 | * @param port the port. 81 | * @return this {@link EmbeddedKafkaBroker} 82 | */ 83 | public EmbeddedKafkaBroker withKafkaPort(int port) { 84 | assertNotRunning(); 85 | this.kafkaPort = port; 86 | return this; 87 | } 88 | 89 | /** 90 | * Configure the controller port for the broker. 91 | * 92 | * @param port the port. 93 | * @return this {@link EmbeddedKafkaBroker} 94 | */ 95 | public EmbeddedKafkaBroker withControllerPort(int port) { 96 | assertNotRunning(); 97 | this.controllerPort = port; 98 | return this; 99 | } 100 | 101 | 102 | /** 103 | * Configure the internal port for the broker. 104 | * 105 | * @param port the port. 106 | * @return this {@link EmbeddedKafkaBroker} 107 | */ 108 | public EmbeddedKafkaBroker withInternalPort(int port) { 109 | assertNotRunning(); 110 | this.internalPort = port; 111 | return this; 112 | } 113 | 114 | /** 115 | * Configure the hostname on which the broker will listen. 116 | * 117 | * @param host the host. 118 | * @return this {@link EmbeddedKafkaBroker} 119 | */ 120 | public EmbeddedKafkaBroker withKafkaHost(String host) { 121 | assertNotRunning(); 122 | this.host = host; 123 | return this; 124 | } 125 | 126 | /** 127 | * Configure the cluster id for the broker storage dirs. 128 | * 129 | * @param clusterId the cluster id. 130 | * @return this {@link EmbeddedKafkaBroker} 131 | */ 132 | public EmbeddedKafkaBroker withClusterId(String clusterId) { 133 | assertNotRunning(); 134 | this.clusterId = clusterId; 135 | return this; 136 | } 137 | 138 | /** 139 | * Configure the metadata version for the broker storage dirs. 140 | * 141 | * @param storageMetadataVersion the cluster id. 
142 | * @return this {@link EmbeddedKafkaBroker} 143 | */ 144 | public EmbeddedKafkaBroker withStorageMetadataVersion(String storageMetadataVersion) { 145 | assertNotRunning(); 146 | this.storageMetadataVersion = storageMetadataVersion; 147 | return this; 148 | } 149 | 150 | /** 151 | * Configure the list of scram credentials for the broker. 152 | * 153 | * @param scramCredentials the list of strings representing scram credentials. 154 | * @return this {@link EmbeddedKafkaBroker} 155 | */ 156 | public EmbeddedKafkaBroker withScramCredentials(List scramCredentials) { 157 | assertNotRunning(); 158 | this.scramCredentials = scramCredentials; 159 | return this; 160 | } 161 | 162 | /** 163 | * Configure whether log directories will be deleted on broker shutdown. 164 | * 165 | * @param deleteDirsOnClose {@code true} 166 | * @return this {@link EmbeddedKafkaBroker} 167 | */ 168 | public EmbeddedKafkaBroker withDeleteLogDirsOnClose(boolean deleteDirsOnClose) { 169 | assertNotRunning(); 170 | this.deleteDirsOnClose = deleteDirsOnClose; 171 | return this; 172 | } 173 | 174 | /** 175 | * Configure custom listeners for the broker. 176 | *

177 | * Note that this will override the default PLAINTEXT listener. 178 | * A CONTROLLER listener will be added automatically. 179 | * 180 | * @return this {@link EmbeddedKafkaBroker} 181 | */ 182 | public EmbeddedKafkaBroker withAdvertisedListeners(Endpoint... endpoints) { 183 | String advertisedListeners = Arrays.stream(endpoints) 184 | .map(Endpoints::toListenerString) 185 | .collect(Collectors.joining(",")); 186 | return withAdvertisedListeners(advertisedListeners); 187 | } 188 | 189 | /** 190 | * Configure custom listeners for the broker. 191 | *

192 | * Note that this will override the default PLAINTEXT listener. 193 | * A CONTROLLER listener will be added automatically. 194 | * 195 | * @return this {@link EmbeddedKafkaBroker} 196 | */ 197 | public EmbeddedKafkaBroker withAdvertisedListeners(String advertisedListeners) { 198 | assertNotRunning(); 199 | this.brokerConfig.compute(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, 200 | (k, v) -> v == null ? advertisedListeners : v + "," + advertisedListeners); 201 | return this; 202 | } 203 | 204 | /** 205 | * Create and start the broker. 206 | * 207 | * @return this {@link EmbeddedKafkaBroker} 208 | */ 209 | public synchronized EmbeddedKafkaBroker start() { 210 | if (isRunning()) { 211 | return this; 212 | } 213 | 214 | if (autoConfigure) { 215 | LOGGER.info("auto-configuring server"); 216 | BrokerConfig.providedConfig(brokerConfig); 217 | BrokerConfig.defaultStaticConfig(brokerConfig); 218 | BrokerConfig.defaultCoreConfig(brokerConfig, host, kafkaPort, internalPort, controllerPort, defaultProtocol); 219 | } 220 | 221 | Storage.ensureLogDirExists(brokerConfig); 222 | 223 | long start = System.currentTimeMillis(); 224 | this.config = KafkaConfig.fromProps(brokerConfig, false); 225 | Server server; 226 | 227 | var metadataVersion = MetadataVersion.fromVersionString(storageMetadataVersion); 228 | Storage.formatStorageFromConfig(config, clusterId, true, metadataVersion, scramCredentials); 229 | server = new KafkaRaftServer(config, Time.SYSTEM); 230 | server.startup(); 231 | this.kafkaServer = server; 232 | LOGGER.infof("Kafka broker started in %d ms with advertised listeners: %s", 233 | System.currentTimeMillis() - start, getAdvertisedListeners()); 234 | return this; 235 | } 236 | 237 | @Override 238 | public synchronized void close() { 239 | try { 240 | if (isRunning()) { 241 | kafkaServer.shutdown(); 242 | kafkaServer.awaitShutdown(); 243 | } 244 | } catch (Exception e) { 245 | LOGGER.error("Error shutting down broker", e); 246 | } finally { 247 | if (deleteDirsOnClose) { 248 | try { 249 | for (String logDir : getLogDirs()) { 250 | Utils.delete(new File(logDir)); 251 | } 252 | } catch (Exception e) { 253 | LOGGER.error("Error deleting logdirs", e); 254 | } 255 | } 256 | kafkaServer = null; 257 | } 258 | } 259 | 260 | public boolean isRunning() { 261 | return kafkaServer != null; 262 | } 263 | 264 | private void assertNotRunning() { 265 | if (isRunning()) { 266 | throw new IllegalStateException("Configuration of the running broker is not permitted."); 267 | } 268 | } 269 | 270 | public KafkaConfig getKafkaConfig() { 271 | return config; 272 | } 273 | 274 | public String getAdvertisedListeners() { 275 | return StreamConverters.asJavaParStream(config.effectiveAdvertisedBrokerListeners()) 276 | .map(EndPoint::connectionString) 277 | .collect(Collectors.joining(",")); 278 | } 279 | 280 | public List getLogDirs() { 281 | return StreamConverters.asJavaParStream(config.logDirs()) 282 | .toList(); 283 | } 284 | 285 | public String getClusterId() { 286 | return this.clusterId; 287 | } 288 | 289 | } 290 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/Endpoints.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT; 4 | 5 | import java.io.IOException; 6 | import java.net.ServerSocket; 7 | import java.util.Arrays; 8 | import java.util.List; 9 | import 
java.util.stream.Collectors; 10 | 11 | import org.apache.kafka.common.Endpoint; 12 | import org.apache.kafka.common.security.auth.SecurityProtocol; 13 | 14 | public final class Endpoints { 15 | private Endpoints() { 16 | } 17 | 18 | public static final String BROKER_PROTOCOL_NAME = "BROKER"; 19 | public static final String CONTROLLER_PROTOCOL_NAME = "CONTROLLER"; 20 | 21 | public static Endpoint endpoint(SecurityProtocol protocol, int port) { 22 | return endpoint(protocol.name, protocol, "", port); 23 | } 24 | 25 | public static Endpoint endpoint(SecurityProtocol protocol, String host, int port) { 26 | return endpoint(protocol.name, protocol, host, port); 27 | } 28 | 29 | public static Endpoint endpoint(String listener, SecurityProtocol protocol, int port) { 30 | return endpoint(listener, protocol, "", port); 31 | } 32 | 33 | public static Endpoint endpoint(String listener, SecurityProtocol protocol, String host, int port) { 34 | return new Endpoint(listener, protocol, host, getUnusedPort(port)); 35 | } 36 | 37 | public static List parseEndpoints(String listenerStr, SecurityProtocol defaultProtocol) { 38 | return Arrays.stream(listenerStr.split(",")) 39 | .map(s -> parseEndpoint(s, defaultProtocol)) 40 | .collect(Collectors.toList()); 41 | } 42 | 43 | public static Endpoint parseEndpoint(String listenerStr, SecurityProtocol defaultProtocol) { 44 | String[] parts = listenerStr.split(":"); 45 | if (parts.length == 2) { 46 | return new Endpoint(null, defaultProtocol, parts[0], Integer.parseInt(parts[1])); 47 | } else if (parts.length == 3) { 48 | String listenerName = parts[0]; 49 | String host = parts[1].replace("//", ""); 50 | int port = Integer.parseInt(parts[2]); 51 | if (SecurityProtocol.names().contains(listenerName)) { 52 | return new Endpoint(listenerName, SecurityProtocol.forName(listenerName), host, port); 53 | } else { 54 | return new Endpoint(listenerName, defaultProtocol, host, port); 55 | } 56 | } 57 | throw new IllegalArgumentException("Cannot parse listener: " + listenerStr); 58 | } 59 | 60 | public static Endpoint internal(String host, int port) { 61 | return endpoint(BROKER_PROTOCOL_NAME, PLAINTEXT, host, port); 62 | } 63 | 64 | public static Endpoint controller(String host, int port) { 65 | return endpoint(CONTROLLER_PROTOCOL_NAME, PLAINTEXT, host, port); 66 | } 67 | 68 | public static String toListenerString(Endpoint endpoint) { 69 | return String.format("%s://%s:%d", listenerName(endpoint), endpoint.host(), endpoint.port()); 70 | } 71 | 72 | public static String toProtocolMap(Endpoint endpoint) { 73 | return String.format("%s:%s", listenerName(endpoint), endpoint.securityProtocol().name); 74 | } 75 | 76 | public static String listenerName(Endpoint endpoint) { 77 | return endpoint.listenerName().orElse(endpoint.securityProtocol().name); 78 | } 79 | 80 | public static int getUnusedPort(int port) { 81 | if (port != 0) { 82 | return port; 83 | } 84 | try (ServerSocket s = new ServerSocket(0)) { 85 | return s.getLocalPort(); 86 | } catch (IOException e) { 87 | throw new RuntimeException(e); 88 | } 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/ScramUtils.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import org.apache.kafka.common.metadata.UserScramCredentialRecord; 4 | import org.apache.kafka.common.security.scram.ScramCredential; 5 | 6 | final class ScramUtils { 7 | 8 | 
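// Small helper: converts a metadata-log UserScramCredentialRecord into the ScramCredential
// representation used by Kafka's SCRAM authentication code (exercised by ScramUtilsTest below).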
private ScramUtils() { 9 | throw new IllegalArgumentException(); 10 | } 11 | 12 | static ScramCredential asScramCredential(UserScramCredentialRecord uscr) { 13 | return new ScramCredential(uscr.salt(), uscr.storedKey(), uscr.serverKey(), uscr.iterations()); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/ServerConfig.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import java.nio.file.Path; 4 | import java.util.Arrays; 5 | import java.util.Collections; 6 | import java.util.List; 7 | import java.util.Optional; 8 | 9 | import io.smallrye.config.ConfigMapping; 10 | import io.smallrye.config.WithDefault; 11 | 12 | @ConfigMapping(prefix = "server") 13 | public interface ServerConfig { 14 | 15 | @WithDefault("9092") 16 | int kafkaPort(); 17 | 18 | @WithDefault("9093") 19 | int internalPort(); 20 | 21 | @WithDefault("9094") 22 | int controllerPort(); 23 | 24 | @WithDefault("false") 25 | boolean deleteDirsOnClose(); 26 | 27 | Optional clusterId(); 28 | 29 | Optional host(); 30 | 31 | Optional propertiesFile(); 32 | 33 | @WithDefault("true") 34 | boolean autoConfigure(); 35 | 36 | /** 37 | * List of scram credentials, separated by semicolon. 38 | *
39 | * Format of the scram string must be in one of the following forms: 40 | *
41 |      * SCRAM-SHA-256=[user=alice,password=alice-secret]
42 |      * SCRAM-SHA-512=[user=alice,iterations=8192,salt="N3E=",saltedpassword="YCE="]
43 |      *
44 | * 45 | * @return list of scram credentials 46 | */ 47 | Optional scramCredentials(); 48 | 49 | default List scramCredentialsList() { 50 | return scramCredentials().map(s -> Arrays.stream(s.split(";")).toList()) 51 | .orElse(Collections.emptyList()); 52 | } 53 | 54 | /** Metadata version used for the Kafka storage. */ 55 | Optional storageMetadataVersion(); 56 | } 57 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/Startup.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import jakarta.enterprise.context.ApplicationScoped; 4 | import jakarta.enterprise.event.Observes; 5 | import jakarta.inject.Inject; 6 | 7 | import org.apache.kafka.clients.CommonClientConfigs; 8 | import org.apache.kafka.common.utils.Utils; 9 | 10 | import com.ozangunalp.kafka.server.metrics.Reporter; 11 | import io.quarkus.runtime.ShutdownEvent; 12 | import io.quarkus.runtime.StartupEvent; 13 | import io.smallrye.mutiny.unchecked.Unchecked; 14 | 15 | @ApplicationScoped 16 | public class Startup { 17 | 18 | @Inject 19 | ServerConfig config; 20 | 21 | private EmbeddedKafkaBroker broker; 22 | 23 | void startup(@Observes StartupEvent event) { 24 | broker = new EmbeddedKafkaBroker() 25 | .withDeleteLogDirsOnClose(config.deleteDirsOnClose()) 26 | .withKafkaPort(config.kafkaPort()) 27 | .withControllerPort(config.controllerPort()) 28 | .withInternalPort(config.internalPort()) 29 | .withKafkaHost(config.host().orElse("")) 30 | .withAutoConfigure(config.autoConfigure()) 31 | .withScramCredentials(config.scramCredentialsList()) 32 | .withConfig(properties -> { 33 | properties.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, Reporter.class.getName()); 34 | config.propertiesFile().ifPresent(Unchecked.consumer(file -> 35 | properties.putAll(Utils.loadProps(file.toFile().getAbsolutePath())))); 36 | }); 37 | config.clusterId().ifPresent(id -> broker.withClusterId(id)); 38 | config.storageMetadataVersion().ifPresent(storageMetadataVersion -> broker.withStorageMetadataVersion(storageMetadataVersion)); 39 | broker.start(); 40 | } 41 | 42 | void shutdown(@Observes ShutdownEvent event) { 43 | broker.close(); 44 | } 45 | } -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/Storage.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import java.io.ByteArrayOutputStream; 4 | import java.io.IOException; 5 | import java.io.PrintStream; 6 | import java.io.UncheckedIOException; 7 | import java.nio.file.Files; 8 | import java.nio.file.Paths; 9 | import java.util.List; 10 | import java.util.Properties; 11 | import java.util.UUID; 12 | 13 | import org.apache.kafka.metadata.storage.Formatter; 14 | import org.apache.kafka.server.common.MetadataVersion; 15 | import org.jboss.logging.Logger; 16 | 17 | import kafka.server.KafkaConfig; 18 | import kafka.tools.StorageTool; 19 | import scala.jdk.javaapi.CollectionConverters; 20 | 21 | public final class Storage { 22 | 23 | static final Logger LOGGER = Logger.getLogger(Storage.class.getName()); 24 | public static final String LOG_DIR = "log.dir"; 25 | 26 | private Storage() { 27 | } 28 | 29 | public static void ensureLogDirExists(Properties properties) { 30 | String logDir = properties.getProperty(LOG_DIR); 31 | if (logDir != null) { 32 | try { 33 | 
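// Make sure the configured log.dir exists; if it cannot be created, the catch block below
// falls back to a freshly created temporary directory via createAndSetLogDir().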
Files.createDirectories(Paths.get(logDir)); 34 | } catch (Throwable throwable) { 35 | LOGGER.warnf(throwable, "Error using %s as `log.dir`, setting up a temporary directory.", logDir); 36 | Storage.createAndSetLogDir(properties); 37 | } 38 | } else { 39 | Storage.createAndSetLogDir(properties); 40 | } 41 | } 42 | 43 | public static void createAndSetLogDir(Properties properties) { 44 | try { 45 | properties.put(LOG_DIR, 46 | Files.createTempDirectory(EmbeddedKafkaBroker.KAFKA_PREFIX + UUID.randomUUID()).toString()); 47 | } catch (IOException e) { 48 | throw new UncheckedIOException(e); 49 | } 50 | } 51 | 52 | public static void formatStorageFromConfig(KafkaConfig config, String clusterId, boolean ignoreFormatted, MetadataVersion metadataVersion, List scramCredentials) { 53 | if (!scramCredentials.isEmpty() && !metadataVersion.isScramSupported()) { 54 | throw new IllegalArgumentException("SCRAM is only supported in metadataVersion IBP_3_5_IV2 or later."); 55 | } 56 | var controllerListenerName = CollectionConverters.asJava(config.controllerListenerNames()).stream().findFirst().orElseThrow(); 57 | var logDirs = CollectionConverters.asJava(StorageTool.configToLogDirectories(config)); 58 | var storageFormatter = new Formatter() 59 | .setClusterId(clusterId) 60 | .setNodeId(config.nodeId()) 61 | .setControllerListenerName(controllerListenerName) 62 | .setMetadataLogDirectory(config.metadataLogDir()) 63 | .setDirectories(logDirs) 64 | .setScramArguments(scramCredentials) 65 | .setIgnoreFormatted(ignoreFormatted) 66 | .setPrintStream(LoggingOutputStream.loggerPrintStream(LOGGER)) 67 | .setReleaseVersion(metadataVersion); 68 | 69 | try { 70 | storageFormatter.run(); 71 | } catch (Exception e) { 72 | throw new RuntimeException("Failed to format storage", e); 73 | } 74 | } 75 | 76 | public static class LoggingOutputStream extends java.io.OutputStream { 77 | 78 | public static PrintStream loggerPrintStream(Logger logger) { 79 | return new PrintStream(new LoggingOutputStream(logger)); 80 | } 81 | 82 | private final ByteArrayOutputStream os = new ByteArrayOutputStream(1000); 83 | private final Logger logger; 84 | 85 | LoggingOutputStream(Logger logger) { 86 | this.logger = logger; 87 | } 88 | 89 | @Override 90 | public void write(int b) throws IOException { 91 | if (b == '\n' || b == '\r') { 92 | os.flush(); 93 | String log = os.toString(); 94 | logger.info(log); 95 | } else { 96 | os.write(b); 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /kafka-server/src/main/java/com/ozangunalp/kafka/server/metrics/Reporter.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server.metrics; 2 | 3 | import java.util.List; 4 | import java.util.Map; 5 | 6 | import org.apache.kafka.common.metrics.KafkaMetric; 7 | import org.apache.kafka.common.metrics.MetricsReporter; 8 | 9 | import io.quarkus.runtime.annotations.RegisterForReflection; 10 | 11 | @RegisterForReflection 12 | public class Reporter implements MetricsReporter { 13 | 14 | @Override 15 | public void init(List metrics) { 16 | 17 | } 18 | 19 | @Override 20 | public void metricChange(KafkaMetric metric) { 21 | 22 | } 23 | 24 | @Override 25 | public void metricRemoval(KafkaMetric metric) { 26 | 27 | } 28 | 29 | @Override 30 | public void close() { 31 | 32 | } 33 | 34 | @Override 35 | public void configure(Map configs) { 36 | 37 | } 38 | } 39 | -------------------------------------------------------------------------------- 
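Taken together, the server classes above form a small fluent API. As a minimal, illustrative sketch (not part of the repository; the class name EmbeddedKafkaExample and the fixed port are assumptions, and the kafka-server module plus its Kafka dependencies are assumed to be on the classpath), an embedded broker can be driven directly from plain Java:

import com.ozangunalp.kafka.server.EmbeddedKafkaBroker;

// Illustrative only: start an embedded KRaft broker on a fixed port and clean up its log dirs on close.
public class EmbeddedKafkaExample {
    public static void main(String[] args) {
        try (EmbeddedKafkaBroker broker = new EmbeddedKafkaBroker()
                .withKafkaPort(9092)
                .withDeleteLogDirsOnClose(true)) {
            broker.start();
            // Clients connect via the advertised listeners, e.g. PLAINTEXT://localhost:9092.
            System.out.println("Kafka ready at " + broker.getAdvertisedListeners());
        }
    }
}

This is essentially the wiring that Startup performs at application startup, driven by the ServerConfig mapping shown above.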
/kafka-server/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | kafka.log.dir=./target/log-dir 2 | quarkus.docker.dockerfile-native-path=src/main/docker/Dockerfile.native-micro 3 | quarkus.container-image.registry=quay.io 4 | quarkus.container-image.group=ogunalp 5 | quarkus.application.name=kafka-native 6 | quarkus.container-image.name=${quarkus.application.name} 7 | quarkus.native.auto-service-loader-registration=true 8 | -------------------------------------------------------------------------------- /kafka-server/src/test/java/com/ozangunalp/kafka/server/BrokerConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | 4 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT; 5 | import static org.assertj.core.api.Assertions.assertThat; 6 | 7 | import java.util.Properties; 8 | 9 | import org.junit.jupiter.api.Test; 10 | 11 | 12 | class BrokerConfigTest { 13 | 14 | @Test 15 | void testEmptyOverride() { 16 | Properties properties = BrokerConfig.defaultCoreConfig(new Properties(), "", 9092, 9093, 9094, PLAINTEXT); 17 | assertThat(properties).containsEntry("broker.id", "1"); 18 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 19 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092,CONTROLLER://:9094"); 20 | assertThat(properties).containsEntry("process.roles", "broker,controller"); 21 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER"); 22 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER"); 23 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093"); 24 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER"); 25 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT"); 26 | } 27 | 28 | @Test 29 | void testOverrideAdvertisedListeners() { 30 | Properties props = new Properties(); 31 | props.put("advertised.listeners", "PLAINTEXT://:9092"); 32 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 33 | assertThat(properties).containsEntry("broker.id", "1"); 34 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 35 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092,CONTROLLER://:9094"); 36 | assertThat(properties).containsEntry("process.roles", "broker,controller"); 37 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER"); 38 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER"); 39 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093"); 40 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER"); 41 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT"); 42 | } 43 | 44 | @Test 45 | void testOverrideProcessRoles() { 46 | Properties props = new Properties(); 47 | props.put("advertised.listeners", "PLAINTEXT://:9092"); 48 | props.put("process.roles", "broker"); 49 | props.put("listeners", "BROKER://:9093,PLAINTEXT://:9092"); 50 | props.put("listener.security.protocol.map", "BROKER:PLAINTEXT"); 51 | props.put("controller.quorum.voters", 
"1@:9094"); 52 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 53 | assertThat(properties).doesNotContainKey("controller.listener.names"); 54 | assertThat(properties).doesNotContainKey("inter.broker.listener.name"); 55 | assertThat(properties).doesNotContainKey("early.start.listeners"); 56 | assertThat(properties).containsEntry("broker.id", "1"); 57 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 58 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092"); 59 | assertThat(properties).containsEntry("process.roles", "broker"); 60 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092"); 61 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT"); 62 | } 63 | 64 | @Test 65 | void testOverrideProcessRolesWithNoQuorumVotersOverride() { 66 | Properties props = new Properties(); 67 | props.put("advertised.listeners", "PLAINTEXT://:9092"); 68 | props.put("process.roles", "broker"); 69 | props.put("listeners", "BROKER://:9093,PLAINTEXT://:9092"); 70 | props.put("listener.security.protocol.map", "BROKER:PLAINTEXT"); 71 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 72 | assertThat(properties).doesNotContainKey("controller.listener.names"); 73 | assertThat(properties).doesNotContainKey("inter.broker.listener.name"); 74 | assertThat(properties).doesNotContainKey("early.start.listeners"); 75 | assertThat(properties).doesNotContainKey("controller.quorum.voters"); 76 | assertThat(properties).containsEntry("broker.id", "1"); 77 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092"); 78 | assertThat(properties).containsEntry("process.roles", "broker"); 79 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092"); 80 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT"); 81 | } 82 | 83 | @Test 84 | void testOverrideListeners() { 85 | Properties props = new Properties(); 86 | props.put("advertised.listeners", "SSL://:9092"); 87 | props.put("listeners", "SSL://:9092,CONTROLLER://9093"); 88 | props.put("controller.listener.names", "CONTROLLER"); 89 | props.put("inter.broker.listener.name", "SSL"); 90 | props.put("listener.security.protocol.map", "SSL:SSL,CONTROLLER:PLAINTEXT"); 91 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 92 | assertThat(properties).containsEntry("broker.id", "1"); 93 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 94 | assertThat(properties).containsEntry("listeners", "SSL://:9092,CONTROLLER://9093"); 95 | assertThat(properties).containsEntry("process.roles", "broker,controller"); 96 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER"); 97 | assertThat(properties).containsEntry("inter.broker.listener.name", "SSL"); 98 | assertThat(properties).containsEntry("advertised.listeners", "SSL://:9092"); 99 | assertThat(properties).containsEntry("listener.security.protocol.map", "SSL:SSL,CONTROLLER:PLAINTEXT"); 100 | } 101 | 102 | @Test 103 | void testKraftBrokerRoleOnly() { 104 | Properties props = new Properties(); 105 | props.put("process.roles", "broker"); 106 | props.put("broker.id", "2"); 107 | props.put("controller.quorum.voters", "1@:9094"); 108 | 109 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 110 | 111 | 
assertThat(properties).containsEntry("broker.id", "2"); 112 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 113 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092"); 114 | assertThat(properties).containsEntry("process.roles", "broker"); 115 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER"); 116 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER"); 117 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093"); 118 | assertThat(properties).containsEntry("early.start.listeners", "BROKER"); 119 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT"); 120 | } 121 | 122 | @Test 123 | void testMergedSecurityProtocolMap() { 124 | Properties props = new Properties(); 125 | props.put("advertised.listeners", "JWT://:9092"); 126 | props.put("listener.security.protocol.map", "JWT:SSL"); 127 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT); 128 | assertThat(properties).containsEntry("broker.id", "1"); 129 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094"); 130 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,CONTROLLER://:9094,JWT://:9092"); 131 | assertThat(properties).containsEntry("process.roles", "broker,controller"); 132 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER"); 133 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER"); 134 | assertThat(properties).containsEntry("advertised.listeners", "JWT://:9092,BROKER://:9093"); 135 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER"); 136 | assertThat(properties).containsEntry("listener.security.protocol.map", "JWT:SSL,BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT"); 137 | } 138 | 139 | 140 | } -------------------------------------------------------------------------------- /kafka-server/src/test/java/com/ozangunalp/kafka/server/ScramUtilsTest.java: -------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import org.apache.kafka.clients.admin.ScramMechanism; 4 | import org.apache.kafka.common.metadata.UserScramCredentialRecord; 5 | import org.apache.kafka.common.security.scram.ScramCredential; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import org.junit.jupiter.params.ParameterizedTest; 9 | import org.junit.jupiter.params.provider.EnumSource; 10 | 11 | import java.nio.charset.StandardCharsets; 12 | 13 | import static org.assertj.core.api.Assertions.assertThat; 14 | 15 | class ScramUtilsTest { 16 | @Test 17 | void asScramCredential() { 18 | int iterations = 4096; 19 | byte[] salt = "salt".getBytes(StandardCharsets.UTF_8); 20 | byte[] server = "key".getBytes(StandardCharsets.UTF_8); 21 | var uscr = new UserScramCredentialRecord() 22 | .setIterations(iterations) 23 | .setSalt(salt) 24 | .setServerKey(server); 25 | 26 | var sc = ScramUtils.asScramCredential(uscr); 27 | assertThat(sc).extracting(ScramCredential::iterations).isEqualTo(iterations); 28 | assertThat(sc).extracting(ScramCredential::salt).isEqualTo(salt); 29 | assertThat(sc).extracting(ScramCredential::serverKey).isEqualTo(server); 30 | 31 | 32 | } 33 | } -------------------------------------------------------------------------------- /kafka-server/src/test/java/com/ozangunalp/kafka/server/SmokeTest.java: 
-------------------------------------------------------------------------------- 1 | package com.ozangunalp.kafka.server; 2 | 3 | import static org.assertj.core.api.Assertions.assertThat; 4 | 5 | import static org.awaitility.Awaitility.await; 6 | 7 | import org.junit.jupiter.api.Test; 8 | 9 | import io.quarkus.test.junit.QuarkusTest; 10 | import io.smallrye.reactive.messaging.kafka.companion.KafkaCompanion; 11 | 12 | @QuarkusTest 13 | public class SmokeTest { 14 | 15 | @Test 16 | void test() { 17 | try (KafkaCompanion companion = new KafkaCompanion("localhost:9092")) { 18 | await().untilAsserted(() -> assertThat(companion.cluster().nodes().size()).isGreaterThan(0)); 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /mvnw: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ---------------------------------------------------------------------------- 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # https://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations 18 | # under the License. 19 | # ---------------------------------------------------------------------------- 20 | 21 | # ---------------------------------------------------------------------------- 22 | # Maven Start Up Batch script 23 | # 24 | # Required ENV vars: 25 | # ------------------ 26 | # JAVA_HOME - location of a JDK home dir 27 | # 28 | # Optional ENV vars 29 | # ----------------- 30 | # M2_HOME - location of maven2's installed home dir 31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven 32 | # e.g. to debug Maven itself, use 33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files 35 | # ---------------------------------------------------------------------------- 36 | 37 | if [ -z "$MAVEN_SKIP_RC" ] ; then 38 | 39 | if [ -f /etc/mavenrc ] ; then 40 | . /etc/mavenrc 41 | fi 42 | 43 | if [ -f "$HOME/.mavenrc" ] ; then 44 | . "$HOME/.mavenrc" 45 | fi 46 | 47 | fi 48 | 49 | # OS specific support. $var _must_ be set to either true or false. 
50 | cygwin=false; 51 | darwin=false; 52 | mingw=false 53 | case "`uname`" in 54 | CYGWIN*) cygwin=true ;; 55 | MINGW*) mingw=true;; 56 | Darwin*) darwin=true 57 | # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home 58 | # See https://developer.apple.com/library/mac/qa/qa1170/_index.html 59 | if [ -z "$JAVA_HOME" ]; then 60 | if [ -x "/usr/libexec/java_home" ]; then 61 | export JAVA_HOME="`/usr/libexec/java_home`" 62 | else 63 | export JAVA_HOME="/Library/Java/Home" 64 | fi 65 | fi 66 | ;; 67 | esac 68 | 69 | if [ -z "$JAVA_HOME" ] ; then 70 | if [ -r /etc/gentoo-release ] ; then 71 | JAVA_HOME=`java-config --jre-home` 72 | fi 73 | fi 74 | 75 | if [ -z "$M2_HOME" ] ; then 76 | ## resolve links - $0 may be a link to maven's home 77 | PRG="$0" 78 | 79 | # need this for relative symlinks 80 | while [ -h "$PRG" ] ; do 81 | ls=`ls -ld "$PRG"` 82 | link=`expr "$ls" : '.*-> \(.*\)$'` 83 | if expr "$link" : '/.*' > /dev/null; then 84 | PRG="$link" 85 | else 86 | PRG="`dirname "$PRG"`/$link" 87 | fi 88 | done 89 | 90 | saveddir=`pwd` 91 | 92 | M2_HOME=`dirname "$PRG"`/.. 93 | 94 | # make it fully qualified 95 | M2_HOME=`cd "$M2_HOME" && pwd` 96 | 97 | cd "$saveddir" 98 | # echo Using m2 at $M2_HOME 99 | fi 100 | 101 | # For Cygwin, ensure paths are in UNIX format before anything is touched 102 | if $cygwin ; then 103 | [ -n "$M2_HOME" ] && 104 | M2_HOME=`cygpath --unix "$M2_HOME"` 105 | [ -n "$JAVA_HOME" ] && 106 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 107 | [ -n "$CLASSPATH" ] && 108 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"` 109 | fi 110 | 111 | # For Mingw, ensure paths are in UNIX format before anything is touched 112 | if $mingw ; then 113 | [ -n "$M2_HOME" ] && 114 | M2_HOME="`(cd "$M2_HOME"; pwd)`" 115 | [ -n "$JAVA_HOME" ] && 116 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" 117 | fi 118 | 119 | if [ -z "$JAVA_HOME" ]; then 120 | javaExecutable="`which javac`" 121 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then 122 | # readlink(1) is not available as standard on Solaris 10. 123 | readLink=`which readlink` 124 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then 125 | if $darwin ; then 126 | javaHome="`dirname \"$javaExecutable\"`" 127 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" 128 | else 129 | javaExecutable="`readlink -f \"$javaExecutable\"`" 130 | fi 131 | javaHome="`dirname \"$javaExecutable\"`" 132 | javaHome=`expr "$javaHome" : '\(.*\)/bin'` 133 | JAVA_HOME="$javaHome" 134 | export JAVA_HOME 135 | fi 136 | fi 137 | fi 138 | 139 | if [ -z "$JAVACMD" ] ; then 140 | if [ -n "$JAVA_HOME" ] ; then 141 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 142 | # IBM's JDK on AIX uses strange locations for the executables 143 | JAVACMD="$JAVA_HOME/jre/sh/java" 144 | else 145 | JAVACMD="$JAVA_HOME/bin/java" 146 | fi 147 | else 148 | JAVACMD="`which java`" 149 | fi 150 | fi 151 | 152 | if [ ! -x "$JAVACMD" ] ; then 153 | echo "Error: JAVA_HOME is not defined correctly." >&2 154 | echo " We cannot execute $JAVACMD" >&2 155 | exit 1 156 | fi 157 | 158 | if [ -z "$JAVA_HOME" ] ; then 159 | echo "Warning: JAVA_HOME environment variable is not set." 
160 | fi 161 | 162 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher 163 | 164 | # traverses directory structure from process work directory to filesystem root 165 | # first directory with .mvn subdirectory is considered project base directory 166 | find_maven_basedir() { 167 | 168 | if [ -z "$1" ] 169 | then 170 | echo "Path not specified to find_maven_basedir" 171 | return 1 172 | fi 173 | 174 | basedir="$1" 175 | wdir="$1" 176 | while [ "$wdir" != '/' ] ; do 177 | if [ -d "$wdir"/.mvn ] ; then 178 | basedir=$wdir 179 | break 180 | fi 181 | # workaround for JBEAP-8937 (on Solaris 10/Sparc) 182 | if [ -d "${wdir}" ]; then 183 | wdir=`cd "$wdir/.."; pwd` 184 | fi 185 | # end of workaround 186 | done 187 | echo "${basedir}" 188 | } 189 | 190 | # concatenates all lines of a file 191 | concat_lines() { 192 | if [ -f "$1" ]; then 193 | echo "$(tr -s '\n' ' ' < "$1")" 194 | fi 195 | } 196 | 197 | BASE_DIR=`find_maven_basedir "$(pwd)"` 198 | if [ -z "$BASE_DIR" ]; then 199 | exit 1; 200 | fi 201 | 202 | ########################################################################################## 203 | # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 204 | # This allows using the maven wrapper in projects that prohibit checking in binary data. 205 | ########################################################################################## 206 | if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then 207 | if [ "$MVNW_VERBOSE" = true ]; then 208 | echo "Found .mvn/wrapper/maven-wrapper.jar" 209 | fi 210 | else 211 | if [ "$MVNW_VERBOSE" = true ]; then 212 | echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 213 | fi 214 | if [ -n "$MVNW_REPOURL" ]; then 215 | jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" 216 | else 217 | jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" 218 | fi 219 | while IFS="=" read key value; do 220 | case "$key" in (wrapperUrl) jarUrl="$value"; break ;; 221 | esac 222 | done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" 223 | if [ "$MVNW_VERBOSE" = true ]; then 224 | echo "Downloading from: $jarUrl" 225 | fi 226 | wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" 227 | if $cygwin; then 228 | wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` 229 | fi 230 | 231 | if command -v wget > /dev/null; then 232 | if [ "$MVNW_VERBOSE" = true ]; then 233 | echo "Found wget ... using wget" 234 | fi 235 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then 236 | wget "$jarUrl" -O "$wrapperJarPath" 237 | else 238 | wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" 239 | fi 240 | elif command -v curl > /dev/null; then 241 | if [ "$MVNW_VERBOSE" = true ]; then 242 | echo "Found curl ... using curl" 243 | fi 244 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then 245 | curl -o "$wrapperJarPath" "$jarUrl" -f 246 | else 247 | curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f 248 | fi 249 | 250 | else 251 | if [ "$MVNW_VERBOSE" = true ]; then 252 | echo "Falling back to using Java to download" 253 | fi 254 | javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" 255 | # For Cygwin, switch paths to Windows format before running javac 256 | if $cygwin; then 257 | javaClass=`cygpath --path --windows "$javaClass"` 258 | fi 259 | if [ -e "$javaClass" ]; then 260 | if [ ! 
-e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 261 | if [ "$MVNW_VERBOSE" = true ]; then 262 | echo " - Compiling MavenWrapperDownloader.java ..." 263 | fi 264 | # Compiling the Java class 265 | ("$JAVA_HOME/bin/javac" "$javaClass") 266 | fi 267 | if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 268 | # Running the downloader 269 | if [ "$MVNW_VERBOSE" = true ]; then 270 | echo " - Running MavenWrapperDownloader.java ..." 271 | fi 272 | ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") 273 | fi 274 | fi 275 | fi 276 | fi 277 | ########################################################################################## 278 | # End of extension 279 | ########################################################################################## 280 | 281 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} 282 | if [ "$MVNW_VERBOSE" = true ]; then 283 | echo $MAVEN_PROJECTBASEDIR 284 | fi 285 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" 286 | 287 | # For Cygwin, switch paths to Windows format before running java 288 | if $cygwin; then 289 | [ -n "$M2_HOME" ] && 290 | M2_HOME=`cygpath --path --windows "$M2_HOME"` 291 | [ -n "$JAVA_HOME" ] && 292 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` 293 | [ -n "$CLASSPATH" ] && 294 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"` 295 | [ -n "$MAVEN_PROJECTBASEDIR" ] && 296 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` 297 | fi 298 | 299 | # Provide a "standardized" way to retrieve the CLI args that will 300 | # work with both Windows and non-Windows executions. 301 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" 302 | export MAVEN_CMD_LINE_ARGS 303 | 304 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 305 | 306 | exec "$JAVACMD" \ 307 | $MAVEN_OPTS \ 308 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ 309 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ 310 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" 311 | -------------------------------------------------------------------------------- /mvnw.cmd: -------------------------------------------------------------------------------- 1 | @REM ---------------------------------------------------------------------------- 2 | @REM Licensed to the Apache Software Foundation (ASF) under one 3 | @REM or more contributor license agreements. See the NOTICE file 4 | @REM distributed with this work for additional information 5 | @REM regarding copyright ownership. The ASF licenses this file 6 | @REM to you under the Apache License, Version 2.0 (the 7 | @REM "License"); you may not use this file except in compliance 8 | @REM with the License. You may obtain a copy of the License at 9 | @REM 10 | @REM https://www.apache.org/licenses/LICENSE-2.0 11 | @REM 12 | @REM Unless required by applicable law or agreed to in writing, 13 | @REM software distributed under the License is distributed on an 14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | @REM KIND, either express or implied. See the License for the 16 | @REM specific language governing permissions and limitations 17 | @REM under the License. 
18 | @REM ---------------------------------------------------------------------------- 19 | 20 | @REM ---------------------------------------------------------------------------- 21 | @REM Maven Start Up Batch script 22 | @REM 23 | @REM Required ENV vars: 24 | @REM JAVA_HOME - location of a JDK home dir 25 | @REM 26 | @REM Optional ENV vars 27 | @REM M2_HOME - location of maven2's installed home dir 28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands 29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending 30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven 31 | @REM e.g. to debug Maven itself, use 32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files 34 | @REM ---------------------------------------------------------------------------- 35 | 36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' 37 | @echo off 38 | @REM set title of command window 39 | title %0 40 | @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' 41 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% 42 | 43 | @REM set %HOME% to equivalent of $HOME 44 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") 45 | 46 | @REM Execute a user defined script before this one 47 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre 48 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending 49 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" 50 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" 51 | :skipRcPre 52 | 53 | @setlocal 54 | 55 | set ERROR_CODE=0 56 | 57 | @REM To isolate internal variables from possible post scripts, we use another setlocal 58 | @setlocal 59 | 60 | @REM ==== START VALIDATION ==== 61 | if not "%JAVA_HOME%" == "" goto OkJHome 62 | 63 | echo. 64 | echo Error: JAVA_HOME not found in your environment. >&2 65 | echo Please set the JAVA_HOME variable in your environment to match the >&2 66 | echo location of your Java installation. >&2 67 | echo. 68 | goto error 69 | 70 | :OkJHome 71 | if exist "%JAVA_HOME%\bin\java.exe" goto init 72 | 73 | echo. 74 | echo Error: JAVA_HOME is set to an invalid directory. >&2 75 | echo JAVA_HOME = "%JAVA_HOME%" >&2 76 | echo Please set the JAVA_HOME variable in your environment to match the >&2 77 | echo location of your Java installation. >&2 78 | echo. 79 | goto error 80 | 81 | @REM ==== END VALIDATION ==== 82 | 83 | :init 84 | 85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn". 86 | @REM Fallback to current working directory if not found. 87 | 88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% 89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir 90 | 91 | set EXEC_DIR=%CD% 92 | set WDIR=%EXEC_DIR% 93 | :findBaseDir 94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound 95 | cd .. 
96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound 97 | set WDIR=%CD% 98 | goto findBaseDir 99 | 100 | :baseDirFound 101 | set MAVEN_PROJECTBASEDIR=%WDIR% 102 | cd "%EXEC_DIR%" 103 | goto endDetectBaseDir 104 | 105 | :baseDirNotFound 106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR% 107 | cd "%EXEC_DIR%" 108 | 109 | :endDetectBaseDir 110 | 111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig 112 | 113 | @setlocal EnableExtensions EnableDelayedExpansion 114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a 115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% 116 | 117 | :endReadAdditionalConfig 118 | 119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" 120 | set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" 121 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 122 | 123 | set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" 124 | 125 | FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( 126 | IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B 127 | ) 128 | 129 | @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 130 | @REM This allows using the maven wrapper in projects that prohibit checking in binary data. 131 | if exist %WRAPPER_JAR% ( 132 | if "%MVNW_VERBOSE%" == "true" ( 133 | echo Found %WRAPPER_JAR% 134 | ) 135 | ) else ( 136 | if not "%MVNW_REPOURL%" == "" ( 137 | SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" 138 | ) 139 | if "%MVNW_VERBOSE%" == "true" ( 140 | echo Couldn't find %WRAPPER_JAR%, downloading it ... 141 | echo Downloading from: %DOWNLOAD_URL% 142 | ) 143 | 144 | powershell -Command "&{"^ 145 | "$webclient = new-object System.Net.WebClient;"^ 146 | "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ 147 | "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ 148 | "}"^ 149 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ 150 | "}" 151 | if "%MVNW_VERBOSE%" == "true" ( 152 | echo Finished downloading %WRAPPER_JAR% 153 | ) 154 | ) 155 | @REM End of extension 156 | 157 | @REM Provide a "standardized" way to retrieve the CLI args that will 158 | @REM work with both Windows and non-Windows executions. 
159 | set MAVEN_CMD_LINE_ARGS=%*
160 | 
161 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
162 | if ERRORLEVEL 1 goto error
163 | goto end
164 | 
165 | :error
166 | set ERROR_CODE=1
167 | 
168 | :end
169 | @endlocal & set ERROR_CODE=%ERROR_CODE%
170 | 
171 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
172 | @REM check for post script, once with legacy .bat ending and once with .cmd ending
173 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
174 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
175 | :skipRcPost
176 | 
177 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
178 | if "%MAVEN_BATCH_PAUSE%" == "on" pause
179 | 
180 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
181 | 
182 | exit /B %ERROR_CODE%
183 | 
-------------------------------------------------------------------------------- /quarkus-kafka-server-extension/deployment/pom.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <parent>
6 |         <groupId>com.ozangunalp</groupId>
7 |         <artifactId>quarkus-kafka-server-extension</artifactId>
8 |         <version>999-SNAPSHOT</version>
9 |     </parent>
10 |     <artifactId>quarkus-kafka-server-deployment</artifactId>
11 |     <name>Kafka Server Extension - Deployment</name>
12 | 
13 |     <dependencies>
14 |         <dependency>
15 |             <groupId>io.quarkus</groupId>
16 |             <artifactId>quarkus-arc-deployment</artifactId>
17 |         </dependency>
18 |         <dependency>
19 |             <groupId>com.ozangunalp</groupId>
20 |             <artifactId>quarkus-kafka-server</artifactId>
21 |         </dependency>
22 |         <dependency>
23 |             <groupId>io.quarkus</groupId>
24 |             <artifactId>quarkus-junit5-internal</artifactId>
25 |             <scope>test</scope>
26 |         </dependency>
27 |     </dependencies>
28 |     <build>
29 |         <plugins>
30 |             <plugin>
31 |                 <artifactId>maven-compiler-plugin</artifactId>
32 |                 <configuration>
33 |                     <annotationProcessorPaths>
34 |                         <path>
35 |                             <groupId>io.quarkus</groupId>
36 |                             <artifactId>quarkus-extension-processor</artifactId>
37 |                             <version>${quarkus.platform.version}</version>
38 |                         </path>
39 |                     </annotationProcessorPaths>
40 |                 </configuration>
41 |             </plugin>
42 |         </plugins>
43 |     </build>
44 | </project>
45 | 
-------------------------------------------------------------------------------- /quarkus-kafka-server-extension/pom.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |     <parent>
7 |         <groupId>com.ozangunalp</groupId>
8 |         <artifactId>kafka-native-parent</artifactId>
9 |         <version>999-SNAPSHOT</version>
10 |     </parent>
11 |     <artifactId>quarkus-kafka-server-extension</artifactId>
12 |     <packaging>pom</packaging>
13 |     <name>Kafka Server Extension - Parent</name>
14 | 
15 |     <modules>
16 |         <module>deployment</module>
17 |         <module>runtime</module>
18 |     </modules>
19 | 
20 |     <build>
21 |         <pluginManagement>
22 |             <plugins>
23 |                 <plugin>
24 |                     <groupId>io.quarkus</groupId>
25 |                     <artifactId>quarkus-maven-plugin</artifactId>
26 |                     <version>${quarkus.platform.version}</version>
27 |                 </plugin>
28 |                 <plugin>
29 |                     <artifactId>maven-surefire-plugin</artifactId>
30 |                     <version>${surefire-plugin.version}</version>
31 |                     <configuration>
32 |                         <systemPropertyVariables>
33 |                             <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
34 |                             <maven.home>${maven.home}</maven.home>
35 |                             <maven.repo>${settings.localRepository}</maven.repo>
36 |                         </systemPropertyVariables>
37 |                     </configuration>
38 |                 </plugin>
39 |                 <plugin>
40 |                     <artifactId>maven-failsafe-plugin</artifactId>
41 |                     <version>${failsafe-plugin.version}</version>
42 |                     <configuration>
43 |                         <systemPropertyVariables>
44 |                             <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
45 |                             <maven.home>${maven.home}</maven.home>
46 |                             <maven.repo>${settings.localRepository}</maven.repo>
47 |                         </systemPropertyVariables>
48 |                     </configuration>
49 |                 </plugin>
50 |                 <plugin>
51 |                     <artifactId>maven-compiler-plugin</artifactId>
52 |                     <version>${compiler-plugin.version}</version>
53 |                     <configuration>
54 |                         <compilerArgs>
55 |                             <arg>-parameters</arg>
56 |                         </compilerArgs>
57 |                     </configuration>
58 |                 </plugin>
59 |             </plugins>
60 |         </pluginManagement>
61 |     </build>
62 | </project>
63 | 
-------------------------------------------------------------------------------- /quarkus-kafka-server-extension/runtime/pom.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <parent>
6 |         <groupId>com.ozangunalp</groupId>
7 |         <artifactId>quarkus-kafka-server-extension</artifactId>
8 |         <version>999-SNAPSHOT</version>
9 |     </parent>
10 |     <artifactId>quarkus-kafka-server</artifactId>
11 |     <name>Kafka Server Extension - Runtime</name>
12 | 
13 |     <dependencies>
14 |         <dependency>
15 |             <groupId>io.quarkus</groupId>
16 |             <artifactId>quarkus-arc</artifactId>
17 |         </dependency>
18 |         <dependency>
19 |             <groupId>org.graalvm.sdk</groupId>
20 |             <artifactId>graal-sdk</artifactId>
21 |             <scope>provided</scope>
22 |         </dependency>
23 |         <dependency>
24 |             <groupId>org.apache.kafka</groupId>
25 |             <artifactId>kafka-server-common</artifactId>
26 |         </dependency>
27 |         <dependency>
28 |             <groupId>org.apache.kafka</groupId>
29 |             <artifactId>kafka-storage</artifactId>
30 |         </dependency>
31 |         <dependency>
32 |             <groupId>org.apache.kafka</groupId>
33 |             <artifactId>kafka-group-coordinator</artifactId>
34 |         </dependency>
35 |         <dependency>
36 |             <groupId>org.apache.kafka</groupId>
37 |             <artifactId>kafka-group-coordinator-api</artifactId>
38 |         </dependency>
39 |         <dependency>
40 |             <groupId>org.apache.kafka</groupId>
41 |             <artifactId>kafka_2.13</artifactId>
42 |         </dependency>
43 |         <dependency>
44 |             <groupId>org.apache.kafka</groupId>
45 |             <artifactId>kafka-clients</artifactId>
46 |         </dependency>
47 |         <dependency>
48 |             <groupId>org.jboss.logmanager</groupId>
49 |             <artifactId>log4j2-jboss-logmanager</artifactId>
50 |         </dependency>
51 |         <dependency>
52 |             <groupId>io.strimzi</groupId>
53 |             <artifactId>kafka-oauth-server</artifactId>
54 |         </dependency>
55 |         <dependency>
56 |             <groupId>io.strimzi</groupId>
57 |             <artifactId>kafka-oauth-server-plain</artifactId>
58 |         </dependency>
59 |         <dependency>
60 |             <groupId>io.strimzi</groupId>
61 |             <artifactId>kafka-oauth-client</artifactId>
62 |         </dependency>
63 |     </dependencies>
64 | 
65 |     <build>
66 |         <plugins>
67 |             <plugin>
68 |                 <groupId>io.quarkus</groupId>
69 |                 <artifactId>quarkus-extension-maven-plugin</artifactId>
70 |                 <version>${quarkus.platform.version}</version>
71 |                 <executions>
72 |                     <execution>
73 |                         <phase>compile</phase>
74 |                         <goals>
75 |                             <goal>extension-descriptor</goal>
76 |                         </goals>
77 |                         <configuration>
78 |                             <deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}</deployment>
79 |                         </configuration>
80 |                     </execution>
81 |                 </executions>
82 |             </plugin>
83 |             <plugin>
84 |                 <artifactId>maven-compiler-plugin</artifactId>
85 |                 <configuration>
86 |                     <annotationProcessorPaths>
87 |                         <path>
88 |                             <groupId>io.quarkus</groupId>
89 |                             <artifactId>quarkus-extension-processor</artifactId>
90 |                             <version>${quarkus.platform.version}</version>
91 |                         </path>
92 |                     </annotationProcessorPaths>
93 |                 </configuration>
94 |             </plugin>
95 |         </plugins>
96 |     </build>
97 | </project>
98 | 
-------------------------------------------------------------------------------- /quarkus-kafka-server-extension/runtime/src/main/java/com/ozangunalp/kafka/server/extension/runtime/JsonPathConfigRecorder.java: --------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server.extension.runtime;
2 | 
3 | import java.util.EnumSet;
4 | import java.util.Set;
5 | 
6 | import com.jayway.jsonpath.Configuration;
7 | import com.jayway.jsonpath.Option;
8 | import com.jayway.jsonpath.spi.json.JacksonJsonProvider;
9 | import com.jayway.jsonpath.spi.json.JsonProvider;
10 | import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
11 | import com.jayway.jsonpath.spi.mapper.MappingProvider;
12 | import io.quarkus.runtime.annotations.Recorder;
13 | 
14 | @Recorder
15 | public class JsonPathConfigRecorder {
16 | 
17 |     public void setDefaults() {
18 |         Configuration.setDefaults(new Configuration.Defaults() {
19 | 
20 |             private final JsonProvider jsonProvider = new JacksonJsonProvider();
21 |             private final MappingProvider mappingProvider = new JacksonMappingProvider();
22 | 
23 | 
24 |             @Override
25 |             public JsonProvider jsonProvider() {
26 |                 return jsonProvider;
27 |             }
28 | 
29 |             @Override
30 |             public Set