├── .dockerignore
├── .editorconfig
├── .flake8
├── .github
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release.yml
│   └── workflows
│       ├── main.yml
│       ├── pull_request.yml
│       ├── pull_request_label.yml
│       └── unit_tests.yml
├── .gitignore
├── .gitmodules
├── .licenseignore
├── .spi.yml
├── .swift-format
├── .unacceptablelanguageignore
├── .yamllint.yml
├── Benchmarks
│   ├── Benchmarks
│   │   ├── SwiftKafkaConsumerBenchmarks
│   │   │   ├── KafkaConsumerBenchmark.swift
│   │   │   └── Utilities.swift
│   │   └── SwiftKafkaProducerBenchmarks
│   │       └── KafkaProducerBenchmark.swift
│   └── Package.swift
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── CONTRIBUTORS.txt
├── LICENSE.txt
├── NOTICE.txt
├── Package.swift
├── README.md
├── Sources
│   ├── COpenSSL
│   │   └── module.modulemap
│   ├── Crdkafka
│   │   ├── custom
│   │   │   └── config
│   │   │       ├── config.h
│   │   │       └── dummy
│   │   │           └── empty
│   │   └── include
│   │       └── rdkafka.h
│   ├── Kafka
│   │   ├── Configuration
│   │   │   ├── KafkaConfiguration+Metrics.swift
│   │   │   ├── KafkaConfiguration+Security.swift
│   │   │   ├── KafkaConfiguration.swift
│   │   │   ├── KafkaConsumerConfiguration.swift
│   │   │   ├── KafkaProducerConfiguration.swift
│   │   │   └── KafkaTopicConfiguration.swift
│   │   ├── Data
│   │   │   ├── Array+KafkaContiguousBytes.swift
│   │   │   ├── ByteBuffer+KafkaContiguousBytes.swift
│   │   │   ├── KafkaContiguousBytes.swift
│   │   │   ├── Never+KafkaContiguousBytes.swift
│   │   │   └── String+KafkaContiguousBytes.swift
│   │   ├── ForTesting
│   │   │   ├── RDKafkaClient+Topic.swift
│   │   │   └── TestMessages.swift
│   │   ├── KafkaAcknowledgedMessage.swift
│   │   ├── KafkaConsumer.swift
│   │   ├── KafkaConsumerEvent.swift
│   │   ├── KafkaConsumerMessage.swift
│   │   ├── KafkaDeliveryReport.swift
│   │   ├── KafkaError.swift
│   │   ├── KafkaHeader.swift
│   │   ├── KafkaOffset.swift
│   │   ├── KafkaPartition.swift
│   │   ├── KafkaProducer.swift
│   │   ├── KafkaProducerEvent.swift
│   │   ├── KafkaProducerMessage.swift
│   │   ├── KafkaProducerMessageID.swift
│   │   ├── RDKafka
│   │   │   ├── RDKafkaClient.swift
│   │   │   ├── RDKafkaConfig.swift
│   │   │   ├── RDKafkaEvent.swift
│   │   │   ├── RDKafkaStatistics.swift
│   │   │   ├── RDKafkaTopicConfig.swift
│   │   │   ├── RDKafkaTopicHandles.swift
│   │   │   └── RDKafkaTopicPartitionList.swift
│   │   └── Utilities
│   │       ├── DispatchQueueTaskExecutor.swift
│   │       ├── Duration+Helpers.swift
│   │       ├── NIOAsyncSequenceBackPressureStrategies+NoBackPressure.swift
│   │       └── SendableOpaquePointer.swift
│   └── KafkaFoundationCompat
│       └── Data+KafkaContiguousBytes.swift
├── Tests
│   ├── IntegrationTests
│   │   ├── KafkaTests.swift
│   │   └── Utilities.swift
│   └── KafkaTests
│       ├── KafkaConsumerTests.swift
│       ├── KafkaProducerTests.swift
│       └── Utilities.swift
├── dev
│   ├── git.commit.template
│   ├── test-benchmark-thresholds.sh
│   └── update-benchmark-thresholds.sh
└── docker
    ├── Dockerfile
    ├── docker-compose.2204.510.yaml
    ├── docker-compose.2204.57.yaml
    ├── docker-compose.2204.58.yaml
    ├── docker-compose.2204.59.yaml
    ├── docker-compose.2204.main.yaml
    └── docker-compose.yaml

/.dockerignore:
--------------------------------------------------------------------------------
1 | .build
2 | 
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 | 
3 | [*]
4 | indent_style = space
5 | indent_size = 4
6 | end_of_line = lf
7 | insert_final_newline = true
8 | trim_trailing_whitespace = true
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | 
3 | ignore =
4 |     # These are needed to make our license headers pass the linting
5 |     E265,
6 |     E266,
7 | 
8 |     # 10% larger than the standard
80 character limit. Conforms to the black
9 |     # standard and Bugbear's B950.
10 | max-line-length = 88
11 | 
12 | # Custom rules:
13 | exclude =
14 |     Sources/Crdkafka/
15 |     Sources/COpenSSL/
16 | 
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | _[One line description of your change]_
2 | 
3 | ### Motivation:
4 | 
5 | _[Explain the context here, and why you're making this change. What is the problem you're trying to solve?]_
6 | 
7 | ### Modifications:
8 | 
9 | _[Describe the modifications you've made.]_
10 | 
11 | ### Result:
12 | 
13 | _[Describe what will be different after your change.]_
14 | 
--------------------------------------------------------------------------------
/.github/release.yml:
--------------------------------------------------------------------------------
1 | changelog:
2 |   categories:
3 |     - title: SemVer Major
4 |       labels:
5 |         - ⚠️ semver/major
6 |     - title: SemVer Minor
7 |       labels:
8 |         - 🆕 semver/minor
9 |     - title: SemVer Patch
10 |       labels:
11 |         - 🔨 semver/patch
12 |     - title: Other Changes
13 |       labels:
14 |         - semver/none
15 | 
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Main
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |   schedule:
7 |     - cron: "0 8,20 * * *"
8 | 
9 | jobs:
10 |   unit-tests:
11 |     name: Unit tests
12 |     uses: ./.github/workflows/unit_tests.yml
13 |     with:
14 |       linux_5_10_arguments_override: "--explicit-target-dependency-import-check error"
15 |       linux_6_0_arguments_override: "--explicit-target-dependency-import-check error"
16 |       linux_6_1_arguments_override: "--explicit-target-dependency-import-check error"
17 |       linux_nightly_next_arguments_override: "--explicit-target-dependency-import-check error"
18 |       linux_nightly_main_arguments_override: "--explicit-target-dependency-import-check error"
19 | 
--------------------------------------------------------------------------------
/.github/workflows/pull_request.yml:
--------------------------------------------------------------------------------
1 | name: PR
2 | 
3 | on:
4 |   pull_request:
5 |     types: [opened, reopened, synchronize]
6 | 
7 | jobs:
8 |   soundness:
9 |     name: Soundness
10 |     uses: swiftlang/github-workflows/.github/workflows/soundness.yml@main
11 |     with:
12 |       linux_pre_build_command: "apt-get update -y -q && apt-get install -y -q libsasl2-dev libssl-dev"
13 |       license_header_check_project_name: "swift-kafka-client"
14 | 
15 |   unit-tests:
16 |     name: Unit tests
17 |     uses: ./.github/workflows/unit_tests.yml
18 |     with:
19 |       linux_5_10_arguments_override: "--explicit-target-dependency-import-check error"
20 |       linux_6_0_arguments_override: "--explicit-target-dependency-import-check error"
21 |       linux_6_1_arguments_override: "--explicit-target-dependency-import-check error"
22 |       linux_nightly_next_arguments_override: "--explicit-target-dependency-import-check error"
23 |       linux_nightly_main_arguments_override: "--explicit-target-dependency-import-check error"
24 | 
25 |   construct-cxx-matrix:
26 |     name: Construct Cxx interop matrix
27 |     runs-on: ubuntu-latest
28 |     outputs:
29 |       cxx-interop-matrix: '${{ steps.generate-matrix.outputs.cxx-interop-matrix }}'
30 |     steps:
31 |       - name: Checkout repository
32 |         uses: actions/checkout@v4
33 |         with:
34 |           persist-credentials: false
35 |       - id: generate-matrix
36 |         run: echo
"cxx-interop-matrix=$(curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/generate_matrix.sh | bash)" >> "$GITHUB_OUTPUT" 37 | env: 38 | MATRIX_LINUX_COMMAND: "curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/check-cxx-interop-compatibility.sh | bash" 39 | MATRIX_LINUX_SETUP_COMMAND: "apt-get update -y -q && apt-get install -y -q curl jq libsasl2-dev libssl-dev" 40 | 41 | cxx-interop: 42 | name: Cxx interop 43 | needs: construct-cxx-matrix 44 | uses: apple/swift-nio/.github/workflows/swift_test_matrix.yml@main 45 | with: 46 | name: "Cxx interop" 47 | matrix_string: '${{ needs.construct-cxx-matrix.outputs.cxx-interop-matrix }}' 48 | -------------------------------------------------------------------------------- /.github/workflows/pull_request_label.yml: -------------------------------------------------------------------------------- 1 | name: PR label 2 | 3 | on: 4 | pull_request: 5 | types: [labeled, unlabeled, opened, reopened, synchronize] 6 | 7 | jobs: 8 | semver-label-check: 9 | name: Semantic version label check 10 | runs-on: ubuntu-latest 11 | timeout-minutes: 1 12 | steps: 13 | - name: Checkout repository 14 | uses: actions/checkout@v4 15 | with: 16 | persist-credentials: false 17 | - name: Check for Semantic Version label 18 | uses: apple/swift-nio/.github/actions/pull_request_semver_label_checker@main 19 | -------------------------------------------------------------------------------- /.github/workflows/unit_tests.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | linux_5_9_enabled: 7 | type: boolean 8 | description: "Boolean to enable the Linux 5.9 Swift version matrix job. Defaults to false." 9 | default: false 10 | linux_5_9_arguments_override: 11 | type: string 12 | description: "The arguments passed to swift test in the Linux 5.9 Swift version matrix job." 13 | default: "" 14 | linux_5_10_enabled: 15 | type: boolean 16 | description: "Boolean to enable the Linux 5.10 Swift version matrix job. Defaults to true." 17 | default: true 18 | linux_5_10_arguments_override: 19 | type: string 20 | description: "The arguments passed to swift test in the Linux 5.10 Swift version matrix job." 21 | default: "" 22 | linux_6_0_enabled: 23 | type: boolean 24 | description: "Boolean to enable the Linux 6.0 Swift version matrix job. Defaults to true." 25 | default: true 26 | linux_6_0_arguments_override: 27 | type: string 28 | description: "The arguments passed to swift test in the Linux 6.0 Swift version matrix job." 29 | default: "" 30 | linux_6_1_enabled: 31 | type: boolean 32 | description: "Boolean to enable the Linux 6.1 Swift version matrix job. Defaults to true." 33 | default: true 34 | linux_6_1_arguments_override: 35 | type: string 36 | description: "The arguments passed to swift test in the Linux 6.1 Swift version matrix job." 37 | default: "" 38 | linux_nightly_next_enabled: 39 | type: boolean 40 | description: "Boolean to enable the Linux nightly next Swift version matrix job. Defaults to true." 41 | default: true 42 | linux_nightly_next_arguments_override: 43 | type: string 44 | description: "The arguments passed to swift test in the Linux nightly next Swift version matrix job." 45 | default: "" 46 | linux_nightly_main_enabled: 47 | type: boolean 48 | description: "Boolean to enable the Linux nightly main Swift version matrix job. Defaults to true." 
49 | default: true 50 | linux_nightly_main_arguments_override: 51 | type: string 52 | description: "The arguments passed to swift test in the Linux nightly main Swift version matrix job." 53 | default: "" 54 | 55 | jobs: 56 | unit-tests: 57 | name: Unit tests (${{ matrix.swift.swift_version }}) 58 | runs-on: ubuntu-latest 59 | strategy: 60 | fail-fast: false 61 | matrix: 62 | # We are specifying only the major and minor of the docker images to automatically pick up the latest patch release 63 | swift: 64 | - image: "swift:5.9-jammy" 65 | swift_version: "5.9" 66 | enabled: ${{ inputs.linux_5_9_enabled }} 67 | - image: "swift:5.10-jammy" 68 | swift_version: "5.10" 69 | enabled: ${{ inputs.linux_5_10_enabled }} 70 | - image: "swift:6.0-jammy" 71 | swift_version: "6.0" 72 | enabled: ${{ inputs.linux_6_0_enabled }} 73 | - image: "swift:6.1-jammy" 74 | swift_version: "6.1" 75 | enabled: ${{ inputs.linux_6_1_enabled }} 76 | - image: "swiftlang/swift:nightly-6.1-jammy" 77 | swift_version: "nightly-6.1" 78 | enabled: ${{ inputs.linux_nightly_next_enabled }} 79 | - image: "swiftlang/swift:nightly-main-jammy" 80 | swift_version: "nightly-main" 81 | enabled: ${{ inputs.linux_nightly_main_enabled }} 82 | steps: 83 | - name: Checkout repository 84 | if: ${{ matrix.swift.enabled }} 85 | uses: actions/checkout@v4 86 | with: 87 | persist-credentials: false 88 | submodules: true 89 | - name: Mark the workspace as safe 90 | if: ${{ matrix.swift.enabled }} 91 | # https://github.com/actions/checkout/issues/766 92 | run: git config --global --add safe.directory ${GITHUB_WORKSPACE} 93 | - name: Run matrix job 94 | if: ${{ matrix.swift.enabled }} 95 | env: 96 | SWIFT_VERSION: ${{ matrix.swift.swift_version }} 97 | COMMAND: "swift test" 98 | COMMAND_OVERRIDE_5_9: "swift test ${{ inputs.linux_5_9_arguments_override }}" 99 | COMMAND_OVERRIDE_5_10: "swift test ${{ inputs.linux_5_10_arguments_override }}" 100 | COMMAND_OVERRIDE_6_0: "swift test ${{ inputs.linux_6_0_arguments_override }}" 101 | COMMAND_OVERRIDE_6_1: "swift test ${{ inputs.linux_6_1_arguments_override }}" 102 | COMMAND_OVERRIDE_NIGHTLY_NEXT: "swift test ${{ inputs.linux_nightly_next_arguments_override }}" 103 | COMMAND_OVERRIDE_NIGHTLY_MAIN: "swift test ${{ inputs.linux_nightly_main_arguments_override }}" 104 | run: | 105 | apt-get -qq update && apt-get -qq -y install curl && apt-get -y install libsasl2-dev libssl-dev 106 | curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/check-matrix-job.sh | bash 107 | container: 108 | image: ${{ matrix.swift.image }} 109 | services: 110 | zookeeper: 111 | image: ubuntu/zookeeper 112 | kafka: 113 | image: ubuntu/kafka 114 | env: 115 | ZOOKEEPER_HOST: zookeeper 116 | env: 117 | KAFKA_HOST: kafka 118 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /.build 3 | /Benchmarks/.build 4 | /Packages 5 | /*.xcodeproj 6 | xcuserdata/ 7 | DerivedData/ 8 | .netrc 9 | Package.resolved 10 | .*.sw? 
11 | .swiftpm 12 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "Sources/Crdkafka/librdkafka"] 2 | path = Sources/Crdkafka/librdkafka 3 | url = https://github.com/confluentinc/librdkafka 4 | -------------------------------------------------------------------------------- /.licenseignore: -------------------------------------------------------------------------------- 1 | .dockerignore 2 | .gitignore 3 | **/.gitignore 4 | .licenseignore 5 | .gitattributes 6 | .gitmodules 7 | .git-blame-ignore-revs 8 | .mailfilter 9 | .mailmap 10 | .spi.yml 11 | .swift-format 12 | .swiftformatignore 13 | .editorconfig 14 | .yamlignore 15 | .github/* 16 | .yamllint.yml 17 | .flake8 18 | *.md 19 | *.txt 20 | *.yml 21 | *.yaml 22 | *.json 23 | Package.swift 24 | **/Package.swift 25 | Package@-*.swift 26 | **/Package@-*.swift 27 | Package.resolved 28 | **/Package.resolved 29 | Makefile 30 | *.modulemap 31 | **/*.modulemap 32 | **/*.docc/* 33 | *.xcprivacy 34 | **/*.xcprivacy 35 | *.symlink 36 | **/*.symlink 37 | Dockerfile 38 | **/Dockerfile 39 | Snippets/* 40 | dev/git.commit.template 41 | .unacceptablelanguageignore 42 | Sources/Crdkafka/* 43 | Sources/COpenSSL/* 44 | -------------------------------------------------------------------------------- /.spi.yml: -------------------------------------------------------------------------------- 1 | version: 1 2 | builder: 3 | configs: 4 | - documentation_targets: [Kafka, KafkaFoundationCompat] 5 | -------------------------------------------------------------------------------- /.swift-format: -------------------------------------------------------------------------------- 1 | { 2 | "version" : 1, 3 | "indentation" : { 4 | "spaces" : 4 5 | }, 6 | "tabWidth" : 4, 7 | "fileScopedDeclarationPrivacy" : { 8 | "accessLevel" : "private" 9 | }, 10 | "spacesAroundRangeFormationOperators" : false, 11 | "indentConditionalCompilationBlocks" : false, 12 | "indentSwitchCaseLabels" : false, 13 | "lineBreakAroundMultilineExpressionChainComponents" : false, 14 | "lineBreakBeforeControlFlowKeywords" : false, 15 | "lineBreakBeforeEachArgument" : true, 16 | "lineBreakBeforeEachGenericRequirement" : true, 17 | "lineLength" : 120, 18 | "maximumBlankLines" : 1, 19 | "respectsExistingLineBreaks" : true, 20 | "prioritizeKeepingFunctionOutputTogether" : true, 21 | "noAssignmentInExpressions" : { 22 | "allowedFunctions" : [ 23 | "XCTAssertNoThrow", 24 | "XCTAssertThrowsError" 25 | ] 26 | }, 27 | "rules" : { 28 | "AllPublicDeclarationsHaveDocumentation" : false, 29 | "AlwaysUseLiteralForEmptyCollectionInit" : false, 30 | "AlwaysUseLowerCamelCase" : false, 31 | "AmbiguousTrailingClosureOverload" : true, 32 | "BeginDocumentationCommentWithOneLineSummary" : false, 33 | "DoNotUseSemicolons" : true, 34 | "DontRepeatTypeInStaticProperties" : true, 35 | "FileScopedDeclarationPrivacy" : true, 36 | "FullyIndirectEnum" : true, 37 | "GroupNumericLiterals" : true, 38 | "IdentifiersMustBeASCII" : true, 39 | "NeverForceUnwrap" : false, 40 | "NeverUseForceTry" : false, 41 | "NeverUseImplicitlyUnwrappedOptionals" : false, 42 | "NoAccessLevelOnExtensionDeclaration" : true, 43 | "NoAssignmentInExpressions" : true, 44 | "NoBlockComments" : true, 45 | "NoCasesWithOnlyFallthrough" : true, 46 | "NoEmptyTrailingClosureParentheses" : true, 47 | "NoLabelsInCasePatterns" : true, 48 | "NoLeadingUnderscores" : false, 49 | "NoParensAroundConditions" : true, 50 | 
"NoVoidReturnOnFunctionSignature" : true, 51 | "OmitExplicitReturns" : true, 52 | "OneCasePerLine" : true, 53 | "OneVariableDeclarationPerLine" : true, 54 | "OnlyOneTrailingClosureArgument" : true, 55 | "OrderedImports" : true, 56 | "ReplaceForEachWithForLoop" : true, 57 | "ReturnVoidInsteadOfEmptyTuple" : true, 58 | "UseEarlyExits" : false, 59 | "UseExplicitNilCheckInConditions" : false, 60 | "UseLetInEveryBoundCaseVariable" : false, 61 | "UseShorthandTypeNames" : true, 62 | "UseSingleLinePropertyGetter" : false, 63 | "UseSynthesizedInitializer" : false, 64 | "UseTripleSlashForDocumentationComments" : true, 65 | "UseWhereClausesInForLoops" : false, 66 | "ValidateDocumentationComments" : false 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /.unacceptablelanguageignore: -------------------------------------------------------------------------------- 1 | Sources/Crdkafka/* 2 | Sources/COpenSSL/* 3 | -------------------------------------------------------------------------------- /.yamllint.yml: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | line-length: false 5 | document-start: false 6 | truthy: 7 | check-keys: false # Otherwise we get a false positive on GitHub action's `on` key 8 | 9 | # Custom ignores 10 | ignore: | 11 | Sources/Crdkafka/ 12 | Sources/COpenSSL/ 13 | -------------------------------------------------------------------------------- /Benchmarks/Benchmarks/SwiftKafkaConsumerBenchmarks/KafkaConsumerBenchmark.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Benchmark 16 | import Crdkafka 17 | import Dispatch 18 | import Kafka 19 | import Logging 20 | import ServiceLifecycle 21 | 22 | import struct Foundation.Date 23 | import struct Foundation.UUID 24 | 25 | let benchmarks = { 26 | var uniqueTestTopic: String! 
27 | let messageCount: UInt = 1000 28 | 29 | Benchmark.defaultConfiguration = .init( 30 | metrics: [ 31 | .wallClock, 32 | .cpuTotal, 33 | .contextSwitches, 34 | .throughput, 35 | .allocatedResidentMemory, 36 | ] + .arc, 37 | warmupIterations: 0, 38 | scalingFactor: .one, 39 | maxDuration: .seconds(5), 40 | maxIterations: 100, 41 | thresholds: [ 42 | .wallClock: .init(relative: [.p90: 35]), 43 | .cpuTotal: .init(relative: [.p90: 35]), 44 | .allocatedResidentMemory: .init(relative: [.p90: 20]), 45 | .contextSwitches: .init(relative: [.p90: 35]), 46 | .throughput: .init(relative: [.p90: 35]), 47 | .objectAllocCount: .init(relative: [.p90: 20]), 48 | .retainCount: .init(relative: [.p90: 20]), 49 | .releaseCount: .init(relative: [.p90: 20]), 50 | .retainReleaseDelta: .init(relative: [.p90: 20]), 51 | ] 52 | ) 53 | 54 | Benchmark.setup = { 55 | uniqueTestTopic = try await prepareTopic(messagesCount: messageCount, partitions: 6) 56 | } 57 | 58 | Benchmark.teardown = { 59 | if let uniqueTestTopic { 60 | try deleteTopic(uniqueTestTopic) 61 | } 62 | uniqueTestTopic = nil 63 | } 64 | 65 | Benchmark("SwiftKafkaConsumer_basic_consumer_messages_\(messageCount)") { benchmark in 66 | let uniqueGroupID = UUID().uuidString 67 | var consumerConfig = KafkaConsumerConfiguration( 68 | consumptionStrategy: .group( 69 | id: uniqueGroupID, 70 | topics: [uniqueTestTopic] 71 | ), 72 | bootstrapBrokerAddresses: [brokerAddress] 73 | ) 74 | consumerConfig.autoOffsetReset = .beginning 75 | consumerConfig.broker.addressFamily = .v4 76 | // We must specify it at least 10 otherwise CI will timeout 77 | consumerConfig.pollInterval = .milliseconds(1) 78 | 79 | let consumer = try KafkaConsumer( 80 | configuration: consumerConfig, 81 | logger: .perfLogger 82 | ) 83 | 84 | let serviceGroupConfiguration = ServiceGroupConfiguration( 85 | services: [consumer], 86 | gracefulShutdownSignals: [.sigterm, .sigint], 87 | logger: .perfLogger 88 | ) 89 | let serviceGroup = ServiceGroup(configuration: serviceGroupConfiguration) 90 | 91 | try await withThrowingTaskGroup(of: Void.self) { group in 92 | benchLog("Start consuming") 93 | defer { 94 | benchLog("Finish consuming") 95 | } 96 | // Run Task 97 | group.addTask { 98 | try await serviceGroup.run() 99 | } 100 | 101 | // Second Consumer Task 102 | group.addTask { 103 | var ctr: UInt64 = 0 104 | var tmpCtr: UInt64 = 0 105 | let interval: UInt64 = Swift.max(UInt64(messageCount / 20), 1) 106 | let totalStartDate = Date.timeIntervalSinceReferenceDate 107 | var totalBytes: UInt64 = 0 108 | 109 | try await benchmark.withMeasurement { 110 | for try await record in consumer.messages { 111 | ctr += 1 112 | totalBytes += UInt64(record.value.readableBytes) 113 | 114 | tmpCtr += 1 115 | if tmpCtr >= interval { 116 | benchLog("read \(ctr * 100 / UInt64(messageCount))%") 117 | tmpCtr = 0 118 | } 119 | if ctr >= messageCount { 120 | break 121 | } 122 | } 123 | } 124 | 125 | let timeIntervalTotal = Date.timeIntervalSinceReferenceDate - totalStartDate 126 | let avgRateMb = Double(totalBytes) / timeIntervalTotal / 1024 127 | benchLog( 128 | "All read up to ctr: \(ctr), avgRate: (\(Int(avgRateMb))KB/s), timePassed: \(Int(timeIntervalTotal))sec" 129 | ) 130 | } 131 | 132 | // Wait for second Consumer Task to complete 133 | try await group.next() 134 | // Shutdown the serviceGroup 135 | await serviceGroup.triggerGracefulShutdown() 136 | } 137 | } 138 | 139 | Benchmark("SwiftKafkaConsumer_with_offset_commit_messages_\(messageCount)") { benchmark in 140 | let uniqueGroupID = UUID().uuidString 141 | var 
consumerConfig = KafkaConsumerConfiguration( 142 | consumptionStrategy: .group( 143 | id: uniqueGroupID, 144 | topics: [uniqueTestTopic] 145 | ), 146 | bootstrapBrokerAddresses: [brokerAddress] 147 | ) 148 | consumerConfig.autoOffsetReset = .beginning 149 | consumerConfig.broker.addressFamily = .v4 150 | consumerConfig.isAutoCommitEnabled = false 151 | // We must specify it at least 10 otherwise CI will timeout 152 | consumerConfig.pollInterval = .milliseconds(1) 153 | 154 | let consumer = try KafkaConsumer( 155 | configuration: consumerConfig, 156 | logger: .perfLogger 157 | ) 158 | 159 | let serviceGroupConfiguration = ServiceGroupConfiguration( 160 | services: [consumer], 161 | gracefulShutdownSignals: [.sigterm, .sigint], 162 | logger: .perfLogger 163 | ) 164 | let serviceGroup = ServiceGroup(configuration: serviceGroupConfiguration) 165 | 166 | try await withThrowingTaskGroup(of: Void.self) { group in 167 | benchLog("Start consuming") 168 | defer { 169 | benchLog("Finish consuming") 170 | } 171 | // Run Task 172 | group.addTask { 173 | try await serviceGroup.run() 174 | } 175 | 176 | // Second Consumer Task 177 | group.addTask { 178 | var ctr: UInt64 = 0 179 | var tmpCtr: UInt64 = 0 180 | let interval: UInt64 = Swift.max(UInt64(messageCount / 20), 1) 181 | let totalStartDate = Date.timeIntervalSinceReferenceDate 182 | var totalBytes: UInt64 = 0 183 | 184 | try await benchmark.withMeasurement { 185 | for try await record in consumer.messages { 186 | try consumer.scheduleCommit(record) 187 | 188 | ctr += 1 189 | totalBytes += UInt64(record.value.readableBytes) 190 | 191 | tmpCtr += 1 192 | if tmpCtr >= interval { 193 | benchLog("read \(ctr * 100 / UInt64(messageCount))%") 194 | tmpCtr = 0 195 | } 196 | if ctr >= messageCount { 197 | break 198 | } 199 | } 200 | } 201 | 202 | let timeIntervalTotal = Date.timeIntervalSinceReferenceDate - totalStartDate 203 | let avgRateMb = Double(totalBytes) / timeIntervalTotal / 1024 204 | benchLog( 205 | "All read up to ctr: \(ctr), avgRate: (\(Int(avgRateMb))KB/s), timePassed: \(Int(timeIntervalTotal))sec" 206 | ) 207 | } 208 | 209 | // Wait for second Consumer Task to complete 210 | try await group.next() 211 | // Shutdown the serviceGroup 212 | await serviceGroup.triggerGracefulShutdown() 213 | } 214 | } 215 | 216 | Benchmark("librdkafka_basic_consumer_messages_\(messageCount)") { benchmark in 217 | let uniqueGroupID = UUID().uuidString 218 | let rdKafkaConsumerConfig: [String: String] = [ 219 | "group.id": uniqueGroupID, 220 | "bootstrap.servers": "\(brokerAddress.host):\(brokerAddress.port)", 221 | "broker.address.family": "v4", 222 | "auto.offset.reset": "beginning", 223 | ] 224 | 225 | let configPointer: OpaquePointer = rd_kafka_conf_new() 226 | for (key, value) in rdKafkaConsumerConfig { 227 | precondition(rd_kafka_conf_set(configPointer, key, value, nil, 0) == RD_KAFKA_CONF_OK) 228 | } 229 | 230 | let kafkaHandle = rd_kafka_new(RD_KAFKA_CONSUMER, configPointer, nil, 0) 231 | guard let kafkaHandle else { 232 | preconditionFailure("Kafka handle was not created") 233 | } 234 | defer { 235 | rd_kafka_destroy(kafkaHandle) 236 | } 237 | 238 | rd_kafka_poll_set_consumer(kafkaHandle) 239 | let subscriptionList = rd_kafka_topic_partition_list_new(1) 240 | defer { 241 | rd_kafka_topic_partition_list_destroy(subscriptionList) 242 | } 243 | rd_kafka_topic_partition_list_add( 244 | subscriptionList, 245 | uniqueTestTopic, 246 | RD_KAFKA_PARTITION_UA 247 | ) 248 | rd_kafka_subscribe(kafkaHandle, subscriptionList) 249 | rd_kafka_poll(kafkaHandle, 0) 250 | 251 
| var ctr: UInt64 = 0 252 | var tmpCtr: UInt64 = 0 253 | 254 | let interval: UInt64 = Swift.max(UInt64(messageCount / 20), 1) 255 | let totalStartDate = Date.timeIntervalSinceReferenceDate 256 | var totalBytes: UInt64 = 0 257 | 258 | benchmark.withMeasurement { 259 | while ctr < messageCount { 260 | guard let record = rd_kafka_consumer_poll(kafkaHandle, 10) else { 261 | continue 262 | } 263 | defer { 264 | rd_kafka_message_destroy(record) 265 | } 266 | ctr += 1 267 | totalBytes += UInt64(record.pointee.len) 268 | 269 | tmpCtr += 1 270 | if tmpCtr >= interval { 271 | benchLog("read \(ctr * 100 / UInt64(messageCount))%") 272 | tmpCtr = 0 273 | } 274 | } 275 | } 276 | 277 | rd_kafka_consumer_close(kafkaHandle) 278 | 279 | let timeIntervalTotal = Date.timeIntervalSinceReferenceDate - totalStartDate 280 | let avgRateMb = Double(totalBytes) / timeIntervalTotal / 1024 281 | benchLog( 282 | "All read up to ctr: \(ctr), avgRate: (\(Int(avgRateMb))KB/s), timePassed: \(Int(timeIntervalTotal))sec" 283 | ) 284 | } 285 | 286 | Benchmark("librdkafka_with_offset_commit_messages_\(messageCount)") { benchmark in 287 | let uniqueGroupID = UUID().uuidString 288 | let rdKafkaConsumerConfig: [String: String] = [ 289 | "group.id": uniqueGroupID, 290 | "bootstrap.servers": "\(brokerAddress.host):\(brokerAddress.port)", 291 | "broker.address.family": "v4", 292 | "auto.offset.reset": "beginning", 293 | "enable.auto.commit": "false", 294 | ] 295 | 296 | let configPointer: OpaquePointer = rd_kafka_conf_new() 297 | for (key, value) in rdKafkaConsumerConfig { 298 | precondition(rd_kafka_conf_set(configPointer, key, value, nil, 0) == RD_KAFKA_CONF_OK) 299 | } 300 | 301 | let kafkaHandle = rd_kafka_new(RD_KAFKA_CONSUMER, configPointer, nil, 0) 302 | guard let kafkaHandle else { 303 | preconditionFailure("Kafka handle was not created") 304 | } 305 | defer { 306 | rd_kafka_destroy(kafkaHandle) 307 | } 308 | 309 | rd_kafka_poll_set_consumer(kafkaHandle) 310 | let subscriptionList = rd_kafka_topic_partition_list_new(1) 311 | defer { 312 | rd_kafka_topic_partition_list_destroy(subscriptionList) 313 | } 314 | rd_kafka_topic_partition_list_add( 315 | subscriptionList, 316 | uniqueTestTopic, 317 | RD_KAFKA_PARTITION_UA 318 | ) 319 | rd_kafka_subscribe(kafkaHandle, subscriptionList) 320 | rd_kafka_poll(kafkaHandle, 0) 321 | 322 | var ctr: UInt64 = 0 323 | var tmpCtr: UInt64 = 0 324 | 325 | let interval: UInt64 = Swift.max(UInt64(messageCount / 20), 1) 326 | let totalStartDate = Date.timeIntervalSinceReferenceDate 327 | var totalBytes: UInt64 = 0 328 | 329 | benchmark.withMeasurement { 330 | while ctr < messageCount { 331 | guard let record = rd_kafka_consumer_poll(kafkaHandle, 10) else { 332 | continue 333 | } 334 | defer { 335 | rd_kafka_message_destroy(record) 336 | } 337 | guard record.pointee.err != RD_KAFKA_RESP_ERR__PARTITION_EOF else { 338 | continue 339 | } 340 | let result = rd_kafka_commit_message(kafkaHandle, record, 0) 341 | precondition(result == RD_KAFKA_RESP_ERR_NO_ERROR) 342 | 343 | ctr += 1 344 | totalBytes += UInt64(record.pointee.len) 345 | 346 | tmpCtr += 1 347 | if tmpCtr >= interval { 348 | benchLog("read \(ctr * 100 / UInt64(messageCount))%") 349 | tmpCtr = 0 350 | } 351 | } 352 | } 353 | 354 | rd_kafka_consumer_close(kafkaHandle) 355 | 356 | let timeIntervalTotal = Date.timeIntervalSinceReferenceDate - totalStartDate 357 | let avgRateMb = Double(totalBytes) / timeIntervalTotal / 1024 358 | benchLog( 359 | "All read up to ctr: \(ctr), avgRate: (\(Int(avgRateMb))KB/s), timePassed: 
\(Int(timeIntervalTotal))sec" 360 | ) 361 | } 362 | } 363 | -------------------------------------------------------------------------------- /Benchmarks/Benchmarks/SwiftKafkaConsumerBenchmarks/Utilities.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Benchmark 16 | import Kafka 17 | @_spi(Internal) import Kafka 18 | import Logging 19 | import ServiceLifecycle 20 | 21 | import class Foundation.ProcessInfo 22 | import struct Foundation.UUID 23 | 24 | let brokerAddress = KafkaConfiguration.BrokerAddress( 25 | host: ProcessInfo.processInfo.environment["KAFKA_HOST"] ?? "localhost", 26 | port: 9092 27 | ) 28 | 29 | // swift-format-ignore: DontRepeatTypeInStaticProperties 30 | extension Logger { 31 | static let perfLogger = { 32 | var logger = Logger(label: "perf logger") 33 | logger.logLevel = .critical 34 | return logger 35 | }() 36 | } 37 | 38 | // For perf tests debugging 39 | func benchLog(_ log: @autoclosure () -> Logger.Message) { 40 | #if DEBUG 41 | Logger.perfLogger.info(log()) 42 | #endif 43 | } 44 | 45 | func createTopic(partitions: Int32) throws -> String { 46 | var basicConfig = KafkaConsumerConfiguration( 47 | consumptionStrategy: .group(id: "no-group", topics: []), 48 | bootstrapBrokerAddresses: [brokerAddress] 49 | ) 50 | basicConfig.broker.addressFamily = .v4 51 | 52 | let client = try RDKafkaClient.makeClientForTopics(config: basicConfig, logger: .perfLogger) 53 | return try client._createUniqueTopic(partitions: partitions, timeout: 10 * 1000) 54 | } 55 | 56 | func deleteTopic(_ topic: String) throws { 57 | var basicConfig = KafkaConsumerConfiguration( 58 | consumptionStrategy: .group(id: "no-group", topics: []), 59 | bootstrapBrokerAddresses: [brokerAddress] 60 | ) 61 | basicConfig.broker.addressFamily = .v4 62 | 63 | let client = try RDKafkaClient.makeClientForTopics(config: basicConfig, logger: .perfLogger) 64 | try client._deleteTopic(topic, timeout: 10 * 1000) 65 | } 66 | 67 | func prepareTopic(messagesCount: UInt, partitions: Int32 = -1, logger: Logger = .perfLogger) async throws -> String { 68 | let uniqueTestTopic = try createTopic(partitions: partitions) 69 | 70 | benchLog("Created topic \(uniqueTestTopic)") 71 | 72 | benchLog("Generating \(messagesCount) messages") 73 | let testMessages = _createTestMessages(topic: uniqueTestTopic, count: messagesCount) 74 | benchLog("Finish generating \(messagesCount) messages") 75 | 76 | var producerConfig = KafkaProducerConfiguration(bootstrapBrokerAddresses: [brokerAddress]) 77 | producerConfig.broker.addressFamily = .v4 78 | 79 | let (producer, acks) = try KafkaProducer.makeProducerWithEvents(configuration: producerConfig, logger: logger) 80 | 81 | let serviceGroupConfiguration = ServiceGroupConfiguration( 82 | services: [producer], 83 | gracefulShutdownSignals: [.sigterm, .sigint], 84 | logger: logger 85 | ) 86 | let serviceGroup = ServiceGroup(configuration: serviceGroupConfiguration) 87 | 88 | try await 
withThrowingTaskGroup(of: Void.self) { group in
89 |         benchLog("Start producing \(messagesCount) messages")
90 |         defer {
91 |             benchLog("Finish producing")
92 |         }
93 |         // Run Task
94 |         group.addTask {
95 |             try await serviceGroup.run()
96 |         }
97 | 
98 |         // Producer Task
99 |         group.addTask {
100 |             try await _sendAndAcknowledgeMessages(
101 |                 producer: producer,
102 |                 events: acks,
103 |                 messages: testMessages,
104 |                 skipConsistencyCheck: true
105 |             )
106 |         }
107 | 
108 |         // Wait for Producer Task to complete
109 |         try await group.next()
110 |         await serviceGroup.triggerGracefulShutdown()
111 |     }
112 | 
113 |     return uniqueTestTopic
114 | }
115 | 
116 | // swift-format-ignore: AmbiguousTrailingClosureOverload
117 | extension Benchmark {
118 |     @discardableResult
119 |     func withMeasurement<T>(_ body: () throws -> T) rethrows -> T {
120 |         self.startMeasurement()
121 |         defer {
122 |             self.stopMeasurement()
123 |         }
124 |         return try body()
125 |     }
126 | 
127 |     @discardableResult
128 |     func withMeasurement<T>(_ body: () async throws -> T) async rethrows -> T {
129 |         self.startMeasurement()
130 |         defer {
131 |             self.stopMeasurement()
132 |         }
133 |         return try await body()
134 |     }
135 | }
136 | 
--------------------------------------------------------------------------------
/Benchmarks/Benchmarks/SwiftKafkaProducerBenchmarks/KafkaProducerBenchmark.swift:
--------------------------------------------------------------------------------
1 | //===----------------------------------------------------------------------===//
2 | //
3 | // This source file is part of the swift-kafka-client open source project
4 | //
5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
6 | // Licensed under Apache License v2.0
7 | //
8 | // See LICENSE.txt for license information
9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Benchmark
16 | import Crdkafka
17 | import Kafka
18 | 
19 | let benchmarks = {
20 |     Benchmark.defaultConfiguration = .init(
21 |         metrics: [.wallClock, .cpuTotal, .allocatedResidentMemory, .contextSwitches, .throughput] + .arc,
22 |         warmupIterations: 0,
23 |         scalingFactor: .one,
24 |         maxDuration: .seconds(5),
25 |         maxIterations: 100,
26 |         thresholds: [
27 |             // Thresholds are mostly a wild guess; they will have to be adjusted over time.
28 |             .wallClock: .init(relative: [.p90: 10]),
29 |             .cpuTotal: .init(relative: [.p90: 10]),
30 |             .allocatedResidentMemory: .init(relative: [.p90: 20]),
31 |             .contextSwitches: .init(relative: [.p90: 10]),
32 |             .throughput: .init(relative: [.p90: 10]),
33 |             .objectAllocCount: .init(relative: [.p90: 10]),
34 |             .retainCount: .init(relative: [.p90: 10]),
35 |             .releaseCount: .init(relative: [.p90: 10]),
36 |             .retainReleaseDelta: .init(relative: [.p90: 10]),
37 |         ]
38 |     )
39 | 
40 |     Benchmark.setup = {}
41 | 
42 |     Benchmark.teardown = {}
43 | }
44 | 
--------------------------------------------------------------------------------
/Benchmarks/Package.swift:
--------------------------------------------------------------------------------
1 | // swift-tools-version:5.10
2 | //===----------------------------------------------------------------------===//
3 | //
4 | // This source file is part of the swift-kafka-client open source project
5 | //
6 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors
7 | // Licensed under Apache License v2.0
8 | //
9 | // See LICENSE.txt for license information
10 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
11 | //
12 | // SPDX-License-Identifier: Apache-2.0
13 | //
14 | //===----------------------------------------------------------------------===//
15 | 
16 | import PackageDescription
17 | 
18 | let package = Package(
19 |     name: "benchmarks",
20 |     platforms: [
21 |         .macOS(.v13)
22 |     ],
23 |     dependencies: [
24 |         .package(path: "../"),
25 |         .package(url: "https://github.com/ordo-one/package-benchmark.git", from: "1.22.3"),
26 |     ],
27 |     targets: [
28 |         .executableTarget(
29 |             name: "SwiftKafkaConsumerBenchmarks",
30 |             dependencies: [
31 |                 .product(name: "Benchmark", package: "package-benchmark"),
32 |                 .product(name: "Kafka", package: "swift-kafka-client"),
33 |             ],
34 |             path: "Benchmarks/SwiftKafkaConsumerBenchmarks",
35 |             plugins: [
36 |                 .plugin(name: "BenchmarkPlugin", package: "package-benchmark")
37 |             ]
38 |         ),
39 |         .executableTarget(
40 |             name: "SwiftKafkaProducerBenchmarks",
41 |             dependencies: [
42 |                 .product(name: "Benchmark", package: "package-benchmark"),
43 |                 .product(name: "Kafka", package: "swift-kafka-client"),
44 |             ],
45 |             path: "Benchmarks/SwiftKafkaProducerBenchmarks",
46 |             plugins: [
47 |                 .plugin(name: "BenchmarkPlugin", package: "package-benchmark")
48 |             ]
49 |         ),
50 |     ]
51 | )
52 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 | 
3 | The code of conduct for this project can be found at https://swift.org/code-of-conduct.
4 | 
5 | 
6 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Legal
2 | 
3 | By submitting a pull request, you represent that you have the right to license
4 | your contribution to Apple and the community, and agree by submitting the patch
5 | that your contributions are licensed under the Apache 2.0 license (see
6 | `LICENSE.txt`).
7 | 
8 | 
9 | ## How to submit a bug report
10 | 
11 | Please make sure to specify the following:
12 | 
13 | * swift-kafka-client commit hash
14 | * Contextual information (e.g. what you were trying to achieve with swift-kafka-client)
15 | * Simplest possible steps to reproduce
16 |   * The more complex the steps are, the lower the priority will be.
17 |   * A pull request with a failing test case is preferred, but it's also fine to paste the test case into the issue description.
18 | * Anything that might be relevant in your opinion, such as:
19 |   * Swift version or the output of `swift --version`
20 |   * OS version and the output of `uname -a`
21 |   * Network configuration
22 | 
23 | 
24 | ### Example
25 | 
26 | ```
27 | swift-kafka-client commit hash: 22ec043dc9d24bb011b47ece4f9ee97ee5be2757
28 | 
29 | Context:
30 | While load testing my program written with swift-kafka-client, I noticed
31 | that one file descriptor is leaked per request.
32 | 
33 | Steps to reproduce:
34 | 1. ...
35 | 2. ...
36 | 3. ...
37 | 4. ...
38 | 39 | $ swift --version 40 | Swift version 4.0.2 (swift-4.0.2-RELEASE) 41 | Target: x86_64-unknown-linux-gnu 42 | 43 | Operating system: Ubuntu Linux 16.04 64-bit 44 | 45 | $ uname -a 46 | Linux beefy.machine 4.4.0-101-generic #124-Ubuntu SMP Fri Nov 10 18:29:59 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux 47 | 48 | My system has IPv6 disabled. 49 | ``` 50 | 51 | ## Writing a Patch 52 | 53 | A good swift-kafka-client patch is: 54 | 55 | 1. Concise, and contains as few changes as needed to achieve the end result. 56 | 2. Tested, ensuring that any tests provided failed before the patch and pass after it. 57 | 3. Documented, adding API documentation as needed to cover new functions and properties. 58 | 4. Accompanied by a great commit message, using our commit message template. 59 | 60 | ### Commit Message Template 61 | 62 | We require that your commit messages match our template. The easiest way to do that is to get git to help you by explicitly using the template. To do that, `cd` to the root of our repository and run: 63 | 64 | git config commit.template dev/git.commit.template 65 | 66 | ### Run CI checks locally 67 | 68 | You can run the GitHub Actions workflows locally using [act](https://github.com/nektos/act). For detailed steps on how to do this please see [https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally](https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally). 69 | 70 | ## How to contribute your work 71 | 72 | Please open a pull request at https://github.com/swift-server/swift-kafka-client. Make sure the CI passes, and then wait for code review. 73 | -------------------------------------------------------------------------------- /CONTRIBUTORS.txt: -------------------------------------------------------------------------------- 1 | For the purpose of tracking copyright, this is the list of individuals and 2 | organizations who have contributed source code to swift-kafka-client. 3 | 4 | For employees of an organization/company where the copyright of work done 5 | by employees of that company is held by the company itself, only the company 6 | needs to be listed here. 7 | 8 | ## COPYRIGHT HOLDERS 9 | 10 | - Apple Inc. (all contributors with '@apple.com') 11 | 12 | ### Contributors 13 | 14 | - Felix Schlegel 15 | - Felix Schlegel 16 | - Franz Busch 17 | - FranzBusch 18 | - Rick Newton-Rogers 19 | - SHILPEE GUPTA <78029920+shilpeegupta14@users.noreply.github.com> 20 | - Yim Lee 21 | - blindspotbounty <127803250+blindspotbounty@users.noreply.github.com> 22 | - mr-swifter <103502437+mr-swifter@users.noreply.github.com> 23 | 24 | **Updating this list** 25 | 26 | Please do not edit this file manually. It is generated using `./scripts/generate_contributors_list.sh`. If a name is misspelled or appearing multiple times: add an entry in `./.mailmap` 27 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 |    You may obtain a copy of the License at
195 | 
196 |        http://www.apache.org/licenses/LICENSE-2.0
197 | 
198 |    Unless required by applicable law or agreed to in writing, software
199 |    distributed under the License is distributed on an "AS IS" BASIS,
200 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 |    See the License for the specific language governing permissions and
202 |    limitations under the License.
203 | 
--------------------------------------------------------------------------------
/NOTICE.txt:
--------------------------------------------------------------------------------
1 | 
2 | The Swift Kafka Client Project
3 | ====================================
4 | 
5 | Please visit the Swift Kafka Client web site for more information:
6 | 
7 |   * https://github.com/swift-server/swift-kafka-client
8 | 
9 | Copyright 2023 The Swift Kafka Client Project
10 | 
11 | The Swift Kafka Client Project licenses this file to you under the Apache License,
12 | version 2.0 (the "License"); you may not use this file except in compliance
13 | with the License. You may obtain a copy of the License at:
14 | 
15 |     https://www.apache.org/licenses/LICENSE-2.0
16 | 
17 | Unless required by applicable law or agreed to in writing, software
18 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
19 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
20 | License for the specific language governing permissions and limitations
21 | under the License.
22 | 
23 | Also, please refer to each LICENSE.<component>.txt file, which is located in
24 | the 'license' directory of the distribution file, for the license terms of the
25 | components that this product depends on.
26 | 
27 | -------------------------------------------------------------------------------
28 | 
29 | This product is based on librdkafka - the Apache Kafka C driver library.
30 | 
31 |   * LICENSE (BSD-2):
32 |     * https://opensource.org/license/BSD-2-Clause
33 |   * HOMEPAGE:
34 |     * https://github.com/confluentinc/librdkafka
35 | 
36 | ---
37 | 
38 | This product uses zstd.
39 | 
40 |   * LICENSE (BSD-3):
41 |     * https://opensource.org/license/BSD-3-Clause
42 |   * HOMEPAGE:
43 |     * https://github.com/facebook/zstd
44 | 
--------------------------------------------------------------------------------
/Package.swift:
--------------------------------------------------------------------------------
1 | // swift-tools-version:5.10
2 | //===----------------------------------------------------------------------===//
3 | //
4 | // This source file is part of the swift-kafka-client open source project
5 | //
6 | // Copyright (c) 2022 Apple Inc.
and the swift-kafka-client project authors 7 | // Licensed under Apache License v2.0 8 | // 9 | // See LICENSE.txt for license information 10 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 11 | // 12 | // SPDX-License-Identifier: Apache-2.0 13 | // 14 | //===----------------------------------------------------------------------===// 15 | 16 | import PackageDescription 17 | 18 | let rdkafkaExclude = [ 19 | "./librdkafka/src/CMakeLists.txt", 20 | "./librdkafka/src/Makefile", 21 | "./librdkafka/src/README.lz4.md", 22 | "./librdkafka/src/generate_proto.sh", 23 | "./librdkafka/src/librdkafka_cgrp_synch.png", 24 | "./librdkafka/src/opentelemetry/metrics.options", 25 | "./librdkafka/src/rdkafka_sasl_win32.c", 26 | "./librdkafka/src/rdwin32.h", 27 | "./librdkafka/src/statistics_schema.json", 28 | "./librdkafka/src/win32_config.h", 29 | // Remove dependency on cURL. Disabling `ENABLE_CURL` and `WITH_CURL` does 30 | // not appear to prevent processing of the below files, so we have to exclude 31 | // them explicitly. 32 | "./librdkafka/src/rdkafka_sasl_oauthbearer.c", 33 | "./librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c", 34 | "./librdkafka/src/rdhttp.c", 35 | ] 36 | 37 | let package = Package( 38 | name: "swift-kafka-client", 39 | platforms: [ 40 | .macOS(.v13), 41 | .iOS(.v16), 42 | .watchOS(.v9), 43 | .tvOS(.v16), 44 | ], 45 | products: [ 46 | .library( 47 | name: "Kafka", 48 | targets: ["Kafka"] 49 | ), 50 | .library( 51 | name: "KafkaFoundationCompat", 52 | targets: ["KafkaFoundationCompat"] 53 | ), 54 | ], 55 | dependencies: [ 56 | .package(url: "https://github.com/apple/swift-nio.git", from: "2.55.0"), 57 | .package(url: "https://github.com/swift-server/swift-service-lifecycle.git", from: "2.1.0"), 58 | .package(url: "https://github.com/apple/swift-log.git", from: "1.0.0"), 59 | .package(url: "https://github.com/apple/swift-metrics", from: "2.4.1"), 60 | // The zstd Swift package produces warnings that we cannot resolve: 61 | // https://github.com/facebook/zstd/issues/3328 62 | .package(url: "https://github.com/facebook/zstd.git", from: "1.5.0"), 63 | ], 64 | targets: [ 65 | .target( 66 | name: "Crdkafka", 67 | dependencies: [ 68 | "COpenSSL", 69 | .product(name: "libzstd", package: "zstd"), 70 | ], 71 | exclude: rdkafkaExclude, 72 | sources: ["./librdkafka/src/"], 73 | publicHeadersPath: "./include", 74 | cSettings: [ 75 | // dummy folder, because config.h is included as "../config.h" in librdkafka 76 | .headerSearchPath("./custom/config/dummy"), 77 | .headerSearchPath("./librdkafka/src"), 78 | .define("_GNU_SOURCE", to: "1"), // Fix build error for Swift 5.9 onwards 79 | ], 80 | linkerSettings: [ 81 | .linkedLibrary("sasl2"), 82 | .linkedLibrary("z"), // zlib 83 | ] 84 | ), 85 | .target( 86 | name: "Kafka", 87 | dependencies: [ 88 | "Crdkafka", 89 | .product(name: "NIOCore", package: "swift-nio"), 90 | .product(name: "ServiceLifecycle", package: "swift-service-lifecycle"), 91 | .product(name: "Logging", package: "swift-log"), 92 | .product(name: "Metrics", package: "swift-metrics"), 93 | ] 94 | ), 95 | .target( 96 | name: "KafkaFoundationCompat", 97 | dependencies: [ 98 | "Kafka" 99 | ] 100 | ), 101 | .systemLibrary( 102 | name: "COpenSSL", 103 | pkgConfig: "openssl", 104 | providers: [ 105 | .brew(["openssl@3"]), 106 | .apt(["libssl-dev"]), 107 | ] 108 | ), 109 | .testTarget( 110 | name: "KafkaTests", 111 | dependencies: [ 112 | "Kafka", 113 | .product(name: "MetricsTestKit", package: "swift-metrics"), 114 | ] 115 | ), 116 | .testTarget( 117 | name: 
"IntegrationTests", 118 | dependencies: ["Kafka"] 119 | ), 120 | ] 121 | ) 122 | 123 | for target in package.targets { 124 | switch target.type { 125 | case .regular, .test, .executable: 126 | var settings = target.swiftSettings ?? [] 127 | settings.append(.enableExperimentalFeature("StrictConcurrency=complete")) 128 | target.swiftSettings = settings 129 | case .macro, .plugin, .system, .binary: 130 | break // These targets do not support settings 131 | @unknown default: 132 | fatalError("Update to handle new target type \(target.type)") 133 | } 134 | } 135 | 136 | // --- STANDARD CROSS-REPO SETTINGS DO NOT EDIT --- // 137 | for target in package.targets { 138 | switch target.type { 139 | case .regular, .test, .executable: 140 | var settings = target.swiftSettings ?? [] 141 | // https://github.com/swiftlang/swift-evolution/blob/main/proposals/0444-member-import-visibility.md 142 | settings.append(.enableUpcomingFeature("MemberImportVisibility")) 143 | target.swiftSettings = settings 144 | case .macro, .plugin, .system, .binary: 145 | () // not applicable 146 | @unknown default: 147 | () // we don't know what to do here, do nothing 148 | } 149 | } 150 | // --- END: STANDARD CROSS-REPO SETTINGS DO NOT EDIT --- // 151 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Swift Kafka Client 2 | 3 | The Swift Kafka Client library provides a convenient way to interact with [Apache Kafka](https://kafka.apache.org) by leveraging [Swift's new concurrency features](https://docs.swift.org/swift-book/LanguageGuide/Concurrency.html). This package wraps the native [`librdkafka`](https://github.com/confluentinc/librdkafka) library. 4 | 5 | ## Adding Kafka as a Dependency 6 | 7 | To use the `Kafka` library in a SwiftPM project, 8 | add the following line to the dependencies in your `Package.swift` file: 9 | 10 | ```swift 11 | .package(url: "https://github.com/swift-server/swift-kafka-client", branch: "main") 12 | ``` 13 | 14 | Include `"Kafka"` as a dependency for your executable target: 15 | 16 | ```swift 17 | .target(name: "", dependencies: [ 18 | .product(name: "Kafka", package: "swift-kafka-client"), 19 | ]), 20 | ``` 21 | 22 | Finally, add `import Kafka` to your source code. 23 | 24 | ## Usage 25 | 26 | `Kafka` should be used within a [`Swift Service Lifecycle`](https://github.com/swift-server/swift-service-lifecycle) 27 | [`ServiceGroup`](https://swiftpackageindex.com/swift-server/swift-service-lifecycle/main/documentation/servicelifecycle/servicegroup) for proper startup and shutdown handling. 28 | Both the `KafkaProducer` and the `KafkaConsumer` implement the [`Service`](https://swiftpackageindex.com/swift-server/swift-service-lifecycle/main/documentation/servicelifecycle/service) protocol. 29 | 30 | ### Producer API 31 | 32 | The `send(_:)` method of `KafkaProducer` returns a message-id that can later be used to identify the corresponding acknowledgement. Acknowledgements are received through the `events` [`AsyncSequence`](https://developer.apple.com/documentation/swift/asyncsequence). Each acknowledgement indicates that producing a message was successful or returns an error. 
33 | 34 | ```swift 35 | let brokerAddress = KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092) 36 | let configuration = KafkaProducerConfiguration(bootstrapBrokerAddresses: [brokerAddress]) 37 | 38 | let (producer, events) = try KafkaProducer.makeProducerWithEvents( 39 | configuration: configuration, 40 | logger: logger 41 | ) 42 | 43 | await withThrowingTaskGroup(of: Void.self) { group in 44 | 45 | // Run Task 46 | group.addTask { 47 | let serviceGroup = ServiceGroup( 48 | services: [producer], 49 | configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), 50 | logger: logger 51 | ) 52 | try await serviceGroup.run() 53 | } 54 | 55 | // Task sending message and receiving events 56 | group.addTask { 57 | let messageID = try producer.send( 58 | KafkaProducerMessage( 59 | topic: "topic-name", 60 | value: "Hello, World!" 61 | ) 62 | ) 63 | 64 | for await event in events { 65 | switch event { 66 | case .deliveryReports(let deliveryReports): 67 | // Check what messages the delivery reports belong to 68 | default: 69 | break // Ignore any other events 70 | } 71 | } 72 | } 73 | } 74 | ``` 75 | 76 | ### Consumer API 77 | 78 | After initializing the `KafkaConsumer` with a topic-partition pair to read from, messages can be consumed using the `messages` [`AsyncSequence`](https://developer.apple.com/documentation/swift/asyncsequence). 79 | 80 | ```swift 81 | let brokerAddress = KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092) 82 | let configuration = KafkaConsumerConfiguration( 83 | consumptionStrategy: .partition( 84 | KafkaPartition(rawValue: 0), 85 | topic: "topic-name" 86 | ), 87 | bootstrapBrokerAddresses: [brokerAddress] 88 | ) 89 | 90 | let consumer = try KafkaConsumer( 91 | configuration: configuration, 92 | logger: logger 93 | ) 94 | 95 | await withThrowingTaskGroup(of: Void.self) { group in 96 | 97 | // Run Task 98 | group.addTask { 99 | let serviceGroup = ServiceGroup( 100 | services: [consumer], 101 | configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), 102 | logger: logger 103 | ) 104 | try await serviceGroup.run() 105 | } 106 | 107 | // Task receiving messages 108 | group.addTask { 109 | for try await message in consumer.messages { 110 | // Do something with message 111 | } 112 | } 113 | } 114 | ``` 115 | 116 | #### Consumer Groups 117 | 118 | Kafka also allows users to subscribe to an array of topics as part of a consumer group. 119 | 120 | ```swift 121 | let brokerAddress = KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092) 122 | let configuration = KafkaConsumerConfiguration( 123 | consumptionStrategy: .group(id: "example-group", topics: ["topic-name"]), 124 | bootstrapBrokerAddresses: [brokerAddress] 125 | ) 126 | 127 | let consumer = try KafkaConsumer( 128 | configuration: configuration, 129 | logger: logger 130 | ) 131 | 132 | await withThrowingTaskGroup(of: Void.self) { group in 133 | 134 | // Run Task 135 | group.addTask { 136 | let serviceGroup = ServiceGroup( 137 | services: [consumer], 138 | configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), 139 | logger: logger 140 | ) 141 | try await serviceGroup.run() 142 | } 143 | 144 | // Task receiving messages 145 | group.addTask { 146 | for try await message in consumer.messages { 147 | // Do something with message 148 | } 149 | } 150 | } 151 | ``` 152 | 153 | #### Manual commits 154 | 155 | By default, the `KafkaConsumer` automatically commits message offsets after receiving the corresponding message. 
However, we allow users to disable this setting and commit message offsets manually. 156 | 157 | ```swift 158 | let brokerAddress = KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092) 159 | var configuration = KafkaConsumerConfiguration( 160 | consumptionStrategy: .group(id: "example-group", topics: ["topic-name"]), 161 | bootstrapBrokerAddresses: [brokerAddress] 162 | ) 163 | configuration.isAutoCommitEnabled = false 164 | 165 | let consumer = try KafkaConsumer( 166 | configuration: configuration, 167 | logger: logger 168 | ) 169 | 170 | await withThrowingTaskGroup(of: Void.self) { group in 171 | 172 | // Run Task 173 | group.addTask { 174 | let serviceGroup = ServiceGroup( 175 | services: [consumer], 176 | configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), 177 | logger: logger 178 | ) 179 | try await serviceGroup.run() 180 | } 181 | 182 | // Task receiving messages 183 | group.addTask { 184 | for try await message in consumer.messages { 185 | // Do something with message 186 | // ... 187 | try await consumer.commitSync(message) 188 | } 189 | } 190 | } 191 | ``` 192 | 193 | ### Security Mechanisms 194 | 195 | Both the `KafkaProducer` and the `KafkaConsumer` can be configured to use different security mechanisms. 196 | 197 | #### Plaintext 198 | 199 | ```swift 200 | var configuration = KafkaProducerConfiguration(bootstrapBrokerAddresses: []) 201 | configuration.securityProtocol = .plaintext 202 | ``` 203 | 204 | #### TLS 205 | 206 | ```swift 207 | var configuration = KafkaProducerConfiguration(bootstrapBrokerAddresses: []) 208 | configuration.securityProtocol = .tls() 209 | ``` 210 | 211 | #### SASL 212 | 213 | ```swift 214 | let kerberosConfiguration = KafkaConfiguration.SASLMechanism.KerberosConfiguration( 215 | keytab: "KEYTAB_FILE" 216 | ) 217 | 218 | var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: []) 219 | config.securityProtocol = .saslPlaintext( 220 | mechanism: .gssapi(kerberosConfiguration: kerberosConfiguration) 221 | ) 222 | ``` 223 | 224 | #### SASL + TLS 225 | 226 | ```swift 227 | let saslMechanism = KafkaConfiguration.SASLMechanism.scramSHA256( 228 | username: "USERNAME", 229 | password: "PASSWORD" 230 | ) 231 | 232 | var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: []) 233 | config.securityProtocol = .saslTLS( 234 | saslMechanism: saslMechanism 235 | ) 236 | ``` 237 | 238 | ## librdkafka 239 | 240 | The Package depends on [the `librdkafka` library](https://github.com/confluentinc/librdkafka), which is included as a git submodule. 241 | It has source files that are excluded in `Package.swift`. 242 | 243 | ### Dependencies 244 | 245 | `librdkafka` depends on `openssl`, meaning that `libssl-dev` must be present at build time. 246 | `openssl@3` can be installed on macOS, among others, through `brew`. 247 | 248 | ## Development Setup 249 | 250 | We provide a Docker environment for this package. This will automatically start a local Kafka server and run the package tests. 
251 | 252 | ```bash 253 | docker-compose -f docker/docker-compose.yaml run test 254 | ``` 255 | -------------------------------------------------------------------------------- /Sources/COpenSSL/module.modulemap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/swift-kafka-client/4b4bc0cedcb1068a55e02a47a5666696271fcda0/Sources/COpenSSL/module.modulemap -------------------------------------------------------------------------------- /Sources/Crdkafka/custom/config/config.h: -------------------------------------------------------------------------------- 1 | #ifndef _CONFIG_H_ 2 | #define _CONFIG_H_ 3 | 4 | // distro 5 | #ifdef __APPLE__ 6 | #define SOLIB_EXT ".dylib" 7 | #elif defined(__linux__) 8 | #define SOLIB_EXT ".so" 9 | #endif 10 | 11 | #pragma clang diagnostic ignored "-Wimplicit-function-declaration" 12 | 13 | #ifdef __x86_64__ 14 | #define ARCH "x86_64" 15 | #elif defined(__arm64__) || defined(__arm__) || defined(__aarch64__) 16 | #define ARCH "arm64" 17 | #endif 18 | 19 | #define CPU "generic" 20 | #define WITHOUT_OPTIMIZATION 0 21 | 22 | #define ENABLE_ZLIB 1 23 | #define ENABLE_ZSTD 1 24 | #define ENABLE_SSL 1 25 | #define ENABLE_GSSAPI 1 26 | #define ENABLE_CURL 0 27 | #define ENABLE_DEVEL 0 28 | #define ENABLE_VALGRIND 0 29 | #define ENABLE_REFCNT_DEBUG 0 30 | #define ENABLE_LZ4_EXT 1 31 | #define ENABLE_LZ4_EXT 1 32 | #define ENABLE_REGEX_EXT 1 33 | #define ENABLE_C11THREADS "try" 34 | #define ENABLE_ZLIB 1 35 | #define ENABLE_ZSTD 1 36 | #define ENABLE_SSL 1 37 | #define ENABLE_GSSAPI 1 38 | #define ENABLE_LZ4_EXT 1 39 | #define WITH_STATIC_LINKING 1 40 | #define MKL_APP_NAME "librdkafka" 41 | #define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library" 42 | 43 | #ifdef __APPLE__ 44 | #define WITH_STRIP 0 45 | #define ENABLE_SYSLOG 1 46 | #endif 47 | 48 | #ifdef __APPLE__ 49 | // gcc 50 | #define WITH_GCC 1 51 | // gxx 52 | #define WITH_GXX 1 53 | #elif defined(__linux__) 54 | // ccenv 55 | #define WITH_CC 1 56 | // cxxenv 57 | #define WITH_CXX 1 58 | #endif 59 | 60 | // pkgconfig 61 | #if !(defined(__linux__) && (defined(__arm64__) || defined(__arm__) || defined(__aarch64__))) 62 | #define WITH_PKGCONFIG 1 63 | #endif 64 | 65 | #ifdef __linux__ 66 | // install 67 | #define WITH_INSTALL 1 68 | // gnulib 69 | #define WITH_GNULD 1 70 | #endif 71 | 72 | #ifdef __APPLE__ 73 | // osxlib 74 | #define WITH_OSXLD 1 75 | // syslog 76 | #define WITH_SYSLOG 1 77 | #endif 78 | 79 | // crc32chw 80 | #ifdef __x86_64__ 81 | #define WITH_CRC32C_HW 1 82 | #endif 83 | 84 | #ifdef __APPLE__ 85 | // rand_r 86 | #define HAVE_RAND_R 1 87 | // strlcpy 88 | #define HAVE_STRLCPY 1 89 | // strcasestr 90 | #define HAVE_STRCASESTR 1 91 | // pthread_setname_darwin 92 | #define HAVE_PTHREAD_SETNAME_DARWIN 1 93 | // getrusage 94 | #define HAVE_GETRUSAGE 1 95 | #endif 96 | 97 | #ifdef __linux__ 98 | // pthread_setname_gnu 99 | #define HAVE_PTHREAD_SETNAME_GNU 1 100 | #endif 101 | 102 | // Common identifiers 103 | // PIC 104 | #define HAVE_PIC 1 105 | // __atomic_32 106 | #define HAVE_ATOMICS_32 1 107 | // __atomic_32 108 | #define HAVE_ATOMICS_32_ATOMIC 1 109 | // atomic_32 110 | #define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) 111 | // __atomic_64 112 | #define HAVE_ATOMICS_64 1 113 | // __atomic_64 114 | #define HAVE_ATOMICS_64_ATOMIC 1 115 | // atomic_64 116 | #define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) 117 | // atomic_64 118 | 
#define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) 119 | // parseversion 120 | #define RDKAFKA_VERSION_STR "2.1.0" 121 | // parseversion 122 | #define MKL_APP_VERSION "2.1.0" 123 | // libdl 124 | #define WITH_LIBDL 1 125 | // WITH_PLUGINS 126 | #define WITH_PLUGINS 1 127 | // zlib 128 | #define WITH_ZLIB 1 129 | // libssl 130 | #define WITH_SSL 1 131 | // libcrypto 132 | #define OPENSSL_SUPPRESS_DEPRECATED "OPENSSL_SUPPRESS_DEPRECATED" 133 | // libsasl2 134 | #define WITH_SASL_CYRUS 1 135 | // libzstd 136 | #define WITH_ZSTD 1 137 | // libcurl 138 | #define WITH_CURL 0 139 | // WITH_HDRHISTOGRAM 140 | #define WITH_HDRHISTOGRAM 1 141 | // WITH_SNAPPY 142 | #define WITH_SNAPPY 1 143 | // WITH_SOCKEM 144 | #define WITH_SOCKEM 1 145 | // WITH_SASL_SCRAM 146 | #define WITH_SASL_SCRAM 1 147 | // WITH_SASL_OAUTHBEARER 148 | #define WITH_SASL_OAUTHBEARER 0 149 | // WITH_OAUTHBEARER_OIDC 150 | #define WITH_OAUTHBEARER_OIDC 0 151 | // regex 152 | #define HAVE_REGEX 1 153 | // strndup 154 | #define HAVE_STRNDUP 1 155 | // strerror_r 156 | #define HAVE_STRERROR_R 1 157 | 158 | // BUILT_WITH 159 | #ifdef __APPLE__ 160 | #ifdef __x86_64__ 161 | #define BUILT_WITH "STATIC_LINKING GCC GXX PKGCONFIG OSXLD LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW" 162 | #elif defined(__arm64__) || defined(__arm__) || defined(__aarch64__) 163 | #define BUILT_WITH "STATIC_LINKING GCC GXX PKGCONFIG OSXLD LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC" 164 | #endif 165 | #elif defined(__linux__) 166 | #ifdef __x86_64__ 167 | #define BUILT_WITH "STATIC_LINKING CC CXX PKGCONFIG INSTALL GNULD LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW" 168 | #elif defined(__arm64__) || defined(__arm__) || defined(__aarch64__) 169 | #define BUILT_WITH "STATIC_LINKING CC CXX INSTALL GNULD LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC" 170 | #endif 171 | #endif 172 | 173 | #endif /* _CONFIG_H_ */ 174 | -------------------------------------------------------------------------------- /Sources/Crdkafka/custom/config/dummy/empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/swift-kafka-client/4b4bc0cedcb1068a55e02a47a5666696271fcda0/Sources/Crdkafka/custom/config/dummy/empty -------------------------------------------------------------------------------- /Sources/Crdkafka/include/rdkafka.h: -------------------------------------------------------------------------------- 1 | ../librdkafka/src/rdkafka.h -------------------------------------------------------------------------------- /Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Metrics 16 | 17 | extension KafkaConfiguration { 18 | // MARK: - Metrics 19 | 20 | /// Configuration for the consumer metrics emitted by `SwiftKafka`. 21 | public struct ConsumerMetrics: Sendable { 22 | internal var enabled: Bool { 23 | self.updateInterval != nil 24 | && (self.queuedOperation != nil || self.totalKafkaBrokerRequests != nil 25 | || self.totalKafkaBrokerBytesSent != nil || self.totalKafkaBrokerResponses != nil 26 | || self.totalKafkaBrokerResponsesSize != nil || self.totalKafkaBrokerMessagesBytesRecieved != nil 27 | || self.topicsInMetadataCache != nil) 28 | } 29 | 30 | /// Update interval for statistics. 31 | public var updateInterval: Duration? 32 | 33 | /// Number of operations (callbacks, events, etc) waiting in the queue. 34 | public var queuedOperation: Gauge? 35 | 36 | /// Total number of requests sent to Kafka brokers. 37 | public var totalKafkaBrokerRequests: Gauge? 38 | /// Total number of bytes transmitted to Kafka brokers. 39 | public var totalKafkaBrokerBytesSent: Gauge? 40 | /// Total number of responses received from Kafka brokers. 41 | public var totalKafkaBrokerResponses: Gauge? 42 | /// Total number of bytes received from Kafka brokers. 43 | public var totalKafkaBrokerResponsesSize: Gauge? 44 | 45 | /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers. 46 | public var totalKafkaBrokerMessagesRecieved: Gauge? 47 | /// Total number of message bytes (including framing) received from Kafka brokers. 48 | public var totalKafkaBrokerMessagesBytesRecieved: Gauge? 49 | 50 | /// Number of topics in the metadata cache. 51 | public var topicsInMetadataCache: Gauge? 52 | 53 | private static func record<T: BinaryInteger>(_ value: T?, to: Gauge?) { 54 | guard let value, 55 | let to 56 | else { 57 | return 58 | } 59 | to.record(value) 60 | } 61 | 62 | internal func update(with rdKafkaStatistics: RDKafkaStatistics) { 63 | Self.record(rdKafkaStatistics.queuedOperation, to: self.queuedOperation) 64 | 65 | Self.record(rdKafkaStatistics.totalKafkaBrokerRequests, to: self.totalKafkaBrokerRequests) 66 | Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) 67 | Self.record(rdKafkaStatistics.totalKafkaBrokerResponses, to: self.totalKafkaBrokerResponses) 68 | Self.record(rdKafkaStatistics.totalKafkaBrokerResponsesSize, to: self.totalKafkaBrokerResponsesSize) 69 | 70 | Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesRecieved, to: self.totalKafkaBrokerMessagesRecieved) 71 | Self.record( 72 | rdKafkaStatistics.totalKafkaBrokerMessagesBytesRecieved, 73 | to: self.totalKafkaBrokerMessagesBytesRecieved 74 | ) 75 | 76 | Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) 77 | } 78 | } 79 | 80 | /// Configuration for the producer metrics emitted by `SwiftKafka`.
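/// A minimal wiring sketch (the gauge labels below are illustrative, not part of this API;
/// the recorded values are delivered to whichever swift-metrics backend the application bootstraps):
/// ```swift
/// var metrics = KafkaConfiguration.ProducerMetrics()
/// metrics.updateInterval = .seconds(10)
/// metrics.totalKafkaBrokerRequests = Gauge(label: "kafka.producer.broker.requests")
/// metrics.queuedProducerMessages = Gauge(label: "kafka.producer.queued.messages")
/// ```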
81 | public struct ProducerMetrics: Sendable { 82 | internal var enabled: Bool { 83 | self.updateInterval != nil 84 | && (self.queuedOperation != nil || self.queuedProducerMessages != nil 85 | || self.queuedProducerMessagesSize != nil || self.totalKafkaBrokerRequests != nil 86 | || self.totalKafkaBrokerBytesSent != nil || self.totalKafkaBrokerResponses != nil 87 | || self.totalKafkaBrokerResponsesSize != nil || self.totalKafkaBrokerMessagesSent != nil 88 | || self.totalKafkaBrokerMessagesBytesSent != nil || self.topicsInMetadataCache != nil) 89 | } 90 | 91 | /// Update interval for statistics. 92 | public var updateInterval: Duration? 93 | 94 | /// Number of operations (callbacks, events, etc) waiting in the queue. 95 | public var queuedOperation: Gauge? 96 | /// Current number of queued producer messages. 97 | public var queuedProducerMessages: Gauge? 98 | /// Current total size in bytes of queued producer messages. 99 | public var queuedProducerMessagesSize: Gauge? 100 | 101 | /// Total number of requests sent to Kafka brokers. 102 | public var totalKafkaBrokerRequests: Gauge? 103 | /// Total number of bytes transmitted to Kafka brokers. 104 | public var totalKafkaBrokerBytesSent: Gauge? 105 | /// Total number of responses received from Kafka brokers. 106 | public var totalKafkaBrokerResponses: Gauge? 107 | /// Total number of bytes received from Kafka brokers. 108 | public var totalKafkaBrokerResponsesSize: Gauge? 109 | 110 | /// Total number of messages transmitted (produced) to Kafka brokers. 111 | public var totalKafkaBrokerMessagesSent: Gauge? 112 | /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers. 113 | public var totalKafkaBrokerMessagesBytesSent: Gauge? 114 | 115 | /// Number of topics in the metadata cache. 116 | public var topicsInMetadataCache: Gauge? 117 | 118 | private static func record<T: BinaryInteger>(_ value: T?, to: Gauge?)
{ 119 | guard let value, 120 | let to 121 | else { 122 | return 123 | } 124 | to.record(value) 125 | } 126 | 127 | internal func update(with rdKafkaStatistics: RDKafkaStatistics) { 128 | Self.record(rdKafkaStatistics.queuedOperation, to: self.queuedOperation) 129 | Self.record(rdKafkaStatistics.queuedProducerMessages, to: self.queuedProducerMessages) 130 | Self.record(rdKafkaStatistics.queuedProducerMessagesSize, to: self.queuedProducerMessagesSize) 131 | 132 | Self.record(rdKafkaStatistics.totalKafkaBrokerRequests, to: self.totalKafkaBrokerRequests) 133 | Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) 134 | Self.record(rdKafkaStatistics.totalKafkaBrokerResponses, to: self.totalKafkaBrokerResponses) 135 | Self.record(rdKafkaStatistics.totalKafkaBrokerResponsesSize, to: self.totalKafkaBrokerResponsesSize) 136 | 137 | Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesSent, to: self.totalKafkaBrokerMessagesSent) 138 | Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesSent, to: self.totalKafkaBrokerMessagesBytesSent) 139 | 140 | Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) 141 | } 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /Sources/Kafka/Configuration/KafkaConfiguration.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | /// Collection of types used in the configuration structs this library provides. 16 | public enum KafkaConfiguration { 17 | /// The address of a Kafka broker. 18 | public struct BrokerAddress: Sendable, Hashable, CustomStringConvertible { 19 | /// The host component of the broker address. 20 | public var host: String 21 | 22 | /// The port to connect to. 23 | public var port: Int 24 | 25 | public var description: String { 26 | "\(self.host):\(self.port)" 27 | } 28 | 29 | public init( 30 | host: String, 31 | port: Int 32 | ) { 33 | self.host = host 34 | self.port = port 35 | } 36 | } 37 | 38 | /// Message options. 39 | public struct MessageOptions: Sendable, Hashable { 40 | /// Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions, the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests. 41 | /// The broker will enforce the topic's `max.message.bytes` limit [(see Apache Kafka documentation)](https://kafka.apache.org/documentation/#brokerconfigs_message.max.bytes). 42 | /// Default: `1_000_000` 43 | public var maximumBytes: Int = 1_000_000 44 | 45 | /// Maximum size for a message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. 46 | /// Default: `65535` 47 | public var maximumBytesToCopy: Int = 65535 48 | 49 | public init() {} 50 | } 51 | 52 | /// Topic metadata options. 
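/// A short configuration sketch (values are illustrative only):
/// ```swift
/// var topicMetadata = KafkaConfiguration.TopicMetadataOptions()
/// topicMetadata.refreshInterval = .interval(.seconds(60))
/// topicMetadata.refreshFastInterval = .milliseconds(100)
/// ```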
53 | public struct TopicMetadataOptions: Sendable, Hashable { 54 | /// Period of time at which topic and broker metadata is refreshed to proactively discover any new brokers, topics, partitions or partition leader changes. 55 | public struct RefreshInterval: Sendable, Hashable { 56 | internal let rawValue: Int 57 | 58 | private init(rawValue: Int) { 59 | self.rawValue = rawValue 60 | } 61 | 62 | /// (Lowest granularity is milliseconds) 63 | public static func interval(_ value: Duration) -> RefreshInterval { 64 | precondition( 65 | value.canBeRepresentedAsMilliseconds, 66 | "Lowest granularity is milliseconds" 67 | ) 68 | return .init(rawValue: Int(value.inMilliseconds)) 69 | } 70 | 71 | /// Disable the interval-based refresh (not recommended). 72 | public static let disabled: RefreshInterval = .init(rawValue: -1) 73 | } 74 | 75 | /// Period of time at which topic and broker metadata is refreshed to proactively discover any new brokers, topics, partitions or partition leader changes. 76 | /// If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s. 77 | /// Default: `.interval(.milliseconds(300_000))` 78 | public var refreshInterval: RefreshInterval = .interval(.milliseconds(300_000)) 79 | 80 | /// When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. 81 | /// Default: `.milliseconds(250)` 82 | public var refreshFastInterval: Duration = .milliseconds(250) { 83 | didSet { 84 | precondition( 85 | self.refreshFastInterval.canBeRepresentedAsMilliseconds, 86 | "Lowest granularity is milliseconds" 87 | ) 88 | } 89 | } 90 | 91 | /// Sparse metadata requests (consumes less network bandwidth). 92 | /// Default: `true` 93 | public var isSparseRefreshingEnabled: Bool = true 94 | 95 | /// Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with ERR__UNKNOWN_TOPIC. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on `send()`. 96 | /// Default: `.milliseconds(30000)` 97 | public var maximumPropagation: Duration = .milliseconds(30000) { 98 | didSet { 99 | precondition( 100 | self.maximumPropagation.canBeRepresentedAsMilliseconds, 101 | "Lowest granularity is milliseconds" 102 | ) 103 | } 104 | } 105 | 106 | public init() {} 107 | } 108 | 109 | /// Socket options. 110 | public struct SocketOptions: Sendable, Hashable { 111 | /// Default timeout for network requests. Producer: ProduceRequests will use the lesser value of ``KafkaConfiguration/SocketOptions/timeout`` 112 | /// and remaining ``KafkaTopicConfiguration/messageTimeout`` for the first message in the batch.
113 | /// Default: `.milliseconds(60000)` 114 | public var timeout: Duration = .milliseconds(60000) { 115 | didSet { 116 | precondition( 117 | self.timeout.canBeRepresentedAsMilliseconds, 118 | "Lowest granularity is milliseconds" 119 | ) 120 | } 121 | } 122 | 123 | /// Broker socket send/receive buffer size. 124 | public struct BufferSize: Sendable, Hashable { 125 | internal let rawValue: Int 126 | 127 | private init(rawValue: Int) { 128 | self.rawValue = rawValue 129 | } 130 | 131 | public static func value(_ value: Int) -> BufferSize { 132 | .init(rawValue: value) 133 | } 134 | 135 | /// System default for send/receive buffer size. 136 | public static let systemDefault: BufferSize = .init(rawValue: 0) 137 | } 138 | 139 | /// Broker socket send buffer size. 140 | /// Default: `.systemDefault` 141 | public var sendBufferBytes: BufferSize = .systemDefault 142 | 143 | /// Broker socket receive buffer size. 144 | /// Default: `.systemDefault` 145 | public var receiveBufferBytes: BufferSize = .systemDefault 146 | 147 | /// Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets. 148 | /// Default: `false` 149 | public var isKeepaliveEnabled: Bool = false 150 | 151 | /// Disable the Nagle algorithm (TCP_NODELAY) on broker sockets. 152 | /// Default: `false` 153 | public var isNagleDisabled: Bool = false 154 | 155 | /// Disconnect from the broker when this number of send failures (e.g., timed-out requests) is reached. 156 | public struct MaximumFailures: Sendable, Hashable { 157 | internal let rawValue: Int 158 | 159 | private init(rawValue: Int) { 160 | self.rawValue = rawValue 161 | } 162 | 163 | public static func failures(_ value: Int) -> MaximumFailures { 164 | .init(rawValue: value) 165 | } 166 | 167 | /// Disable disconnecting from the broker on a number of send failures. 168 | public static let disabled: MaximumFailures = .init(rawValue: 0) 169 | } 170 | 171 | /// Disconnect from the broker when this number of send failures (e.g., timed-out requests) is reached. 172 | /// 173 | /// - Warning: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker becoming desynchronized in case of request timeouts. 174 | /// - Note: The connection is automatically re-established. 175 | /// Default: `.failures(1)` 176 | public var maximumFailures: MaximumFailures = .failures(1) 177 | 178 | /// Maximum time allowed for broker connection setup (TCP connection setup as well as SSL and SASL handshake). 179 | /// If the connection to the broker is not fully functional after this, the connection will be closed and retried. 180 | /// Default: `.milliseconds(30000)` 181 | public var connectionSetupTimeout: Duration = .milliseconds(30000) 182 | 183 | public init() {} 184 | } 185 | 186 | /// Broker options. 187 | public struct BrokerOptions: Sendable, Hashable { 188 | /// How long to cache the broker address resolving results. 189 | /// (Lowest granularity is milliseconds) 190 | /// Default: `.milliseconds(1000)` 191 | public var addressTimeToLive: Duration = .milliseconds(1000) { 192 | didSet { 193 | precondition( 194 | self.addressTimeToLive.canBeRepresentedAsMilliseconds, 195 | "Lowest granularity is milliseconds" 196 | ) 197 | } 198 | } 199 | 200 | /// Allowed broker ``KafkaConfiguration/IPAddressFamily``. 201 | /// Default: `.any` 202 | public var addressFamily: IPAddressFamily = .any 203 | 204 | public init() {} 205 | } 206 | 207 | /// Reconnect options.
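/// A short configuration sketch (values are illustrative only):
/// ```swift
/// var reconnect = KafkaConfiguration.ReconnectOptions()
/// reconnect.backoff = .backoff(.milliseconds(250))
/// reconnect.maximumBackoff = .milliseconds(5000)
/// ```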
208 | public struct ReconnectOptions: Sendable, Hashable { 209 | /// The initial time to wait before reconnecting to a broker after the connection has been closed. 210 | public struct Backoff: Sendable, Hashable { 211 | internal let rawValue: UInt 212 | 213 | private init(rawValue: UInt) { 214 | self.rawValue = rawValue 215 | } 216 | 217 | /// (Lowest granularity is milliseconds) 218 | public static func backoff(_ value: Duration) -> Backoff { 219 | precondition( 220 | value.canBeRepresentedAsMilliseconds, 221 | "Lowest granularity is milliseconds" 222 | ) 223 | return .init(rawValue: value.inMilliseconds) 224 | } 225 | 226 | /// Disable the backoff and reconnect immediately. 227 | public static let disabled: Backoff = .init(rawValue: 0) 228 | } 229 | 230 | /// The initial time to wait before reconnecting to a broker after the connection has been closed. 231 | /// The time is increased exponentially until ``KafkaConfiguration/ReconnectOptions/maximumBackoff`` is reached. 232 | /// -25% to +50% jitter is applied to each reconnect backoff. 233 | /// Default: `.backoff(.milliseconds(100))` 234 | public var backoff: Backoff = .backoff(.milliseconds(100)) 235 | 236 | /// The maximum time to wait before reconnecting to a broker after the connection has been closed. 237 | /// Default: `.milliseconds(10000)` 238 | public var maximumBackoff: Duration = .milliseconds(10000) { 239 | didSet { 240 | precondition( 241 | self.maximumBackoff.canBeRepresentedAsMilliseconds, 242 | "Lowest granularity is milliseconds" 243 | ) 244 | } 245 | } 246 | 247 | public init() {} 248 | } 249 | 250 | // MARK: - Enum-like Option types 251 | 252 | /// Available debug contexts to enable. 253 | public struct DebugOption: Sendable, Hashable, CustomStringConvertible { 254 | public let description: String 255 | 256 | public static let generic = DebugOption(description: "generic") 257 | public static let broker = DebugOption(description: "broker") 258 | public static let topic = DebugOption(description: "topic") 259 | public static let metadata = DebugOption(description: "metadata") 260 | public static let feature = DebugOption(description: "feature") 261 | public static let queue = DebugOption(description: "queue") 262 | public static let msg = DebugOption(description: "msg") 263 | public static let `protocol` = DebugOption(description: "protocol") 264 | public static let cgrp = DebugOption(description: "cgrp") 265 | public static let security = DebugOption(description: "security") 266 | public static let fetch = DebugOption(description: "fetch") 267 | public static let interceptor = DebugOption(description: "interceptor") 268 | public static let plugin = DebugOption(description: "plugin") 269 | public static let consumer = DebugOption(description: "consumer") 270 | public static let admin = DebugOption(description: "admin") 271 | public static let eos = DebugOption(description: "eos") 272 | public static let all = DebugOption(description: "all") 273 | } 274 | 275 | /// Available IP address families. 276 | public struct IPAddressFamily: Sendable, Hashable, CustomStringConvertible { 277 | public let description: String 278 | 279 | /// Use any IP address family. 280 | public static let any = IPAddressFamily(description: "any") 281 | /// Use the IPv4 address family. 282 | public static let v4 = IPAddressFamily(description: "v4") 283 | /// Use the IPv6 address family.
284 | public static let v6 = IPAddressFamily(description: "v6") 285 | } 286 | } 287 | -------------------------------------------------------------------------------- /Sources/Kafka/Configuration/KafkaProducerConfiguration.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | public struct KafkaProducerConfiguration { 16 | // MARK: - Kafka-specific Config properties 17 | 18 | /// If the ``isAutoCreateTopicsEnabled`` option is set to `true`, 19 | /// the broker will automatically generate topics when producing data to non-existent topics. 20 | /// The configuration specified in this ``KafkaTopicConfiguration`` will be applied to the newly created topic. 21 | /// Default: See default values of ``KafkaTopicConfiguration`` 22 | public var topicConfiguration: KafkaTopicConfiguration = .init() 23 | 24 | /// The time between two consecutive polls. 25 | /// Effectively controls the rate at which incoming events are consumed. 26 | /// Default: `.milliseconds(100)` 27 | public var pollInterval: Duration = .milliseconds(100) 28 | 29 | /// Maximum timeout for flushing outstanding produce requests when the ``KafkaProducer`` is shutting down. 30 | /// Default: `10000` 31 | public var flushTimeoutMilliseconds: Int = 10000 { 32 | didSet { 33 | precondition( 34 | 0...Int(Int32.max) ~= self.flushTimeoutMilliseconds, 35 | "Flush timeout outside of valid range \(0...Int32.max)" 36 | ) 37 | } 38 | } 39 | 40 | // MARK: - Producer-specific Config Properties 41 | 42 | /// When set to true, the producer will ensure that messages are successfully produced exactly once and in the original produce order. 43 | /// The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: 44 | /// ``KafkaProducerConfiguration/maximumInFlightRequestsPerConnection`` = `5` (must be less than or equal to 5), 45 | /// ``KafkaProducerConfiguration/maximumMessageSendRetries`` = `UInt32.max` (must be greater than 0), 46 | /// ``KafkaTopicConfiguration/requiredAcknowledgements`` = ``KafkaTopicConfiguration/RequiredAcknowledgments/all``, 47 | /// queuing strategy = FIFO. 48 | /// Producer instantiation will fail if the user-supplied configuration is incompatible. 49 | /// Default: `false` 50 | public var isIdempotenceEnabled: Bool = false 51 | 52 | /// Producer queue options. 53 | public struct QueueConfiguration: Sendable, Hashable { 54 | /// Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. 55 | public struct MessageLimit: Sendable, Hashable { 56 | internal let rawValue: Int 57 | 58 | private init(rawValue: Int) { 59 | self.rawValue = rawValue 60 | } 61 | 62 | public static func maximumLimit(_ value: Int) -> MessageLimit { 63 | .init(rawValue: value) 64 | } 65 | 66 | /// No limit for the maximum number of messages allowed on the producer queue. 
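/// (This maps to librdkafka's `0` sentinel value, leaving the queue bounded only by ``KafkaProducerConfiguration/QueueConfiguration/maximumMessageBytes``.)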
67 | public static let unlimited: MessageLimit = .init(rawValue: 0) 68 | } 69 | 70 | /// Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. 71 | /// Default: `.maximumLimit(100_000)` 72 | public var messageLimit: MessageLimit = .maximumLimit(100_000) 73 | 74 | /// Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. 75 | /// This property has higher priority than ``KafkaProducerConfiguration/QueueConfiguration/MessageLimit-swift.struct``. 76 | /// Default: `1_048_576 * 1024` 77 | public var maximumMessageBytes: Int = 1_048_576 * 1024 78 | 79 | /// How long to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. 80 | /// A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. 81 | /// (Lowest granularity is milliseconds) 82 | /// Default: `.milliseconds(5)` 83 | public var maximumMessageQueueTime: Duration = .milliseconds(5) { 84 | didSet { 85 | precondition( 86 | self.maximumMessageQueueTime.canBeRepresentedAsMilliseconds, 87 | "Lowest granularity is milliseconds" 88 | ) 89 | } 90 | } 91 | 92 | public init() {} 93 | } 94 | 95 | /// Producer queue options. 96 | public var queue: QueueConfiguration = .init() 97 | 98 | /// How many times to retry sending a failing Message. 99 | /// 100 | /// - Note: retrying may cause reordering unless ``KafkaProducerConfiguration/isIdempotenceEnabled`` is set to `true`. 101 | /// Default: `2_147_483_647` 102 | public var maximumMessageSendRetries: Int = 2_147_483_647 103 | 104 | /// Allow automatic topic creation on the broker when producing to non-existent topics. 105 | /// The broker must also be configured with ``isAutoCreateTopicsEnabled`` = `true` for this configuration to take effect. 106 | /// Default: `true` 107 | public var isAutoCreateTopicsEnabled: Bool = true 108 | 109 | // MARK: - Common Client Config Properties 110 | 111 | /// Client identifier. 112 | /// Default: `"rdkafka"` 113 | public var identifier: String = "rdkafka" 114 | 115 | /// Initial list of brokers. 116 | /// Default: `[]` 117 | public var bootstrapBrokerAddresses: [KafkaConfiguration.BrokerAddress] = [] 118 | 119 | /// Message options. 120 | public var message: KafkaConfiguration.MessageOptions = .init() 121 | 122 | /// Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. 123 | /// Default: `100_000_000` 124 | public var maximumReceiveMessageBytes: Int = 100_000_000 125 | 126 | /// Maximum number of in-flight requests per broker connection. 127 | /// This is a generic property applied to all broker communication, however, it is primarily relevant to produce requests. 128 | /// In particular, note that other mechanisms limit the number of outstanding consumer fetch requests per broker to one. 129 | /// Default: `1_000_000` 130 | public var maximumInFlightRequestsPerConnection: Int = 1_000_000 131 | 132 | /// Metadata cache max age. 133 | /// (Lowest granularity is milliseconds) 134 | /// Default: `.milliseconds(900_000)` 135 | public var maximumMetadataAge: Duration = .milliseconds(900_000) { 136 | didSet { 137 | precondition( 138 | self.maximumMetadataAge.canBeRepresentedAsMilliseconds, 139 | "Lowest granularity is milliseconds" 140 | ) 141 | } 142 | } 143 | 144 | /// Topic metadata options.
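/// For example (illustrative value):
/// ```swift
/// var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: [])
/// config.topicMetadata.maximumPropagation = .milliseconds(60000)
/// ```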
145 | public var topicMetadata: KafkaConfiguration.TopicMetadataOptions = .init() 146 | 147 | /// Topic denylist. 148 | /// Default: `[]` 149 | public var topicDenylist: [String] = [] 150 | 151 | /// Debug options. 152 | /// Default: `[]` 153 | public var debugOptions: [KafkaConfiguration.DebugOption] = [] 154 | 155 | /// Socket options. 156 | public var socket: KafkaConfiguration.SocketOptions = .init() 157 | 158 | /// Broker options. 159 | public var broker: KafkaConfiguration.BrokerOptions = .init() 160 | 161 | /// Reconnect options. 162 | public var reconnect: KafkaConfiguration.ReconnectOptions = .init() 163 | 164 | /// Options for librdkafka metrics updates 165 | public var metrics: KafkaConfiguration.ProducerMetrics = .init() 166 | 167 | /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). 168 | /// Default: `.plaintext` 169 | public var securityProtocol: KafkaConfiguration.SecurityProtocol = .plaintext 170 | 171 | public init( 172 | bootstrapBrokerAddresses: [KafkaConfiguration.BrokerAddress] 173 | ) { 174 | self.bootstrapBrokerAddresses = bootstrapBrokerAddresses 175 | } 176 | } 177 | 178 | // MARK: - KafkaProducerConfiguration + Dictionary 179 | 180 | extension KafkaProducerConfiguration { 181 | internal var dictionary: [String: String] { 182 | var resultDict: [String: String] = [:] 183 | 184 | resultDict["enable.idempotence"] = String(self.isIdempotenceEnabled) 185 | resultDict["queue.buffering.max.messages"] = String(self.queue.messageLimit.rawValue) 186 | resultDict["queue.buffering.max.kbytes"] = String(self.queue.maximumMessageBytes / 1024) 187 | resultDict["queue.buffering.max.ms"] = String(self.queue.maximumMessageQueueTime.inMilliseconds) 188 | resultDict["message.send.max.retries"] = String(self.maximumMessageSendRetries) 189 | resultDict["allow.auto.create.topics"] = String(self.isAutoCreateTopicsEnabled) 190 | 191 | resultDict["client.id"] = self.identifier 192 | resultDict["bootstrap.servers"] = self.bootstrapBrokerAddresses.map(\.description).joined(separator: ",") 193 | resultDict["message.max.bytes"] = String(self.message.maximumBytes) 194 | resultDict["message.copy.max.bytes"] = String(self.message.maximumBytesToCopy) 195 | resultDict["receive.message.max.bytes"] = String(self.maximumReceiveMessageBytes) 196 | resultDict["max.in.flight.requests.per.connection"] = String(self.maximumInFlightRequestsPerConnection) 197 | resultDict["metadata.max.age.ms"] = String(self.maximumMetadataAge.inMilliseconds) 198 | resultDict["topic.metadata.refresh.interval.ms"] = String(self.topicMetadata.refreshInterval.rawValue) 199 | resultDict["topic.metadata.refresh.fast.interval.ms"] = String( 200 | self.topicMetadata.refreshFastInterval.inMilliseconds 201 | ) 202 | resultDict["topic.metadata.refresh.sparse"] = String(self.topicMetadata.isSparseRefreshingEnabled) 203 | resultDict["topic.metadata.propagation.max.ms"] = String(self.topicMetadata.maximumPropagation.inMilliseconds) 204 | resultDict["topic.blacklist"] = self.topicDenylist.joined(separator: ",") // ignore-unacceptable-language 205 | if !self.debugOptions.isEmpty { 206 | resultDict["debug"] = self.debugOptions.map(\.description).joined(separator: ",") 207 | } 208 | resultDict["socket.timeout.ms"] = String(self.socket.timeout.inMilliseconds) 209 | resultDict["socket.send.buffer.bytes"] = String(self.socket.sendBufferBytes.rawValue) 210 | resultDict["socket.receive.buffer.bytes"] = String(self.socket.receiveBufferBytes.rawValue) 211 | resultDict["socket.keepalive.enable"] = 
String(self.socket.isKeepaliveEnabled) 212 | resultDict["socket.nagle.disable"] = String(self.socket.isNagleDisabled) 213 | resultDict["socket.max.fails"] = String(self.socket.maximumFailures.rawValue) 214 | resultDict["socket.connection.setup.timeout.ms"] = String(self.socket.connectionSetupTimeout.inMilliseconds) 215 | resultDict["broker.address.ttl"] = String(self.broker.addressTimeToLive.inMilliseconds) 216 | resultDict["broker.address.family"] = self.broker.addressFamily.description 217 | resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue) 218 | resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds) 219 | 220 | if self.metrics.enabled, 221 | let updateInterval = self.metrics.updateInterval 222 | { 223 | resultDict["statistics.interval.ms"] = String(updateInterval.inMilliseconds) 224 | } 225 | 226 | // Merge with SecurityProtocol configuration dictionary 227 | resultDict.merge(self.securityProtocol.dictionary) { _, _ in 228 | fatalError("securityProtocol and \(#file) should not have duplicate keys") 229 | } 230 | 231 | return resultDict 232 | } 233 | } 234 | 235 | // MARK: - KafkaProducerConfiguration + Sendable 236 | 237 | extension KafkaProducerConfiguration: Sendable {} 238 | -------------------------------------------------------------------------------- /Sources/Kafka/Configuration/KafkaTopicConfiguration.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | /// Used to configure new topics created by the ``KafkaProducer``. 16 | public struct KafkaTopicConfiguration { 17 | /// The number of acknowledgments the leader broker must receive from ISR brokers before responding to the request. 18 | public struct RequiredAcknowledgments: Sendable, Hashable { 19 | internal let rawValue: Int 20 | 21 | private init(rawValue: Int) { 22 | self.rawValue = rawValue 23 | } 24 | 25 | public static func atLeast(_ value: Int) -> RequiredAcknowledgments { 26 | .init(rawValue: value) 27 | } 28 | 29 | /// Broker will block until the message is committed by all in-sync replicas (ISRs). 30 | public static let all: RequiredAcknowledgments = .init(rawValue: -1) 31 | 32 | /// Broker does not send any response/ack to the client. 33 | public static let noAcknowledgments: RequiredAcknowledgments = .init(rawValue: 0) 34 | } 35 | 36 | /// This field indicates the number of acknowledgments the leader broker must receive from ISR brokers before responding to the request. 37 | /// If there are fewer than `min.insync.replicas` (broker configuration) in the ISR set, the produce request will fail. 38 | /// Default: `.all` 39 | public var requiredAcknowledgements: RequiredAcknowledgments = .all 40 | 41 | /// The ack timeout of the producer request. This value is only enforced by the broker and relies on ``requiredAcknowledgements`` being != 0.
42 | /// (Lowest granularity is milliseconds) 43 | /// Default: `.milliseconds(30000)` 44 | public var requestTimeout: Duration = .milliseconds(30000) { 45 | didSet { 46 | precondition( 47 | self.requestTimeout.canBeRepresentedAsMilliseconds, 48 | "Lowest granularity is milliseconds" 49 | ) 50 | } 51 | } 52 | 53 | /// Local message timeout. 54 | public struct MessageTimeout: Sendable, Hashable { 55 | internal let rawValue: UInt 56 | 57 | private init(rawValue: UInt) { 58 | self.rawValue = rawValue 59 | } 60 | 61 | /// (Lowest granularity is milliseconds) 62 | public static func timeout(_ value: Duration) -> MessageTimeout { 63 | precondition( 64 | value.canBeRepresentedAsMilliseconds, 65 | "Lowest granularity is milliseconds" 66 | ) 67 | return .init(rawValue: value.inMilliseconds) 68 | } 69 | 70 | public static let infinite: MessageTimeout = .init(rawValue: 0) 71 | } 72 | 73 | /// Local message timeout. 74 | /// This value is only enforced locally and limits the time a produced message waits for successful delivery. 75 | /// This is the maximum time librdkafka may use to deliver a message (including retries). 76 | /// Delivery error occurs when either the retry count or the message timeout is exceeded. 77 | /// (Lowest granularity is milliseconds) 78 | /// Default: `.timeout(.milliseconds(300_000))` 79 | public var messageTimeout: MessageTimeout = .timeout(.milliseconds(300_000)) 80 | 81 | /// Partitioner. Computes the partition that a message is stored in. 82 | public struct Partitioner: Sendable, Hashable, CustomStringConvertible { 83 | public let description: String 84 | 85 | /// Random distribution. 86 | public static let random = Partitioner(description: "random") 87 | /// CRC32 hash of key (Empty and NULL keys are mapped to a single partition). 88 | public static let consistent = Partitioner(description: "consistent") 89 | /// CRC32 hash of key (Empty and NULL keys are randomly partitioned). 90 | public static let consistentRandom = Partitioner(description: "consistent_random") 91 | /// Java Producer compatible Murmur2 hash of key (NULL keys are mapped to a single partition). 92 | public static let murmur2 = Partitioner(description: "murmur2") 93 | /// Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer). 94 | public static let murmur2Random = Partitioner(description: "murmur2_random") 95 | /// FNV-1a hash of key (NULL keys are mapped to a single partition). 96 | public static let fnv1a = Partitioner(description: "fnv1a") 97 | /// FNV-1a hash of key (NULL keys are randomly partitioned). 98 | public static let fnv1aRandom = Partitioner(description: "fnv1a_random") 99 | } 100 | 101 | /// Partitioner. See ``KafkaTopicConfiguration/Partitioner-swift.struct`` for more information. 102 | /// Default: `.consistentRandom` 103 | public var partitioner: Partitioner = .consistentRandom 104 | 105 | /// Compression-related configuration options. 106 | public struct Compression: Sendable, Hashable { 107 | /// Compression level parameter for algorithm selected by configuration property ``codec-swift.property``. 108 | /// Higher values will result in better compression at the cost of more CPU usage. 
109 | public struct Level: Sendable, Hashable { 110 | internal let rawValue: Int 111 | 112 | private init(rawValue: Int) { 113 | self.rawValue = rawValue 114 | } 115 | 116 | public static func level(_ value: Int) -> Level { 117 | .init(rawValue: value) 118 | } 119 | 120 | /// Codec-dependent default compression level. 121 | public static let codecDependent: Level = .init(rawValue: -1) 122 | } 123 | 124 | /// Process to compress and decompress data. 125 | public struct Codec: Sendable, Hashable, CustomStringConvertible { 126 | private enum _Codec: Sendable, Hashable, CustomStringConvertible { 127 | case none 128 | case gzip(compressionLevel: Level) 129 | case snappy // only compression level is 0 130 | case lz4(compressionLevel: Level) 131 | case zstd(compressionLevel: Level) 132 | case inherit 133 | 134 | internal var description: String { 135 | switch self { 136 | case .none: 137 | return "none" 138 | case .gzip: 139 | return "gzip" 140 | case .snappy: 141 | return "snappy" 142 | case .lz4: 143 | return "lz4" 144 | case .zstd: 145 | return "zstd" 146 | case .inherit: 147 | return "inherit" 148 | } 149 | } 150 | 151 | internal var level: Level { 152 | switch self { 153 | case .none: 154 | return .codecDependent 155 | case .gzip(let compressionLevel): 156 | return compressionLevel 157 | case .snappy: 158 | return .codecDependent 159 | case .lz4(let compressionLevel): 160 | return compressionLevel 161 | case .zstd(let compressionLevel): 162 | return compressionLevel 163 | case .inherit: 164 | return .codecDependent 165 | } 166 | } 167 | } 168 | 169 | private let _internal: _Codec 170 | 171 | public var description: String { 172 | self._internal.description 173 | } 174 | 175 | public var level: Level { 176 | self._internal.level 177 | } 178 | 179 | /// No compression. 180 | public static let none = Codec(_internal: .none) 181 | 182 | /// gzip compression. 183 | /// 184 | /// Usable compression level range: `0-9`. 185 | public static func gzip(compressionLevel: Level) -> Codec { 186 | precondition( 187 | 0...9 ~= compressionLevel.rawValue || compressionLevel == .codecDependent, 188 | "Compression level outside of valid range" 189 | ) 190 | return Codec(_internal: .gzip(compressionLevel: compressionLevel)) 191 | } 192 | 193 | /// snappy compression. 194 | public static let snappy = Codec(_internal: .snappy) 195 | 196 | /// lz4 compression. 197 | /// 198 | /// Usable compression level range: `0-12`. 199 | public static func lz4(compressionLevel: Level) -> Codec { 200 | precondition( 201 | 0...12 ~= compressionLevel.rawValue || compressionLevel == .codecDependent, 202 | "Compression level outside of valid range" 203 | ) 204 | return Codec(_internal: .lz4(compressionLevel: compressionLevel)) 205 | } 206 | 207 | /// zstd compression. 208 | public static func zstd(compressionLevel: Level) -> Codec { 209 | Codec(_internal: .zstd(compressionLevel: compressionLevel)) 210 | } 211 | 212 | /// Inherit global `compression.codec` configuration. 213 | public static let inherit = Codec(_internal: .inherit) 214 | } 215 | 216 | /// Compression codec to use for compressing message sets. 217 | /// Default: `.inherit` 218 | public var codec: Codec = .inherit 219 | } 220 | 221 | /// Compression-related configuration options.
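/// For example, a mid-range gzip level (illustrative; `gzip` accepts levels `0-9`):
/// ```swift
/// var topicConfiguration = KafkaTopicConfiguration()
/// topicConfiguration.compression.codec = .gzip(compressionLevel: .level(6))
/// ```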
222 |     public var compression: Compression = .init()
223 | 
224 |     public init() {}
225 | }
226 | 
227 | // MARK: - KafkaTopicConfiguration + Sendable
228 | 
229 | extension KafkaTopicConfiguration: Sendable {}
230 | 
231 | // MARK: - KafkaTopicConfiguration + Dictionary
232 | 
233 | extension KafkaTopicConfiguration {
234 |     internal var dictionary: [String: String] {
235 |         var resultDict: [String: String] = [:]
236 | 
237 |         resultDict["acks"] = String(self.requiredAcknowledgements.rawValue)
238 |         resultDict["request.timeout.ms"] = String(self.requestTimeout.inMilliseconds)
239 |         resultDict["message.timeout.ms"] = String(self.messageTimeout.rawValue)
240 |         resultDict["partitioner"] = self.partitioner.description
241 |         resultDict["compression.codec"] = self.compression.codec.description
242 |         resultDict["compression.level"] = String(self.compression.codec.level.rawValue)
243 | 
244 |         return resultDict
245 |     }
246 | }
247 | 
--------------------------------------------------------------------------------
/Sources/Kafka/Data/Array+KafkaContiguousBytes.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | extension Array: KafkaContiguousBytes where Array.Element == UInt8 {}
16 | 
--------------------------------------------------------------------------------
/Sources/Kafka/Data/ByteBuffer+KafkaContiguousBytes.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import NIOCore
16 | 
17 | extension ByteBuffer: KafkaContiguousBytes {
18 |     public func withUnsafeBytes<R>(_ body: (UnsafeRawBufferPointer) throws -> R) rethrows -> R {
19 |         try self.withUnsafeReadableBytes {
20 |             try body($0)
21 |         }
22 |     }
23 | }
24 | 
--------------------------------------------------------------------------------
/Sources/Kafka/Data/KafkaContiguousBytes.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | /// Conformance to this protocol gives users a way to provide their own "bag of bytes" types
16 | /// to be used for the serialization of Kafka messages.
17 | /// It provides a general interface for bytes since the Swift Standard Library currently does not
18 | /// provide such a protocol.
19 | ///
20 | /// By conforming your own types to this protocol, you will be able to pass instances of said types
21 | /// directly to ``KafkaProducerMessage`` as key and value.
22 | public protocol KafkaContiguousBytes {
23 |     /// Calls the given closure with the contents of the underlying storage.
24 |     ///
25 |     /// - note: Calling `withUnsafeBytes` multiple times does not guarantee that
26 |     ///         the same buffer pointer will be passed in every time.
27 |     /// - warning: The buffer argument to the body should not be stored or used
28 |     ///            outside of the lifetime of the call to the closure.
29 |     func withUnsafeBytes<R>(_ body: (UnsafeRawBufferPointer) throws -> R) rethrows -> R
30 | }
31 | 
--------------------------------------------------------------------------------
/Sources/Kafka/Data/Never+KafkaContiguousBytes.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | extension Never: KafkaContiguousBytes {
16 |     public func withUnsafeBytes<R>(_: (UnsafeRawBufferPointer) throws -> R) rethrows -> R {
17 |         fatalError("This statement should never be reached")
18 |     }
19 | }
20 | 
--------------------------------------------------------------------------------
/Sources/Kafka/Data/String+KafkaContiguousBytes.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import NIOCore
16 | 
17 | extension String: KafkaContiguousBytes {
18 |     public func withUnsafeBytes<R>(_ body: (UnsafeRawBufferPointer) throws -> R) rethrows -> R {
19 |         if let read = try self.utf8.withContiguousStorageIfAvailable({ unsafePointer in
20 |             // Fast path
21 |             let unsafeRawBufferPointer = UnsafeRawBufferPointer(
22 |                 start: unsafePointer.baseAddress,
23 |                 count: self.utf8.count
24 |             )
25 |             return try body(unsafeRawBufferPointer)
26 |         }) {
27 |             return read
28 |         } else {
29 |             // Slow path
30 |             return try ByteBuffer(string: self).withUnsafeBytes(body)
31 |         }
32 |     }
33 | }
34 | 
--------------------------------------------------------------------------------
/Sources/Kafka/ForTesting/RDKafkaClient+Topic.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | import Logging
17 | 
18 | import struct Foundation.UUID
19 | 
20 | @_spi(Internal)
21 | extension RDKafkaClient {
22 |     /// Create a topic with a unique name (`UUID`).
23 |     /// Blocks for a maximum of `timeout` milliseconds.
24 |     /// - Parameter partitions: Partitions in topic (default: `-1`, the broker default).
25 |     /// - Parameter timeout: Timeout in milliseconds.
26 |     /// - Returns: Name of newly created topic.
27 |     /// - Throws: A ``KafkaError`` if the topic creation failed.
28 |     public func _createUniqueTopic(partitions: Int32 = -1, timeout: Int32) throws -> String {
29 |         let uniqueTopicName = UUID().uuidString
30 | 
31 |         let errorChars = UnsafeMutablePointer<CChar>.allocate(capacity: RDKafkaClient.stringSize)
32 |         defer { errorChars.deallocate() }
33 | 
34 |         guard
35 |             let newTopic = rd_kafka_NewTopic_new(
36 |                 uniqueTopicName,
37 |                 partitions,
38 |                 -1, // use default replication_factor
39 |                 errorChars,
40 |                 RDKafkaClient.stringSize
41 |             )
42 |         else {
43 |             let errorString = String(cString: errorChars)
44 |             throw KafkaError.topicCreation(reason: errorString)
45 |         }
46 |         defer { rd_kafka_NewTopic_destroy(newTopic) }
47 | 
48 |         try self.withKafkaHandlePointer { kafkaHandle in
49 |             let resultQueue = rd_kafka_queue_new(kafkaHandle)
50 |             defer { rd_kafka_queue_destroy(resultQueue) }
51 | 
52 |             var newTopicsArray: [OpaquePointer?]
= [newTopic] 53 | rd_kafka_CreateTopics( 54 | kafkaHandle, 55 | &newTopicsArray, 56 | 1, 57 | nil, 58 | resultQueue 59 | ) 60 | 61 | guard let resultEvent = rd_kafka_queue_poll(resultQueue, timeout) else { 62 | throw KafkaError.topicCreation(reason: "No CreateTopics result after 10s timeout") 63 | } 64 | defer { rd_kafka_event_destroy(resultEvent) } 65 | 66 | let resultCode = rd_kafka_event_error(resultEvent) 67 | guard resultCode == RD_KAFKA_RESP_ERR_NO_ERROR else { 68 | throw KafkaError.rdKafkaError(wrapping: resultCode) 69 | } 70 | 71 | guard let topicsResultEvent = rd_kafka_event_CreateTopics_result(resultEvent) else { 72 | throw KafkaError.topicCreation( 73 | reason: "Received event that is not of type rd_kafka_CreateTopics_result_t" 74 | ) 75 | } 76 | 77 | var resultTopicCount = 0 78 | let topicResults = rd_kafka_CreateTopics_result_topics( 79 | topicsResultEvent, 80 | &resultTopicCount 81 | ) 82 | 83 | guard resultTopicCount == 1, let topicResult = topicResults?[0] else { 84 | throw KafkaError.topicCreation(reason: "Received less/more than one topic result") 85 | } 86 | 87 | let topicResultError = rd_kafka_topic_result_error(topicResult) 88 | guard topicResultError == RD_KAFKA_RESP_ERR_NO_ERROR else { 89 | throw KafkaError.rdKafkaError(wrapping: topicResultError) 90 | } 91 | 92 | let receivedTopicName = String(cString: rd_kafka_topic_result_name(topicResult)) 93 | guard receivedTopicName == uniqueTopicName else { 94 | throw KafkaError.topicCreation(reason: "Received topic result for topic with different name") 95 | } 96 | } 97 | 98 | return uniqueTopicName 99 | } 100 | 101 | /// Delete a topic. 102 | /// Blocks for a maximum of `timeout` milliseconds. 103 | /// - Parameter topic: Topic to delete. 104 | /// - Parameter timeout: Timeout in milliseconds. 105 | /// - Throws: A ``KafkaError`` if the topic deletion failed. 106 | public func _deleteTopic(_ topic: String, timeout: Int32) throws { 107 | let deleteTopic = rd_kafka_DeleteTopic_new(topic) 108 | defer { rd_kafka_DeleteTopic_destroy(deleteTopic) } 109 | 110 | try self.withKafkaHandlePointer { kafkaHandle in 111 | let resultQueue = rd_kafka_queue_new(kafkaHandle) 112 | defer { rd_kafka_queue_destroy(resultQueue) } 113 | 114 | var deleteTopicsArray: [OpaquePointer?] 
= [deleteTopic] 115 | rd_kafka_DeleteTopics( 116 | kafkaHandle, 117 | &deleteTopicsArray, 118 | 1, 119 | nil, 120 | resultQueue 121 | ) 122 | 123 | guard let resultEvent = rd_kafka_queue_poll(resultQueue, timeout) else { 124 | throw KafkaError.topicDeletion(reason: "No DeleteTopics result after 10s timeout") 125 | } 126 | defer { rd_kafka_event_destroy(resultEvent) } 127 | 128 | let resultCode = rd_kafka_event_error(resultEvent) 129 | guard resultCode == RD_KAFKA_RESP_ERR_NO_ERROR else { 130 | throw KafkaError.rdKafkaError(wrapping: resultCode) 131 | } 132 | 133 | guard let topicsResultEvent = rd_kafka_event_DeleteTopics_result(resultEvent) else { 134 | throw KafkaError.topicDeletion( 135 | reason: "Received event that is not of type rd_kafka_DeleteTopics_result_t" 136 | ) 137 | } 138 | 139 | var resultTopicCount = 0 140 | let topicResults = rd_kafka_DeleteTopics_result_topics( 141 | topicsResultEvent, 142 | &resultTopicCount 143 | ) 144 | 145 | guard resultTopicCount == 1, let topicResult = topicResults?[0] else { 146 | throw KafkaError.topicDeletion(reason: "Received less/more than one topic result") 147 | } 148 | 149 | let topicResultError = rd_kafka_topic_result_error(topicResult) 150 | guard topicResultError == RD_KAFKA_RESP_ERR_NO_ERROR else { 151 | throw KafkaError.rdKafkaError(wrapping: topicResultError) 152 | } 153 | 154 | let receivedTopicName = String(cString: rd_kafka_topic_result_name(topicResult)) 155 | guard receivedTopicName == topic else { 156 | throw KafkaError.topicDeletion(reason: "Received topic result for topic with different name") 157 | } 158 | } 159 | } 160 | 161 | public static func makeClientForTopics(config: KafkaConsumerConfiguration, logger: Logger) throws -> RDKafkaClient { 162 | try Self.makeClient(type: .consumer, configDictionary: config.dictionary, events: [], logger: logger) 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /Sources/Kafka/ForTesting/TestMessages.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. 
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | import NIOCore
15 | 
16 | import struct Foundation.Date
17 | 
18 | @_spi(Internal)
19 | public enum _TestMessagesError: Error {
20 |     case deliveryReportsIdsIncorrect
21 |     case deliveryReportsNotAllMessagesAcknoledged
22 |     case deliveryReportsIncorrect
23 | }
24 | 
25 | @_spi(Internal)
26 | public func _createTestMessages(
27 |     topic: String,
28 |     headers: [KafkaHeader] = [],
29 |     count: UInt
30 | ) -> [KafkaProducerMessage<String, String>] {
31 |     Array(0..<count).map {
32 |         KafkaProducerMessage(
33 |             topic: topic,
34 |             headers: headers,
35 |             key: "key \($0)",
36 |             value: "Hello, World! \($0) - \(Date().description)"
37 |         )
38 |     }
39 | }
40 | 
41 | @_spi(Internal)
42 | public func _sendAndAcknowledgeMessages(
43 |     producer: KafkaProducer,
44 |     events: KafkaProducerEvents,
45 |     messages: [KafkaProducerMessage<String, String>],
46 |     skipConsistencyCheck: Bool = false
47 | ) async throws {
48 |     var messageIDs = Set<KafkaProducerMessageID>()
49 |     messageIDs.reserveCapacity(messages.count)
50 | 
51 |     for message in messages {
52 |         while true {
53 |             do {
54 |                 messageIDs.insert(try producer.send(message))
55 |                 break
56 |             } catch let error as KafkaError where error.description.contains("Queue full") {
57 |                 // That means we have to flush the queue immediately, but there is no interface for that:
58 |                 // producer.flush()
59 |             }
60 |         }
61 |     }
62 | 
63 |     var receivedDeliveryReports = Set<KafkaDeliveryReport>()
64 |     receivedDeliveryReports.reserveCapacity(messages.count)
65 | 
66 |     for await event in events {
67 |         switch event {
68 |         case .deliveryReports(let deliveryReports):
69 |             for deliveryReport in deliveryReports {
70 |                 receivedDeliveryReports.insert(deliveryReport)
71 |             }
72 |         default:
73 |             break // Ignore any other events
74 |         }
75 | 
76 |         if receivedDeliveryReports.count >= messages.count {
77 |             break
78 |         }
79 |     }
80 | 
81 |     guard Set(receivedDeliveryReports.map(\.id)) == messageIDs else {
82 |         throw _TestMessagesError.deliveryReportsIdsIncorrect
83 |     }
84 | 
85 |     let acknowledgedMessages: [KafkaAcknowledgedMessage] = receivedDeliveryReports.compactMap {
86 |         guard case .acknowledged(let receivedMessage) = $0.status else {
87 |             return nil
88 |         }
89 |         return receivedMessage
90 |     }
91 | 
92 |     guard messages.count == acknowledgedMessages.count else {
93 |         throw _TestMessagesError.deliveryReportsNotAllMessagesAcknoledged
94 |     }
95 |     if skipConsistencyCheck {
96 |         return
97 |     }
98 |     for message in messages {
99 |         guard acknowledgedMessages.contains(where: { $0.topic == message.topic }),
100 |             acknowledgedMessages.contains(where: { $0.key == ByteBuffer(string: message.key!) }),
101 |             acknowledgedMessages.contains(where: { $0.value == ByteBuffer(string: message.value) })
102 |         else {
103 |             throw _TestMessagesError.deliveryReportsIncorrect
104 |         }
105 |     }
106 | }
107 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaAcknowledgedMessage.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc.
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | import NIOCore
17 | 
18 | /// A message acknowledged by the Kafka cluster.
19 | public struct KafkaAcknowledgedMessage {
20 |     /// The topic that the message was sent to.
21 |     public var topic: String
22 |     /// The partition that the message was sent to.
23 |     public var partition: KafkaPartition
24 |     /// The key of the message.
25 |     public var key: ByteBuffer?
26 |     /// The body of the message.
27 |     public var value: ByteBuffer
28 |     /// The offset of the message in its partition.
29 |     public var offset: KafkaOffset
30 |     /// The headers of the message.
31 |     public var headers: [KafkaHeader]
32 | 
33 |     /// Initialize ``KafkaAcknowledgedMessage`` from `rd_kafka_message_t` pointer.
34 |     /// - Throws: A ``KafkaError`` for failed acknowledgements or malformed messages.
35 |     internal init(messagePointer: UnsafePointer<rd_kafka_message_t>) throws {
36 |         let rdKafkaMessage = messagePointer.pointee
37 | 
38 |         let valueBufferPointer = UnsafeRawBufferPointer(start: rdKafkaMessage.payload, count: rdKafkaMessage.len)
39 |         self.value = ByteBuffer(bytes: valueBufferPointer)
40 | 
41 |         guard rdKafkaMessage.err == RD_KAFKA_RESP_ERR_NO_ERROR else {
42 |             throw KafkaError.rdKafkaError(wrapping: rdKafkaMessage.err)
43 |         }
44 | 
45 |         #if swift(<6.0)
46 |         guard let topic = String(validatingUTF8: rd_kafka_topic_name(rdKafkaMessage.rkt)) else {
47 |             fatalError("Received topic name that is non-valid UTF-8")
48 |         }
49 |         #else
50 |         guard let topic = String(validatingCString: rd_kafka_topic_name(rdKafkaMessage.rkt)) else {
51 |             fatalError("Received topic name that is non-valid UTF-8")
52 |         }
53 |         #endif
54 | 
55 |         self.topic = topic
56 | 
57 |         self.partition = KafkaPartition(rawValue: Int(rdKafkaMessage.partition))
58 |         self.headers = try RDKafkaClient.getHeaders(for: messagePointer)
59 |         if let keyPointer = rdKafkaMessage.key {
60 |             let keyBufferPointer = UnsafeRawBufferPointer(
61 |                 start: keyPointer,
62 |                 count: rdKafkaMessage.key_len
63 |             )
64 |             self.key = .init(bytes: keyBufferPointer)
65 |         } else {
66 |             self.key = nil
67 |         }
68 | 
69 |         self.offset = KafkaOffset(rawValue: Int(rdKafkaMessage.offset))
70 |     }
71 | }
72 | 
73 | // MARK: KafkaAcknowledgedMessage + Hashable
74 | 
75 | extension KafkaAcknowledgedMessage: Hashable {}
76 | 
77 | // MARK: KafkaAcknowledgedMessage + Sendable
78 | 
79 | extension KafkaAcknowledgedMessage: Sendable {}
80 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaConsumerEvent.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | /// An enumeration representing events that can be received through the ``KafkaConsumerEvents`` asynchronous sequence.
16 | public enum KafkaConsumerEvent: Sendable, Hashable {
17 |     /// - Important: Always provide a `default` case when switching over this `enum`.
18 |     case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY
19 | 
20 |     internal init(_ event: RDKafkaClient.KafkaEvent) {
21 |         switch event {
22 |         case .statistics:
23 |             fatalError("Cannot cast \(event) to KafkaConsumerEvent")
24 |         case .deliveryReport:
25 |             fatalError("Cannot cast \(event) to KafkaConsumerEvent")
26 |         }
27 |     }
28 | }
29 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaConsumerMessage.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | import NIOCore
17 | 
18 | /// A message received from the Kafka cluster.
19 | public struct KafkaConsumerMessage {
20 |     /// The topic that the message was received from.
21 |     public var topic: String
22 |     /// The partition that the message was received from.
23 |     public var partition: KafkaPartition
24 |     /// The headers of the message.
25 |     public var headers: [KafkaHeader]
26 |     /// The key of the message.
27 |     public var key: ByteBuffer?
28 |     /// The body of the message.
29 |     public var value: ByteBuffer
30 |     /// The offset of the message in its partition.
31 |     public var offset: KafkaOffset
32 | 
33 |     /// Initialize ``KafkaConsumerMessage`` from `rd_kafka_message_t` pointer.
34 |     /// - Throws: A ``KafkaError`` if the received message is an error message or malformed.
35 |     internal init(messagePointer: UnsafePointer<rd_kafka_message_t>) throws {
36 |         let rdKafkaMessage = messagePointer.pointee
37 | 
38 |         guard let valuePointer = rdKafkaMessage.payload else {
39 |             fatalError("Could not resolve payload of consumer message")
40 |         }
41 | 
42 |         let valueBufferPointer = UnsafeRawBufferPointer(start: valuePointer, count: rdKafkaMessage.len)
43 | 
44 |         guard rdKafkaMessage.err == RD_KAFKA_RESP_ERR_NO_ERROR else {
45 |             var errorStringBuffer = ByteBuffer(bytes: valueBufferPointer)
46 |             let errorString = errorStringBuffer.readString(length: errorStringBuffer.readableBytes)
47 | 
48 |             if let errorString {
49 |                 throw KafkaError.messageConsumption(reason: errorString)
50 |             } else {
51 |                 throw KafkaError.rdKafkaError(wrapping: rdKafkaMessage.err)
52 |             }
53 |         }
54 | 
55 |         #if swift(<6.0)
56 |         guard let topic = String(validatingUTF8: rd_kafka_topic_name(rdKafkaMessage.rkt)) else {
57 |             fatalError("Received topic name that is non-valid UTF-8")
58 |         }
59 |         #else
60 |         guard let topic = String(validatingCString: rd_kafka_topic_name(rdKafkaMessage.rkt)) else {
61 |             fatalError("Received topic name that is non-valid UTF-8")
62 |         }
63 |         #endif
64 | 
65 |         self.topic = topic
66 | 
67 |         self.partition = KafkaPartition(rawValue: Int(rdKafkaMessage.partition))
68 | 
69 |         self.headers = try RDKafkaClient.getHeaders(for: messagePointer)
70 | 
71 |         if let keyPointer = rdKafkaMessage.key {
72 |             let keyBufferPointer = UnsafeRawBufferPointer(
73 |                 start: keyPointer,
74 |                 count: rdKafkaMessage.key_len
75 |             )
76 |             self.key = .init(bytes: keyBufferPointer)
77 |         } else {
78 |             self.key = nil
79 |         }
80 | 
81 |         self.value = ByteBuffer(bytes: valueBufferPointer)
82 | 
83 |         self.offset = KafkaOffset(rawValue: Int(rdKafkaMessage.offset))
84 |     }
85 | }
86 | 
87 | // MARK: - KafkaConsumerMessage + Hashable
88 | 
89 | extension KafkaConsumerMessage: Hashable {}
90 | 
91 | // MARK: - KafkaConsumerMessage + Sendable
92 | 
93 | extension KafkaConsumerMessage: Sendable {}
94 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaDeliveryReport.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | 
17 | /// A delivery report for a message that was sent to the Kafka cluster.
18 | public struct KafkaDeliveryReport: Sendable, Hashable {
19 |     public enum Status: Sendable, Hashable {
20 |         /// The message has been successfully acknowledged by the Kafka cluster.
21 |         case acknowledged(message: KafkaAcknowledgedMessage)
22 |         /// The message failed to be acknowledged by the Kafka cluster and encountered an error.
23 |         case failure(KafkaError)
24 |     }
25 | 
26 |     /// The ``Status`` of a Kafka producer message after attempting to send it.
27 |     public var status: Status
28 | 
29 |     /// The unique identifier assigned by the ``KafkaProducer`` when the message was sent to Kafka.
30 |     /// The same identifier is returned by ``KafkaProducer/send(_:)`` and can be used to correlate
31 |     /// a sent message with a delivery report.
32 |     public var id: KafkaProducerMessageID
33 | 
34 |     internal init?(messagePointer: UnsafePointer<rd_kafka_message_t>?) {
35 |         guard let messagePointer else {
36 |             return nil
37 |         }
38 | 
39 |         self.id = KafkaProducerMessageID(rawValue: UInt(bitPattern: messagePointer.pointee._private))
40 | 
41 |         do {
42 |             let message = try KafkaAcknowledgedMessage(messagePointer: messagePointer)
43 |             self.status = .acknowledged(message: message)
44 |         } catch {
45 |             guard let error = error as? KafkaError else {
46 |                 fatalError("Caught error that is not of type \(KafkaError.self)")
47 |             }
48 |             self.status = .failure(error)
49 |         }
50 |     }
51 | }
52 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaError.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | 
17 | /// An error that can occur on `Kafka` operations.
18 | ///
19 | /// - Note: `Hashable` conformance only considers the ``KafkaError/code``.
20 | public struct KafkaError: Error, CustomStringConvertible, @unchecked Sendable {
21 |     // Note: @unchecked because we use a backing class for storage (copy-on-write).
22 | 
23 |     private var backing: Backing
24 | 
25 |     /// Represents the kind of error that was encountered.
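    /// A matching sketch (assumes `error` is a caught ``KafkaError``):
    ///
    /// ```swift
    /// if error.code == .connectionFailed {
    ///     // The connection could not be established; consider retrying.
    /// }
    /// ```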
26 | public var code: ErrorCode { 27 | get { 28 | self.backing.code 29 | } 30 | set { 31 | self.makeUnique() 32 | self.backing.code = newValue 33 | } 34 | } 35 | 36 | private var reason: String { 37 | self.backing.reason 38 | } 39 | 40 | private var file: String { 41 | self.backing.file 42 | } 43 | 44 | private var line: UInt { 45 | self.backing.line 46 | } 47 | 48 | public var description: String { 49 | "KafkaError.\(self.code): \(self.reason) \(self.file):\(self.line)" 50 | } 51 | 52 | private mutating func makeUnique() { 53 | if !isKnownUniquelyReferenced(&self.backing) { 54 | self.backing = self.backing.copy() 55 | } 56 | } 57 | 58 | static func rdKafkaError( 59 | wrapping error: rd_kafka_resp_err_t, 60 | file: String = #fileID, 61 | line: UInt = #line 62 | ) -> KafkaError { 63 | let errorMessage = String(cString: rd_kafka_err2str(error)) 64 | return KafkaError( 65 | backing: .init( 66 | code: .underlying, 67 | reason: errorMessage, 68 | file: file, 69 | line: line 70 | ) 71 | ) 72 | } 73 | 74 | static func config( 75 | reason: String, 76 | file: String = #fileID, 77 | line: UInt = #line 78 | ) -> KafkaError { 79 | KafkaError( 80 | backing: .init( 81 | code: .config, 82 | reason: reason, 83 | file: file, 84 | line: line 85 | ) 86 | ) 87 | } 88 | 89 | static func topicConfig( 90 | reason: String, 91 | file: String = #fileID, 92 | line: UInt = #line 93 | ) -> KafkaError { 94 | KafkaError( 95 | backing: .init( 96 | code: .topicConfig, 97 | reason: reason, 98 | file: file, 99 | line: line 100 | ) 101 | ) 102 | } 103 | 104 | static func client( 105 | reason: String, 106 | file: String = #fileID, 107 | line: UInt = #line 108 | ) -> KafkaError { 109 | KafkaError( 110 | backing: .init( 111 | code: .connectionFailed, 112 | reason: reason, 113 | file: file, 114 | line: line 115 | ) 116 | ) 117 | } 118 | 119 | static func connectionClosed( 120 | reason: String, 121 | file: String = #fileID, 122 | line: UInt = #line 123 | ) -> KafkaError { 124 | KafkaError( 125 | backing: .init( 126 | code: .shutdown, 127 | reason: reason, 128 | file: file, 129 | line: line 130 | ) 131 | ) 132 | } 133 | 134 | static func messageConsumption( 135 | reason: String, 136 | file: String = #fileID, 137 | line: UInt = #line 138 | ) -> KafkaError { 139 | KafkaError( 140 | backing: .init( 141 | code: .messageConsumptionFailed, 142 | reason: reason, 143 | file: file, 144 | line: line 145 | ) 146 | ) 147 | } 148 | 149 | static func topicCreation( 150 | reason: String, 151 | file: String = #fileID, 152 | line: UInt = #line 153 | ) -> KafkaError { 154 | KafkaError( 155 | backing: .init( 156 | code: .topicCreationFailed, 157 | reason: reason, 158 | file: file, 159 | line: line 160 | ) 161 | ) 162 | } 163 | 164 | static func topicDeletion( 165 | reason: String, 166 | file: String = #fileID, 167 | line: UInt = #line 168 | ) -> KafkaError { 169 | KafkaError( 170 | backing: .init( 171 | code: .topicDeletionFailed, 172 | reason: reason, 173 | file: file, 174 | line: line 175 | ) 176 | ) 177 | } 178 | } 179 | 180 | extension KafkaError { 181 | /// Represents the kind of error. 182 | /// 183 | /// The same error may be thrown from more than one place for more than one reason. 184 | /// This type represents only a relatively high-level error: 185 | /// use the string representation of ``KafkaError`` to get more details about the specific cause. 
186 | public struct ErrorCode: Hashable, Sendable, CustomStringConvertible { 187 | fileprivate enum BackingCode { 188 | case rdKafkaError 189 | case config 190 | case topicConfig 191 | case connectionClosed 192 | case client 193 | case messageConsumption 194 | case topicCreation 195 | case topicDeletion 196 | } 197 | 198 | fileprivate var backingCode: BackingCode 199 | 200 | fileprivate init(_ backingCode: BackingCode) { 201 | self.backingCode = backingCode 202 | } 203 | 204 | /// Errors caused in the underlying transport. 205 | public static let underlying = ErrorCode(.rdKafkaError) 206 | /// There is an error in the Kafka client configuration. 207 | public static let config = ErrorCode(.config) 208 | /// There is an error in the Kafka topic configuration. 209 | public static let topicConfig = ErrorCode(.topicConfig) 210 | /// The Kafka connection is already shutdown. 211 | public static let shutdown = ErrorCode(.connectionClosed) 212 | /// Establishing a connection to Kafka failed. 213 | public static let connectionFailed = ErrorCode(.client) 214 | /// Consuming a message failed. 215 | public static let messageConsumptionFailed = ErrorCode(.messageConsumption) 216 | /// Creating a topic failed. 217 | public static let topicCreationFailed = ErrorCode(.topicCreation) 218 | /// Deleting a topic failed. 219 | public static let topicDeletionFailed = ErrorCode(.topicDeletion) 220 | 221 | public var description: String { 222 | String(describing: self.backingCode) 223 | } 224 | } 225 | } 226 | 227 | // MARK: - KafkaError + Backing 228 | 229 | extension KafkaError { 230 | final class Backing: Hashable { 231 | var code: KafkaError.ErrorCode 232 | 233 | let reason: String 234 | 235 | let file: String 236 | 237 | let line: UInt 238 | 239 | fileprivate init( 240 | code: KafkaError.ErrorCode, 241 | reason: String, 242 | file: String, 243 | line: UInt 244 | ) { 245 | self.code = code 246 | self.reason = reason 247 | self.file = file 248 | self.line = line 249 | } 250 | 251 | // Only the error code matters for equality. 252 | static func == (lhs: Backing, rhs: Backing) -> Bool { 253 | lhs.code == rhs.code 254 | } 255 | 256 | func hash(into hasher: inout Hasher) { 257 | hasher.combine(self.code) 258 | } 259 | 260 | fileprivate func copy() -> Backing { 261 | Backing(code: self.code, reason: self.reason, file: self.file, line: self.line) 262 | } 263 | } 264 | } 265 | 266 | // MARK: - KafkaError + Hashable 267 | 268 | extension KafkaError: Hashable { 269 | public static func == (lhs: KafkaError, rhs: KafkaError) -> Bool { 270 | lhs.backing == rhs.backing 271 | } 272 | 273 | public func hash(into hasher: inout Hasher) { 274 | hasher.combine(self.backing) 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /Sources/Kafka/KafkaHeader.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. 
and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import NIOCore
16 | 
17 | /// A structure representing a header for a Kafka message.
18 | /// Headers are key-value pairs that can be attached to Kafka messages to provide additional metadata.
19 | public struct KafkaHeader: Sendable, Hashable {
20 |     /// The key associated with the header.
21 |     public var key: String
22 | 
23 |     /// The value associated with the header.
24 |     public var value: ByteBuffer?
25 | 
26 |     /// Initializes a new Kafka header with the provided key and optional value.
27 |     ///
28 |     /// - Parameters:
29 |     ///   - key: The key associated with the header.
30 |     ///   - value: The optional binary value associated with the header.
31 |     public init(
32 |         key: String,
33 |         value: ByteBuffer? = nil
34 |     ) {
35 |         self.key = key
36 |         self.value = value
37 |     }
38 | }
39 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaOffset.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | 
17 | /// Message offset on a Kafka partition queue.
18 | public struct KafkaOffset: RawRepresentable {
19 |     public var rawValue: Int
20 | 
21 |     public init(rawValue: Int) {
22 |         self.rawValue = rawValue
23 |     }
24 | 
25 |     /// Start consuming from the beginning of the Kafka partition queue, i.e. the oldest message.
26 |     public static let beginning = KafkaOffset(rawValue: Int(RD_KAFKA_OFFSET_BEGINNING))
27 | 
28 |     /// Start consuming from the end of the Kafka partition queue, i.e. wait for the next message to be produced.
29 |     public static let end = KafkaOffset(rawValue: Int(RD_KAFKA_OFFSET_END))
30 | 
31 |     /// Start consuming from the offset retrieved from the offset store.
32 |     public static let storedOffset = KafkaOffset(rawValue: Int(RD_KAFKA_OFFSET_STORED))
33 | 
34 |     /// Start consuming with the `count` latest messages.
35 |     /// Example: the current end offset is at `12345` and `count = 200`.
36 |     /// This means start reading from offset `12345 - 200 = 12145`.
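    /// A usage sketch (the count is illustrative):
    ///
    /// ```swift
    /// let offset: KafkaOffset = .tail(200)
    /// ```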
37 | public static func tail(_ count: Int) -> KafkaOffset { 38 | KafkaOffset(rawValue: Int(RD_KAFKA_OFFSET_TAIL_BASE) - count) 39 | } 40 | } 41 | 42 | // MARK: KafkaOffset + Hashable 43 | 44 | extension KafkaOffset: Hashable {} 45 | 46 | // MARK: KafkaOffset + Sendable 47 | 48 | extension KafkaOffset: Sendable {} 49 | -------------------------------------------------------------------------------- /Sources/Kafka/KafkaPartition.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Crdkafka 16 | 17 | /// Type for representing the id of a Kafka Partition. 18 | public struct KafkaPartition: RawRepresentable { 19 | public var rawValue: Int { 20 | didSet { 21 | precondition( 22 | 0...Int(Int32.max) ~= self.rawValue || self.rawValue == RD_KAFKA_PARTITION_UA, 23 | "Partition ID outside of valid range \(0...Int32.max)" 24 | ) 25 | } 26 | } 27 | 28 | public init(rawValue: Int) { 29 | precondition( 30 | 0...Int(Int32.max) ~= rawValue || rawValue == RD_KAFKA_PARTITION_UA, 31 | "Partition ID outside of valid range \(0...Int32.max)" 32 | ) 33 | self.rawValue = rawValue 34 | } 35 | 36 | /// Automatically assign a partition using the topic's partitioner function. 37 | public static let unassigned = KafkaPartition(rawValue: Int(RD_KAFKA_PARTITION_UA)) 38 | } 39 | 40 | // MARK: KafkaPartition + Hashable 41 | 42 | extension KafkaPartition: Hashable {} 43 | 44 | // MARK: KafkaPartition + Sendable 45 | 46 | extension KafkaPartition: Sendable {} 47 | -------------------------------------------------------------------------------- /Sources/Kafka/KafkaProducerEvent.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | /// An enumeration representing events that can be received through the ``KafkaProducerEvents`` asynchronous sequence. 16 | public enum KafkaProducerEvent: Sendable, Hashable { 17 | /// A collection of delivery reports received from the Kafka cluster indicating the status of produced messages. 18 | case deliveryReports([KafkaDeliveryReport]) 19 | /// - Important: Always provide a `default` case when switching over this `enum`. 
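    /// A consumption sketch (assumes `events` is a ``KafkaProducerEvents`` sequence obtained from a ``KafkaProducer``):
    ///
    /// ```swift
    /// for await event in events {
    ///     switch event {
    ///     case .deliveryReports(let reports):
    ///         print("Delivered \(reports.count) message(s)")
    ///     default:
    ///         break // Required: new event cases may be added in the future.
    ///     }
    /// }
    /// ```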
20 |     case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY
21 | 
22 |     internal init(_ event: RDKafkaClient.KafkaEvent) {
23 |         switch event {
24 |         case .deliveryReport(let results):
25 |             self = .deliveryReports(results)
26 |         case .statistics:
27 |             fatalError("Cannot cast \(event) to KafkaProducerEvent")
28 |         }
29 |     }
30 | }
31 | 
--------------------------------------------------------------------------------
/Sources/Kafka/KafkaProducerMessage.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | import NIOCore
17 | 
18 | /// Message that is sent by the `KafkaProducer`.
19 | public struct KafkaProducerMessage<Key: KafkaContiguousBytes, Value: KafkaContiguousBytes> {
20 |     /// The topic to which the message will be sent.
21 |     public var topic: String
22 | 
23 |     /// The partition to which the message will be sent.
24 |     /// Defaults to ``KafkaPartition/unassigned``.
25 |     /// This means the message will be automatically assigned a partition using the topic's partitioner function.
26 |     public var partition: KafkaPartition
27 | 
28 |     /// The headers of the message.
29 |     public var headers: [KafkaHeader]
30 | 
31 |     /// The optional key associated with the message.
32 |     /// If the ``KafkaPartition`` is ``KafkaPartition/unassigned``, the ``KafkaProducerMessage/key`` is used to ensure
33 |     /// that two ``KafkaProducerMessage``s with the same key still get sent to the same ``KafkaPartition``.
34 |     public var key: Key?
35 | 
36 |     /// The value of the message to be sent.
37 |     public var value: Value
38 | 
39 |     /// Create a new `KafkaProducerMessage` with a ``KafkaContiguousBytes`` key and value.
40 |     ///
41 |     /// - Parameters:
42 |     ///   - topic: The topic the message will be sent to. Topics may be created by the `KafkaProducer` if non-existent.
43 |     ///   - partition: The topic partition the message will be sent to. If not set explicitly, the partition will be assigned automatically.
44 |     ///   - headers: The headers of the message.
45 |     ///   - key: Used to guarantee that messages with the same key will be sent to the same partition so that their order is preserved.
46 |     ///   - value: The message's value.
47 |     public init(
48 |         topic: String,
49 |         partition: KafkaPartition = .unassigned,
50 |         headers: [KafkaHeader] = [],
51 |         key: Key?,
52 |         value: Value
53 |     ) {
54 |         self.topic = topic
55 |         self.partition = partition
56 |         self.headers = headers
57 |         self.key = key
58 |         self.value = value
59 |     }
60 | }
61 | 
62 | extension KafkaProducerMessage where Key == Never {
63 |     /// Create a new `KafkaProducerMessage` with a ``KafkaContiguousBytes`` value.
64 |     ///
65 |     /// - Parameters:
66 |     ///   - topic: The topic the message will be sent to. Topics may be created by the `KafkaProducer` if non-existent.
67 |     ///   - partition: The topic partition the message will be sent to. If not set explicitly, the partition will be assigned automatically.
68 |     ///   - headers: The headers of the message.
69 |     ///   - value: The message body.
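    /// A usage sketch (the topic name is illustrative):
    ///
    /// ```swift
    /// let message = KafkaProducerMessage(topic: "test-topic", value: "Hello, World!")
    /// ```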
70 | public init( 71 | topic: String, 72 | partition: KafkaPartition = .unassigned, 73 | headers: [KafkaHeader] = [], 74 | value: Value 75 | ) { 76 | self.topic = topic 77 | self.partition = partition 78 | self.headers = headers 79 | self.key = nil 80 | self.value = value 81 | } 82 | } 83 | 84 | extension KafkaProducerMessage: Hashable where Key: Hashable, Value: Hashable {} 85 | 86 | extension KafkaProducerMessage: Equatable where Key: Equatable, Value: Equatable {} 87 | 88 | extension KafkaProducerMessage: Sendable where Key: Sendable, Value: Sendable {} 89 | -------------------------------------------------------------------------------- /Sources/Kafka/KafkaProducerMessageID.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | /// ID of message produced by the ``KafkaProducer``. 16 | /// The ``KafkaProducerMessageID`` can be used to relate incoming ``KafkaDeliveryReport``s 17 | /// with their corresponding ``KafkaProducer/send(_:)`` invocation. 18 | public struct KafkaProducerMessageID { 19 | internal var rawValue: UInt 20 | 21 | internal init(rawValue: UInt) { 22 | self.rawValue = rawValue 23 | } 24 | } 25 | 26 | // MARK: - KafkaProducerMessageID + CustomStringConvertible 27 | 28 | extension KafkaProducerMessageID: CustomStringConvertible { 29 | public var description: String { 30 | String(self.rawValue) 31 | } 32 | } 33 | 34 | // MARK: - KafkaProducerMessageID + Hashable 35 | 36 | extension KafkaProducerMessageID: Hashable {} 37 | 38 | // MARK: - KafkaProducerMessageID + Comparable 39 | 40 | extension KafkaProducerMessageID: Comparable { 41 | public static func < (lhs: KafkaProducerMessageID, rhs: KafkaProducerMessageID) -> Bool { 42 | lhs.rawValue < rhs.rawValue 43 | } 44 | } 45 | 46 | // MARK: - KafkaProducerMessageID + Sendable 47 | 48 | extension KafkaProducerMessageID: Sendable {} 49 | -------------------------------------------------------------------------------- /Sources/Kafka/RDKafka/RDKafkaConfig.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Crdkafka 16 | import Logging 17 | 18 | /// A collection of helper functions wrapping common `rd_kafka_conf_*` functions in Swift. 19 | struct RDKafkaConfig { 20 | /// Create a new `rd_kafka_conf_t` object in memory and initialize it with the given configuration properties. 
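    /// A call sketch (the broker address is illustrative):
    ///
    /// ```swift
    /// let conf = try RDKafkaConfig.createFrom(configDictionary: ["bootstrap.servers": "localhost:9092"])
    /// ```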
21 |     /// - Parameter configDictionary: A dictionary containing the Kafka client configurations.
22 |     /// - Returns: An `OpaquePointer` pointing to the newly created `rd_kafka_conf_t` object in memory.
23 |     /// - Throws: A ``KafkaError`` if setting a config value failed.
24 |     static func createFrom(configDictionary: [String: String]) throws -> OpaquePointer {
25 |         let configPointer: OpaquePointer = rd_kafka_conf_new()
26 |         for (key, value) in configDictionary {
27 |             try Self.set(configPointer: configPointer, key: key, value: value)
28 |         }
29 | 
30 |         return configPointer
31 |     }
32 | 
33 |     /// A Swift wrapper for `rd_kafka_conf_set`.
34 |     /// - Parameter configPointer: An `OpaquePointer` pointing to the `rd_kafka_conf_t` object in memory.
35 |     /// - Parameter key: The configuration property to be changed.
36 |     /// - Parameter value: The new value of the configuration property to be changed.
37 |     /// - Throws: A ``KafkaError`` if setting the value failed.
38 |     static func set(configPointer: OpaquePointer, key: String, value: String) throws {
39 |         let errorChars = UnsafeMutablePointer<CChar>.allocate(capacity: RDKafkaClient.stringSize)
40 |         defer { errorChars.deallocate() }
41 | 
42 |         let configResult = rd_kafka_conf_set(
43 |             configPointer,
44 |             key,
45 |             value,
46 |             errorChars,
47 |             RDKafkaClient.stringSize
48 |         )
49 | 
50 |         if configResult != RD_KAFKA_CONF_OK {
51 |             let errorString = String(cString: errorChars)
52 |             throw KafkaError.config(reason: errorString)
53 |         }
54 |     }
55 | 
56 |     /// Enable event sourcing.
57 |     ///
58 |     /// - Parameter events: a bitmask of ``RDKafkaEvent``s to enable
59 |     ///   for consumption by `rd_kafka_queue_poll()`.
60 |     static func setEvents(configPointer: OpaquePointer, events: [RDKafkaEvent]) {
61 |         let events = events.map(\.rawValue).reduce(0) { $0 | $1 }
62 |         rd_kafka_conf_set_events(configPointer, events)
63 |     }
64 | }
65 | 
--------------------------------------------------------------------------------
/Sources/Kafka/RDKafka/RDKafkaEvent.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | 
17 | /// Swift `enum` wrapping `librdkafka`'s `RD_KAFKA_EVENT_*` types.
18 | /// See `RD_KAFKA_EVENT_*` in rdkafka.h for reference.
19 | internal enum RDKafkaEvent: Int32 {
20 |     case none = 0x0
21 |     case deliveryReport = 0x1
22 |     case fetch = 0x2
23 |     case log = 0x4
24 |     case error = 0x8
25 |     case rebalance = 0x10
26 |     case offsetCommit = 0x20
27 |     case statistics = 0x40
28 | }
29 | 
--------------------------------------------------------------------------------
/Sources/Kafka/RDKafka/RDKafkaStatistics.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2023 Apple Inc.
and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | // MARK: - RDKafkaStatistics 16 | 17 | struct RDKafkaStatistics: Hashable, Codable { 18 | let queuedOperation: Int? 19 | let queuedProducerMessages: Int? 20 | let queuedProducerMessagesSize: Int? 21 | let topicsInMetadataCache: Int? 22 | let totalKafkaBrokerRequests: Int? 23 | let totalKafkaBrokerBytesSent: Int? 24 | let totalKafkaBrokerResponses: Int? 25 | let totalKafkaBrokerResponsesSize: Int? 26 | let totalKafkaBrokerMessagesSent: Int? 27 | let totalKafkaBrokerMessagesBytesSent: Int? 28 | let totalKafkaBrokerMessagesRecieved: Int? 29 | let totalKafkaBrokerMessagesBytesRecieved: Int? 30 | 31 | enum CodingKeys: String, CodingKey { 32 | case queuedOperation = "replyq" 33 | case queuedProducerMessages = "msg_cnt" 34 | case queuedProducerMessagesSize = "msg_size" 35 | case topicsInMetadataCache = "metadata_cache_cnt" 36 | case totalKafkaBrokerRequests = "tx" 37 | case totalKafkaBrokerBytesSent = "tx_bytes" 38 | case totalKafkaBrokerResponses = "rx" 39 | case totalKafkaBrokerResponsesSize = "rx_bytes" 40 | case totalKafkaBrokerMessagesSent = "txmsgs" 41 | case totalKafkaBrokerMessagesBytesSent = "txmsg_bytes" 42 | case totalKafkaBrokerMessagesRecieved = "rxmsgs" 43 | case totalKafkaBrokerMessagesBytesRecieved = "rxmsg_bytes" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /Sources/Kafka/RDKafka/RDKafkaTopicConfig.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Crdkafka 16 | 17 | /// A collection of helper functions wrapping common `rd_kafka_topic_conf_*` functions in Swift. 18 | struct RDKafkaTopicConfig { 19 | /// Create a new `rd_kafka_topic_conf_t` object in memory and initialize it with the given configuration properties. 20 | /// - Parameter topicConfiguration: The ``KafkaTopicConfiguration`` used to initialize the `rd_kafka_topic_conf_t` object. 21 | /// - Returns: An `OpaquePointer` pointing to the newly created `rd_kafka_topic_conf_t` object in memory. 22 | /// - Throws: A ``KafkaError`` if setting a config value failed. 23 | static func createFrom(topicConfiguration: KafkaTopicConfiguration) throws -> OpaquePointer { 24 | let configPointer: OpaquePointer = rd_kafka_topic_conf_new() 25 | for (key, value) in topicConfiguration.dictionary { 26 | try Self.set(configPointer: configPointer, key: key, value: value) 27 | } 28 | 29 | return configPointer 30 | } 31 | 32 | /// A Swift wrapper for `rd_kafka_topic_conf_set`. 33 | /// - Parameter configPointer: An `OpaquePointer` pointing to the `rd_kafka_topic_conf_t` object in memory. 
34 |     /// - Parameter key: The configuration property to be changed.
35 |     /// - Parameter value: The new value of the configuration property to be changed.
36 |     /// - Throws: A ``KafkaError`` if setting the value failed.
37 |     static func set(configPointer: OpaquePointer, key: String, value: String) throws {
38 |         let errorChars = UnsafeMutablePointer<CChar>.allocate(capacity: RDKafkaClient.stringSize)
39 |         defer { errorChars.deallocate() }
40 | 
41 |         let configResult = rd_kafka_topic_conf_set(
42 |             configPointer,
43 |             key,
44 |             value,
45 |             errorChars,
46 |             RDKafkaClient.stringSize
47 |         )
48 | 
49 |         if configResult != RD_KAFKA_CONF_OK {
50 |             let errorString = String(cString: errorChars)
51 |             throw KafkaError.topicConfig(reason: errorString)
52 |         }
53 |     }
54 | }
55 | 
--------------------------------------------------------------------------------
/Sources/Kafka/RDKafka/RDKafkaTopicHandles.swift:
--------------------------------------------------------------------------------
 1 | //===----------------------------------------------------------------------===//
 2 | //
 3 | // This source file is part of the swift-kafka-client open source project
 4 | //
 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors
 6 | // Licensed under Apache License v2.0
 7 | //
 8 | // See LICENSE.txt for license information
 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
10 | //
11 | // SPDX-License-Identifier: Apache-2.0
12 | //
13 | //===----------------------------------------------------------------------===//
14 | 
15 | import Crdkafka
16 | import NIOConcurrencyHelpers
17 | 
18 | /// Swift class that matches topic names with their respective `rd_kafka_topic_t` handles.
19 | internal final class RDKafkaTopicHandles: Sendable {
20 |     private let _internal: NIOLockedValueBox<[String: SendableOpaquePointer]>
21 | 
22 |     // Note: we retain the client to ensure it does not get
23 |     // deinitialized before rd_kafka_topic_destroy() is invoked (required)
24 |     private let client: RDKafkaClient
25 | 
26 |     init(client: RDKafkaClient) {
27 |         self._internal = NIOLockedValueBox([:])
28 |         self.client = client
29 |     }
30 | 
31 |     deinit {
32 |         self._internal.withLockedValue { dict in
33 |             for (_, topicHandle) in dict {
34 |                 rd_kafka_topic_destroy(topicHandle.pointer)
35 |             }
36 |         }
37 |     }
38 | 
39 |     /// Scoped accessor that enables safe access to the pointer of the topic's handle.
40 |     /// - Warning: Do not escape the pointer from the closure for later use.
41 |     /// - Parameter topic: The name of the topic that is addressed.
42 |     /// - Parameter topicConfiguration: The ``KafkaTopicConfiguration`` used for newly created topics.
43 |     /// - Parameter body: The closure will use the topic handle pointer.
44 |     @discardableResult
45 |     func withTopicHandlePointer<T>(
46 |         topic: String,
47 |         topicConfiguration: KafkaTopicConfiguration,
48 |         _ body: (OpaquePointer) throws -> T
49 |     ) throws -> T {
50 |         let topicHandle = try self.createTopicHandleIfNeeded(topic: topic, topicConfiguration: topicConfiguration)
51 |         return try body(topicHandle)
52 |     }
53 | 
54 |     /// Check `topicHandles` for a handle matching the topic name and create a new handle if needed.
55 |     /// - Parameter topic: The name of the topic that is addressed.
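    /// - Parameter topicConfiguration: The ``KafkaTopicConfiguration`` used in case a new topic handle has to be created.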
56 | private func createTopicHandleIfNeeded( 57 | topic: String, 58 | topicConfiguration: KafkaTopicConfiguration 59 | ) throws -> OpaquePointer { 60 | try self._internal.withLockedValue { dict in 61 | if let handle = dict[topic] { 62 | return handle.pointer 63 | } else { 64 | let rdTopicConf = try RDKafkaTopicConfig.createFrom(topicConfiguration: topicConfiguration) 65 | let newHandle = self.client.withKafkaHandlePointer { kafkaHandle in 66 | rd_kafka_topic_new( 67 | kafkaHandle, 68 | topic, 69 | rdTopicConf 70 | ) 71 | // rd_kafka_topic_new deallocates topic config object 72 | } 73 | 74 | guard let newHandle else { 75 | // newHandle is nil, so we can retrieve error through rd_kafka_last_error() 76 | let error = KafkaError.rdKafkaError(wrapping: rd_kafka_last_error()) 77 | throw error 78 | } 79 | dict[topic] = .init(newHandle) 80 | return newHandle 81 | } 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /Sources/Kafka/RDKafka/RDKafkaTopicPartitionList.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Crdkafka 16 | 17 | /// Swift wrapper type for `rd_kafka_topic_partition_list_t`. 18 | final class RDKafkaTopicPartitionList { 19 | private let _internal: UnsafeMutablePointer<rd_kafka_topic_partition_list_t> 20 | 21 | /// Create a new topic+partition list. 22 | /// 23 | /// - Parameter size: Initial allocated size used when the number of allocated elements can be estimated. 24 | init(size: Int32 = 1) { 25 | self._internal = rd_kafka_topic_partition_list_new(size) 26 | } 27 | 28 | deinit { 29 | rd_kafka_topic_partition_list_destroy(self._internal) 30 | } 31 | 32 | /// Add topic+partition pair to list. 33 | func add(topic: String, partition: KafkaPartition) { 34 | precondition( 35 | 0...Int(Int32.max) ~= partition.rawValue || partition == .unassigned, 36 | "Partition ID outside of valid range \(0...Int32.max)" 37 | ) 38 | 39 | rd_kafka_topic_partition_list_add( 40 | self._internal, 41 | topic, 42 | Int32(partition.rawValue) 43 | ) 44 | } 45 | 46 | /// Manually set read offset for a given topic+partition pair. 47 | func setOffset(topic: String, partition: KafkaPartition, offset: Int64) { 48 | precondition( 49 | 0...Int(Int32.max) ~= partition.rawValue || partition == .unassigned, 50 | "Partition ID outside of valid range \(0...Int32.max)" 51 | ) 52 | 53 | guard 54 | let partitionPointer = rd_kafka_topic_partition_list_add( 55 | self._internal, 56 | topic, 57 | Int32(partition.rawValue) 58 | ) 59 | else { 60 | fatalError("rd_kafka_topic_partition_list_add returned invalid pointer") 61 | } 62 | partitionPointer.pointee.offset = offset 63 | } 64 | 65 | /// Scoped accessor that enables safe access to the pointer of the underlying `rd_kafka_topic_partition_list_t`. 66 | /// - Warning: Do not escape the pointer from the closure for later use. 67 | /// - Parameter body: The closure will use the pointer. 
68 | @discardableResult 69 | func withListPointer<T>(_ body: (UnsafeMutablePointer<rd_kafka_topic_partition_list_t>) throws -> T) rethrows -> T { 70 | try body(self._internal) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /Sources/Kafka/Utilities/DispatchQueueTaskExecutor.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2024 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | #if swift(>=6.0) 16 | import Dispatch 17 | 18 | final class DispatchQueueTaskExecutor: TaskExecutor { 19 | let queue: DispatchQueue 20 | 21 | init(_ queue: DispatchQueue) { 22 | self.queue = queue 23 | } 24 | 25 | public func enqueue(_ _job: consuming ExecutorJob) { 26 | let job = UnownedJob(_job) 27 | queue.async { 28 | job.runSynchronously( 29 | on: self.asUnownedTaskExecutor() 30 | ) 31 | } 32 | } 33 | 34 | @inlinable 35 | public func asUnownedTaskExecutor() -> UnownedTaskExecutor { 36 | UnownedTaskExecutor(ordinary: self) 37 | } 38 | } 39 | #endif 40 | -------------------------------------------------------------------------------- /Sources/Kafka/Utilities/Duration+Helpers.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | extension Duration { 16 | internal var inMilliseconds: UInt { 17 | let seconds = Double(components.seconds) * 1000.0 18 | let attoseconds = Double(components.attoseconds) * 1e-15 19 | return UInt(seconds + attoseconds) 20 | } 21 | 22 | internal var canBeRepresentedAsMilliseconds: Bool { 23 | self.inMilliseconds > 0 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Sources/Kafka/Utilities/NIOAsyncSequenceBackPressureStrategies+NoBackPressure.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. 
and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import NIOCore 16 | 17 | extension NIOAsyncSequenceProducerBackPressureStrategies { 18 | /// `NIOAsyncSequenceProducerBackPressureStrategy` that always returns true. 19 | struct NoBackPressure: NIOAsyncSequenceProducerBackPressureStrategy { 20 | func didYield(bufferDepth: Int) -> Bool { true } 21 | func didConsume(bufferDepth: Int) -> Bool { true } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /Sources/Kafka/Utilities/SendableOpaquePointer.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2024 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | /// A wrapper for the `OpaquePointer` used to represent different handles from `librdkafka`. 16 | /// 17 | /// This wrapper silences `Sendable` warnings for the pointer introduced in Swift 5.10, and should 18 | /// only be used for handles from `librdkafka` that are known to be thread-safe. 19 | struct SendableOpaquePointer: @unchecked Sendable { 20 | let pointer: OpaquePointer 21 | 22 | init(_ pointer: OpaquePointer) { 23 | self.pointer = pointer 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Sources/KafkaFoundationCompat/Data+KafkaContiguousBytes.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Kafka 16 | 17 | import struct Foundation.Data 18 | 19 | extension Data: KafkaContiguousBytes {} 20 | -------------------------------------------------------------------------------- /Tests/IntegrationTests/Utilities.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. 
and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Logging 16 | 17 | import struct Foundation.UUID 18 | 19 | extension Logger { 20 | static var kafkaTest: Logger { 21 | var logger = Logger(label: "kafka.test") 22 | logger.logLevel = .info 23 | return logger 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Tests/KafkaTests/KafkaConsumerTests.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Logging 16 | import Metrics 17 | import MetricsTestKit 18 | import ServiceLifecycle 19 | import XCTest 20 | 21 | import struct Foundation.UUID 22 | 23 | @testable import CoreMetrics // for MetricsSystem.bootstrapInternal 24 | @testable import Kafka 25 | 26 | // For testing locally on Mac, do the following: 27 | // 28 | // 1. Install Kafka and Zookeeper using homebrew 29 | // 30 | // https://medium.com/@Ankitthakur/apache-kafka-installation-on-mac-using-homebrew-a367cdefd273 31 | // 32 | // 2. Start Zookeeper & Kafka Server 33 | // 34 | // (Homebrew - Apple Silicon) 35 | // zookeeper-server-start /opt/homebrew/etc/kafka/zookeeper.properties & kafka-server-start /opt/homebrew/etc/kafka/server.properties 36 | // 37 | // (Homebrew - Intel Mac) 38 | // zookeeper-server-start /usr/local/etc/kafka/zookeeper.properties & kafka-server-start /usr/local/etc/kafka/server.properties 39 | 40 | final class KafkaConsumerTests: XCTestCase { 41 | var metrics: TestMetrics! 
= TestMetrics() 42 | 43 | override func setUp() async throws { 44 | MetricsSystem.bootstrapInternal(self.metrics) 45 | } 46 | 47 | override func tearDown() async throws { 48 | self.metrics = nil 49 | MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance) 50 | } 51 | 52 | func testConsumerLog() async throws { 53 | let recorder = LogEventRecorder() 54 | let mockLogger = Logger(label: "kafka.test.consumer.log") { 55 | _ in MockLogHandler(recorder: recorder) 56 | } 57 | 58 | // Set no bootstrap servers to trigger librdkafka configuration warning 59 | let uniqueGroupID = UUID().uuidString 60 | var config = KafkaConsumerConfiguration( 61 | consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), 62 | bootstrapBrokerAddresses: [] 63 | ) 64 | config.securityProtocol = .plaintext 65 | config.debugOptions = [.all] 66 | 67 | let consumer = try KafkaConsumer(configuration: config, logger: mockLogger) 68 | 69 | let serviceGroupConfiguration = ServiceGroupConfiguration(services: [consumer], logger: .kafkaTest) 70 | let serviceGroup = ServiceGroup(configuration: serviceGroupConfiguration) 71 | 72 | await withThrowingTaskGroup(of: Void.self) { group in 73 | // Run Task 74 | group.addTask { 75 | try await serviceGroup.run() 76 | } 77 | 78 | // Sleep for 1s to let poll loop receive log message 79 | try! await Task.sleep(for: .seconds(1)) 80 | 81 | // Shutdown the serviceGroup 82 | await serviceGroup.triggerGracefulShutdown() 83 | } 84 | 85 | let recordedEvents = recorder.recordedEvents 86 | let expectedLogs: [(level: Logger.Level, source: String, message: String)] = [ 87 | (Logger.Level.debug, "MEMBERID", uniqueGroupID) 88 | ] 89 | 90 | for expectedLog in expectedLogs { 91 | XCTAssertTrue( 92 | recordedEvents.contains(where: { event in 93 | event.level == expectedLog.level && event.source == expectedLog.source 94 | && event.message.description.contains(expectedLog.message) 95 | }), 96 | "Expected log \(expectedLog) but was not found" 97 | ) 98 | } 99 | } 100 | 101 | func testConsumerStatistics() async throws { 102 | let uniqueGroupID = UUID().uuidString 103 | var config = KafkaConsumerConfiguration( 104 | consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), 105 | bootstrapBrokerAddresses: [] 106 | ) 107 | 108 | config.metrics.updateInterval = .milliseconds(100) 109 | config.metrics.queuedOperation = .init(label: "operations") 110 | 111 | let consumer = try KafkaConsumer(configuration: config, logger: .kafkaTest) 112 | 113 | let svcGroupConfig = ServiceGroupConfiguration(services: [consumer], logger: .kafkaTest) 114 | let serviceGroup = ServiceGroup(configuration: svcGroupConfig) 115 | 116 | try await withThrowingTaskGroup(of: Void.self) { group in 117 | // Run Task 118 | group.addTask { 119 | try await serviceGroup.run() 120 | } 121 | 122 | try await Task.sleep(for: .seconds(1)) 123 | 124 | // Shutdown the serviceGroup 125 | await serviceGroup.triggerGracefulShutdown() 126 | } 127 | 128 | let value = try metrics.expectGauge("operations").lastValue 129 | XCTAssertNotNil(value) 130 | } 131 | 132 | func testConsumerConstructDeinit() async throws { 133 | let uniqueGroupID = UUID().uuidString 134 | let config = KafkaConsumerConfiguration( 135 | consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), 136 | bootstrapBrokerAddresses: [] 137 | ) 138 | 139 | _ = try KafkaConsumer(configuration: config, logger: .kafkaTest) // deinit called before run 140 | _ = try KafkaConsumer.makeConsumerWithEvents(configuration: 
config, logger: .kafkaTest) 141 | } 142 | 143 | func testConsumerMessagesReadCancelledBeforeRun() async throws { 144 | let uniqueGroupID = UUID().uuidString 145 | let config = KafkaConsumerConfiguration( 146 | consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), 147 | bootstrapBrokerAddresses: [] 148 | ) 149 | 150 | let consumer = try KafkaConsumer(configuration: config, logger: .kafkaTest) 151 | 152 | let svcGroupConfig = ServiceGroupConfiguration(services: [consumer], logger: .kafkaTest) 153 | let serviceGroup = ServiceGroup(configuration: svcGroupConfig) 154 | 155 | // explicitly run and cancel message consuming task before serviceGroup.run() 156 | let consumingTask = Task { 157 | for try await record in consumer.messages { 158 | XCTFail("Unexpected record \(record)") 159 | } 160 | } 161 | 162 | try await Task.sleep(for: .seconds(1)) 163 | 164 | // explicitly cancel message consuming task before serviceGroup.run() 165 | consumingTask.cancel() 166 | 167 | try await withThrowingTaskGroup(of: Void.self) { group in 168 | // Run Task 169 | group.addTask { 170 | try await serviceGroup.run() 171 | } 172 | 173 | try await Task.sleep(for: .seconds(1)) 174 | 175 | // Shutdown the serviceGroup 176 | await serviceGroup.triggerGracefulShutdown() 177 | } 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /Tests/KafkaTests/Utilities.swift: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // This source file is part of the swift-kafka-client open source project 4 | // 5 | // Copyright (c) 2022 Apple Inc. and the swift-kafka-client project authors 6 | // Licensed under Apache License v2.0 7 | // 8 | // See LICENSE.txt for license information 9 | // See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 10 | // 11 | // SPDX-License-Identifier: Apache-2.0 12 | // 13 | //===----------------------------------------------------------------------===// 14 | 15 | import Logging 16 | import NIOConcurrencyHelpers 17 | 18 | extension Logger { 19 | static var kafkaTest: Logger { 20 | var logger = Logger(label: "kafka.test") 21 | logger.logLevel = .info 22 | return logger 23 | } 24 | } 25 | 26 | // MARK: - Mocks 27 | 28 | internal struct LogEvent { 29 | let level: Logger.Level 30 | let message: Logger.Message 31 | let source: String 32 | } 33 | 34 | internal struct LogEventRecorder { 35 | let _recordedEvents = NIOLockedValueBox<[LogEvent]>([]) 36 | 37 | var recordedEvents: [LogEvent] { 38 | self._recordedEvents.withLockedValue { $0 } 39 | } 40 | 41 | func record(_ event: LogEvent) { 42 | self._recordedEvents.withLockedValue { $0.append(event) } 43 | } 44 | } 45 | 46 | internal struct MockLogHandler: LogHandler { 47 | let recorder: LogEventRecorder 48 | 49 | init(recorder: LogEventRecorder) { 50 | self.recorder = recorder 51 | } 52 | 53 | func log( 54 | level: Logger.Level, 55 | message: Logger.Message, 56 | metadata: Logger.Metadata?, 57 | source: String, 58 | file: String, 59 | function: String, 60 | line: UInt 61 | ) { 62 | self.recorder.record(LogEvent(level: level, message: message, source: source)) 63 | } 64 | 65 | private var _logLevel: Logger.Level? 66 | var logLevel: Logger.Level { 67 | get { 68 | // get from config unless set 69 | self._logLevel ?? 
.debug 70 | } 71 | set { 72 | self._logLevel = newValue 73 | } 74 | } 75 | 76 | private var _metadataSet = false 77 | private var _metadata = Logger.Metadata() { 78 | didSet { 79 | self._metadataSet = true 80 | } 81 | } 82 | 83 | public var metadata: Logger.Metadata { 84 | get { 85 | self._metadata 86 | } 87 | set { 88 | self._metadata = newValue 89 | } 90 | } 91 | 92 | subscript(metadataKey metadataKey: Logger.Metadata.Key) -> Logger.Metadata.Value? { 93 | get { 94 | self._metadata[metadataKey] 95 | } 96 | set { 97 | self._metadata[metadataKey] = newValue 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /dev/git.commit.template: -------------------------------------------------------------------------------- 1 | One line description of your change 2 | 3 | Motivation: 4 | 5 | Explain here the context, and why you're making that change. 6 | What is the problem you're trying to solve? 7 | 8 | Modifications: 9 | 10 | Describe the modifications you've done. 11 | 12 | Result: 13 | 14 | After your change, what will change. 15 | -------------------------------------------------------------------------------- /dev/test-benchmark-thresholds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ##===----------------------------------------------------------------------===## 3 | ## 4 | ## This source file is part of the swift-kafka-client open source project 5 | ## 6 | ## Copyright (c) YEARS Apple Inc. and the swift-kafka-client project authors 7 | ## Licensed under Apache License v2.0 8 | ## 9 | ## See LICENSE.txt for license information 10 | ## See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 11 | ## 12 | ## SPDX-License-Identifier: Apache-2.0 13 | ## 14 | ##===----------------------------------------------------------------------===## 15 | 16 | cd Benchmarks || exit 17 | swift package --disable-sandbox benchmark baseline update PR --no-progress 18 | git checkout main 19 | swift package --disable-sandbox benchmark baseline update main --no-progress 20 | 21 | swift package benchmark baseline check main PR 22 | BENCHMARK_RESULT=$? 23 | 24 | echo "Retcode is $BENCHMARK_RESULT" 25 | 26 | if [ $BENCHMARK_RESULT -eq 0 ]; then 27 | echo "Benchmark results are the same as for main" 28 | fi 29 | 30 | if [ $BENCHMARK_RESULT -eq 4 ]; then 31 | echo "Benchmark results are better than for main" 32 | fi 33 | 34 | if [ $BENCHMARK_RESULT -eq 1 ]; then 35 | echo "Benchmark failed" 36 | exit 1 37 | fi 38 | 39 | if [ $BENCHMARK_RESULT -eq 2 ]; then 40 | echo "Benchmark results are worse than main" 41 | exit 1 42 | fi 43 | -------------------------------------------------------------------------------- /dev/update-benchmark-thresholds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ##===----------------------------------------------------------------------===## 3 | ## 4 | ## This source file is part of the swift-kafka-client open source project 5 | ## 6 | ## Copyright (c) 2023 Apple Inc. 
and the swift-kafka-client project authors 7 | ## Licensed under Apache License v2.0 8 | ## 9 | ## See LICENSE.txt for license information 10 | ## See CONTRIBUTORS.txt for the list of swift-kafka-client project authors 11 | ## 12 | ## SPDX-License-Identifier: Apache-2.0 13 | ## 14 | ##===----------------------------------------------------------------------===## 15 | 16 | set -eu 17 | set -o pipefail 18 | 19 | here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 20 | target_repo=${2-"$here/.."} 21 | 22 | for f in 57 58 59 510 -nightly; do 23 | echo "swift$f" 24 | 25 | docker_file=$(if [[ "$f" == "-nightly" ]]; then f=main; fi && ls "$target_repo/docker/docker-compose."*"$f"*".yaml") 26 | 27 | docker-compose -f docker/docker-compose.yaml -f "$docker_file" run update-benchmark-baseline 28 | done 29 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG swift_version=5.9 2 | ARG ubuntu_version=jammy 3 | ARG base_image=swift:$swift_version-$ubuntu_version 4 | FROM $base_image 5 | # needed to do again after FROM due to docker limitation 6 | ARG swift_version 7 | ARG ubuntu_version 8 | 9 | # set as UTF-8 10 | RUN apt-get update && apt-get install -y locales locales-all 11 | ENV LC_ALL en_US.UTF-8 12 | ENV LANG en_US.UTF-8 13 | ENV LANGUAGE en_US.UTF-8 14 | 15 | # Dependencies 16 | RUN apt-get update 17 | RUN apt-get install libsasl2-dev -y 18 | RUN apt-get install libssl-dev -y 19 | RUN apt-get install libjemalloc-dev -y 20 | 21 | # tools 22 | RUN mkdir -p $HOME/.tools 23 | RUN echo 'export PATH="$HOME/.tools:$PATH"' >> $HOME/.profile 24 | 25 | # swiftformat (until part of the toolchain) 26 | 27 | ARG swiftformat_version=0.51.8 28 | RUN git clone --branch $swiftformat_version --depth 1 https://github.com/nicklockwood/SwiftFormat $HOME/.tools/swift-format 29 | RUN cd $HOME/.tools/swift-format && swift build -c release 30 | RUN ln -s $HOME/.tools/swift-format/.build/release/swiftformat $HOME/.tools/swiftformat 31 | -------------------------------------------------------------------------------- /docker/docker-compose.2204.510.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | runtime-setup: 6 | image: swift-kafka-client:22.04-5.10 7 | build: 8 | args: 9 | base_image: "swiftlang/swift:nightly-5.10-jammy" 10 | 11 | build: 12 | image: swift-kafka-client:22.04-5.10 13 | 14 | test: 15 | image: swift-kafka-client:22.04-5.10 16 | environment: 17 | - WARN_AS_ERROR_ARG=-Xswiftc -warnings-as-errors 18 | - IMPORT_CHECK_ARG=--explicit-target-dependency-import-check error 19 | - STRICT_CONCURRENCY_ARG=-Xswiftc -strict-concurrency=complete 20 | # - SANITIZER_ARG=--sanitize=thread # TSan broken still 21 | 22 | update-benchmark-baseline: 23 | image: swift-kafka-client:22.04-5.10 24 | environment: 25 | - SWIFT_VERSION=5.10 26 | 27 | shell: 28 | image: swift-kafka-client:22.04-5.10 29 | -------------------------------------------------------------------------------- /docker/docker-compose.2204.57.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | runtime-setup: 6 | image: swift-kafka-client:22.04-5.7 7 | build: 8 | args: 9 | ubuntu_version: "jammy" 10 | swift_version: "5.7" 11 | 12 | build: 13 | image: swift-kafka-client:22.04-5.7 14 | 15 | test: 16 | image: swift-kafka-client:22.04-5.7 17 | environment: 18 | - 
SWIFT_VERSION=5.7 19 | - WARN_AS_ERROR_ARG=-Xswiftc -warnings-as-errors 20 | - STRICT_CONCURRENCY_ARG=-Xswiftc -strict-concurrency=complete 21 | # - SANITIZER_ARG=--sanitize=thread # TSan broken still 22 | 23 | update-benchmark-baseline: 24 | image: swift-kafka-client:22.04-5.7 25 | environment: 26 | - SWIFT_VERSION=5.7 27 | 28 | shell: 29 | image: swift-kafka-client:22.04-5.7 30 | -------------------------------------------------------------------------------- /docker/docker-compose.2204.58.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | runtime-setup: 6 | image: swift-kafka-client:22.04-5.8 7 | build: 8 | args: 9 | ubuntu_version: "jammy" 10 | swift_version: "5.8" 11 | 12 | build: 13 | image: swift-kafka-client:22.04-5.8 14 | 15 | test: 16 | image: swift-kafka-client:22.04-5.8 17 | environment: 18 | - SWIFT_VERSION=5.8 19 | - WARN_AS_ERROR_ARG=-Xswiftc -warnings-as-errors 20 | - IMPORT_CHECK_ARG=--explicit-target-dependency-import-check error 21 | - STRICT_CONCURRENCY_ARG=-Xswiftc -strict-concurrency=complete 22 | # - SANITIZER_ARG=--sanitize=thread # TSan broken still 23 | 24 | update-benchmark-baseline: 25 | image: swift-kafka-client:22.04-5.8 26 | environment: 27 | - SWIFT_VERSION=5.8 28 | 29 | shell: 30 | image: swift-kafka-client:22.04-5.8 31 | -------------------------------------------------------------------------------- /docker/docker-compose.2204.59.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | runtime-setup: 6 | image: swift-kafka-client:22.04-5.9 7 | build: 8 | args: 9 | ubuntu_version: "jammy" 10 | swift_version: "5.9" 11 | 12 | build: 13 | image: swift-kafka-client:22.04-5.9 14 | 15 | test: 16 | image: swift-kafka-client:22.04-5.9 17 | environment: 18 | - SWIFT_VERSION=5.9 19 | - WARN_AS_ERROR_ARG=-Xswiftc -warnings-as-errors 20 | - IMPORT_CHECK_ARG=--explicit-target-dependency-import-check error 21 | - STRICT_CONCURRENCY_ARG=-Xswiftc -strict-concurrency=complete 22 | # - SANITIZER_ARG=--sanitize=thread # TSan broken still 23 | 24 | update-benchmark-baseline: 25 | image: swift-kafka-client:22.04-5.9 26 | environment: 27 | - SWIFT_VERSION=5.9 28 | 29 | shell: 30 | image: swift-kafka-client:22.04-5.9 31 | -------------------------------------------------------------------------------- /docker/docker-compose.2204.main.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | runtime-setup: 6 | image: swift-kafka-client:22.04-main 7 | build: 8 | args: 9 | base_image: "swiftlang/swift:nightly-main-jammy" 10 | 11 | test: 12 | image: swift-kafka-client:22.04-main 13 | environment: 14 | - SWIFT_VERSION=main 15 | - WARN_AS_ERROR_ARG=-Xswiftc -warnings-as-errors 16 | - IMPORT_CHECK_ARG=--explicit-target-dependency-import-check error 17 | - STRICT_CONCURRENCY_ARG=-Xswiftc -strict-concurrency=complete 18 | # - SANITIZER_ARG=--sanitize=thread # TSan broken still 19 | 20 | update-benchmark-baseline: 21 | image: swift-kafka-client:22.04-main 22 | environment: 23 | - SWIFT_VERSION=main 24 | 25 | shell: 26 | image: swift-kafka-client:22.04-main 27 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # this file is not designed to be run directly 2 | # instead, use the docker-compose.<os>.<swift> 
files 3 | # eg docker-compose -f docker/docker-compose.yaml -f docker/docker-compose.2204.57.yaml run test 4 | version: "3.9" 5 | services: 6 | 7 | zookeeper: 8 | image: ubuntu/zookeeper 9 | 10 | kafka: 11 | image: ubuntu/kafka 12 | depends_on: 13 | - zookeeper 14 | environment: 15 | ZOOKEEPER_HOST: zookeeper 16 | 17 | swift-kafka-client: 18 | depends_on: 19 | - kafka 20 | build: 21 | context: .. 22 | dockerfile: docker/Dockerfile 23 | environment: 24 | KAFKA_HOST: kafka 25 | 26 | runtime-setup: 27 | image: swift-kafka-client:default 28 | build: 29 | context: . 30 | dockerfile: Dockerfile 31 | 32 | common: &common 33 | image: swift-kafka-client:default 34 | depends_on: [runtime-setup] 35 | volumes: 36 | - ~/.ssh:/root/.ssh 37 | - ..:/swift-kafka-client:z 38 | working_dir: /swift-kafka-client 39 | 40 | soundness: 41 | <<: *common 42 | command: /bin/bash -xcl "swift -version && uname -a && ./scripts/soundness.sh" 43 | 44 | build: 45 | <<: *common 46 | environment: [] 47 | command: /bin/bash -cl "swift build" 48 | 49 | test: 50 | <<: *common 51 | depends_on: [kafka, runtime-setup] 52 | environment: 53 | SWIFT_VERSION: 5.7 54 | KAFKA_HOST: kafka 55 | command: > 56 | /bin/bash -xcl " 57 | swift build --build-tests $${SANITIZER_ARG-} && \ 58 | swift $${SWIFT_TEST_VERB-test} $${WARN_AS_ERROR_ARG-} $${SANITIZER_ARG-} $${IMPORT_CHECK_ARG-} $${STRICT_CONCURRENCY_ARG-} 59 | " 60 | 61 | benchmark: 62 | <<: *common 63 | depends_on: [kafka, runtime-setup] 64 | environment: 65 | KAFKA_HOST: kafka 66 | command: > 67 | /bin/bash -xcl " 68 | cd Benchmarks && swift package --disable-sandbox benchmark 69 | " 70 | 71 | update-benchmark-baseline: 72 | <<: *common 73 | depends_on: [kafka, runtime-setup] 74 | environment: 75 | KAFKA_HOST: kafka 76 | command: /bin/bash -xcl "cd Benchmarks && swift package --disable-sandbox --scratch-path .build/$${SWIFT_VERSION-}/ --allow-writing-to-package-directory benchmark --format metricP90AbsoluteThresholds --path Thresholds/$${SWIFT_VERSION-}/ --no-progress" 77 | 78 | # util 79 | 80 | shell: 81 | <<: *common 82 | entrypoint: /bin/bash 83 | --------------------------------------------------------------------------------
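--------------------------------------------------------------------------------

Usage sketch (illustrative; not a file in this repository). The test sources above already exercise the public surface — `KafkaConsumerConfiguration`, `KafkaConsumer`, its `messages` async sequence, and the `ServiceGroup` wiring from swift-service-lifecycle — so a minimal consumer can be assembled from those same pieces. This is a sketch under assumptions: the broker-address initializer `KafkaConfiguration.BrokerAddress(host:port:)`, the group ID "example-group", and the topic "example-topic" are illustrative values not taken from the sources above (the tests deliberately pass an empty broker list instead).

import Kafka
import Logging
import ServiceLifecycle

// A minimal sketch, assuming a broker reachable at localhost:9092.
// BrokerAddress(host:port:), the group ID, and the topic name are assumed,
// illustrative values; the ServiceGroup wiring mirrors the tests above.
func runExampleConsumer() async throws {
    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: "example-group", topics: ["example-topic"]),
        bootstrapBrokerAddresses: [KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092)]
    )
    config.securityProtocol = .plaintext

    let logger = Logger(label: "kafka.example")
    let consumer = try KafkaConsumer(configuration: config, logger: logger)
    let serviceGroup = ServiceGroup(
        configuration: ServiceGroupConfiguration(services: [consumer], logger: logger)
    )

    try await withThrowingTaskGroup(of: Void.self) { group in
        // Drive the consumer's poll loop via swift-service-lifecycle.
        group.addTask {
            try await serviceGroup.run()
        }
        // Read records from the consumer's async sequence.
        group.addTask {
            for try await message in consumer.messages {
                logger.info("received: \(message)")
            }
        }
        // When either task finishes or throws, shut everything down cleanly.
        try await group.next()
        await serviceGroup.triggerGracefulShutdown()
    }
}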