├── documentation ├── book │ ├── common │ ├── images │ ├── modules │ ├── assemblies │ ├── api │ │ ├── .openapi-generator │ │ │ ├── FILES │ │ │ ├── VERSION │ │ │ └── openapi.json-generate-apidoc.sha256 │ │ ├── snippet │ │ │ ├── topics │ │ │ │ ├── GET │ │ │ │ │ └── http-response.adoc │ │ │ │ └── {topicname} │ │ │ │ │ ├── partitions │ │ │ │ │ ├── {partitionid} │ │ │ │ │ │ ├── POST │ │ │ │ │ │ │ ├── http-request.adoc │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ ├── offsets │ │ │ │ │ │ │ └── GET │ │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ └── GET │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ └── GET │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ ├── POST │ │ │ │ │ ├── http-request.adoc │ │ │ │ │ └── http-response.adoc │ │ │ │ │ └── GET │ │ │ │ │ └── http-response.adoc │ │ │ ├── GET │ │ │ │ └── http-response.adoc │ │ │ ├── consumers │ │ │ │ └── {groupid} │ │ │ │ │ ├── instances │ │ │ │ │ └── {name} │ │ │ │ │ │ ├── subscription │ │ │ │ │ │ ├── POST │ │ │ │ │ │ │ ├── http-request.adoc │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ ├── DELETE │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ └── GET │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ ├── DELETE │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ ├── offsets │ │ │ │ │ │ └── POST │ │ │ │ │ │ │ ├── http-response.adoc │ │ │ │ │ │ │ └── http-request.adoc │ │ │ │ │ │ ├── positions │ │ │ │ │ │ ├── POST │ │ │ │ │ │ │ ├── http-response.adoc │ │ │ │ │ │ │ └── http-request.adoc │ │ │ │ │ │ ├── end │ │ │ │ │ │ │ └── POST │ │ │ │ │ │ │ │ ├── http-response.adoc │ │ │ │ │ │ │ │ └── http-request.adoc │ │ │ │ │ │ └── beginning │ │ │ │ │ │ │ └── POST │ │ │ │ │ │ │ ├── http-response.adoc │ │ │ │ │ │ │ └── http-request.adoc │ │ │ │ │ │ ├── assignments │ │ │ │ │ │ └── POST │ │ │ │ │ │ │ ├── http-request.adoc │ │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ │ └── records │ │ │ │ │ │ └── GET │ │ │ │ │ │ └── http-response.adoc │ │ │ │ │ └── POST │ │ │ │ │ ├── http-request.adoc │ │ │ │ │ └── http-response.adoc │ │ │ └── admin │ │ │ │ └── topics │ │ │ │ └── POST │ │ │ │ └── http-request.adoc │ │ ├── .openapi-generator-ignore │ │ └── template │ │ │ └── index.mustache │ └── bridge.adoc ├── assemblies │ ├── modules │ ├── assembly-http-bridge-overview.adoc │ ├── assembly-http-bridge-config.adoc │ └── assembly-http-bridge-quickstart.adoc ├── images │ └── kafka-bridge.png ├── common │ ├── revision-info.adoc │ └── attributes.adoc └── modules │ ├── proc-downloading-http-bridge.adoc │ ├── con-securing-http-bridge.adoc │ ├── con-overview-open-api-spec-http-bridge.adoc │ ├── proc-bridge-deleting-consumer.adoc │ ├── proc-configuring-http-bridge-jmx-metrics.adoc │ ├── con-securing-http-interface.adoc │ ├── proc-installing-http-bridge.adoc │ ├── proc-bridge-committing-consumer-offsets-to-log.adoc │ ├── con-loggers-http-bridge.adoc │ ├── con-overview-components-http-bridge.adoc │ ├── proc-configuring-http-bridge-smr-metrics.adoc │ ├── proc-bridge-subscribing-consumer-topics.adoc │ ├── con-overview-running-http-bridge.adoc │ ├── proc-creating-http-bridge-consumer.adoc │ ├── proc-bridge-retrieving-latest-messages-from-consumer.adoc │ └── proc-bridge-seeking-offsets-for-partition.adoc ├── release.version ├── src ├── test │ ├── resources │ │ ├── wrong.properties │ │ ├── application.properties │ │ └── log4j2.properties │ └── java │ │ └── io │ │ └── strimzi │ │ └── kafka │ │ └── bridge │ │ ├── Constants.java │ │ ├── tracing │ │ └── OpenTelemetryTest.java │ │ ├── clients │ │ └── ClientHandlerBase.java │ │ ├── utils │ │ ├── KafkaJsonSerializer.java │ │ ├── 
KafkaJsonDeserializer.java │ │ ├── Utils.java │ │ └── Urls.java │ │ ├── http │ │ ├── services │ │ │ ├── BaseService.java │ │ │ ├── ProducerService.java │ │ │ └── SeekService.java │ │ ├── InvalidProducerIT.java │ │ ├── tools │ │ │ ├── ExtensionContextParameterResolver.java │ │ │ └── TestSeparator.java │ │ └── StaticIT.java │ │ ├── metrics │ │ ├── StrimziMetricsCollectorTest.java │ │ └── JmxMetricsCollectorTest.java │ │ └── config │ │ └── ConfigRetrieverTest.java └── main │ ├── resources │ ├── META-INF │ │ └── services │ │ │ └── io.opentelemetry.context.ContextStorageProvider │ ├── log4j2.properties │ └── jmx_metrics_config.yaml │ ├── java │ └── io │ │ └── strimzi │ │ └── kafka │ │ └── bridge │ │ ├── http │ │ ├── model │ │ │ ├── HttpBridgeResult.java │ │ │ └── HttpBridgeError.java │ │ ├── converter │ │ │ ├── JsonDecodeException.java │ │ │ └── JsonEncodeException.java │ │ ├── HttpBridgeEndpoint.java │ │ ├── HttpOpenApiOperations.java │ │ ├── HttpOpenApiOperation.java │ │ ├── HttpUtils.java │ │ └── HttpBridgeContext.java │ │ ├── IllegalEmbeddedFormatException.java │ │ ├── Handler.java │ │ ├── config │ │ ├── AbstractConfig.java │ │ ├── KafkaAdminConfig.java │ │ ├── KafkaConsumerConfig.java │ │ ├── KafkaProducerConfig.java │ │ ├── ConfigRetriever.java │ │ └── KafkaConfig.java │ │ ├── tracing │ │ ├── TracingConstants.java │ │ ├── SpanHandle.java │ │ ├── NoopTracingHandle.java │ │ ├── TracingHandle.java │ │ ├── BridgeContextStorageProvider.java │ │ └── TracingUtil.java │ │ ├── BridgeContentType.java │ │ ├── EmbeddedFormat.java │ │ ├── metrics │ │ ├── MetricsType.java │ │ ├── StrimziMetricsCollector.java │ │ ├── MetricsCollector.java │ │ └── JmxMetricsCollector.java │ │ ├── SinkTopicSubscription.java │ │ ├── LoggingPartitionsRebalance.java │ │ ├── ConsumerInstanceId.java │ │ └── converter │ │ └── MessageConverter.java │ └── assembly │ └── assembly.xml ├── .azure ├── templates │ ├── steps │ │ ├── prerequisites │ │ │ ├── install_syft.yaml │ │ │ ├── install_cosign.yaml │ │ │ ├── install_asciidoc.yaml │ │ │ ├── install_java.yaml │ │ │ └── install_docker.yaml │ │ └── maven_cache.yaml │ └── jobs │ │ ├── publish_docs.yaml │ │ ├── release_artifacts.yaml │ │ ├── build_docs.yaml │ │ ├── deploy_java.yaml │ │ ├── build_container.yaml │ │ └── build_java.yaml ├── scripts │ ├── install_cosign.sh │ ├── settings.xml │ ├── install_syft.sh │ ├── push-to-central.sh │ ├── docu-push-to-website.sh │ └── check_docs.sh ├── release-pipeline.yaml ├── build-pipeline.yaml └── cve-pipeline.yaml ├── GOVERNANCE.md ├── MAINTAINERS.md ├── .checkstyle ├── java.header └── suppressions.xml ├── CODE_OF_CONDUCT.md ├── Makefile.os ├── .spotbugs └── spotbugs-exclude.xml ├── bin ├── docker │ ├── to_bytes.gawk │ ├── dynamic_resources.sh │ ├── kafka_bridge_tls_prepare_certificates.sh │ └── kafka_bridge_run.sh └── kafka_bridge_run.sh ├── .gitignore ├── Makefile.maven ├── perftest └── README.md ├── config ├── log4j2.properties └── application.properties ├── Dockerfile ├── .github └── workflows │ └── codeql-analysis.yml ├── Makefile └── BUILDING.md /documentation/book/common: -------------------------------------------------------------------------------- 1 | ../common -------------------------------------------------------------------------------- /documentation/book/images: -------------------------------------------------------------------------------- 1 | ../images -------------------------------------------------------------------------------- /documentation/book/modules: 
-------------------------------------------------------------------------------- 1 | ../modules -------------------------------------------------------------------------------- /release.version: -------------------------------------------------------------------------------- 1 | 0.34.0-SNAPSHOT 2 | -------------------------------------------------------------------------------- /documentation/assemblies/modules: -------------------------------------------------------------------------------- 1 | ../modules -------------------------------------------------------------------------------- /documentation/book/assemblies: -------------------------------------------------------------------------------- 1 | ../assemblies -------------------------------------------------------------------------------- /documentation/book/api/.openapi-generator/FILES: -------------------------------------------------------------------------------- 1 | index.adoc 2 | -------------------------------------------------------------------------------- /documentation/book/api/.openapi-generator/VERSION: -------------------------------------------------------------------------------- 1 | 7.8.0 2 | -------------------------------------------------------------------------------- /src/test/resources/wrong.properties: -------------------------------------------------------------------------------- 1 | no.meaningful.parameter=its-value -------------------------------------------------------------------------------- /documentation/images/kafka-bridge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strimzi/strimzi-kafka-bridge/HEAD/documentation/images/kafka-bridge.png -------------------------------------------------------------------------------- /.azure/templates/steps/prerequisites/install_syft.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - bash: ".azure/scripts/install_syft.sh" 3 | displayName: "Install Syft" -------------------------------------------------------------------------------- /documentation/book/api/.openapi-generator/openapi.json-generate-apidoc.sha256: -------------------------------------------------------------------------------- 1 | 3e4b795fa237d0ac582b4c854373b19fa40984bac7ebe644d3fa554e37f6b3ba -------------------------------------------------------------------------------- /src/main/resources/META-INF/services/io.opentelemetry.context.ContextStorageProvider: -------------------------------------------------------------------------------- 1 | io.strimzi.kafka.bridge.tracing.BridgeContextStorageProvider -------------------------------------------------------------------------------- /.azure/templates/steps/prerequisites/install_cosign.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - bash: ".azure/scripts/install_cosign.sh" 3 | displayName: "Install cosign" -------------------------------------------------------------------------------- /GOVERNANCE.md: -------------------------------------------------------------------------------- 1 | # Strimzi Governance 2 | 3 | Strimzi Governance is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/GOVERNANCE.md). 
-------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # Strimzi Maintainers list 2 | 3 | Strimzi Maintainers list is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/MAINTAINERS). -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | [ "topic1", "topic2" ] 7 | ---- -------------------------------------------------------------------------------- /documentation/common/revision-info.adoc: -------------------------------------------------------------------------------- 1 | //// 2 | Revision information template. 3 | Add this to the end of every document. 4 | //// 5 | 6 | _Revised on {localdate} {localtime}_ 7 | -------------------------------------------------------------------------------- /.checkstyle/java.header: -------------------------------------------------------------------------------- 1 | ^/\* 2 | ^ \* Copyright Strimzi authors. 3 | ^ \* License: Apache License 2\.0 \(see the file LICENSE or http://apache\.org/licenses/LICENSE-2\.0\.html\)\. 4 | ^ \*/ -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Strimzi Community Code of Conduct 2 | 3 | Strimzi Community Code of Conduct is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/CODE_OF_CONDUCT.md). -------------------------------------------------------------------------------- /Makefile.os: -------------------------------------------------------------------------------- 1 | FIND = find 2 | SED = sed 3 | GREP = grep 4 | CP = cp 5 | 6 | UNAME_S := $(shell uname -s) 7 | ifeq ($(UNAME_S),Darwin) 8 | FIND = gfind 9 | SED = gsed 10 | GREP = ggrep 11 | CP = gcp 12 | endif -------------------------------------------------------------------------------- /documentation/book/api/snippet/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | [subs=attributes+] 6 | ---- 7 | { 8 | "bridge_version" : {ProductVersion} 9 | } 10 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "topics" : [ "topic1", "topic2" ] 8 | } 9 | ---- -------------------------------------------------------------------------------- /.spotbugs/spotbugs-exclude.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/admin/topics/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "topic_name" : "my-topic", 8 | 
"partitions_count" : 1, 9 | "replication_factor" : 2, 10 | } 11 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/DELETE/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- -------------------------------------------------------------------------------- /.azure/templates/steps/prerequisites/install_asciidoc.yaml: -------------------------------------------------------------------------------- 1 | # Steps needed for local Asciidoc installation 2 | steps: 3 | - task: UseRubyVersion@0 4 | inputs: 5 | versionSpec: '>= 2.4' 6 | addToPath: true 7 | - bash: gem install asciidoctor 8 | displayName: 'Install asciidoctor' -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/offsets/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/end/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/DELETE/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/beginning/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 
9 | } 10 | ---- -------------------------------------------------------------------------------- /.azure/templates/steps/maven_cache.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - task: Cache@2 3 | inputs: 4 | key: 'maven-cache | $(System.JobName) | **/pom.xml' 5 | restoreKeys: | 6 | maven-cache | $(System.JobName) 7 | maven-cache 8 | path: $(HOME)/.m2/repository 9 | displayName: Maven cache 10 | -------------------------------------------------------------------------------- /bin/docker/to_bytes.gawk: -------------------------------------------------------------------------------- 1 | # Use gawk because gnu awk can't extract regexp groups; gawk has `match` 2 | BEGIN { 3 | suffixes[""]=1 4 | suffixes["K"]=1024 5 | suffixes["M"]=1024**2 6 | suffixes["G"]=1024**3 7 | } 8 | 9 | match($0, /([0-9.]*)([kKmMgG]?)/, a) { 10 | printf("%d", a[1] * suffixes[toupper(a[2])]) 11 | } -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "records" : [ { 8 | "key" : "key1", 9 | "value" : "value1" 10 | }, { 11 | "value" : "value2" 12 | } ] 13 | } 14 | ---- -------------------------------------------------------------------------------- /.azure/scripts/install_cosign.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | readonly VERSION="2.2.0" 4 | 5 | ARCH=$1 6 | if [ -z "$ARCH" ]; then 7 | ARCH="amd64" 8 | fi 9 | 10 | curl -L https://github.com/sigstore/cosign/releases/download/v${VERSION}/cosign-linux-${ARCH} > cosign && chmod +x cosign 11 | sudo mv cosign /usr/bin/ 12 | -------------------------------------------------------------------------------- /.azure/scripts/settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | central 5 | ${env.CENTRAL_USERNAME} 6 | ${env.CENTRAL_PASSWORD} 7 | 8 | 9 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/assignments/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "partitions" : [ { 8 | "topic" : "topic", 9 | "partition" : 0 10 | }, { 11 | "topic" : "topic", 12 | "partition" : 1 13 | } ] 14 | } 15 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/end/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "partitions" : [ { 8 | "topic" : "topic", 9 | "partition" : 0 10 | }, { 11 | "topic" : "topic", 12 | "partition" : 1 13 | } ] 14 | } 15 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/beginning/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 
6 | { 7 | "partitions" : [ { 8 | "topic" : "topic", 9 | "partition" : 0 10 | }, { 11 | "topic" : "topic", 12 | "partition" : 1 13 | } ] 14 | } 15 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "records" : [ { 8 | "key" : "key1", 9 | "value" : "value1" 10 | }, { 11 | "value" : "value2", 12 | "partition" : 1 13 | }, { 14 | "value" : "value3" 15 | } ] 16 | } 17 | ---- -------------------------------------------------------------------------------- /src/test/resources/application.properties: -------------------------------------------------------------------------------- 1 | #Bridge related settings 2 | bridge.id=my-bridge 3 | 4 | #Apache Kafka common 5 | kafka.bootstrap.servers=localhost:9092 6 | 7 | #Apache Kafka producer 8 | kafka.producer.acks=1 9 | 10 | #Apache Kafka consumer 11 | kafka.consumer.auto.offset.reset=earliest 12 | 13 | #HTTP related settings 14 | http.host=0.0.0.0 15 | http.port=8080 -------------------------------------------------------------------------------- /.azure/scripts/install_syft.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | readonly VERSION="0.90.0" 5 | 6 | ARCH=$1 7 | if [ -z "$ARCH" ]; then 8 | ARCH="amd64" 9 | fi 10 | 11 | wget https://github.com/anchore/syft/releases/download/v${VERSION}/syft_${VERSION}_linux_${ARCH}.tar.gz -O syft.tar.gz 12 | tar xf syft.tar.gz -C /tmp 13 | chmod +x /tmp/syft 14 | sudo mv /tmp/syft /usr/bin 15 | -------------------------------------------------------------------------------- /.azure/templates/steps/prerequisites/install_java.yaml: -------------------------------------------------------------------------------- 1 | # Step to configure JAVA on the agent 2 | parameters: 3 | - name: JDK_VERSION 4 | default: '17' 5 | steps: 6 | - task: JavaToolInstaller@0 7 | inputs: 8 | versionSpec: ${{ parameters.JDK_VERSION }} 9 | jdkArchitectureOption: 'x64' 10 | jdkSourceOption: 'PreInstalled' 11 | displayName: 'Configure Java' 12 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/offsets/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "offsets" : [ { 8 | "topic" : "topic", 9 | "partition" : 0, 10 | "offset" : 15 11 | }, { 12 | "topic" : "topic", 13 | "partition" : 1, 14 | "offset" : 42 15 | } ] 16 | } 17 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/POST/http-request.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "offsets" : [ { 8 | "topic" : "topic", 9 | "partition" : 0, 10 | "offset" : 15 11 | }, { 12 | "topic" : "topic", 13 | "partition" : 1, 14 | "offset" : 42 15 | } ] 16 | } 17 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/POST/http-request.adoc: 
-------------------------------------------------------------------------------- 1 | ==== Example HTTP request 2 | 3 | ===== Request body 4 | [source,json] 5 | ---- 6 | { 7 | "name" : "consumer1", 8 | "format" : "binary", 9 | "auto.offset.reset" : "earliest", 10 | "enable.auto.commit" : false, 11 | "fetch.min.bytes" : 512, 12 | "consumer.request.timeout.ms" : 30000, 13 | "isolation.level" : "read_committed" 14 | } 15 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/offsets/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "beginning_offset" : 10, 8 | "end_offset" : 50 9 | } 10 | ---- 11 | 12 | 13 | ===== Response 404 14 | [source,json] 15 | ---- 16 | { 17 | "error_code" : 404, 18 | "message" : "The specified topic partition was not found." 19 | } 20 | ---- -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/Constants.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge; 6 | 7 | public interface Constants { 8 | 9 | /** 10 | * Tag for http bridge tests, which are triggered for each push/pr/merge on travis-ci 11 | */ 12 | String HTTP_BRIDGE = "httpbridge"; 13 | } 14 | -------------------------------------------------------------------------------- /documentation/modules/proc-downloading-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-downloading-http-bridge-{context}'] 6 | 7 | = Downloading a HTTP Bridge archive 8 | 9 | [role="_abstract"] 10 | A zipped distribution of the HTTP Bridge is available for download. 11 | 12 | .Procedure 13 | 14 | - Download the latest version of the HTTP Bridge archive from the {ReleaseDownload}. 15 | -------------------------------------------------------------------------------- /.azure/templates/steps/prerequisites/install_docker.yaml: -------------------------------------------------------------------------------- 1 | # Steps needed for local Docker installation 2 | steps: 3 | - task: DockerInstaller@0 4 | displayName: Install Docker 5 | inputs: 6 | # Versions can be found from https://download.docker.com/linux/static/stable/x86_64/ 7 | dockerVersion: 24.0.5 8 | releaseType: stable 9 | - bash: | 10 | docker run --rm --privileged multiarch/qemu-user-static --reset -p yes 11 | displayName: 'Register QEMU binary' -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/assignments/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 
9 | } 10 | ---- 11 | 12 | 13 | ===== Response 409 14 | [source,json] 15 | ---- 16 | { 17 | "error_code" : 409, 18 | "message" : "Subscriptions to topics, partitions, and patterns are mutually exclusive." 19 | } 20 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "topics" : [ "my-topic1", "my-topic2" ], 8 | "partitions" : [ { 9 | "my-topic1" : [ 1, 2, 3 ] 10 | }, { 11 | "my-topic2" : [ 1 ] 12 | } ] 13 | } 14 | ---- 15 | 16 | 17 | ===== Response 404 18 | [source,json] 19 | ---- 20 | { 21 | "error_code" : 404, 22 | "message" : "The specified consumer instance was not found." 23 | } 24 | ---- -------------------------------------------------------------------------------- /documentation/book/bridge.adoc: -------------------------------------------------------------------------------- 1 | include::common/attributes.adoc[] 2 | 3 | :context: bridge 4 | 5 | [id='using_book-{context}'] 6 | = Using the Strimzi HTTP Bridge 7 | 8 | include::assemblies/assembly-http-bridge-overview.adoc[leveloffset=+1] 9 | 10 | include::assemblies/assembly-http-bridge-quickstart.adoc[leveloffset=+1] 11 | 12 | include::assemblies/assembly-http-bridge-config.adoc[leveloffset=+1] 13 | 14 | [id='api_reference-{context}'] 15 | include::api/index.adoc[leveloffset=+1] 16 | 17 | include::common/revision-info.adoc[leveloffset=+1] 18 | -------------------------------------------------------------------------------- /.azure/scripts/push-to-central.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo "Build reason: ${BUILD_REASON}" 6 | echo "Source branch: ${BRANCH}" 7 | 8 | function cleanup() { 9 | rm -rf signing.gpg 10 | gpg --delete-keys 11 | gpg --delete-secret-keys 12 | } 13 | 14 | # Run the cleanup on failure / exit 15 | trap cleanup EXIT 16 | 17 | export GPG_TTY=$(tty) 18 | echo $GPG_SIGNING_KEY | base64 -d > signing.gpg 19 | gpg --batch --import signing.gpg 20 | 21 | GPG_EXECUTABLE=gpg mvn $MVN_ARGS -DskipTests -s ./.azure/scripts/settings.xml -P central deploy 22 | 23 | cleanup -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/model/HttpBridgeResult.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http.model; 7 | 8 | /** 9 | * This class represents a result of an HTTP bridging operation 10 | * 11 | * @param the class bringing the actual result as {@link HttpBridgeError} or {@link org.apache.kafka.clients.producer.RecordMetadata} 12 | * @param result actual result 13 | */ 14 | public record HttpBridgeResult(T result) { } 15 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "partition" : 1, 8 | "leader" : 1, 9 | "replicas" : [ { 10 | "broker" : 1, 11 | "leader" : true, 12 | "in_sync" : true 13 | }, { 14 | "broker" : 2, 15 | "leader" : false, 16 | "in_sync" : true 17 | } ] 18 | } 19 | ---- 20 | 21 | 22 | ===== Response 404 23 | [source,json] 24 | ---- 25 | { 26 | "error_code" : 404, 27 | "message" : "The specified topic partition was not found." 28 | } 29 | ---- -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | 3 | # Mobile Tools for Java (J2ME) 4 | .mtj.tmp/ 5 | 6 | # Package Files # 7 | *.jar 8 | *.war 9 | *.ear 10 | 11 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 12 | hs_err_pid* 13 | 14 | # Maven stuff 15 | **/target/* 16 | 17 | # Eclipse stuff 18 | **/.project 19 | **/.settings/* 20 | **/.prefs 21 | **/.classpath 22 | /target/ 23 | 24 | # IntelliJ IDEA specific 25 | .idea/ 26 | *.iml 27 | 28 | # VS Code 29 | .factorypath 30 | .vscode 31 | 32 | .DS_Store 33 | 34 | # Auto generated doc 35 | documentation/html/** 36 | documentation/htmlnoheader/** 37 | documentation/book/build/** 38 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/IllegalEmbeddedFormatException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | /** 9 | * IllegalEmbeddedFormatException 10 | */ 11 | public class IllegalEmbeddedFormatException extends RuntimeException { 12 | 13 | private static final long serialVersionUID = 1L; 14 | 15 | /** 16 | * Constructor 17 | * 18 | * @param message message to set in the exception 19 | */ 20 | public IllegalEmbeddedFormatException(String message) { 21 | super(message); 22 | } 23 | } -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/Handler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | /** 9 | * Represents a functional interface for handling callback when an asynchronous operation ends 10 | * 11 | * @param type of the data to be handled 12 | */ 13 | @FunctionalInterface 14 | public interface Handler { 15 | 16 | /** 17 | * Called to handle the result of the asynchronous operation with the provided data 18 | * 19 | * @param data data result to handle 20 | */ 21 | void handle(T data); 22 | } 23 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "instance_id" : "consumer1", 8 | "base_uri" : "http://localhost:8080/consumers/my-group/instances/consumer1" 9 | } 10 | ---- 11 | 12 | 13 | ===== Response 409 14 | [source,json] 15 | ---- 16 | { 17 | "error_code" : 409, 18 | "message" : "A consumer instance with the specified name already exists in the HTTP Bridge." 19 | } 20 | ---- 21 | 22 | 23 | ===== Response 422 24 | [source,json] 25 | ---- 26 | { 27 | "error_code" : 422, 28 | "message" : "One or more consumer configuration options have invalid values." 29 | } 30 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 404 4 | [source,json] 5 | ---- 6 | { 7 | "error_code" : 404, 8 | "message" : "The specified consumer instance was not found." 9 | } 10 | ---- 11 | 12 | 13 | ===== Response 409 14 | [source,json] 15 | ---- 16 | { 17 | "error_code" : 409, 18 | "message" : "Subscriptions to topics, partitions, and patterns are mutually exclusive." 19 | } 20 | ---- 21 | 22 | 23 | ===== Response 422 24 | [source,json] 25 | ---- 26 | { 27 | "error_code" : 422, 28 | "message" : "A list (of Topics type) or a topic_pattern must be specified." 29 | } 30 | ---- -------------------------------------------------------------------------------- /.checkstyle/suppressions.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | 14 | 16 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "offsets" : [ { 8 | "partition" : 2, 9 | "offset" : 0 10 | }, { 11 | "partition" : 1, 12 | "offset" : 1 13 | }, { 14 | "partition" : 2, 15 | "offset" : 2 16 | } ] 17 | } 18 | ---- 19 | 20 | 21 | ===== Response 404 22 | [source,json] 23 | ---- 24 | { 25 | "error_code" : 404, 26 | "message" : "The specified topic was not found." 27 | } 28 | ---- 29 | 30 | 31 | ===== Response 422 32 | [source,json] 33 | ---- 34 | { 35 | "error_code" : 422, 36 | "message" : "The record list contains invalid records." 
37 | } 38 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/POST/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "offsets" : [ { 8 | "partition" : 2, 9 | "offset" : 0 10 | }, { 11 | "partition" : 1, 12 | "offset" : 1 13 | }, { 14 | "partition" : 2, 15 | "offset" : 2 16 | } ] 17 | } 18 | ---- 19 | 20 | 21 | ===== Response 404 22 | [source,json] 23 | ---- 24 | { 25 | "error_code" : 404, 26 | "message" : "The specified topic partition was not found." 27 | } 28 | ---- 29 | 30 | 31 | ===== Response 422 32 | [source,json] 33 | ---- 34 | { 35 | "error_code" : 422, 36 | "message" : "The record is not valid." 37 | } 38 | ---- -------------------------------------------------------------------------------- /bin/kafka_bridge_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # Find my path to use when calling scripts 5 | MYPATH="$(dirname "$0")" 6 | 7 | # Configure logging 8 | if [ -z "$KAFKA_BRIDGE_LOG4J_OPTS" ] 9 | then 10 | KAFKA_BRIDGE_LOG4J_OPTS="-Dlog4j2.configurationFile=file:${MYPATH}/../config/log4j2.properties" 11 | fi 12 | 13 | # Make sure that we use /dev/urandom 14 | JAVA_OPTS="${JAVA_OPTS} -Dvertx.cacheDirBase=/tmp/vertx-cache -Djava.security.egd=file:/dev/./urandom" 15 | 16 | # enabling OpenTelemetry with OTLP by default 17 | if [ -n "$OTEL_SERVICE_NAME" ] && [ -z "$OTEL_TRACES_EXPORTER" ]; then 18 | export OTEL_TRACES_EXPORTER="otlp" 19 | fi 20 | 21 | exec java $JAVA_OPTS $KAFKA_BRIDGE_LOG4J_OPTS -classpath "${MYPATH}/../libs/*" io.strimzi.kafka.bridge.Application "$@" -------------------------------------------------------------------------------- /Makefile.maven: -------------------------------------------------------------------------------- 1 | # Makefile.maven contains the shared tasks for building Java applications. This file is 2 | # included into the Makefile files which contain some Java sources which should be build 3 | 4 | .PHONY: java_compile 5 | java_compile: 6 | echo "Building JAR file ..." 7 | mvn $(MVN_ARGS) compile 8 | 9 | .PHONY: java_verify 10 | java_verify: 11 | echo "Building JAR file ..." 12 | mvn $(MVN_ARGS) verify 13 | 14 | .PHONY: java_package 15 | java_package: 16 | echo "Packaging project ..." 17 | mvn $(MVN_ARGS) package 18 | 19 | .PHONY: java_install 20 | java_install: 21 | echo "Installing JAR files ..." 22 | mvn $(MVN_ARGS) install 23 | 24 | .PHONY: java_clean 25 | java_clean: 26 | echo "Cleaning Maven build ..." 
27 | mvn clean 28 | 29 | .PHONY: spotbugs 30 | spotbugs: 31 | mvn $(MVN_ARGS) spotbugs:check 32 | -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/partitions/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | [ { 7 | "partition" : 1, 8 | "leader" : 1, 9 | "replicas" : [ { 10 | "broker" : 1, 11 | "leader" : true, 12 | "in_sync" : true 13 | }, { 14 | "broker" : 2, 15 | "leader" : false, 16 | "in_sync" : true 17 | } ] 18 | }, { 19 | "partition" : 2, 20 | "leader" : 2, 21 | "replicas" : [ { 22 | "broker" : 1, 23 | "leader" : false, 24 | "in_sync" : true 25 | }, { 26 | "broker" : 2, 27 | "leader" : true, 28 | "in_sync" : true 29 | } ] 30 | } ] 31 | ---- 32 | 33 | 34 | ===== Response 404 35 | [source,json] 36 | ---- 37 | { 38 | "error_code" : 404, 39 | "message" : "The specified topic was not found." 40 | } 41 | ---- -------------------------------------------------------------------------------- /documentation/book/api/snippet/topics/{topicname}/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | { 7 | "name" : "topic", 8 | "offset" : 2, 9 | "configs" : { 10 | "cleanup.policy" : "compact" 11 | }, 12 | "partitions" : [ { 13 | "partition" : 1, 14 | "leader" : 1, 15 | "replicas" : [ { 16 | "broker" : 1, 17 | "leader" : true, 18 | "in_sync" : true 19 | }, { 20 | "broker" : 2, 21 | "leader" : false, 22 | "in_sync" : true 23 | } ] 24 | }, { 25 | "partition" : 2, 26 | "leader" : 2, 27 | "replicas" : [ { 28 | "broker" : 1, 29 | "leader" : false, 30 | "in_sync" : true 31 | }, { 32 | "broker" : 2, 33 | "leader" : true, 34 | "in_sync" : true 35 | } ] 36 | } ] 37 | } 38 | ---- -------------------------------------------------------------------------------- /perftest/README.md: -------------------------------------------------------------------------------- 1 | # Performance tests 2 | 3 | This folder contains a [JMeter](https://jmeter.apache.org/) JMX configuration file describing a test plan with the following operations: 4 | 5 | * consumers creation, topics subscription, polling for getting records in a loop, and final consumers deletion 6 | * producers sending records to topics in a loop 7 | 8 | The test plan is configurable. You can change the number of consumers/producers (JMeter threads) and the number of loop cycles for sending/receiving records. 9 | 10 | It needs a set of plugins in order to show some graphs. 11 | For this reason, you need to download the JMeter Plugins Manager from the [jmeter-plugins.org](https://jmeter-plugins.org/get/) website and put it into the `lib/ext` folder. 12 | When opening the JMX configuration file for the first time, JMeter will ask to install the plugins to run the test plan. 
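Once the plugins are in place, the plan can also be run headless from the command line. The following is a minimal sketch of a non-GUI run; the `.jmx` file name and the `threads`/`loops` property names are assumptions used for illustration, so adjust them to match the test plan in this folder and the `${__P(...)}` properties it actually defines.

```shell
# Non-GUI run: -n (no GUI), -t (test plan), -l (results log), -e -o (HTML report).
# The plan file name and the -J property names below are hypothetical examples.
jmeter -n -t http-bridge-test-plan.jmx \
  -Jthreads=10 -Jloops=1000 \
  -l results.jtl -e -o report
```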
-------------------------------------------------------------------------------- /documentation/modules/con-securing-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id='con-securing-http-bridge-{context}'] 6 | = Securing connectivity to the Kafka cluster 7 | 8 | [role="_abstract"] 9 | You can configure the following between the HTTP Bridge and your Kafka cluster: 10 | 11 | * TLS or SASL-based authentication 12 | * A TLS-encrypted connection 13 | 14 | You configure the HTTP Bridge for authentication through its xref:proc-configuring-http-bridge-{context}[properties file]. 15 | 16 | You can also use ACLs in Kafka brokers to restrict the topics that can be consumed and produced using the HTTP Bridge. 17 | 18 | NOTE: Use the `KafkaBridge` resource to configure authentication when you are xref:overview-components-running-http-bridge-cluster-{context}[running the HTTP Bridge on Kubernetes]. -------------------------------------------------------------------------------- /documentation/modules/con-overview-open-api-spec-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id='overview-open-api-spec-http-bridge-{context}'] 6 | = HTTP Bridge OpenAPI specification 7 | 8 | [role="_abstract"] 9 | HTTP Bridge APIs use the OpenAPI Specification (OAS). 10 | OAS provides a standard framework for describing and implementing HTTP APIs. 11 | 12 | The HTTP Bridge OpenAPI specification is in JSON format. 13 | You can find the OpenAPI JSON files in the `src/main/resources/` folder of the HTTP Bridge source download files. 14 | The download files are available from the {ReleaseDownload}. 15 | 16 | You can also use the xref:openapi[`GET /openapi` method] to retrieve the OpenAPI v3 specification in JSON format. 17 | 18 | [role="_additional-resources"] 19 | .Additional resources 20 | * {openapis} 21 | -------------------------------------------------------------------------------- /documentation/modules/proc-bridge-deleting-consumer.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-bridge-deleting-consumer-{context}'] 6 | = Deleting a HTTP Bridge consumer 7 | 8 | [role="_abstract"] 9 | Delete the HTTP Bridge consumer that you used throughout this quickstart. 10 | 11 | .Procedure 12 | 13 | * Delete the HTTP Bridge consumer by sending a `DELETE` request to the xref:deleteconsumer[instances] endpoint. 14 | + 15 | [source,curl,subs=attributes+] 16 | ---- 17 | curl -X DELETE http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer 18 | ---- 19 | + 20 | If the request is successful, the HTTP Bridge returns a `204` code. 21 | 22 | [role="_additional-resources"] 23 | .Additional resources 24 | 25 | * xref:deleteconsumer[DELETE /consumers/{groupid}/instances/{name}] 26 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/AbstractConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.util.Map; 9 | 10 | /** 11 | * Base abstract class for configurations related to protocols heads and Kafka 12 | */ 13 | public abstract class AbstractConfig { 14 | 15 | protected final Map config; 16 | 17 | /** 18 | * Constructor 19 | * 20 | * @param config configuration parameters map 21 | */ 22 | public AbstractConfig(Map config) { 23 | this.config = config; 24 | } 25 | 26 | /** 27 | * @return configuration parameters map 28 | */ 29 | public Map getConfig() { 30 | return this.config; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/tracing/OpenTelemetryTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import io.vertx.core.tracing.TracingOptions; 9 | import io.vertx.tracing.opentelemetry.OpenTelemetryOptions; 10 | 11 | import static io.strimzi.kafka.bridge.tracing.TracingConstants.OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY; 12 | 13 | /** 14 | * OpenTelemetry tests 15 | */ 16 | public class OpenTelemetryTest extends TracingTestBase { 17 | @Override 18 | protected TracingOptions tracingOptions() { 19 | System.setProperty(OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY, "strimzi-kafka-bridge-test"); 20 | System.setProperty("otel.metrics.exporter", "none"); // disable metrics 21 | return new OpenTelemetryOptions(); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /documentation/common/attributes.adoc: -------------------------------------------------------------------------------- 1 | // AsciiDoc settings 2 | :data-uri!: 3 | :doctype: book 4 | :experimental: 5 | :idprefix: 6 | :imagesdir: images 7 | :numbered: 8 | :sectanchors!: 9 | :sectnums: 10 | :source-highlighter: highlightjs 11 | :toc: left 12 | :linkattrs: 13 | :toclevels: 3 14 | :sectlinks: 15 | 16 | //links to Strimzi docs 17 | :BookURLConfiguring: https://strimzi.io/documentation/[Strimzi documentation^] 18 | 19 | //API versions 20 | :KafkaTopicApiVersion: kafka.strimzi.io/v1beta2 21 | 22 | // Source and download links 23 | :ReleaseDownload: https://github.com/strimzi/strimzi-kafka-bridge/releases[GitHub release page^] 24 | 25 | //External links 26 | :external-cors-link: https://fetch.spec.whatwg.org/[Fetch CORS specification^] 27 | :openapis: https://www.openapis.org/[OpenAPI initiative^] 28 | :otel-exporters: https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure#exporters[OpenTelemetry exporter values^] 29 | -------------------------------------------------------------------------------- /bin/docker/dynamic_resources.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | MYPATH="$(dirname "$0")" 5 | 6 | function get_heap_size { 7 | # Get the max heap used by a jvm which used all the ram available to the container 8 | CONTAINER_MEMORY_IN_BYTES=$(java -XshowSettings:vm -version \ 9 | |& awk '/Max\. 
Heap Size \(Estimated\): [0-9KMG]+/{ print $5}' \ 10 | | gawk -f "${MYPATH}"/to_bytes.gawk) 11 | 12 | # use max of 31G memory, java performs much better with Compressed Ordinary Object Pointers 13 | DEFAULT_MEMORY_CEILING=$((31 * 2**30)) 14 | if [ "${CONTAINER_MEMORY_IN_BYTES}" -lt "${DEFAULT_MEMORY_CEILING}" ]; then 15 | if [ -z $CONTAINER_HEAP_PERCENT ]; then 16 | CONTAINER_HEAP_PERCENT=0.50 17 | fi 18 | 19 | CONTAINER_MEMORY_IN_MB=$((${CONTAINER_MEMORY_IN_BYTES}/1024**2)) 20 | CONTAINER_HEAP_MAX=$(echo "${CONTAINER_MEMORY_IN_MB} ${CONTAINER_HEAP_PERCENT}" | awk '{ printf "%d", $1 * $2 }') 21 | 22 | echo "${CONTAINER_HEAP_MAX}" 23 | fi 24 | } -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/tracing/TracingConstants.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | /** 9 | * Tracing constants. 10 | */ 11 | public final class TracingConstants { 12 | 13 | /** tracing component name definition */ 14 | public static final String COMPONENT = "strimzi-kafka-bridge"; 15 | /** Kafka service name definition */ 16 | public static final String KAFKA_SERVICE = "kafka"; 17 | 18 | /** OpenTelemetry tracing type */ 19 | public static final String OPENTELEMETRY = "opentelemetry"; 20 | 21 | /** OpenTelemetry service name env var */ 22 | public static final String OPENTELEMETRY_SERVICE_NAME_ENV_KEY = "OTEL_SERVICE_NAME"; 23 | /** OpenTelemetry service name system property */ 24 | public static final String OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY = "otel.service.name"; 25 | } 26 | -------------------------------------------------------------------------------- /src/main/resources/log4j2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The logging properties used 3 | # 4 | name = BridgeConfig 5 | 6 | appender.console.type = Console 7 | appender.console.name = STDOUT 8 | appender.console.layout.type = PatternLayout 9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n 10 | 11 | rootLogger.level = INFO 12 | rootLogger.appenderRefs = console 13 | rootLogger.appenderRef.console.ref = STDOUT 14 | rootLogger.additivity = false 15 | 16 | logger.bridge.name = io.strimzi.kafka.bridge 17 | logger.bridge.level = INFO 18 | logger.bridge.appenderRefs = console 19 | logger.bridge.appenderRef.console.ref = STDOUT 20 | logger.bridge.additivity = false 21 | 22 | # HTTP OpenAPI specific logging levels (default is INFO) 23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking. 
24 | logger.healthy.name = http.openapi.operation.healthy 25 | logger.healthy.level = WARN 26 | logger.ready.name = http.openapi.operation.ready 27 | logger.ready.level = WARN -------------------------------------------------------------------------------- /src/test/resources/log4j2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The logging properties used 3 | # 4 | name = BridgeConfig 5 | 6 | appender.console.type = Console 7 | appender.console.name = STDOUT 8 | appender.console.layout.type = PatternLayout 9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n 10 | 11 | rootLogger.level = INFO 12 | rootLogger.appenderRefs = console 13 | rootLogger.appenderRef.console.ref = STDOUT 14 | rootLogger.additivity = false 15 | 16 | logger.bridge.name = io.strimzi.kafka.bridge 17 | logger.bridge.level = INFO 18 | logger.bridge.appenderRefs = console 19 | logger.bridge.appenderRef.console.ref = STDOUT 20 | logger.bridge.additivity = false 21 | 22 | # HTTP OpenAPI specific logging levels (default is INFO) 23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking. 24 | logger.healthy.name = http.openapi.operation.healthy 25 | logger.healthy.level = WARN 26 | logger.ready.name = http.openapi.operation.ready 27 | logger.ready.level = WARN -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/converter/JsonDecodeException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http.converter; 7 | 8 | /** 9 | * Represents and exception during JSON decoding operations 10 | */ 11 | public class JsonDecodeException extends RuntimeException { 12 | 13 | /** 14 | * Default constrctor 15 | */ 16 | public JsonDecodeException() { 17 | } 18 | 19 | /** 20 | * Constructor 21 | * 22 | * @param message Exception message 23 | */ 24 | public JsonDecodeException(String message) { 25 | super(message); 26 | } 27 | 28 | /** 29 | * Constructor 30 | * 31 | * @param message Exception message 32 | * @param cause Inner cause of the exception 33 | */ 34 | public JsonDecodeException(String message, Throwable cause) { 35 | super(message, cause); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/converter/JsonEncodeException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http.converter; 7 | 8 | /** 9 | * Represents and exception during JSON encoding operations 10 | */ 11 | public class JsonEncodeException extends RuntimeException { 12 | 13 | /** 14 | * Default constrctor 15 | */ 16 | public JsonEncodeException() { 17 | } 18 | 19 | /** 20 | * Constructor 21 | * 22 | * @param message Exception message 23 | */ 24 | public JsonEncodeException(String message) { 25 | super(message); 26 | } 27 | 28 | /** 29 | * Constructor 30 | * 31 | * @param message Exception message 32 | * @param cause Inner cause of the exception 33 | */ 34 | public JsonEncodeException(String message, Throwable cause) { 35 | super(message, cause); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/BridgeContentType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | /** 9 | * Bridge supported content type 10 | */ 11 | public class BridgeContentType { 12 | 13 | /** JSON encoding with JSON embedded format */ 14 | public static final String KAFKA_JSON_JSON = "application/vnd.kafka.json.v2+json"; 15 | 16 | /** JSON encoding with BINARY embedded format */ 17 | public static final String KAFKA_JSON_BINARY = "application/vnd.kafka.binary.v2+json"; 18 | 19 | /** JSON encoding with TEXT embedded format */ 20 | public static final String KAFKA_JSON_TEXT = "application/vnd.kafka.text.v2+json"; 21 | 22 | /** Specific Kafka JSON encoding */ 23 | public static final String KAFKA_JSON = "application/vnd.kafka.v2+json"; 24 | 25 | /** JSON encoding */ 26 | public static final String JSON = "application/json"; 27 | } 28 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/clients/ClientHandlerBase.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.clients; 6 | 7 | import io.vertx.core.AbstractVerticle; 8 | 9 | import java.util.concurrent.CompletableFuture; 10 | import java.util.function.IntPredicate; 11 | 12 | public abstract class ClientHandlerBase extends AbstractVerticle { 13 | final CompletableFuture resultPromise; 14 | final IntPredicate msgCntPredicate; 15 | 16 | public ClientHandlerBase(CompletableFuture resultPromise, IntPredicate msgCntPredicate) { 17 | this.resultPromise = resultPromise; 18 | this.msgCntPredicate = msgCntPredicate; 19 | } 20 | 21 | @Override 22 | public void start() { 23 | handleClient(); 24 | } 25 | 26 | protected abstract void handleClient(); 27 | 28 | public CompletableFuture getResultPromise() { 29 | return resultPromise; 30 | } 31 | } -------------------------------------------------------------------------------- /.azure/templates/jobs/publish_docs.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'public_docs' 3 | displayName: 'Publish Docs' 4 | # Set timeout for jobs 5 | timeoutInMinutes: 60 6 | # Base system 7 | pool: 8 | vmImage: 'Ubuntu-22.04' 9 | # Pipeline steps 10 | steps: 11 | - task: DownloadPipelineArtifact@2 12 | inputs: 13 | source: '${{ parameters.artifactSource }}' 14 | artifact: Documentation 15 | path: $(System.DefaultWorkingDirectory) 16 | project: '${{ parameters.artifactProject }}' 17 | pipeline: '${{ parameters.artifactPipeline }}' 18 | runVersion: '${{ parameters.artifactRunVersion }}' 19 | runId: '${{ parameters.artifactRunId }}' 20 | - bash: tar -xvf documentation.tar 21 | displayName: "Untar the documentation directory" 22 | - bash: "make docu_pushtowebsite" 23 | env: 24 | BUILD_REASON: $(Build.Reason) 25 | BRANCH: $(Build.SourceBranch) 26 | GITHUB_DEPLOY_KEY: $(GITHUB_DEPLOY_KEY) 27 | displayName: "Publish the docs to the website" 28 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/EmbeddedFormat.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | /** 9 | * Define the data format inside the HTTP messages 10 | */ 11 | public enum EmbeddedFormat { 12 | 13 | /** Define "binary" data as embedded format */ 14 | BINARY, 15 | 16 | /** Define "json" data as embedded format */ 17 | JSON, 18 | 19 | /** Define "text" data as embedded format */ 20 | TEXT; 21 | 22 | /** 23 | * Convert the String value in the corresponding enum 24 | * 25 | * @param value value to be converted 26 | * @return corresponding enum 27 | */ 28 | public static EmbeddedFormat from(String value) { 29 | return switch (value) { 30 | case "json" -> JSON; 31 | case "binary" -> BINARY; 32 | case "text" -> TEXT; 33 | default -> throw new IllegalEmbeddedFormatException("Invalid format type."); 34 | }; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/utils/KafkaJsonSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.utils; 7 | 8 | import com.fasterxml.jackson.databind.ObjectMapper; 9 | import org.apache.kafka.common.serialization.Serializer; 10 | 11 | import java.util.Map; 12 | 13 | public class KafkaJsonSerializer<T> implements Serializer<T> { 14 | 15 | @Override 16 | public void configure(Map<String, ?> configs, boolean isKey) { 17 | 18 | } 19 | 20 | @Override 21 | public byte[] serialize(String topic, T data) { 22 | if (data == null) { 23 | return null; 24 | } 25 | 26 | ObjectMapper objectMapper = new ObjectMapper(); 27 | try { 28 | return objectMapper.writeValueAsBytes(data); 29 | } catch (Exception e) { 30 | throw new RuntimeException("Error serializing JSON message", e); 31 | } 32 | } 33 | 34 | @Override 35 | public void close() { 36 | 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /documentation/assemblies/assembly-http-bridge-overview.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // bridge.adoc 4 | 5 | [id='assembly-http-bridge-overview-{context}'] 6 | = HTTP Bridge overview 7 | 8 | [role="_abstract"] 9 | Use the HTTP Bridge to make HTTP requests to a Kafka cluster. 10 | 11 | You can use the HTTP Bridge to integrate HTTP client applications with your Kafka cluster. 12 | 13 | .HTTP client integration 14 | 15 | image:kafka-bridge.png[Internal and external HTTP producers and consumers exchange data with the Kafka brokers through the HTTP Bridge] 16 | 17 | include::modules/con-overview-running-http-bridge.adoc[leveloffset=+1] 18 | 19 | include::modules/con-overview-components-http-bridge.adoc[leveloffset=+1] 20 | 21 | include::modules/con-overview-open-api-spec-http-bridge.adoc[leveloffset=+1] 22 | 23 | include::modules/con-securing-http-bridge.adoc[leveloffset=+1] 24 | 25 | include::modules/con-securing-http-interface.adoc[leveloffset=+1] 26 | 27 | include::modules/con-requests-http-bridge.adoc[leveloffset=+1] 28 | 29 | include::modules/con-loggers-http-bridge.adoc[leveloffset=+1] 30 | -------------------------------------------------------------------------------- /documentation/book/api/.openapi-generator-ignore: -------------------------------------------------------------------------------- 1 | # OpenAPI Generator Ignore 2 | # Generated by openapi-generator https://github.com/openapitools/openapi-generator 3 | 4 | # Use this file to prevent files from being overwritten by the generator. 5 | # The patterns follow closely to .gitignore or .dockerignore. 6 | 7 | # As an example, the C# client generator defines ApiClient.cs. 8 | # You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: 9 | #ApiClient.cs 10 | 11 | # You can match any string of characters against a directory, file or extension with a single asterisk (*): 12 | #foo/*/qux 13 | # The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux 14 | 15 | # You can recursively match patterns against a directory, file or extension with a double asterisk (**): 16 | #foo/**/qux 17 | # This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux 18 | 19 | # You can also negate patterns with an exclamation (!).
20 | # For example, you can ignore all files in a docs folder with the file extension .md: 21 | #docs/*.md 22 | # Then explicitly reverse the ignore rule for a single file: 23 | #!docs/README.md 24 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/metrics/MetricsType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge.metrics; 6 | 7 | /** 8 | * Metrics type. 9 | */ 10 | public enum MetricsType { 11 | /** Prometheus JMX Exporter. */ 12 | JMX_EXPORTER("jmxPrometheusExporter"), 13 | 14 | /** Strimzi Metrics Reporter. */ 15 | STRIMZI_REPORTER("strimziMetricsReporter"); 16 | 17 | private final String text; 18 | 19 | MetricsType(final String text) { 20 | this.text = text; 21 | } 22 | 23 | @Override 24 | public String toString() { 25 | return text; 26 | } 27 | 28 | /** 29 | * @param text Text. 30 | * @return Get type from text. 31 | */ 32 | public static MetricsType fromString(String text) { 33 | for (MetricsType t : MetricsType.values()) { 34 | if (t.text.equalsIgnoreCase(text)) { 35 | return t; 36 | } 37 | } 38 | throw new IllegalArgumentException("Metrics type not found: " + text); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /documentation/modules/proc-configuring-http-bridge-jmx-metrics.adoc: -------------------------------------------------------------------------------- 1 | [id='proc-configuring-http-bridge-jmx-metrics-{context}'] 2 | = Configuring Prometheus JMX Exporter metrics 3 | 4 | [role="_abstract"] 5 | Enable the Prometheus JMX Exporter to collect HTTP Bridge metrics by setting the `bridge.metrics` option to `jmxPrometheusExporter`. 6 | 7 | .Prerequisites 8 | 9 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded]. 10 | 11 | .Procedure 12 | 13 | . Set the `bridge.metrics` configuration to `jmxPrometheusExporter`. 14 | + 15 | .Configuration for enabling metrics 16 | 17 | [source,properties] 18 | ---- 19 | bridge.metrics=jmxPrometheusExporter 20 | ---- 21 | + 22 | Optionally, you can add a custom Prometheus JMX Exporter configuration using the `bridge.metrics.exporter.config.path` property. 23 | If not configured, a default embedded configuration file is used. 24 | 25 | . Run the HTTP Bridge run script. 26 | + 27 | .Running the HTTP Bridge 28 | [source,shell] 29 | ---- 30 | ./bin/kafka_bridge_run.sh --config-file=/application.properties 31 | ---- 32 | + 33 | With metrics enabled, you can scrape metrics in Prometheus format from the `/metrics` endpoint of the HTTP Bridge. 
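+
With the bridge running and metrics enabled, you can check the endpoint with a simple request. The host and port below are the defaults from `application.properties` (`http.host` and `http.port`); adjust them to match your own configuration:
+
.Scraping the metrics endpoint
[source,curl]
----
curl -X GET http://localhost:8080/metrics
----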
34 | -------------------------------------------------------------------------------- /.azure/templates/jobs/release_artifacts.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'release_artifacts' 3 | displayName: 'Prepare and release artifacts' 4 | # Set timeout for jobs 5 | timeoutInMinutes: 60 6 | # Base system 7 | pool: 8 | vmImage: 'Ubuntu-22.04' 9 | # Pipeline steps 10 | steps: 11 | # Install Prerequisites 12 | - template: '../steps/prerequisites/install_java.yaml' 13 | 14 | # Change the release version 15 | - bash: "mvn versions:set -DnewVersion=$(echo $RELEASE_VERSION | tr a-z A-Z)" 16 | displayName: "Configure release version to ${{ parameters.releaseVersion }}" 17 | env: 18 | RELEASE_VERSION: '${{ parameters.releaseVersion }}' 19 | 20 | - bash: "mvn install -DskipTests" 21 | displayName: "Build Java" 22 | 23 | # Deploy to Central 24 | - bash: "./.azure/scripts/push-to-central.sh" 25 | env: 26 | BUILD_REASON: $(Build.Reason) 27 | BRANCH: $(Build.SourceBranch) 28 | GPG_PASSPHRASE: $(GPG_PASSPHRASE) 29 | GPG_SIGNING_KEY: $(GPG_SIGNING_KEY) 30 | CENTRAL_USERNAME: $(CENTRAL_USERNAME) 31 | CENTRAL_PASSWORD: $(CENTRAL_PASSWORD) 32 | MVN_ARGS: "-e -V -B" 33 | displayName: "Deploy Java artifacts" -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/utils/KafkaJsonDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.utils; 7 | 8 | import com.fasterxml.jackson.databind.ObjectMapper; 9 | import org.apache.kafka.common.serialization.Deserializer; 10 | 11 | import java.util.Map; 12 | 13 | public class KafkaJsonDeserializer<T> implements Deserializer<T> { 14 | 15 | private final Class<T> type; 16 | 17 | public KafkaJsonDeserializer(Class<T> type) { 18 | this.type = type; 19 | } 20 | 21 | @Override 22 | public void configure(Map<String, ?> configs, boolean isKey) { 23 | 24 | } 25 | 26 | @Override 27 | public T deserialize(String topic, byte[] bytes) { 28 | if (bytes == null || bytes.length == 0) { 29 | return null; 30 | } 31 | 32 | ObjectMapper mapper = new ObjectMapper(); 33 | try { 34 | return mapper.readValue(bytes, type); 35 | } catch (Exception e) { 36 | throw new RuntimeException("Error deserializing JSON message", e); 37 | } 38 | } 39 | 40 | @Override 41 | public void close() { 42 | 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /.azure/scripts/docu-push-to-website.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo "Build reason: ${BUILD_REASON}" 6 | echo "Source branch: ${BRANCH}" 7 | 8 | echo "$GITHUB_DEPLOY_KEY" | base64 -d > github_deploy_key 9 | chmod 600 github_deploy_key 10 | eval "$(ssh-agent -s)" 11 | ssh-add github_deploy_key 12 | 13 | git clone git@github.com:strimzi/strimzi.github.io.git /tmp/website 14 | rm -rf /tmp/website/docs/bridge/in-development/images 15 | rm -rf /tmp/website/docs/bridge/in-development/full/images 16 | cp -v documentation/htmlnoheader/bridge.html /tmp/website/docs/bridge/in-development/bridge.html 17 | cp -v documentation/html/bridge.html /tmp/website/docs/bridge/in-development/full/bridge.html 18 | cp -vrL documentation/htmlnoheader/images
/tmp/website/docs/bridge/in-development/images 19 | cp -vrL documentation/htmlnoheader/images /tmp/website/docs/bridge/in-development/full/images 20 | 21 | pushd /tmp/website 22 | 23 | if [[ -z $(git status -s) ]]; then 24 | echo "No changes to the output on this push; exiting." 25 | exit 0 26 | fi 27 | 28 | git config user.name "Strimzi CI" 29 | git config user.email "ci@strimzi.io" 30 | 31 | git add -A 32 | git commit -s -m "Update Kafka Bridge documentation (Commit ${COMMIT})" --allow-empty 33 | git push origin main 34 | 35 | popd 36 | -------------------------------------------------------------------------------- /config/log4j2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The logging properties used 3 | # 4 | name = BridgeConfig 5 | 6 | appender.console.type = Console 7 | appender.console.name = STDOUT 8 | appender.console.layout.type = PatternLayout 9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n 10 | 11 | rootLogger.level = INFO 12 | rootLogger.appenderRefs = console 13 | rootLogger.appenderRef.console.ref = STDOUT 14 | rootLogger.additivity = false 15 | 16 | logger.bridge.name = io.strimzi.kafka.bridge 17 | logger.bridge.level = INFO 18 | logger.bridge.appenderRefs = console 19 | logger.bridge.appenderRef.console.ref = STDOUT 20 | logger.bridge.additivity = false 21 | 22 | # HTTP OpenAPI specific logging levels (default is INFO) 23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking. 24 | logger.healthy.name = http.openapi.operation.healthy 25 | logger.healthy.level = WARN 26 | logger.ready.name = http.openapi.operation.ready 27 | logger.ready.level = WARN 28 | 29 | # Reduce verbosity of RouterBuilderImpl warnings for unimplemented OpenAPI endpoints because bridge servers share the same OpenAPI contract with different endpoints implemented. 30 | logger.vertx.name = io.vertx.ext.web.openapi.router.impl.RouterBuilderImpl 31 | logger.vertx.level = ERROR -------------------------------------------------------------------------------- /documentation/assemblies/assembly-http-bridge-config.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // bridge.adoc 4 | 5 | [id='assembly-http-bridge-config-{context}'] 6 | = HTTP Bridge configuration 7 | 8 | [role="_abstract"] 9 | Configure a deployment of the HTTP Bridge with Kafka-related properties and specify the HTTP connection details needed to be able to interact with Kafka. 10 | Additionally, enable metrics in Prometheus format using either the https://github.com/prometheus/jmx_exporter[Prometheus JMX Exporter] or the https://github.com/strimzi/metrics-reporter[Strimzi Metrics Reporter]. 11 | You can also use configuration properties to enable and use distributed tracing with the HTTP Bridge. 12 | Distributed tracing allows you to track the progress of transactions between applications in a distributed system. 13 | 14 | NOTE: Use the `KafkaBridge` resource to configure properties when you are xref:overview-components-running-http-bridge-cluster-{context}[running the HTTP Bridge on Kubernetes]. 
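For reference, the following minimal sketch shows the kind of properties involved. Every key shown here also appears in the example `application.properties` file shipped with the bridge; the values are illustrative defaults rather than requirements:

.Minimal configuration sketch
[source,properties]
----
# Identifier of this bridge instance
bridge.id=my-bridge

# Kafka connection
kafka.bootstrap.servers=localhost:9092

# HTTP listener for client requests
http.host=0.0.0.0
http.port=8080
----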
15 | 16 | include::modules/proc-configuring-http-bridge.adoc[leveloffset=+1] 17 | include::modules/proc-configuring-http-bridge-jmx-metrics.adoc[leveloffset=+1] 18 | include::modules/proc-configuring-http-bridge-smr-metrics.adoc[leveloffset=+1] 19 | include::modules/proc-configuring-http-bridge-tracing.adoc[leveloffset=+1] 20 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/tracing/SpanHandle.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import io.vertx.ext.web.RoutingContext; 9 | import org.apache.kafka.clients.producer.ProducerRecord; 10 | 11 | /** 12 | * Span handle, an abstraction over actual span implementation. 13 | */ 14 | public interface SpanHandle { 15 | /** 16 | * Inject tracing info into underlying span from Kafka producer record. 17 | * 18 | * @param record Kafka producer record to extract tracing info 19 | */ 20 | void inject(ProducerRecord record); 21 | 22 | /** 23 | * Inject tracing info into underlying span from Vert.x routing context. 24 | * 25 | * @param routingContext Vert.x routing context to extract tracing info 26 | */ 27 | void inject(RoutingContext routingContext); 28 | 29 | /** 30 | * Finish underlying span. 31 | * 32 | * @param code response code 33 | */ 34 | void finish(int code); 35 | 36 | /** 37 | * Finish underlying span. 38 | * 39 | * @param code response code 40 | * @param cause exception cause 41 | */ 42 | void finish(int code, Throwable cause); 43 | } 44 | -------------------------------------------------------------------------------- /.azure/templates/jobs/build_docs.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'build_docs' 3 | displayName: 'Build Docs' 4 | strategy: 5 | matrix: 6 | 'java-17': 7 | image: 'Ubuntu-22.04' 8 | jdk_version: '17' 9 | # Strategy for the job 10 | # Set timeout for jobs 11 | timeoutInMinutes: 60 12 | # Base system 13 | pool: 14 | vmImage: $(image) 15 | # Variables 16 | variables: 17 | MVN_CACHE_FOLDER: $(HOME)/.m2/repository 18 | MVN_ARGS: '-e -V -B' 19 | # Pipeline steps 20 | steps: 21 | # Get cached Maven repository 22 | - template: "../steps/maven_cache.yaml" 23 | - template: '../steps/prerequisites/install_java.yaml' 24 | parameters: 25 | JDK_VERSION: $(jdk_version) 26 | - template: '../steps/prerequisites/install_asciidoc.yaml' 27 | - bash: "make docu_html docu_htmlnoheader" 28 | displayName: "Build docs" 29 | env: 30 | MVN_ARGS: "-e -V -B" 31 | # We have to TAR the target directory to maintain the permissions of 32 | # the files which would otherwise change when downloading the artifact 33 | - bash: tar -cvpf documentation.tar ./documentation 34 | displayName: "Tar the documentation directory" 35 | - publish: $(System.DefaultWorkingDirectory)/documentation.tar 36 | artifact: Documentation 37 | displayName: "Store the documentation tar" -------------------------------------------------------------------------------- /config/application.properties: -------------------------------------------------------------------------------- 1 | #Bridge related settings 2 | bridge.id=my-bridge 3 | 4 | # uncomment the following line to enable Prometheus JMX Exporter, check the Kafka Bridge documentation for more details 5 | #bridge.metrics=jmxPrometheusExporter 
6 | # optionally, set the file path of your custom configuration 7 | #bridge.metrics.exporter.config.path=/path/to/my-exporter-config.yaml 8 | 9 | # uncomment the following line to enable Strimzi Metrics Reporter, check the Kafka Bridge documentation for more details 10 | #bridge.metrics=strimziMetricsReporter 11 | # optionally, filter the exposed metrics of all internal Kafka clients using a comma separated list of regexes 12 | #kafka.prometheus.metrics.reporter.allowlist=.* 13 | 14 | # uncomment the following line (bridge.tracing) to enable OpenTelemetry tracing, check the documentation for more details 15 | #bridge.tracing=opentelemetry 16 | 17 | #Apache Kafka common 18 | kafka.bootstrap.servers=localhost:9092 19 | 20 | #Apache Kafka producer 21 | kafka.producer.acks=1 22 | 23 | #Apache Kafka consumer 24 | kafka.consumer.auto.offset.reset=earliest 25 | 26 | #HTTP related settings 27 | http.host=0.0.0.0 28 | http.port=8080 29 | #Enable CORS 30 | http.cors.enabled=false 31 | http.cors.allowedOrigins=* 32 | http.cors.allowedMethods=GET,POST,PUT,DELETE,OPTIONS,PATCH 33 | 34 | #Enable consumer 35 | http.consumer.enabled=true 36 | 37 | #Enable producer 38 | http.producer.enabled=true 39 | -------------------------------------------------------------------------------- /documentation/modules/con-securing-http-interface.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id='con-securing-http-interface-{context}'] 6 | = Securing the HTTP Bridge HTTP interface 7 | 8 | [role="_abstract"] 9 | By default, connections between HTTP clients and the HTTP Bridge are not encrypted, but you can configure a TLS-encrypted connection through its xref:proc-configuring-kafka-bridge-{context}[properties file]. 10 | If the HTTP Bridge is configured with TLS encryption, client requests should connect to the bridge by using HTTPS instead of HTTP. 11 | 12 | You can enable TLS encryption for all HTTP Bridge endpoints except management endpoints such as `/ready`, `/healthy` and `/metrics`. 13 | These endpoints are typically used only by internal clients and requests to them must use HTTP. 14 | 15 | Authentication between HTTP clients and the HTTP Bridge is not supported directly by the HTTP Bridge. 16 | You can combine the HTTP Bridge with the following tools to secure it further: 17 | 18 | * Network policies and firewalls that define which pods can access the HTTP Bridge 19 | * Reverse proxies (for example, OAuth 2.0) 20 | * API gateways 21 | 22 | If you use any of these tools, for example a reverse proxy that adds authentication between HTTP clients and the HTTP Bridge, configure the proxy with TLS to encrypt the connections between the HTTP clients and the proxy.
-------------------------------------------------------------------------------- /documentation/book/api/snippet/consumers/{groupid}/instances/{name}/records/GET/http-response.adoc: -------------------------------------------------------------------------------- 1 | ==== Example HTTP response 2 | 3 | ===== Response 200 4 | [source,json] 5 | ---- 6 | [ { 7 | "topic" : "topic", 8 | "key" : "key1", 9 | "value" : { 10 | "foo" : "bar" 11 | }, 12 | "partition" : 0, 13 | "offset" : 2 14 | }, { 15 | "topic" : "topic", 16 | "key" : "key2", 17 | "value" : [ "foo2", "bar2" ], 18 | "partition" : 1, 19 | "offset" : 3 20 | } ] 21 | ---- 22 | 23 | [source,json] 24 | ---- 25 | [ 26 | { 27 | "topic": "test", 28 | "key": "a2V5", 29 | "value": "Y29uZmx1ZW50", 30 | "partition": 1, 31 | "offset": 100 32 | }, 33 | { 34 | "topic": "test", 35 | "key": "a2V5", 36 | "value": "a2Fma2E=", 37 | "partition": 2, 38 | "offset": 101 39 | } 40 | ] 41 | ---- 42 | 43 | 44 | ===== Response 404 45 | [source,json] 46 | ---- 47 | { 48 | "error_code" : 404, 49 | "message" : "The specified consumer instance was not found." 50 | } 51 | ---- 52 | 53 | 54 | ===== Response 406 55 | [source,json] 56 | ---- 57 | { 58 | "error_code" : 406, 59 | "message" : "The `format` used in the consumer creation request does not match the embedded format in the Accept header of this request." 60 | } 61 | ---- 62 | 63 | 64 | ===== Response 422 65 | [source,json] 66 | ---- 67 | { 68 | "error_code" : 422, 69 | "message" : "Response exceeds the maximum number of bytes the consumer can receive" 70 | } 71 | ---- -------------------------------------------------------------------------------- /.azure/templates/jobs/deploy_java.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'deploy_java' 3 | displayName: 'Deploy Java' 4 | # Strategy for the job 5 | strategy: 6 | matrix: 7 | 'java-17': 8 | image: 'Ubuntu-22.04' 9 | jdk_version: '17' 10 | main_build: 'true' 11 | # Set timeout for jobs 12 | timeoutInMinutes: 60 13 | # Base system 14 | pool: 15 | vmImage: 'Ubuntu-22.04' 16 | # Pipeline steps 17 | steps: 18 | - template: '../steps/prerequisites/install_java.yaml' 19 | - task: DownloadPipelineArtifact@2 20 | inputs: 21 | source: '${{ parameters.artifactSource }}' 22 | artifact: Binary 23 | path: $(System.DefaultWorkingDirectory)/ 24 | project: '${{ parameters.artifactProject }}' 25 | pipeline: '${{ parameters.artifactPipeline }}' 26 | runVersion: '${{ parameters.artifactRunVersion }}' 27 | runId: '${{ parameters.artifactRunId }}' 28 | - bash: tar -xvf target.tar 29 | displayName: "Untar the target directory" 30 | - bash: "./.azure/scripts/push-to-central.sh" 31 | env: 32 | BUILD_REASON: $(Build.Reason) 33 | BRANCH: $(Build.SourceBranch) 34 | GPG_PASSPHRASE: $(GPG_PASSPHRASE) 35 | GPG_SIGNING_KEY: $(GPG_SIGNING_KEY) 36 | CENTRAL_USERNAME: $(CENTRAL_USERNAME) 37 | CENTRAL_PASSWORD: $(CENTRAL_PASSWORD) 38 | displayName: "Deploy Java artifacts" -------------------------------------------------------------------------------- /.azure/templates/jobs/build_container.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'build_container' 3 | displayName: 'Build' 4 | # Strategy for the job 5 | strategy: 6 | matrix: 7 | ${{ each arch in parameters.architectures }}: 8 | ${{ arch }}: 9 | arch: ${{ arch }} 10 | # Set timeout for jobs 11 | timeoutInMinutes: 60 12 | # Base system 13 | pool: 14 | vmImage: 'Ubuntu-22.04' 15 | # Pipeline steps 16 | steps: 17 | -
template: '../steps/prerequisites/install_docker.yaml' 18 | - task: DownloadPipelineArtifact@2 19 | inputs: 20 | source: '${{ parameters.artifactSource }}' 21 | artifact: Binary 22 | path: $(System.DefaultWorkingDirectory)/ 23 | project: '${{ parameters.artifactProject }}' 24 | pipeline: '${{ parameters.artifactPipeline }}' 25 | runVersion: '${{ parameters.artifactRunVersion }}' 26 | runId: '${{ parameters.artifactRunId }}' 27 | - bash: tar -xvf target.tar 28 | displayName: "Untar the target directory" 29 | - bash: "make docker_build docker_save" 30 | env: 31 | DOCKER_BUILDKIT: 1 32 | BUILD_REASON: $(Build.Reason) 33 | BRANCH: $(Build.SourceBranch) 34 | DOCKER_REGISTRY: "quay.io" 35 | DOCKER_ORG: "strimzi" 36 | DOCKER_ARCHITECTURE: $(arch) 37 | displayName: "Build container - $(arch)" 38 | - publish: $(System.DefaultWorkingDirectory)/kafka-bridge-$(arch).tar.gz 39 | artifact: Container-$(arch) -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/utils/Utils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.utils; 7 | 8 | import java.io.BufferedReader; 9 | import java.io.FileNotFoundException; 10 | import java.io.FileReader; 11 | import java.io.IOException; 12 | 13 | public class Utils { 14 | 15 | /** 16 | * Retrieves the Kafka Bridge version from a text config file, or throws an exception. 17 | * 18 | * @param releaseFile The name of the file that contains the release version 19 | * @return The version of the Kafka Bridge 20 | * @throws Exception 21 | */ 22 | 23 | public static String getKafkaBridgeVersionFromFile(String releaseFile) throws Exception { 24 | 25 | String versionFromFile; 26 | 27 | try { 28 | BufferedReader bufferedReader = new BufferedReader(new FileReader(releaseFile)); 29 | versionFromFile = bufferedReader.readLine(); 30 | bufferedReader.close(); 31 | } catch (FileNotFoundException e) { 32 | throw new Exception("File not found : " + releaseFile); 33 | } catch (IOException e) { 34 | throw new Exception("Unable to open file : " + releaseFile); 35 | } 36 | 37 | if ((versionFromFile == null) || (versionFromFile.isEmpty())) { 38 | throw new Exception("Unable to get Version from file : " + releaseFile); 39 | } 40 | return versionFromFile; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /documentation/modules/proc-installing-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-installing-http-bridge-{context}'] 6 | = Installing the HTTP Bridge 7 | 8 | [role="_abstract"] 9 | Use the script provided with the HTTP Bridge archive to install the HTTP Bridge. 10 | The `application.properties` file included in the installation archive provides default configuration settings. 11 | 12 | The following default property values configure the HTTP Bridge to listen for requests on port 8080.
13 | 14 | .Default configuration properties 15 | [source,shell,subs=attributes+] 16 | ---- 17 | http.host=0.0.0.0 18 | http.port=8080 19 | ---- 20 | 21 | .Prerequisites 22 | 23 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded] 24 | 25 | .Procedure 26 | 27 | . If you have not already done so, unzip the HTTP Bridge installation archive to any directory. 28 | 29 | . Run the HTTP Bridge script using the configuration properties as a parameter: 30 | + 31 | For example: 32 | + 33 | [source,shell,subs="+quotes,attributes"] 34 | ---- 35 | ./bin/kafka_bridge_run.sh --config-file=__/application.properties 36 | ---- 37 | 38 | . Check the log to confirm that the installation was successful. 39 | + 40 | [source,shell] 41 | ---- 42 | HTTP Bridge started and listening on port 8080 43 | HTTP Bridge bootstrap servers localhost:9092 44 | ---- 45 | 46 | .What to do next 47 | 48 | * xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Produce messages to topics and partitions]. -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/SinkTopicSubscription.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | 7 | package io.strimzi.kafka.bridge; 8 | 9 | /** 10 | * Represents a Topic subscription in the sink bridge endpoint 11 | */ 12 | public class SinkTopicSubscription { 13 | 14 | private final String topic; 15 | private final Integer partition; 16 | 17 | /** 18 | * Constructor 19 | * 20 | * @param topic topic to subscribe/assign 21 | * @param partition partition requested as assignment (null if no specific assignment) 22 | */ 23 | public SinkTopicSubscription(String topic, Integer partition) { 24 | this.topic = topic; 25 | this.partition = partition; 26 | } 27 | 28 | /** 29 | * Constructor 30 | * 31 | * @param topic topic to subscribe 32 | */ 33 | public SinkTopicSubscription(String topic) { 34 | this(topic, null); 35 | } 36 | 37 | /** 38 | * @return topic to subscribe/assign 39 | */ 40 | public String getTopic() { 41 | return topic; 42 | } 43 | 44 | /** 45 | * @return partition requested as assignment (null if no specific assignment) 46 | */ 47 | public Integer getPartition() { 48 | return partition; 49 | } 50 | 51 | @Override 52 | public String toString() { 53 | return "SinkTopicSubscription(" + 54 | "topic=" + this.topic + 55 | ",partition=" + this.partition + 56 | ")"; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /documentation/modules/proc-bridge-committing-consumer-offsets-to-log.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-bridge-committing-consumer-offsets-to-log-{context}'] 6 | = Committing offsets to the log 7 | 8 | [role="_abstract"] 9 | Use the xref:commit[offsets] endpoint to manually commit offsets to the log for all messages received by the HTTP Bridge consumer. This is required because the HTTP Bridge consumer that you created earlier, in xref:proc-creating-http-bridge-consumer-{context}[Creating a HTTP Bridge consumer], was configured with `enable.auto.commit` set to `false`.
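If you want to commit only specific offsets rather than everything the consumer has received, the request can carry a JSON body listing them. The following sketch shows the general shape of such a request for the quickstart consumer; the topic name and offset value are placeholders, and the exact payload fields are defined by the xref:OffsetCommitSeek[OffsetCommitSeek] schema in the API reference, which is the authoritative source:

.Committing specific offsets (illustrative)
[source,curl,subs=attributes+]
----
curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/offsets \
  -H 'content-type: application/vnd.kafka.v2+json' \
  -d '{
    "offsets": [
      {
        "topic": "bridge-quickstart-topic",
        "partition": 0,
        "offset": 15
      }
    ]
  }'
----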
10 | 11 | .Procedure 12 | 13 | * Commit offsets to the log for the `bridge-quickstart-consumer`: 14 | + 15 | [source,curl,subs=attributes+] 16 | ---- 17 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/offsets 18 | ---- 19 | + 20 | Because no request body is submitted, offsets are committed for all the records that have been received by the consumer. Alternatively, the request body can contain an array of (xref:OffsetCommitSeek[OffsetCommitSeek]) that specifies the topics and partitions that you want to commit offsets for. 21 | + 22 | If the request is successful, the HTTP Bridge returns a `204` code only. 23 | 24 | .What to do next 25 | 26 | After committing offsets to the log, try out the endpoints for xref:proc-bridge-seeking-offset-for-partition-{context}[seeking to offsets]. 27 | 28 | [role="_additional-resources"] 29 | .Additional resources 30 | 31 | * xref:commit[POST /consumers/{groupid}/instances/{name}/offsets] 32 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/tracing/NoopTracingHandle.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import io.strimzi.kafka.bridge.config.BridgeConfig; 9 | import io.vertx.ext.web.RoutingContext; 10 | import org.apache.kafka.clients.consumer.ConsumerRecord; 11 | import org.apache.kafka.clients.producer.ProducerRecord; 12 | 13 | import java.util.Properties; 14 | 15 | final class NoopTracingHandle implements TracingHandle { 16 | @Override 17 | public String envServiceName() { 18 | return null; 19 | } 20 | 21 | @Override 22 | public String serviceName(BridgeConfig config) { 23 | return null; 24 | } 25 | 26 | @Override 27 | public void initialize() { 28 | } 29 | 30 | @Override 31 | public SpanHandle span(RoutingContext routingContext, String operationName) { 32 | return new NoopSpanHandle<>(); 33 | } 34 | 35 | @Override 36 | public void handleRecordSpan(ConsumerRecord record) { 37 | } 38 | 39 | @Override 40 | public void addTracingPropsToProducerConfig(Properties props) { 41 | } 42 | 43 | private static final class NoopSpanHandle implements SpanHandle { 44 | @Override 45 | public void inject(ProducerRecord record) { 46 | } 47 | 48 | @Override 49 | public void inject(RoutingContext routingContext) { 50 | } 51 | 52 | @Override 53 | public void finish(int code) { 54 | } 55 | 56 | @Override 57 | public void finish(int code, Throwable cause) { 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/main/assembly/assembly.xml: -------------------------------------------------------------------------------- 1 | 4 | assembly 5 | true 6 | 7 | 8 | tar.gz 9 | zip 10 | dir 11 | 12 | 13 | 14 | ${project.basedir}/config 15 | /config 16 | 0644 17 | 18 | 19 | ${project.basedir}/bin 20 | /bin 21 | 0755 22 | 23 | 24 | ${project.basedir} 25 | 26 | README* 27 | LICENSE* 28 | CHANGELOG* 29 | 30 | 0644 31 | 32 | 33 | 34 | 35 | runtime 36 | /libs 37 | 0644 38 | ${artifact.groupId}.${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension} 39 | 40 | 41 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/LoggingPartitionsRebalance.java: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.apache.logging.log4j.LogManager; 11 | import org.apache.logging.log4j.Logger; 12 | 13 | import java.util.Collection; 14 | 15 | /** 16 | * No operations implementation for handling partitions being assigned or revoked on rebalancing 17 | * It just logs the partitions if debug logging is enabled 18 | */ 19 | public class LoggingPartitionsRebalance implements ConsumerRebalanceListener { 20 | private static final Logger LOGGER = LogManager.getLogger(LoggingPartitionsRebalance.class); 21 | 22 | @Override 23 | public void onPartitionsRevoked(Collection<TopicPartition> partitions) { 24 | LOGGER.debug("Partitions revoked {}", partitions.size()); 25 | 26 | if (LOGGER.isDebugEnabled() && !partitions.isEmpty()) { 27 | for (TopicPartition partition : partitions) { 28 | LOGGER.debug("topic {} partition {}", partition.topic(), partition.partition()); 29 | } 30 | } 31 | } 32 | 33 | @Override 34 | public void onPartitionsAssigned(Collection<TopicPartition> partitions) { 35 | LOGGER.debug("Partitions assigned {}", partitions.size()); 36 | 37 | if (LOGGER.isDebugEnabled() && !partitions.isEmpty()) { 38 | for (TopicPartition partition : partitions) { 39 | LOGGER.debug("topic {} partition {}", partition.topic(), partition.partition()); 40 | } 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/KafkaAdminConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.util.Map; 9 | import java.util.stream.Collectors; 10 | 11 | /** 12 | * Apache Kafka admin related configuration 13 | */ 14 | public class KafkaAdminConfig extends AbstractConfig { 15 | 16 | /** Prefix for administration related configuration parameters */ 17 | public static final String KAFKA_ADMIN_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "admin."; 18 | 19 | /** 20 | * Constructor 21 | * 22 | * @param config configuration parameters map 23 | */ 24 | private KafkaAdminConfig(Map config) { 25 | super(config); 26 | } 27 | 28 | /** 29 | * Loads Kafka admin related configuration parameters from a related map 30 | * 31 | * @param map map from which loading configuration parameters 32 | * @return Kafka admin related configuration 33 | */ 34 | public static KafkaAdminConfig fromMap(Map map) { 35 | // filter the Kafka admin related configuration parameters, stripping the prefix as well 36 | return new KafkaAdminConfig(map.entrySet().stream() 37 | .filter(e -> e.getKey().startsWith(KafkaAdminConfig.KAFKA_ADMIN_CONFIG_PREFIX)) 38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaAdminConfig.KAFKA_ADMIN_CONFIG_PREFIX.length()), Map.Entry::getValue))); 39 | } 40 | 41 | @Override 42 | public String toString() { 43 | return "KafkaAdminConfig(" + 44 | "config=" + this.config + 45 | ")"; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/services/BaseService.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.http.services; 6 | 7 | import io.vertx.core.buffer.Buffer; 8 | import io.vertx.core.json.JsonObject; 9 | import io.vertx.ext.web.client.HttpRequest; 10 | import io.vertx.ext.web.client.WebClient; 11 | import io.vertx.ext.web.codec.BodyCodec; 12 | 13 | public class BaseService { 14 | 15 | WebClient webClient; 16 | private static BaseService baseService; 17 | 18 | static final int HTTP_REQUEST_TIMEOUT = 60; 19 | 20 | // for request configuration 21 | private static final long RESPONSE_TIMEOUT = 60000L; 22 | 23 | BaseService(WebClient webClient) { 24 | this.webClient = webClient; 25 | } 26 | 27 | public static synchronized BaseService getInstance(WebClient webClient) { 28 | if (baseService == null || webClient != baseService.webClient) { 29 | baseService = new BaseService(webClient); 30 | } 31 | return baseService; 32 | } 33 | 34 | //HTTP methods with configured Response timeout 35 | public HttpRequest postRequest(String requestURI) { 36 | return webClient.post(requestURI) 37 | .timeout(RESPONSE_TIMEOUT) 38 | .as(BodyCodec.jsonObject()); 39 | } 40 | 41 | public HttpRequest getRequest(String requestURI) { 42 | return webClient.get(requestURI) 43 | .timeout(RESPONSE_TIMEOUT); 44 | } 45 | 46 | public HttpRequest deleteRequest(String requestURI) { 47 | return webClient.delete(requestURI) 48 | .timeout(RESPONSE_TIMEOUT); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /documentation/assemblies/assembly-http-bridge-quickstart.adoc: -------------------------------------------------------------------------------- 1 | // This assembly is included in the following assemblies: 2 | // 3 | // bridge.adoc 4 | 5 | [id='assembly-http-bridge-quickstart-{context}'] 6 | = HTTP Bridge quickstart 7 | 8 | [role="_abstract"] 9 | Use this quickstart to try out the HTTP Bridge in your local development environment. 10 | 11 | You will learn how to do the following: 12 | 13 | * Produce messages to topics and partitions in your Kafka cluster 14 | * Create a HTTP Bridge consumer 15 | * Perform basic consumer operations, such as subscribing the consumer to topics and retrieving the messages that you produced 16 | 17 | In this quickstart, HTTP requests are formatted as curl commands that you can copy and paste to your terminal. 18 | 19 | Ensure you have the prerequisites and then follow the tasks in the order provided in this chapter. 20 | 21 | In this quickstart, you will produce and consume messages in JSON format. 22 | 23 | .Prerequisites for the quickstart 24 | 25 | * A Kafka cluster is running on the host machine. 
26 | 27 | include::modules/proc-downloading-http-bridge.adoc[leveloffset=+1] 28 | 29 | include::modules/proc-installing-http-bridge.adoc[leveloffset=+1] 30 | 31 | include::modules/proc-producing-messages-from-bridge-topics-partitions.adoc[leveloffset=+1] 32 | 33 | include::modules/proc-creating-http-bridge-consumer.adoc[leveloffset=+1] 34 | 35 | include::modules/proc-bridge-subscribing-consumer-topics.adoc[leveloffset=+1] 36 | 37 | include::modules/proc-bridge-retrieving-latest-messages-from-consumer.adoc[leveloffset=+1] 38 | 39 | include::modules/proc-bridge-committing-consumer-offsets-to-log.adoc[leveloffset=+1] 40 | 41 | include::modules/proc-bridge-seeking-offsets-for-partition.adoc[leveloffset=+1] 42 | 43 | include::modules/proc-bridge-deleting-consumer.adoc[leveloffset=+1] 44 | -------------------------------------------------------------------------------- /documentation/modules/con-loggers-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id='con-loggers-http-bridge-{context}'] 6 | 7 | [role="_abstract"] 8 | = Configuring loggers for the HTTP Bridge 9 | 10 | [role="_abstract"] 11 | You can set a different log level for each operation that is defined by the HTTP Bridge OpenAPI specification. 12 | 13 | Each operation has a corresponding API endpoint through which the bridge receives requests from HTTP clients. 14 | You can change the log level on each endpoint to produce more or less fine-grained logging information about the incoming and outgoing HTTP requests. 15 | 16 | Loggers are defined in the `log4j2.properties` file, which has the following default configuration for `healthy` and `ready` endpoints: 17 | 18 | ``` 19 | logger.healthy.name = http.openapi.operation.healthy 20 | logger.healthy.level = WARN 21 | logger.ready.name = http.openapi.operation.ready 22 | logger.ready.level = WARN 23 | ``` 24 | 25 | The log level of all other operations is set to `INFO` by default. 26 | Loggers are formatted as follows: 27 | 28 | [source,properties,subs=+quotes] 29 | ---- 30 | logger.<operation_id>.name = http.openapi.operation.<operation_id> 31 | logger.<operation_id>.level = <LOG_LEVEL> 32 | ---- 33 | 34 | Where `<operation_id>` is the identifier of the specific operation. 35 | 36 | .List of operations defined by the OpenAPI specification 37 | * `createConsumer` 38 | * `deleteConsumer` 39 | * `subscribe` 40 | * `unsubscribe` 41 | * `poll` 42 | * `assign` 43 | * `commit` 44 | * `send` 45 | * `sendToPartition` 46 | * `seekToBeginning` 47 | * `seekToEnd` 48 | * `seek` 49 | * `healthy` 50 | * `ready` 51 | * `openapi` 52 | 53 | Where `<LOG_LEVEL>` is the logging level as defined by log4j2 (for example, `INFO` or `DEBUG`). -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/KafkaConsumerConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.util.Map; 9 | import java.util.stream.Collectors; 10 | 11 | /** 12 | * Apache Kafka consumer related configuration 13 | */ 14 | public class KafkaConsumerConfig extends AbstractConfig { 15 | 16 | /** Prefix for consumer related configuration parameters */ 17 | public static final String KAFKA_CONSUMER_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "consumer."; 18 | 19 | /** 20 | * Constructor 21 | * 22 | * @param config configuration parameters map 23 | */ 24 | private KafkaConsumerConfig(Map config) { 25 | super(config); 26 | } 27 | 28 | /** 29 | * Loads Kafka consumer related configuration parameters from a related map 30 | * 31 | * @param map map from which loading configuration parameters 32 | * @return Kafka consumer related configuration 33 | */ 34 | public static KafkaConsumerConfig fromMap(Map map) { 35 | // filter the Kafka consumer related configuration parameters, stripping the prefix as well 36 | return new KafkaConsumerConfig(map.entrySet().stream() 37 | .filter(e -> e.getKey().startsWith(KafkaConsumerConfig.KAFKA_CONSUMER_CONFIG_PREFIX)) 38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaConsumerConfig.KAFKA_CONSUMER_CONFIG_PREFIX.length()), Map.Entry::getValue))); 39 | } 40 | 41 | @Override 42 | public String toString() { 43 | return "KafkaConsumerConfig(" + 44 | "config=" + this.config + 45 | ")"; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/KafkaProducerConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.util.Map; 9 | import java.util.stream.Collectors; 10 | 11 | /** 12 | * Apache Kafka producer related configuration 13 | */ 14 | public class KafkaProducerConfig extends AbstractConfig { 15 | 16 | /** Prefix for producer related configuration parameters */ 17 | public static final String KAFKA_PRODUCER_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "producer."; 18 | 19 | /** 20 | * Constructor 21 | * 22 | * @param config configuration parameters map 23 | */ 24 | private KafkaProducerConfig(Map config) { 25 | super(config); 26 | } 27 | 28 | /** 29 | * Loads Kafka producer related configuration parameters from a related map 30 | * 31 | * @param map map from which loading configuration parameters 32 | * @return Kafka producer related configuration 33 | */ 34 | public static KafkaProducerConfig fromMap(Map map) { 35 | // filter the Kafka producer related configuration parameters, stripping the prefix as well 36 | return new KafkaProducerConfig(map.entrySet().stream() 37 | .filter(e -> e.getKey().startsWith(KafkaProducerConfig.KAFKA_PRODUCER_CONFIG_PREFIX)) 38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaProducerConfig.KAFKA_PRODUCER_CONFIG_PREFIX.length()), Map.Entry::getValue))); 39 | } 40 | 41 | @Override 42 | public String toString() { 43 | return "KafkaProducerConfig(" + 44 | "config=" + this.config + 45 | ")"; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /documentation/modules/con-overview-components-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id="overview-components-http-bridge_{context}"] 6 | = HTTP Bridge interface 7 | 8 | [role="_abstract"] 9 | The HTTP Bridge provides a RESTful interface that allows HTTP-based clients to interact with a Kafka cluster.  10 | It offers the advantages of a web API connection to Strimzi, without the need for client applications to interpret the Kafka protocol. 11 | 12 | The API has two main resources — `consumers` and `topics` — that are exposed and made accessible through endpoints to interact with consumers and producers in your Kafka cluster. The resources relate only to the HTTP Bridge, not the consumers and producers connected directly to Kafka. 13 | 14 | == HTTP requests 15 | The HTTP Bridge supports HTTP requests to a Kafka cluster, with methods to: 16 | 17 | * Send messages to a topic. 18 | * Retrieve messages from topics. 19 | * Retrieve a list of partitions for a topic. 20 | * Create and delete consumers. 21 | * Subscribe consumers to topics, so that they start receiving messages from those topics. 22 | * Retrieve a list of topics that a consumer is subscribed to. 23 | * Unsubscribe consumers from topics. 24 | * Assign partitions to consumers. 25 | * Commit a list of consumer offsets. 26 | * Seek on a partition, so that a consumer starts receiving messages from the first or last offset position, or a given offset position. 27 | 28 | The methods provide JSON responses and HTTP response code error handling. 29 | Messages can be sent in JSON or binary formats. 30 | 31 | Clients can produce and consume messages without the requirement to use the native Kafka protocol. 
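As an illustration of what these requests look like in practice, the following sketch polls records from an existing consumer instance through the bridge. The consumer group, instance name, host, and port are placeholders, and the `records` endpoint and embedded-format content types are described in the API reference:

.Example HTTP request to the bridge (illustrative)
[source,curl]
----
curl -X GET http://localhost:8080/consumers/my-group/instances/my-consumer/records \
  -H 'accept: application/vnd.kafka.json.v2+json'
----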
32 | 33 | [role="_additional-resources"] 34 | .Additional resources 35 | * xref:api_reference-{context}[HTTP Bridge API reference] 36 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/tracing/TracingHandle.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import io.strimzi.kafka.bridge.config.BridgeConfig; 9 | import io.vertx.ext.web.RoutingContext; 10 | import org.apache.kafka.clients.consumer.ConsumerRecord; 11 | 12 | import java.util.Properties; 13 | 14 | /** 15 | * Simple interface to abstract tracing 16 | */ 17 | public interface TracingHandle { 18 | /** 19 | * Tracing env var service name. 20 | * 21 | * @return tracing env var service name 22 | */ 23 | String envServiceName(); 24 | 25 | /** 26 | * Extract service name from bridge config. 27 | * 28 | * @param config the bridge config 29 | * @return bridge's service name 30 | */ 31 | String serviceName(BridgeConfig config); 32 | 33 | /** 34 | * Initialize tracing. 35 | */ 36 | void initialize(); 37 | 38 | /** 39 | * Build span handle. 40 | * 41 | * @param <K> key type 42 | * @param <V> value type 43 | * @param routingContext Vert.x routing context 44 | * @param operationName current operation name 45 | * @return span handle 46 | */ 47 | <K, V> SpanHandle<K, V> span(RoutingContext routingContext, String operationName); 48 | 49 | /** 50 | * Extract span info from Kafka consumer record. 51 | * 52 | * @param <K> key type 53 | * @param <V> value type 54 | * @param record Kafka consumer record 55 | */ 56 | <K, V> void handleRecordSpan(ConsumerRecord<K, V> record); 57 | 58 | /** 59 | * Add producer properties, if any. 60 | * 61 | * @param props the properties 62 | */ 63 | void addTracingPropsToProducerConfig(Properties props); 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/ConsumerInstanceId.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge; 7 | 8 | /** 9 | * Represents a unique consumer instance made up of consumer group and instance name 10 | */ 11 | public class ConsumerInstanceId { 12 | 13 | private final String groupId; 14 | private final String instanceId; 15 | 16 | /** 17 | * Constructor 18 | * 19 | * @param groupId the consumer group the Kafka consumer belongs to 20 | * @param instanceId the instance Id of the Kafka consumer 21 | */ 22 | public ConsumerInstanceId(String groupId, String instanceId) { 23 | this.groupId = groupId; 24 | this.instanceId = instanceId; 25 | } 26 | 27 | @Override 28 | public boolean equals(Object obj) { 29 | if (this == obj) { 30 | return true; 31 | } 32 | 33 | if (!(obj instanceof ConsumerInstanceId other)) { 34 | return false; 35 | } 36 | 37 | if (groupId != null && !groupId.equals(other.groupId)) { 38 | return false; 39 | } 40 | 41 | if (instanceId != null && !instanceId.equals(other.instanceId)) { 42 | return false; 43 | } 44 | 45 | return true; 46 | } 47 | 48 | @Override 49 | public int hashCode() { 50 | int result = 1; 51 | result = 31 * result + (groupId != null ?
groupId.hashCode() : 0); 52 | result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0); 53 | return result; 54 | } 55 | 56 | @Override 57 | public String toString() { 58 | return "ConsumerInstanceId(" + 59 | "groupId=" + this.groupId + 60 | ", instanceId=" + this.instanceId + 61 | ")"; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /documentation/modules/proc-configuring-http-bridge-smr-metrics.adoc: -------------------------------------------------------------------------------- 1 | [id='proc-configuring-http-bridge-smr-metrics-{context}'] 2 | = Configuring Strimzi Metrics Reporter metrics 3 | 4 | [role="_abstract"] 5 | Enable the Strimzi Metrics Reporter to collect HTTP Bridge metrics by setting the `bridge.metrics` option to `strimziMetricsReporter`. 6 | 7 | .Prerequisites 8 | 9 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded]. 10 | 11 | .Procedure 12 | 13 | . Set the `bridge.metrics` configuration to `strimziMetricsReporter`. 14 | + 15 | .Configuration for enabling metrics 16 | 17 | [source,properties] 18 | ---- 19 | bridge.metrics=strimziMetricsReporter 20 | ---- 21 | + 22 | Optionally, you can configure a comma-separated list of regular expressions to filter exposed metrics using the `kafka.prometheus.metrics.reporter.allowlist` property. 23 | If not configured, a default set of metrics is exposed. 24 | 25 | + 26 | When needed, it is possible to configure the `allowlist` per client type. 27 | For example, by using the `kafka.admin` prefix and setting `kafka.admin.prometheus.metrics.reporter.allowlist=`, all admin client metrics are excluded. 28 | 29 | + 30 | You can add any plugin configuration to the HTTP Bridge properties file using `kafka.`, `kafka.admin.`, `kafka.producer.`, and `kafka.consumer.` prefixes. 31 | In the event that the same property is configured with multiple prefixes, the most specific prefix takes precedence. 32 | For example, `kafka.producer.prometheus.metrics.reporter.allowlist` takes precedence over `kafka.prometheus.metrics.reporter.allowlist`. 33 | 34 | . Run the HTTP Bridge run script. 35 | + 36 | .Running the HTTP Bridge 37 | [source,shell] 38 | ---- 39 | ./bin/kafka_bridge_run.sh --config-file=/application.properties 40 | ---- 41 | + 42 | With metrics enabled, you can scrape metrics in Prometheus format from the `/metrics` endpoint of the HTTP Bridge. 43 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/metrics/StrimziMetricsCollector.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.metrics; 7 | 8 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter; 9 | import io.prometheus.metrics.model.registry.PrometheusRegistry; 10 | 11 | import java.io.ByteArrayOutputStream; 12 | import java.io.IOException; 13 | import java.nio.charset.StandardCharsets; 14 | 15 | /** 16 | * Collect and scrape Strimzi Reporter metrics in Prometheus format. 17 | */ 18 | public class StrimziMetricsCollector extends MetricsCollector { 19 | private final PrometheusRegistry registry; 20 | private final PrometheusTextFormatWriter textFormatter; 21 | 22 | /** 23 | * Constructor. 
24 | */ 25 | public StrimziMetricsCollector() { 26 | // Prometheus default registry is a singleton, so it is shared with Strimzi Metrics Reporter 27 | this(PrometheusRegistry.defaultRegistry, new PrometheusTextFormatWriter(true)); 28 | } 29 | 30 | /** 31 | * Constructor. 32 | * 33 | * @param registry Prometheus collector registry 34 | * @param textFormatter Prometheus text formatter 35 | */ 36 | /* test */ StrimziMetricsCollector(PrometheusRegistry registry, 37 | PrometheusTextFormatWriter textFormatter) { 38 | super(); 39 | this.registry = registry; 40 | this.textFormatter = textFormatter; 41 | } 42 | 43 | @Override 44 | public String doScrape() { 45 | ByteArrayOutputStream stream = new ByteArrayOutputStream(); 46 | try { 47 | textFormatter.write(stream, registry.scrape()); 48 | } catch (IOException e) { 49 | throw new RuntimeException(e); 50 | } 51 | return stream.toString(StandardCharsets.UTF_8); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /.azure/templates/jobs/build_java.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'build_and_test_java' 3 | displayName: 'Build & Test' 4 | # Strategy for the job 5 | strategy: 6 | matrix: 7 | 'java-17': 8 | image: 'Ubuntu-22.04' 9 | jdk_version: '17' 10 | main_build: 'true' 11 | # Set timeout for jobs 12 | timeoutInMinutes: 60 13 | # Base system 14 | pool: 15 | vmImage: $(image) 16 | # Variables 17 | variables: 18 | MVN_CACHE_FOLDER: $(HOME)/.m2/repository 19 | # Pipeline steps 20 | steps: 21 | # Get cached Maven repository 22 | - template: "../steps/maven_cache.yaml" 23 | - template: '../steps/prerequisites/install_java.yaml' 24 | parameters: 25 | JDK_VERSION: $(jdk_version) 26 | - bash: "make java_verify" 27 | displayName: "Build & Test Java" 28 | env: 29 | BUILD_REASON: $(Build.Reason) 30 | BRANCH: $(Build.SourceBranch) 31 | TESTCONTAINERS_RYUK_DISABLED: "TRUE" 32 | TESTCONTAINERS_CHECKS_DISABLE: "TRUE" 33 | MVN_ARGS: "-e -V -B -Dfailsafe.rerunFailingTestsCount=2" 34 | - bash: "make spotbugs" 35 | displayName: "Spotbugs" 36 | env: 37 | MVN_ARGS: "-e -V -B" 38 | # We have to TAR the target directory to maintain the permissions of 39 | # the files which would otherwise change when downloading the artifact 40 | - bash: tar -cvpf target.tar ./target 41 | displayName: "Tar the target directory" 42 | condition: and(succeeded(), eq(variables['main_build'], 'true')) 43 | - publish: $(System.DefaultWorkingDirectory)/target.tar 44 | artifact: Binary 45 | condition: and(succeeded(), eq(variables['main_build'], 'true')) 46 | - task: PublishTestResults@2 47 | inputs: 48 | testResultsFormat: JUnit 49 | testResultsFiles: '**/TEST-*.xml' 50 | testRunTitle: "Unit & Integration tests" 51 | condition: always() -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/services/ProducerService.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.http.services; 6 | 7 | import io.strimzi.kafka.bridge.utils.Urls; 8 | import io.vertx.core.json.JsonObject; 9 | import io.vertx.ext.web.client.HttpRequest; 10 | import io.vertx.ext.web.client.WebClient; 11 | import io.vertx.ext.web.codec.BodyCodec; 12 | 13 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; 14 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; 15 | 16 | public class ProducerService extends BaseService { 17 | 18 | private static ProducerService producerService; 19 | 20 | private ProducerService(WebClient webClient) { 21 | super(webClient); 22 | } 23 | 24 | public static synchronized ProducerService getInstance(WebClient webClient) { 25 | if (producerService == null || webClient != producerService.webClient) { 26 | producerService = new ProducerService(webClient); 27 | } 28 | return producerService; 29 | } 30 | 31 | public HttpRequest sendRecordsRequest(String topic, JsonObject jsonObject, String bridgeContentType) { 32 | return postRequest(Urls.producerTopic(topic)) 33 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(jsonObject.toBuffer().length())) 34 | .putHeader(CONTENT_TYPE.toString(), bridgeContentType) 35 | .as(BodyCodec.jsonObject()); 36 | } 37 | 38 | public HttpRequest sendRecordsToPartitionRequest(String topic, Object partition, JsonObject jsonObject, String bridgeContentType) { 39 | return postRequest(Urls.producerTopicPartition(topic, partition)) 40 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(jsonObject.toBuffer().length())) 41 | .putHeader(CONTENT_TYPE.toString(), bridgeContentType) 42 | .as(BodyCodec.jsonObject()); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/converter/MessageConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.converter; 7 | 8 | import org.apache.kafka.clients.consumer.ConsumerRecord; 9 | import org.apache.kafka.clients.consumer.ConsumerRecords; 10 | import org.apache.kafka.clients.producer.ProducerRecord; 11 | 12 | import java.util.List; 13 | 14 | /** 15 | * Interface for a message converter between Kafka record and bridge message 16 | */ 17 | public interface MessageConverter { 18 | 19 | /** 20 | * Converts a message to a Kafka record 21 | * 22 | * @param kafkaTopic Kafka topic for sending message 23 | * @param partition partition of topic where the messages are sent when partition is specified in the request 24 | * @param message message to convert 25 | * @return Kafka record 26 | */ 27 | ProducerRecord toKafkaRecord(String kafkaTopic, Integer partition, M message); 28 | 29 | /** 30 | * Convert a collection of messages to Kafka records 31 | * 32 | * @param kafkaTopic Kafka topic for sending message 33 | * @param partition partition of topic where the messages are sent when partition is specified in the request 34 | * @param messages collection of messages to convert 35 | * @return Kafka records 36 | */ 37 | List> toKafkaRecords(String kafkaTopic, Integer partition, C messages); 38 | 39 | /** 40 | * Converts a Kafka record to a message 41 | * 42 | * @param address address for sending message 43 | * @param record Kafka record to convert 44 | * @return message 45 | */ 46 | M toMessage(String address, ConsumerRecord record); 47 | 48 | /** 49 | * Converts Kafka records to a collection of messages 50 | * 51 | * @param records Kafka records to convert 52 | * @return a collection of messages 53 | */ 54 | C toMessages(ConsumerRecords records); 55 | } 56 | -------------------------------------------------------------------------------- /documentation/modules/proc-bridge-subscribing-consumer-topics.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-bridge-subscribing-consumer-topics-{context}'] 6 | = Subscribing a HTTP Bridge consumer to topics 7 | 8 | [role="_abstract"] 9 | After you have created a HTTP Bridge consumer, subscribe it to one or more topics by using the xref:subscribe[subscription] endpoint. 10 | When subscribed, the consumer starts receiving all messages that are produced to the topic. 11 | 12 | .Procedure 13 | 14 | * Subscribe the consumer to the `bridge-quickstart-topic` topic that you created earlier, in xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Producing messages to topics and partitions]: 15 | + 16 | [source,curl,subs=attributes+] 17 | ---- 18 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/subscription \ 19 | -H 'content-type: application/vnd.kafka.v2+json' \ 20 | -d '{ 21 | "topics": [ 22 | "bridge-quickstart-topic" 23 | ] 24 | }' 25 | ---- 26 | + 27 | The `topics` array can contain a single topic (as shown here) or multiple topics. If you want to subscribe the consumer to multiple topics that match a regular expression, you can use the `topic_pattern` string instead of the `topics` array. 28 | + 29 | If the request is successful, the HTTP Bridge returns a `204` (No Content) code only. 30 | 31 | When using an Apache Kafka client, the HTTP subscribe operation adds topics to the local consumer's subscriptions. 
32 | Joining a consumer group and obtaining partition assignments occur after running multiple HTTP poll operations, starting the partition rebalance and join-group process. 33 | It's important to note that the initial HTTP poll operations may not return any records. 34 | 35 | .What to do next 36 | 37 | After subscribing a HTTP Bridge consumer to topics, you can xref:proc-bridge-retrieving-latest-messages-from-consumer-{context}[retrieve messages from the consumer]. 38 | 39 | [role="_additional-resources"] 40 | .Additional resources 41 | 42 | * xref:subscribe[POST /consumers/{groupid}/instances/{name}/subscription] 43 | -------------------------------------------------------------------------------- /documentation/modules/con-overview-running-http-bridge.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-overview.adoc 4 | 5 | [id="overview-components-running-http-bridge-{context}"] 6 | = Running the HTTP Bridge 7 | 8 | [role="_abstract"] 9 | Install the HTTP Bridge to run in the same environment as your Kafka cluster. 10 | 11 | You can download and add the HTTP Bridge installation artifacts to your host machine. 12 | To try out the HTTP Bridge in your local environment, see the xref:assembly-http-bridge-quickstart-{context}[HTTP Bridge quickstart]. 13 | 14 | It's important to note that each instance of the HTTP Bridge maintains its own set of in-memory consumers (and subscriptions) that connect to the Kafka Brokers on behalf of the HTTP clients. 15 | This means that each HTTP client must maintain affinity to the same HTTP Bridge instance in order to access any subscriptions that are created. 16 | Additionally, when an instance of the HTTP Bridge restarts, the in-memory consumers and subscriptions are lost. 17 | **It is the responsibility of the HTTP client to recreate any consumers and subscriptions if the HTTP Bridge restarts.** 18 | 19 | [id="overview-components-running-http-bridge-cluster-{context}"] 20 | == Running the HTTP Bridge on Kubernetes 21 | 22 | If you deployed Strimzi on Kubernetes, you can use the Strimzi Cluster Operator to deploy the HTTP Bridge to the Kubernetes cluster. 23 | Configure and deploy the HTTP Bridge as a `KafkaBridge` resource. 24 | You'll need a running Kafka cluster that was deployed by the Cluster Operator in a Kubernetes namespace. 25 | You can configure your deployment to access the HTTP Bridge outside the Kubernetes cluster. 26 | 27 | HTTP clients must maintain affinity to the same instance of the HTTP Bridge to access any consumers or subscriptions that they create. Hence, running multiple replicas of the HTTP Bridge per Kubernetes Deployment is not recommended. 28 | If the HTTP Bridge pod restarts (for instance, due to Kubernetes relocating the workload to another node), the HTTP client must recreate any consumers or subscriptions. 29 | 30 | For information on deploying and configuring the HTTP Bridge as a `KafkaBridge` resource, see the {BookURLConfiguring}. 31 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/services/SeekService.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.http.services; 6 | 7 | import io.strimzi.kafka.bridge.BridgeContentType; 8 | import io.strimzi.kafka.bridge.utils.Urls; 9 | import io.vertx.core.json.JsonObject; 10 | import io.vertx.ext.web.client.HttpRequest; 11 | import io.vertx.ext.web.client.WebClient; 12 | import io.vertx.ext.web.codec.BodyCodec; 13 | 14 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; 15 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; 16 | 17 | public class SeekService extends BaseService { 18 | 19 | private static SeekService seekService; 20 | 21 | private SeekService(WebClient webClient) { 22 | super(webClient); 23 | } 24 | 25 | public static synchronized SeekService getInstance(WebClient webClient) { 26 | if (seekService == null || webClient != seekService.webClient) { 27 | seekService = new SeekService(webClient); 28 | } 29 | return seekService; 30 | } 31 | 32 | // Seek basic requests 33 | 34 | public HttpRequest positionsRequest(String groupId, String name, JsonObject json) { 35 | return positionsBaseRequest(Urls.consumerInstancePosition(groupId, name), json); 36 | } 37 | 38 | public HttpRequest positionsBeginningRequest(String groupId, String name, JsonObject json) { 39 | return positionsBaseRequest(Urls.consumerInstancePositionBeginning(groupId, name), json); 40 | } 41 | 42 | public HttpRequest positionsBeginningEnd(String groupId, String name, JsonObject json) { 43 | return positionsBaseRequest(Urls.consumerInstancePositionEnd(groupId, name), json); 44 | } 45 | 46 | private HttpRequest positionsBaseRequest(String url, JsonObject json) { 47 | return postRequest(url) 48 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(json.toBuffer().length())) 49 | .putHeader(CONTENT_TYPE.toString(), BridgeContentType.KAFKA_JSON) 50 | .as(BodyCodec.jsonObject()); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/InvalidProducerIT.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.http; 6 | 7 | import io.strimzi.kafka.bridge.BridgeContentType; 8 | import io.strimzi.kafka.bridge.http.base.HttpBridgeITAbstract; 9 | import io.vertx.core.json.JsonArray; 10 | import io.vertx.core.json.JsonObject; 11 | import io.vertx.junit5.VertxTestContext; 12 | import org.apache.kafka.common.KafkaFuture; 13 | import org.apache.logging.log4j.LogManager; 14 | import org.apache.logging.log4j.Logger; 15 | import org.junit.jupiter.api.Test; 16 | 17 | import java.util.HashMap; 18 | import java.util.Map; 19 | import java.util.concurrent.ExecutionException; 20 | 21 | import static org.hamcrest.MatcherAssert.assertThat; 22 | import static org.hamcrest.Matchers.is; 23 | 24 | public class InvalidProducerIT extends HttpBridgeITAbstract { 25 | private static final Logger LOGGER = LogManager.getLogger(InvalidProducerIT.class); 26 | 27 | @Override 28 | protected Map overrideConfig() { 29 | Map cfg = new HashMap<>(); 30 | cfg.put("kafka.producer.acks", "5"); // invalid config 31 | return cfg; 32 | } 33 | 34 | @Test 35 | void sendSimpleMessage(VertxTestContext context) throws InterruptedException, ExecutionException { 36 | KafkaFuture future = adminClientFacade.createTopic(topic); 37 | 38 | String value = "message-value"; 39 | 40 | JsonArray records = new JsonArray(); 41 | JsonObject json = new JsonObject(); 42 | json.put("value", value); 43 | records.add(json); 44 | 45 | JsonObject root = new JsonObject(); 46 | root.put("records", records); 47 | 48 | future.get(); 49 | 50 | producerService() 51 | .sendRecordsRequest(topic, root, BridgeContentType.KAFKA_JSON_JSON) 52 | .sendJsonObject(root) 53 | .onComplete(ar -> { 54 | assertThat(ar.succeeded(), is(true)); 55 | assertThat(ar.result().statusCode(), is(500)); 56 | context.completeNow(); 57 | }); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /documentation/modules/proc-creating-http-bridge-consumer.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-creating-http-bridge-consumer-{context}'] 6 | = Creating a HTTP Bridge consumer 7 | 8 | [role="_abstract"] 9 | Before you can perform any consumer operations in the Kafka cluster, you must first create a consumer by using the xref:createconsumer[consumers] endpoint. The consumer is referred to as a __HTTP Bridge consumer__. 10 | 11 | .Procedure 12 | 13 | . Create a HTTP Bridge consumer in a new consumer group named `bridge-quickstart-consumer-group`: 14 | + 15 | [source,curl,subs=attributes+] 16 | ---- 17 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group \ 18 | -H 'content-type: application/vnd.kafka.v2+json' \ 19 | -d '{ 20 | "name": "bridge-quickstart-consumer", 21 | "auto.offset.reset": "earliest", 22 | "format": "json", 23 | "enable.auto.commit": false, 24 | "fetch.min.bytes": 512, 25 | "consumer.request.timeout.ms": 30000 26 | }' 27 | ---- 28 | + 29 | * The consumer is named `bridge-quickstart-consumer` and the embedded data format is set as `json`. 30 | * Some basic configuration settings are defined. 31 | * The consumer will not commit offsets to the log automatically because the `enable.auto.commit` setting is `false`. You will commit the offsets manually later in this quickstart. 
32 | + 33 | If the request is successful, the HTTP Bridge returns the consumer ID (`instance_id`) and base URL (`base_uri`) in the response body, along with a `200` code. 34 | + 35 | .Example response 36 | 37 | [source,json,subs="+quotes"] 38 | ---- 39 | #... 40 | { 41 | "instance_id": "bridge-quickstart-consumer", 42 | "base_uri":"http://__-bridge-service:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer" 43 | } 44 | ---- 45 | 46 | . Copy the base URL (`base_uri`) to use in the other consumer operations in this quickstart. 47 | 48 | .What to do next 49 | 50 | Now that you have created a HTTP Bridge consumer, you can xref:proc-bridge-subscribing-consumer-topics-{context}[subscribe it to topics]. 51 | 52 | [role="_additional-resources"] 53 | .Additional resources 54 | 55 | * xref:createconsumer[POST /consumers/{groupid}] 56 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/metrics/MetricsCollector.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge.metrics; 6 | 7 | import io.micrometer.core.instrument.Meter; 8 | import io.micrometer.core.instrument.MeterRegistry; 9 | import io.micrometer.prometheusmetrics.PrometheusMeterRegistry; 10 | import io.micrometer.prometheusmetrics.PrometheusNamingConvention; 11 | import io.vertx.micrometer.backends.BackendRegistries; 12 | 13 | /** 14 | * Abstract class for collecting and exposing metrics. 15 | */ 16 | public abstract class MetricsCollector { 17 | private final PrometheusMeterRegistry vertxRegistry; 18 | 19 | MetricsCollector() { 20 | this.vertxRegistry = (PrometheusMeterRegistry) BackendRegistries.getDefaultNow(); 21 | if (vertxRegistry != null) { 22 | // replace the default Prometheus naming convention 23 | this.vertxRegistry.config().namingConvention(new MetricsNamingConvention()); 24 | } 25 | } 26 | 27 | private static class MetricsNamingConvention extends PrometheusNamingConvention { 28 | @Override 29 | public String name(String name, Meter.Type type, String baseUnit) { 30 | String metricName = name.startsWith("vertx.") ? name.replace("vertx.", "strimzi.bridge.") : name; 31 | return super.name(metricName, type, baseUnit); 32 | } 33 | } 34 | 35 | /** 36 | * @return Registry instance for scraping Vertx metrics. 37 | * This is null if metrics are not enabled in the VertxOptions instance. 38 | */ 39 | public MeterRegistry getVertxRegistry() { 40 | return vertxRegistry; 41 | } 42 | 43 | /** 44 | * Scrape all, including Vertx metrics. 45 | * 46 | * @return Raw metrics in Prometheus format. 47 | */ 48 | public String scrape() { 49 | StringBuilder sb = new StringBuilder(); 50 | sb.append(doScrape()); 51 | if (vertxRegistry != null) { 52 | sb.append(vertxRegistry.scrape()); 53 | } 54 | return sb.toString(); 55 | } 56 | 57 | /** 58 | * @return Raw metrics in Prometheus format. 59 | */ 60 | abstract String doScrape(); 61 | } 62 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/tools/ExtensionContextParameterResolver.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.http.tools; 6 | 7 | import org.junit.jupiter.api.extension.ExtensionContext; 8 | import org.junit.jupiter.api.extension.ParameterContext; 9 | import org.junit.jupiter.api.extension.ParameterResolutionException; 10 | import org.junit.jupiter.api.extension.ParameterResolver; 11 | 12 | 13 | /** 14 | * JUnit 5 {@link ParameterResolver} that injects the current {@link ExtensionContext} 15 | * into test methods or lifecycle methods that declare it as a parameter. 16 | *
17 | * This is typically used to provide test context to {@code @BeforeEach}, {@code @AfterEach}, 18 | * or test methods themselves. 19 | *
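 * Register it with {@code @ExtendWith(ExtensionContextParameterResolver.class)}, as the {@code TestSeparator} interface in this test module does.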
20 | */ 21 | public class ExtensionContextParameterResolver implements ParameterResolver { 22 | /** 23 | * Checks if the parameter is of type {@link ExtensionContext}. 24 | * 25 | * @param parameterContext The context for the parameter for which a value is to be resolved. 26 | * @param extensionContext The extension context for the test or container. 27 | * @return {@code true} if the parameter is of type {@code ExtensionContext}, otherwise {@code false}. 28 | * @throws ParameterResolutionException If an error occurs while checking parameter support. 29 | */ 30 | @Override 31 | public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException { 32 | return parameterContext.getParameter().getType() == ExtensionContext.class; 33 | } 34 | 35 | /** 36 | * Provides the {@link ExtensionContext} instance as the parameter value. 37 | * 38 | * @param parameterContext The context for the parameter for which a value is to be resolved. 39 | * @param extensionContext The extension context for the test or container. 40 | * @return The current {@code ExtensionContext} instance. 41 | * @throws ParameterResolutionException If an error occurs during parameter resolution. 42 | */ 43 | @Override 44 | public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException { 45 | return extensionContext; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/metrics/JmxMetricsCollector.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.metrics; 7 | 8 | import io.prometheus.jmx.JmxCollector; 9 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter; 10 | import io.prometheus.metrics.model.registry.PrometheusRegistry; 11 | 12 | import javax.management.MalformedObjectNameException; 13 | import java.io.ByteArrayOutputStream; 14 | import java.io.IOException; 15 | import java.nio.charset.StandardCharsets; 16 | 17 | /** 18 | * Collect and scrape JMX metrics in Prometheus format. 19 | */ 20 | public class JmxMetricsCollector extends MetricsCollector { 21 | private final PrometheusRegistry registry; 22 | private final PrometheusTextFormatWriter textFormatter; 23 | 24 | /** 25 | * Constructor. 26 | * 27 | * @param yamlConfig YAML configuration string with metrics filtering rules 28 | * @throws MalformedObjectNameException Throws MalformedObjectNameException 29 | */ 30 | public JmxMetricsCollector(String yamlConfig) throws MalformedObjectNameException { 31 | // Prometheus default registry is a singleton, so it is shared with JmxCollector 32 | this(new JmxCollector(yamlConfig), PrometheusRegistry.defaultRegistry, new PrometheusTextFormatWriter(true)); 33 | } 34 | 35 | /** 36 | * Constructor. 
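 * Visible for testing, so that a preconfigured {@code JmxCollector}, Prometheus registry, and text formatter can be injected.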
37 | * 38 | * @param jmxCollector JMX collector registry 39 | * @param registry Prometheus collector registry 40 | * @param textFormatter Prometheus text formatter 41 | */ 42 | /* test */ JmxMetricsCollector(JmxCollector jmxCollector, 43 | PrometheusRegistry registry, 44 | PrometheusTextFormatWriter textFormatter) { 45 | super(); 46 | jmxCollector.register(); 47 | this.registry = registry; 48 | this.textFormatter = textFormatter; 49 | } 50 | 51 | @Override 52 | public String doScrape() { 53 | ByteArrayOutputStream stream = new ByteArrayOutputStream(); 54 | try { 55 | textFormatter.write(stream, registry.scrape()); 56 | } catch (IOException e) { 57 | throw new RuntimeException(e); 58 | } 59 | return stream.toString(StandardCharsets.UTF_8); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/model/HttpBridgeError.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http.model; 7 | 8 | import com.fasterxml.jackson.databind.node.ObjectNode; 9 | import io.strimzi.kafka.bridge.http.converter.JsonUtils; 10 | import io.vertx.core.json.JsonObject; 11 | 12 | import java.util.ArrayList; 13 | import java.util.List; 14 | 15 | /** 16 | * Represents an error related to HTTP bridging 17 | * 18 | * @param code code classifying the error itself 19 | * @param message message providing more information about the error 20 | * @param validationErrors list of detailed validation errors 21 | */ 22 | public record HttpBridgeError(int code, String message, List validationErrors) { 23 | 24 | /** 25 | * Creates an error object with an empty list of validation errors 26 | * 27 | * @param code code classifying the error itself 28 | * @param message message providing more information about the error 29 | */ 30 | public HttpBridgeError(int code, String message) { 31 | this(code, message, List.of()); 32 | } 33 | 34 | /** 35 | * @return a JSON representation of the error with code and message 36 | */ 37 | public ObjectNode toJson() { 38 | ObjectNode json = JsonUtils.createObjectNode(); 39 | json.put("error_code", this.code); 40 | json.put("message", this.message); 41 | if (this.validationErrors != null && !this.validationErrors.isEmpty()) { 42 | json.set("validation_errors", JsonUtils.createArrayNode(this.validationErrors)); 43 | } 44 | return json; 45 | } 46 | 47 | /** 48 | * Create an error instance from a JSON representation 49 | * 50 | * @param json JSON representation of the error 51 | * @return error instance 52 | */ 53 | public static HttpBridgeError fromJson(JsonObject json) { 54 | if (json.containsKey("validation_errors")) { 55 | List validationErrors = new ArrayList<>(); 56 | json.getJsonArray("validation_errors").forEach(error -> validationErrors.add(error.toString())); 57 | return new HttpBridgeError(json.getInteger("error_code"), json.getString("message"), validationErrors); 58 | } else { 59 | return new HttpBridgeError(json.getInteger("error_code"), json.getString("message")); 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/tools/TestSeparator.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge.http.tools; 6 | 7 | import org.apache.logging.log4j.LogManager; 8 | import org.apache.logging.log4j.Logger; 9 | import org.junit.jupiter.api.AfterEach; 10 | import org.junit.jupiter.api.BeforeEach; 11 | import org.junit.jupiter.api.extension.ExtendWith; 12 | import org.junit.jupiter.api.extension.ExtensionContext; 13 | 14 | import java.util.Collections; 15 | 16 | /** 17 | * Provides a separator in the log output before and after each test for improved readability. 18 | *
19 | * Implement this interface in your test classes to automatically log a separator line and test status 20 | * (STARTED, SUCCEEDED, FAILED) around each test execution. 21 | *
22 | */ 23 | @ExtendWith(ExtensionContextParameterResolver.class) 24 | public interface TestSeparator { 25 | /** 26 | * Logger instance used for logging separator and test status. 27 | */ 28 | Logger LOGGER = LogManager.getLogger(TestSeparator.class); 29 | 30 | /** 31 | * The character used to build the separator line in the log output. 32 | */ 33 | String SEPARATOR_CHAR = "#"; 34 | 35 | /** 36 | * The length of the separator line. 37 | */ 38 | int SEPARATOR_LENGTH = 76; 39 | 40 | /** 41 | * Logs a separator line and the test class/method at the start of each test. 42 | * 43 | * @param context the JUnit extension context for the test 44 | */ 45 | @BeforeEach 46 | default void beforeEachTest(ExtensionContext context) { 47 | LOGGER.info(String.join("", Collections.nCopies(SEPARATOR_LENGTH, SEPARATOR_CHAR))); 48 | LOGGER.info("{}.{} - STARTED", context.getRequiredTestClass().getName(), context.getRequiredTestMethod().getName()); 49 | } 50 | 51 | /** 52 | * Logs the test result (SUCCEEDED or FAILED) and a separator line at the end of each test. 53 | * 54 | * @param context the JUnit extension context for the test 55 | */ 56 | @AfterEach 57 | default void afterEachTest(ExtensionContext context) { 58 | String status = context.getExecutionException().isPresent() ? "FAILED" : "SUCCEEDED"; 59 | LOGGER.info("{}.{} - {}", context.getRequiredTestClass().getName(), context.getRequiredTestMethod().getName(), status); 60 | LOGGER.info(String.join("", Collections.nCopies(SEPARATOR_LENGTH, SEPARATOR_CHAR))); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/http/StaticIT.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge.http; 6 | 7 | import io.strimzi.kafka.bridge.utils.Utils; 8 | import org.junit.jupiter.api.Test; 9 | 10 | import java.io.BufferedReader; 11 | import java.io.InputStreamReader; 12 | import static org.hamcrest.CoreMatchers.containsString; 13 | import static org.hamcrest.MatcherAssert.assertThat; 14 | 15 | /** 16 | * Static tests are those that do not rely on the regular @BeforeAll, @BeforeEach and @afterEach methods, 17 | * so this class of tests does not extend HttpBridgeTestBase 18 | */ 19 | public class StaticIT { 20 | 21 | @Test 22 | /** 23 | * Start the Kafka Bridge using ProcessBuilder to run the kafka_bridge_run.sh script, then check 24 | * if the bridge version is displayed in messages. 
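 * Assumes the unpacked bridge distribution is already available under {@code target/} (produced by the Maven assembly build), because the script is executed directly from that location.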
25 | */ 26 | void bridgeVersionDisplayedInStartupTest() throws Exception { 27 | 28 | String kBVersion = Utils.getKafkaBridgeVersionFromFile("release.version"); 29 | 30 | ProcessBuilder bridgeJar = new ProcessBuilder( 31 | String.format("target/kafka-bridge-%s/kafka-bridge-%s/bin/kafka_bridge_run.sh", kBVersion, kBVersion), 32 | "--config-file", 33 | String.format("target/kafka-bridge-%s/kafka-bridge-%s/config/application.properties", kBVersion, kBVersion)); 34 | Process bridgeProc = bridgeJar.start(); 35 | 36 | InputStreamReader inputStreamReader = new InputStreamReader(bridgeProc.getInputStream()); 37 | BufferedReader bufferedInputReader = new BufferedReader(inputStreamReader); 38 | 39 | int lineCount = 0; 40 | String procOutput; 41 | while ((procOutput = bufferedInputReader.readLine()) != null) { 42 | 43 | if ((procOutput.contains("Strimzi Kafka Bridge")) && (procOutput.contains("starting"))) { 44 | assertThat(procOutput, containsString("Strimzi Kafka Bridge " + kBVersion + " is starting")); 45 | break; 46 | } else { 47 | // Check only the first 5 lines 48 | lineCount++; 49 | if (lineCount == 5) { 50 | inputStreamReader.close(); 51 | bridgeProc.destroy(); 52 | assertThat("Test Failed", false); 53 | } 54 | } 55 | } 56 | bridgeProc.destroy(); 57 | inputStreamReader.close(); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest 2 | ARG JAVA_VERSION=17 3 | ARG TARGETPLATFORM 4 | 5 | USER root 6 | 7 | RUN microdnf update -y \ 8 | && microdnf --setopt=install_weak_deps=0 --setopt=tsflags=nodocs install -y java-${JAVA_VERSION}-openjdk-headless openssl shadow-utils \ 9 | && microdnf clean all -y 10 | 11 | # Set JAVA_HOME env var 12 | ENV JAVA_HOME /usr/lib/jvm/jre-${JAVA_VERSION} 13 | 14 | # Add strimzi user with UID 1001 15 | # The user is in the group 0 to have access to the mounted volumes and storage 16 | RUN useradd -r -m -u 1001 -g 0 strimzi 17 | 18 | ARG strimzi_kafka_bridge_version=1.0-SNAPSHOT 19 | ENV STRIMZI_KAFKA_BRIDGE_VERSION ${strimzi_kafka_bridge_version} 20 | ENV STRIMZI_HOME=/opt/strimzi 21 | RUN mkdir -p ${STRIMZI_HOME} 22 | WORKDIR ${STRIMZI_HOME} 23 | 24 | COPY target/kafka-bridge-${strimzi_kafka_bridge_version}/kafka-bridge-${strimzi_kafka_bridge_version} ./ 25 | 26 | ##### 27 | # Add Tini 28 | ##### 29 | ENV TINI_VERSION v0.19.0 30 | ENV TINI_SHA256_AMD64=93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c 31 | ENV TINI_SHA256_ARM64=07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81 32 | ENV TINI_SHA256_PPC64LE=3f658420974768e40810001a038c29d003728c5fe86da211cff5059e48cfdfde 33 | ENV TINI_SHA256_S390X=931b70a182af879ca249ae9de87ef68423121b38d235c78997fafc680ceab32d 34 | 35 | RUN set -ex; \ 36 | if [[ ${TARGETPLATFORM} = "linux/ppc64le" ]]; then \ 37 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-ppc64le -o /usr/bin/tini; \ 38 | echo "${TINI_SHA256_PPC64LE} */usr/bin/tini" | sha256sum -c; \ 39 | chmod +x /usr/bin/tini; \ 40 | elif [[ ${TARGETPLATFORM} = "linux/arm64" ]]; then \ 41 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-arm64 -o /usr/bin/tini; \ 42 | echo "${TINI_SHA256_ARM64} */usr/bin/tini" | sha256sum -c; \ 43 | chmod +x /usr/bin/tini; \ 44 | elif [[ ${TARGETPLATFORM} = "linux/s390x" ]]; then \ 45 | curl -s -L 
https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-s390x -o /usr/bin/tini; \ 46 | echo "${TINI_SHA256_S390X} */usr/bin/tini" | sha256sum -c; \ 47 | chmod +x /usr/bin/tini; \ 48 | else \ 49 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini -o /usr/bin/tini; \ 50 | echo "${TINI_SHA256_AMD64} */usr/bin/tini" | sha256sum -c; \ 51 | chmod +x /usr/bin/tini; \ 52 | fi 53 | 54 | USER 1001 55 | 56 | CMD ["/opt/strimzi/bin/kafka_bridge_run.sh"] 57 | -------------------------------------------------------------------------------- /.azure/scripts/check_docs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | fatal=0 4 | GREP="grep" 5 | 6 | if [ "$(uname -s)" = "Darwin" ]; then 7 | GREP="ggrep" 8 | fi 9 | 10 | function grep_check { 11 | local pattern=$1 12 | local description=$2 13 | local opts=${3:--i -E -r -n} 14 | local fatalness=${4:-1} 15 | x=$($GREP $opts "$pattern" documentation/book/) 16 | if [ -n "$x" ]; then 17 | echo "$description:" 18 | echo "$x" 19 | y=$(echo "$x" | wc -l) 20 | ((fatal+=fatalness*y)) 21 | fi 22 | } 23 | 24 | # Check for latin abbrevs 25 | grep_check '[^[:alpha:]](e\.g\.|eg)[^[:alpha:]]' "Replace 'e.g'. with 'for example, '" 26 | grep_check '[^[:alpha:]](i\.e\.|ie)[^[:alpha:]]' "Replace 'i.e'. with 'that is, '" 27 | grep_check '[^[:alpha:]]etc\.?[^[:alpha:]]' "Replace 'etc.'. with ' and so on.'" 28 | 29 | # And/or 30 | grep_check '[^[:alpha:]]and/or[^[:alpha:]]' "Use either 'and' or 'or', but not 'and/or'" 31 | 32 | # Contractions 33 | grep_check '[^[:alpha:]](do|is|are|won|have|ca|does|did|had|has|must)n'"'"'?t[^[:alpha:]]' "Avoid 'nt contraction" 34 | grep_check '[^[:alpha:]]it'"'"'s[^[:alpha:]]' "Avoid it's contraction" 35 | grep_check '[^[:alpha:]]that'"'"'s[^[:alpha:]]' "Avoid that's contraction" 36 | grep_check '[^[:alpha:]]can not[^[:alpha:]]' "Use 'cannot' not 'can not'" 37 | grep_check '\>" 42 | grep_check '[[]id=(["'"'"'])[[:alnum:]_-]+(?!-[{]context[}])\1' "[id=...] should end with -{context}" "-i -P -r -n" 43 | 44 | # leveloffset=+ 45 | grep_check 'leveloffset+=[0-9]+' "It should be leveloffset=+... not +=" 46 | 47 | # include: should be include:: 48 | grep_check 'include:[^:[ ]+[[]' "It should be include::...[] (two colons) not include:...[]" 49 | 50 | if [ "$fatal" -gt 0 ]; then 51 | echo "ERROR: ${fatal} docs problems found." 
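  # A non-zero exit code makes the docs check fail until the problems listed above are fixed.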
52 | exit 1 53 | fi 54 | 55 | # Check for changes in autogenerated code 56 | mvn generate-sources 57 | CHANGED_DERIVED=$(git diff --name-status -- documentation/book/) 58 | if [ -n "$CHANGED_DERIVED" ] ; then 59 | echo "ERROR: Uncommitted changes in documentation:" 60 | echo "$CHANGED_DERIVED" 61 | echo "Run the following to add up-to-date resources:" 62 | echo " make docu_api \\" 63 | echo " && git add documentation/book/ \\" 64 | echo " && git commit -s -m 'Update generated documentation'" 65 | exit 1 66 | fi -------------------------------------------------------------------------------- /documentation/modules/proc-bridge-retrieving-latest-messages-from-consumer.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-bridge-retrieving-latest-messages-from-consumer-{context}'] 6 | = Retrieving the latest messages from a HTTP Bridge consumer 7 | 8 | [role="_abstract"] 9 | Retrieve the latest messages from the HTTP Bridge consumer by requesting data from the xref:poll[records] endpoint. In production, HTTP clients can call this endpoint repeatedly (in a loop). 10 | 11 | .Procedure 12 | 13 | . Produce additional messages to the HTTP Bridge consumer, as described in xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Producing messages to topics and partitions]. 14 | 15 | . Submit a `GET` request to the `records` endpoint: 16 | + 17 | [source,curl,subs=attributes+] 18 | ---- 19 | curl -X GET http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/records \ 20 | -H 'accept: application/vnd.kafka.json.v2+json' 21 | ---- 22 | + 23 | After creating and subscribing to a HTTP Bridge consumer, a first GET request will return an empty response because the poll operation starts a rebalancing process to assign partitions. 24 | 25 | . Repeat step two to retrieve messages from the HTTP Bridge consumer. 26 | + 27 | The HTTP Bridge returns an array of messages -- describing the topic name, key, value, partition, and offset -- in the response body, along with a `200` code. Messages are retrieved from the latest offset by default. 28 | + 29 | [source,json,subs=attributes+] 30 | ---- 31 | HTTP/1.1 200 OK 32 | content-type: application/vnd.kafka.json.v2+json 33 | #... 34 | [ 35 | { 36 | "topic":"bridge-quickstart-topic", 37 | "key":"my-key", 38 | "value":"sales-lead-0001", 39 | "partition":0, 40 | "offset":0 41 | }, 42 | { 43 | "topic":"bridge-quickstart-topic", 44 | "key":null, 45 | "value":"sales-lead-0003", 46 | "partition":0, 47 | "offset":1 48 | }, 49 | #... 50 | ---- 51 | + 52 | NOTE: If an empty response is returned, produce more records to the consumer as described in xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Producing messages to topics and partitions], and then try retrieving messages again. 53 | 54 | .What to do next 55 | 56 | After retrieving messages from a HTTP Bridge consumer, try xref:proc-bridge-committing-consumer-offsets-to-log-{context}[committing offsets to the log]. 
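Outside this quickstart, an HTTP client typically keeps calling the `records` endpoint in a loop rather than issuing single requests.
The following is a minimal sketch of such a loop, assuming the quickstart consumer group and instance names used above and that the `jq` tool is installed:

[source,shell]
----
# Sketch only: poll the quickstart consumer once per second and print each record
BASE_URI=http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer

while true; do
  curl -s -X GET "$BASE_URI/records" \
    -H 'accept: application/vnd.kafka.json.v2+json' | jq -c '.[]?'
  sleep 1
done
----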
57 | 58 | [role="_additional-resources"] 59 | .Additional resources 60 | 61 | * xref:poll[GET /consumers/{groupid}/instances/{name}/records] 62 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/ConfigRetriever.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.io.FileInputStream; 9 | import java.io.IOException; 10 | import java.io.InputStream; 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | import java.util.Properties; 14 | import java.util.stream.Collectors; 15 | 16 | /** 17 | * Retrieve the bridge configuration from properties file and environment variables 18 | */ 19 | public class ConfigRetriever { 20 | 21 | /** 22 | * Retrieve the bridge configuration from the properties file provided as parameter 23 | * and adding environment variables 24 | * If a parameter is defined in both properties file and environment variables, the latter wins 25 | * 26 | * @param path path to the properties file 27 | * @return configuration as key-value pairs 28 | * @throws IOException when not possible to get the properties file 29 | */ 30 | public static Map getConfig(String path) throws IOException { 31 | return getConfig(path, System.getenv()); 32 | } 33 | 34 | /** 35 | * Retrieve the bridge configuration from the properties file provided as parameter 36 | * and adding the additional configuration parameter provided as well 37 | * If a parameter is defined in both properties file and additional configuration, the latter wins 38 | * 39 | * @param path path to the properties file 40 | * @param additionalConfig additional configuration to add 41 | * @return configuration as key-value pairs 42 | * @throws IOException when not possible to get the properties file 43 | */ 44 | public static Map getConfig(String path, Map additionalConfig) throws IOException { 45 | Map configuration; 46 | try (InputStream is = new FileInputStream(path)) { 47 | Properties props = new Properties(); 48 | props.load(is); 49 | configuration = 50 | props.entrySet().stream().collect( 51 | Collectors.toMap( 52 | e -> String.valueOf(e.getKey()), 53 | e -> String.valueOf(e.getValue()), 54 | (prev, next) -> next, HashMap::new 55 | )); 56 | } 57 | configuration.putAll(additionalConfig); 58 | return configuration; 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/HttpBridgeEndpoint.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http; 7 | 8 | import io.strimzi.kafka.bridge.Handler; 9 | import io.strimzi.kafka.bridge.config.BridgeConfig; 10 | import io.vertx.ext.web.RoutingContext; 11 | 12 | /** 13 | * Abstract class for an endpoint bridging traffic between HTTP and Apache Kafka 14 | */ 15 | public abstract class HttpBridgeEndpoint { 16 | protected String name; 17 | protected final BridgeConfig bridgeConfig; 18 | private Handler closeHandler; 19 | 20 | /** 21 | * Constructor 22 | * 23 | * @param bridgeConfig the bridge configuration 24 | */ 25 | public HttpBridgeEndpoint(BridgeConfig bridgeConfig) { 26 | this.bridgeConfig = bridgeConfig; 27 | } 28 | 29 | /** 30 | * @return the name of the HTTP bridge endpoint 31 | */ 32 | public String name() { 33 | return this.name; 34 | } 35 | 36 | /** 37 | * Open the HTTP bridge endpoint 38 | */ 39 | public abstract void open(); 40 | 41 | /** 42 | * Close the HTTP bridge endpoint calling the {@code closeHandler} as well 43 | */ 44 | public void close() { 45 | this.handleClose(); 46 | } 47 | 48 | /** 49 | * Handler for the HTTP routing context 50 | * 51 | * @param routingContext HTTP routing context to handle 52 | */ 53 | public void handle(RoutingContext routingContext) { 54 | this.handle(routingContext, null); 55 | } 56 | 57 | /** 58 | * Handler for the HTTP routing context 59 | * 60 | * @param routingContext HTTP routing context to handle 61 | * @param handler handler for the corresponding bridge endpoint 62 | */ 63 | public abstract void handle(RoutingContext routingContext, Handler handler); 64 | 65 | /** 66 | * Sets a handler called when an HTTP bridge endpoint is closed due to internal processing 67 | * 68 | * @param endpointCloseHandler The handler 69 | * @return The HTTP bridge endpoint itself 70 | */ 71 | public HttpBridgeEndpoint closeHandler(Handler endpointCloseHandler) { 72 | this.closeHandler = endpointCloseHandler; 73 | return this; 74 | } 75 | 76 | /** 77 | * Raise close event 78 | */ 79 | protected void handleClose() { 80 | if (this.closeHandler != null) { 81 | this.closeHandler.handle(this); 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /bin/docker/kafka_bridge_tls_prepare_certificates.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Parameters: 4 | # $1: Path to the new truststore 5 | # $2: Truststore password 6 | # $3: Public key to be imported 7 | # $4: Alias of the certificate 8 | function create_truststore { 9 | # Disable FIPS if needed 10 | if [ "$FIPS_MODE" = "disabled" ]; then 11 | KEYTOOL_OPTS="${KEYTOOL_OPTS} -J-Dcom.redhat.fips=false" 12 | else 13 | KEYTOOL_OPTS="" 14 | fi 15 | 16 | # shellcheck disable=SC2086 17 | keytool ${KEYTOOL_OPTS} -keystore $1 -storepass $2 -noprompt -alias $4 -import -file $3 -storetype PKCS12 18 | } 19 | 20 | # Parameters: 21 | # $1: Path to the new keystore 22 | # $2: Truststore password 23 | # $3: Public key to be imported 24 | # $4: Private key to be imported 25 | # $5: Alias of the certificate 26 | function create_keystore { 27 | RANDFILE=/tmp/.rnd openssl pkcs12 -export -in $3 -inkey $4 -name $5 -password pass:$2 -out $1 -certpbe aes-128-cbc -keypbe aes-128-cbc -macalg sha256 28 | } 29 | 30 | # Parameters: 31 | # $1: Path to the new truststore 32 | # $2: Truststore password 33 | # $3: Base path where the certificates are mounted 34 | # $4: Environment variable defining the certs that should be loaded 35 | function prepare_truststore { 36 | 
TRUSTSTORE=$1 37 | PASSWORD=$2 38 | BASEPATH=$3 39 | TRUSTED_CERTS=$4 40 | 41 | rm -f "$TRUSTSTORE" 42 | 43 | IFS=';' read -ra CERTS <<< "${TRUSTED_CERTS}" 44 | for cert in "${CERTS[@]}" 45 | do 46 | for file in $BASEPATH/$cert 47 | do 48 | if [ -f "$file" ]; then 49 | echo "Adding $file to truststore $TRUSTSTORE with alias $file" 50 | create_truststore "$TRUSTSTORE" "$PASSWORD" "$file" "$file" 51 | fi 52 | done 53 | done 54 | } 55 | 56 | if [ -n "$KAFKA_BRIDGE_TRUSTED_CERTS" ]; then 57 | echo "Preparing Bridge truststore" 58 | prepare_truststore "/tmp/strimzi/bridge.truststore.p12" "$CERTS_STORE_PASSWORD" "${STRIMZI_HOME}/bridge-certs" "$KAFKA_BRIDGE_TRUSTED_CERTS" 59 | fi 60 | 61 | if [ -n "$KAFKA_BRIDGE_TLS_AUTH_CERT" ] && [ -n "$KAFKA_BRIDGE_TLS_AUTH_KEY" ]; then 62 | echo "Preparing keystore" 63 | rm -f "/tmp/strimzi/bridge.keystore.p12" 64 | create_keystore "/tmp/strimzi/bridge.keystore.p12" "$CERTS_STORE_PASSWORD" "${STRIMZI_HOME}/bridge-certs/$KAFKA_BRIDGE_TLS_AUTH_CERT" "${STRIMZI_HOME}/bridge-certs/$KAFKA_BRIDGE_TLS_AUTH_KEY" "$KAFKA_BRIDGE_TLS_AUTH_CERT" 65 | echo "Preparing keystore is complete" 66 | fi 67 | 68 | if [ -n "$KAFKA_BRIDGE_OAUTH_TRUSTED_CERTS" ]; then 69 | echo "Preparing OAuth truststore" 70 | prepare_truststore "/tmp/strimzi/oauth.truststore.p12" "$CERTS_STORE_PASSWORD" "/opt/strimzi/oauth-certs" "$KAFKA_BRIDGE_OAUTH_TRUSTED_CERTS" 71 | fi 72 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/HttpOpenApiOperations.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http; 7 | 8 | /** 9 | * OpenAPI operations ids 10 | */ 11 | public enum HttpOpenApiOperations { 12 | 13 | /** send a message */ 14 | SEND("send"), 15 | /** send a message to a specific partition */ 16 | SEND_TO_PARTITION("sendToPartition"), 17 | /** create a consumer instance */ 18 | CREATE_CONSUMER("createConsumer"), 19 | /** delete a specific consumer instance */ 20 | DELETE_CONSUMER("deleteConsumer"), 21 | /** subscribe to topic(s) */ 22 | SUBSCRIBE("subscribe"), 23 | /** unsubscribe from topic(s) */ 24 | UNSUBSCRIBE("unsubscribe"), 25 | /** list topics subscription */ 26 | LIST_SUBSCRIPTIONS("listSubscriptions"), 27 | /** list topics on the cluster */ 28 | LIST_TOPICS("listTopics"), 29 | /** get information for a specific topic */ 30 | GET_TOPIC("getTopic"), 31 | /** creates a topic with specified name */ 32 | CREATE_TOPIC("createTopic"), 33 | /** list partitions for a specific topic */ 34 | LIST_PARTITIONS("listPartitions"), 35 | /** get partition information for a specific topic */ 36 | GET_PARTITION("getPartition"), 37 | /** get offesets information for a specific topic partition */ 38 | GET_OFFSETS("getOffsets"), 39 | /** assign a consumer to topic partition(s) */ 40 | ASSIGN("assign"), 41 | /** run a consumer poll to read messages */ 42 | POLL("poll"), 43 | /** commit consumer offset */ 44 | COMMIT("commit"), 45 | /** seek to a specific offset of a topic partition */ 46 | SEEK("seek"), 47 | /** seek to the beginning of a topic partition */ 48 | SEEK_TO_BEGINNING("seekToBeginning"), 49 | /** seek to the end of a topic partition */ 50 | SEEK_TO_END("seekToEnd"), 51 | /** check liveness of the bridge */ 52 | HEALTHY("healthy"), 53 | /** check the readiness of the bridge */ 54 | READY("ready"), 55 | 
/** get the OpenAPI v2 specification */ 56 | OPENAPI("openapi"), 57 | /** get the OpenAPI v2 specification */ 58 | OPENAPI_V2("openapiv2"), 59 | /** get the OpenAPI v3 specification */ 60 | OPENAPI_V3("openapiv3"), 61 | /** get general information (i.e. version) about the bridge */ 62 | INFO("info"), 63 | /** get metrics (if enabled) in Prometheus format */ 64 | METRICS("metrics"); 65 | 66 | private final String text; 67 | 68 | HttpOpenApiOperations(final String text) { 69 | this.text = text; 70 | } 71 | 72 | @Override 73 | public String toString() { 74 | return text; 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /bin/docker/kafka_bridge_run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | set +x 4 | 5 | # Clean-up /tmp directory from files which might have remained from previous container restart 6 | # We ignore any errors which might be caused by files injected by different agents which we do not have the rights to delete 7 | rm -rfv /tmp/* || true 8 | 9 | MYPATH="$(dirname "$0")" 10 | 11 | # Generate temporary keystore password 12 | export CERTS_STORE_PASSWORD=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32) 13 | 14 | # Create dir where keystores and truststores will be stored 15 | mkdir -p /tmp/strimzi 16 | 17 | # Import certificates into keystore and truststore 18 | "${MYPATH}"/kafka_bridge_tls_prepare_certificates.sh 19 | 20 | # Generate and print the bridge config file 21 | echo "Kafka Bridge configuration:" 22 | tee /tmp/kafka-bridge.properties < "$STRIMZI_HOME/custom-config/application.properties" | sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g' -e 's/password=.*/password=[hidden]/g' 23 | echo "" 24 | 25 | # Configure logging for Kubernetes deployments 26 | export KAFKA_BRIDGE_LOG4J_OPTS="-Dlog4j2.configurationFile=file:$STRIMZI_HOME/custom-config/log4j2.properties" 27 | 28 | # Configure Memory 29 | . "${MYPATH}"/dynamic_resources.sh 30 | 31 | MAX_HEAP=$(get_heap_size) 32 | if [ -n "$MAX_HEAP" ]; then 33 | echo "Configuring Java heap: -Xms${MAX_HEAP}m -Xmx${MAX_HEAP}m" 34 | export JAVA_OPTS="-Xms${MAX_HEAP}m -Xmx${MAX_HEAP}m $JAVA_OPTS" 35 | fi 36 | 37 | export MALLOC_ARENA_MAX=2 38 | 39 | # Configure GC logging for memory tracking 40 | function get_gc_opts { 41 | if [ "${STRIMZI_GC_LOG_ENABLED}" == "true" ]; then 42 | # The first segment of the version number is '1' for releases < 9; then '9', '10', '11', ... 
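        # For example, openjdk version "17.0.9" yields 17, while java version "1.8.0_292" yields 1.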
43 | JAVA_MAJOR_VERSION=$(java -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p') 44 | if [ "$JAVA_MAJOR_VERSION" -ge "9" ] ; then 45 | echo "-Xlog:gc*:stdout:time -XX:NativeMemoryTracking=summary" 46 | else 47 | echo "-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:NativeMemoryTracking=summary" 48 | fi 49 | else 50 | # no gc options 51 | echo "" 52 | fi 53 | } 54 | 55 | export JAVA_OPTS="${JAVA_OPTS} $(get_gc_opts)" 56 | 57 | if [ -n "$STRIMZI_JAVA_SYSTEM_PROPERTIES" ]; then 58 | export JAVA_OPTS="${JAVA_OPTS} ${STRIMZI_JAVA_SYSTEM_PROPERTIES}" 59 | fi 60 | 61 | if [ -n "$STRIMZI_JAVA_OPTS" ]; then 62 | export JAVA_OPTS="${JAVA_OPTS} ${STRIMZI_JAVA_OPTS}" 63 | fi 64 | 65 | # Disable FIPS if needed 66 | if [ "$FIPS_MODE" = "disabled" ]; then 67 | export JAVA_OPTS="${JAVA_OPTS} -Dcom.redhat.fips=false" 68 | fi 69 | 70 | # starting Kafka Bridge with final configuration 71 | exec /usr/bin/tini -s -w -e 143 -- "${MYPATH}"/../kafka_bridge_run.sh --config-file=/tmp/kafka-bridge.properties "$@" 72 | -------------------------------------------------------------------------------- /.azure/release-pipeline.yaml: -------------------------------------------------------------------------------- 1 | # Triggers 2 | trigger: none 3 | pr: none 4 | 5 | # Parameters 6 | parameters: 7 | - name: releaseVersion 8 | displayName: Release Version 9 | type: string 10 | - name: useSuffix 11 | displayName: Build suffixed images 12 | type: boolean 13 | default: true 14 | - name: releaseSuffix 15 | displayName: Release Suffix 16 | type: number 17 | - name: sourcePipelineId 18 | displayName: Pipeline ID of the source build 19 | type: number 20 | default: 34 21 | values: 22 | - 34 23 | - name: sourceBuildId 24 | displayName: Build ID of the source build 25 | type: number 26 | 27 | # Stages 28 | stages: 29 | - stage: publish_artifacts 30 | displayName: Publish artifacts for ${{ parameters.releaseVersion }} 31 | condition: startsWith(variables['build.sourceBranch'], 'refs/heads/release-') 32 | jobs: 33 | - template: 'templates/jobs/release_artifacts.yaml' 34 | parameters: 35 | artifactSource: 'current' 36 | artifactProject: 'strimzi' 37 | artifactPipeline: '' 38 | artifactRunVersion: '' 39 | artifactRunId: '' 40 | releaseVersion: '${{ parameters.releaseVersion }}' 41 | - stage: containers_publish_with_suffix 42 | displayName: Publish Containers for ${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }} 43 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'), eq('${{ parameters.useSuffix }}', 'true')) 44 | jobs: 45 | - template: 'templates/jobs/push_container.yaml' 46 | parameters: 47 | dockerTag: '${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}' 48 | artifactSource: 'specific' 49 | artifactProject: 'strimzi' 50 | artifactPipeline: '${{ parameters.sourcePipelineId }}' 51 | artifactRunVersion: 'specific' 52 | artifactRunId: '${{ parameters.sourceBuildId }}' 53 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 54 | - stage: containers_publish 55 | displayName: Publish Containers for ${{ parameters.releaseVersion }} 56 | dependsOn: 57 | - containers_publish_with_suffix 58 | condition: and(in(dependencies.containers_publish_with_suffix.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'), startsWith(variables['build.sourceBranch'], 'refs/heads/release-')) 59 | jobs: 60 | - template: 'templates/jobs/push_container.yaml' 61 | parameters: 62 | dockerTag: '${{ parameters.releaseVersion }}' 63 | artifactSource: 
'specific' 64 | artifactProject: 'strimzi' 65 | artifactPipeline: '${{ parameters.sourcePipelineId }}' 66 | artifactRunVersion: 'specific' 67 | artifactRunId: '${{ parameters.sourceBuildId }}' 68 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 69 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main, release* ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '23 17 * * 3' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | language: [ 'java' ] 32 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 33 | # Learn more: 34 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 35 | 36 | steps: 37 | - name: Checkout repository 38 | uses: actions/checkout@v4 39 | 40 | # Setup OpenJDK 17 41 | - uses: actions/setup-java@v4 42 | with: 43 | distribution: 'temurin' 44 | java-version: '17' 45 | cache: 'maven' 46 | 47 | # Initializes the CodeQL tools for scanning. 48 | - name: Initialize CodeQL 49 | uses: github/codeql-action/init@v3 50 | with: 51 | languages: ${{ matrix.language }} 52 | # If you wish to specify custom queries, you can do so here or in a config file. 53 | # By default, queries listed here will override any specified in a config file. 54 | # Prefix the list here with "+" to use these queries and those in the config file. 55 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 56 | 57 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 58 | # If this step fails, then you should remove it and run the build manually (see below) 59 | - name: Autobuild 60 | uses: github/codeql-action/autobuild@v2 61 | 62 | # ℹ️ Command-line programs to run using the OS shell. 
63 | # 📚 https://git.io/JvXDl 64 | 65 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 66 | # and modify them (or add more) to build your code if your project 67 | # uses a compiled language 68 | 69 | #- run: | 70 | # make bootstrap 71 | # make release 72 | 73 | - name: Perform CodeQL Analysis 74 | uses: github/codeql-action/analyze@v3 75 | -------------------------------------------------------------------------------- /.azure/build-pipeline.yaml: -------------------------------------------------------------------------------- 1 | # Triggers 2 | trigger: 3 | branches: 4 | include: 5 | - 'main' 6 | - 'release-*' 7 | pr: 8 | autoCancel: true 9 | branches: 10 | include: 11 | - '*' 12 | 13 | # Stages 14 | stages: 15 | - stage: java_build 16 | displayName: Java build 17 | jobs: 18 | - template: 'templates/jobs/build_java.yaml' 19 | variables: 20 | STRIMZI_TEST_CONTAINER_LOGGING_ENABLED: false 21 | - stage: docs_build 22 | displayName: Docs build 23 | dependsOn: [] 24 | jobs: 25 | - template: 'templates/jobs/build_docs.yaml' 26 | - stage: container_build 27 | displayName: Prepare Container 28 | dependsOn: 29 | - java_build 30 | jobs: 31 | - template: 'templates/jobs/build_container.yaml' 32 | parameters: 33 | artifactSource: 'current' 34 | artifactProject: 'strimzi' 35 | artifactPipeline: '' 36 | artifactRunVersion: '' 37 | artifactRunId: '' 38 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 39 | - stage: container_publish 40 | displayName: Publish Container 41 | dependsOn: 42 | - container_build 43 | - docs_build 44 | condition: and(succeeded(), eq(variables['build.sourceBranch'], 'refs/heads/main')) 45 | jobs: 46 | - template: 'templates/jobs/push_container.yaml' 47 | parameters: 48 | dockerTag: 'latest' 49 | artifactSource: 'current' 50 | artifactProject: 'strimzi' 51 | artifactPipeline: '' 52 | artifactRunVersion: '' 53 | artifactRunId: '' 54 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 55 | - stage: docs_publish 56 | displayName: Publish Docs 57 | dependsOn: 58 | - container_build 59 | - docs_build 60 | condition: and(succeeded(), eq(variables['build.sourceBranch'], 'refs/heads/main')) 61 | jobs: 62 | - template: 'templates/jobs/publish_docs.yaml' 63 | parameters: 64 | dockerTag: 'latest' 65 | artifactSource: 'current' 66 | artifactProject: 'strimzi' 67 | artifactPipeline: '' 68 | artifactRunVersion: '' 69 | artifactRunId: '' 70 | - stage: java_deploy 71 | displayName: Deploy Java 72 | dependsOn: 73 | - container_build 74 | - docs_build 75 | # Deploy Strimzi Java artifacts -> run only on main branch (where it deploys to OSS snapshot repos) 76 | condition: and(succeeded(), eq(variables['build.sourceBranch'], 'refs/heads/main')) 77 | jobs: 78 | - template: 'templates/jobs/deploy_java.yaml' 79 | parameters: 80 | dockerTag: 'latest' 81 | artifactSource: 'current' 82 | artifactProject: 'strimzi' 83 | artifactPipeline: '' 84 | artifactRunVersion: '' 85 | artifactRunId: '' 86 | -------------------------------------------------------------------------------- /documentation/modules/proc-bridge-seeking-offsets-for-partition.adoc: -------------------------------------------------------------------------------- 1 | // Module included in the following assemblies: 2 | // 3 | // assembly-http-bridge-quickstart.adoc 4 | 5 | [id='proc-bridge-seeking-offset-for-partition-{context}'] 6 | = Seeking to offsets for a partition 7 | 8 | [role="_abstract"] 9 | Use the xref:seek[positions] endpoints to configure the HTTP Bridge consumer to retrieve messages 
for a partition from a specific offset, and then from the latest offset. This is referred to in Apache Kafka as a seek operation. 10 | 11 | .Procedure 12 | 13 | . Seek to a specific offset for partition 0 of the `bridge-quickstart-topic` topic: 14 | + 15 | [source,curl,subs=attributes+] 16 | ---- 17 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/positions \ 18 | -H 'content-type: application/vnd.kafka.v2+json' \ 19 | -d '{ 20 | "offsets": [ 21 | { 22 | "topic": "bridge-quickstart-topic", 23 | "partition": 0, 24 | "offset": 2 25 | } 26 | ] 27 | }' 28 | ---- 29 | + 30 | If the request is successful, the HTTP Bridge returns a `204` code only. 31 | 32 | . Submit a `GET` request to the `records` endpoint: 33 | + 34 | [source,curl,subs=attributes+] 35 | ---- 36 | curl -X GET http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/records \ 37 | -H 'accept: application/vnd.kafka.json.v2+json' 38 | ---- 39 | + 40 | The HTTP Bridge returns messages from the offset that you seeked to. 41 | 42 | . Restore the default message retrieval behavior by seeking to the last offset for the same partition. This time, use the xref:seektoend[positions/end] endpoint. 43 | + 44 | [source,curl,subs=attributes+] 45 | ---- 46 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/positions/end \ 47 | -H 'content-type: application/vnd.kafka.v2+json' \ 48 | -d '{ 49 | "partitions": [ 50 | { 51 | "topic": "bridge-quickstart-topic", 52 | "partition": 0 53 | } 54 | ] 55 | }' 56 | ---- 57 | + 58 | If the request is successful, the HTTP Bridge returns another `204` code. 59 | 60 | NOTE: You can also use the xref:seektobeginning[positions/beginning] endpoint to seek to the first offset for one or more partitions. 61 | 62 | .What to do next 63 | 64 | In this quickstart, you have used the HTTP Bridge to perform several common operations on a Kafka cluster. You can now xref:proc-bridge-deleting-consumer-{context}[delete the HTTP Bridge consumer] that you created earlier.
65 | 66 | [role="_additional-resources"] 67 | .Additional resources 68 | 69 | * xref:seek[POST /consumers/{groupid}/instances/{name}/positions] 70 | * xref:seektobeginning[POST /consumers/{groupid}/instances/{name}/positions/beginning] 71 | * xref:seektoend[POST /consumers/{groupid}/instances/{name}/positions/end] 72 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | include ./Makefile.os 2 | include ./Makefile.docker 3 | include ./Makefile.maven 4 | 5 | PROJECT_NAME ?= kafka-bridge 6 | GITHUB_VERSION ?= main 7 | RELEASE_VERSION ?= latest 8 | 9 | ifneq ($(RELEASE_VERSION),latest) 10 | GITHUB_VERSION = $(RELEASE_VERSION) 11 | endif 12 | 13 | .PHONY: all 14 | all: java_verify docker_build docker_push 15 | 16 | .PHONY: clean 17 | clean: java_clean 18 | 19 | .PHONY: release 20 | release: release_prepare release_maven release_package 21 | 22 | .PHONY: next_version 23 | next_version: 24 | echo $(shell echo $(NEXT_VERSION) | tr a-z A-Z) > release.version 25 | mvn versions:set -DnewVersion=$(shell echo $(NEXT_VERSION) | tr a-z A-Z) 26 | mvn versions:commit 27 | 28 | .PHONY: release_prepare 29 | release_prepare: 30 | echo "Update release.version to $(RELEASE_VERSION)" 31 | echo $(shell echo $(RELEASE_VERSION) | tr a-z A-Z) > release.version 32 | echo "Update pom versions to $(RELEASE_VERSION)" 33 | mvn versions:set -DnewVersion=$(shell echo $(RELEASE_VERSION) | tr a-z A-Z) 34 | mvn versions:commit 35 | 36 | .PHONY: release_maven 37 | release_maven: 38 | echo "Update pom versions to $(RELEASE_VERSION)" 39 | mvn versions:set -DnewVersion=$(shell echo $(RELEASE_VERSION) | tr a-z A-Z) 40 | mvn versions:commit 41 | 42 | .PHONY: release_package 43 | release_package: java_package 44 | 45 | ########## 46 | # Documentation targets 47 | ########## 48 | 49 | .PHONY: docu_html 50 | docu_html: docu_htmlclean docu_check 51 | mkdir -p documentation/html 52 | $(CP) -vrL documentation/book/images documentation/html/images 53 | # override snippetDir for asciidoctor to get right path to the snippets 54 | asciidoctor -v --failure-level WARN -t -dbook -a ProductVersion=$(RELEASE_VERSION) -a snippetDir=${PWD}/documentation/book/api/snippet/ documentation/book/bridge.adoc -o documentation/html/bridge.html 55 | 56 | .PHONY: docu_htmlnoheader 57 | docu_htmlnoheader: docu_htmlnoheaderclean docu_check 58 | mkdir -p documentation/htmlnoheader 59 | $(CP) -vrL documentation/book/images documentation/htmlnoheader/images 60 | # override snippetDir for asciidoctor to get right path to the snippets 61 | asciidoctor -v --failure-level WARN -t -dbook -a ProductVersion=$(RELEASE_VERSION) -a snippetDir=${PWD}/documentation/book/api/snippet/ -s documentation/book/bridge.adoc -o documentation/htmlnoheader/bridge.html 62 | 63 | .PHONY: docu_api 64 | docu_api: 65 | mvn $(MVN_ARGS) -P apidoc org.openapitools:openapi-generator-maven-plugin:generate@generate-apidoc 66 | 67 | .PHONY: docu_check 68 | docu_check: docu_api 69 | ./.azure/scripts/check_docs.sh 70 | 71 | .PHONY: docu_clean 72 | docu_clean: docu_htmlclean docu_htmlnoheaderclean 73 | 74 | .PHONY: docu_htmlclean 75 | docu_htmlclean: 76 | rm -rf documentation/html 77 | 78 | .PHONY: docu_htmlnoheaderclean 79 | docu_htmlnoheaderclean: 80 | rm -rf documentation/htmlnoheader 81 | 82 | .PHONY: docu_pushtowebsite 83 | docu_pushtowebsite: 84 | ./.azure/scripts/docu-push-to-website.sh -------------------------------------------------------------------------------- 
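The documentation targets above chain together: `docu_check` depends on `docu_api`, and both HTML targets run `docu_check` before rendering. A minimal local run, assuming Maven and `asciidoctor` are installed, might look like this sketch:

```
# Regenerate the OpenAPI-based snippets and validate the docs
make docu_api
# Render the full book and the headerless variant (both run docu_check first)
make docu_html docu_htmlnoheader
# Remove the generated HTML again
make docu_clean
```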
/src/main/java/io/strimzi/kafka/bridge/tracing/BridgeContextStorageProvider.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import io.opentelemetry.context.Context; 9 | import io.opentelemetry.context.ContextStorage; 10 | import io.opentelemetry.context.ContextStorageProvider; 11 | import io.opentelemetry.context.Scope; 12 | 13 | import java.util.concurrent.ConcurrentHashMap; 14 | import java.util.concurrent.ConcurrentMap; 15 | 16 | /** 17 | * Provider returning the custom OpenTelemetry context storage 18 | */ 19 | public class BridgeContextStorageProvider implements ContextStorageProvider { 20 | static final String ACTIVE_CONTEXT = "tracing.context"; 21 | 22 | /** 23 | * Constructor 24 | */ 25 | public BridgeContextStorageProvider() { 26 | } 27 | 28 | @Override 29 | public ContextStorage get() { 30 | return BridgeContextStorage.INSTANCE; 31 | } 32 | 33 | /** 34 | * Custom OpenTelemetry context storage 35 | * The Vert.x context storage cannot be used anymore having a part of the bridge not using a Vert.x instance (so NPEs around the corner) 36 | * This custom implementation provides the same behaviour as the Vert.x one but without the need of a Vert.x instance 37 | * It's needed in a transitioning phase with parts of the bridge still handled by Vert.x where the event loop is around, 38 | * see for example incoming HTTP requests 39 | * In this case the default OpenTelemetry ThreadLocalContextStorage doesn't work because for each new incoming HTTP request 40 | * the Context is the same as the previous request because bounded to the thread (Vert.x event loop) while it's should be new 41 | * It drives to spans grouped all together under the same trace when they are related to different HTTP requests 42 | * We should be able to get rid of this class when Vert.x will be totally out of the picture 43 | */ 44 | // TODO: evaluate to remove this class, back to the default ThreadLocalContextStorage, when Vert.x will be totally removed 45 | enum BridgeContextStorage implements ContextStorage { 46 | INSTANCE; 47 | 48 | private final ConcurrentMap data = new ConcurrentHashMap<>(); 49 | 50 | @Override 51 | public Scope attach(Context toAttach) { 52 | Context current = (Context) data.get(ACTIVE_CONTEXT); 53 | if (current == toAttach) { 54 | return Scope.noop(); 55 | } else { 56 | data.put(ACTIVE_CONTEXT, toAttach); 57 | return current == null ? () -> { 58 | data.remove(ACTIVE_CONTEXT); 59 | } : () -> { 60 | data.put(ACTIVE_CONTEXT, current); 61 | }; 62 | } 63 | } 64 | 65 | @Override 66 | public Context current() { 67 | return (Context) data.get(ACTIVE_CONTEXT); 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/HttpOpenApiOperation.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http; 7 | 8 | import io.vertx.core.Handler; 9 | import io.vertx.ext.web.RoutingContext; 10 | import org.apache.logging.log4j.LogManager; 11 | import org.apache.logging.log4j.Logger; 12 | 13 | /** 14 | * Represents an OpenApi operation with related logging 15 | */ 16 | public abstract class HttpOpenApiOperation implements Handler { 17 | 18 | protected final static String LOGGER_NAME_PREFIX = "http.openapi.operation."; 19 | 20 | protected final Logger log; 21 | protected final HttpOpenApiOperations operationId; 22 | 23 | /** 24 | * Constructor 25 | * 26 | * @param operationId operation ID 27 | */ 28 | public HttpOpenApiOperation(HttpOpenApiOperations operationId) { 29 | this.operationId = operationId; 30 | this.log = LogManager.getLogger(LOGGER_NAME_PREFIX + operationId.toString()); 31 | } 32 | 33 | /** 34 | * Process to run for a specific OpenAPI operation 35 | * 36 | * @param routingContext the routing context 37 | */ 38 | public abstract void process(RoutingContext routingContext); 39 | 40 | @Override 41 | public void handle(RoutingContext routingContext) { 42 | this.logRequest(routingContext); 43 | routingContext.addEndHandler(ignoredResult -> this.logResponse(routingContext)); 44 | this.process(routingContext); 45 | } 46 | 47 | protected void logRequest(RoutingContext routingContext) { 48 | String requestLogHeader = this.requestLogHeader(routingContext); 49 | log.info("{} Request: from {}, method = {}, path = {}", 50 | requestLogHeader, routingContext.request().remoteAddress(), 51 | routingContext.request().method(), 52 | routingContext.request().path()); 53 | log.debug("{} Request: headers = {}", requestLogHeader, routingContext.request().headers()); 54 | } 55 | 56 | protected void logResponse(RoutingContext routingContext) { 57 | String requestLogHeader = this.requestLogHeader(routingContext); 58 | log.info("{} Response: statusCode = {}, message = {}", 59 | requestLogHeader, routingContext.response().getStatusCode(), 60 | routingContext.response().getStatusMessage()); 61 | log.debug("{} Response: headers = {}", requestLogHeader, routingContext.response().headers()); 62 | } 63 | 64 | private String requestLogHeader(RoutingContext routingContext) { 65 | int requestId = (int) routingContext.data().computeIfAbsent("request-id", key -> System.identityHashCode(routingContext.request())); 66 | return String.format("[%s] %s", requestId, operationId.name()); 67 | } 68 | 69 | /** 70 | * @return the OpenAPI operation invoked 71 | */ 72 | public HttpOpenApiOperations getOperationId() { 73 | return this.operationId; 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/HttpUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http; 7 | 8 | import io.netty.handler.codec.http.HttpHeaderNames; 9 | import io.strimzi.kafka.bridge.http.converter.JsonUtils; 10 | import io.vertx.core.buffer.Buffer; 11 | import io.vertx.ext.web.RoutingContext; 12 | import org.apache.logging.log4j.LogManager; 13 | import org.apache.logging.log4j.Logger; 14 | 15 | /** 16 | * Provides some utility methods for HTTP request/response 17 | */ 18 | public class HttpUtils { 19 | private static final Logger LOGGER = LogManager.getLogger(HttpUtils.class); 20 | 21 | /** 22 | * Send an HTTP response 23 | * 24 | * @param routingContext the routing context used to send the HTTP response 25 | * @param statusCode the HTTP status code 26 | * @param contentType the content-type to set in the HTTP response 27 | * @param body the body to set in the HTTP response 28 | */ 29 | public static void sendResponse(RoutingContext routingContext, int statusCode, String contentType, byte[] body) { 30 | if (!routingContext.response().closed() && !routingContext.response().ended()) { 31 | routingContext.response().setStatusCode(statusCode); 32 | if (body != null) { 33 | LOGGER.debug("[{}] Response: body = {}", routingContext.get("request-id"), JsonUtils.bytesToJson(body)); 34 | routingContext.response().putHeader(HttpHeaderNames.CONTENT_TYPE, contentType); 35 | routingContext.response().putHeader(HttpHeaderNames.CONTENT_LENGTH, String.valueOf(body.length)); 36 | routingContext.response().write(Buffer.buffer(body)); 37 | } 38 | routingContext.response().end(); 39 | } else if (routingContext.response().ended()) { 40 | LOGGER.warn("[{}] Response: already ended!", routingContext.get("request-id").toString()); 41 | } 42 | } 43 | 44 | /** 45 | * Send a file over an HTTP response 46 | * 47 | * @param routingContext the routing context used to send the HTTP response 48 | * @param statusCode the HTTP status code 49 | * @param contentType the content-type to set in the HTTP response 50 | * @param filename path to the file to send 51 | */ 52 | public static void sendFile(RoutingContext routingContext, int statusCode, String contentType, String filename) { 53 | if (!routingContext.response().closed() && !routingContext.response().ended()) { 54 | routingContext.response().setStatusCode(statusCode); 55 | LOGGER.debug("[{}] Response: filename = {}", routingContext.get("request-id"), filename); 56 | routingContext.response().putHeader(HttpHeaderNames.CONTENT_TYPE, contentType).sendFile(filename); 57 | } else if (routingContext.response().ended()) { 58 | LOGGER.warn("[{}] Response: already ended!", routingContext.get("request-id").toString()); 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/metrics/StrimziMetricsCollectorTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
4 | */ 5 | package io.strimzi.kafka.bridge.metrics; 6 | 7 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter; 8 | import io.prometheus.metrics.model.registry.PrometheusRegistry; 9 | import io.prometheus.metrics.model.snapshots.MetricSnapshots; 10 | import org.junit.jupiter.api.Test; 11 | import org.mockito.Mockito; 12 | 13 | import java.io.ByteArrayOutputStream; 14 | import java.io.IOException; 15 | import java.nio.charset.StandardCharsets; 16 | 17 | import static org.hamcrest.MatcherAssert.assertThat; 18 | import static org.hamcrest.Matchers.containsString; 19 | import static org.hamcrest.Matchers.is; 20 | import static org.junit.jupiter.api.Assertions.assertThrows; 21 | import static org.mockito.ArgumentMatchers.any; 22 | import static org.mockito.Mockito.doAnswer; 23 | import static org.mockito.Mockito.doThrow; 24 | import static org.mockito.Mockito.mock; 25 | import static org.mockito.Mockito.when; 26 | 27 | class StrimziMetricsCollectorTest { 28 | @Test 29 | void shouldReturnMetrics() throws IOException { 30 | PrometheusRegistry mockPromRegistry = mock(PrometheusRegistry.class); 31 | MetricSnapshots mockSnapshots = mock(MetricSnapshots.class); 32 | when(mockPromRegistry.scrape()).thenReturn(mockSnapshots); 33 | 34 | PrometheusTextFormatWriter mockPromFormatter = mock(PrometheusTextFormatWriter.class); 35 | doAnswer(invocation -> { 36 | ByteArrayOutputStream stream = invocation.getArgument(0); 37 | stream.write("test_metric\n".getBytes(StandardCharsets.UTF_8)); 38 | return null; 39 | }).when(mockPromFormatter).write(any(), any()); 40 | 41 | MetricsCollector metricsCollector = new StrimziMetricsCollector(mockPromRegistry, mockPromFormatter); 42 | 43 | String result = metricsCollector.scrape(); 44 | assertThat(result, containsString("test_metric")); 45 | assertThat(result.getBytes(StandardCharsets.UTF_8).length, is(result.length())); 46 | } 47 | 48 | @Test 49 | void shouldHandleIoException() throws IOException { 50 | PrometheusRegistry mockPromRegistry = mock(PrometheusRegistry.class); 51 | MetricSnapshots mockSnapshots = mock(MetricSnapshots.class); 52 | when(mockPromRegistry.scrape()).thenReturn(mockSnapshots); 53 | 54 | PrometheusTextFormatWriter mockPromFormatter = mock(PrometheusTextFormatWriter.class); 55 | doThrow(new IOException("Test exception")) 56 | .when(mockPromFormatter).write(any(ByteArrayOutputStream.class), Mockito.eq(mockSnapshots)); 57 | 58 | MetricsCollector metricsCollector = new StrimziMetricsCollector(mockPromRegistry, mockPromFormatter); 59 | 60 | RuntimeException exception = assertThrows(RuntimeException.class, () -> metricsCollector.doScrape()); 61 | assertThat(exception.getMessage(), containsString("Test exception")); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /BUILDING.md: -------------------------------------------------------------------------------- 1 | # Building Strimzi Kafka Bridge 2 | 3 | Strimzi Kafka Bridge uses `make` as its main build system. 4 | Our `make` build supports several different targets, mainly for building the Java binaries and pushing Docker images. 5 | 6 | ## Build Pre-Requisites 7 | 8 | To build this project you must first install several command line utilities. 
9 | 10 | - [`make`](https://www.gnu.org/software/make/) - Make build system 11 | - [`mvn`](https://maven.apache.org/index.html) - Maven CLI 12 | - [`docker`](https://www.docker.com/) - Docker or [`podman`](https://podman.io/) 13 | 14 | In order to use `make`, these all need to be available in your `$PATH`. 15 | 16 | ### Mac OS 17 | 18 | The `make` build uses the GNU versions of the `find` and `sed` utilities and is not compatible with the BSD versions available on Mac OS. 19 | When using Mac OS, you have to install the GNU versions of `find` and `sed`. 20 | When using `brew`, you can do `brew install gnu-sed findutils grep coreutils asciidoctor`. 21 | This command will install the GNU versions as `gcp`, `ggrep`, `gsed` and `gfind`, and our `make` build will automatically pick them up and use them. 22 | 23 | ## Docker image 24 | 25 | ### Building Docker images 26 | 27 | The `docker_build` target will build the Docker image provided by the Strimzi Kafka Bridge project. 28 | You can build all Strimzi Docker images by calling `make docker_build` from the root of the repository. 29 | The `docker_build` target will always build the images under the `strimzi` organization. 30 | The `DOCKER_TAG` environment variable configures the Docker tag to use (default is `latest`). 31 | 32 | ### Tagging and pushing Docker images 33 | 34 | The `docker_tag` target can be used to tag the Docker images built by the `docker_build` target. 35 | This target is automatically called by the `docker_push` target and doesn't have to be called separately. 36 | 37 | To configure the `docker_tag` and `docker_push` targets you can set the following environment variables: 38 | * `DOCKER_ORG` configures the Docker organization for tagging/pushing the images (defaults to the value of the `$USER` environment variable) 39 | * `DOCKER_TAG` configures the Docker tag (default is `latest`) 40 | * `DOCKER_REGISTRY` configures the Docker registry where the image will be pushed (default is `docker.io`) 41 | 42 | ## Building everything 43 | 44 | The `make all` command can be used to trigger all the tasks above: build the Java code and the Docker image, tag it, and push it to the configured repository. 45 | 46 | `make` invokes Maven for packaging Java based applications. 47 | The `mvn` command can be customized by setting the `MVN_ARGS` environment variable when launching `make all`. 48 | For example, `MVN_ARGS=-DskipTests make all` can be used to avoid running the unit tests. 49 | 50 | ## Building container images for other platforms with Docker `buildx` 51 | 52 | Docker supports building images for different platforms using the `docker buildx` command. If you want to use it to 53 | build Strimzi images, you can just set the environment variable `DOCKER_BUILDX` to `buildx`, set the environment 54 | variable `DOCKER_BUILD_ARGS` to pass additional build options such as the platform, and run the build. For example, the 55 | following can be used to build Strimzi images for Linux on PowerPC/ppc64le architecture: 56 | 57 | ``` 58 | export DOCKER_BUILDX=buildx 59 | export DOCKER_BUILD_ARGS="--platform linux/ppc64le" 60 | make all 61 | ``` 62 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/tracing/TracingUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */ 5 | 6 | package io.strimzi.kafka.bridge.tracing; 7 | 8 | import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; 9 | import io.strimzi.kafka.bridge.config.BridgeConfig; 10 | import org.apache.kafka.clients.consumer.ConsumerRecord; 11 | import org.apache.kafka.common.header.Header; 12 | import org.apache.logging.log4j.LogManager; 13 | import org.apache.logging.log4j.Logger; 14 | 15 | import java.nio.charset.StandardCharsets; 16 | import java.util.HashMap; 17 | import java.util.Map; 18 | import java.util.Properties; 19 | 20 | import static io.strimzi.kafka.bridge.tracing.TracingConstants.OPENTELEMETRY; 21 | 22 | /** 23 | * Tracing util to hold app's Tracing instance. 24 | */ 25 | @SuppressFBWarnings({"MS_EXPOSE_REP"}) 26 | public class TracingUtil { 27 | private static final Logger LOGGER = LogManager.getLogger(TracingUtil.class); 28 | private static TracingHandle tracing = new NoopTracingHandle(); 29 | 30 | /** 31 | * @return the current tracing instance 32 | */ 33 | public static TracingHandle getTracing() { 34 | return tracing; 35 | } 36 | 37 | /** 38 | * Initialize the proper tracing system based on the bridge configuration 39 | * 40 | * @param config bridge configuration 41 | */ 42 | public static void initialize(BridgeConfig config) { 43 | String tracingConfig = config.getTracing(); 44 | if (tracingConfig != null) { 45 | if (tracingConfig.equals(OPENTELEMETRY)) { 46 | TracingHandle instance = new OpenTelemetryHandle(); 47 | 48 | String serviceName = instance.serviceName(config); 49 | if (serviceName != null) { 50 | LOGGER.info("Initializing OpenTelemetry tracing config with service name {}", serviceName); 51 | instance.initialize(); 52 | tracing = instance; 53 | } else { 54 | LOGGER.error("Tracing configuration cannot be initialized because {} environment variable is not defined", instance.envServiceName()); 55 | } 56 | } else { 57 | LOGGER.warn("Tracing with {} is not supported/valid", tracingConfig); 58 | } 59 | } 60 | } 61 | 62 | /** 63 | * We are interested in tracing headers here, 64 | * which are unique - single value per key. 
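 * For example (illustrative), a record produced by an OpenTelemetry-instrumented client typically carries a single
 * {@code traceparent} header, which becomes a single key/value entry in the returned map.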
65 | * 66 | * @param record Kafka consumer record 67 | * @param key type 68 | * @param value type 69 | * @return map of headers 70 | */ 71 | public static Map toHeaders(ConsumerRecord record) { 72 | Map headers = new HashMap<>(); 73 | for (Header header : record.headers()) { 74 | headers.put(header.key(), new String(header.value(), StandardCharsets.UTF_8)); 75 | } 76 | return headers; 77 | } 78 | 79 | static void addProperty(Properties props, String key, String value) { 80 | String previous = props.getProperty(key); 81 | if (previous != null) { 82 | props.setProperty(key, previous + "," + value); 83 | } else { 84 | props.setProperty(key, value); 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /.azure/cve-pipeline.yaml: -------------------------------------------------------------------------------- 1 | # Triggers 2 | trigger: none 3 | pr: none 4 | 5 | # Parameters 6 | parameters: 7 | - name: releaseVersion 8 | displayName: Release Version 9 | type: string 10 | - name: useSuffix 11 | displayName: Build suffixed images 12 | type: boolean 13 | default: true 14 | - name: releaseSuffix 15 | displayName: Release Suffix 16 | type: number 17 | - name: sourcePipelineId 18 | displayName: Pipeline ID of the source build 19 | type: number 20 | default: 34 21 | values: 22 | - 34 23 | - name: sourceBuildId 24 | displayName: Build ID of the source build 25 | type: number 26 | 27 | # Stages 28 | stages: 29 | - stage: container_build 30 | displayName: Prepare Container 31 | jobs: 32 | - template: 'templates/jobs/build_container.yaml' 33 | parameters: 34 | artifactSource: 'specific' 35 | artifactProject: 'strimzi' 36 | artifactPipeline: '${{ parameters.sourcePipelineId }}' 37 | artifactRunVersion: 'specific' 38 | artifactRunId: '${{ parameters.sourceBuildId }}' 39 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 40 | - stage: containers_publish_with_suffix 41 | displayName: Publish Containers for ${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }} 42 | dependsOn: 43 | - container_build 44 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-')) 45 | jobs: 46 | - template: 'templates/jobs/push_container.yaml' 47 | parameters: 48 | dockerTag: '${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}' 49 | artifactSource: 'current' 50 | artifactProject: 'strimzi' 51 | artifactPipeline: '' 52 | artifactRunVersion: '' 53 | artifactRunId: '' 54 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 55 | - stage: manual_validation 56 | displayName: Validate container before pushing container as ${{ parameters.releaseVersion }} 57 | dependsOn: 58 | - containers_publish_with_suffix 59 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-')) 60 | jobs: 61 | - job: waitForValidation 62 | displayName: Wait for container image validation 63 | pool: server 64 | timeoutInMinutes: 4320 # job times out in 3 days 65 | steps: 66 | - task: ManualValidation@0 67 | timeoutInMinutes: 4310 # task times out in 3 days 68 | inputs: 69 | notifyUsers: | 70 | github@scholzj.com 71 | xstejs24@gmail.com 72 | instructions: 'Please validate the container image' 73 | onTimeout: 'reject' 74 | - stage: containers_publish 75 | displayName: Publish Containers for ${{ parameters.releaseVersion }} 76 | dependsOn: 77 | - manual_validation 78 | - containers_publish_with_suffix 79 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-')) 80 | 
jobs: 81 | - template: 'templates/jobs/push_container.yaml' 82 | parameters: 83 | dockerTag: '${{ parameters.releaseVersion }}' 84 | artifactSource: 'current' 85 | artifactProject: 'strimzi' 86 | artifactPipeline: '' 87 | artifactRunVersion: '' 88 | artifactRunId: '' 89 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le'] 90 | -------------------------------------------------------------------------------- /documentation/book/api/template/index.mustache: -------------------------------------------------------------------------------- 1 | {{#headerAttributes}} 2 | :toc: left 3 | :numbered: 4 | :toclevels: 4 5 | :source-highlighter: highlightjs 6 | :keywords: openapi, rest, {{appName}} 7 | :specDir: {{specDir}} 8 | :snippetDir: {{snippetDir}} 9 | :generator-template: v1 2019-12-20 10 | :info-url: {{infoUrl}} 11 | :app-name: {{appName}} 12 | {{/headerAttributes}} 13 | 14 | = {{{appName}}} 15 | 16 | {{#useIntroduction}} 17 | == Introduction 18 | {{/useIntroduction}} 19 | {{^useIntroduction}} 20 | [abstract] 21 | .Abstract 22 | {{/useIntroduction}} 23 | {{{appDescription}}} 24 | 25 | {{#specinclude}}intro.adoc{{/specinclude}} 26 | 27 | {{#hasAuthMethods}} 28 | == Access 29 | 30 | {{#authMethods}} 31 | {{#isBasic}} 32 | {{#isBasicBasic}}* *HTTP Basic* Authentication _{{{name}}}_{{/isBasicBasic}} 33 | {{#isBasicBearer}}* *Bearer* Authentication {{/isBasicBearer}} 34 | {{#isHttpSignature}}* *HTTP signature* Authentication{{/isHttpSignature}} 35 | {{/isBasic}} 36 | {{#isOAuth}}* *OAuth* AuthorizationUrl: _{{authorizationUrl}}_, TokenUrl: _{{tokenUrl}}_ {{/isOAuth}} 37 | {{#isApiKey}}* *APIKey* KeyParamName: _{{keyParamName}}_, KeyInQuery: _{{isKeyInQuery}}_, KeyInHeader: _{{isKeyInHeader}}_{{/isApiKey}} 38 | {{/authMethods}} 39 | 40 | {{/hasAuthMethods}} 41 | 42 | == Endpoints 43 | 44 | {{#apiInfo}} 45 | {{#apis}} 46 | {{#operations}} 47 | 48 | [.{{baseName}}] 49 | === {{baseName}} 50 | 51 | {{#operation}} 52 | 53 | [.{{nickname}}] 54 | {{#useMethodAndPath}} 55 | ==== {{httpMethod}} {{path}} 56 | 57 | Operation Id:: {{nickname}} 58 | 59 | {{/useMethodAndPath}} 60 | {{^useMethodAndPath}} 61 | ==== {{nickname}} 62 | 63 | `{{httpMethod}} {{path}}` 64 | {{/useMethodAndPath}} 65 | 66 | {{{summary}}} 67 | 68 | ===== Description 69 | 70 | {{{notes}}} 71 | 72 | {{#specinclude}}{{path}}/{{httpMethod}}/spec.adoc{{/specinclude}} 73 | 74 | 75 | {{> params}} 76 | 77 | ===== Return Type 78 | 79 | 80 | {{#hasReference}} 81 | {{^returnSimpleType}}{{returnContainer}}[{{/returnSimpleType}}<<{{returnBaseType}}>>{{^returnSimpleType}}]{{/returnSimpleType}} 82 | {{/hasReference}} 83 | 84 | {{^hasReference}} 85 | {{#returnType}}<<{{.}}>>{{/returnType}} 86 | {{^returnType}}-{{/returnType}} 87 | {{/hasReference}} 88 | 89 | {{#hasProduces}} 90 | ===== Content Type 91 | 92 | {{#produces}} 93 | * {{{mediaType}}} 94 | {{/produces}} 95 | {{/hasProduces}} 96 | 97 | ===== Responses 98 | 99 | .HTTP Response Codes 100 | [cols="2,3,1"] 101 | |=== 102 | | Code | Message | Datatype 103 | 104 | {{#responses}} 105 | 106 | | {{code}} 107 | | {{message}} 108 | | {{#containerType}}{{dataType}}[<<{{baseType}}>>]{{/containerType}} {{^containerType}}<<{{dataType}}>>{{/containerType}} 109 | 110 | {{/responses}} 111 | |=== 112 | 113 | {{^skipExamples}} 114 | ===== Samples 115 | 116 | {{#snippetinclude}}{{path}}/{{httpMethod}}/http-request.adoc{{/snippetinclude}} 117 | {{#snippetinclude}}{{path}}/{{httpMethod}}/http-response.adoc{{/snippetinclude}} 118 | 119 | {{#snippetlink}}* wiremock data, 
{{path}}/{{httpMethod}}/{{httpMethod}}.json{{/snippetlink}} 120 | {{/skipExamples}} 121 | 122 | ifdef::internal-generation[] 123 | ===== Implementation 124 | {{#specinclude}}{{path}}/{{httpMethod}}/implementation.adoc{{/specinclude}} 125 | 126 | endif::internal-generation[] 127 | 128 | {{/operation}} 129 | {{/operations}} 130 | {{/apis}} 131 | {{/apiInfo}} 132 | 133 | {{> model}} -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/metrics/JmxMetricsCollectorTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | package io.strimzi.kafka.bridge.metrics; 6 | 7 | import io.prometheus.jmx.JmxCollector; 8 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter; 9 | import io.prometheus.metrics.model.registry.PrometheusRegistry; 10 | import io.prometheus.metrics.model.snapshots.MetricSnapshots; 11 | import org.junit.jupiter.api.Test; 12 | import org.mockito.Mockito; 13 | 14 | import java.io.ByteArrayOutputStream; 15 | import java.io.IOException; 16 | import java.nio.charset.StandardCharsets; 17 | 18 | import static org.hamcrest.MatcherAssert.assertThat; 19 | import static org.hamcrest.Matchers.containsString; 20 | import static org.hamcrest.Matchers.is; 21 | import static org.junit.jupiter.api.Assertions.assertThrows; 22 | import static org.mockito.ArgumentMatchers.any; 23 | import static org.mockito.Mockito.doAnswer; 24 | import static org.mockito.Mockito.doThrow; 25 | import static org.mockito.Mockito.mock; 26 | import static org.mockito.Mockito.when; 27 | 28 | class JmxMetricsCollectorTest { 29 | @Test 30 | void shouldReturnMetrics() throws IOException { 31 | JmxCollector mockJmxCollector = mock(JmxCollector.class); 32 | 33 | PrometheusRegistry mockPromRegistry = mock(PrometheusRegistry.class); 34 | MetricSnapshots mockSnapshots = mock(MetricSnapshots.class); 35 | when(mockPromRegistry.scrape()).thenReturn(mockSnapshots); 36 | 37 | PrometheusTextFormatWriter mockPromFormatter = mock(PrometheusTextFormatWriter.class); 38 | doAnswer(invocation -> { 39 | ByteArrayOutputStream stream = invocation.getArgument(0); 40 | stream.write("test_metric\n".getBytes(StandardCharsets.UTF_8)); 41 | return null; 42 | }).when(mockPromFormatter).write(any(), any()); 43 | 44 | MetricsCollector metricsCollector = new JmxMetricsCollector(mockJmxCollector, mockPromRegistry, mockPromFormatter); 45 | 46 | String result = metricsCollector.scrape(); 47 | assertThat(result, containsString("test_metric")); 48 | assertThat(result.getBytes(StandardCharsets.UTF_8).length, is(result.length())); 49 | } 50 | 51 | @Test 52 | void shouldHandleIoException() throws IOException { 53 | JmxCollector mockJmxCollector = mock(JmxCollector.class); 54 | 55 | PrometheusRegistry mockPromRegistry = mock(PrometheusRegistry.class); 56 | MetricSnapshots mockSnapshots = mock(MetricSnapshots.class); 57 | when(mockPromRegistry.scrape()).thenReturn(mockSnapshots); 58 | 59 | PrometheusTextFormatWriter mockPromFormatter = mock(PrometheusTextFormatWriter.class); 60 | doThrow(new IOException("Test exception")) 61 | .when(mockPromFormatter).write(any(ByteArrayOutputStream.class), Mockito.eq(mockSnapshots)); 62 | 63 | MetricsCollector metricsCollector = new JmxMetricsCollector(mockJmxCollector, mockPromRegistry, mockPromFormatter); 64 | 65 | RuntimeException exception = 
assertThrows(RuntimeException.class, () -> metricsCollector.doScrape()); 66 | assertThat(exception.getMessage(), containsString("Test exception")); 67 | } 68 | 69 | @Test 70 | void shouldThrowWithInvalidYaml() { 71 | assertThrows(ClassCastException.class, () -> new JmxMetricsCollector("invalid")); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/utils/Urls.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.utils; 7 | 8 | public class Urls { 9 | 10 | private static final String SCHEME = "http://"; 11 | public static final String BRIDGE_HOST = "127.0.0.1"; 12 | public static final int BRIDGE_PORT = 8080; 13 | public static final int BRIDGE_SSL_PORT = 8443; 14 | public static final int BRIDGE_MANAGEMENT_PORT = 8081; 15 | private static final String BRIDGE_ADDRESS = SCHEME + BRIDGE_HOST + ":" + BRIDGE_PORT; 16 | 17 | private static final String CONSUMERS_PATH = "/consumers/"; 18 | private static final String INSTANCES_PATH = "/instances/"; 19 | private static final String POSITIONS_BEGINNING_PATH = "/positions/beginning"; 20 | private static final String POSITIONS_END_PATH = "/positions/end"; 21 | private static final String POSITIONS_PATH = "/positions"; 22 | private static final String SUBSCRIPTION_PATH = "/subscription"; 23 | private static final String TOPICS_PATH = "/topics/"; 24 | private static final String PARTITIONS_PATH = "/partitions/"; 25 | private static final String ASSIGNMENTS_PATH = "/assignments"; 26 | private static final String OFFSETS_PATH = "/offsets"; 27 | private static final String RECORDS_PATH = "/records"; 28 | 29 | public static String consumer(String groupId) { 30 | return BRIDGE_ADDRESS + CONSUMERS_PATH + groupId; 31 | } 32 | 33 | public static String consumerInstance(String groupId, String name) { 34 | return BRIDGE_ADDRESS + CONSUMERS_PATH + groupId + INSTANCES_PATH + name; 35 | } 36 | 37 | public static String consumerInstancePositionBeginning(String groupId, String name) { 38 | return consumerInstance(groupId, name) + POSITIONS_BEGINNING_PATH; 39 | } 40 | 41 | public static String consumerInstancePositionEnd(String groupId, String name) { 42 | return consumerInstance(groupId, name) + POSITIONS_END_PATH; 43 | } 44 | 45 | public static String consumerInstancePosition(String groupId, String name) { 46 | return consumerInstance(groupId, name) + POSITIONS_PATH; 47 | } 48 | 49 | public static String consumerInstanceSubscription(String groupId, String name) { 50 | return consumerInstance(groupId, name) + SUBSCRIPTION_PATH; 51 | } 52 | 53 | public static String consumerInstanceAssignments(String groupId, String name) { 54 | return consumerInstance(groupId, name) + ASSIGNMENTS_PATH; 55 | } 56 | 57 | public static String consumerInstanceOffsets(String groupId, String name) { 58 | return consumerInstance(groupId, name) + OFFSETS_PATH; 59 | } 60 | 61 | public static String consumerInstanceRecords(String groupId, String name, Integer timeout, Integer maxBytes) { 62 | return consumerInstance(groupId, name) + RECORDS_PATH 63 | + "?" 64 | + (timeout != null ? "timeout=" + timeout : "") 65 | + (timeout != null && maxBytes != null ? "&" : "") 66 | + (maxBytes != null ? 
"max_bytes=" + maxBytes : ""); 67 | } 68 | 69 | public static String consumerInstanceRecords(String groupId, String name) { 70 | return consumerInstance(groupId, name) + RECORDS_PATH; 71 | } 72 | 73 | public static String producerTopic(String topic) { 74 | return BRIDGE_ADDRESS + TOPICS_PATH + topic; 75 | } 76 | 77 | public static String producerTopicPartition(String topic, Object partitions) { 78 | return BRIDGE_ADDRESS + TOPICS_PATH + topic + PARTITIONS_PATH + partitions; 79 | } 80 | 81 | } 82 | -------------------------------------------------------------------------------- /src/test/java/io/strimzi/kafka/bridge/config/ConfigRetrieverTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import org.apache.kafka.clients.CommonClientConfigs; 9 | import org.apache.kafka.clients.consumer.ConsumerConfig; 10 | import org.apache.kafka.clients.producer.ProducerConfig; 11 | import org.junit.jupiter.api.Test; 12 | 13 | import java.io.FileNotFoundException; 14 | import java.io.IOException; 15 | import java.util.HashMap; 16 | import java.util.Map; 17 | 18 | import static io.strimzi.kafka.bridge.config.BridgeConfig.BRIDGE_ID; 19 | import static org.hamcrest.MatcherAssert.assertThat; 20 | import static org.hamcrest.Matchers.is; 21 | import static org.junit.jupiter.api.Assertions.assertThrows; 22 | 23 | /** 24 | * Unit tests for the ConfigRetriever class 25 | */ 26 | public class ConfigRetrieverTest { 27 | 28 | @Test 29 | public void testApplicationPropertiesFile() throws IOException { 30 | String path = getClass().getClassLoader().getResource("application.properties").getPath(); 31 | Map config = ConfigRetriever.getConfig(path); 32 | BridgeConfig bridgeConfig = BridgeConfig.fromMap(config); 33 | 34 | assertThat(bridgeConfig.getBridgeID(), is("my-bridge")); 35 | 36 | assertThat(bridgeConfig.getKafkaConfig().getConfig().size(), is(1)); 37 | assertThat(bridgeConfig.getKafkaConfig().getConfig().get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), is("localhost:9092")); 38 | 39 | assertThat(bridgeConfig.getKafkaConfig().getAdminConfig().getConfig().size(), is(0)); 40 | 41 | assertThat(bridgeConfig.getKafkaConfig().getProducerConfig().getConfig().size(), is(1)); 42 | assertThat(bridgeConfig.getKafkaConfig().getProducerConfig().getConfig().get(ProducerConfig.ACKS_CONFIG), is("1")); 43 | 44 | assertThat(bridgeConfig.getKafkaConfig().getConsumerConfig().getConfig().size(), is(1)); 45 | assertThat(bridgeConfig.getKafkaConfig().getConsumerConfig().getConfig().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), is("earliest")); 46 | 47 | assertThat(bridgeConfig.getHttpConfig().getConfig().size(), is(2)); 48 | assertThat(bridgeConfig.getHttpConfig().getHost(), is("0.0.0.0")); 49 | assertThat(bridgeConfig.getHttpConfig().getPort(), is(8080)); 50 | } 51 | 52 | @Test 53 | public void testEnvVarOverride() throws IOException { 54 | // "simulating" an addition to the current environment variables 55 | Map env = new HashMap<>(); 56 | env.putAll(System.getenv()); 57 | env.put(BRIDGE_ID, "different-bridge-id"); 58 | 59 | String path = getClass().getClassLoader().getResource("application.properties").getPath(); 60 | Map config = ConfigRetriever.getConfig(path, env); 61 | BridgeConfig bridgeConfig = BridgeConfig.fromMap(config); 62 | 63 | assertThat(bridgeConfig.getBridgeID(), 
is(env.get(BRIDGE_ID))); 64 | } 65 | 66 | @Test 67 | public void testNoApplicationPropertiesFile() throws IOException { 68 | assertThrows(FileNotFoundException.class, () -> ConfigRetriever.getConfig("no-existing.properties")); 69 | } 70 | 71 | @Test 72 | public void testWrongApplicationPropertiesFile() throws IOException { 73 | String path = getClass().getClassLoader().getResource("wrong.properties").getPath(); 74 | Map config = ConfigRetriever.getConfig(path); 75 | BridgeConfig bridgeConfig = BridgeConfig.fromMap(config); 76 | 77 | assertThat(bridgeConfig.getConfig().size(), is(0)); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/http/HttpBridgeContext.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.http; 7 | 8 | import io.strimzi.kafka.bridge.ConsumerInstanceId; 9 | import io.vertx.core.http.HttpConnection; 10 | 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | 14 | /** 15 | * Context class which is used for storing endpoints. 16 | * Using context in lower-level classes for better state determination. 17 | * 18 | * @param type of Kafka message key for the stored endpoints 19 | * @param type of Kafka message payload for the stored endpoints 20 | */ 21 | public class HttpBridgeContext { 22 | 23 | private final Map> httpSinkEndpoints = new HashMap<>(); 24 | private final Map> httpSourceEndpoints = new HashMap<>(); 25 | private HttpAdminBridgeEndpoint httpAdminBridgeEndpoint; 26 | private HttpOpenApiOperations openApiOperation; 27 | 28 | /** 29 | * @return map of the HTTP sink endpoints 30 | */ 31 | public Map> getHttpSinkEndpoints() { 32 | return this.httpSinkEndpoints; 33 | } 34 | 35 | /** 36 | * @return map of the HTTP source endpoints 37 | */ 38 | public Map> getHttpSourceEndpoints() { 39 | return this.httpSourceEndpoints; 40 | } 41 | 42 | /** 43 | * @return the HTTP admin endpoint 44 | */ 45 | public HttpAdminBridgeEndpoint getHttpAdminEndpoint() { 46 | return this.httpAdminBridgeEndpoint; 47 | } 48 | 49 | /** 50 | * Sets the HTTP admin endpoint 51 | * 52 | * @param httpAdminBridgeEndpoint the HTTP admin endpoint 53 | */ 54 | void setHttpAdminEndpoint(HttpAdminBridgeEndpoint httpAdminBridgeEndpoint) { 55 | this.httpAdminBridgeEndpoint = httpAdminBridgeEndpoint; 56 | } 57 | 58 | /** 59 | * Set the OpenAPI operation invoked 60 | * 61 | * @param openApiOperation OpenAPI operation 62 | */ 63 | public void setOpenApiOperation(HttpOpenApiOperations openApiOperation) { 64 | this.openApiOperation = openApiOperation; 65 | } 66 | 67 | /** 68 | * @return the OpenAPI operation invoked 69 | */ 70 | public HttpOpenApiOperations getOpenApiOperation() { 71 | return this.openApiOperation; 72 | } 73 | 74 | /** 75 | * Close all the HTTP sink endpoints 76 | */ 77 | public void closeAllHttpSinkBridgeEndpoints() { 78 | for (Map.Entry> sink: getHttpSinkEndpoints().entrySet()) { 79 | if (sink.getValue() != null) 80 | sink.getValue().close(); 81 | } 82 | getHttpSinkEndpoints().clear(); 83 | } 84 | 85 | /** 86 | * Close all the HTTP source endpoints 87 | */ 88 | public void closeAllHttpSourceBridgeEndpoints() { 89 | for (Map.Entry> source: getHttpSourceEndpoints().entrySet()) { 90 | if (source.getValue() != null) 91 | source.getValue().close(); 92 | } 93 | 
getHttpSourceEndpoints().clear(); 94 | } 95 | 96 | /** 97 | * Close the HTTP admin client endpoint 98 | */ 99 | public void closeHttpAdminClientEndpoint() { 100 | if (this.httpAdminBridgeEndpoint != null) 101 | this.httpAdminBridgeEndpoint.close(); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/main/resources/jmx_metrics_config.yaml: -------------------------------------------------------------------------------- 1 | lowercaseOutputName: true 2 | 3 | rules: 4 | # more specific rules to consumer and producer with topic related information 5 | - pattern: kafka.producer<>([a-z-]+)-total 6 | name: strimzi_bridge_kafka_producer_$4_total 7 | type: COUNTER 8 | labels: 9 | type: "$1" 10 | clientId: "$2" 11 | topic: "$3" 12 | - pattern: kafka.producer<>([a-z-]+) 13 | name: strimzi_bridge_kafka_producer_$4 14 | type: GAUGE 15 | labels: 16 | type: "$1" 17 | clientId: "$2" 18 | topic: "$3" 19 | - pattern: kafka.consumer<>([a-z-]+)-total 20 | name: strimzi_bridge_kafka_consumer_$4_total 21 | type: COUNTER 22 | labels: 23 | type: "$1" 24 | clientId: "$2" 25 | topic: "$3" 26 | - pattern: kafka.consumer<>([a-z-]+) 27 | name: strimzi_bridge_kafka_consumer_$4 28 | type: GAUGE 29 | labels: 30 | type: "$1" 31 | clientId: "$2" 32 | topic: "$3" 33 | # more general metrics 34 | - pattern: kafka.(\w+)<>([a-z-]+-total-[a-z-]+) # handles the metrics with total in the middle of the metric name 35 | name: strimzi_bridge_kafka_$1_$4 36 | type: GAUGE 37 | labels: 38 | type: "$2" 39 | clientId: "$3" 40 | - pattern: kafka.(\w+)<>([a-z-]+)-total 41 | name: strimzi_bridge_kafka_$1_$4_total 42 | type: COUNTER 43 | labels: 44 | type: "$2" 45 | clientId: "$3" 46 | - pattern: kafka.(\w+)<>([a-z-]+) 47 | name: strimzi_bridge_kafka_$1_$4 48 | type: GAUGE 49 | labels: 50 | type: "$2" 51 | clientId: "$3" 52 | # OAuth Metrics 53 | # WARNING: Make sure that the ordering of the attributes is the same as in MBean names 54 | - pattern: "strimzi.oauth<>(count|totalTimeMs):" 55 | name: "strimzi_oauth_$1_$12" 56 | type: COUNTER 57 | labels: 58 | context: "$2" 59 | kind: "$3" 60 | host: "$4" 61 | path: "$5" 62 | "$6": "$7" 63 | "$8": "$9" 64 | "$10": "$11" 65 | - pattern: "strimzi.oauth<>(count|totalTimeMs):" 66 | name: "strimzi_oauth_$1_$10" 67 | type: COUNTER 68 | labels: 69 | context: "$2" 70 | kind: "$3" 71 | host: "$4" 72 | path: "$5" 73 | "$6": "$7" 74 | "$8": "$9" 75 | - pattern: "strimzi.oauth<>(count|totalTimeMs):" 76 | name: "strimzi_oauth_$1_$8" 77 | type: COUNTER 78 | labels: 79 | context: "$2" 80 | kind: "$3" 81 | host: "$4" 82 | path: "$5" 83 | "$6": "$7" 84 | - pattern: "strimzi.oauth<>(.+):" 85 | name: "strimzi_oauth_$1_$12" 86 | type: GAUGE 87 | labels: 88 | context: "$2" 89 | kind: "$3" 90 | host: "$4" 91 | path: "$5" 92 | "$6": "$7" 93 | "$8": "$9" 94 | "$10": "$11" 95 | - pattern: "strimzi.oauth<>(.+):" 96 | name: "strimzi_oauth_$1_$10" 97 | type: GAUGE 98 | labels: 99 | context: "$2" 100 | kind: "$3" 101 | host: "$4" 102 | path: "$5" 103 | "$6": "$7" 104 | "$8": "$9" 105 | - pattern: "strimzi.oauth<>(.+):" 106 | name: "strimzi_oauth_$1_$8" 107 | type: GAUGE 108 | labels: 109 | context: "$2" 110 | kind: "$3" 111 | host: "$4" 112 | path: "$5" 113 | "$6": "$7" 114 | -------------------------------------------------------------------------------- /src/main/java/io/strimzi/kafka/bridge/config/KafkaConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Strimzi authors. 
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 4 | */ 5 | 6 | package io.strimzi.kafka.bridge.config; 7 | 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | import java.util.stream.Collectors; 11 | 12 | /** 13 | * Apache Kafka related configuration 14 | */ 15 | public class KafkaConfig extends AbstractConfig { 16 | 17 | /** Prefix for Kafka related configuration parameters */ 18 | public static final String KAFKA_CONFIG_PREFIX = "kafka."; 19 | 20 | private final KafkaProducerConfig producerConfig; 21 | private final KafkaConsumerConfig consumerConfig; 22 | private final KafkaAdminConfig adminConfig; 23 | 24 | /** 25 | * Constructor 26 | * 27 | * @param config Kafka common configuration parameters map 28 | * @param consumerConfig Kafka consumer related configuration 29 | * @param producerConfig Kafka producer related configuration 30 | * @param adminConfig Kafka admin related configuration 31 | */ 32 | private KafkaConfig(Map config, KafkaConsumerConfig consumerConfig, KafkaProducerConfig producerConfig, KafkaAdminConfig adminConfig) { 33 | super(config); 34 | this.consumerConfig = consumerConfig; 35 | this.producerConfig = producerConfig; 36 | this.adminConfig = adminConfig; 37 | } 38 | 39 | /** 40 | * @return the Kafka producer configuration 41 | */ 42 | public KafkaProducerConfig getProducerConfig() { 43 | return this.producerConfig; 44 | } 45 | 46 | /** 47 | * @return the Kafka consumer configuration 48 | */ 49 | public KafkaConsumerConfig getConsumerConfig() { 50 | return this.consumerConfig; 51 | } 52 | 53 | /** 54 | * @return the Kafka admin configuration 55 | */ 56 | public KafkaAdminConfig getAdminConfig() { 57 | return this.adminConfig; 58 | } 59 | 60 | /** 61 | * Loads Kafka related configuration parameters from a related map 62 | * 63 | * @param map map from which loading configuration parameters 64 | * @return Kafka related configuration 65 | */ 66 | public static KafkaConfig fromMap(Map map) { 67 | KafkaProducerConfig producerConfig = KafkaProducerConfig.fromMap(map); 68 | KafkaConsumerConfig consumerConfig = KafkaConsumerConfig.fromMap(map); 69 | KafkaAdminConfig adminConfig = KafkaAdminConfig.fromMap(map); 70 | 71 | // filter the common Kafka related configuration parameters, stripping the prefix as well 72 | return new KafkaConfig(map.entrySet().stream() 73 | .filter(e -> e.getKey().startsWith(KafkaConfig.KAFKA_CONFIG_PREFIX) && 74 | !e.getKey().startsWith(KafkaConsumerConfig.KAFKA_CONSUMER_CONFIG_PREFIX) && 75 | !e.getKey().startsWith(KafkaProducerConfig.KAFKA_PRODUCER_CONFIG_PREFIX) && 76 | !e.getKey().startsWith(KafkaAdminConfig.KAFKA_ADMIN_CONFIG_PREFIX)) 77 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaConfig.KAFKA_CONFIG_PREFIX.length()), Map.Entry::getValue)), 78 | consumerConfig, producerConfig, adminConfig); 79 | } 80 | 81 | @Override 82 | public String toString() { 83 | Map configToString = this.hidePasswords(); 84 | return "KafkaConfig(" + 85 | "config=" + configToString + 86 | ",consumerConfig=" + this.consumerConfig + 87 | ",producerConfig=" + this.producerConfig + 88 | ",adminConfig=" + this.adminConfig + 89 | ")"; 90 | } 91 | 92 | /** 93 | * Hides Kafka related password(s) configuration (i.e. 
truststore and keystore) 94 | * by replacing each actual password with [hidden] string 95 | * 96 | * @return updated configuration with hidden password(s) 97 | */ 98 | private Map hidePasswords() { 99 | Map configToString = new HashMap<>(this.config); 100 | configToString.entrySet().stream() 101 | .filter(e -> e.getKey().contains("password")) 102 | .forEach(e -> e.setValue("[hidden]")); 103 | return configToString; 104 | } 105 | } 106 | --------------------------------------------------------------------------------
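Taken together with `ConfigRetriever` and `BridgeConfig`, `KafkaConfig.fromMap` is what routes prefixed keys from the properties file to the right client: plain `kafka.*` keys become common client settings, while `kafka.producer.*`, `kafka.consumer.*` and `kafka.admin.*` keys feed the producer, consumer and admin configurations respectively. A quick way to see this end to end, assuming the Java artifacts have already been built and a Kafka broker is reachable at `localhost:9092`, is to start the bridge with the sample configuration shipped in the repository, using the same `--config-file` flag that the container entrypoint passes:

```
# Illustrative only: run from the repository root after building the project
./bin/kafka_bridge_run.sh --config-file=config/application.properties
```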