├── documentation
│   ├── book
│   │   ├── common
│   │   ├── images
│   │   ├── modules
│   │   ├── assemblies
│   │   ├── api
│   │   │   ├── .openapi-generator
│   │   │   │   ├── FILES
│   │   │   │   ├── VERSION
│   │   │   │   └── openapi.json-generate-apidoc.sha256
│   │   │   ├── snippet
│   │   │   │   ├── topics
│   │   │   │   │   ├── GET
│   │   │   │   │   │   └── http-response.adoc
│   │   │   │   │   └── {topicname}
│   │   │   │   │       ├── partitions
│   │   │   │   │       │   ├── {partitionid}
│   │   │   │   │       │   │   ├── POST
│   │   │   │   │       │   │   │   ├── http-request.adoc
│   │   │   │   │       │   │   │   └── http-response.adoc
│   │   │   │   │       │   │   ├── offsets
│   │   │   │   │       │   │   │   └── GET
│   │   │   │   │       │   │   │       └── http-response.adoc
│   │   │   │   │       │   │   └── GET
│   │   │   │   │       │   │       └── http-response.adoc
│   │   │   │   │       │   └── GET
│   │   │   │   │       │       └── http-response.adoc
│   │   │   │   │       ├── POST
│   │   │   │   │       │   ├── http-request.adoc
│   │   │   │   │       │   └── http-response.adoc
│   │   │   │   │       └── GET
│   │   │   │   │           └── http-response.adoc
│   │   │   │   ├── GET
│   │   │   │   │   └── http-response.adoc
│   │   │   │   ├── consumers
│   │   │   │   │   └── {groupid}
│   │   │   │   │       ├── instances
│   │   │   │   │       │   └── {name}
│   │   │   │   │       │       ├── subscription
│   │   │   │   │       │       │   ├── POST
│   │   │   │   │       │       │   │   ├── http-request.adoc
│   │   │   │   │       │       │   │   └── http-response.adoc
│   │   │   │   │       │       │   ├── DELETE
│   │   │   │   │       │       │   │   └── http-response.adoc
│   │   │   │   │       │       │   └── GET
│   │   │   │   │       │       │       └── http-response.adoc
│   │   │   │   │       │       ├── DELETE
│   │   │   │   │       │       │   └── http-response.adoc
│   │   │   │   │       │       ├── offsets
│   │   │   │   │       │       │   └── POST
│   │   │   │   │       │       │       ├── http-response.adoc
│   │   │   │   │       │       │       └── http-request.adoc
│   │   │   │   │       │       ├── positions
│   │   │   │   │       │       │   ├── POST
│   │   │   │   │       │       │   │   ├── http-response.adoc
│   │   │   │   │       │       │   │   └── http-request.adoc
│   │   │   │   │       │       │   ├── end
│   │   │   │   │       │       │   │   └── POST
│   │   │   │   │       │       │   │       ├── http-response.adoc
│   │   │   │   │       │       │   │       └── http-request.adoc
│   │   │   │   │       │       │   └── beginning
│   │   │   │   │       │       │       └── POST
│   │   │   │   │       │       │           ├── http-response.adoc
│   │   │   │   │       │       │           └── http-request.adoc
│   │   │   │   │       │       ├── assignments
│   │   │   │   │       │       │   └── POST
│   │   │   │   │       │       │       ├── http-request.adoc
│   │   │   │   │       │       │       └── http-response.adoc
│   │   │   │   │       │       └── records
│   │   │   │   │       │           └── GET
│   │   │   │   │       │               └── http-response.adoc
│   │   │   │   │       └── POST
│   │   │   │   │           ├── http-request.adoc
│   │   │   │   │           └── http-response.adoc
│   │   │   │   └── admin
│   │   │   │       └── topics
│   │   │   │           └── POST
│   │   │   │               └── http-request.adoc
│   │   │   ├── .openapi-generator-ignore
│   │   │   └── template
│   │   │       └── index.mustache
│   │   └── bridge.adoc
│   ├── assemblies
│   │   ├── modules
│   │   ├── assembly-http-bridge-overview.adoc
│   │   ├── assembly-http-bridge-config.adoc
│   │   └── assembly-http-bridge-quickstart.adoc
│   ├── images
│   │   └── kafka-bridge.png
│   ├── common
│   │   ├── revision-info.adoc
│   │   └── attributes.adoc
│   └── modules
│       ├── proc-downloading-http-bridge.adoc
│       ├── con-securing-http-bridge.adoc
│       ├── con-overview-open-api-spec-http-bridge.adoc
│       ├── proc-bridge-deleting-consumer.adoc
│       ├── proc-configuring-http-bridge-jmx-metrics.adoc
│       ├── con-securing-http-interface.adoc
│       ├── proc-installing-http-bridge.adoc
│       ├── proc-bridge-committing-consumer-offsets-to-log.adoc
│       ├── con-loggers-http-bridge.adoc
│       ├── con-overview-components-http-bridge.adoc
│       ├── proc-configuring-http-bridge-smr-metrics.adoc
│       ├── proc-bridge-subscribing-consumer-topics.adoc
│       ├── con-overview-running-http-bridge.adoc
│       ├── proc-creating-http-bridge-consumer.adoc
│       ├── proc-bridge-retrieving-latest-messages-from-consumer.adoc
│       └── proc-bridge-seeking-offsets-for-partition.adoc
├── release.version
├── src
│   ├── test
│   │   ├── resources
│   │   │   ├── wrong.properties
│   │   │   ├── application.properties
│   │   │   └── log4j2.properties
│   │   └── java
│   │       └── io
│   │           └── strimzi
│   │               └── kafka
│   │                   └── bridge
│   │                       ├── Constants.java
│   │                       ├── tracing
│   │                       │   └── OpenTelemetryTest.java
│   │                       ├── clients
│   │                       │   └── ClientHandlerBase.java
│   │                       ├── utils
│   │                       │   ├── KafkaJsonSerializer.java
│   │                       │   ├── KafkaJsonDeserializer.java
│   │                       │   ├── Utils.java
│   │                       │   └── Urls.java
│   │                       ├── http
│   │                       │   ├── services
│   │                       │   │   ├── BaseService.java
│   │                       │   │   ├── ProducerService.java
│   │                       │   │   └── SeekService.java
│   │                       │   ├── InvalidProducerIT.java
│   │                       │   ├── tools
│   │                       │   │   ├── ExtensionContextParameterResolver.java
│   │                       │   │   └── TestSeparator.java
│   │                       │   └── StaticIT.java
│   │                       ├── metrics
│   │                       │   ├── StrimziMetricsCollectorTest.java
│   │                       │   └── JmxMetricsCollectorTest.java
│   │                       └── config
│   │                           └── ConfigRetrieverTest.java
│   └── main
│       ├── resources
│       │   ├── META-INF
│       │   │   └── services
│       │   │       └── io.opentelemetry.context.ContextStorageProvider
│       │   ├── log4j2.properties
│       │   └── jmx_metrics_config.yaml
│       ├── java
│       │   └── io
│       │       └── strimzi
│       │           └── kafka
│       │               └── bridge
│       │                   ├── http
│       │                   │   ├── model
│       │                   │   │   ├── HttpBridgeResult.java
│       │                   │   │   └── HttpBridgeError.java
│       │                   │   ├── converter
│       │                   │   │   ├── JsonDecodeException.java
│       │                   │   │   └── JsonEncodeException.java
│       │                   │   ├── HttpBridgeEndpoint.java
│       │                   │   ├── HttpOpenApiOperations.java
│       │                   │   ├── HttpOpenApiOperation.java
│       │                   │   ├── HttpUtils.java
│       │                   │   └── HttpBridgeContext.java
│       │                   ├── IllegalEmbeddedFormatException.java
│       │                   ├── Handler.java
│       │                   ├── config
│       │                   │   ├── AbstractConfig.java
│       │                   │   ├── KafkaAdminConfig.java
│       │                   │   ├── KafkaConsumerConfig.java
│       │                   │   ├── KafkaProducerConfig.java
│       │                   │   ├── ConfigRetriever.java
│       │                   │   └── KafkaConfig.java
│       │                   ├── tracing
│       │                   │   ├── TracingConstants.java
│       │                   │   ├── SpanHandle.java
│       │                   │   ├── NoopTracingHandle.java
│       │                   │   ├── TracingHandle.java
│       │                   │   ├── BridgeContextStorageProvider.java
│       │                   │   └── TracingUtil.java
│       │                   ├── BridgeContentType.java
│       │                   ├── EmbeddedFormat.java
│       │                   ├── metrics
│       │                   │   ├── MetricsType.java
│       │                   │   ├── StrimziMetricsCollector.java
│       │                   │   ├── MetricsCollector.java
│       │                   │   └── JmxMetricsCollector.java
│       │                   ├── SinkTopicSubscription.java
│       │                   ├── LoggingPartitionsRebalance.java
│       │                   ├── ConsumerInstanceId.java
│       │                   └── converter
│       │                       └── MessageConverter.java
│       └── assembly
│           └── assembly.xml
├── .azure
│   ├── templates
│   │   ├── steps
│   │   │   ├── prerequisites
│   │   │   │   ├── install_syft.yaml
│   │   │   │   ├── install_cosign.yaml
│   │   │   │   ├── install_asciidoc.yaml
│   │   │   │   ├── install_java.yaml
│   │   │   │   └── install_docker.yaml
│   │   │   └── maven_cache.yaml
│   │   └── jobs
│   │       ├── publish_docs.yaml
│   │       ├── release_artifacts.yaml
│   │       ├── build_docs.yaml
│   │       ├── deploy_java.yaml
│   │       ├── build_container.yaml
│   │       └── build_java.yaml
│   ├── scripts
│   │   ├── install_cosign.sh
│   │   ├── settings.xml
│   │   ├── install_syft.sh
│   │   ├── push-to-central.sh
│   │   ├── docu-push-to-website.sh
│   │   └── check_docs.sh
│   ├── release-pipeline.yaml
│   ├── build-pipeline.yaml
│   └── cve-pipeline.yaml
├── GOVERNANCE.md
├── MAINTAINERS.md
├── .checkstyle
│   ├── java.header
│   └── suppressions.xml
├── CODE_OF_CONDUCT.md
├── Makefile.os
├── .spotbugs
│   └── spotbugs-exclude.xml
├── bin
│   ├── docker
│   │   ├── to_bytes.gawk
│   │   ├── dynamic_resources.sh
│   │   ├── kafka_bridge_tls_prepare_certificates.sh
│   │   └── kafka_bridge_run.sh
│   └── kafka_bridge_run.sh
├── .gitignore
├── Makefile.maven
├── perftest
│   └── README.md
├── config
│   ├── log4j2.properties
│   └── application.properties
├── Dockerfile
├── .github
│   └── workflows
│       └── codeql-analysis.yml
├── Makefile
└── BUILDING.md
/documentation/book/common:
--------------------------------------------------------------------------------
1 | ../common
--------------------------------------------------------------------------------
/documentation/book/images:
--------------------------------------------------------------------------------
1 | ../images
--------------------------------------------------------------------------------
/documentation/book/modules:
--------------------------------------------------------------------------------
1 | ../modules
--------------------------------------------------------------------------------
/release.version:
--------------------------------------------------------------------------------
1 | 0.34.0-SNAPSHOT
2 |
--------------------------------------------------------------------------------
/documentation/assemblies/modules:
--------------------------------------------------------------------------------
1 | ../modules
--------------------------------------------------------------------------------
/documentation/book/assemblies:
--------------------------------------------------------------------------------
1 | ../assemblies
--------------------------------------------------------------------------------
/documentation/book/api/.openapi-generator/FILES:
--------------------------------------------------------------------------------
1 | index.adoc
2 |
--------------------------------------------------------------------------------
/documentation/book/api/.openapi-generator/VERSION:
--------------------------------------------------------------------------------
1 | 7.8.0
2 |
--------------------------------------------------------------------------------
/src/test/resources/wrong.properties:
--------------------------------------------------------------------------------
1 | no.meaningful.parameter=its-value
--------------------------------------------------------------------------------
/documentation/images/kafka-bridge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/strimzi/strimzi-kafka-bridge/HEAD/documentation/images/kafka-bridge.png
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_syft.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/install_syft.sh"
3 | displayName: "Install Syft"
--------------------------------------------------------------------------------
/documentation/book/api/.openapi-generator/openapi.json-generate-apidoc.sha256:
--------------------------------------------------------------------------------
1 | 3e4b795fa237d0ac582b4c854373b19fa40984bac7ebe644d3fa554e37f6b3ba
--------------------------------------------------------------------------------
/src/main/resources/META-INF/services/io.opentelemetry.context.ContextStorageProvider:
--------------------------------------------------------------------------------
1 | io.strimzi.kafka.bridge.tracing.BridgeContextStorageProvider
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_cosign.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/install_cosign.sh"
3 | displayName: "Install cosign"
--------------------------------------------------------------------------------
/GOVERNANCE.md:
--------------------------------------------------------------------------------
1 | # Strimzi Governance
2 |
3 | Strimzi Governance is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/GOVERNANCE.md).
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Strimzi Maintainers list
2 |
3 | Strimzi Maintainers list is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/MAINTAINERS).
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | [ "topic1", "topic2" ]
7 | ----
--------------------------------------------------------------------------------
/documentation/common/revision-info.adoc:
--------------------------------------------------------------------------------
1 | ////
2 | Revision information template.
3 | Add this to the end of every document.
4 | ////
5 |
6 | _Revised on {localdate} {localtime}_
7 |
--------------------------------------------------------------------------------
/.checkstyle/java.header:
--------------------------------------------------------------------------------
1 | ^/\*
2 | ^ \* Copyright Strimzi authors.
3 | ^ \* License: Apache License 2\.0 \(see the file LICENSE or http://apache\.org/licenses/LICENSE-2\.0\.html\)\.
4 | ^ \*/
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Strimzi Community Code of Conduct
2 |
3 | Strimzi Community Code of Conduct is defined in the [governance repository](https://github.com/strimzi/governance/blob/main/CODE_OF_CONDUCT.md).
--------------------------------------------------------------------------------
/Makefile.os:
--------------------------------------------------------------------------------
1 | FIND = find
2 | SED = sed
3 | GREP = grep
4 | CP = cp
5 |
6 | UNAME_S := $(shell uname -s)
7 | ifeq ($(UNAME_S),Darwin)
8 | FIND = gfind
9 | SED = gsed
10 | GREP = ggrep
11 | CP = gcp
12 | endif
--------------------------------------------------------------------------------
/documentation/book/api/snippet/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | [subs=attributes+]
6 | ----
7 | {
8 | "bridge_version" : {ProductVersion}
9 | }
10 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "topics" : [ "topic1", "topic2" ]
8 | }
9 | ----
--------------------------------------------------------------------------------
/.spotbugs/spotbugs-exclude.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/documentation/book/api/snippet/admin/topics/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "topic_name" : "my-topic",
8 | "partitions_count" : 1,
9 | "replication_factor" : 2,
10 | }
11 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/DELETE/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_asciidoc.yaml:
--------------------------------------------------------------------------------
1 | # Steps needed for local Asciidoc installation
2 | steps:
3 | - task: UseRubyVersion@0
4 | inputs:
5 | versionSpec: '>= 2.4'
6 | addToPath: true
7 | - bash: gem install asciidoctor
8 | displayName: 'Install asciidoctor'
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/offsets/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/end/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/DELETE/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/beginning/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
--------------------------------------------------------------------------------
/.azure/templates/steps/maven_cache.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: Cache@2
3 | inputs:
4 | key: 'maven-cache | $(System.JobName) | **/pom.xml'
5 | restoreKeys: |
6 | maven-cache | $(System.JobName)
7 | maven-cache
8 | path: $(HOME)/.m2/repository
9 | displayName: Maven cache
10 |
--------------------------------------------------------------------------------
/bin/docker/to_bytes.gawk:
--------------------------------------------------------------------------------
1 | # Use gawk because plain awk can't extract regexp groups; gawk's match() can capture them into an array
2 | BEGIN {
3 | suffixes[""]=1
4 | suffixes["K"]=1024
5 | suffixes["M"]=1024**2
6 | suffixes["G"]=1024**3
7 | }
8 |
9 | match($0, /([0-9.]*)([kKmMgG]?)/, a) {
10 | printf("%d", a[1] * suffixes[toupper(a[2])])
11 | }
--------------------------------------------------------------------------------
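
As `dynamic_resources.sh` below shows, the script is fed a size string such as `3.86G` on stdin; for example, `echo "1.5G" | gawk -f to_bytes.gawk` prints `1610612736` (1.5 × 1024³).
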
/documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "records" : [ {
8 | "key" : "key1",
9 | "value" : "value1"
10 | }, {
11 | "value" : "value2"
12 | } ]
13 | }
14 | ----
--------------------------------------------------------------------------------
/.azure/scripts/install_cosign.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | readonly VERSION="2.2.0"
4 |
5 | ARCH=$1
6 | if [ -z "$ARCH" ]; then
7 | ARCH="amd64"
8 | fi
9 |
10 | curl -L https://github.com/sigstore/cosign/releases/download/v${VERSION}/cosign-linux-${ARCH} > cosign && chmod +x cosign
11 | sudo mv cosign /usr/bin/
12 |
--------------------------------------------------------------------------------
/.azure/scripts/settings.xml:
--------------------------------------------------------------------------------
1 | <settings>
2 |   <servers>
3 |     <server>
4 |       <id>central</id>
5 |       <username>${env.CENTRAL_USERNAME}</username>
6 |       <password>${env.CENTRAL_PASSWORD}</password>
7 |     </server>
8 |   </servers>
9 | </settings>
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/assignments/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "partitions" : [ {
8 | "topic" : "topic",
9 | "partition" : 0
10 | }, {
11 | "topic" : "topic",
12 | "partition" : 1
13 | } ]
14 | }
15 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/end/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "partitions" : [ {
8 | "topic" : "topic",
9 | "partition" : 0
10 | }, {
11 | "topic" : "topic",
12 | "partition" : 1
13 | } ]
14 | }
15 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/beginning/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "partitions" : [ {
8 | "topic" : "topic",
9 | "partition" : 0
10 | }, {
11 | "topic" : "topic",
12 | "partition" : 1
13 | } ]
14 | }
15 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "records" : [ {
8 | "key" : "key1",
9 | "value" : "value1"
10 | }, {
11 | "value" : "value2",
12 | "partition" : 1
13 | }, {
14 | "value" : "value3"
15 | } ]
16 | }
17 | ----
--------------------------------------------------------------------------------
/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | #Bridge related settings
2 | bridge.id=my-bridge
3 |
4 | #Apache Kafka common
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | #Apache Kafka producer
8 | kafka.producer.acks=1
9 |
10 | #Apache Kafka consumer
11 | kafka.consumer.auto.offset.reset=earliest
12 |
13 | #HTTP related settings
14 | http.host=0.0.0.0
15 | http.port=8080
--------------------------------------------------------------------------------
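
These are standard `java.util.Properties` key-value pairs. A minimal sketch of loading and reading them with plain `java.util.Properties`; the bridge's own loader is `ConfigRetriever` (under `src/main/java/.../config/`), whose API is not reproduced in this dump:

[source,java]
----
import java.io.FileInputStream;
import java.util.Properties;

public class LoadBridgeConfig {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Load the same file used by the tests above
        try (FileInputStream in = new FileInputStream("src/test/resources/application.properties")) {
            props.load(in);
        }
        System.out.println(props.getProperty("kafka.bootstrap.servers")); // localhost:9092
        System.out.println(props.getProperty("http.port"));               // 8080
    }
}
----
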
/.azure/scripts/install_syft.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | readonly VERSION="0.90.0"
5 |
6 | ARCH=$1
7 | if [ -z "$ARCH" ]; then
8 | ARCH="amd64"
9 | fi
10 |
11 | wget https://github.com/anchore/syft/releases/download/v${VERSION}/syft_${VERSION}_linux_${ARCH}.tar.gz -O syft.tar.gz
12 | tar xf syft.tar.gz -C /tmp
13 | chmod +x /tmp/syft
14 | sudo mv /tmp/syft /usr/bin
15 |
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_java.yaml:
--------------------------------------------------------------------------------
1 | # Step to configure JAVA on the agent
2 | parameters:
3 | - name: JDK_VERSION
4 | default: '17'
5 | steps:
6 | - task: JavaToolInstaller@0
7 | inputs:
8 | versionSpec: ${{ parameters.JDK_VERSION }}
9 | jdkArchitectureOption: 'x64'
10 | jdkSourceOption: 'PreInstalled'
11 | displayName: 'Configure Java'
12 |
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/offsets/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "offsets" : [ {
8 | "topic" : "topic",
9 | "partition" : 0,
10 | "offset" : 15
11 | }, {
12 | "topic" : "topic",
13 | "partition" : 1,
14 | "offset" : 42
15 | } ]
16 | }
17 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/positions/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "offsets" : [ {
8 | "topic" : "topic",
9 | "partition" : 0,
10 | "offset" : 15
11 | }, {
12 | "topic" : "topic",
13 | "partition" : 1,
14 | "offset" : 42
15 | } ]
16 | }
17 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/POST/http-request.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP request
2 |
3 | ===== Request body
4 | [source,json]
5 | ----
6 | {
7 | "name" : "consumer1",
8 | "format" : "binary",
9 | "auto.offset.reset" : "earliest",
10 | "enable.auto.commit" : false,
11 | "fetch.min.bytes" : 512,
12 | "consumer.request.timeout.ms" : 30000,
13 | "isolation.level" : "read_committed"
14 | }
15 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/offsets/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "beginning_offset" : 10,
8 | "end_offset" : 50
9 | }
10 | ----
11 |
12 |
13 | ===== Response 404
14 | [source,json]
15 | ----
16 | {
17 | "error_code" : 404,
18 | "message" : "The specified topic partition was not found."
19 | }
20 | ----
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/Constants.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge;
6 |
7 | public interface Constants {
8 |
9 | /**
10 | * Tag for http bridge tests, which are triggered for each push/pr/merge on travis-ci
11 | */
12 | String HTTP_BRIDGE = "httpbridge";
13 | }
14 |
--------------------------------------------------------------------------------
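
A hypothetical test class showing how the tag constant can be applied, assuming JUnit 5 as the test framework; the class name is illustrative:

[source,java]
----
import io.strimzi.kafka.bridge.Constants;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

// Tagged tests can then be selected with, for example: mvn verify -Dgroups=httpbridge
@Tag(Constants.HTTP_BRIDGE)
class ExampleHttpBridgeIT {
    @Test
    void exampleTest() {
        // test body
    }
}
----
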
/documentation/modules/proc-downloading-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-downloading-http-bridge-{context}']
6 |
7 | = Downloading an HTTP Bridge archive
8 |
9 | [role="_abstract"]
10 | A zipped distribution of the HTTP Bridge is available for download.
11 |
12 | .Procedure
13 |
14 | - Download the latest version of the HTTP Bridge archive from the {ReleaseDownload}.
15 |
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_docker.yaml:
--------------------------------------------------------------------------------
1 | # Steps needed for local Docker installation
2 | steps:
3 | - task: DockerInstaller@0
4 | displayName: Install Docker
5 | inputs:
6 | # Versions can be found from https://download.docker.com/linux/static/stable/x86_64/
7 | dockerVersion: 24.0.5
8 | releaseType: stable
9 | - bash: |
10 | docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
11 | displayName: 'Register QEMU binary'
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/assignments/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
11 |
12 |
13 | ===== Response 409
14 | [source,json]
15 | ----
16 | {
17 | "error_code" : 409,
18 | "message" : "Subscriptions to topics, partitions, and patterns are mutually exclusive."
19 | }
20 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "topics" : [ "my-topic1", "my-topic2" ],
8 | "partitions" : [ {
9 | "my-topic1" : [ 1, 2, 3 ]
10 | }, {
11 | "my-topic2" : [ 1 ]
12 | } ]
13 | }
14 | ----
15 |
16 |
17 | ===== Response 404
18 | [source,json]
19 | ----
20 | {
21 | "error_code" : 404,
22 | "message" : "The specified consumer instance was not found."
23 | }
24 | ----
--------------------------------------------------------------------------------
/documentation/book/bridge.adoc:
--------------------------------------------------------------------------------
1 | include::common/attributes.adoc[]
2 |
3 | :context: bridge
4 |
5 | [id='using_book-{context}']
6 | = Using the Strimzi HTTP Bridge
7 |
8 | include::assemblies/assembly-http-bridge-overview.adoc[leveloffset=+1]
9 |
10 | include::assemblies/assembly-http-bridge-quickstart.adoc[leveloffset=+1]
11 |
12 | include::assemblies/assembly-http-bridge-config.adoc[leveloffset=+1]
13 |
14 | [id='api_reference-{context}']
15 | include::api/index.adoc[leveloffset=+1]
16 |
17 | include::common/revision-info.adoc[leveloffset=+1]
18 |
--------------------------------------------------------------------------------
/.azure/scripts/push-to-central.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | echo "Build reason: ${BUILD_REASON}"
6 | echo "Source branch: ${BRANCH}"
7 |
8 | function cleanup() {
9 | rm -rf signing.gpg
10 | gpg --delete-keys
11 | gpg --delete-secret-keys
12 | }
13 |
14 | # Run the cleanup on failure / exit
15 | trap cleanup EXIT
16 |
17 | export GPG_TTY=$(tty)
18 | echo $GPG_SIGNING_KEY | base64 -d > signing.gpg
19 | gpg --batch --import signing.gpg
20 |
21 | GPG_EXECUTABLE=gpg mvn $MVN_ARGS -DskipTests -s ./.azure/scripts/settings.xml -P central deploy
22 |
23 | cleanup
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/http/model/HttpBridgeResult.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.http.model;
7 |
8 | /**
9 | * This class represents a result of an HTTP bridging operation
10 | *
11 |  * @param <T> the class bringing the actual result as {@link HttpBridgeError} or {@link org.apache.kafka.clients.producer.RecordMetadata}
12 |  * @param result actual result
13 |  */
14 | public record HttpBridgeResult<T>(T result) { }
15 |
--------------------------------------------------------------------------------
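
A minimal usage sketch; `HttpBridgeError` is not reproduced in this dump, so its `(int code, String message)` constructor is an assumption based on the error JSON shape used throughout the API snippets:

[source,java]
----
import io.strimzi.kafka.bridge.http.model.HttpBridgeError;
import io.strimzi.kafka.bridge.http.model.HttpBridgeResult;

public class HttpBridgeResultExample {
    public static void main(String[] args) {
        // Wrap an error outcome; a successful produce would wrap a RecordMetadata instead
        HttpBridgeResult<HttpBridgeError> failure = new HttpBridgeResult<>(
                new HttpBridgeError(404, "The specified consumer instance was not found."));
        System.out.println(failure.result());
    }
}
----
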
/documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "partition" : 1,
8 | "leader" : 1,
9 | "replicas" : [ {
10 | "broker" : 1,
11 | "leader" : true,
12 | "in_sync" : true
13 | }, {
14 | "broker" : 2,
15 | "leader" : false,
16 | "in_sync" : true
17 | } ]
18 | }
19 | ----
20 |
21 |
22 | ===== Response 404
23 | [source,json]
24 | ----
25 | {
26 | "error_code" : 404,
27 | "message" : "The specified topic partition was not found."
28 | }
29 | ----
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 |
3 | # Mobile Tools for Java (J2ME)
4 | .mtj.tmp/
5 |
6 | # Package Files #
7 | *.jar
8 | *.war
9 | *.ear
10 |
11 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
12 | hs_err_pid*
13 |
14 | # Maven stuff
15 | **/target/*
16 |
17 | # Eclipse stuff
18 | **/.project
19 | **/.settings/*
20 | **/.prefs
21 | **/.classpath
22 | /target/
23 |
24 | # IntelliJ IDEA specific
25 | .idea/
26 | *.iml
27 |
28 | # VS Code
29 | .factorypath
30 | .vscode
31 |
32 | .DS_Store
33 |
34 | # Auto generated doc
35 | documentation/html/**
36 | documentation/htmlnoheader/**
37 | documentation/book/build/**
38 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/IllegalEmbeddedFormatException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 |
8 | /**
9 | * IllegalEmbeddedFormatException
10 | */
11 | public class IllegalEmbeddedFormatException extends RuntimeException {
12 |
13 | private static final long serialVersionUID = 1L;
14 |
15 | /**
16 | * Constructor
17 | *
18 | * @param message message to set in the exception
19 | */
20 | public IllegalEmbeddedFormatException(String message) {
21 | super(message);
22 | }
23 | }
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/Handler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 |
8 | /**
9 |  * Represents a functional interface for handling the callback when an asynchronous operation ends
10 |  *
11 |  * @param <T> type of the data to be handled
12 |  */
13 | @FunctionalInterface
14 | public interface Handler<T> {
15 |
16 | /**
17 | * Called to handle the result of the asynchronous operation with the provided data
18 | *
19 | * @param data data result to handle
20 | */
21 | void handle(T data);
22 | }
23 |
--------------------------------------------------------------------------------
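
Since `Handler<T>` is a functional interface, the callback can be supplied as a lambda; a minimal illustrative sketch:

[source,java]
----
import io.strimzi.kafka.bridge.Handler;

public class HandlerExample {
    // Simulates an asynchronous operation that reports its result via Handler<T>
    static void fetchAsync(Handler<String> handler) {
        new Thread(() -> handler.handle("done")).start();
    }

    public static void main(String[] args) {
        fetchAsync(data -> System.out.println("Result: " + data));
    }
}
----
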
/documentation/book/api/snippet/consumers/{groupid}/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "instance_id" : "consumer1",
8 | "base_uri" : "http://localhost:8080/consumers/my-group/instances/consumer1"
9 | }
10 | ----
11 |
12 |
13 | ===== Response 409
14 | [source,json]
15 | ----
16 | {
17 | "error_code" : 409,
18 | "message" : "A consumer instance with the specified name already exists in the HTTP Bridge."
19 | }
20 | ----
21 |
22 |
23 | ===== Response 422
24 | [source,json]
25 | ----
26 | {
27 | "error_code" : 422,
28 | "message" : "One or more consumer configuration options have invalid values."
29 | }
30 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/subscription/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 404
4 | [source,json]
5 | ----
6 | {
7 | "error_code" : 404,
8 | "message" : "The specified consumer instance was not found."
9 | }
10 | ----
11 |
12 |
13 | ===== Response 409
14 | [source,json]
15 | ----
16 | {
17 | "error_code" : 409,
18 | "message" : "Subscriptions to topics, partitions, and patterns are mutually exclusive."
19 | }
20 | ----
21 |
22 |
23 | ===== Response 422
24 | [source,json]
25 | ----
26 | {
27 | "error_code" : 422,
28 | "message" : "A list (of Topics type) or a topic_pattern must be specified."
29 | }
30 | ----
--------------------------------------------------------------------------------
/.checkstyle/suppressions.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
9 |
10 |
12 |
13 |
14 |
16 |
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "offsets" : [ {
8 | "partition" : 2,
9 | "offset" : 0
10 | }, {
11 | "partition" : 1,
12 | "offset" : 1
13 | }, {
14 | "partition" : 2,
15 | "offset" : 2
16 | } ]
17 | }
18 | ----
19 |
20 |
21 | ===== Response 404
22 | [source,json]
23 | ----
24 | {
25 | "error_code" : 404,
26 | "message" : "The specified topic was not found."
27 | }
28 | ----
29 |
30 |
31 | ===== Response 422
32 | [source,json]
33 | ----
34 | {
35 | "error_code" : 422,
36 | "message" : "The record list contains invalid records."
37 | }
38 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/partitions/{partitionid}/POST/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "offsets" : [ {
8 | "partition" : 2,
9 | "offset" : 0
10 | }, {
11 | "partition" : 1,
12 | "offset" : 1
13 | }, {
14 | "partition" : 2,
15 | "offset" : 2
16 | } ]
17 | }
18 | ----
19 |
20 |
21 | ===== Response 404
22 | [source,json]
23 | ----
24 | {
25 | "error_code" : 404,
26 | "message" : "The specified topic partition was not found."
27 | }
28 | ----
29 |
30 |
31 | ===== Response 422
32 | [source,json]
33 | ----
34 | {
35 | "error_code" : 422,
36 | "message" : "The record is not valid."
37 | }
38 | ----
--------------------------------------------------------------------------------
/bin/kafka_bridge_run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -x
3 |
4 | # Find my path to use when calling scripts
5 | MYPATH="$(dirname "$0")"
6 |
7 | # Configure logging
8 | if [ -z "$KAFKA_BRIDGE_LOG4J_OPTS" ]
9 | then
10 | KAFKA_BRIDGE_LOG4J_OPTS="-Dlog4j2.configurationFile=file:${MYPATH}/../config/log4j2.properties"
11 | fi
12 |
13 | # Make sure that we use /dev/urandom
14 | JAVA_OPTS="${JAVA_OPTS} -Dvertx.cacheDirBase=/tmp/vertx-cache -Djava.security.egd=file:/dev/./urandom"
15 |
16 | # enabling OpenTelemetry with OTLP by default
17 | if [ -n "$OTEL_SERVICE_NAME" ] && [ -z "$OTEL_TRACES_EXPORTER" ]; then
18 | export OTEL_TRACES_EXPORTER="otlp"
19 | fi
20 |
21 | exec java $JAVA_OPTS $KAFKA_BRIDGE_LOG4J_OPTS -classpath "${MYPATH}/../libs/*" io.strimzi.kafka.bridge.Application "$@"
--------------------------------------------------------------------------------
/Makefile.maven:
--------------------------------------------------------------------------------
1 | # Makefile.maven contains the shared tasks for building Java applications. This file is
2 | # included by the Makefiles of components that contain Java sources to be built.
3 |
4 | .PHONY: java_compile
5 | java_compile:
6 | echo "Building JAR file ..."
7 | mvn $(MVN_ARGS) compile
8 |
9 | .PHONY: java_verify
10 | java_verify:
11 | echo "Building JAR file ..."
12 | mvn $(MVN_ARGS) verify
13 |
14 | .PHONY: java_package
15 | java_package:
16 | echo "Packaging project ..."
17 | mvn $(MVN_ARGS) package
18 |
19 | .PHONY: java_install
20 | java_install:
21 | echo "Installing JAR files ..."
22 | mvn $(MVN_ARGS) install
23 |
24 | .PHONY: java_clean
25 | java_clean:
26 | echo "Cleaning Maven build ..."
27 | mvn clean
28 |
29 | .PHONY: spotbugs
30 | spotbugs:
31 | mvn $(MVN_ARGS) spotbugs:check
32 |
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/partitions/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | [ {
7 | "partition" : 1,
8 | "leader" : 1,
9 | "replicas" : [ {
10 | "broker" : 1,
11 | "leader" : true,
12 | "in_sync" : true
13 | }, {
14 | "broker" : 2,
15 | "leader" : false,
16 | "in_sync" : true
17 | } ]
18 | }, {
19 | "partition" : 2,
20 | "leader" : 2,
21 | "replicas" : [ {
22 | "broker" : 1,
23 | "leader" : false,
24 | "in_sync" : true
25 | }, {
26 | "broker" : 2,
27 | "leader" : true,
28 | "in_sync" : true
29 | } ]
30 | } ]
31 | ----
32 |
33 |
34 | ===== Response 404
35 | [source,json]
36 | ----
37 | {
38 | "error_code" : 404,
39 | "message" : "The specified topic was not found."
40 | }
41 | ----
--------------------------------------------------------------------------------
/documentation/book/api/snippet/topics/{topicname}/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | [source,json]
5 | ----
6 | {
7 | "name" : "topic",
8 | "offset" : 2,
9 | "configs" : {
10 | "cleanup.policy" : "compact"
11 | },
12 | "partitions" : [ {
13 | "partition" : 1,
14 | "leader" : 1,
15 | "replicas" : [ {
16 | "broker" : 1,
17 | "leader" : true,
18 | "in_sync" : true
19 | }, {
20 | "broker" : 2,
21 | "leader" : false,
22 | "in_sync" : true
23 | } ]
24 | }, {
25 | "partition" : 2,
26 | "leader" : 2,
27 | "replicas" : [ {
28 | "broker" : 1,
29 | "leader" : false,
30 | "in_sync" : true
31 | }, {
32 | "broker" : 2,
33 | "leader" : true,
34 | "in_sync" : true
35 | } ]
36 | } ]
37 | }
38 | ----
--------------------------------------------------------------------------------
/perftest/README.md:
--------------------------------------------------------------------------------
1 | # Performance tests
2 |
3 | This folder contains a [JMeter](https://jmeter.apache.org/) JMX configuration file describing a test plan with the following operations:
4 |
5 | * consumer creation, topic subscription, polling for records in a loop, and final consumer deletion
6 | * producers sending records to topics in a loop
7 |
8 | The test plan is configurable. You can change the number of consumers/producers (JMeter threads) and the number of loop cycles for sending/receiving records.
9 |
10 | The test plan needs a set of JMeter plugins in order to display graphs.
11 | For this reason, download the JMeter Plugins Manager from the [jmeter-plugins.org](https://jmeter-plugins.org/get/) website and put it into the `lib/ext` folder of your JMeter installation.
12 | When you open the JMX configuration file for the first time, JMeter asks to install the plugins required to run the test plan.
--------------------------------------------------------------------------------
/documentation/modules/con-securing-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id='con-securing-http-bridge-{context}']
6 | = Securing connectivity to the Kafka cluster
7 |
8 | [role="_abstract"]
9 | You can configure the following between the HTTP Bridge and your Kafka cluster:
10 |
11 | * TLS or SASL-based authentication
12 | * A TLS-encrypted connection
13 |
14 | You configure the HTTP Bridge for authentication through its xref:proc-configuring-http-bridge-{context}[properties file].
15 |
16 | You can also use ACLs in Kafka brokers to restrict the topics that can be consumed and produced using the HTTP Bridge.
17 |
18 | NOTE: Use the `KafkaBridge` resource to configure authentication when you are xref:overview-components-running-http-bridge-cluster-{context}[running the HTTP Bridge on Kubernetes].
--------------------------------------------------------------------------------
/documentation/modules/con-overview-open-api-spec-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id='overview-open-api-spec-http-bridge-{context}']
6 | = HTTP Bridge OpenAPI specification
7 |
8 | [role="_abstract"]
9 | HTTP Bridge APIs use the OpenAPI Specification (OAS).
10 | OAS provides a standard framework for describing and implementing HTTP APIs.
11 |
12 | The HTTP Bridge OpenAPI specification is in JSON format.
13 | You can find the OpenAPI JSON files in the `src/main/resources/` folder of the HTTP Bridge source download files.
14 | The download files are available from the {ReleaseDownload}.
15 |
16 | You can also use the xref:openapi[`GET /openapi` method] to retrieve the OpenAPI v3 specification in JSON format.
17 |
18 | [role="_additional-resources"]
19 | .Additional resources
20 | * {openapis}
21 |
--------------------------------------------------------------------------------
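
For example, with the bridge listening on the default `http.host`/`http.port` from `application.properties`, the specification can be retrieved with Java's built-in HTTP client; an illustrative sketch:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FetchOpenApiSpec {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/openapi"))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // the OpenAPI v3 document as JSON
    }
}
----
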
/documentation/modules/proc-bridge-deleting-consumer.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-bridge-deleting-consumer-{context}']
6 | = Deleting an HTTP Bridge consumer
7 |
8 | [role="_abstract"]
9 | Delete the HTTP Bridge consumer that you used throughout this quickstart.
10 |
11 | .Procedure
12 |
13 | * Delete the HTTP Bridge consumer by sending a `DELETE` request to the xref:deleteconsumer[instances] endpoint.
14 | +
15 | [source,curl,subs=attributes+]
16 | ----
17 | curl -X DELETE http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer
18 | ----
19 | +
20 | If the request is successful, the HTTP Bridge returns a `204` code.
21 |
22 | [role="_additional-resources"]
23 | .Additional resources
24 |
25 | * xref:deleteconsumer[DELETE /consumers/{groupid}/instances/{name}]
26 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/config/AbstractConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.config;
7 |
8 | import java.util.Map;
9 |
10 | /**
11 |  * Base abstract class for configurations related to protocol heads and Kafka
12 |  */
13 | public abstract class AbstractConfig {
14 |
15 |     protected final Map<String, Object> config;
16 |
17 | /**
18 | * Constructor
19 | *
20 | * @param config configuration parameters map
21 | */
22 |     public AbstractConfig(Map<String, Object> config) {
23 | this.config = config;
24 | }
25 |
26 | /**
27 | * @return configuration parameters map
28 | */
29 |     public Map<String, Object> getConfig() {
30 | return this.config;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/tracing/OpenTelemetryTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.tracing;
7 |
8 | import io.vertx.core.tracing.TracingOptions;
9 | import io.vertx.tracing.opentelemetry.OpenTelemetryOptions;
10 |
11 | import static io.strimzi.kafka.bridge.tracing.TracingConstants.OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY;
12 |
13 | /**
14 | * OpenTelemetry tests
15 | */
16 | public class OpenTelemetryTest extends TracingTestBase {
17 | @Override
18 | protected TracingOptions tracingOptions() {
19 | System.setProperty(OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY, "strimzi-kafka-bridge-test");
20 | System.setProperty("otel.metrics.exporter", "none"); // disable metrics
21 | return new OpenTelemetryOptions();
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/documentation/common/attributes.adoc:
--------------------------------------------------------------------------------
1 | // AsciiDoc settings
2 | :data-uri!:
3 | :doctype: book
4 | :experimental:
5 | :idprefix:
6 | :imagesdir: images
7 | :numbered:
8 | :sectanchors!:
9 | :sectnums:
10 | :source-highlighter: highlightjs
11 | :toc: left
12 | :linkattrs:
13 | :toclevels: 3
14 | :sectlinks:
15 |
16 | //links to Strimzi docs
17 | :BookURLConfiguring: https://strimzi.io/documentation/[Strimzi documentation^]
18 |
19 | //API versions
20 | :KafkaTopicApiVersion: kafka.strimzi.io/v1beta2
21 |
22 | // Source and download links
23 | :ReleaseDownload: https://github.com/strimzi/strimzi-kafka-bridge/releases[GitHub release page^]
24 |
25 | //External links
26 | :external-cors-link: https://fetch.spec.whatwg.org/[Fetch CORS specification^]
27 | :openapis: https://www.openapis.org/[OpenAPI initiative^]
28 | :otel-exporters: https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure#exporters[OpenTelemetry exporter values^]
29 |
--------------------------------------------------------------------------------
/bin/docker/dynamic_resources.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | MYPATH="$(dirname "$0")"
5 |
6 | function get_heap_size {
7 | # Get the max heap used by a jvm which used all the ram available to the container
8 | CONTAINER_MEMORY_IN_BYTES=$(java -XshowSettings:vm -version \
9 | |& awk '/Max\. Heap Size \(Estimated\): [0-9KMG]+/{ print $5}' \
10 | | gawk -f "${MYPATH}"/to_bytes.gawk)
11 |
12 | # use max of 31G memory, java performs much better with Compressed Ordinary Object Pointers
13 | DEFAULT_MEMORY_CEILING=$((31 * 2**30))
14 | if [ "${CONTAINER_MEMORY_IN_BYTES}" -lt "${DEFAULT_MEMORY_CEILING}" ]; then
15 | if [ -z $CONTAINER_HEAP_PERCENT ]; then
16 | CONTAINER_HEAP_PERCENT=0.50
17 | fi
18 |
19 | CONTAINER_MEMORY_IN_MB=$((${CONTAINER_MEMORY_IN_BYTES}/1024**2))
20 | CONTAINER_HEAP_MAX=$(echo "${CONTAINER_MEMORY_IN_MB} ${CONTAINER_HEAP_PERCENT}" | awk '{ printf "%d", $1 * $2 }')
21 |
22 | echo "${CONTAINER_HEAP_MAX}"
23 | fi
24 | }
--------------------------------------------------------------------------------
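
Worked example: if the JVM reports roughly a 4 GiB limit (`CONTAINER_MEMORY_IN_BYTES` = 4294967296), that is below the 31 GiB ceiling, so with the default `CONTAINER_HEAP_PERCENT` of 0.50 the function prints `2048`, that is, half the container memory expressed in megabytes.
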
/src/main/java/io/strimzi/kafka/bridge/tracing/TracingConstants.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.tracing;
7 |
8 | /**
9 | * Tracing constants.
10 | */
11 | public final class TracingConstants {
12 |
13 | /** tracing component name definition */
14 | public static final String COMPONENT = "strimzi-kafka-bridge";
15 | /** Kafka service name definition */
16 | public static final String KAFKA_SERVICE = "kafka";
17 |
18 | /** OpenTelemetry tracing type */
19 | public static final String OPENTELEMETRY = "opentelemetry";
20 |
21 | /** OpenTelemetry service name env var */
22 | public static final String OPENTELEMETRY_SERVICE_NAME_ENV_KEY = "OTEL_SERVICE_NAME";
23 | /** OpenTelemetry service name system property */
24 | public static final String OPENTELEMETRY_SERVICE_NAME_PROPERTY_KEY = "otel.service.name";
25 | }
26 |
--------------------------------------------------------------------------------
/src/main/resources/log4j2.properties:
--------------------------------------------------------------------------------
1 | #
2 | # The logging properties used
3 | #
4 | name = BridgeConfig
5 |
6 | appender.console.type = Console
7 | appender.console.name = STDOUT
8 | appender.console.layout.type = PatternLayout
9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n
10 |
11 | rootLogger.level = INFO
12 | rootLogger.appenderRefs = console
13 | rootLogger.appenderRef.console.ref = STDOUT
14 | rootLogger.additivity = false
15 |
16 | logger.bridge.name = io.strimzi.kafka.bridge
17 | logger.bridge.level = INFO
18 | logger.bridge.appenderRefs = console
19 | logger.bridge.appenderRef.console.ref = STDOUT
20 | logger.bridge.additivity = false
21 |
22 | # HTTP OpenAPI specific logging levels (default is INFO)
23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking.
24 | logger.healthy.name = http.openapi.operation.healthy
25 | logger.healthy.level = WARN
26 | logger.ready.name = http.openapi.operation.ready
27 | logger.ready.level = WARN
--------------------------------------------------------------------------------
/src/test/resources/log4j2.properties:
--------------------------------------------------------------------------------
1 | #
2 | # The logging properties used
3 | #
4 | name = BridgeConfig
5 |
6 | appender.console.type = Console
7 | appender.console.name = STDOUT
8 | appender.console.layout.type = PatternLayout
9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n
10 |
11 | rootLogger.level = INFO
12 | rootLogger.appenderRefs = console
13 | rootLogger.appenderRef.console.ref = STDOUT
14 | rootLogger.additivity = false
15 |
16 | logger.bridge.name = io.strimzi.kafka.bridge
17 | logger.bridge.level = INFO
18 | logger.bridge.appenderRefs = console
19 | logger.bridge.appenderRef.console.ref = STDOUT
20 | logger.bridge.additivity = false
21 |
22 | # HTTP OpenAPI specific logging levels (default is INFO)
23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking.
24 | logger.healthy.name = http.openapi.operation.healthy
25 | logger.healthy.level = WARN
26 | logger.ready.name = http.openapi.operation.ready
27 | logger.ready.level = WARN
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/http/converter/JsonDecodeException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.http.converter;
7 |
8 | /**
9 |  * Represents an exception during JSON decoding operations
10 |  */
11 | public class JsonDecodeException extends RuntimeException {
12 |
13 |     /**
14 |      * Default constructor
15 | */
16 | public JsonDecodeException() {
17 | }
18 |
19 | /**
20 | * Constructor
21 | *
22 | * @param message Exception message
23 | */
24 | public JsonDecodeException(String message) {
25 | super(message);
26 | }
27 |
28 | /**
29 | * Constructor
30 | *
31 | * @param message Exception message
32 | * @param cause Inner cause of the exception
33 | */
34 | public JsonDecodeException(String message, Throwable cause) {
35 | super(message, cause);
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/http/converter/JsonEncodeException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.http.converter;
7 |
8 | /**
9 |  * Represents an exception during JSON encoding operations
10 |  */
11 | public class JsonEncodeException extends RuntimeException {
12 |
13 |     /**
14 |      * Default constructor
15 | */
16 | public JsonEncodeException() {
17 | }
18 |
19 | /**
20 | * Constructor
21 | *
22 | * @param message Exception message
23 | */
24 | public JsonEncodeException(String message) {
25 | super(message);
26 | }
27 |
28 | /**
29 | * Constructor
30 | *
31 | * @param message Exception message
32 | * @param cause Inner cause of the exception
33 | */
34 | public JsonEncodeException(String message, Throwable cause) {
35 | super(message, cause);
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/BridgeContentType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 |
8 | /**
9 | * Bridge supported content type
10 | */
11 | public class BridgeContentType {
12 |
13 | /** JSON encoding with JSON embedded format */
14 | public static final String KAFKA_JSON_JSON = "application/vnd.kafka.json.v2+json";
15 |
16 | /** JSON encoding with BINARY embedded format */
17 | public static final String KAFKA_JSON_BINARY = "application/vnd.kafka.binary.v2+json";
18 |
19 | /** JSON encoding with TEXT embedded format */
20 | public static final String KAFKA_JSON_TEXT = "application/vnd.kafka.text.v2+json";
21 |
22 | /** Specific Kafka JSON encoding */
23 | public static final String KAFKA_JSON = "application/vnd.kafka.v2+json";
24 |
25 | /** JSON encoding */
26 | public static final String JSON = "application/json";
27 | }
28 |
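A sketch of how an HTTP client might use one of these constants when posting records to the bridge. The endpoint, topic, and payload are illustrative, and Java 11's java.net.http client is assumed:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ProduceWithContentType {
    public static void main(String[] args) throws Exception {
        // KAFKA_JSON_JSON tells the bridge that record values are embedded as JSON
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/topics/my-topic"))
                .header("Content-Type", BridgeContentType.KAFKA_JSON_JSON)
                .POST(HttpRequest.BodyPublishers.ofString(
                        "{\"records\":[{\"key\":\"key1\",\"value\":{\"foo\":\"bar\"}}]}"))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
----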
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/clients/ClientHandlerBase.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.clients;
6 |
7 | import io.vertx.core.AbstractVerticle;
8 |
9 | import java.util.concurrent.CompletableFuture;
10 | import java.util.function.IntPredicate;
11 |
12 | public abstract class ClientHandlerBase<T> extends AbstractVerticle {
13 |     final CompletableFuture<T> resultPromise;
14 |     final IntPredicate msgCntPredicate;
15 | 
16 |     public ClientHandlerBase(CompletableFuture<T> resultPromise, IntPredicate msgCntPredicate) {
17 |         this.resultPromise = resultPromise;
18 |         this.msgCntPredicate = msgCntPredicate;
19 |     }
20 | 
21 |     @Override
22 |     public void start() {
23 |         handleClient();
24 |     }
25 | 
26 |     protected abstract void handleClient();
27 | 
28 |     public CompletableFuture<T> getResultPromise() {
29 |         return resultPromise;
30 |     }
31 | }
--------------------------------------------------------------------------------
/.azure/templates/jobs/publish_docs.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 |   - job: 'publish_docs'
3 | displayName: 'Publish Docs'
4 | # Set timeout for jobs
5 | timeoutInMinutes: 60
6 | # Base system
7 | pool:
8 | vmImage: 'Ubuntu-22.04'
9 | # Pipeline steps
10 | steps:
11 | - task: DownloadPipelineArtifact@2
12 | inputs:
13 | source: '${{ parameters.artifactSource }}'
14 | artifact: Documentation
15 | path: $(System.DefaultWorkingDirectory)
16 | project: '${{ parameters.artifactProject }}'
17 | pipeline: '${{ parameters.artifactPipeline }}'
18 | runVersion: '${{ parameters.artifactRunVersion }}'
19 | runId: '${{ parameters.artifactRunId }}'
20 | - bash: tar -xvf documentation.tar
21 | displayName: "Untar the documentation directory"
22 | - bash: "make docu_pushtowebsite"
23 | env:
24 | BUILD_REASON: $(Build.Reason)
25 | BRANCH: $(Build.SourceBranch)
26 | GITHUB_DEPLOY_KEY: $(GITHUB_DEPLOY_KEY)
27 | displayName: "Publish the docs to the website"
28 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/EmbeddedFormat.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 |
8 | /**
9 | * Define the data format inside the HTTP messages
10 | */
11 | public enum EmbeddedFormat {
12 |
13 | /** Define "binary" data as embedded format */
14 | BINARY,
15 |
16 | /** Define "json" data as embedded format */
17 | JSON,
18 |
19 | /** Define "text" data as embedded format */
20 | TEXT;
21 |
22 | /**
23 | * Convert the String value in the corresponding enum
24 | *
25 | * @param value value to be converted
26 | * @return corresponding enum
27 | */
28 | public static EmbeddedFormat from(String value) {
29 | return switch (value) {
30 | case "json" -> JSON;
31 | case "binary" -> BINARY;
32 | case "text" -> TEXT;
33 |             default -> throw new IllegalEmbeddedFormatException("Invalid format type: " + value);
34 | };
35 | }
36 | }
37 |
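A small usage sketch of the conversion above; an unsupported value is rejected with IllegalEmbeddedFormatException (the "avro" value is just an illustration):

[source,java]
----
public class EmbeddedFormatExample {
    public static void main(String[] args) {
        System.out.println(EmbeddedFormat.from("json"));   // JSON
        System.out.println(EmbeddedFormat.from("binary")); // BINARY

        try {
            EmbeddedFormat.from("avro"); // not a supported embedded format
        } catch (IllegalEmbeddedFormatException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
----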
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/utils/KafkaJsonSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.utils;
7 |
8 | import com.fasterxml.jackson.databind.ObjectMapper;
9 | import org.apache.kafka.common.serialization.Serializer;
10 |
11 | import java.util.Map;
12 |
13 | public class KafkaJsonSerializer<T> implements Serializer<T> {
14 | 
15 |     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
16 | 
17 |     @Override
18 |     public void configure(Map<String, ?> configs, boolean isKey) {
19 |     }
20 | 
21 |     @Override
22 |     public byte[] serialize(String topic, T data) {
23 |         if (data == null) {
24 |             return null;
25 |         }
26 | 
27 |         try {
28 |             return OBJECT_MAPPER.writeValueAsBytes(data);
29 |         } catch (Exception e) {
30 |             throw new RuntimeException("Error serializing JSON message", e);
31 |         }
32 |     }
33 | 
34 |     @Override
35 |     public void close() {
36 |     }
37 | }
39 |
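A minimal sketch of wiring the serializer into a producer; the broker address and topic name are assumptions, not taken from the repository:

[source,java]
----
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Map;
import java.util.Properties;

public class KafkaJsonSerializerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaJsonSerializer.class.getName());

        // Any Jackson-serializable value works; a Map becomes the JSON object {"foo":"bar"}
        try (KafkaProducer<String, Map<String, String>> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("my-topic", Map.of("foo", "bar")));
        }
    }
}
----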
--------------------------------------------------------------------------------
/documentation/assemblies/assembly-http-bridge-overview.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // bridge.adoc
4 |
5 | [id='assembly-http-bridge-overview-{context}']
6 | = HTTP Bridge overview
7 |
8 | [role="_abstract"]
9 | Use the HTTP Bridge to make HTTP requests to a Kafka cluster.
10 |
11 | You can use the HTTP Bridge to integrate HTTP client applications with your Kafka cluster.
12 |
13 | .HTTP client integration
14 |
15 | image:kafka-bridge.png[Internal and external HTTP producers and consumers exchange data with the Kafka brokers through the HTTP Bridge]
16 |
17 | include::modules/con-overview-running-http-bridge.adoc[leveloffset=+1]
18 |
19 | include::modules/con-overview-components-http-bridge.adoc[leveloffset=+1]
20 |
21 | include::modules/con-overview-open-api-spec-http-bridge.adoc[leveloffset=+1]
22 |
23 | include::modules/con-securing-http-bridge.adoc[leveloffset=+1]
24 |
25 | include::modules/con-securing-http-interface.adoc[leveloffset=+1]
26 |
27 | include::modules/con-requests-http-bridge.adoc[leveloffset=+1]
28 |
29 | include::modules/con-loggers-http-bridge.adoc[leveloffset=+1]
30 |
--------------------------------------------------------------------------------
/documentation/book/api/.openapi-generator-ignore:
--------------------------------------------------------------------------------
1 | # OpenAPI Generator Ignore
2 | # Generated by openapi-generator https://github.com/openapitools/openapi-generator
3 |
4 | # Use this file to prevent files from being overwritten by the generator.
5 | # The patterns follow closely to .gitignore or .dockerignore.
6 |
7 | # As an example, the C# client generator defines ApiClient.cs.
8 | # You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
9 | #ApiClient.cs
10 |
11 | # You can match any string of characters against a directory, file or extension with a single asterisk (*):
12 | #foo/*/qux
13 | # The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
14 |
15 | # You can recursively match patterns against a directory, file or extension with a double asterisk (**):
16 | #foo/**/qux
17 | # This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
18 |
19 | # You can also negate patterns with an exclamation (!).
20 | # For example, you can ignore all files in a docs folder with the file extension .md:
21 | #docs/*.md
22 | # Then explicitly reverse the ignore rule for a single file:
23 | #!docs/README.md
24 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/metrics/MetricsType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.metrics;
6 |
7 | /**
8 | * Metrics type.
9 | */
10 | public enum MetricsType {
11 | /** Prometheus JMX Exporter. */
12 | JMX_EXPORTER("jmxPrometheusExporter"),
13 |
14 | /** Strimzi Metrics Reporter. */
15 | STRIMZI_REPORTER("strimziMetricsReporter");
16 |
17 | private final String text;
18 |
19 | MetricsType(final String text) {
20 | this.text = text;
21 | }
22 |
23 | @Override
24 | public String toString() {
25 | return text;
26 | }
27 |
28 | /**
29 | * @param text Text.
30 | * @return Get type from text.
31 | */
32 | public static MetricsType fromString(String text) {
33 | for (MetricsType t : MetricsType.values()) {
34 | if (t.text.equalsIgnoreCase(text)) {
35 | return t;
36 | }
37 | }
38 | throw new IllegalArgumentException("Metrics type not found: " + text);
39 | }
40 | }
41 |
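A short sketch of the round trip between the enum and its configuration string; note that fromString is case-insensitive:

[source,java]
----
public class MetricsTypeExample {
    public static void main(String[] args) {
        MetricsType type = MetricsType.fromString("jmxprometheusexporter");
        System.out.println(type == MetricsType.JMX_EXPORTER); // true
        System.out.println(type); // prints the config string "jmxPrometheusExporter"

        // An unknown value throws IllegalArgumentException:
        // MetricsType.fromString("other");
    }
}
----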
--------------------------------------------------------------------------------
/documentation/modules/proc-configuring-http-bridge-jmx-metrics.adoc:
--------------------------------------------------------------------------------
1 | [id='proc-configuring-http-bridge-jmx-metrics-{context}']
2 | = Configuring Prometheus JMX Exporter metrics
3 |
4 | [role="_abstract"]
5 | Enable the Prometheus JMX Exporter to collect HTTP Bridge metrics by setting the `bridge.metrics` option to `jmxPrometheusExporter`.
6 |
7 | .Prerequisites
8 |
9 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded].
10 |
11 | .Procedure
12 |
13 | . Set the `bridge.metrics` configuration to `jmxPrometheusExporter`.
14 | +
15 | .Configuration for enabling metrics
16 | [source,properties]
18 | ----
19 | bridge.metrics=jmxPrometheusExporter
20 | ----
21 | +
22 | Optionally, you can add a custom Prometheus JMX Exporter configuration using the `bridge.metrics.exporter.config.path` property.
23 | If not configured, a default embedded configuration file is used.
24 |
25 | . Run the HTTP Bridge script.
26 | +
27 | .Running the HTTP Bridge
28 | [source,shell]
29 | ----
30 | ./bin/kafka_bridge_run.sh --config-file=<path>/application.properties
31 | ----
32 | +
33 | With metrics enabled, you can scrape metrics in Prometheus format from the `/metrics` endpoint of the HTTP Bridge.
34 |
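To verify the endpoint programmatically rather than with a browser or curl, a minimal Java 11 `java.net.http` sketch can be used (the bridge address is an assumption):

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ScrapeMetrics {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/metrics"))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // Prometheus text exposition format
    }
}
----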
--------------------------------------------------------------------------------
/.azure/templates/jobs/release_artifacts.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'release_artifacts'
3 | displayName: 'Prepare and release artifacts'
4 | # Set timeout for jobs
5 | timeoutInMinutes: 60
6 | # Base system
7 | pool:
8 | vmImage: 'Ubuntu-22.04'
9 | # Pipeline steps
10 | steps:
11 | # Install Prerequisites
12 | - template: '../steps/prerequisites/install_java.yaml'
13 |
14 | # Change the release version
15 | - bash: "mvn versions:set -DnewVersion=$(echo $RELEASE_VERSION | tr a-z A-Z)"
16 | displayName: "Configure release version to ${{ parameters.releaseVersion }}"
17 | env:
18 | RELEASE_VERSION: '${{ parameters.releaseVersion }}'
19 |
20 | - bash: "mvn install -DskipTests"
21 | displayName: "Build Java"
22 |
23 | # Deploy to Central
24 | - bash: "./.azure/scripts/push-to-central.sh"
25 | env:
26 | BUILD_REASON: $(Build.Reason)
27 | BRANCH: $(Build.SourceBranch)
28 | GPG_PASSPHRASE: $(GPG_PASSPHRASE)
29 | GPG_SIGNING_KEY: $(GPG_SIGNING_KEY)
30 | CENTRAL_USERNAME: $(CENTRAL_USERNAME)
31 | CENTRAL_PASSWORD: $(CENTRAL_PASSWORD)
32 | MVN_ARGS: "-e -V -B"
33 | displayName: "Deploy Java artifacts"
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/utils/KafkaJsonDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.utils;
7 |
8 | import com.fasterxml.jackson.databind.ObjectMapper;
9 | import org.apache.kafka.common.serialization.Deserializer;
10 |
11 | import java.util.Map;
12 |
13 | public class KafkaJsonDeserializer<T> implements Deserializer<T> {
14 | 
15 |     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
16 | 
17 |     private final Class<T> type;
18 | 
19 |     public KafkaJsonDeserializer(Class<T> type) {
20 |         this.type = type;
21 |     }
22 | 
23 |     @Override
24 |     public void configure(Map<String, ?> configs, boolean isKey) {
25 |     }
26 | 
27 |     @Override
28 |     public T deserialize(String topic, byte[] bytes) {
29 |         if (bytes == null || bytes.length == 0) {
30 |             return null;
31 |         }
32 | 
33 |         try {
34 |             return OBJECT_MAPPER.readValue(bytes, type);
35 |         } catch (Exception e) {
36 |             throw new RuntimeException("Error deserializing JSON message", e);
37 |         }
38 |     }
39 | 
40 |     @Override
41 |     public void close() {
42 |     }
43 | }
45 |
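A minimal consumer-side sketch; the broker address, group id, and topic are assumptions. Because the target Class is a constructor argument, the deserializer is passed to the consumer directly rather than through the value.deserializer property:

[source,java]
----
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class KafkaJsonDeserializerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        try (KafkaConsumer<String, JsonNode> consumer = new KafkaConsumer<>(
                props, new StringDeserializer(), new KafkaJsonDeserializer<>(JsonNode.class))) {
            consumer.subscribe(List.of("my-topic"));
            ConsumerRecords<String, JsonNode> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(r -> System.out.println(r.value()));
        }
    }
}
----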
--------------------------------------------------------------------------------
/.azure/scripts/docu-push-to-website.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | echo "Build reason: ${BUILD_REASON}"
6 | echo "Source branch: ${BRANCH}"
7 |
8 | echo "$GITHUB_DEPLOY_KEY" | base64 -d > github_deploy_key
9 | chmod 600 github_deploy_key
10 | eval "$(ssh-agent -s)"
11 | ssh-add github_deploy_key
12 |
13 | git clone git@github.com:strimzi/strimzi.github.io.git /tmp/website
14 | rm -rf /tmp/website/docs/bridge/in-development/images
15 | rm -rf /tmp/website/docs/bridge/in-development/full/images
16 | cp -v documentation/htmlnoheader/bridge.html /tmp/website/docs/bridge/in-development/bridge.html
17 | cp -v documentation/html/bridge.html /tmp/website/docs/bridge/in-development/full/bridge.html
18 | cp -vrL documentation/htmlnoheader/images /tmp/website/docs/bridge/in-development/images
19 | cp -vrL documentation/htmlnoheader/images /tmp/website/docs/bridge/in-development/full/images
20 |
21 | pushd /tmp/website
22 |
23 | if [[ -z $(git status -s) ]]; then
24 | echo "No changes to the output on this push; exiting."
25 | exit 0
26 | fi
27 |
28 | git config user.name "Strimzi CI"
29 | git config user.email "ci@strimzi.io"
30 |
31 | git add -A
32 | git commit -s -m "Update Kafka Bridge documentation (Commit ${COMMIT})" --allow-empty
33 | git push origin main
34 |
35 | popd
36 |
--------------------------------------------------------------------------------
/config/log4j2.properties:
--------------------------------------------------------------------------------
1 | #
2 | # The logging properties used
3 | #
4 | name = BridgeConfig
5 |
6 | appender.console.type = Console
7 | appender.console.name = STDOUT
8 | appender.console.layout.type = PatternLayout
9 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %highlight{%-5p} [%t] %c{1}:%L - %m%n
10 |
11 | rootLogger.level = INFO
12 | rootLogger.appenderRefs = console
13 | rootLogger.appenderRef.console.ref = STDOUT
14 | rootLogger.additivity = false
15 |
16 | logger.bridge.name = io.strimzi.kafka.bridge
17 | logger.bridge.level = INFO
18 | logger.bridge.appenderRefs = console
19 | logger.bridge.appenderRef.console.ref = STDOUT
20 | logger.bridge.additivity = false
21 |
22 | # HTTP OpenAPI specific logging levels (default is INFO)
23 | # Logging healthy and ready endpoints is very verbose because of Kubernetes health checking.
24 | logger.healthy.name = http.openapi.operation.healthy
25 | logger.healthy.level = WARN
26 | logger.ready.name = http.openapi.operation.ready
27 | logger.ready.level = WARN
28 |
29 | # Reduce verbosity of RouterBuilderImpl warnings for unimplemented OpenAPI endpoints because bridge servers share the same OpenAPI contract with different endpoints implemented.
30 | logger.vertx.name = io.vertx.ext.web.openapi.router.impl.RouterBuilderImpl
31 | logger.vertx.level = ERROR
--------------------------------------------------------------------------------
/documentation/assemblies/assembly-http-bridge-config.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // bridge.adoc
4 |
5 | [id='assembly-http-bridge-config-{context}']
6 | = HTTP Bridge configuration
7 |
8 | [role="_abstract"]
9 | Configure a deployment of the HTTP Bridge with Kafka-related properties and specify the HTTP connection details needed to interact with Kafka.
10 | Additionally, enable metrics in Prometheus format using either the https://github.com/prometheus/jmx_exporter[Prometheus JMX Exporter] or the https://github.com/strimzi/metrics-reporter[Strimzi Metrics Reporter].
11 | You can also use configuration properties to enable and use distributed tracing with the HTTP Bridge.
12 | Distributed tracing allows you to track the progress of transactions between applications in a distributed system.
13 |
14 | NOTE: Use the `KafkaBridge` resource to configure properties when you are xref:overview-components-running-http-bridge-cluster-{context}[running the HTTP Bridge on Kubernetes].
15 |
16 | include::modules/proc-configuring-http-bridge.adoc[leveloffset=+1]
17 | include::modules/proc-configuring-http-bridge-jmx-metrics.adoc[leveloffset=+1]
18 | include::modules/proc-configuring-http-bridge-smr-metrics.adoc[leveloffset=+1]
19 | include::modules/proc-configuring-http-bridge-tracing.adoc[leveloffset=+1]
20 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/tracing/SpanHandle.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.tracing;
7 |
8 | import io.vertx.ext.web.RoutingContext;
9 | import org.apache.kafka.clients.producer.ProducerRecord;
10 |
11 | /**
12 | * Span handle, an abstraction over actual span implementation.
13 | */
14 | public interface SpanHandle<K, V> {
15 |     /**
16 |      * Inject tracing info into underlying span from Kafka producer record.
17 |      *
18 |      * @param record Kafka producer record to extract tracing info
19 |      */
20 |     void inject(ProducerRecord<K, V> record);
21 | 
22 |     /**
23 |      * Inject tracing info into underlying span from Vert.x routing context.
24 |      *
25 |      * @param routingContext Vert.x routing context to extract tracing info
26 |      */
27 |     void inject(RoutingContext routingContext);
28 | 
29 |     /**
30 |      * Finish underlying span.
31 |      *
32 |      * @param code response code
33 |      */
34 |     void finish(int code);
35 | 
36 |     /**
37 |      * Finish underlying span.
38 |      *
39 |      * @param code response code
40 |      * @param cause exception cause
41 |      */
42 |     void finish(int code, Throwable cause);
43 | }
44 |
--------------------------------------------------------------------------------
/.azure/templates/jobs/build_docs.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'build_docs'
3 | displayName: 'Build Docs'
4 |     # Strategy for the job
5 |     strategy:
5 | matrix:
6 | 'java-17':
7 | image: 'Ubuntu-22.04'
8 | jdk_version: '17'
10 | # Set timeout for jobs
11 | timeoutInMinutes: 60
12 | # Base system
13 | pool:
14 | vmImage: $(image)
15 | # Variables
16 | variables:
17 | MVN_CACHE_FOLDER: $(HOME)/.m2/repository
18 | MVN_ARGS: '-e -V -B'
19 | # Pipeline steps
20 | steps:
21 | # Get cached Maven repository
22 | - template: "../steps/maven_cache.yaml"
23 | - template: '../steps/prerequisites/install_java.yaml'
24 | parameters:
25 | JDK_VERSION: $(jdk_version)
26 | - template: '../steps/prerequisites/install_asciidoc.yaml'
27 | - bash: "make docu_html docu_htmlnoheader"
28 | displayName: "Build docs"
29 | env:
30 | MVN_ARGS: "-e -V -B"
31 | # We have to TAR the target directory to maintain the permissions of
32 | # the files which would otherwise change when downloading the artifact
33 | - bash: tar -cvpf documentation.tar ./documentation
34 | displayName: "Tar the documentation directory"
35 | - publish: $(System.DefaultWorkingDirectory)/documentation.tar
36 | artifact: Documentation
37 | displayName: "Store the documentation tar"
--------------------------------------------------------------------------------
/config/application.properties:
--------------------------------------------------------------------------------
1 | #Bridge related settings
2 | bridge.id=my-bridge
3 |
4 | # uncomment the following line to enable Prometheus JMX Exporter, check the Kafka Bridge documentation for more details
5 | #bridge.metrics=jmxPrometheusExporter
6 | # optionally, set the file path of your custom configuration
7 | #bridge.metrics.exporter.config.path=/path/to/my-exporter-config.yaml
8 |
9 | # uncomment the following line to enable Strimzi Metrics Reporter, check the Kafka Bridge documentation for more details
10 | #bridge.metrics=strimziMetricsReporter
11 | # optionally, filter the exposed metrics of all internal Kafka clients using a comma separated list of regexes
12 | #kafka.prometheus.metrics.reporter.allowlist=.*
13 |
14 | # uncomment the following line (bridge.tracing) to enable OpenTelemetry tracing, check the documentation for more details
15 | #bridge.tracing=opentelemetry
16 |
17 | #Apache Kafka common
18 | kafka.bootstrap.servers=localhost:9092
19 |
20 | #Apache Kafka producer
21 | kafka.producer.acks=1
22 |
23 | #Apache Kafka consumer
24 | kafka.consumer.auto.offset.reset=earliest
25 |
26 | #HTTP related settings
27 | http.host=0.0.0.0
28 | http.port=8080
29 | #Enable CORS
30 | http.cors.enabled=false
31 | http.cors.allowedOrigins=*
32 | http.cors.allowedMethods=GET,POST,PUT,DELETE,OPTIONS,PATCH
33 |
34 | #Enable consumer
35 | http.consumer.enabled=true
36 |
37 | #Enable producer
38 | http.producer.enabled=true
39 |
--------------------------------------------------------------------------------
/documentation/modules/con-securing-http-interface.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id='con-securing-http-interface-{context}']
6 | = Securing the HTTP Bridge HTTP interface
7 |
8 | [role="_abstract"]
9 | By default, connections between HTTP clients and the HTTP Bridge are not encrypted, but you can configure a TLS-encrypted connection through the bridge xref:proc-configuring-http-bridge-{context}[properties file].
10 | If the HTTP Bridge is configured with TLS encryption, clients must connect to the bridge using HTTPS instead of HTTP.
11 |
12 | You can enable TLS encryption for all HTTP Bridge endpoints except management endpoints such as `/ready`, `/healthy` and `/metrics`.
13 | These endpoints are typically used only by internal clients and requests to them must use HTTP.
14 |
15 | Authentication between HTTP clients and the HTTP Bridge is not supported directly by the HTTP Bridge.
16 | You can combine the HTTP Bridge with the following tools to secure it further:
17 |
18 | * Network policies and firewalls that define which pods can access the HTTP Bridge
19 | * Reverse proxies (for example, OAuth 2.0)
20 | * API gateways
21 |
22 | If you use any of these tools, for example a reverse proxy to add authentication between HTTP clients and the HTTP Bridge, configure the proxy with TLS to encrypt the connections between the HTTP clients and the proxy.
--------------------------------------------------------------------------------
/documentation/book/api/snippet/consumers/{groupid}/instances/{name}/records/GET/http-response.adoc:
--------------------------------------------------------------------------------
1 | ==== Example HTTP response
2 |
3 | ===== Response 200
4 | Example response for the JSON embedded format:
5 | [source,json]
5 | ----
6 | [ {
7 | "topic" : "topic",
8 | "key" : "key1",
9 | "value" : {
10 | "foo" : "bar"
11 | },
12 | "partition" : 0,
13 | "offset" : 2
14 | }, {
15 | "topic" : "topic",
16 | "key" : "key2",
17 | "value" : [ "foo2", "bar2" ],
18 | "partition" : 1,
19 | "offset" : 3
20 | } ]
21 | ----
22 |
23 | Example response for the binary embedded format:
24 | [source,json]
24 | ----
25 | [
26 | {
27 | "topic": "test",
28 | "key": "a2V5",
29 | "value": "Y29uZmx1ZW50",
30 | "partition": 1,
31 |     "offset": 100
32 | },
33 | {
34 | "topic": "test",
35 | "key": "a2V5",
36 | "value": "a2Fma2E=",
37 | "partition": 2,
38 |     "offset": 101
39 | }
40 | ]
41 | ----
42 |
43 |
44 | ===== Response 404
45 | [source,json]
46 | ----
47 | {
48 | "error_code" : 404,
49 | "message" : "The specified consumer instance was not found."
50 | }
51 | ----
52 |
53 |
54 | ===== Response 406
55 | [source,json]
56 | ----
57 | {
58 | "error_code" : 406,
59 | "message" : "The `format` used in the consumer creation request does not match the embedded format in the Accept header of this request."
60 | }
61 | ----
62 |
63 |
64 | ===== Response 422
65 | [source,json]
66 | ----
67 | {
68 | "error_code" : 422,
69 | "message" : "Response exceeds the maximum number of bytes the consumer can receive"
70 | }
71 | ----
--------------------------------------------------------------------------------
/.azure/templates/jobs/deploy_java.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'deploy_java'
3 | displayName: 'Deploy Java'
4 | # Strategy for the job
5 | strategy:
6 | matrix:
7 | 'java-17':
8 | image: 'Ubuntu-22.04'
9 | jdk_version: '17'
10 | main_build: 'true'
11 | # Set timeout for jobs
12 | timeoutInMinutes: 60
13 | # Base system
14 | pool:
15 | vmImage: 'Ubuntu-22.04'
16 | # Pipeline steps
17 | steps:
18 | - template: '../steps/prerequisites/install_java.yaml'
19 | - task: DownloadPipelineArtifact@2
20 | inputs:
21 | source: '${{ parameters.artifactSource }}'
22 | artifact: Binary
23 | path: $(System.DefaultWorkingDirectory)/
24 | project: '${{ parameters.artifactProject }}'
25 | pipeline: '${{ parameters.artifactPipeline }}'
26 | runVersion: '${{ parameters.artifactRunVersion }}'
27 | runId: '${{ parameters.artifactRunId }}'
28 | - bash: tar -xvf target.tar
29 | displayName: "Untar the target directory"
30 | - bash: "./.azure/scripts/push-to-central.sh"
31 | env:
32 | BUILD_REASON: $(Build.Reason)
33 | BRANCH: $(Build.SourceBranch)
34 | GPG_PASSPHRASE: $(GPG_PASSPHRASE)
35 | GPG_SIGNING_KEY: $(GPG_SIGNING_KEY)
36 | CENTRAL_USERNAME: $(CENTRAL_USERNAME)
37 | CENTRAL_PASSWORD: $(CENTRAL_PASSWORD)
38 | displayName: "Deploy Java artifacts"
--------------------------------------------------------------------------------
/.azure/templates/jobs/build_container.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'build_container'
3 | displayName: 'Build'
4 | # Strategy for the job
5 | strategy:
6 | matrix:
7 | ${{ each arch in parameters.architectures }}:
8 | ${{ arch }}:
9 | arch: ${{ arch }}
10 | # Set timeout for jobs
11 | timeoutInMinutes: 60
12 | # Base system
13 | pool:
14 | vmImage: 'Ubuntu-22.04'
15 | # Pipeline steps
16 | steps:
17 | - template: '../steps/prerequisites/install_docker.yaml'
18 | - task: DownloadPipelineArtifact@2
19 | inputs:
20 | source: '${{ parameters.artifactSource }}'
21 | artifact: Binary
22 | path: $(System.DefaultWorkingDirectory)/
23 | project: '${{ parameters.artifactProject }}'
24 | pipeline: '${{ parameters.artifactPipeline }}'
25 | runVersion: '${{ parameters.artifactRunVersion }}'
26 | runId: '${{ parameters.artifactRunId }}'
27 | - bash: tar -xvf target.tar
28 | displayName: "Untar the target directory"
29 | - bash: "make docker_build docker_save"
30 | env:
31 | DOCKER_BUILDKIT: 1
32 | BUILD_REASON: $(Build.Reason)
33 | BRANCH: $(Build.SourceBranch)
34 | DOCKER_REGISTRY: "quay.io"
35 | DOCKER_ORG: "strimzi"
36 | DOCKER_ARCHITECTURE: $(arch)
37 | displayName: "Build container - $(arch)"
38 | - publish: $(System.DefaultWorkingDirectory)/kafka-bridge-$(arch).tar.gz
39 | artifact: Container-$(arch)
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/utils/Utils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.utils;
7 |
8 | import java.io.BufferedReader;
9 | import java.io.FileNotFoundException;
10 | import java.io.FileReader;
11 | import java.io.IOException;
12 |
13 | public class Utils {
14 |
15 |     /**
16 |      * Retrieve the Kafka Bridge version from a text config file.
17 |      *
18 |      * @param releaseFile The name of the file that contains the release version
19 |      * @return The version of the Kafka Bridge
20 |      * @throws Exception if the file cannot be read or does not contain a version
21 |      */
22 |     public static String getKafkaBridgeVersionFromFile(String releaseFile) throws Exception {
23 |         String versionFromFile;
24 | 
25 |         // try-with-resources makes sure the reader is closed on all paths
26 |         try (BufferedReader bufferedReader = new BufferedReader(new FileReader(releaseFile))) {
27 |             versionFromFile = bufferedReader.readLine();
28 |         } catch (FileNotFoundException e) {
29 |             throw new Exception("File not found: " + releaseFile);
30 |         } catch (IOException e) {
31 |             throw new Exception("Unable to open file: " + releaseFile);
32 |         }
33 | 
34 |         if (versionFromFile == null || versionFromFile.isEmpty()) {
35 |             throw new Exception("Unable to get version from file: " + releaseFile);
36 |         }
37 |         return versionFromFile;
38 |     }
42 | }
43 |
--------------------------------------------------------------------------------
/documentation/modules/proc-installing-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-installing-http-bridge-{context}']
6 | = Installing the HTTP Bridge
7 |
8 | [role="_abstract"]
9 | Use the script provided with the HTTP Bridge archive to install the HTTP Bridge.
10 | The `application.properties` file provided with the installation archive provides default configuration settings.
11 |
12 | The following default property values configure the HTTP Bridge to listen for requests on port 8080.
13 |
14 | .Default configuration properties
15 | [source,properties]
16 | ----
17 | http.host=0.0.0.0
18 | http.port=8080
19 | ----
20 |
21 | .Prerequisites
22 |
23 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded]
24 |
25 | .Procedure
26 |
27 | . If you have not already done so, unzip the HTTP Bridge installation archive to any directory.
28 |
29 | . Run the HTTP Bridge script using the configuration properties as a parameter:
30 | +
31 | For example:
32 | +
33 | [source,shell,subs="+quotes,attributes"]
34 | ----
35 | ./bin/kafka_bridge_run.sh --config-file=__<path>__/application.properties
36 | ----
37 |
38 | . Check in the log that the HTTP Bridge started successfully.
39 | +
40 | [source,shell]
41 | ----
42 | HTTP Bridge started and listening on port 8080
43 | HTTP Bridge bootstrap servers localhost:9092
44 | ----
45 |
46 | .What to do next
47 |
48 | * xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Produce messages to topics and partitions].
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/SinkTopicSubscription.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 |
7 | package io.strimzi.kafka.bridge;
8 |
9 | /**
10 | * Represents a Topic subscription in the sink bridge endpoint
11 | */
12 | public class SinkTopicSubscription {
13 |
14 | private final String topic;
15 | private final Integer partition;
16 |
17 | /**
18 | * Constructor
19 | *
20 | * @param topic topic to subscribe/assign
21 | * @param partition partition requested as assignment (null if no specific assignment)
22 | */
23 | public SinkTopicSubscription(String topic, Integer partition) {
24 | this.topic = topic;
25 | this.partition = partition;
26 | }
27 |
28 | /**
29 | * Constructor
30 | *
31 | * @param topic topic to subscribe
32 | */
33 | public SinkTopicSubscription(String topic) {
34 | this(topic, null);
35 | }
36 |
37 | /**
38 | * @return topic to subscribe/assign
39 | */
40 | public String getTopic() {
41 | return topic;
42 | }
43 |
44 | /**
45 | * @return partition requested as assignment (null if no specific assignment)
46 | */
47 | public Integer getPartition() {
48 | return partition;
49 | }
50 |
51 | @Override
52 | public String toString() {
53 | return "SinkTopicSubscription(" +
54 | "topic=" + this.topic +
55 | ",partition=" + this.partition +
56 | ")";
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/documentation/modules/proc-bridge-committing-consumer-offsets-to-log.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-bridge-committing-consumer-offsets-to-log-{context}']
6 | = Committing offsets to the log
7 |
8 | [role="_abstract"]
9 | Use the xref:commit[offsets] endpoint to manually commit offsets to the log for all messages received by the HTTP Bridge consumer. This is required because the HTTP Bridge consumer that you created earlier, in xref:proc-creating-http-bridge-consumer-{context}[Creating an HTTP Bridge consumer], was configured with `enable.auto.commit` set to `false`.
10 |
11 | .Procedure
12 |
13 | * Commit offsets to the log for the `bridge-quickstart-consumer`:
14 | +
15 | [source,curl,subs=attributes+]
16 | ----
17 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/offsets
18 | ----
19 | +
20 | Because no request body is submitted, offsets are committed for all the records that have been received by the consumer. Alternatively, the request body can contain an array of offsets (xref:OffsetCommitSeek[OffsetCommitSeek]) that specifies the topics and partitions for which to commit offsets, as in the sketch that follows.
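+
If you prefer to send the commit from Java, the following is a minimal Java 11 `java.net.http` sketch with an explicit body; the topic, partition, and offset values are illustrative:
+
[source,java]
----
// Uses java.net.URI and java.net.http.{HttpClient, HttpRequest, HttpResponse}
HttpRequest request = HttpRequest.newBuilder()
    .uri(URI.create("http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/offsets"))
    .header("Content-Type", "application/vnd.kafka.v2+json")
    .POST(HttpRequest.BodyPublishers.ofString(
        "{\"offsets\":[{\"topic\":\"bridge-quickstart-topic\",\"partition\":0,\"offset\":15}]}"))
    .build();
HttpResponse<Void> response = HttpClient.newHttpClient()
    .send(request, HttpResponse.BodyHandlers.discarding());
System.out.println(response.statusCode()); // 204 on success
----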
21 | +
22 | If the request is successful, the HTTP Bridge returns a `204` (No Content) response code with no body.
23 |
24 | .What to do next
25 |
26 | After committing offsets to the log, try out the endpoints for xref:proc-bridge-seeking-offsets-for-partition-{context}[seeking to offsets].
27 |
28 | [role="_additional-resources"]
29 | .Additional resources
30 |
31 | * xref:commit[POST /consumers/{groupid}/instances/{name}/offsets]
32 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/tracing/NoopTracingHandle.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.tracing;
7 |
8 | import io.strimzi.kafka.bridge.config.BridgeConfig;
9 | import io.vertx.ext.web.RoutingContext;
10 | import org.apache.kafka.clients.consumer.ConsumerRecord;
11 | import org.apache.kafka.clients.producer.ProducerRecord;
12 |
13 | import java.util.Properties;
14 |
15 | final class NoopTracingHandle implements TracingHandle {
16 |     @Override
17 |     public String envServiceName() {
18 |         return null;
19 |     }
20 | 
21 |     @Override
22 |     public String serviceName(BridgeConfig config) {
23 |         return null;
24 |     }
25 | 
26 |     @Override
27 |     public void initialize() {
28 |     }
29 | 
30 |     @Override
31 |     public <K, V> SpanHandle<K, V> span(RoutingContext routingContext, String operationName) {
32 |         return new NoopSpanHandle<>();
33 |     }
34 | 
35 |     @Override
36 |     public <K, V> void handleRecordSpan(ConsumerRecord<K, V> record) {
37 |     }
38 | 
39 |     @Override
40 |     public void addTracingPropsToProducerConfig(Properties props) {
41 |     }
42 | 
43 |     private static final class NoopSpanHandle<K, V> implements SpanHandle<K, V> {
44 |         @Override
45 |         public void inject(ProducerRecord<K, V> record) {
46 |         }
47 | 
48 |         @Override
49 |         public void inject(RoutingContext routingContext) {
50 |         }
51 | 
52 |         @Override
53 |         public void finish(int code) {
54 |         }
55 | 
56 |         @Override
57 |         public void finish(int code, Throwable cause) {
58 |         }
59 |     }
60 | }
61 |
--------------------------------------------------------------------------------
/src/main/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <assembly xmlns="http://maven.apache.org/ASSEMBLY/2.1.0"
3 |           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |           xsi:schemaLocation="http://maven.apache.org/ASSEMBLY/2.1.0 http://maven.apache.org/xsd/assembly-2.1.0.xsd">
5 |     <id>assembly</id>
6 |     <includeBaseDirectory>true</includeBaseDirectory>
7 |     <formats>
8 |         <format>tar.gz</format>
9 |         <format>zip</format>
10 |         <format>dir</format>
11 |     </formats>
12 |     <fileSets>
13 |         <fileSet>
14 |             <directory>${project.basedir}/config</directory>
15 |             <outputDirectory>/config</outputDirectory>
16 |             <fileMode>0644</fileMode>
17 |         </fileSet>
18 |         <fileSet>
19 |             <directory>${project.basedir}/bin</directory>
20 |             <outputDirectory>/bin</outputDirectory>
21 |             <fileMode>0755</fileMode>
22 |         </fileSet>
23 |         <fileSet>
24 |             <directory>${project.basedir}</directory>
25 |             <includes>
26 |                 <include>README*</include>
27 |                 <include>LICENSE*</include>
28 |                 <include>CHANGELOG*</include>
29 |             </includes>
30 |             <fileMode>0644</fileMode>
31 |         </fileSet>
32 |     </fileSets>
33 |     <dependencySets>
34 |         <dependencySet>
35 |             <scope>runtime</scope>
36 |             <outputDirectory>/libs</outputDirectory>
37 |             <fileMode>0644</fileMode>
38 |             <outputFileNameMapping>${artifact.groupId}.${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension}</outputFileNameMapping>
39 |         </dependencySet>
40 |     </dependencySets>
41 | </assembly>
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/LoggingPartitionsRebalance.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 |
8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
9 | import org.apache.kafka.common.TopicPartition;
10 | import org.apache.logging.log4j.LogManager;
11 | import org.apache.logging.log4j.Logger;
12 |
13 | import java.util.Collection;
14 |
15 | /**
16 |  * No-op implementation for handling partitions being assigned or revoked on rebalancing.
17 |  * It just logs the partitions if debug logging is enabled
18 | */
19 | public class LoggingPartitionsRebalance implements ConsumerRebalanceListener {
20 | private static final Logger LOGGER = LogManager.getLogger(LoggingPartitionsRebalance.class);
21 |
22 | @Override
23 |     public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
24 | LOGGER.debug("Partitions revoked {}", partitions.size());
25 |
26 | if (LOGGER.isDebugEnabled() && !partitions.isEmpty()) {
27 | for (TopicPartition partition : partitions) {
28 | LOGGER.debug("topic {} partition {}", partition.topic(), partition.partition());
29 | }
30 | }
31 | }
32 |
33 | @Override
34 |     public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
35 | LOGGER.debug("Partitions assigned {}", partitions.size());
36 |
37 | if (LOGGER.isDebugEnabled() && !partitions.isEmpty()) {
38 | for (TopicPartition partition : partitions) {
39 | LOGGER.debug("topic {} partition {}", partition.topic(), partition.partition());
40 | }
41 | }
42 | }
43 | }
44 |
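A small usage sketch (the topic name and consumer instance are assumptions): the listener is passed to KafkaConsumer.subscribe so that rebalances are logged at debug level.

[source,java]
----
// Assumes an existing org.apache.kafka.clients.consumer.KafkaConsumer<String, String> named consumer
consumer.subscribe(java.util.List.of("my-topic"), new LoggingPartitionsRebalance());
----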
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/config/KafkaAdminConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.config;
7 |
8 | import java.util.Map;
9 | import java.util.stream.Collectors;
10 |
11 | /**
12 | * Apache Kafka admin related configuration
13 | */
14 | public class KafkaAdminConfig extends AbstractConfig {
15 |
16 | /** Prefix for administration related configuration parameters */
17 | public static final String KAFKA_ADMIN_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "admin.";
18 |
19 | /**
20 | * Constructor
21 | *
22 | * @param config configuration parameters map
23 | */
24 |     private KafkaAdminConfig(Map<String, Object> config) {
25 | super(config);
26 | }
27 |
28 | /**
29 | * Loads Kafka admin related configuration parameters from a related map
30 | *
31 |      * @param map map from which to load configuration parameters
32 | * @return Kafka admin related configuration
33 | */
34 |     public static KafkaAdminConfig fromMap(Map<String, Object> map) {
35 | // filter the Kafka admin related configuration parameters, stripping the prefix as well
36 | return new KafkaAdminConfig(map.entrySet().stream()
37 | .filter(e -> e.getKey().startsWith(KafkaAdminConfig.KAFKA_ADMIN_CONFIG_PREFIX))
38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaAdminConfig.KAFKA_ADMIN_CONFIG_PREFIX.length()), Map.Entry::getValue)));
39 | }
40 |
41 | @Override
42 | public String toString() {
43 | return "KafkaAdminConfig(" +
44 | "config=" + this.config +
45 | ")";
46 | }
47 | }
48 |
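A sketch of the prefix filtering performed by fromMap; the property names and values are illustrative:

[source,java]
----
import java.util.Map;

public class KafkaAdminConfigExample {
    public static void main(String[] args) {
        Map<String, Object> appConfig = Map.of(
                "kafka.bootstrap.servers", "localhost:9092",   // no "kafka.admin." prefix: dropped
                "kafka.admin.request.timeout.ms", "30000");    // kept, with the prefix stripped

        KafkaAdminConfig adminConfig = KafkaAdminConfig.fromMap(appConfig);
        // Prints: KafkaAdminConfig(config={request.timeout.ms=30000})
        System.out.println(adminConfig);
    }
}
----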
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/services/BaseService.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http.services;
6 |
7 | import io.vertx.core.buffer.Buffer;
8 | import io.vertx.core.json.JsonObject;
9 | import io.vertx.ext.web.client.HttpRequest;
10 | import io.vertx.ext.web.client.WebClient;
11 | import io.vertx.ext.web.codec.BodyCodec;
12 |
13 | public class BaseService {
14 |
15 | WebClient webClient;
16 | private static BaseService baseService;
17 |
18 | static final int HTTP_REQUEST_TIMEOUT = 60;
19 |
20 | // for request configuration
21 | private static final long RESPONSE_TIMEOUT = 60000L;
22 |
23 | BaseService(WebClient webClient) {
24 | this.webClient = webClient;
25 | }
26 |
27 | public static synchronized BaseService getInstance(WebClient webClient) {
28 | if (baseService == null || webClient != baseService.webClient) {
29 | baseService = new BaseService(webClient);
30 | }
31 | return baseService;
32 | }
33 |
34 |     // HTTP methods with a configured response timeout
35 |     public HttpRequest<JsonObject> postRequest(String requestURI) {
36 |         return webClient.post(requestURI)
37 |                 .timeout(RESPONSE_TIMEOUT)
38 |                 .as(BodyCodec.jsonObject());
39 |     }
40 | 
41 |     public HttpRequest<Buffer> getRequest(String requestURI) {
42 |         return webClient.get(requestURI)
43 |                 .timeout(RESPONSE_TIMEOUT);
44 |     }
45 | 
46 |     public HttpRequest<Buffer> deleteRequest(String requestURI) {
47 |         return webClient.delete(requestURI)
48 |                 .timeout(RESPONSE_TIMEOUT);
49 |     }
50 | }
51 |
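A usage sketch, assuming an existing Vert.x instance and a bridge listening on localhost:8080:

[source,java]
----
import io.vertx.core.Vertx;
import io.vertx.ext.web.client.WebClient;
import io.vertx.ext.web.client.WebClientOptions;

// Inside a test or setup method:
Vertx vertx = Vertx.vertx();
WebClient webClient = WebClient.create(vertx,
        new WebClientOptions().setDefaultHost("localhost").setDefaultPort(8080));
BaseService service = BaseService.getInstance(webClient);

service.getRequest("/topics")
        .send(ar -> {
            if (ar.succeeded()) {
                System.out.println(ar.result().bodyAsString());
            }
        });
----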
--------------------------------------------------------------------------------
/documentation/assemblies/assembly-http-bridge-quickstart.adoc:
--------------------------------------------------------------------------------
1 | // This assembly is included in the following assemblies:
2 | //
3 | // bridge.adoc
4 |
5 | [id='assembly-http-bridge-quickstart-{context}']
6 | = HTTP Bridge quickstart
7 |
8 | [role="_abstract"]
9 | Use this quickstart to try out the HTTP Bridge in your local development environment.
10 |
11 | You will learn how to do the following:
12 |
13 | * Produce messages to topics and partitions in your Kafka cluster
14 | * Create an HTTP Bridge consumer
15 | * Perform basic consumer operations, such as subscribing the consumer to topics and retrieving the messages that you produced
16 |
17 | In this quickstart, you produce and consume messages in JSON format.
18 | HTTP requests are formatted as curl commands that you can copy and paste to your terminal.
19 | 
20 | Ensure you have the prerequisites, and then follow the tasks in the order provided in this chapter.
22 |
23 | .Prerequisites for the quickstart
24 |
25 | * A Kafka cluster is running on the host machine.
26 |
27 | include::modules/proc-downloading-http-bridge.adoc[leveloffset=+1]
28 |
29 | include::modules/proc-installing-http-bridge.adoc[leveloffset=+1]
30 |
31 | include::modules/proc-producing-messages-from-bridge-topics-partitions.adoc[leveloffset=+1]
32 |
33 | include::modules/proc-creating-http-bridge-consumer.adoc[leveloffset=+1]
34 |
35 | include::modules/proc-bridge-subscribing-consumer-topics.adoc[leveloffset=+1]
36 |
37 | include::modules/proc-bridge-retrieving-latest-messages-from-consumer.adoc[leveloffset=+1]
38 |
39 | include::modules/proc-bridge-committing-consumer-offsets-to-log.adoc[leveloffset=+1]
40 |
41 | include::modules/proc-bridge-seeking-offsets-for-partition.adoc[leveloffset=+1]
42 |
43 | include::modules/proc-bridge-deleting-consumer.adoc[leveloffset=+1]
44 |
--------------------------------------------------------------------------------
/documentation/modules/con-loggers-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id='con-loggers-http-bridge-{context}']
8 | = Configuring loggers for the HTTP Bridge
9 |
10 | [role="_abstract"]
11 | You can set a different log level for each operation that is defined by the HTTP Bridge OpenAPI specification.
12 |
13 | Each operation has a corresponding API endpoint through which the bridge receives requests from HTTP clients.
14 | You can change the log level on each endpoint to produce more or less fine-grained logging information about the incoming and outgoing HTTP requests.
15 |
16 | Loggers are defined in the `log4j2.properties` file, which has the following default configuration for `healthy` and `ready` endpoints:
17 |
18 | [source,properties]
19 | ----
20 | logger.healthy.name = http.openapi.operation.healthy
21 | logger.healthy.level = WARN
22 | logger.ready.name = http.openapi.operation.ready
23 | logger.ready.level = WARN
24 | ----
24 |
25 | The log level of all other operations is set to `INFO` by default.
26 | Loggers are formatted as follows:
27 |
28 | [source,properties,subs=+quotes]
29 | ----
30 | logger.__<operation_id>__.name = http.openapi.operation.__<operation_id>__
31 | logger.__<operation_id>__.level = __<LOG_LEVEL>__
32 | ----
33 | 
34 | Where `<operation_id>` is the identifier of the specific operation.
35 |
36 | .List of operations defined by the OpenAPI specification
37 | * `createConsumer`
38 | * `deleteConsumer`
39 | * `subscribe`
40 | * `unsubscribe`
41 | * `poll`
42 | * `assign`
43 | * `commit`
44 | * `send`
45 | * `sendToPartition`
46 | * `seekToBeginning`
47 | * `seekToEnd`
48 | * `seek`
49 | * `healthy`
50 | * `ready`
51 | * `openapi`
52 |
53 | Where `<LOG_LEVEL>` is the logging level as defined by log4j2 (for example, `INFO` or `DEBUG`).
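For example, to produce fine-grained logging for the `send` operation, you might set (illustrative values):

[source,properties]
----
logger.send.name = http.openapi.operation.send
logger.send.level = DEBUG
----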
54 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/config/KafkaConsumerConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.config;
7 |
8 | import java.util.Map;
9 | import java.util.stream.Collectors;
10 |
11 | /**
12 | * Apache Kafka consumer related configuration
13 | */
14 | public class KafkaConsumerConfig extends AbstractConfig {
15 |
16 | /** Prefix for consumer related configuration parameters */
17 | public static final String KAFKA_CONSUMER_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "consumer.";
18 |
19 | /**
20 | * Constructor
21 | *
22 | * @param config configuration parameters map
23 | */
24 |     private KafkaConsumerConfig(Map<String, Object> config) {
25 | super(config);
26 | }
27 |
28 | /**
29 | * Loads Kafka consumer related configuration parameters from a related map
30 | *
31 |      * @param map map from which to load configuration parameters
32 | * @return Kafka consumer related configuration
33 | */
34 |     public static KafkaConsumerConfig fromMap(Map<String, Object> map) {
35 | // filter the Kafka consumer related configuration parameters, stripping the prefix as well
36 | return new KafkaConsumerConfig(map.entrySet().stream()
37 | .filter(e -> e.getKey().startsWith(KafkaConsumerConfig.KAFKA_CONSUMER_CONFIG_PREFIX))
38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaConsumerConfig.KAFKA_CONSUMER_CONFIG_PREFIX.length()), Map.Entry::getValue)));
39 | }
40 |
41 | @Override
42 | public String toString() {
43 | return "KafkaConsumerConfig(" +
44 | "config=" + this.config +
45 | ")";
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/config/KafkaProducerConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.config;
7 |
8 | import java.util.Map;
9 | import java.util.stream.Collectors;
10 |
11 | /**
12 | * Apache Kafka producer related configuration
13 | */
14 | public class KafkaProducerConfig extends AbstractConfig {
15 |
16 | /** Prefix for producer related configuration parameters */
17 | public static final String KAFKA_PRODUCER_CONFIG_PREFIX = KafkaConfig.KAFKA_CONFIG_PREFIX + "producer.";
18 |
19 | /**
20 | * Constructor
21 | *
22 | * @param config configuration parameters map
23 | */
24 |     private KafkaProducerConfig(Map<String, Object> config) {
25 | super(config);
26 | }
27 |
28 | /**
29 | * Loads Kafka producer related configuration parameters from a related map
30 | *
31 |      * @param map map from which to load configuration parameters
32 | * @return Kafka producer related configuration
33 | */
34 |     public static KafkaProducerConfig fromMap(Map<String, Object> map) {
35 | // filter the Kafka producer related configuration parameters, stripping the prefix as well
36 | return new KafkaProducerConfig(map.entrySet().stream()
37 | .filter(e -> e.getKey().startsWith(KafkaProducerConfig.KAFKA_PRODUCER_CONFIG_PREFIX))
38 | .collect(Collectors.toMap(e -> e.getKey().substring(KafkaProducerConfig.KAFKA_PRODUCER_CONFIG_PREFIX.length()), Map.Entry::getValue)));
39 | }
40 |
41 | @Override
42 | public String toString() {
43 | return "KafkaProducerConfig(" +
44 | "config=" + this.config +
45 | ")";
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/documentation/modules/con-overview-components-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id="overview-components-http-bridge_{context}"]
6 | = HTTP Bridge interface
7 |
8 | [role="_abstract"]
9 | The HTTP Bridge provides a RESTful interface that allows HTTP-based clients to interact with a Kafka cluster.
10 | It offers the advantages of a web API connection to Strimzi, without the need for client applications to interpret the Kafka protocol.
11 |
12 | The API has two main resources — `consumers` and `topics` — that are exposed and made accessible through endpoints to interact with consumers and producers in your Kafka cluster. The resources relate only to the HTTP Bridge, not the consumers and producers connected directly to Kafka.
13 |
14 | == HTTP requests
15 | The HTTP Bridge supports HTTP requests to a Kafka cluster, with methods to:
16 |
17 | * Send messages to a topic.
18 | * Retrieve messages from topics.
19 | * Retrieve a list of partitions for a topic.
20 | * Create and delete consumers.
21 | * Subscribe consumers to topics, so that they start receiving messages from those topics.
22 | * Retrieve a list of topics that a consumer is subscribed to.
23 | * Unsubscribe consumers from topics.
24 | * Assign partitions to consumers.
25 | * Commit a list of consumer offsets.
26 | * Seek on a partition, so that a consumer starts receiving messages from the first or last offset position, or a given offset position.
27 |
28 | The methods provide JSON responses and HTTP response code error handling.
29 | Messages can be sent in JSON, binary, or text formats.
30 |
31 | Clients can produce and consume messages without the requirement to use the native Kafka protocol.
32 |
33 | [role="_additional-resources"]
34 | .Additional resources
35 | * xref:api_reference-{context}[HTTP Bridge API reference]
36 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/tracing/TracingHandle.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.tracing;
7 |
8 | import io.strimzi.kafka.bridge.config.BridgeConfig;
9 | import io.vertx.ext.web.RoutingContext;
10 | import org.apache.kafka.clients.consumer.ConsumerRecord;
11 |
12 | import java.util.Properties;
13 |
14 | /**
15 | * Simple interface to abstract tracing
16 | */
17 | public interface TracingHandle {
18 |     /**
19 |      * Tracing env var service name.
20 |      *
21 |      * @return tracing env var service name
22 |      */
23 |     String envServiceName();
24 | 
25 |     /**
26 |      * Extract service name from bridge config.
27 |      *
28 |      * @param config the bridge config
29 |      * @return bridge's service name
30 |      */
31 |     String serviceName(BridgeConfig config);
32 | 
33 |     /**
34 |      * Initialize tracing.
35 |      */
36 |     void initialize();
37 | 
38 |     /**
39 |      * Build span handle.
40 |      *
41 |      * @param <K> key type
42 |      * @param <V> value type
43 |      * @param routingContext Vert.x routing context
44 |      * @param operationName current operation name
45 |      * @return span handle
46 |      */
47 |     <K, V> SpanHandle<K, V> span(RoutingContext routingContext, String operationName);
48 | 
49 |     /**
50 |      * Extract span info from Kafka consumer record.
51 |      *
52 |      * @param <K> key type
53 |      * @param <V> value type
54 |      * @param record Kafka consumer record
55 |      */
56 |     <K, V> void handleRecordSpan(ConsumerRecord<K, V> record);
57 | 
58 |     /**
59 |      * Add producer properties, if any.
60 |      *
61 |      * @param props the properties
62 |      */
63 |     void addTracingPropsToProducerConfig(Properties props);
64 | }
65 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/ConsumerInstanceId.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge;
7 | 
8 | import java.util.Objects;
9 | 
8 | /**
9 | * Represents a unique consumer instance made by consumer group and instance name
10 | */
11 | public class ConsumerInstanceId {
12 |
13 | private final String groupId;
14 | private final String instanceId;
15 |
16 | /**
17 | * Consumer
18 | *
19 | * @param groupId the consumer group the Kafka consumer belongs to
20 | * @param instanceId the instance Id of the Kafka consumer
21 | */
22 | public ConsumerInstanceId(String groupId, String instanceId) {
23 | this.groupId = groupId;
24 | this.instanceId = instanceId;
25 | }
26 |
27 |     @Override
28 |     public boolean equals(Object obj) {
29 |         if (this == obj) {
30 |             return true;
31 |         }
32 | 
33 |         if (!(obj instanceof ConsumerInstanceId other)) {
34 |             return false;
35 |         }
36 | 
37 |         // Objects.equals is null-safe and keeps the comparison symmetric
38 |         return Objects.equals(groupId, other.groupId)
39 |                 && Objects.equals(instanceId, other.instanceId);
40 |     }
47 |
48 | @Override
49 | public int hashCode() {
50 | int result = 1;
51 | result = 31 * result + (groupId != null ? groupId.hashCode() : 0);
52 | result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
53 | return result;
54 | }
55 |
56 | @Override
57 | public String toString() {
58 | return "ConsumerInstanceId(" +
59 | "groupId=" + this.groupId +
60 | ", instanceId=" + this.instanceId +
61 | ")";
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/documentation/modules/proc-configuring-http-bridge-smr-metrics.adoc:
--------------------------------------------------------------------------------
1 | [id='proc-configuring-http-bridge-smr-metrics-{context}']
2 | = Configuring Strimzi Metrics Reporter metrics
3 |
4 | [role="_abstract"]
5 | Enable the Strimzi Metrics Reporter to collect HTTP Bridge metrics by setting the `bridge.metrics` option to `strimziMetricsReporter`.
6 |
7 | .Prerequisites
8 |
9 | * xref:proc-downloading-http-bridge-{context}[The HTTP Bridge installation archive is downloaded].
10 |
11 | .Procedure
12 |
13 | . Set the `bridge.metrics` configuration to `strimziMetricsReporter`.
14 | +
15 | .Configuration for enabling metrics
16 | [source,properties]
18 | ----
19 | bridge.metrics=strimziMetricsReporter
20 | ----
21 | +
22 | Optionally, you can configure a comma-separated list of regular expressions to filter exposed metrics using the `kafka.prometheus.metrics.reporter.allowlist` property.
23 | If not configured, a default set of metrics is exposed.
24 |
25 | +
26 | When needed, it is possible to configure the `allowlist` per client type.
27 | For example, by using the `kafka.admin` prefix and setting `kafka.admin.prometheus.metrics.reporter.allowlist=`, all admin client metrics are excluded.
28 | +
29 | You can add any plugin configuration to the HTTP Bridge properties file using the `kafka.`, `kafka.admin.`, `kafka.producer.`, and `kafka.consumer.` prefixes.
30 | If the same property is configured with multiple prefixes, the most specific prefix takes precedence.
32 | For example, `kafka.producer.prometheus.metrics.reporter.allowlist` takes precedence over `kafka.prometheus.metrics.reporter.allowlist`.
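+
For illustration only (the allowlist patterns here are example values, not defaults), a properties file combining these options might look like this:
+
[source,properties]
----
bridge.metrics=strimziMetricsReporter
# base allowlist applied to all clients
kafka.prometheus.metrics.reporter.allowlist=kafka_producer_.*,kafka_consumer_.*
# the more specific prefix takes precedence: exclude all admin client metrics
kafka.admin.prometheus.metrics.reporter.allowlist=
----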
33 |
34 | . Run the HTTP Bridge run script.
35 | +
36 | .Running the HTTP Bridge
37 | [source,shell]
38 | ----
39 | ./bin/kafka_bridge_run.sh --config-file=<path>/application.properties
40 | ----
41 | +
42 | With metrics enabled, you can scrape metrics in Prometheus format from the `/metrics` endpoint of the HTTP Bridge.
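+
For example, assuming the HTTP Bridge is listening on `localhost:8080`:
+
[source,shell]
----
curl http://localhost:8080/metrics
----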
43 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/metrics/StrimziMetricsCollector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.metrics;
7 |
8 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter;
9 | import io.prometheus.metrics.model.registry.PrometheusRegistry;
10 |
11 | import java.io.ByteArrayOutputStream;
12 | import java.io.IOException;
13 | import java.nio.charset.StandardCharsets;
14 |
15 | /**
16 |  * Collect and scrape Strimzi Metrics Reporter metrics in Prometheus format.
17 | */
18 | public class StrimziMetricsCollector extends MetricsCollector {
19 | private final PrometheusRegistry registry;
20 | private final PrometheusTextFormatWriter textFormatter;
21 |
22 | /**
23 | * Constructor.
24 | */
25 | public StrimziMetricsCollector() {
26 | // Prometheus default registry is a singleton, so it is shared with Strimzi Metrics Reporter
27 | this(PrometheusRegistry.defaultRegistry, new PrometheusTextFormatWriter(true));
28 | }
29 |
30 | /**
31 | * Constructor.
32 | *
33 | * @param registry Prometheus collector registry
34 | * @param textFormatter Prometheus text formatter
35 | */
36 | /* test */ StrimziMetricsCollector(PrometheusRegistry registry,
37 | PrometheusTextFormatWriter textFormatter) {
38 | super();
39 | this.registry = registry;
40 | this.textFormatter = textFormatter;
41 | }
42 |
43 | @Override
44 | public String doScrape() {
45 | ByteArrayOutputStream stream = new ByteArrayOutputStream();
46 | try {
47 | textFormatter.write(stream, registry.scrape());
48 | } catch (IOException e) {
49 | throw new RuntimeException(e);
50 | }
51 | return stream.toString(StandardCharsets.UTF_8);
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/.azure/templates/jobs/build_java.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'build_and_test_java'
3 | displayName: 'Build & Test'
4 | # Strategy for the job
5 | strategy:
6 | matrix:
7 | 'java-17':
8 | image: 'Ubuntu-22.04'
9 | jdk_version: '17'
10 | main_build: 'true'
11 | # Set timeout for jobs
12 | timeoutInMinutes: 60
13 | # Base system
14 | pool:
15 | vmImage: $(image)
16 | # Variables
17 | variables:
18 | MVN_CACHE_FOLDER: $(HOME)/.m2/repository
19 | # Pipeline steps
20 | steps:
21 | # Get cached Maven repository
22 | - template: "../steps/maven_cache.yaml"
23 | - template: '../steps/prerequisites/install_java.yaml'
24 | parameters:
25 | JDK_VERSION: $(jdk_version)
26 | - bash: "make java_verify"
27 | displayName: "Build & Test Java"
28 | env:
29 | BUILD_REASON: $(Build.Reason)
30 | BRANCH: $(Build.SourceBranch)
31 | TESTCONTAINERS_RYUK_DISABLED: "TRUE"
32 | TESTCONTAINERS_CHECKS_DISABLE: "TRUE"
33 | MVN_ARGS: "-e -V -B -Dfailsafe.rerunFailingTestsCount=2"
34 | - bash: "make spotbugs"
35 | displayName: "Spotbugs"
36 | env:
37 | MVN_ARGS: "-e -V -B"
38 | # We have to TAR the target directory to maintain the permissions of
39 | # the files which would otherwise change when downloading the artifact
40 | - bash: tar -cvpf target.tar ./target
41 | displayName: "Tar the target directory"
42 | condition: and(succeeded(), eq(variables['main_build'], 'true'))
43 | - publish: $(System.DefaultWorkingDirectory)/target.tar
44 | artifact: Binary
45 | condition: and(succeeded(), eq(variables['main_build'], 'true'))
46 | - task: PublishTestResults@2
47 | inputs:
48 | testResultsFormat: JUnit
49 | testResultsFiles: '**/TEST-*.xml'
50 | testRunTitle: "Unit & Integration tests"
51 | condition: always()
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/services/ProducerService.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http.services;
6 |
7 | import io.strimzi.kafka.bridge.utils.Urls;
8 | import io.vertx.core.json.JsonObject;
9 | import io.vertx.ext.web.client.HttpRequest;
10 | import io.vertx.ext.web.client.WebClient;
11 | import io.vertx.ext.web.codec.BodyCodec;
12 |
13 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
14 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
15 |
16 | public class ProducerService extends BaseService {
17 |
18 | private static ProducerService producerService;
19 |
20 | private ProducerService(WebClient webClient) {
21 | super(webClient);
22 | }
23 |
24 | public static synchronized ProducerService getInstance(WebClient webClient) {
25 | if (producerService == null || webClient != producerService.webClient) {
26 | producerService = new ProducerService(webClient);
27 | }
28 | return producerService;
29 | }
30 |
31 | public HttpRequest<JsonObject> sendRecordsRequest(String topic, JsonObject jsonObject, String bridgeContentType) {
32 | return postRequest(Urls.producerTopic(topic))
33 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(jsonObject.toBuffer().length()))
34 | .putHeader(CONTENT_TYPE.toString(), bridgeContentType)
35 | .as(BodyCodec.jsonObject());
36 | }
37 |
38 | public HttpRequest<JsonObject> sendRecordsToPartitionRequest(String topic, Object partition, JsonObject jsonObject, String bridgeContentType) {
39 | return postRequest(Urls.producerTopicPartition(topic, partition))
40 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(jsonObject.toBuffer().length()))
41 | .putHeader(CONTENT_TYPE.toString(), bridgeContentType)
42 | .as(BodyCodec.jsonObject());
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/converter/MessageConverter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.converter;
7 |
8 | import org.apache.kafka.clients.consumer.ConsumerRecord;
9 | import org.apache.kafka.clients.consumer.ConsumerRecords;
10 | import org.apache.kafka.clients.producer.ProducerRecord;
11 |
12 | import java.util.List;
13 |
14 | /**
15 | * Interface for a message converter between Kafka record and bridge message
16 | */
17 | public interface MessageConverter<K, V, M, C> {
18 |
19 | /**
20 | * Converts a message to a Kafka record
21 | *
22 | * @param kafkaTopic Kafka topic for sending message
23 | * @param partition partition of topic where the messages are sent when partition is specified in the request
24 | * @param message message to convert
25 | * @return Kafka record
26 | */
27 | ProducerRecord<K, V> toKafkaRecord(String kafkaTopic, Integer partition, M message);
28 |
29 | /**
30 | * Convert a collection of messages to Kafka records
31 | *
32 | * @param kafkaTopic Kafka topic for sending message
33 | * @param partition partition of topic where the messages are sent when partition is specified in the request
34 | * @param messages collection of messages to convert
35 | * @return Kafka records
36 | */
37 | List<ProducerRecord<K, V>> toKafkaRecords(String kafkaTopic, Integer partition, C messages);
38 |
39 | /**
40 | * Converts a Kafka record to a message
41 | *
42 | * @param address address for sending message
43 | * @param record Kafka record to convert
44 | * @return message
45 | */
46 | M toMessage(String address, ConsumerRecord<K, V> record);
47 |
48 | /**
49 | * Converts Kafka records to a collection of messages
50 | *
51 | * @param records Kafka records to convert
52 | * @return a collection of messages
53 | */
54 | C toMessages(ConsumerRecords<K, V> records);
55 | }
56 |
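A minimal, hypothetical implementation sketch (assuming the generic signature MessageConverter<K, V, M, C> shown above; RawBytesMessageConverter is not part of this repository) that passes record values through as raw bytes:

import io.strimzi.kafka.bridge.converter.MessageConverter;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.ArrayList;
import java.util.List;

public class RawBytesMessageConverter implements MessageConverter<byte[], byte[], byte[], List<byte[]>> {

    @Override
    public ProducerRecord<byte[], byte[]> toKafkaRecord(String kafkaTopic, Integer partition, byte[] message) {
        // no key; the whole message body becomes the record value
        return new ProducerRecord<>(kafkaTopic, partition, null, message);
    }

    @Override
    public List<ProducerRecord<byte[], byte[]>> toKafkaRecords(String kafkaTopic, Integer partition, List<byte[]> messages) {
        List<ProducerRecord<byte[], byte[]>> records = new ArrayList<>();
        for (byte[] message : messages) {
            records.add(toKafkaRecord(kafkaTopic, partition, message));
        }
        return records;
    }

    @Override
    public byte[] toMessage(String address, ConsumerRecord<byte[], byte[]> record) {
        return record.value();
    }

    @Override
    public List<byte[]> toMessages(ConsumerRecords<byte[], byte[]> records) {
        List<byte[]> messages = new ArrayList<>();
        // ConsumerRecords is Iterable over the records of all fetched partitions
        records.forEach(record -> messages.add(record.value()));
        return messages;
    }
}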
--------------------------------------------------------------------------------
/documentation/modules/proc-bridge-subscribing-consumer-topics.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-bridge-subscribing-consumer-topics-{context}']
6 | = Subscribing an HTTP Bridge consumer to topics
7 |
8 | [role="_abstract"]
9 | After you have created an HTTP Bridge consumer, subscribe it to one or more topics by using the xref:subscribe[subscription] endpoint.
10 | When subscribed, the consumer starts receiving all messages that are produced to the topic.
11 |
12 | .Procedure
13 |
14 | * Subscribe the consumer to the `bridge-quickstart-topic` topic that you created earlier, in xref:proc-producing-messages-from-bridge-topics-partitions-{context}[Producing messages to topics and partitions]:
15 | +
16 | [source,curl,subs=attributes+]
17 | ----
18 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/subscription \
19 | -H 'content-type: application/vnd.kafka.v2+json' \
20 | -d '{
21 | "topics": [
22 | "bridge-quickstart-topic"
23 | ]
24 | }'
25 | ----
26 | +
27 | The `topics` array can contain a single topic (as shown here) or multiple topics. If you want to subscribe the consumer to multiple topics that match a regular expression, you can use the `topic_pattern` string instead of the `topics` array.
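+
For example, the following request (the pattern is illustrative) subscribes the consumer to every topic whose name starts with `bridge-quickstart-`:
+
[source,curl,subs=attributes+]
----
curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer/subscription \
-H 'content-type: application/vnd.kafka.v2+json' \
-d '{
"topic_pattern": "bridge-quickstart-.*"
}'
----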
28 | +
29 | If the request is successful, the HTTP Bridge returns only a `204` (No Content) status code.
30 |
31 | As when using an Apache Kafka client directly, the HTTP subscribe operation only adds topics to the local consumer's subscriptions.
32 | The consumer joins the consumer group and obtains its partition assignments only after subsequent HTTP poll operations trigger the join-group and partition rebalance process.
33 | As a result, the initial HTTP poll operations may not return any records.
34 |
35 | .What to do next
36 |
37 | After subscribing an HTTP Bridge consumer to topics, you can xref:proc-bridge-retrieving-latest-messages-from-consumer-{context}[retrieve messages from the consumer].
38 |
39 | [role="_additional-resources"]
40 | .Additional resources
41 |
42 | * xref:subscribe[POST /consumers/{groupid}/instances/{name}/subscription]
43 |
--------------------------------------------------------------------------------
/documentation/modules/con-overview-running-http-bridge.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-overview.adoc
4 |
5 | [id="overview-components-running-http-bridge-{context}"]
6 | = Running the HTTP Bridge
7 |
8 | [role="_abstract"]
9 | Install the HTTP Bridge to run in the same environment as your Kafka cluster.
10 |
11 | You can download and add the HTTP Bridge installation artifacts to your host machine.
12 | To try out the HTTP Bridge in your local environment, see the xref:assembly-http-bridge-quickstart-{context}[HTTP Bridge quickstart].
13 |
14 | Each instance of the HTTP Bridge maintains its own set of in-memory consumers (and subscriptions) that connect to the Kafka brokers on behalf of the HTTP clients.
15 | This means that each HTTP client must maintain affinity to the same HTTP Bridge instance in order to access any subscriptions that it creates.
16 | Additionally, when an instance of the HTTP Bridge restarts, the in-memory consumers and subscriptions are lost.
17 | **It is the responsibility of the HTTP client to recreate any consumers and subscriptions if the HTTP Bridge restarts.**
18 |
19 | [id="overview-components-running-http-bridge-cluster-{context}"]
20 | == Running the HTTP Bridge on Kubernetes
21 |
22 | If you deployed Strimzi on Kubernetes, you can use the Strimzi Cluster Operator to deploy the HTTP Bridge to the Kubernetes cluster.
23 | Configure and deploy the HTTP Bridge as a `KafkaBridge` resource.
24 | You'll need a running Kafka cluster that was deployed by the Cluster Operator in a Kubernetes namespace.
25 | You can configure your deployment to access the HTTP Bridge outside the Kubernetes cluster.
26 |
27 | HTTP clients must maintain affinity to the same instance of the HTTP Bridge to access any consumers or subscriptions that they create. Hence, running multiple replicas of the HTTP Bridge per Kubernetes Deployment is not recommended.
28 | If the HTTP Bridge pod restarts (for instance, due to Kubernetes relocating the workload to another node), the HTTP client must recreate any consumers or subscriptions.
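A minimal `KafkaBridge` resource might look like the following sketch (the resource name and bootstrap address are illustrative):

[source,yaml]
----
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaBridge
metadata:
  name: my-bridge
spec:
  replicas: 1
  bootstrapServers: my-cluster-kafka-bootstrap:9092
  http:
    port: 8080
----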
29 |
30 | For information on deploying and configuring the HTTP Bridge as a `KafkaBridge` resource, see the {BookURLConfiguring}.
31 |
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/services/SeekService.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http.services;
6 |
7 | import io.strimzi.kafka.bridge.BridgeContentType;
8 | import io.strimzi.kafka.bridge.utils.Urls;
9 | import io.vertx.core.json.JsonObject;
10 | import io.vertx.ext.web.client.HttpRequest;
11 | import io.vertx.ext.web.client.WebClient;
12 | import io.vertx.ext.web.codec.BodyCodec;
13 |
14 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
15 | import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
16 |
17 | public class SeekService extends BaseService {
18 |
19 | private static SeekService seekService;
20 |
21 | private SeekService(WebClient webClient) {
22 | super(webClient);
23 | }
24 |
25 | public static synchronized SeekService getInstance(WebClient webClient) {
26 | if (seekService == null || webClient != seekService.webClient) {
27 | seekService = new SeekService(webClient);
28 | }
29 | return seekService;
30 | }
31 |
32 | // Seek basic requests
33 |
34 | public HttpRequest<JsonObject> positionsRequest(String groupId, String name, JsonObject json) {
35 | return positionsBaseRequest(Urls.consumerInstancePosition(groupId, name), json);
36 | }
37 |
38 | public HttpRequest<JsonObject> positionsBeginningRequest(String groupId, String name, JsonObject json) {
39 | return positionsBaseRequest(Urls.consumerInstancePositionBeginning(groupId, name), json);
40 | }
41 |
42 | public HttpRequest<JsonObject> positionsBeginningEnd(String groupId, String name, JsonObject json) {
43 | return positionsBaseRequest(Urls.consumerInstancePositionEnd(groupId, name), json);
44 | }
45 |
46 | private HttpRequest<JsonObject> positionsBaseRequest(String url, JsonObject json) {
47 | return postRequest(url)
48 | .putHeader(CONTENT_LENGTH.toString(), String.valueOf(json.toBuffer().length()))
49 | .putHeader(CONTENT_TYPE.toString(), BridgeContentType.KAFKA_JSON)
50 | .as(BodyCodec.jsonObject());
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/InvalidProducerIT.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http;
6 |
7 | import io.strimzi.kafka.bridge.BridgeContentType;
8 | import io.strimzi.kafka.bridge.http.base.HttpBridgeITAbstract;
9 | import io.vertx.core.json.JsonArray;
10 | import io.vertx.core.json.JsonObject;
11 | import io.vertx.junit5.VertxTestContext;
12 | import org.apache.kafka.common.KafkaFuture;
13 | import org.apache.logging.log4j.LogManager;
14 | import org.apache.logging.log4j.Logger;
15 | import org.junit.jupiter.api.Test;
16 |
17 | import java.util.HashMap;
18 | import java.util.Map;
19 | import java.util.concurrent.ExecutionException;
20 |
21 | import static org.hamcrest.MatcherAssert.assertThat;
22 | import static org.hamcrest.Matchers.is;
23 |
24 | public class InvalidProducerIT extends HttpBridgeITAbstract {
25 | private static final Logger LOGGER = LogManager.getLogger(InvalidProducerIT.class);
26 |
27 | @Override
28 | protected Map<String, String> overrideConfig() {
29 | Map<String, String> cfg = new HashMap<>();
30 | cfg.put("kafka.producer.acks", "5"); // invalid config
31 | return cfg;
32 | }
33 |
34 | @Test
35 | void sendSimpleMessage(VertxTestContext context) throws InterruptedException, ExecutionException {
36 | KafkaFuture<Void> future = adminClientFacade.createTopic(topic);
37 |
38 | String value = "message-value";
39 |
40 | JsonArray records = new JsonArray();
41 | JsonObject json = new JsonObject();
42 | json.put("value", value);
43 | records.add(json);
44 |
45 | JsonObject root = new JsonObject();
46 | root.put("records", records);
47 |
48 | future.get();
49 |
50 | producerService()
51 | .sendRecordsRequest(topic, root, BridgeContentType.KAFKA_JSON_JSON)
52 | .sendJsonObject(root)
53 | .onComplete(ar -> {
54 | assertThat(ar.succeeded(), is(true));
55 | assertThat(ar.result().statusCode(), is(500));
56 | context.completeNow();
57 | });
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/documentation/modules/proc-creating-http-bridge-consumer.adoc:
--------------------------------------------------------------------------------
1 | // Module included in the following assemblies:
2 | //
3 | // assembly-http-bridge-quickstart.adoc
4 |
5 | [id='proc-creating-http-bridge-consumer-{context}']
6 | = Creating an HTTP Bridge consumer
7 |
8 | [role="_abstract"]
9 | Before you can perform any consumer operations in the Kafka cluster, you must first create a consumer by using the xref:createconsumer[consumers] endpoint. The consumer is referred to as an __HTTP Bridge consumer__.
10 |
11 | .Procedure
12 |
13 | . Create an HTTP Bridge consumer in a new consumer group named `bridge-quickstart-consumer-group`:
14 | +
15 | [source,curl,subs=attributes+]
16 | ----
17 | curl -X POST http://localhost:8080/consumers/bridge-quickstart-consumer-group \
18 | -H 'content-type: application/vnd.kafka.v2+json' \
19 | -d '{
20 | "name": "bridge-quickstart-consumer",
21 | "auto.offset.reset": "earliest",
22 | "format": "json",
23 | "enable.auto.commit": false,
24 | "fetch.min.bytes": 512,
25 | "consumer.request.timeout.ms": 30000
26 | }'
27 | ----
28 | +
29 | * The consumer is named `bridge-quickstart-consumer` and the embedded data format is set as `json`.
30 | * Some basic configuration settings are defined.
31 | * The consumer will not commit offsets to the log automatically because the `enable.auto.commit` setting is `false`. You will commit the offsets manually later in this quickstart.
32 | +
33 | If the request is successful, the HTTP Bridge returns the consumer ID (`instance_id`) and base URL (`base_uri`) in the response body, along with a `200` code.
34 | +
35 | .Example response
36 | [source,json,subs="+quotes"]
38 | ----
39 | #...
40 | {
41 | "instance_id": "bridge-quickstart-consumer",
42 | "base_uri":"http://__-bridge-service:8080/consumers/bridge-quickstart-consumer-group/instances/bridge-quickstart-consumer"
43 | }
44 | ----
45 |
46 | . Copy the base URL (`base_uri`) to use in the other consumer operations in this quickstart.
47 |
48 | .What to do next
49 |
50 | Now that you have created an HTTP Bridge consumer, you can xref:proc-bridge-subscribing-consumer-topics-{context}[subscribe it to topics].
51 |
52 | [role="_additional-resources"]
53 | .Additional resources
54 |
55 | * xref:createconsumer[POST /consumers/{groupid}]
56 |
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/metrics/MetricsCollector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.metrics;
6 |
7 | import io.micrometer.core.instrument.Meter;
8 | import io.micrometer.core.instrument.MeterRegistry;
9 | import io.micrometer.prometheusmetrics.PrometheusMeterRegistry;
10 | import io.micrometer.prometheusmetrics.PrometheusNamingConvention;
11 | import io.vertx.micrometer.backends.BackendRegistries;
12 |
13 | /**
14 | * Abstract class for collecting and exposing metrics.
15 | */
16 | public abstract class MetricsCollector {
17 | private final PrometheusMeterRegistry vertxRegistry;
18 |
19 | MetricsCollector() {
20 | this.vertxRegistry = (PrometheusMeterRegistry) BackendRegistries.getDefaultNow();
21 | if (vertxRegistry != null) {
22 | // replace the default Prometheus naming convention
23 | this.vertxRegistry.config().namingConvention(new MetricsNamingConvention());
24 | }
25 | }
26 |
27 | private static class MetricsNamingConvention extends PrometheusNamingConvention {
28 | @Override
29 | public String name(String name, Meter.Type type, String baseUnit) {
30 | String metricName = name.startsWith("vertx.") ? name.replace("vertx.", "strimzi.bridge.") : name;
31 | return super.name(metricName, type, baseUnit);
32 | }
33 | }
34 |
35 | /**
36 | * @return Registry instance for scraping Vertx metrics.
37 | * This is null if metrics are not enabled in the VertxOptions instance.
38 | */
39 | public MeterRegistry getVertxRegistry() {
40 | return vertxRegistry;
41 | }
42 |
43 | /**
44 | * Scrape all, including Vertx metrics.
45 | *
46 | * @return Raw metrics in Prometheus format.
47 | */
48 | public String scrape() {
49 | StringBuilder sb = new StringBuilder();
50 | sb.append(doScrape());
51 | if (vertxRegistry != null) {
52 | sb.append(vertxRegistry.scrape());
53 | }
54 | return sb.toString();
55 | }
56 |
57 | /**
58 | * @return Raw metrics in Prometheus format.
59 | */
60 | abstract String doScrape();
61 | }
62 |
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/tools/ExtensionContextParameterResolver.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http.tools;
6 |
7 | import org.junit.jupiter.api.extension.ExtensionContext;
8 | import org.junit.jupiter.api.extension.ParameterContext;
9 | import org.junit.jupiter.api.extension.ParameterResolutionException;
10 | import org.junit.jupiter.api.extension.ParameterResolver;
11 |
12 |
13 | /**
14 | * JUnit 5 {@link ParameterResolver} that injects the current {@link ExtensionContext}
15 | * into test methods or lifecycle methods that declare it as a parameter.
16 | *
17 | * This is typically used to provide test context to {@code @BeforeEach}, {@code @AfterEach},
18 | * or test methods themselves.
19 | *
20 | */
21 | public class ExtensionContextParameterResolver implements ParameterResolver {
22 | /**
23 | * Checks if the parameter is of type {@link ExtensionContext}.
24 | *
25 | * @param parameterContext The context for the parameter for which a value is to be resolved.
26 | * @param extensionContext The extension context for the test or container.
27 | * @return {@code true} if the parameter is of type {@code ExtensionContext}, otherwise {@code false}.
28 | * @throws ParameterResolutionException If an error occurs while checking parameter support.
29 | */
30 | @Override
31 | public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException {
32 | return parameterContext.getParameter().getType() == ExtensionContext.class;
33 | }
34 |
35 | /**
36 | * Provides the {@link ExtensionContext} instance as the parameter value.
37 | *
38 | * @param parameterContext The context for the parameter for which a value is to be resolved.
39 | * @param extensionContext The extension context for the test or container.
40 | * @return The current {@code ExtensionContext} instance.
41 | * @throws ParameterResolutionException If an error occurs during parameter resolution.
42 | */
43 | @Override
44 | public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException {
45 | return extensionContext;
46 | }
47 | }
48 |
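A minimal, hypothetical test class (not part of this repository) showing how the resolver is wired in with @ExtendWith and how the injected context can be used:

import io.strimzi.kafka.bridge.http.tools.ExtensionContextParameterResolver;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.ExtensionContext;

@ExtendWith(ExtensionContextParameterResolver.class)
class ResolverUsageExampleTest {

    @Test
    void printsTestName(ExtensionContext context) {
        // the resolver injects the current ExtensionContext as a parameter
        System.out.println("Running: " + context.getDisplayName());
    }
}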
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/metrics/JmxMetricsCollector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.metrics;
7 |
8 | import io.prometheus.jmx.JmxCollector;
9 | import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter;
10 | import io.prometheus.metrics.model.registry.PrometheusRegistry;
11 |
12 | import javax.management.MalformedObjectNameException;
13 | import java.io.ByteArrayOutputStream;
14 | import java.io.IOException;
15 | import java.nio.charset.StandardCharsets;
16 |
17 | /**
18 | * Collect and scrape JMX metrics in Prometheus format.
19 | */
20 | public class JmxMetricsCollector extends MetricsCollector {
21 | private final PrometheusRegistry registry;
22 | private final PrometheusTextFormatWriter textFormatter;
23 |
24 | /**
25 | * Constructor.
26 | *
27 | * @param yamlConfig YAML configuration string with metrics filtering rules
28 | * @throws MalformedObjectNameException Throws MalformedObjectNameException
29 | */
30 | public JmxMetricsCollector(String yamlConfig) throws MalformedObjectNameException {
31 | // Prometheus default registry is a singleton, so it is shared with JmxCollector
32 | this(new JmxCollector(yamlConfig), PrometheusRegistry.defaultRegistry, new PrometheusTextFormatWriter(true));
33 | }
34 |
35 | /**
36 | * Constructor.
37 | *
38 | * @param jmxCollector JMX collector registry
39 | * @param registry Prometheus collector registry
40 | * @param textFormatter Prometheus text formatter
41 | */
42 | /* test */ JmxMetricsCollector(JmxCollector jmxCollector,
43 | PrometheusRegistry registry,
44 | PrometheusTextFormatWriter textFormatter) {
45 | super();
46 | jmxCollector.register();
47 | this.registry = registry;
48 | this.textFormatter = textFormatter;
49 | }
50 |
51 | @Override
52 | public String doScrape() {
53 | ByteArrayOutputStream stream = new ByteArrayOutputStream();
54 | try {
55 | textFormatter.write(stream, registry.scrape());
56 | } catch (IOException e) {
57 | throw new RuntimeException(e);
58 | }
59 | return stream.toString(StandardCharsets.UTF_8);
60 | }
61 | }
62 |
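A minimal, hypothetical usage sketch (the rule set is an example value; jmx_exporter YAML supports much richer filtering) that exposes all registered MBeans:

import io.strimzi.kafka.bridge.metrics.JmxMetricsCollector;

import javax.management.MalformedObjectNameException;

public class JmxMetricsExample {
    public static void main(String[] args) throws MalformedObjectNameException {
        // permissive jmx_exporter configuration: match every MBean attribute
        String yamlConfig = "rules:\n  - pattern: \".*\"\n";
        JmxMetricsCollector collector = new JmxMetricsCollector(yamlConfig);
        // prints JMX metrics (plus Vert.x metrics, if enabled) in Prometheus text format
        System.out.println(collector.scrape());
    }
}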
--------------------------------------------------------------------------------
/src/main/java/io/strimzi/kafka/bridge/http/model/HttpBridgeError.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 |
6 | package io.strimzi.kafka.bridge.http.model;
7 |
8 | import com.fasterxml.jackson.databind.node.ObjectNode;
9 | import io.strimzi.kafka.bridge.http.converter.JsonUtils;
10 | import io.vertx.core.json.JsonObject;
11 |
12 | import java.util.ArrayList;
13 | import java.util.List;
14 |
15 | /**
16 | * Represents an error related to HTTP bridging
17 | *
18 | * @param code code classifying the error itself
19 | * @param message message providing more information about the error
20 | * @param validationErrors list of detailed validation errors
21 | */
22 | public record HttpBridgeError(int code, String message, List<String> validationErrors) {
23 |
24 | /**
25 | * Creates an error object with an empty list of validation errors
26 | *
27 | * @param code code classifying the error itself
28 | * @param message message providing more information about the error
29 | */
30 | public HttpBridgeError(int code, String message) {
31 | this(code, message, List.of());
32 | }
33 |
34 | /**
35 | * @return a JSON representation of the error with code and message
36 | */
37 | public ObjectNode toJson() {
38 | ObjectNode json = JsonUtils.createObjectNode();
39 | json.put("error_code", this.code);
40 | json.put("message", this.message);
41 | if (this.validationErrors != null && !this.validationErrors.isEmpty()) {
42 | json.set("validation_errors", JsonUtils.createArrayNode(this.validationErrors));
43 | }
44 | return json;
45 | }
46 |
47 | /**
48 | * Create an error instance from a JSON representation
49 | *
50 | * @param json JSON representation of the error
51 | * @return error instance
52 | */
53 | public static HttpBridgeError fromJson(JsonObject json) {
54 | if (json.containsKey("validation_errors")) {
55 | List<String> validationErrors = new ArrayList<>();
56 | json.getJsonArray("validation_errors").forEach(error -> validationErrors.add(error.toString()));
57 | return new HttpBridgeError(json.getInteger("error_code"), json.getString("message"), validationErrors);
58 | } else {
59 | return new HttpBridgeError(json.getInteger("error_code"), json.getString("message"));
60 | }
61 | }
62 | }
63 |
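A minimal, hypothetical round-trip sketch (not part of this repository) showing how toJson() and fromJson() mirror each other:

import io.strimzi.kafka.bridge.http.model.HttpBridgeError;
import io.vertx.core.json.JsonObject;

import java.util.List;

public class HttpBridgeErrorExample {
    public static void main(String[] args) {
        HttpBridgeError error = new HttpBridgeError(422, "Unprocessable request",
            List.of("records[0].value is missing"));

        // toJson() yields {"error_code":422,"message":"...","validation_errors":[...]}
        JsonObject json = new JsonObject(error.toJson().toString());

        // fromJson() rebuilds the record, including the validation errors
        HttpBridgeError parsed = HttpBridgeError.fromJson(json);
        System.out.println(parsed.code() + " " + parsed.message() + " " + parsed.validationErrors());
    }
}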
--------------------------------------------------------------------------------
/src/test/java/io/strimzi/kafka/bridge/http/tools/TestSeparator.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.bridge.http.tools;
6 |
7 | import org.apache.logging.log4j.LogManager;
8 | import org.apache.logging.log4j.Logger;
9 | import org.junit.jupiter.api.AfterEach;
10 | import org.junit.jupiter.api.BeforeEach;
11 | import org.junit.jupiter.api.extension.ExtendWith;
12 | import org.junit.jupiter.api.extension.ExtensionContext;
13 |
14 | import java.util.Collections;
15 |
16 | /**
17 | * Provides a separator in the log output before and after each test for improved readability.
18 | *
19 | * Implement this interface in your test classes to automatically log a separator line and test status
20 | * (STARTED, SUCCEEDED, FAILED) around each test execution.
21 | *