20 | }
21 |
22 | [domain_realm]
23 | .EXAMPLE.COM = EXAMPLE.COM
24 | EXAMPLE.COM = EXAMPLE.COM
25 | .localhost = EXAMPLE.COM
26 | localhost = EXAMPLE.COM
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/kerberos/krb5KafkaBroker.conf:
--------------------------------------------------------------------------------
1 | [logging]
2 | default = FILE:/var/log/kerberos/krb5libs.log
3 | kdc = FILE:/var/log/kerberos/krb5kdc.log
4 | admin_server = FILE:/var/log/kerberos/kadmind.log
5 |
6 | [libdefaults]
7 | default_realm = EXAMPLE.COM
8 | dns_lookup_realm = false
9 | dns_lookup_kdc = false
10 | ticket_lifetime = 24h
11 | renew_lifetime = 7d
12 | forwardable = true
13 | rdns = false
14 |
15 | [realms]
16 | EXAMPLE.COM = {
17 | kdc = kerberos:88
18 | admin_server = kerberos:749
19 | }
20 |
21 | [domain_realm]
22 | .EXAMPLE.COM = EXAMPLE.COM
23 | EXAMPLE.COM = EXAMPLE.COM
24 | .localhost = EXAMPLE.COM
25 | localhost = EXAMPLE.COM
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/keycloak/realms/kafka-authz-realm.json:
--------------------------------------------------------------------------------
1 | {
2 | "realm": "kafka-authz",
3 | "accessTokenLifespan": 300,
4 | "ssoSessionIdleTimeout": 864000,
5 | "ssoSessionMaxLifespan": 864000,
6 | "enabled": true,
7 | "sslRequired": "external",
8 | "roles": {
9 | "realm": [],
10 | "client": {
11 | "kafka-cli": [],
12 | "kafka": [
13 | {
14 | "name": "uma_protection",
15 | "clientRole": true
16 | },
17 | {
18 | "name": "kafka-user",
19 | "clientRole": true
20 | }
21 | ]
22 | }
23 | },
24 | "groups" : [],
25 | "users": [
26 | {
27 | "username": "service-account-kafka-producer-client",
28 | "enabled": true,
29 | "realmRoles" : [ "offline_access" ],
30 | "email": "service-account-kafka-producer-client@placeholder.org",
31 | "serviceAccountClientId": "kafka-producer-client"
32 | },
33 | {
34 | "username": "service-account-kafka-consumer-client",
35 | "enabled": true,
36 | "realmRoles" : [ "offline_access" ],
37 | "email": "service-account-kafka-consumer-client@placeholder.org",
38 | "serviceAccountClientId": "kafka-consumer-client"
39 | }
40 | ],
41 | "clients": [
42 | {
43 | "clientId": "kafka",
44 | "enabled": true,
45 | "clientAuthenticatorType": "client-secret",
46 | "secret": "kafka-secret",
47 | "bearerOnly": false,
48 | "consentRequired": false,
49 | "standardFlowEnabled": false,
50 | "implicitFlowEnabled": false,
51 | "directAccessGrantsEnabled": true,
52 | "serviceAccountsEnabled": true,
53 | "authorizationServicesEnabled": true,
54 | "publicClient": false,
55 | "fullScopeAllowed": true,
56 | "protocolMappers": [
57 | {
58 | "name": "kafka audience",
59 | "protocol": "openid-connect",
60 | "protocolMapper": "oidc-audience-mapper",
61 | "consentRequired": false,
62 | "config": {
63 | "included.client.audience": "kafka",
64 | "id.token.claim": "false",
65 | "access.token.claim": "true"
66 | }
67 | }
68 | ],
69 | "authorizationSettings": {
70 | "allowRemoteResourceManagement": true,
71 | "policyEnforcementMode": "ENFORCING",
72 | "resources": [
73 | {
74 | "name": "Group:*",
75 | "type": "Group",
76 | "ownerManagedAccess": false,
77 | "displayName": "Any group",
78 | "attributes": {},
79 | "uris": [],
80 | "scopes": [
81 | {
82 | "name": "Describe"
83 | },
84 | {
85 | "name": "Read"
86 | },
87 | {
88 | "name": "DescribeConfigs"
89 | },
90 | {
91 | "name": "AlterConfigs"
92 | }
93 | ]
94 | },
95 | {
96 | "name": "Topic:*",
97 | "type": "Topic",
98 | "ownerManagedAccess": false,
99 | "displayName": "Any topic",
100 | "attributes": {},
101 | "uris": [],
102 | "scopes": [
103 | {
104 | "name": "Create"
105 | },
106 | {
107 | "name": "Delete"
108 | },
109 | {
110 | "name": "Describe"
111 | },
112 | {
113 | "name": "Write"
114 | },
115 | {
116 | "name": "Read"
117 | },
118 | {
119 | "name": "Alter"
120 | },
121 | {
122 | "name": "DescribeConfigs"
123 | },
124 | {
125 | "name": "AlterConfigs"
126 | }
127 | ]
128 | },
129 | {
130 | "name" : "Cluster:*",
131 | "type" : "Cluster",
132 | "ownerManagedAccess" : false,
133 | "attributes" : { },
134 | "uris" : [ ]
135 | },
136 | {
137 | "name": "Topic:messages",
138 |           "type": "Topic",
139 | "scopes": [
140 | {
141 | "name": "Delete"
142 | },
143 | {
144 | "name": "Describe"
145 | },
146 | {
147 | "name": "Create"
148 | },
149 | {
150 | "name": "Write"
151 | },
152 | {
153 | "name": "Alter"
154 | },
155 | {
156 | "name": "Read"
157 | },
158 | {
159 | "name": "DescribeConfigs"
160 | },
161 | {
162 | "name": "AlterConfigs"
163 | }
164 | ]
165 | }
166 | ],
167 | "policies": [
168 | {
169 | "name": "Producer Client",
170 | "type": "client",
171 | "logic": "POSITIVE",
172 | "decisionStrategy": "UNANIMOUS",
173 | "config": {
174 | "clients": "[\"kafka-producer-client\", \"kafka-client\"]"
175 | }
176 | },
177 | {
178 | "name": "Consumer Client",
179 | "type": "client",
180 | "logic": "POSITIVE",
181 | "decisionStrategy": "UNANIMOUS",
182 | "config": {
183 | "clients": "[\"kafka-consumer-client\", \"kafka-client\"]"
184 | }
185 | },
186 | {
187 | "name": "Producer Client can write to topic 'messages'",
188 | "type": "scope",
189 | "logic": "POSITIVE",
190 | "decisionStrategy": "UNANIMOUS",
191 | "config": {
192 | "resources": "[\"Topic:messages\"]",
193 | "scopes": "[\"Delete\",\"Describe\",\"Create\",\"Write\"]",
194 | "applyPolicies": "[\"Producer Client\"]"
195 | }
196 | },
197 | {
198 | "name": "Consumer Client can read from topic 'messages'",
199 | "type": "scope",
200 | "logic": "POSITIVE",
201 | "decisionStrategy": "UNANIMOUS",
202 | "config": {
203 | "resources": "[\"Topic:messages\"]",
204 | "scopes": "[\"Describe\",\"Read\"]",
205 | "applyPolicies": "[\"Consumer Client\"]"
206 | }
207 | },
208 | {
209 | "name": "Consumer Client can use any group",
210 | "type": "scope",
211 | "logic": "POSITIVE",
212 | "decisionStrategy": "UNANIMOUS",
213 | "config": {
214 | "resources": "[\"Group:*\"]",
215 | "scopes": "[\"Describe\",\"Write\",\"Read\"]",
216 | "applyPolicies": "[\"Consumer Client\"]"
217 | }
218 | }
219 | ],
220 | "scopes": [
221 | {
222 | "name": "Create"
223 | },
224 | {
225 | "name": "Read"
226 | },
227 | {
228 | "name": "Write"
229 | },
230 | {
231 | "name": "Delete"
232 | },
233 | {
234 | "name": "Alter"
235 | },
236 | {
237 | "name": "Describe"
238 | },
239 | {
240 | "name": "ClusterAction"
241 | },
242 | {
243 | "name": "DescribeConfigs"
244 | },
245 | {
246 | "name": "AlterConfigs"
247 | },
248 | {
249 | "name": "IdempotentWrite"
250 | }
251 | ],
252 | "decisionStrategy": "AFFIRMATIVE"
253 | }
254 | },
255 | {
256 | "clientId": "kafka-cli",
257 | "enabled": true,
258 | "clientAuthenticatorType": "client-secret",
259 | "secret": "kafka-cli-secret",
260 | "bearerOnly": false,
261 | "consentRequired": false,
262 | "standardFlowEnabled": false,
263 | "implicitFlowEnabled": false,
264 | "directAccessGrantsEnabled": true,
265 | "serviceAccountsEnabled": false,
266 | "publicClient": true,
267 | "fullScopeAllowed": true
268 | },
269 | {
270 | "clientId": "kafka-producer-client",
271 | "enabled": true,
272 | "clientAuthenticatorType": "client-secret",
273 | "secret": "kafka-producer-client-secret",
274 | "publicClient": false,
275 | "bearerOnly": false,
276 | "standardFlowEnabled": false,
277 | "implicitFlowEnabled": false,
278 | "directAccessGrantsEnabled": true,
279 | "serviceAccountsEnabled": true,
280 | "consentRequired" : false,
281 | "fullScopeAllowed" : false,
282 | "attributes": {
283 | "access.token.lifespan": "36000"
284 | }
285 | },
286 | {
287 | "clientId": "kafka-consumer-client",
288 | "enabled": true,
289 | "clientAuthenticatorType": "client-secret",
290 | "secret": "kafka-consumer-client-secret",
291 | "publicClient": false,
292 | "bearerOnly": false,
293 | "standardFlowEnabled": false,
294 | "implicitFlowEnabled": false,
295 | "directAccessGrantsEnabled": true,
296 | "serviceAccountsEnabled": true,
297 | "consentRequired" : false,
298 | "fullScopeAllowed" : false,
299 | "attributes": {
300 | "access.token.lifespan": "36000"
301 | }
302 | },
303 | {
304 | "clientId": "kafka-client",
305 | "enabled": true,
306 | "clientAuthenticatorType": "client-secret",
307 | "secret": "kafka-client-secret",
308 | "publicClient": false,
309 | "bearerOnly": false,
310 | "standardFlowEnabled": false,
311 | "implicitFlowEnabled": false,
312 | "directAccessGrantsEnabled": true,
313 | "serviceAccountsEnabled": true,
314 | "consentRequired" : false,
315 | "fullScopeAllowed" : false,
316 | "attributes": {
317 | "access.token.lifespan": "36000"
318 | }
319 | }
320 | ]
321 | }
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/oauth.properties:
--------------------------------------------------------------------------------
1 | listener.security.protocol.map=JWT:SASL_PLAINTEXT
2 |
3 | sasl.enabled.mechanisms=OAUTHBEARER
4 |
5 | #sasl.mechanism.inter.broker.protocol=OAUTHBEARER
6 |
7 | oauth.username.claim=preferred_username
8 | principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
9 |
10 | listener.name.jwt.sasl.enabled.mechanisms=OAUTHBEARER,PLAIN
11 | listener.name.jwt.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
12 | oauth.jwks.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/certs" \
13 | oauth.valid.issuer.uri="http://keycloak:8080/realms/kafka-authz" \
14 | oauth.token.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/token" \
15 | oauth.client.id="kafka" \
16 | oauth.client.secret="kafka-secret";
17 |
18 | listener.name.jwt.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
19 | listener.name.jwt.oauthbearer.sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler
20 | listener.name.jwt.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
21 | oauth.jwks.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/certs" \
22 | oauth.valid.issuer.uri="http://keycloak:8080/realms/kafka-authz" \
23 | oauth.token.endpoint.uri="http://keycloak:8080/realms/kafka-authz/protocol/openid-connect/token" \
24 | oauth.client.id="kafka" \
25 | oauth.client.secret="kafka-secret" \
26 | unsecuredLoginStringClaim_sub="admin";
27 |
28 | listener.name.jwt.plain.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler
29 |
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/sasl_plain_plaintext.properties:
--------------------------------------------------------------------------------
1 | sasl.enabled.mechanisms=PLAIN
2 |
3 | listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
4 | username="broker" \
5 | password="broker-secret" \
6 | user_broker="broker-secret" \
7 | user_client="client-secret";
8 |
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/sasl_scram_plaintext.properties:
--------------------------------------------------------------------------------
1 | sasl.enabled.mechanisms=SCRAM-SHA-512
2 |
3 | listener.name.sasl_plaintext.scram-sha-512.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
4 | username="broker" \
5 | password="broker-secret";
6 |
--------------------------------------------------------------------------------
/kafka-native-test-container/src/test/resources/ssl.properties:
--------------------------------------------------------------------------------
1 | ssl.keystore.location=/dir/kafka-keystore.p12
2 | ssl.keystore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
3 | ssl.keystore.type=PKCS12
4 | ssl.key.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
5 | ssl.truststore.location=/dir/kafka-truststore.p12
6 | ssl.truststore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
7 | ssl.truststore.type=PKCS12
8 | ssl.endpoint.identification.algorithm=
--------------------------------------------------------------------------------
/kafka-server/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | com.ozangunalp
8 | kafka-native-parent
9 | 999-SNAPSHOT
10 |
11 | kafka-server
12 | Kafka Server
13 |
14 |
15 |
16 | io.quarkus
17 | quarkus-arc
18 |
19 |
20 | io.quarkus
21 | quarkus-netty
22 |
23 |
24 | com.ozangunalp
25 | quarkus-kafka-server
26 |
27 |
28 | io.quarkus
29 | quarkus-container-image-docker
30 |
31 |
32 |
33 | org.apache.kafka
34 | kafka_2.13
35 |
36 |
37 | org.apache.kafka
38 | kafka-metadata
39 |
40 |
41 | org.apache.kafka
42 | kafka-server-common
43 |
44 |
45 | org.apache.kafka
46 | kafka-transaction-coordinator
47 |
48 |
49 | org.apache.kafka
50 | kafka-server
51 |
52 |
53 | org.apache.kafka
54 | kafka-raft
55 |
56 |
57 | org.apache.kafka
58 | kafka-clients
59 |
60 |
61 | org.scala-lang
62 | scala-library
63 |
64 |
65 | org.jboss.logmanager
66 | log4j2-jboss-logmanager
67 |
68 |
69 | io.quarkus
70 | quarkus-junit5
71 | test
72 |
73 |
74 | io.smallrye.reactive
75 | smallrye-reactive-messaging-kafka-test-companion
76 |
77 |
78 | net.sourceforge.argparse4j
79 | argparse4j
80 |
81 |
82 |
83 | org.apache.logging.log4j
84 | log4j-core
85 | 2.24.3
86 |
87 |
88 |
89 |
90 |
91 | ${quarkus.platform.group-id}
92 | quarkus-maven-plugin
93 | ${quarkus.platform.version}
94 | true
95 |
96 |
97 |
98 | build
99 | generate-code
100 | generate-code-tests
101 |
102 |
103 |
104 |
105 |
106 | org.apache.maven.plugins
107 | maven-compiler-plugin
108 | ${compiler-plugin.version}
109 |
110 | ${maven.compiler.parameters}
111 |
112 |
113 |
114 | org.apache.maven.plugins
115 | maven-surefire-plugin
116 | ${surefire-plugin.version}
117 |
118 |
119 | org.jboss.logmanager.LogManager
120 | ${maven.home}
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 | native
129 |
130 |
131 | native
132 |
133 |
134 |
135 |
136 |
137 | org.apache.maven.plugins
138 | maven-failsafe-plugin
139 | ${surefire-plugin.version}
140 |
141 |
142 |
143 | integration-test
144 | verify
145 |
146 |
147 |
148 | ${project.build.directory}/${project.build.finalName}-runner
149 | org.jboss.logmanager.LogManager
150 | ${maven.home}
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 | true
160 |
161 |
162 |
163 |
164 |
--------------------------------------------------------------------------------
/kafka-server/src/main/docker/Dockerfile.jvm:
--------------------------------------------------------------------------------
1 | ####
2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
3 | #
4 | # Before building the container image run:
5 | #
6 | # ./mvnw package
7 | #
8 | # Then, build the image with:
9 | #
10 | # docker build -f src/main/docker/Dockerfile.jvm -t quarkus/embedded-kafka-jvm .
11 | #
12 | # Then run the container using:
13 | #
14 | # docker run -i --rm -p 8080:8080 quarkus/embedded-kafka-jvm
15 | #
16 | # If you want to include the debug port into your docker image
17 | # you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005
18 | #
19 | # Then run the container using :
20 | #
21 | # docker run -i --rm -p 8080:8080 quarkus/embedded-kafka-jvm
22 | #
23 | # This image uses the `run-java.sh` script to run the application.
24 | # This script computes the command line to execute your Java application, and
25 | # includes memory/GC tuning.
26 | # You can configure the behavior using the following environment properties:
27 | # - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
28 | # - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
29 | # in JAVA_OPTS (example: "-Dsome.property=foo")
30 | # - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
31 | # used to calculate a default maximal heap memory based on a containers restriction.
32 | # If used in a container without any memory constraints for the container then this
33 | # option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
34 | # of the container available memory as set here. The default is `50` which means 50%
35 | # of the available memory is used as an upper boundary. You can skip this mechanism by
36 | # setting this value to `0` in which case no `-Xmx` option is added.
37 | # - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
38 | # is used to calculate a default initial heap memory based on the maximum heap memory.
39 | # If used in a container without any memory constraints for the container then this
40 | # option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
41 | # of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
42 | # is used as the initial heap size. You can skip this mechanism by setting this value
43 | # to `0` in which case no `-Xms` option is added (example: "25")
44 | # - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
45 | # This is used to calculate the maximum value of the initial heap memory. If used in
46 | # a container without any memory constraints for the container then this option has
47 | # no effect. If there is a memory constraint then `-Xms` is limited to the value set
48 | # here. The default is 4096MB which means the calculated value of `-Xms` never will
49 | # be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
50 | # - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
51 | # when things are happening. This option, if set to true, will set
52 | # `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
53 | # - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
54 | #   "true").
55 | # - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
56 | # - CONTAINER_CORE_LIMIT: A calculated core limit as described in
57 | # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
58 | # - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
59 | # - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
60 | # (example: "20")
61 | # - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
62 | # (example: "40")
63 | # - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
64 | # (example: "4")
65 | # - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
66 | # previous GC times. (example: "90")
67 | # - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
68 | # - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
69 | # - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
70 | # contain the necessary JRE command-line options to specify the required GC, which
71 | # will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
72 | # - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
73 | # - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
74 | # - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
75 | # accessed directly. (example: "foo.example.com,bar.example.com")
76 | #
77 | ###
78 | FROM registry.access.redhat.com/ubi8/openjdk-17:1.11
79 |
80 | ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
81 |
82 |
83 | # We make four distinct layers so if there are application changes the library layers can be re-used
84 | COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
85 | COPY --chown=185 target/quarkus-app/*.jar /deployments/
86 | COPY --chown=185 target/quarkus-app/app/ /deployments/app/
87 | COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
88 | RUN mkdir -m 777 -p /deployments/target/log-dir
89 |
90 | EXPOSE 9092
91 | USER 185
92 | ENV JAVA_OPTS="-Djava.util.logging.manager=org.jboss.logmanager.LogManager"
93 | ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
94 |
95 |
--------------------------------------------------------------------------------
/kafka-server/src/main/docker/Dockerfile.legacy-jar:
--------------------------------------------------------------------------------
1 | ####
2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
3 | #
4 | # Before building the container image run:
5 | #
6 | # ./mvnw package -Dquarkus.package.jar.type=legacy-jar
7 | #
8 | # Then, build the image with:
9 | #
10 | # docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/code-with-quarkus-legacy-jar .
11 | #
12 | # Then run the container using:
13 | #
14 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus-legacy-jar
15 | #
16 | # If you want to include the debug port into your docker image
17 | # you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005
18 | #
19 | # Then run the container using :
20 | #
21 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus-legacy-jar
22 | #
23 | # This image uses the `run-java.sh` script to run the application.
24 | # This script computes the command line to execute your Java application, and
25 | # includes memory/GC tuning.
26 | # You can configure the behavior using the following environment properties:
27 | # - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
28 | # - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
29 | # in JAVA_OPTS (example: "-Dsome.property=foo")
30 | # - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
31 | # used to calculate a default maximal heap memory based on a containers restriction.
32 | # If used in a container without any memory constraints for the container then this
33 | # option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
34 | # of the container available memory as set here. The default is `50` which means 50%
35 | # of the available memory is used as an upper boundary. You can skip this mechanism by
36 | # setting this value to `0` in which case no `-Xmx` option is added.
37 | # - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
38 | # is used to calculate a default initial heap memory based on the maximum heap memory.
39 | # If used in a container without any memory constraints for the container then this
40 | # option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
41 | # of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
42 | # is used as the initial heap size. You can skip this mechanism by setting this value
43 | # to `0` in which case no `-Xms` option is added (example: "25")
44 | # - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
45 | # This is used to calculate the maximum value of the initial heap memory. If used in
46 | # a container without any memory constraints for the container then this option has
47 | # no effect. If there is a memory constraint then `-Xms` is limited to the value set
48 | # here. The default is 4096MB which means the calculated value of `-Xms` never will
49 | # be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
50 | # - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
51 | # when things are happening. This option, if set to true, will set
52 | # `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
53 | # - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
54 | #   "true").
55 | # - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
56 | # - CONTAINER_CORE_LIMIT: A calculated core limit as described in
57 | # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
58 | # - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
59 | # - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
60 | # (example: "20")
61 | # - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
62 | # (example: "40")
63 | # - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
64 | # (example: "4")
65 | # - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
66 | # previous GC times. (example: "90")
67 | # - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
68 | # - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
69 | # - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
70 | # contain the necessary JRE command-line options to specify the required GC, which
71 | # will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
72 | # - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
73 | # - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
74 | # - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
75 | # accessed directly. (example: "foo.example.com,bar.example.com")
76 | #
77 | ###
78 | FROM registry.access.redhat.com/ubi8/openjdk-17:1.11
79 |
80 | ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
81 |
82 |
83 | COPY target/lib/* /deployments/lib/
84 | COPY target/*-runner.jar /deployments/quarkus-run.jar
85 | RUN mkdir -m 777 -p /deployments/target/log-dir
86 |
87 | EXPOSE 9092
88 | USER 185
89 | ENV JAVA_OPTS="-Djava.util.logging.manager=org.jboss.logmanager.LogManager"
90 | ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
91 |
--------------------------------------------------------------------------------
/kafka-server/src/main/docker/Dockerfile.native:
--------------------------------------------------------------------------------
1 | ####
2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
3 | #
4 | # Before building the container image run:
5 | #
6 | # ./mvnw package -Pnative
7 | #
8 | # Then, build the image with:
9 | #
10 | # docker build -f src/main/docker/Dockerfile.native -t quarkus/code-with-quarkus .
11 | #
12 | # Then run the container using:
13 | #
14 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus
15 | #
16 | ###
17 | FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5
18 | WORKDIR /work/
19 | RUN chown 1001 /work \
20 | && chmod "g+rwX" /work \
21 | && chown 1001:root /work
22 | COPY --chown=1001:root target/*-runner /work/kafka
23 |
24 | EXPOSE 9092
25 | USER 1001
26 | RUN mkdir -m 777 -p /work/target/log-dir
27 |
28 | CMD ["./kafka"]
29 |
--------------------------------------------------------------------------------
/kafka-server/src/main/docker/Dockerfile.native-micro:
--------------------------------------------------------------------------------
1 | ####
2 | # This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
3 | # It uses a micro base image, tuned for Quarkus native executables.
4 | # It reduces the size of the resulting container image.
5 | # Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
6 | #
7 | # Before building the container image run:
8 | #
9 | # ./mvnw package -Pnative
10 | #
11 | # Then, build the image with:
12 | #
13 | # docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/code-with-quarkus .
14 | #
15 | # Then run the container using:
16 | #
17 | # docker run -i --rm -p 8080:8080 quarkus/code-with-quarkus
18 | #
19 | ###
20 | FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
21 | WORKDIR /work/
22 | RUN chown 1001 /work \
23 | && chmod "g+rwX" /work \
24 | && chown 1001:root /work
25 | COPY --chown=1001:root target/*-runner /work/kafka
26 |
27 | EXPOSE 9092
28 | USER 1001
29 | RUN mkdir -m 777 -p /work/target/log-dir
30 |
31 | CMD ["./kafka"]
32 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/BrokerConfig.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import org.apache.kafka.common.Endpoint;
4 | import org.apache.kafka.common.config.TopicConfig;
5 | import org.apache.kafka.common.security.auth.SecurityProtocol;
6 | import org.apache.kafka.common.utils.Utils;
7 | import org.apache.kafka.coordinator.group.GroupCoordinatorConfig;
8 | import org.apache.kafka.coordinator.transaction.TransactionLogConfig;
9 | import org.apache.kafka.network.SocketServerConfigs;
10 | import org.apache.kafka.raft.QuorumConfig;
11 | import org.apache.kafka.server.config.KRaftConfigs;
12 | import org.apache.kafka.server.config.ReplicationConfigs;
13 | import org.apache.kafka.server.config.ServerConfigs;
14 | import org.apache.kafka.server.config.ServerLogConfigs;
15 | import org.apache.kafka.storage.internals.log.CleanerConfig;
16 | import org.eclipse.microprofile.config.Config;
17 | import org.eclipse.microprofile.config.ConfigProvider;
18 | import org.jboss.logging.Logger;
19 |
20 | import java.util.ArrayList;
21 | import java.util.Arrays;
22 | import java.util.Collection;
23 | import java.util.List;
24 | import java.util.Map;
25 | import java.util.Properties;
26 | import java.util.TreeMap;
27 | import java.util.function.Function;
28 | import java.util.stream.Collectors;
29 | import java.util.stream.Stream;
30 |
31 | public final class BrokerConfig {
32 |
33 | static final Logger LOGGER = Logger.getLogger(BrokerConfig.class.getName());
34 |
35 | final static String CONFIG_PREFIX = "kafka";
36 |
37 | private BrokerConfig() {
38 | }
39 |
40 |
    /**
     * Applies core KRaft defaults to the given broker properties: node id,
     * process roles and quorum voters, and — unless the caller supplied any
     * listener configuration of its own — the full listener setup
     * ({@code controller.listener.names}, {@code inter.broker.listener.name},
     * {@code listeners}, {@code advertised.listeners},
     * {@code listener.security.protocol.map}, {@code early.start.listeners}).
     *
     * @param props existing broker properties, mutated in place
     * @param host host for the internal and controller listeners
     * @param kafkaPort port for the client-facing listener, 0 for a random free port
     * @param internalPort port for the inter-broker listener, 0 for a random free port
     * @param controllerPort port for the KRaft controller listener, 0 for a random free port
     * @param defaultProtocol security protocol assumed for listeners that do not name one
     * @return the same {@code props} instance, for chaining
     */
    public static Properties defaultCoreConfig(Properties props, String host, int kafkaPort,
            int internalPort, int controllerPort, SecurityProtocol defaultProtocol) {
        Endpoint internal = Endpoints.internal(host, internalPort);
        Endpoint controller = Endpoints.controller(host, controllerPort);
        // Honour a user-supplied advertised.listeners value; otherwise fall back to a
        // single listener with the default protocol on the kafka port.
        List advertised = new ArrayList<>();
        String advertisedListenersStr = props.getProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG);
        if (!Utils.isBlank(advertisedListenersStr)) {
            advertised.addAll(Endpoints.parseEndpoints(advertisedListenersStr, defaultProtocol));
        }
        if (advertised.isEmpty()) {
            advertised.add(Endpoints.endpoint(defaultProtocol, kafkaPort));
        }

        // Configure node id
        String brokerId = props.getProperty(ServerConfigs.BROKER_ID_CONFIG);
        if (brokerId == null) {
            brokerId = "1";
            props.put(ServerConfigs.BROKER_ID_CONFIG, brokerId);
        }

        // The node acts as a KRaft controller unless process.roles was supplied without "controller".
        boolean kraftController = !props.containsKey(KRaftConfigs.PROCESS_ROLES_CONFIG) ||
                Arrays.asList(props.getProperty(KRaftConfigs.PROCESS_ROLES_CONFIG).split(",")).contains("controller");
        // Configure kraft
        props.putIfAbsent(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker,controller");
        if (kraftController) {
            props.putIfAbsent(QuorumConfig.QUORUM_VOTERS_CONFIG, brokerId + "@" + controller.host() + ":" + controller.port());
        }

        // auto-configure listeners if
        // - no controller.listener.names config
        // - no inter.broker.listener.name config
        // - no listeners config
        if (!props.containsKey(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG)
                && !props.containsKey(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)
                && !props.containsKey(SocketServerConfigs.LISTENERS_CONFIG)) {
            // Configure listeners
            List earlyStartListeners = new ArrayList<>();
            earlyStartListeners.add(Endpoints.BROKER_PROTOCOL_NAME);

            // One bound listener per advertised endpoint (all on kafkaPort), plus the internal one.
            Map listeners = advertised.stream()
                    .map(l -> new Endpoint(l.listenerName().orElse(null), l.securityProtocol(), "", kafkaPort))
                    .collect(Collectors.toMap(Endpoints::listenerName, Function.identity()));
            listeners.put(Endpoints.listenerName(internal), internal);

            // The protocol map always contains the controller listener, even on broker-only nodes;
            // the bound listeners only include it when this node is a controller.
            Map securityProtocolMapListeners = new TreeMap<>(listeners);
            if (kraftController) {
                earlyStartListeners.add(Endpoints.CONTROLLER_PROTOCOL_NAME);
                listeners.put(Endpoints.listenerName(controller), controller);
            }
            securityProtocolMapListeners.put(Endpoints.listenerName(controller), controller);
            props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, Endpoints.listenerName(controller));

            props.put(SocketServerConfigs.LISTENERS_CONFIG, joinListeners(listeners.values()));

            // Configure internal listener
            props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, Endpoints.listenerName(internal));
            advertised.add(internal);

            // Configure security protocol map, by respecting existing map
            props.compute(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, (k, v) ->
                    mergeSecurityProtocolMap(securityProtocolMapListeners, (String) v));

            // Configure early start listeners
            props.put(ServerConfigs.EARLY_START_LISTENERS_CONFIG, String.join(",", earlyStartListeners));
        } else {
            LOGGER.warnf("Broker configs %s, %s, %s, %s will not be configured automatically, " +
                    "make sure to provide necessary configuration manually.",
                    KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG,
                    SocketServerConfigs.LISTENERS_CONFIG,
                    ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG,
                    SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG);
        }

        // Configure advertised listeners
        props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, joinListeners(advertised));

        return props;
    }
140 |
141 | private static String joinListeners(Collection endpoints) {
142 | return endpoints.stream()
143 | .map(Endpoints::toListenerString)
144 | .distinct()
145 | .collect(Collectors.joining(","));
146 | }
147 |
148 | private static String mergeSecurityProtocolMap(Map listeners, String current) {
149 | Map existing = Stream.ofNullable(current).flatMap(m -> Arrays.stream(m.split(",")))
150 | .collect(Collectors.toMap(s -> s.split(":")[0], s -> s));
151 | String toAdd = listeners.entrySet().stream()
152 | .filter(e -> !existing.containsKey(e.getKey()))
153 | .map(Map.Entry::getValue)
154 | .map(Endpoints::toProtocolMap)
155 | .collect(Collectors.joining(","));
156 | return current == null ? toAdd : current + "," + toAdd;
157 | }
158 |
159 | public static Properties defaultStaticConfig(Properties props) {
160 | // Configure static default props
161 | props.putIfAbsent(ReplicationConfigs.REPLICA_SOCKET_TIMEOUT_MS_CONFIG, "1000");
162 | props.putIfAbsent(ReplicationConfigs.REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS_CONFIG, String.valueOf(Long.MAX_VALUE));
163 | props.putIfAbsent(ReplicationConfigs.CONTROLLER_SOCKET_TIMEOUT_MS_CONFIG, "1000");
164 | props.putIfAbsent(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, Boolean.toString(false));
165 | props.putIfAbsent(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, Boolean.toString(true));
166 | props.putIfAbsent(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, Boolean.toString(true));
167 | props.putIfAbsent(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG, "1000");
168 | props.putIfAbsent(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "2097152");
169 | props.putIfAbsent(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, String.valueOf(Long.MAX_VALUE));
170 | props.putIfAbsent(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, String.valueOf(Long.MAX_VALUE));
171 | props.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
172 | props.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "5");
173 | props.putIfAbsent(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0");
174 | props.putIfAbsent(ServerLogConfigs.NUM_PARTITIONS_CONFIG, "1");
175 | props.putIfAbsent(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, "1");
176 | props.putIfAbsent(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1");
177 | props.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
178 | props.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1");
179 | return props;
180 | }
181 |
182 | public static Properties providedConfig(Properties props) {
183 | Config config = ConfigProvider.getConfig();
184 | for (String propertyName : config.getPropertyNames()) {
185 | String propertyNameLowerCase = propertyName.toLowerCase();
186 | if (!propertyNameLowerCase.startsWith(CONFIG_PREFIX)) {
187 | continue;
188 | }
189 | // Replace _ by . - This is because Kafka properties tend to use . and env variables use _ for every special
190 | // character. So, replace _ with .
191 | String effectivePropertyName = propertyNameLowerCase.substring(CONFIG_PREFIX.length() + 1).toLowerCase()
192 | .replace("_", ".");
193 | String value = config.getValue(propertyName, String.class);
194 | props.put(effectivePropertyName, value);
195 | }
196 | return props;
197 | }
198 |
199 | }
200 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/EmbeddedKafkaBroker.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import kafka.cluster.EndPoint;
4 | import kafka.server.KafkaConfig;
5 | import kafka.server.KafkaRaftServer;
6 | import kafka.server.Server;
7 | import org.apache.kafka.common.Endpoint;
8 | import org.apache.kafka.common.Uuid;
9 | import org.apache.kafka.common.security.auth.SecurityProtocol;
10 | import org.apache.kafka.common.utils.Time;
11 | import org.apache.kafka.common.utils.Utils;
12 | import org.apache.kafka.network.SocketServerConfigs;
13 | import org.apache.kafka.server.common.MetadataVersion;
14 | import org.jboss.logging.Logger;
15 | import scala.jdk.javaapi.StreamConverters;
16 |
17 | import java.io.Closeable;
18 | import java.io.File;
19 | import java.util.Arrays;
20 | import java.util.List;
21 | import java.util.Properties;
22 | import java.util.function.Consumer;
23 | import java.util.stream.Collectors;
24 |
25 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT;
26 |
/**
 * Embedded Kafka Broker running in KRaft mode; by default listens on localhost
 * with random broker and controller ports. Configure via the {@code with*}
 * builder methods, then call {@link #start()}.
 */
public class EmbeddedKafkaBroker implements Closeable {

    static final Logger LOGGER = Logger.getLogger(EmbeddedKafkaBroker.class.getName());

    // Prefix for temporary log directories created when no log.dir is configured.
    static final String KAFKA_PREFIX = "kafka-server-";

    // Running server; null while stopped (see isRunning()).
    private Server kafkaServer;
    private KafkaConfig config;

    private String host = "localhost";
    // Ports default to 0, meaning a random unused port is chosen at start.
    private int kafkaPort = 0;
    private int internalPort = 0;
    private int controllerPort = 0;
    private boolean deleteDirsOnClose = true;
    private String clusterId = Uuid.randomUuid().toString();
    private String storageMetadataVersion = MetadataVersion.LATEST_PRODUCTION.version();
    private final Properties brokerConfig = new Properties();
    public SecurityProtocol defaultProtocol = PLAINTEXT;
    private boolean autoConfigure = true;
    private List scramCredentials = List.of();

    /**
     * Configure properties for the broker.
     *
     * @param function the config modifier function.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withConfig(Consumer function) {
        assertNotRunning();
        function.accept(this.brokerConfig);
        return this;
    }

    /**
     * Automatically configure broker for embedded testing, exposing relevant listeners, configuring broker to run
     * in KRaft mode if required, tuning timeouts. See {@link BrokerConfig} for details. Disabling autoConfigure should
     * be used in combination with user supplied configuration.
     *
     * @param autoConfigure autoConfigure
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withAutoConfigure(boolean autoConfigure) {
        this.autoConfigure = autoConfigure;
        return this;
    }

    /**
     * Configure the port on which the broker will listen.
     *
     * @param port the port, 0 for a random unused port.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withKafkaPort(int port) {
        assertNotRunning();
        this.kafkaPort = port;
        return this;
    }

    /**
     * Configure the controller port for the broker.
     *
     * @param port the port, 0 for a random unused port.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withControllerPort(int port) {
        assertNotRunning();
        this.controllerPort = port;
        return this;
    }


    /**
     * Configure the internal (inter-broker) port for the broker.
     *
     * @param port the port, 0 for a random unused port.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withInternalPort(int port) {
        assertNotRunning();
        this.internalPort = port;
        return this;
    }

    /**
     * Configure the hostname on which the broker will listen.
     *
     * @param host the host.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withKafkaHost(String host) {
        assertNotRunning();
        this.host = host;
        return this;
    }

    /**
     * Configure the cluster id for the broker storage dirs.
     *
     * @param clusterId the cluster id.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withClusterId(String clusterId) {
        assertNotRunning();
        this.clusterId = clusterId;
        return this;
    }

    /**
     * Configure the metadata version for the broker storage dirs.
     *
     * @param storageMetadataVersion the metadata version string.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withStorageMetadataVersion(String storageMetadataVersion) {
        assertNotRunning();
        this.storageMetadataVersion = storageMetadataVersion;
        return this;
    }

    /**
     * Configure the list of scram credentials for the broker.
     *
     * @param scramCredentials the list of strings representing scram credentials.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withScramCredentials(List scramCredentials) {
        assertNotRunning();
        this.scramCredentials = scramCredentials;
        return this;
    }

    /**
     * Configure whether log directories will be deleted on broker shutdown.
     *
     * @param deleteDirsOnClose {@code true} to delete log dirs in {@link #close()}.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withDeleteLogDirsOnClose(boolean deleteDirsOnClose) {
        assertNotRunning();
        this.deleteDirsOnClose = deleteDirsOnClose;
        return this;
    }

    /**
     * Configure custom listeners for the broker.
     *
     * Note that this will override the default PLAINTEXT listener.
     * A CONTROLLER listener will be added automatically.
     *
     * @param endpoints endpoints to advertise.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withAdvertisedListeners(Endpoint... endpoints) {
        String advertisedListeners = Arrays.stream(endpoints)
                .map(Endpoints::toListenerString)
                .collect(Collectors.joining(","));
        return withAdvertisedListeners(advertisedListeners);
    }

    /**
     * Configure custom listeners for the broker.
     *
     * Note that this will override the default PLAINTEXT listener.
     * A CONTROLLER listener will be added automatically.
     *
     * @param advertisedListeners comma-separated listener string; appended to any previously set value.
     * @return this {@link EmbeddedKafkaBroker}
     */
    public EmbeddedKafkaBroker withAdvertisedListeners(String advertisedListeners) {
        assertNotRunning();
        this.brokerConfig.compute(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG,
                (k, v) -> v == null ? advertisedListeners : v + "," + advertisedListeners);
        return this;
    }

    /**
     * Create and start the broker. Idempotent: returns immediately if already running.
     *
     * @return this {@link EmbeddedKafkaBroker}
     */
    public synchronized EmbeddedKafkaBroker start() {
        if (isRunning()) {
            return this;
        }

        if (autoConfigure) {
            LOGGER.info("auto-configuring server");
            // Layering: provided (MicroProfile) config, then static defaults, then core KRaft/listener config.
            BrokerConfig.providedConfig(brokerConfig);
            BrokerConfig.defaultStaticConfig(brokerConfig);
            BrokerConfig.defaultCoreConfig(brokerConfig, host, kafkaPort, internalPort, controllerPort, defaultProtocol);
        }

        Storage.ensureLogDirExists(brokerConfig);

        long start = System.currentTimeMillis();
        this.config = KafkaConfig.fromProps(brokerConfig, false);
        Server server;

        var metadataVersion = MetadataVersion.fromVersionString(storageMetadataVersion);
        // Format storage dirs (already-formatted dirs are ignored) before starting the KRaft server.
        Storage.formatStorageFromConfig(config, clusterId, true, metadataVersion, scramCredentials);
        server = new KafkaRaftServer(config, Time.SYSTEM);
        server.startup();
        this.kafkaServer = server;
        LOGGER.infof("Kafka broker started in %d ms with advertised listeners: %s",
                System.currentTimeMillis() - start, getAdvertisedListeners());
        return this;
    }

    /** Shuts the broker down and, if configured, deletes its log directories. */
    @Override
    public synchronized void close() {
        try {
            if (isRunning()) {
                kafkaServer.shutdown();
                kafkaServer.awaitShutdown();
            }
        } catch (Exception e) {
            LOGGER.error("Error shutting down broker", e);
        } finally {
            if (deleteDirsOnClose) {
                try {
                    for (String logDir : getLogDirs()) {
                        Utils.delete(new File(logDir));
                    }
                } catch (Exception e) {
                    LOGGER.error("Error deleting logdirs", e);
                }
            }
            kafkaServer = null;
        }
    }

    public boolean isRunning() {
        return kafkaServer != null;
    }

    // Guard used by all with* methods: configuration is only allowed before start().
    private void assertNotRunning() {
        if (isRunning()) {
            throw new IllegalStateException("Configuration of the running broker is not permitted.");
        }
    }

    public KafkaConfig getKafkaConfig() {
        return config;
    }

    /** Returns the effective advertised listeners as a comma-separated connection string. */
    public String getAdvertisedListeners() {
        return StreamConverters.asJavaParStream(config.effectiveAdvertisedBrokerListeners())
                .map(EndPoint::connectionString)
                .collect(Collectors.joining(","));
    }

    public List getLogDirs() {
        return StreamConverters.asJavaParStream(config.logDirs())
                .toList();
    }

    public String getClusterId() {
        return this.clusterId;
    }

}
290 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/Endpoints.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT;
4 |
5 | import java.io.IOException;
6 | import java.net.ServerSocket;
7 | import java.util.Arrays;
8 | import java.util.List;
9 | import java.util.stream.Collectors;
10 |
11 | import org.apache.kafka.common.Endpoint;
12 | import org.apache.kafka.common.security.auth.SecurityProtocol;
13 |
14 | public final class Endpoints {
15 | private Endpoints() {
16 | }
17 |
18 | public static final String BROKER_PROTOCOL_NAME = "BROKER";
19 | public static final String CONTROLLER_PROTOCOL_NAME = "CONTROLLER";
20 |
21 | public static Endpoint endpoint(SecurityProtocol protocol, int port) {
22 | return endpoint(protocol.name, protocol, "", port);
23 | }
24 |
25 | public static Endpoint endpoint(SecurityProtocol protocol, String host, int port) {
26 | return endpoint(protocol.name, protocol, host, port);
27 | }
28 |
29 | public static Endpoint endpoint(String listener, SecurityProtocol protocol, int port) {
30 | return endpoint(listener, protocol, "", port);
31 | }
32 |
33 | public static Endpoint endpoint(String listener, SecurityProtocol protocol, String host, int port) {
34 | return new Endpoint(listener, protocol, host, getUnusedPort(port));
35 | }
36 |
37 | public static List parseEndpoints(String listenerStr, SecurityProtocol defaultProtocol) {
38 | return Arrays.stream(listenerStr.split(","))
39 | .map(s -> parseEndpoint(s, defaultProtocol))
40 | .collect(Collectors.toList());
41 | }
42 |
43 | public static Endpoint parseEndpoint(String listenerStr, SecurityProtocol defaultProtocol) {
44 | String[] parts = listenerStr.split(":");
45 | if (parts.length == 2) {
46 | return new Endpoint(null, defaultProtocol, parts[0], Integer.parseInt(parts[1]));
47 | } else if (parts.length == 3) {
48 | String listenerName = parts[0];
49 | String host = parts[1].replace("//", "");
50 | int port = Integer.parseInt(parts[2]);
51 | if (SecurityProtocol.names().contains(listenerName)) {
52 | return new Endpoint(listenerName, SecurityProtocol.forName(listenerName), host, port);
53 | } else {
54 | return new Endpoint(listenerName, defaultProtocol, host, port);
55 | }
56 | }
57 | throw new IllegalArgumentException("Cannot parse listener: " + listenerStr);
58 | }
59 |
60 | public static Endpoint internal(String host, int port) {
61 | return endpoint(BROKER_PROTOCOL_NAME, PLAINTEXT, host, port);
62 | }
63 |
64 | public static Endpoint controller(String host, int port) {
65 | return endpoint(CONTROLLER_PROTOCOL_NAME, PLAINTEXT, host, port);
66 | }
67 |
68 | public static String toListenerString(Endpoint endpoint) {
69 | return String.format("%s://%s:%d", listenerName(endpoint), endpoint.host(), endpoint.port());
70 | }
71 |
72 | public static String toProtocolMap(Endpoint endpoint) {
73 | return String.format("%s:%s", listenerName(endpoint), endpoint.securityProtocol().name);
74 | }
75 |
76 | public static String listenerName(Endpoint endpoint) {
77 | return endpoint.listenerName().orElse(endpoint.securityProtocol().name);
78 | }
79 |
80 | public static int getUnusedPort(int port) {
81 | if (port != 0) {
82 | return port;
83 | }
84 | try (ServerSocket s = new ServerSocket(0)) {
85 | return s.getLocalPort();
86 | } catch (IOException e) {
87 | throw new RuntimeException(e);
88 | }
89 | }
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/ScramUtils.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import org.apache.kafka.common.metadata.UserScramCredentialRecord;
4 | import org.apache.kafka.common.security.scram.ScramCredential;
5 |
6 | final class ScramUtils {
7 |
8 | private ScramUtils() {
9 | throw new IllegalArgumentException();
10 | }
11 |
12 | static ScramCredential asScramCredential(UserScramCredentialRecord uscr) {
13 | return new ScramCredential(uscr.salt(), uscr.storedKey(), uscr.serverKey(), uscr.iterations());
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/ServerConfig.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import java.nio.file.Path;
4 | import java.util.Arrays;
5 | import java.util.Collections;
6 | import java.util.List;
7 | import java.util.Optional;
8 |
9 | import io.smallrye.config.ConfigMapping;
10 | import io.smallrye.config.WithDefault;
11 |
@ConfigMapping(prefix = "server")
public interface ServerConfig {

    /** Port for the client-facing Kafka listener. */
    @WithDefault("9092")
    int kafkaPort();

    /** Port for the inter-broker (internal) listener. */
    @WithDefault("9093")
    int internalPort();

    /** Port for the KRaft controller listener. */
    @WithDefault("9094")
    int controllerPort();

    /** Whether broker log directories are deleted on shutdown. */
    @WithDefault("false")
    boolean deleteDirsOnClose();

    /** Cluster id for storage formatting; random when absent. */
    Optional clusterId();

    /** Host to listen on; defaults to empty (all interfaces) when absent. */
    Optional host();

    /** Optional properties file layered on top of the broker configuration. */
    Optional propertiesFile();

    /** Whether the broker configuration is auto-configured (see BrokerConfig). */
    @WithDefault("true")
    boolean autoConfigure();

    /**
     * List of scram credentials, separated by semicolon.
     *
     * Format of the scram string must be in one of the following forms:
     *
     * SCRAM-SHA-256=[user=alice,password=alice-secret]
     * SCRAM-SHA-512=[user=alice,iterations=8192,salt="N3E=",saltedpassword="YCE="]
     *
     *
     * @return list of scram credentials
     */
    Optional scramCredentials();

    // Splits the raw semicolon-separated value; empty list when unset.
    default List scramCredentialsList() {
        return scramCredentials().map(s -> Arrays.stream(s.split(";")).toList())
                .orElse(Collections.emptyList());
    }

    /** Metadata version used for the Kafka storage. */
    Optional storageMetadataVersion();
}
57 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/Startup.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import jakarta.enterprise.context.ApplicationScoped;
4 | import jakarta.enterprise.event.Observes;
5 | import jakarta.inject.Inject;
6 |
7 | import org.apache.kafka.clients.CommonClientConfigs;
8 | import org.apache.kafka.common.utils.Utils;
9 |
10 | import com.ozangunalp.kafka.server.metrics.Reporter;
11 | import io.quarkus.runtime.ShutdownEvent;
12 | import io.quarkus.runtime.StartupEvent;
13 | import io.smallrye.mutiny.unchecked.Unchecked;
14 |
15 | @ApplicationScoped
16 | public class Startup {
17 |
18 | @Inject
19 | ServerConfig config;
20 |
21 | private EmbeddedKafkaBroker broker;
22 |
23 | void startup(@Observes StartupEvent event) {
24 | broker = new EmbeddedKafkaBroker()
25 | .withDeleteLogDirsOnClose(config.deleteDirsOnClose())
26 | .withKafkaPort(config.kafkaPort())
27 | .withControllerPort(config.controllerPort())
28 | .withInternalPort(config.internalPort())
29 | .withKafkaHost(config.host().orElse(""))
30 | .withAutoConfigure(config.autoConfigure())
31 | .withScramCredentials(config.scramCredentialsList())
32 | .withConfig(properties -> {
33 | properties.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, Reporter.class.getName());
34 | config.propertiesFile().ifPresent(Unchecked.consumer(file ->
35 | properties.putAll(Utils.loadProps(file.toFile().getAbsolutePath()))));
36 | });
37 | config.clusterId().ifPresent(id -> broker.withClusterId(id));
38 | config.storageMetadataVersion().ifPresent(storageMetadataVersion -> broker.withStorageMetadataVersion(storageMetadataVersion));
39 | broker.start();
40 | }
41 |
42 | void shutdown(@Observes ShutdownEvent event) {
43 | broker.close();
44 | }
45 | }
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/Storage.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import java.io.ByteArrayOutputStream;
4 | import java.io.IOException;
5 | import java.io.PrintStream;
6 | import java.io.UncheckedIOException;
7 | import java.nio.file.Files;
8 | import java.nio.file.Paths;
9 | import java.util.List;
10 | import java.util.Properties;
11 | import java.util.UUID;
12 |
13 | import org.apache.kafka.metadata.storage.Formatter;
14 | import org.apache.kafka.server.common.MetadataVersion;
15 | import org.jboss.logging.Logger;
16 |
17 | import kafka.server.KafkaConfig;
18 | import kafka.tools.StorageTool;
19 | import scala.jdk.javaapi.CollectionConverters;
20 |
21 | public final class Storage {
22 |
23 | static final Logger LOGGER = Logger.getLogger(Storage.class.getName());
24 | public static final String LOG_DIR = "log.dir";
25 |
26 | private Storage() {
27 | }
28 |
29 | public static void ensureLogDirExists(Properties properties) {
30 | String logDir = properties.getProperty(LOG_DIR);
31 | if (logDir != null) {
32 | try {
33 | Files.createDirectories(Paths.get(logDir));
34 | } catch (Throwable throwable) {
35 | LOGGER.warnf(throwable, "Error using %s as `log.dir`, setting up a temporary directory.", logDir);
36 | Storage.createAndSetLogDir(properties);
37 | }
38 | } else {
39 | Storage.createAndSetLogDir(properties);
40 | }
41 | }
42 |
43 | public static void createAndSetLogDir(Properties properties) {
44 | try {
45 | properties.put(LOG_DIR,
46 | Files.createTempDirectory(EmbeddedKafkaBroker.KAFKA_PREFIX + UUID.randomUUID()).toString());
47 | } catch (IOException e) {
48 | throw new UncheckedIOException(e);
49 | }
50 | }
51 |
52 | public static void formatStorageFromConfig(KafkaConfig config, String clusterId, boolean ignoreFormatted, MetadataVersion metadataVersion, List scramCredentials) {
53 | if (!scramCredentials.isEmpty() && !metadataVersion.isScramSupported()) {
54 | throw new IllegalArgumentException("SCRAM is only supported in metadataVersion IBP_3_5_IV2 or later.");
55 | }
56 | var controllerListenerName = CollectionConverters.asJava(config.controllerListenerNames()).stream().findFirst().orElseThrow();
57 | var logDirs = CollectionConverters.asJava(StorageTool.configToLogDirectories(config));
58 | var storageFormatter = new Formatter()
59 | .setClusterId(clusterId)
60 | .setNodeId(config.nodeId())
61 | .setControllerListenerName(controllerListenerName)
62 | .setMetadataLogDirectory(config.metadataLogDir())
63 | .setDirectories(logDirs)
64 | .setScramArguments(scramCredentials)
65 | .setIgnoreFormatted(ignoreFormatted)
66 | .setPrintStream(LoggingOutputStream.loggerPrintStream(LOGGER))
67 | .setReleaseVersion(metadataVersion);
68 |
69 | try {
70 | storageFormatter.run();
71 | } catch (Exception e) {
72 | throw new RuntimeException("Failed to format storage", e);
73 | }
74 | }
75 |
76 | public static class LoggingOutputStream extends java.io.OutputStream {
77 |
78 | public static PrintStream loggerPrintStream(Logger logger) {
79 | return new PrintStream(new LoggingOutputStream(logger));
80 | }
81 |
82 | private final ByteArrayOutputStream os = new ByteArrayOutputStream(1000);
83 | private final Logger logger;
84 |
85 | LoggingOutputStream(Logger logger) {
86 | this.logger = logger;
87 | }
88 |
89 | @Override
90 | public void write(int b) throws IOException {
91 | if (b == '\n' || b == '\r') {
92 | os.flush();
93 | String log = os.toString();
94 | logger.info(log);
95 | } else {
96 | os.write(b);
97 | }
98 | }
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/kafka-server/src/main/java/com/ozangunalp/kafka/server/metrics/Reporter.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server.metrics;
2 |
3 | import java.util.List;
4 | import java.util.Map;
5 |
6 | import org.apache.kafka.common.metrics.KafkaMetric;
7 | import org.apache.kafka.common.metrics.MetricsReporter;
8 |
9 | import io.quarkus.runtime.annotations.RegisterForReflection;
10 |
@RegisterForReflection
public class Reporter implements MetricsReporter {
    // No-op MetricsReporter: every callback is intentionally empty.
    // NOTE(review): presumably registered (see Startup) so Kafka's metrics
    // plumbing is exercised and the class is retained for reflection in the
    // native image — confirm against the build configuration.

    @Override
    public void init(List metrics) {

    }

    @Override
    public void metricChange(KafkaMetric metric) {

    }

    @Override
    public void metricRemoval(KafkaMetric metric) {

    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map configs) {

    }
}
39 |
--------------------------------------------------------------------------------
/kafka-server/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | kafka.log.dir=./target/log-dir
2 | quarkus.docker.dockerfile-native-path=src/main/docker/Dockerfile.native-micro
3 | quarkus.container-image.registry=quay.io
4 | quarkus.container-image.group=ogunalp
5 | quarkus.application.name=kafka-native
6 | quarkus.container-image.name=${quarkus.application.name}
7 | quarkus.native.auto-service-loader-registration=true
8 |
--------------------------------------------------------------------------------
/kafka-server/src/test/java/com/ozangunalp/kafka/server/BrokerConfigTest.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 |
4 | import static org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT;
5 | import static org.assertj.core.api.Assertions.assertThat;
6 |
7 | import java.util.Properties;
8 |
9 | import org.junit.jupiter.api.Test;
10 |
11 |
12 | class BrokerConfigTest {
13 |
14 | @Test
15 | void testEmptyOverride() {
16 | Properties properties = BrokerConfig.defaultCoreConfig(new Properties(), "", 9092, 9093, 9094, PLAINTEXT);
17 | assertThat(properties).containsEntry("broker.id", "1");
18 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
19 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092,CONTROLLER://:9094");
20 | assertThat(properties).containsEntry("process.roles", "broker,controller");
21 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER");
22 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER");
23 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093");
24 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER");
25 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT");
26 | }
27 |
28 | @Test
29 | void testOverrideAdvertisedListeners() {
30 | Properties props = new Properties();
31 | props.put("advertised.listeners", "PLAINTEXT://:9092");
32 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
33 | assertThat(properties).containsEntry("broker.id", "1");
34 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
35 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092,CONTROLLER://:9094");
36 | assertThat(properties).containsEntry("process.roles", "broker,controller");
37 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER");
38 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER");
39 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093");
40 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER");
41 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT");
42 | }
43 |
44 | @Test
45 | void testOverrideProcessRoles() {
46 | Properties props = new Properties();
47 | props.put("advertised.listeners", "PLAINTEXT://:9092");
48 | props.put("process.roles", "broker");
49 | props.put("listeners", "BROKER://:9093,PLAINTEXT://:9092");
50 | props.put("listener.security.protocol.map", "BROKER:PLAINTEXT");
51 | props.put("controller.quorum.voters", "1@:9094");
52 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
53 | assertThat(properties).doesNotContainKey("controller.listener.names");
54 | assertThat(properties).doesNotContainKey("inter.broker.listener.name");
55 | assertThat(properties).doesNotContainKey("early.start.listeners");
56 | assertThat(properties).containsEntry("broker.id", "1");
57 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
58 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092");
59 | assertThat(properties).containsEntry("process.roles", "broker");
60 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092");
61 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT");
62 | }
63 |
64 | @Test
65 | void testOverrideProcessRolesWithNoQuorumVotersOverride() {
66 | Properties props = new Properties();
67 | props.put("advertised.listeners", "PLAINTEXT://:9092");
68 | props.put("process.roles", "broker");
69 | props.put("listeners", "BROKER://:9093,PLAINTEXT://:9092");
70 | props.put("listener.security.protocol.map", "BROKER:PLAINTEXT");
71 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
72 | assertThat(properties).doesNotContainKey("controller.listener.names");
73 | assertThat(properties).doesNotContainKey("inter.broker.listener.name");
74 | assertThat(properties).doesNotContainKey("early.start.listeners");
75 | assertThat(properties).doesNotContainKey("controller.quorum.voters");
76 | assertThat(properties).containsEntry("broker.id", "1");
77 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092");
78 | assertThat(properties).containsEntry("process.roles", "broker");
79 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092");
80 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT");
81 | }
82 |
83 | @Test
84 | void testOverrideListeners() {
85 | Properties props = new Properties();
86 | props.put("advertised.listeners", "SSL://:9092");
87 | props.put("listeners", "SSL://:9092,CONTROLLER://9093");
88 | props.put("controller.listener.names", "CONTROLLER");
89 | props.put("inter.broker.listener.name", "SSL");
90 | props.put("listener.security.protocol.map", "SSL:SSL,CONTROLLER:PLAINTEXT");
91 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
92 | assertThat(properties).containsEntry("broker.id", "1");
93 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
94 | assertThat(properties).containsEntry("listeners", "SSL://:9092,CONTROLLER://9093");
95 | assertThat(properties).containsEntry("process.roles", "broker,controller");
96 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER");
97 | assertThat(properties).containsEntry("inter.broker.listener.name", "SSL");
98 | assertThat(properties).containsEntry("advertised.listeners", "SSL://:9092");
99 | assertThat(properties).containsEntry("listener.security.protocol.map", "SSL:SSL,CONTROLLER:PLAINTEXT");
100 | }
101 |
102 | @Test
103 | void testKraftBrokerRoleOnly() {
104 | Properties props = new Properties();
105 | props.put("process.roles", "broker");
106 | props.put("broker.id", "2");
107 | props.put("controller.quorum.voters", "1@:9094");
108 |
109 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
110 |
111 | assertThat(properties).containsEntry("broker.id", "2");
112 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
113 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,PLAINTEXT://:9092");
114 | assertThat(properties).containsEntry("process.roles", "broker");
115 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER");
116 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER");
117 | assertThat(properties).containsEntry("advertised.listeners", "PLAINTEXT://:9092,BROKER://:9093");
118 | assertThat(properties).containsEntry("early.start.listeners", "BROKER");
119 | assertThat(properties).containsEntry("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT");
120 | }
121 |
122 | @Test
123 | void testMergedSecurityProtocolMap() {
124 | Properties props = new Properties();
125 | props.put("advertised.listeners", "JWT://:9092");
126 | props.put("listener.security.protocol.map", "JWT:SSL");
127 | Properties properties = BrokerConfig.defaultCoreConfig(props, "", 9092, 9093, 9094, PLAINTEXT);
128 | assertThat(properties).containsEntry("broker.id", "1");
129 | assertThat(properties).containsEntry("controller.quorum.voters", "1@:9094");
130 | assertThat(properties).containsEntry("listeners", "BROKER://:9093,CONTROLLER://:9094,JWT://:9092");
131 | assertThat(properties).containsEntry("process.roles", "broker,controller");
132 | assertThat(properties).containsEntry("controller.listener.names", "CONTROLLER");
133 | assertThat(properties).containsEntry("inter.broker.listener.name", "BROKER");
134 | assertThat(properties).containsEntry("advertised.listeners", "JWT://:9092,BROKER://:9093");
135 | assertThat(properties).containsEntry("early.start.listeners", "BROKER,CONTROLLER");
136 | assertThat(properties).containsEntry("listener.security.protocol.map", "JWT:SSL,BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT");
137 | }
138 |
139 |
140 | }
--------------------------------------------------------------------------------
/kafka-server/src/test/java/com/ozangunalp/kafka/server/ScramUtilsTest.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import org.apache.kafka.clients.admin.ScramMechanism;
4 | import org.apache.kafka.common.metadata.UserScramCredentialRecord;
5 | import org.apache.kafka.common.security.scram.ScramCredential;
6 | import org.junit.jupiter.api.Test;
7 |
8 | import org.junit.jupiter.params.ParameterizedTest;
9 | import org.junit.jupiter.params.provider.EnumSource;
10 |
11 | import java.nio.charset.StandardCharsets;
12 |
13 | import static org.assertj.core.api.Assertions.assertThat;
14 |
15 | class ScramUtilsTest {
16 | @Test
17 | void asScramCredential() {
18 | int iterations = 4096;
19 | byte[] salt = "salt".getBytes(StandardCharsets.UTF_8);
20 | byte[] server = "key".getBytes(StandardCharsets.UTF_8);
21 | var uscr = new UserScramCredentialRecord()
22 | .setIterations(iterations)
23 | .setSalt(salt)
24 | .setServerKey(server);
25 |
26 | var sc = ScramUtils.asScramCredential(uscr);
27 | assertThat(sc).extracting(ScramCredential::iterations).isEqualTo(iterations);
28 | assertThat(sc).extracting(ScramCredential::salt).isEqualTo(salt);
29 | assertThat(sc).extracting(ScramCredential::serverKey).isEqualTo(server);
30 |
31 |
32 | }
33 | }
--------------------------------------------------------------------------------
/kafka-server/src/test/java/com/ozangunalp/kafka/server/SmokeTest.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server;
2 |
3 | import static org.assertj.core.api.Assertions.assertThat;
4 |
5 | import static org.awaitility.Awaitility.await;
6 |
7 | import org.junit.jupiter.api.Test;
8 |
9 | import io.quarkus.test.junit.QuarkusTest;
10 | import io.smallrye.reactive.messaging.kafka.companion.KafkaCompanion;
11 |
12 | @QuarkusTest
13 | public class SmokeTest {
14 |
15 | @Test
16 | void test() {
17 | try (KafkaCompanion companion = new KafkaCompanion("localhost:9092")) {
18 | await().untilAsserted(() -> assertThat(companion.cluster().nodes().size()).isGreaterThan(0));
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/mvnw:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # ----------------------------------------------------------------------------
3 | # Licensed to the Apache Software Foundation (ASF) under one
4 | # or more contributor license agreements. See the NOTICE file
5 | # distributed with this work for additional information
6 | # regarding copyright ownership. The ASF licenses this file
7 | # to you under the Apache License, Version 2.0 (the
8 | # "License"); you may not use this file except in compliance
9 | # with the License. You may obtain a copy of the License at
10 | #
11 | # https://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing,
14 | # software distributed under the License is distributed on an
15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | # KIND, either express or implied. See the License for the
17 | # specific language governing permissions and limitations
18 | # under the License.
19 | # ----------------------------------------------------------------------------
20 |
21 | # ----------------------------------------------------------------------------
22 | # Maven Start Up Batch script
23 | #
24 | # Required ENV vars:
25 | # ------------------
26 | # JAVA_HOME - location of a JDK home dir
27 | #
28 | # Optional ENV vars
29 | # -----------------
30 | # M2_HOME - location of maven2's installed home dir
31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven
32 | # e.g. to debug Maven itself, use
33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files
35 | # ----------------------------------------------------------------------------
36 |
37 | if [ -z "$MAVEN_SKIP_RC" ] ; then
38 |
39 | if [ -f /etc/mavenrc ] ; then
40 | . /etc/mavenrc
41 | fi
42 |
43 | if [ -f "$HOME/.mavenrc" ] ; then
44 | . "$HOME/.mavenrc"
45 | fi
46 |
47 | fi
48 |
49 | # OS specific support. $var _must_ be set to either true or false.
50 | cygwin=false;
51 | darwin=false;
52 | mingw=false
53 | case "`uname`" in
54 | CYGWIN*) cygwin=true ;;
55 | MINGW*) mingw=true;;
56 | Darwin*) darwin=true
57 | # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
58 | # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
59 | if [ -z "$JAVA_HOME" ]; then
60 | if [ -x "/usr/libexec/java_home" ]; then
61 | export JAVA_HOME="`/usr/libexec/java_home`"
62 | else
63 | export JAVA_HOME="/Library/Java/Home"
64 | fi
65 | fi
66 | ;;
67 | esac
68 |
69 | if [ -z "$JAVA_HOME" ] ; then
70 | if [ -r /etc/gentoo-release ] ; then
71 | JAVA_HOME=`java-config --jre-home`
72 | fi
73 | fi
74 |
75 | if [ -z "$M2_HOME" ] ; then
76 | ## resolve links - $0 may be a link to maven's home
77 | PRG="$0"
78 |
79 | # need this for relative symlinks
80 | while [ -h "$PRG" ] ; do
81 | ls=`ls -ld "$PRG"`
82 | link=`expr "$ls" : '.*-> \(.*\)$'`
83 | if expr "$link" : '/.*' > /dev/null; then
84 | PRG="$link"
85 | else
86 | PRG="`dirname "$PRG"`/$link"
87 | fi
88 | done
89 |
90 | saveddir=`pwd`
91 |
92 | M2_HOME=`dirname "$PRG"`/..
93 |
94 | # make it fully qualified
95 | M2_HOME=`cd "$M2_HOME" && pwd`
96 |
97 | cd "$saveddir"
98 | # echo Using m2 at $M2_HOME
99 | fi
100 |
101 | # For Cygwin, ensure paths are in UNIX format before anything is touched
102 | if $cygwin ; then
103 | [ -n "$M2_HOME" ] &&
104 | M2_HOME=`cygpath --unix "$M2_HOME"`
105 | [ -n "$JAVA_HOME" ] &&
106 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
107 | [ -n "$CLASSPATH" ] &&
108 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
109 | fi
110 |
111 | # For Mingw, ensure paths are in UNIX format before anything is touched
112 | if $mingw ; then
113 | [ -n "$M2_HOME" ] &&
114 | M2_HOME="`(cd "$M2_HOME"; pwd)`"
115 | [ -n "$JAVA_HOME" ] &&
116 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
117 | fi
118 |
119 | if [ -z "$JAVA_HOME" ]; then
120 | javaExecutable="`which javac`"
121 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
122 | # readlink(1) is not available as standard on Solaris 10.
123 | readLink=`which readlink`
124 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
125 | if $darwin ; then
126 | javaHome="`dirname \"$javaExecutable\"`"
127 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
128 | else
129 | javaExecutable="`readlink -f \"$javaExecutable\"`"
130 | fi
131 | javaHome="`dirname \"$javaExecutable\"`"
132 | javaHome=`expr "$javaHome" : '\(.*\)/bin'`
133 | JAVA_HOME="$javaHome"
134 | export JAVA_HOME
135 | fi
136 | fi
137 | fi
138 |
139 | if [ -z "$JAVACMD" ] ; then
140 | if [ -n "$JAVA_HOME" ] ; then
141 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
142 | # IBM's JDK on AIX uses strange locations for the executables
143 | JAVACMD="$JAVA_HOME/jre/sh/java"
144 | else
145 | JAVACMD="$JAVA_HOME/bin/java"
146 | fi
147 | else
148 | JAVACMD="`which java`"
149 | fi
150 | fi
151 |
152 | if [ ! -x "$JAVACMD" ] ; then
153 | echo "Error: JAVA_HOME is not defined correctly." >&2
154 | echo " We cannot execute $JAVACMD" >&2
155 | exit 1
156 | fi
157 |
158 | if [ -z "$JAVA_HOME" ] ; then
159 | echo "Warning: JAVA_HOME environment variable is not set."
160 | fi
161 |
162 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
163 |
164 | # traverses directory structure from process work directory to filesystem root
165 | # first directory with .mvn subdirectory is considered project base directory
166 | find_maven_basedir() {
167 |
168 | if [ -z "$1" ]
169 | then
170 | echo "Path not specified to find_maven_basedir"
171 | return 1
172 | fi
173 |
174 | basedir="$1"
175 | wdir="$1"
176 | while [ "$wdir" != '/' ] ; do
177 | if [ -d "$wdir"/.mvn ] ; then
178 | basedir=$wdir
179 | break
180 | fi
181 | # workaround for JBEAP-8937 (on Solaris 10/Sparc)
182 | if [ -d "${wdir}" ]; then
183 | wdir=`cd "$wdir/.."; pwd`
184 | fi
185 | # end of workaround
186 | done
187 | echo "${basedir}"
188 | }
189 |
190 | # concatenates all lines of a file
191 | concat_lines() {
192 | if [ -f "$1" ]; then
193 | echo "$(tr -s '\n' ' ' < "$1")"
194 | fi
195 | }
196 |
197 | BASE_DIR=`find_maven_basedir "$(pwd)"`
198 | if [ -z "$BASE_DIR" ]; then
199 | exit 1;
200 | fi
201 |
202 | ##########################################################################################
203 | # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
204 | # This allows using the maven wrapper in projects that prohibit checking in binary data.
205 | ##########################################################################################
206 | if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
207 | if [ "$MVNW_VERBOSE" = true ]; then
208 | echo "Found .mvn/wrapper/maven-wrapper.jar"
209 | fi
210 | else
211 | if [ "$MVNW_VERBOSE" = true ]; then
212 | echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
213 | fi
214 | if [ -n "$MVNW_REPOURL" ]; then
215 | jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
216 | else
217 | jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
218 | fi
219 | while IFS="=" read key value; do
220 | case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
221 | esac
222 | done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
223 | if [ "$MVNW_VERBOSE" = true ]; then
224 | echo "Downloading from: $jarUrl"
225 | fi
226 | wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
227 | if $cygwin; then
228 | wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
229 | fi
230 |
231 | if command -v wget > /dev/null; then
232 | if [ "$MVNW_VERBOSE" = true ]; then
233 | echo "Found wget ... using wget"
234 | fi
235 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
236 | wget "$jarUrl" -O "$wrapperJarPath"
237 | else
238 | wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
239 | fi
240 | elif command -v curl > /dev/null; then
241 | if [ "$MVNW_VERBOSE" = true ]; then
242 | echo "Found curl ... using curl"
243 | fi
244 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
245 | curl -o "$wrapperJarPath" "$jarUrl" -f
246 | else
247 | curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
248 | fi
249 |
250 | else
251 | if [ "$MVNW_VERBOSE" = true ]; then
252 | echo "Falling back to using Java to download"
253 | fi
254 | javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
255 | # For Cygwin, switch paths to Windows format before running javac
256 | if $cygwin; then
257 | javaClass=`cygpath --path --windows "$javaClass"`
258 | fi
259 | if [ -e "$javaClass" ]; then
260 | if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
261 | if [ "$MVNW_VERBOSE" = true ]; then
262 | echo " - Compiling MavenWrapperDownloader.java ..."
263 | fi
264 | # Compiling the Java class
265 | ("$JAVA_HOME/bin/javac" "$javaClass")
266 | fi
267 | if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
268 | # Running the downloader
269 | if [ "$MVNW_VERBOSE" = true ]; then
270 | echo " - Running MavenWrapperDownloader.java ..."
271 | fi
272 | ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
273 | fi
274 | fi
275 | fi
276 | fi
277 | ##########################################################################################
278 | # End of extension
279 | ##########################################################################################
280 |
281 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
282 | if [ "$MVNW_VERBOSE" = true ]; then
283 | echo $MAVEN_PROJECTBASEDIR
284 | fi
285 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
286 |
287 | # For Cygwin, switch paths to Windows format before running java
288 | if $cygwin; then
289 | [ -n "$M2_HOME" ] &&
290 | M2_HOME=`cygpath --path --windows "$M2_HOME"`
291 | [ -n "$JAVA_HOME" ] &&
292 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
293 | [ -n "$CLASSPATH" ] &&
294 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
295 | [ -n "$MAVEN_PROJECTBASEDIR" ] &&
296 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
297 | fi
298 |
299 | # Provide a "standardized" way to retrieve the CLI args that will
300 | # work with both Windows and non-Windows executions.
301 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
302 | export MAVEN_CMD_LINE_ARGS
303 |
304 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
305 |
306 | exec "$JAVACMD" \
307 | $MAVEN_OPTS \
308 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
309 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
310 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
311 |
--------------------------------------------------------------------------------
/mvnw.cmd:
--------------------------------------------------------------------------------
1 | @REM ----------------------------------------------------------------------------
2 | @REM Licensed to the Apache Software Foundation (ASF) under one
3 | @REM or more contributor license agreements. See the NOTICE file
4 | @REM distributed with this work for additional information
5 | @REM regarding copyright ownership. The ASF licenses this file
6 | @REM to you under the Apache License, Version 2.0 (the
7 | @REM "License"); you may not use this file except in compliance
8 | @REM with the License. You may obtain a copy of the License at
9 | @REM
10 | @REM https://www.apache.org/licenses/LICENSE-2.0
11 | @REM
12 | @REM Unless required by applicable law or agreed to in writing,
13 | @REM software distributed under the License is distributed on an
14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | @REM KIND, either express or implied. See the License for the
16 | @REM specific language governing permissions and limitations
17 | @REM under the License.
18 | @REM ----------------------------------------------------------------------------
19 |
20 | @REM ----------------------------------------------------------------------------
21 | @REM Maven Start Up Batch script
22 | @REM
23 | @REM Required ENV vars:
24 | @REM JAVA_HOME - location of a JDK home dir
25 | @REM
26 | @REM Optional ENV vars
27 | @REM M2_HOME - location of maven2's installed home dir
28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
31 | @REM e.g. to debug Maven itself, use
32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
34 | @REM ----------------------------------------------------------------------------
35 |
36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
37 | @echo off
38 | @REM set title of command window
39 | title %0
40 | @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
41 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
42 |
43 | @REM set %HOME% to equivalent of $HOME
44 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
45 |
46 | @REM Execute a user defined script before this one
47 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
48 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending
49 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
50 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
51 | :skipRcPre
52 |
53 | @setlocal
54 |
55 | set ERROR_CODE=0
56 |
57 | @REM To isolate internal variables from possible post scripts, we use another setlocal
58 | @setlocal
59 |
60 | @REM ==== START VALIDATION ====
61 | if not "%JAVA_HOME%" == "" goto OkJHome
62 |
63 | echo.
64 | echo Error: JAVA_HOME not found in your environment. >&2
65 | echo Please set the JAVA_HOME variable in your environment to match the >&2
66 | echo location of your Java installation. >&2
67 | echo.
68 | goto error
69 |
70 | :OkJHome
71 | if exist "%JAVA_HOME%\bin\java.exe" goto init
72 |
73 | echo.
74 | echo Error: JAVA_HOME is set to an invalid directory. >&2
75 | echo JAVA_HOME = "%JAVA_HOME%" >&2
76 | echo Please set the JAVA_HOME variable in your environment to match the >&2
77 | echo location of your Java installation. >&2
78 | echo.
79 | goto error
80 |
81 | @REM ==== END VALIDATION ====
82 |
83 | :init
84 |
85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
86 | @REM Fallback to current working directory if not found.
87 |
88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
90 |
91 | set EXEC_DIR=%CD%
92 | set WDIR=%EXEC_DIR%
93 | :findBaseDir
94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound
95 | cd ..
96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound
97 | set WDIR=%CD%
98 | goto findBaseDir
99 |
100 | :baseDirFound
101 | set MAVEN_PROJECTBASEDIR=%WDIR%
102 | cd "%EXEC_DIR%"
103 | goto endDetectBaseDir
104 |
105 | :baseDirNotFound
106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
107 | cd "%EXEC_DIR%"
108 |
109 | :endDetectBaseDir
110 |
111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
112 |
113 | @setlocal EnableExtensions EnableDelayedExpansion
114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
116 |
117 | :endReadAdditionalConfig
118 |
119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
120 | set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
121 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
122 |
123 | set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
124 |
125 | FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
126 | IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
127 | )
128 |
129 | @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
130 | @REM This allows using the maven wrapper in projects that prohibit checking in binary data.
131 | if exist %WRAPPER_JAR% (
132 | if "%MVNW_VERBOSE%" == "true" (
133 | echo Found %WRAPPER_JAR%
134 | )
135 | ) else (
136 | if not "%MVNW_REPOURL%" == "" (
137 | SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
138 | )
139 | if "%MVNW_VERBOSE%" == "true" (
140 | echo Couldn't find %WRAPPER_JAR%, downloading it ...
141 | echo Downloading from: %DOWNLOAD_URL%
142 | )
143 |
144 | powershell -Command "&{"^
145 | "$webclient = new-object System.Net.WebClient;"^
146 | "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
147 | "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
148 | "}"^
149 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
150 | "}"
151 | if "%MVNW_VERBOSE%" == "true" (
152 | echo Finished downloading %WRAPPER_JAR%
153 | )
154 | )
155 | @REM End of extension
156 |
157 | @REM Provide a "standardized" way to retrieve the CLI args that will
158 | @REM work with both Windows and non-Windows executions.
159 | set MAVEN_CMD_LINE_ARGS=%*
160 |
161 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
162 | if ERRORLEVEL 1 goto error
163 | goto end
164 |
165 | :error
166 | set ERROR_CODE=1
167 |
168 | :end
169 | @endlocal & set ERROR_CODE=%ERROR_CODE%
170 |
171 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
172 | @REM check for post script, once with legacy .bat ending and once with .cmd ending
173 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
174 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
175 | :skipRcPost
176 |
177 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
178 | if "%MAVEN_BATCH_PAUSE%" == "on" pause
179 |
180 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
181 |
182 | exit /B %ERROR_CODE%
183 |
--------------------------------------------------------------------------------
/quarkus-kafka-server-extension/deployment/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 | 4.0.0
5 |
6 | com.ozangunalp
7 | quarkus-kafka-server-extension
8 | 999-SNAPSHOT
9 |
10 | quarkus-kafka-server-deployment
11 | Kafka Server Extension - Deployment
12 |
13 |
14 |
15 | io.quarkus
16 | quarkus-arc-deployment
17 |
18 |
19 | com.ozangunalp
20 | quarkus-kafka-server
21 |
22 |
23 | io.quarkus
24 | quarkus-junit5-internal
25 | test
26 |
27 |
28 |
29 |
30 |
31 | maven-compiler-plugin
32 |
33 |
34 |
35 | io.quarkus
36 | quarkus-extension-processor
37 | ${quarkus.platform.version}
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/quarkus-kafka-server-extension/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | com.ozangunalp
8 | kafka-native-parent
9 | 999-SNAPSHOT
10 |
11 | quarkus-kafka-server-extension
12 | pom
13 | Kafka Server Extension - Parent
14 |
15 |
16 | deployment
17 | runtime
18 |
19 |
20 |
21 |
22 |
23 |
24 | io.quarkus
25 | quarkus-maven-plugin
26 | ${quarkus.platform.version}
27 |
28 |
29 | maven-surefire-plugin
30 | ${surefire-plugin.version}
31 |
32 |
33 | org.jboss.logmanager.LogManager
34 | ${maven.home}
35 | ${settings.localRepository}
36 |
37 |
38 |
39 |
40 | maven-failsafe-plugin
41 | ${failsafe-plugin.version}
42 |
43 |
44 | org.jboss.logmanager.LogManager
45 | ${maven.home}
46 | ${settings.localRepository}
47 |
48 |
49 |
50 |
51 | maven-compiler-plugin
52 | ${compiler-plugin.version}
53 |
54 |
55 | -parameters
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/quarkus-kafka-server-extension/runtime/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 | 4.0.0
5 |
6 | com.ozangunalp
7 | quarkus-kafka-server-extension
8 | 999-SNAPSHOT
9 |
10 | quarkus-kafka-server
11 | Kafka Server Extension - Runtime
12 |
13 |
14 |
15 | io.quarkus
16 | quarkus-arc
17 |
18 |
19 | org.graalvm.sdk
20 | graal-sdk
21 | provided
22 |
23 |
24 | org.apache.kafka
25 | kafka-server-common
26 |
27 |
28 | org.apache.kafka
29 | kafka-storage
30 |
31 |
32 | org.apache.kafka
33 | kafka-group-coordinator
34 |
35 |
36 | org.apache.kafka
37 | kafka-group-coordinator-api
38 |
39 |
40 | org.apache.kafka
41 | kafka_2.13
42 |
43 |
44 | org.apache.kafka
45 | kafka-clients
46 |
47 |
48 | org.jboss.logmanager
49 | log4j2-jboss-logmanager
50 |
51 |
52 | io.strimzi
53 | kafka-oauth-server
54 |
55 |
56 | io.strimzi
57 | kafka-oauth-server-plain
58 |
59 |
60 | io.strimzi
61 | kafka-oauth-client
62 |
63 |
64 |
65 |
66 |
67 |
68 | io.quarkus
69 | quarkus-extension-maven-plugin
70 | ${quarkus.platform.version}
71 |
72 |
73 | compile
74 |
75 | extension-descriptor
76 |
77 |
78 | ${project.groupId}:${project.artifactId}-deployment:${project.version}
79 |
80 |
81 |
82 |
83 |
84 | maven-compiler-plugin
85 |
86 |
87 |
88 | io.quarkus
89 | quarkus-extension-processor
90 | ${quarkus.platform.version}
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/quarkus-kafka-server-extension/runtime/src/main/java/com/ozangunalp/kafka/server/extension/runtime/JsonPathConfigRecorder.java:
--------------------------------------------------------------------------------
1 | package com.ozangunalp.kafka.server.extension.runtime;
2 |
3 | import java.util.EnumSet;
4 | import java.util.Set;
5 |
6 | import com.jayway.jsonpath.Configuration;
7 | import com.jayway.jsonpath.Option;
8 | import com.jayway.jsonpath.spi.json.JacksonJsonProvider;
9 | import com.jayway.jsonpath.spi.json.JsonProvider;
10 | import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
11 | import com.jayway.jsonpath.spi.mapper.MappingProvider;
12 | import io.quarkus.runtime.annotations.Recorder;
13 |
14 | @Recorder
15 | public class JsonPathConfigRecorder {
16 |
17 | public void setDefaults() {
18 | Configuration.setDefaults(new Configuration.Defaults() {
19 |
20 | private final JsonProvider jsonProvider = new JacksonJsonProvider();
21 | private final MappingProvider mappingProvider = new JacksonMappingProvider();
22 |
23 |
24 | @Override
25 | public JsonProvider jsonProvider() {
26 | return jsonProvider;
27 | }
28 |
29 | @Override
30 | public Set