├── .editorconfig
├── .github
│   └── workflows
│       ├── build.yml
│       ├── release.yml
│       └── trigger-release.yml
├── .gitignore
├── .mvn
│   └── wrapper
│       ├── maven-wrapper.jar
│       └── maven-wrapper.properties
├── LICENSE
├── README.md
├── mvnw
├── pom.xml
├── renovate.json
└── src
    ├── main
    │   ├── java
    │   │   └── io
    │   │       └── dropwizard
    │   │           └── kafka
    │   │               ├── BasicKafkaAdminClientFactory.java
    │   │               ├── BasicKafkaConsumerFactory.java
    │   │               ├── BasicKafkaProducerFactory.java
    │   │               ├── DropwizardKafkaUtils.java
    │   │               ├── KafkaAdminClientBundle.java
    │   │               ├── KafkaAdminClientFactory.java
    │   │               ├── KafkaClientFactory.java
    │   │               ├── KafkaConsumerBundle.java
    │   │               ├── KafkaConsumerFactory.java
    │   │               ├── KafkaProducerBundle.java
    │   │               ├── KafkaProducerFactory.java
    │   │               ├── KafkaTopicFactory.java
    │   │               ├── MockKafkaConsumerFactory.java
    │   │               ├── MockKafkaProducerFactory.java
    │   │               ├── deserializer
    │   │               │   ├── ByteArrayDeserializerFactory.java
    │   │               │   ├── ByteBufferDeserializerFactory.java
    │   │               │   ├── BytesDeserializerFactory.java
    │   │               │   ├── DeserializerFactory.java
    │   │               │   ├── DoubleDeserializerFactory.java
    │   │               │   ├── FloatDeserializerFactory.java
    │   │               │   ├── IntegerDeserializerFactory.java
    │   │               │   ├── LongDeserializerFactory.java
    │   │               │   ├── ShortDeserializerFactory.java
    │   │               │   ├── StringDeserializerFactory.java
    │   │               │   └── UUIDDeserializerFactory.java
    │   │               ├── health
    │   │               │   ├── KafkaAdminHealthCheck.java
    │   │               │   └── KafkaProducerHealthCheck.java
    │   │               ├── managed
    │   │               │   ├── KafkaAdminClientManager.java
    │   │               │   ├── KafkaConsumerManager.java
    │   │               │   └── KafkaProducerManager.java
    │   │               ├── metrics
    │   │               │   └── DropwizardMetricsReporter.java
    │   │               ├── security
    │   │               │   ├── SaslPlaintextSecurityFactory.java
    │   │               │   ├── SaslSslSecurityFactory.java
    │   │               │   ├── SecurityFactory.java
    │   │               │   └── SslSecurityFactory.java
    │   │               ├── serializer
    │   │               │   ├── ByteArraySerializerFactory.java
    │   │               │   ├── ByteBufferSerializerFactory.java
    │   │               │   ├── BytesSerializerFactory.java
    │   │               │   ├── DoubleSerializerFactory.java
    │   │               │   ├── FloatSerializerFactory.java
    │   │               │   ├── IntegerSerializerFactory.java
    │   │               │   ├── LongSerializerFactory.java
    │   │               │   ├── SerializerFactory.java
    │   │               │   ├── ShortSerializerFactory.java
    │   │               │   ├── StringSerializerFactory.java
    │   │               │   └── UUIDSerializerFactory.java
    │   │               └── tracing
    │   │                   └── TracingFactory.java
    │   └── resources
    │       └── META-INF
    │           └── services
    │               ├── io.dropwizard.jackson.Discoverable
    │               ├── io.dropwizard.kafka.KafkaAdminClientFactory
    │               ├── io.dropwizard.kafka.KafkaConsumerFactory
    │               ├── io.dropwizard.kafka.KafkaProducerFactory
    │               ├── io.dropwizard.kafka.deserializer.DeserializerFactory
    │               ├── io.dropwizard.kafka.security.SecurityFactory
    │               └── io.dropwizard.kafka.serializer.SerializerFactory
    └── test
        ├── java
        │   └── io
        │       └── dropwizard
        │           └── kafka
        │               ├── BasicKafkaAdminClientFactoryTest.java
        │               ├── BasicKafkaConsumerFactoryTest.java
        │               ├── BasicKafkaProducerFactoryTest.java
        │               ├── DropwizardKafkaUtilsTest.java
        │               ├── KafkaTopicFactoryTest.java
        │               ├── MockKafkaConsumerFactoryTest.java
        │               ├── MockKafkaProducerFactoryTest.java
        │               ├── deserializer
        │               │   ├── ByteArrayDeserializerFactoryTest.java
        │               │   ├── ByteBufferDeserializerFactoryTest.java
        │               │   ├── BytesDeserializerFactoryTest.java
        │               │   ├── DoubleDeserializerFactoryTest.java
        │               │   ├── FloatDeserializerFactoryTest.java
        │               │   ├── IntegerDeserializerFactoryTest.java
        │               │   ├── LongDeserializerFactoryTest.java
        │               │   ├── ShortDeserializerFactoryTest.java
        │               │   ├── StringDeserializerFactoryTest.java
        │               │   └── UUIDDeserializerFactoryTest.java
        │               ├── health
        │               │   └── KafkaProducerHealthCheckTest.java
        │               ├── integration
        │               │   └── DropwizardKafkaIT.java
        │               ├── metrics
        │               │   └── DropwizardMetricsReporterTest.java
        │               ├── security
        │               │   ├── SaslPlaintextSecurityFactoryTest.java
        │               │   ├── SaslSslSecurityFactoryTest.java
        │               │   └── SslSecurityFactoryTest.java
        │               └── serializer
        │                   ├── ByteArraySerializerFactoryTest.java
        │                   ├── ByteBufferSerializerFactoryTest.java
        │                   ├── BytesSerializerFactoryTest.java
        │                   ├── DoubleSerializerFactoryTest.java
        │                   ├── FloatSerializerFactoryTest.java
        │                   ├── IntegerSerializerFactoryTest.java
        │                   ├── LongSerializerFactoryTest.java
        │                   ├── ShortSerializerFactoryTest.java
        │                   ├── StringSerializerFactoryTest.java
        │                   └── UUIDSerializerFactoryTest.java
        └── resources
            └── yaml
                ├── basic-admin-no-topics.yaml
                ├── basic-admin.yaml
                ├── basic-consumer.yaml
                ├── basic-producer.yaml
                ├── deserializer
                │   ├── byte-array.yaml
                │   ├── byte-buffer.yaml
                │   ├── bytes.yaml
                │   ├── double.yaml
                │   ├── float.yaml
                │   ├── integer.yaml
                │   ├── long.yaml
                │   ├── short.yaml
                │   ├── string.yaml
                │   └── uuid.yaml
                ├── integration
                │   ├── basic-admin.yaml
                │   ├── basic-consumer.yaml
                │   └── basic-producer.yaml
                ├── security
                │   ├── sasl-plaintext-security.yaml
                │   ├── sasl-ssl-security.yaml
                │   └── ssl-security.yaml
                ├── serializer
                │   ├── byte-array.yaml
                │   ├── byte-buffer.yaml
                │   ├── bytes.yaml
                │   ├── double.yaml
                │   ├── float.yaml
                │   ├── integer.yaml
                │   ├── long.yaml
                │   ├── short.yaml
                │   ├── string.yaml
                │   └── uuid.yaml
                └── topic.yaml
/.editorconfig:
--------------------------------------------------------------------------------
1 | [*]
2 | charset=utf-8
3 | end_of_line=lf
4 | trim_trailing_whitespace=true
5 | max_line_length=120
6 | insert_final_newline=true
7 | indent_style=space
8 | indent_size=2
9 |
10 | [*.java]
11 | indent_style=space
12 | indent_size=4
13 | continuation_indent_size=8
14 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Build
3 | # yamllint disable-line rule:truthy
4 | on:
5 | push:
6 | branches:
7 | - release/*
8 | pull_request:
9 | jobs:
10 | yamllint:
11 | uses: dropwizard/workflows/.github/workflows/yamllint.yml@main
12 | build:
13 | strategy:
14 | fail-fast: false
15 | matrix:
16 | java-version: ['17', '21']
17 | uses: dropwizard/workflows/.github/workflows/maven.yml@main
18 | secrets: inherit
19 | with:
20 | java-version: ${{ matrix.java-version }}
21 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Release
3 | # yamllint disable-line rule:truthy
4 | on:
5 | push:
6 | tags:
7 | - dropwizard-kafka-*
8 | jobs:
9 | release:
10 | uses: dropwizard/workflows/.github/workflows/release.yml@main
11 | secrets: inherit
12 |
--------------------------------------------------------------------------------
/.github/workflows/trigger-release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # yamllint disable rule:comments rule:line-length
3 | name: Trigger Release
4 | # yamllint disable-line rule:truthy
5 | on:
6 | workflow_dispatch:
7 | inputs:
8 | releaseVersion:
9 | description: Version of the next release
10 | required: true
11 | type: string
12 | developmentVersion:
13 | description: Version of the next development cycle (must end in "-SNAPSHOT")
14 | required: true
15 | type: string
16 | jobs:
17 | release:
18 | uses: dropwizard/workflows/.github/workflows/trigger-release.yml@main
19 | secrets: inherit
20 | with:
21 | releaseVersion: ${{ inputs.releaseVersion }}
22 | developmentVersion: ${{ inputs.developmentVersion }}
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
3 | .out
4 | out
5 | .dist
6 | .run
7 | .gradle
8 | target
9 | logs
10 |
11 | .idea
12 | *.iml
13 | *.ipr
14 | *.iws
15 | *.class
16 | *.log
17 |
18 | # eclipse
19 | .project
20 | .settings
21 | bin
22 | .classpath
23 |
24 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
25 | hs_err_pid*
26 |
27 | */dependency-reduced-pom.xml
28 | node_modules
29 | .cache
30 |
31 | .vscode
32 | .factorypath
33 |
--------------------------------------------------------------------------------
/.mvn/wrapper/maven-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropwizard/dropwizard-kafka/f8d14e7ebb96f344b26f54b151545ae3ccd846ca/.mvn/wrapper/maven-wrapper.jar
--------------------------------------------------------------------------------
/.mvn/wrapper/maven-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.10/apache-maven-3.9.10-bin.zip
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dropwizard-kafka
2 | [![Build](https://github.com/dropwizard/dropwizard-kafka/workflows/Build/badge.svg)](https://github.com/dropwizard/dropwizard-kafka/actions?query=workflow%3ABuild)
3 | [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=dropwizard_dropwizard-kafka&metric=alert_status)](https://sonarcloud.io/dashboard?id=dropwizard_dropwizard-kafka)
4 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.dropwizard.modules/dropwizard-kafka/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.dropwizard.modules/dropwizard-kafka/)
5 |
6 | Provides easy integration for Dropwizard applications with the Apache Kafka client.
7 |
8 | This bundle comes with out-of-the-box support for:
9 | * YAML Configuration integration
10 | * Producer and Consumer lifecycle management
11 | * Producer and Cluster connection health checks
12 | * Metrics integration for the Kafka client
13 | * An easier way to create/configure Kafka consumers/producers than is offered by the base Kafka client
14 | * Distributed tracing integration, using the [Brave Kafka client instrumentation library](https://github.com/openzipkin/brave/tree/master/instrumentation/kafka-clients).
15 |
16 | For more information on Kafka, take a look at the official documentation here: http://kafka.apache.org/documentation/
17 |
18 | ## Dropwizard Version Support Matrix
19 | | dropwizard-kafka | Dropwizard v1.3.x | Dropwizard v2.0.x | Dropwizard v2.1.x | Dropwizard v3.0.x | Dropwizard v4.0.x |
20 | |------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
21 | | v1.3.x | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
22 | | v1.4.x | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
23 | | v1.5.x | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
24 | | v1.6.x | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: |
25 | | v1.7.x | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: |
26 | | v1.8.x | :x: | :x: | :white_check_mark: | :x: | :x: |
27 | | v3.0.x | :x: | :x: | :x: | :white_check_mark: | :x: |
28 | | v4.0.x | :x: | :x: | :x: | :x: | :white_check_mark: |
29 |
30 | ## Usage
31 | Add dependency on library.
32 |
33 | Maven:
34 | ```xml
35 | <dependency>
36 |     <groupId>io.dropwizard.modules</groupId>
37 |     <artifactId>dropwizard-kafka</artifactId>
38 |     <version>1.7.0</version>
39 | </dependency>
40 | ```
41 |
42 | Gradle:
43 | ```groovy
44 | compile "io.dropwizard.modules:dropwizard-kafka:$dropwizardVersion"
45 | ```
46 |
47 | ### Basic Kafka Producer
48 | In your Dropwizard `Configuration` class, configure a `KafkaProducerFactory`:
49 | ```java
50 | @Valid
51 | @NotNull
52 | @JsonProperty("producer")
53 | private KafkaProducerFactory<String, String> kafkaProducerFactory;
54 | ```
55 |
56 | Then, in your `Application` class, you'll want to do something similar to the following:
57 | ```java
58 | private final KafkaProducerBundle<String, String, ExampleConfiguration> kafkaProducer = new KafkaProducerBundle<String, String, ExampleConfiguration>() {
59 | @Override
60 | public KafkaProducerFactory<String, String> getKafkaProducerFactory(ExampleConfiguration configuration) {
61 | return configuration.getKafkaProducerFactory();
62 | }
63 | };
64 |
65 | @Override
66 | public void initialize(Bootstrap<ExampleConfiguration> bootstrap) {
67 | bootstrap.addBundle(kafkaProducer);
68 | }
69 |
70 | @Override
71 | public void run(ExampleConfiguration config, Environment environment) {
72 | final PersonEventProducer personEventProducer = new PersonEventProducer(kafkaProducer.getProducer());
73 | environment.jersey().register(new PersonEventResource(personEventProducer));
74 | }
75 | ```
76 |
77 | Configure your factory in your `config.yml` file:
78 |
79 | ```yaml
80 | producer:
81 | type: basic
82 | bootstrapServers:
83 | - 127.0.0.1:9092
84 | - 127.0.0.1:9093
85 | - 127.0.0.1:9094
86 | name: producerNameToBeUsedInMetrics
87 | keySerializer:
88 | type: string
89 | valueSerializer:
90 | type: string
91 | acks: all
92 | retries: 2147483647 # int max value
93 | maxInFlightRequestsPerConnection: 1
94 | maxPollBlockTime: 10s
95 | security:
96 | securityProtocol: sasl_ssl
97 | sslProtocol: TLSv1.2
98 | saslMechanism: PLAIN
99 | saslJaas: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"\" password=\"\";"
100 | ```
101 |
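The `PersonEventProducer` in the example above is an application class of your own, not something this bundle provides. A minimal sketch of what it might look like (the `person-events` topic name and the string-typed payload are illustrative assumptions, matching the `string` serializers configured above):

```java
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PersonEventProducer {
    private static final Logger log = LoggerFactory.getLogger(PersonEventProducer.class);

    private final Producer<String, String> producer;

    public PersonEventProducer(final Producer<String, String> producer) {
        this.producer = producer;
    }

    public void publish(final String personId, final String personEventJson) {
        // Keying by person ID keeps all events for a given person on the same partition.
        final ProducerRecord<String, String> record =
                new ProducerRecord<>("person-events", personId, personEventJson);
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                log.error("Failed to publish event for person {}", personId, exception);
            }
        });
    }
}
```

The bundle manages the producer's lifecycle, so the wrapper only sends; it never closes the producer itself.
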
102 | ### Basic Kafka Consumer
103 | In your Dropwizard `Configuration` class, configure a `KafkaConsumerFactory`:
104 |
105 | ```java
106 | @Valid
107 | @NotNull
108 | @JsonProperty("consumer")
109 | private KafkaConsumerFactory<String, String> kafkaConsumerFactory;
110 | ```
111 |
112 | Then, in your `Application` class, you'll want to do something similar to the following:
113 | ```java
114 | private final KafkaConsumerBundle<String, String, ExampleConfiguration> kafkaConsumer = new KafkaConsumerBundle<String, String, ExampleConfiguration>() {
115 | @Override
116 | public KafkaConsumerFactory<String, String> getKafkaConsumerFactory(ExampleConfiguration configuration) {
117 | return configuration.getKafkaConsumerFactory();
118 | }
119 | };
120 |
121 | @Override
122 | public void initialize(Bootstrap<ExampleConfiguration> bootstrap) {
123 | bootstrap.addBundle(kafkaConsumer);
124 | }
125 |
126 | @Override
127 | public void run(ExampleConfiguration config, Environment environment) {
128 | final PersonEventConsumer personEventConsumer = new PersonEventConsumer(kafkaConsumer.getConsumer());
129 | personEventConsumer.startConsuming();
130 | }
131 | ```
132 |
133 | Configure your factory in your `config.yml` file:
134 |
135 | ```yaml
136 | consumer:
137 | type: basic
138 | bootstrapServers:
139 | - 127.0.0.1:9092
140 | - 127.0.0.1:9093
141 | - 127.0.0.1:9094
142 | consumerGroupId: consumer1
143 | name: consumerNameToBeUsedInMetrics
144 | keyDeserializer:
145 | type: string
146 | valueDeserializer:
147 | type: string
148 | security:
149 | securityProtocol: sasl_ssl
150 | sslProtocol: TLSv1.2
151 | saslMechanism: PLAIN
152 | saslJaas: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"\" password=\"\";"
153 | ```
154 |
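As with the producer example, `PersonEventConsumer` is your own class. A minimal sketch (again assuming a hypothetical `person-events` topic; note that `KafkaConsumer` is not thread-safe, so all calls on it stay on one dedicated thread):

```java
import java.time.Duration;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PersonEventConsumer {
    private static final Logger log = LoggerFactory.getLogger(PersonEventConsumer.class);

    private final Consumer<String, String> consumer;
    private final AtomicBoolean running = new AtomicBoolean(false);

    public PersonEventConsumer(final Consumer<String, String> consumer) {
        this.consumer = consumer;
    }

    public void startConsuming() {
        running.set(true);
        // Poll on a dedicated thread; the Kafka consumer must not be shared across threads.
        new Thread(() -> {
            consumer.subscribe(Collections.singletonList("person-events"));
            while (running.get()) {
                for (final ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    log.info("Received event for person {}: {}", record.key(), record.value());
                }
            }
        }, "person-event-consumer").start();
    }

    public void stop() {
        running.set(false);
    }
}
```
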
155 | ## Using an older version of the Kafka Client
156 | This library *should* remain backwards compatible, so that you can override the version of the Kafka client that this library includes. If that ever stops being possible, we will need to create separate branches for the differing major versions of the Kafka client.
157 |
158 | For example, say you would like to use version `1.1.1` of the Kafka client. One option would be to explicitly define a dependency on version `1.1.1` of `kafka-clients` before you declare a dependency on `dropwizard-kafka`.
159 |
160 | ```xml
161 | <dependencies>
162 |     <dependency>
163 |         <groupId>org.apache.kafka</groupId>
164 |         <artifactId>kafka-clients</artifactId>
165 |         <version>1.1.1</version>
166 |     </dependency>
167 |     <dependency>
168 |         <groupId>io.dropwizard.modules</groupId>
169 |         <artifactId>dropwizard-kafka</artifactId>
170 |         <version>${dropwizard.version}</version>
171 |     </dependency>
172 | </dependencies>
173 | ```
174 |
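If you build with Gradle instead, the comparable pin is a `resolutionStrategy` override; the sketch below uses the same illustrative `1.1.1` version:

```groovy
configurations.all {
    resolutionStrategy {
        // Force the older client for the whole build, overriding the version
        // that dropwizard-kafka would otherwise pull in transitively.
        force "org.apache.kafka:kafka-clients:1.1.1"
    }
}
```
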
175 | ## Adding support for additional serializers and/or deserializers
176 | In order to support additional serializers or deserializers, you'll need to create a new factory:
177 | ```java
178 | @JsonTypeName("my-serializer")
179 | public class MySerializerFactory extends SerializerFactory {
180 |
181 | @NotNull
182 | @JsonProperty
183 | private String someConfig;
184 |
185 | public String getSomeConfig() {
186 | return someConfig;
187 | }
188 |
189 | public void setSomeConfig(final String someConfig) {
190 | this.someConfig = someConfig;
191 | }
192 |
193 |
194 | @Override
195 | public Class<? extends Serializer> getSerializerClass() {
196 | return MySerializer.class;
197 | }
198 | }
199 | ```
200 |
201 | Then you will need to add the following files to your `src/main/resources/META-INF/services` directory in order to support Jackson
202 | polymorphic serialization:
203 |
204 | File named `io.dropwizard.jackson.Discoverable`:
205 |
206 | ```
207 | io.dropwizard.kafka.serializer.SerializerFactory
208 | ```
209 |
210 | File named `io.dropwizard.kafka.serializer.SerializerFactory`:
211 |
212 | ```
213 | package.name.for.your.MySerializerFactory
214 | ```
215 |
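For completeness, the `MySerializer` class referenced by the factory above is a plain Kafka `Serializer` implementation. A minimal sketch, where `MyType` and the UTF-8 wire format are placeholders rather than anything this library prescribes:

```java
import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.serialization.Serializer;

// Placeholder domain type, purely for illustration.
record MyType(String value) {
}

public class MySerializer implements Serializer<MyType> {
    @Override
    public byte[] serialize(final String topic, final MyType data) {
        // Illustrative wire format: the wrapped value encoded as UTF-8 text.
        return data == null ? null : data.value().getBytes(StandardCharsets.UTF_8);
    }
}
```
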
--------------------------------------------------------------------------------
/mvnw:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # ----------------------------------------------------------------------------
3 | # Licensed to the Apache Software Foundation (ASF) under one
4 | # or more contributor license agreements. See the NOTICE file
5 | # distributed with this work for additional information
6 | # regarding copyright ownership. The ASF licenses this file
7 | # to you under the Apache License, Version 2.0 (the
8 | # "License"); you may not use this file except in compliance
9 | # with the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing,
14 | # software distributed under the License is distributed on an
15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | # KIND, either express or implied. See the License for the
17 | # specific language governing permissions and limitations
18 | # under the License.
19 | # ----------------------------------------------------------------------------
20 |
21 | # ----------------------------------------------------------------------------
22 | # Maven2 Start Up Batch script
23 | #
24 | # Required ENV vars:
25 | # ------------------
26 | # JAVA_HOME - location of a JDK home dir
27 | #
28 | # Optional ENV vars
29 | # -----------------
30 | # M2_HOME - location of maven2's installed home dir
31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven
32 | # e.g. to debug Maven itself, use
33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files
35 | # ----------------------------------------------------------------------------
36 |
37 | if [ -z "$MAVEN_SKIP_RC" ] ; then
38 |
39 | if [ -f /etc/mavenrc ] ; then
40 | . /etc/mavenrc
41 | fi
42 |
43 | if [ -f "$HOME/.mavenrc" ] ; then
44 | . "$HOME/.mavenrc"
45 | fi
46 |
47 | fi
48 |
49 | # OS specific support. $var _must_ be set to either true or false.
50 | cygwin=false;
51 | darwin=false;
52 | mingw=false
53 | case "`uname`" in
54 | CYGWIN*) cygwin=true ;;
55 | MINGW*) mingw=true;;
56 | Darwin*) darwin=true
57 | #
58 | # Look for the Apple JDKs first to preserve the existing behaviour, and then look
59 | # for the new JDKs provided by Oracle.
60 | #
61 | if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
62 | #
63 | # Apple JDKs
64 | #
65 | export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
66 | fi
67 |
68 | if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
69 | #
70 | # Apple JDKs
71 | #
72 | export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
73 | fi
74 |
75 | if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
76 | #
77 | # Oracle JDKs
78 | #
79 | export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
80 | fi
81 |
82 | if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
83 | #
84 | # Apple JDKs
85 | #
86 | export JAVA_HOME=`/usr/libexec/java_home`
87 | fi
88 | ;;
89 | esac
90 |
91 | if [ -z "$JAVA_HOME" ] ; then
92 | if [ -r /etc/gentoo-release ] ; then
93 | JAVA_HOME=`java-config --jre-home`
94 | fi
95 | fi
96 |
97 | if [ -z "$M2_HOME" ] ; then
98 | ## resolve links - $0 may be a link to maven's home
99 | PRG="$0"
100 |
101 | # need this for relative symlinks
102 | while [ -h "$PRG" ] ; do
103 | ls=`ls -ld "$PRG"`
104 | link=`expr "$ls" : '.*-> \(.*\)$'`
105 | if expr "$link" : '/.*' > /dev/null; then
106 | PRG="$link"
107 | else
108 | PRG="`dirname "$PRG"`/$link"
109 | fi
110 | done
111 |
112 | saveddir=`pwd`
113 |
114 | M2_HOME=`dirname "$PRG"`/..
115 |
116 | # make it fully qualified
117 | M2_HOME=`cd "$M2_HOME" && pwd`
118 |
119 | cd "$saveddir"
120 | # echo Using m2 at $M2_HOME
121 | fi
122 |
123 | # For Cygwin, ensure paths are in UNIX format before anything is touched
124 | if $cygwin ; then
125 | [ -n "$M2_HOME" ] &&
126 | M2_HOME=`cygpath --unix "$M2_HOME"`
127 | [ -n "$JAVA_HOME" ] &&
128 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
129 | [ -n "$CLASSPATH" ] &&
130 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
131 | fi
132 |
133 | # For MinGW, ensure paths are in UNIX format before anything is touched
134 | if $mingw ; then
135 | [ -n "$M2_HOME" ] &&
136 | M2_HOME="`(cd "$M2_HOME"; pwd)`"
137 | [ -n "$JAVA_HOME" ] &&
138 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
139 | # TODO classpath?
140 | fi
141 |
142 | if [ -z "$JAVA_HOME" ]; then
143 | javaExecutable="`which javac`"
144 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
145 | # readlink(1) is not available as standard on Solaris 10.
146 | readLink=`which readlink`
147 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
148 | if $darwin ; then
149 | javaHome="`dirname \"$javaExecutable\"`"
150 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
151 | else
152 | javaExecutable="`readlink -f \"$javaExecutable\"`"
153 | fi
154 | javaHome="`dirname \"$javaExecutable\"`"
155 | javaHome=`expr "$javaHome" : '\(.*\)/bin'`
156 | JAVA_HOME="$javaHome"
157 | export JAVA_HOME
158 | fi
159 | fi
160 | fi
161 |
162 | if [ -z "$JAVACMD" ] ; then
163 | if [ -n "$JAVA_HOME" ] ; then
164 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
165 | # IBM's JDK on AIX uses strange locations for the executables
166 | JAVACMD="$JAVA_HOME/jre/sh/java"
167 | else
168 | JAVACMD="$JAVA_HOME/bin/java"
169 | fi
170 | else
171 | JAVACMD="`which java`"
172 | fi
173 | fi
174 |
175 | if [ ! -x "$JAVACMD" ] ; then
176 | echo "Error: JAVA_HOME is not defined correctly." >&2
177 | echo " We cannot execute $JAVACMD" >&2
178 | exit 1
179 | fi
180 |
181 | if [ -z "$JAVA_HOME" ] ; then
182 | echo "Warning: JAVA_HOME environment variable is not set."
183 | fi
184 |
185 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
186 |
187 | # traverses directory structure from process work directory to filesystem root
188 | # first directory with .mvn subdirectory is considered project base directory
189 | find_maven_basedir() {
190 | local basedir=$(pwd)
191 | local wdir=$(pwd)
192 | while [ "$wdir" != '/' ] ; do
193 | if [ -d "$wdir"/.mvn ] ; then
194 | basedir=$wdir
195 | break
196 | fi
197 | wdir=$(cd "$wdir/.."; pwd)
198 | done
199 | echo "${basedir}"
200 | }
201 |
202 | # concatenates all lines of a file
203 | concat_lines() {
204 | if [ -f "$1" ]; then
205 | echo "$(tr -s '\n' ' ' < "$1")"
206 | fi
207 | }
208 |
209 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
210 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
211 |
212 | # For Cygwin, switch paths to Windows format before running java
213 | if $cygwin; then
214 | [ -n "$M2_HOME" ] &&
215 | M2_HOME=`cygpath --path --windows "$M2_HOME"`
216 | [ -n "$JAVA_HOME" ] &&
217 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
218 | [ -n "$CLASSPATH" ] &&
219 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
220 | [ -n "$MAVEN_PROJECTBASEDIR" ] &&
221 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
222 | fi
223 |
224 | # Provide a "standardized" way to retrieve the CLI args that will
225 | # work with both Windows and non-Windows executions.
226 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
227 | export MAVEN_CMD_LINE_ARGS
228 |
229 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
230 |
231 | # avoid using MAVEN_CMD_LINE_ARGS below since that would lose parameter escaping in $@
232 | exec "$JAVACMD" \
233 | $MAVEN_OPTS \
234 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
235 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
236 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
237 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <parent>
6 |         <groupId>io.dropwizard.modules</groupId>
7 |         <artifactId>module-parent</artifactId>
8 |         <version>4.0.4</version>
9 |     </parent>
10 |     <groupId>io.dropwizard.modules</groupId>
11 |     <artifactId>dropwizard-kafka</artifactId>
12 |     <version>4.0.2-SNAPSHOT</version>
13 |     <packaging>jar</packaging>
14 |
15 |     <name>Dropwizard Kafka</name>
16 |     <description>Provides easy integration for Dropwizard applications with Apache Kafka</description>
17 |
18 |     <distributionManagement>
19 |         <site>
20 |             <id>dropwizard-kafka</id>
21 |             <url>http://dropwizard.github.io/dropwizard-kafka/${project.version}</url>
22 |         </site>
23 |     </distributionManagement>
24 |
25 |     <properties>
26 |         <sonar.projectKey>dropwizard_dropwizard-kafka</sonar.projectKey>
27 |
28 |         <brave.version>6.3.0</brave.version>
29 |         <kafka-client.version>3.8.1</kafka-client.version>
30 |         <spring-kafka.version>3.3.6</spring-kafka.version>
31 |     </properties>
32 |
33 |     <dependencyManagement>
34 |         <dependencies>
35 |             <dependency>
36 |                 <groupId>org.apache.kafka</groupId>
37 |                 <artifactId>kafka-clients</artifactId>
38 |                 <version>${kafka-client.version}</version>
39 |             </dependency>
40 |             <dependency>
41 |                 <groupId>org.apache.kafka</groupId>
42 |                 <artifactId>kafka-clients</artifactId>
43 |                 <version>${kafka-client.version}</version>
44 |                 <classifier>test</classifier>
45 |             </dependency>
46 |             <dependency>
47 |                 <groupId>org.apache.kafka</groupId>
48 |                 <artifactId>kafka-streams</artifactId>
49 |                 <version>${kafka-client.version}</version>
50 |             </dependency>
51 |             <dependency>
52 |                 <groupId>org.apache.kafka</groupId>
53 |                 <artifactId>kafka-metadata</artifactId>
54 |                 <version>${kafka-client.version}</version>
55 |             </dependency>
56 |             <dependency>
57 |                 <groupId>org.apache.kafka</groupId>
58 |                 <artifactId>kafka-raft</artifactId>
59 |                 <version>${kafka-client.version}</version>
60 |             </dependency>
61 |             <dependency>
62 |                 <groupId>org.apache.kafka</groupId>
63 |                 <artifactId>kafka-server-common</artifactId>
64 |                 <version>${kafka-client.version}</version>
65 |             </dependency>
66 |             <dependency>
67 |                 <groupId>org.apache.kafka</groupId>
68 |                 <artifactId>kafka-streams-test-utils</artifactId>
69 |                 <version>${kafka-client.version}</version>
70 |             </dependency>
71 |             <dependency>
72 |                 <groupId>org.apache.kafka</groupId>
73 |                 <artifactId>kafka_2.13</artifactId>
74 |                 <version>${kafka-client.version}</version>
75 |             </dependency>
76 |             <dependency>
77 |                 <groupId>org.apache.kafka</groupId>
78 |                 <artifactId>kafka_2.13</artifactId>
79 |                 <version>${kafka-client.version}</version>
80 |                 <classifier>test</classifier>
81 |             </dependency>
82 |         </dependencies>
83 |     </dependencyManagement>
84 |
85 |     <dependencies>
86 |         <dependency>
87 |             <groupId>io.dropwizard</groupId>
88 |             <artifactId>dropwizard-core</artifactId>
89 |         </dependency>
90 |         <dependency>
91 |             <groupId>org.apache.kafka</groupId>
92 |             <artifactId>kafka-clients</artifactId>
93 |             <exclusions>
94 |                 <exclusion>
95 |                     <groupId>org.slf4j</groupId>
96 |                     <artifactId>slf4j-log4j12</artifactId>
97 |                 </exclusion>
98 |             </exclusions>
99 |         </dependency>
100 |         <dependency>
101 |             <groupId>io.zipkin.brave</groupId>
102 |             <artifactId>brave-instrumentation-kafka-clients</artifactId>
103 |             <version>${brave.version}</version>
104 |         </dependency>
105 |
106 |
107 |         <dependency>
108 |             <groupId>org.junit.vintage</groupId>
109 |             <artifactId>junit-vintage-engine</artifactId>
110 |             <scope>test</scope>
111 |         </dependency>
112 |         <dependency>
113 |             <groupId>org.mockito</groupId>
114 |             <artifactId>mockito-core</artifactId>
115 |             <scope>test</scope>
116 |         </dependency>
117 |         <dependency>
118 |             <groupId>io.dropwizard</groupId>
119 |             <artifactId>dropwizard-testing</artifactId>
120 |             <scope>test</scope>
121 |         </dependency>
122 |         <dependency>
123 |             <groupId>org.springframework.kafka</groupId>
124 |             <artifactId>spring-kafka-test</artifactId>
125 |             <version>${spring-kafka.version}</version>
126 |             <scope>test</scope>
127 |         </dependency>
128 |         <dependency>
129 |             <groupId>org.assertj</groupId>
130 |             <artifactId>assertj-core</artifactId>
131 |             <scope>test</scope>
132 |         </dependency>
133 |     </dependencies>
134 |
135 |     <build>
136 |         <pluginManagement>
137 |             <plugins>
138 |                 <plugin>
139 |                     <groupId>org.apache.maven.plugins</groupId>
140 |                     <artifactId>maven-enforcer-plugin</artifactId>
141 |                     <version>3.5.0</version>
142 |                 </plugin>
143 |             </plugins>
144 |         </pluginManagement>
145 |
146 |         <plugins>
147 |             <plugin>
148 |                 <groupId>org.apache.maven.plugins</groupId>
149 |                 <artifactId>maven-failsafe-plugin</artifactId>
150 |                 <executions>
151 |                     <execution>
152 |                         <goals>
153 |                             <goal>integration-test</goal>
154 |                             <goal>verify</goal>
155 |                         </goals>
156 |                     </execution>
157 |                 </executions>
158 |             </plugin>
159 |             <plugin>
160 |                 <groupId>org.apache.maven.plugins</groupId>
161 |                 <artifactId>maven-enforcer-plugin</artifactId>
162 |                 <executions>
163 |                     <execution>
164 |                         <id>enforce-rules</id>
165 |                         <goals>
166 |                             <goal>enforce</goal>
167 |                         </goals>
168 |                         <configuration>
169 |                             <rules>
170 |                                 <requireUpperBoundDeps>
171 |                                     <excludes>
172 |                                         <exclude>org.apache.kafka:*</exclude>
173 |                                     </excludes>
174 |                                 </requireUpperBoundDeps>
175 |                             </rules>
176 |                         </configuration>
177 |                     </execution>
178 |                 </executions>
179 |             </plugin>
180 |         </plugins>
181 |     </build>
182 | </project>
183 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "local>dropwizard/renovate-config"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/BasicKafkaAdminClientFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import com.codahale.metrics.health.HealthCheckRegistry;
4 | import com.fasterxml.jackson.annotation.JsonTypeName;
5 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
6 | import io.dropwizard.validation.ValidationMethod;
7 | import org.apache.kafka.clients.admin.AdminClient;
8 | import org.apache.kafka.clients.admin.AdminClientConfig;
9 | import org.apache.kafka.clients.admin.NewTopic;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.ArrayList;
14 | import java.util.Collection;
15 | import java.util.Collections;
16 | import java.util.HashMap;
17 | import java.util.List;
18 | import java.util.Map;
19 | import java.util.stream.Collectors;
20 |
21 | import static java.util.Objects.requireNonNull;
22 |
23 | @JsonTypeName("basic")
24 | public class BasicKafkaAdminClientFactory extends KafkaAdminClientFactory {
25 | private static final Logger log = LoggerFactory.getLogger(BasicKafkaAdminClientFactory.class);
26 |
27 | @Override
28 | public AdminClient build(final LifecycleEnvironment lifecycle, final HealthCheckRegistry healthChecks,
29 | final Map<String, Object> configOverrides, final Collection<NewTopic> topics) {
30 | final Map<String, Object> config = new HashMap<>();
31 |
32 | config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, String.join(",", bootstrapServers));
33 |
34 | config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId);
35 |
36 | config.put(AdminClientConfig.CLIENT_DNS_LOOKUP_CONFIG, clientDnsLookup.toString());
37 | config.put(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG, reconnectBackoff.toMilliseconds());
38 | config.put(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnectBackoffMax.toMilliseconds());
39 | config.put(AdminClientConfig.RETRIES_CONFIG, retries);
40 | config.put(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoff.toMilliseconds());
41 | config.put(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, connectionMaxIdle.toMilliseconds());
42 | config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, (int) requestTimeout.toMilliseconds());
43 | config.put(AdminClientConfig.METADATA_MAX_AGE_CONFIG, metadataMaxAge.toMilliseconds());
44 | config.put(AdminClientConfig.SEND_BUFFER_CONFIG, sendBufferBytes);
45 | config.put(AdminClientConfig.RECEIVE_BUFFER_CONFIG, receiveBufferBytes);
46 | config.put(AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG, metricsReporters);
47 | config.put(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG, metricsSamples);
48 | config.put(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG, metricsSampleWindow.toMilliseconds());
49 | config.put(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG, metricsRecordingLevel.toString());
50 |
51 | if (security != null && security.isEnabled()) {
52 | config.putAll(security.build());
53 | }
54 |
55 | if (!requireNonNull(configOverrides).isEmpty()) {
56 | config.putAll(configOverrides);
57 | }
58 |
59 | final AdminClient adminClient = buildAdminClient(config);
60 |
61 | manageAdminClient(lifecycle, adminClient, topics);
62 |
63 | registerHealthCheck(healthChecks, adminClient);
64 |
65 | return adminClient;
66 | }
67 |
68 | @Override
69 | public AdminClient build(final LifecycleEnvironment lifecycle, final HealthCheckRegistry healthChecks,
70 | final Map<String, Object> configOverrides) {
71 | List<NewTopic> newTopics = Collections.emptyList();
72 | if (topicCreationEnabled) {
73 | newTopics = topics.stream()
74 | .map(KafkaTopicFactory::asNewTopic)
75 | .collect(Collectors.toList());
76 | }
77 | return build(lifecycle, healthChecks, configOverrides, newTopics);
78 | }
79 |
80 | @ValidationMethod(message = "Bootstrap servers must not be empty in BasicKafkaAdminClientFactory and topics must be defined " +
81 | "if allowed to be created")
82 | public boolean isValidConfiguration() {
83 | final List<String> errors = new ArrayList<>();
84 |
85 | if (bootstrapServers.isEmpty()) {
86 | errors.add("bootstrapServers cannot be empty if basic type is configured");
87 | }
88 |
89 | if (topicCreationEnabled && topics.isEmpty()) {
90 | errors.add("topicCreationEnabled cannot be true with no topics defined");
91 | }
92 |
93 | if (!topicCreationEnabled && !topics.isEmpty()) {
94 | log.warn("topicCreationEnabled was set to false, but topics were defined");
95 | }
96 |
97 | if (!errors.isEmpty()) {
98 | final String errorMessage = String.join(System.lineSeparator(), errors);
99 | log.error("Failed to construct a basic Kafka cluster connection, due to the following errors:{}{}", System.lineSeparator(),
100 | errorMessage);
101 | return false;
102 | }
103 |
104 | return true;
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/BasicKafkaConsumerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import brave.kafka.clients.KafkaTracing;
5 | import com.codahale.metrics.health.HealthCheckRegistry;
6 | import com.fasterxml.jackson.annotation.JsonTypeName;
7 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
8 | import org.apache.kafka.clients.CommonClientConfigs;
9 | import org.apache.kafka.clients.consumer.Consumer;
10 | import org.apache.kafka.clients.consumer.ConsumerConfig;
11 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
12 | import org.checkerframework.checker.nullness.qual.Nullable;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 |
16 | import java.util.ArrayList;
17 | import java.util.List;
18 | import java.util.Map;
19 | import java.util.Optional;
20 |
21 | import static java.util.Objects.requireNonNull;
22 |
23 | @JsonTypeName("basic")
24 | public class BasicKafkaConsumerFactory<K, V> extends KafkaConsumerFactory<K, V> {
25 | private static final Logger log = LoggerFactory.getLogger(BasicKafkaConsumerFactory.class);
26 |
27 | @Override
28 | public Consumer<K, V> build(final LifecycleEnvironment lifecycle,
29 | final HealthCheckRegistry healthChecks,
30 | @Nullable final Tracing tracing,
31 | @Nullable final ConsumerRebalanceListener rebalanceListener,
32 | final Map<String, Object> configOverrides) {
33 | final Map<String, Object> config = createBaseKafkaConfigurations();
34 |
35 | config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, String.join(",", bootstrapServers));
36 |
37 | config.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
38 |
39 | if (!requireNonNull(configOverrides).isEmpty()) {
40 | config.putAll(configOverrides);
41 | }
42 |
43 | final Optional<KafkaTracing> kafkaTracing = Optional.ofNullable(getTracingFactory())
44 | .flatMap(tracingFactory -> tracingFactory.build(tracing));
45 |
46 | final Consumer<K, V> rawConsumer = buildConsumer(config);
47 |
48 | final Consumer<K, V> consumer = kafkaTracing.map(kTracing -> kTracing.consumer(rawConsumer))
49 | .orElse(rawConsumer);
50 |
51 | manageConsumer(lifecycle, consumer);
52 |
53 | registerHealthCheck(healthChecks, consumer);
54 |
55 | return consumer;
56 | }
57 |
58 | @Override
59 | public boolean isValidConfiguration() {
60 | final List<String> errors = new ArrayList<>();
61 |
62 | if (bootstrapServers != null && bootstrapServers.isEmpty()) {
63 | errors.add("bootstrapServers cannot be empty if basic type is configured");
64 | }
65 |
66 | if (!errors.isEmpty()) {
67 | final String errorMessage = String.join(System.lineSeparator(), errors);
68 | log.error("Failed to construct a BASIC Kafka cluster connection, due to the following errors:{}{}", System.lineSeparator(),
69 | errorMessage);
70 | return false;
71 | }
72 |
73 | return true;
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/BasicKafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import brave.kafka.clients.KafkaTracing;
5 | import com.codahale.metrics.health.HealthCheckRegistry;
6 | import com.fasterxml.jackson.annotation.JsonTypeName;
7 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
8 | import org.apache.kafka.clients.CommonClientConfigs;
9 | import org.apache.kafka.clients.producer.Producer;
10 | import org.checkerframework.checker.nullness.qual.Nullable;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.util.ArrayList;
15 | import java.util.Collection;
16 | import java.util.List;
17 | import java.util.Map;
18 | import java.util.Optional;
19 |
20 | import static java.util.Objects.requireNonNull;
21 |
22 | @JsonTypeName("basic")
23 | public class BasicKafkaProducerFactory<K, V> extends KafkaProducerFactory<K, V> {
24 | private static final Logger log = LoggerFactory.getLogger(BasicKafkaProducerFactory.class);
25 |
26 | @Override
27 | public Producer<K, V> build(final LifecycleEnvironment lifecycle,
28 | final HealthCheckRegistry healthChecks,
29 | final Collection<String> topics,
30 | @Nullable final Tracing tracing,
31 | final Map<String, Object> configOverrides) {
32 | final Map<String, Object> config = createBaseKafkaConfigurations();
33 |
34 | config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, String.join(",", bootstrapServers));
35 |
36 | if (!requireNonNull(configOverrides).isEmpty()) {
37 | config.putAll(configOverrides);
38 | }
39 |
40 | final Optional<KafkaTracing> kafkaTracing = Optional.ofNullable(getTracingFactory())
41 | .flatMap(tracingFactory -> tracingFactory.build(tracing));
42 |
43 | final Producer<K, V> rawProducer = buildProducer(config);
44 |
45 | final Producer<K, V> producer = kafkaTracing.map(kTracing -> kTracing.producer(rawProducer))
46 | .orElse(rawProducer);
47 |
48 | manageProducer(lifecycle, producer);
49 |
50 | registerProducerHealthCheck(healthChecks, producer, topics);
51 |
52 | return producer;
53 | }
54 |
55 | @Override
56 | public boolean isValidConfiguration() {
57 | final List<String> errors = new ArrayList<>();
58 |
59 | if (bootstrapServers != null && bootstrapServers.isEmpty()) {
60 | errors.add("bootstrapServers cannot be empty if basic type is configured");
61 | }
62 |
63 | if (!errors.isEmpty()) {
64 | final String errorMessage = String.join(System.lineSeparator(), errors);
65 | log.error("Failed to construct a basic Kafka cluster connection, due to the following errors:{}{}", System.lineSeparator(),
66 | errorMessage);
67 | return false;
68 | }
69 |
70 | return true;
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/DropwizardKafkaUtils.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | public class DropwizardKafkaUtils {
7 | private static final Logger log = LoggerFactory.getLogger(DropwizardKafkaUtils.class);
8 |
9 | private DropwizardKafkaUtils() {
10 | // should not instantiate
11 | }
12 |
13 | public static void validateStringIsValidSubClass(final String classString, final Class<?> parentClass) {
14 | final Class<?> actualClass;
15 | try {
16 | actualClass = Class.forName(classString);
17 | } catch (final ClassNotFoundException e) {
18 | log.error("No valid class found for string={}", classString);
19 | throw new RuntimeException(e);
20 | }
21 |
22 | if (!parentClass.isAssignableFrom(actualClass)) {
23 | log.error("class={} is not a subclass of parentClass={}", actualClass, parentClass);
24 | throw new IllegalStateException(String.format("Class for name=%s is not a child of parentClass=%s", actualClass, parentClass));
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaAdminClientBundle.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import io.dropwizard.core.Configuration;
5 | import io.dropwizard.core.ConfiguredBundle;
6 | import io.dropwizard.core.setup.Bootstrap;
7 | import io.dropwizard.core.setup.Environment;
8 | import org.apache.kafka.clients.admin.AdminClient;
9 | import org.apache.kafka.clients.admin.NewTopic;
10 | import org.checkerframework.checker.nullness.qual.Nullable;
11 |
12 | import java.util.Collection;
13 | import java.util.Collections;
14 | import java.util.Map;
15 | import java.util.Objects;
16 |
17 | import static java.util.Objects.requireNonNull;
18 |
19 | public abstract class KafkaAdminClientBundle<T extends Configuration> implements ConfiguredBundle<T> {
20 | private final Collection<NewTopic> topics;
21 | private final Map<String, Object> configOverrides;
22 |
23 | @Nullable
24 | private AdminClient adminClient;
25 |
26 | protected KafkaAdminClientBundle(final Collection<NewTopic> topics) {
27 | this(topics, Collections.emptyMap());
28 | }
29 |
30 | public KafkaAdminClientBundle(final Map<String, Object> configOverrides) {
31 | this(Collections.emptyList(), configOverrides);
32 | }
33 |
34 | protected KafkaAdminClientBundle(final Collection<NewTopic> topics, final Map<String, Object> configOverrides) {
35 | this.topics = Objects.requireNonNull(topics);
36 | this.configOverrides = Objects.requireNonNull(configOverrides);
37 | }
38 |
39 | @Override
40 | public void initialize(final Bootstrap<?> bootstrap) {
41 | // do nothing
42 | }
43 |
44 | @Override
45 | public void run(final T configuration, final Environment environment) throws Exception {
46 | final KafkaAdminClientFactory kafkaAdminClientFactory = requireNonNull(getKafkaAdminClientFactory(configuration));
47 |
48 | final Tracing tracing = Tracing.current(); // note: currently unused; tracing is not wired into the admin client
49 |
50 | this.adminClient = kafkaAdminClientFactory.build(environment.lifecycle(), environment.healthChecks(), configOverrides, topics);
51 | }
52 |
53 | public abstract KafkaAdminClientFactory getKafkaAdminClientFactory(final T configuration);
54 |
55 | public AdminClient getAdminClient() {
56 | return requireNonNull(adminClient);
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaAdminClientFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import com.codahale.metrics.health.HealthCheckRegistry;
4 | import com.fasterxml.jackson.annotation.JsonProperty;
5 | import com.fasterxml.jackson.annotation.JsonTypeInfo;
6 | import io.dropwizard.kafka.health.KafkaAdminHealthCheck;
7 | import io.dropwizard.kafka.managed.KafkaAdminClientManager;
8 | import io.dropwizard.kafka.metrics.DropwizardMetricsReporter;
9 | import io.dropwizard.kafka.security.SecurityFactory;
10 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
11 | import io.dropwizard.util.Duration;
12 | import org.apache.kafka.clients.ClientDnsLookup;
13 | import org.apache.kafka.clients.admin.AdminClient;
14 | import org.apache.kafka.clients.admin.NewTopic;
15 | import org.apache.kafka.common.metrics.MetricsReporter;
16 | import org.apache.kafka.common.metrics.Sensor;
17 |
18 | import java.util.Collection;
19 | import java.util.Collections;
20 | import java.util.List;
21 | import java.util.Map;
22 | import java.util.Set;
23 | import java.util.stream.Collectors;
24 |
25 | import jakarta.validation.Valid;
26 | import jakarta.validation.constraints.Min;
27 | import jakarta.validation.constraints.NotNull;
28 |
29 | @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
30 | public abstract class KafkaAdminClientFactory {
31 | @NotNull
32 | @JsonProperty
33 | protected String name;
34 |
35 | @NotNull
36 | @JsonProperty
37 | protected String clientId;
38 |
39 | @NotNull
40 | @JsonProperty
41 | protected Set<String> bootstrapServers = Collections.emptySet();
42 |
43 | @NotNull
44 | @JsonProperty
45 | protected ClientDnsLookup clientDnsLookup = ClientDnsLookup.USE_ALL_DNS_IPS;
46 |
47 | @NotNull
48 | @JsonProperty
49 | protected Duration reconnectBackoff = Duration.milliseconds(50);
50 |
51 | @NotNull
52 | @JsonProperty
53 | protected Duration reconnectBackoffMax = Duration.seconds(1);
54 |
55 | @Min(0)
56 | @NotNull
57 | @JsonProperty
58 | protected Integer retries = 5;
59 |
60 | @NotNull
61 | @JsonProperty
62 | protected Duration retryBackoff = Duration.milliseconds(100);
63 |
64 | @NotNull
65 | @JsonProperty
66 | protected Duration connectionMaxIdle = Duration.minutes(5);
67 |
68 | @NotNull
69 | @JsonProperty
70 | protected Duration requestTimeout = Duration.minutes(2);
71 |
72 | @NotNull
73 | @JsonProperty
74 | protected Duration metadataMaxAge = Duration.minutes(5);
75 |
76 | @Min(-1)
77 | @NotNull
78 | @JsonProperty
79 | protected Integer sendBufferBytes = 131072; // default taken from AdminClientConfig
80 |
81 | @Min(-1)
82 | @NotNull
83 | @JsonProperty
84 | protected Integer receiveBufferBytes = 65536; // default taken from AdminClientConfig
85 |
86 | @NotNull
87 | @JsonProperty
88 | protected List<Class<? extends MetricsReporter>> metricsReporters = Collections.singletonList(DropwizardMetricsReporter.class);
89 |
90 | @Min(1)
91 | @NotNull
92 | @JsonProperty
93 | protected Integer metricsSamples = 2; // default in AdminClientConfig
94 |
95 | @NotNull
96 | @JsonProperty
97 | protected Duration metricsSampleWindow = Duration.seconds(30);
98 |
99 | @NotNull
100 | @JsonProperty
101 | protected Sensor.RecordingLevel metricsRecordingLevel = Sensor.RecordingLevel.INFO;
102 |
103 | @Valid
104 | @JsonProperty
105 | protected SecurityFactory security;
106 |
107 | @JsonProperty
108 | protected boolean topicCreationEnabled = false;
109 |
110 | @Valid
111 | @NotNull
112 | @JsonProperty
113 | protected Set<KafkaTopicFactory> topics = Collections.emptySet();
114 |
115 | public String getName() {
116 | return name;
117 | }
118 |
119 | public void setName(final String name) {
120 | this.name = name;
121 | }
122 |
123 | public String getClientId() {
124 | return clientId;
125 | }
126 |
127 | public void setClientId(final String clientId) {
128 | this.clientId = clientId;
129 | }
130 |
131 | public Set<String> getBootstrapServers() {
132 | return bootstrapServers;
133 | }
134 |
135 | public void setBootstrapServers(final Set<String> bootstrapServers) {
136 | this.bootstrapServers = bootstrapServers;
137 | }
138 |
139 | public ClientDnsLookup getClientDnsLookup() {
140 | return clientDnsLookup;
141 | }
142 |
143 | public void setClientDnsLookup(final ClientDnsLookup clientDnsLookup) {
144 | this.clientDnsLookup = clientDnsLookup;
145 | }
146 |
147 | public Duration getReconnectBackoff() {
148 | return reconnectBackoff;
149 | }
150 |
151 | public void setReconnectBackoff(final Duration reconnectBackoff) {
152 | this.reconnectBackoff = reconnectBackoff;
153 | }
154 |
155 | public Duration getReconnectBackoffMax() {
156 | return reconnectBackoffMax;
157 | }
158 |
159 | public void setReconnectBackoffMax(final Duration reconnectBackoffMax) {
160 | this.reconnectBackoffMax = reconnectBackoffMax;
161 | }
162 |
163 | public Integer getRetries() {
164 | return retries;
165 | }
166 |
167 | public void setRetries(final Integer retries) {
168 | this.retries = retries;
169 | }
170 |
171 | public Duration getRetryBackoff() {
172 | return retryBackoff;
173 | }
174 |
175 | public void setRetryBackoff(final Duration retryBackoff) {
176 | this.retryBackoff = retryBackoff;
177 | }
178 |
179 | public Duration getConnectionMaxIdle() {
180 | return connectionMaxIdle;
181 | }
182 |
183 | public void setConnectionMaxIdle(final Duration connectionMaxIdle) {
184 | this.connectionMaxIdle = connectionMaxIdle;
185 | }
186 |
187 | public Duration getRequestTimeout() {
188 | return requestTimeout;
189 | }
190 |
191 | public void setRequestTimeout(final Duration requestTimeout) {
192 | this.requestTimeout = requestTimeout;
193 | }
194 |
195 | public Duration getMetadataMaxAge() {
196 | return metadataMaxAge;
197 | }
198 |
199 | public void setMetadataMaxAge(final Duration metadataMaxAge) {
200 | this.metadataMaxAge = metadataMaxAge;
201 | }
202 |
203 | public Integer getSendBufferBytes() {
204 | return sendBufferBytes;
205 | }
206 |
207 | public void setSendBufferBytes(final Integer sendBufferBytes) {
208 | this.sendBufferBytes = sendBufferBytes;
209 | }
210 |
211 | public Integer getReceiveBufferBytes() {
212 | return receiveBufferBytes;
213 | }
214 |
215 | public void setReceiveBufferBytes(final Integer receiveBufferBytes) {
216 | this.receiveBufferBytes = receiveBufferBytes;
217 | }
218 |
219 | public List<Class<? extends MetricsReporter>> getMetricsReporters() {
220 | return metricsReporters;
221 | }
222 |
223 | public void setMetricsReporters(final List<Class<? extends MetricsReporter>> metricsReporters) {
224 | this.metricsReporters = metricsReporters;
225 | }
226 |
227 | public Integer getMetricsSamples() {
228 | return metricsSamples;
229 | }
230 |
231 | public void setMetricsSamples(final Integer metricsSamples) {
232 | this.metricsSamples = metricsSamples;
233 | }
234 |
235 | public Duration getMetricsSampleWindow() {
236 | return metricsSampleWindow;
237 | }
238 |
239 | public void setMetricsSampleWindow(final Duration metricsSampleWindow) {
240 | this.metricsSampleWindow = metricsSampleWindow;
241 | }
242 |
243 | public Sensor.RecordingLevel getMetricsRecordingLevel() {
244 | return metricsRecordingLevel;
245 | }
246 |
247 | public void setMetricsRecordingLevel(final Sensor.RecordingLevel metricsRecordingLevel) {
248 | this.metricsRecordingLevel = metricsRecordingLevel;
249 | }
250 |
251 | public SecurityFactory getSecurity() {
252 | return security;
253 | }
254 |
255 | public void setSecurity(final SecurityFactory security) {
256 | this.security = security;
257 | }
258 |
259 | public Set<KafkaTopicFactory> getTopics() {
260 | return topics;
261 | }
262 |
263 | public boolean getTopicCreationEnabled() {
264 | return topicCreationEnabled;
265 | }
266 |
267 | public void setTopicCreationEnabled(boolean topicCreationEnabled) {
268 | this.topicCreationEnabled = topicCreationEnabled;
269 | }
270 |
271 | protected AdminClient buildAdminClient(final Map<String, Object> config) {
272 | return AdminClient.create(config);
273 | }
274 |
275 | protected void manageAdminClient(final LifecycleEnvironment lifecycle, final AdminClient adminClient) {
276 | List<NewTopic> newTopics = Collections.emptyList();
277 | if (topicCreationEnabled) {
278 | newTopics = topics.stream()
279 | .map(KafkaTopicFactory::asNewTopic)
280 | .collect(Collectors.toList());
281 | }
282 | manageAdminClient(lifecycle, adminClient, newTopics);
283 | }
284 |
285 | protected void manageAdminClient(final LifecycleEnvironment lifecycle, final AdminClient adminClient,
286 | final Collection<NewTopic> topics) {
287 | lifecycle.manage(new KafkaAdminClientManager(adminClient, name, topics));
288 | }
289 |
290 | protected void registerHealthCheck(final HealthCheckRegistry healthChecks, final AdminClient adminClient) {
291 | healthChecks.register(name, new KafkaAdminHealthCheck(adminClient, name));
292 | }
293 |
294 | public abstract AdminClient build(LifecycleEnvironment lifecycle, HealthCheckRegistry healthChecks,
295 | Map<String, Object> configOverrides);
296 |
297 | public abstract AdminClient build(LifecycleEnvironment lifecycle, HealthCheckRegistry healthChecks, Map<String, Object> configOverrides,
298 | Collection<NewTopic> topics);
299 | }
300 |
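A minimal usage sketch (not part of this file; AppConfiguration and its accessor are hypothetical): a Dropwizard application typically builds the admin client in run(), and the concrete factory then attaches the KafkaAdminClientManager lifecycle hook and the KafkaAdminHealthCheck shown elsewhere in this repository.

    @Override
    public void run(final AppConfiguration configuration, final Environment environment) {
        final KafkaAdminClientFactory factory = configuration.getKafkaAdminClientFactory();
        // no programmatic overrides in this sketch; the client is managed and health-checked
        final AdminClient adminClient = factory.build(
                environment.lifecycle(), environment.healthChecks(), Collections.emptyMap());
    }
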
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaClientFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import com.fasterxml.jackson.annotation.JsonIgnore;
4 | import com.fasterxml.jackson.annotation.JsonProperty;
5 | import io.dropwizard.kafka.security.SecurityFactory;
6 | import io.dropwizard.kafka.tracing.TracingFactory;
7 | import io.dropwizard.validation.ValidationMethod;
8 |
9 | import java.util.Optional;
10 | import java.util.Set;
11 |
12 | import jakarta.validation.Valid;
13 | import jakarta.validation.constraints.NotEmpty;
14 |
15 | public abstract class KafkaClientFactory {
16 | @NotEmpty
17 | @JsonProperty
18 | protected String name;
19 |
20 | @Valid
21 | @JsonProperty
22 | protected Optional<SecurityFactory> security = Optional.empty();
23 |
24 | @JsonProperty
25 | protected boolean metricsEnabled = true;
26 |
27 | @JsonProperty
28 | protected boolean includeTaggedMetrics = false;
29 |
30 | @JsonProperty
31 | protected Set<String> bootstrapServers;
32 |
33 | @JsonProperty
34 | protected Optional<String> clientDNSLookup = Optional.empty();
35 |
36 | @JsonProperty
37 | protected Optional<String> clientId = Optional.empty();
38 |
39 | @Valid
40 | @JsonProperty
41 | private TracingFactory tracingFactory;
42 |
43 | public String getName() {
44 | return name;
45 | }
46 |
47 | public void setName(final String name) {
48 | this.name = name;
49 | }
50 |
51 | public Optional<SecurityFactory> getSecurity() {
52 | return security;
53 | }
54 |
55 | public void setSecurity(final Optional<SecurityFactory> security) {
56 | this.security = security;
57 | }
58 |
59 | public boolean isMetricsEnabled() {
60 | return metricsEnabled;
61 | }
62 |
63 | public void setMetricsEnabled(final boolean metricsEnabled) {
64 | this.metricsEnabled = metricsEnabled;
65 | }
66 |
67 | public boolean isIncludeTaggedMetrics() {
68 | return includeTaggedMetrics;
69 | }
70 |
71 | public void setIncludeTaggedMetrics(final boolean includeTaggedMetrics) {
72 | this.includeTaggedMetrics = includeTaggedMetrics;
73 | }
74 |
75 | public Set<String> getBootstrapServers() {
76 | return bootstrapServers;
77 | }
78 |
79 | public Optional<String> getClientDNSLookup() {
80 | return clientDNSLookup;
81 | }
82 |
83 | public void setClientDNSLookup(Optional<String> clientDNSLookup) {
84 | this.clientDNSLookup = clientDNSLookup;
85 | }
86 |
87 | public Optional<String> getClientId() {
88 | return clientId;
89 | }
90 |
91 | public void setClientId(Optional<String> clientId) {
92 | this.clientId = clientId;
93 | }
94 |
95 | public void setBootstrapServers(final Set<String> bootstrapServers) {
96 | this.bootstrapServers = bootstrapServers;
97 | }
98 |
99 | public TracingFactory getTracingFactory() {
100 | return tracingFactory;
101 | }
102 |
103 | public void setTracingFactory(final TracingFactory tracingFactory) {
104 | this.tracingFactory = tracingFactory;
105 | }
106 |
107 | @ValidationMethod(message = "Some Kafka configurations were invalid")
108 | @JsonIgnore
109 | public abstract boolean isValidConfiguration();
110 | }
111 |
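A sketch of what a concrete subclass supplies (illustrative only): isValidConfiguration() runs as part of Dropwizard's configuration validation via @ValidationMethod, so cross-field rules belong there.

    @Override
    public boolean isValidConfiguration() {
        // e.g. a basic factory might insist on at least one bootstrap server
        return bootstrapServers != null && !bootstrapServers.isEmpty();
    }
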
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaConsumerBundle.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import io.dropwizard.core.Configuration;
5 | import io.dropwizard.core.ConfiguredBundle;
6 | import io.dropwizard.core.setup.Bootstrap;
7 | import io.dropwizard.core.setup.Environment;
8 | import org.apache.kafka.clients.consumer.Consumer;
9 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
10 | import org.checkerframework.checker.nullness.qual.Nullable;
11 |
12 | import java.util.Collection;
13 | import java.util.Collections;
14 | import java.util.Map;
15 | import java.util.Objects;
16 |
17 | import static java.util.Objects.requireNonNull;
18 |
19 | public abstract class KafkaConsumerBundle<K, V, T extends Configuration> implements ConfiguredBundle<T> {
20 | private final Collection<String> topics;
21 | private final ConsumerRebalanceListener consumerRebalanceListener;
22 | private final Map<String, Object> configOverrides;
23 |
24 | @Nullable
25 | private Consumer<K, V> consumer;
26 |
27 | protected KafkaConsumerBundle(final Collection<String> topics,
28 | final ConsumerRebalanceListener consumerRebalanceListener) {
29 | this(topics, consumerRebalanceListener, Collections.emptyMap());
30 | }
31 |
32 | protected KafkaConsumerBundle(final Collection<String> topics,
33 | final ConsumerRebalanceListener consumerRebalanceListener,
34 | final Map<String, Object> configOverrides) {
35 | this.topics = Objects.requireNonNull(topics);
36 | this.consumerRebalanceListener = Objects.requireNonNull(consumerRebalanceListener);
37 | this.configOverrides = Objects.requireNonNull(configOverrides);
38 | }
39 |
40 | @Override
41 | public void initialize(final Bootstrap<?> bootstrap) {
42 | // do nothing
43 | }
44 |
45 | @Override
46 | public void run(final T configuration, final Environment environment) throws Exception {
47 | final KafkaConsumerFactory<K, V> kafkaConsumerFactory = requireNonNull(getKafkaConsumerFactory(configuration));
48 |
49 | final Tracing tracing = Tracing.current();
50 |
51 | this.consumer = kafkaConsumerFactory.build(environment.lifecycle(), environment.healthChecks(), tracing,
52 | consumerRebalanceListener, configOverrides);
53 | }
54 |
55 | public abstract KafkaConsumerFactory<K, V> getKafkaConsumerFactory(T configuration);
56 |
57 | public Consumer<K, V> getConsumer() {
58 | return requireNonNull(consumer);
59 | }
60 | }
61 |
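Wiring sketch, mirroring the pattern in this project's README (ExampleConfiguration and the topic name are hypothetical; NoOpConsumerRebalanceListener ships with kafka-clients):

    private final KafkaConsumerBundle<String, String, ExampleConfiguration> consumerBundle =
            new KafkaConsumerBundle<String, String, ExampleConfiguration>(
                    Collections.singletonList("example-topic"),
                    new NoOpConsumerRebalanceListener()) {
                @Override
                public KafkaConsumerFactory<String, String> getKafkaConsumerFactory(
                        final ExampleConfiguration configuration) {
                    return configuration.getKafkaConsumerFactory();
                }
            };

The bundle is then registered via bootstrap.addBundle(consumerBundle) in the application's initialize() method, so run() can populate the consumer before resources ask for it.
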
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaConsumerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import com.codahale.metrics.health.HealthCheckRegistry;
5 | import com.fasterxml.jackson.annotation.JsonProperty;
6 | import com.fasterxml.jackson.annotation.JsonTypeInfo;
7 | import io.dropwizard.jackson.Discoverable;
8 | import io.dropwizard.kafka.deserializer.DeserializerFactory;
9 | import io.dropwizard.kafka.managed.KafkaConsumerManager;
10 | import io.dropwizard.kafka.metrics.DropwizardMetricsReporter;
11 | import io.dropwizard.kafka.security.SecurityFactory;
12 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
13 | import io.dropwizard.util.Duration;
14 | import org.apache.kafka.clients.CommonClientConfigs;
15 | import org.apache.kafka.clients.consumer.Consumer;
16 | import org.apache.kafka.clients.consumer.ConsumerConfig;
17 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
18 | import org.apache.kafka.clients.consumer.KafkaConsumer;
19 | import org.checkerframework.checker.nullness.qual.Nullable;
20 |
21 | import java.util.Collections;
22 | import java.util.HashMap;
23 | import java.util.Map;
24 |
25 | import jakarta.validation.Valid;
26 | import jakarta.validation.constraints.Min;
27 | import jakarta.validation.constraints.NotEmpty;
28 | import jakarta.validation.constraints.NotNull;
29 |
30 | @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
31 | public abstract class KafkaConsumerFactory<K, V> extends KafkaClientFactory implements Discoverable {
32 | @NotEmpty
33 | @JsonProperty
34 | protected String consumerGroupId;
35 |
36 | @Valid
37 | @NotNull
38 | @JsonProperty
39 | protected DeserializerFactory keyDeserializer;
40 |
41 | @Valid
42 | @NotNull
43 | @JsonProperty
44 | protected DeserializerFactory valueDeserializer;
45 |
46 | @JsonProperty
47 | protected boolean autoCommitEnabled = true;
48 |
49 | @JsonProperty
50 | protected Duration autoCommitInterval = Duration.seconds(5);
51 |
52 | @Min(-1)
53 | @JsonProperty
54 | protected int sendBufferBytes = -1;
55 |
56 | @Min(-1)
57 | @JsonProperty
58 | protected int receiveBufferBytes = -1;
59 |
60 | @Min(1)
61 | @JsonProperty
62 | protected int maxPollRecords = 500;
63 |
64 | @NotNull
65 | @JsonProperty
66 | protected Duration maxPollInterval = Duration.minutes(5);
67 |
68 | public String getConsumerGroupId() {
69 | return consumerGroupId;
70 | }
71 |
72 | public void setConsumerGroupId(final String consumerGroupId) {
73 | this.consumerGroupId = consumerGroupId;
74 | }
75 |
76 | public DeserializerFactory getKeyDeserializer() {
77 | return keyDeserializer;
78 | }
79 |
80 | public void setKeyDeserializer(final DeserializerFactory keyDeserializer) {
81 | this.keyDeserializer = keyDeserializer;
82 | }
83 |
84 | public DeserializerFactory getValueDeserializer() {
85 | return valueDeserializer;
86 | }
87 |
88 | public void setValueDeserializer(final DeserializerFactory valueDeserializer) {
89 | this.valueDeserializer = valueDeserializer;
90 | }
91 |
92 | public boolean isAutoCommitEnabled() {
93 | return autoCommitEnabled;
94 | }
95 |
96 | public void setAutoCommitEnabled(final boolean autoCommitEnabled) {
97 | this.autoCommitEnabled = autoCommitEnabled;
98 | }
99 |
100 | public Duration getAutoCommitInterval() {
101 | return autoCommitInterval;
102 | }
103 |
104 | public void setAutoCommitInterval(final Duration autoCommitInterval) {
105 | this.autoCommitInterval = autoCommitInterval;
106 | }
107 |
108 | public int getSendBufferBytes() {
109 | return sendBufferBytes;
110 | }
111 |
112 | public void setSendBufferBytes(final int sendBufferBytes) {
113 | this.sendBufferBytes = sendBufferBytes;
114 | }
115 |
116 | public int getReceiveBufferBytes() {
117 | return receiveBufferBytes;
118 | }
119 |
120 | public void setReceiveBufferBytes(final int receiveBufferBytes) {
121 | this.receiveBufferBytes = receiveBufferBytes;
122 | }
123 |
124 | public int getMaxPollRecords() {
125 | return maxPollRecords;
126 | }
127 |
128 | public void setMaxPollRecords(final int maxPollRecords) {
129 | this.maxPollRecords = maxPollRecords;
130 | }
131 |
132 | public Duration getMaxPollInterval() {
133 | return maxPollInterval;
134 | }
135 |
136 | public void setMaxPollInterval(final Duration maxPollInterval) {
137 | this.maxPollInterval = maxPollInterval;
138 | }
139 |
140 | protected Map<String, Object> createBaseKafkaConfigurations() {
141 | final Map<String, Object> config = new HashMap<>();
142 |
143 | config.putAll(keyDeserializer.build(true));
144 | config.putAll(valueDeserializer.build(false));
145 |
146 | config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommitEnabled);
147 | if (autoCommitEnabled && autoCommitInterval != null) {
148 | config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, (int) autoCommitInterval.toMilliseconds());
149 | }
150 |
151 | clientDNSLookup.ifPresent(clientDnsLookupValue -> config.put(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG, clientDnsLookupValue));
152 | clientId.ifPresent(clientIdValue -> config.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientIdValue));
153 |
154 | config.put(ConsumerConfig.SEND_BUFFER_CONFIG, sendBufferBytes);
155 | config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, receiveBufferBytes);
156 | config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
157 | config.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, (int) maxPollInterval.toMilliseconds());
158 |
159 | security.filter(SecurityFactory::isEnabled)
160 | .ifPresent(securityFactory -> config.putAll(securityFactory.build()));
161 |
162 | if (metricsEnabled) {
163 | config.put(DropwizardMetricsReporter.SHOULD_INCLUDE_TAGS_CONFIG, Boolean.toString(includeTaggedMetrics));
164 | config.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, DropwizardMetricsReporter.class.getName());
165 | config.put(DropwizardMetricsReporter.METRICS_NAME_CONFIG, name);
166 | }
167 |
168 | return config;
169 | }
170 |
171 | protected Consumer<K, V> buildConsumer(final Map<String, Object> config) {
172 | return new KafkaConsumer<>(config);
173 | }
174 |
175 | protected void registerHealthCheck(final HealthCheckRegistry healthChecks, final Consumer<K, V> consumer) {
176 | // no consumer health checks, due to kafka client limitations. The admin client health check is the better option in general
177 | }
178 |
179 | protected void manageConsumer(final LifecycleEnvironment lifecycle, final Consumer<K, V> consumer) {
180 | lifecycle.manage(new KafkaConsumerManager(consumer));
181 | }
182 |
183 | public Consumer<K, V> build(final LifecycleEnvironment lifecycle,
184 | final HealthCheckRegistry healthChecks,
185 | @Nullable final Tracing tracing,
186 | @Nullable final ConsumerRebalanceListener rebalanceListener) {
187 | return build(lifecycle, healthChecks, tracing, rebalanceListener, Collections.emptyMap());
188 | }
189 |
190 | public abstract Consumer<K, V> build(final LifecycleEnvironment lifecycle,
191 | final HealthCheckRegistry healthChecks,
192 | @Nullable Tracing tracing,
193 | @Nullable ConsumerRebalanceListener rebalanceListener,
194 | Map<String, Object> configOverrides);
195 | }
196 |
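Consumption sketch (names illustrative): the factory hands back a fully configured consumer (deserializers, security, metrics reporter); subscribing and polling remain the caller's job.

    final KafkaConsumerFactory<String, String> factory = configuration.getKafkaConsumerFactory();
    final Consumer<String, String> consumer = factory.build(
            environment.lifecycle(), environment.healthChecks(), Tracing.current(), null);
    consumer.subscribe(Collections.singletonList("example-topic"));
    // java.time.Duration here, not io.dropwizard.util.Duration
    final ConsumerRecords<String, String> records = consumer.poll(java.time.Duration.ofSeconds(1));
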
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaProducerBundle.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import io.dropwizard.core.Configuration;
5 | import io.dropwizard.core.ConfiguredBundle;
6 | import io.dropwizard.core.setup.Bootstrap;
7 | import io.dropwizard.core.setup.Environment;
8 | import org.apache.kafka.clients.producer.Producer;
9 | import org.checkerframework.checker.nullness.qual.Nullable;
10 |
11 | import java.util.Collection;
12 | import java.util.Collections;
13 | import java.util.Map;
14 |
15 | import static java.util.Objects.requireNonNull;
16 |
17 | public abstract class KafkaProducerBundle<K, V, T extends Configuration> implements ConfiguredBundle<T> {
18 | private final Collection<String> topics;
19 | private final Map<String, Object> configOverrides;
20 |
21 | @Nullable
22 | private Producer<K, V> producer;
23 |
24 | public KafkaProducerBundle(final Collection<String> topics) {
25 | this(topics, Collections.emptyMap());
26 | }
27 |
28 | public KafkaProducerBundle(final Collection<String> topics,
29 | final Map<String, Object> configOverrides) {
30 | this.topics = requireNonNull(topics);
31 | this.configOverrides = requireNonNull(configOverrides);
32 | }
33 |
34 | @Override
35 | public void initialize(final Bootstrap<?> bootstrap) {
36 | // do nothing
37 | }
38 |
39 | @Override
40 | public void run(final T configuration, final Environment environment) throws Exception {
41 | final KafkaProducerFactory<K, V> kafkaProducerFactory = requireNonNull(getKafkaProducerFactory(configuration));
42 |
43 | final Tracing tracing = Tracing.current();
44 |
45 | this.producer = kafkaProducerFactory.build(environment.lifecycle(), environment.healthChecks(), topics, tracing, configOverrides);
46 | }
47 |
48 | public abstract KafkaProducerFactory<K, V> getKafkaProducerFactory(T configuration);
49 |
50 | public Producer<K, V> getProducer() {
51 | return requireNonNull(producer);
52 | }
53 | }
54 |
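Usage sketch (topic and payload hypothetical): once run() has executed, getProducer() exposes the lifecycle-managed producer to resources.

    producerBundle.getProducer().send(new ProducerRecord<>("example-topic", "key", "value"));
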
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import com.codahale.metrics.health.HealthCheckRegistry;
5 | import com.fasterxml.jackson.annotation.JsonProperty;
6 | import com.fasterxml.jackson.annotation.JsonTypeInfo;
7 | import io.dropwizard.jackson.Discoverable;
8 | import io.dropwizard.kafka.health.KafkaProducerHealthCheck;
9 | import io.dropwizard.kafka.managed.KafkaProducerManager;
10 | import io.dropwizard.kafka.metrics.DropwizardMetricsReporter;
11 | import io.dropwizard.kafka.security.SecurityFactory;
12 | import io.dropwizard.kafka.serializer.SerializerFactory;
13 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
14 | import io.dropwizard.util.Duration;
15 | import io.dropwizard.validation.MinDuration;
16 | import org.apache.kafka.clients.CommonClientConfigs;
17 | import org.apache.kafka.clients.producer.KafkaProducer;
18 | import org.apache.kafka.clients.producer.Producer;
19 | import org.apache.kafka.clients.producer.ProducerConfig;
20 | import org.apache.kafka.common.record.CompressionType;
21 | import org.checkerframework.checker.nullness.qual.Nullable;
22 |
23 | import jakarta.validation.Valid;
24 | import jakarta.validation.constraints.Min;
25 | import jakarta.validation.constraints.NotEmpty;
26 | import jakarta.validation.constraints.NotNull;
27 | import java.util.Collection;
28 | import java.util.Collections;
29 | import java.util.HashMap;
30 | import java.util.Map;
31 | import java.util.Optional;
32 |
33 | @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
34 | public abstract class KafkaProducerFactory<K, V> extends KafkaClientFactory implements Discoverable {
35 | @Valid
36 | @NotNull
37 | @JsonProperty
38 | protected SerializerFactory keySerializer;
39 |
40 | @Valid
41 | @NotNull
42 | @JsonProperty
43 | protected SerializerFactory valueSerializer;
44 |
45 | @JsonProperty
46 | protected Optional<String> acks = Optional.empty();
47 |
48 | @JsonProperty
49 | protected Optional<Integer> retries = Optional.empty();
50 |
51 | @JsonProperty
52 | protected Optional<Integer> maxInFlightRequestsPerConnection = Optional.empty();
53 |
54 | @JsonProperty
55 | protected Optional<Duration> maxPollBlockTime = Optional.empty();
56 |
57 | @NotEmpty
58 | @JsonProperty
59 | protected String compressionType = CompressionType.GZIP.name;
60 |
61 | @Min(-1)
62 | @JsonProperty
63 | protected int sendBufferBytes = -1;
64 |
65 | @Min(-1)
66 | @JsonProperty
67 | protected int receiveBufferBytes = -1;
68 |
69 | @Min(0L)
70 | @JsonProperty
71 | protected long bufferMemory = 32 * 1024 * 1024L;
72 |
73 | @Min(0)
74 | @JsonProperty
75 | protected int batchSize = 16384;
76 |
77 | @MinDuration(0)
78 | @JsonProperty
79 | protected Duration linger = Duration.milliseconds(0);
80 |
81 | @MinDuration(0)
82 | @JsonProperty
83 | protected Duration requestTimeout = Duration.seconds(30);
84 |
85 | @JsonProperty
86 | protected boolean enableIdempotence = false;
87 |
88 | @JsonProperty
89 | protected Optional<String> transactionalId = Optional.empty();
90 |
91 | public SerializerFactory getKeySerializer() {
92 | return keySerializer;
93 | }
94 |
95 | public void setKeySerializer(final SerializerFactory keySerializer) {
96 | this.keySerializer = keySerializer;
97 | }
98 |
99 | public SerializerFactory getValueSerializer() {
100 | return valueSerializer;
101 | }
102 |
103 | public void setValueSerializer(final SerializerFactory valueSerializer) {
104 | this.valueSerializer = valueSerializer;
105 | }
106 |
107 | public Optional<String> getAcks() {
108 | return acks;
109 | }
110 |
111 | public void setAcks(final Optional<String> acks) {
112 | this.acks = acks;
113 | }
114 |
115 | public Optional<Integer> getRetries() {
116 | return retries;
117 | }
118 |
119 | public void setRetries(final Optional<Integer> retries) {
120 | this.retries = retries;
121 | }
122 |
123 | public Optional<Integer> getMaxInFlightRequestsPerConnection() {
124 | return maxInFlightRequestsPerConnection;
125 | }
126 |
127 | public void setMaxInFlightRequestsPerConnection(final Optional<Integer> maxInFlightRequestsPerConnection) {
128 | this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
129 | }
130 |
131 | public Optional<Duration> getMaxPollBlockTime() {
132 | return maxPollBlockTime;
133 | }
134 |
135 | public void setMaxPollBlockTime(final Optional<Duration> maxPollBlockTime) {
136 | this.maxPollBlockTime = maxPollBlockTime;
137 | }
138 |
139 | public String getCompressionType() {
140 | return compressionType;
141 | }
142 |
143 | public void setCompressionType(final String compressionType) {
144 | this.compressionType = compressionType;
145 | }
146 |
147 | public int getSendBufferBytes() {
148 | return sendBufferBytes;
149 | }
150 |
151 | public void setSendBufferBytes(final int sendBufferBytes) {
152 | this.sendBufferBytes = sendBufferBytes;
153 | }
154 |
155 | public int getReceiveBufferBytes() {
156 | return receiveBufferBytes;
157 | }
158 |
159 | public void setReceiveBufferBytes(final int receiveBufferBytes) {
160 | this.receiveBufferBytes = receiveBufferBytes;
161 | }
162 |
163 | public long getBufferMemory() {
164 | return bufferMemory;
165 | }
166 |
167 | public void setBufferMemory(final long bufferMemory) {
168 | this.bufferMemory = bufferMemory;
169 | }
170 |
171 | public int getBatchSize() {
172 | return batchSize;
173 | }
174 |
175 | public void setBatchSize(final int batchSize) {
176 | this.batchSize = batchSize;
177 | }
178 |
179 | public Duration getLinger() {
180 | return linger;
181 | }
182 |
183 | public void setLinger(final Duration linger) {
184 | this.linger = linger;
185 | }
186 |
187 | public Duration getRequestTimeout() {
188 | return requestTimeout;
189 | }
190 |
191 | public void setRequestTimeout(final Duration requestTimeout) {
192 | this.requestTimeout = requestTimeout;
193 | }
194 |
195 | public boolean isEnableIdempotence() {
196 | return enableIdempotence;
197 | }
198 |
199 | public void setEnableIdempotence(final boolean enableIdempotence) {
200 | this.enableIdempotence = enableIdempotence;
201 | }
202 |
203 | public Optional<String> getTransactionalId() {
204 | return transactionalId;
205 | }
206 |
207 | public void setTransactionalId(final Optional<String> transactionalId) {
208 | this.transactionalId = transactionalId;
209 | }
210 |
211 | protected Map<String, Object> createBaseKafkaConfigurations() {
212 | final Map<String, Object> config = new HashMap<>();
213 |
214 | config.putAll(keySerializer.build(true));
215 | config.putAll(valueSerializer.build(false));
216 |
217 | security.filter(SecurityFactory::isEnabled)
218 | .ifPresent(securityFactory -> config.putAll(securityFactory.build()));
219 | acks.ifPresent(acksValue -> config.put(ProducerConfig.ACKS_CONFIG, acksValue));
220 | retries.ifPresent(retriesValue -> config.put(ProducerConfig.RETRIES_CONFIG, retriesValue));
221 | maxInFlightRequestsPerConnection.ifPresent(maxInFlightRequestsPerConnectionValue ->
222 | config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, maxInFlightRequestsPerConnectionValue));
223 | maxPollBlockTime.ifPresent(maxPollBlockTimeValue ->
224 | config.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxPollBlockTimeValue.toMilliseconds()));
225 | clientDNSLookup.ifPresent(clientDnsLookupValue -> config.put(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG, clientDnsLookupValue));
226 | clientId.ifPresent(clientIdValue -> config.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientIdValue));
227 | transactionalId.ifPresent(transactionalIdValue -> config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalIdValue));
228 |
229 | config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
230 | config.put(ProducerConfig.SEND_BUFFER_CONFIG, sendBufferBytes);
231 | config.put(ProducerConfig.RECEIVE_BUFFER_CONFIG, receiveBufferBytes);
232 | config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
233 | config.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
234 | config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, enableIdempotence);
235 | config.put(ProducerConfig.LINGER_MS_CONFIG, (int) linger.toMilliseconds());
236 | config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, (int) requestTimeout.toMilliseconds());
237 |
238 | if (metricsEnabled) {
239 | config.put(DropwizardMetricsReporter.SHOULD_INCLUDE_TAGS_CONFIG, Boolean.toString(includeTaggedMetrics));
240 | config.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, DropwizardMetricsReporter.class.getName());
241 | config.put(DropwizardMetricsReporter.METRICS_NAME_CONFIG, name);
242 | }
243 |
244 | return config;
245 | }
246 |
247 | protected void registerProducerHealthCheck(final HealthCheckRegistry healthChecks, final Producer<K, V> producer,
248 | final Collection<String> topics) {
249 | healthChecks.register(name, new KafkaProducerHealthCheck(producer, topics));
250 | }
251 |
252 | protected Producer<K, V> buildProducer(final Map<String, Object> config) {
253 | return new KafkaProducer<>(config);
254 | }
255 |
256 | protected void manageProducer(final LifecycleEnvironment lifecycle, final Producer<K, V> producer) {
257 | lifecycle.manage(new KafkaProducerManager(producer));
258 | }
259 |
260 | public Producer<K, V> build(final LifecycleEnvironment lifecycle,
261 | final HealthCheckRegistry healthChecks,
262 | final Collection<String> topics,
263 | @Nullable final Tracing tracing) {
264 | return build(lifecycle, healthChecks, topics, tracing, Collections.emptyMap());
265 | }
266 |
267 | public abstract Producer<K, V> build(final LifecycleEnvironment lifecycle,
268 | final HealthCheckRegistry healthChecks,
269 | Collection<String> topics,
270 | @Nullable Tracing tracing,
271 | Map<String, Object> configOverrides);
272 | }
273 |
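Override sketch, assuming the concrete factory applies configOverrides after createBaseKafkaConfigurations() (the usual pattern), which lets callers pin individual Kafka settings programmatically:

    final Map<String, Object> overrides = Collections.singletonMap(ProducerConfig.ACKS_CONFIG, "all");
    final Producer<String, String> producer = factory.build(
            environment.lifecycle(), environment.healthChecks(),
            Collections.singletonList("example-topic"), Tracing.current(), overrides);
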
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/KafkaTopicFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import com.fasterxml.jackson.annotation.JsonProperty;
4 | import org.apache.kafka.clients.admin.NewTopic;
5 |
6 | import java.util.Collections;
7 | import java.util.Map;
8 | import java.util.Objects;
9 |
10 | import jakarta.validation.constraints.Min;
11 | import jakarta.validation.constraints.NotEmpty;
12 | import jakarta.validation.constraints.NotNull;
13 |
14 | public class KafkaTopicFactory {
15 |
16 | @NotEmpty
17 | @JsonProperty
18 | private String name;
19 |
20 | @Min(1)
21 | @JsonProperty
22 | private int partitions;
23 |
24 | @Min(1)
25 | @JsonProperty
26 | private short replicationFactor;
27 |
28 | @NotNull
29 | @JsonProperty
30 | private Map<String, String> configs = Collections.emptyMap();
31 |
32 | public String getName() {
33 | return name;
34 | }
35 |
36 | public void setName(String name) {
37 | this.name = name;
38 | }
39 |
40 | public int getPartitions() {
41 | return partitions;
42 | }
43 |
44 | public void setPartitions(int partitions) {
45 | this.partitions = partitions;
46 | }
47 |
48 | public short getReplicationFactor() {
49 | return replicationFactor;
50 | }
51 |
52 | public void setReplicationFactor(short replicationFactor) {
53 | this.replicationFactor = replicationFactor;
54 | }
55 |
56 | public Map<String, String> getConfigs() {
57 | return configs;
58 | }
59 |
60 | public void setConfigs(Map<String, String> configs) {
61 | this.configs = configs;
62 | }
63 |
64 | public NewTopic asNewTopic() {
65 | return new NewTopic(this.name, this.partitions, this.replicationFactor).configs(this.configs);
66 | }
67 |
68 | @Override
69 | public boolean equals(Object o) {
70 | if (this == o) return true;
71 | if (o == null || getClass() != o.getClass()) return false;
72 | KafkaTopicFactory that = (KafkaTopicFactory) o;
73 | return partitions == that.partitions &&
74 | replicationFactor == that.replicationFactor &&
75 | name.equals(that.name) &&
76 | Objects.equals(configs, that.configs);
77 | }
78 |
79 | @Override
80 | public int hashCode() {
81 | return Objects.hash(name, partitions, replicationFactor, configs);
82 | }
83 |
84 | }
85 |
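Conversion sketch (values illustrative): a declared topic maps directly onto the Kafka admin API's NewTopic, which is what the managed topic creation above consumes.

    final KafkaTopicFactory topic = new KafkaTopicFactory();
    topic.setName("example-topic");
    topic.setPartitions(3);
    topic.setReplicationFactor((short) 1);
    final NewTopic newTopic = topic.asNewTopic();
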
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/MockKafkaConsumerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import com.codahale.metrics.health.HealthCheckRegistry;
5 | import com.fasterxml.jackson.annotation.JsonTypeName;
6 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
7 | import org.apache.kafka.clients.consumer.Consumer;
8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
9 | import org.apache.kafka.clients.consumer.MockConsumer;
10 | import org.apache.kafka.clients.consumer.OffsetResetStrategy;
11 | import org.checkerframework.checker.nullness.qual.Nullable;
12 |
13 | import java.util.Map;
14 |
15 | @JsonTypeName("mock")
16 | public class MockKafkaConsumerFactory<K, V> extends KafkaConsumerFactory<K, V> {
17 | @Override
18 | public Consumer<K, V> build(LifecycleEnvironment lifecycle, HealthCheckRegistry healthChecks, @Nullable Tracing tracing, @Nullable ConsumerRebalanceListener rebalanceListener, Map<String, Object> configOverrides) {
19 | return new MockConsumer<>(OffsetResetStrategy.EARLIEST);
20 | }
21 |
22 | @Override
23 | public boolean isValidConfiguration() {
24 | return true;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/MockKafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka;
2 |
3 | import brave.Tracing;
4 | import com.codahale.metrics.health.HealthCheckRegistry;
5 | import com.fasterxml.jackson.annotation.JsonTypeName;
6 | import io.dropwizard.lifecycle.setup.LifecycleEnvironment;
7 | import org.apache.kafka.clients.producer.MockProducer;
8 | import org.apache.kafka.clients.producer.Producer;
9 | import org.checkerframework.checker.nullness.qual.Nullable;
10 |
11 | import java.util.Collection;
12 | import java.util.Map;
13 |
14 | @JsonTypeName("mock")
15 | public class MockKafkaProducerFactory<K, V> extends KafkaProducerFactory<K, V> {
16 | @Override
17 | public Producer<K, V> build(LifecycleEnvironment lifecycle, HealthCheckRegistry healthChecks, Collection<String> topics, @Nullable Tracing tracing, Map<String, Object> configOverrides) {
18 | return new MockProducer<>();
19 | }
20 |
21 | @Override
22 | public boolean isValidConfiguration() {
23 | return true;
24 | }
25 | }
26 |
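Test sketch (arguments illustrative): selecting the "mock" type yields Kafka's in-memory MockProducer, so unit tests exercise the same build() contract without a broker.

    final KafkaProducerFactory<String, String> factory = new MockKafkaProducerFactory<>();
    final Producer<String, String> producer = factory.build(
            lifecycle, healthChecks, Collections.emptyList(), null, Collections.emptyMap());
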
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/ByteArrayDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.ByteArrayDeserializer;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | @JsonTypeName("byte-array")
8 | public class ByteArrayDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return ByteArrayDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/ByteBufferDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.ByteBufferDeserializer;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | @JsonTypeName("byte-buffer")
8 | public class ByteBufferDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return ByteBufferDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/BytesDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.BytesDeserializer;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | @JsonTypeName("bytes")
8 | public class BytesDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return BytesDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/DeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeInfo;
4 | import io.dropwizard.jackson.Discoverable;
5 | import org.apache.kafka.clients.consumer.ConsumerConfig;
6 | import org.apache.kafka.common.serialization.Deserializer;
7 |
8 | import java.util.Collections;
9 | import java.util.HashMap;
10 | import java.util.Map;
11 |
12 | @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
13 | public abstract class DeserializerFactory implements Discoverable {
14 | public abstract Class<? extends Deserializer<?>> getDeserializerClass();
15 |
16 | public Map<String, Object> build(final boolean isKey) {
17 | final String propertyName = isKey ?
18 | ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG : ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG;
19 |
20 | final Map<String, Object> config = new HashMap<>();
21 | config.put(propertyName, getDeserializerClass());
22 | return Collections.unmodifiableMap(config);
23 | }
24 |
25 | }
26 |
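Behaviour sketch: build(true) emits the key-deserializer entry and build(false) the value entry, so a consumer factory can merge both maps into a single Kafka configuration.

    final Map<String, Object> keyConfig = new ByteArrayDeserializerFactory().build(true);
    // keyConfig maps ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG to ByteArrayDeserializer.class
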
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/DoubleDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.apache.kafka.common.serialization.DoubleDeserializer;
6 |
7 | @JsonTypeName("double")
8 | public class DoubleDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return DoubleDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/FloatDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.apache.kafka.common.serialization.FloatDeserializer;
6 |
7 | @JsonTypeName("float")
8 | public class FloatDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return FloatDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/IntegerDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.apache.kafka.common.serialization.IntegerDeserializer;
6 |
7 | @JsonTypeName("integer")
8 | public class IntegerDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return IntegerDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/LongDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.apache.kafka.common.serialization.LongDeserializer;
6 |
7 | @JsonTypeName("long")
8 | public class LongDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return LongDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/ShortDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonTypeName;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.apache.kafka.common.serialization.ShortDeserializer;
6 |
7 | @JsonTypeName("short")
8 | public class ShortDeserializerFactory extends DeserializerFactory {
9 | @Override
10 | public Class<? extends Deserializer<?>> getDeserializerClass() {
11 | return ShortDeserializer.class;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/StringDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonIgnore;
4 | import com.fasterxml.jackson.annotation.JsonProperty;
5 | import com.fasterxml.jackson.annotation.JsonTypeName;
6 | import io.dropwizard.validation.ValidationMethod;
7 | import org.apache.kafka.clients.consumer.ConsumerConfig;
8 | import org.apache.kafka.common.serialization.Deserializer;
9 | import org.apache.kafka.common.serialization.StringDeserializer;
10 |
11 | import java.nio.charset.Charset;
12 | import java.util.Collections;
13 | import java.util.HashMap;
14 | import java.util.Map;
15 |
16 | import jakarta.validation.constraints.NotNull;
17 |
18 | @JsonTypeName("string")
19 | public class StringDeserializerFactory extends DeserializerFactory {
20 | @NotNull
21 | @JsonProperty
22 | private String encoding = "UTF8";
23 |
24 | public String getEncoding() {
25 | return encoding;
26 | }
27 |
28 | public void setEncoding(final String encoding) {
29 | this.encoding = encoding;
30 | }
31 |
32 | @JsonIgnore
33 | @ValidationMethod(message = "Invalid charset used for StringDeserializerFactory")
34 | public boolean isEncodingValid() {
35 | return Charset.isSupported(encoding);
36 | }
37 |
38 | @Override
39 | public Class<? extends Deserializer<?>> getDeserializerClass() {
40 | return StringDeserializer.class;
41 | }
42 |
43 | @Override
44 | public Map<String, Object> build(final boolean isKey) {
45 | final String deserializerPropertyName;
46 | final String encodingPropertyName;
47 | if (isKey) {
48 | deserializerPropertyName = ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG;
49 | encodingPropertyName = "key.deserializer.encoding";
50 | } else {
51 | deserializerPropertyName = ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG;
52 | encodingPropertyName = "value.deserializer.encoding";
53 | }
54 |
55 | final Map<String, Object> config = new HashMap<>();
56 | config.put(deserializerPropertyName, getDeserializerClass());
57 | config.put(encodingPropertyName, encoding);
58 | return Collections.unmodifiableMap(config);
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/deserializer/UUIDDeserializerFactory.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.deserializer;
2 |
3 | import com.fasterxml.jackson.annotation.JsonIgnore;
4 | import com.fasterxml.jackson.annotation.JsonProperty;
5 | import com.fasterxml.jackson.annotation.JsonTypeName;
6 | import io.dropwizard.validation.ValidationMethod;
7 | import org.apache.kafka.clients.consumer.ConsumerConfig;
8 | import org.apache.kafka.common.serialization.Deserializer;
9 | import org.apache.kafka.common.serialization.UUIDDeserializer;
10 |
11 | import java.nio.charset.Charset;
12 | import java.util.Collections;
13 | import java.util.HashMap;
14 | import java.util.Map;
15 |
16 | import jakarta.validation.constraints.NotNull;
17 |
18 | @JsonTypeName("uuid")
19 | public class UUIDDeserializerFactory extends DeserializerFactory {
20 | @NotNull
21 | @JsonProperty
22 | private String encoding = "UTF8";
23 |
24 | public String getEncoding() {
25 | return encoding;
26 | }
27 |
28 | public void setEncoding(final String encoding) {
29 | this.encoding = encoding;
30 | }
31 |
32 | @JsonIgnore
33 | @ValidationMethod(message = "Invalid charset used for UUIDDeserializerFactory")
34 | public boolean isEncodingValid() {
35 | return Charset.isSupported(encoding);
36 | }
37 |
38 | @Override
39 | public Class<? extends Deserializer<?>> getDeserializerClass() {
40 | return UUIDDeserializer.class;
41 | }
42 |
43 | @Override
44 | public Map<String, Object> build(final boolean isKey) {
45 | final String propertyName;
46 | final String encodingPropertyName;
47 | if (isKey) {
48 | propertyName = ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG;
49 | encodingPropertyName = "key.deserializer.encoding";
50 | } else {
51 | propertyName = ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG;
52 | encodingPropertyName = "value.deserializer.encoding";
53 | }
54 |
55 | final Map<String, Object> config = new HashMap<>();
56 | config.put(propertyName, getDeserializerClass());
57 | config.put(encodingPropertyName, encoding);
58 | return Collections.unmodifiableMap(config);
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/health/KafkaAdminHealthCheck.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.health;
2 |
3 | import com.codahale.metrics.health.HealthCheck;
4 | import org.apache.kafka.clients.admin.AdminClient;
5 | import org.apache.kafka.clients.admin.DescribeClusterResult;
6 |
7 | import java.util.ArrayList;
8 | import java.util.List;
9 |
10 | import static java.util.Objects.requireNonNull;
11 |
12 | public class KafkaAdminHealthCheck extends HealthCheck {
13 | private final AdminClient adminClient;
14 | private final String name;
15 |
16 | public KafkaAdminHealthCheck(final AdminClient adminClient, final String name) {
17 | this.adminClient = requireNonNull(adminClient);
18 | this.name = requireNonNull(name);
19 | }
20 |
21 | @Override
22 | protected Result check() throws Exception {
23 | try {
24 | final DescribeClusterResult response = adminClient.describeCluster();
25 |
26 | final boolean nodesNotEmpty = !response.nodes().get().isEmpty();
27 | final boolean clusterIdAvailable = response.clusterId() != null;
28 | final boolean aControllerExists = response.controller().get() != null;
29 |
30 | final List<String> errors = new ArrayList<>();
31 |
32 | if (!nodesNotEmpty) {
33 | errors.add("no nodes found for " + name);
34 | }
35 |
36 | if (!clusterIdAvailable) {
37 | errors.add("no cluster id available for " + name);
38 | }
39 |
40 | if (!aControllerExists) {
41 | errors.add("no active controller exists for " + name);
42 | }
43 |
44 | if (!errors.isEmpty()) {
45 | final String errorMessage = String.join(",", errors);
46 | return Result.unhealthy(errorMessage);
47 | }
48 |
49 | return Result.healthy();
50 | } catch (final Exception e) {
51 | return Result.unhealthy("Error describing Kafka cluster name=%s: %s", name, e);
52 | }
53 | }
54 | }
55 |
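Registration sketch (check name illustrative): the admin client factory registers this check under the client's name automatically, but it can equally be attached by hand.

    environment.healthChecks().register("kafka-admin",
            new KafkaAdminHealthCheck(adminClient, "kafka-admin"));
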
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/health/KafkaProducerHealthCheck.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.health;
2 |
3 | import com.codahale.metrics.health.HealthCheck;
4 | import org.apache.kafka.clients.producer.Producer;
5 |
6 | import java.util.Collection;
7 |
8 | import static java.util.Objects.requireNonNull;
9 |
10 | public class KafkaProducerHealthCheck extends HealthCheck {
11 |
12 | private final Producer<?, ?> producer;
13 | private final Collection<String> topics;
14 |
15 | public KafkaProducerHealthCheck(final Producer<?, ?> producer,
16 | final Collection<String> topics) {
17 | this.producer = requireNonNull(producer);
18 | this.topics = requireNonNull(topics);
19 | }
20 |
21 | @Override
22 | protected Result check() {
23 | try {
24 | topics.forEach(producer::partitionsFor);
25 | return Result.healthy();
26 | } catch (final Exception e) {
27 | return Result.unhealthy(e);
28 | }
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/managed/KafkaAdminClientManager.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.managed;
2 |
3 | import java.util.ArrayList;
4 | import java.util.Collection;
5 | import java.util.List;
6 | import java.util.Set;
7 |
8 | import io.dropwizard.lifecycle.Managed;
9 | import org.apache.kafka.clients.admin.AdminClient;
10 | import org.apache.kafka.clients.admin.NewTopic;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import static java.util.Objects.requireNonNull;
15 |
16 | public class KafkaAdminClientManager implements Managed {
17 | private static final Logger log = LoggerFactory.getLogger(KafkaAdminClientManager.class);
18 |
19 | private final AdminClient adminClient;
20 | private final String name;
21 | private final Collection<NewTopic> topics;
22 |
23 | public KafkaAdminClientManager(final AdminClient adminClient, final String name, final Collection<NewTopic> topics) {
24 | this.adminClient = requireNonNull(adminClient);
25 | this.name = requireNonNull(name);
26 | this.topics = requireNonNull(topics);
27 | }
28 |
29 | @Override
30 | public void start() throws Exception {
31 | log.info("Starting adminClient for name={}", name);
32 | if (!this.topics.isEmpty()) {
33 | log.trace("Searching existing topics in cluster.");
34 | final Set<String> existingTopics = this.adminClient.listTopics().names().get();
35 | final List<String> matchingTopics = new ArrayList<>();
36 | for (String t : existingTopics) {
37 | this.topics.removeIf(newTopic -> {
38 | boolean match = newTopic.name().equals(t);
39 | if (match) {
40 | matchingTopics.add(t);
41 | }
42 | return match;
43 | });
44 | }
45 | if (!matchingTopics.isEmpty()) {
46 | log.info("Not attempting to re-create existing topics {}.", matchingTopics);
47 | }
48 | this.adminClient.createTopics(this.topics);
49 | }
50 | }
51 |
52 | @Override
53 | public void stop() throws Exception {
54 | log.info("Shutting down adminClient for name={}", name);
55 | adminClient.close();
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/managed/KafkaConsumerManager.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.managed;
2 |
3 | import io.dropwizard.lifecycle.Managed;
4 | import org.apache.kafka.clients.consumer.Consumer;
5 |
6 | import static java.util.Objects.requireNonNull;
7 |
8 | public class KafkaConsumerManager implements Managed {
9 |
10 | private final Consumer<?, ?> consumer;
11 |
12 | public KafkaConsumerManager(final Consumer<?, ?> consumer) {
13 | this.consumer = requireNonNull(consumer);
14 | }
15 |
16 | @Override
17 | public void start() {
18 | // do nothing, to prevent concurrent modification exceptions
19 | }
20 |
21 | @Override
22 | public void stop() {
23 | consumer.wakeup();
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/managed/KafkaProducerManager.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.managed;
2 |
3 | import io.dropwizard.lifecycle.Managed;
4 | import org.apache.kafka.clients.producer.Producer;
5 |
6 | import java.util.Objects;
7 |
8 | import static java.util.Objects.requireNonNull;
9 |
10 | public class KafkaProducerManager implements Managed {
11 |
12 | private final Producer<?, ?> producer;
13 |
14 | public KafkaProducerManager(final Producer<?, ?> producer) {
15 | this.producer = requireNonNull(producer);
16 | }
17 |
18 | @Override
19 | public void start() throws Exception {
20 | // do nothing
21 | }
22 |
23 | @Override
24 | public void stop() throws Exception {
25 | producer.close();
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/io/dropwizard/kafka/metrics/DropwizardMetricsReporter.java:
--------------------------------------------------------------------------------
1 | package io.dropwizard.kafka.metrics;
2 |
3 | import com.codahale.metrics.Gauge;
4 | import com.codahale.metrics.MetricRegistry;
5 | import com.codahale.metrics.SharedMetricRegistries;
6 | import com.google.common.collect.ImmutableList;
7 | import org.apache.kafka.common.MetricName;
8 | import org.apache.kafka.common.metrics.KafkaMetric;
9 | import org.apache.kafka.common.metrics.MetricsReporter;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.List;
14 | import java.util.Map;
15 | import java.util.Objects;
16 | import java.util.Optional;
17 | import java.util.stream.Collectors;
18 |
19 | import static com.codahale.metrics.MetricRegistry.name;
20 |
21 | public class DropwizardMetricsReporter implements MetricsReporter {
22 | private static final Logger log = LoggerFactory.getLogger(DropwizardMetricsReporter.class);
23 |
24 | public static final String SHOULD_INCLUDE_TAGS_CONFIG = "io.dropwizard.kafka.metrics.includeTags";
25 | public static final String METRICS_NAME_CONFIG = "io.dropwizard.kafka.metrics.name";
26 |
27 | private final MetricRegistry registry;
28 | private boolean includeTags = false;
29 | private String name;
30 |
31 | public DropwizardMetricsReporter() {
32 | this.registry = SharedMetricRegistries.getOrCreate("default");
33 | }
34 |
35 | public DropwizardMetricsReporter(final MetricRegistry registry) {
36 | this.registry = registry;
37 | }
38 |
39 | @Override
40 | public void init(final List<KafkaMetric> metrics) {
41 | if (includeTags) {
42 | metrics.stream()
43 | .filter(metric -> shouldRegister(metric::metricValue))
44 | .forEach(metric -> createMetricNamesWithTags(metric.metricName())
45 | .forEach(metricName -> tryRegister(metricName, metric::metricValue)));
46 | } else {
47 | metrics.stream()
48 | .filter(metric -> shouldRegister(metric::metricValue))
49 | .forEach(metric -> tryRegister(createMetricName(metric.metricName()), metric::metricValue));
50 | }
51 | }
52 |
53 | String createMetricName(final MetricName metricName) {
54 | return name(name, metricName.group(), metricName.name());
55 | }
56 |
57 | List<String> createMetricNamesWithTags(final MetricName metricName) {
58 | if (metricName.tags().isEmpty()) {
59 | return ImmutableList.of(createMetricName(metricName));
60 | }
61 |
62 | return metricName.tags()
63 | .entrySet()
64 | .stream()
65 | .map(entry -> name(name, metricName.group(), metricName.name(), entry.getKey(), entry.getValue()))
66 | .collect(Collectors.toList());
67 | }
68 |
69 | @Override
70 | public void metricChange(final KafkaMetric metric) {
71 | final Gauge