├── .gitignore
├── .travis.yml
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── made-in-darmstadt.jpg
├── pom.xml
└── src
    ├── main
    │   ├── asciidoc
    │   │   ├── chapters
    │   │   │   ├── colophon.adoc
    │   │   │   ├── consuming-records.adoc
    │   │   │   ├── embedded-kafka-cluster.adoc
    │   │   │   ├── external-kafka-cluster.adoc
    │   │   │   ├── introduction.adoc
    │   │   │   ├── managing-topics.adoc
    │   │   │   └── producing-records.adoc
    │   │   ├── css
    │   │   │   ├── article-theme.yml
    │   │   │   ├── asciidoctor.css
    │   │   │   └── coderay-asciidoctor.css
    │   │   └── user-guide.adoc
    │   └── java
    │       └── net
    │           └── mguenther
    │               └── kafka
    │                   └── junit
    │                       ├── EmbeddedConnect.java
    │                       ├── EmbeddedConnectConfig.java
    │                       ├── EmbeddedKafka.java
    │                       ├── EmbeddedKafkaCluster.java
    │                       ├── EmbeddedKafkaClusterConfig.java
    │                       ├── EmbeddedKafkaConfig.java
    │                       ├── EmbeddedLifecycle.java
    │                       ├── EmbeddedZooKeeper.java
    │                       ├── EmbeddedZooKeeperConfig.java
    │                       ├── ExternalKafkaCluster.java
    │                       ├── KeyValue.java
    │                       ├── KeyValueMetadata.java
    │                       ├── LeaderAndIsr.java
    │                       ├── ObserveKeyValues.java
    │                       ├── Props.java
    │                       ├── ReadKeyValues.java
    │                       ├── RecordConsumer.java
    │                       ├── RecordProducer.java
    │                       ├── SendKeyValues.java
    │                       ├── SendKeyValuesTransactional.java
    │                       ├── SendValues.java
    │                       ├── SendValuesTransactional.java
    │                       ├── TopicConfig.java
    │                       ├── TopicManager.java
    │                       ├── Wait.java
    │                       └── provider
    │                           ├── DefaultRecordConsumer.java
    │                           ├── DefaultRecordProducer.java
    │                           └── DefaultTopicManager.java
    └── test
        ├── java
        │   └── net
        │       └── mguenther
        │           └── kafka
        │               └── junit
        │                   ├── ConnectorTest.java
        │                   ├── EmbeddedConnectConfigTest.java
        │                   ├── EmbeddedKafkaConfigTest.java
        │                   ├── EmbeddedZooKeeperConfigTest.java
        │                   ├── ExternalKafkaClusterTest.java
        │                   ├── KeyValueTest.java
        │                   ├── MultipleBrokersTest.java
        │                   ├── ObserveKeyValuesTest.java
        │                   ├── PropsTest.java
        │                   ├── ReadKeyValuesTest.java
        │                   ├── RecordConsumerTest.java
        │                   ├── RecordProducerTest.java
        │                   ├── SendKeyValuesTest.java
        │                   ├── SendKeyValuesTransactionalTest.java
        │                   ├── SendValuesTest.java
        │                   ├── SendValuesTransactionalTest.java
        │                   ├── SingleBrokerTest.java
        │                   ├── TopicConfigTest.java
        │                   ├── TopicManagerTest.java
        │                   └── connector
        │                       ├── InstrumentingConfig.java
        │                       ├── InstrumentingConfigBuilder.java
        │                       ├── InstrumentingSourceConnector.java
        │                       └── InstrumentingSourceTask.java
        └── resources
            └── log4j.properties
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled class file
2 | *.class
3 |
4 | # Log file
5 | *.log
6 |
7 | # BlueJ files
8 | *.ctxt
9 |
10 | # Mobile Tools for Java (J2ME)
11 | .mtj.tmp/
12 |
13 | # Package Files #
14 | *.jar
15 | *.war
16 | *.nar
17 | *.ear
18 | *.zip
19 | *.tar.gz
20 | *.rar
21 |
22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
23 | hs_err_pid*
24 |
25 | **/target
26 | **/*iml
27 | .idea
28 |
29 | .classpath
30 | .factorypath
31 | .project
32 | .settings
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 |
3 | language: java
4 | jdk:
5 | - openjdk8
6 |
7 | services:
8 | - docker
9 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | markus.guenther@gmail.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kafka for JUnit
2 |
3 | [](https://maven-badges.herokuapp.com/maven-central/net.mguenther.kafka/kafka-junit)
4 |
5 | > [!IMPORTANT]
6 | > Kafka for JUnit will not be updated regularly any longer. Unfortunately, I do not have the time to allocate my attention to this project in a sustainable fashion. Updates and bugfixes **may** still happen (support for new versions, ...), but if they do, it will be solely based on my personal requirements. Thank you for your understanding.
7 |
8 | Kafka for JUnit enables developers to start and stop a complete Kafka cluster comprised of Kafka brokers and distributed Kafka Connect workers from within a JUnit test. It also provides a rich set of convenient accessors to interact with such an embedded Kafka cluster in a lean and non-obtrusive way.
9 |
10 | Kafka for JUnit can be used both to whitebox-test individual Kafka-based components of your application and to blackbox-test applications that offer an incoming and/or outgoing Kafka-based interface.
11 |
12 | ## Using Kafka for JUnit in your tests
13 |
14 | Kafka for JUnit provides the necessary infrastructure to exercise your Kafka-based components against an embeddable Kafka cluster. However, Kafka for JUnit also has you covered if you are simply interested in using the convenient accessors against Kafka clusters that are already present in your infrastructure. Check out the sections *Working with an embedded Kafka cluster* and *Working with an external Kafka cluster* in the [user's guide](https://mguenther.github.io/kafka-junit) for more information.
15 |
16 | ```java
17 | import org.junit.jupiter.api.AfterEach;
18 | import org.junit.jupiter.api.BeforeEach;
19 | import org.junit.jupiter.api.Test;
20 |
21 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
22 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
23 |
24 | class KafkaTest {
25 |
26 | private EmbeddedKafkaCluster kafka;
27 |
28 | @BeforeEach
29 | void setupKafka() {
30 | kafka = provisionWith(defaultClusterConfig());
31 | kafka.start();
32 | }
33 |
34 | @AfterEach
35 | void tearDownKafka() {
36 | kafka.stop();
37 | }
38 |
39 | @Test
40 | void shouldWaitForRecordsToBePublished() throws Exception {
41 | kafka.send(to("test-topic", "a", "b", "c"));
42 | kafka.observe(on("test-topic", 3));
43 | }
44 | }
45 | ```
46 |
47 | This starts an embedded Kafka cluster and submits three records to the topic named `test-topic`. The call to `kafka.observe(on("test-topic", 3))` watches that same topic for a configurable amount of time and checks if it observes the previously submitted records. If it doesn't, Kafka for JUnit raises an `AssertionError`, which fails the test. Of course, [Kafka for JUnit provides many more ways to interact with a Kafka cluster](https://mguenther.github.io/kafka-junit).
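
The `to(...)` and `on(...)` calls in the example are static factory methods rather than part of the JUnit API, so a complete test class would add static imports for them; a minimal sketch of the imports the example assumes (taken from the `SendValues` and `ObserveKeyValues` classes of this project) looks like this:

```java
// static factories assumed by the example above
import static net.mguenther.kafka.junit.ObserveKeyValues.on;
import static net.mguenther.kafka.junit.SendValues.to;
```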
48 |
49 | Since `EmbeddedKafkaCluster` implements the `AutoCloseable` interface, you can achieve the same behavior using a `try-with-resources`-construct.
50 |
51 | ```java
52 | import org.junit.jupiter.api.Test;
53 |
54 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
55 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
56 |
57 | class KafkaTest {
58 |
59 | @Test
60 | void shouldWaitForRecordsToBePublished() throws Exception {
61 |
62 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) {
63 | kafka.start();
64 | kafka.send(to("test-topic", "a", "b", "c"));
65 | kafka.observe(on("test-topic", 3));
66 | }
67 | }
68 | }
69 | ```
70 |
71 | ### Supported versions of Apache Kafka
72 |
73 | | Version of Kafka for JUnit | Supports (up to) |
74 | |----------------------------|--------------------|
75 | | 3.6.0 | Apache Kafka 3.6.1 |
76 | | 3.5.1 | Apache Kafka 3.5.1 |
77 | | 3.4.0 | Apache Kafka 3.4.0 |
78 | | 3.3.0 | Apache Kafka 3.3.1 |
79 | | 3.2.2 | Apache Kafka 3.2.3 |
80 | | 3.1.1 | Apache Kafka 3.1.0 |
81 | | 3.0.1 | Apache Kafka 3.0.0 |
82 | | 2.8.0 | Apache Kafka 2.8.0 |
83 | | 2.7.0 | Apache Kafka 2.7.0 |
84 | | 2.6.0 | Apache Kafka 2.6.0 |
85 | | 2.5.1 | Apache Kafka 2.5.1 |
86 | | 2.4.0 | Apache Kafka 2.4.0 |
87 | | 2.3.0 | Apache Kafka 2.3.0 |
88 | | 2.2.0 | Apache Kafka 2.2.1 |
89 | | 2.1.1 | Apache Kafka 2.1.1 |
90 | | 2.0.0 | Apache Kafka 2.0.0 |
91 | | 1.0.0 | Apache Kafka 1.1.1 |
92 |
93 | ## Interacting with the Kafka cluster
94 |
95 | See the [comprehensive user's guide](https://mguenther.github.io/kafka-junit) for examples on how to interact with the Kafka cluster from within your JUnit test.
96 |
97 | ## License
98 |
99 | This work is released under the terms of the Apache 2.0 license.
100 |
101 |
102 |
106 |
107 |
--------------------------------------------------------------------------------
/made-in-darmstadt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mguenther/kafka-junit/015fa438199750cf3a50209744ed0b24a70eb5a6/made-in-darmstadt.jpg
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/colophon.adoc:
--------------------------------------------------------------------------------
1 | [[section:colophon]]
2 |
3 | == License
4 |
5 | This work is released under the terms of the Apache 2.0 license.
6 |
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/embedded-kafka-cluster.adoc:
--------------------------------------------------------------------------------
1 | [[section:embedded-kafka-cluster]]
2 |
3 | == Working with an embedded Kafka cluster
4 |
5 | Kafka for JUnit is able to spin up a fully-fledged embedded Kafka cluster that is accessible via class `EmbeddedKafkaCluster`. `EmbeddedKafkaCluster` implements the interfaces `RecordProducer`, `RecordConsumer` and `TopicManager` and thus provides convenient accessors to interact with the cluster.
6 |
7 | Using `EmbeddedKafkaCluster` in a JUnit test is quite simple. The necessary code to set it up is minimal if you are comfortable with the default configuration.
8 |
9 | [source,java]
10 | ----
11 | import org.junit.jupiter.api.AfterEach;
12 | import org.junit.jupiter.api.BeforeEach;
13 |
14 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
15 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
16 |
17 | class KafkaTest {
18 |
19 | private EmbeddedKafkaCluster kafka;
20 |
21 | @BeforeEach
22 | void setupKafka() {
23 | kafka = provisionWith(defaultClusterConfig());
24 | kafka.start();
25 | }
26 |
27 | @AfterEach
28 | void tearDownKafka() {
29 | kafka.stop();
30 | }
31 | }
32 | ----
33 |
34 | Kafka for JUnit uses the Builder pattern extensively to provide a fluent API when provisioning an embedded Kafka cluster. Let's take a closer look at method `EmbeddedKafkaCluster.provisionWith`. This method consumes a configuration of type `EmbeddedKafkaClusterConfig`. `EmbeddedKafkaClusterConfig` uses defaults for the Kafka broker and ZooKeeper. By default, Kafka Connect will not be provisioned at all. The builder of `EmbeddedKafkaClusterConfig` provides a `configure` method that is overloaded to accept configurations of type `EmbeddedZooKeeperConfig`, `EmbeddedKafkaConfig` and `EmbeddedConnectConfig`. The following listing demonstrates how to adjust the configuration of the embedded Kafka broker with respect to the default number of partitions for newly created topics.
35 |
36 | [source,java]
37 | ----
38 | EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig()
39 |     .configure(brokers()
40 | .with(KafkaConfig$.MODULE$.NumPartitionsProp(), "5")));
41 | ----
42 |
43 | The builders for those configurations provide a uniform interface for overriding defaults, comprising the two methods `with(String propertyName, T value)` and `withAll(java.util.Properties overrides)`. To override a default value, simply provide the name of the configuration parameter as defined by the respective Kafka component along with the new value.
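
If several broker properties have to be overridden at once, `withAll` accepts a pre-populated `java.util.Properties` instance. The following listing is a minimal sketch of this; it reuses only configuration parameters that appear elsewhere in this guide, and the chosen values are examples only.

[source,java]
----
Properties brokerOverrides = new Properties();
brokerOverrides.put(KafkaConfig$.MODULE$.NumPartitionsProp(), "5");
brokerOverrides.put(KafkaConfig$.MODULE$.DefaultReplicationFactorProp(), "1");

EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig()
    .configure(brokers()
        .withAll(brokerOverrides)));
----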
44 |
45 | Using the default setting will provide you with a single embedded Kafka broker. This ought to be sufficient for most cases. However, there are scenarios which require testing against multiple brokers that form a cluster. Forming an embedded cluster with multiple brokers is done by adjusting the default provisioning of your test case. See the listing underneath for an example.
46 |
47 | [source,java]
48 | ----
49 |
50 | EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig()
51 | .configure(brokers()
52 | .withNumberOfBrokers(3)
53 | .with(KafkaConfig$.MODULE$.NumPartitionsProp(), "5")
54 | .with(KafkaConfig$.MODULE$.DefaultReplicationFactorProp(), "3")
55 | .with(KafkaConfig$.MODULE$.MinInSyncReplicasProp(), "2")
56 | .with(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), "3")
57 | .with(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp(), "3")
58 | .with(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp(), "2")));
59 | ----
60 |
61 | Using this configuration, we end up with a total of three brokers that form an embedded Kafka cluster, while the defaults for topic partitions and replicas have been adjusted to be consistent with the size of the cluster.
62 |
63 | Of course, you can also use the `try-with-resources`-pattern to fire up an embedded cluster. Have a look at the following test setup.
64 |
65 | [source,java]
66 | ----
67 | import org.junit.jupiter.api.Test;
68 |
69 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
70 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
71 |
72 | class KafkaTest {
73 |
74 | @Test
75 | void shouldWaitForRecordsToBePublished() throws Exception {
76 |
77 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) {
78 | kafka.start();
79 | kafka.send(to("test-topic", "a", "b", "c"));
80 | kafka.observe(on("test-topic", 3));
81 | }
82 | }
83 | }
84 | ----
85 |
86 | See the sections on <<section:producing-records>>, <<section:consuming-records>> and <<section:managing-topics>> for further reference on how to interact with the cluster.
87 |
88 | === Failure Modes
89 |
90 | `EmbeddedKafkaCluster` provides the means to disconnect - and of course re-connect - specific embedded Kafka brokers. Every broker in the embedded cluster gets a broker ID assigned during cluster formation. This broker ID is an `Integer`-based value that starts at 1 and increases by 1 for every subsequent embedded Kafka broker that is started during cluster formation.
91 |
92 | A cluster stays fixed with respect to its maximum number of embedded brokers. But individual brokers can, given their broker ID, be disconnected from the rest of the cluster to test for failure scenarios. Such failure scenarios include:
93 |
94 | * How does my Kafka-based component behave in the presence of broker outages?
95 | * What happens if the In-Sync-Replica Set (ISR) of a topic that my application consumes from shrinks below its minimum size?
96 | * Is my application able to progress after brokers re-connect and form a working cluster?
97 |
98 | ==== Disconnect and reconnect a single broker
99 |
100 | The following listing shows how to disconnect and re-connect a certain broker, while fetching the ISR of a dedicated topic in between these operations to determine whether the cluster behaves correctly.
101 |
102 | NOTE: If you do use this feature of Kafka for JUnit, then please give the embedded cluster some time to handle broker churn. Identifying that a leader for a topic-partition is not available and conducting the leader election takes some time. In the example underneath we introduce a delay of five seconds in between operations that affect cluster membership.
103 |
104 | [source,java]
105 | ----
106 | kafka.createTopic(TopicConfig.withName("test-topic")
107 | .withNumberOfPartitions(5)
108 | .withNumberOfReplicas(3));
109 |
110 | delay(5);
111 |
112 | Set<Integer> leaders = kafka.fetchLeaderAndIsr("test-topic")
113 | .values()
114 | .stream()
115 | .map(LeaderAndIsr::getLeader)
116 | .collect(Collectors.toSet());
117 |
118 | assertThat(leaders.contains(1)).isTrue();
119 | assertThat(leaders.contains(2)).isTrue();
120 | assertThat(leaders.contains(3)).isTrue();
121 |
122 | kafka.disconnect(1);
123 |
124 | delay(5);
125 |
126 | Set<Integer> leadersAfterDisconnect = kafka.fetchLeaderAndIsr("test-topic")
127 | .values()
128 | .stream()
129 | .map(LeaderAndIsr::getLeader)
130 | .collect(Collectors.toSet());
131 |
132 | assertThat(leadersAfterDisconnect.contains(1)).isFalse();
133 | assertThat(leadersAfterDisconnect.contains(2)).isTrue();
134 | assertThat(leadersAfterDisconnect.contains(3)).isTrue();
135 |
136 | kafka.connect(1);
137 |
138 | delay(5);
139 |
140 | Set<Integer> leadersAfterReconnect = kafka.fetchLeaderAndIsr("test-topic")
141 | .values()
142 | .stream()
143 | .map(LeaderAndIsr::getLeader)
144 | .collect(Collectors.toSet());
145 |
146 | assertThat(leadersAfterReconnect.contains(1)).isTrue();
147 | assertThat(leadersAfterReconnect.contains(2)).isTrue();
148 | assertThat(leadersAfterReconnect.contains(3)).isTrue();
149 | ----
150 |
151 | ==== Disconnect until In-Sync-Replica Set falls below minimum size
152 |
153 | The following listing shows how to disconnect the In-Sync-Replica Set (ISR) for a given topic until its ISR falls below its minimum size.
154 |
155 | NOTE: If you do use this feature of Kafka for JUnit, then please give the embedded cluster some time to handle broker churn. Identifying that a leader for a topic-partition is not available and conducting the leader election takes some time. In the example underneath we introduce a delay of five seconds in between operations that affect cluster membership.
156 |
157 | [source,java]
158 | ----
159 | // Create a topic and configure the number of replicas as well as the size of the ISR
160 |
161 | kafka.createTopic(TopicConfig.withName("test-topic")
162 | .withNumberOfPartitions(5)
163 | .withNumberOfReplicas(3)
164 | .with("min.insync.replicas", "2"));
165 |
166 | // Wait a bit to give the cluster a chance to properly assign topic-partitions to leaders
167 |
168 | delay(5);
169 |
170 | // Disconnect until the remaining number of brokers fall below the minimum ISR size
171 |
172 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic");
173 |
174 | delay(5);
175 |
176 | // Submitting records to this topic will yield a NotEnoughReplicasException
177 |
178 | kafka.send(SendValues.to("test-topic", "A"));
179 | ----
180 |
181 | The last line of the listing shows the effect of an ISR that can no longer operate reliably. Your Kafka-based component or application would run concurrently with this test, so that you can observe whether it behaves correctly (e.g. by checking that the component progresses normally once the ISR is restored).
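
A test typically asserts this failure explicitly. The following sketch is not part of the original listing and assumes `fail` is statically imported from `org.junit.jupiter.api.Assertions`; the exact exception type that surfaces depends on how the underlying producer reports the error, which is why the catch block is deliberately broad.

[source,java]
----
try {
    kafka.send(SendValues.to("test-topic", "A"));
    fail("expected the send operation to fail while the ISR is below its minimum size");
} catch (Exception e) {
    // expected: the cause chain should point to a NotEnoughReplicasException
}
----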
182 |
183 | ==== Restoring the In-Sync-Replica Set
184 |
185 | Restoring the In-Sync-Replica Set is easy, as method `disconnectUntilIsrFallsBelowMinimumSize` returns the broker IDs of all brokers that have been deactivated while shrinking the ISR. The following listing shows how to restore the ISR.
186 |
187 | [source,java]
188 | ----
189 | kafka.createTopic(TopicConfig.withName("test-topic")
190 | .withNumberOfPartitions(5)
191 | .withNumberOfReplicas(3)
192 | .with("min.insync.replicas", "2"));
193 |
194 | delay(5);
195 |
196 | Set<Integer> disconnectedBrokers = kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic");
197 |
198 | delay(5);
199 |
200 | // Do some testing, trigger some operations, observe the behavior of your application
201 |
202 | kafka.connect(disconnectedBrokers);
203 |
204 | // Give the cluster some time to assign leaders and reestablish the ISR
205 |
206 | delay(5);
207 |
208 | // Do some more testing ...
209 | ----
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/external-kafka-cluster.adoc:
--------------------------------------------------------------------------------
1 | [[section:external-kafka-cluster]]
2 |
3 | == Working with an external Kafka cluster
4 |
5 | Kafka for JUnit can be used to work with an external Kafka cluster. This is useful if you want to execute your tests against a staging/testing environment or if you already use other testing libraries (e.g. Testcontainers) that spin up a Kafka cluster on your local machine, but want to use the convenient accessors provided by Kafka for JUnit.
6 |
7 | Class `ExternalKafkaCluster` integrates an external cluster. Just like `EmbeddedKafkaCluster`, `ExternalKafkaCluster` implements the interfaces `RecordProducer`, `RecordConsumer` and `TopicManager` and thus provides convenient accessors to interact with the cluster.
8 |
9 | Using `ExternalKafkaCluster` in a JUnit test is easy. The listing below shows the necessary code to use `ExternalKafkaCluster` in combination with Testcontainers.
10 |
11 | [source,java]
12 | ----
13 | @Testcontainers
14 | class ExternalKafkaClusterTest {
15 |
16 | // This is not part of Kafka for JUnit, but a sub-module provided
17 | // by Testcontainers (org.testcontainers:kafka)
18 | @Container
19 | private KafkaContainer kafkaContainer = new KafkaContainer();
20 |
21 | @Test
22 | @DisplayName("should be able to observe records written to an external Kafka cluster")
23 | void externalKafkaClusterShouldWorkWithExternalResources() throws Exception {
24 |
25 | ExternalKafkaCluster kafka = ExternalKafkaCluster.at(kafkaContainer.getBootstrapServers());
26 |
27 |         // use the accessors that the cluster provides to interact with the Kafka container
28 |
29 | [...]
30 | }
31 | }
32 | ----
33 |
34 | See the sections on <<section:producing-records>>, <<section:consuming-records>> and <<section:managing-topics>> for further reference on how to interact with the cluster.
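
To give an impression of what the elided part of the listing above could look like, the following sketch combines `ExternalKafkaCluster` with accessors that are described in the chapters referenced above (topic creation, producing and observing records).

[source,java]
----
ExternalKafkaCluster kafka = ExternalKafkaCluster.at(kafkaContainer.getBootstrapServers());

kafka.createTopic(TopicConfig.withName("test-topic"));
kafka.send(SendValues.to("test-topic", "a", "b", "c"));
kafka.observe(ObserveKeyValues.on("test-topic", 3));
----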
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/introduction.adoc:
--------------------------------------------------------------------------------
1 | [[section:introduction]]
2 |
3 | == Introduction
4 |
5 | Kafka for JUnit enables developers to start and stop a complete Kafka cluster comprised of Kafka brokers and distributed Kafka Connect workers from within a JUnit test. It also provides a rich set of convenient accessors to interact with such an embedded Kafka cluster in a lean and non-obtrusive way.
6 |
7 | Kafka for JUnit can be used both to whitebox-test individual Kafka-based components of your application and to blackbox-test applications that offer an incoming and/or outgoing Kafka-based interface.
8 |
9 | == Using Kafka for JUnit in your tests
10 |
11 | Kafka for JUnit provides the necessary infrastructure to exercise your Kafka-based components against an embeddable Kafka cluster (cf. <<section:embedded-kafka-cluster>>). However, Kafka for JUnit also has you covered if you are simply interested in using the convenient accessors against Kafka clusters that are already present in your infrastructure (cf. section <<section:external-kafka-cluster>>).
12 |
13 | [source,java]
14 | ----
15 | import org.junit.jupiter.api.AfterEach;
16 | import org.junit.jupiter.api.BeforeEach;
17 | import org.junit.jupiter.api.Test;
18 |
19 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
20 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
21 |
22 | class KafkaTest {
23 |
24 | private EmbeddedKafkaCluster kafka;
25 |
26 | @BeforeEach
27 | void setupKafka() {
28 | kafka = provisionWith(defaultClusterConfig());
29 | kafka.start();
30 | }
31 |
32 | @AfterEach
33 | void tearDownKafka() {
34 | kafka.stop();
35 | }
36 |
37 | @Test
38 | void shouldWaitForRecordsToBePublished() throws Exception {
39 | kafka.send(to("test-topic", "a", "b", "c"));
40 | kafka.observe(on("test-topic", 3));
41 | }
42 | }
43 | ----
44 |
45 | This starts an embedded Kafka cluster and submits three records to the topic named `test-topic`. The call to `kafka.observe(on("test-topic", 3))` watches that same topic for a configurable amount of time and checks if it observes the previously submitted records. If it doesn't, Kafka for JUnit raises an `AssertionError`, which fails the test. Of course, Kafka for JUnit provides many more ways to interact with a Kafka cluster.
46 |
47 | Since `EmbeddedKafkaCluster` implements the `AutoCloseable` interface, you can achieve the same behavior using a `try-with-resources`-construct.
48 |
49 | [source,java]
50 | ----
51 | import org.junit.jupiter.api.Test;
52 |
53 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
54 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
55 |
56 | class KafkaTest {
57 |
58 | @Test
59 | void shouldWaitForRecordsToBePublished() throws Exception {
60 |
61 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) {
62 | kafka.start();
63 | kafka.send(to("test-topic", "a", "b", "c"));
64 | kafka.observe(on("test-topic", 3));
65 | }
66 | }
67 | }
68 | ----
69 |
70 | === Supported versions of Apache Kafka
71 |
72 | |===
73 | | Version of Kafka for JUnit | Supports
74 |
75 | | 3.6.0
76 | | Apache Kafka 3.6.1
77 |
78 | | 3.5.1
79 | | Apache Kafka 3.5.1
80 |
81 | | 3.4.0
82 | | Apache Kafka 3.4.0
83 |
84 | | 3.3.0
85 | | Apache Kafka 3.3.1
86 |
87 | | 3.2.2
88 | | Apache Kafka 3.2.3
89 |
90 | | 3.1.1
91 | | Apache Kafka 3.1.0
92 |
93 | | 3.0.1
94 | | Apache Kafka 3.0.0
95 |
96 | | 2.8.0
97 | | Apache Kafka 2.8.0
98 |
99 | | 2.7.0
100 | | Apache Kafka 2.7.0
101 |
102 | | 2.6.0
103 | | Apache Kafka 2.6.0
104 |
105 | | 2.5.1
106 | | Apache Kafka 2.5.1
107 |
108 | | 2.4.0
109 | | Apache Kafka 2.4.0
110 |
111 | | 2.3.0
112 | | Apache Kafka 2.3.0
113 |
114 | | 2.2.0
115 | | Apache Kafka 2.2.1
116 |
117 | | 2.1.1
118 | | Apache Kafka 2.1.1
119 |
120 | | 2.0.0
121 | | Apache Kafka 2.0.0
122 |
123 | | 1.0.0
124 | | Apache Kafka 1.1.1
125 |
126 | |===
127 |
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/managing-topics.adoc:
--------------------------------------------------------------------------------
1 | [[section:managing-topics]]
2 |
3 | == Managing topics
4 |
5 | Class `EmbeddedKafkaCluster` exposes convenience methods for managing Kafka topics. Have a look at the `TopicManager` interface (Javadoc omitted for brevity).
6 |
7 | [source,java]
8 | ----
9 | public interface TopicManager {
10 | void createTopic(TopicConfig config);
11 | void deleteTopic(String topic);
12 | boolean exists(String topic);
13 | Map<Integer, LeaderAndIsr> fetchLeaderAndIsr(String topic);
14 | Properties fetchTopicConfig(String topic);
15 | }
16 | ----
17 |
18 | Implementations of the `TopicManager` interface currently use the `AdminClient` implementation of the Kafka Client library for topic management.
19 |
20 | All operations are executed *synchronously*.
21 |
22 | === Creating a topic
23 |
24 | [source,java]
25 | ----
26 | kafka.createTopic(TopicConfig.withName("test-topic"));
27 | ----
28 |
29 | NOTE: By default, Kafka for JUnit enables the automatic creation of topics at the broker with defaults that should be sensible for local testing. However, if you need to create a topic with a specific replication factor or number of partitions that deviates from the default settings, you should create that topic with the respective settings before writing the first Kafka record to it.
30 |
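As an illustration, the following sketch creates a topic with explicit partition and replica counts and overrides a single topic-level property. The builder methods are the same ones used throughout this guide; the chosen values are examples only.

[source,java]
----
kafka.createTopic(TopicConfig.withName("test-topic")
    .withNumberOfPartitions(5)
    .withNumberOfReplicas(1)
    .with("cleanup.policy", "compact"));
----
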
31 | === Deleting a topic
32 |
33 | [source,java]
34 | ----
35 | kafka.deleteTopic("test-topic");
36 | ----
37 |
38 | NOTE: Deleting a topic will only set a deletion marker for that topic. The topic may not be deleted immediately after `deleteTopic` completes.
39 |
40 | === Determine whether a topic exists
41 |
42 | [source,java]
43 | ----
44 | kafka.exists("test-topic");
45 | ----
46 |
47 | NOTE: Returns `true` even if the topic is marked for deletion.
48 |
49 | === Retrieving the leader and the In-Sync-Replica Set (ISR)
50 |
51 | In case you have multiple brokers running and want to query their assignments and roles for a specific topic, you can use `TopicManager#fetchLeaderAndIsr` to retrieve that kind of information. The method returns an unmodifiable `java.util.Map` of `LeaderAndIsr` instances by their designated partition. The listing underneath shows how to retrieve this information for the topic named `test-topic`.
52 |
53 | [source,java]
54 | ----
55 | Map<Integer, LeaderAndIsr> leaderAndIsr = kafka.fetchLeaderAndIsr("test-topic");
56 | ----
57 |
58 | The type `LeaderAndIsr` is not to be confused with the same type in package `kafka.api`. The `LeaderAndIsr` implementation of Kafka for JUnit is a simple transfer object that only contains the ID of the leader node and the IDs of all nodes that comprise the ISR.
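
A small sketch of how this information could be inspected per partition is shown below. The accessor `getLeader` is used elsewhere in this guide; the accessor for the ISR (`getIsr` in this sketch) is assumed to follow the same naming convention.

[source,java]
----
Map<Integer, LeaderAndIsr> leaderAndIsr = kafka.fetchLeaderAndIsr("test-topic");

leaderAndIsr.forEach((partition, state) ->
    System.out.println("partition " + partition
        + " has leader " + state.getLeader()
        + " and ISR " + state.getIsr()));
----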
59 |
60 | === Retrieving the topic configuration remotely
61 |
62 | Looking up the topic configuration by accessing the cluster is easily done using the `TopicManager`.
63 |
64 | [source,java]
65 | ----
66 | Properties topicConfig = kafka.fetchTopicConfig("test-topic");
67 | ----
--------------------------------------------------------------------------------
/src/main/asciidoc/chapters/producing-records.adoc:
--------------------------------------------------------------------------------
1 | [[section:producing-records]]
2 |
3 | == Producing records
4 |
5 | Class `EmbeddedKafkaCluster` exposes convenience methods for producing new Kafka records. Have a look at the `RecordProducer` interface (Javadoc omitted for brevity).
6 |
7 | [source,java]
8 | ----
9 | public interface RecordProducer {
10 |
11 | <V> List<RecordMetadata> send(SendValues<V> sendRequest) throws InterruptedException;
12 | <V> List<RecordMetadata> send(SendValuesTransactional<V> sendRequest) throws InterruptedException;
13 | <K, V> List<RecordMetadata> send(SendKeyValues<K, V> sendRequest) throws InterruptedException;
14 | <K, V> List<RecordMetadata> send(SendKeyValuesTransactional<K, V> sendRequest) throws InterruptedException;
15 | }
16 | ----
17 |
18 | Calling `send` using an instance of `SendValues` does just that: It produces non-keyed Kafka records that only feature a value. The key of a record that has been produced this way is simply `null`. If you wish to associate a key, you can do so by passing an instance of `SendKeyValues` to the `send` method. Both `SendValues` and `SendKeyValues` use the link:https://en.wikipedia.org/wiki/Builder_pattern[Builder pattern] so that creating the respective send parameterization is easy and does not pollute your test code with any kind of boilerplate.
19 |
20 | Implementations of the `RecordProducer` interface use the high-level producer API that comes with Apache Kafka. Hence, the underlying producer is a `KafkaProducer`. This `KafkaProducer` is fully parameterizable via the builders of both `SendValues` and `SendKeyValues`.
21 |
22 | All `send` operations are executed *synchronously*.
23 |
24 | With these abstractions in place, sending content to your embedded Kafka cluster is easy. Have a look at the following examples. One thing you should notice is that you do not have to specify `bootstrap.servers`. Kafka for JUnit adjusts a given client configuration so that you can start off with meaningful defaults that work out-of-the-box. You only have to provide configuration overrides if it is absolutely necessary for your test.
25 |
26 | === Sending non-keyed values using defaults
27 |
28 | [source,java]
29 | ----
30 | kafka.send(SendValues.to("test-topic", "a", "b", "c"));
31 | ----
32 |
33 | === Sending non-keyed values using overrides
34 |
35 | [source,java]
36 | ----
37 | kafka.send(SendValues.to("test-topic", "a", "b", "c")
38 | .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
39 | .with(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1"));
40 | ----
41 |
42 | === Sending non-keyed values transactionally
43 |
44 | [source,java]
45 | ----
46 | kafka
47 | .send(SendValuesTransactional
48 | .inTransaction("test-topic", Arrays.asList("a", "b", "c")));
49 | ----
50 |
51 | NOTE: The API of Kafka for JUnit has been designed with great care and readability in mind. Using `static` imports for factory methods shows that we can interact with the embedded Kafka cluster in a lean and readable way.
52 |
53 | [source,java]
54 | ----
55 | kafka.send(inTransaction("test-topic", Arrays.asList("a", "b", "c")));
56 | ----
57 |
58 | === Sending keyed records using defaults
59 |
60 | [source,java]
61 | ----
62 | List<KeyValue<String, String>> records = new ArrayList<>();
63 |
64 | records.add(new KeyValue<>("aggregate", "a"));
65 | records.add(new KeyValue<>("aggregate", "b"));
66 | records.add(new KeyValue<>("aggregate", "c"));
67 |
68 | kafka.send(SendKeyValues.to("test-topic", records));
69 | ----
70 |
71 | === Sending keyed records using overrides
72 |
73 | [source,java]
74 | ----
75 | List<KeyValue<String, String>> records = new ArrayList<>();
76 |
77 | records.add(new KeyValue<>("aggregate", "a"));
78 | records.add(new KeyValue<>("aggregate", "b"));
79 | records.add(new KeyValue<>("aggregate", "c"));
80 |
81 | kafka.send(SendKeyValues.to("test-topic", records)
82 | .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
83 | .with(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1"));
84 | ----
85 |
86 | === Sending keyed records transactionally
87 |
88 | [source,java]
89 | ----
90 | List<KeyValue<String, String>> records = new ArrayList<>();
91 |
92 | records.add(new KeyValue<>("aggregate", "a"));
93 | records.add(new KeyValue<>("aggregate", "b"));
94 | records.add(new KeyValue<>("aggregate", "c"));
95 |
96 | kafka.send(inTransaction("test-topic", records));
97 | ----
98 |
99 | === Sending records or values transactionally to multiple topics
100 |
101 | [source,java]
102 | ----
103 | kafka.send(SendValuesTransactional
104 | .inTransaction("test-topic-1", Arrays.asList("a", "b"))
105 | .inTransaction("test-topic-2", Arrays.asList("c", "d")));
106 | ----
107 |
108 | === Failing a transaction on purpose
109 |
110 | [source,java]
111 | ----
112 | kafka.send(SendValuesTransactional
113 | .inTransaction("test-topic", Arrays.asList("a", "b"))
114 | .failTransaction());
115 | ----
116 |
117 | Defining a `SendValuesTransactional` request with `failTransaction` will write records to the Kafka log, but abort the transaction they belong to. This allows you to test whether your application-specific Kafka consumers adhere to the transactional guarantees they claim to satisfy: a correct implementation of a consumer with `isolation.level` set to `read_committed` must not see - and thus must not process - those records.
118 |
119 | NOTE: This works for `SendKeyValuesTransactional` as well.
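
The following sketch illustrates how such a check could look. It assumes the read accessors of the `RecordConsumer` interface (`readValues` together with the `ReadKeyValues.from` factory, cf. the chapter on consuming records) as well as AssertJ for the assertion; with those assumptions in place, a consumer that honors `read_committed` must not surface the records of the aborted transaction.

[source,java]
----
kafka.send(SendValuesTransactional
    .inTransaction("test-topic", Arrays.asList("a", "b"))
    .failTransaction());

List<String> committedValues = kafka.readValues(ReadKeyValues.from("test-topic")
    .with(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"));

assertThat(committedValues).isEmpty();
----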
120 |
121 | === Attaching record headers
122 |
123 | [source,java]
124 | ----
125 | KeyValue<String, String> record = new KeyValue<>("a", "b");
126 | record.addHeader("client", "kafka-junit-test".getBytes("utf-8"));
127 |
128 | kafka.send(SendKeyValues
129 | .to("test-topic", Collections.singletonList(record)));
130 | ----
131 |
132 | NOTE: You can also pre-construct an instance of `Headers` and pass it along via the constructor of a `KeyValue`.
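
A sketch of that approach, assuming a `KeyValue` constructor that accepts key, value and headers in this order, could look like this:

[source,java]
----
Headers headers = new RecordHeaders();
headers.add("client", "kafka-junit-test".getBytes(StandardCharsets.UTF_8));

KeyValue<String, String> record = new KeyValue<>("a", "b", headers);

kafka.send(SendKeyValues.to("test-topic", Collections.singletonList(record)));
----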
133 |
--------------------------------------------------------------------------------
/src/main/asciidoc/css/article-theme.yml:
--------------------------------------------------------------------------------
1 | font:
2 | catalog:
3 | # Noto Serif supports Latin, Latin-1 Supplement, Latin Extended-A, Greek, Cyrillic, Vietnamese & an assortment of symbols
4 | Noto Serif:
5 | normal: notoserif-regular-subset.ttf
6 | bold: notoserif-bold-subset.ttf
7 | italic: notoserif-italic-subset.ttf
8 | bold_italic: notoserif-bold_italic-subset.ttf
9 | # M+ 1mn supports ASCII and the circled numbers used for conums
10 | M+ 1mn:
11 | normal: mplus1mn-regular-ascii-conums.ttf
12 | bold: mplus1mn-bold-ascii.ttf
13 | italic: mplus1mn-italic-ascii.ttf
14 | bold_italic: mplus1mn-bold_italic-ascii.ttf
15 | # M+ 1p supports Latin, Latin-1 Supplement, Latin Extended, Greek, Cyrillic, Vietnamese, Japanese & an assortment of symbols
16 | # It also provides arrows for ->, <-, => and <= replacements in case these glyphs are missing from font
17 | M+ 1p Fallback:
18 | normal: mplus1p-regular-fallback.ttf
19 | bold: mplus1p-regular-fallback.ttf
20 | italic: mplus1p-regular-fallback.ttf
21 | bold_italic: mplus1p-regular-fallback.ttf
22 | fallbacks:
23 | - M+ 1p Fallback
24 | page:
25 | background_color: ffffff
26 | layout: portrait
27 | margin: [1.3in, 0.72in, 0.8in, 0.72in]
28 | size: A4
29 | base:
30 | align: justify
31 | # color as hex string (leading # is optional)
32 | font_color: 333333
33 | # color as RGB array
34 | #font_color: [51, 51, 51]
35 | # color as CMYK array (approximated)
36 | #font_color: [0, 0, 0, 0.92]
37 | #font_color: [0, 0, 0, 92%]
38 | #Supported Fonts are Times-Roman, Helvetica, Courier and fonts in the catalog (see above)
39 | font_family: Noto Serif
40 | # choose one of these font_size/line_height_length combinations
41 | #font_size: 14
42 | #line_height_length: 20
43 | #font_size: 11.25
44 | #line_height_length: 18
45 | #font_size: 11.2
46 | #line_height_length: 16
47 | font_size: 10
48 | #line_height_length: 15
49 | # correct line height for Noto Serif metrics
50 | line_height_length: 12
51 | #font_size: 11.25
52 | #line_height_length: 18
53 | line_height: $base_line_height_length / $base_font_size
54 | font_size_large: round($base_font_size * 1.25)
55 | font_size_small: round($base_font_size * 0.85)
56 | font_size_min: $base_font_size * 0.75
57 | font_style: normal
58 | border_color: eeeeee
59 | border_radius: 4
60 | border_width: 0.5
61 | # FIXME vertical_rhythm is weird; we should think in terms of ems
62 | #vertical_rhythm: $base_line_height_length * 2 / 3
63 | # correct line height for Noto Serif metrics (comes with built-in line height)
64 | vertical_rhythm: $base_line_height_length
65 | horizontal_rhythm: $base_line_height_length
66 | # QUESTION should vertical_spacing be block_spacing instead?
67 | vertical_spacing: $vertical_rhythm
68 | link:
69 | font_color: 428bca
70 | # literal is currently used for inline monospaced in prose and table cells
71 | literal:
72 | font_color: b12146
73 | font_family: M+ 1mn
74 | heading:
75 | #font_color: 181818
76 | font_color: $base_font_color
77 | #font_family: $base_font_family
78 | font_family: Helvetica
79 | font_style: bold
80 | # h1 is used for part titles
81 | h1_font_size: floor($base_font_size * 2.6)
82 | # h2 is used for chapter titles
83 | h2_font_size: floor($base_font_size * 2.15)
84 | h3_font_size: round($base_font_size * 1.7)
85 | h4_font_size: $base_font_size_large
86 | h5_font_size: $base_font_size
87 | h6_font_size: $base_font_size_small
88 | #line_height: 1.4
89 | # correct line height for Noto Serif metrics (comes with built-in line height)
90 | line_height: 1
91 | margin_top: $vertical_rhythm * 0.4
92 | margin_bottom: $vertical_rhythm * 0.9
93 | title_page:
94 | align: right
95 | #logo:
96 | # top: 10%
97 | # image: image:tc-logo.png[scaledwidth=35%]
98 | title:
99 | top: 55%
100 | font_size: $heading_h1_font_size
101 | # font_color: 999999
102 | font_style: bold
103 | line_height: 1.2
104 | subtitle:
105 | font_size: $heading_h3_font_size
106 | #font_style: bold_italic
107 | line_height: 1
108 | authors:
109 | margin_top: $base_font_size * 1.25
110 | font_size: $base_font_size_large
111 | font_color: 999999
112 | revision:
113 | margin_top: $base_font_size * 1.25
114 | font_color: 999999
115 | block:
116 | margin_top: 0
117 | margin_bottom: $vertical_rhythm
118 | caption:
119 | align: left
120 | font_style: italic
121 | # FIXME perhaps set line_height instead of / in addition to margins?
122 | margin_inside: $vertical_rhythm / 3
123 | #margin_inside: $vertical_rhythm / 4
124 | margin_outside: 0
125 | lead:
126 | font_size: $base_font_size_large
127 | line_height: 1.4
128 | abstract:
129 | font_color: 5c6266
130 | font_size: $lead_font_size
131 | line_height: $lead_line_height
132 | font_style: italic
133 | first_line_font_style: bold
134 | admonition:
135 | border_color: $base_border_color
136 | border_width: $base_border_width
137 | padding: [0, $horizontal_rhythm, 0, $horizontal_rhythm]
138 | # icon:
139 | # tip:
140 | # name: fa-lightbulb-o
141 | # stroke_color: 111111
142 | # size: 24
143 | blockquote:
144 | font_color: $base_font_color
145 | font_size: $base_font_size_large
146 | border_color: $base_border_color
147 | border_width: 5
148 | padding: [$vertical_rhythm / 2, $horizontal_rhythm, $vertical_rhythm / -2, $horizontal_rhythm + $blockquote_border_width / 2]
149 | cite_font_size: $base_font_size_small
150 | cite_font_color: 999999
151 | # code is used for source blocks (perhaps change to source or listing?)
152 | code:
153 | font_color: $base_font_color
154 | font_family: $literal_font_family
155 | font_size: ceil($base_font_size)
156 | padding: $code_font_size
157 | line_height: 1.25
158 | background_color: f5f5f5
159 | border_color: cccccc
160 | border_radius: $base_border_radius
161 | border_width: 0.75
162 | conum:
163 | font_family: M+ 1mn
164 | font_color: $literal_font_color
165 | font_size: $base_font_size
166 | line_height: 4 / 3
167 | example:
168 | border_color: $base_border_color
169 | border_radius: $base_border_radius
170 | border_width: 0.75
171 | background_color: transparent
172 | # FIXME reenable margin bottom once margin collapsing is implemented
173 | padding: [$vertical_rhythm, $horizontal_rhythm, 0, $horizontal_rhythm]
174 | image:
175 | align: left
176 | prose:
177 | margin_top: 0
178 | margin_bottom: $vertical_rhythm
179 | sidebar:
180 | border_color: $page_background_color
181 | border_radius: $base_border_radius
182 | border_width: $base_border_width
183 | background_color: eeeeee
184 | # FIXME reenable margin bottom once margin collapsing is implemented
185 | padding: [$vertical_rhythm, $vertical_rhythm * 1.25, 0, $vertical_rhythm * 1.25]
186 | title:
187 | align: center
188 | font_color: $heading_font_color
189 | font_family: $heading_font_family
190 | font_size: $heading_h4_font_size
191 | font_style: $heading_font_style
192 | thematic_break:
193 | border_color: $base_border_color
194 | border_style: solid
195 | border_width: $base_border_width
196 | margin_top: $vertical_rhythm * 0.5
197 | margin_bottom: $vertical_rhythm * 1.5
198 | description_list:
199 | term_font_style: italic
200 | term_spacing: $vertical_rhythm / 4
201 | description_indent: $horizontal_rhythm * 1.25
202 | outline_list:
203 | indent: $horizontal_rhythm * 1.5
204 | # NOTE item_spacing applies to list items that do not have complex content
205 | item_spacing: $vertical_rhythm / 2
206 | #marker_font_color: 404040
207 | table:
208 | background_color: $page_background_color
209 | head_background_color: f9f9f9
210 | #head_font_color: $base_font_color
211 | head_font_style: bold
212 | #even_row_background_color: f9f9f9
213 | #odd_row_background_color:
214 | foot_background_color: f0f0f0
215 | border_color: dddddd
216 | border_width: $base_border_width
217 | header_cell_background_color: f9f9f9
218 | #header_cell_font_style: bold
219 | # HACK accounting for line-height
220 | cell_padding: [3, 3, 6, 3]
221 | toc:
222 | dot_leader_color: dddddd
223 | #dot_leader_content: '. '
224 | indent: $horizontal_rhythm
225 | line_height: 1.4
226 |
227 |
228 | header:
229 | font_size: $base_font_size
230 | font_color: $base_font_color
231 | # NOTE if background_color is set, background and border will span width of page
232 | border_color: dddddd
233 | border_width: 0.25
234 | height: 1.1in
235 | #$page_margin_top + $base_line_height *3 #$base_line_height_length * 12.5
236 | line_height: 1
237 | padding: [$base_line_height_length / 2, 1, $base_line_height_length / 2, 1]
238 | vertical_align: bottom
239 | # height: 3in
240 | image_vertical_align: bottom
241 | #12 (1)
242 | recto_content:
243 | #left: image:tc-logo.png[width=120px]
244 | #right: image:header.png[scalewidth=100%]
245 | center: '{chapter-title}'
246 | #left: image:tc-logo.png[width=180px]
247 | verso_content:
248 | right: $header_recto_content_right
249 | center: $header_recto_content_center
250 | #left: $header_recto_content_left
251 |
252 |
253 | footer:
254 | font_size: $base_font_size_small
255 | font_color: $base_font_color
256 | # NOTE if background_color is set, background and border will span width of page
257 | border_color: dddddd
258 | border_width: 0.25
259 | height: $base_line_height_length * 2.5 * 2
260 | line_height: 1
261 | padding: [$base_line_height_length / 2, 1, 0, 1]
262 | vertical_align: top
263 | #image_vertical_align: or
264 | # additional attributes for content:
265 | # * {page-count}
266 | # * {page-number}
267 | # * {document-title}
268 | # * {document-subtitle}
269 | # * {chapter-title}
270 | # * {section-title}
271 | # * {section-or-chapter-title}
272 | recto_content:
273 | #$header_recto_content_right
274 | left: |
275 | {document-title} +
276 | Version {revnumber}
277 | center: '<{status}>'
278 | right: 'Seite {page-number} von {page-count}'
279 | #center: $footer_verso_content_center
280 | # right: $footer_verso_content_right
281 | #right: '{section-or-chapter-title} | {page-number}'
282 | #right: '{document-title} | {page-number}'
283 | # right: '{page-number}'
284 | #center: '{page-number}'
285 | verso_content:
286 | left: $footer_recto_content_left
287 | center: $footer_recto_content_center
288 | right: $footer_recto_content_right
289 | # center: |
290 | # x +
291 | # vertraulich
292 |
293 | #center: '{page-number}'
294 |
--------------------------------------------------------------------------------
/src/main/asciidoc/css/coderay-asciidoctor.css:
--------------------------------------------------------------------------------
1 | /* Stylesheet for CodeRay to match GitHub theme | MIT License | http://foundation.zurb.com */
2 | /*pre.CodeRay {background-color:#f7f7f8;}*/
3 | .CodeRay .line-numbers{border-right:1px solid #d8d8d8;padding:0 0.5em 0 .25em}
4 | .CodeRay span.line-numbers{display:inline-block;margin-right:.5em;color:rgba(0,0,0,.3)}
5 | .CodeRay .line-numbers strong{color:rgba(0,0,0,.4)}
6 | table.CodeRay{border-collapse:separate;border-spacing:0;margin-bottom:0;border:0;background:none}
7 | table.CodeRay td{vertical-align: top;line-height:1.45}
8 | table.CodeRay td.line-numbers{text-align:right}
9 | table.CodeRay td.line-numbers>pre{padding:0;color:rgba(0,0,0,.3)}
10 | table.CodeRay td.code{padding:0 0 0 .5em}
11 | table.CodeRay td.code>pre{padding:0}
12 | .CodeRay .debug{color:#fff !important;background:#000080 !important}
13 | .CodeRay .annotation{color:#007}
14 | .CodeRay .attribute-name{color:#000080}
15 | .CodeRay .attribute-value{color:#700}
16 | .CodeRay .binary{color:#509}
17 | .CodeRay .comment{color:#998;font-style:italic}
18 | .CodeRay .char{color:#04d}
19 | .CodeRay .char .content{color:#04d}
20 | .CodeRay .char .delimiter{color:#039}
21 | .CodeRay .class{color:#458;font-weight:bold}
22 | .CodeRay .complex{color:#a08}
23 | .CodeRay .constant,.CodeRay .predefined-constant{color:#008080}
24 | .CodeRay .color{color:#099}
25 | .CodeRay .class-variable{color:#369}
26 | .CodeRay .decorator{color:#b0b}
27 | .CodeRay .definition{color:#099}
28 | .CodeRay .delimiter{color:#000}
29 | .CodeRay .doc{color:#970}
30 | .CodeRay .doctype{color:#34b}
31 | .CodeRay .doc-string{color:#d42}
32 | .CodeRay .escape{color:#666}
33 | .CodeRay .entity{color:#800}
34 | .CodeRay .error{color:#808}
35 | .CodeRay .exception{color:inherit}
36 | .CodeRay .filename{color:#099}
37 | .CodeRay .function{color:#900;font-weight:bold}
38 | .CodeRay .global-variable{color:#008080}
39 | .CodeRay .hex{color:#058}
40 | .CodeRay .integer,.CodeRay .float{color:#099}
41 | .CodeRay .include{color:#555}
42 | .CodeRay .inline{color:#000}
43 | .CodeRay .inline .inline{background:#ccc}
44 | .CodeRay .inline .inline .inline{background:#bbb}
45 | .CodeRay .inline .inline-delimiter{color:#d14}
46 | .CodeRay .inline-delimiter{color:#d14}
47 | .CodeRay .important{color:#555;font-weight:bold}
48 | .CodeRay .interpreted{color:#b2b}
49 | .CodeRay .instance-variable{color:#008080}
50 | .CodeRay .label{color:#970}
51 | .CodeRay .local-variable{color:#963}
52 | .CodeRay .octal{color:#40e}
53 | .CodeRay .predefined{color:#369}
54 | .CodeRay .preprocessor{color:#579}
55 | .CodeRay .pseudo-class{color:#555}
56 | .CodeRay .directive{font-weight:bold}
57 | .CodeRay .type{font-weight:bold}
58 | .CodeRay .predefined-type{color:inherit}
59 | .CodeRay .reserved,.CodeRay .keyword {color:#000;font-weight:bold}
60 | .CodeRay .key{color:#808}
61 | .CodeRay .key .delimiter{color:#606}
62 | .CodeRay .key .char{color:#80f}
63 | .CodeRay .value{color:#088}
64 | .CodeRay .regexp .delimiter{color:#808}
65 | .CodeRay .regexp .content{color:#808}
66 | .CodeRay .regexp .modifier{color:#808}
67 | .CodeRay .regexp .char{color:#d14}
68 | .CodeRay .regexp .function{color:#404;font-weight:bold}
69 | .CodeRay .string{color:#d20}
70 | .CodeRay .string .string .string{background:#ffd0d0}
71 | .CodeRay .string .content{color:#d14}
72 | .CodeRay .string .char{color:#d14}
73 | .CodeRay .string .delimiter{color:#d14}
74 | .CodeRay .shell{color:#d14}
75 | .CodeRay .shell .delimiter{color:#d14}
76 | .CodeRay .symbol{color:#990073}
77 | .CodeRay .symbol .content{color:#a60}
78 | .CodeRay .symbol .delimiter{color:#630}
79 | .CodeRay .tag{color:#008080}
80 | .CodeRay .tag-special{color:#d70}
81 | .CodeRay .variable{color:#036}
82 | .CodeRay .insert{background:#afa}
83 | .CodeRay .delete{background:#faa}
84 | .CodeRay .change{color:#aaf;background:#007}
85 | .CodeRay .head{color:#f8f;background:#505}
86 | .CodeRay .insert .insert{color:#080}
87 | .CodeRay .delete .delete{color:#800}
88 | .CodeRay .change .change{color:#66f}
89 | .CodeRay .head .head{color:#f4f}
90 |
--------------------------------------------------------------------------------
/src/main/asciidoc/user-guide.adoc:
--------------------------------------------------------------------------------
1 | :last-update-label:
2 | :chapter-label:
3 | :doctype: book
4 | :linkcss:
5 | :stylesdir: css/
6 | :source-highlighter: coderay
7 | :numbered:
8 | :imagesdir: chapters
9 | :icons: font
10 | :pdf-stylesdir: css/
11 | :pdf-style: article
12 | :experimental:
13 | :toc-title: Table of Contents
14 | :figure-caption: Figure
15 | :table-caption: Table
16 | :status: Final
17 | :datum: 2021-12-21
18 | :author: Markus Günther
19 |
20 | = User Guide to Kafka for JUnit
21 | Markus Günther
22 | 3.0.0, 2021-12-21
23 |
24 | :toc:
25 |
26 | include::chapters/introduction.adoc[]
27 | include::chapters/embedded-kafka-cluster.adoc[]
28 | include::chapters/external-kafka-cluster.adoc[]
29 | include::chapters/producing-records.adoc[]
30 | include::chapters/consuming-records.adoc[]
31 | include::chapters/managing-topics.adoc[]
32 | include::chapters/colophon.adoc[]
33 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedConnect.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.extern.slf4j.Slf4j;
4 | import org.apache.kafka.common.utils.Time;
5 | import org.apache.kafka.common.utils.Utils;
6 | import org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy;
7 | import org.apache.kafka.connect.runtime.ConnectorConfig;
8 | import org.apache.kafka.connect.runtime.Herder;
9 | import org.apache.kafka.connect.runtime.Worker;
10 | import org.apache.kafka.connect.runtime.WorkerConfig;
11 | import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
12 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
13 | import org.apache.kafka.connect.runtime.distributed.DistributedHerder;
14 | import org.apache.kafka.connect.runtime.isolation.Plugins;
15 | import org.apache.kafka.connect.runtime.rest.RestClient;
16 | import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
17 | import org.apache.kafka.connect.storage.ConfigBackingStore;
18 | import org.apache.kafka.connect.storage.KafkaConfigBackingStore;
19 | import org.apache.kafka.connect.storage.KafkaOffsetBackingStore;
20 | import org.apache.kafka.connect.storage.KafkaStatusBackingStore;
21 | import org.apache.kafka.connect.storage.StatusBackingStore;
22 | import org.apache.kafka.connect.storage.StringConverter;
23 | import org.apache.kafka.connect.util.FutureCallback;
24 | import org.apache.kafka.connect.util.TopicAdmin;
25 |
26 | import java.util.Collections;
27 | import java.util.HashMap;
28 | import java.util.List;
29 | import java.util.Map;
30 | import java.util.Properties;
31 | import java.util.UUID;
32 | import java.util.concurrent.ExecutionException;
33 | import java.util.concurrent.TimeUnit;
34 | import java.util.concurrent.TimeoutException;
35 | import java.util.concurrent.atomic.AtomicBoolean;
36 | import java.util.function.Supplier;
37 |
38 | @Slf4j
39 | public class EmbeddedConnect implements EmbeddedLifecycle {
40 |
41 | private static final int REQUEST_TIMEOUT_MS = 120_000;
42 |
43 | private final AtomicBoolean shutdown = new AtomicBoolean(false);
44 |
45 | private final List<Properties> connectorConfigs;
46 |
47 | private final DistributedConfig config;
48 |
49 | private final KafkaOffsetBackingStore offsetBackingStore;
50 |
51 | private final Worker worker;
52 |
53 | private final StatusBackingStore statusBackingStore;
54 |
55 | private final ConfigBackingStore configBackingStore;
56 |
57 | private final DistributedHerder herder;
58 |
59 | public EmbeddedConnect(final EmbeddedConnectConfig connectConfig, final String brokerList, final String clusterId) {
60 | // once created, this supplier will always answer with a constant client ID
61 | final Supplier<String> clientIdGenerator = constantClientIdBase();
62 | final AllConnectorClientConfigOverridePolicy policy = new AllConnectorClientConfigOverridePolicy();
63 | final Properties effectiveWorkerConfig = connectConfig.getConnectProperties();
64 | effectiveWorkerConfig.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
65 | this.connectorConfigs = connectConfig.getConnectors();
66 | this.config = new DistributedConfig(Utils.propsToStringMap(effectiveWorkerConfig));
67 | this.offsetBackingStore = new KafkaOffsetBackingStore(getTopicAdminSupplier(brokerList), clientIdGenerator, new StringConverter());
68 | this.worker = new Worker(connectConfig.getWorkerId(), Time.SYSTEM, new Plugins(new HashMap<>()), config, offsetBackingStore, policy);
69 | this.statusBackingStore = new KafkaStatusBackingStore(Time.SYSTEM, worker.getInternalValueConverter(), getTopicAdminSupplier(brokerList), clientIdGenerator.get());
70 | this.configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter(), config, new WorkerConfigTransformer(worker, Collections.emptyMap()),
71 | getTopicAdminSupplier(brokerList), clientIdGenerator.get());
72 | this.herder = new DistributedHerder(
73 | config,
74 | Time.SYSTEM,
75 | worker,
76 | clusterId,
77 | statusBackingStore,
78 | configBackingStore,
79 | "",
80 | new RestClient(new DistributedConfig(toMap(effectiveWorkerConfig))),
81 | policy,
82 | Collections.emptyList());
83 | }
84 |
85 | @Override
86 | public void start() {
87 |
88 | offsetBackingStore.configure(config);
89 | statusBackingStore.configure(config);
90 |
91 | try {
92 | log.info("Embedded Kafka Connect is starting.");
93 |
94 | worker.start();
95 | herder.start();
96 |
97 | log.info("Embedded Kafka Connect started.");
98 | log.info("Found {} connectors to deploy.", connectorConfigs.size());
99 |
100 | connectorConfigs.forEach(this::deployConnector);
101 | } catch (Exception e) {
102 | throw new RuntimeException("Unable to start Embedded Kafka Connect.", e);
103 | }
104 | }
105 |
106 | private void deployConnector(final Properties connectorConfig) {
107 | final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();
108 | final String connectorName = connectorConfig.getProperty(ConnectorConfig.NAME_CONFIG);
109 | log.info("Deploying connector {}.", connectorName);
110 | herder.putConnectorConfig(connectorName, Utils.propsToStringMap(connectorConfig), true, callback);
111 | try {
112 | callback.get(REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS);
113 | } catch (InterruptedException | ExecutionException | TimeoutException e) {
114 | log.error("Failed to deploy connector {}.", connectorName, e);
115 | }
116 | }
117 |
118 | @Override
119 | public void stop() {
120 | try {
121 | final boolean wasShuttingDown = shutdown.getAndSet(true);
122 | if (!wasShuttingDown) {
123 | log.info("Embedded Kafka Connect is stopping.");
124 | herder.stop();
125 | worker.stop();
126 | log.info("Embedded Kafka Connect stopped.");
127 | }
128 | } catch (Exception e) {
129 | throw new RuntimeException("Unable to stop Embedded Kafka Connect.", e);
130 | }
131 | }
132 |
133 | private static Supplier<TopicAdmin> getTopicAdminSupplier(final String brokerList) {
134 | final Map<String, Object> config = new HashMap<>();
135 | config.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
136 | return () -> new TopicAdmin(config);
137 | }
138 |
139 | private static Supplier<String> constantClientIdBase() {
140 | final String randomSuffix = UUID.randomUUID().toString().substring(0, 7);
141 | return () -> String.format("kafka-junit-%s", randomSuffix);
142 | }
143 |
144 | private static Map<String, String> toMap(final Properties props) {
145 | final Map<String, String> propertyMap = new HashMap<>();
146 | for (String propertyName : props.stringPropertyNames()) {
147 | String propertyValue = props.getProperty(propertyName);
148 | propertyMap.put(propertyName, propertyValue);
149 | }
150 | return Collections.unmodifiableMap(propertyMap);
151 | }
152 | }
153 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedConnectConfig.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 | import org.apache.kafka.connect.runtime.WorkerConfig;
7 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
8 |
9 | import java.util.ArrayList;
10 | import java.util.Arrays;
11 | import java.util.List;
12 | import java.util.Properties;
13 | import java.util.UUID;
14 |
15 | @Getter
16 | @ToString
17 | @RequiredArgsConstructor
18 | public class EmbeddedConnectConfig {
19 |
20 | @RequiredArgsConstructor
21 | public static class EmbeddedConnectConfigBuilder {
22 |
23 | private final String workerId;
24 | private final Properties properties = new Properties();
25 | private final List<Properties> connectorProps = new ArrayList<>();
26 |
27 | EmbeddedConnectConfigBuilder() {
28 | this(UUID.randomUUID().toString().substring(0, 7));
29 | }
30 |
31 | public <T> EmbeddedConnectConfigBuilder with(final String propertyName, final T value) {
32 | properties.put(propertyName, value);
33 | return this;
34 | }
35 |
36 | public EmbeddedConnectConfigBuilder withAll(final Properties overrides) {
37 | properties.putAll(overrides);
38 | return this;
39 | }
40 |
41 | private <T> void ifNonExisting(final String propertyName, final T value) {
42 | if (properties.get(propertyName) != null) return;
43 | properties.put(propertyName, value);
44 | }
45 |
46 | public EmbeddedConnectConfigBuilder deployConnector(final Properties connectorProps) {
47 | this.connectorProps.add(connectorProps);
48 | return this;
49 | }
50 |
51 | public EmbeddedConnectConfigBuilder deployConnectors(final Properties... connectorProps) {
52 | this.connectorProps.addAll(Arrays.asList(connectorProps));
53 | return this;
54 | }
55 |
56 | public EmbeddedConnectConfig build() {
57 | ifNonExisting(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter");
58 | ifNonExisting(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter");
59 | ifNonExisting(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, "All");
60 | ifNonExisting("internal.key.converter.schemas.enable", "false");
61 | ifNonExisting("internal.value.converter.schemas.enable", "false");
62 | ifNonExisting(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG, "1");
63 | ifNonExisting(DistributedConfig.CONFIG_TOPIC_CONFIG, "embedded-connect-config");
64 | ifNonExisting(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG, "1");
65 | ifNonExisting(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "embedded-connect-offsets");
66 | ifNonExisting(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG, "1");
67 | ifNonExisting(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "embedded-connect-status");
68 | ifNonExisting(DistributedConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString().substring(0, 7));
69 | return new EmbeddedConnectConfig(workerId, properties, connectorProps);
70 | }
71 | }
72 |
73 | private final String workerId;
74 |
75 | private final Properties connectProperties;
76 |
77 | private final List<Properties> connectors;
78 |
79 | /**
80 | * @return instance of {@link EmbeddedConnectConfigBuilder}
81 | */
82 | public static EmbeddedConnectConfigBuilder kafkaConnect() {
83 | return new EmbeddedConnectConfigBuilder();
84 | }
85 | }
86 |
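87 | // Usage sketch: one way such a configuration could be assembled; the connector name and
88 | // connector class below are hypothetical placeholders, and any worker property that is not
89 | // set explicitly falls back to the defaults applied in build().
90 | //
91 | //   EmbeddedConnectConfig connectConfig = EmbeddedConnectConfig.kafkaConnect()
92 | //       .deployConnector(Props.create()
93 | //           .with("name", "example-connector")
94 | //           .with("connector.class", "com.example.ExampleSourceConnector")
95 | //           .build())
96 | //       .build();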
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedKafka.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import kafka.server.KafkaConfig;
4 | import kafka.server.KafkaConfig$;
5 | import kafka.server.KafkaServer;
6 | import kafka.utils.TestUtils;
7 | import lombok.extern.slf4j.Slf4j;
8 | import org.apache.kafka.common.network.ListenerName;
9 | import org.apache.kafka.common.security.auth.SecurityProtocol;
10 | import org.apache.kafka.common.utils.Time;
11 |
12 | import java.io.IOException;
13 | import java.nio.file.FileVisitResult;
14 | import java.nio.file.Files;
15 | import java.nio.file.Path;
16 | import java.nio.file.SimpleFileVisitor;
17 | import java.nio.file.attribute.BasicFileAttributes;
18 | import java.util.Properties;
19 |
20 | import static org.apache.kafka.common.network.ListenerName.forSecurityProtocol;
21 |
22 | @Slf4j
23 | public class EmbeddedKafka implements EmbeddedLifecycle {
24 |
25 | private static final int UNDEFINED_BOUND_PORT = -1;
26 |
27 | private final int brokerId;
28 |
29 | private final Properties brokerConfig;
30 |
31 | private final Path logDirectory;
32 |
33 | private KafkaServer kafka;
34 |
35 | private int boundPort = UNDEFINED_BOUND_PORT;
36 |
37 | public EmbeddedKafka(final int brokerId, final String listener, final EmbeddedKafkaConfig config, final String zooKeeperConnectUrl, final boolean usesConnect) throws IOException {
38 | this.brokerId = brokerId;
39 | this.brokerConfig = new Properties();
40 | this.brokerConfig.putAll(config.getBrokerProperties());
41 | this.brokerConfig.put(KafkaConfig$.MODULE$.ListenersProp(), listener);
42 | this.brokerConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zooKeeperConnectUrl);
43 | this.logDirectory = Files.createTempDirectory("kafka-junit");
44 | this.brokerConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), brokerId);
45 | this.brokerConfig.put(KafkaConfig$.MODULE$.LogDirProp(), logDirectory.toFile().getAbsolutePath());
46 | if (usesConnect) {
47 | log.info("Enforcing 'log.cleanup.policy=compact', due to the presence of a Kafka Connect deployment.");
48 | this.brokerConfig.put(KafkaConfig$.MODULE$.LogCleanupPolicyProp(), "compact");
49 | }
50 | }
51 |
52 | @Override
53 | public void start() {
54 | activate();
55 | }
56 |
57 | public void activate() {
58 |
59 | if (kafka != null) {
60 | log.info("The embedded Kafka broker with ID {} is already running.", brokerId);
61 | return;
62 | }
63 |
64 | try {
65 | log.info("Embedded Kafka broker with ID {} is starting.", brokerId);
66 |
67 | if (boundPort != UNDEFINED_BOUND_PORT) {
68 | this.brokerConfig.put(KafkaConfig$.MODULE$.ListenersProp(), String.format("PLAINTEXT://localhost:%s", boundPort));
69 | }
70 |
71 | final KafkaConfig config = new KafkaConfig(brokerConfig, true);
72 | kafka = TestUtils.createServer(config, Time.SYSTEM);
73 | boundPort = kafka.boundPort(config.interBrokerListenerName());
74 |
75 | log.info("The embedded Kafka broker with ID {} has been started. Its logs can be found at {}.", brokerId, logDirectory);
76 | } catch (Exception e) {
77 | throw new RuntimeException(String.format("Unable to start the embedded Kafka broker with ID %s.", brokerId), e);
78 | }
79 | }
80 |
81 | @Override
82 | public void stop() {
83 |
84 | if (kafka == null) {
85 | log.info("The embedded Kafka broker with ID {} is not running or was already shut down.", brokerId);
86 | return;
87 | }
88 |
89 | deactivate();
90 |
91 | log.info("Removing working directory at {}. This directory contains Kafka logs for Kafka broker with ID {} as well.", logDirectory, brokerId);
92 | try {
93 | recursivelyDelete(logDirectory);
94 | } catch (IOException e) {
95 | log.warn("Unable to remove working directory at {}.", logDirectory);
96 | }
97 | log.info("The embedded Kafka broker with ID {} has been stopped.", brokerId);
98 | }
99 |
100 | private void recursivelyDelete(final Path path) throws IOException {
101 | Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
102 | @Override
103 | public FileVisitResult visitFile(Path file,
104 | @SuppressWarnings("unused") BasicFileAttributes attrs) {
105 |
106 | file.toFile().delete();
107 | return FileVisitResult.CONTINUE;
108 | }
109 |
110 | @Override
111 | public FileVisitResult preVisitDirectory(Path dir,
112 | @SuppressWarnings("unused") BasicFileAttributes attrs) {
113 | return FileVisitResult.CONTINUE;
114 | }
115 |
116 | @Override
117 | public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
118 | dir.toFile().delete();
119 | return FileVisitResult.CONTINUE;
120 | }
121 | });
122 | }
123 |
124 | public void deactivate() {
125 | if (kafka == null) return;
126 | boundPort = kafka.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT));
127 | log.info("The embedded Kafka broker with ID {} is stopping.", brokerId);
128 | kafka.shutdown();
129 | kafka.awaitShutdown();
130 | kafka = null;
131 | }
132 |
133 | public String getBrokerList() {
134 | return String.format("localhost:%s", kafka.boundPort(forSecurityProtocol(SecurityProtocol.PLAINTEXT)));
135 | }
136 |
137 | public String getClusterId() {
138 | return kafka.clusterId();
139 | }
140 |
141 | public Integer getBrokerId() {
142 | return brokerId;
143 | }
144 |
145 | public boolean isActive() {
146 | return kafka != null;
147 | }
148 | }
149 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedKafkaClusterConfig.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 |
7 | @Getter
8 | @ToString
9 | @RequiredArgsConstructor
10 | public class EmbeddedKafkaClusterConfig {
11 |
12 | public static class EmbeddedKafkaClusterConfigBuilder {
13 |
14 | private EmbeddedZooKeeperConfig zooKeeperConfig = EmbeddedZooKeeperConfig.defaultZooKeeper();
15 |
16 | private EmbeddedKafkaConfig kafkaConfig = EmbeddedKafkaConfig.defaultBrokers();
17 |
18 | private EmbeddedConnectConfig connectConfig = null;
19 |
20 | /**
21 | * Uses the given {@link EmbeddedZooKeeperConfig} to configure the ZooKeeper instance that
22 | * runs within the embedded Kafka cluster.
23 | *
24 | * @param zooKeeperConfig
25 | * represents the configuration for the embedded ZooKeeper instance
26 | * @return
27 | * this builder
28 | */
29 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedZooKeeperConfig zooKeeperConfig) {
30 | this.zooKeeperConfig = zooKeeperConfig;
31 | return this;
32 | }
33 |
34 | /**
35 | * Uses the given {@link EmbeddedZooKeeperConfig.EmbeddedZooKeeperConfigBuilder} to configure the
36 | * ZooKeeper instance that runs within the embedded Kafka cluster.
37 | *
38 | * @param builder
39 | * represents the configuration for the embedded ZooKeeper instance
40 | * @return
41 | * this builder
42 | */
43 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedZooKeeperConfig.EmbeddedZooKeeperConfigBuilder builder) {
44 | return configure(builder.build());
45 | }
46 |
47 | /**
48 | * Uses the given {@link EmbeddedKafkaConfig} to configure brokers that run within the
49 | * embedded Kafka cluster. This configuration is applied to all brokers in a multi-broker
50 | * environment.
51 | *
52 | * @param kafkaConfig
53 | * represents the configuration for embedded Kafka brokers
54 | * @return
55 | * this builder
56 | */
57 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedKafkaConfig kafkaConfig) {
58 | this.kafkaConfig = kafkaConfig;
59 | return this;
60 | }
61 |
62 | /**
63 | * Uses the given {@link net.mguenther.kafka.junit.EmbeddedKafkaConfig.EmbeddedKafkaConfigBuilder} to
64 | * configure brokers that run within the embedded Kafka cluster. This configuration is applied to
65 | * all brokers in a multi-broker environment.
66 | *
67 | * @param builder
68 | * represents the configuration for embedded Kafka brokers
69 | * @return
70 | * this builder
71 | */
72 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedKafkaConfig.EmbeddedKafkaConfigBuilder builder) {
73 | return configure(builder.build());
74 | }
75 |
76 | /**
77 | * Uses the given {@link EmbeddedConnectConfig} to configure Kafka Connect for the embedded
78 | * Kafka cluster.
79 | *
80 | * @param connectConfig
81 | * represents the configuration for Kafka Connect
82 | * @return
83 | * this builder
84 | */
85 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedConnectConfig connectConfig) {
86 | this.connectConfig = connectConfig;
87 | return this;
88 | }
89 |
90 | /**
91 | * Uses the given {@link EmbeddedConnectConfig.EmbeddedConnectConfigBuilder} to configure Kafka Connect
92 | * for the embedded Kafka cluster.
93 | *
94 | * @param builder
95 | * represents the configuration for Kafka Connect
96 | * @return
97 | * this builder
98 | */
99 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedConnectConfig.EmbeddedConnectConfigBuilder builder) {
100 | return configure(builder.build());
101 | }
102 |
103 | public EmbeddedKafkaClusterConfig build() {
104 | return new EmbeddedKafkaClusterConfig(zooKeeperConfig, kafkaConfig, connectConfig);
105 | }
106 | }
107 |
108 | private final EmbeddedZooKeeperConfig zooKeeperConfig;
109 |
110 | private final EmbeddedKafkaConfig kafkaConfig;
111 |
112 | private final EmbeddedConnectConfig connectConfig;
113 |
114 | public boolean usesConnect() {
115 | return connectConfig != null;
116 | }
117 |
118 | /**
119 | * @return instance of {@link EmbeddedKafkaClusterConfigBuilder} used to configure
120 | * the embedded Kafka cluster
121 | */
122 | public static EmbeddedKafkaClusterConfigBuilder newClusterConfig() {
123 | return new EmbeddedKafkaClusterConfigBuilder();
124 | }
125 |
126 | /**
127 | * @return instance of {@link EmbeddedKafkaClusterConfig} that contains the default
128 | * configuration for the embedded Kafka cluster
129 | */
130 | public static EmbeddedKafkaClusterConfig defaultClusterConfig() {
131 | return newClusterConfig().build();
132 | }
133 | }
134 |
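135 | // Usage sketch: a minimal example of composing a cluster configuration from the builders above.
136 | // The broker count is arbitrary, ZooKeeper keeps its defaults, and Kafka Connect is left out.
137 | //
138 | //   EmbeddedKafkaClusterConfig clusterConfig = EmbeddedKafkaClusterConfig.newClusterConfig()
139 | //       .configure(EmbeddedKafkaConfig.brokers().withNumberOfBrokers(3))
140 | //       .build();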
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedKafkaConfig.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import kafka.server.KafkaConfig$;
4 | import lombok.Getter;
5 | import lombok.RequiredArgsConstructor;
6 | import lombok.ToString;
7 | import lombok.extern.slf4j.Slf4j;
8 |
9 | import java.util.ArrayList;
10 | import java.util.List;
11 | import java.util.Properties;
12 | import java.util.stream.Collectors;
13 |
14 | @Slf4j
15 | @ToString
16 | @RequiredArgsConstructor
17 | public class EmbeddedKafkaConfig {
18 |
19 | public static final int DEFAULT_NUMBER_OF_BROKERS = 1;
20 |
21 | public static final String DEFAULT_LISTENER = "PLAINTEXT://localhost:9092";
22 |
23 | private static final String LISTENER_TEMPLATE = "PLAINTEXT://localhost:%s";
24 |
25 | public static class EmbeddedKafkaConfigBuilder {
26 |
27 | private final Properties properties = new Properties();
28 |
29 | private int numberOfBrokers = DEFAULT_NUMBER_OF_BROKERS;
30 |
31 | private EmbeddedKafkaConfigBuilder() {
32 | }
33 |
34 | public EmbeddedKafkaConfigBuilder withNumberOfBrokers(final int numberOfBrokers) {
35 | this.numberOfBrokers = numberOfBrokers;
36 | return this;
37 | }
38 |
39 | public <T> EmbeddedKafkaConfigBuilder with(final String propertyName, final T value) {
40 | properties.put(propertyName, value);
41 | return this;
42 | }
43 |
44 | public EmbeddedKafkaConfigBuilder withAll(final Properties overrides) {
45 | properties.putAll(overrides);
46 | return this;
47 | }
48 |
49 | private <T> void ifNonExisting(final String propertyName, final T value) {
50 | if (properties.get(propertyName) != null) return;
51 | properties.put(propertyName, value);
52 | }
53 |
54 | public EmbeddedKafkaConfig build() {
55 |
56 | final List<String> listeners = new ArrayList<>(numberOfBrokers);
57 |
58 | if (numberOfBrokers > 1) {
59 | listeners.addAll(getUniqueEphemeralPorts(numberOfBrokers)
60 | .stream()
61 | .map(port -> String.format(LISTENER_TEMPLATE, port))
62 | .collect(Collectors.toList()));
63 | } else {
64 | listeners.add(DEFAULT_LISTENER);
65 | }
66 |
67 | ifNonExisting(KafkaConfig$.MODULE$.ZkSessionTimeoutMsProp(), "8000");
68 | ifNonExisting(KafkaConfig$.MODULE$.ZkConnectionTimeoutMsProp(), "10000");
69 | ifNonExisting(KafkaConfig$.MODULE$.NumPartitionsProp(), "1");
70 | ifNonExisting(KafkaConfig$.MODULE$.DefaultReplicationFactorProp(), "1");
71 | ifNonExisting(KafkaConfig$.MODULE$.MinInSyncReplicasProp(), "1");
72 | ifNonExisting(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), "true");
73 | ifNonExisting(KafkaConfig$.MODULE$.MessageMaxBytesProp(), "1000000");
74 | ifNonExisting(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), "true");
75 | ifNonExisting(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), "1");
76 | ifNonExisting(KafkaConfig$.MODULE$.GroupInitialRebalanceDelayMsProp(), 0);
77 | ifNonExisting(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp(), "1");
78 | ifNonExisting(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp(), "1");
79 | ifNonExisting(KafkaConfig$.MODULE$.SslClientAuthProp(), "none");
80 | ifNonExisting(KafkaConfig$.MODULE$.AutoLeaderRebalanceEnableProp(), "true");
81 | ifNonExisting(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), "true");
82 | ifNonExisting(KafkaConfig$.MODULE$.LeaderImbalanceCheckIntervalSecondsProp(), 5);
83 | ifNonExisting(KafkaConfig$.MODULE$.LeaderImbalancePerBrokerPercentageProp(), 1);
84 | ifNonExisting(KafkaConfig$.MODULE$.UncleanLeaderElectionEnableProp(), "false");
85 | return new EmbeddedKafkaConfig(numberOfBrokers, listeners, properties);
86 | }
87 |
88 | private List<Integer> getUniqueEphemeralPorts(final int howMany) {
89 | final List<Integer> ephemeralPorts = new ArrayList<>(howMany);
90 | while (ephemeralPorts.size() < howMany) {
91 | final int port = generateRandomEphemeralPort();
92 | if (!ephemeralPorts.contains(port)) {
93 | ephemeralPorts.add(port);
94 | }
95 | }
96 | return ephemeralPorts;
97 | }
98 |
99 | private int generateRandomEphemeralPort() {
100 | return Math.min((int) (Math.random() * 65535) + 1024, 65535);
101 | }
102 | }
103 |
104 | @Getter
105 | private final int numberOfBrokers;
106 |
107 | private final List<String> uniqueListeners;
108 |
109 | @Getter
110 | private final Properties brokerProperties;
111 |
112 | public String listenerFor(final int brokerIndex) {
113 | if (brokerProperties.containsKey(KafkaConfig$.MODULE$.ListenersProp())) {
114 | return brokerProperties.getProperty(KafkaConfig$.MODULE$.ListenersProp());
115 | } else {
116 | return uniqueListeners.get(brokerIndex);
117 | }
118 | }
119 |
120 | /**
121 | * @return instance of {@link EmbeddedKafkaConfigBuilder}
122 | */
123 | public static EmbeddedKafkaConfigBuilder brokers() {
124 | return new EmbeddedKafkaConfigBuilder();
125 | }
126 |
127 | /**
128 | * @return instance of {@link EmbeddedKafkaConfig} that contains the default configuration
129 | * for all brokers in an embedded Kafka cluster
130 | */
131 | public static EmbeddedKafkaConfig defaultBrokers() {
132 | return brokers().build();
133 | }
134 | }
135 |
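136 | // Usage sketch: overriding a single broker property while keeping the remaining defaults that
137 | // build() applies. The overridden property and its value are only an illustration.
138 | //
139 | //   EmbeddedKafkaConfig kafkaConfig = EmbeddedKafkaConfig.brokers()
140 | //       .with(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), "false")
141 | //       .build();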
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedLifecycle.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | public interface EmbeddedLifecycle {
4 |
5 | /**
6 | * Starts the embedded component. After this method completes, the component *must* be fully
7 | * operational.
8 | *
9 | * @throws RuntimeException
10 | * this method can and should throw a {@link RuntimeException} to indicate that the
11 | * component could not be deployed
12 | */
13 | void start();
14 |
15 | /**
16 | * Stops the embedded component. After this method completes all acquired resources are
17 | * freed and the component is properly shut down. The component is no longer operational
18 | * after this.
19 | *
20 | * @throws RuntimeException
21 | * this method can and should throw a {@link RuntimeException} to indicate that the
22 | * component could not be shut down
23 | */
24 | void stop();
25 | }
26 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedZooKeeper.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.RequiredArgsConstructor;
4 | import lombok.extern.slf4j.Slf4j;
5 | import org.apache.curator.test.TestingServer;
6 |
7 | @Slf4j
8 | @RequiredArgsConstructor
9 | public class EmbeddedZooKeeper implements EmbeddedLifecycle {
10 |
11 | private final EmbeddedZooKeeperConfig config;
12 |
13 | private TestingServer internalServer;
14 |
15 | @Override
16 | public void start() {
17 |
18 | if (internalServer != null) {
19 | log.info("The embedded ZooKeeper instance is already running.");
20 | return;
21 | }
22 |
23 | try {
24 | log.info("Embedded ZooKeeper is starting.");
25 | internalServer = new TestingServer(config.getPort());
26 | log.info("Successfully started an embedded ZooKeeper instance at {} which is assigned to the temporary directory {}.",
27 | internalServer.getConnectString(),
28 | internalServer.getTempDirectory());
29 | } catch (Exception e) {
30 | throw new RuntimeException("Unable to start an embedded ZooKeeper instance.", e);
31 | }
32 | }
33 |
34 | @Override
35 | public void stop() {
36 |
37 | if (internalServer == null) {
38 | log.info("The embedded ZooKeeper is not running or was already shut down.");
39 | return;
40 | }
41 |
42 | try {
43 | log.info("The embedded ZooKeeper instance at {} is stopping.", internalServer.getConnectString());
44 | internalServer.close();
45 | log.info("The embedded ZooKeeper instance at {} has been shut down.", internalServer.getConnectString());
46 | } catch (Exception e) {
47 | throw new RuntimeException("Unable to stop the embedded ZooKeeper instance.", e);
48 | }
49 | }
50 |
51 | public String getConnectString() {
52 | return internalServer.getConnectString();
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/EmbeddedZooKeeperConfig.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.ToString;
5 |
6 | @ToString
7 | public class EmbeddedZooKeeperConfig {
8 |
9 | public static final int USE_RANDOM_ZOOKEEPER_PORT = -1;
10 |
11 | public static class EmbeddedZooKeeperConfigBuilder {
12 |
13 | private int port = USE_RANDOM_ZOOKEEPER_PORT;
14 |
15 | EmbeddedZooKeeperConfigBuilder withPort(final int port) {
16 | this.port = port;
17 | return this;
18 | }
19 |
20 | public EmbeddedZooKeeperConfig build() {
21 | return new EmbeddedZooKeeperConfig(this);
22 | }
23 | }
24 |
25 | @Getter
26 | private final Integer port;
27 |
28 | private EmbeddedZooKeeperConfig(final EmbeddedZooKeeperConfigBuilder builder) {
29 | this.port = builder.port;
30 | }
31 |
32 | public static EmbeddedZooKeeperConfigBuilder zooKeeper() {
33 | return new EmbeddedZooKeeperConfigBuilder();
34 | }
35 |
36 | /**
37 | * @return instance of {@link EmbeddedZooKeeperConfig} that contains the default configuration
38 | * for the embedded ZooKeeper instance
39 | */
40 | public static EmbeddedZooKeeperConfig defaultZooKeeper() {
41 | return zooKeeper().build();
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/ExternalKafkaCluster.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.AccessLevel;
4 | import lombok.RequiredArgsConstructor;
5 | import net.mguenther.kafka.junit.provider.DefaultRecordConsumer;
6 | import net.mguenther.kafka.junit.provider.DefaultRecordProducer;
7 | import net.mguenther.kafka.junit.provider.DefaultTopicManager;
8 | import org.apache.kafka.clients.producer.RecordMetadata;
9 |
10 | import java.util.List;
11 | import java.util.Map;
12 | import java.util.Properties;
13 |
14 | @RequiredArgsConstructor(access = AccessLevel.PACKAGE)
15 | public class ExternalKafkaCluster implements RecordProducer, RecordConsumer, TopicManager {
16 |
17 | private final RecordProducer producerDelegate;
18 |
19 | private final RecordConsumer consumerDelegate;
20 |
21 | private final TopicManager topicManagerDelegate;
22 |
23 | private ExternalKafkaCluster(final String bootstrapServers) {
24 | producerDelegate = new DefaultRecordProducer(bootstrapServers);
25 | consumerDelegate = new DefaultRecordConsumer(bootstrapServers);
26 | topicManagerDelegate = new DefaultTopicManager(bootstrapServers);
27 | }
28 |
29 | @Override
30 | public <V> List<V> readValues(final ReadKeyValues<String, V> readRequest) throws InterruptedException {
31 | return consumerDelegate.readValues(readRequest);
32 | }
33 |
34 | @Override
35 | public <K, V> List<KeyValue<K, V>> read(final ReadKeyValues<K, V> readRequest) throws InterruptedException {
36 | return consumerDelegate.read(readRequest);
37 | }
38 |
39 | @Override
40 | public <V> List<V> observeValues(final ObserveKeyValues<String, V> observeRequest) throws InterruptedException {
41 | return consumerDelegate.observeValues(observeRequest);
42 | }
43 |
44 | @Override
45 | public <K, V> List<KeyValue<K, V>> observe(final ObserveKeyValues<K, V> observeRequest) throws InterruptedException {
46 | return consumerDelegate.observe(observeRequest);
47 | }
48 |
49 | @Override
50 | public <V> List<RecordMetadata> send(final SendValues<V> sendRequest) throws InterruptedException {
51 | return producerDelegate.send(sendRequest);
52 | }
53 |
54 | @Override
55 | public <V> List<RecordMetadata> send(final SendValuesTransactional<V> sendRequest) throws InterruptedException {
56 | return producerDelegate.send(sendRequest);
57 | }
58 |
59 | @Override
60 | public <K, V> List<RecordMetadata> send(final SendKeyValues<K, V> sendRequest) throws InterruptedException {
61 | return producerDelegate.send(sendRequest);
62 | }
63 |
64 | @Override
65 | public <K, V> List<RecordMetadata> send(final SendKeyValuesTransactional<K, V> sendRequest) throws InterruptedException {
66 | return producerDelegate.send(sendRequest);
67 | }
68 |
69 | @Override
70 | public void createTopic(final TopicConfig config) {
71 | topicManagerDelegate.createTopic(config);
72 | }
73 |
74 | @Override
75 | public void deleteTopic(final String topic) {
76 | topicManagerDelegate.deleteTopic(topic);
77 | }
78 |
79 | @Override
80 | public boolean exists(final String topic) {
81 | return topicManagerDelegate.exists(topic);
82 | }
83 |
84 | @Override
85 | public Map<Integer, LeaderAndIsr> fetchLeaderAndIsr(final String topic) {
86 | return topicManagerDelegate.fetchLeaderAndIsr(topic);
87 | }
88 |
89 | @Override
90 | public Properties fetchTopicConfig(final String topic) {
91 | return topicManagerDelegate.fetchTopicConfig(topic);
92 | }
93 |
94 | public static ExternalKafkaCluster at(final String bootstrapServers) {
95 | return new ExternalKafkaCluster(bootstrapServers);
96 | }
97 | }
98 |
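99 | // Usage sketch: connecting to an already running cluster and exercising the delegates above.
100 | // The bootstrap address, topic name and payloads are placeholders, and SendValues is assumed
101 | // to offer its usual fluent builder entry point; both calls declare InterruptedException.
102 | //
103 | //   ExternalKafkaCluster cluster = ExternalKafkaCluster.at("localhost:9092");
104 | //   cluster.send(SendValues.to("example-topic", "a", "b", "c"));
105 | //   List<String> values = cluster.readValues(ReadKeyValues.from("example-topic"));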
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/KeyValue.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.EqualsAndHashCode;
4 | import lombok.Getter;
5 | import lombok.RequiredArgsConstructor;
6 | import lombok.ToString;
7 | import org.apache.kafka.common.header.Header;
8 | import org.apache.kafka.common.header.Headers;
9 | import org.apache.kafka.common.header.internals.RecordHeader;
10 | import org.apache.kafka.common.header.internals.RecordHeaders;
11 |
12 | import java.nio.charset.Charset;
13 | import java.util.Optional;
14 |
15 | @ToString
16 | @EqualsAndHashCode(of = { "key", "value" })
17 | @RequiredArgsConstructor
18 | public class KeyValue<K, V> {
19 |
20 | @Getter
21 | private final K key;
22 |
23 | @Getter
24 | private final V value;
25 |
26 | @Getter
27 | private final Headers headers;
28 |
29 | private final KeyValueMetadata metadata;
30 |
31 | public KeyValue(final K key, final V value) {
32 | this(key, value, new RecordHeaders(), null);
33 | }
34 |
35 | public KeyValue(final K key, final V value, final Headers headers) {
36 | this(key, value, headers, null);
37 | }
38 |
39 | public void addHeader(final String headerName, final String headerValue, final Charset charset) {
40 | addHeader(headerName, headerValue.getBytes(charset));
41 | }
42 |
43 | public void addHeader(final String headerName, final byte[] headerValue) {
44 | final Header header = new RecordHeader(headerName, headerValue);
45 | headers.add(header);
46 | }
47 |
48 | public Optional<KeyValueMetadata> getMetadata() {
49 | return Optional.ofNullable(metadata);
50 | }
51 | }
52 |
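53 | // Usage sketch: constructing a keyed record and attaching a header before handing it to a
54 | // send request. Key, value and header contents are placeholders.
55 | //
56 | //   KeyValue<String, String> record = new KeyValue<>("order-1", "first order");
57 | //   record.addHeader("source", "integration-test", java.nio.charset.StandardCharsets.UTF_8);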
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/KeyValueMetadata.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 |
7 | @Getter
8 | @ToString
9 | @RequiredArgsConstructor
10 | public class KeyValueMetadata {
11 |
12 | private final String topic;
13 | private final int partition;
14 | private final long offset;
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/LeaderAndIsr.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 |
7 | import java.util.Set;
8 |
9 | @Getter
10 | @ToString
11 | @RequiredArgsConstructor
12 | public class LeaderAndIsr {
13 |
14 | private final Integer leader;
15 | private final Set<Integer> isr;
16 | }
17 |
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/ObserveKeyValues.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 | import org.apache.kafka.clients.consumer.ConsumerConfig;
7 | import org.apache.kafka.common.header.Headers;
8 | import org.apache.kafka.common.serialization.StringDeserializer;
9 |
10 | import java.util.HashMap;
11 | import java.util.Map;
12 | import java.util.Properties;
13 | import java.util.UUID;
14 | import java.util.concurrent.TimeUnit;
15 | import java.util.function.Predicate;
16 |
17 | @Getter
18 | @ToString
19 | @RequiredArgsConstructor
20 | public class ObserveKeyValues<K, V> {
21 |
22 | public static final int DEFAULT_OBSERVATION_TIME_MILLIS = 30_000;
23 |
24 | public static class ObserveKeyValuesBuilder<K, V> {
25 |
26 | private final String topic;
27 | private final int expected;
28 | private final Class<K> clazzOfK;
29 | private final Class<V> clazzOfV;
30 | private final Properties consumerProps = new Properties();
31 | private final Map<Integer, Long> seekTo = new HashMap<>();
32 | private Predicate<K> filterOnKeys = key -> true;
33 | private Predicate<V> filterOnValues = value -> true;
34 | private Predicate<Headers> filterOnHeaders = value -> true;
35 | private int observationTimeMillis = DEFAULT_OBSERVATION_TIME_MILLIS;
36 | private boolean includeMetadata = false;
37 |
38 | ObserveKeyValuesBuilder(final String topic, final int expected, final Class<K> clazzOfK, final Class<V> clazzOfV) {
39 | this.topic = topic;
40 | this.expected = expected;
41 | this.clazzOfK = clazzOfK;
42 | this.clazzOfV = clazzOfV;
43 | }
44 |
45 | public ObserveKeyValuesBuilder<K, V> observeFor(final int duration, final TimeUnit unit) {
46 | this.observationTimeMillis = (int) unit.toMillis(duration);
47 | return this;
48 | }
49 |
50 | public ObserveKeyValuesBuilder<K, V> filterOnKeys(final Predicate<K> filterOnKeys) {
51 | this.filterOnKeys = filterOnKeys;
52 | return this;
53 | }
54 |
55 | public ObserveKeyValuesBuilder<K, V> filterOnValues(final Predicate<V> filterOnValues) {
56 | this.filterOnValues = filterOnValues;
57 | return this;
58 | }
59 |
60 | public ObserveKeyValuesBuilder<K, V> filterOnHeaders(final Predicate<Headers> filterOnHeaders) {
61 | this.filterOnHeaders = filterOnHeaders;
62 | return this;
63 | }
64 |
65 | public ObserveKeyValuesBuilder<K, V> includeMetadata() {
66 | return withMetadata(true);
67 | }
68 |
69 | public ObserveKeyValuesBuilder<K, V> withMetadata(final boolean modifier) {
70 | this.includeMetadata = modifier;
71 | return this;
72 | }
73 |
74 | public ObserveKeyValuesBuilder<K, V> seekTo(final int partition, final long offset) {
75 | seekTo.put(partition, offset);
76 | return this;
77 | }
78 |
79 | public ObserveKeyValuesBuilder<K, V> seekTo(final Map<Integer, Long> seekTo) {
80 | this.seekTo.putAll(seekTo);
81 | return this;
82 | }
83 |
84 | public <T> ObserveKeyValuesBuilder<K, V> with(final String propertyName, final T value) {
85 | consumerProps.put(propertyName, value);
86 | return this;
87 | }
88 |
89 | public ObserveKeyValuesBuilder<K, V> withAll(final Properties consumerProps) {
90 | this.consumerProps.putAll(consumerProps);
91 | return this;
92 | }
93 |
94 | private <T> void ifNonExisting(final String propertyName, final T value) {
95 | if (consumerProps.get(propertyName) != null) return;
96 | consumerProps.put(propertyName, value);
97 | }
98 |
99 | public ObserveKeyValues<K, V> useDefaults() {
100 | consumerProps.clear();
101 | observationTimeMillis = DEFAULT_OBSERVATION_TIME_MILLIS;
102 | return build();
103 | }
104 |
105 | public ObserveKeyValues<K, V> build() {
106 | ifNonExisting(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
107 | ifNonExisting(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
108 | ifNonExisting(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
109 | ifNonExisting(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
110 | ifNonExisting(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
111 | ifNonExisting(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
112 | ifNonExisting(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
113 | return new ObserveKeyValues<>(topic, expected, observationTimeMillis, includeMetadata, seekTo, consumerProps, filterOnKeys, filterOnValues, filterOnHeaders, clazzOfK, clazzOfV);
114 | }
115 | }
116 |
117 | private final String topic;
118 | private final int expected;
119 | private final int observationTimeMillis;
120 | private final boolean includeMetadata;
121 | private final Map<Integer, Long> seekTo;
122 | private final Properties consumerProps;
123 | private final Predicate<K> filterOnKeys;
124 | private final Predicate<V> filterOnValues;
125 | private final Predicate<Headers> filterOnHeaders;
126 | private final Class<K> clazzOfK;
127 | private final Class<V> clazzOfV;
128 |
129 | public static ObserveKeyValuesBuilder<String, String> on(final String topic, final int expected) {
130 | return on(topic, expected, String.class, String.class);
131 | }
132 |
133 | public static <V> ObserveKeyValuesBuilder<String, V> on(final String topic,
134 | final int expected,
135 | final Class<V> clazzOfV) {
136 | return on(topic, expected, String.class, clazzOfV);
137 | }
138 |
139 | public static <K, V> ObserveKeyValuesBuilder<K, V> on(final String topic,
140 | final int expected,
141 | final Class<K> clazzOfK,
142 | final Class<V> clazzOfV) {
143 | return new ObserveKeyValuesBuilder<>(topic, expected, clazzOfK, clazzOfV);
144 | }
145 | }
146 |
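147 | // Usage sketch: waiting for three records on a topic, keeping only keys with a given prefix.
148 | // Topic name, expected count and key prefix are placeholders.
149 | //
150 | //   ObserveKeyValues<String, String> observeRequest = ObserveKeyValues.on("example-topic", 3)
151 | //       .observeFor(10, TimeUnit.SECONDS)
152 | //       .filterOnKeys(key -> key.startsWith("order-"))
153 | //       .build();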
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/Props.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import java.util.Properties;
4 |
5 | /**
6 | * Provides a fluent interface for constructing {@link java.util.Properties}. Use this for example
7 | * with {@code EmbeddedConnectConfig#deployConnector} to retain the fluent interface when provisioning
8 | * your embedded Kafka cluster.
9 | */
10 | public class Props {
11 |
12 | private final Properties properties = new Properties();
13 |
14 | public <T> Props with(final String propertyName, final T value) {
15 | properties.put(propertyName, value);
16 | return this;
17 | }
18 |
19 | public Props withAll(final Properties overrides) {
20 | properties.putAll(overrides);
21 | return this;
22 | }
23 |
24 | public Properties build() {
25 | final Properties copyOfProps = new Properties();
26 | copyOfProps.putAll(properties);
27 | return copyOfProps;
28 | }
29 |
30 | public static Props create() {
31 | return new Props();
32 | }
33 | }
34 |
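35 | // Usage sketch: building a Properties instance fluently, e.g. for a connector deployment.
36 | // The property names and values are placeholders.
37 | //
38 | //   Properties connectorProps = Props.create()
39 | //       .with("name", "example-connector")
40 | //       .with("tasks.max", "1")
41 | //       .build();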
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/ReadKeyValues.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 | import org.apache.kafka.clients.consumer.ConsumerConfig;
7 | import org.apache.kafka.common.header.Headers;
8 | import org.apache.kafka.common.serialization.StringDeserializer;
9 |
10 | import java.util.HashMap;
11 | import java.util.Map;
12 | import java.util.Properties;
13 | import java.util.UUID;
14 | import java.util.concurrent.TimeUnit;
15 | import java.util.function.Predicate;
16 |
17 | @Getter
18 | @ToString
19 | @RequiredArgsConstructor
20 | public class ReadKeyValues<K, V> {
21 |
22 | public static final int WITHOUT_LIMIT = -1;
23 | public static final int DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS = 2_000;
24 |
25 | public static class ReadKeyValuesBuilder<K, V> {
26 |
27 | private final String topic;
28 | private final Class<K> clazzOfK;
29 | private final Class<V> clazzOfV;
30 | private final Properties consumerProps = new Properties();
31 | private final Map<Integer, Long> seekTo = new HashMap<>();
32 | private Predicate<K> filterOnKeys = key -> true;
33 | private Predicate<V> filterOnValues = value -> true;
34 | private Predicate<Headers> filterOnHeaders = value -> true;
35 | private int limit = WITHOUT_LIMIT;
36 | private int maxTotalPollTimeMillis = DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS;
37 | private boolean includeMetadata = false;
38 |
39 | ReadKeyValuesBuilder(final String topic, final Class<K> clazzOfK, final Class<V> clazzOfV) {
40 | this.topic = topic;
41 | this.clazzOfK = clazzOfK;
42 | this.clazzOfV = clazzOfV;
43 | }
44 |
45 | public ReadKeyValuesBuilder<K, V> filterOnKeys(final Predicate<K> filterOnKeys) {
46 | this.filterOnKeys = filterOnKeys;
47 | return this;
48 | }
49 |
50 | public ReadKeyValuesBuilder<K, V> filterOnValues(final Predicate<V> filterOnValues) {
51 | this.filterOnValues = filterOnValues;
52 | return this;
53 | }
54 |
55 | public ReadKeyValuesBuilder<K, V> filterOnHeaders(final Predicate<Headers> filterOnHeaders) {
56 | this.filterOnHeaders = filterOnHeaders;
57 | return this;
58 | }
59 |
60 | public ReadKeyValuesBuilder<K, V> unlimited() {
61 | this.limit = WITHOUT_LIMIT;
62 | return this;
63 | }
64 |
65 | public ReadKeyValuesBuilder<K, V> withLimit(final int limit) {
66 | this.limit = limit;
67 | return this;
68 | }
69 |
70 | public ReadKeyValuesBuilder<K, V> withMaxTotalPollTime(final int duration, final TimeUnit unit) {
71 | this.maxTotalPollTimeMillis = (int) unit.toMillis(duration);
72 | return this;
73 | }
74 |
75 | public ReadKeyValuesBuilder<K, V> includeMetadata() {
76 | return withMetadata(true);
77 | }
78 |
79 | public ReadKeyValuesBuilder<K, V> withMetadata(final boolean modifier) {
80 | this.includeMetadata = modifier;
81 | return this;
82 | }
83 |
84 | public ReadKeyValuesBuilder<K, V> seekTo(final int partition, final long offset) {
85 | seekTo.put(partition, offset);
86 | return this;
87 | }
88 |
89 | public ReadKeyValuesBuilder<K, V> seekTo(final Map<Integer, Long> seekTo) {
90 | this.seekTo.putAll(seekTo);
91 | return this;
92 | }
93 |
94 | public <T> ReadKeyValuesBuilder<K, V> with(final String propertyName, final T value) {
95 | consumerProps.put(propertyName, value);
96 | return this;
97 | }
98 |
99 | public ReadKeyValuesBuilder<K, V> withAll(final Properties consumerProps) {
100 | this.consumerProps.putAll(consumerProps);
101 | return this;
102 | }
103 |
104 | private <T> void ifNonExisting(final String propertyName, final T value) {
105 | if (consumerProps.get(propertyName) != null) return;
106 | consumerProps.put(propertyName, value);
107 | }
108 |
109 | public ReadKeyValues<K, V> useDefaults() {
110 | consumerProps.clear();
111 | limit = WITHOUT_LIMIT;
112 | maxTotalPollTimeMillis = DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS;
113 | return build();
114 | }
115 |
116 | public ReadKeyValues<K, V> build() {
117 | ifNonExisting(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
118 | ifNonExisting(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
119 | ifNonExisting(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
120 | ifNonExisting(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
121 | ifNonExisting(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
122 | ifNonExisting(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
123 | ifNonExisting(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
124 | return new ReadKeyValues<>(topic, limit, maxTotalPollTimeMillis, includeMetadata, seekTo, consumerProps, filterOnKeys, filterOnValues, filterOnHeaders, clazzOfK, clazzOfV);
125 | }
126 | }
127 |
128 | private final String topic;
129 | private final int limit;
130 | private final int maxTotalPollTimeMillis;
131 | private final boolean includeMetadata;
132 | private final Map<Integer, Long> seekTo;
133 | private final Properties consumerProps;
134 | private final Predicate<K> filterOnKeys;
135 | private final Predicate<V> filterOnValues;
136 | private final Predicate<Headers> filterOnHeaders;
137 | private final Class<K> clazzOfK;
138 | private final Class<V> clazzOfV;
139 |
140 | public static ReadKeyValuesBuilder<String, String> from(final String topic) {
141 | return from(topic, String.class, String.class);
142 | }
143 |
144 | public static <V> ReadKeyValuesBuilder<String, V> from(final String topic, final Class<V> clazzOfV) {
145 | return from(topic, String.class, clazzOfV);
146 | }
147 |
148 | public static <K, V> ReadKeyValuesBuilder<K, V> from(final String topic, final Class<K> clazzOfK, final Class<V> clazzOfV) {
149 | return new ReadKeyValuesBuilder<>(topic, clazzOfK, clazzOfV);
150 | }
151 | }
152 |
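153 | // Usage sketch: reading at most 100 String records from a topic with a bounded poll time.
154 | // The topic name and limits are placeholders.
155 | //
156 | //   ReadKeyValues<String, String> readRequest = ReadKeyValues.from("example-topic")
157 | //       .withLimit(100)
158 | //       .withMaxTotalPollTime(5, TimeUnit.SECONDS)
159 | //       .build();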
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/RecordConsumer.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import java.util.List;
4 |
5 | /**
6 | * Provides the means to read key-value pairs or un-keyed values from a Kafka topic as well
7 | * as the possibility to watch given topics until a certain amount of records have been consumed
8 | * from them. All of the operations a {@code RecordConsumer} provides are synchronous in their
9 | * nature.
10 | */
11 | public interface RecordConsumer {
12 |
13 | /**
14 | * Reads values from a Kafka topic.
15 | *
16 | * @param readRequest
17 | * the configuration of the consumer and the read operation it has to carry out
18 | * @param <V>
19 | * refers to the type of values being read
20 | * @throws InterruptedException
21 | * in case an interrupt signal has been set
22 | * @return
23 | * unmodifiable {@link java.util.List} of consumed values
24 | * @see ReadKeyValues
25 | */
26 | <V> List<V> readValues(ReadKeyValues<String, V> readRequest) throws InterruptedException;
27 |
28 | /**
29 | * Reads values from a Kafka topic. This is a convenience method that accepts a
30 | * {@link net.mguenther.kafka.junit.ReadKeyValues.ReadKeyValuesBuilder} and
31 | * immediately constructs a {@link ReadKeyValues} request from it that is passed
32 | * on to {@link #readValues(ReadKeyValues)}.
33 | *
34 | * @param builder
35 | * the builder used for the configuration of the consumer and the read operation it
36 | * has to carry out
37 | * @param <V>
38 | * refers to the type of values being read
39 | * @throws InterruptedException
40 | * in case an interrupt signal has been set
41 | * @return
42 | * unmodifiable {@link java.util.List} of consumed values
43 | * @see ReadKeyValues
44 | * @see ReadKeyValues.ReadKeyValuesBuilder
45 | */
46 | default <V> List<V> readValues(ReadKeyValues.ReadKeyValuesBuilder<String, V> builder) throws InterruptedException {
47 | return readValues(builder.build());
48 | }
49 |
50 | /**
51 | * Reads key-value pairs from a Kafka topic.
52 | *
53 | * @param readRequest
54 | * the configuration of the consumer and the read operation it has to carry out
55 | * @param <K>
56 | * refers to the type of keys being read
57 | * @param <V>
58 | * refers to the type of values being read
59 | * @throws InterruptedException
60 | * in case an interrupt signal has been set
61 | * @return
62 | * unmodifiable {@link java.util.List} of consumed key-value pairs
63 | * @see ReadKeyValues
64 | */
65 | <K, V> List<KeyValue<K, V>> read(ReadKeyValues<K, V> readRequest) throws InterruptedException;
66 |
67 | /**
68 | * Reads key-value pairs from a Kafka topic. This is a convenience method that accepts a
69 | * {@link net.mguenther.kafka.junit.ReadKeyValues.ReadKeyValuesBuilder} and immediately
70 | * constructs a {@link ReadKeyValues} request from it that is passed on to
71 | * {@link #read(ReadKeyValues)}.
72 | *
73 | * @param builder
74 | * the builder used for the configuration of the consumer and the read operation it
75 | * has to carry out
76 | * @param <K>
77 | * refers to the type of keys being read
78 | * @param <V>
79 | * refers to the type of values being read
80 | * @throws InterruptedException
81 | * in case an interrupt signal has been set
82 | * @return
83 | * unmodifiable {@link java.util.List} of consumed key-value pairs
84 | * @see ReadKeyValues
85 | * @see ReadKeyValues.ReadKeyValuesBuilder
86 | */
87 | default <K, V> List<KeyValue<K, V>> read(ReadKeyValues.ReadKeyValuesBuilder<K, V> builder) throws InterruptedException {
88 | return read(builder.build());
89 | }
90 |
91 | /**
92 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout
93 | * elapses. Returns the values that have been consumed up until this point or throws an
94 | * {@code AssertionError} if the number of consumed values does not meet the expected
95 | * number of records.
96 | *
97 | * @param observeRequest
98 | * the configuration of the consumer and the observe operation it has to carry out
99 | * @param <V>
100 | * refers to the type of values being read
101 | * @throws AssertionError
102 | * in case the number of consumed values does not meet the expected number of records
103 | * @throws InterruptedException
104 | * in case an interrupt signal has been set
105 | * @return
106 | * unmodifiable {@link java.util.List} of values
107 | * @see ObserveKeyValues
108 | */
109 | <V> List<V> observeValues(ObserveKeyValues<String, V> observeRequest) throws InterruptedException;
110 |
111 | /**
112 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout
113 | * elapses. Returns the values that have been consumed up until this point or throws an
114 | * {@code AssertionError} if the number of consumed values does not meet the expected
115 | * number of records. This is a convenience method that accepts a
116 | * {@link net.mguenther.kafka.junit.ObserveKeyValues.ObserveKeyValuesBuilder} and immediately
117 | * constructs a {@link ObserveKeyValues} request from it that is passed on to
118 | * {@link #observeValues(ObserveKeyValues)}.
119 | *
120 | * @param builder
121 | * the builder used for the configuration of the consumer and the observe operation it
122 | * has to carry out
123 | * @param <V>
124 | * refers to the type of values being read
125 | * @throws AssertionError
126 | * in case the number of consumed values does not meet the expected number of records
127 | * @throws InterruptedException
128 | * in case an interrupt signal has been set
129 | * @return
130 | * unmodifiable {@link java.util.List} of values
131 | * @see ObserveKeyValues
132 | * @see ObserveKeyValues.ObserveKeyValuesBuilder
133 | */
134 | default <V> List<V> observeValues(ObserveKeyValues.ObserveKeyValuesBuilder<String, V> builder) throws InterruptedException {
135 | return observeValues(builder.build());
136 | }
137 |
138 | /**
139 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout
140 | * elapses. Returns the key-value pairs that have been consumed up until this point or throws an
141 | * {@code AssertionError} if the number of consumed key-value pairs does not meet the expected
142 | * number of records.
143 | *
144 | * @param observeRequest
145 | * the configuration of the consumer and the observe operation it has to carry out
146 | * @param <K>
147 | * refers to the type of keys being read
148 | * @param <V>
149 | * refers to the type of values being read
150 | * @throws InterruptedException
151 | * in case an interrupt signal has been set
152 | * @return
153 | * unmodifiable {@link java.util.List} of key-value pairs
154 | * @see ObserveKeyValues
155 | */
156 | <K, V> List<KeyValue<K, V>> observe(ObserveKeyValues<K, V> observeRequest) throws InterruptedException;
157 |
158 | /**
159 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout
160 | * elapses. Returns the key-value pairs that have been consumed up until this point or throws an
161 | * {@code AssertionError} if the number of consumed key-value pairs does not meet the expected
162 | * number of records. This is a convenience method that accepts a
163 | * {@link net.mguenther.kafka.junit.ObserveKeyValues.ObserveKeyValuesBuilder} and immediately
164 | * constructs a {@link ObserveKeyValues} request from it that is passed on to
165 | * {@link #observe(ObserveKeyValues)}.
166 | *
167 | * @param builder
168 | * the builder used for the configuration of the consumer and the observe operation it
169 | * has to carry out
170 | * @param <K>
171 | * refers to the type of keys being read
172 | * @param <V>
173 | * refers to the type of values being read
174 | * @throws AssertionError
175 | * in case the number of consumed values does not meet the expected number of records
176 | * @throws InterruptedException
177 | * in case an interrupt signal has been set
178 | * @return
179 | * unmodifiable {@link java.util.List} of key-value pairs
180 | * @see ObserveKeyValues
181 | * @see ObserveKeyValues.ObserveKeyValuesBuilder
182 | */
183 | default <K, V> List<KeyValue<K, V>> observe(ObserveKeyValues.ObserveKeyValuesBuilder<K, V> builder) throws InterruptedException {
184 | return observe(builder.build());
185 | }
186 | }
187 |
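188 | // Usage sketch: any implementation of this interface (for instance ExternalKafkaCluster) can be
189 | // used this way; the bootstrap address, topic name and expected record count are placeholders.
190 | //
191 | //   RecordConsumer consumer = ExternalKafkaCluster.at("localhost:9092");
192 | //   List<KeyValue<String, String>> records = consumer.observe(ObserveKeyValues.on("example-topic", 2));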
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/RecordProducer.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import org.apache.kafka.clients.producer.RecordMetadata;
4 |
5 | import java.util.List;
6 |
7 | /**
8 | * Provides the means to send key-value pairs or un-keyed values to a Kafka topic. The send
9 | * operations a {@code RecordProducer} provides are synchronous in their nature.
10 | */
11 | public interface RecordProducer {
12 |
13 | /**
14 | * Sends (non-keyed) values synchronously to a Kafka topic.
15 | *
16 | * @param sendRequest
17 | * the configuration of the producer and the send operation it has to carry out
18 | * @param <V>
19 | * refers to the type of values being sent
20 | * @throws RuntimeException
21 | * in case there is an error while sending an individual Kafka record to the
22 | * designated Kafka broker
23 | * @throws InterruptedException
24 | * in case an interrupt signal has been set
25 | * @return
26 | * unmodifiable {@link java.util.List} that contains metadata on the written
27 | * Kafka records
28 | * @see SendValues
29 | */
30 | <V> List<RecordMetadata> send(SendValues<V> sendRequest) throws InterruptedException;
31 |
32 | /**
33 | * Sends (non-keyed) values synchronously to a Kafka topic. This is a convenience
34 | * method that accepts a {@link net.mguenther.kafka.junit.SendValues.SendValuesBuilder}
35 | * and immediately constructs a {@link SendValues} request from it that is passed
36 | * on to {@link #send(SendValues)}.
37 | *
38 | * @param builder
39 | * the builder used for the configuration of the producer and the send operation
40 | * it has to carry out
41 | * @param <V>
42 | * refers to the type of values being sent
43 | * @throws RuntimeException
44 | * in case there is an error while sending an individual Kafka record to the
45 | * designated Kafka broker
46 | * @throws InterruptedException
47 | * in case an interrupt signal has been set
48 | * @return
49 | * unmodifiable {@link java.util.List} that contains metadata on the written
50 | * Kafka records
51 | * @see SendValues
52 | * @see SendValues.SendValuesBuilder
53 | */
54 | default <V> List<RecordMetadata> send(SendValues.SendValuesBuilder<V> builder) throws InterruptedException {
55 | return send(builder.build());
56 | }
57 |
58 | /**
59 | * Sends (non-keyed) values synchronously and within a transaction to a Kafka topic.
60 | *
61 | * @param sendRequest
62 | * the configuration of the producer and the send operation it has to carry out
63 | * @param <V>
64 | * refers to the type of values being sent
65 | * @throws RuntimeException
66 | * in case there is an error while sending an individual Kafka record to the
67 | * designated Kafka broker
68 | * @throws InterruptedException
69 | * in case an interrupt signal has been set
70 | * @return
71 | * unmodifiable {@link java.util.List} that contains metadata on the written
72 | * Kafka records
73 | * @see SendValuesTransactional
74 | */
75 | <V> List<RecordMetadata> send(SendValuesTransactional<V> sendRequest) throws InterruptedException;
76 |
77 | /**
78 | * Sends (non-keyed) values synchronously and within a transaction to a Kafka topic.
79 | * This is a convenience method that accepts a {@link net.mguenther.kafka.junit.SendValuesTransactional.SendValuesTransactionalBuilder}
80 | * and immediately constructs a {@link SendValuesTransactional} request from it that
81 | * is passed on to {@link #send(SendValuesTransactional)}.
82 | *
83 | * @param builder
84 | * the builder used for the configuration of the producer and the send operation
85 | * it has to carry out
86 | * @param <V>
87 | * refers to the type of values being sent
88 | * @throws RuntimeException
89 | * in case there is an error while sending an individual Kafka record to the
90 | * designated Kafka broker
91 | * @throws InterruptedException
92 | * in case an interrupt signal has been set
93 | * @return
94 | * unmodifiable {@link java.util.List} that contains metadata on the written
95 | * Kafka records
96 | * @see SendValuesTransactional
97 | * @see SendValuesTransactional.SendValuesTransactionalBuilder
98 | */
99 | default <V> List<RecordMetadata> send(SendValuesTransactional.SendValuesTransactionalBuilder<V> builder) throws InterruptedException {
100 | return send(builder.build());
101 | }
102 |
103 | /**
104 | * Sends key-value pairs synchronously to a Kafka topic.
105 | *
106 | * @param sendRequest
107 | * the configuration of the producer and the send operation it has to carry out
108 | * @param <K>
109 | * refers to the type of keys being sent
110 | * @param <V>
111 | * refers to the type of values being sent
112 | * @throws RuntimeException
113 | * in case there is an error while sending an individual Kafka record to the
114 | * designated Kafka broker
115 | * @throws InterruptedException
116 | * in case an interrupt signal has been set
117 | * @return
118 | * unmodifiable {@link java.util.List} that contains metadata on the written
119 | * Kafka records
120 | * @see SendKeyValues
121 | */
122 | <K, V> List<RecordMetadata> send(SendKeyValues<K, V> sendRequest) throws InterruptedException;
123 |
124 | /**
125 | * Sends key-value pairs synchronously to a Kafka topic. This is a convenience method
126 | * that accepts a {@link net.mguenther.kafka.junit.SendKeyValues.SendKeyValuesBuilder}
127 | * and immediately constructs a {@link SendKeyValues} request from it that is passed on to
128 | * {@link #send(SendKeyValues)}.
129 | *
130 | * @param builder
131 | * the builder used for the configuration of the producer and the send operation
132 | * it has to carry out
133 | * @param <K>
134 | * refers to the type of keys being sent
135 | * @param <V>
136 | * refers to the type of values being sent
137 | * @throws RuntimeException
138 | * in case there is an error while sending an individual Kafka record to the
139 | * designated Kafka broker
140 | * @throws InterruptedException
141 | * in case an interrupt signal has been set
142 | * @return
143 | * unmodifiable {@link java.util.List} that contains metadata on the written
144 | * Kafka records
145 | * @see SendKeyValues
146 | * @see SendKeyValues.SendKeyValuesBuilder
147 | */
148 | default <K, V> List<RecordMetadata> send(SendKeyValues.SendKeyValuesBuilder<K, V> builder) throws InterruptedException {
149 | return send(builder.build());
150 | }
151 |
152 | /**
153 | * Sends key-value pairs synchronously and within a transaction to a Kafka topic.
154 | *
155 | * @param sendRequest
156 | * the configuration of the producer and the send operation it has to carry out
157 | * @param <K>
158 | * refers to the type of keys being sent
159 | * @param <V>
160 | * refers to the type of values being sent
161 | * @throws RuntimeException
162 | * in case there is an error while sending an individual Kafka record to the
163 | * designated Kafka broker
164 | * @throws InterruptedException
165 | * in case an interrupt signal has been set
166 | * @return
167 | * unmodifiable {@link java.util.List} that contains metadata on the written
168 | * Kafka records
169 | * @see SendKeyValuesTransactional
170 | */
171 | <K, V> List<RecordMetadata> send(SendKeyValuesTransactional<K, V> sendRequest) throws InterruptedException;
172 |
173 | /**
174 | * Sends key-value pairs synchronously and within a transaction to a Kafka topic.
175 | * This is a convenience method that accepts a
176 | * {@link net.mguenther.kafka.junit.SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder}
177 | * and immediately constructs a {@link SendKeyValuesTransactional} request from it that is
178 | * passed on to {@link #send(SendKeyValuesTransactional)}.
179 | *
180 | * @param builder
181 | * the builder used for the configuration of the producer and the send operation
182 | * it has to carry out
183 | * @param <K>
184 | * refers to the type of keys being sent
185 | * @param <V>
186 | * refers to the type of values being sent
187 | * @throws RuntimeException
188 | * in case there is an error while sending an individual Kafka record to the
189 | * designated Kafka broker
190 | * @throws InterruptedException
191 | * in case an interrupt signal has been set
192 | * @return
193 | * unmodifiable {@link java.util.List} that contains metadata on the written
194 | * Kafka records
195 | * @see SendKeyValuesTransactional
196 | * @see SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder
197 | */
198 | default <K, V> List<RecordMetadata> send(SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder<K, V> builder) throws InterruptedException {
199 | return send(builder.build());
200 | }
201 | }
202 |
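A minimal usage sketch for the send operations above, assuming the SendValues.to(...), SendKeyValues.to(...) and SendValuesTransactional.inTransaction(...) factory methods defined elsewhere in this project, a started EmbeddedKafkaCluster named cluster (which implements RecordProducer), and java.util.Collections / java.util.Arrays imports:

// Sketch only: non-keyed values; the returned metadata carries partition and offset per record.
List<RecordMetadata> metadata = cluster.send(SendValues.to("test-topic", "a", "b", "c"));

// Sketch only: keyed records.
List<KeyValue<String, String>> records = Collections.singletonList(new KeyValue<>("key", "value"));
cluster.send(SendKeyValues.to("test-topic", records));

// Sketch only: the transactional variant writes all values within a single Kafka transaction.
cluster.send(SendValuesTransactional.inTransaction("test-topic", Arrays.asList("d", "e")));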
--------------------------------------------------------------------------------
/src/main/java/net/mguenther/kafka/junit/SendKeyValues.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 |
3 | import lombok.Getter;
4 | import lombok.RequiredArgsConstructor;
5 | import lombok.ToString;
6 | import org.apache.kafka.clients.producer.ProducerConfig;
7 | import org.apache.kafka.common.serialization.StringSerializer;
8 |
9 | import java.util.ArrayList;
10 | import java.util.Collection;
11 | import java.util.Properties;
12 |
13 | @Getter
14 | @ToString
15 | @RequiredArgsConstructor
16 | public class SendKeyValues<K, V> {
17 |
18 | public static class SendKeyValuesBuilder<K, V> {
19 |
20 | private final String topic;
21 | private final Collection<KeyValue<K, V>> records = new ArrayList<>();
22 | private final Properties producerProps = new Properties();
23 |
24 | SendKeyValuesBuilder(final String topic, final Collection<KeyValue<K, V>> records) {
25 | this.topic = topic;
26 | this.records.addAll(records);
27 | }
28 |
29 | public <T> SendKeyValuesBuilder<K, V> with(final String propertyName, final T value) {
30 | producerProps.put(propertyName, value);
31 | return this;
32 | }
33 |
34 | public SendKeyValuesBuilder<K, V> withAll(final Properties producerProps) {
35 | this.producerProps.putAll(producerProps);
36 | return this;
37 | }
38 |
39 | private <T> void ifNonExisting(final String propertyName, final T value) {
40 | if (producerProps.get(propertyName) != null) return;
41 | producerProps.put(propertyName, value);
42 | }
43 |
44 | public SendKeyValues<K, V> useDefaults() {
45 | producerProps.clear();
46 | return build();
47 | }
48 |
49 | public SendKeyValues<K, V> build() {
50 | ifNonExisting(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
51 | ifNonExisting(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
52 | ifNonExisting(ProducerConfig.ACKS_CONFIG, "all");
53 | ifNonExisting(ProducerConfig.RETRIES_CONFIG, "1");
54 | return new SendKeyValues<>(topic, records, producerProps);
55 | }
56 | }
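// Illustrative sketch: unless a test overrides them via with(...) or withAll(...), build() falls
// back to String serializers, acks=all and a single retry. A typical override could look like
// this (the chosen property is only an example, not a recommendation made by this class):
//
//   SendKeyValues<String, String> request = SendKeyValues
//       .to("test-topic", records)
//       .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true)
//       .build();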
57 |
58 | private final String topic;
59 | private final Collection<KeyValue<K, V>> records;
60 | private final Properties producerProps;
61 |
62 | public static <K, V> SendKeyValuesBuilder