├── .gitignore ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── made-in-darmstadt.jpg ├── pom.xml └── src ├── main ├── asciidoc │ ├── chapters │ │ ├── colophon.adoc │ │ ├── consuming-records.adoc │ │ ├── embedded-kafka-cluster.adoc │ │ ├── external-kafka-cluster.adoc │ │ ├── introduction.adoc │ │ ├── managing-topics.adoc │ │ └── producing-records.adoc │ ├── css │ │ ├── article-theme.yml │ │ ├── asciidoctor.css │ │ └── coderay-asciidoctor.css │ └── user-guide.adoc └── java │ └── net │ └── mguenther │ └── kafka │ └── junit │ ├── EmbeddedConnect.java │ ├── EmbeddedConnectConfig.java │ ├── EmbeddedKafka.java │ ├── EmbeddedKafkaCluster.java │ ├── EmbeddedKafkaClusterConfig.java │ ├── EmbeddedKafkaConfig.java │ ├── EmbeddedLifecycle.java │ ├── EmbeddedZooKeeper.java │ ├── EmbeddedZooKeeperConfig.java │ ├── ExternalKafkaCluster.java │ ├── KeyValue.java │ ├── KeyValueMetadata.java │ ├── LeaderAndIsr.java │ ├── ObserveKeyValues.java │ ├── Props.java │ ├── ReadKeyValues.java │ ├── RecordConsumer.java │ ├── RecordProducer.java │ ├── SendKeyValues.java │ ├── SendKeyValuesTransactional.java │ ├── SendValues.java │ ├── SendValuesTransactional.java │ ├── TopicConfig.java │ ├── TopicManager.java │ ├── Wait.java │ └── provider │ ├── DefaultRecordConsumer.java │ ├── DefaultRecordProducer.java │ └── DefaultTopicManager.java └── test ├── java └── net │ └── mguenther │ └── kafka │ └── junit │ ├── ConnectorTest.java │ ├── EmbeddedConnectConfigTest.java │ ├── EmbeddedKafkaConfigTest.java │ ├── EmbeddedZooKeeperConfigTest.java │ ├── ExternalKafkaClusterTest.java │ ├── KeyValueTest.java │ ├── MultipleBrokersTest.java │ ├── ObserveKeyValuesTest.java │ ├── PropsTest.java │ ├── ReadKeyValuesTest.java │ ├── RecordConsumerTest.java │ ├── RecordProducerTest.java │ ├── SendKeyValuesTest.java │ ├── SendKeyValuesTransactionalTest.java │ ├── SendValuesTest.java │ ├── SendValuesTransactionalTest.java │ ├── SingleBrokerTest.java │ ├── TopicConfigTest.java │ ├── TopicManagerTest.java │ └── connector │ ├── InstrumentingConfig.java │ ├── InstrumentingConfigBuilder.java │ ├── InstrumentingSourceConnector.java │ └── InstrumentingSourceTask.java └── resources └── log4j.properties /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.nar 17 | *.ear 18 | *.zip 19 | *.tar.gz 20 | *.rar 21 | 22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 23 | hs_err_pid* 24 | 25 | **/target 26 | **/*iml 27 | .idea 28 | 29 | .classpath 30 | .factorypath 31 | .project 32 | .settings -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | 3 | language: java 4 | jdk: 5 | - openjdk8 6 | 7 | services: 8 | - docker 9 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 
8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | markus.guenther@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 
87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka for JUnit 2 | 3 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/net.mguenther.kafka/kafka-junit/badge.svg)](https://maven-badges.herokuapp.com/maven-central/net.mguenther.kafka/kafka-junit) 4 | 5 | > [!IMPORTANT] 6 | > Kafka for JUnit will not be updated regularly any longer. Unfortunately, I do not have the time to allocate my attention to this project in a sustainable fashion. Updates and bugfixes **may** still happen (support for new versions, ...), but if they do, it will be solely based on my personal requirements. Thank you for your understanding. 7 | 8 | Kafka for JUnit enables developers to start and stop a complete Kafka cluster comprised of Kafka brokers and distributed Kafka Connect workers from within a JUnit test. It also provides a rich set of convenient accessors to interact with such an embedded Kafka cluster in a lean and non-obtrusive way. 9 | 10 | Kafka for JUnit can be used to both whitebox-test individual Kafka-based components of your application or to blackbox-test applications that offer an incoming and/or outgoing Kafka-based interface. 11 | 12 | ## Using Kafka for JUnit in your tests 13 | 14 | Kafka for JUnit provides the necessary infrastructure to exercise your Kafka-based components against an embeddable Kafka cluster. 
However, Kafka for JUnit got you covered as well if you are simply interested in using the convenient accessors against Kafka clusters that are already present in your infrastructure. Checkout sections *Working with an embedded Kafka cluster* and *Working with an external Kafka cluster* in the [user's guide](https://mguenther.github.io/kafka-junit) for more information. 15 | 16 | ```java 17 | import org.junit.jupiter.api.AfterEach; 18 | import org.junit.jupiter.api.BeforeEach; 19 | import org.junit.jupiter.api.Test; 20 | 21 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 22 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 23 | 24 | class KafkaTest { 25 | 26 | private EmbeddedKafkaCluster kafka; 27 | 28 | @BeforeEach 29 | void setupKafka() { 30 | kafka = provisionWith(defaultClusterConfig()); 31 | kafka.start(); 32 | } 33 | 34 | @AfterEach 35 | void tearDownKafka() { 36 | kafka.stop(); 37 | } 38 | 39 | @Test 40 | void shouldWaitForRecordsToBePublished() throws Exception { 41 | kafka.send(to("test-topic", "a", "b", "c")); 42 | kafka.observe(on("test-topic", 3)); 43 | } 44 | } 45 | ``` 46 | 47 | This starts an embedded Kafka cluster and submits three records to the topic named `test-topic`. The call to `kafka.observe(on("test-topic", 3))` watches that same topic for a configurable amount of time and checks if it observes the previously submitted records. If it doesn't, Kafka for JUnit raises an `AssertionError` which would fail the test. Surely, [Kafka for JUnit provides lots of more ways to interact with a Kafka cluster]((https://mguenther.github.io/kafka-junit)). 48 | 49 | Since `EmbeddedKafkaCluster` implements the `AutoCloseable` interface, you can achieve the same behavior using a `try-with-resources`-construct. 50 | 51 | ```java 52 | import org.junit.jupiter.api.Test; 53 | 54 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 55 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 56 | 57 | class KafkaTest { 58 | 59 | @Test 60 | void shouldWaitForRecordsToBePublished() throws Exception { 61 | 62 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) { 63 | kafka.start(); 64 | kafka.send(to("test-topic", "a", "b", "c")); 65 | kafka.observe(on("test-topic", 3)); 66 | } 67 | } 68 | } 69 | ``` 70 | 71 | ### Supported versions of Apache Kafka 72 | 73 | | Version of Kafka for JUnit | Supports (up to) | 74 | |----------------------------|--------------------| 75 | | 3.6.0 | Apache Kafka 3.6.1 | 76 | | 3.5.1 | Apache Kafka 3.5.1 | 77 | | 3.4.0 | Apache Kafka 3.4.0 | 78 | | 3.3.0 | Apache Kafka 3.3.1 | 79 | | 3.2.2 | Apache Kafka 3.2.3 | 80 | | 3.1.1 | Apache Kafka 3.1.0 | 81 | | 3.0.1 | Apache Kafka 3.0.0 | 82 | | 2.8.0 | Apache Kafka 2.8.0 | 83 | | 2.7.0 | Apache Kafka 2.7.0 | 84 | | 2.6.0 | Apache Kafka 2.6.0 | 85 | | 2.5.1 | Apache Kafka 2.5.1 | 86 | | 2.4.0 | Apache Kafka 2.4.0 | 87 | | 2.3.0 | Apache Kafka 2.3.0 | 88 | | 2.2.0 | Apache Kafka 2.2.1 | 89 | | 2.1.1 | Apache Kafka 2.1.1 | 90 | | 2.0.0 | Apache Kafka 2.0.0 | 91 | | 1.0.0 | Apache Kafka 1.1.1 | 92 | 93 | ## Interacting with the Kafka cluster 94 | 95 | See the [comprehensive user's guide](https://mguenther.github.io/kafka-junit) for examples on how to interact with the Kafka cluster from within your JUnit test. 96 | 97 | ## License 98 | 99 | This work is released under the terms of the Apache 2.0 license. 100 | 101 |

102 | 103 | 104 | mguenther.net 105 | 106 |

107 | -------------------------------------------------------------------------------- /made-in-darmstadt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mguenther/kafka-junit/015fa438199750cf3a50209744ed0b24a70eb5a6/made-in-darmstadt.jpg -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/colophon.adoc: -------------------------------------------------------------------------------- 1 | [[section:colophon]] 2 | 3 | == License 4 | 5 | This work is released under the terms of the Apache 2.0 license. 6 | -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/embedded-kafka-cluster.adoc: -------------------------------------------------------------------------------- 1 | [[section:embedded-kafka-cluster]] 2 | 3 | == Working with an embedded Kafka cluster 4 | 5 | Kafka for JUnit is able to spin up a fully-fledged embedded Kafka cluster that is accessible via class `EmbeddedKafkaCluster`. `EmbeddedKafkaCluster` implements the interfaces `RecordProducer`, `RecordConsumer` and `TopicManager` and thus provides convenient accessors to interact with the cluster. 6 | 7 | Using `EmbeddedKafkaCluster` in a JUnit test is quite simple. The necessary code to set it up is minimal if you are comfortable with the default configuration. 8 | 9 | [source,java] 10 | ---- 11 | import org.junit.jupiter.api.AfterEach; 12 | import org.junit.jupiter.api.BeforeEach; 13 | 14 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 15 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 16 | 17 | class KafkaTest { 18 | 19 | private EmbeddedKafkaCluster kafka; 20 | 21 | @BeforeEach 22 | void setupKafka() { 23 | kafka = provisionWith(defaultClusterConfig()); 24 | kafka.start(); 25 | } 26 | 27 | @AfterEach 28 | void tearDownKafka() { 29 | kafka.stop(); 30 | } 31 | } 32 | ---- 33 | 34 | Kafka for JUnit uses the Builder pattern extensively to provide a fluent API when provisioning an embedded Kafka cluster. Let's take a closer look at method `EmbeddedKafkaCluster.provisionWith`. This method consumes a configuration of type `EmbeddedKafkaClusterConfig`. `EmbeddedKafkaClusterConfig` uses defaults for the Kafka broker and ZooKeeper. By default, Kafka Connect will not be provisioned at all. The builder of `EmbeddedKafkaClusterConfig` provides a `provisionWith` method as well and is overloaded to accept configurations of type `EmbeddedZooKeeperConfig`, `EmbeddedKafkaConfig` and `EmbeddedConnectConfig`. The following listing demonstrates how to adjust the configuration of the embedded Kafka broker wrt. the default number of partitions for newly created topics. 35 | 36 | [source,java] 37 | ---- 38 | EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig() 39 | .configure(kafkaConnect() 40 | .with(KafkaConfig$.MODULE$.NumPartitionsProp(), "5"))); 41 | ---- 42 | 43 | The builders for those configurations provide a uniform interface for overriding defaults, comprising two methods `with(String propertyName, T value)` and `withAll(java.util.Properties overrides)`. To override a default value, you simply provide the name of the configuration parameter as defined by the resp. Kafka component along with the new value. 44 | 45 | Using the default setting will provide you with a single embedded Kafka broker. This ought to be sufficient for most cases. 
However, there are scenarios which require testing against multiple brokers that form a cluster. Forming an embedded cluster with multiple brokers is done by adjusting the default provisioning of your test case. See the listing underneath for an example. 46 | 47 | [source,java] 48 | ---- 49 | 50 | EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig() 51 | .configure(brokers() 52 | .withNumberOfBrokers(3) 53 | .with(KafkaConfig$.MODULE$.NumPartitionsProp(), "5") 54 | .with(KafkaConfig$.MODULE$.DefaultReplicationFactorProp(), "3") 55 | .with(KafkaConfig$.MODULE$.MinInSyncReplicasProp(), "2") 56 | .with(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), "3") 57 | .with(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp(), "3") 58 | .with(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp(), "2"))); 59 | ---- 60 | 61 | Using this configuration, we end up with a total of three brokers that form an embedded Kafka cluster, while the defaults for topic partitions and replicas have been adjusted to be consistent with the size of the cluster. 62 | 63 | Of course, you can also use the `try-with-resources`-pattern to fire up an embedded cluster. Have a look at the following test setup. 64 | 65 | [source,java] 66 | ---- 67 | import org.junit.jupiter.api.Test; 68 | 69 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 70 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 71 | 72 | class KafkaTest { 73 | 74 | @Test 75 | void shouldWaitForRecordsToBePublished() throws Exception { 76 | 77 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) { 78 | kafka.start(); 79 | kafka.send(to("test-topic", "a", "b", "c")); 80 | kafka.observe(on("test-topic", 3)); 81 | } 82 | } 83 | } 84 | ---- 85 | 86 | See sections on <>, <> and <> for further reference on how to interact with the cluster. 87 | 88 | === Failure Modes 89 | 90 | `EmbeddedKafkaCluster` provides the means to disconnect - and re-connect of course - specific embedded Kafka brokers. All brokers in the embedded cluster get broker ID assigned during cluster formation. This broker ID is an `Integer`-based value and starts at 1. The broker ID increases by 1 for every subsequent embedded Kafka broker that is started during cluster formation. 91 | 92 | Clusters stay fixed wrt. the maximum number of embedded brokers. But individual brokers can, given their broker ID, be disconnected from the rest of the cluster to test for failure scenarios. Such failure scenarios include: 93 | 94 | * How does may Kafka-based component behave in the presence of broker outages? 95 | * What happens if the In-Sync-Replica Set (ISR) of a topic that my application consumes from shrinks below its minimum size? 96 | * Is my application able to progress after brokers re-connect and form a working cluster? 97 | 98 | ==== Disconnect and reconnect a single broker 99 | 100 | The following listing shows how to disconnect and re-connect a certain broker, while fetching the ISR of a dedicated topic in between these operations to determine whether the cluster behaves correctly. 101 | 102 | NOTE: If you do use this feature of Kafka for JUnit, then please give the embedded cluster some time to handle broker churn. Identifying that a leader for a topic-partition is not available and conducting the leader election takes some time. In the example underneath we introduce a delay of five seconds in between operations that affect cluster membership. 
103 | 104 | [source,java] 105 | ---- 106 | kafka.createTopic(TopicConfig.withName("test-topic") 107 | .withNumberOfPartitions(5) 108 | .withNumberOfReplicas(3)); 109 | 110 | delay(5); 111 | 112 | Set leaders = kafka.fetchLeaderAndIsr("test-topic") 113 | .values() 114 | .stream() 115 | .map(LeaderAndIsr::getLeader) 116 | .collect(Collectors.toSet()); 117 | 118 | assertThat(leaders.contains(1)).isTrue(); 119 | assertThat(leaders.contains(2)).isTrue(); 120 | assertThat(leaders.contains(3)).isTrue(); 121 | 122 | kafka.disconnect(1); 123 | 124 | delay(5); 125 | 126 | Set leadersAfterDisconnect = kafka.fetchLeaderAndIsr("test-topic") 127 | .values() 128 | .stream() 129 | .map(LeaderAndIsr::getLeader) 130 | .collect(Collectors.toSet()); 131 | 132 | assertThat(leadersAfterDisconnect.contains(1)).isFalse(); 133 | assertThat(leadersAfterDisconnect.contains(2)).isTrue(); 134 | assertThat(leadersAfterDisconnect.contains(3)).isTrue(); 135 | 136 | kafka.connect(1); 137 | 138 | delay(5); 139 | 140 | Set leadersAfterReconnect = kafka.fetchLeaderAndIsr("test-topic") 141 | .values() 142 | .stream() 143 | .map(LeaderAndIsr::getLeader) 144 | .collect(Collectors.toSet()); 145 | 146 | assertThat(leadersAfterReconnect.contains(1)).isTrue(); 147 | assertThat(leadersAfterReconnect.contains(2)).isTrue(); 148 | assertThat(leadersAfterReconnect.contains(3)).isTrue(); 149 | ---- 150 | 151 | ==== Disconnect until In-Sync-Replica Set falls below minimum size 152 | 153 | The following listing shows how to disconnect the In-Sync-Replica Set (ISR) for a given topic until its ISR falls below its minimum size. 154 | 155 | NOTE: If you do use this feature of Kafka for JUnit, then please give the embedded cluster some time to handle broker churn. Identifying that a leader for a topic-partition is not available and conducting the leader election takes some time. In the example underneath we introduce a delay of five seconds in between operations that affect cluster membership. 156 | 157 | [source,java] 158 | ---- 159 | // Create a topic and configure the number of replicas as well as the size of the ISR 160 | 161 | kafka.createTopic(TopicConfig.withName("test-topic") 162 | .withNumberOfPartitions(5) 163 | .withNumberOfReplicas(3) 164 | .with("min.insync.replicas", "2")); 165 | 166 | // Wait a bit to give the cluster a chance to properly assign topic-partitions to leaders 167 | 168 | delay(5); 169 | 170 | // Disconnect until the remaining number of brokers fall below the minimum ISR size 171 | 172 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 173 | 174 | delay(5); 175 | 176 | // Submitting records to this topic will yield a NotEnoughReplicasException 177 | 178 | kafka.send(SendValues.to("test-topic", "A")); 179 | ---- 180 | 181 | The last line of the listing shows the effect of an ISR that can no longer operate reliably. Your Kafka-based component or application would run concurrently to this test so that you are able to observe if it behaves correctly (e.g. by checking that the component progresses normally if the ISR is restored). 182 | 183 | ==== Restoring the In-Sync-Replica Set 184 | 185 | Restoring the In-Sync-Replica Set is easy, as method `disconnectUntilIsrFallsBelowMinimumSize` returns a list of broker IDs for all brokers that have been deactivated during the shrinking. The following listing shows how to restore the ISR. 
186 | 187 | [source,java] 188 | ---- 189 | kafka.createTopic(TopicConfig.withName("test-topic") 190 | .withNumberOfPartitions(5) 191 | .withNumberOfReplicas(3) 192 | .with("min.insync.replicas", "2")); 193 | 194 | delay(5); 195 | 196 | Set disconnectedBrokers = kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 197 | 198 | delay(5); 199 | 200 | // Do some testing, trigger some operations, observe the behavior of your application 201 | 202 | kafka.connect(disconnectedBrokers); 203 | 204 | // Give the cluster some time to assign leaders and reestablish the ISR 205 | 206 | delay(5); 207 | 208 | // Do some more testing ... 209 | ---- -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/external-kafka-cluster.adoc: -------------------------------------------------------------------------------- 1 | [[section:external-kafka-cluster]] 2 | 3 | == Working with an external Kafka cluster 4 | 5 | Kafka for JUnit can be used to work with an external Kafka cluster. This is useful if you want to execute your tests against a staging/testing environment or if you already use other testing libraries (e.g. Testcontainers) that spin up a Kafka cluster on your local machine, but want to use the convenient accessors provided by Kafka for JUnit. 6 | 7 | Class `ExternalKafkaCluster` integrates an external cluster. Just like `EmbeddableKafkaCluster`, an `ExternalKafkaCluster` also implements the interfaces `RecordProducer`, `RecordConsumer` and `TopicManager` and thus provides convenient accessors to interact with the cluster. 8 | 9 | Using `ExternalKafkaCluster` in a JUnit test is easy. The listing below shows the necessary code to use `ExternalKafkaCluster` in combination with Testcontainers. 10 | 11 | [source,java] 12 | ---- 13 | @Testcontainers 14 | class ExternalKafkaClusterTest { 15 | 16 | // This is not part of Kafka for JUnit, but a sub-module provided 17 | // by Testcontainers (org.testcontainers:kafka) 18 | @Container 19 | private KafkaContainer kafkaContainer = new KafkaContainer(); 20 | 21 | @Test 22 | @DisplayName("should be able to observe records written to an external Kafka cluster") 23 | void externalKafkaClusterShouldWorkWithExternalResources() throws Exception { 24 | 25 | ExternalKafkaCluster kafka = ExternalKafkaCluster.at(kafkaContainer.getBootstrapServers()); 26 | 27 | // use the accessors that cluster provides to interact with the Kafka container 28 | 29 | [...] 30 | } 31 | } 32 | ---- 33 | 34 | See sections on <>, <> and <> for further reference on how to interact with the cluster. -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/introduction.adoc: -------------------------------------------------------------------------------- 1 | [[section:introduction]] 2 | 3 | == Introduction 4 | 5 | Kafka for JUnit enables developers to start and stop a complete Kafka cluster comprised of Kafka brokers and distributed Kafka Connect workers from within a JUnit test. It also provides a rich set of convenient accessors to interact with such an embedded Kafka cluster in a lean and non-obtrusive way. 6 | 7 | Kafka for JUnit can be used to both whitebox-test individual Kafka-based components of your application or to blackbox-test applications that offer an incoming and/or outgoing Kafka-based interface. 
8 | 9 | == Using Kafka for JUnit in your tests 10 | 11 | Kafka for JUnit provides the necessary infrastructure to exercise your Kafka-based components against an embeddable Kafka cluster (cf. <>). However, Kafka for JUnit got you covered as well if you are simply interested in using the convenient accessors against Kafka clusters that are already present in your infrastructure (cf. section <>). 12 | 13 | [source,java] 14 | ---- 15 | import org.junit.jupiter.api.AfterEach; 16 | import org.junit.jupiter.api.BeforeEach; 17 | import org.junit.jupiter.api.Test; 18 | 19 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 20 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 21 | 22 | class KafkaTest { 23 | 24 | private EmbeddedKafkaCluster kafka; 25 | 26 | @BeforeEach 27 | void setupKafka() { 28 | kafka = provisionWith(defaultClusterConfig()); 29 | kafka.start(); 30 | } 31 | 32 | @AfterEach 33 | void tearDownKafka() { 34 | kafka.stop(); 35 | } 36 | 37 | @Test 38 | void shouldWaitForRecordsToBePublished() throws Exception { 39 | kafka.send(to("test-topic", "a", "b", "c")); 40 | kafka.observe(on("test-topic", 3)); 41 | } 42 | } 43 | ---- 44 | 45 | This starts an embedded Kafka cluster and submits three records to the topic named `test-topic`. The call to `kafka.observe(on("test-topic", 3))` watches that same topic for a configurable amount of time and checks if it observes the previously submitted records. If it doesn't, Kafka for JUnit raises an `AssertionError` which would fail the test. Surely, Kafka for JUnit provides lots of more ways to interact with a Kafka cluster. 46 | 47 | Since `EmbeddedKafkaCluster` implements the `AutoCloseable` interface, you can achieve the same behavior using a `try-with-resources`-construct. 
48 | 49 | [source,java] 50 | ---- 51 | import org.junit.jupiter.api.Test; 52 | 53 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 54 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; 55 | 56 | class KafkaTest { 57 | 58 | @Test 59 | void shouldWaitForRecordsToBePublished() throws Exception { 60 | 61 | try (EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig())) { 62 | kafka.start(); 63 | kafka.send(to("test-topic", "a", "b", "c")); 64 | kafka.observe(on("test-topic", 3)); 65 | } 66 | } 67 | } 68 | ---- 69 | 70 | === Supported versions of Apache Kafka 71 | 72 | |=== 73 | | Version of Kafka for JUnit | Supports 74 | 75 | | 3.6.0 76 | | Apache Kafka 3.6.1 77 | 78 | | 3.5.1 79 | | Apache Kafka 3.5.1 80 | 81 | | 3.4.0 82 | | Apache Kafka 3.4.0 83 | 84 | | 3.3.0 85 | | Apache Kafka 3.3.1 86 | 87 | | 3.2.2 88 | | Apache Kafka 3.2.3 89 | 90 | | 3.1.1 91 | | Apache Kafka 3.1.0 92 | 93 | | 3.0.1 94 | | Apache Kafka 3.3.0 95 | 96 | | 2.8.0 97 | | Apache Kafka 2.8.0 98 | 99 | | 2.7.0 100 | | Apache Kafka 2.7.0 101 | 102 | | 2.6.0 103 | | Apache Kafka 2.6.0 104 | 105 | | 2.5.1 106 | | Apache Kafka 2.5.1 107 | 108 | | 2.4.0 109 | | Apache Kafka 2.4.0 110 | 111 | | 2.3.0 112 | | Apache Kafka 2.3.0 113 | 114 | | 2.2.0 115 | | Apache Kafka 2.2.1 116 | 117 | | 2.1.1 118 | | Apache Kafka 2.1.1 119 | 120 | | 2.0.0 121 | | Apache Kafka 2.0.0 122 | 123 | | 1.0.0 124 | | Apache Kafka 1.1.1 125 | 126 | |=== 127 | -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/managing-topics.adoc: -------------------------------------------------------------------------------- 1 | [[section:managing-topics]] 2 | 3 | == Managing topics 4 | 5 | Class `EmbeddedKafkaClusterRule` as well as `EmbeddedKafkaCluster` expose convenience methods for managing Kafka topics. Have a look at the `TopicManager` interface (Java omitted for brevity). 6 | 7 | [source,java] 8 | ---- 9 | public interface TopicManager { 10 | void createTopic(TopicConfig config); 11 | void deleteTopic(String topic); 12 | boolean exists(String topic); 13 | Map fetchLeaderAndIsr(String topic); 14 | Properties fetchTopicConfig(String topic); 15 | } 16 | ---- 17 | 18 | Implementations of the `TopicManager` interface currently use the `AdminClient` implementation of the Kafka Client library for topic management. 19 | 20 | All operations are executed *synchronously*. 21 | 22 | === Creating a topic 23 | 24 | [source,java] 25 | ---- 26 | kafka.createTopic(TopicConfig.withName("test-topic")); 27 | ---- 28 | 29 | NOTE: By default, Kafka for JUnit enables the automatic creation of topics at the broker with defaults that should be sensible for local testing. However, if you find yourself in the situation to create a topic with a specific replication factor or number of partitions that deviate from their default setting, you should create that topic with the respective settings before writing the first Kafka record to it. 30 | 31 | === Deleting a topic 32 | 33 | [source,java] 34 | ---- 35 | kafka.deleteTopic("test-topic"); 36 | ---- 37 | 38 | NOTE: Deleting a topic will only set a deletion marker for that topic. The topic may not be deleted immediately after `deleteTopic` completes. 39 | 40 | === Determine whether a topic exists 41 | 42 | [source,java] 43 | ---- 44 | kafka.exists("test-topic"); 45 | ---- 46 | 47 | NOTE: Returns `true` even if the topic is marked for deletion. 
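Since topic deletion only marks the topic for removal, tests that re-create a topic under the same name may have to wait until the deletion has actually been carried out. The listing underneath is a minimal sketch that combines `deleteTopic` and `exists` for this purpose; the polling helper and its timeout are illustrative and not part of Kafka for JUnit.

[source,java]
----
// Hypothetical helper, not part of Kafka for JUnit: polls the cluster until
// the topic is gone or the given timeout (in seconds) elapses.
private void waitUntilTopicIsDeleted(final EmbeddedKafkaCluster kafka,
                                     final String topic,
                                     final int timeoutInSeconds) throws InterruptedException {
    int remainingAttempts = timeoutInSeconds;
    while (kafka.exists(topic) && remainingAttempts-- > 0) {
        Thread.sleep(1_000);
    }
    if (kafka.exists(topic)) {
        throw new AssertionError("Topic " + topic + " has not been deleted in time.");
    }
}

// Usage from within a test
kafka.deleteTopic("test-topic");
waitUntilTopicIsDeleted(kafka, "test-topic", 10);
----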
48 | 49 | === Retrieving the leader and the In-Sync-Replica Set (ISR) 50 | 51 | In case you have multiple brokers running and want to query their assignments and roles for a specific topic, you can use `TopicManager#fetchLeaderAndIsr` to retrieve that kind of information. The method returns an unmodifiable `java.util.Map` of `LeaderAndIsr` instances by their designated partition. The listing underneath shows how to retrieve this information for the topic named `test-topic`. 52 | 53 | [source,java] 54 | ---- 55 | Map leaderAndIsr = kafka.fetchLeaderAndIsr("test-topic"); 56 | ---- 57 | 58 | The type `LeaderAndIsr` is not to be confused with the same type in package `kafka.api`. The `LeaderAndIsr` implementation Kafka for JUnit is a simple transfer object that only contains the ID of the leader node and the IDs of all nodes that comprise the ISR. 59 | 60 | === Retrieving the topic configuration remotely 61 | 62 | Looking up the topic configuration by accessing the cluster is easily done using the `TopicManager`. 63 | 64 | [source,java] 65 | ---- 66 | Properties topicConfig = kafka.fetchTopicConfig("test-topic"); 67 | ---- -------------------------------------------------------------------------------- /src/main/asciidoc/chapters/producing-records.adoc: -------------------------------------------------------------------------------- 1 | [[section:producing-records]] 2 | 3 | == Producing records 4 | 5 | Class `EmbeddedKafkaClusterRule` as well as `EmbeddedKafkaCluster` expose convenience methods for producing new Kafka records. Have a look at the `RecordProducer` interface (Javadoc omitted for brevity). 6 | 7 | [source,java] 8 | ---- 9 | public interface RecordProducer { 10 | 11 | List send(SendValues sendRequest) throws InterruptedException; 12 | List send(SendValuesTransactional sendRequest) throws InterruptedException; 13 | List send(SendKeyValues sendRequest) throws InterruptedException; 14 | List send(SendKeyValuesTransactional sendRequest) throws InterruptedException; 15 | } 16 | ---- 17 | 18 | Calling `send` using an instance of `SendValues` does just that: It produces non-keyed Kafka records that only feature a value. The key of a record that has been produced this way is simply `null`. If you wish to associate a key, you can do so by passing an instance of `SendKeyValues` to the `send` method. Both `SendValues` and `SendKeyValues` use the link:https://en.wikipedia.org/wiki/Builder_pattern[Builder pattern] so that creating the resp. send parameterization is easy and does not pollute your test code with any kind of boilerplate. 19 | 20 | Implementations of the `RecordProducer` interface use the high-level producer API that comes with Apache Kafka. Hence, the underlying producer is a `KafkaProducer`. This `KafkaProducer` is fully parameterizable via the builders of both `SendValues` and `SendKeyValues`. 21 | 22 | All `send` operations are executed *synchronously*. 23 | 24 | With these abstractions in place, sending content to your embedded Kafka cluster is easy. Have a look at the following examples . One thing you should notice is that you do not have to specify `bootstrap.servers`. Kafka for JUnit adjusts a given client configuration so that you can start off with meaningful defaults that work out-of-the-box. You'll only have to provide configuration overrides if it is absolutely necessary for your test. 
25 | 26 | === Sending non-keyed values using defaults 27 | 28 | [source,java] 29 | ---- 30 | kafka.send(SendValues.to("test-topic", "a", "b", "c")); 31 | ---- 32 | 33 | === Sending non-keyed values using overrides 34 | 35 | [source,java] 36 | ---- 37 | kafka.send(SendValues.to("test-topic", "a", "b", "c") 38 | .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") 39 | .with(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")); 40 | ---- 41 | 42 | === Sending non-keyed values transactionally 43 | 44 | [source,java] 45 | ---- 46 | kafka 47 | .send(SendValuesTransactional 48 | .inTransaction("test-topic", Arrays.asList("a", "b", "c"))); 49 | ---- 50 | 51 | NOTE: The API of Kafka for JUnit has been designed with great care and readability in mind. Using `static` imports for factory methods shows that we can interact with the embedded Kafka cluster in a lean and readable way. 52 | 53 | [source,java] 54 | ---- 55 | kafka.send(inTransaction("test-topic", Arrays.asList("a", "b", "c"))); 56 | ---- 57 | 58 | === Sending keyed records using defaults 59 | 60 | [source,java] 61 | ---- 62 | List> records = new ArrayList<>(); 63 | 64 | records.add(new KeyValue<>("aggregate", "a")); 65 | records.add(new KeyValue<>("aggregate", "b")); 66 | records.add(new KeyValue<>("aggregate", "c")); 67 | 68 | kafka.send(SendKeyValues.to("test-topic", records)); 69 | ---- 70 | 71 | === Sending keyed records using overrides 72 | 73 | [source,java] 74 | ---- 75 | List> records = new ArrayList<>(); 76 | 77 | records.add(new KeyValue<>("aggregate", "a")); 78 | records.add(new KeyValue<>("aggregate", "b")); 79 | records.add(new KeyValue<>("aggregate", "c")); 80 | 81 | kafka.send(SendKeyValues.to("test-topic", records) 82 | .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") 83 | .with(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")); 84 | ---- 85 | 86 | === Sending keyed records transactionally 87 | 88 | [source,java] 89 | ---- 90 | List> records = new ArrayList<>(); 91 | 92 | records.add(new KeyValue<>("aggregate", "a")); 93 | records.add(new KeyValue<>("aggregate", "b")); 94 | records.add(new KeyValue<>("aggregate", "c")); 95 | 96 | kafka.send(inTransaction("test-topic", records)); 97 | ---- 98 | 99 | === Sending records or values transactionally to multiple topics 100 | 101 | [source,java] 102 | ---- 103 | kafka.send(SendValuesTransactional 104 | .inTransaction("test-topic-1", Arrays.asList("a", "b")) 105 | .inTransaction("test-topic-2", Arrays.asList("c", "d"))); 106 | ---- 107 | 108 | === Failing a transaction on purpose 109 | 110 | [source,java] 111 | ---- 112 | kafka.send(SendValuesTransactional 113 | .inTransaction("test-topic", Arrays.asList("a", "b")) 114 | .failTransaction()); 115 | ---- 116 | 117 | Defining a `SendValuesTransactional` request with `failTransaction` will write records to the Kafka log, but abort the transaction they belong to. This allows you to test if your application-specific Kafka consumers adhere to the transactional guarantees they claim to satisfy, since only a correct implementation of a consumer with `isolation.level` set to `read_committed` must see - and process - those records. 118 | 119 | NOTE: This works for `SendKeyValuesTransactional` as well. 
120 | 121 | === Attaching record headers 122 | 123 | [source,java] 124 | ---- 125 | KeyValue record = new KeyValue<>("a", "b"); 126 | record.addHeader("client", "kafka-junit-test".getBytes("utf-8")); 127 | 128 | kafka.send(SendKeyValues 129 | .to("test-topic", Collections.singletonList(record))); 130 | ---- 131 | 132 | NOTE: You can also pre-construct an instance of `Headers` and pass it along via the constructor of a `KeyValue`. 133 | -------------------------------------------------------------------------------- /src/main/asciidoc/css/article-theme.yml: -------------------------------------------------------------------------------- 1 | font: 2 | catalog: 3 | # Noto Serif supports Latin, Latin-1 Supplement, Latin Extended-A, Greek, Cyrillic, Vietnamese & an assortment of symbols 4 | Noto Serif: 5 | normal: notoserif-regular-subset.ttf 6 | bold: notoserif-bold-subset.ttf 7 | italic: notoserif-italic-subset.ttf 8 | bold_italic: notoserif-bold_italic-subset.ttf 9 | # M+ 1mn supports ASCII and the circled numbers used for conums 10 | M+ 1mn: 11 | normal: mplus1mn-regular-ascii-conums.ttf 12 | bold: mplus1mn-bold-ascii.ttf 13 | italic: mplus1mn-italic-ascii.ttf 14 | bold_italic: mplus1mn-bold_italic-ascii.ttf 15 | # M+ 1p supports Latin, Latin-1 Supplement, Latin Extended, Greek, Cyrillic, Vietnamese, Japanese & an assortment of symbols 16 | # It also provides arrows for ->, <-, => and <= replacements in case these glyphs are missing from font 17 | M+ 1p Fallback: 18 | normal: mplus1p-regular-fallback.ttf 19 | bold: mplus1p-regular-fallback.ttf 20 | italic: mplus1p-regular-fallback.ttf 21 | bold_italic: mplus1p-regular-fallback.ttf 22 | fallbacks: 23 | - M+ 1p Fallback 24 | page: 25 | background_color: ffffff 26 | layout: portrait 27 | margin: [1.3in, 0.72in, 0.8in, 0.72in] 28 | size: A4 29 | base: 30 | align: justify 31 | # color as hex string (leading # is optional) 32 | font_color: 333333 33 | # color as RGB array 34 | #font_color: [51, 51, 51] 35 | # color as CMYK array (approximated) 36 | #font_color: [0, 0, 0, 0.92] 37 | #font_color: [0, 0, 0, 92%] 38 | #Supported Fonts are Times-Roman, Helvetica, Courier and fonts in the catalog (see above) 39 | font_family: Noto Serif 40 | # choose one of these font_size/line_height_length combinations 41 | #font_size: 14 42 | #line_height_length: 20 43 | #font_size: 11.25 44 | #line_height_length: 18 45 | #font_size: 11.2 46 | #line_height_length: 16 47 | font_size: 10 48 | #line_height_length: 15 49 | # correct line height for Noto Serif metrics 50 | line_height_length: 12 51 | #font_size: 11.25 52 | #line_height_length: 18 53 | line_height: $base_line_height_length / $base_font_size 54 | font_size_large: round($base_font_size * 1.25) 55 | font_size_small: round($base_font_size * 0.85) 56 | font_size_min: $base_font_size * 0.75 57 | font_style: normal 58 | border_color: eeeeee 59 | border_radius: 4 60 | border_width: 0.5 61 | # FIXME vertical_rhythm is weird; we should think in terms of ems 62 | #vertical_rhythm: $base_line_height_length * 2 / 3 63 | # correct line height for Noto Serif metrics (comes with built-in line height) 64 | vertical_rhythm: $base_line_height_length 65 | horizontal_rhythm: $base_line_height_length 66 | # QUESTION should vertical_spacing be block_spacing instead? 
67 | vertical_spacing: $vertical_rhythm 68 | link: 69 | font_color: 428bca 70 | # literal is currently used for inline monospaced in prose and table cells 71 | literal: 72 | font_color: b12146 73 | font_family: M+ 1mn 74 | heading: 75 | #font_color: 181818 76 | font_color: $base_font_color 77 | #font_family: $base_font_family 78 | font_family: Helvetica 79 | font_style: bold 80 | # h1 is used for part titles 81 | h1_font_size: floor($base_font_size * 2.6) 82 | # h2 is used for chapter titles 83 | h2_font_size: floor($base_font_size * 2.15) 84 | h3_font_size: round($base_font_size * 1.7) 85 | h4_font_size: $base_font_size_large 86 | h5_font_size: $base_font_size 87 | h6_font_size: $base_font_size_small 88 | #line_height: 1.4 89 | # correct line height for Noto Serif metrics (comes with built-in line height) 90 | line_height: 1 91 | margin_top: $vertical_rhythm * 0.4 92 | margin_bottom: $vertical_rhythm * 0.9 93 | title_page: 94 | align: right 95 | #logo: 96 | # top: 10% 97 | # image: image:tc-logo.png[scaledwidth=35%] 98 | title: 99 | top: 55% 100 | font_size: $heading_h1_font_size 101 | # font_color: 999999 102 | font_style: bold 103 | line_height: 1.2 104 | subtitle: 105 | font_size: $heading_h3_font_size 106 | #font_style: bold_italic 107 | line_height: 1 108 | authors: 109 | margin_top: $base_font_size * 1.25 110 | font_size: $base_font_size_large 111 | font_color: 999999 112 | revision: 113 | margin_top: $base_font_size * 1.25 114 | font_color: 999999 115 | block: 116 | margin_top: 0 117 | margin_bottom: $vertical_rhythm 118 | caption: 119 | align: left 120 | font_style: italic 121 | # FIXME perhaps set line_height instead of / in addition to margins? 122 | margin_inside: $vertical_rhythm / 3 123 | #margin_inside: $vertical_rhythm / 4 124 | margin_outside: 0 125 | lead: 126 | font_size: $base_font_size_large 127 | line_height: 1.4 128 | abstract: 129 | font_color: 5c6266 130 | font_size: $lead_font_size 131 | line_height: $lead_line_height 132 | font_style: italic 133 | first_line_font_style: bold 134 | admonition: 135 | border_color: $base_border_color 136 | border_width: $base_border_width 137 | padding: [0, $horizontal_rhythm, 0, $horizontal_rhythm] 138 | # icon: 139 | # tip: 140 | # name: fa-lightbulb-o 141 | # stroke_color: 111111 142 | # size: 24 143 | blockquote: 144 | font_color: $base_font_color 145 | font_size: $base_font_size_large 146 | border_color: $base_border_color 147 | border_width: 5 148 | padding: [$vertical_rhythm / 2, $horizontal_rhythm, $vertical_rhythm / -2, $horizontal_rhythm + $blockquote_border_width / 2] 149 | cite_font_size: $base_font_size_small 150 | cite_font_color: 999999 151 | # code is used for source blocks (perhaps change to source or listing?) 
152 | code: 153 | font_color: $base_font_color 154 | font_family: $literal_font_family 155 | font_size: ceil($base_font_size) 156 | padding: $code_font_size 157 | line_height: 1.25 158 | background_color: f5f5f5 159 | border_color: cccccc 160 | border_radius: $base_border_radius 161 | border_width: 0.75 162 | conum: 163 | font_family: M+ 1mn 164 | font_color: $literal_font_color 165 | font_size: $base_font_size 166 | line_height: 4 / 3 167 | example: 168 | border_color: $base_border_color 169 | border_radius: $base_border_radius 170 | border_width: 0.75 171 | background_color: transparent 172 | # FIXME reenable margin bottom once margin collapsing is implemented 173 | padding: [$vertical_rhythm, $horizontal_rhythm, 0, $horizontal_rhythm] 174 | image: 175 | align: left 176 | prose: 177 | margin_top: 0 178 | margin_bottom: $vertical_rhythm 179 | sidebar: 180 | border_color: $page_background_color 181 | border_radius: $base_border_radius 182 | border_width: $base_border_width 183 | background_color: eeeeee 184 | # FIXME reenable margin bottom once margin collapsing is implemented 185 | padding: [$vertical_rhythm, $vertical_rhythm * 1.25, 0, $vertical_rhythm * 1.25] 186 | title: 187 | align: center 188 | font_color: $heading_font_color 189 | font_family: $heading_font_family 190 | font_size: $heading_h4_font_size 191 | font_style: $heading_font_style 192 | thematic_break: 193 | border_color: $base_border_color 194 | border_style: solid 195 | border_width: $base_border_width 196 | margin_top: $vertical_rhythm * 0.5 197 | margin_bottom: $vertical_rhythm * 1.5 198 | description_list: 199 | term_font_style: italic 200 | term_spacing: $vertical_rhythm / 4 201 | description_indent: $horizontal_rhythm * 1.25 202 | outline_list: 203 | indent: $horizontal_rhythm * 1.5 204 | # NOTE item_spacing applies to list items that do not have complex content 205 | item_spacing: $vertical_rhythm / 2 206 | #marker_font_color: 404040 207 | table: 208 | background_color: $page_background_color 209 | head_background_color: f9f9f9 210 | #head_font_color: $base_font_color 211 | head_font_style: bold 212 | #even_row_background_color: f9f9f9 213 | #odd_row_background_color: 214 | foot_background_color: f0f0f0 215 | border_color: dddddd 216 | border_width: $base_border_width 217 | header_cell_background_color: f9f9f9 218 | #header_cell_font_style: bold 219 | # HACK accounting for line-height 220 | cell_padding: [3, 3, 6, 3] 221 | toc: 222 | dot_leader_color: dddddd 223 | #dot_leader_content: '. 
' 224 | indent: $horizontal_rhythm 225 | line_height: 1.4 226 | 227 | 228 | header: 229 | font_size: $base_font_size 230 | font_color: $base_font_color 231 | # NOTE if background_color is set, background and border will span width of page 232 | border_color: dddddd 233 | border_width: 0.25 234 | height: 1.1in 235 | #$page_margin_top + $base_line_height *3 #$base_line_height_length * 12.5 236 | line_height: 1 237 | padding: [$base_line_height_length / 2, 1, $base_line_height_length / 2, 1] 238 | vertical_align: bottom 239 | # height: 3in 240 | image_vertical_align: bottom 241 | #12 (1) 242 | recto_content: 243 | #left: image:tc-logo.png[width=120px] 244 | #right: image:header.png[scalewidth=100%] 245 | center: '{chapter-title}' 246 | #left: image:tc-logo.png[width=180px] 247 | verso_content: 248 | right: $header_recto_content_right 249 | center: $header_recto_content_center 250 | #left: $header_recto_content_left 251 | 252 | 253 | footer: 254 | font_size: $base_font_size_small 255 | font_color: $base_font_color 256 | # NOTE if background_color is set, background and border will span width of page 257 | border_color: dddddd 258 | border_width: 0.25 259 | height: $base_line_height_length * 2.5 * 2 260 | line_height: 1 261 | padding: [$base_line_height_length / 2, 1, 0, 1] 262 | vertical_align: top 263 | #image_vertical_align: or 264 | # additional attributes for content: 265 | # * {page-count} 266 | # * {page-number} 267 | # * {document-title} 268 | # * {document-subtitle} 269 | # * {chapter-title} 270 | # * {section-title} 271 | # * {section-or-chapter-title} 272 | recto_content: 273 | #$header_recto_content_right 274 | left: | 275 | {document-title} + 276 | Version {revnumber} 277 | center: '<{status}>' 278 | right: 'Seite {page-number} von {page-count}' 279 | #center: $footer_verso_content_center 280 | # right: $footer_verso_content_right 281 | #right: '{section-or-chapter-title} | {page-number}' 282 | #right: '{document-title} | {page-number}' 283 | # right: '{page-number}' 284 | #center: '{page-number}' 285 | verso_content: 286 | left: $footer_recto_content_left 287 | center: $footer_recto_content_center 288 | right: $footer_recto_content_right 289 | # center: | 290 | # x + 291 | # vertraulich 292 | 293 | #center: '{page-number}' 294 | -------------------------------------------------------------------------------- /src/main/asciidoc/css/coderay-asciidoctor.css: -------------------------------------------------------------------------------- 1 | /* Stylesheet for CodeRay to match GitHub theme | MIT License | http://foundation.zurb.com */ 2 | /*pre.CodeRay {background-color:#f7f7f8;}*/ 3 | .CodeRay .line-numbers{border-right:1px solid #d8d8d8;padding:0 0.5em 0 .25em} 4 | .CodeRay span.line-numbers{display:inline-block;margin-right:.5em;color:rgba(0,0,0,.3)} 5 | .CodeRay .line-numbers strong{color:rgba(0,0,0,.4)} 6 | table.CodeRay{border-collapse:separate;border-spacing:0;margin-bottom:0;border:0;background:none} 7 | table.CodeRay td{vertical-align: top;line-height:1.45} 8 | table.CodeRay td.line-numbers{text-align:right} 9 | table.CodeRay td.line-numbers>pre{padding:0;color:rgba(0,0,0,.3)} 10 | table.CodeRay td.code{padding:0 0 0 .5em} 11 | table.CodeRay td.code>pre{padding:0} 12 | .CodeRay .debug{color:#fff !important;background:#000080 !important} 13 | .CodeRay .annotation{color:#007} 14 | .CodeRay .attribute-name{color:#000080} 15 | .CodeRay .attribute-value{color:#700} 16 | .CodeRay .binary{color:#509} 17 | .CodeRay .comment{color:#998;font-style:italic} 18 | .CodeRay 
.char{color:#04d} 19 | .CodeRay .char .content{color:#04d} 20 | .CodeRay .char .delimiter{color:#039} 21 | .CodeRay .class{color:#458;font-weight:bold} 22 | .CodeRay .complex{color:#a08} 23 | .CodeRay .constant,.CodeRay .predefined-constant{color:#008080} 24 | .CodeRay .color{color:#099} 25 | .CodeRay .class-variable{color:#369} 26 | .CodeRay .decorator{color:#b0b} 27 | .CodeRay .definition{color:#099} 28 | .CodeRay .delimiter{color:#000} 29 | .CodeRay .doc{color:#970} 30 | .CodeRay .doctype{color:#34b} 31 | .CodeRay .doc-string{color:#d42} 32 | .CodeRay .escape{color:#666} 33 | .CodeRay .entity{color:#800} 34 | .CodeRay .error{color:#808} 35 | .CodeRay .exception{color:inherit} 36 | .CodeRay .filename{color:#099} 37 | .CodeRay .function{color:#900;font-weight:bold} 38 | .CodeRay .global-variable{color:#008080} 39 | .CodeRay .hex{color:#058} 40 | .CodeRay .integer,.CodeRay .float{color:#099} 41 | .CodeRay .include{color:#555} 42 | .CodeRay .inline{color:#000} 43 | .CodeRay .inline .inline{background:#ccc} 44 | .CodeRay .inline .inline .inline{background:#bbb} 45 | .CodeRay .inline .inline-delimiter{color:#d14} 46 | .CodeRay .inline-delimiter{color:#d14} 47 | .CodeRay .important{color:#555;font-weight:bold} 48 | .CodeRay .interpreted{color:#b2b} 49 | .CodeRay .instance-variable{color:#008080} 50 | .CodeRay .label{color:#970} 51 | .CodeRay .local-variable{color:#963} 52 | .CodeRay .octal{color:#40e} 53 | .CodeRay .predefined{color:#369} 54 | .CodeRay .preprocessor{color:#579} 55 | .CodeRay .pseudo-class{color:#555} 56 | .CodeRay .directive{font-weight:bold} 57 | .CodeRay .type{font-weight:bold} 58 | .CodeRay .predefined-type{color:inherit} 59 | .CodeRay .reserved,.CodeRay .keyword {color:#000;font-weight:bold} 60 | .CodeRay .key{color:#808} 61 | .CodeRay .key .delimiter{color:#606} 62 | .CodeRay .key .char{color:#80f} 63 | .CodeRay .value{color:#088} 64 | .CodeRay .regexp .delimiter{color:#808} 65 | .CodeRay .regexp .content{color:#808} 66 | .CodeRay .regexp .modifier{color:#808} 67 | .CodeRay .regexp .char{color:#d14} 68 | .CodeRay .regexp .function{color:#404;font-weight:bold} 69 | .CodeRay .string{color:#d20} 70 | .CodeRay .string .string .string{background:#ffd0d0} 71 | .CodeRay .string .content{color:#d14} 72 | .CodeRay .string .char{color:#d14} 73 | .CodeRay .string .delimiter{color:#d14} 74 | .CodeRay .shell{color:#d14} 75 | .CodeRay .shell .delimiter{color:#d14} 76 | .CodeRay .symbol{color:#990073} 77 | .CodeRay .symbol .content{color:#a60} 78 | .CodeRay .symbol .delimiter{color:#630} 79 | .CodeRay .tag{color:#008080} 80 | .CodeRay .tag-special{color:#d70} 81 | .CodeRay .variable{color:#036} 82 | .CodeRay .insert{background:#afa} 83 | .CodeRay .delete{background:#faa} 84 | .CodeRay .change{color:#aaf;background:#007} 85 | .CodeRay .head{color:#f8f;background:#505} 86 | .CodeRay .insert .insert{color:#080} 87 | .CodeRay .delete .delete{color:#800} 88 | .CodeRay .change .change{color:#66f} 89 | .CodeRay .head .head{color:#f4f} 90 | -------------------------------------------------------------------------------- /src/main/asciidoc/user-guide.adoc: -------------------------------------------------------------------------------- 1 | :last-update-label: 2 | :chapter-label: 3 | :doctype: book 4 | :linkcss: 5 | :stylesdir: css/ 6 | :source-highlighter: coderay 7 | :numbered: 8 | :imagesdir: chapters 9 | :icons: font 10 | :pdf-stylesdir: css/ 11 | :pdf-style: article 12 | :experimental: 13 | :toc-title: Table of Contents 14 | :figure-caption: Figure 15 | :table-caption: Table 16 | :status: 
Final 17 | :datum: 2021-12-21 18 | :author: Markus Günther 19 | 20 | = User Guide to Kafka for JUnit 21 | Markus Günther 22 | 3.0.0, 2021-12-21 23 | 24 | :toc: 25 | 26 | include::chapters/introduction.adoc[] 27 | include::chapters/embedded-kafka-cluster.adoc[] 28 | include::chapters/external-kafka-cluster.adoc[] 29 | include::chapters/producing-records.adoc[] 30 | include::chapters/consuming-records.adoc[] 31 | include::chapters/managing-topics.adoc[] 32 | include::chapters/colophon.adoc[] 33 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedConnect.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.extern.slf4j.Slf4j; 4 | import org.apache.kafka.common.utils.Time; 5 | import org.apache.kafka.common.utils.Utils; 6 | import org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy; 7 | import org.apache.kafka.connect.runtime.ConnectorConfig; 8 | import org.apache.kafka.connect.runtime.Herder; 9 | import org.apache.kafka.connect.runtime.Worker; 10 | import org.apache.kafka.connect.runtime.WorkerConfig; 11 | import org.apache.kafka.connect.runtime.WorkerConfigTransformer; 12 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig; 13 | import org.apache.kafka.connect.runtime.distributed.DistributedHerder; 14 | import org.apache.kafka.connect.runtime.isolation.Plugins; 15 | import org.apache.kafka.connect.runtime.rest.RestClient; 16 | import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo; 17 | import org.apache.kafka.connect.storage.ConfigBackingStore; 18 | import org.apache.kafka.connect.storage.KafkaConfigBackingStore; 19 | import org.apache.kafka.connect.storage.KafkaOffsetBackingStore; 20 | import org.apache.kafka.connect.storage.KafkaStatusBackingStore; 21 | import org.apache.kafka.connect.storage.StatusBackingStore; 22 | import org.apache.kafka.connect.storage.StringConverter; 23 | import org.apache.kafka.connect.util.FutureCallback; 24 | import org.apache.kafka.connect.util.TopicAdmin; 25 | 26 | import java.util.Collections; 27 | import java.util.HashMap; 28 | import java.util.List; 29 | import java.util.Map; 30 | import java.util.Properties; 31 | import java.util.UUID; 32 | import java.util.concurrent.ExecutionException; 33 | import java.util.concurrent.TimeUnit; 34 | import java.util.concurrent.TimeoutException; 35 | import java.util.concurrent.atomic.AtomicBoolean; 36 | import java.util.function.Supplier; 37 | 38 | @Slf4j 39 | public class EmbeddedConnect implements EmbeddedLifecycle { 40 | 41 | private static final int REQUEST_TIMEOUT_MS = 120_000; 42 | 43 | private final AtomicBoolean shutdown = new AtomicBoolean(false); 44 | 45 | private final List connectorConfigs; 46 | 47 | private final DistributedConfig config; 48 | 49 | private final KafkaOffsetBackingStore offsetBackingStore; 50 | 51 | private final Worker worker; 52 | 53 | private final StatusBackingStore statusBackingStore; 54 | 55 | private final ConfigBackingStore configBackingStore; 56 | 57 | private final DistributedHerder herder; 58 | 59 | public EmbeddedConnect(final EmbeddedConnectConfig connectConfig, final String brokerList, final String clusterId) { 60 | // once created, this supplier will always answer with a constant client ID 61 | final Supplier clientIdGenerator = constantClientIdBase(); 62 | final AllConnectorClientConfigOverridePolicy policy = new 
AllConnectorClientConfigOverridePolicy(); 63 | final Properties effectiveWorkerConfig = connectConfig.getConnectProperties(); 64 | effectiveWorkerConfig.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 65 | this.connectorConfigs = connectConfig.getConnectors(); 66 | this.config = new DistributedConfig(Utils.propsToStringMap(effectiveWorkerConfig)); 67 | this.offsetBackingStore = new KafkaOffsetBackingStore(getTopicAdminSupplier(brokerList), clientIdGenerator, new StringConverter()); 68 | this.worker = new Worker(connectConfig.getWorkerId(), Time.SYSTEM, new Plugins(new HashMap<>()), config, offsetBackingStore, policy); 69 | this.statusBackingStore = new KafkaStatusBackingStore(Time.SYSTEM, worker.getInternalValueConverter(), getTopicAdminSupplier(brokerList), clientIdGenerator.get()); 70 | this.configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter(), config, new WorkerConfigTransformer(worker, Collections.emptyMap()), 71 | getTopicAdminSupplier(brokerList), clientIdGenerator.get()); 72 | this.herder = new DistributedHerder( 73 | config, 74 | Time.SYSTEM, 75 | worker, 76 | clusterId, 77 | statusBackingStore, 78 | configBackingStore, 79 | "", 80 | new RestClient(new DistributedConfig(toMap(effectiveWorkerConfig))), 81 | policy, 82 | Collections.emptyList()); 83 | } 84 | 85 | @Override 86 | public void start() { 87 | 88 | offsetBackingStore.configure(config); 89 | statusBackingStore.configure(config); 90 | 91 | try { 92 | log.info("Embedded Kafka Connect is starting."); 93 | 94 | worker.start(); 95 | herder.start(); 96 | 97 | log.info("Embedded Kafka Connect started."); 98 | log.info("Found {} connectors to deploy.", connectorConfigs.size()); 99 | 100 | connectorConfigs.forEach(this::deployConnector); 101 | } catch (Exception e) { 102 | throw new RuntimeException("Unable to start Embedded Kafka Connect.", e); 103 | } 104 | } 105 | 106 | private void deployConnector(final Properties connectorConfig) { 107 | final FutureCallback> callback = new FutureCallback<>(); 108 | final String connectorName = connectorConfig.getProperty(ConnectorConfig.NAME_CONFIG); 109 | log.info("Deploying connector {}.", connectorName); 110 | herder.putConnectorConfig(connectorName, Utils.propsToStringMap(connectorConfig), true, callback); 111 | try { 112 | callback.get(REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS); 113 | } catch (InterruptedException | ExecutionException | TimeoutException e) { 114 | log.error("Failed to deploy connector {}.", connectorName, e); 115 | } 116 | } 117 | 118 | @Override 119 | public void stop() { 120 | try { 121 | final boolean wasShuttingDown = shutdown.getAndSet(true); 122 | if (!wasShuttingDown) { 123 | log.info("Embedded Kafka Connect is stopping."); 124 | herder.stop(); 125 | worker.stop(); 126 | log.info("Embedded Kafka Connect stopped."); 127 | } 128 | } catch (Exception e) { 129 | throw new RuntimeException("Unable to stop Embedded Kafka Connect.", e); 130 | } 131 | } 132 | 133 | private static Supplier getTopicAdminSupplier(final String brokerList) { 134 | final Map config = new HashMap<>(); 135 | config.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); 136 | return () -> new TopicAdmin(config); 137 | } 138 | 139 | private static Supplier constantClientIdBase() { 140 | final String randomSuffix = UUID.randomUUID().toString().substring(0, 7); 141 | return () -> String.format("kafka-junit-%s", randomSuffix); 142 | } 143 | 144 | private static Map toMap(final Properties props) { 145 | final Map propertyMap = new HashMap<>(); 146 | for (String 
propertyName : props.stringPropertyNames()) { 147 | String propertyValue = props.getProperty(propertyName); 148 | propertyMap.put(propertyName, propertyValue); 149 | } 150 | return Collections.unmodifiableMap(propertyMap); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedConnectConfig.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.connect.runtime.WorkerConfig; 7 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig; 8 | 9 | import java.util.ArrayList; 10 | import java.util.Arrays; 11 | import java.util.List; 12 | import java.util.Properties; 13 | import java.util.UUID; 14 | 15 | @Getter 16 | @ToString 17 | @RequiredArgsConstructor 18 | public class EmbeddedConnectConfig { 19 | 20 | @RequiredArgsConstructor 21 | public static class EmbeddedConnectConfigBuilder { 22 | 23 | private final String workerId; 24 | private final Properties properties = new Properties(); 25 | private final List connectorProps = new ArrayList<>(); 26 | 27 | EmbeddedConnectConfigBuilder() { 28 | this(UUID.randomUUID().toString().substring(0, 7)); 29 | } 30 | 31 | public EmbeddedConnectConfigBuilder with(final String propertyName, final T value) { 32 | properties.put(propertyName, value); 33 | return this; 34 | } 35 | 36 | public EmbeddedConnectConfigBuilder withAll(final Properties overrides) { 37 | properties.putAll(overrides); 38 | return this; 39 | } 40 | 41 | private void ifNonExisting(final String propertyName, final T value) { 42 | if (properties.get(propertyName) != null) return; 43 | properties.put(propertyName, value); 44 | } 45 | 46 | public EmbeddedConnectConfigBuilder deployConnector(final Properties connectorProps) { 47 | this.connectorProps.add(connectorProps); 48 | return this; 49 | } 50 | 51 | public EmbeddedConnectConfigBuilder deployConnectors(final Properties... 
connectorProps) { 52 | this.connectorProps.addAll(Arrays.asList(connectorProps)); 53 | return this; 54 | } 55 | 56 | public EmbeddedConnectConfig build() { 57 | ifNonExisting(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); 58 | ifNonExisting(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); 59 | ifNonExisting(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, "All"); 60 | ifNonExisting("internal.key.converter.schemas.enable", "false"); 61 | ifNonExisting("internal.value.converter.schemas.enable", "false"); 62 | ifNonExisting(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG, "1"); 63 | ifNonExisting(DistributedConfig.CONFIG_TOPIC_CONFIG, "embedded-connect-config"); 64 | ifNonExisting(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG, "1"); 65 | ifNonExisting(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "embedded-connect-offsets"); 66 | ifNonExisting(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG, "1"); 67 | ifNonExisting(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "embedded-connect-status"); 68 | ifNonExisting(DistributedConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString().substring(0, 7)); 69 | return new EmbeddedConnectConfig(workerId, properties, connectorProps); 70 | } 71 | } 72 | 73 | private final String workerId; 74 | 75 | private final Properties connectProperties; 76 | 77 | private final List connectors; 78 | 79 | /** 80 | * @return instance of {@link EmbeddedConnectConfigBuilder} 81 | */ 82 | public static EmbeddedConnectConfigBuilder kafkaConnect() { 83 | return new EmbeddedConnectConfigBuilder(); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedKafka.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import kafka.server.KafkaConfig; 4 | import kafka.server.KafkaConfig$; 5 | import kafka.server.KafkaServer; 6 | import kafka.utils.TestUtils; 7 | import lombok.extern.slf4j.Slf4j; 8 | import org.apache.kafka.common.network.ListenerName; 9 | import org.apache.kafka.common.security.auth.SecurityProtocol; 10 | import org.apache.kafka.common.utils.Time; 11 | 12 | import java.io.IOException; 13 | import java.nio.file.FileVisitResult; 14 | import java.nio.file.Files; 15 | import java.nio.file.Path; 16 | import java.nio.file.SimpleFileVisitor; 17 | import java.nio.file.attribute.BasicFileAttributes; 18 | import java.util.Properties; 19 | 20 | import static org.apache.kafka.common.network.ListenerName.forSecurityProtocol; 21 | 22 | @Slf4j 23 | public class EmbeddedKafka implements EmbeddedLifecycle { 24 | 25 | private static final int UNDEFINED_BOUND_PORT = -1; 26 | 27 | private final int brokerId; 28 | 29 | private final Properties brokerConfig; 30 | 31 | private final Path logDirectory; 32 | 33 | private KafkaServer kafka; 34 | 35 | private int boundPort = UNDEFINED_BOUND_PORT; 36 | 37 | public EmbeddedKafka(final int brokerId, final String listener, final EmbeddedKafkaConfig config, final String zooKeeperConnectUrl, final boolean usesConnect) throws IOException { 38 | this.brokerId = brokerId; 39 | this.brokerConfig = new Properties(); 40 | this.brokerConfig.putAll(config.getBrokerProperties()); 41 | this.brokerConfig.put(KafkaConfig$.MODULE$.ListenersProp(), listener); 42 | this.brokerConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zooKeeperConnectUrl); 43 | this.logDirectory = 
Files.createTempDirectory("kafka-junit"); 44 | this.brokerConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), brokerId); 45 | this.brokerConfig.put(KafkaConfig$.MODULE$.LogDirProp(), logDirectory.toFile().getAbsolutePath()); 46 | if (usesConnect) { 47 | log.info("Enforcing 'log.cleanup.policy=compact', due to the presence of a Kafka Connect deployment."); 48 | this.brokerConfig.put(KafkaConfig$.MODULE$.LogCleanupPolicyProp(), "compact"); 49 | } 50 | } 51 | 52 | @Override 53 | public void start() { 54 | activate(); 55 | } 56 | 57 | public void activate() { 58 | 59 | if (kafka != null) { 60 | log.info("The embedded Kafka broker with ID {} is already running.", brokerId); 61 | return; 62 | } 63 | 64 | try { 65 | log.info("Embedded Kafka broker with ID {} is starting.", brokerId); 66 | 67 | if (boundPort != UNDEFINED_BOUND_PORT) { 68 | this.brokerConfig.put(KafkaConfig$.MODULE$.ListenersProp(), String.format("PLAINTEXT://localhost:%s", boundPort)); 69 | } 70 | 71 | final KafkaConfig config = new KafkaConfig(brokerConfig, true); 72 | kafka = TestUtils.createServer(config, Time.SYSTEM); 73 | boundPort = kafka.boundPort(config.interBrokerListenerName()); 74 | 75 | log.info("The embedded Kafka broker with ID {} has been started. Its logs can be found at {}.", brokerId, logDirectory); 76 | } catch (Exception e) { 77 | throw new RuntimeException(String.format("Unable to start the embedded Kafka broker with ID %s.", brokerId), e); 78 | } 79 | } 80 | 81 | @Override 82 | public void stop() { 83 | 84 | if (kafka == null) { 85 | log.info("The embedded Kafka broker with ID {} is not running or was already shut down.", brokerId); 86 | return; 87 | } 88 | 89 | deactivate(); 90 | 91 | log.info("Removing working directory at {}. This directory contains Kafka logs for Kafka broker with ID {} as well.", logDirectory, brokerId); 92 | try { 93 | recursivelyDelete(logDirectory); 94 | } catch (IOException e) { 95 | log.warn("Unable to remove working directory at {}.", logDirectory); 96 | } 97 | log.info("The embedded Kafka broker with ID {} has been stopped.", brokerId); 98 | } 99 | 100 | private void recursivelyDelete(final Path path) throws IOException { 101 | Files.walkFileTree(path, new SimpleFileVisitor() { 102 | @Override 103 | public FileVisitResult visitFile(Path file, 104 | @SuppressWarnings("unused") BasicFileAttributes attrs) { 105 | 106 | file.toFile().delete(); 107 | return FileVisitResult.CONTINUE; 108 | } 109 | 110 | @Override 111 | public FileVisitResult preVisitDirectory(Path dir, 112 | @SuppressWarnings("unused") BasicFileAttributes attrs) { 113 | return FileVisitResult.CONTINUE; 114 | } 115 | 116 | @Override 117 | public FileVisitResult postVisitDirectory(Path dir, IOException exc) { 118 | dir.toFile().delete(); 119 | return FileVisitResult.CONTINUE; 120 | } 121 | }); 122 | } 123 | 124 | public void deactivate() { 125 | if (kafka == null) return; 126 | boundPort = kafka.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)); 127 | log.info("The embedded Kafka broker with ID {} is stopping.", brokerId); 128 | kafka.shutdown(); 129 | kafka.awaitShutdown(); 130 | kafka = null; 131 | } 132 | 133 | public String getBrokerList() { 134 | return String.format("localhost:%s", kafka.boundPort(forSecurityProtocol(SecurityProtocol.PLAINTEXT))); 135 | } 136 | 137 | public String getClusterId() { 138 | return kafka.clusterId(); 139 | } 140 | 141 | public Integer getBrokerId() { 142 | return brokerId; 143 | } 144 | 145 | public boolean isActive() { 146 | return kafka != null; 147 | } 148 | } 149 | 
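The embedded broker shown above is not used directly in tests; it is provisioned through the configuration builders defined in this package. The following sketch illustrates how those builders are meant to compose. It is a minimal example, not taken verbatim from the project: the connector properties are hypothetical placeholders, and it assumes that EmbeddedKafkaCluster (whose source is not part of this excerpt) exposes a provisionWith(...) factory together with the start()/stop() lifecycle from EmbeddedLifecycle.

import static net.mguenther.kafka.junit.EmbeddedConnectConfig.kafkaConnect;
import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.newClusterConfig;
import static net.mguenther.kafka.junit.EmbeddedKafkaConfig.brokers;

import java.util.Properties;

import net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig;

public class EmbeddedClusterWiringSketch {

    public static void main(String[] args) {

        // Hypothetical connector settings; a real test would set the connector class,
        // topics, and any converter properties the connector requires.
        final Properties connectorProps = new Properties();
        connectorProps.put("name", "example-connector");

        // Compose the cluster configuration from the builders defined in this package:
        // a single explicitly configured broker plus an embedded Kafka Connect worker
        // that deploys the connector above. Passing an EmbeddedConnectConfig is what
        // makes usesConnect() return true on the resulting cluster configuration.
        final EmbeddedKafkaClusterConfig clusterConfig = newClusterConfig()
                .configure(brokers().withNumberOfBrokers(1))
                .configure(kafkaConnect().deployConnector(connectorProps))
                .build();

        // Assumed API, not shown in this excerpt: EmbeddedKafkaCluster.provisionWith(...)
        // plus the start()/stop() lifecycle inherited from EmbeddedLifecycle.
        // EmbeddedKafkaCluster cluster = EmbeddedKafkaCluster.provisionWith(clusterConfig);
        // cluster.start();
        // ... exercise the system under test against the embedded cluster ...
        // cluster.stop();
    }
}

Each builder falls back to sensible defaults through its ifNonExisting checks, so the explicit broker count and the connector deployment are the only settings such a test has to state.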
-------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedKafkaClusterConfig.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | 7 | @Getter 8 | @ToString 9 | @RequiredArgsConstructor 10 | public class EmbeddedKafkaClusterConfig { 11 | 12 | public static class EmbeddedKafkaClusterConfigBuilder { 13 | 14 | private EmbeddedZooKeeperConfig zooKeeperConfig = EmbeddedZooKeeperConfig.defaultZooKeeper(); 15 | 16 | private EmbeddedKafkaConfig kafkaConfig = EmbeddedKafkaConfig.defaultBrokers(); 17 | 18 | private EmbeddedConnectConfig connectConfig = null; 19 | 20 | /** 21 | * Uses the given {@link EmbeddedZooKeeperConfig} to configure the ZooKeeper instance that 22 | * runs within the embedded Kafka cluster. 23 | * 24 | * @param zooKeeperConfig 25 | * represents the configuration for the embedded ZooKeeper instance 26 | * @return 27 | * this builder 28 | */ 29 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedZooKeeperConfig zooKeeperConfig) { 30 | this.zooKeeperConfig = zooKeeperConfig; 31 | return this; 32 | } 33 | 34 | /** 35 | * Uses the given {@link EmbeddedZooKeeperConfig.EmbeddedZooKeeperConfigBuilder} to configure the 36 | * ZooKeeper instance that runs within the embedded Kafka cluster. 37 | * 38 | * @param builder 39 | * represents the configuration for the embedded ZooKeeper instance 40 | * @return 41 | * this builder 42 | */ 43 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedZooKeeperConfig.EmbeddedZooKeeperConfigBuilder builder) { 44 | return configure(builder.build()); 45 | } 46 | 47 | /** 48 | * Uses the given {@link EmbeddedKafkaConfig} to configure brokers that run within the 49 | * embedded Kafka cluster. This configuration is applied to all brokers in a multi-broker 50 | * environment. 51 | * 52 | * @param kafkaConfig 53 | * represents the configuration for embedded Kafka brokers 54 | * @return 55 | * this builder 56 | */ 57 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedKafkaConfig kafkaConfig) { 58 | this.kafkaConfig = kafkaConfig; 59 | return this; 60 | } 61 | 62 | /** 63 | * Uses the given {@link net.mguenther.kafka.junit.EmbeddedKafkaConfig.EmbeddedKafkaConfigBuilder} to 64 | * configure brokers that run within the embedded Kafka cluster. This configuration is applied to 65 | * all brokers in a multi-broker environment. 66 | * 67 | * @param builder 68 | * represents the configuration for embedded Kafka brokers 69 | * @return 70 | * this builder 71 | */ 72 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedKafkaConfig.EmbeddedKafkaConfigBuilder builder) { 73 | return configure(builder.build()); 74 | } 75 | 76 | /** 77 | * Uses the given {@link EmbeddedConnectConfig} to configure Kafka Connect for the embedded 78 | * Kafka cluster. 79 | * 80 | * @param connectConfig 81 | * represents the configuration for Kafka Connect 82 | * @return 83 | * this builder 84 | */ 85 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedConnectConfig connectConfig) { 86 | this.connectConfig = connectConfig; 87 | return this; 88 | } 89 | 90 | /** 91 | * Uses the given {@link EmbeddedConnectConfig.EmbeddedConnectConfigBuilder} to configure Kafka Connect 92 | * for the embedded Kafka cluster. 
93 | * 94 | * @param builder 95 | * represents the configuration for Kafka Connect 96 | * @return 97 | * this builder 98 | */ 99 | public EmbeddedKafkaClusterConfigBuilder configure(final EmbeddedConnectConfig.EmbeddedConnectConfigBuilder builder) { 100 | return configure(builder.build()); 101 | } 102 | 103 | public EmbeddedKafkaClusterConfig build() { 104 | return new EmbeddedKafkaClusterConfig(zooKeeperConfig, kafkaConfig, connectConfig); 105 | } 106 | } 107 | 108 | private final EmbeddedZooKeeperConfig zooKeeperConfig; 109 | 110 | private final EmbeddedKafkaConfig kafkaConfig; 111 | 112 | private final EmbeddedConnectConfig connectConfig; 113 | 114 | public boolean usesConnect() { 115 | return connectConfig != null; 116 | } 117 | 118 | /** 119 | * @return instance of {@link EmbeddedKafkaClusterConfigBuilder} used to configure 120 | * the embedded Kafka cluster 121 | */ 122 | public static EmbeddedKafkaClusterConfigBuilder newClusterConfig() { 123 | return new EmbeddedKafkaClusterConfigBuilder(); 124 | } 125 | 126 | /** 127 | * @return instance of {@link EmbeddedKafkaClusterConfig} that contains the default 128 | * configuration for the embedded Kafka cluster 129 | */ 130 | public static EmbeddedKafkaClusterConfig defaultClusterConfig() { 131 | return newClusterConfig().build(); 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedKafkaConfig.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import kafka.server.KafkaConfig$; 4 | import lombok.Getter; 5 | import lombok.RequiredArgsConstructor; 6 | import lombok.ToString; 7 | import lombok.extern.slf4j.Slf4j; 8 | 9 | import java.util.ArrayList; 10 | import java.util.List; 11 | import java.util.Properties; 12 | import java.util.stream.Collectors; 13 | 14 | @Slf4j 15 | @ToString 16 | @RequiredArgsConstructor 17 | public class EmbeddedKafkaConfig { 18 | 19 | public static final int DEFAULT_NUMBER_OF_BROKERS = 1; 20 | 21 | public static final String DEFAULT_LISTENER = "PLAINTEXT://localhost:9092"; 22 | 23 | private static final String LISTENER_TEMPLATE = "PLAINTEXT://localhost:%s"; 24 | 25 | public static class EmbeddedKafkaConfigBuilder { 26 | 27 | private final Properties properties = new Properties(); 28 | 29 | private int numberOfBrokers = DEFAULT_NUMBER_OF_BROKERS; 30 | 31 | private EmbeddedKafkaConfigBuilder() { 32 | } 33 | 34 | public EmbeddedKafkaConfigBuilder withNumberOfBrokers(final int numberOfBrokers) { 35 | this.numberOfBrokers = numberOfBrokers; 36 | return this; 37 | } 38 | 39 | public EmbeddedKafkaConfigBuilder with(final String propertyName, final T value) { 40 | properties.put(propertyName, value); 41 | return this; 42 | } 43 | 44 | public EmbeddedKafkaConfigBuilder withAll(final Properties overrides) { 45 | properties.putAll(overrides); 46 | return this; 47 | } 48 | 49 | private void ifNonExisting(final String propertyName, final T value) { 50 | if (properties.get(propertyName) != null) return; 51 | properties.put(propertyName, value); 52 | } 53 | 54 | public EmbeddedKafkaConfig build() { 55 | 56 | final List listeners = new ArrayList<>(numberOfBrokers); 57 | 58 | if (numberOfBrokers > 1) { 59 | listeners.addAll(getUniqueEphemeralPorts(numberOfBrokers) 60 | .stream() 61 | .map(port -> String.format(LISTENER_TEMPLATE, port)) 62 | .collect(Collectors.toList())); 63 | } else { 64 | listeners.add(DEFAULT_LISTENER); 65 | } 66 | 67 | 
ifNonExisting(KafkaConfig$.MODULE$.ZkSessionTimeoutMsProp(), "8000"); 68 | ifNonExisting(KafkaConfig$.MODULE$.ZkConnectionTimeoutMsProp(), "10000"); 69 | ifNonExisting(KafkaConfig$.MODULE$.NumPartitionsProp(), "1"); 70 | ifNonExisting(KafkaConfig$.MODULE$.DefaultReplicationFactorProp(), "1"); 71 | ifNonExisting(KafkaConfig$.MODULE$.MinInSyncReplicasProp(), "1"); 72 | ifNonExisting(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), "true"); 73 | ifNonExisting(KafkaConfig$.MODULE$.MessageMaxBytesProp(), "1000000"); 74 | ifNonExisting(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), "true"); 75 | ifNonExisting(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), "1"); 76 | ifNonExisting(KafkaConfig$.MODULE$.GroupInitialRebalanceDelayMsProp(), 0); 77 | ifNonExisting(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp(), "1"); 78 | ifNonExisting(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp(), "1"); 79 | ifNonExisting(KafkaConfig$.MODULE$.SslClientAuthProp(), "none"); 80 | ifNonExisting(KafkaConfig$.MODULE$.AutoLeaderRebalanceEnableProp(), "true"); 81 | ifNonExisting(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), "true"); 82 | ifNonExisting(KafkaConfig$.MODULE$.LeaderImbalanceCheckIntervalSecondsProp(), 5); 83 | ifNonExisting(KafkaConfig$.MODULE$.LeaderImbalancePerBrokerPercentageProp(), 1); 84 | ifNonExisting(KafkaConfig$.MODULE$.UncleanLeaderElectionEnableProp(), "false"); 85 | return new EmbeddedKafkaConfig(numberOfBrokers, listeners, properties); 86 | } 87 | 88 | private List getUniqueEphemeralPorts(final int howMany) { 89 | final List ephemeralPorts = new ArrayList<>(howMany); 90 | while (ephemeralPorts.size() < howMany) { 91 | final int port = generateRandomEphemeralPort(); 92 | if (!ephemeralPorts.contains(port)) { 93 | ephemeralPorts.add(port); 94 | } 95 | } 96 | return ephemeralPorts; 97 | } 98 | 99 | private int generateRandomEphemeralPort() { 100 | return Math.min((int) (Math.random() * 65535) + 1024, 65535); 101 | } 102 | } 103 | 104 | @Getter 105 | private final int numberOfBrokers; 106 | 107 | private final List uniqueListeners; 108 | 109 | @Getter 110 | private final Properties brokerProperties; 111 | 112 | public String listenerFor(final int brokerIndex) { 113 | if (brokerProperties.containsKey(KafkaConfig$.MODULE$.ListenersProp())) { 114 | return brokerProperties.getProperty(KafkaConfig$.MODULE$.ListenersProp()); 115 | } else { 116 | return uniqueListeners.get(brokerIndex); 117 | } 118 | } 119 | 120 | /** 121 | * @return instance of {@link EmbeddedKafkaConfigBuilder} 122 | */ 123 | public static EmbeddedKafkaConfigBuilder brokers() { 124 | return new EmbeddedKafkaConfigBuilder(); 125 | } 126 | 127 | /** 128 | * @return instance of {@link EmbeddedKafkaConfig} that contains the default configuration 129 | * for all brokers in an embedded Kafka cluster 130 | */ 131 | public static EmbeddedKafkaConfig defaultBrokers() { 132 | return brokers().build(); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedLifecycle.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | public interface EmbeddedLifecycle { 4 | 5 | /** 6 | * Starts the embedded component. After this method completes the component *must* fully 7 | * operational. 
8 | * 9 | * @throws RuntimeException 10 | * this method can and should throw an {@link RuntimeException} to indicate that the 11 | * component could not be deployed 12 | */ 13 | void start(); 14 | 15 | /** 16 | * Stops the embedded component. After this method completes all acquired resources are 17 | * freed and the component is properly shut down. The component is no longer operational 18 | * after this. 19 | * 20 | * @throws RuntimeException 21 | * this method can and should throw an {@link RuntimeException} to indicate that the 22 | * component could not be shut down 23 | */ 24 | void stop(); 25 | } 26 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedZooKeeper.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.RequiredArgsConstructor; 4 | import lombok.extern.slf4j.Slf4j; 5 | import org.apache.curator.test.TestingServer; 6 | 7 | @Slf4j 8 | @RequiredArgsConstructor 9 | public class EmbeddedZooKeeper implements EmbeddedLifecycle { 10 | 11 | private final EmbeddedZooKeeperConfig config; 12 | 13 | private TestingServer internalServer; 14 | 15 | @Override 16 | public void start() { 17 | 18 | if (internalServer != null) { 19 | log.info("The embedded ZooKeeper instance is already running."); 20 | return; 21 | } 22 | 23 | try { 24 | log.info("Embedded ZooKeeper is starting."); 25 | internalServer = new TestingServer(config.getPort()); 26 | log.info("Successfully started an embedded ZooKeeper instance at {} which is assigned to the temporary directory {}.", 27 | internalServer.getConnectString(), 28 | internalServer.getTempDirectory()); 29 | } catch (Exception e) { 30 | throw new RuntimeException("Unable to start an embedded ZooKeeper instance.", e); 31 | } 32 | } 33 | 34 | @Override 35 | public void stop() { 36 | 37 | if (internalServer == null) { 38 | log.info("The embedded ZooKeeper is not running or was already shut down."); 39 | return; 40 | } 41 | 42 | try { 43 | log.info("The embedded ZooKeeper instance at {} is stopping.", internalServer.getConnectString()); 44 | internalServer.close(); 45 | log.info("The embedded ZooKeeper instance at {} has been shut down.", internalServer.getConnectString()); 46 | } catch (Exception e) { 47 | throw new RuntimeException("Unable to stop the embedded ZooKeeper instance.", e); 48 | } 49 | } 50 | 51 | public String getConnectString() { 52 | return internalServer.getConnectString(); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/EmbeddedZooKeeperConfig.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.ToString; 5 | 6 | @ToString 7 | public class EmbeddedZooKeeperConfig { 8 | 9 | public static final int USE_RANDOM_ZOOKEEPER_PORT = -1; 10 | 11 | public static class EmbeddedZooKeeperConfigBuilder { 12 | 13 | private int port = USE_RANDOM_ZOOKEEPER_PORT; 14 | 15 | EmbeddedZooKeeperConfigBuilder withPort(final int port) { 16 | this.port = port; 17 | return this; 18 | } 19 | 20 | public EmbeddedZooKeeperConfig build() { 21 | return new EmbeddedZooKeeperConfig(this); 22 | } 23 | } 24 | 25 | @Getter 26 | private final Integer port; 27 | 28 | private EmbeddedZooKeeperConfig(final EmbeddedZooKeeperConfigBuilder builder) { 29 | this.port = builder.port; 30 | } 31 | 32 | public 
static EmbeddedZooKeeperConfigBuilder zooKeeper() { 33 | return new EmbeddedZooKeeperConfigBuilder(); 34 | } 35 | 36 | /** 37 | * @return instance of {@link EmbeddedZooKeeperConfig} that contains the default configuration 38 | * for the embedded ZooKeeper instance 39 | */ 40 | public static EmbeddedZooKeeperConfig defaultZooKeeper() { 41 | return zooKeeper().build(); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/ExternalKafkaCluster.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.AccessLevel; 4 | import lombok.RequiredArgsConstructor; 5 | import net.mguenther.kafka.junit.provider.DefaultRecordConsumer; 6 | import net.mguenther.kafka.junit.provider.DefaultRecordProducer; 7 | import net.mguenther.kafka.junit.provider.DefaultTopicManager; 8 | import org.apache.kafka.clients.producer.RecordMetadata; 9 | 10 | import java.util.List; 11 | import java.util.Map; 12 | import java.util.Properties; 13 | 14 | @RequiredArgsConstructor(access = AccessLevel.PACKAGE) 15 | public class ExternalKafkaCluster implements RecordProducer, RecordConsumer, TopicManager { 16 | 17 | private final RecordProducer producerDelegate; 18 | 19 | private final RecordConsumer consumerDelegate; 20 | 21 | private final TopicManager topicManagerDelegate; 22 | 23 | private ExternalKafkaCluster(final String bootstrapServers) { 24 | producerDelegate = new DefaultRecordProducer(bootstrapServers); 25 | consumerDelegate = new DefaultRecordConsumer(bootstrapServers); 26 | topicManagerDelegate = new DefaultTopicManager(bootstrapServers); 27 | } 28 | 29 | @Override 30 | public List readValues(final ReadKeyValues readRequest) throws InterruptedException { 31 | return consumerDelegate.readValues(readRequest); 32 | } 33 | 34 | @Override 35 | public List> read(final ReadKeyValues readRequest) throws InterruptedException { 36 | return consumerDelegate.read(readRequest); 37 | } 38 | 39 | @Override 40 | public List observeValues(final ObserveKeyValues observeRequest) throws InterruptedException { 41 | return consumerDelegate.observeValues(observeRequest); 42 | } 43 | 44 | @Override 45 | public List> observe(final ObserveKeyValues observeRequest) throws InterruptedException { 46 | return consumerDelegate.observe(observeRequest); 47 | } 48 | 49 | @Override 50 | public List send(final SendValues sendRequest) throws InterruptedException { 51 | return producerDelegate.send(sendRequest); 52 | } 53 | 54 | @Override 55 | public List send(final SendValuesTransactional sendRequest) throws InterruptedException { 56 | return producerDelegate.send(sendRequest); 57 | } 58 | 59 | @Override 60 | public List send(final SendKeyValues sendRequest) throws InterruptedException { 61 | return producerDelegate.send(sendRequest); 62 | } 63 | 64 | @Override 65 | public List send(final SendKeyValuesTransactional sendRequest) throws InterruptedException { 66 | return producerDelegate.send(sendRequest); 67 | } 68 | 69 | @Override 70 | public void createTopic(final TopicConfig config) { 71 | topicManagerDelegate.createTopic(config); 72 | } 73 | 74 | @Override 75 | public void deleteTopic(final String topic) { 76 | topicManagerDelegate.deleteTopic(topic); 77 | } 78 | 79 | @Override 80 | public boolean exists(final String topic) { 81 | return topicManagerDelegate.exists(topic); 82 | } 83 | 84 | @Override 85 | public Map fetchLeaderAndIsr(final String topic) { 86 | return 
topicManagerDelegate.fetchLeaderAndIsr(topic); 87 | } 88 | 89 | @Override 90 | public Properties fetchTopicConfig(final String topic) { 91 | return topicManagerDelegate.fetchTopicConfig(topic); 92 | } 93 | 94 | public static ExternalKafkaCluster at(final String bootstrapServers) { 95 | return new ExternalKafkaCluster(bootstrapServers); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/KeyValue.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.EqualsAndHashCode; 4 | import lombok.Getter; 5 | import lombok.RequiredArgsConstructor; 6 | import lombok.ToString; 7 | import org.apache.kafka.common.header.Header; 8 | import org.apache.kafka.common.header.Headers; 9 | import org.apache.kafka.common.header.internals.RecordHeader; 10 | import org.apache.kafka.common.header.internals.RecordHeaders; 11 | 12 | import java.nio.charset.Charset; 13 | import java.util.Optional; 14 | 15 | @ToString 16 | @EqualsAndHashCode(of = { "key", "value" }) 17 | @RequiredArgsConstructor 18 | public class KeyValue { 19 | 20 | @Getter 21 | private final K key; 22 | 23 | @Getter 24 | private final V value; 25 | 26 | @Getter 27 | private final Headers headers; 28 | 29 | private final KeyValueMetadata metadata; 30 | 31 | public KeyValue(final K key, final V value) { 32 | this(key, value, new RecordHeaders(), null); 33 | } 34 | 35 | public KeyValue(final K key, final V value, final Headers headers) { 36 | this(key, value, headers, null); 37 | } 38 | 39 | public void addHeader(final String headerName, final String headerValue, final Charset charset) { 40 | addHeader(headerName, headerValue.getBytes(charset)); 41 | } 42 | 43 | public void addHeader(final String headerName, final byte[] headerValue) { 44 | final Header header = new RecordHeader(headerName, headerValue); 45 | headers.add(header); 46 | } 47 | 48 | public Optional getMetadata() { 49 | return Optional.ofNullable(metadata); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/KeyValueMetadata.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | 7 | @Getter 8 | @ToString 9 | @RequiredArgsConstructor 10 | public class KeyValueMetadata { 11 | 12 | private final String topic; 13 | private final int partition; 14 | private final long offset; 15 | } 16 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/LeaderAndIsr.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | 7 | import java.util.Set; 8 | 9 | @Getter 10 | @ToString 11 | @RequiredArgsConstructor 12 | public class LeaderAndIsr { 13 | 14 | private final Integer leader; 15 | private final Set isr; 16 | } 17 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/ObserveKeyValues.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import 
lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.consumer.ConsumerConfig; 7 | import org.apache.kafka.common.header.Headers; 8 | import org.apache.kafka.common.serialization.StringDeserializer; 9 | 10 | import java.util.HashMap; 11 | import java.util.Map; 12 | import java.util.Properties; 13 | import java.util.UUID; 14 | import java.util.concurrent.TimeUnit; 15 | import java.util.function.Predicate; 16 | 17 | @Getter 18 | @ToString 19 | @RequiredArgsConstructor 20 | public class ObserveKeyValues { 21 | 22 | public static final int DEFAULT_OBSERVATION_TIME_MILLIS = 30_000; 23 | 24 | public static class ObserveKeyValuesBuilder { 25 | 26 | private final String topic; 27 | private final int expected; 28 | private final Class clazzOfK; 29 | private final Class clazzOfV; 30 | private final Properties consumerProps = new Properties(); 31 | private final Map seekTo = new HashMap<>(); 32 | private Predicate filterOnKeys = key -> true; 33 | private Predicate filterOnValues = value -> true; 34 | private Predicate filterOnHeaders = value -> true; 35 | private int observationTimeMillis = DEFAULT_OBSERVATION_TIME_MILLIS; 36 | private boolean includeMetadata = false; 37 | 38 | ObserveKeyValuesBuilder(final String topic, final int expected, final Class clazzOfK, final Class clazzOfV) { 39 | this.topic = topic; 40 | this.expected = expected; 41 | this.clazzOfK = clazzOfK; 42 | this.clazzOfV = clazzOfV; 43 | } 44 | 45 | public ObserveKeyValuesBuilder observeFor(final int duration, final TimeUnit unit) { 46 | this.observationTimeMillis = (int) unit.toMillis(duration); 47 | return this; 48 | } 49 | 50 | public ObserveKeyValuesBuilder filterOnKeys(final Predicate filterOnKeys) { 51 | this.filterOnKeys = filterOnKeys; 52 | return this; 53 | } 54 | 55 | public ObserveKeyValuesBuilder filterOnValues(final Predicate filterOnValues) { 56 | this.filterOnValues = filterOnValues; 57 | return this; 58 | } 59 | 60 | public ObserveKeyValuesBuilder filterOnHeaders(final Predicate filterOnHeaders) { 61 | this.filterOnHeaders = filterOnHeaders; 62 | return this; 63 | } 64 | 65 | public ObserveKeyValuesBuilder includeMetadata() { 66 | return withMetadata(true); 67 | } 68 | 69 | public ObserveKeyValuesBuilder withMetadata(final boolean modifier) { 70 | this.includeMetadata = modifier; 71 | return this; 72 | } 73 | 74 | public ObserveKeyValuesBuilder seekTo(final int partition, final long offset) { 75 | seekTo.put(partition, offset); 76 | return this; 77 | } 78 | 79 | public ObserveKeyValuesBuilder seekTo(final Map seekTo) { 80 | this.seekTo.putAll(seekTo); 81 | return this; 82 | } 83 | 84 | public ObserveKeyValuesBuilder with(final String propertyName, final T value) { 85 | consumerProps.put(propertyName, value); 86 | return this; 87 | } 88 | 89 | public ObserveKeyValuesBuilder withAll(final Properties consumerProps) { 90 | this.consumerProps.putAll(consumerProps); 91 | return this; 92 | } 93 | 94 | private void ifNonExisting(final String propertyName, final T value) { 95 | if (consumerProps.get(propertyName) != null) return; 96 | consumerProps.put(propertyName, value); 97 | } 98 | 99 | public ObserveKeyValues useDefaults() { 100 | consumerProps.clear(); 101 | observationTimeMillis = DEFAULT_OBSERVATION_TIME_MILLIS; 102 | return build(); 103 | } 104 | 105 | public ObserveKeyValues build() { 106 | ifNonExisting(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); 107 | ifNonExisting(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 108 | 
ifNonExisting(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); 109 | ifNonExisting(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 110 | ifNonExisting(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 111 | ifNonExisting(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100); 112 | ifNonExisting(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); 113 | return new ObserveKeyValues<>(topic, expected, observationTimeMillis, includeMetadata, seekTo, consumerProps, filterOnKeys, filterOnValues, filterOnHeaders, clazzOfK, clazzOfV); 114 | } 115 | } 116 | 117 | private final String topic; 118 | private final int expected; 119 | private final int observationTimeMillis; 120 | private final boolean includeMetadata; 121 | private final Map seekTo; 122 | private final Properties consumerProps; 123 | private final Predicate filterOnKeys; 124 | private final Predicate filterOnValues; 125 | private final Predicate filterOnHeaders; 126 | private final Class clazzOfK; 127 | private final Class clazzOfV; 128 | 129 | public static ObserveKeyValuesBuilder on(final String topic, final int expected) { 130 | return on(topic, expected, String.class, String.class); 131 | } 132 | 133 | public static ObserveKeyValuesBuilder on(final String topic, 134 | final int expected, 135 | final Class clazzOfV) { 136 | return on(topic, expected, String.class, clazzOfV); 137 | } 138 | 139 | public static ObserveKeyValuesBuilder on(final String topic, 140 | final int expected, 141 | final Class clazzOfK, 142 | final Class clazzOfV) { 143 | return new ObserveKeyValuesBuilder<>(topic, expected, clazzOfK, clazzOfV); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/Props.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import java.util.Properties; 4 | 5 | /** 6 | * Provides a fluent interface for constructing {@link java.util.Properties}. Use this for example 7 | * with {@code EmbeddedConnectConfig#deployConnector} to retain the fluent interface when provisioning 8 | * your embedded Kafka cluster. 
9 | */ 10 | public class Props { 11 | 12 | private final Properties properties = new Properties(); 13 | 14 | public Props with(final String propertyName, final T value) { 15 | properties.put(propertyName, value); 16 | return this; 17 | } 18 | 19 | public Props withAll(final Properties overrides) { 20 | properties.putAll(overrides); 21 | return this; 22 | } 23 | 24 | public Properties build() { 25 | final Properties copyOfProps = new Properties(); 26 | copyOfProps.putAll(properties); 27 | return copyOfProps; 28 | } 29 | 30 | public static Props create() { 31 | return new Props(); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/ReadKeyValues.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.consumer.ConsumerConfig; 7 | import org.apache.kafka.common.header.Headers; 8 | import org.apache.kafka.common.serialization.StringDeserializer; 9 | 10 | import java.util.HashMap; 11 | import java.util.Map; 12 | import java.util.Properties; 13 | import java.util.UUID; 14 | import java.util.concurrent.TimeUnit; 15 | import java.util.function.Predicate; 16 | 17 | @Getter 18 | @ToString 19 | @RequiredArgsConstructor 20 | public class ReadKeyValues { 21 | 22 | public static final int WITHOUT_LIMIT = -1; 23 | public static final int DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS = 2_000; 24 | 25 | public static class ReadKeyValuesBuilder { 26 | 27 | private final String topic; 28 | private final Class clazzOfK; 29 | private final Class clazzOfV; 30 | private final Properties consumerProps = new Properties(); 31 | private final Map seekTo = new HashMap<>(); 32 | private Predicate filterOnKeys = key -> true; 33 | private Predicate filterOnValues = value -> true; 34 | private Predicate filterOnHeaders = value -> true; 35 | private int limit = WITHOUT_LIMIT; 36 | private int maxTotalPollTimeMillis = DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS; 37 | private boolean includeMetadata = false; 38 | 39 | ReadKeyValuesBuilder(final String topic, final Class clazzOfK, final Class clazzOfV) { 40 | this.topic = topic; 41 | this.clazzOfK = clazzOfK; 42 | this.clazzOfV = clazzOfV; 43 | } 44 | 45 | public ReadKeyValuesBuilder filterOnKeys(final Predicate filterOnKeys) { 46 | this.filterOnKeys = filterOnKeys; 47 | return this; 48 | } 49 | 50 | public ReadKeyValuesBuilder filterOnValues(final Predicate filterOnValues) { 51 | this.filterOnValues = filterOnValues; 52 | return this; 53 | } 54 | 55 | public ReadKeyValuesBuilder filterOnHeaders(final Predicate filterOnHeaders) { 56 | this.filterOnHeaders = filterOnHeaders; 57 | return this; 58 | } 59 | 60 | public ReadKeyValuesBuilder unlimited() { 61 | this.limit = WITHOUT_LIMIT; 62 | return this; 63 | } 64 | 65 | public ReadKeyValuesBuilder withLimit(final int limit) { 66 | this.limit = limit; 67 | return this; 68 | } 69 | 70 | public ReadKeyValuesBuilder withMaxTotalPollTime(final int duration, final TimeUnit unit) { 71 | this.maxTotalPollTimeMillis = (int) unit.toMillis(duration); 72 | return this; 73 | } 74 | 75 | public ReadKeyValuesBuilder includeMetadata() { 76 | return withMetadata(true); 77 | } 78 | 79 | public ReadKeyValuesBuilder withMetadata(final boolean modifier) { 80 | this.includeMetadata = modifier; 81 | return this; 82 | } 83 | 84 | public ReadKeyValuesBuilder seekTo(final int 
partition, final long offset) { 85 | seekTo.put(partition, offset); 86 | return this; 87 | } 88 | 89 | public ReadKeyValuesBuilder seekTo(final Map seekTo) { 90 | this.seekTo.putAll(seekTo); 91 | return this; 92 | } 93 | 94 | public ReadKeyValuesBuilder with(final String propertyName, final T value) { 95 | consumerProps.put(propertyName, value); 96 | return this; 97 | } 98 | 99 | public ReadKeyValuesBuilder withAll(final Properties consumerProps) { 100 | this.consumerProps.putAll(consumerProps); 101 | return this; 102 | } 103 | 104 | private void ifNonExisting(final String propertyName, final T value) { 105 | if (consumerProps.get(propertyName) != null) return; 106 | consumerProps.put(propertyName, value); 107 | } 108 | 109 | public ReadKeyValues useDefaults() { 110 | consumerProps.clear(); 111 | limit = WITHOUT_LIMIT; 112 | maxTotalPollTimeMillis = DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS; 113 | return build(); 114 | } 115 | 116 | public ReadKeyValues build() { 117 | ifNonExisting(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); 118 | ifNonExisting(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 119 | ifNonExisting(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); 120 | ifNonExisting(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 121 | ifNonExisting(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 122 | ifNonExisting(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100); 123 | ifNonExisting(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); 124 | return new ReadKeyValues<>(topic, limit, maxTotalPollTimeMillis, includeMetadata, seekTo, consumerProps, filterOnKeys, filterOnValues, filterOnHeaders, clazzOfK, clazzOfV); 125 | } 126 | } 127 | 128 | private final String topic; 129 | private final int limit; 130 | private final int maxTotalPollTimeMillis; 131 | private final boolean includeMetadata; 132 | private final Map seekTo; 133 | private final Properties consumerProps; 134 | private final Predicate filterOnKeys; 135 | private final Predicate filterOnValues; 136 | private final Predicate filterOnHeaders; 137 | private final Class clazzOfK; 138 | private final Class clazzOfV; 139 | 140 | public static ReadKeyValuesBuilder from(final String topic) { 141 | return from(topic, String.class, String.class); 142 | } 143 | 144 | public static ReadKeyValuesBuilder from(final String topic, final Class clazzOfV) { 145 | return from(topic, String.class, clazzOfV); 146 | } 147 | 148 | public static ReadKeyValuesBuilder from(final String topic, final Class clazzOfK, final Class clazzOfV) { 149 | return new ReadKeyValuesBuilder<>(topic, clazzOfK, clazzOfV); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/RecordConsumer.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import java.util.List; 4 | 5 | /** 6 | * Provides the means to read key-value pairs or un-keyed values from a Kafka topic as well 7 | * as the possibility to watch given topics until a certain amount of records have been consumed 8 | * from them. All of the operations a {@code RecordConsumer} provides are synchronous in their 9 | * nature. 10 | */ 11 | public interface RecordConsumer { 12 | 13 | /** 14 | * Reads values from a Kafka topic. 
15 | * 16 | * @param readRequest 17 | * the configuration of the consumer and the read operation it has to carry out 18 | * @param 19 | * refers to the type of values being read 20 | * @throws InterruptedException 21 | * in case an interrupt signal has been set 22 | * @return 23 | * unmodifiable {@link java.util.List} of consumed values 24 | * @see ReadKeyValues 25 | */ 26 | List readValues(ReadKeyValues readRequest) throws InterruptedException; 27 | 28 | /** 29 | * Reads values from a Kafka topic. This is a convenience method that accepts a 30 | * {@link net.mguenther.kafka.junit.ReadKeyValues.ReadKeyValuesBuilder} and 31 | * immediately constructs a {@link ReadKeyValues} request from it that is passed 32 | * on to {@link #readValues(ReadKeyValues)}. 33 | * 34 | * @param builder 35 | * the builder used for the configuration of the consumer and the read operation it 36 | * has to carry out 37 | * @param 38 | * refers to the type of values being read 39 | * @throws InterruptedException 40 | * in case an interrupt signal has been set 41 | * @return 42 | * unmodifiable {@link java.util.List} of consumed values 43 | * @see ReadKeyValues 44 | * @see ReadKeyValues.ReadKeyValuesBuilder 45 | */ 46 | default List readValues(ReadKeyValues.ReadKeyValuesBuilder builder) throws InterruptedException { 47 | return readValues(builder.build()); 48 | } 49 | 50 | /** 51 | * Reads key-value pairs from a Kafka topic. 52 | * 53 | * @param readRequest 54 | * the configuration of the consumer and the read operation it has to carry out 55 | * @param 56 | * refers to the type of keys being read 57 | * @param 58 | * refers to the type of values being read 59 | * @throws InterruptedException 60 | * in case an interrupt signal has been set 61 | * @return 62 | * unmodifiable {@link java.util.List} of consumed key-value pairs 63 | * @see ReadKeyValues 64 | */ 65 | List> read(ReadKeyValues readRequest) throws InterruptedException; 66 | 67 | /** 68 | * Reads key-value pairs from a Kafka topic. This is a convenience method that accepts a 69 | * {@link net.mguenther.kafka.junit.ReadKeyValues.ReadKeyValuesBuilder} and immediately 70 | * constructs a {@link ReadKeyValues} request from it that is passed on to 71 | * {@link #read(ReadKeyValues)}. 72 | * 73 | * @param builder 74 | * the builder used for the configuration of the consumer and the read operation it 75 | * has to carry out 76 | * @param 77 | * refers to the type of keys being read 78 | * @param 79 | * refers to the type of values being read 80 | * @throws InterruptedException 81 | * in case an interrupt signal has been set 82 | * @return 83 | * unmodifiable {@link java.util.List} of consumed key-value pairs 84 | * @see ReadKeyValues 85 | * @see ReadKeyValues.ReadKeyValuesBuilder 86 | */ 87 | default List> read(ReadKeyValues.ReadKeyValuesBuilder builder) throws InterruptedException { 88 | return read(builder.build()); 89 | } 90 | 91 | /** 92 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout 93 | * elapses. Returns the values that have been consumed up until this point or throws an 94 | * {@code AssertionError} if the number of consumed values does not meet the expected 95 | * number of records. 
96 | * 97 | * @param observeRequest 98 | * the configuration of the consumer and the observe operation it has to carry out 99 | * @param 100 | * refers to the type of values being read 101 | * @throws AssertionError 102 | * in case the number of consumed values does not meet the expected number of records 103 | * @throws InterruptedException 104 | * in case an interrupt signal has been set 105 | * @return 106 | * unmodifiable {@link java.util.List} of values 107 | * @see ObserveKeyValues 108 | */ 109 | List observeValues(ObserveKeyValues observeRequest) throws InterruptedException; 110 | 111 | /** 112 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout 113 | * elapses. Returns the values that have been consumed up until this point or throws an 114 | * {@code AssertionError} if the number of consumed values does not meet the expected 115 | * number of records. This is a convenience method that accepts a 116 | * {@link net.mguenther.kafka.junit.ObserveKeyValues.ObserveKeyValuesBuilder} and immediately 117 | * constructs a {@link ObserveKeyValues} request from it that is passed on to 118 | * {@link #observeValues(ObserveKeyValues)}. 119 | * 120 | * @param builder 121 | * the builder used for the configuration of the consumer and the observe operation it 122 | * has to carry out 123 | * @param 124 | * refers to the type of values being read 125 | * @throws AssertionError 126 | * in case the number of consumed values does not meet the expected number of records 127 | * @throws InterruptedException 128 | * in case an interrupt signal has been set 129 | * @return 130 | * unmodifiable {@link java.util.List} of values 131 | * @see ObserveKeyValues 132 | * @see ObserveKeyValues.ObserveKeyValuesBuilder 133 | */ 134 | default List observeValues(ObserveKeyValues.ObserveKeyValuesBuilder builder) throws InterruptedException { 135 | return observeValues(builder.build()); 136 | } 137 | 138 | /** 139 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout 140 | * elapses. Returns the key-value pairs that have been consumed up until this point or throws an 141 | * {@code AssertionError} if the number of consumed key-value pairs does not meet the expected 142 | * number of records. 143 | * 144 | * @param observeRequest 145 | * the configuration of the consumer and the observe operation it has to carry out 146 | * @param 147 | * refers to the type of keys being read 148 | * @param 149 | * refers to the type of values being read 150 | * @throws InterruptedException 151 | * in case an interrupt signal has been set 152 | * @return 153 | * unmodifiable {@link java.util.List} of key-value pairs 154 | * @see ObserveKeyValues 155 | */ 156 | List> observe(ObserveKeyValues observeRequest) throws InterruptedException; 157 | 158 | /** 159 | * Observes a Kafka topic until a certain amount of records have been consumed or a timeout 160 | * elapses. Returns the key-value pairs that have been consumed up until this point or throws an 161 | * {@code AssertionError} if the number of consumed key-value pairs does not meet the expected 162 | * number of records. This is a convenience method that accepts a 163 | * {@link net.mguenther.kafka.junit.ObserveKeyValues.ObserveKeyValuesBuilder} and immediately 164 | * constructs a {@link ObserveKeyValues} request from it that is passed on to 165 | * {@link #observe(ObserveKeyValues)}. 
166 | * 167 | * @param builder 168 | * the builder used for the configuration of the consumer and the observe operation it 169 | * has to carry out 170 | * @param 171 | * refers to the type of keys being read 172 | * @param 173 | * refers to the type of values being read 174 | * @throws AssertionError 175 | * in case the number of consumed values does not meet the expected number of records 176 | * @throws InterruptedException 177 | * in case an interrupt signal has been set 178 | * @return 179 | * unmodifiable {@link java.util.List} of key-value pairs 180 | * @see ObserveKeyValues 181 | * @see ObserveKeyValues.ObserveKeyValuesBuilder 182 | */ 183 | default List> observe(ObserveKeyValues.ObserveKeyValuesBuilder builder) throws InterruptedException { 184 | return observe(builder.build()); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/RecordProducer.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.apache.kafka.clients.producer.RecordMetadata; 4 | 5 | import java.util.List; 6 | 7 | /** 8 | * Provides the means to send key-value pairs or un-keyed values to a Kafka topic. The send 9 | * operations a {@code RecordProducer} provides are synchronous in their nature. 10 | */ 11 | public interface RecordProducer { 12 | 13 | /** 14 | * Sends (non-keyed) values synchronously to a Kafka topic. 15 | * 16 | * @param sendRequest 17 | * the configuration of the producer and the send operation it has to carry out 18 | * @param 19 | * refers to the type of values being sent 20 | * @throws RuntimeException 21 | * in case there is an error while sending an individual Kafka record to the 22 | * designated Kafka broker 23 | * @throws InterruptedException 24 | * in case an interrupt signal has been set 25 | * @return 26 | * unmodifiable {@link java.util.List} that contains metadata on the written 27 | * Kafka records 28 | * @see SendValues 29 | */ 30 | List send(SendValues sendRequest) throws InterruptedException; 31 | 32 | /** 33 | * Sends (non-keyed) values synchronously to a Kafka topic. This is a convenience 34 | * method that accepts a {@link net.mguenther.kafka.junit.SendValues.SendValuesBuilder} 35 | * and immediately constructs a {@link SendValues} request from it that is passed 36 | * on to {@link #send(SendValues)}. 37 | * 38 | * @param builder 39 | * the builder used for the configuration of the producer and the send operation 40 | * it has to carry out 41 | * @param 42 | * refers to the type of values being sent 43 | * @throws RuntimeException 44 | * in case there is an error while sending an individual Kafka record to the 45 | * designated Kafka broker 46 | * @throws InterruptedException 47 | * in case an interrupt signal has been set 48 | * @return 49 | * unmodifiable {@link java.util.List} that contains metadata on the written 50 | * Kafka records 51 | * @see SendValues 52 | * @see SendValues.SendValuesBuilder 53 | */ 54 | default List send(SendValues.SendValuesBuilder builder) throws InterruptedException { 55 | return send(builder.build()); 56 | } 57 | 58 | /** 59 | * Sends (non-keyed) values synchronously and within a transaction to a Kafka topic. 
60 | * 61 | * @param sendRequest 62 | * the configuration of the producer and the send operation it has to carry out 63 | * @param 64 | * refers to the type of values being sent 65 | * @throws RuntimeException 66 | * in case there is an error while sending an individual Kafka record to the 67 | * designated Kafka broker 68 | * @throws InterruptedException 69 | * in case an interrupt signal has been set 70 | * @return 71 | * unmodifiable {@link java.util.List} that contains metadata on the written 72 | * Kafka records 73 | * @see SendValuesTransactional 74 | */ 75 | List send(SendValuesTransactional sendRequest) throws InterruptedException; 76 | 77 | /** 78 | * Sends (non-keyed) values synchronously and within a transaction to a Kafka topic. 79 | * This is a convenience method that accepts a {@link net.mguenther.kafka.junit.SendValuesTransactional.SendValuesTransactionalBuilder} 80 | * and immediately constructs a {@link SendKeyValuesTransactional} request from it that 81 | * is passed on to {@link #send(SendValuesTransactional)}. 82 | * 83 | * @param builder 84 | * the builder used for the configuration of the producer and the send operation 85 | * it has to carry out 86 | * @param 87 | * refers to the type of values being sent 88 | * @throws RuntimeException 89 | * in case there is an error while sending an individual Kafka record to the 90 | * designated Kafka broker 91 | * @throws InterruptedException 92 | * in case an interrupt signal has been set 93 | * @return 94 | * unmodifiable {@link java.util.List} that contains metadata on the written 95 | * Kafka records 96 | * @see SendValuesTransactional 97 | * @see SendValuesTransactional.SendValuesTransactionalBuilder 98 | */ 99 | default List send(SendValuesTransactional.SendValuesTransactionalBuilder builder) throws InterruptedException { 100 | return send(builder.build()); 101 | } 102 | 103 | /** 104 | * Sends key-value pairs synchronously to a Kafka topic. 105 | * 106 | * @param sendRequest 107 | * the configuration of the producer and the send operation it has to carry out 108 | * @param 109 | * refers to the type of keys being sent 110 | * @param 111 | * refers to the type of values being sent 112 | * @throws RuntimeException 113 | * in case there is an error while sending an individual Kafka record to the 114 | * designated Kafka broker 115 | * @throws InterruptedException 116 | * in case an interrupt signal has been set 117 | * @return 118 | * unmodifiable {@link java.util.List} that contains metadata on the written 119 | * Kafka records 120 | * @see SendKeyValues 121 | */ 122 | List send(SendKeyValues sendRequest) throws InterruptedException; 123 | 124 | /** 125 | * Sends key-value pairs synchronously to a Kafka topic. This is a convenience method 126 | * that accepts a {@link net.mguenther.kafka.junit.SendKeyValues.SendKeyValuesBuilder} 127 | * and immediately constructs a {@link SendKeyValues} and is passed on to 128 | * {@link #send(SendKeyValues)}. 
129 | * 130 | * @param builder 131 | * the builder used for the configuration of the producer and the send operation 132 | * it has to carry out 133 | * @param <K> 134 | * refers to the type of keys being sent 135 | * @param <V> 136 | * refers to the type of values being sent 137 | * @throws RuntimeException 138 | * in case there is an error while sending an individual Kafka record to the 139 | * designated Kafka broker 140 | * @throws InterruptedException 141 | * in case an interrupt signal has been set 142 | * @return 143 | * unmodifiable {@link java.util.List} that contains metadata on the written 144 | * Kafka records 145 | * @see SendKeyValues 146 | * @see SendKeyValues.SendKeyValuesBuilder 147 | */ 148 | default <K, V> List<RecordMetadata> send(SendKeyValues.SendKeyValuesBuilder<K, V> builder) throws InterruptedException { 149 | return send(builder.build()); 150 | } 151 | 152 | /** 153 | * Sends key-value pairs synchronously and within a transaction to a Kafka topic. 154 | * 155 | * @param sendRequest 156 | * the configuration of the producer and the send operation it has to carry out 157 | * @param <K> 158 | * refers to the type of keys being sent 159 | * @param <V> 160 | * refers to the type of values being sent 161 | * @throws RuntimeException 162 | * in case there is an error while sending an individual Kafka record to the 163 | * designated Kafka broker 164 | * @throws InterruptedException 165 | * in case an interrupt signal has been set 166 | * @return 167 | * unmodifiable {@link java.util.List} that contains metadata on the written 168 | * Kafka records 169 | * @see SendKeyValuesTransactional 170 | */ 171 | <K, V> List<RecordMetadata> send(SendKeyValuesTransactional<K, V> sendRequest) throws InterruptedException; 172 | 173 | /** 174 | * Sends key-value pairs synchronously and within a transaction to a Kafka topic. 175 | * This is a convenience method that accepts a 176 | * {@link net.mguenther.kafka.junit.SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder} 177 | * and immediately constructs a {@link SendKeyValuesTransactional} request from it that is 178 | * passed on to {@link #send(SendKeyValuesTransactional)}.
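* <p>Illustrative sketch, again an editorial addition rather than original source: the builder can spread
* a single transaction across topics or abort it on purpose via {@code failTransaction()}; assuming a
* {@code RecordProducer} named {@code producer}, with made-up topic names and records:
* <pre>{@code
* producer.send(SendKeyValuesTransactional
*     .inTransaction("topic-a", Collections.singletonList(new KeyValue<>("k", "v")))
*     .inTransaction("topic-b", Collections.singletonList(new KeyValue<>("k", "w")))
*     .failTransaction());
* }</pre>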
179 | * 180 | * @param builder 181 | * the builder used for the configuration of the producer and the send operation 182 | * it has to carry out 183 | * @param <K> 184 | * refers to the type of keys being sent 185 | * @param <V> 186 | * refers to the type of values being sent 187 | * @throws RuntimeException 188 | * in case there is an error while sending an individual Kafka record to the 189 | * designated Kafka broker 190 | * @throws InterruptedException 191 | * in case an interrupt signal has been set 192 | * @return 193 | * unmodifiable {@link java.util.List} that contains metadata on the written 194 | * Kafka records 195 | * @see SendKeyValuesTransactional 196 | * @see SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder 197 | */ 198 | default <K, V> List<RecordMetadata> send(SendKeyValuesTransactional.SendKeyValuesTransactionalBuilder<K, V> builder) throws InterruptedException { 199 | return send(builder.build()); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/SendKeyValues.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.producer.ProducerConfig; 7 | import org.apache.kafka.common.serialization.StringSerializer; 8 | 9 | import java.util.ArrayList; 10 | import java.util.Collection; 11 | import java.util.Properties; 12 | 13 | @Getter 14 | @ToString 15 | @RequiredArgsConstructor 16 | public class SendKeyValues<K, V> { 17 | 18 | public static class SendKeyValuesBuilder<K, V> { 19 | 20 | private final String topic; 21 | private final Collection<KeyValue<K, V>> records = new ArrayList<>(); 22 | private final Properties producerProps = new Properties(); 23 | 24 | SendKeyValuesBuilder(final String topic, final Collection<KeyValue<K, V>> records) { 25 | this.topic = topic; 26 | this.records.addAll(records); 27 | } 28 | 29 | public <T> SendKeyValuesBuilder<K, V> with(final String propertyName, final T value) { 30 | producerProps.put(propertyName, value); 31 | return this; 32 | } 33 | 34 | public SendKeyValuesBuilder<K, V> withAll(final Properties producerProps) { 35 | this.producerProps.putAll(producerProps); 36 | return this; 37 | } 38 | 39 | private <T> void ifNonExisting(final String propertyName, final T value) { 40 | if (producerProps.get(propertyName) != null) return; 41 | producerProps.put(propertyName, value); 42 | } 43 | 44 | public SendKeyValues<K, V> useDefaults() { 45 | producerProps.clear(); 46 | return build(); 47 | } 48 | 49 | public SendKeyValues<K, V> build() { 50 | ifNonExisting(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 51 | ifNonExisting(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 52 | ifNonExisting(ProducerConfig.ACKS_CONFIG, "all"); 53 | ifNonExisting(ProducerConfig.RETRIES_CONFIG, "1"); 54 | return new SendKeyValues<>(topic, records, producerProps); 55 | } 56 | } 57 | 58 | private final String topic; 59 | private final Collection<KeyValue<K, V>> records; 60 | private final Properties producerProps; 61 | 62 | public static <K, V> SendKeyValuesBuilder<K, V> to(final String topic, final Collection<KeyValue<K, V>> records) { 63 | return new SendKeyValuesBuilder<>(topic, records); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/SendKeyValuesTransactional.java: -------------------------------------------------------------------------------- 1 | package
net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.producer.ProducerConfig; 7 | import org.apache.kafka.common.serialization.StringSerializer; 8 | 9 | import java.util.ArrayList; 10 | import java.util.Collection; 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | import java.util.Properties; 14 | import java.util.UUID; 15 | 16 | @ToString 17 | @RequiredArgsConstructor 18 | public class SendKeyValuesTransactional { 19 | 20 | public static class SendKeyValuesTransactionalBuilder { 21 | 22 | private final Map>> recordsPerTopic = new HashMap<>(); 23 | private final Properties producerProps = new Properties(); 24 | private boolean failTransaction = false; 25 | 26 | SendKeyValuesTransactionalBuilder(final String topic, final Collection> records) { 27 | recordsPerTopic.put(topic, records); 28 | } 29 | 30 | SendKeyValuesTransactionalBuilder(final Map>> recordsPerTopic) { 31 | this.recordsPerTopic.putAll(recordsPerTopic); 32 | } 33 | 34 | public SendKeyValuesTransactionalBuilder inTransaction(final String topic, final Collection> records) { 35 | final Collection> existingRecordsForTopic = recordsPerTopic.getOrDefault(topic, new ArrayList<>()); 36 | existingRecordsForTopic.addAll(records); 37 | recordsPerTopic.put(topic, existingRecordsForTopic); 38 | return this; 39 | } 40 | 41 | public SendKeyValuesTransactionalBuilder failTransaction() { 42 | return withFailTransaction(true); 43 | } 44 | 45 | public SendKeyValuesTransactionalBuilder withFailTransaction(final boolean modifier) { 46 | failTransaction = modifier; 47 | return this; 48 | } 49 | 50 | public SendKeyValuesTransactionalBuilder with(final String propertyName, final T value) { 51 | producerProps.put(propertyName, value); 52 | return this; 53 | } 54 | 55 | public SendKeyValuesTransactionalBuilder withAll(final Properties transactionalProps) { 56 | this.producerProps.putAll(transactionalProps); 57 | return this; 58 | } 59 | 60 | private void ifNonExisting(final String propertyName, final T value) { 61 | if (producerProps.get(propertyName) != null) return; 62 | producerProps.put(propertyName, value); 63 | } 64 | 65 | public SendKeyValuesTransactional useDefaults() { 66 | producerProps.clear(); 67 | failTransaction = false; 68 | return build(); 69 | } 70 | 71 | public SendKeyValuesTransactional build() { 72 | ifNonExisting(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 73 | ifNonExisting(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 74 | ifNonExisting(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE); 75 | ifNonExisting(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1); 76 | ifNonExisting(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); 77 | ifNonExisting(ProducerConfig.TRANSACTIONAL_ID_CONFIG, UUID.randomUUID().toString()); 78 | ifNonExisting(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60_000); 79 | ifNonExisting(ProducerConfig.ACKS_CONFIG, "all"); 80 | return new SendKeyValuesTransactional<>(recordsPerTopic, failTransaction, producerProps); 81 | } 82 | } 83 | 84 | @Getter 85 | private final Map>> recordsPerTopic; 86 | 87 | private final boolean failTransaction; 88 | 89 | @Getter 90 | private final Properties producerProps; 91 | 92 | public boolean shouldFailTransaction() { 93 | return failTransaction; 94 | } 95 | 96 | public static SendKeyValuesTransactionalBuilder inTransaction(final String topic, final Collection> records) { 97 | return new 
SendKeyValuesTransactionalBuilder<>(topic, records); 98 | } 99 | 100 | public static SendKeyValuesTransactionalBuilder inTransaction(final Map>> recordsPerTopic) { 101 | return new SendKeyValuesTransactionalBuilder<>(recordsPerTopic); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/SendValues.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.producer.ProducerConfig; 7 | import org.apache.kafka.common.serialization.StringSerializer; 8 | 9 | import java.util.ArrayList; 10 | import java.util.Arrays; 11 | import java.util.Collection; 12 | import java.util.Properties; 13 | 14 | @Getter 15 | @ToString 16 | @RequiredArgsConstructor 17 | public class SendValues { 18 | 19 | public static class SendValuesBuilder { 20 | 21 | private final String topic; 22 | private final Collection values = new ArrayList<>(); 23 | private final Properties producerProps = new Properties(); 24 | 25 | SendValuesBuilder(final String topic, final Collection values) { 26 | this.topic = topic; 27 | this.values.addAll(values); 28 | } 29 | 30 | public SendValuesBuilder with(final String propertyName, final T value) { 31 | producerProps.put(propertyName, value); 32 | return this; 33 | } 34 | 35 | public SendValuesBuilder withAll(final Properties producerProps) { 36 | this.producerProps.putAll(producerProps); 37 | return this; 38 | } 39 | 40 | private void ifNonExisting(final String propertyName, final T value) { 41 | if (producerProps.get(propertyName) != null) return; 42 | producerProps.put(propertyName, value); 43 | } 44 | 45 | public SendValues useDefaults() { 46 | producerProps.clear(); 47 | return build(); 48 | } 49 | 50 | public SendValues build() { 51 | ifNonExisting(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 52 | ifNonExisting(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 53 | ifNonExisting(ProducerConfig.ACKS_CONFIG, "all"); 54 | ifNonExisting(ProducerConfig.RETRIES_CONFIG, "1"); 55 | return new SendValues<>(topic, values, producerProps); 56 | } 57 | } 58 | 59 | private final String topic; 60 | private final Collection values; 61 | private final Properties producerProps; 62 | 63 | public static SendValuesBuilder to(final String topic, final Collection values) { 64 | return new SendValuesBuilder<>(topic, values); 65 | } 66 | 67 | @SafeVarargs 68 | public static SendValuesBuilder to(final String topic, final V... 
values) { 69 | return new SendValuesBuilder<>(topic, Arrays.asList(values)); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/SendValuesTransactional.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | import org.apache.kafka.clients.producer.ProducerConfig; 7 | import org.apache.kafka.common.serialization.StringSerializer; 8 | 9 | import java.util.ArrayList; 10 | import java.util.Arrays; 11 | import java.util.Collection; 12 | import java.util.HashMap; 13 | import java.util.Map; 14 | import java.util.Properties; 15 | import java.util.UUID; 16 | 17 | @ToString 18 | @RequiredArgsConstructor 19 | public class SendValuesTransactional { 20 | 21 | public static class SendValuesTransactionalBuilder { 22 | 23 | private final Map> valuesPerTopic = new HashMap<>(); 24 | private final Properties producerPros = new Properties(); 25 | private boolean failTransaction = false; 26 | 27 | SendValuesTransactionalBuilder(final String topic, final Collection values) { 28 | valuesPerTopic.put(topic, values); 29 | } 30 | 31 | public SendValuesTransactionalBuilder inTransaction(final String topic, final Collection values) { 32 | final Collection existingValuesPerTopic = valuesPerTopic.getOrDefault(topic, new ArrayList<>()); 33 | existingValuesPerTopic.addAll(values); 34 | valuesPerTopic.put(topic, existingValuesPerTopic); 35 | return this; 36 | } 37 | 38 | public SendValuesTransactionalBuilder failTransaction() { 39 | return withFailTransaction(true); 40 | } 41 | 42 | public SendValuesTransactionalBuilder withFailTransaction(final boolean modifier) { 43 | failTransaction = modifier; 44 | return this; 45 | } 46 | 47 | public SendValuesTransactionalBuilder with(final String propertyName, final T value) { 48 | producerPros.put(propertyName, value); 49 | return this; 50 | } 51 | 52 | public SendValuesTransactionalBuilder withAll(final Properties transactionalProps) { 53 | this.producerPros.putAll(transactionalProps); 54 | return this; 55 | } 56 | 57 | private void ifNonExisting(final String propertyName, final T value) { 58 | if (producerPros.get(propertyName) != null) return; 59 | producerPros.put(propertyName, value); 60 | } 61 | 62 | public SendValuesTransactional useDefaults() { 63 | producerPros.clear(); 64 | failTransaction = false; 65 | return build(); 66 | } 67 | 68 | public SendValuesTransactional build() { 69 | ifNonExisting(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 70 | ifNonExisting(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 71 | ifNonExisting(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE); 72 | ifNonExisting(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1); 73 | ifNonExisting(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); 74 | ifNonExisting(ProducerConfig.TRANSACTIONAL_ID_CONFIG, UUID.randomUUID().toString()); 75 | ifNonExisting(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60_000); 76 | ifNonExisting(ProducerConfig.ACKS_CONFIG, "all"); 77 | return new SendValuesTransactional<>(valuesPerTopic, failTransaction, producerPros); 78 | } 79 | } 80 | 81 | @Getter 82 | private final Map> valuesPerTopic; 83 | 84 | private final boolean failTransaction; 85 | 86 | @Getter 87 | private final Properties producerProps; 88 | 89 | public boolean shouldFailTransaction() { 90 | return 
failTransaction; 91 | } 92 | 93 | public static SendValuesTransactionalBuilder inTransaction(final String topic, final Collection values) { 94 | return new SendValuesTransactionalBuilder<>(topic, values); 95 | } 96 | 97 | @SafeVarargs 98 | public static SendValuesTransactionalBuilder inTransaction(final String topic, final V... values) { 99 | return new SendValuesTransactionalBuilder<>(topic, Arrays.asList(values)); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/TopicConfig.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import lombok.Getter; 4 | import lombok.RequiredArgsConstructor; 5 | import lombok.ToString; 6 | 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | import java.util.Properties; 10 | 11 | @Getter 12 | @ToString 13 | @RequiredArgsConstructor 14 | public class TopicConfig { 15 | 16 | public static class TopicConfigBuilder { 17 | 18 | private final String topic; 19 | private final Properties properties = new Properties(); 20 | private int numberOfPartitions = 1; 21 | private int numberOfReplicas = 1; 22 | 23 | TopicConfigBuilder(final String topic) { 24 | this.topic = topic; 25 | } 26 | 27 | public TopicConfigBuilder withNumberOfPartitions(final int numberOfPartitions) { 28 | this.numberOfPartitions = numberOfPartitions; 29 | return this; 30 | } 31 | 32 | public TopicConfigBuilder withNumberOfReplicas(final int numberOfReplicas) { 33 | this.numberOfReplicas = numberOfReplicas; 34 | return this; 35 | } 36 | 37 | public TopicConfigBuilder with(final String propertyName, final T value) { 38 | properties.put(propertyName, value); 39 | return this; 40 | } 41 | 42 | public TopicConfigBuilder withAll(final Properties properties) { 43 | this.properties.putAll(properties); 44 | return this; 45 | } 46 | 47 | private void ifNonExisting(final String propertyName, final T value) { 48 | if (properties.get(propertyName) != null) return; 49 | properties.put(propertyName, value); 50 | } 51 | 52 | public TopicConfig useDefaults() { 53 | properties.clear(); 54 | numberOfPartitions = 1; 55 | numberOfReplicas = 1; 56 | return build(); 57 | } 58 | 59 | public TopicConfig build() { 60 | ifNonExisting("cleanup.policy", "delete"); 61 | ifNonExisting("delete.retention.ms", "86400000"); 62 | ifNonExisting("min.insync.replicas", "1"); 63 | return new TopicConfig(topic, numberOfPartitions, numberOfReplicas, properties); 64 | } 65 | } 66 | 67 | private final String topic; 68 | private final int numberOfPartitions; 69 | private final int numberOfReplicas; 70 | private final Properties properties; 71 | 72 | public Map getPropertiesMap() { 73 | final Map map = new HashMap<>(); 74 | for (String propertyName : properties.stringPropertyNames()) { 75 | map.put(propertyName, properties.get(propertyName)); 76 | } 77 | return map; 78 | } 79 | 80 | /** 81 | * Returns a {@link TopicConfigBuilder} with default settings for the topic {@code topic}. 82 | * Use the returned builder to override those default settings. 
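* <p>Illustrative sketch, not part of the original source file: combined with a {@code TopicManager}
* (such as the embedded cluster exercised by the tests in this repository), a topic creation request
* typically reads like this; the topic name and the overridden settings are made up.
* <pre>{@code
* topicManager.createTopic(TopicConfig.withName("test-topic")
*     .withNumberOfPartitions(5)
*     .withNumberOfReplicas(3)
*     .with("min.insync.replicas", "2"));
* }</pre>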
83 | * 84 | * @param topic the name of the topic 85 | * @return instance of {@link TopicConfigBuilder} used to parameterize the topic creation request 86 | */ 87 | public static TopicConfigBuilder withName(final String topic) { 88 | return new TopicConfigBuilder(topic); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/TopicManager.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import java.util.Map; 4 | import java.util.Properties; 5 | 6 | /** 7 | * Provides the means to manage Kafka topics. All of the operations a {@code TopicManager} provides 8 | * are synchronous in their nature. 9 | */ 10 | public interface TopicManager { 11 | 12 | /** 13 | * Creates the topic as defined by the given {@link TopicConfig} synchronously. This 14 | * method blocks as long as it takes to complete the underlying topic creation request. 15 | * Please note that this does not imply that the topic is usable directly after this 16 | * method returns due to an outstanding partition leader election. 17 | * 18 | * @param config 19 | * provides the settings for the topic to create 20 | */ 21 | void createTopic(TopicConfig config); 22 | 23 | /** 24 | * Creates the topic as defined by the given {@link TopicConfig} synchronously. This 25 | * method blocks as long as it takes to complete the underlying topic creation request. 26 | * Please note that this does not imply that the topic is usable directly after this 27 | * method returns due to an outstanding partition leader election. This is a convenience 28 | * method that accepts a {@link net.mguenther.kafka.junit.TopicConfig.TopicConfigBuilder} 29 | * and immediately constructs a {@link TopicConfig} request from it that is passed on to 30 | * {@link #createTopic(TopicConfig)}. 31 | * 32 | * @param builder 33 | * provides the settings for the topic to create 34 | */ 35 | default void createTopic(TopicConfig.TopicConfigBuilder builder) { 36 | createTopic(builder.build()); 37 | } 38 | 39 | /** 40 | * Marks the given {@code topic} for deletion. Please note that topics are not immediately 41 | * deleted from a Kafka cluster. This method will fail if the configuration of Kafka brokers 42 | * prohibits topic deletions and if the topic has already been marked for deletion. 43 | * 44 | * @param topic 45 | * name of the topic that ought to be marked for deletion 46 | */ 47 | void deleteTopic(String topic); 48 | 49 | /** 50 | * Determines whether the given {@code topic} exists. 51 | * 52 | * @param topic 53 | * name of the topic that ought to be checked 54 | * @return 55 | * {@code true} if the topic exists, {@code false} otherwise 56 | */ 57 | boolean exists(String topic); 58 | 59 | /** 60 | * Retrieves the leader as well as the In-Sync-Replica-Set (ISR) for all topic-partitions 61 | * of the given topic. 62 | * 63 | * @param topic 64 | * name of the topic for which the ISR shall be fetched 65 | * @return 66 | * unmodifiable {@link Map} of {@link LeaderAndIsr} by partition, which shows us 67 | * broker assignments and the role of the broker for a particular partition 68 | * (leader or follower) 69 | */ 70 | Map fetchLeaderAndIsr(String topic); 71 | 72 | /** 73 | * Retrieves the topic configuration for the given topic. 
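* <p>Illustrative sketch, an editorial addition rather than original source: assuming {@code topicManager}
* refers to a {@code TopicManager} implementation, the returned {@link java.util.Properties} can be
* inspected like any other properties object; the topic name and the inspected key are made up.
* <pre>{@code
* Properties topicConfig = topicManager.fetchTopicConfig("test-topic");
* Object cleanupPolicy = topicConfig.get("cleanup.policy");
* }</pre>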
74 | * 75 | * @param topic 76 | * name of the topic for which the configuration shall be fetched 77 | * @return 78 | * instance of {@link java.util.Properties} which contains the configuration 79 | * of the given topic 80 | */ 81 | Properties fetchTopicConfig(String topic); 82 | } 83 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/Wait.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import java.util.concurrent.TimeUnit; 4 | 5 | public class Wait { 6 | 7 | /** 8 | * Delays the thread that the test runs in by the given duration in seconds. 9 | * 10 | * @param duration 11 | * the duration of the delay in seconds 12 | * @throws InterruptedException 13 | * in case an interrupted signal is set 14 | */ 15 | public static void delay(final int duration) throws InterruptedException { 16 | delay(duration, TimeUnit.SECONDS); 17 | } 18 | 19 | /** 20 | * Delays the thread that the test runs in by the given duration. 21 | * 22 | * @param duration 23 | * the duration of the delay 24 | * @param timeUnit 25 | * the time unit for the given {@code duration} 26 | * @throws InterruptedException 27 | * in case an interrupted signal is set 28 | */ 29 | public static void delay(final int duration, final TimeUnit timeUnit) throws InterruptedException { 30 | Thread.sleep(timeUnit.toMillis(duration)); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/net/mguenther/kafka/junit/provider/DefaultRecordProducer.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit.provider; 2 | 3 | import lombok.RequiredArgsConstructor; 4 | import net.mguenther.kafka.junit.KeyValue; 5 | import net.mguenther.kafka.junit.RecordProducer; 6 | import net.mguenther.kafka.junit.SendKeyValues; 7 | import net.mguenther.kafka.junit.SendKeyValuesTransactional; 8 | import net.mguenther.kafka.junit.SendValues; 9 | import net.mguenther.kafka.junit.SendValuesTransactional; 10 | import org.apache.kafka.clients.producer.KafkaProducer; 11 | import org.apache.kafka.clients.producer.Producer; 12 | import org.apache.kafka.clients.producer.ProducerConfig; 13 | import org.apache.kafka.clients.producer.ProducerRecord; 14 | import org.apache.kafka.clients.producer.RecordMetadata; 15 | import org.apache.kafka.common.errors.OutOfOrderSequenceException; 16 | import org.apache.kafka.common.errors.ProducerFencedException; 17 | 18 | import java.util.ArrayList; 19 | import java.util.Collection; 20 | import java.util.Collections; 21 | import java.util.HashMap; 22 | import java.util.List; 23 | import java.util.Map; 24 | import java.util.Properties; 25 | import java.util.concurrent.ExecutionException; 26 | import java.util.concurrent.Future; 27 | import java.util.stream.Collectors; 28 | 29 | @RequiredArgsConstructor 30 | public class DefaultRecordProducer implements RecordProducer { 31 | 32 | private final String bootstrapServers; 33 | 34 | @Override 35 | public List send(final SendValues sendRequest) throws InterruptedException { 36 | final Collection> records = sendRequest.getValues() 37 | .stream() 38 | .map(value -> new KeyValue<>((String) null, value)) 39 | .collect(Collectors.toList()); 40 | final SendKeyValues keyValueRequest = SendKeyValues 41 | .to(sendRequest.getTopic(), records) 42 | .withAll(sendRequest.getProducerProps()) 43 | .build(); 44 | return 
send(keyValueRequest); 45 | } 46 | 47 | @Override 48 | public List send(final SendValuesTransactional sendRequest) throws InterruptedException { 49 | final Map>> recordsPerTopic = new HashMap<>(); 50 | for (String topic : sendRequest.getValuesPerTopic().keySet()) { 51 | final Collection> records = sendRequest.getValuesPerTopic().get(topic) 52 | .stream() 53 | .map(value -> new KeyValue<>((String) null, value)) 54 | .collect(Collectors.toList()); 55 | recordsPerTopic.put(topic, records); 56 | } 57 | final SendKeyValuesTransactional keyValueRequest = SendKeyValuesTransactional.inTransaction(recordsPerTopic) 58 | .withAll(sendRequest.getProducerProps()) 59 | .withFailTransaction(sendRequest.shouldFailTransaction()) 60 | .build(); 61 | return send(keyValueRequest); 62 | } 63 | 64 | @Override 65 | public List send(final SendKeyValues sendRequest) throws InterruptedException { 66 | final Producer producer = new KafkaProducer<>(effectiveProducerProps(sendRequest.getProducerProps())); 67 | final List metadata = new ArrayList<>(sendRequest.getRecords().size()); 68 | try { 69 | for (KeyValue record : sendRequest.getRecords()) { 70 | final ProducerRecord producerRecord = new ProducerRecord<>(sendRequest.getTopic(), null, record.getKey(), record.getValue(), record.getHeaders()); 71 | final Future f = producer.send(producerRecord); 72 | try { 73 | metadata.add(f.get()); 74 | } catch (ExecutionException e) { 75 | if (RuntimeException.class.isAssignableFrom(e.getCause().getClass())) throw (RuntimeException) e.getCause(); 76 | else throw new RuntimeException(e.getCause()); 77 | } 78 | } 79 | } finally { 80 | producer.flush(); 81 | producer.close(); 82 | } 83 | return Collections.unmodifiableList(metadata); 84 | } 85 | 86 | @Override 87 | public List send(final SendKeyValuesTransactional sendRequest) throws InterruptedException { 88 | final Producer producer = new KafkaProducer<>(effectiveProducerProps(sendRequest.getProducerProps())); 89 | final List metadata = new ArrayList<>(); 90 | try { 91 | producer.initTransactions(); 92 | producer.beginTransaction(); 93 | for (String topic : sendRequest.getRecordsPerTopic().keySet()) { 94 | for (KeyValue record : sendRequest.getRecordsPerTopic().get(topic)) { 95 | final ProducerRecord producerRecord = new ProducerRecord<>(topic, null, record.getKey(), record.getValue(), record.getHeaders()); 96 | final Future f = producer.send(producerRecord); 97 | try { 98 | metadata.add(f.get()); 99 | } catch (ExecutionException e) { 100 | if (RuntimeException.class.isAssignableFrom(e.getCause().getClass())) throw (RuntimeException) e.getCause(); 101 | else throw new RuntimeException(e.getCause()); 102 | } 103 | } 104 | } 105 | if (sendRequest.shouldFailTransaction()) producer.abortTransaction(); 106 | else producer.commitTransaction(); 107 | } catch (ProducerFencedException e) { 108 | producer.abortTransaction(); 109 | final String message = String.format( 110 | "There happens to be another producer that shares the transactional ID '%s'" + 111 | "with this producer, but that has a newer epoch assigned to it. This producer " + 112 | "has been fenced off, it can no longer write to the log transactionally. Hence, " + 113 | "the ongoing transaction is aborted and the producer closed.", 114 | sendRequest.getProducerProps().get(ProducerConfig.TRANSACTIONAL_ID_CONFIG)); 115 | throw new RuntimeException(message, e); 116 | } catch (OutOfOrderSequenceException e) { 117 | producer.abortTransaction(); 118 | final String message = "This producer has received out-of-band sequence numbers. 
This is a fatal condition " + 119 | "and thus, the producer is no longer able to log transactionally and reliably. " + 120 | "Hence, the ongoing transaction is aborted and the producer closed."; 121 | throw new RuntimeException(message, e); 122 | } finally { 123 | producer.flush(); 124 | producer.close(); 125 | } 126 | return Collections.unmodifiableList(metadata); 127 | } 128 | 129 | private Properties effectiveProducerProps(final Properties originalProducerProps) { 130 | final Properties effectiveProducerProps = new Properties(); 131 | effectiveProducerProps.putAll(originalProducerProps); 132 | effectiveProducerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 133 | return effectiveProducerProps; 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/ConnectorTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import net.mguenther.kafka.junit.connector.InstrumentingConfigBuilder; 4 | import org.junit.jupiter.api.AfterEach; 5 | import org.junit.jupiter.api.BeforeEach; 6 | import org.junit.jupiter.api.DisplayName; 7 | import org.junit.jupiter.api.Test; 8 | 9 | import java.util.Properties; 10 | import java.util.UUID; 11 | 12 | import static net.mguenther.kafka.junit.EmbeddedConnectConfig.kafkaConnect; 13 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 14 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.newClusterConfig; 15 | 16 | class ConnectorTest { 17 | 18 | private final String topic = String.format("topic-%s", UUID.randomUUID().toString()); 19 | 20 | private final String key = String.format("key-%s", UUID.randomUUID().toString()); 21 | 22 | private EmbeddedKafkaCluster kafka; 23 | 24 | @BeforeEach 25 | void prepareEnvironment() { 26 | kafka = provisionWith(newClusterConfig() 27 | .configure(kafkaConnect() 28 | .deployConnector(connectorConfig(topic, key)))); 29 | kafka.start(); 30 | } 31 | 32 | @AfterEach 33 | public void tearDownEnvironment() { 34 | if (kafka != null) kafka.stop(); 35 | } 36 | 37 | @Test 38 | @DisplayName("A given Kafka Connect connector should be provisioned and able to emit records") 39 | void connectorShouldBeProvisionedAndEmitRecords() throws Exception { 40 | 41 | kafka.observe(ObserveKeyValues.on(topic, 1) 42 | .filterOnKeys(k -> k.equalsIgnoreCase(key)) 43 | .build()); 44 | } 45 | 46 | private Properties connectorConfig(final String topic, final String key) { 47 | return InstrumentingConfigBuilder.create() 48 | .withTopic(topic) 49 | .withKey(key) 50 | .with("consumer.override.auto.offset.reset", "latest") 51 | .build(); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/EmbeddedConnectConfigTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.apache.kafka.connect.runtime.WorkerConfig; 4 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig; 5 | import org.junit.jupiter.api.DisplayName; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import java.util.Properties; 9 | 10 | import static org.assertj.core.api.Assertions.assertThat; 11 | 12 | class EmbeddedConnectConfigTest { 13 | 14 | @Test 15 | @DisplayName("should use defaults if not explicitly overriden") 16 | void shouldUseDefaultsIfNotOverridden() { 17 | 18 | final 
EmbeddedConnectConfig config = EmbeddedConnectConfig.kafkaConnect().build(); 19 | final Properties props = config.getConnectProperties(); 20 | 21 | assertThat(props.get(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG)).isEqualTo("org.apache.kafka.connect.storage.StringConverter"); 22 | assertThat(props.get(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG)).isEqualTo("org.apache.kafka.connect.storage.StringConverter"); 23 | assertThat(props.get("internal.key.converter.schemas.enable")).isEqualTo("false"); 24 | assertThat(props.get("internal.value.converter.schemas.enable")).isEqualTo("false"); 25 | assertThat(props.get(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG)).isEqualTo("1"); 26 | assertThat(props.get(DistributedConfig.CONFIG_TOPIC_CONFIG)).isEqualTo("embedded-connect-config"); 27 | assertThat(props.get(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG)).isEqualTo("1"); 28 | assertThat(props.get(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG)).isEqualTo("embedded-connect-offsets"); 29 | assertThat(props.get(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG)).isEqualTo("1"); 30 | assertThat(props.get(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG)).isEqualTo("embedded-connect-status"); 31 | assertThat(props.get(DistributedConfig.GROUP_ID_CONFIG)).isNotNull(); 32 | } 33 | 34 | @Test 35 | @DisplayName("with(param) should override the corresponding default setting") 36 | void withShouldOverrideDefaultSetting() { 37 | 38 | final EmbeddedConnectConfig config = EmbeddedConnectConfig 39 | .kafkaConnect() 40 | .with(DistributedConfig.GROUP_ID_CONFIG, "test-group") 41 | .build(); 42 | final Properties props = config.getConnectProperties(); 43 | 44 | assertThat(props.get(DistributedConfig.GROUP_ID_CONFIG)).isEqualTo("test-group"); 45 | } 46 | 47 | @Test 48 | @DisplayName("withAll(params) should override the corresponding default settings") 49 | void withAllShouldOverrideDefaultSettings() { 50 | 51 | final Properties overrides = new Properties(); 52 | overrides.put(DistributedConfig.GROUP_ID_CONFIG, "test-group"); 53 | overrides.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "status-topic"); 54 | 55 | final EmbeddedConnectConfig config = EmbeddedConnectConfig 56 | .kafkaConnect() 57 | .withAll(overrides) 58 | .build(); 59 | final Properties props = config.getConnectProperties(); 60 | 61 | assertThat(props.get(DistributedConfig.GROUP_ID_CONFIG)).isEqualTo("test-group"); 62 | assertThat(props.get(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG)).isEqualTo("status-topic"); 63 | } 64 | 65 | @Test 66 | @DisplayName("deployConnector should retain the configuration of the connector") 67 | void deployConnectorShouldStoreConnectorConfig() { 68 | 69 | final Properties connectorConfig = new Properties(); 70 | final EmbeddedConnectConfig config = EmbeddedConnectConfig 71 | .kafkaConnect() 72 | .deployConnector(connectorConfig) 73 | .build(); 74 | 75 | assertThat(config.getConnectors().size()).isEqualTo(1); 76 | assertThat(config.getConnectors().contains(connectorConfig)).isTrue(); 77 | } 78 | 79 | @Test 80 | @DisplayName("deployConnectors should retain all configurations for the given connectors") 81 | void deployConnectorsShouldStoreConnectorConfigs() { 82 | 83 | final EmbeddedConnectConfig config = EmbeddedConnectConfig 84 | .kafkaConnect() 85 | .deployConnectors(new Properties(), new Properties()) 86 | .build(); 87 | 88 | assertThat(config.getConnectors().size()).isEqualTo(2); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- 
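As a point of reference before the next listing, the following sketch is an editorial addition and not a file of this repository; it shows how the configuration classes exercised by the surrounding tests are typically combined in a test fixture. The broker count, the overridden property, and the class name ProvisioningSketch are illustrative only.

import net.mguenther.kafka.junit.EmbeddedKafkaCluster;

import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.newClusterConfig;
import static net.mguenther.kafka.junit.EmbeddedKafkaConfig.brokers;

class ProvisioningSketch {

    void provisionAndTearDown() {
        // provision an embedded cluster with three brokers and one overridden broker property
        final EmbeddedKafkaCluster kafka = provisionWith(newClusterConfig()
                .configure(brokers()
                        .withNumberOfBrokers(3)
                        .with("auto.create.topics.enable", "false")));
        kafka.start();
        // ... interact with the cluster, e.g. via kafka.getBrokerList() ...
        kafka.stop();
    }
}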
/src/test/java/net/mguenther/kafka/junit/EmbeddedKafkaConfigTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import kafka.server.KafkaConfig$; 4 | import org.junit.jupiter.api.DisplayName; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import java.util.Properties; 8 | 9 | import static org.assertj.core.api.Assertions.assertThat; 10 | 11 | class EmbeddedKafkaConfigTest { 12 | 13 | @Test 14 | @DisplayName("should use defaults if not explicitly overridden") 15 | void shouldUseDefaultsIfNotOverridden() { 16 | 17 | final EmbeddedKafkaConfig config = EmbeddedKafkaConfig.defaultBrokers(); 18 | final Properties props = config.getBrokerProperties(); 19 | 20 | assertThat(props.get(KafkaConfig$.MODULE$.ZkSessionTimeoutMsProp())).isEqualTo("8000"); 21 | assertThat(props.get(KafkaConfig$.MODULE$.ZkConnectionTimeoutMsProp())).isEqualTo("10000"); 22 | assertThat(props.get(KafkaConfig$.MODULE$.NumPartitionsProp())).isEqualTo("1"); 23 | assertThat(props.get(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp())).isEqualTo("true"); 24 | assertThat(props.get(KafkaConfig$.MODULE$.MessageMaxBytesProp())).isEqualTo("1000000"); 25 | assertThat(props.get(KafkaConfig$.MODULE$.ControlledShutdownEnableProp())).isEqualTo("true"); 26 | assertThat(props.get(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp())).isEqualTo("1"); 27 | assertThat(props.get(KafkaConfig$.MODULE$.GroupInitialRebalanceDelayMsProp())).isEqualTo(0); 28 | assertThat(props.get(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp())).isEqualTo("1"); 29 | assertThat(props.get(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp())).isEqualTo("1"); 30 | } 31 | 32 | @Test 33 | @DisplayName("with(param) should override the corresponding default setting") 34 | void withShouldOverrideDefaultSetting() { 35 | 36 | final EmbeddedKafkaConfig config = EmbeddedKafkaConfig 37 | .brokers() 38 | .with(KafkaConfig$.MODULE$.AdvertisedListenersProp(), "localhost:9092") 39 | .build(); 40 | final Properties props = config.getBrokerProperties(); 41 | 42 | assertThat(props.get(KafkaConfig$.MODULE$.AdvertisedListenersProp())).isEqualTo("localhost:9092"); 43 | } 44 | 45 | @Test 46 | @DisplayName("withAll(params) should override the corresponding default settings") 47 | void withAllShouldOverrideDefaultSettings() { 48 | 49 | final Properties overrides = new Properties(); 50 | overrides.put(KafkaConfig$.MODULE$.AdvertisedListenersProp(), "localhost:9092"); 51 | overrides.put(KafkaConfig$.MODULE$.NumPartitionsProp(), "2"); 52 | 53 | final EmbeddedKafkaConfig config = EmbeddedKafkaConfig 54 | .brokers() 55 | .withAll(overrides) 56 | .build(); 57 | final Properties props = config.getBrokerProperties(); 58 | 59 | assertThat(props.get(KafkaConfig$.MODULE$.AdvertisedListenersProp())).isEqualTo("localhost:9092"); 60 | assertThat(props.get(KafkaConfig$.MODULE$.NumPartitionsProp())).isEqualTo("2"); 61 | } 62 | 63 | @Test 64 | @DisplayName("should yield listeners for multiple brokers") 65 | void shouldYieldListenersForMultipleBrokers() { 66 | 67 | final EmbeddedKafkaConfig config = EmbeddedKafkaConfig 68 | .brokers() 69 | .withNumberOfBrokers(3) 70 | .build(); 71 | 72 | assertThat(config.listenerFor(0)).startsWith("PLAINTEXT://localhost"); 73 | assertThat(config.listenerFor(1)).startsWith("PLAINTEXT://localhost"); 74 | assertThat(config.listenerFor(2)).startsWith("PLAINTEXT://localhost"); 75 | } 76 | 77 | @Test 78 | @DisplayName("should yield default listener for single broker") 79 | void 
shouldYieldDefaultListenerForSingleBroker() { 80 | 81 | final EmbeddedKafkaConfig config = EmbeddedKafkaConfig.defaultBrokers(); 82 | 83 | assertThat(config.listenerFor(0)).isEqualTo("PLAINTEXT://localhost:9092"); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/EmbeddedZooKeeperConfigTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.junit.jupiter.api.DisplayName; 4 | import org.junit.jupiter.api.Test; 5 | 6 | import static org.assertj.core.api.Assertions.assertThat; 7 | 8 | class EmbeddedZooKeeperConfigTest { 9 | 10 | @Test 11 | @DisplayName("should use a randomly chosen port per default") 12 | void useDefaultsShouldUseRandomPort() { 13 | final EmbeddedZooKeeperConfig config = EmbeddedZooKeeperConfig.defaultZooKeeper(); 14 | assertThat(config.getPort()).isEqualTo(EmbeddedZooKeeperConfig.USE_RANDOM_ZOOKEEPER_PORT); 15 | } 16 | 17 | @Test 18 | @DisplayName("withPort should override the default port") 19 | void withPortShouldOverrideDefaultPort() { 20 | final EmbeddedZooKeeperConfig config = EmbeddedZooKeeperConfig 21 | .zooKeeper() 22 | .withPort(8090) 23 | .build(); 24 | assertThat(config.getPort()).isEqualTo(8090); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/ExternalKafkaClusterTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.junit.jupiter.api.DisplayName; 4 | import org.junit.jupiter.api.Test; 5 | import org.testcontainers.containers.KafkaContainer; 6 | import org.testcontainers.junit.jupiter.Container; 7 | import org.testcontainers.junit.jupiter.Testcontainers; 8 | import org.testcontainers.utility.DockerImageName; 9 | 10 | import java.util.ArrayList; 11 | import java.util.List; 12 | 13 | import static net.mguenther.kafka.junit.ObserveKeyValues.on; 14 | import static net.mguenther.kafka.junit.SendKeyValues.to; 15 | 16 | @Testcontainers 17 | class ExternalKafkaClusterTest { 18 | 19 | @Container 20 | private final KafkaContainer kafkaContainer = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:5.4.3")); 21 | 22 | @Test 23 | @DisplayName("should be able to observe records written to an external Kafka cluster") 24 | void externalKafkaClusterShouldWorkWithExternalResources() throws Exception { 25 | 26 | final ExternalKafkaCluster kafka = ExternalKafkaCluster.at(kafkaContainer.getBootstrapServers()); 27 | 28 | final List> records = new ArrayList<>(); 29 | 30 | records.add(new KeyValue<>("aggregate", "a")); 31 | records.add(new KeyValue<>("aggregate", "b")); 32 | records.add(new KeyValue<>("aggregate", "c")); 33 | 34 | kafka.send(to("test-topic", records)); 35 | kafka.observe(on("test-topic", 3).useDefaults()); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/KeyValueTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.apache.kafka.common.header.Headers; 4 | import org.apache.kafka.common.header.internals.RecordHeaders; 5 | import org.junit.jupiter.api.DisplayName; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import java.nio.charset.StandardCharsets; 9 | 10 | import static 
org.assertj.core.api.Assertions.assertThat; 11 | 12 | class KeyValueTest { 13 | 14 | @Test 15 | @DisplayName("should preserve added headers") 16 | void shouldPreserveAddedHeaders() { 17 | 18 | final KeyValue keyValue = new KeyValue<>("k", "v"); 19 | keyValue.addHeader("headerName", "headerValue", StandardCharsets.UTF_8); 20 | 21 | assertThat(keyValue.getHeaders().lastHeader("headerName").value()).isEqualTo("headerValue".getBytes(StandardCharsets.UTF_8)); 22 | } 23 | 24 | @Test 25 | @DisplayName("should preserve headers given on construction") 26 | void shouldPreserveHeadersGivenOnConstruction() { 27 | 28 | final Headers headers = new RecordHeaders(); 29 | headers.add("headerName", "headerValue".getBytes(StandardCharsets.UTF_8)); 30 | final KeyValue keyValue = new KeyValue<>("k", "v", headers); 31 | 32 | assertThat(keyValue.getHeaders().lastHeader("headerName").value()).isEqualTo("headerValue".getBytes(StandardCharsets.UTF_8)); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/MultipleBrokersTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import kafka.server.KafkaConfig$; 4 | import lombok.extern.slf4j.Slf4j; 5 | import org.apache.kafka.clients.producer.ProducerConfig; 6 | import org.apache.kafka.common.errors.NotEnoughReplicasException; 7 | import org.junit.jupiter.api.AfterEach; 8 | import org.junit.jupiter.api.Assertions; 9 | import org.junit.jupiter.api.BeforeEach; 10 | import org.junit.jupiter.api.DisplayName; 11 | import org.junit.jupiter.api.Test; 12 | 13 | import java.util.Arrays; 14 | import java.util.List; 15 | import java.util.Set; 16 | import java.util.stream.Collectors; 17 | 18 | import static java.util.Collections.singletonList; 19 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; 20 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.newClusterConfig; 21 | import static net.mguenther.kafka.junit.EmbeddedKafkaConfig.brokers; 22 | import static net.mguenther.kafka.junit.ObserveKeyValues.on; 23 | import static net.mguenther.kafka.junit.SendKeyValuesTransactional.inTransaction; 24 | import static net.mguenther.kafka.junit.SendValues.to; 25 | import static net.mguenther.kafka.junit.SendValuesTransactional.inTransaction; 26 | import static net.mguenther.kafka.junit.TopicConfig.withName; 27 | import static net.mguenther.kafka.junit.Wait.delay; 28 | import static org.assertj.core.api.Assertions.assertThat; 29 | import static org.assertj.core.api.Fail.fail; 30 | 31 | @Slf4j 32 | class MultipleBrokersTest { 33 | 34 | private EmbeddedKafkaCluster kafka; 35 | 36 | @BeforeEach 37 | void prepareEnvironment() { 38 | kafka = provisionWith(newClusterConfig() 39 | .configure(brokers() 40 | .withNumberOfBrokers(3) 41 | .with(KafkaConfig$.MODULE$.TransactionsTopicReplicationFactorProp(), "1") 42 | .with(KafkaConfig$.MODULE$.TransactionsTopicMinISRProp(), "1"))); 43 | kafka.start(); 44 | } 45 | 46 | @AfterEach 47 | void tearDownEnvironment() { 48 | if (kafka != null) kafka.stop(); 49 | } 50 | 51 | @Test 52 | @DisplayName("multiple brokers should comprise the in-sync replica set of topics") 53 | void multipleBrokersCompriseTheInSyncReplicaSetOfTopics() throws Exception { 54 | 55 | kafka.createTopic(withName("test-topic") 56 | .withNumberOfPartitions(5) 57 | .withNumberOfReplicas(3)); 58 | 59 | delay(5); 60 | 61 | final Set leaders = leaders("test-topic"); 62 | 63 | 
assertThat(leaders.size()).isEqualTo(3); 64 | assertThat(leaders.contains(1)).isTrue(); 65 | assertThat(leaders.contains(2)).isTrue(); 66 | assertThat(leaders.contains(3)).isTrue(); 67 | } 68 | 69 | @Test 70 | @DisplayName("disconnected broker leaves in-sync replica set of topic and rejoins it after re-joining the cluster") 71 | void disconnectedBrokerLeavesIsrOfTopicAndRejoinsItAfterReconnecting() throws Exception { 72 | 73 | kafka.createTopic(withName("test-topic") 74 | .withNumberOfPartitions(5) 75 | .withNumberOfReplicas(3)); 76 | 77 | delay(5); 78 | 79 | Set leaders = leaders("test-topic"); 80 | 81 | assertThat(leaders.contains(1)).isTrue(); 82 | assertThat(leaders.contains(2)).isTrue(); 83 | assertThat(leaders.contains(3)).isTrue(); 84 | 85 | kafka.disconnect(1); 86 | 87 | delay(5); 88 | 89 | Set leadersAfterDisconnect = leaders("test-topic"); 90 | 91 | assertThat(leadersAfterDisconnect.contains(1)).isFalse(); 92 | assertThat(leadersAfterDisconnect.contains(2)).isTrue(); 93 | assertThat(leadersAfterDisconnect.contains(3)).isTrue(); 94 | 95 | kafka.connect(1); 96 | 97 | delay(10); 98 | 99 | Set leadersAfterReconnect = leaders("test-topic"); 100 | 101 | assertThat(leadersAfterReconnect.contains(1)).isTrue(); 102 | assertThat(leadersAfterReconnect.contains(2)).isTrue(); 103 | assertThat(leadersAfterReconnect.contains(3)).isTrue(); 104 | } 105 | 106 | @Test 107 | @DisplayName("should throw NotEnoughReplicasException when trying to send non-keyed records and ISR has fallen below its minimum size") 108 | void disconnectUntilIsrFallsBelowMinimumSizeShouldThrowNotEnoughReplicasExceptionWhenSendingValues() throws Exception { 109 | 110 | kafka.createTopic(withName("test-topic") 111 | .withNumberOfPartitions(5) 112 | .withNumberOfReplicas(3) 113 | .with("min.insync.replicas", "2")); 114 | 115 | delay(5); 116 | 117 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 118 | 119 | delay(5); 120 | 121 | Assertions.assertThrows(NotEnoughReplicasException.class, () -> { 122 | kafka.send(to("test-topic", "A")); 123 | }); 124 | } 125 | 126 | @Test 127 | @DisplayName("should throw NotEnoughReplicasException when trying to send non-keyed records within a transaction and ISR has fallen below its minimum size") 128 | void disconnectUntilIsrFallsBelowMinimumSizeShouldThrowNotEnoughReplicasExceptionWhenSendingValuesTransactionally() throws Exception { 129 | 130 | kafka.createTopic(withName("test-topic") 131 | .withNumberOfPartitions(5) 132 | .withNumberOfReplicas(3) 133 | .with("min.insync.replicas", "2")); 134 | 135 | delay(5); 136 | 137 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 138 | 139 | delay(5); 140 | 141 | Assertions.assertThrows(NotEnoughReplicasException.class, () -> { 142 | kafka.send(inTransaction("test-topic", "A") 143 | .with(ProducerConfig.RETRIES_CONFIG, 1)); 144 | }); 145 | } 146 | 147 | @Test 148 | @DisplayName("should throw NotEnoughReplicasException when trying to send keyed records and ISR has fallen below its minimum size") 149 | void disconnectUntilIsrFallsBelowMinimumSizeShouldThrowNotEnoughReplicasExceptionWhenSendingKeyValues() throws Exception { 150 | 151 | kafka.createTopic(withName("test-topic") 152 | .withNumberOfPartitions(5) 153 | .withNumberOfReplicas(3) 154 | .with("min.insync.replicas", "2")); 155 | 156 | delay(5); 157 | 158 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 159 | 160 | delay(5); 161 | 162 | Assertions.assertThrows(NotEnoughReplicasException.class, () -> { 163 | kafka.send(SendKeyValues.to("test-topic", 
singletonList(new KeyValue<>("a", "A")))); 164 | }); 165 | } 166 | 167 | @Test 168 | @DisplayName("should throw NotEnoughReplicasException when trying to send keyed records within a transaction and ISR has fallen below its minimum size") 169 | void disconnectUntilIsrFallsBelowMinimumSizeShouldThrowNotEnoughReplicasExceptionWhenSendingKeyValuesTransactionally() throws Exception { 170 | 171 | kafka.createTopic(withName("test-topic") 172 | .withNumberOfPartitions(5) 173 | .withNumberOfReplicas(3) 174 | .with("min.insync.replicas", "2")); 175 | 176 | delay(5); 177 | 178 | kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 179 | 180 | delay(5); 181 | 182 | Assertions.assertThrows(NotEnoughReplicasException.class, () -> { 183 | kafka.send(inTransaction("test-topic", singletonList(new KeyValue<>("a", "A"))) 184 | .with(ProducerConfig.RETRIES_CONFIG, 1)); 185 | }); 186 | } 187 | 188 | @Test 189 | @DisplayName("should be able to submit records after restoring previously disconnected in-sync replica set") 190 | void shouldBeAbleToWriteRecordsAfterRestoringDisconnectedIsr() throws Exception { 191 | 192 | kafka.createTopic(withName("test-topic") 193 | .withNumberOfPartitions(5) 194 | .withNumberOfReplicas(3) 195 | .with("min.insync.replicas", "2")); 196 | 197 | delay(5); 198 | 199 | final Set<Integer> disconnectedBrokers = kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 200 | 201 | delay(5); 202 | 203 | try { 204 | kafka.send(to("test-topic", "A")); 205 | fail("A NotEnoughReplicasException is expected, but has not been raised."); 206 | } catch (NotEnoughReplicasException e) { 207 | // ignore, this is expected 208 | } 209 | 210 | kafka.connect(disconnectedBrokers); 211 | 212 | delay(5); 213 | 214 | kafka.send(to("test-topic", "A")); 215 | kafka.observeValues(on("test-topic", 1)); 216 | } 217 | 218 | @Test 219 | @DisplayName("a re-enabled broker should bind to the same port as it was bound before") 220 | void reActivatedBrokersShouldBindToTheSamePortAsTheyWereBoundToBefore() throws Exception { 221 | 222 | kafka.createTopic(withName("test-topic") 223 | .withNumberOfPartitions(5) 224 | .withNumberOfReplicas(3) 225 | .with("min.insync.replicas", "2")); 226 | 227 | final List<String> brokersBeforeDisconnect = Arrays.asList(kafka.getBrokerList().split(",")); 228 | 229 | final Set<Integer> disconnectedBrokers = kafka.disconnectUntilIsrFallsBelowMinimumSize("test-topic"); 230 | 231 | assertThat(disconnectedBrokers.size()).isEqualTo(2); 232 | 233 | delay(5); 234 | 235 | kafka.connect(disconnectedBrokers); 236 | 237 | delay(5); 238 | 239 | final List<String> brokersAfterReconnect = Arrays.asList(kafka.getBrokerList().split(",")); 240 | 241 | assertThat(brokersAfterReconnect).containsAll(brokersBeforeDisconnect); 242 | assertThat(brokersBeforeDisconnect).containsAll(brokersAfterReconnect); 243 | } 244 | 245 | private Set<Integer> leaders(final String topic) { 246 | return kafka.fetchLeaderAndIsr(topic) 247 | .values() 248 | .stream() 249 | .peek(leaderAndIsr -> log.info("Assignment: {}", leaderAndIsr.toString())) 250 | .map(LeaderAndIsr::getLeader) 251 | .collect(Collectors.toSet()); 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/ObserveKeyValuesTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerConfig; 4 | import org.apache.kafka.common.serialization.StringDeserializer; 5 | import
org.junit.jupiter.api.DisplayName; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import java.util.Collections; 9 | import java.util.Properties; 10 | import java.util.concurrent.TimeUnit; 11 | 12 | import static org.assertj.core.api.Assertions.assertThat; 13 | 14 | class ObserveKeyValuesTest { 15 | 16 | @Test 17 | @DisplayName("should preserve constructor arguments") 18 | void shouldPreserveConstructorArguments() { 19 | 20 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10).useDefaults(); 21 | 22 | assertThat(observeRequest.getTopic()).isEqualTo("test"); 23 | assertThat(observeRequest.getExpected()).isEqualTo(10); 24 | assertThat(observeRequest.getObservationTimeMillis()).isEqualTo(ObserveKeyValues.DEFAULT_OBSERVATION_TIME_MILLIS); 25 | } 26 | 27 | @Test 28 | @DisplayName("should use defaults if not overridden") 29 | void shouldUseDefaultsIfNotOverridden() { 30 | 31 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10).useDefaults(); 32 | final Properties props = observeRequest.getConsumerProps(); 33 | 34 | assertThat(props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest"); 35 | assertThat(props.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)).isEqualTo(false); 36 | assertThat(props.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)).isEqualTo(StringDeserializer.class); 37 | assertThat(props.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)).isEqualTo(StringDeserializer.class); 38 | assertThat(props.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG)).isEqualTo(100); 39 | assertThat(props.get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_uncommitted"); 40 | assertThat(observeRequest.isIncludeMetadata()).isFalse(); 41 | assertThat(observeRequest.getSeekTo().isEmpty()).isTrue(); 42 | } 43 | 44 | @Test 45 | @DisplayName("observeFor should override default observation time") 46 | void observeForShouldOverrideDefaultObservationTime() { 47 | 48 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10) 49 | .observeFor(10, TimeUnit.SECONDS) 50 | .build(); 51 | 52 | assertThat(observeRequest.getObservationTimeMillis()).isEqualTo((int) TimeUnit.SECONDS.toMillis(10)); 53 | } 54 | 55 | @Test 56 | @DisplayName("with should override default setting of the given parameter with the given value") 57 | void withShouldOverrideDefaultSetting() { 58 | 59 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10) 60 | .with(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed") 61 | .build(); 62 | 63 | assertThat(observeRequest.getConsumerProps().get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_committed"); 64 | } 65 | 66 | @Test 67 | @DisplayName("withAll should override the default settings of the given parameters with the resp. 
values") 68 | void withAllShouldOverrideDefaultSettings() { 69 | 70 | final Properties overrides = new Properties(); 71 | overrides.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); 72 | overrides.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000L); 73 | 74 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10) 75 | .withAll(overrides) 76 | .build(); 77 | 78 | assertThat(observeRequest.getConsumerProps().get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_committed"); 79 | assertThat(observeRequest.getConsumerProps().get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG)).isEqualTo(1000L); 80 | } 81 | 82 | @Test 83 | @DisplayName("includeMetadata should override its default setting") 84 | void includeMetadataShouldOverrideItsDefaultSetting() { 85 | 86 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10) 87 | .includeMetadata() 88 | .build(); 89 | 90 | assertThat(observeRequest.isIncludeMetadata()).isTrue(); 91 | } 92 | 93 | @Test 94 | @DisplayName("seekTo should preserve seek settings") 95 | void seekToShouldPreserveSeekSettings() { 96 | 97 | final ObserveKeyValues observeRequest = ObserveKeyValues.on("test", 10) 98 | .seekTo(0, 1L) 99 | .seekTo(Collections.singletonMap(1, 2L)) 100 | .build(); 101 | 102 | assertThat(observeRequest.getSeekTo().size()).isEqualTo(2); 103 | assertThat(observeRequest.getSeekTo().get(0)).isEqualTo(1L); 104 | assertThat(observeRequest.getSeekTo().get(1)).isEqualTo(2L); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/PropsTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.junit.jupiter.api.DisplayName; 4 | import org.junit.jupiter.api.Test; 5 | 6 | import java.util.Properties; 7 | 8 | import static org.assertj.core.api.Assertions.assertThat; 9 | 10 | class PropsTest { 11 | 12 | @Test 13 | @DisplayName("build should retain properties that were added using a call to with") 14 | void buildShouldRetainPreviouslyAddedPropertiesUsingWith() { 15 | final Properties props = Props.create().with("test", "some-value").build(); 16 | assertThat(props.getProperty("test")).isEqualTo("some-value"); 17 | } 18 | 19 | @Test 20 | @DisplayName("build should retain properties that were added using a call to withAll") 21 | void buildShouldRetainPreviouslyAddedPropertiesUsingWithAll() { 22 | final Properties props = Props.create().with("test", "some-value").build(); 23 | final Properties withAllProps = Props.create().withAll(props).build(); 24 | assertThat(withAllProps.getProperty("test")).isEqualTo("some-value"); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/test/java/net/mguenther/kafka/junit/ReadKeyValuesTest.java: -------------------------------------------------------------------------------- 1 | package net.mguenther.kafka.junit; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerConfig; 4 | import org.apache.kafka.common.serialization.StringDeserializer; 5 | import org.junit.jupiter.api.DisplayName; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import java.util.Collections; 9 | import java.util.Properties; 10 | import java.util.concurrent.TimeUnit; 11 | 12 | import static org.assertj.core.api.Assertions.assertThat; 13 | 14 | class ReadKeyValuesTest { 15 | 16 | @Test 17 | @DisplayName("should preserve constructor arguments") 18 | void shouldPreserveConstructorArguments() { 19 | 
20 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test").useDefaults();
21 | 
22 |         assertThat(readRequest.getTopic()).isEqualTo("test");
23 |         assertThat(readRequest.getLimit()).isEqualTo(ReadKeyValues.WITHOUT_LIMIT);
24 |         assertThat(readRequest.getMaxTotalPollTimeMillis()).isEqualTo(ReadKeyValues.DEFAULT_MAX_TOTAL_POLL_TIME_MILLIS);
25 |     }
26 | 
27 |     @Test
28 |     @DisplayName("should use defaults if not overridden")
29 |     void shouldUseDefaultsIfNotOverridden() {
30 | 
31 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test").useDefaults();
32 |         final Properties props = readRequest.getConsumerProps();
33 | 
34 |         assertThat(props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");
35 |         assertThat(props.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)).isEqualTo(false);
36 |         assertThat(props.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)).isEqualTo(StringDeserializer.class);
37 |         assertThat(props.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)).isEqualTo(StringDeserializer.class);
38 |         assertThat(props.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG)).isEqualTo(100);
39 |         assertThat(props.get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_uncommitted");
40 |         assertThat(readRequest.isIncludeMetadata()).isFalse();
41 |         assertThat(readRequest.getSeekTo().isEmpty()).isTrue();
42 |     }
43 | 
44 |     @Test
45 |     @DisplayName("unlimited should not restrict limit setting")
46 |     void unlimitedShouldNotRestrictLimitSetting() {
47 | 
48 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
49 |                 .withLimit(1)
50 |                 .unlimited()
51 |                 .build();
52 | 
53 |         assertThat(readRequest.getLimit()).isEqualTo(ReadKeyValues.WITHOUT_LIMIT);
54 |     }
55 | 
56 |     @Test
57 |     @DisplayName("withLimit should restrict limit setting")
58 |     void withLimitShouldRestrictLimitSetting() {
59 | 
60 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
61 |                 .withLimit(1)
62 |                 .build();
63 | 
64 |         assertThat(readRequest.getLimit()).isEqualTo(1);
65 |     }
66 | 
67 |     @Test
68 |     @DisplayName("withMaxPollTime should override its default setting")
69 |     void withMaxPollTimeShouldOverrideItsDefault() {
70 | 
71 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
72 |                 .withMaxTotalPollTime(10, TimeUnit.SECONDS)
73 |                 .build();
74 | 
75 |         assertThat(readRequest.getMaxTotalPollTimeMillis()).isEqualTo((int) TimeUnit.SECONDS.toMillis(10));
76 |     }
77 | 
78 |     @Test
79 |     @DisplayName("includeMetadata should override its default setting")
80 |     void includeMetadataShouldOverrideItsDefaultSetting() {
81 | 
82 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
83 |                 .includeMetadata()
84 |                 .build();
85 | 
86 |         assertThat(readRequest.isIncludeMetadata()).isTrue();
87 |     }
88 | 
89 |     @Test
90 |     @DisplayName("seekTo should preserve seek settings")
91 |     void seekToShouldPreserveSeekSettings() {
92 | 
93 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
94 |                 .seekTo(0, 1L)
95 |                 .seekTo(Collections.singletonMap(1, 2L))
96 |                 .build();
97 | 
98 |         assertThat(readRequest.getSeekTo().size()).isEqualTo(2);
99 |         assertThat(readRequest.getSeekTo().get(0)).isEqualTo(1L);
100 |         assertThat(readRequest.getSeekTo().get(1)).isEqualTo(2L);
101 |     }
102 | 
103 |     @Test
104 |     @DisplayName("with should override the default setting of the given parameter with the given value")
105 |     void withShouldOverrideDefaultSetting() {
106 | 
107 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
108 |                 .with(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
109 |                 .build();
110 | 
111 |         assertThat(readRequest.getConsumerProps().get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_committed");
112 |     }
113 | 
114 |     @Test
115 |     @DisplayName("withAll should override the default settings of the given parameters with the resp. values")
116 |     void withAllShouldOverrideDefaultSettings() {
117 | 
118 |         final Properties overrides = new Properties();
119 |         overrides.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
120 |         overrides.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000L);
121 | 
122 |         final ReadKeyValues<String, String> readRequest = ReadKeyValues.from("test")
123 |                 .withAll(overrides)
124 |                 .build();
125 | 
126 |         assertThat(readRequest.getConsumerProps().get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)).isEqualTo("read_committed");
127 |         assertThat(readRequest.getConsumerProps().get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG)).isEqualTo(1000L);
128 |     }
129 | }
130 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/RecordProducerTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.apache.kafka.clients.consumer.ConsumerConfig;
4 | import org.apache.kafka.clients.producer.ProducerConfig;
5 | import org.junit.jupiter.api.AfterEach;
6 | import org.junit.jupiter.api.Assertions;
7 | import org.junit.jupiter.api.BeforeEach;
8 | import org.junit.jupiter.api.DisplayName;
9 | import org.junit.jupiter.api.Test;
10 | 
11 | import java.nio.charset.StandardCharsets;
12 | import java.util.ArrayList;
13 | import java.util.List;
14 | import java.util.concurrent.TimeUnit;
15 | 
16 | import static java.util.Arrays.asList;
17 | import static java.util.Collections.singletonList;
18 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
19 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
20 | import static net.mguenther.kafka.junit.ObserveKeyValues.on;
21 | import static net.mguenther.kafka.junit.SendKeyValuesTransactional.inTransaction;
22 | import static net.mguenther.kafka.junit.SendValues.to;
23 | import static org.assertj.core.api.Assertions.assertThat;
24 | 
25 | class RecordProducerTest {
26 | 
27 |     private EmbeddedKafkaCluster kafka;
28 | 
29 |     @BeforeEach
30 |     void prepareEnvironment() {
31 |         kafka = provisionWith(defaultClusterConfig());
32 |         kafka.start();
33 |     }
34 | 
35 |     @AfterEach
36 |     void tearDownEnvironment() {
37 |         if (kafka != null) kafka.stop();
38 |     }
39 | 
40 |     @Test
41 |     @DisplayName("calling send with non-keyed records and default settings should write all given records to the target topic")
42 |     void sendingUnkeyedRecordsWithDefaults() throws Exception {
43 | 
44 |         kafka.send(to("test-topic", "a", "b", "c"));
45 | 
46 |         assertThat(kafka.observeValues(on("test-topic", 3)).size())
47 |                 .isEqualTo(3);
48 |     }
49 | 
50 |     @Test
51 |     @DisplayName("calling send with keyed records and default settings should write all given records to the target topic")
52 |     void sendingKeyedRecordsWithDefaults() throws Exception {
53 | 
54 |         final List<KeyValue<String, String>> records = new ArrayList<>();
55 | 
56 |         records.add(new KeyValue<>("aggregate", "a"));
57 |         records.add(new KeyValue<>("aggregate", "b"));
58 |         records.add(new KeyValue<>("aggregate", "c"));
59 | 
60 |         kafka.send(SendKeyValues.to("test-topic", records));
61 | 
62 |         assertThat(kafka.observeValues(on("test-topic", 3)).size())
63 |                 .isEqualTo(3);
64 |     }
65 | 
66 |     @Test
67 |     @DisplayName("calling send with non-keyed records and altered producer settings should write all given records to the target topic")
68 |     void sendingUnkeyedRecordsWithAlteredProducerSettings() throws Exception {
69 | 
70 |         final SendValues<String> sendRequest = to("test-topic", "a", "b", "c")
71 |                 .with(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
72 |                 .with(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
73 |                 .build();
74 | 
75 |         kafka.send(sendRequest);
76 | 
77 |         assertThat(kafka.observeValues(on("test-topic", 3)).size())
78 |                 .isEqualTo(3);
79 |     }
80 | 
81 |     @Test
82 |     @DisplayName("calling send with keyed records within a transaction should write all given records to the target topic")
83 |     void sendingKeyedRecordsWithinTransaction() throws Exception {
84 | 
85 |         final List<KeyValue<String, String>> records = new ArrayList<>();
86 | 
87 |         records.add(new KeyValue<>("aggregate", "a"));
88 |         records.add(new KeyValue<>("aggregate", "b"));
89 |         records.add(new KeyValue<>("aggregate", "c"));
90 | 
91 |         kafka.send(inTransaction("test-topic", records));
92 |         kafka.observeValues(on("test-topic", 3));
93 |     }
94 | 
95 |     @Test
96 |     @DisplayName("calling send with non-keyed records within a transaction should write all given records to the target topic")
97 |     void sendingUnkeyedRecordsWithinTransaction() throws Exception {
98 | 
99 |         kafka.send(SendValuesTransactional.inTransaction("test-topic", asList("a", "b", "c")));
100 |         kafka.observeValues(on("test-topic", 3));
101 |     }
102 | 
103 |     @Test
104 |     @DisplayName("calling send with non-keyed records for multiple topics should write all given records to the correct topic")
105 |     void sendingUnkeyedRecordsToMultipleTopics() throws Exception {
106 | 
107 |         kafka.send(SendValuesTransactional
108 |                 .inTransaction("test-topic-1", asList("a", "b"))
109 |                 .inTransaction("test-topic-2", asList("c", "d")));
110 |         kafka.observeValues(on("test-topic-1", 2).useDefaults());
111 |         kafka.observeValues(on("test-topic-2", 2).useDefaults());
112 |     }
113 | 
114 |     @Test
115 |     @DisplayName("record headers should be retained")
116 |     void usingRecordHeaders() throws Exception {
117 | 
118 |         final KeyValue<String, String> record = new KeyValue<>("a", "b");
119 |         record.addHeader("client", "kafka-junit-test".getBytes(StandardCharsets.UTF_8));
120 | 
121 |         kafka.send(SendKeyValues.to("test-topic", singletonList(record)));
122 | 
123 |         final List<KeyValue<String, String>> consumedRecords = kafka.read(ReadKeyValues.from("test-topic"));
124 | 
125 |         assertThat(consumedRecords.size()).isEqualTo(1);
126 |         assertThat(new String(consumedRecords.get(0).getHeaders().lastHeader("client").value())).isEqualTo("kafka-junit-test");
127 |     }
128 | 
129 |     @Test
130 |     @DisplayName("non-keyed records written during a failed transaction should not be visible by a transactional consumer")
131 |     void valuesOfAbortedTransactionsShouldNotBeVisibleByTransactionalConsumer() throws Exception {
132 | 
133 |         kafka.send(SendValuesTransactional
134 |                 .inTransaction("test-topic", asList("a", "b"))
135 |                 .failTransaction());
136 | 
137 |         Assertions.assertThrows(AssertionError.class, () -> kafka.observe(on("test-topic", 2)
138 |                 .with(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
139 |                 .observeFor(5, TimeUnit.SECONDS)
140 |                 .build()));
141 |     }
142 | 
143 |     @Test
144 |     @DisplayName("keyed records written during a failed transaction should not be visible by a transactional consumer")
145 |     void keyValuesOfAbortedTransactionsShouldNotBeVisibleByTransactionalConsumer() throws Exception {
146 | 
147 |         kafka.send(inTransaction("test-topic", singletonList(new KeyValue<>("a", "b"))).failTransaction());
148 | 
149 |         Assertions.assertThrows(AssertionError.class, () -> kafka.observe(on("test-topic", 1)
150 |                 .with(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
151 |                 .observeFor(5, TimeUnit.SECONDS)));
152 |     }
153 | }
154 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/SendKeyValuesTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.apache.kafka.clients.producer.ProducerConfig;
4 | import org.apache.kafka.common.serialization.IntegerSerializer;
5 | import org.apache.kafka.common.serialization.StringSerializer;
6 | import org.junit.jupiter.api.DisplayName;
7 | import org.junit.jupiter.api.Test;
8 | 
9 | import java.util.Collection;
10 | import java.util.Collections;
11 | import java.util.Properties;
12 | 
13 | import static org.assertj.core.api.Assertions.assertThat;
14 | 
15 | class SendKeyValuesTest {
16 | 
17 |     @Test
18 |     @DisplayName("should preserve constructor arguments")
19 |     void shouldPreserveConstructorArguments() {
20 | 
21 |         final Collection<KeyValue<String, String>> records = Collections.singletonList(new KeyValue<>("k", "v"));
22 |         final SendKeyValues<String, String> sendRequest = SendKeyValues.to("test-topic", records).useDefaults();
23 | 
24 |         assertThat(sendRequest.getTopic()).isEqualTo("test-topic");
25 |         assertThat(sendRequest.getRecords().size()).isEqualTo(1);
26 |         assertThat(sendRequest.getRecords()).contains(new KeyValue<>("k", "v"));
27 |     }
28 | 
29 |     @Test
30 |     @DisplayName("should use defaults if not overridden")
31 |     void shouldUseDefaultsIfNotOverridden() {
32 | 
33 |         final Collection<KeyValue<String, String>> records = Collections.singletonList(new KeyValue<>("k", "v"));
34 |         final SendKeyValues<String, String> sendRequest = SendKeyValues.to("test-topic", records).useDefaults();
35 |         final Properties props = sendRequest.getProducerProps();
36 | 
37 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
38 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
39 |     }
40 | 
41 |     @Test
42 |     @DisplayName("with should override the default setting of the given parameter with the given value")
43 |     void withShouldOverrideDefaultSetting() {
44 | 
45 |         final Collection<KeyValue<String, Integer>> records = Collections.singletonList(new KeyValue<>("k", 1));
46 |         final SendKeyValues<String, Integer> sendRequest = SendKeyValues.to("test-topic", records)
47 |                 .with(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class)
48 |                 .build();
49 |         final Properties props = sendRequest.getProducerProps();
50 | 
51 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
52 |     }
53 | 
54 |     @Test
55 |     @DisplayName("withAll should override the default settings of the given parameters with the resp. values")
56 |     void withAllShouldOverrideDefaultSettings() {
57 | 
58 |         final Properties overrides = new Properties();
59 |         overrides.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
60 |         overrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
61 | 
62 |         final Collection<KeyValue<Integer, Integer>> records = Collections.singletonList(new KeyValue<>(1, 1));
63 |         final SendKeyValues<Integer, Integer> sendRequest = SendKeyValues.to("test-topic", records)
64 |                 .withAll(overrides)
65 |                 .build();
66 |         final Properties props = sendRequest.getProducerProps();
67 | 
68 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
69 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
70 |     }
71 | }
72 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/SendKeyValuesTransactionalTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.apache.kafka.clients.producer.ProducerConfig;
4 | import org.apache.kafka.common.serialization.IntegerSerializer;
5 | import org.apache.kafka.common.serialization.LongSerializer;
6 | import org.apache.kafka.common.serialization.StringSerializer;
7 | import org.junit.jupiter.api.DisplayName;
8 | import org.junit.jupiter.api.Test;
9 | 
10 | import java.util.Collections;
11 | import java.util.Properties;
12 | 
13 | import static org.assertj.core.api.Assertions.assertThat;
14 | 
15 | class SendKeyValuesTransactionalTest {
16 | 
17 |     @Test
18 |     @DisplayName("should preserve constructor arguments")
19 |     void shouldPreserveConstructorArguments() {
20 | 
21 |         final SendKeyValuesTransactional<String, String> sendRequest = SendKeyValuesTransactional
22 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>("k", "v")))
23 |                 .useDefaults();
24 | 
25 |         assertThat(sendRequest.getRecordsPerTopic().containsKey("test-topic")).isTrue();
26 |         assertThat(sendRequest.getRecordsPerTopic().get("test-topic").contains(new KeyValue<>("k", "v"))).isTrue();
27 |     }
28 | 
29 |     @Test
30 |     @DisplayName("should be able to close over records for multiple topics")
31 |     void shouldBeAbleToCloseOverRecordsForMultipleTopics() {
32 | 
33 |         final SendKeyValuesTransactional<String, String> sendRequest = SendKeyValuesTransactional
34 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>("k", "v")))
35 |                 .inTransaction("test-topic-2", Collections.singletonList(new KeyValue<>("a", "b")))
36 |                 .useDefaults();
37 | 
38 |         assertThat(sendRequest.getRecordsPerTopic().containsKey("test-topic-2")).isTrue();
39 |         assertThat(sendRequest.getRecordsPerTopic().get("test-topic-2").contains(new KeyValue<>("a", "b"))).isTrue();
40 |     }
41 | 
42 |     @Test
43 |     @DisplayName("should use defaults if not overridden")
44 |     void shouldUseDefaultsIfNotOverridden() {
45 | 
46 |         final SendKeyValuesTransactional<String, String> sendRequest = SendKeyValuesTransactional
47 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>("k", "v")))
48 |                 .useDefaults();
49 |         final Properties props = sendRequest.getProducerProps();
50 | 
51 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
52 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
53 |         assertThat(sendRequest.shouldFailTransaction()).isFalse();
54 |     }
55 | 
56 |     @Test
57 |     @DisplayName("should preserve fail transaction setting if overridden")
58 |     void shouldPreserveFailTransactionSettingIfOverridden() {
59 | 
60 |         final SendKeyValuesTransactional<String, String> sendRequest = SendKeyValuesTransactional
61 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>("k", "v")))
62 |                 .failTransaction()
63 |                 .build();
64 | 
65 |         assertThat(sendRequest.shouldFailTransaction()).isTrue();
66 |     }
67 | 
68 |     @Test
69 |     @DisplayName("with should override the default setting of the given parameter with the given value")
70 |     void withShouldOverrideDefaultSetting() {
71 | 
72 |         final SendKeyValuesTransactional<String, Integer> sendRequest = SendKeyValuesTransactional
73 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>("a", 1)))
74 |                 .with(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class)
75 |                 .build();
76 |         final Properties props = sendRequest.getProducerProps();
77 | 
78 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
79 |     }
80 | 
81 |     @Test
82 |     @DisplayName("withAll should override the default settings of the given parameters with the resp. values")
83 |     void withAllShouldOverrideDefaultSettings() {
84 | 
85 |         final Properties overrides = new Properties();
86 |         overrides.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
87 |         overrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
88 | 
89 |         final SendKeyValuesTransactional<Long, Integer> sendRequest = SendKeyValuesTransactional
90 |                 .inTransaction("test-topic", Collections.singletonList(new KeyValue<>(1L, 2)))
91 |                 .withAll(overrides)
92 |                 .build();
93 |         final Properties props = sendRequest.getProducerProps();
94 | 
95 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(LongSerializer.class);
96 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
97 |     }
98 | }
99 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/SendValuesTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.apache.kafka.clients.producer.ProducerConfig;
4 | import org.apache.kafka.common.serialization.IntegerSerializer;
5 | import org.apache.kafka.common.serialization.StringSerializer;
6 | import org.junit.jupiter.api.DisplayName;
7 | import org.junit.jupiter.api.Test;
8 | 
9 | import java.util.Properties;
10 | 
11 | import static org.assertj.core.api.Assertions.assertThat;
12 | 
13 | class SendValuesTest {
14 | 
15 |     @Test
16 |     @DisplayName("should preserve constructor arguments")
17 |     void shouldPreserveConstructorArguments() {
18 | 
19 |         final SendValues<String> sendRequest = SendValues.to("test-topic", "a", "b").useDefaults();
20 | 
21 |         assertThat(sendRequest.getTopic()).isEqualTo("test-topic");
22 |         assertThat(sendRequest.getValues().size()).isEqualTo(2);
23 |         assertThat(sendRequest.getValues().contains("a")).isTrue();
24 |         assertThat(sendRequest.getValues().contains("b")).isTrue();
25 |     }
26 | 
27 |     @Test
28 |     @DisplayName("should use defaults if not overridden")
29 |     void shouldUseDefaultsIfNotOverridden() {
30 | 
31 |         final SendValues<String> sendRequest = SendValues.to("test-topic", "a", "b").useDefaults();
32 |         final Properties props = sendRequest.getProducerProps();
33 | 
34 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
35 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
36 |     }
37 | 
38 |     @Test
39 |     @DisplayName("with should override the default setting of the given parameter with the given value")
40 |     void withShouldOverrideDefaultSetting() {
41 | 
42 |         final SendValues<Integer> sendRequest = SendValues.to("test-topic", 1, 2, 3)
43 |                 .with(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class)
44 |                 .build();
45 |         final Properties props = sendRequest.getProducerProps();
46 | 
47 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
48 |     }
49 | 
50 |     @Test
51 |     @DisplayName("withAll should override the default settings of the given parameters with the resp. values")
52 |     void withAllShouldOverrideDefaultSettings() {
53 | 
54 |         final Properties overrides = new Properties();
55 |         overrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
56 | 
57 |         final SendValues<Integer> sendRequest = SendValues.to("test-topic", 1, 2, 3)
58 |                 .withAll(overrides)
59 |                 .build();
60 |         final Properties props = sendRequest.getProducerProps();
61 | 
62 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
63 |     }
64 | }
65 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/SendValuesTransactionalTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.apache.kafka.clients.producer.ProducerConfig;
4 | import org.apache.kafka.common.serialization.IntegerSerializer;
5 | import org.apache.kafka.common.serialization.StringSerializer;
6 | import org.junit.jupiter.api.DisplayName;
7 | import org.junit.jupiter.api.Test;
8 | 
9 | import java.util.Collections;
10 | import java.util.Properties;
11 | 
12 | import static org.assertj.core.api.Assertions.assertThat;
13 | 
14 | class SendValuesTransactionalTest {
15 | 
16 |     @Test
17 |     @DisplayName("should preserve constructor arguments")
18 |     void shouldPreserveConstructorArguments() {
19 | 
20 |         final SendValuesTransactional<String> sendRequest = SendValuesTransactional
21 |                 .inTransaction("test-topic", Collections.singletonList("a"))
22 |                 .useDefaults();
23 | 
24 |         assertThat(sendRequest.getValuesPerTopic().containsKey("test-topic")).isTrue();
25 |         assertThat(sendRequest.getValuesPerTopic().get("test-topic").contains("a")).isTrue();
26 |     }
27 | 
28 |     @Test
29 |     @DisplayName("should be able to close over records for multiple topics")
30 |     void shouldBeAbleToCloseOverRecordsForMultipleTopics() {
31 | 
32 |         final SendValuesTransactional<String> sendRequest = SendValuesTransactional
33 |                 .inTransaction("test-topic", Collections.singletonList("a"))
34 |                 .inTransaction("test-topic-2", Collections.singletonList("b"))
35 |                 .useDefaults();
36 | 
37 |         assertThat(sendRequest.getValuesPerTopic().containsKey("test-topic-2")).isTrue();
38 |         assertThat(sendRequest.getValuesPerTopic().get("test-topic-2").contains("b")).isTrue();
39 |     }
40 | 
41 |     @Test
42 |     @DisplayName("should use defaults if not overridden")
43 |     void shouldUseDefaultsIfNotOverridden() {
44 | 
45 |         final SendValuesTransactional<String> sendRequest = SendValuesTransactional
46 |                 .inTransaction("test-topic", Collections.singletonList("a"))
47 |                 .useDefaults();
48 |         final Properties props = sendRequest.getProducerProps();
49 | 
50 |         assertThat(props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
51 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(StringSerializer.class);
52 |         assertThat(sendRequest.shouldFailTransaction()).isFalse();
53 |     }
54 | 
55 |     @Test
56 |     @DisplayName("should preserve fail transaction setting if overridden")
57 |     void shouldPreserveFailTransactionSettingIfOverridden() {
58 | 
59 |         final SendValuesTransactional<String> sendRequest = SendValuesTransactional
60 |                 .inTransaction("test-topic", Collections.singletonList("a"))
61 |                 .failTransaction()
62 |                 .build();
63 | 
64 |         assertThat(sendRequest.shouldFailTransaction()).isTrue();
65 |     }
66 | 
67 |     @Test
68 |     @DisplayName("with should override default setting of given parameter with the given value")
69 |     void withShouldOverrideDefaultSetting() {
70 | 
71 |         final SendValuesTransactional<Integer> sendRequest = SendValuesTransactional
72 |                 .inTransaction("test-topic", Collections.singletonList(1))
73 |                 .with(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class)
74 |                 .build();
75 |         final Properties props = sendRequest.getProducerProps();
76 | 
77 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
78 |     }
79 | 
80 |     @Test
81 |     @DisplayName("withAll should override default settings of given parameters with the resp. values")
82 |     void withAllShouldOverrideDefaultSettings() {
83 | 
84 |         final Properties overrides = new Properties();
85 |         overrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
86 | 
87 |         final SendValuesTransactional<Integer> sendRequest = SendValuesTransactional
88 |                 .inTransaction("test-topic", Collections.singletonList(1))
89 |                 .withAll(overrides)
90 |                 .build();
91 |         final Properties props = sendRequest.getProducerProps();
92 | 
93 |         assertThat(props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)).isEqualTo(IntegerSerializer.class);
94 |     }
95 | }
96 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/SingleBrokerTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import kafka.server.KafkaConfig$;
4 | import lombok.extern.slf4j.Slf4j;
5 | import org.junit.jupiter.api.AfterEach;
6 | import org.junit.jupiter.api.BeforeEach;
7 | import org.junit.jupiter.api.DisplayName;
8 | import org.junit.jupiter.api.Test;
9 | 
10 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
11 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.newClusterConfig;
12 | import static net.mguenther.kafka.junit.EmbeddedKafkaConfig.brokers;
13 | import static org.assertj.core.api.Assertions.assertThat;
14 | 
15 | @Slf4j
16 | class SingleBrokerTest {
17 | 
18 |     private EmbeddedKafkaCluster kafka;
19 | 
20 |     @BeforeEach
21 |     void prepareEnvironment() {
22 |         kafka = provisionWith(newClusterConfig()
23 |                 .configure(brokers()
24 |                         .withNumberOfBrokers(1)
25 |                         .with(KafkaConfig$.MODULE$.ListenersProp(), "PLAINTEXT://localhost:9093")));
26 |         kafka.start();
27 |     }
28 | 
29 |     @AfterEach
30 |     void tearDownEnvironment() {
31 |         if (kafka != null) kafka.stop();
32 |     }
33 | 
34 |     @Test
35 |     @DisplayName("should be able to override listener and switch to another local port")
36 |     void shouldBeAbleToOverrideListenerAndSwitchToAnotherLocalPort() {
37 |         assertThat(kafka.getBrokerList()).contains("localhost:9093");
38 |     }
39 | }
40 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/TopicConfigTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import org.junit.jupiter.api.DisplayName;
4 | import org.junit.jupiter.api.Test;
5 | 
6 | import java.util.Properties;
7 | 
8 | import static org.assertj.core.api.Assertions.assertThat;
9 | 
10 | class TopicConfigTest {
11 | 
12 |     @Test
13 |     @DisplayName("should preserve constructor arguments")
14 |     void shouldPreserveConstructorArguments() {
15 | 
16 |         final TopicConfig topicConfig = TopicConfig.withName("test").useDefaults();
17 | 
18 |         assertThat(topicConfig.getTopic()).isEqualTo("test");
19 |     }
20 | 
21 |     @Test
22 |     @DisplayName("should use defaults if not overridden")
23 |     void shouldUseDefaultsIfNotOverridden() {
24 | 
25 |         final TopicConfig topicConfig = TopicConfig.withName("test").useDefaults();
26 | 
27 |         assertThat(topicConfig.getNumberOfPartitions()).isEqualTo(1);
28 |         assertThat(topicConfig.getNumberOfReplicas()).isEqualTo(1);
29 |         assertThat(topicConfig.getProperties().getProperty("cleanup.policy")).isEqualTo("delete");
30 |         assertThat(topicConfig.getProperties().getProperty("delete.retention.ms")).isEqualTo("86400000");
31 |         assertThat(topicConfig.getProperties().getProperty("min.insync.replicas")).isEqualTo("1");
32 |     }
33 | 
34 |     @Test
35 |     @DisplayName("withNumberOfReplicas should override its default setting")
36 |     void withNumberOfReplicasShouldOverrideDefaultSetting() {
37 | 
38 |         final TopicConfig topicConfig = TopicConfig.withName("test")
39 |                 .withNumberOfReplicas(99)
40 |                 .build();
41 | 
42 |         assertThat(topicConfig.getNumberOfReplicas()).isEqualTo(99);
43 |     }
44 | 
45 |     @Test
46 |     @DisplayName("withNumberOfPartitions should override its default setting")
47 |     void withNumberOfPartitionsShouldOverrideDefaultSetting() {
48 | 
49 |         final TopicConfig topicConfig = TopicConfig.withName("test")
50 |                 .withNumberOfPartitions(99)
51 |                 .build();
52 | 
53 |         assertThat(topicConfig.getNumberOfPartitions()).isEqualTo(99);
54 |     }
55 | 
56 |     @Test
57 |     @DisplayName("with should override the default setting of the given parameter with the given value")
58 |     void withShouldOverrideDefaultSetting() {
59 | 
60 |         final TopicConfig topicConfig = TopicConfig.withName("test")
61 |                 .with("min.insync.replicas", "2")
62 |                 .build();
63 | 
64 |         assertThat(topicConfig.getProperties().getProperty("min.insync.replicas")).isEqualTo("2");
65 |     }
66 | 
67 |     @Test
68 |     @DisplayName("withAll should override the default settings of the given parameters with the resp. values")
69 |     void withAllShouldOverrideDefaultSettings() {
70 | 
71 |         final Properties overrides = new Properties();
72 |         overrides.put("min.insync.replicas", "2");
73 |         overrides.put("delete.retention.ms", "1000");
74 | 
75 |         final TopicConfig topicConfig = TopicConfig.withName("test")
76 |                 .withAll(overrides)
77 |                 .build();
78 | 
79 |         assertThat(topicConfig.getProperties().getProperty("min.insync.replicas")).isEqualTo("2");
80 |         assertThat(topicConfig.getProperties().getProperty("delete.retention.ms")).isEqualTo("1000");
81 |     }
82 | }
83 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/TopicManagerTest.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit;
2 | 
3 | import lombok.extern.slf4j.Slf4j;
4 | import org.junit.jupiter.api.AfterEach;
5 | import org.junit.jupiter.api.Assertions;
6 | import org.junit.jupiter.api.BeforeEach;
7 | import org.junit.jupiter.api.DisplayName;
8 | import org.junit.jupiter.api.Test;
9 | 
10 | import java.util.Map;
11 | import java.util.Properties;
12 | import java.util.UUID;
13 | 
14 | import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
15 | import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;
16 | import static net.mguenther.kafka.junit.TopicConfig.withName;
17 | import static net.mguenther.kafka.junit.Wait.delay;
18 | import static org.assertj.core.api.Assertions.assertThat;
19 | 
20 | @Slf4j
21 | class TopicManagerTest {
22 | 
23 |     private EmbeddedKafkaCluster kafka;
24 | 
25 |     @BeforeEach
26 |     void prepareEnvironment() {
27 |         kafka = provisionWith(defaultClusterConfig());
28 |         kafka.start();
29 |     }
30 | 
31 |     @AfterEach
32 |     void tearDownEnvironment() {
33 |         if (kafka != null) kafka.stop();
34 |     }
35 | 
36 |     @Test
37 |     @DisplayName("should be able to create topics and mark them for deletion")
38 |     void shouldBeAbleToCreateTopicsAndMarkThemForDeletion() {
39 | 
40 |         kafka.createTopic(withName("test-topic"));
41 | 
42 |         assertThat(kafka.exists("test-topic")).isTrue();
43 | 
44 |         // the topic will not be deleted immediately, but "marked for deletion"
45 |         // hence a check on exists would return "false" directly after deleting
46 |         // the topic
47 |         kafka.deleteTopic("test-topic");
48 |     }
49 | 
50 |     @Test
51 |     @DisplayName("fetchLeaderAndIsr should retrieve the in-sync replica set")
52 |     void fetchLeaderAndIsrShouldRetrieveTheIsr() throws Exception {
53 | 
54 |         kafka.createTopic(withName("test-topic")
55 |                 .withNumberOfPartitions(5)
56 |                 .withNumberOfReplicas(1));
57 | 
58 |         // it takes a couple of seconds until topic-partition assignments are there
59 |         delay(5);
60 | 
61 |         Map<Integer, LeaderAndIsr> isr = kafka.fetchLeaderAndIsr("test-topic");
62 | 
63 |         assertThat(isr.size()).isEqualTo(5);
64 |         assertThat(isr.values().stream().allMatch(lai -> lai.getLeader() == 1)).isTrue();
65 |     }
66 | 
67 |     @Test
68 |     @DisplayName("fetchTopicConfig should retrieve the proper config")
69 |     void fetchTopicConfigShouldRetrieveTheProperConfig() throws Exception {
70 | 
71 |         kafka.createTopic(withName("test-topic")
72 |                 .with("min.insync.replicas", "1"));
73 | 
74 |         delay(3);
75 | 
76 |         Properties topicConfig = kafka.fetchTopicConfig("test-topic");
77 | 
78 |         assertThat(topicConfig.getProperty("min.insync.replicas")).isEqualTo("1");
79 |     }
80 | 
81 |     @Test
82 |     @DisplayName("fetchTopicConfig should throw a RuntimeException if the topic does not exist")
83 |     void fetchTopicConfigShouldThrowRuntimeExceptionIfTopicDoesNotExist() {
84 |         Assertions.assertThrows(RuntimeException.class, () -> kafka.fetchTopicConfig(UUID.randomUUID().toString()));
85 |     }
86 | }
87 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/connector/InstrumentingConfig.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit.connector;
2 | 
3 | import lombok.Getter;
4 | import lombok.ToString;
5 | import org.apache.commons.lang3.StringUtils;
6 | import org.apache.kafka.common.config.AbstractConfig;
7 | import org.apache.kafka.common.config.ConfigDef;
8 | 
9 | import java.util.Map;
10 | 
11 | import static java.util.Collections.emptyList;
12 | 
13 | @ToString
14 | public class InstrumentingConfig extends AbstractConfig {
15 | 
16 |     @Getter
17 |     private final String topic;
18 | 
19 |     @Getter
20 |     private final String key;
21 | 
22 |     public InstrumentingConfig(final ConfigDef configDef, final Map<String, ?> originals) {
23 |         super(configDef, originals, true);
24 | 
25 |         topic = getString("topic");
26 |         key = getString("key");
27 |     }
28 | 
29 |     public static ConfigDef config() {
30 |         return new ConfigDef()
31 |                 .define(toKey("topic", ConfigDef.Type.STRING))
32 |                 .define(toKey("key", ConfigDef.Type.STRING));
33 |     }
34 | 
35 |     private static ConfigDef.ConfigKey toKey(final String name, final ConfigDef.Type type) {
36 |         return new ConfigDef.ConfigKey(name, type, null, null, ConfigDef.Importance.HIGH, StringUtils.EMPTY, null, -1, ConfigDef.Width.NONE, null, emptyList(), null, false);
37 |     }
38 | }
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/connector/InstrumentingConfigBuilder.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit.connector;
2 | 
3 | import org.apache.kafka.connect.runtime.ConnectorConfig;
4 | 
5 | import java.util.Properties;
6 | import java.util.UUID;
7 | 
8 | public class InstrumentingConfigBuilder {
9 | 
10 |     private String topic = String.format("topic-%s", UUID.randomUUID().toString());
11 | 
12 |     private String key = String.format("key-%s", UUID.randomUUID().toString());
13 | 
14 |     private final Properties connectorProps = new Properties();
15 | 
16 |     public InstrumentingConfigBuilder withTopic(final String topic) {
17 |         this.topic = topic;
18 |         return this;
19 |     }
20 | 
21 |     public InstrumentingConfigBuilder withKey(final String key) {
22 |         this.key = key;
23 |         return this;
24 |     }
25 | 
26 |     public <T> InstrumentingConfigBuilder with(final String propertyName, final T value) {
27 |         connectorProps.put(propertyName, value);
28 |         return this;
29 |     }
30 | 
31 |     public InstrumentingConfigBuilder withAll(final Properties connectorProps) {
32 |         this.connectorProps.putAll(connectorProps);
33 |         return this;
34 |     }
35 | 
36 |     private <T> void ifNonExisting(final String propertyName, final T value) {
37 |         if (connectorProps.get(propertyName) != null) return;
38 |         connectorProps.put(propertyName, value);
39 |     }
40 | 
41 |     public Properties build() {
42 | 
43 |         ifNonExisting(ConnectorConfig.NAME_CONFIG, "instrumenting-source-connector");
44 |         ifNonExisting(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "InstrumentingSourceConnector");
45 |         ifNonExisting(ConnectorConfig.TASKS_MAX_CONFIG, "1");
46 |         ifNonExisting("topic", topic);
47 |         ifNonExisting("key", key);
48 | 
49 |         final Properties copyOfConnectorProps = new Properties();
50 |         copyOfConnectorProps.putAll(connectorProps);
51 | 
52 |         return copyOfConnectorProps;
53 |     }
54 | 
55 |     public static InstrumentingConfigBuilder create() {
56 |         return new InstrumentingConfigBuilder();
57 |     }
58 | }
59 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/connector/InstrumentingSourceConnector.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit.connector;
2 | 
3 | import lombok.extern.slf4j.Slf4j;
4 | import org.apache.commons.lang3.StringUtils;
5 | import org.apache.kafka.common.config.ConfigDef;
6 | import org.apache.kafka.connect.connector.Task;
7 | import org.apache.kafka.connect.source.SourceConnector;
8 | 
9 | import java.util.Collections;
10 | import java.util.List;
11 | import java.util.Map;
12 | 
13 | @Slf4j
14 | public class InstrumentingSourceConnector extends SourceConnector {
15 | 
16 |     private InstrumentingConfig config;
17 | 
18 |     @Override
19 |     public String version() {
20 |         return StringUtils.EMPTY;
21 |     }
22 | 
23 |     @Override
24 |     public void start(final Map<String, String> props) {
25 |         config = new InstrumentingConfig(config(), props);
26 |         log.info("Starting InstrumentingSourceConnector using configuration '{}'.", config);
27 |     }
28 | 
29 |     @Override
30 |     public Class<? extends Task> taskClass() {
31 |         return InstrumentingSourceTask.class;
32 |     }
33 | 
34 |     @Override
35 |     public List<Map<String, String>> taskConfigs(final int i) {
36 |         return Collections.singletonList(config.originalsStrings());
37 |     }
38 | 
39 |     @Override
40 |     public void stop() {
41 |         log.info("Stopping InstrumentingSourceConnector.");
42 |     }
43 | 
44 |     @Override
45 |     public ConfigDef config() {
46 |         return InstrumentingConfig.config();
47 |     }
48 | }
49 | 
--------------------------------------------------------------------------------
/src/test/java/net/mguenther/kafka/junit/connector/InstrumentingSourceTask.java:
--------------------------------------------------------------------------------
1 | package net.mguenther.kafka.junit.connector;
2 | 
3 | import lombok.extern.slf4j.Slf4j;
4 | import org.apache.commons.lang3.StringUtils;
5 | import org.apache.kafka.connect.source.SourceRecord;
6 | import org.apache.kafka.connect.source.SourceTask;
7 | 
8 | import java.util.Collections;
9 | import java.util.List;
10 | import java.util.Map;
11 | import java.util.UUID;
12 | import java.util.concurrent.atomic.AtomicInteger;
13 | 
14 | import static net.mguenther.kafka.junit.connector.InstrumentingConfig.config;
15 | 
16 | @Slf4j
17 | public class InstrumentingSourceTask extends SourceTask {
18 | 
19 |     private final AtomicInteger currentSourceOffset = new AtomicInteger(0);
20 | 
21 |     private InstrumentingConfig config;
22 | 
23 |     @Override
24 |     public void start(final Map<String, String> props) {
25 |         config = new InstrumentingConfig(config(), props);
26 |         log.info("Starting task InstrumentingSourceTask using configuration '{}'.", config);
27 |     }
28 | 
29 |     @Override
30 |     public List<SourceRecord> poll() throws InterruptedException {
31 | 
32 |         return Collections.singletonList(new SourceRecord(
33 |                 sourcePartition(),
34 |                 sourceOffset(),
35 |                 config.getTopic(),
36 |                 0,
37 |                 null,
38 |                 config.getKey(),
39 |                 null,
40 |                 UUID.randomUUID().toString(),
41 |                 System.currentTimeMillis()));
42 |     }
43 | 
44 |     private Map<String, String> sourcePartition() {
45 |         return Collections.singletonMap("source", "instrumenting-source-record-partition");
46 |     }
47 | 
48 |     private Map<String, String> sourceOffset() {
49 |         final Integer sourceOffset = currentSourceOffset.getAndIncrement();
50 |         return Collections.singletonMap("offset", String.valueOf(sourceOffset));
51 |     }
52 | 
53 |     @Override
54 |     public void stop() {
55 |         log.info("Stopping task InstrumentingSourceTask.");
56 |     }
57 | 
58 |     @Override
59 |     public String version() {
60 |         return StringUtils.EMPTY;
61 |     }
62 | }
63 | 
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 | 
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
9 | 
10 | log4j.logger.org.apache.zookeeper=WARN
11 | log4j.logger.org.apache.kafka=WARN
12 | log4j.logger.kafka=WARN
13 | log4j.logger.kafka.server.ReplicaManager=OFF
14 | log4j.logger.org.I0Itec.zkclient=WARN
--------------------------------------------------------------------------------