├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── kafka ├── kafka-listener │ ├── README.md │ ├── pom.xml │ └── src │ │ ├── main │ │ └── java │ │ │ └── com │ │ │ └── ameliant │ │ │ └── tools │ │ │ └── kafka │ │ │ └── listener │ │ │ ├── KafkaMessageListenerContainer.java │ │ │ ├── MemoryOffsetStore.java │ │ │ └── OffsetStore.java │ │ └── test │ │ ├── java │ │ └── com │ │ │ └── ameliant │ │ │ └── tools │ │ │ └── kafka │ │ │ └── listener │ │ │ └── KafkaMessageListenerContainerTest.java │ │ └── resources │ │ └── log4j.properties ├── kafka-parent │ └── pom.xml ├── kafka-perf-tool │ ├── README.md │ ├── docs │ │ └── UsersGuide.md │ ├── pom.xml │ └── src │ │ ├── main │ │ ├── java │ │ │ └── com │ │ │ │ └── ameliant │ │ │ │ └── tools │ │ │ │ ├── kafka │ │ │ │ └── perftool │ │ │ │ │ ├── KafkaPerf.java │ │ │ │ │ ├── config │ │ │ │ │ ├── Configurable.java │ │ │ │ │ ├── ConfigurableWithParent.java │ │ │ │ │ ├── ConsumerDefinition.java │ │ │ │ │ ├── ConsumersDefinition.java │ │ │ │ │ ├── KeyAllocationStrategyDefinition.java │ │ │ │ │ ├── KeyAllocationType.java │ │ │ │ │ ├── PartitioningStrategy.java │ │ │ │ │ ├── ProducerDefinition.java │ │ │ │ │ ├── ProducersDefinition.java │ │ │ │ │ └── TestProfileDefinition.java │ │ │ │ │ └── drivers │ │ │ │ │ ├── ConsumerDriver.java │ │ │ │ │ ├── Driver.java │ │ │ │ │ ├── ProducerDriver.java │ │ │ │ │ ├── TestProfileRunner.java │ │ │ │ │ └── partitioning │ │ │ │ │ ├── KeyAllocationStrategy.java │ │ │ │ │ ├── RoundRobinPartitioner.java │ │ │ │ │ └── StickyPartitioner.java │ │ │ │ └── support │ │ │ │ ├── FileLoader.java │ │ │ │ └── PayloadDetector.java │ │ └── resources │ │ │ └── log4j.properties │ │ └── test │ │ ├── java │ │ └── com │ │ │ └── ameliant │ │ │ └── tools │ │ │ └── kafka │ │ │ └── perftool │ │ │ ├── KafkaPerfTest.java │ │ │ ├── config │ │ │ └── ConfigurableWithParentTest.java │ │ │ ├── coordination │ │ │ └── AwaitsStartup.java │ │ │ ├── drivers │ │ │ ├── ConsumerDriverTest.java │ │ │ ├── DistributionRun.java │ │ │ ├── DistributionValidator.java │ │ │ ├── ProducerDriverTest.java │ │ │ ├── ProducerPartitioningTest.java │ │ │ └── partitioning │ │ │ │ └── KeyAllocationStrategyTest.java │ │ │ └── samples │ │ │ └── SampleProfileValidationTest.java │ │ └── resources │ │ ├── log4j.properties │ │ ├── payloads │ │ └── lorem.txt │ │ └── test-profiles │ │ ├── consumer.json │ │ ├── multi-consumer.json │ │ ├── multi-producer.json │ │ ├── producer-allocation-strategy.json │ │ ├── producer-allocation-strategy.yml │ │ ├── producer-cascade.json │ │ ├── producer-consumer-cascade.json │ │ ├── producer-consumer-loopback.json │ │ ├── producer-consumer-loopback.yml │ │ ├── producer-message-location.yml │ │ ├── producer-round-robin.yml │ │ ├── producer-sticky.yml │ │ ├── producer.json │ │ └── producer.yml ├── kafka-test-dsl │ ├── README.md │ ├── pom.xml │ └── src │ │ ├── main │ │ └── java │ │ │ └── com │ │ │ └── ameliant │ │ │ └── tools │ │ │ └── kafka │ │ │ └── testdsl │ │ │ ├── BrokerBuilder.java │ │ │ ├── BrokerDefinition.java │ │ │ ├── EmbeddedKafkaBroker.java │ │ │ ├── TopicBuilder.java │ │ │ ├── TopicDefinition.java │ │ │ └── config │ │ │ ├── ConsumerConfigsBuilder.java │ │ │ └── ProducerConfigsBuilder.java │ │ └── test │ │ └── java │ │ └── com │ │ └── ameliant │ │ └── tools │ │ └── kafka │ │ └── testdsl │ │ ├── EmbeddedKafkaBrokerTest.java │ │ └── EmbeddedKafkaBrokerWithTopicsTest.java └── pom.xml ├── parent └── pom.xml ├── pom.xml ├── tools-support ├── pom.xml └── src │ └── main │ └── java │ └── com │ └── ameliant │ └── tools │ └── support │ ├── 
AvailablePortFinder.java │ └── DirectoryUtils.java └── zookeeper ├── pom.xml └── zookeeper-test-dsl ├── README.md ├── pom.xml └── src ├── main └── java │ └── com │ └── ameliant │ └── tools │ └── zookeeper │ └── testdsl │ └── EmbeddedZooKeeper.java └── test └── java └── com └── ameliant └── tools └── zookeeper └── testdsl └── EmbeddedZookeeperTest.java /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/intellij,maven 2 | 3 | ### Intellij ### 4 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio 5 | 6 | *.iml 7 | 8 | ## Directory-based project format: 9 | .idea/ 10 | # if you remove the above rule, at least ignore the following: 11 | 12 | # User-specific stuff: 13 | # .idea/workspace.xml 14 | # .idea/tasks.xml 15 | # .idea/dictionaries 16 | 17 | # Sensitive or high-churn files: 18 | # .idea/dataSources.ids 19 | # .idea/dataSources.xml 20 | # .idea/sqlDataSources.xml 21 | # .idea/dynamic.xml 22 | # .idea/uiDesigner.xml 23 | 24 | # Gradle: 25 | # .idea/gradle.xml 26 | # .idea/libraries 27 | 28 | # Mongo Explorer plugin: 29 | # .idea/mongoSettings.xml 30 | 31 | ## File-based project format: 32 | *.ipr 33 | *.iws 34 | 35 | ## Plugin-specific files: 36 | 37 | # IntelliJ 38 | /out/ 39 | 40 | # mpeltonen/sbt-idea plugin 41 | .idea_modules/ 42 | 43 | # JIRA plugin 44 | atlassian-ide-plugin.xml 45 | 46 | # Crashlytics plugin (for Android Studio and IntelliJ) 47 | com_crashlytics_export_strings.xml 48 | crashlytics.properties 49 | crashlytics-build.properties 50 | 51 | 52 | ### Maven ### 53 | target/ 54 | pom.xml.tag 55 | pom.xml.releaseBackup 56 | pom.xml.versionsBackup 57 | pom.xml.next 58 | release.properties 59 | dependency-reduced-pom.xml 60 | buildNumber.properties 61 | .mvn/timing.properties 62 | 63 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | jdk: 3 | - oraclejdk8 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 203 | 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# ameliant-tools
![Build Status](https://travis-ci.org/jkorab/ameliant-tools.svg)

A set of tools to ease working with ZooKeeper and Kafka.

1. [kafka-perf-tool](kafka/kafka-perf-tool/README.md) - a load testing tool for Apache Kafka; formerly `kafka-perf-test`
1. [kafka-listener](kafka/kafka-listener/README.md) - a listener abstraction for Kafka consumers that aims to simplify
reliable once-only consumption; inspired by Spring's `DefaultMessageListenerContainer`
1. [kafka-test-dsl](kafka/kafka-test-dsl/README.md) - a fluent DSL for embedding Kafka servers in JUnit tests
1. [zookeeper-test-dsl](zookeeper/zookeeper-test-dsl/README.md) - a utility for embedding ZooKeeper servers in JUnit tests
1. [tools-support](tools-support) - a support library for common activities, such as finding available ports

--------------------------------------------------------------------------------
/kafka/kafka-listener/README.md:
--------------------------------------------------------------------------------

An implementation of a simplified listener-based interface to Apache Kafka that deals with the heavy lifting required
to emulate JMS once-only delivery of messages.

This is a *Work In Progress*, and should not be used for any production code.

This construct keeps tabs on its own cursor position through a configurable ``OffsetStore`` instance, compensating for
the ``KafkaConsumer`` API's batch polling functionality by rewinding the cursor on startup to the last known
successfully processed position. The only ``OffsetStore`` implementation at the moment is a ``MemoryOffsetStore``,
which keeps state within a single VM only, and not between restarts, making it useful for testing only.

An instance may be constructed via the following constructor:

```java
public KafkaMessageListenerContainer(Properties kafkaConfig,
                                     OffsetStore offsetStore,
                                     String topic,
                                     BiConsumer<K, V> messageListener);
```

A ``KafkaMessageListenerContainer`` is started through a call to ``init()``, and shut down through ``close()``.

As Kafka has no concept of redelivery or dead-letter queues, ``KafkaMessageListenerContainer`` also has the ability
to pass any exceptions thrown to an exception handler, allowing you to add dead-letter channel functionality:

```java
// this signature will change
public void setExceptionHandler(BiConsumer<Tuple2<K, V>, Exception> exceptionHandler) {
    this.exceptionHandler = exceptionHandler;
}
```

There is a fluent builder associated with this class that enables you to define the container inline.
Sample usage:

```java
final CountDownLatch latch = new CountDownLatch(1); // receive one message and shut down

KafkaMessageListenerContainer.Builder builder =
        new KafkaMessageListenerContainer.Builder()
                .kafkaConfig(configs)
                .offsetStore(offsetStore)
                .topic(TOPIC)
                .messageListener((key, value) -> {
                    log.info("Received message [{}, {}]", key, value);
                    latch.countDown();
                });

try (KafkaMessageListenerContainer container = builder.build()) {
    container.init();
    if (!latch.await(20, TimeUnit.SECONDS)) {
        fail("Timeout expired waiting on latch");
    }
}
```

--------------------------------------------------------------------------------
/kafka/kafka-listener/pom.xml:
--------------------------------------------------------------------------------

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <groupId>com.ameliant.tools</groupId>
        <artifactId>kafka-parent</artifactId>
        <version>0.1.0-SNAPSHOT</version>
        <relativePath>../kafka-parent/pom.xml</relativePath>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>kafka-listener</artifactId>
    <name>${base-name} :: ${project.artifactId}</name>

    <dependencies>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>zookeeper-test-dsl</artifactId>
            <version>0.1.0-SNAPSHOT</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>kafka-test-dsl</artifactId>
            <version>0.1.0-SNAPSHOT</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>kafka-perf-tool</artifactId>
            <version>0.1.0-SNAPSHOT</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-lang</groupId>
            <artifactId>commons-lang</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jooq</groupId>
            <artifactId>jool</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
    </dependencies>
</project>

--------------------------------------------------------------------------------
/kafka/kafka-listener/src/main/java/com/ameliant/tools/kafka/listener/KafkaMessageListenerContainer.java:
--------------------------------------------------------------------------------

1 | package com.ameliant.tools.kafka.listener; 2 | 3 | import org.apache.commons.lang.Validate; 4 | import org.apache.kafka.clients.consumer.*; 5 | import org.apache.kafka.common.TopicPartition; 6 | import static org.jooq.lambda.tuple.Tuple.tuple; 7 | import org.jooq.lambda.tuple.Tuple2; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.Collection; 12 | import java.util.Collections; 13 | import java.util.Optional; 14 | import java.util.Properties; 15 | import java.util.concurrent.*; 16 | import java.util.concurrent.atomic.AtomicBoolean; 17 | import java.util.concurrent.atomic.AtomicLong; 18 | import java.util.function.BiConsumer; 19 | 20 | /** 21 | * Container class to simplify Kafka message consumption, as well as providing only-once consumption. 22 | * @author jkorab 23 | */ 24 | public class KafkaMessageListenerContainer<K, V> implements Runnable, AutoCloseable { 25 | 26 | public static final int DEFAULT_POLL_TIMEOUT = 100; 27 | private final Logger log = LoggerFactory.getLogger(this.getClass()); 28 | 29 | private final Properties kafkaConfig; 30 | private final String groupId; 31 | private final OffsetStore offsetStore; 32 | private final String topic; // TODO refactor to List<String>, Pattern 33 | private final BiConsumer<K, V> messageListener; 34 | private final AtomicLong recordsProcessed = new AtomicLong(); 35 | 36 | /** Acts as a hook for a dead-letter channel, handling exceptions thrown from the messageListener.
*/ 37 | private BiConsumer, Exception> exceptionHandler = 38 | (tuple, ex) -> log.error("Caught exception: {}", ex); // TODO refactor to interface 39 | 40 | /** Convenience class for fluent instantiation */ 41 | public static class Builder { 42 | private Properties kafkaConfig; 43 | private OffsetStore offsetStore; 44 | private String topic; 45 | private BiConsumer messageListener; 46 | private BiConsumer, Exception> exceptionHandler; 47 | 48 | public Builder kafkaConfig(Properties kafkaConfig) { 49 | this.kafkaConfig = kafkaConfig; 50 | return this; 51 | } 52 | 53 | public Builder offsetStore(OffsetStore offsetStore) { 54 | this.offsetStore = offsetStore; 55 | return this; 56 | } 57 | 58 | public Builder topic(String topic) { 59 | this.topic = topic; 60 | return this; 61 | } 62 | 63 | public Builder messageListener(BiConsumer messageListener) { 64 | this.messageListener = messageListener; 65 | return this; 66 | } 67 | 68 | public Builder exceptionHandler(BiConsumer, Exception> exceptionHandler) { 69 | this.exceptionHandler = exceptionHandler; 70 | return this; 71 | } 72 | 73 | public KafkaMessageListenerContainer build() { 74 | KafkaMessageListenerContainer container = 75 | new KafkaMessageListenerContainer<>(kafkaConfig, offsetStore, topic, messageListener); 76 | if (exceptionHandler != null) { 77 | container.setExceptionHandler(exceptionHandler); 78 | } 79 | return container; 80 | } 81 | } 82 | 83 | public KafkaMessageListenerContainer(Properties kafkaConfig, 84 | OffsetStore offsetStore, 85 | String topic, 86 | BiConsumer messageListener) { 87 | Validate.notNull(kafkaConfig, "kafkaConfig is null"); 88 | this.kafkaConfig = kafkaConfig; 89 | 90 | this.groupId = (String) kafkaConfig.get(ConsumerConfig.GROUP_ID_CONFIG); 91 | Validate.notEmpty(groupId, "groupId is empty"); 92 | 93 | Validate.notNull(offsetStore, "offsetStore is null"); 94 | this.offsetStore = offsetStore; 95 | 96 | Validate.notEmpty(topic, "topic is empty"); 97 | this.topic = topic; 98 | 99 | Validate.notNull(messageListener, "messageListener is null"); 100 | this.messageListener = messageListener; 101 | } 102 | 103 | public void setExceptionHandler(BiConsumer, Exception> exceptionHandler) { 104 | this.exceptionHandler = exceptionHandler; 105 | } 106 | 107 | private final ExecutorService executorService = Executors.newSingleThreadExecutor(); 108 | 109 | private final CountDownLatch workerShutdownLatch = new CountDownLatch(1); 110 | 111 | public void init() { 112 | executorService.submit(this); 113 | } 114 | 115 | private final CopyOnWriteArraySet assignedTopicPartitions = new CopyOnWriteArraySet(); 116 | 117 | @Override 118 | public void run() { 119 | try (Consumer consumer = createConsumer(kafkaConfig)) { 120 | final String groupId = kafkaConfig.getProperty(ConsumerConfig.GROUP_ID_CONFIG); 121 | log.info("Consuming as group {}", groupId); 122 | 123 | consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() { 124 | @Override 125 | public void onPartitionsRevoked(Collection topicPartitions) { 126 | // keep track of TopicPartitions that the consumer is currently assigned to, so that any messages 127 | // that have been polled can be skipped if the consumer is no longer assigned to that partition 128 | 129 | // There is still the slight possibility of out-of-order message processing if a message (m1) is in the 130 | // middle of being processed by the old consumer, and the new consumer picks up and processes the 131 | // next message in the partition (m2) - assuming idempotent consumption - before 
m1 processing has 132 | // completed. There is no way to get around this. 133 | assignedTopicPartitions.removeAll(topicPartitions); 134 | topicPartitions.forEach(topicPartition -> { 135 | log.debug("Partition revoked {}:{}", topicPartition.topic(), topicPartition.partition()); 136 | }); 137 | } 138 | 139 | @Override 140 | public void onPartitionsAssigned(Collection topicPartitions) { 141 | topicPartitions.stream() 142 | .forEach(topicPartition -> { 143 | log.debug("Partition assigned {}:{}", topicPartition.topic(), topicPartition.partition()); 144 | Optional lastConsumed = offsetStore.getLastConsumed(topicPartition, groupId); 145 | lastConsumed.ifPresent(cursorPosition -> { 146 | log.debug("Seeking {}:{}:{} for {}", 147 | topicPartition.topic(), topicPartition.partition(), cursorPosition, groupId); 148 | consumer.seek(topicPartition, cursorPosition); 149 | }); 150 | // otherwise will revert to the configured cursor positioning strategy 151 | assignedTopicPartitions.add(topicPartition); 152 | }); 153 | } 154 | }); // TODO handle a consumer subscribing to multiple topics 155 | 156 | pollingLoop(consumer, groupId); 157 | log.debug("Polling loop closed, shutting down consumer. {} records processed.", recordsProcessed.get()); 158 | workerShutdownLatch.countDown(); 159 | } 160 | } 161 | 162 | private KafkaConsumer createConsumer(Properties kafkaConfig) { 163 | log.info("Disabling auto-commit for {}", topic); 164 | kafkaConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.FALSE.toString()); 165 | 166 | return new KafkaConsumer(kafkaConfig); 167 | } 168 | 169 | private final AtomicBoolean shuttingDown = new AtomicBoolean(false); 170 | 171 | private void pollingLoop(Consumer consumer, String groupId) { 172 | POLLING_LOOP: while (!shuttingDown.get()) { 173 | 174 | // committed position is the last offset that was saved securely 175 | ConsumerRecords consumerRecords = consumer.poll(DEFAULT_POLL_TIMEOUT); 176 | // TODO why does polling work in increments if commit has not been called? 
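// Note: the KafkaConsumer tracks its fetch position in memory and advances it with every poll();
// the committed offset is only consulted to position the cursor when a partition is (re)assigned,
// e.g. after a rebalance or a restart - which is why polling progresses without a commit.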
177 | Iterable> records = consumerRecords.records(topic); 178 | 179 | long initialCount = recordsProcessed.get(); 180 | RECORD_PROCESSING: for( ConsumerRecord consumerRecord : records) { 181 | if (shuttingDown.get()) { 182 | break POLLING_LOOP; 183 | } 184 | String topic = consumerRecord.topic(); 185 | int partition = consumerRecord.partition(); 186 | TopicPartition topicPartition = new TopicPartition(topic, partition); 187 | 188 | long offset = consumerRecord.offset(); 189 | if (!assignedTopicPartitions.contains(topicPartition)) { 190 | // another consumer has been assigned to this partition since polling, skip this record 191 | log.debug("Discarding polled message as no longer assigned to partition {}:{}:{}", topic, partition, offset); 192 | continue RECORD_PROCESSING; 193 | } 194 | recordsProcessed.incrementAndGet(); 195 | K key = consumerRecord.key(); 196 | V value = consumerRecord.value(); 197 | try { 198 | // TODO introduce idempotent consumption here 199 | // you could end up in a situation where just after polling, the partitions have been reallocated 200 | // and another node picks up the message 201 | messageListener.accept(key, value); 202 | 203 | offsetStore.markConsumed(topicPartition, groupId, offset); 204 | // doesn't matter if the system crashes at this point, as the offsetStore will be used to seek 205 | // to offset at next startup 206 | 207 | // the consumer has an associated groupId, so it doesn't need to pass it to the commit operation 208 | consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset))); 209 | } catch (Exception ex) { 210 | try { 211 | exceptionHandler.accept(tuple(key, value), ex); 212 | } catch (Exception exh) { 213 | // TODO potentially endless loop; add maxRedeliveryAttempts, redeliveryDelay 214 | log.error("Exception caught from dead-letter handler: {}", ex); 215 | log.info("Rewinding offset before re-polling"); 216 | consumer.seek(topicPartition, offsetStore.getLastConsumed(topicPartition, groupId).get()); 217 | break RECORD_PROCESSING; // interrupt the consumption of the already polled messages 218 | } 219 | } 220 | } 221 | if (initialCount == recordsProcessed.get()) { 222 | log.debug("No records polled from topic:{}", topic); 223 | } 224 | } 225 | } 226 | 227 | @Override 228 | public void close() throws Exception { 229 | shuttingDown.set(true); 230 | if (!workerShutdownLatch.await(10, TimeUnit.SECONDS)) { 231 | log.warn("Timeout waiting to shut down worker thread"); 232 | } 233 | executorService.shutdownNow(); 234 | } 235 | 236 | public long getRecordsProcessed() { 237 | return recordsProcessed.get(); 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /kafka/kafka-listener/src/main/java/com/ameliant/tools/kafka/listener/MemoryOffsetStore.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.listener; 2 | 3 | import org.apache.commons.lang.Validate; 4 | import org.apache.kafka.common.TopicPartition; 5 | import static org.jooq.lambda.tuple.Tuple.tuple; 6 | import org.jooq.lambda.tuple.Tuple2; 7 | 8 | import java.util.Optional; 9 | import java.util.concurrent.ConcurrentHashMap; 10 | 11 | /** 12 | * @author jkorab 13 | */ 14 | public class MemoryOffsetStore implements OffsetStore { 15 | 16 | private ConcurrentHashMap, Long> topicPartitionGroupOffsets = 17 | new ConcurrentHashMap<>(); 18 | 19 | @Override 20 | public void markConsumed(TopicPartition topicPartition, String groupId, long offset) { 21 | 
Validate.notNull(topicPartition, "topicPartition is null"); 22 | Validate.notEmpty(groupId, "groupId is empty"); 23 | Validate.isTrue(offset >= 0, "offset cannot be negative"); 24 | topicPartitionGroupOffsets.put(tuple(topicPartition, groupId), offset); 25 | } 26 | 27 | @Override 28 | public Optional<Long> getLastConsumed(TopicPartition topicPartition, String groupId) { 29 | Validate.notNull(topicPartition, "topicPartition is null"); 30 | Validate.notEmpty(groupId, "groupId is empty"); 31 | 32 | Long lastConsumed = topicPartitionGroupOffsets.get(tuple(topicPartition, groupId)); 33 | return (lastConsumed == null) ? Optional.empty() : Optional.of(lastConsumed); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /kafka/kafka-listener/src/main/java/com/ameliant/tools/kafka/listener/OffsetStore.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.listener; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.Optional; 6 | 7 | /** 8 | * An offset store maintains the last successfully read position of topic partitions. 9 | * Use of an offset store addresses the problem of keeping track of previously consumed messages in a batch 10 | * polling scenario, where the cursor for the message group may need to be rewound on system restart. 11 | * 12 | * There is some overlap with an idempotent store, as both keep track of previously seen messages. 13 | * The critical difference is that the latter does not contain enough information to allow a cursor rewind. 14 | * 15 | * TODO consider integrating the two. Processing polled messages when one of the partitions is rebalanced will cause issues. 16 | * TODO determine whether messages are polled from multiple partitions at the same time. 17 | * 18 | * It is used as a pessimistic store, keeping track of the last message to have been processed - not fetched. 19 | * The two constructs vary, as n messages may have been fetched via a poll, but are only then sequentially 20 | * processed. The cursor for the consumer group, in the meantime, has already been moved forward by n places. 21 | * 22 | * On consumer startup, when partitions are allocated, the last successfully processed position for the consumer's group id 23 | * is fetched and the cursor rewound back to it.
24 | * 25 | * @author jkorab 26 | */ 27 | public interface OffsetStore { 28 | 29 | void markConsumed(TopicPartition topicPartition, String groupId, long offset); 30 | 31 | Optional getLastConsumed(TopicPartition topicPartition, String groupId); 32 | 33 | } 34 | -------------------------------------------------------------------------------- /kafka/kafka-listener/src/test/java/com/ameliant/tools/kafka/listener/KafkaMessageListenerContainerTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.listener; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.*; 4 | import com.ameliant.tools.kafka.perftool.drivers.ProducerDriver; 5 | import com.ameliant.tools.kafka.testdsl.config.ConsumerConfigsBuilder; 6 | import com.ameliant.tools.kafka.testdsl.EmbeddedKafkaBroker; 7 | import com.ameliant.tools.kafka.testdsl.config.ProducerConfigsBuilder; 8 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 9 | import org.apache.kafka.common.serialization.ByteArrayDeserializer; 10 | import org.apache.kafka.common.serialization.ByteArraySerializer; 11 | import org.junit.Rule; 12 | import org.junit.Test; 13 | 14 | import java.util.Map; 15 | import java.util.Properties; 16 | import java.util.concurrent.CountDownLatch; 17 | import java.util.concurrent.TimeUnit; 18 | import java.util.concurrent.atomic.AtomicInteger; 19 | 20 | import static org.junit.Assert.*; 21 | 22 | /** 23 | * @author jkorab 24 | */ 25 | public class KafkaMessageListenerContainerTest { 26 | private final String TOPIC = "foo"; 27 | 28 | @Rule 29 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 30 | 31 | @Rule 32 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 33 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 34 | .numPartitions(3) 35 | .build(); 36 | 37 | @Test 38 | public void testReceive() throws Exception { 39 | int messagesToSend = 100; 40 | preloadTopic(TOPIC, messagesToSend); 41 | 42 | Properties configs = props(getConsumerConfigs()); 43 | final CountDownLatch latch = new CountDownLatch(messagesToSend); 44 | 45 | MemoryOffsetStore offsetStore = new MemoryOffsetStore(); 46 | 47 | KafkaMessageListenerContainer.Builder builder = 48 | new KafkaMessageListenerContainer.Builder() 49 | .kafkaConfig(configs) 50 | .offsetStore(offsetStore) 51 | .topic(TOPIC) 52 | .messageListener((key, value) -> latch.countDown()); 53 | 54 | try (KafkaMessageListenerContainer container = builder.build()) { 55 | container.init(); 56 | if (!latch.await(20, TimeUnit.SECONDS)) { 57 | fail("Timeout expired waiting on latch"); 58 | } 59 | 60 | assertEquals(messagesToSend, container.getRecordsProcessed()); 61 | } 62 | } 63 | 64 | @Test 65 | public void testReceive_errorHandling() throws Exception { 66 | int messagesToSend = 1000; 67 | preloadTopic(TOPIC, messagesToSend); 68 | 69 | Properties configs = props(getConsumerConfigs()); 70 | AtomicInteger messagesReceived = new AtomicInteger(); 71 | AtomicInteger exceptionsHandled = new AtomicInteger(); 72 | 73 | CountDownLatch latch = new CountDownLatch(messagesToSend); 74 | 75 | MemoryOffsetStore offsetStore = new MemoryOffsetStore(); 76 | 77 | KafkaMessageListenerContainer.Builder builder = new KafkaMessageListenerContainer.Builder() 78 | .kafkaConfig(configs) 79 | .offsetStore(offsetStore) 80 | .topic(TOPIC) 81 | .messageListener((key, value) -> { 82 | try { 83 | if (messagesReceived.incrementAndGet() == 500) { 84 | throw new IllegalArgumentException("Boom!"); 85 | } 86 | } finally { 87 | 
latch.countDown(); 88 | } 89 | }) 90 | .exceptionHandler((tuple, exception) -> { 91 | exceptionsHandled.incrementAndGet(); 92 | assertTrue(exception instanceof IllegalArgumentException); 93 | }); 94 | 95 | try (KafkaMessageListenerContainer container = builder.build()) { 96 | container.init(); 97 | if (!latch.await(10, TimeUnit.SECONDS)) { 98 | fail("Timeout expired waiting on latch"); 99 | } 100 | 101 | assertEquals(messagesToSend, messagesReceived.get()); 102 | assertEquals(1, exceptionsHandled.get()); 103 | } 104 | } 105 | 106 | @Test 107 | public void testReceive_shutdownResumption() throws Exception { 108 | int messagesToSend = 1000; 109 | preloadTopic(TOPIC, messagesToSend); 110 | 111 | Properties configs = props(getConsumerConfigs()); 112 | AtomicInteger messagesReceived = new AtomicInteger(); 113 | 114 | CountDownLatch messagesReceivedLatch = new CountDownLatch(messagesToSend); 115 | CountDownLatch shutdownLatch = new CountDownLatch(1); 116 | 117 | MemoryOffsetStore offsetStore = new MemoryOffsetStore(); // shared between container instances 118 | 119 | try (KafkaMessageListenerContainer container = new KafkaMessageListenerContainer.Builder() 120 | .kafkaConfig(configs) 121 | .offsetStore(offsetStore) 122 | .topic(TOPIC) 123 | .messageListener((key, value) -> { 124 | try { 125 | if (messagesReceived.incrementAndGet() == 500) { 126 | shutdownLatch.countDown(); 127 | } 128 | } finally { 129 | messagesReceivedLatch.countDown(); 130 | } 131 | }).build()) { 132 | container.init(); 133 | if (!shutdownLatch.await(10, TimeUnit.SECONDS)) { 134 | fail("Timeout expired waiting on shutdownLatch"); 135 | } 136 | } 137 | try (KafkaMessageListenerContainer container = new KafkaMessageListenerContainer.Builder() 138 | .kafkaConfig(configs) 139 | .offsetStore(offsetStore) 140 | .topic(TOPIC) 141 | .messageListener((key, value) -> messagesReceivedLatch.countDown()).build()) { 142 | container.init(); 143 | if (!messagesReceivedLatch.await(10, TimeUnit.SECONDS)) { 144 | fail("Timeout expired waiting on messagesReceivedLatch, " + container.getRecordsProcessed() 145 | + " records processed from " + TOPIC); 146 | } 147 | } 148 | } 149 | 150 | // TODO test dropping of messages during repartitioning 151 | 152 | private Properties props(Map map) { 153 | Properties properties = new Properties(); 154 | properties.putAll(map); 155 | return properties; 156 | } 157 | 158 | private Map getConsumerConfigs() { 159 | Map configs = new ConsumerConfigsBuilder() 160 | .groupId("bar") 161 | .bootstrapServers(broker.getConnectionString()) 162 | .autoOffsetReset(ConsumerConfigsBuilder.OffsetReset.earliest) 163 | .sessionTimeoutMs(10000) 164 | .keyDeserializer(ByteArrayDeserializer.class) 165 | .valueDeserializer(ByteArrayDeserializer.class) 166 | .build(); 167 | return configs; 168 | } 169 | 170 | private void preloadTopic(String topic, int messagesToSend) { 171 | Map producerConfigs = getProducerConfigs(); 172 | 173 | ProducerDefinition producerDefinition = new ProducerDefinition(); 174 | producerDefinition.setConfig(producerConfigs); 175 | producerDefinition.setTopic(topic); 176 | producerDefinition.setMessageSize(1024); 177 | producerDefinition.setMessagesToSend(messagesToSend); 178 | producerDefinition.setSendBlocking(true); 179 | producerDefinition.setPartitioningStrategy(PartitioningStrategy.roundRobin); 180 | 181 | ProducerDriver driver = new ProducerDriver(producerDefinition); 182 | driver.run(); 183 | } 184 | 185 | private Map getProducerConfigs() { 186 | return new ProducerConfigsBuilder() 187 | 
.bootstrapServers(broker.getConnectionString()) 188 | .requestRequiredAcks(ProducerConfigsBuilder.RequestRequiredAcks.ackFromLeader) 189 | .producerType(ProducerConfigsBuilder.ProducerType.sync) 190 | .keySerializer(ByteArraySerializer.class) 191 | .valueSerializer(ByteArraySerializer.class) 192 | .batchSize(0) 193 | .build(); 194 | } 195 | 196 | 197 | }

--------------------------------------------------------------------------------
/kafka/kafka-listener/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------

#
# The logging properties used
#
log4j.rootLogger=INFO, out

log4j.logger.com.ameliant.tools.kafka=DEBUG
log4j.logger.org.apache.kafka=WARN
log4j.logger.kafka=WARN
log4j.logger.org.apache.kafka.clients.consumer=INFO
log4j.logger.org.apache.zookeeper=WARN

# CONSOLE appender not used by default
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
#log4j.appender.out.layout.ConversionPattern=[%30.30t] %-30.30c{1} %-5p %m%n
log4j.appender.out.layout.ConversionPattern=%d{HH:mm:ss.SSS} [%-12.12t] %-5p %-25.30c{1} - %m%n

--------------------------------------------------------------------------------
/kafka/kafka-parent/pom.xml:
--------------------------------------------------------------------------------

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <groupId>com.ameliant.tools</groupId>
        <artifactId>parent</artifactId>
        <version>0.1.0-SNAPSHOT</version>
        <relativePath>../../parent/pom.xml</relativePath>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <packaging>pom</packaging>

    <artifactId>kafka-parent</artifactId>
    <name>${base-name} :: ${project.artifactId}</name>

    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka-clients</artifactId>
                <version>${kafka-version}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka_2.10</artifactId>
                <version>${kafka-version}</version>
            </dependency>
            <dependency>
                <groupId>commons-io</groupId>
                <artifactId>commons-io</artifactId>
                <version>${commons-io-version}</version>
            </dependency>
            <dependency>
                <groupId>commons-cli</groupId>
                <artifactId>commons-cli</artifactId>
                <version>${commons-cli-version}</version>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-core</artifactId>
                <version>${jackson-version}</version>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-databind</artifactId>
                <version>${jackson-version}</version>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.dataformat</groupId>
                <artifactId>jackson-dataformat-yaml</artifactId>
                <version>${jackson-version}</version>
            </dependency>
            <dependency>
                <groupId>org.jooq</groupId>
                <artifactId>jool</artifactId>
                <version>${jool-version}</version>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>

--------------------------------------------------------------------------------
/kafka/kafka-perf-tool/README.md:
--------------------------------------------------------------------------------

# kafka-perf-tool
A command-line performance test tool for Apache Kafka

This tool runs an arbitrary number of consumers and producers concurrently against a Kafka cluster.
The tool currently runs against Kafka 0.9.0 only.

To run:

    $ mvn clean install
    $ java -jar target/kafka-perf-tool.jar -c src/test/resources/test-profiles/producer.json

The tool accepts a single argument, which is the location of a config file that details
the behaviour of the producers and consumers (collectively referred to as clients). The config file can be either
JSON or YAML; the right parser is auto-detected based on the config file extension (YAML = `.yaml` or `.yml`).
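The detection itself amounts to a simple extension check. A minimal sketch of the idea follows (the class and
method names are illustrative only - in the tool this logic lives behind the `PayloadDetector` helper, whose
implementation may differ):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;

    // Sketch only: picks a Jackson mapper based on the config file's extension.
    public class ConfigMapperSelector {
        public static ObjectMapper mapperFor(String fileName) {
            String lower = fileName.toLowerCase();
            // .yaml/.yml files get a YAML-aware mapper; anything else is parsed as JSON
            boolean yaml = lower.endsWith(".yaml") || lower.endsWith(".yml");
            return yaml ? new ObjectMapper(new YAMLFactory()) : new ObjectMapper();
        }
    }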
Since Kafka clients each need their own Kafka config (a bag of properties, for which you need
to look at [the manual](http://kafka.apache.org/documentation.html#configuration)),
the config format supports overrides in `config` blocks at each level to keep the file nice and DRY - and to keep you sane :)
Lower-level config is merged with, and in the process overrides, config defined at a higher level.

See the [Users Guide](docs/UsersGuide.md) for a description of using this tool, and its configuration.

Example (pseudo-)JSON config (JSON doesn't really do comments):

    {
      // see TestProfileDefinition for all properties
      "config" : {
        // Kafka config; applies to producers and consumers
        "bootstrap.servers" : "tcp://localhost:9092"
      },
      "producers" : {
        "config" : {
          // Kafka config; applies to all producers
          "request.required.acks": "ackFromLeader",
          "producer.type": "sync",
          "key.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer",
          "value.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer",
          "batch.size": "0",
          "timeout.ms" : "10000"
        },
        "instances" : [
          {
            // see ProducerDefinition for all properties
            "config" : {
              // Kafka config; just for this producer
              "timeout.ms" : "5000"
            },
            "topic" : "foo",
            "messagesToSend" : "1000",
            "sendBlocking": "false",
            "messageSize" : "100000"
          }
        ]
      },
      "consumers" : {
        "config" : {
          // Kafka config; applies to all consumers
          "key.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer",
          "value.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer",
          "enable.auto.commit" : "true",
          "auto.commit.interval.ms" : "1000",
          "auto.offset.reset" : "earliest"
        },
        "instances" : [
          {
            // see ConsumerDefinition for all properties
            "config" : {
              // Kafka config; just for this consumer
              "timeout.ms" : "5000",
              "group.id" : "foo1" // every consumer must define a unique one of these
            },
            "topic" : "foo"
          }
        ]
      }
    }

Example YAML config:

    # see TestProfileDefinition for all properties
    config:
      # Kafka config; applies to producers and consumers
      bootstrap.servers: "tcp://localhost:9092"
    maxDuration: 30
    concurrent: true
    autogenerateTopic: true
    producers:
      config:
        # Kafka config; applies to all producers
        request.timeout.ms: "10000"
        key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer"
        value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer"
        batch.size: "0"
        acks: "1"
        max.block.ms: "10000"
      instances:
        # see ProducerDefinition for all properties
        - config:
            # Kafka config; just for this producer
            timeout.ms: 5000
          messagesToSend: 1000
          messageSize: 1000
          sendBlocking: false
    consumers:
      config:
        # Kafka config; applies to all consumers
        key.deserializer: "org.apache.kafka.common.serialization.ByteArrayDeserializer"
        value.deserializer: "org.apache.kafka.common.serialization.ByteArrayDeserializer"
        enable.auto.commit: "true"
        auto.commit.interval.ms: "1000"
        auto.offset.reset: "earliest"
      instances:
        # see ConsumerDefinition for all properties
        - config:
            # Kafka config; just for this consumer
            group.id: "bar"
          messagesToReceive: 10000
          pollTimeout: 1000
          reportReceivedEvery: 1000
          receiveDelay: 0

--------------------------------------------------------------------------------
/kafka/kafka-perf-tool/docs/UsersGuide.md:
--------------------------------------------------------------------------------

# Kafka Performance Tool Users Guide

This tool provides a standalone mechanism for measuring load and validating the behaviour of Apache Kafka clusters.
Beyond simple performance measurement, it provides a way to understand Kafka through observing its behaviour from the
outside - after all, observation trumps documentation :)

This tool aims to be more complete than the scripts provided with the Kafka distribution, in allowing you to define
entire test profiles comprising multiple consumers and producers communicating over a set of topics. This is useful
when simulating the behaviours of multiple functional nodes communicating with each other over Kafka.

## Running

To run this tool, execute the JAR as follows (Java 1.8 minimum):

    $ java -jar kafka-perf-tool.jar -c myConfig.yml

Flags are as follows:

    usage: kafka-perf-tool
     -c,--config <FILE>               config file that defines the test
                                      profile(s) to run
     -o,--output-format <yaml|json>   the format of the parsed config to echo
                                      to console

## Test Profiles

To support this functionality, each test is driven by a single config file which defines a test profile. This file may
be defined in YAML or JSON, although the former is generally preferred as it allows for comments. A test profile
contains within it both Kafka configuration (defined through sets of properties) as well as tool-specific properties
that define the tool's behaviour.

Sample configurations may be found in the [test-profiles folder](../src/test/resources/test-profiles).

The general structure of a test profile is as follows (pseudo-JSON):

    # TestProfileDefinition
    "config" : {}          # Kafka configuration applying to all producers and consumers
    "producers" : {        # defines any producers as well as their shared config
      "config" : {}        # Kafka configuration applying to all producers
      "instances" : [
        # list of producers
        {
          # ProducerDefinition
          "config" : {}    # Kafka configuration for this producer
        }
      ]
    }
    "consumers" : {        # defines any consumers as well as their global config
      "config" : {}        # Kafka configuration applying to all consumers
      "instances" : [
        # list of consumers
        {
          # ConsumerDefinition
          "config" : {}    # Kafka configuration for this consumer
        }
      ]
    }

`config` blocks are simply maps that are passed directly to Kafka when starting up producers and consumers.
They are not validated by the tool in any way. Configuration cascades down: `config` defined at the most specific
level (in a producer or a consumer) inherits and overrides the config of the layer above it, which does the same to the
configuration at the top level.

All other properties are bound to fields in the tool's
[configuration object graph](../src/main/java/com/ameliant/tools/kafka/perftool/config); the tool will complain if these
are not correctly defined. Most fields are optional, so it is perfectly reasonable to omit them if not needed; you can,
for example, define consumers only, or producers only, or not define `config` at some level.
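As an illustration of the config cascade described above (the property values here are arbitrary), given:

    config:
      bootstrap.servers: "tcp://localhost:9092"
      acks: "1"
    producers:
      config:
        acks: "all"          # overrides the top-level value for all producers
      instances:
        - config:
            linger.ms: "5"   # applies to this producer instance only

the single producer instance starts with the merged Kafka config `bootstrap.servers=tcp://localhost:9092`,
`acks=all` and `linger.ms=5`.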
The top-level element is a `TestProfileDefinition`.

### TestProfileDefinition

* `config` - a map of Kafka properties that apply to all producers and consumers
* `maxDuration` (`30`) - Maximum test duration in seconds. Applies to concurrent tests only.
* `concurrent` (`true`) - Whether producers and consumers execute concurrently. If false, producers will be executed before consumers.
* `autogenerateTopic` (`false`) - Whether or not the test should use an auto-generated topic name. If true, consumers and producers will all
use the auto-generated one in preference to any defined within their config during this run.
* `producers` : `ProducersDefinition`
* `consumers` : `ConsumersDefinition`

### ProducersDefinition

* `config` - a map of Kafka properties that apply to all producers
* `topic` - topic for all producers to send to (may be overridden by the producer)
* `instances` : `List[ProducerDefinition]`

### ProducerDefinition

* `config` - a map of Kafka properties for this producer
* `topic` - topic to send to
* `sendDelay` (`0`) - how long in ms to pause between sending messages
* `messagesToSend` (`10000`)
* `messageSize` (`1024`) - how big an autogenerated message should be. A message payload is generated once per producer
so as to not mess with timings.
* `messageLocation` - Location of a file to use as the message payload. If provided, the driver will not generate its own payloads
and `messageSize` will be ignored. This can be a relative or an absolute path.
* `sendBlocking` (`false`) - whether to wait for acknowledgement from the client library before sending the next message.
Kafka uses a client buffer, which it consumes from in the background in order to send to the broker.
* `keyAllocationStrategy` : `KeyAllocationStrategyDefinition` (`{"type":"fair", "uniqueKeys":"1"}`) - test functionality
that simulates key allocation within your code, allowing you to see how partitioning affects the distribution of sent messages.
* `partitioningStrategy`
    * `none` (default) - Will use Kafka's built-in strategy to hash the key into one of the available partitions.
    This strategy does not give a reliably fair distribution, as many keys potentially hash into the same bucket.
    * `roundRobin` - This built-in `Partitioner` will round-robin distribute the messages amongst the topic partitions
    without considering the value of the key.
    * `sticky` - This built-in `Partitioner` acts in a similar way to [JMS Message Groups](http://activemq.apache.org/message-groups.html),
    as it is a form of sticky load-balancing of messages. Each new key is assigned a topic partition on a
    round-robin basis; subsequent messages sent with the same key will be sent to that same partition.
    Note that this partitioning is on a per-JVM basis;
    multiple JVM/test instances running at the same time will likely not partition their keys in the same way.

### KeyAllocationStrategyDefinition

* `type` - built-in allocation strategy algorithm; in your own Kafka producers you would assign business-specific keys.
  This has two options:
    * `fair` - evenly assigns the keys to each message using a modulo function
    * `random` - randomly assigns one of the keys
* `uniqueKeys` - how many keys this strategy should generate for allocation

### ConsumersDefinition

* `config` - a map of Kafka properties that apply to all consumers
* `topic` - topic for all consumers to receive from (may be overridden by the consumer)
* `instances` : `List[ConsumerDefinition]`

### ConsumerDefinition

* `config` - a map of Kafka properties for this consumer
* `topic` - topic to receive from
* `messagesToReceive` (`1000`)
* `pollTimeout` (`1000`) - ms until a call to `poll()` returns.
* `reportReceivedEvery` (`1000`) - how many messages to receive between reports to the console of the running consumption count
* `receiveDelay` (`0`) - how long to pause before processing the next message. Note that this may not mean polling again,
as many messages are fetched with each call to `poll()`.

--------------------------------------------------------------------------------
/kafka/kafka-perf-tool/pom.xml:
--------------------------------------------------------------------------------

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <groupId>com.ameliant.tools</groupId>
        <artifactId>kafka-parent</artifactId>
        <version>0.1.0-SNAPSHOT</version>
        <relativePath>../kafka-parent/pom.xml</relativePath>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>kafka-perf-tool</artifactId>
    <name>${base-name} :: ${project.artifactId}</name>

    <dependencies>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>tools-support</artifactId>
            <version>0.1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>zookeeper-test-dsl</artifactId>
            <version>0.1.0-SNAPSHOT</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.ameliant.tools</groupId>
            <artifactId>kafka-test-dsl</artifactId>
            <version>0.1.0-SNAPSHOT</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.dataformat</groupId>
            <artifactId>jackson-dataformat-yaml</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-cli</groupId>
            <artifactId>commons-cli</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.10</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-all</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <finalName>${artifactId}</finalName>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>${maven-shade-plugin-version}</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>com.ameliant.tools.kafka.perftool.KafkaPerf</mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

--------------------------------------------------------------------------------
/kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/KafkaPerf.java:
--------------------------------------------------------------------------------

package com.ameliant.tools.kafka.perftool;

import com.ameliant.tools.kafka.perftool.config.TestProfileDefinition;
import com.ameliant.tools.kafka.perftool.drivers.TestProfileRunner;
import com.ameliant.tools.support.PayloadDetector;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.apache.commons.cli.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;

/**
 * Performance test main class.
The tool allows you to run multiple consumers and producers 20 | * at the same time against one or more Kafka clusters. 21 | * 22 | * This class takes a JSON config file location as an argument, which it translates into an 23 | * object graph representing the test profile ({@link com.ameliant.tools.kafka.perftool.config.TestProfileDefinition}). 24 | * 25 | * The test profile is then executed by a {@link com.ameliant.tools.kafka.perftool.drivers.TestProfileRunner}, 26 | * which runs each of the producers and consumers in their own threads. 27 | * 28 | * @author jkorab 29 | */ 30 | public class KafkaPerf { 31 | 32 | private final static Logger LOG = LoggerFactory.getLogger(KafkaPerf.class); 33 | 34 | public static final String CONFIG = "config"; 35 | public static final String OUTPUT_FORMAT = "output-format"; 36 | 37 | public static void main(String[] args) { 38 | CommandLineParser parser = new DefaultParser(); 39 | 40 | Options options = new Options(); 41 | options.addOption(Option.builder("c") 42 | .longOpt(CONFIG) 43 | .desc("config file that defines the test profile(s) to run") 44 | .hasArg() 45 | .argName("FILE") 46 | .required(true) 47 | .build()); 48 | 49 | options.addOption(Option.builder("o") 50 | .longOpt(OUTPUT_FORMAT) 51 | .desc("the format of the parsed config to echo to console") 52 | .hasArg() 53 | .argName("yaml|json") 54 | .required(false) 55 | .build()); 56 | 57 | boolean displayHelp = false; 58 | String errorMessage = null; 59 | 60 | try { 61 | CommandLine commandLine = parser.parse(options, args); 62 | String config = commandLine.getOptionValue(CONFIG); 63 | try { 64 | ObjectMapper mapper = PayloadDetector.isYamlFile(config) ? new ObjectMapper(new YAMLFactory()) 65 | : new ObjectMapper(); 66 | TestProfileDefinition testProfileDefinition = mapper.readValue(new File(config), TestProfileDefinition.class); 67 | TestProfileRunner testProfileRunner = new TestProfileRunner(testProfileDefinition); 68 | 69 | echoTestProfileDefinition(testProfileDefinition, commandLine.getOptionValue(OUTPUT_FORMAT)); 70 | testProfileRunner.run(); // TODO implement test reporting 71 | } catch (JsonMappingException | JsonParseException e) { 72 | errorMessage = "Unable to parse " + config + ": " + e.getOriginalMessage(); 73 | displayHelp = true; 74 | } catch (IOException e) { 75 | errorMessage = "Unable to load " + config + ": " + e.getMessage(); 76 | displayHelp = true; 77 | } 78 | } catch (ParseException e) { 79 | displayHelp = true; 80 | } 81 | 82 | if (displayHelp) { 83 | HelpFormatter formatter = new HelpFormatter(); 84 | if (errorMessage != null) { 85 | System.out.println(errorMessage + System.lineSeparator()); 86 | } 87 | formatter.printHelp("kafka-perf-test", options); 88 | } 89 | 90 | } 91 | 92 | private static void echoTestProfileDefinition(TestProfileDefinition testProfileDefinition, String outputFormat) 93 | throws JsonProcessingException { 94 | if (outputFormat != null) { 95 | ObjectMapper mapper = PayloadDetector.isYaml(outputFormat) ? 
new ObjectMapper(new YAMLFactory()) 96 | : new ObjectMapper(); 97 | 98 | LOG.info(System.lineSeparator() + 99 | mapper.writerWithDefaultPrettyPrinter().writeValueAsString(testProfileDefinition)); 100 | } 101 | } 102 | 103 | } 104 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/Configurable.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import com.fasterxml.jackson.annotation.JsonIgnore; 4 | 5 | import java.util.Collections; 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | 9 | /** 10 | * @author jkorab 11 | */ 12 | public abstract class Configurable { 13 | 14 | private Map config = new HashMap<>(); 15 | 16 | private String topic; 17 | 18 | public String getTopic() { 19 | return topic; 20 | } 21 | 22 | public void setTopic(String topic) { 23 | this.topic = topic; 24 | } 25 | 26 | public Map getConfig() { 27 | return config; 28 | } 29 | 30 | public void setConfig(Map config) { 31 | this.config = config; 32 | } 33 | 34 | /** 35 | * Decorateable method for obtaining the provided config. 36 | * @return The config for this object. 37 | */ 38 | @JsonIgnore 39 | public Map getKafkaConfig() { 40 | return Collections.unmodifiableMap(config); 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/ConfigurableWithParent.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import com.fasterxml.jackson.annotation.JsonBackReference; 4 | import com.fasterxml.jackson.annotation.JsonIgnore; 5 | import org.apache.commons.lang.Validate; 6 | 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | 10 | /** 11 | * @author jkorab 12 | */ 13 | public abstract class ConfigurableWithParent extends Configurable { 14 | 15 | private Configurable parent; 16 | 17 | @JsonIgnore 18 | private Map _mergedConfig; // lazily initialised 19 | 20 | public Configurable getParent() { 21 | return parent; 22 | } 23 | 24 | @JsonBackReference 25 | public void setParent(Configurable parent) { 26 | this.parent = parent; 27 | } 28 | 29 | /** 30 | * Get the merged config of this object and its parent. This method caches the result, so any changes to this config will not show up. 31 | * @return The merged config of this object and its parent. 32 | */ 33 | @Override 34 | public Map getKafkaConfig() { 35 | Map config = getConfig(); 36 | if (parent == null) { 37 | return config; 38 | } 39 | 40 | if (_mergedConfig == null) { // lazy init 41 | _mergedConfig = merge(parent.getKafkaConfig(), config); 42 | } 43 | return _mergedConfig; 44 | } 45 | 46 | /** 47 | * Merges two maps, with entries in the child overriding those in the parent. 48 | * @param parentConfig The parent map. 49 | * @param childConfig The child map. 50 | * @return A merged map. 51 | */ 52 | private Map merge(Map parentConfig, Map childConfig) { 53 | Validate.notNull(parentConfig, "parent is null"); 54 | Validate.notNull(childConfig, "child is null"); 55 | 56 | Map merged = new HashMap<>(); 57 | merged.putAll(parentConfig); 58 | merged.putAll(childConfig); 59 | return merged; 60 | } 61 | 62 | @Override 63 | public String getTopic() { 64 | return ((parent == null) || (parent.getTopic() == null)) 65 | ? 
super.getTopic() : parent.getTopic(); 66 | } 67 | 68 | } 69 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/ConsumerDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class ConsumerDefinition extends ConfigurableWithParent { 7 | 8 | private long messagesToReceive = 10000; 9 | private long pollTimeout = 1000; 10 | private int reportReceivedEvery = 1000; 11 | private int receiveDelay = 0; 12 | 13 | private long testRunTimeout = Long.MAX_VALUE; 14 | 15 | public long getMessagesToReceive() { 16 | return messagesToReceive; 17 | } 18 | 19 | public void setMessagesToReceive(long messagesToReceive) { 20 | this.messagesToReceive = messagesToReceive; 21 | } 22 | 23 | public long getPollTimeout() { 24 | return pollTimeout; 25 | } 26 | 27 | public void setPollTimeout(long pollTimeout) { 28 | this.pollTimeout = pollTimeout; 29 | } 30 | 31 | public long getTestRunTimeout() { 32 | return testRunTimeout; 33 | } 34 | 35 | public void setTestRunTimeout(long testRunTimeout) { 36 | this.testRunTimeout = testRunTimeout; 37 | } 38 | 39 | public int getReportReceivedEvery() { 40 | return reportReceivedEvery; 41 | } 42 | 43 | public void setReportReceivedEvery(int reportReceivedEvery) { 44 | this.reportReceivedEvery = reportReceivedEvery; 45 | } 46 | 47 | public int getReceiveDelay() { 48 | return receiveDelay; 49 | } 50 | 51 | public void setReceiveDelay(int receiveDelay) { 52 | this.receiveDelay = receiveDelay; 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/ConsumersDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import com.fasterxml.jackson.annotation.JsonManagedReference; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | 8 | /** 9 | * @author jkorab 10 | */ 11 | public class ConsumersDefinition extends ConfigurableWithParent { 12 | 13 | @JsonManagedReference 14 | private List instances = new ArrayList<>(); 15 | 16 | public List getInstances() { 17 | return instances; 18 | } 19 | 20 | public void setInstances(List instances) { 21 | this.instances = instances; 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/KeyAllocationStrategyDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class KeyAllocationStrategyDefinition { 7 | 8 | /** 9 | * How keys are allocated to each message. 10 | */ 11 | private KeyAllocationType type; 12 | 13 | /** 14 | * How many unique keys are used. 
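* Must be greater than zero; the {@code KeyAllocationStrategy} built from this definition validates this when it is constructed.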
15 | */ 16 | private int uniqueKeys; 17 | 18 | // for Jackson 19 | public KeyAllocationStrategyDefinition() {} 20 | 21 | public KeyAllocationStrategyDefinition(KeyAllocationType type, int uniqueKeys) { 22 | this.type = type; 23 | this.uniqueKeys = uniqueKeys; 24 | } 25 | 26 | public KeyAllocationType getType() { 27 | return type; 28 | } 29 | 30 | public void setType(KeyAllocationType type) { 31 | this.type = type; 32 | } 33 | 34 | public int getUniqueKeys() { 35 | return uniqueKeys; 36 | } 37 | 38 | public void setUniqueKeys(int uniqueKeys) { 39 | this.uniqueKeys = uniqueKeys; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/KeyAllocationType.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public enum KeyAllocationType { 7 | fair, random 8 | } 9 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/PartitioningStrategy.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public enum PartitioningStrategy { 7 | none, roundRobin, sticky; 8 | } 9 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/ProducerDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class ProducerDefinition extends ConfigurableWithParent { 7 | 8 | private long sendDelay = 0; 9 | private long messagesToSend = 10000; 10 | private int messageSize = 1024; 11 | /** 12 | * Location of a file to use as the message payload. If provided, driver will not generate its own payloads 13 | * and {@link #messageSize} will be ignored. 14 | */ 15 | private String messageLocation; 16 | private boolean sendBlocking = false; 17 | private KeyAllocationStrategyDefinition keyAllocationStrategyDefinition = 18 | new KeyAllocationStrategyDefinition(KeyAllocationType.fair, 1); 19 | /** 20 | * Any value here other than {@see PartitioningStrategy.none} will override the value of 21 | * partitioner.class in the producer config. 22 | */ 23 | private PartitioningStrategy partitioningStrategy = PartitioningStrategy.none; 24 | 25 | @Override 26 | public String toString() { 27 | String mergedConfig = getKafkaConfig().entrySet().stream() 28 | .map(entry -> entry.getKey() + ":" + entry.getValue()) 29 | .reduce("", (joined, configEntry) -> 30 | (joined.equals("")) ? 
configEntry : joined + ", " + configEntry); 31 | 32 | return "ProducerDefinition{" + 33 | "topic='" + getTopic() + '\'' + 34 | ", sendDelay=" + sendDelay + 35 | ", messagesToSend=" + messagesToSend + 36 | ", messageSize=" + messageSize + 37 | ", messageLocation=" + messageLocation + 38 | ", sendBlocking=" + sendBlocking + 39 | ", partitioningStrategy=" + partitioningStrategy + 40 | ", mergedConfig={" + mergedConfig + "}" + 41 | '}'; 42 | } 43 | 44 | public long getSendDelay() { 45 | return sendDelay; 46 | } 47 | 48 | public void setSendDelay(long sendDelay) { 49 | this.sendDelay = sendDelay; 50 | } 51 | 52 | public boolean isSendBlocking() { 53 | return sendBlocking; 54 | } 55 | 56 | public void setSendBlocking(boolean sendBlocking) { 57 | this.sendBlocking = sendBlocking; 58 | } 59 | 60 | public long getMessagesToSend() { 61 | return messagesToSend; 62 | } 63 | 64 | public void setMessagesToSend(long messagesToSend) { 65 | this.messagesToSend = messagesToSend; 66 | } 67 | 68 | public int getMessageSize() { 69 | return messageSize; 70 | } 71 | 72 | public void setMessageSize(int messageSize) { 73 | this.messageSize = messageSize; 74 | } 75 | 76 | public KeyAllocationStrategyDefinition getKeyAllocationStrategy() { 77 | return keyAllocationStrategyDefinition; 78 | } 79 | 80 | public void setKeyAllocationStrategy(KeyAllocationStrategyDefinition keyAllocationStrategyDefinition) { 81 | this.keyAllocationStrategyDefinition = keyAllocationStrategyDefinition; 82 | } 83 | 84 | public PartitioningStrategy getPartitioningStrategy() { 85 | return partitioningStrategy; 86 | } 87 | 88 | public void setPartitioningStrategy(PartitioningStrategy partitioningStrategy) { 89 | this.partitioningStrategy = partitioningStrategy; 90 | } 91 | 92 | public String getMessageLocation() { 93 | return messageLocation; 94 | } 95 | 96 | public void setMessageLocation(String messageLocation) { 97 | this.messageLocation = messageLocation; 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/ProducersDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import com.fasterxml.jackson.annotation.JsonManagedReference; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | 8 | /** 9 | * @author jkorab 10 | */ 11 | public class ProducersDefinition extends ConfigurableWithParent { 12 | 13 | @JsonManagedReference 14 | private List instances = new ArrayList<>(); 15 | 16 | public List getInstances() { 17 | return instances; 18 | } 19 | 20 | public void setInstances(List instances) { 21 | this.instances = instances; 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/config/TestProfileDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import com.fasterxml.jackson.annotation.JsonIgnore; 4 | import com.fasterxml.jackson.annotation.JsonManagedReference; 5 | 6 | import java.time.LocalDateTime; 7 | import java.time.format.DateTimeFormatter; 8 | 9 | /** 10 | * @author jkorab 11 | */ 12 | public class TestProfileDefinition extends Configurable { 13 | 14 | /** 15 | * Maximum test duration in seconds. Applies to concurrent tests only. 
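* On expiry, the {@code TestProfileRunner} requests a graceful shutdown of any drivers still running.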
16 | */ 17 | private int maxDuration = 30; 18 | /** 19 | * Whether producers and consumers execute concurrently. If false, producers will executed before consumers. 20 | */ 21 | private boolean concurrent = true; 22 | 23 | /** 24 | * Whether or not the test should use an auto-generated topic name. If true, consumers and producers will all 25 | * use the auto-generated one in preference to any defined within their config during this run. 26 | */ 27 | private boolean autogenerateTopic = false; 28 | 29 | @JsonManagedReference 30 | private ProducersDefinition producers = new ProducersDefinition(); 31 | 32 | @JsonManagedReference 33 | private ConsumersDefinition consumers = new ConsumersDefinition(); 34 | 35 | public boolean isConcurrent() { 36 | return concurrent; 37 | } 38 | 39 | public void setConcurrent(boolean concurrent) { 40 | this.concurrent = concurrent; 41 | } 42 | 43 | public ConsumersDefinition getConsumers() { 44 | return consumers; 45 | } 46 | 47 | public void setConsumers(ConsumersDefinition consumers) { 48 | this.consumers = consumers; 49 | } 50 | 51 | public int getMaxDuration() { 52 | return maxDuration; 53 | } 54 | 55 | public void setMaxDuration(int maxDuration) { 56 | this.maxDuration = maxDuration; 57 | } 58 | 59 | public ProducersDefinition getProducers() { 60 | return producers; 61 | } 62 | 63 | public void setProducers(ProducersDefinition producers) { 64 | this.producers = producers; 65 | } 66 | 67 | public boolean isAutogenerateTopic() { 68 | return autogenerateTopic; 69 | } 70 | 71 | public void setAutogenerateTopic(boolean autogenerateTopic) { 72 | this.autogenerateTopic = autogenerateTopic; 73 | } 74 | 75 | @JsonIgnore 76 | private String _autogeneratedTopic; // lazily initialised 77 | 78 | @Override 79 | public String getTopic() { 80 | if (autogenerateTopic) { 81 | if (_autogeneratedTopic == null) { 82 | DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"); 83 | _autogeneratedTopic = "kafka-perf-test_" + LocalDateTime.now().format(formatter); 84 | } 85 | return _autogeneratedTopic; 86 | } else { 87 | return super.getTopic(); 88 | } 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/ConsumerDriver.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.ConsumerDefinition; 4 | import org.apache.commons.lang.Validate; 5 | import org.apache.commons.lang.time.StopWatch; 6 | import org.apache.kafka.clients.consumer.*; 7 | 8 | import java.util.Collections; 9 | import java.util.concurrent.CountDownLatch; 10 | 11 | /** 12 | * @author jkorab 13 | */ 14 | public class ConsumerDriver extends Driver { 15 | 16 | private final ConsumerDefinition consumerDefinition; 17 | private CountDownLatch completionLatch; 18 | private long recordsFetched = 0; 19 | private ConsumerRebalanceListener consumerRebalanceListener; 20 | 21 | public void setConsumerRebalanceListener(ConsumerRebalanceListener consumerRebalanceListener) { 22 | this.consumerRebalanceListener = consumerRebalanceListener; 23 | } 24 | 25 | public ConsumerDriver(ConsumerDefinition consumerDefinition) { 26 | Validate.notNull(consumerDefinition, "consumerDefinition is null"); 27 | this.consumerDefinition = consumerDefinition; 28 | } 29 | 30 | public ConsumerDriver(ConsumerDefinition consumerDefinition, CountDownLatch completionLatch) 
{ 31 | this(consumerDefinition); 32 | this.completionLatch = completionLatch; 33 | } 34 | 35 | @Override 36 | public void drive() { 37 | // A Consumer is not thread-safe 38 | // {@see http://kafka.apache.org/090/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html} 39 | // {@see http://kafka.apache.org/090/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#multithreaded} 40 | try (KafkaConsumer consumer = new KafkaConsumer<>(consumerDefinition.getKafkaConfig())) { 41 | 42 | String topic = consumerDefinition.getTopic(); 43 | log.info("Subscribing to {}", topic); 44 | if (consumerRebalanceListener == null) { 45 | consumer.subscribe(Collections.singletonList(topic)); 46 | } else { 47 | consumer.subscribe(Collections.singletonList(topic), consumerRebalanceListener); 48 | } 49 | 50 | long messagesToReceive = consumerDefinition.getMessagesToReceive(); 51 | log.info("Expecting {} messages", messagesToReceive); 52 | 53 | StopWatch stopWatch = new StopWatch(); 54 | stopWatch.start(); 55 | 56 | do { 57 | ConsumerRecords records = consumer.poll(consumerDefinition.getPollTimeout()); 58 | if (records == null) { 59 | throw new IllegalStateException("null ConsumerRecords polled"); 60 | } else { 61 | if (records.count() == 0) { 62 | try { 63 | log.info("No records fetched, pausing"); 64 | Thread.sleep(1000); 65 | } catch (InterruptedException e) { 66 | throw new RuntimeException(e); 67 | } 68 | } else { 69 | if (log.isTraceEnabled()) { 70 | log.trace("Fetched {} records", records.count()); 71 | } 72 | for (ConsumerRecord record : records) { 73 | recordsFetched += 1; 74 | applyReceiveDelay(); 75 | if (recordsFetched % consumerDefinition.getReportReceivedEvery() == 0) { 76 | log.info("Received {} messages", recordsFetched); 77 | } 78 | } 79 | } 80 | } 81 | 82 | if (isShutdownRequested()) { 83 | break; 84 | } 85 | stopWatch.split(); 86 | } while ((recordsFetched < messagesToReceive) 87 | && (stopWatch.getSplitTime() < consumerDefinition.getTestRunTimeout())); 88 | 89 | stopWatch.stop(); 90 | if (isShutdownRequested()) { 91 | log.info("Shutting down"); 92 | } else { 93 | long runTime = stopWatch.getTime(); 94 | log.info("Done. 
Consumer received {} msgs in {} ms", messagesToReceive, runTime); 95 | 96 | double averageThroughput = (1000d / runTime) * messagesToReceive; 97 | log.info("Average throughput: {} msg/s", averageThroughput); 98 | } 99 | 100 | } finally { 101 | log.debug("Consumer closed"); 102 | if (completionLatch != null) { 103 | completionLatch.countDown(); 104 | } 105 | } 106 | } 107 | 108 | private void applyReceiveDelay() { 109 | int receiveDelay = consumerDefinition.getReceiveDelay(); 110 | if (receiveDelay > 0) { 111 | try { 112 | Thread.sleep(receiveDelay); 113 | } catch (InterruptedException e) { 114 | throw new RuntimeException(e); 115 | } 116 | } 117 | } 118 | 119 | public long getMessagesReceived() { 120 | return recordsFetched; 121 | } 122 | 123 | public long getMessagesExpected() { 124 | return consumerDefinition.getMessagesToReceive(); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/Driver.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | /** 7 | * @author jkorab 8 | */ 9 | public abstract class Driver implements Runnable { 10 | 11 | protected final Logger log = LoggerFactory.getLogger(this.getClass()); 12 | 13 | /** 14 | * Flag marking whether shutdown has been requested. 15 | */ 16 | private boolean shutdownRequested = false; 17 | 18 | public void requestShutdown() { 19 | shutdownRequested = true; 20 | } 21 | 22 | protected boolean isShutdownRequested() { 23 | return shutdownRequested; 24 | } 25 | 26 | @Override 27 | public void run() { 28 | try { 29 | drive(); 30 | } catch (Exception ex) { 31 | log.error("Caught exception: {}", ex); 32 | throw ex; 33 | } 34 | } 35 | 36 | protected abstract void drive(); 37 | } 38 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/ProducerDriver.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.PartitioningStrategy; 4 | import com.ameliant.tools.kafka.perftool.config.ProducerDefinition; 5 | import com.ameliant.tools.kafka.perftool.drivers.partitioning.KeyAllocationStrategy; 6 | import com.ameliant.tools.kafka.perftool.drivers.partitioning.RoundRobinPartitioner; 7 | import com.ameliant.tools.kafka.perftool.drivers.partitioning.StickyPartitioner; 8 | import com.ameliant.tools.support.FileLoader; 9 | import org.apache.commons.lang.RandomStringUtils; 10 | import org.apache.commons.lang.Validate; 11 | import org.apache.commons.lang.time.StopWatch; 12 | import org.apache.kafka.clients.producer.KafkaProducer; 13 | import org.apache.kafka.clients.producer.ProducerConfig; 14 | import org.apache.kafka.clients.producer.ProducerRecord; 15 | import org.apache.kafka.clients.producer.RecordMetadata; 16 | 17 | import java.util.Map; 18 | import java.util.Optional; 19 | import java.util.concurrent.CountDownLatch; 20 | import java.util.concurrent.ExecutionException; 21 | import java.util.concurrent.Future; 22 | 23 | /** 24 | * @author jkorab 25 | */ 26 | public class ProducerDriver extends Driver { 27 | 28 | private final ProducerDefinition producerDefinition; 29 | private CountDownLatch completionLatch; 30 | 31 | public 
ProducerDriver(ProducerDefinition producerDefinition) { 32 | Validate.notNull(producerDefinition, "producerDefinition is null"); 33 | this.producerDefinition = producerDefinition; 34 | } 35 | 36 | public ProducerDriver(ProducerDefinition producerDefinition, CountDownLatch completionLatch) { 37 | this(producerDefinition); 38 | this.completionLatch = completionLatch; 39 | } 40 | 41 | @Override 42 | public String toString() { 43 | return "ProducerDriver{" + 44 | "producerDefinition=" + producerDefinition + 45 | '}'; 46 | } 47 | 48 | public void drive() { 49 | // The producer is thread safe and sharing a single producer instance across threads will generally be 50 | // faster than having multiple instances. 51 | // {@see http://kafka.apache.org/090/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html} 52 | Map kafkaConfig = producerDefinition.getKafkaConfig(); 53 | applyConfigOverrides(kafkaConfig); 54 | 55 | try (KafkaProducer producer = new KafkaProducer(kafkaConfig)) { 56 | 57 | String message = generateMessage(producerDefinition.getMessageLocation(), producerDefinition.getMessageSize()); 58 | StopWatch stopWatch = new StopWatch(); 59 | stopWatch.start(); 60 | 61 | String topic = producerDefinition.getTopic(); 62 | Validate.notEmpty(topic, "topic is empty"); 63 | long messagesToSend = producerDefinition.getMessagesToSend(); 64 | Validate.isTrue(messagesToSend > 0, "messagesToSend must be greater than 0"); 65 | 66 | log.info("Producing {} messages to {}", messagesToSend, topic); 67 | 68 | KeyAllocationStrategy keyAllocationStrategy = new KeyAllocationStrategy(producerDefinition.getKeyAllocationStrategy()); 69 | log.debug("KeyAllocationStrategy is {}", keyAllocationStrategy); 70 | 71 | for (int i = 0; i < messagesToSend; i++) { 72 | if (isShutdownRequested()) { 73 | break; 74 | } 75 | 76 | // keys are used by the partitioning function to assign a partition 77 | byte[] key = keyAllocationStrategy.getKey(i); 78 | if (log.isTraceEnabled()) { 79 | log.trace("Sending message {} with key {}", i, key); 80 | } 81 | ProducerRecord record = new ProducerRecord<>(topic, key, message.getBytes()); 82 | 83 | if (producerDefinition.isSendBlocking()) { 84 | Future future = producer.send(record); 85 | try { 86 | // all sends are async, you need to get in order to block 87 | traceOffset(future.get()); 88 | } catch (InterruptedException | ExecutionException e) { 89 | throw new RuntimeException(e); 90 | } 91 | } else { 92 | // callbacks for records being sent to the same partition are guaranteed to execute in order 93 | producer.send(record, (recordMetadata, exception) -> { 94 | if (exception == null) { 95 | traceOffset(recordMetadata); 96 | } else { 97 | throw new RuntimeException("Error sending to Kafka: " + exception.getMessage()); 98 | } 99 | }); 100 | } 101 | long sendDelay = producerDefinition.getSendDelay(); 102 | if (sendDelay > 0) { 103 | log.trace("Delaying send by {}", sendDelay); 104 | Thread.sleep(sendDelay); 105 | } 106 | } 107 | 108 | stopWatch.stop(); 109 | if (isShutdownRequested()) { 110 | log.info("Shutting down"); 111 | } else { 112 | long runTime = stopWatch.getTime(); 113 | log.info("Done. 
Producer finished sending {} msgs in {} ms", messagesToSend, runTime); 114 | 115 | double averageThroughput = (1000d / runTime) * messagesToSend; 116 | log.info("Average throughput: {} msg/s", averageThroughput); 117 | } 118 | 119 | } catch (InterruptedException e) { 120 | log.error("Producer interrupted."); 121 | } finally { 122 | log.debug("Producer closed"); 123 | if (completionLatch != null) { 124 | completionLatch.countDown(); 125 | } 126 | } 127 | } 128 | 129 | private void applyConfigOverrides(Map kafkaConfig) { 130 | log.debug("Applying config overrides"); 131 | PartitioningStrategy partitioningStrategy = producerDefinition.getPartitioningStrategy(); 132 | applyOverride(kafkaConfig, ProducerConfig.PARTITIONER_CLASS_CONFIG, getPartitionerClassName(partitioningStrategy)); 133 | } 134 | 135 | private void applyOverride(Map kafkaConfig, String property, Optional optionalValue) { 136 | if (optionalValue.isPresent()) { 137 | String value = optionalValue.get(); 138 | kafkaConfig.put(property, value); 139 | log.debug("Overriding {} to {}", property, value); 140 | } else { 141 | log.debug("No override applied for {}", property); 142 | } 143 | } 144 | 145 | private Optional getPartitionerClassName(PartitioningStrategy partitioningStrategy) { 146 | if ((partitioningStrategy == null) || (partitioningStrategy == PartitioningStrategy.none)) { 147 | return Optional.empty(); 148 | } else if (partitioningStrategy.equals(PartitioningStrategy.roundRobin)) { 149 | return Optional.of(RoundRobinPartitioner.class.getCanonicalName()); 150 | } else if (partitioningStrategy.equals(PartitioningStrategy.sticky)) { 151 | return Optional.of(StickyPartitioner.class.getCanonicalName()); 152 | } else { 153 | throw new IllegalArgumentException("Unrecognised partitioningStrategy: " + partitioningStrategy); 154 | } 155 | } 156 | 157 | private String generateMessage(String messageLocation, int messageSize) { 158 | if (messageLocation == null) { 159 | Validate.isTrue(messageSize > 0, "messageSize must be greater than 0"); 160 | return RandomStringUtils.randomAlphanumeric(messageSize); 161 | } else { 162 | log.debug("Loading payload from {}", messageLocation); 163 | return new FileLoader().loadFileAsString(messageLocation); 164 | } 165 | } 166 | 167 | private void traceOffset(RecordMetadata recordMetadata) { 168 | assert (recordMetadata != null); 169 | if (log.isTraceEnabled()) { 170 | log.trace("The [topic:partition:offset] of the record sent was [{}:{}:{}]", 171 | recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()); 172 | } 173 | } 174 | 175 | 176 | } 177 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/TestProfileRunner.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.*; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | import java.util.concurrent.CountDownLatch; 10 | import java.util.concurrent.ExecutorService; 11 | import java.util.concurrent.Executors; 12 | import java.util.concurrent.TimeUnit; 13 | import java.util.stream.Collectors; 14 | 15 | /** 16 | * @author jkorab 17 | */ 18 | public class TestProfileRunner { 19 | private final Logger log = LoggerFactory.getLogger(this.getClass()); 20 | 21 | private final TestProfileDefinition 
testProfileDefinition; 22 | 23 | public TestProfileRunner(TestProfileDefinition testProfileDefinition) { 24 | this.testProfileDefinition = testProfileDefinition; 25 | } 26 | 27 | public void run() { 28 | ArrayList drivers = new ArrayList<>(); 29 | 30 | int driverCount = 0; 31 | List producerDefinitions = testProfileDefinition.getProducers().getInstances(); 32 | driverCount += producerDefinitions.size(); 33 | 34 | List consumerDefinitions = testProfileDefinition.getConsumers().getInstances(); 35 | driverCount += consumerDefinitions.size(); 36 | 37 | log.debug("Latching {} drivers", driverCount); 38 | CountDownLatch latch = new CountDownLatch(driverCount); 39 | 40 | drivers.addAll(producerDefinitions.stream() 41 | .map(producerDefinition -> new ProducerDriver(producerDefinition, latch)) 42 | .collect(Collectors.toList())); 43 | 44 | drivers.addAll(consumerDefinitions.stream() 45 | .map(consumerDefinition -> new ConsumerDriver(consumerDefinition, latch)) 46 | .collect(Collectors.toList())); 47 | 48 | if (testProfileDefinition.isConcurrent()) { 49 | ExecutorService executorService = Executors.newFixedThreadPool(driverCount); 50 | 51 | drivers.forEach(driver -> { 52 | log.debug("Submitting {}", driver); 53 | executorService.submit(driver); 54 | }); 55 | 56 | try { 57 | if (!latch.await(testProfileDefinition.getMaxDuration(), TimeUnit.SECONDS)) { 58 | log.info("Shutting down gracefully"); 59 | drivers.forEach(driver -> driver.requestShutdown()); 60 | } 61 | } catch (InterruptedException e) { 62 | throw new RuntimeException(e); 63 | } 64 | executorService.shutdown(); 65 | } else { 66 | drivers.forEach(driver -> { 67 | log.debug("Running {}", driver); 68 | driver.run(); 69 | }); 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/partitioning/KeyAllocationStrategy.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers.partitioning; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.KeyAllocationStrategyDefinition; 4 | import com.ameliant.tools.kafka.perftool.config.KeyAllocationType; 5 | import org.apache.commons.lang.Validate; 6 | 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | import java.util.Random; 10 | 11 | /** 12 | * Assigns keys based on a message number. The set of keys is generated at object instantiation, and may be either 13 | * {@see KeyAllocationType.fair} where it will be the same every time, or {@see KeyAllocationType.random} where each 14 | * instance of this class will contain a different set of keys. 
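* <p>For example, with {@code uniqueKeys = 2} and fair allocation, message numbers 0, 2, 4... all receive the first key and 1, 3, 5... the second, since {@code getKey(messageNum)} returns {@code keys.get(messageNum % uniqueKeys)}.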
15 | * @author jkorab 16 | */ 17 | public class KeyAllocationStrategy { 18 | 19 | private final KeyAllocationType keyAllocationType; 20 | protected final int uniqueKeys; 21 | protected final List keys = new ArrayList<>(); 22 | 23 | public KeyAllocationStrategy(KeyAllocationStrategyDefinition strategyDefinition) { 24 | uniqueKeys = strategyDefinition.getUniqueKeys(); 25 | Validate.isTrue(uniqueKeys > 0, "uniqueKeys must be greater than 0"); 26 | 27 | keyAllocationType = strategyDefinition.getType(); 28 | Validate.notNull(keyAllocationType, "keyAllocationType is null"); 29 | 30 | Random random = new Random(); 31 | for (int i = 0; i < this.uniqueKeys; i++) { 32 | if (keyAllocationType == KeyAllocationType.fair) { 33 | keys.add(asByteArray(i)); 34 | } else { 35 | keys.add(asByteArray(random.nextInt())); 36 | } 37 | } 38 | } 39 | 40 | @Override 41 | public String toString() { 42 | return "KeyAllocationStrategy{" + 43 | "keyAllocationType=" + keyAllocationType + 44 | ", uniqueKeys=" + uniqueKeys + 45 | '}'; 46 | } 47 | 48 | /** 49 | * Convert the int into an array of two bytes. 50 | */ 51 | private byte[] asByteArray(int i) { 52 | byte _0 = (byte) (i & 0xFF); 53 | byte _1 = (byte) ((i >> 8) & 0xFF); 54 | return new byte[]{_0, _1}; 55 | } 56 | 57 | /** 58 | * Gets a key for a message number. The key is guaranteed to be the same for this object instance every time 59 | * this method is called. 60 | * @param messageNum The message number. 61 | * @return A message key. 62 | */ 63 | public byte[] getKey(int messageNum) { 64 | return keys.get(messageNum % uniqueKeys); 65 | } 66 | 67 | } 68 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/partitioning/RoundRobinPartitioner.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers.partitioning; 2 | 3 | import org.apache.kafka.clients.producer.Partitioner; 4 | import org.apache.kafka.common.Cluster; 5 | import org.apache.kafka.common.PartitionInfo; 6 | 7 | import java.util.List; 8 | import java.util.Map; 9 | import java.util.concurrent.atomic.AtomicInteger; 10 | 11 | /** 12 | * Partitioner that round-robin distributes messages across all cluster partitions for the given topic. 13 | * This is a naive load-spreading strategy, that does not consider the message keys at all. 
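* <p>Each call to {@code partition(...)} increments a shared counter and returns that counter modulo the number of partitions for the topic.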
14 | * @author jkorab 15 | */ 16 | public class RoundRobinPartitioner implements Partitioner { 17 | 18 | private final AtomicInteger messageCount = new AtomicInteger(0); 19 | 20 | @Override 21 | public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { 22 | List partitions = cluster.partitionsForTopic(topic); 23 | int numPartitions = partitions.size(); 24 | 25 | return messageCount.incrementAndGet() % numPartitions; 26 | } 27 | 28 | @Override 29 | public void close() {} 30 | 31 | @Override 32 | public void configure(Map map) {} 33 | } 34 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/kafka/perftool/drivers/partitioning/StickyPartitioner.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers.partitioning; 2 | 3 | import org.apache.kafka.clients.producer.Partitioner; 4 | import org.apache.kafka.common.Cluster; 5 | import org.apache.kafka.common.PartitionInfo; 6 | 7 | import java.util.LinkedHashMap; 8 | import java.util.List; 9 | import java.util.Map; 10 | import java.util.concurrent.atomic.AtomicInteger; 11 | import static org.apache.kafka.common.utils.Utils.murmur2; 12 | 13 | /** 14 | * Partitioner that assigns partitions in a round-robin fashion to keys across all cluster partitions for 15 | * the given topic. All messages after the first one for that unique key will be assigned to the same partition. 16 | * Messages without keys will be sent to the 0th partition. 17 | * When broker partitions are: 18 | *
* <ul> 19 | * <li>added - the strategy will add new partitions into rotation for new keys</li> 20 | * <li>removed - keys for the missing partition will be reassigned a new partition</li> 21 | * </ul>
22 | * 23 | * BE AWARE: 24 | * Partitioning is based on round-robin sequence and message send time. This will cause problems when 25 | * there are two nodes running at the same time with this strategy. 26 | * 27 | * @author jkorab 28 | */ 29 | public class StickyPartitioner implements Partitioner { 30 | 31 | private final AtomicInteger uniqueKeyCount = new AtomicInteger(0); 32 | private final Map partitionMap = new LinkedHashMap<>(); 33 | 34 | @Override 35 | public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { 36 | if ((keyBytes == null) || (keyBytes.length == 0)) { 37 | return 0; 38 | } 39 | 40 | int keyHash = Math.abs(murmur2(keyBytes)); 41 | 42 | List partitions = cluster.partitionsForTopic(topic); 43 | int numPartitions = partitions.size(); 44 | 45 | Integer partition = null; 46 | if (partitionMap.containsKey(keyHash)) { 47 | partition = partitionMap.get(keyHash); 48 | if (partition >= numPartitions) { 49 | // partition existed, but was removed 50 | partition = null; 51 | } 52 | } 53 | 54 | if (partition == null) { 55 | partition = uniqueKeyCount.incrementAndGet() % numPartitions; 56 | partitionMap.put(keyHash, partition); 57 | } 58 | return partition; 59 | } 60 | 61 | @Override 62 | public void close() {} 63 | 64 | @Override 65 | public void configure(Map map) {} 66 | } 67 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/support/FileLoader.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.support; 2 | 3 | import org.apache.commons.lang.Validate; 4 | 5 | import java.io.BufferedReader; 6 | import java.io.FileInputStream; 7 | import java.io.IOException; 8 | import java.io.InputStreamReader; 9 | 10 | /** 11 | * Utility class for reading in files. 
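* Reads the file line by line, joining lines with the platform line separator; any {@link IOException} is rethrown as an unchecked {@code RuntimeException}.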
12 | * @author jkorab 13 | */ 14 | public class FileLoader { 15 | 16 | public String loadFileAsString(String fileName) { 17 | Validate.notEmpty(fileName, "fileName is empty"); 18 | try { 19 | FileInputStream fileInputStream = new FileInputStream(fileName); 20 | 21 | StringBuffer sb = new StringBuffer(); 22 | try (BufferedReader reader = new BufferedReader(new InputStreamReader(fileInputStream))) { 23 | while (true) { 24 | String line = reader.readLine(); 25 | if (line == null) { 26 | break; 27 | } else { 28 | if (sb.length() > 0) { 29 | sb.append(System.lineSeparator()); 30 | } 31 | sb.append(line); 32 | } 33 | } 34 | } 35 | return sb.toString(); 36 | } catch (IOException e) { 37 | throw new RuntimeException(e); 38 | } 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/java/com/ameliant/tools/support/PayloadDetector.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.support; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class PayloadDetector { 7 | 8 | public static boolean isYaml(String configType) { 9 | return (configType.equals("yml") || configType.equals("yaml")); 10 | } 11 | 12 | public static boolean isYamlFile(String fileName) { 13 | assert (fileName != null); 14 | return isYaml(fileName.substring(fileName.lastIndexOf(".") + 1)); 15 | } 16 | 17 | } 18 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The logging properties used 3 | # 4 | log4j.rootLogger=INFO, out 5 | 6 | log4j.logger.com.ameliant.tools.kafka.kafkaperf=DEBUG 7 | log4j.logger.org.apache.kafka=INFO 8 | log4j.logger.kafka=WARN 9 | 10 | # CONSOLE appender not used by default 11 | log4j.appender.out=org.apache.log4j.ConsoleAppender 12 | log4j.appender.out.layout=org.apache.log4j.PatternLayout 13 | #log4j.appender.out.layout.ConversionPattern=[%30.30t] %-30.30c{1} %-5p %m%n 14 | log4j.appender.out.layout.ConversionPattern=%d{HH:mm:ss.SSS} [%-12.12t] %-5p %-25.30c{1} - %m%n 15 | 16 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/KafkaPerfTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool; 2 | 3 | import junit.framework.Test; 4 | import junit.framework.TestCase; 5 | import junit.framework.TestSuite; 6 | 7 | /** 8 | * Unit test for simple App. 
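* (This appears to be the stock Maven archetype placeholder test; it exercises nothing in the tool itself.)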
9 | */ 10 | public class KafkaPerfTest 11 | extends TestCase 12 | { 13 | /** 14 | * Create the test case 15 | * 16 | * @param testName name of the test case 17 | */ 18 | public KafkaPerfTest(String testName) 19 | { 20 | super( testName ); 21 | } 22 | 23 | /** 24 | * @return the suite of tests being tested 25 | */ 26 | public static Test suite() 27 | { 28 | return new TestSuite( KafkaPerfTest.class ); 29 | } 30 | 31 | /** 32 | * Rigourous Test :-) 33 | */ 34 | public void testApp() 35 | { 36 | assertTrue( true ); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/config/ConfigurableWithParentTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.config; 2 | 3 | import static org.hamcrest.Matchers.*; 4 | import static org.junit.Assert.*; 5 | import org.junit.Test; 6 | 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | 10 | /** 11 | * @author jkorab 12 | */ 13 | public class ConfigurableWithParentTest { 14 | 15 | public static final String PROP_0 = "prop0"; 16 | public static final String PROP_1 = "prop1"; 17 | public static final String PROP_2 = "prop2"; 18 | 19 | private class TestConfigurable extends ConfigurableWithParent {} 20 | 21 | @Test 22 | public void testGetMergedConfig_noParent() { 23 | TestConfigurable child = new TestConfigurable(); 24 | { 25 | Map config = new HashMap<>(); 26 | config.put(PROP_1, 1); 27 | child.setConfig(config); 28 | } 29 | Map mergedConfig = child.getKafkaConfig(); 30 | assertThat(mergedConfig.get(PROP_1), equalTo(1)); 31 | assertThat(mergedConfig.get(PROP_2), nullValue()); 32 | } 33 | 34 | @Test 35 | public void testGetMergedConfig_withParent() { 36 | TestConfigurable l0 = new TestConfigurable(); 37 | { 38 | Map config = new HashMap<>(); 39 | config.put(PROP_1, 1); 40 | config.put(PROP_2, 2); 41 | l0.setConfig(config); 42 | } 43 | 44 | TestConfigurable l1 = new TestConfigurable(); 45 | { 46 | Map config = new HashMap<>(); 47 | config.put(PROP_1, 3); 48 | l1.setConfig(config); 49 | } 50 | l1.setParent(l0); 51 | 52 | Map mergedConfig = l1.getKafkaConfig(); 53 | assertThat(mergedConfig.get(PROP_1), equalTo(3)); // overrides the same value in the parent 54 | assertThat(mergedConfig.get(PROP_2), equalTo(2)); 55 | } 56 | 57 | 58 | @Test 59 | public void testGetMergedConfig_with2LevelsParent() { 60 | TestConfigurable l0 = new TestConfigurable(); 61 | { 62 | Map config = new HashMap<>(); 63 | config.put(PROP_0, 0); 64 | l0.setConfig(config); 65 | } 66 | 67 | TestConfigurable l1 = new TestConfigurable(); 68 | l1.setParent(l0); 69 | { 70 | Map config = new HashMap<>(); 71 | config.put(PROP_1, 1); 72 | config.put(PROP_2, 2); 73 | l1.setConfig(config); 74 | } 75 | 76 | TestConfigurable l2 = new TestConfigurable(); 77 | l2.setParent(l1); 78 | { 79 | Map config = new HashMap<>(); 80 | config.put(PROP_1, 3); 81 | l2.setConfig(config); 82 | } 83 | 84 | Map mergedConfig = l2.getKafkaConfig(); 85 | assertThat(mergedConfig.get(PROP_0), equalTo(0)); 86 | assertThat(mergedConfig.get(PROP_1), equalTo(3)); // overrides the same value in the parent 87 | assertThat(mergedConfig.get(PROP_2), equalTo(2)); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/coordination/AwaitsStartup.java: 
-------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.coordination; 2 | 3 | import java.util.concurrent.CountDownLatch; 4 | import java.util.concurrent.TimeUnit; 5 | 6 | /** 7 | * @author jkorab 8 | */ 9 | public class AwaitsStartup implements Runnable { 10 | 11 | private final CountDownLatch latch; 12 | private final Runnable runnable; 13 | 14 | public AwaitsStartup(Runnable runnable, CountDownLatch latch) { 15 | this.runnable = runnable; 16 | this.latch = latch; 17 | } 18 | 19 | @Override 20 | public void run() { 21 | try { 22 | if (latch.await(10, TimeUnit.SECONDS)) { 23 | runnable.run(); 24 | } else { 25 | throw new IllegalStateException("Timeout reached waiting for startup signal"); 26 | } 27 | } catch (InterruptedException iex) { 28 | throw new RuntimeException(iex); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/ConsumerDriverTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.kafka.testdsl.config.ConsumerConfigsBuilder; 4 | import com.ameliant.tools.kafka.perftool.config.ConsumerDefinition; 5 | import com.ameliant.tools.kafka.testdsl.config.ProducerConfigsBuilder; 6 | import com.ameliant.tools.kafka.perftool.config.ProducerDefinition; 7 | import com.ameliant.tools.kafka.testdsl.EmbeddedKafkaBroker; 8 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 9 | import org.apache.kafka.common.serialization.ByteArrayDeserializer; 10 | import org.apache.kafka.common.serialization.ByteArraySerializer; 11 | import org.junit.Rule; 12 | import org.junit.Test; 13 | import static org.junit.Assert.*; 14 | 15 | import java.util.Map; 16 | import java.util.concurrent.CountDownLatch; 17 | import java.util.concurrent.ExecutorService; 18 | import java.util.concurrent.Executors; 19 | 20 | /** 21 | * @author jkorab 22 | */ 23 | public class ConsumerDriverTest { 24 | 25 | @Rule 26 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 27 | 28 | @Rule 29 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 30 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 31 | //.logFlushIntervalMessages(1) 32 | .build(); 33 | 34 | @Test 35 | public void testReceive_sequential() throws InterruptedException { 36 | String topic = "foo"; 37 | int messageCount = 1000; 38 | 39 | CountDownLatch latch = new CountDownLatch(2); 40 | 41 | // fill up the topic 42 | ProducerDriver producerDriver = createProducerDriver(latch, topic, messageCount); 43 | producerDriver.run(); 44 | 45 | ConsumerDriver consumerDriver = createConsumerDriver(latch, topic, messageCount); 46 | consumerDriver.run(); 47 | 48 | assertEquals(messageCount, consumerDriver.getMessagesReceived()); 49 | } 50 | 51 | @Test 52 | public void testReceive_parallel() throws InterruptedException { 53 | String topic = "foo"; 54 | int messageCount = 1000; 55 | 56 | int numWorkers = 2; 57 | CountDownLatch latch = new CountDownLatch(numWorkers); 58 | ExecutorService executorService = Executors.newFixedThreadPool(numWorkers); 59 | 60 | // fill up the topic 61 | ConsumerDriver consumerDriver = createConsumerDriver(latch, topic, messageCount); 62 | executorService.submit(consumerDriver); 63 | 64 | ProducerDriver producerDriver = createProducerDriver(latch, topic, messageCount); 65 | 
executorService.submit(producerDriver); 66 | 67 | latch.await(); 68 | } 69 | 70 | private ConsumerDriver createConsumerDriver(CountDownLatch latch, String topic, int messageCount) { 71 | Map configs = new ConsumerConfigsBuilder() 72 | .groupId("bar") 73 | .bootstrapServers("127.0.0.1:" + broker.getPort()) 74 | .enableAutoCommit(true) 75 | .autoCommitIntervalMs(1000) 76 | .sessionTimeoutMs(30000) 77 | .keyDeserializer(ByteArrayDeserializer.class) 78 | .valueDeserializer(ByteArrayDeserializer.class) 79 | .autoOffsetReset(ConsumerConfigsBuilder.OffsetReset.earliest) 80 | .build(); 81 | 82 | ConsumerDefinition consumerDefinition = new ConsumerDefinition(); 83 | consumerDefinition.setConfig(configs); 84 | consumerDefinition.setTopic(topic); 85 | consumerDefinition.setMessagesToReceive(messageCount); 86 | return new ConsumerDriver(consumerDefinition, latch); 87 | } 88 | 89 | public ProducerDriver createProducerDriver(CountDownLatch latch, String topic, int messageCount) { 90 | Map producerConfigs = new ProducerConfigsBuilder() 91 | .bootstrapServers("127.0.0.1:" + broker.getPort()) 92 | .requestRequiredAcks(ProducerConfigsBuilder.RequestRequiredAcks.ackFromLeader) 93 | .producerType(ProducerConfigsBuilder.ProducerType.sync) 94 | .keySerializer(ByteArraySerializer.class) 95 | .valueSerializer(ByteArraySerializer.class) 96 | .batchSize(0) 97 | .build(); 98 | 99 | ProducerDefinition producerDefinition = new ProducerDefinition(); 100 | producerDefinition.setConfig(producerConfigs); 101 | producerDefinition.setTopic(topic); 102 | producerDefinition.setMessageSize(100 * 1024); 103 | producerDefinition.setMessagesToSend(messageCount); 104 | producerDefinition.setSendBlocking(true); 105 | 106 | return new ProducerDriver(producerDefinition, latch); 107 | } 108 | 109 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/DistributionRun.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class DistributionRun { 7 | private final String topic; 8 | private final String groupId; 9 | private final int numProducers; 10 | private final int numConsumers; 11 | private int expectedConsumersWithoutMessages = 0; 12 | private int uniqueKeys = 2; 13 | 14 | public DistributionRun(String topic, int numProducers, int numConsumers) { 15 | this.topic = topic; 16 | this.groupId = "GROUP_ID"; 17 | this.numProducers = numProducers; 18 | this.numConsumers = numConsumers; 19 | } 20 | 21 | public DistributionRun uniqueKeys(int uniqueKeys) { 22 | this.uniqueKeys = uniqueKeys; 23 | return this; 24 | } 25 | 26 | public DistributionRun expectedConsumersWithoutMessages(int expectedConsumersWithoutMessages) { 27 | this.expectedConsumersWithoutMessages = expectedConsumersWithoutMessages; 28 | return this; 29 | } 30 | 31 | public String getTopic() { 32 | return topic; 33 | } 34 | 35 | public String getGroupId() { 36 | return groupId; 37 | } 38 | 39 | public int getNumProducers() { 40 | return numProducers; 41 | } 42 | 43 | public int getNumConsumers() { 44 | return numConsumers; 45 | } 46 | 47 | public int getExpectedConsumersWithoutMessages() { 48 | return expectedConsumersWithoutMessages; 49 | } 50 | 51 | public int getMessagesToSend() { 52 | return 10 * numConsumers * numProducers; 53 | } 54 | 55 | public int getUniqueKeys() { 56 | return uniqueKeys; 57 | } 58 | } 59 | 
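/*
 * Illustrative usage, as a sketch only (assumes an EmbeddedKafkaBroker named broker, as set up
 * in the tests in this package, and a test method that declares throws InterruptedException):
 *
 *   new DistributionValidator(broker).validateRoundRobinDistribution(
 *           new DistributionRun("distribution-topic", 1, 2)
 *                   .uniqueKeys(4)
 *                   .expectedConsumersWithoutMessages(0));
 */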
-------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/DistributionValidator.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.*; 4 | import com.ameliant.tools.kafka.perftool.coordination.AwaitsStartup; 5 | import com.ameliant.tools.kafka.testdsl.config.ConsumerConfigsBuilder; 6 | import com.ameliant.tools.kafka.testdsl.EmbeddedKafkaBroker; 7 | import com.ameliant.tools.kafka.testdsl.config.ProducerConfigsBuilder; 8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.apache.kafka.common.serialization.ByteArrayDeserializer; 11 | import org.apache.kafka.common.serialization.ByteArraySerializer; 12 | 13 | import java.util.*; 14 | import java.util.concurrent.CountDownLatch; 15 | import java.util.concurrent.ExecutorService; 16 | import java.util.concurrent.Executors; 17 | import java.util.concurrent.TimeUnit; 18 | 19 | import static org.junit.Assert.*; 20 | 21 | /** 22 | * @author jkorab 23 | */ 24 | public class DistributionValidator { 25 | 26 | private EmbeddedKafkaBroker broker; 27 | DistributionValidator(EmbeddedKafkaBroker broker) { 28 | this.broker = broker; 29 | } 30 | 31 | void validateRoundRobinDistribution(DistributionRun... runs) 32 | throws InterruptedException { 33 | validateDistribution(PartitioningStrategy.roundRobin, runs); 34 | } 35 | 36 | void validateStickyDistribution(DistributionRun... runs) 37 | throws InterruptedException { 38 | validateDistribution(PartitioningStrategy.sticky, runs); 39 | } 40 | 41 | void validateDistribution(PartitioningStrategy partitioningStrategy, 42 | DistributionRun[] runs) 43 | throws InterruptedException { 44 | 45 | final Map topicMessagesToSend = getTopicMessagesToSend(runs); 46 | final Map topicRuns = new HashMap<>(); 47 | for (DistributionRun run : runs) { 48 | topicRuns.put(run.getTopic(), run); 49 | } 50 | 51 | int totalConsumers = Arrays.stream(runs) 52 | .map(DistributionRun::getNumConsumers) 53 | .reduce(0, (i, total) -> total + i); 54 | 55 | int consumersThatWillTimeout = Arrays.stream(runs) 56 | .map(DistributionRun::getExpectedConsumersWithoutMessages) 57 | .reduce(0, (i, total) -> total + i); 58 | 59 | int totalProducers = Arrays.stream(runs) 60 | .map(DistributionRun::getNumProducers) 61 | .reduce(0, (i, total) -> total + i); 62 | 63 | CountDownLatch startUpLatch = new CountDownLatch(totalConsumers); 64 | CountDownLatch shutDownLatch = new CountDownLatch(totalConsumers - consumersThatWillTimeout + totalProducers); 65 | 66 | ExecutorService executorService = Executors.newFixedThreadPool(totalConsumers + totalProducers); 67 | 68 | try { 69 | Map> topicConsumerDrivers = new HashMap<>(); 70 | 71 | for (DistributionRun run : runs) { 72 | int numMessages = run.getMessagesToSend(); 73 | List producerDrivers = 74 | createProducerDrivers(shutDownLatch, 75 | run.getTopic(), 76 | run.getNumProducers(), 77 | numMessages / run.getNumProducers(), // evenly share out the sends 78 | run.getUniqueKeys(), 79 | partitioningStrategy); 80 | 81 | List consumerDrivers = 82 | createConsumerDrivers(shutDownLatch, 83 | run.getTopic(), 84 | run.getGroupId(), 85 | run.getNumConsumers(), 86 | numMessages / (run.getNumConsumers() - run.getExpectedConsumersWithoutMessages())); // evenly share out the receives 87 | 
topicConsumerDrivers.put(run.getTopic(), consumerDrivers); 88 | 89 | // fill up the topic 90 | producerDrivers.stream() 91 | .forEach(producer -> executorService.submit(new AwaitsStartup(producer, startUpLatch))); 92 | 93 | consumerDrivers.stream() 94 | .forEach(consumer -> { 95 | consumer.setConsumerRebalanceListener(waitForPartitionAssignment(startUpLatch)); 96 | executorService.submit(consumer); 97 | }); 98 | } 99 | 100 | // some consumers will flick the latch, others will continue polling 101 | 102 | if (!shutDownLatch.await(30, TimeUnit.SECONDS)) { 103 | topicConsumerDrivers.forEach((topic, consumerDrivers) -> { 104 | consumerDrivers.forEach(ConsumerDriver::requestShutdown); 105 | 106 | long messagesReceived = getMessagesReceived(consumerDrivers); 107 | int messagesSent = topicMessagesToSend.get(topic); 108 | if (messagesSent != messagesReceived) { 109 | String consumerResults = getConsumerResults(consumerDrivers); 110 | fail("Consumers for " + topic + " did not receive " + messagesSent 111 | + " messages, actual: " + messagesReceived + ". Details [" + consumerResults + "]"); 112 | } 113 | 114 | int consumersWithoutMessages = getConsumersWithoutMessages(consumerDrivers); 115 | DistributionRun run = topicRuns.get(topic); 116 | assertEquals(run.getExpectedConsumersWithoutMessages(), consumersWithoutMessages); 117 | }); 118 | } 119 | 120 | } finally { 121 | executorService.shutdownNow(); 122 | } 123 | } 124 | 125 | private Map getTopicMessagesToSend(DistributionRun[] runs) { 126 | final Map topicMessagesToSend = new HashMap<>(); 127 | for (DistributionRun run: runs) { 128 | topicMessagesToSend.put(run.getTopic(), run.getMessagesToSend()); 129 | } 130 | return topicMessagesToSend; 131 | } 132 | 133 | private List createProducerDrivers(CountDownLatch latch, String topic, int numDrivers, int messagesToSend, int uniqueKeys, PartitioningStrategy partitioningStrategy) { 134 | List producerDrivers = new ArrayList<>(numDrivers); 135 | for (int i = 0; i < numDrivers; i++) { 136 | ProducerDefinition producerDefinition = getProducerDefinition(topic, messagesToSend, uniqueKeys); 137 | producerDefinition.setPartitioningStrategy(partitioningStrategy); 138 | producerDrivers.add(new ProducerDriver(producerDefinition, latch)); 139 | } 140 | return producerDrivers; 141 | } 142 | 143 | private ProducerDefinition getProducerDefinition(String topic, int numMessages, int uniqueKeys) { 144 | ProducerDefinition producerDefinition = new ProducerDefinition(); 145 | producerDefinition.setConfig(getProducerConfigs()); 146 | producerDefinition.setTopic(topic); 147 | producerDefinition.setMessagesToSend(numMessages); 148 | producerDefinition.setSendBlocking(true); 149 | { 150 | KeyAllocationStrategyDefinition strategyDefinition = 151 | new KeyAllocationStrategyDefinition(KeyAllocationType.fair, uniqueKeys); 152 | producerDefinition.setKeyAllocationStrategy(strategyDefinition); 153 | } 154 | return producerDefinition; 155 | } 156 | 157 | private Map getProducerConfigs() { 158 | return new ProducerConfigsBuilder() 159 | .bootstrapServers("127.0.0.1:" + broker.getPort()) 160 | .requestRequiredAcks(ProducerConfigsBuilder.RequestRequiredAcks.ackFromLeader) 161 | .producerType(ProducerConfigsBuilder.ProducerType.sync) 162 | .keySerializer(ByteArraySerializer.class) 163 | .valueSerializer(ByteArraySerializer.class) 164 | .batchSize(0) 165 | .build(); 166 | } 167 | 168 | private ConsumerRebalanceListener waitForPartitionAssignment(CountDownLatch latch) { 169 | return new ConsumerRebalanceListener() { 170 | 171 | @Override 
172 | public void onPartitionsAssigned(Collection collection) { 173 | latch.countDown(); 174 | } 175 | 176 | @Override 177 | public void onPartitionsRevoked(Collection collection) {} 178 | 179 | }; 180 | } 181 | 182 | private List createConsumerDrivers(CountDownLatch latch, String topic, String groupId, int numDrivers, int expectedMessages) { 183 | List consumerDrivers = new ArrayList<>(numDrivers); 184 | for (int i = 0; i < numDrivers; i++) { 185 | // each driver expects the same number of messages 186 | consumerDrivers.add(createConsumerDriver(latch, topic, groupId, expectedMessages)); 187 | } 188 | return consumerDrivers; 189 | } 190 | 191 | private ConsumerDriver createConsumerDriver(CountDownLatch latch, String topic, String groupId, int messageCount) { 192 | Map configs = getConsumerConfigs(groupId); 193 | 194 | ConsumerDefinition consumerDefinition = new ConsumerDefinition(); 195 | consumerDefinition.setConfig(configs); 196 | consumerDefinition.setTopic(topic); 197 | consumerDefinition.setMessagesToReceive(messageCount); 198 | return new ConsumerDriver(consumerDefinition, latch); 199 | } 200 | 201 | private Map getConsumerConfigs(String groupId) { 202 | return new ConsumerConfigsBuilder() 203 | .groupId(groupId) 204 | .bootstrapServers("127.0.0.1:" + broker.getPort()) 205 | .enableAutoCommit(true) 206 | .autoCommitIntervalMs(1000) 207 | .sessionTimeoutMs(30000) 208 | .keyDeserializer(ByteArrayDeserializer.class) 209 | .valueDeserializer(ByteArrayDeserializer.class) 210 | .autoOffsetReset(ConsumerConfigsBuilder.OffsetReset.earliest) 211 | .build(); 212 | } 213 | 214 | private String getConsumerResults(List consumerDrivers) { 215 | return consumerDrivers.stream() 216 | .map(consumer -> consumer.getMessagesExpected() + ":" + consumer.getMessagesReceived()) 217 | .reduce("", (results, expectedVsActual) -> 218 | (results.equals("")) ? 
expectedVsActual : results + ", " + expectedVsActual 219 | ); 220 | } 221 | 222 | private int getConsumersWithoutMessages(List consumerDrivers) { 223 | return (int) consumerDrivers.stream() 224 | .filter(consumer -> consumer.getMessagesReceived() == 0) 225 | .count(); 226 | } 227 | 228 | private long getMessagesReceived(List consumerDrivers) { 229 | return consumerDrivers.stream() 230 | .map(ConsumerDriver::getMessagesReceived) 231 | .reduce(0l, (total, received) -> total + received); 232 | } 233 | 234 | } 235 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/ProducerDriverTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import static java.lang.String.format; 4 | import com.ameliant.tools.kafka.testdsl.config.ProducerConfigsBuilder; 5 | import com.ameliant.tools.kafka.perftool.config.ProducerDefinition; 6 | import com.ameliant.tools.kafka.testdsl.EmbeddedKafkaBroker; 7 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 8 | import org.apache.kafka.common.serialization.ByteArraySerializer; 9 | import org.junit.Rule; 10 | import org.junit.Test; 11 | 12 | import java.util.Map; 13 | 14 | /** 15 | * @author jkorab 16 | */ 17 | public class ProducerDriverTest { 18 | 19 | @Rule 20 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 21 | 22 | @Rule 23 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 24 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 25 | //.logFlushIntervalMessages(1) 26 | .build(); 27 | 28 | @Test 29 | public void testSend() { 30 | Map producerConfigs = getProducerConfigs(); 31 | 32 | ProducerDefinition producerDefinition = new ProducerDefinition(); 33 | producerDefinition.setConfig(producerConfigs); 34 | producerDefinition.setTopic("foo"); 35 | producerDefinition.setMessageSize(1024); 36 | producerDefinition.setMessagesToSend(1000); 37 | producerDefinition.setSendBlocking(true); 38 | 39 | ProducerDriver driver = new ProducerDriver(producerDefinition); 40 | driver.run(); 41 | } 42 | 43 | 44 | private Map getProducerConfigs() { 45 | return new ProducerConfigsBuilder() 46 | .bootstrapServers(format("127.0.0.1:%s", broker.getPort())) 47 | .requestRequiredAcks(ProducerConfigsBuilder.RequestRequiredAcks.ackFromLeader) 48 | .producerType(ProducerConfigsBuilder.ProducerType.sync) 49 | .keySerializer(ByteArraySerializer.class) 50 | .valueSerializer(ByteArraySerializer.class) 51 | .batchSize(0) 52 | .build(); 53 | } 54 | 55 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/ProducerPartitioningTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers; 2 | 3 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 4 | import com.ameliant.tools.kafka.testdsl.EmbeddedKafkaBroker; 5 | import org.junit.Rule; 6 | import org.junit.Test; 7 | 8 | /** 9 | * @author jkorab 10 | */ 11 | public class ProducerPartitioningTest { 12 | 13 | public static final String FOO = "foo"; 14 | public static final String BAR = "bar"; 15 | public static final String BAZ = "baz"; 16 | 17 | @Rule 18 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 19 | 20 | @Rule 21 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 
22 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 23 | .topic(FOO) 24 | .partitions(3) 25 | .end() 26 | .topic(BAR) 27 | .partitions(1) 28 | .end() 29 | .topic(BAZ) 30 | .partitions(2) 31 | .end() 32 | .build(); 33 | 34 | @Test 35 | public void testSend_roundRobin() throws InterruptedException { 36 | int numConsumers = 3; 37 | int numProducers = 1; 38 | 39 | DistributionValidator validator = new DistributionValidator(broker); 40 | validator.validateRoundRobinDistribution(new DistributionRun(FOO, numProducers, numConsumers)); 41 | } 42 | 43 | @Test 44 | public void testSend_roundRobin_multiProducer() throws InterruptedException { 45 | int numConsumers = 3; 46 | int numProducers = 2; 47 | 48 | DistributionValidator validator = new DistributionValidator(broker); 49 | validator.validateRoundRobinDistribution(new DistributionRun(FOO, numProducers, numConsumers)); 50 | } 51 | 52 | @Test 53 | public void testSend_roundRobin_multiTopic() throws InterruptedException { 54 | DistributionValidator validator = new DistributionValidator(broker); 55 | validator.validateRoundRobinDistribution( 56 | new DistributionRun(FOO, 1, 3) 57 | .uniqueKeys(2) 58 | .expectedConsumersWithoutMessages(0), 59 | new DistributionRun(BAR, 1, 1) 60 | .uniqueKeys(2) 61 | .expectedConsumersWithoutMessages(0) 62 | ); 63 | } 64 | 65 | @Test 66 | public void testSend_sticky() throws InterruptedException { 67 | int numConsumers = 3; 68 | int numProducers = 1; 69 | 70 | DistributionValidator validator = new DistributionValidator(broker); 71 | validator.validateStickyDistribution(new DistributionRun(FOO, numProducers, numConsumers) 72 | .expectedConsumersWithoutMessages(1)); 73 | } 74 | 75 | @Test 76 | public void testSend_sticky_multiProducer() throws InterruptedException { 77 | int numConsumers = 3; 78 | int numProducers = 2; 79 | 80 | DistributionValidator validator = new DistributionValidator(broker); 81 | // two consumers should receive half of the messages each 82 | validator.validateStickyDistribution(new DistributionRun(FOO, numProducers, numConsumers) 83 | .expectedConsumersWithoutMessages(1)); 84 | } 85 | 86 | @Test 87 | public void testSend_sticky_multiTopic() throws InterruptedException { 88 | DistributionValidator validator = new DistributionValidator(broker); 89 | validator.validateStickyDistribution( 90 | new DistributionRun(FOO, 1, 3) 91 | .uniqueKeys(2) 92 | .expectedConsumersWithoutMessages(1), // evenly partition - one consumer gets nothing (2 keys) 93 | new DistributionRun(BAR, 1, 2) 94 | .uniqueKeys(2) 95 | .expectedConsumersWithoutMessages(1), // one consumer blocked 96 | new DistributionRun(BAZ, 1, 2) 97 | .uniqueKeys(2).expectedConsumersWithoutMessages(0) // evenly partitioned 98 | ); 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/drivers/partitioning/KeyAllocationStrategyTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.drivers.partitioning; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.KeyAllocationStrategyDefinition; 4 | import com.ameliant.tools.kafka.perftool.config.KeyAllocationType; 5 | import org.junit.Test; 6 | 7 | import static org.hamcrest.Matchers.*; 8 | import static org.junit.Assert.*; 9 | 10 | /** 11 | * @author jkorab 12 | */ 13 | public class KeyAllocationStrategyTest { 14 | 15 | private KeyAllocationStrategy getStrategy(KeyAllocationType type, int 
uniqueKeys) { 16 | return new KeyAllocationStrategy(new KeyAllocationStrategyDefinition(type, uniqueKeys)); 17 | } 18 | 19 | @Test 20 | public void testFairAllocation() { 21 | KeyAllocationStrategy s0 = getStrategy(KeyAllocationType.fair, 2); 22 | assertThat(s0.getKey(0), equalTo(s0.getKey(0))); // consistency 23 | assertThat(s0.getKey(0), equalTo(s0.getKey(2))); // modulo around uniqueKeys 24 | assertThat(s0.getKey(0), not(equalTo(s0.getKey(1)))); 25 | 26 | KeyAllocationStrategy s1 = getStrategy(KeyAllocationType.fair, 2); 27 | assertThat(s0.getKey(0), equalTo(s1.getKey(0))); 28 | assertThat(s0.getKey(1), equalTo(s1.getKey(1))); 29 | } 30 | 31 | @Test 32 | public void testRandomAllocation() { 33 | KeyAllocationStrategy s0 = getStrategy(KeyAllocationType.random, 2); 34 | assertThat(s0.getKey(0), equalTo(s0.getKey(0))); // consistency 35 | assertThat(s0.getKey(0), equalTo(s0.getKey(2))); // modulo around uniqueKeys 36 | assertThat(s0.getKey(0), not(equalTo(s0.getKey(1)))); 37 | 38 | KeyAllocationStrategy s1 = getStrategy(KeyAllocationType.random, 2); 39 | assertThat(s0.getKey(0), not(equalTo(s1.getKey(0)))); 40 | assertThat(s0.getKey(1), not(equalTo(s1.getKey(1)))); 41 | } 42 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/java/com/ameliant/tools/kafka/perftool/samples/SampleProfileValidationTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.perftool.samples; 2 | 3 | import com.ameliant.tools.kafka.perftool.config.TestProfileDefinition; 4 | import com.ameliant.tools.support.DirectoryUtils; 5 | import com.ameliant.tools.support.PayloadDetector; 6 | import com.fasterxml.jackson.databind.ObjectMapper; 7 | import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; 8 | import org.junit.Test; 9 | import static org.junit.Assert.*; 10 | import static org.hamcrest.Matchers.*; 11 | 12 | import java.io.File; 13 | import java.io.IOException; 14 | 15 | /** 16 | * @author jkorab 17 | */ 18 | public class SampleProfileValidationTest { 19 | 20 | @Test 21 | public void testProfilesValid() { 22 | // can be run within an IDE or via Maven 23 | File testDirectory = DirectoryUtils.locateDirectory("src/test/resources/test-profiles"); 24 | assertThat(testDirectory, notNullValue()); 25 | for (File file : testDirectory.listFiles()) { 26 | ObjectMapper mapper = PayloadDetector.isYamlFile(file.getName()) ? 
new ObjectMapper(new YAMLFactory()) 27 | : new ObjectMapper(); 28 | TestProfileDefinition testProfileDefinition = null; 29 | try { 30 | testProfileDefinition = mapper.readValue(file, TestProfileDefinition.class); 31 | } catch (IOException e) { 32 | throw new IllegalArgumentException("Unable to read in " + file.getName(), e); 33 | } 34 | assertThat(testProfileDefinition, notNullValue()); 35 | } 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The logging properties used 3 | # 4 | log4j.rootLogger=INFO, out 5 | 6 | log4j.logger.com.ameliant.tools.kafka.kafkaperf.drivers=DEBUG 7 | log4j.logger.org.apache.kafka=WARN 8 | log4j.logger.kafka=WARN 9 | log4j.logger.org.apache.kafka.clients.consumer=INFO 10 | log4j.logger.org.apache.zookeeper=WARN 11 | 12 | # CONSOLE appender not used by default 13 | log4j.appender.out=org.apache.log4j.ConsoleAppender 14 | log4j.appender.out.layout=org.apache.log4j.PatternLayout 15 | #log4j.appender.out.layout.ConversionPattern=[%30.30t] %-30.30c{1} %-5p %m%n 16 | log4j.appender.out.layout.ConversionPattern=%d{HH:mm:ss.SSS} [%-12.12t] %-5p %-25.30c{1} - %m%n 17 | 18 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/payloads/lorem.txt: -------------------------------------------------------------------------------- 1 | Lorem ipsum dolor sit amet, consectetur adipisicing 2 | elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut 3 | enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut 4 | aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in 5 | voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
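The files that follow are the sample profiles exercised by ``SampleProfileValidationTest`` above. Deserializing one outside the test harness uses the same Jackson calls shown in that test; a minimal sketch:

```java
// A plain ObjectMapper reads the .json profiles; wrap a YAMLFactory for .yml.
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
TestProfileDefinition profile = mapper.readValue(
        new File("src/test/resources/test-profiles/producer.yml"),
        TestProfileDefinition.class);
```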
-------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/consumer.json: -------------------------------------------------------------------------------- 1 | { 2 | "consumers" : { 3 | "topic" : "foo", 4 | "config" : { 5 | "bootstrap.servers" : "tcp://localhost:9092", 6 | "key.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 7 | "value.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 8 | "enable.auto.commit" : "true", 9 | "auto.commit.interval.ms" : "1000", 10 | "auto.offset.reset" : "earliest", 11 | "group.id" : "bar" 12 | }, 13 | "instances" : [ 14 | { 15 | "messagesToReceive" : "50000", 16 | "reportReceivedEvery" : "500" 17 | } 18 | ] 19 | } 20 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/multi-consumer.json: -------------------------------------------------------------------------------- 1 | { 2 | "concurrent" : "true", 3 | "consumers" : { 4 | "topic" : "foo", 5 | "config" : { 6 | "bootstrap.servers" : "tcp://localhost:9092", 7 | "key.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 8 | "value.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 9 | "enable.auto.commit" : "true", 10 | "auto.commit.interval.ms" : "1000", 11 | "auto.offset.reset" : "earliest", 12 | "group.id" : "bar" 13 | }, 14 | "instances" : [ 15 | { 16 | "messagesToReceive" : "10000", 17 | "receiveDelay" : "10" 18 | }, 19 | { 20 | "config" : { 21 | "group.id" : "baz" 22 | }, 23 | "messagesToReceive" : "10000", 24 | "receiveDelay" : "10" 25 | } 26 | ] 27 | } 28 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/multi-producer.json: -------------------------------------------------------------------------------- 1 | { 2 | "config" : { 3 | "bootstrap.servers" : "tcp://localhost:9092" 4 | }, 5 | "autogenerateTopic" : "true", 6 | "producers" : { 7 | "config" : { 8 | "request.required.acks": "ackFromLeader", 9 | "producer.type": "sync", 10 | "key.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 11 | "value.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 12 | "batch.size": "0", 13 | "request.timeout.ms" : "10000" 14 | }, 15 | "instances" : [ 16 | { 17 | "messagesToSend" : "1000", 18 | "sendBlocking": "false", 19 | "messageSize" : "1000" 20 | }, 21 | { 22 | "messagesToSend" : "1000", 23 | "sendBlocking": "false", 24 | "messageSize" : "1000" 25 | } 26 | ] 27 | } 28 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-allocation-strategy.json: -------------------------------------------------------------------------------- 1 | { 2 | "concurrent" : "false", 3 | "producers" : { 4 | "config" : { 5 | "bootstrap.servers" : "tcp://localhost:9092", 6 | "request.timeout.ms" : "10000", 7 | "key.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 8 | "value.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 9 | "batch.size" : "0", 10 | "acks": "1", 11 | "max.block.ms": "10000" 12 | }, 13 | "instances" : [ 14 | { 15 | "keyAllocationStrategy" : { 16 | "type" : "random", 17 | "uniqueKeys" : "4" 18 | }, 19 | "topic" : "foo", 20 | "messagesToSend" : "2000", 21 | "sendBlocking": 
"false", 22 | "messageSize" : "1000" 23 | } 24 | ] 25 | } 26 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-allocation-strategy.yml: -------------------------------------------------------------------------------- 1 | producers: 2 | config: 3 | bootstrap.servers: "tcp://localhost:9092" 4 | request.timeout.ms: "10000" 5 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 6 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 7 | batch.size: "0" 8 | acks: "1" 9 | max.block.ms: "10000" 10 | topic: null 11 | instances: 12 | - topic: "foo" 13 | messagesToSend: 20 14 | messageSize: 1000 15 | keyAllocationStrategy: 16 | type: "random" 17 | uniqueKeys: 4 18 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-cascade.json: -------------------------------------------------------------------------------- 1 | { 2 | "config": { 3 | "bootstrap.servers": "tcp://localhost:9092" 4 | }, 5 | "autogenerateTopic": "true", 6 | "producers": { 7 | "config": { 8 | "request.timeout.ms": "10000", 9 | "key.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 10 | "value.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 11 | "batch.size": "0", 12 | "acks": "1", 13 | "max.block.ms": "10000" 14 | }, 15 | "instances": [ 16 | { 17 | "messagesToSend": "1000", 18 | "sendBlocking": "false", 19 | "messageSize": "1000" 20 | } 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-consumer-cascade.json: -------------------------------------------------------------------------------- 1 | { 2 | "config" : { 3 | "bootstrap.servers" : "tcp://localhost:9092" 4 | }, 5 | "producers" : { 6 | "config" : { 7 | "request.required.acks": "ackFromLeader", 8 | "producer.type": "sync", 9 | "key.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 10 | "value.serializer": "org.apache.kafka.common.serialization.ByteArraySerializer", 11 | "batch.size": "0", 12 | "timeout.ms" : "10000" 13 | }, 14 | "instances" : [ 15 | { 16 | "config" : { 17 | "timeout.ms" : "5000" 18 | }, 19 | "topic" : "foo", 20 | "messagesToSend" : "1000", 21 | "sendBlocking": "false", 22 | "messageSize" : "100000" 23 | }, 24 | { 25 | "topic" : "bar", 26 | "messagesToSend" : "200", 27 | "sendBlocking": "true", 28 | "messageSize" : "10000" 29 | } 30 | ] 31 | }, 32 | "consumers" : { 33 | "config" : { 34 | "key.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 35 | "value.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 36 | "enable.auto.commit" : "true", 37 | "auto.commit.interval.ms" : "1000", 38 | "auto.offset.reset" : "earliest" 39 | }, 40 | "instances" : [ 41 | { 42 | "config" : { 43 | "timeout.ms" : "5000", 44 | "groupId" : "foo1" 45 | 46 | }, 47 | "topic" : "foo" 48 | }, 49 | { 50 | "config" : { 51 | "timeout.ms" : "5000", 52 | "groupId" : "bar2" 53 | 54 | }, 55 | "topic" : "bar" 56 | } 57 | ] 58 | } 59 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-consumer-loopback.json: -------------------------------------------------------------------------------- 1 | { 2 | "config" : { 3 | 
"bootstrap.servers" : "tcp://localhost:9092" 4 | }, 5 | "autogenerateTopic" : "true", 6 | "producers" : { 7 | "config" : { 8 | "request.timeout.ms" : "10000", 9 | "key.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 10 | "value.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 11 | "batch.size" : "0", 12 | "acks": "1", 13 | "max.block.ms": "10000" 14 | }, 15 | "instances" : [ 16 | { 17 | "messagesToSend" : "1000", 18 | "sendBlocking": "false", 19 | "messageSize" : "1000" 20 | } 21 | ] 22 | }, 23 | "consumers" : { 24 | "config" : { 25 | "key.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 26 | "value.deserializer" : "org.apache.kafka.common.serialization.ByteArrayDeserializer", 27 | "enable.auto.commit" : "true", 28 | "auto.commit.interval.ms" : "1000", 29 | "auto.offset.reset" : "earliest" 30 | }, 31 | "instances" : [ 32 | { 33 | "config" : { 34 | "group.id" : "bar" 35 | }, 36 | "messagesToReceive" : "1000" 37 | } 38 | ] 39 | } 40 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-consumer-loopback.yml: -------------------------------------------------------------------------------- 1 | # see TestProfileDefinition for all properties 2 | config: 3 | # Kafka config; applies to producers and consumers 4 | bootstrap.servers: "tcp://localhost:9092" 5 | maxDuration: 30 6 | concurrent: true 7 | autogenerateTopic: true 8 | producers: 9 | config: 10 | # Kafka config; applies to all producers 11 | request.timeout.ms: "10000" 12 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 13 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 14 | batch.size: "0" 15 | acks: "1" 16 | max.block.ms: "10000" 17 | instances: 18 | # see ProducerDefinition for all properties 19 | - config: 20 | # Kafka config; just for this producer 21 | timeout.ms: 5000 22 | messagesToSend: 1000 23 | messageSize: 1000 24 | sendBlocking: false 25 | consumers: 26 | config: 27 | # Kafka config; applies to all consumers 28 | key.deserializer: "org.apache.kafka.common.serialization.ByteArrayDeserializer" 29 | value.deserializer: "org.apache.kafka.common.serialization.ByteArrayDeserializer" 30 | enable.auto.commit: "true" 31 | auto.commit.interval.ms: "1000" 32 | auto.offset.reset: "earliest" 33 | instances: 34 | # see ConsumerDefinition for all properties 35 | - config: 36 | # Kafka config; just for this consumer 37 | group.id: "bar" 38 | messagesToReceive: 1000 39 | pollTimeout: 1000 40 | reportReceivedEvery: 1000 41 | receiveDelay: 0 42 | -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-message-location.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config: {} # applies to all producers and consumers 3 | producers: 4 | config: # applies to all producers 5 | bootstrap.servers: "tcp://localhost:9092" 6 | request.timeout.ms: "10000" 7 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 8 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 9 | batch.size: "0" 10 | acks: "1" 11 | max.block.ms: "10000" 12 | instances: 13 | - topic: "lorem" 14 | messagesToSend: 1000 15 | messageLocation: "src/test/resources/payloads/lorem.txt" 16 | -------------------------------------------------------------------------------- 
/kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-round-robin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config: {} # applies to all producers and consumers 3 | producers: 4 | config: # applies to all producers 5 | bootstrap.servers: "tcp://localhost:9092" 6 | request.timeout.ms: "10000" 7 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 8 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 9 | batch.size: "0" 10 | acks: "1" 11 | max.block.ms: "10000" 12 | instances: 13 | - config: {} # applies to this producer 14 | topic: "foo1" 15 | messagesToSend: 2000 16 | messageSize: 1000 17 | sendBlocking: false 18 | sendDelay: 1000 19 | partitioningStrategy: "roundRobin" -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer-sticky.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config: {} # applies to all producers and consumers 3 | producers: 4 | config: # applies to all producers 5 | bootstrap.servers: "tcp://localhost:9092" 6 | request.timeout.ms: "10000" 7 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 8 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 9 | batch.size: "0" 10 | acks: "1" 11 | max.block.ms: "10000" 12 | instances: 13 | - config: {} # applies to this producer 14 | topic: "foo" 15 | messagesToSend: 2000 16 | messageSize: 1000 17 | sendBlocking: false 18 | partitioningStrategy: "sticky" -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer.json: -------------------------------------------------------------------------------- 1 | { 2 | "concurrent" : "false", 3 | "producers" : { 4 | "config" : { 5 | "bootstrap.servers" : "tcp://localhost:9092", 6 | "request.timeout.ms" : "10000", 7 | "key.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 8 | "value.serializer" : "org.apache.kafka.common.serialization.ByteArraySerializer", 9 | "batch.size" : "0", 10 | "acks": "1", 11 | "max.block.ms": "10000" 12 | }, 13 | "instances" : [ 14 | { 15 | "topic": "foo", 16 | "messagesToSend": "2000", 17 | "sendBlocking": "false", 18 | "messageSize": "1000", 19 | "keyAllocationStrategy": { 20 | "type": "fair", 21 | "uniqueKeys": "4" 22 | } 23 | } 24 | ] 25 | } 26 | } -------------------------------------------------------------------------------- /kafka/kafka-perf-tool/src/test/resources/test-profiles/producer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config: {} # applies to all producers and consumers 3 | producers: 4 | config: # applies to all producers 5 | bootstrap.servers: "tcp://localhost:9092" 6 | request.timeout.ms: "10000" 7 | key.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 8 | value.serializer: "org.apache.kafka.common.serialization.ByteArraySerializer" 9 | batch.size: "0" 10 | acks: "1" 11 | max.block.ms: "10000" 12 | instances: 13 | - config: {} # applies to this producer 14 | topic: "foo" 15 | messagesToSend: 2000 16 | messageSize: 1000 17 | sendBlocking: false -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/README.md: -------------------------------------------------------------------------------- 1 | A fluent DSL for 
embedding Kafka servers in JUnit tests. 2 | 3 | Before each JUnit ``@Test`` method the library will spin up a Kafka instance on a dynamically-determined port, 4 | with storage in a dynamically created directory, located within ``System.getProperty("java.io.tmpdir")``. At the end of 5 | each test, the instance will be shut down and the directory deleted. 6 | 7 | The ``EmbeddedKafkaBroker`` class provides a fluent builder API, accessible via the static ``builder()`` method. This 8 | provides the following properties: 9 | 10 | * ``brokerId`` 11 | * ``hostname`` 12 | * ``port`` 13 | * ``logFlushIntervalMessages`` 14 | * ``zookeeperConnect`` 15 | * ``numPartitions`` - default partition count for all topics 16 | 17 | Sample usage: 18 | 19 | ```java 20 | 21 | public class EmbeddedKafkaBrokerTest { 22 | 23 | @Rule 24 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 25 | 26 | @Rule 27 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 28 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 29 | .build(); 30 | 31 | @Test 32 | public void testLifecycle() { 33 | Assert.assertTrue(broker.getPort() > 0); 34 | } 35 | } 36 | 37 | ``` 38 | 39 | It is also possible to predefine topics, specifying different partition counts for them, through the ``topic()`` element: 40 | 41 | ```java 42 | 43 | @Rule 44 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 45 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 46 | .topic("goat") 47 | .partitions(1) 48 | .replicationFactor(1) 49 | .property("flush.messages", "1") 50 | .end() 51 | .topic("cheese") 52 | .partitions(3) 53 | .end() 54 | .build(); 55 | 56 | ``` 57 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | com.ameliant.tools 7 | kafka-parent 8 | 0.1.0-SNAPSHOT 9 | ../kafka-parent/pom.xml 10 | 11 | 4.0.0 12 | 13 | kafka-test-dsl 14 | ${base-name} :: ${project.artifactId} 15 | 16 | 17 | 18 | org.apache.kafka 19 | kafka_2.10 20 | 21 | 22 | org.apache.kafka 23 | kafka-clients 24 | 25 | 26 | commons-lang 27 | commons-lang 28 | 29 | 30 | commons-io 31 | commons-io 32 | 33 | 34 | junit 35 | junit 36 | 37 | 38 | com.ameliant.tools 39 | tools-support 40 | 0.1.0-SNAPSHOT 41 | 42 | 43 | com.ameliant.tools 44 | zookeeper-test-dsl 45 | 0.1.0-SNAPSHOT 46 | test 47 | 48 | 49 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/BrokerBuilder.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import kafka.server.KafkaConfig; 4 | import org.apache.commons.lang.Validate; 5 | 6 | import java.util.Properties; 7 | 8 | /** 9 | * @author jkorab 10 | */ 11 | public class BrokerBuilder { 12 | 13 | final BrokerDefinition brokerDefinition; 14 | 15 | BrokerBuilder() { 16 | this.brokerDefinition = new BrokerDefinition(); 17 | } 18 | 19 | public BrokerBuilder brokerId(int brokerId) { 20 | brokerDefinition.setBrokerId(brokerId); 21 | return this; 22 | } 23 | 24 | public BrokerBuilder hostname(String hostname) { 25 | brokerDefinition.setHostname(hostname); 26 | return this; 27 | } 28 | 29 | public BrokerBuilder port(int port) { 30 | brokerDefinition.setPort(port); 31 | return this; 32 | } 33 | 34 | public BrokerBuilder logFlushIntervalMessages(long logFlushIntervalMessages) { 35 |
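// build() below maps this value onto KafkaConfig.LogFlushIntervalMessagesProp(),
// i.e. the broker's log.flush.interval.messages setting, when one is supplied.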
brokerDefinition.setLogFlushIntervalMessages(logFlushIntervalMessages); 36 | return this; 37 | } 38 | 39 | public BrokerBuilder zookeeperConnect(String zookeeperConnect) { 40 | brokerDefinition.setZookeeperConnect(zookeeperConnect); 41 | return this; 42 | } 43 | 44 | public BrokerBuilder numPartitions(int numPartitions) { 45 | Validate.isTrue(numPartitions > 0, "numPartitions must be greater than 0"); 46 | brokerDefinition.setNumPartitions(numPartitions); 47 | return this; 48 | } 49 | 50 | public TopicBuilder topic(String name) { 51 | return new TopicBuilder(this, name); 52 | } 53 | 54 | public EmbeddedKafkaBroker build() { 55 | Properties props = brokerDefinition.getProperties(); 56 | props.setProperty(KafkaConfig.HostNameProp(), brokerDefinition.getHostname()); 57 | props.setProperty(KafkaConfig.PortProp(), Integer.toString(brokerDefinition.getPort())); 58 | props.setProperty(KafkaConfig.BrokerIdProp(), Integer.toString(brokerDefinition.getBrokerId())); 59 | props.setProperty(KafkaConfig.ZkConnectProp(), brokerDefinition.getZookeeperConnect()); 60 | props.setProperty(KafkaConfig.NumPartitionsProp(), Integer.toString(brokerDefinition.getNumPartitions())); 61 | 62 | Long logFlushIntervalMessages = brokerDefinition.getLogFlushIntervalMessages(); 63 | if (logFlushIntervalMessages != null) { 64 | props.setProperty(KafkaConfig.LogFlushIntervalMessagesProp(), logFlushIntervalMessages.toString()); 65 | } 66 | 67 | return new EmbeddedKafkaBroker(brokerDefinition); 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/BrokerDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import com.ameliant.tools.support.AvailablePortFinder; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | import java.util.Properties; 8 | 9 | /** 10 | * @author jkorab 11 | */ 12 | class BrokerDefinition { 13 | 14 | private int brokerId = 1; 15 | private String hostname = "localhost"; 16 | private String zookeeperConnect; 17 | private int port = AvailablePortFinder.getNextAvailable(); 18 | 19 | private int numPartitions = 1; 20 | private Long logFlushIntervalMessages; 21 | private Properties properties = new Properties(); 22 | 23 | private List topicDefinitions = new ArrayList<>(); 24 | 25 | public int getBrokerId() { 26 | return brokerId; 27 | } 28 | 29 | public void setBrokerId(int brokerId) { 30 | this.brokerId = brokerId; 31 | } 32 | 33 | public List getTopicDefinitions() { 34 | return topicDefinitions; 35 | } 36 | 37 | public void setTopicDefinitions(List topicDefinitions) { 38 | this.topicDefinitions = topicDefinitions; 39 | } 40 | 41 | public String getHostname() { 42 | return hostname; 43 | } 44 | 45 | public void setHostname(String hostname) { 46 | this.hostname = hostname; 47 | } 48 | 49 | public String getZookeeperConnect() { 50 | return zookeeperConnect; 51 | } 52 | 53 | public void setZookeeperConnect(String zookeeperConnect) { 54 | this.zookeeperConnect = zookeeperConnect; 55 | } 56 | 57 | public int getPort() { 58 | return port; 59 | } 60 | 61 | public void setPort(int port) { 62 | this.port = port; 63 | } 64 | 65 | public int getNumPartitions() { 66 | return numPartitions; 67 | } 68 | 69 | public void setNumPartitions(int numPartitions) { 70 | this.numPartitions = numPartitions; 71 | } 72 | 73 | public Long getLogFlushIntervalMessages() { 74 | return logFlushIntervalMessages; 75 
| } 76 | 77 | public void setLogFlushIntervalMessages(Long logFlushIntervalMessages) { 78 | this.logFlushIntervalMessages = logFlushIntervalMessages; 79 | } 80 | 81 | public Properties getProperties() { 82 | return properties; 83 | } 84 | 85 | public void setProperties(Properties properties) { 86 | this.properties = properties; 87 | } 88 | } 89 | 90 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/EmbeddedKafkaBroker.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import static com.ameliant.tools.support.DirectoryUtils.*; 4 | 5 | import kafka.admin.AdminUtils; 6 | import kafka.server.KafkaConfig; 7 | import kafka.server.KafkaServer; 8 | import kafka.utils.SystemTime$; 9 | import kafka.utils.ZkUtils; 10 | import org.apache.commons.io.FileUtils; 11 | import org.apache.kafka.common.security.JaasUtils; 12 | import org.junit.rules.ExternalResource; 13 | import org.slf4j.Logger; 14 | import org.slf4j.LoggerFactory; 15 | import scala.Some$; 16 | 17 | import java.io.File; 18 | import java.io.IOException; 19 | import java.util.List; 20 | import java.util.Properties; 21 | 22 | /** 23 | * @author jkorab 24 | */ 25 | public class EmbeddedKafkaBroker extends ExternalResource { 26 | private Logger log = LoggerFactory.getLogger(this.getClass()); 27 | 28 | private final BrokerDefinition brokerDefinition; 29 | private KafkaServer kafkaServer; 30 | private File logDirectory; 31 | 32 | public static BrokerBuilder builder() { 33 | return new BrokerBuilder(); 34 | } 35 | 36 | EmbeddedKafkaBroker(BrokerDefinition brokerDefinition) { 37 | this.brokerDefinition = brokerDefinition; 38 | } 39 | 40 | public int getPort() { 41 | return brokerDefinition.getPort(); 42 | } 43 | 44 | public String getConnectionString() { 45 | return "127.0.0.1:" + getPort(); 46 | } 47 | 48 | @Override 49 | protected void before() throws Throwable { 50 | logDirectory = tempDir(perTest("kafka-log")); 51 | Properties properties = brokerDefinition.getProperties(); 52 | properties.setProperty(KafkaConfig.LogDirProp(), logDirectory.getCanonicalPath()); 53 | kafkaServer = new KafkaServer(new KafkaConfig(properties), 54 | SystemTime$.MODULE$, Some$.MODULE$.apply("kafkaServer")); 55 | kafkaServer.startup(); 56 | 57 | List<TopicDefinition> topicDefinitions = brokerDefinition.getTopicDefinitions(); 58 | if (!topicDefinitions.isEmpty()) { 59 | ZkUtils zkUtils = ZkUtils.apply(brokerDefinition.getZookeeperConnect(), 30000, 30000, 60 | JaasUtils.isZkSecurityEnabled()); 61 | for (TopicDefinition topicDefinition : topicDefinitions) { 62 | String name = topicDefinition.getName(); 63 | log.info("Creating topic {}", name); 64 | AdminUtils.createTopic(zkUtils, 65 | name, 66 | topicDefinition.getPartitions(), 67 | topicDefinition.getReplicationFactor(), 68 | topicDefinition.getProperties()); 69 | } 70 | } 71 | } 72 | 73 | @Override 74 | protected void after() { 75 | kafkaServer.shutdown(); 76 | kafkaServer.awaitShutdown(); 77 | 78 | try { 79 | log.info("Deleting {}", logDirectory); 80 | FileUtils.deleteDirectory(logDirectory); 81 | } catch (IOException e) { 82 | throw new RuntimeException(e); 83 | } 84 | } 85 | 86 | } 87 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/TopicBuilder.java:
-------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | /** 4 | * @author jkorab 5 | */ 6 | public class TopicBuilder { 7 | private BrokerBuilder parent; 8 | private TopicDefinition topicDefinition; 9 | 10 | TopicBuilder(BrokerBuilder parent, String name) { 11 | this.parent = parent; 12 | this.topicDefinition = new TopicDefinition(); 13 | topicDefinition.setName(name); 14 | } 15 | 16 | public TopicBuilder replicationFactor(int replicationFactor) { 17 | topicDefinition.setReplicationFactor(replicationFactor); 18 | return this; 19 | } 20 | 21 | public TopicBuilder partitions(int partitions) { 22 | topicDefinition.setPartitions(partitions); 23 | return this; 24 | } 25 | 26 | public TopicBuilder property(Object key, Object value) { 27 | topicDefinition.getProperties().put(key, value); 28 | return this; 29 | } 30 | 31 | public BrokerBuilder end() { 32 | parent.brokerDefinition.getTopicDefinitions().add(topicDefinition); 33 | return parent; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/TopicDefinition.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import java.util.Properties; 4 | 5 | /** 6 | * @author jkorab 7 | */ 8 | class TopicDefinition { 9 | private String name; 10 | private int partitions = 1; 11 | private int replicationFactor = 1; 12 | private Properties properties = new Properties(); 13 | 14 | public String getName() { 15 | return name; 16 | } 17 | 18 | public void setName(String name) { 19 | this.name = name; 20 | } 21 | 22 | public int getPartitions() { 23 | return partitions; 24 | } 25 | 26 | public void setPartitions(int partitions) { 27 | this.partitions = partitions; 28 | } 29 | 30 | public int getReplicationFactor() { 31 | return replicationFactor; 32 | } 33 | 34 | public void setReplicationFactor(int replicationFactor) { 35 | this.replicationFactor = replicationFactor; 36 | } 37 | 38 | public Properties getProperties() { 39 | return properties; 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/config/ConsumerConfigsBuilder.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl.config; 2 | 3 | import org.apache.commons.lang.Validate; 4 | import org.apache.kafka.clients.consumer.ConsumerConfig; 5 | import org.apache.kafka.clients.consumer.internals.PartitionAssignor; 6 | import org.apache.kafka.common.serialization.Deserializer; 7 | 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | /** 12 | * Utility class for building up Kafka consumer config maps. 
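 * Each builder method returns a new instance via the private copy constructor,
 * so builders are effectively immutable and a partially-built configuration
 * can safely be shared between tests as a template.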
13 | * @author jkorab 14 | */ 15 | public class ConsumerConfigsBuilder { 16 | 17 | private final Map consumerConfigs; 18 | 19 | public ConsumerConfigsBuilder() { 20 | consumerConfigs = new HashMap<>(); 21 | } 22 | 23 | // Copy constructor 24 | private ConsumerConfigsBuilder(ConsumerConfigsBuilder builder, String key, Object value) { 25 | consumerConfigs = new HashMap<>(); 26 | consumerConfigs.putAll(builder.consumerConfigs); 27 | consumerConfigs.put(key, value); 28 | } 29 | 30 | public ConsumerConfigsBuilder bootstrapServers(String bootstrapServersConfig) { 31 | return new ConsumerConfigsBuilder(this, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServersConfig); 32 | } 33 | 34 | public ConsumerConfigsBuilder groupId(String groupId) { 35 | return new ConsumerConfigsBuilder(this, ConsumerConfig.GROUP_ID_CONFIG, groupId); 36 | } 37 | 38 | public ConsumerConfigsBuilder autoCommitIntervalMs(long autoCommitIntervalMs) { 39 | return new ConsumerConfigsBuilder(this, ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, Long.toString(autoCommitIntervalMs)); 40 | } 41 | 42 | public ConsumerConfigsBuilder enableAutoCommit(boolean enableAutoCommit) { 43 | return new ConsumerConfigsBuilder(this, ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.toString(enableAutoCommit)); 44 | } 45 | 46 | public ConsumerConfigsBuilder sessionTimeoutMs(long sessionTimeoutMs) { 47 | return new ConsumerConfigsBuilder(this, ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Long.toString(sessionTimeoutMs)); 48 | } 49 | 50 | public Map build() { 51 | return consumerConfigs; 52 | } 53 | 54 | public ConsumerConfigsBuilder keyDeserializer(Class keyDeserializerClass) { 55 | Validate.notNull(keyDeserializerClass, "keyDeserializerClass is null"); 56 | return new ConsumerConfigsBuilder(this, ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass.getCanonicalName()); 57 | } 58 | 59 | public ConsumerConfigsBuilder valueDeserializer(Class valueDeserializerClass) { 60 | Validate.notNull(valueDeserializerClass, "valueDeserializerClass is null"); 61 | return new ConsumerConfigsBuilder(this, ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass.getCanonicalName()); 62 | } 63 | 64 | public ConsumerConfigsBuilder partitionAssignmentStrategy(Class partitionAssignmentStrategy) { 65 | return new ConsumerConfigsBuilder(this, ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partitionAssignmentStrategy.getCanonicalName()); 66 | } 67 | 68 | public enum OffsetReset { 69 | earliest, latest, none; 70 | } 71 | 72 | public ConsumerConfigsBuilder autoOffsetReset(OffsetReset offsetReset) { 73 | return new ConsumerConfigsBuilder(this, ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetReset.toString()); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/main/java/com/ameliant/tools/kafka/testdsl/config/ProducerConfigsBuilder.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl.config; 2 | 3 | 4 | import org.apache.kafka.clients.producer.ProducerConfig; 5 | 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | 9 | /** 10 | * Utility class for building up Kafka producer config maps. 
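 * Immutable in the same way as ConsumerConfigsBuilder: every builder method
 * returns a copy, so a common base producer configuration can be reused.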
11 | * @author jkorab 12 | */ 13 | public class ProducerConfigsBuilder { 14 | 15 | private final Map producerConfigs; 16 | 17 | public ProducerConfigsBuilder() { 18 | producerConfigs = new HashMap<>(); 19 | } 20 | 21 | // Copy constructor 22 | private ProducerConfigsBuilder(ProducerConfigsBuilder builder, String key, Object value) { 23 | producerConfigs = new HashMap<>(); 24 | producerConfigs.putAll(builder.producerConfigs); 25 | producerConfigs.put(key, value); 26 | } 27 | 28 | public ProducerConfigsBuilder bootstrapServers(String bootstrapServers) { 29 | return new ProducerConfigsBuilder(this, ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 30 | } 31 | 32 | public enum RequestRequiredAcks { 33 | noAck(0), 34 | ackFromLeader(1), 35 | ackFromInSyncReplicas(-1); 36 | 37 | private int flag; 38 | 39 | RequestRequiredAcks(int flag) { 40 | this.flag = flag; 41 | } 42 | 43 | public int getFlag() { 44 | return flag; 45 | } 46 | } 47 | 48 | public ProducerConfigsBuilder requestRequiredAcks(RequestRequiredAcks requestRequiredAcks) { 49 | // TODO check if still valid 50 | return new ProducerConfigsBuilder(this, "request.required.acks", requestRequiredAcks.getFlag()); 51 | } 52 | 53 | public enum ProducerType { 54 | sync, async; 55 | } 56 | 57 | public ProducerConfigsBuilder producerType(ProducerType producerType) { 58 | // TODO check if still valid 59 | return new ProducerConfigsBuilder(this, "producer.type", producerType.toString()); 60 | } 61 | 62 | public ProducerConfigsBuilder valueSerializer(Class serializerClass) { 63 | return new ProducerConfigsBuilder(this, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, serializerClass.getCanonicalName()); 64 | } 65 | 66 | public ProducerConfigsBuilder keySerializer(Class serializerClass) { 67 | return new ProducerConfigsBuilder(this, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, serializerClass.getCanonicalName()); 68 | } 69 | 70 | public ProducerConfigsBuilder batchSize(int batchSize) { 71 | return new ProducerConfigsBuilder(this, ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(batchSize)); 72 | } 73 | 74 | public Map build() { 75 | return producerConfigs; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/test/java/com/ameliant/tools/kafka/testdsl/EmbeddedKafkaBrokerTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 4 | import org.junit.Assert; 5 | import org.junit.Rule; 6 | import org.junit.Test; 7 | 8 | /** 9 | * @author jkorab 10 | */ 11 | public class EmbeddedKafkaBrokerTest { 12 | 13 | @Rule 14 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 15 | 16 | @Rule 17 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 18 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 19 | .build(); 20 | 21 | @Test 22 | public void testLifecycle() { 23 | Assert.assertTrue(broker.getPort() > 0); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /kafka/kafka-test-dsl/src/test/java/com/ameliant/tools/kafka/testdsl/EmbeddedKafkaBrokerWithTopicsTest.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.kafka.testdsl; 2 | 3 | import com.ameliant.tools.zookeeper.testdsl.EmbeddedZooKeeper; 4 | import org.junit.Assert; 5 | import org.junit.Rule; 6 | import org.junit.Test; 7 | 8 | /** 9 | * 
@author jkorab 10 | */ 11 | public class EmbeddedKafkaBrokerWithTopicsTest { 12 | 13 | @Rule 14 | public EmbeddedZooKeeper zooKeeper = new EmbeddedZooKeeper(); 15 | 16 | @Rule 17 | public EmbeddedKafkaBroker broker = EmbeddedKafkaBroker.builder() 18 | .zookeeperConnect("127.0.0.1:" + zooKeeper.getPort()) 19 | .topic("goat") 20 | .partitions(1) 21 | .replicationFactor(1) 22 | .property("flush.messages", "1") 23 | .end() 24 | .topic("cheese") 25 | .partitions(3) 26 | .end() 27 | .build(); 28 | 29 | @Test 30 | public void testLifecycle() { 31 | Assert.assertTrue(true); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /kafka/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | tools 7 | com.ameliant.tools 8 | 0.1.0-SNAPSHOT 9 | 10 | 4.0.0 11 | 12 | kafka 13 | ${base-name} :: ${project.artifactId} 14 | pom 15 | 16 | 17 | kafka-parent 18 | kafka-test-dsl 19 | kafka-perf-tool 20 | kafka-listener 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /parent/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 4.0.0 7 | com.ameliant.tools 8 | parent 9 | 0.1.0-SNAPSHOT 10 | 11 | ${base-name} :: ${project.artifactId} 12 | pom 13 | 14 | 15 | Ameliant Tools 16 | 17 | UTF-8 18 | 0.9.0.0 19 | 2.6 20 | 1.7.12 21 | 4.12 22 | 1.3 23 | 1.3.2 24 | 1.3.1 25 | 2.4.3 26 | 27 | 3.4.6 28 | 1.2.16 29 | 0.9.7 30 | 31 | 3.1 32 | 1.1 33 | 2.4.2 34 | 35 | 36 | 37 | 38 | 39 | commons-lang 40 | commons-lang 41 | ${commons-lang-version} 42 | 43 | 44 | 45 | junit 46 | junit 47 | ${junit-version} 48 | 49 | 50 | org.hamcrest 51 | hamcrest-all 52 | ${hamcrest-version} 53 | 54 | 55 | org.apache.commons 56 | commons-io 57 | ${commons-io-version} 58 | 59 | 60 | org.slf4j 61 | slf4j-api 62 | ${slf4j-version} 63 | 64 | 65 | org.slf4j 66 | slf4j-log4j12 67 | ${slf4j-version} 68 | 69 | 70 | log4j 71 | log4j 72 | ${log4j-version} 73 | 74 | 75 | 76 | org.apache.zookeeper 77 | zookeeper 78 | ${zookeeper-version} 79 | 80 | 81 | 82 | 83 | 84 | 85 | org.slf4j 86 | slf4j-api 87 | 88 | 89 | org.slf4j 90 | slf4j-log4j12 91 | test 92 | 93 | 94 | log4j 95 | log4j 96 | test 97 | 98 | 99 | 100 | 101 | install 102 | 103 | 104 | 105 | org.apache.maven.plugins 106 | maven-compiler-plugin 107 | ${maven-compiler-plugin-version} 108 | 109 | 1.8 110 | 1.8 111 | true 112 | 113 | 114 | 115 | 116 | org.apache.maven.plugins 117 | maven-enforcer-plugin 118 | ${maven-enforcer-plugin-version} 119 | 120 | 121 | enforce 122 | 123 | 124 | 125 | 126 | 127 | 128 | enforce 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.ameliant.tools 6 | tools 7 | 0.1.0-SNAPSHOT 8 | pom 9 | 10 | ${base-name} 11 | 12 | 13 | Ameliant Tools 14 | 15 | 16 | 17 | parent 18 | tools-support 19 | zookeeper 20 | kafka 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /tools-support/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | com.ameliant.tools 7 | parent 8 | 0.1.0-SNAPSHOT 9 | ../parent 10 | 11 | 4.0.0 12 | 13 | tools-support 14 | ${base-name} :: ${project.artifactId} 15 | 16 | 17 | 18 | commons-lang 19 | commons-lang 20 | 21 | 22 | org.slf4j 23 | slf4j-api 24 | 25 | 26 | 27 | 
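The two classes in this module underpin the test DSLs above: ``AvailablePortFinder`` reserves a free port for each embedded server, and ``DirectoryUtils`` names throwaway storage directories under the system temp directory. Their typical combination, as seen in ``EmbeddedZooKeeper`` and ``EmbeddedKafkaBroker``:

```java
// Reserve a dynamic port for an embedded server.
int port = AvailablePortFinder.getNextAvailable();

// Build a handle to a uniquely-named scratch directory under java.io.tmpdir,
// e.g. Ab3xK9qZ-kafka-log; the caller creates and later deletes it.
File logDir = DirectoryUtils.tempDir(DirectoryUtils.perTest("kafka-log"));
```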
-------------------------------------------------------------------------------- /tools-support/src/main/java/com/ameliant/tools/support/AvailablePortFinder.java: -------------------------------------------------------------------------------- 1 | package com.ameliant.tools.support; 2 | 3 | /** 4 | * Licensed to the Apache Software Foundation (ASF) under one or more 5 | * contributor license agreements. See the NOTICE file distributed with 6 | * this work for additional information regarding copyright ownership. 7 | * The ASF licenses this file to You under the Apache License, Version 2.0 8 | * (the "License"); you may not use this file except in compliance with 9 | * the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | 20 | import org.slf4j.Logger; 21 | import org.slf4j.LoggerFactory; 22 | 23 | import java.io.IOException; 24 | import java.net.DatagramSocket; 25 | import java.net.ServerSocket; 26 | import java.util.NoSuchElementException; 27 | import java.util.concurrent.atomic.AtomicInteger; 28 | 29 | /** 30 | * Finds currently available server ports. 31 | * Repackaged from camel-test. 32 | */ 33 | public final class AvailablePortFinder { 34 | 35 | /** 36 | * The minimum server currentMinPort number for IPv4. 37 | * Set at 1100 to avoid returning privileged currentMinPort numbers. 38 | */ 39 | public static final int MIN_PORT_NUMBER = 1100; 40 | 41 | /** 42 | * The maximum server currentMinPort number for IPv4. 43 | */ 44 | public static final int MAX_PORT_NUMBER = 65535; 45 | 46 | private static final Logger LOG = LoggerFactory.getLogger(AvailablePortFinder.class); 47 | 48 | /** 49 | * We'll hold open the lowest port in this process 50 | * so parallel processes won't use the same block 51 | * of ports. They'll go up to the next block. 52 | */ 53 | private static final ServerSocket LOCK; 54 | 55 | /** 56 | * Incremented to the next lowest available port when getNextAvailable() is called. 57 | */ 58 | private static AtomicInteger currentMinPort = new AtomicInteger(MIN_PORT_NUMBER); 59 | 60 | /** 61 | * Creates a new instance. 62 | */ 63 | private AvailablePortFinder() { 64 | // Do nothing 65 | } 66 | 67 | static { 68 | int port = MIN_PORT_NUMBER; 69 | ServerSocket ss = null; 70 | 71 | while (ss == null) { 72 | try { 73 | ss = new ServerSocket(port); 74 | } catch (Exception e) { 75 | ss = null; 76 | port += 200; 77 | } 78 | } 79 | LOCK = ss; 80 | Runtime.getRuntime().addShutdownHook(new Thread() { 81 | public void run() { 82 | try { 83 | LOCK.close(); 84 | } catch (Exception ex) { 85 | //ignore 86 | } 87 | } 88 | }); 89 | currentMinPort.set(port + 1); 90 | } 91 | 92 | /** 93 | * Gets the next available port starting at the lowest number. This is the preferred 94 | * method to use. The port return is immediately marked in use and doesn't rely on the caller actually opening 95 | * the port. 
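 * The static LOCK socket keeps parallel JVMs that use this class in separate
 * port blocks, but an unrelated process can still bind the returned port
 * between this call and the caller's own bind.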
-------------------------------------------------------------------------------- /tools-support/src/main/java/com/ameliant/tools/support/DirectoryUtils.java: --------------------------------------------------------------------------------
1 | package com.ameliant.tools.support;
2 | 
3 | import org.apache.commons.lang.RandomStringUtils;
4 | 
5 | import java.io.File;
6 | import static java.io.File.separator;
7 | 
8 | /**
9 |  * @author jkorab
10 |  */
11 | public class DirectoryUtils {
12 | 
13 |     private static String ioTempDir = System.getProperty("java.io.tmpdir");
14 | 
15 |     private static String tempDirString(String suffix) {
16 |         return ioTempDir + separator + suffix;
17 |     }
18 | 
19 |     public static String perTest(String directory) {
20 |         return RandomStringUtils.randomAlphanumeric(8) + "-" + directory;
21 |     }
22 | 
23 |     public static File tempDir(String suffix) {
24 |         return new File(tempDirString(suffix));
25 |     }
26 | 
27 |     public static File locateDirectory(String path) {
28 |         if (path == null) {
29 |             return null;
30 |         }
31 |         File dir = new File(path);
32 |         if (dir.exists() && dir.isDirectory()) {
33 |             return dir;
34 |         }
35 |         // strip the leading path segment and retry; give up once no separator remains,
36 |         // rather than recursing forever on a path that starts with '/'
37 |         int slash = path.indexOf('/');
38 |         return (slash < 0) ? null : locateDirectory(path.substring(slash + 1));
39 |     }
40 | 
41 | }
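The pairing of `perTest()` and `tempDir()` above is how the test rules in this repository build their scratch directories; a small illustrative sketch (`TempDirExample` is hypothetical):

```java
import com.ameliant.tools.support.DirectoryUtils;

import java.io.File;

public class TempDirExample {
    public static void main(String[] args) {
        // yields something like ${java.io.tmpdir}/aB3xY9Qz-zk-snapshot;
        // the random 8-character prefix keeps parallel test runs from colliding
        File snapshotDir = DirectoryUtils.tempDir(DirectoryUtils.perTest("zk-snapshot"));
        System.out.println(snapshotDir.getAbsolutePath());
    }
}
```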
-------------------------------------------------------------------------------- /zookeeper/pom.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <parent>
6 |         <artifactId>tools</artifactId>
7 |         <groupId>com.ameliant.tools</groupId>
8 |         <version>0.1.0-SNAPSHOT</version>
9 |     </parent>
10 |     <modelVersion>4.0.0</modelVersion>
11 | 
12 |     <artifactId>zookeeper</artifactId>
13 |     <name>${base-name} :: ${project.artifactId}</name>
14 |     <packaging>pom</packaging>
15 | 
16 |     <modules>
17 |         <module>zookeeper-test-dsl</module>
18 |     </modules>
19 | </project>
-------------------------------------------------------------------------------- /zookeeper/zookeeper-test-dsl/README.md: --------------------------------------------------------------------------------
1 | Utility library for embedding a ZooKeeper instance in a JUnit test.
2 | 
3 | Before each JUnit ``@Test`` method, the library spins up a ZooKeeper instance on a dynamically-determined port,
4 | with storage in a dynamically created directory located within ``System.getProperty("java.io.tmpdir")``. At the end of
5 | each test, the instance is shut down and the directory deleted.
6 | 
7 | Sample usage:
8 | 
9 | ```java
10 | 
11 | public class MyTest {
12 | 
13 |     @Rule
14 |     public EmbeddedZooKeeper zookeeper = new EmbeddedZooKeeper();
15 | 
16 |     @Test
17 |     public void testPortAssignment() {
18 |         assertThat(zookeeper.getPort(), greaterThan(0));
19 |     }
20 | 
21 | }
22 | 
23 | ```
-------------------------------------------------------------------------------- /zookeeper/zookeeper-test-dsl/pom.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <parent>
6 |         <groupId>com.ameliant.tools</groupId>
7 |         <artifactId>parent</artifactId>
8 |         <version>0.1.0-SNAPSHOT</version>
9 |         <relativePath>../../parent/pom.xml</relativePath>
10 |     </parent>
11 |     <modelVersion>4.0.0</modelVersion>
12 | 
13 |     <artifactId>zookeeper-test-dsl</artifactId>
14 |     <name>${base-name} :: ${project.artifactId}</name>
15 | 
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>com.ameliant.tools</groupId>
19 |             <artifactId>tools-support</artifactId>
20 |             <version>0.1.0-SNAPSHOT</version>
21 |         </dependency>
22 |         <dependency>
23 |             <groupId>junit</groupId>
24 |             <artifactId>junit</artifactId>
25 |         </dependency>
26 |         <dependency>
27 |             <groupId>org.apache.zookeeper</groupId>
28 |             <artifactId>zookeeper</artifactId>
29 |         </dependency>
30 |         <dependency>
31 |             <groupId>org.apache.commons</groupId>
32 |             <artifactId>commons-io</artifactId>
33 |         </dependency>
34 |         <dependency>
35 |             <groupId>org.hamcrest</groupId>
36 |             <artifactId>hamcrest-all</artifactId>
37 |             <scope>test</scope>
38 |         </dependency>
39 |     </dependencies>
40 | </project>
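The embedded instance serves real client traffic, so a test can go further than the port assertion in the README above. A hedged sketch of a znode round-trip, assuming the stock `org.apache.zookeeper.ZooKeeper` client from the dependency declared above (the session timeout and znode path are arbitrary; production code would also wait for the `SyncConnected` event before issuing requests):

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.junit.Rule;
import org.junit.Test;

import static org.junit.Assert.assertArrayEquals;

public class ZNodeRoundTripTest {

    @Rule
    public EmbeddedZooKeeper zookeeper = new EmbeddedZooKeeper();

    @Test
    public void testZNodeRoundTrip() throws Exception {
        // connect to the embedded instance on its dynamically-assigned port
        ZooKeeper client = new ZooKeeper("127.0.0.1:" + zookeeper.getPort(), 5000, event -> {});
        try {
            byte[] payload = "hello".getBytes();
            client.create("/greeting", payload, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            assertArrayEquals(payload, client.getData("/greeting", false, null));
        } finally {
            client.close();
        }
    }
}
```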
-------------------------------------------------------------------------------- /zookeeper/zookeeper-test-dsl/src/main/java/com/ameliant/tools/zookeeper/testdsl/EmbeddedZooKeeper.java: --------------------------------------------------------------------------------
1 | package com.ameliant.tools.zookeeper.testdsl;
2 | 
3 | import static com.ameliant.tools.support.DirectoryUtils.*;
4 | import com.ameliant.tools.support.AvailablePortFinder;
5 | import org.apache.commons.io.FileUtils;
6 | import org.apache.zookeeper.server.NIOServerCnxnFactory;
7 | import org.apache.zookeeper.server.ServerCnxnFactory;
8 | import org.apache.zookeeper.server.ZooKeeperServer;
9 | import org.junit.rules.ExternalResource;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | 
13 | import java.io.File;
14 | import java.io.IOException;
15 | import java.net.InetSocketAddress;
16 | 
17 | /**
18 |  * @author jkorab
19 |  */
20 | public class EmbeddedZooKeeper extends ExternalResource {
21 | 
22 |     private final Logger log = LoggerFactory.getLogger(this.getClass());
23 | 
24 |     private final int port;
25 |     private ZooKeeperServer zooKeeperServer;
26 |     private ServerCnxnFactory cnxnFactory;
27 |     private File snapshotDir;
28 |     private File logDir;
29 | 
30 |     public EmbeddedZooKeeper() {
31 |         // the port is allocated eagerly so that it can be read before the rule starts
32 |         this.port = AvailablePortFinder.getNextAvailable();
33 |     }
34 | 
35 |     public int getPort() {
36 |         return port;
37 |     }
38 | 
39 |     @Override
40 |     protected void before() throws Throwable {
41 |         snapshotDir = tempDir(perTest("zk-snapshot"));
42 |         logDir = tempDir(perTest("zk-log"));
43 |         log.info("Setting up ZK Server with snapshotDir:{}, logDir:{}", snapshotDir, logDir);
44 | 
45 |         int tickTime = 500;
46 |         // let any IOException propagate and fail the test; swallowing it here
47 |         // would leave the rule half-started
48 |         zooKeeperServer = new ZooKeeperServer(snapshotDir, logDir, tickTime);
49 |         cnxnFactory = new NIOServerCnxnFactory();
50 |         cnxnFactory.configure(new InetSocketAddress("127.0.0.1", port), 0);
51 |         cnxnFactory.startup(zooKeeperServer);
52 |     }
53 | 
54 |     @Override
55 |     protected void after() {
56 |         // guard against a failed before(), which leaves these fields null
57 |         if (cnxnFactory != null) {
58 |             cnxnFactory.shutdown();
59 |         }
60 |         if (zooKeeperServer != null) {
61 |             zooKeeperServer.shutdown();
62 |         }
63 | 
64 |         try {
65 |             log.info("Deleting {}", snapshotDir);
66 |             FileUtils.deleteDirectory(snapshotDir);
67 |             log.info("Deleting {}", logDir);
68 |             FileUtils.deleteDirectory(logDir);
69 |         } catch (IOException e) {
70 |             throw new RuntimeException(e);
71 |         }
72 |     }
73 | }
-------------------------------------------------------------------------------- /zookeeper/zookeeper-test-dsl/src/test/java/com/ameliant/tools/zookeeper/testdsl/EmbeddedZookeeperTest.java: --------------------------------------------------------------------------------
1 | package com.ameliant.tools.zookeeper.testdsl;
2 | 
3 | import static org.junit.Assert.*;
4 | import static org.hamcrest.Matchers.*;
5 | import org.junit.Rule;
6 | import org.junit.Test;
7 | 
8 | /**
9 |  * @author jkorab
10 |  */
11 | public class EmbeddedZookeeperTest {
12 | 
13 |     @Rule
14 |     public EmbeddedZooKeeper zookeeper = new EmbeddedZooKeeper();
15 | 
16 |     @Test
17 |     public void testPortAssignment() {
18 |         assertThat(zookeeper.getPort(), greaterThan(0));
19 |     }
20 | 
21 | }
--------------------------------------------------------------------------------