messageJsonRepresentations =
45 | Arrays.asList(
46 | RestAssured.given()
47 | .get(JSON_SERVER_URL + "/messages")
48 | .then()
49 | .statusCode(200)
50 | .extract()
51 | .as(MessageJsonRepresentation[].class));
52 | assertTrue(messageJsonRepresentations.size() > 0);
53 | }
54 |
55 | @Test
56 | public void test_data_pipeline_flow_successful() {
57 | JSON_SERVER_URL = "http://" + environment.getServiceHost("db-mock_1", 80) + ":" + environment.getServicePort("db-mock_1", 80);
58 | HTTP_PRODUCER_BASE_URL = "http://" + environment.getServiceHost("http-producer_1", 8080) + ":" + environment.getServicePort("http-producer_1", 8080);
59 |
60 | String id = UUID.randomUUID().toString();
61 | String from = UUID.randomUUID().toString();
62 | String to = UUID.randomUUID().toString();
63 | String text = UUID.randomUUID().toString();
64 |
65 | MessageJsonRepresentation messageJsonRepresentation =
66 | new MessageJsonRepresentation(id, from, to, text);
67 |
68 | RestAssured.given()
69 | .contentType(ContentType.JSON)
70 | .body(messageJsonRepresentation)
71 | .post(HTTP_PRODUCER_BASE_URL + "/messages")
72 | .then()
73 | .statusCode(202);
74 |
75 | await()
76 | .atMost(10, TimeUnit.SECONDS)
77 | .untilAsserted(
78 | () -> {
79 | MessageJsonRepresentation jsonRepresentation =
80 | RestAssured.given()
81 | .get(JSON_SERVER_URL + "/messages/" + id)
82 | .then()
83 | .extract()
84 | .as(MessageJsonRepresentation.class);
85 |
86 | assertNotNull(jsonRepresentation);
87 | });
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/e2e/src/test/java/no/sysco/testing/kafka/e2e/representation/MessageJsonRepresentation.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.e2e.representation;
2 |
3 | public class MessageJsonRepresentation {
4 |
5 | private String id;
6 | private String from;
7 | private String to;
8 | private String text;
9 |
10 | public MessageJsonRepresentation() {}
11 |
12 | public MessageJsonRepresentation(String id, String from, String to, String text) {
13 | this.id = id;
14 | this.from = from;
15 | this.to = to;
16 | this.text = text;
17 | }
18 |
19 | public String getId() {
20 | return id;
21 | }
22 |
23 | public void setId(String id) {
24 | this.id = id;
25 | }
26 |
27 | public String getFrom() {
28 | return from;
29 | }
30 |
31 | public void setFrom(String from) {
32 | this.from = from;
33 | }
34 |
35 | public String getTo() {
36 | return to;
37 | }
38 |
39 | public void setTo(String to) {
40 | this.to = to;
41 | }
42 |
43 | public String getText() {
44 | return text;
45 | }
46 |
47 | public void setText(String text) {
48 | this.text = text;
49 | }
50 |
51 | @Override
52 | public String toString() {
53 | return "MessageJsonRepresentation{"
54 | + "id='"
55 | + id
56 | + '\''
57 | + ", from='"
58 | + from
59 | + '\''
60 | + ", to='"
61 | + to
62 | + '\''
63 | + ", text='"
64 | + text
65 | + '\''
66 | + '}';
67 | }
68 |
69 | public String json() {
70 | return "{"
71 | + "\"id\":"
72 | + "\""
73 | + id
74 | + "\","
75 | + "\"from\":"
76 | + "\""
77 | + from
78 | + "\","
79 | + "\"to\":"
80 | + "\""
81 | + to
82 | + "\","
83 | + "\"text\":"
84 | + "\""
85 | + text
86 | + "\""
87 | + "}";
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/e2e/src/test/resources/json-server-database.json:
--------------------------------------------------------------------------------
1 | {
2 | "messages": [
3 | {
4 | "id": "1",
5 | "from": "user-id-1",
6 | "to": "user-id-2",
7 | "text": "hello world"
8 | }
9 | ]
10 | }
--------------------------------------------------------------------------------
/e2e/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO, stdout
2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
3 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
4 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p [%t] %m (%c)%n
--------------------------------------------------------------------------------
/embedded-cluster/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | kafka
8 | no.sysco.testing
9 | 1.0-SNAPSHOT
10 |
11 | embedded-cluster
12 |
13 |
14 | 2.9.0
15 |
16 |
17 |
18 |
19 | org.apache.avro
20 | avro
21 |
22 |
23 | io.confluent
24 | kafka-avro-serializer
25 |
26 |
27 | org.slf4j
28 | slf4j-log4j12
29 |
30 |
31 | org.slf4j
32 | slf4j-api
33 |
34 |
35 | junit
36 | junit
37 | compile
38 |
39 |
40 | org.apache.kafka
41 | kafka_2.11
42 | ${kafka.apis.version}
43 |
44 |
45 | org.apache.curator
46 | curator-test
47 | ${curator.test.version}
48 |
49 |
50 | org.apache.kafka
51 | kafka_2.11
52 | test
53 | ${kafka.apis.version}
54 |
55 |
56 | org.apache.kafka
57 | kafka-clients
58 | ${kafka.apis.version}
59 | test
60 |
61 |
62 | io.confluent
63 | kafka-schema-registry
64 | ${confluent.platform.version}
65 |
66 |
67 | io.confluent
68 | kafka-schema-registry
69 | ${confluent.platform.version}
70 |
71 | tests
72 |
73 |
74 |
--------------------------------------------------------------------------------
/embedded-cluster/readme.md:
--------------------------------------------------------------------------------
1 | # Description
2 | This module contains a copy of code from [kafka-streams-examples](https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java).
3 | There is an open issue about extracting the embedded Kafka cluster into a reusable test dependency (see Related Issues below).
4 |
5 | `EmbeddedSingleNodeKafkaCluster` contains: a ZooKeeper instance, a Kafka broker, and a Schema Registry.
6 |
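A minimal usage sketch (assuming JUnit 4; the class and topic names here are chosen for illustration):

```java
import java.util.Properties;
import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster;
import org.junit.ClassRule;
import org.junit.Test;

public class EmbeddedClusterSketchTest {

  // Starts ZooKeeper, one Kafka broker and Schema Registry before the test class,
  // and stops them afterwards (the cluster is a JUnit ExternalResource).
  @ClassRule
  public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();

  @Test
  public void example() {
    CLUSTER.createTopic("example-topic"); // illustrative topic name

    Properties props = new Properties();
    props.put("bootstrap.servers", CLUSTER.bootstrapServers());
    props.put("schema.registry.url", CLUSTER.schemaRegistryUrl());
    // ... configure serializers and run a KafkaProducer / KafkaConsumer against the embedded cluster
  }
}
```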
7 | ## Important notes
8 | * Schema Registry calls some deprecated methods at startup; check the logs
9 | * Currently supports Java 8 only, because reflection is used under the hood
10 |
11 | ## Related Issues
12 | * [issue-#26: Encapsulate EmbeddedSingleNodeKafkaCluster in a seperately-available maven/gradle/sbt dep](https://github.com/confluentinc/kafka-streams-examples/issues/26)
13 | * [KIP-139: Kafka TestKit library](https://cwiki.apache.org/confluence/display/KAFKA/KIP-139%3A+Kafka+TestKit+library)
--------------------------------------------------------------------------------
/embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/EmbeddedSingleNodeKafkaCluster.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.embedded;
2 |
3 | import io.confluent.kafka.schemaregistry.RestApp;
4 | import io.confluent.kafka.schemaregistry.avro.AvroCompatibilityLevel;
5 | import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
6 | import java.io.IOException;
7 | import java.util.Collections;
8 | import java.util.HashSet;
9 | import java.util.Map;
10 | import java.util.Properties;
11 | import java.util.Set;
12 | import kafka.server.KafkaConfig$;
13 | import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
14 | import org.apache.kafka.test.TestCondition;
15 | import org.apache.kafka.test.TestUtils;
16 | import org.junit.rules.ExternalResource;
17 | import org.slf4j.Logger;
18 | import org.slf4j.LoggerFactory;
19 | import scala.collection.JavaConverters;
20 |
21 | // https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java
22 |
23 | public class EmbeddedSingleNodeKafkaCluster extends ExternalResource {
24 | private static final Logger log = LoggerFactory.getLogger(EmbeddedSingleNodeKafkaCluster.class);
25 | private static final int DEFAULT_BROKER_PORT = 0; // 0 results in a random port being selected
26 | private static final String KAFKA_SCHEMAS_TOPIC = "_schemas";
27 | private static final String AVRO_COMPATIBILITY_TYPE = AvroCompatibilityLevel.NONE.name;
28 |
29 | private static final String KAFKASTORE_OPERATION_TIMEOUT_MS = "60000";
30 | private static final String KAFKASTORE_DEBUG = "true";
31 | private static final String KAFKASTORE_INIT_TIMEOUT = "90000";
32 |
33 | private ZooKeeperEmbedded zookeeper;
34 | private KafkaEmbedded broker;
35 | private RestApp schemaRegistry;
36 | private final Properties brokerConfig;
37 | private boolean running;
38 |
39 | /**
40 | * Creates and starts the cluster.
41 | */
42 | public EmbeddedSingleNodeKafkaCluster() {
43 | this(new Properties());
44 | }
45 |
46 | /**
47 | * Creates and starts the cluster.
48 | *
49 | * @param brokerConfig Additional broker configuration settings.
50 | */
51 | public EmbeddedSingleNodeKafkaCluster(final Properties brokerConfig) {
52 | this.brokerConfig = new Properties();
53 | this.brokerConfig.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS);
54 | this.brokerConfig.putAll(brokerConfig);
55 | }
56 |
57 | /**
58 | * Creates and starts the cluster.
59 | */
60 | public void start() throws Exception {
61 | log.debug("Initiating embedded Kafka cluster startup");
62 | log.debug("Starting a ZooKeeper instance...");
63 | zookeeper = new ZooKeeperEmbedded();
64 | log.debug("ZooKeeper instance is running at {}", zookeeper.connectString());
65 |
66 | final Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper);
67 | log.debug("Starting a Kafka instance on port {} ...",
68 | effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp()));
69 | broker = new KafkaEmbedded(effectiveBrokerConfig);
70 | log.debug("Kafka instance is running at {}, connected to ZooKeeper at {}",
71 | broker.brokerList(), broker.zookeeperConnect());
72 |
73 | final Properties schemaRegistryProps = new Properties();
74 |
75 | schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS);
76 | schemaRegistryProps.put(SchemaRegistryConfig.DEBUG_CONFIG, KAFKASTORE_DEBUG);
77 | schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_INIT_TIMEOUT_CONFIG, KAFKASTORE_INIT_TIMEOUT);
78 |
79 | schemaRegistry = new RestApp(0, zookeeperConnect(), KAFKA_SCHEMAS_TOPIC, AVRO_COMPATIBILITY_TYPE, schemaRegistryProps);
80 | schemaRegistry.start();
81 | running = true;
82 | }
83 |
84 | private Properties effectiveBrokerConfigFrom(final Properties brokerConfig, final ZooKeeperEmbedded zookeeper) {
85 | final Properties effectiveConfig = new Properties();
86 | effectiveConfig.putAll(brokerConfig);
87 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zookeeper.connectString());
88 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkSessionTimeoutMsProp(), 30 * 1000);
89 | effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), DEFAULT_BROKER_PORT);
90 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectionTimeoutMsProp(), 60 * 1000);
91 | effectiveConfig.put(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), true);
92 | effectiveConfig.put(KafkaConfig$.MODULE$.LogCleanerDedupeBufferSizeProp(), 2 * 1024 * 1024L);
93 | effectiveConfig.put(KafkaConfig$.MODULE$.GroupMinSessionTimeoutMsProp(), 0);
94 | effectiveConfig.put(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), (short) 1);
95 | effectiveConfig.put(KafkaConfig$.MODULE$.OffsetsTopicPartitionsProp(), 1);
96 | effectiveConfig.put(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), true);
97 | return effectiveConfig;
98 | }
99 |
100 | @Override
101 | protected void before() throws Exception {
102 | start();
103 | }
104 |
105 | @Override
106 | protected void after() {
107 | stop();
108 | }
109 |
110 | /**
111 | * Stops the cluster.
112 | */
113 | public void stop() {
114 | log.info("Stopping Confluent");
115 | try {
116 | try {
117 | if (schemaRegistry != null) {
118 | schemaRegistry.stop();
119 | }
120 | } catch (final Exception fatal) {
121 | throw new RuntimeException(fatal);
122 | }
123 | if (broker != null) {
124 | broker.stop();
125 | }
126 | try {
127 | if (zookeeper != null) {
128 | zookeeper.stop();
129 | }
130 | } catch (final IOException fatal) {
131 | throw new RuntimeException(fatal);
132 | }
133 | } finally {
134 | running = false;
135 | }
136 | log.info("Confluent Stopped");
137 | }
138 |
139 | /**
140 | * This cluster's `bootstrap.servers` value. Example: `127.0.0.1:9092`.
141 | *
142 | * You can use this to tell Kafka Streams applications, Kafka producers, and Kafka consumers (new
143 | * consumer API) how to connect to this cluster.
144 | */
145 | public String bootstrapServers() {
146 | return broker.brokerList();
147 | }
148 |
149 | /**
150 | * This cluster's ZK connection string aka `zookeeper.connect` in `hostnameOrIp:port` format.
151 | * Example: `127.0.0.1:2181`.
152 | *
153 | * You can use this to e.g. tell Kafka consumers (old consumer API) how to connect to this
154 | * cluster.
155 | */
156 | public String zookeeperConnect() {
157 | return zookeeper.connectString();
158 | }
159 |
160 | /**
161 | * The "schema.registry.url" setting of the schema registry instance.
162 | */
163 | public String schemaRegistryUrl() {
164 | return schemaRegistry.restConnect;
165 | }
166 |
167 | /**
168 | * Creates a Kafka topic with 1 partition and a replication factor of 1.
169 | *
170 | * @param topic The name of the topic.
171 | */
172 | public void createTopic(final String topic) {
173 | createTopic(topic, 1, (short) 1, Collections.emptyMap());
174 | }
175 |
176 | /**
177 | * Creates a Kafka topic with the given parameters.
178 | *
179 | * @param topic The name of the topic.
180 | * @param partitions The number of partitions for this topic.
181 | * @param replication The replication factor for (the partitions of) this topic.
182 | */
183 | public void createTopic(final String topic, final int partitions, final short replication) {
184 | createTopic(topic, partitions, replication, Collections.emptyMap());
185 | }
186 |
187 | /**
188 | * Creates a Kafka topic with the given parameters.
189 | *
190 | * @param topic The name of the topic.
191 | * @param partitions The number of partitions for this topic.
192 | * @param replication The replication factor for (partitions of) this topic.
193 | * @param topicConfig Additional topic-level configuration settings.
194 | */
195 | public void createTopic(final String topic,
196 | final int partitions,
197 | final short replication,
198 | final Map<String, String> topicConfig) {
199 | broker.createTopic(topic, partitions, replication, topicConfig);
200 | }
201 |
202 | /**
203 | * Deletes multiple topics and blocks until all topics got deleted.
204 | *
205 | * @param timeoutMs the max time to wait for the topics to be deleted (does not block if {@code <= 0})
206 | * @param topics the name of the topics
207 | */
208 | public void deleteTopicsAndWait(final long timeoutMs, final String... topics) throws InterruptedException {
209 | for (final String topic : topics) {
210 | try {
211 | broker.deleteTopic(topic);
212 | } catch (final UnknownTopicOrPartitionException expected) {
213 | // indicates (idempotent) success
214 | }
215 | }
216 |
217 | if (timeoutMs > 0) {
218 | TestUtils.waitForCondition(new TopicsDeletedCondition(topics), timeoutMs, "Topics not deleted after " + timeoutMs + " milliseconds.");
219 | }
220 | }
221 |
222 | public boolean isRunning() {
223 | return running;
224 | }
225 |
226 | private final class TopicsDeletedCondition implements TestCondition {
227 | final Set<String> deletedTopics = new HashSet<>();
228 |
229 | private TopicsDeletedCondition(final String... topics) {
230 | Collections.addAll(deletedTopics, topics);
231 | }
232 |
233 | @Override
234 | public boolean conditionMet() {
235 | //TODO once KAFKA-6098 is fixed use AdminClient to verify topics have been deleted
236 | final Set<String> allTopics = new HashSet<>(
237 | JavaConverters.seqAsJavaListConverter(broker.kafkaServer().zkClient().getAllTopicsInCluster()).asJava());
238 | return !allTopics.removeAll(deletedTopics);
239 | }
240 | }
241 | }
242 |
--------------------------------------------------------------------------------
/embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/KafkaEmbedded.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.embedded;
2 |
3 | import java.io.File;
4 | import java.io.IOException;
5 | import java.util.Collections;
6 | import java.util.Map;
7 | import java.util.Properties;
8 | import java.util.concurrent.ExecutionException;
9 | import kafka.server.KafkaConfig;
10 | import kafka.server.KafkaConfig$;
11 | import kafka.server.KafkaServer;
12 | import kafka.utils.TestUtils;
13 | import org.apache.kafka.clients.admin.AdminClient;
14 | import org.apache.kafka.clients.admin.AdminClientConfig;
15 | import org.apache.kafka.clients.admin.NewTopic;
16 | import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
17 | import org.apache.kafka.common.network.ListenerName;
18 | import org.apache.kafka.common.security.auth.SecurityProtocol;
19 | import org.apache.kafka.common.utils.Time;
20 | import org.junit.rules.TemporaryFolder;
21 | import org.slf4j.Logger;
22 | import org.slf4j.LoggerFactory;
23 |
24 | /**
25 | * Runs an in-memory, "embedded" instance of a Kafka broker, which listens at `127.0.0.1:9092` by
26 | * default.
27 | *
28 | * Requires a running ZooKeeper instance to connect to. By default, it expects a ZooKeeper instance
29 | * running at `127.0.0.1:2181`. You can specify a different ZooKeeper instance by setting the
30 | * `zookeeper.connect` parameter in the broker's configuration.
31 | */
32 | public class KafkaEmbedded {
33 |
34 | private static final Logger log = LoggerFactory.getLogger(KafkaEmbedded.class);
35 |
36 | private static final String DEFAULT_ZK_CONNECT = "127.0.0.1:2181";
37 |
38 | private final Properties effectiveConfig;
39 | private final File logDir;
40 | private final TemporaryFolder tmpFolder;
41 | private final KafkaServer kafka;
42 |
43 | /**
44 | * https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java
45 | *
46 | * Creates and starts an embedded Kafka broker.
47 | *
48 | * @param config Broker configuration settings. Used to modify, for example, on which port the
49 | * broker should listen to. Note that you cannot change some settings such as
50 | * `log.dirs`, `port`.
51 | */
52 | public KafkaEmbedded(final Properties config) throws IOException {
53 | tmpFolder = new TemporaryFolder();
54 | tmpFolder.create();
55 | logDir = tmpFolder.newFolder();
56 | effectiveConfig = effectiveConfigFrom(config);
57 | final boolean loggingEnabled = true;
58 |
59 | final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled);
60 | log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
61 | logDir, zookeeperConnect());
62 | kafka = TestUtils.createServer(kafkaConfig, Time.SYSTEM);
63 | log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
64 | brokerList(), zookeeperConnect());
65 | }
66 |
67 | private Properties effectiveConfigFrom(final Properties initialConfig) throws IOException {
68 | final Properties effectiveConfig = new Properties();
69 | effectiveConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), 0);
70 | effectiveConfig.put(KafkaConfig$.MODULE$.HostNameProp(), "127.0.0.1");
71 | effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), "9092");
72 | effectiveConfig.put(KafkaConfig$.MODULE$.NumPartitionsProp(), 1);
73 | effectiveConfig.put(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), true);
74 | effectiveConfig.put(KafkaConfig$.MODULE$.MessageMaxBytesProp(), 1000000);
75 | effectiveConfig.put(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), true);
76 |
77 | effectiveConfig.putAll(initialConfig);
78 | effectiveConfig.setProperty(KafkaConfig$.MODULE$.LogDirProp(), logDir.getAbsolutePath());
79 | return effectiveConfig;
80 | }
81 |
82 | /**
83 | * This broker's `metadata.broker.list` value. Example: `127.0.0.1:9092`.
84 | *
85 | * You can use this to tell Kafka producers and consumers how to connect to this instance.
86 | */
87 | public String brokerList() {
88 | return String.join(":", kafka.config().hostName(), Integer.toString(kafka.boundPort(
89 | ListenerName.forSecurityProtocol(SecurityProtocol
90 | .PLAINTEXT))));
91 | }
92 |
93 |
94 | /**
95 | * The ZooKeeper connection string aka `zookeeper.connect`.
96 | */
97 | public String zookeeperConnect() {
98 | return effectiveConfig.getProperty("zookeeper.connect", DEFAULT_ZK_CONNECT);
99 | }
100 |
101 | /**
102 | * Stop the broker.
103 | */
104 | public void stop() {
105 | log.debug("Shutting down embedded Kafka broker at {} (with ZK ensemble at {}) ...",
106 | brokerList(), zookeeperConnect());
107 | kafka.shutdown();
108 | kafka.awaitShutdown();
109 | log.debug("Removing temp folder {} with logs.dir at {} ...", tmpFolder, logDir);
110 | tmpFolder.delete();
111 | log.debug("Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
112 | brokerList(), zookeeperConnect());
113 | }
114 |
115 | /**
116 | * Create a Kafka topic with 1 partition and a replication factor of 1.
117 | *
118 | * @param topic The name of the topic.
119 | */
120 | public void createTopic(final String topic) {
121 | createTopic(topic, 1, (short) 1, Collections.emptyMap());
122 | }
123 |
124 | /**
125 | * Create a Kafka topic with the given parameters.
126 | *
127 | * @param topic The name of the topic.
128 | * @param partitions The number of partitions for this topic.
129 | * @param replication The replication factor for (the partitions of) this topic.
130 | */
131 | public void createTopic(final String topic, final int partitions, final short replication) {
132 | createTopic(topic, partitions, replication, Collections.emptyMap());
133 | }
134 |
135 | /**
136 | * Create a Kafka topic with the given parameters.
137 | *
138 | * @param topic The name of the topic.
139 | * @param partitions The number of partitions for this topic.
140 | * @param replication The replication factor for (partitions of) this topic.
141 | * @param topicConfig Additional topic-level configuration settings.
142 | */
143 | public void createTopic(final String topic,
144 | final int partitions,
145 | final short replication,
146 | final Map<String, String> topicConfig) {
147 | log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
148 | topic, partitions, replication, topicConfig);
149 |
150 | final Properties properties = new Properties();
151 | properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList());
152 |
153 | try (final AdminClient adminClient = AdminClient.create(properties)) {
154 | final NewTopic newTopic = new NewTopic(topic, partitions, replication);
155 | newTopic.configs(topicConfig);
156 | adminClient.createTopics(Collections.singleton(newTopic)).all().get();
157 | } catch (final InterruptedException | ExecutionException fatal) {
158 | throw new RuntimeException(fatal);
159 | }
160 |
161 | }
162 |
163 | /**
164 | * Delete a Kafka topic.
165 | *
166 | * @param topic The name of the topic.
167 | */
168 | public void deleteTopic(final String topic) {
169 | log.debug("Deleting topic {}", topic);
170 | final Properties properties = new Properties();
171 | properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList());
172 |
173 | try (final AdminClient adminClient = AdminClient.create(properties)) {
174 | adminClient.deleteTopics(Collections.singleton(topic)).all().get();
175 | } catch (final InterruptedException e) {
176 | throw new RuntimeException(e);
177 | } catch (final ExecutionException e) {
178 | if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
179 | throw new RuntimeException(e);
180 | }
181 | }
182 | }
183 |
184 | KafkaServer kafkaServer() {
185 | return kafka;
186 | }
187 | }
188 |
--------------------------------------------------------------------------------
/embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/ZooKeeperEmbedded.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.embedded;
2 |
3 | import java.io.IOException;
4 | import org.apache.curator.test.TestingServer;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | /**
9 | * https://github.com/confluentinc/kafka-streams-examples/tree/5.2.0-post/src/test/java/io/confluent/examples/streams/zookeeper
10 | *
11 | * Runs an in-memory, "embedded" instance of a ZooKeeper server.
12 | *
13 | * The ZooKeeper server instance is automatically started when you create a new instance of this class.
14 | */
15 | public class ZooKeeperEmbedded {
16 |
17 | private static final Logger log = LoggerFactory.getLogger(ZooKeeperEmbedded.class);
18 |
19 | private final TestingServer server;
20 |
21 | /**
22 | * Creates and starts a ZooKeeper instance.
23 | *
24 | * @throws Exception
25 | */
26 | public ZooKeeperEmbedded() throws Exception {
27 | log.debug("Starting embedded ZooKeeper server...");
28 | this.server = new TestingServer();
29 | log.debug("Embedded ZooKeeper server at {} uses the temp directory at {}",
30 | server.getConnectString(), server.getTempDirectory());
31 | }
32 |
33 | public void stop() throws IOException {
34 | log.debug("Shutting down embedded ZooKeeper server at {} ...", server.getConnectString());
35 | server.close();
36 | log.debug("Shutdown of embedded ZooKeeper server at {} completed", server.getConnectString());
37 | }
38 |
39 | /**
40 | * The ZooKeeper connection string aka `zookeeper.connect` in `hostnameOrIp:port` format.
41 | * Example: `127.0.0.1:2181`.
42 | *
43 | * You can use this to e.g. tell Kafka brokers how to connect to this instance.
44 | */
45 | public String connectString() {
46 | return server.getConnectString();
47 | }
48 |
49 | /**
50 | * The hostname of the ZooKeeper instance. Example: `127.0.0.1`
51 | */
52 | public String hostname() {
53 | // "server:1:2:3" -> "server:1:2"
54 | return connectString().substring(0, connectString().lastIndexOf(':'));
55 | }
56 |
57 | }
58 |
--------------------------------------------------------------------------------
/mvnw:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # ----------------------------------------------------------------------------
3 | # Licensed to the Apache Software Foundation (ASF) under one
4 | # or more contributor license agreements. See the NOTICE file
5 | # distributed with this work for additional information
6 | # regarding copyright ownership. The ASF licenses this file
7 | # to you under the Apache License, Version 2.0 (the
8 | # "License"); you may not use this file except in compliance
9 | # with the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing,
14 | # software distributed under the License is distributed on an
15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | # KIND, either express or implied. See the License for the
17 | # specific language governing permissions and limitations
18 | # under the License.
19 | # ----------------------------------------------------------------------------
20 |
21 | # ----------------------------------------------------------------------------
22 | # Maven2 Start Up Batch script
23 | #
24 | # Required ENV vars:
25 | # ------------------
26 | # JAVA_HOME - location of a JDK home dir
27 | #
28 | # Optional ENV vars
29 | # -----------------
30 | # M2_HOME - location of maven2's installed home dir
31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven
32 | # e.g. to debug Maven itself, use
33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files
35 | # ----------------------------------------------------------------------------
36 |
37 | if [ -z "$MAVEN_SKIP_RC" ] ; then
38 |
39 | if [ -f /etc/mavenrc ] ; then
40 | . /etc/mavenrc
41 | fi
42 |
43 | if [ -f "$HOME/.mavenrc" ] ; then
44 | . "$HOME/.mavenrc"
45 | fi
46 |
47 | fi
48 |
49 | # OS specific support. $var _must_ be set to either true or false.
50 | cygwin=false;
51 | darwin=false;
52 | mingw=false
53 | case "`uname`" in
54 | CYGWIN*) cygwin=true ;;
55 | MINGW*) mingw=true;;
56 | Darwin*) darwin=true
57 | # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
58 | # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
59 | if [ -z "$JAVA_HOME" ]; then
60 | if [ -x "/usr/libexec/java_home" ]; then
61 | export JAVA_HOME="`/usr/libexec/java_home`"
62 | else
63 | export JAVA_HOME="/Library/Java/Home"
64 | fi
65 | fi
66 | ;;
67 | esac
68 |
69 | if [ -z "$JAVA_HOME" ] ; then
70 | if [ -r /etc/gentoo-release ] ; then
71 | JAVA_HOME=`java-config --jre-home`
72 | fi
73 | fi
74 |
75 | if [ -z "$M2_HOME" ] ; then
76 | ## resolve links - $0 may be a link to maven's home
77 | PRG="$0"
78 |
79 | # need this for relative symlinks
80 | while [ -h "$PRG" ] ; do
81 | ls=`ls -ld "$PRG"`
82 | link=`expr "$ls" : '.*-> \(.*\)$'`
83 | if expr "$link" : '/.*' > /dev/null; then
84 | PRG="$link"
85 | else
86 | PRG="`dirname "$PRG"`/$link"
87 | fi
88 | done
89 |
90 | saveddir=`pwd`
91 |
92 | M2_HOME=`dirname "$PRG"`/..
93 |
94 | # make it fully qualified
95 | M2_HOME=`cd "$M2_HOME" && pwd`
96 |
97 | cd "$saveddir"
98 | # echo Using m2 at $M2_HOME
99 | fi
100 |
101 | # For Cygwin, ensure paths are in UNIX format before anything is touched
102 | if $cygwin ; then
103 | [ -n "$M2_HOME" ] &&
104 | M2_HOME=`cygpath --unix "$M2_HOME"`
105 | [ -n "$JAVA_HOME" ] &&
106 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
107 | [ -n "$CLASSPATH" ] &&
108 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
109 | fi
110 |
111 | # For Mingw, ensure paths are in UNIX format before anything is touched
112 | if $mingw ; then
113 | [ -n "$M2_HOME" ] &&
114 | M2_HOME="`(cd "$M2_HOME"; pwd)`"
115 | [ -n "$JAVA_HOME" ] &&
116 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
117 | fi
118 |
119 | if [ -z "$JAVA_HOME" ]; then
120 | javaExecutable="`which javac`"
121 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
122 | # readlink(1) is not available as standard on Solaris 10.
123 | readLink=`which readlink`
124 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
125 | if $darwin ; then
126 | javaHome="`dirname \"$javaExecutable\"`"
127 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
128 | else
129 | javaExecutable="`readlink -f \"$javaExecutable\"`"
130 | fi
131 | javaHome="`dirname \"$javaExecutable\"`"
132 | javaHome=`expr "$javaHome" : '\(.*\)/bin'`
133 | JAVA_HOME="$javaHome"
134 | export JAVA_HOME
135 | fi
136 | fi
137 | fi
138 |
139 | if [ -z "$JAVACMD" ] ; then
140 | if [ -n "$JAVA_HOME" ] ; then
141 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
142 | # IBM's JDK on AIX uses strange locations for the executables
143 | JAVACMD="$JAVA_HOME/jre/sh/java"
144 | else
145 | JAVACMD="$JAVA_HOME/bin/java"
146 | fi
147 | else
148 | JAVACMD="`which java`"
149 | fi
150 | fi
151 |
152 | if [ ! -x "$JAVACMD" ] ; then
153 | echo "Error: JAVA_HOME is not defined correctly." >&2
154 | echo " We cannot execute $JAVACMD" >&2
155 | exit 1
156 | fi
157 |
158 | if [ -z "$JAVA_HOME" ] ; then
159 | echo "Warning: JAVA_HOME environment variable is not set."
160 | fi
161 |
162 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
163 |
164 | # traverses directory structure from process work directory to filesystem root
165 | # first directory with .mvn subdirectory is considered project base directory
166 | find_maven_basedir() {
167 |
168 | if [ -z "$1" ]
169 | then
170 | echo "Path not specified to find_maven_basedir"
171 | return 1
172 | fi
173 |
174 | basedir="$1"
175 | wdir="$1"
176 | while [ "$wdir" != '/' ] ; do
177 | if [ -d "$wdir"/.mvn ] ; then
178 | basedir=$wdir
179 | break
180 | fi
181 | # workaround for JBEAP-8937 (on Solaris 10/Sparc)
182 | if [ -d "${wdir}" ]; then
183 | wdir=`cd "$wdir/.."; pwd`
184 | fi
185 | # end of workaround
186 | done
187 | echo "${basedir}"
188 | }
189 |
190 | # concatenates all lines of a file
191 | concat_lines() {
192 | if [ -f "$1" ]; then
193 | echo "$(tr -s '\n' ' ' < "$1")"
194 | fi
195 | }
196 |
197 | BASE_DIR=`find_maven_basedir "$(pwd)"`
198 | if [ -z "$BASE_DIR" ]; then
199 | exit 1;
200 | fi
201 |
202 | ##########################################################################################
203 | # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
204 | # This allows using the maven wrapper in projects that prohibit checking in binary data.
205 | ##########################################################################################
206 | if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
207 | if [ "$MVNW_VERBOSE" = true ]; then
208 | echo "Found .mvn/wrapper/maven-wrapper.jar"
209 | fi
210 | else
211 | if [ "$MVNW_VERBOSE" = true ]; then
212 | echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
213 | fi
214 | if [ -n "$MVNW_REPOURL" ]; then
215 | jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar"
216 | else
217 | jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar"
218 | fi
219 | while IFS="=" read key value; do
220 | case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
221 | esac
222 | done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
223 | if [ "$MVNW_VERBOSE" = true ]; then
224 | echo "Downloading from: $jarUrl"
225 | fi
226 | wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
227 | if $cygwin; then
228 | wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
229 | fi
230 |
231 | if command -v wget > /dev/null; then
232 | if [ "$MVNW_VERBOSE" = true ]; then
233 | echo "Found wget ... using wget"
234 | fi
235 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
236 | wget "$jarUrl" -O "$wrapperJarPath"
237 | else
238 | wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
239 | fi
240 | elif command -v curl > /dev/null; then
241 | if [ "$MVNW_VERBOSE" = true ]; then
242 | echo "Found curl ... using curl"
243 | fi
244 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
245 | curl -o "$wrapperJarPath" "$jarUrl" -f
246 | else
247 | curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
248 | fi
249 |
250 | else
251 | if [ "$MVNW_VERBOSE" = true ]; then
252 | echo "Falling back to using Java to download"
253 | fi
254 | javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
255 | # For Cygwin, switch paths to Windows format before running javac
256 | if $cygwin; then
257 | javaClass=`cygpath --path --windows "$javaClass"`
258 | fi
259 | if [ -e "$javaClass" ]; then
260 | if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
261 | if [ "$MVNW_VERBOSE" = true ]; then
262 | echo " - Compiling MavenWrapperDownloader.java ..."
263 | fi
264 | # Compiling the Java class
265 | ("$JAVA_HOME/bin/javac" "$javaClass")
266 | fi
267 | if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
268 | # Running the downloader
269 | if [ "$MVNW_VERBOSE" = true ]; then
270 | echo " - Running MavenWrapperDownloader.java ..."
271 | fi
272 | ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
273 | fi
274 | fi
275 | fi
276 | fi
277 | ##########################################################################################
278 | # End of extension
279 | ##########################################################################################
280 |
281 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
282 | if [ "$MVNW_VERBOSE" = true ]; then
283 | echo $MAVEN_PROJECTBASEDIR
284 | fi
285 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
286 |
287 | # For Cygwin, switch paths to Windows format before running java
288 | if $cygwin; then
289 | [ -n "$M2_HOME" ] &&
290 | M2_HOME=`cygpath --path --windows "$M2_HOME"`
291 | [ -n "$JAVA_HOME" ] &&
292 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
293 | [ -n "$CLASSPATH" ] &&
294 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
295 | [ -n "$MAVEN_PROJECTBASEDIR" ] &&
296 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
297 | fi
298 |
299 | # Provide a "standardized" way to retrieve the CLI args that will
300 | # work with both Windows and non-Windows executions.
301 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
302 | export MAVEN_CMD_LINE_ARGS
303 |
304 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
305 |
306 | exec "$JAVACMD" \
307 | $MAVEN_OPTS \
308 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
309 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
310 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
311 |
--------------------------------------------------------------------------------
/mvnw.cmd:
--------------------------------------------------------------------------------
1 | @REM ----------------------------------------------------------------------------
2 | @REM Licensed to the Apache Software Foundation (ASF) under one
3 | @REM or more contributor license agreements. See the NOTICE file
4 | @REM distributed with this work for additional information
5 | @REM regarding copyright ownership. The ASF licenses this file
6 | @REM to you under the Apache License, Version 2.0 (the
7 | @REM "License"); you may not use this file except in compliance
8 | @REM with the License. You may obtain a copy of the License at
9 | @REM
10 | @REM http://www.apache.org/licenses/LICENSE-2.0
11 | @REM
12 | @REM Unless required by applicable law or agreed to in writing,
13 | @REM software distributed under the License is distributed on an
14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | @REM KIND, either express or implied. See the License for the
16 | @REM specific language governing permissions and limitations
17 | @REM under the License.
18 | @REM ----------------------------------------------------------------------------
19 |
20 | @REM ----------------------------------------------------------------------------
21 | @REM Maven2 Start Up Batch script
22 | @REM
23 | @REM Required ENV vars:
24 | @REM JAVA_HOME - location of a JDK home dir
25 | @REM
26 | @REM Optional ENV vars
27 | @REM M2_HOME - location of maven2's installed home dir
28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
31 | @REM e.g. to debug Maven itself, use
32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
34 | @REM ----------------------------------------------------------------------------
35 |
36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
37 | @echo off
38 | @REM set title of command window
39 | title %0
40 | @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
41 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
42 |
43 | @REM set %HOME% to equivalent of $HOME
44 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
45 |
46 | @REM Execute a user defined script before this one
47 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
48 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending
49 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
50 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
51 | :skipRcPre
52 |
53 | @setlocal
54 |
55 | set ERROR_CODE=0
56 |
57 | @REM To isolate internal variables from possible post scripts, we use another setlocal
58 | @setlocal
59 |
60 | @REM ==== START VALIDATION ====
61 | if not "%JAVA_HOME%" == "" goto OkJHome
62 |
63 | echo.
64 | echo Error: JAVA_HOME not found in your environment. >&2
65 | echo Please set the JAVA_HOME variable in your environment to match the >&2
66 | echo location of your Java installation. >&2
67 | echo.
68 | goto error
69 |
70 | :OkJHome
71 | if exist "%JAVA_HOME%\bin\java.exe" goto init
72 |
73 | echo.
74 | echo Error: JAVA_HOME is set to an invalid directory. >&2
75 | echo JAVA_HOME = "%JAVA_HOME%" >&2
76 | echo Please set the JAVA_HOME variable in your environment to match the >&2
77 | echo location of your Java installation. >&2
78 | echo.
79 | goto error
80 |
81 | @REM ==== END VALIDATION ====
82 |
83 | :init
84 |
85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
86 | @REM Fallback to current working directory if not found.
87 |
88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
90 |
91 | set EXEC_DIR=%CD%
92 | set WDIR=%EXEC_DIR%
93 | :findBaseDir
94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound
95 | cd ..
96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound
97 | set WDIR=%CD%
98 | goto findBaseDir
99 |
100 | :baseDirFound
101 | set MAVEN_PROJECTBASEDIR=%WDIR%
102 | cd "%EXEC_DIR%"
103 | goto endDetectBaseDir
104 |
105 | :baseDirNotFound
106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
107 | cd "%EXEC_DIR%"
108 |
109 | :endDetectBaseDir
110 |
111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
112 |
113 | @setlocal EnableExtensions EnableDelayedExpansion
114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
116 |
117 | :endReadAdditionalConfig
118 |
119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
120 | set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
121 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
122 |
123 | set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar"
124 |
125 | FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
126 | IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
127 | )
128 |
129 | @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
130 | @REM This allows using the maven wrapper in projects that prohibit checking in binary data.
131 | if exist %WRAPPER_JAR% (
132 | if "%MVNW_VERBOSE%" == "true" (
133 | echo Found %WRAPPER_JAR%
134 | )
135 | ) else (
136 | if not "%MVNW_REPOURL%" == "" (
137 | SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar"
138 | )
139 | if "%MVNW_VERBOSE%" == "true" (
140 | echo Couldn't find %WRAPPER_JAR%, downloading it ...
141 | echo Downloading from: %DOWNLOAD_URL%
142 | )
143 |
144 | powershell -Command "&{"^
145 | "$webclient = new-object System.Net.WebClient;"^
146 | "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
147 | "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
148 | "}"^
149 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
150 | "}"
151 | if "%MVNW_VERBOSE%" == "true" (
152 | echo Finished downloading %WRAPPER_JAR%
153 | )
154 | )
155 | @REM End of extension
156 |
157 | @REM Provide a "standardized" way to retrieve the CLI args that will
158 | @REM work with both Windows and non-Windows executions.
159 | set MAVEN_CMD_LINE_ARGS=%*
160 |
161 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
162 | if ERRORLEVEL 1 goto error
163 | goto end
164 |
165 | :error
166 | set ERROR_CODE=1
167 |
168 | :end
169 | @endlocal & set ERROR_CODE=%ERROR_CODE%
170 |
171 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
172 | @REM check for post script, once with legacy .bat ending and once with .cmd ending
173 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
174 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
175 | :skipRcPost
176 |
177 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
178 | if "%MAVEN_BATCH_PAUSE%" == "on" pause
179 |
180 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
181 |
182 | exit /B %ERROR_CODE%
183 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | no.sysco.testing
8 | kafka
9 | 1.0-SNAPSHOT
10 | pom
11 |
12 |
13 | streams-client
14 | consumer-producer-clients
15 | embedded-cluster
16 | data-pipeline
17 | e2e
18 |
19 |
20 |
21 | UTF-8
22 | UTF-8
23 | 1.8
24 | 1.8
25 |
26 | 2.1.1
27 | 5.1.1
28 |
29 | 1.8.2
30 | 1.7.25
31 | 3.2.1
32 |
33 | 4.12
34 | 3.1.6
35 | 2.22.0
36 | 3.0.1
37 | 2.9.10.4
38 |
39 | 2.22.1
40 | true
41 |
42 |
43 |
44 |
45 | confluent
46 | http://packages.confluent.io/maven/
47 |
48 |
49 |
50 |
51 | confluent
52 | Confluent
53 | http://packages.confluent.io/maven/
54 |
55 |
56 |
57 |
58 |
59 |
60 | zhenik
61 | Nikita Zhevnitskiy
62 | https://github.com/zhenik
63 | Sysco AS
64 | https://sysco.no
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | no.sysco.testing
73 | embedded-cluster
74 | ${project.version}
75 |
76 |
77 |
78 | com.typesafe
79 | config
80 | 1.3.3
81 |
82 |
83 |
84 | org.apache.kafka
85 | kafka-clients
86 | ${kafka.apis.version}
87 |
88 |
89 | org.apache.kafka
90 | kafka-streams
91 | ${kafka.apis.version}
92 |
93 |
94 |
95 | com.fasterxml.jackson.core
96 | jackson-databind
97 | ${jackson.version}
98 |
99 |
100 |
101 | org.apache.avro
102 | avro
103 | ${avro.version}
104 |
105 |
106 | io.confluent
107 | kafka-avro-serializer
108 | ${confluent.platform.version}
109 |
110 |
111 | io.confluent
112 | kafka-streams-avro-serde
113 | ${confluent.platform.version}
114 |
115 |
116 |
117 | org.slf4j
118 | slf4j-log4j12
119 | ${slf4j.logger.version}
120 |
121 |
122 | org.slf4j
123 | slf4j-api
124 | ${slf4j.logger.version}
125 |
126 |
127 |
128 | org.apache.kafka
129 | kafka-streams-test-utils
130 | ${kafka.apis.version}
131 | test
132 |
133 |
134 | junit
135 | junit
136 | ${junit.version}
137 | test
138 |
139 |
140 | com.github.tomakehurst
141 | wiremock-jre8
142 | ${wiremock.version}
143 | test
144 |
145 |
146 | org.awaitility
147 | awaitility
148 | ${awaitility.version}
149 | test
150 |
151 |
152 | io.rest-assured
153 | rest-assured
154 | ${rest-assured.version}
155 | test
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 | org.apache.avro
165 | avro-maven-plugin
166 | ${avro.version}
167 |
168 |
169 | io.confluent
170 | kafka-schema-registry-maven-plugin
171 | ${confluent.platform.version}
172 |
173 |
174 | org.apache.maven.plugins
175 | maven-shade-plugin
176 | ${shade.plugin.version}
177 |
178 |
179 | org.apache.maven.plugins
180 | maven-failsafe-plugin
181 | ${failsafe.plugin.version}
182 |
183 |
184 |
185 | integration-test
186 | verify
187 |
188 |
189 |
190 |
191 | ${skipIntegrationTests}
192 |
193 |
194 |
195 |
196 |
197 |
198 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | [](https://www.travis-ci.org/sysco-middleware/kafka-testing)
2 | # Kafka-clients: writing automated tests
3 | Run all tests:
4 | ```
5 | ./mvnw clean install -DskipIntegrationTests=false
6 | ```
7 |
8 | ## Modules and approaches
9 | 1. [streams-client module](./streams-client) contains examples of unit tests for kafka-streams topologies with [kafka-streams-test-utils](https://kafka.apache.org/21/documentation/streams/developer-guide/testing.html).
10 | The approach covers testing topologies (stateful and stateless processors) with different `serdes`, including [avro](https://avro.apache.org/docs/1.8.2/spec.html) and the [confluent schema registry](https://docs.confluent.io/current/schema-registry/index.html).
11 | 2. [embedded-kafka-cluster module](./embedded-cluster) is an example of an in-memory embedded Kafka cluster (1 ZooKeeper, 1 Kafka broker, 1 Confluent Schema Registry). The embedded cluster is used for integration tests of Kafka client applications.
12 | 3. [consumer-producer-clients module](./consumer-producer-clients) contains examples of integration tests, backed by the [embedded kafka cluster](./embedded-cluster), for Kafka-based applications using the [Producer/Consumer API](https://kafka.apache.org/documentation)
13 | 4. [data-pipeline module](./data-pipeline) contains examples of integration tests, backed by the [embedded kafka cluster](./embedded-cluster) and [wire-mock](http://wiremock.org), for Kafka-based applications using the [Streams API](https://kafka.apache.org/documentation/streams/)
14 | 5. [e2e module](./e2e) contains an end-to-end `IT` test for the data pipeline, using [testcontainers](https://www.testcontainers.org); a minimal container-setup sketch follows this list
15 |
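For the e2e module, a minimal sketch of how a docker-compose environment can be started with testcontainers and JUnit 4. The class name and compose file path are assumptions for illustration; the service names match those used in the e2e `IT` test:

```java
import java.io.File;
import org.junit.ClassRule;
import org.junit.Test;
import org.testcontainers.containers.DockerComposeContainer;

public class DataPipelineSketchIT {

  // Starts the docker-compose services before the test class and stops them afterwards.
  // NOTE: the compose file path is an assumption for illustration.
  @ClassRule
  public static DockerComposeContainer environment =
      new DockerComposeContainer(new File("src/test/resources/docker-compose.yml"))
          .withExposedService("db-mock_1", 80)
          .withExposedService("http-producer_1", 8080);

  @Test
  public void example() {
    // Build base URLs the same way the e2e test does: host and port are mapped by testcontainers.
    String jsonServerUrl = "http://" + environment.getServiceHost("db-mock_1", 80)
        + ":" + environment.getServicePort("db-mock_1", 80);
    String producerBaseUrl = "http://" + environment.getServiceHost("http-producer_1", 8080)
        + ":" + environment.getServicePort("http-producer_1", 8080);
    // ... exercise the pipeline with REST Assured / Awaitility as in the e2e module
  }
}
```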
16 |
17 | ### TODO:
18 | - [ ] Makefile
19 | - [ ] update Java version `8 -> 12` (`!NB`: reflection usage currently limits the code to Java 8)
20 | - [ ] update JUnit version `4 -> 5`
21 |
22 | ### Important notes
23 | - [Confluent Platform and Apache Kafka Compatibility](https://docs.confluent.io/current/installation/versions-interoperability.html#cp-and-apache-kafka-compatibility)
24 |
25 | ### References
26 | - [Apache Kafka. Developer guide. Testing](https://kafka.apache.org/20/documentation/streams/developer-guide/testing.html)
27 | - [Getting Your Feet Wet with Stream Processing – Part 2: Testing Your Streaming Application](https://www.confluent.io/blog/stream-processing-part-2-testing-your-streaming-application)
--------------------------------------------------------------------------------
/streams-client/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | kafka
8 | no.sysco.testing
9 | 1.0-SNAPSHOT
10 |
11 | streams-client
12 |
13 |
14 |
15 |
16 | org.apache.kafka
17 | kafka-clients
18 |
19 |
20 | org.apache.kafka
21 | kafka-streams
22 |
23 |
24 |
25 | io.confluent
26 | kafka-streams-avro-serde
27 | ${confluent.platform.version}
28 |
29 |
30 | org.apache.avro
31 | avro
32 |
33 |
34 |
35 | org.apache.kafka
36 | kafka-streams-test-utils
37 | test
38 |
39 |
40 | junit
41 | junit
42 | test
43 |
44 |
45 |
46 | org.slf4j
47 | slf4j-log4j12
48 |
49 |
50 |
51 |
52 |
53 |
54 | org.apache.avro
55 | avro-maven-plugin
56 |
57 |
58 | generate-sources
59 |
60 | schema
61 |
62 |
63 |
64 |
65 | ${project.basedir}/src/main/resources/avro
66 | String
67 |
68 |
69 |
70 |
71 |
--------------------------------------------------------------------------------
/streams-client/readme.md:
--------------------------------------------------------------------------------
1 | # Description
2 | This module contains examples of testing the Kafka Streams API with kafka-streams-test-utils.
3 | Only stream topologies are tested, in isolation.
4 |
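A minimal sketch of a topology unit test with `TopologyTestDriver` (assuming the Kafka 2.1 test-utils API and the `topologyUpperCase` topology from this module; class and topic names are chosen for illustration):

```java
import static org.junit.Assert.assertEquals;

import java.util.Properties;
import no.sysco.testing.kafka.streams.topology.StreamProcessing;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.ConsumerRecordFactory;
import org.junit.Test;

public class StreamProcessingSketchTest {

  @Test
  public void upperCasesValues() {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "topology-sketch-test");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); // never contacted by the driver

    // Drive the topology entirely in memory; no broker is required.
    try (TopologyTestDriver driver =
        new TopologyTestDriver(StreamProcessing.topologyUpperCase("source-topic", "sink-topic"), props)) {

      ConsumerRecordFactory<String, String> factory =
          new ConsumerRecordFactory<>("source-topic", new StringSerializer(), new StringSerializer());
      driver.pipeInput(factory.create("source-topic", "id-1", "hello"));

      ProducerRecord<String, String> output =
          driver.readOutput("sink-topic", new StringDeserializer(), new StringDeserializer());
      assertEquals("HELLO", output.value());
    }
  }
}
```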
5 | ## TODO
6 | - [X] Topology using dsl + local storage + avro schemas
7 | - [X] Topology using processor api + local storage + avro schemas
8 | - [ ] Clean tests + DRY
9 |
10 | ## Related Issues
11 | * [#877 - Passing Schema Registry URL twice to instantiate KafkaAvroSerializer](https://github.com/confluentinc/schema-registry/issues/877)
12 |
13 | ## Additional Info
14 | * More examples [kafka-streams-examples](https://github.com/confluentinc/kafka-streams-examples)
15 |
--------------------------------------------------------------------------------
/streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessing.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import java.util.stream.Collectors;
4 | import java.util.stream.Stream;
5 | import org.apache.kafka.common.serialization.Serdes;
6 | import org.apache.kafka.common.utils.Bytes;
7 | import org.apache.kafka.streams.KeyValue;
8 | import org.apache.kafka.streams.StreamsBuilder;
9 | import org.apache.kafka.streams.Topology;
10 | import org.apache.kafka.streams.kstream.Consumed;
11 | import org.apache.kafka.streams.kstream.KStream;
12 | import org.apache.kafka.streams.kstream.Materialized;
13 | import org.apache.kafka.streams.kstream.Produced;
14 | import org.apache.kafka.streams.kstream.ValueMapper;
15 | import org.apache.kafka.streams.state.KeyValueStore;
16 |
17 | public class StreamProcessing {
18 |
19 | // stateless
20 | public static Topology topologyUpperCase(final String sourceTopic, final String sinkTopic) {
21 | final StreamsBuilder streamsBuilder = new StreamsBuilder();
22 | final KStream<String, String> sourceStream =
23 | streamsBuilder.stream(sourceTopic, Consumed.with(Serdes.String(), Serdes.String()));
24 |
25 | sourceStream
26 | .mapValues((ValueMapper<String, String>) String::toUpperCase)
27 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.String()));
28 | return streamsBuilder.build();
29 | }
30 |
31 | // stateful
32 | public static Topology topologyCountAnagram(
33 | final String sourceTopic,
34 | final String sinkTopic,
35 | final String storeName) {
36 | final StreamsBuilder streamsBuilder = new StreamsBuilder();
37 | final KStream<String, String> sourceStream =
38 | streamsBuilder.stream(sourceTopic, Consumed.with(Serdes.String(), Serdes.String()));
39 | // 1. [null:"magic"] => ["acgim":"magic"]
40 | // 2. amount with same key
41 | sourceStream
42 | .map(
43 | (key, value) -> {
44 | final String newKey =
45 | Stream.of(value.replaceAll(" ", "").split(""))
46 | .sorted()
47 | .collect(Collectors.joining());
48 | return KeyValue.pair(newKey, value);
49 | })
50 | .groupByKey()
51 | .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as(storeName))
52 | .toStream()
53 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.Long()));
54 | return streamsBuilder.build();
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessingAvro.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import no.sysco.testing.kafka.streams.avro.Person;
4 | import org.apache.kafka.common.serialization.Serde;
5 | import org.apache.kafka.common.serialization.Serdes;
6 | import org.apache.kafka.common.utils.Bytes;
7 | import org.apache.kafka.streams.StreamsBuilder;
8 | import org.apache.kafka.streams.Topology;
9 | import org.apache.kafka.streams.kstream.Consumed;
10 | import org.apache.kafka.streams.kstream.Materialized;
11 | import org.apache.kafka.streams.kstream.Produced;
12 | import org.apache.kafka.streams.state.KeyValueStore;
13 |
14 | public class StreamProcessingAvro {
15 |
16 | // stateless
17 | public static Topology topologyUpperCase(
18 | final String sourceTopic,
19 | final String sinkTopic,
20 | final Serde<Person> personSerdes) {
21 | final StreamsBuilder builder = new StreamsBuilder();
22 | builder.stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes))
23 | // .peek((k, v) -> System.out.printf("%s %s %s\n", v.getId(), v.getName(), v.getLastname()))
24 | .mapValues(
25 | person ->
26 | Person.newBuilder()
27 | .setId(person.getId().toUpperCase())
28 | .setName(person.getName().toUpperCase())
29 | .setLastname(person.getLastname().toUpperCase())
30 | .build())
31 | .to(sinkTopic, Produced.with(Serdes.String(), personSerdes));
32 | return builder.build();
33 | }
34 |
35 | // stateful
36 | public static Topology topologyCountUsersWithSameName(
37 | String sourceTopic,
38 | String sinkTopic,
39 | final Serde<Person> personSerdes,
40 | final String storeName) {
41 |
42 | final StreamsBuilder builder = new StreamsBuilder();
43 | builder.stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes))
44 | .groupBy((key, value) -> value.getName())
45 | .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as(storeName))
46 | .toStream()
47 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.Long()));
48 |
49 | return builder.build();
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessingLowLvlAvro.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import no.sysco.testing.kafka.streams.avro.Person;
4 | import org.apache.kafka.common.serialization.Serde;
5 | import org.apache.kafka.common.serialization.Serdes;
6 | import org.apache.kafka.streams.KeyValue;
7 | import org.apache.kafka.streams.StreamsBuilder;
8 | import org.apache.kafka.streams.Topology;
9 | import org.apache.kafka.streams.kstream.Consumed;
10 | import org.apache.kafka.streams.kstream.Produced;
11 | import org.apache.kafka.streams.kstream.Transformer;
12 | import org.apache.kafka.streams.processor.ProcessorContext;
13 | import org.apache.kafka.streams.state.KeyValueStore;
14 | import org.apache.kafka.streams.state.Stores;
15 |
16 | public class StreamProcessingLowLvlAvro {
17 |
18 | // stateful
19 | public static Topology topologyDedupByUserId(
20 | final String sourceTopic,
21 | final String sinkTopic,
22 | final Serde<Person> personSerdes,
23 | final String idStore) {
24 |
25 | final StreamsBuilder builder = new StreamsBuilder();
26 | builder
27 | .addStateStore(
28 | Stores.keyValueStoreBuilder(
29 | Stores.persistentKeyValueStore(idStore), Serdes.String(), personSerdes))
30 | .stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes))
31 | .transform(
32 | () ->
33 | new Transformer<String, Person, KeyValue<String, Person>>() {
34 | KeyValueStore<String, Person> stateStore;
35 |
36 | @Override
37 | public void init(ProcessorContext context) {
38 | this.stateStore =
39 | (KeyValueStore<String, Person>) context.getStateStore(idStore);
40 | }
41 |
42 | @Override
43 | public KeyValue<String, Person> transform(String key, Person value) {
44 | String id = value.getId();
45 | if (!id.equals(key)) return null; // drop malformed records whose key does not match the Person id
46 |
47 | Person person = stateStore.get(key);
48 | if (person == null) {
49 | // add to store
50 | stateStore.put(key, value);
51 | return KeyValue.pair(key, value);
52 | } else {
53 | return null;
54 | }
55 | }
56 |
57 | @Override
58 | public void close() {}
59 | },
60 | idStore)
61 | .to(sinkTopic, Produced.with(Serdes.String(), personSerdes));
62 |
63 | return builder.build();
64 | }
65 | }
66 |
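
Note (not part of the repository): topologyDedupByUserId forwards a record only the first time a given id is seen, and silently drops records whose key disagrees with value.getId(). A short trace with hypothetical inputs keyed by user id:

("id-1", Person{id="id-1", name="nikita"})  -> forwarded to the sink and cached in the state store
("id-1", Person{id="id-1", name="other"})   -> dropped, "id-1" is already in the store
("id-2", Person{id="id-1", name="oops"})    -> dropped, key and Person.id disagree

The tests in StreamProcessingLowLvlAvroTest exercise exactly this behaviour through TopologyTestDriver.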
--------------------------------------------------------------------------------
/streams-client/src/main/resources/avro/person.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "no.sysco.testing.kafka.streams.avro",
3 | "type": "record",
4 | "name": "Person",
5 | "fields": [
6 | {"name": "id", "type": "string"},
7 | {"name": "name", "type": "string"},
8 | {"name": "lastname", "type": "string"}
9 | ]
10 | }
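
Note (not part of the repository): this schema is the source for the generated no.sysco.testing.kafka.streams.avro.Person class (an Avro SpecificRecord) used by the topologies and tests, which is why instances are created through the generated builder, e.g.:

Person person =
    Person.newBuilder()
        .setId("id-1")
        .setName("nikita")
        .setLastname("zhevnitskiy")
        .build();

The build step that generates the class (typically an Avro code-generation plugin) is not shown in this file.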
--------------------------------------------------------------------------------
/streams-client/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Set root logger level to INFO and its only appender to A1.
2 | log4j.rootLogger=INFO, A1
3 | # A1 is set to be a ConsoleAppender.
4 | log4j.appender.A1=org.apache.log4j.ConsoleAppender
5 | # A1 uses PatternLayout.
6 | log4j.appender.A1.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
--------------------------------------------------------------------------------
/streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingAvroTest.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
4 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
5 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
6 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
7 | import java.io.IOException;
8 | import java.util.Arrays;
9 | import java.util.Collections;
10 | import java.util.Map;
11 | import java.util.Optional;
12 | import java.util.Properties;
13 | import no.sysco.testing.kafka.streams.avro.Person;
14 | import org.apache.kafka.clients.consumer.ConsumerRecord;
15 | import org.apache.kafka.clients.producer.ProducerRecord;
16 | import org.apache.kafka.common.serialization.Serdes;
17 | import org.apache.kafka.common.serialization.StringDeserializer;
18 | import org.apache.kafka.common.serialization.StringSerializer;
19 | import org.apache.kafka.streams.StreamsConfig;
20 | import org.apache.kafka.streams.Topology;
21 | import org.apache.kafka.streams.TopologyTestDriver;
22 | import org.apache.kafka.streams.state.KeyValueStore;
23 | import org.apache.kafka.streams.test.ConsumerRecordFactory;
24 | import org.junit.After;
25 | import org.junit.Before;
26 | import org.junit.Test;
27 |
28 | import static org.junit.Assert.assertEquals;
29 |
30 | public class StreamProcessingAvroTest {
31 | private final String topicIn = "topic-in";
32 | private final String topicOut = "topic-out";
33 | private final String schemaUrl = "http://localhost:8081";
34 | // http://localhost:8081/subjects/topic-in-value/versions/latest
35 | // only for TopicNameStrategy
36 | private final String mockedUrl = schemaUrl + "/subjects/" + topicIn + "-value/versions/latest";
37 | private TopologyTestDriver testDriver;
38 | private MockSchemaRegistryClient schemaRegistryClient;
39 | private Properties properties;
40 |
41 | @Before
42 | public void start() {
43 | properties = new Properties();
44 | properties.put(StreamsConfig.CLIENT_ID_CONFIG, "client-id-test-1");
45 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-id-test-5");
46 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9922");
47 | properties.put(
48 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081");
49 | properties.put(
50 | StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
51 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
52 |
53 | schemaRegistryClient = new MockSchemaRegistryClient();
54 | }
55 |
56 | @After
57 | public void tearDown() {
58 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close);
59 | testDriver = null;
60 | properties = null;
61 | }
62 |
63 | @Test
64 | public void testTopologyAvro_statelessProcessors() throws IOException, RestClientException {
65 |
66 | /** Arrange */
67 | // register schema in mock schema-registry -> not necessary
68 | // schemaRegistryClient.register(
69 | // new TopicNameStrategy().subjectName(topicIn, false, Person.SCHEMA$), Person.SCHEMA$);
70 | // create serde with config to be able to connect to mock schema registry
71 | // https://github.com/confluentinc/schema-registry/issues/877
72 | // Passing Schema Registry URL twice to instantiate KafkaAvroSerializer or Serde
73 | final SpecificAvroSerde<Person> serde = new SpecificAvroSerde<>(schemaRegistryClient);
74 |
75 | final Map<String, String> schema =
76 | Collections.singletonMap(
77 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
78 | "wat-ever-url-anyway-it-is-mocked");
79 | serde.configure(schema, false);
80 | // get topology
81 | final Topology topology =
82 | StreamProcessingAvro.topologyUpperCase(topicIn, topicOut, serde);
83 | testDriver = new TopologyTestDriver(topology, properties);
84 |
85 | final ConsumerRecordFactory<String, Person> factory =
86 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer());
87 |
88 | final ConsumerRecord<byte[], byte[]> inRecord1 =
89 | factory.create(
90 | topicIn,
91 | "1",
92 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build());
93 |
94 | final ConsumerRecord<byte[], byte[]> inRecord2 =
95 | factory.create(
96 | topicIn,
97 | "2",
98 | Person.newBuilder().setId("id-2").setName("vitaly").setLastname("moscow").build());
99 |
100 | /** Act */
101 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2));
102 | final ProducerRecord<String, Person> outRecord1 =
103 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer());
104 | final ProducerRecord<String, Person> outRecord2 =
105 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer());
106 |
107 | /** Assert */
108 | assertEquals("ID-1", outRecord1.value().getId());
109 | assertEquals("ID-2", outRecord2.value().getId());
110 | assertEquals("moscow".toUpperCase(), outRecord2.value().getLastname());
111 | }
112 |
113 | @Test
114 | public void testTopologyAvro_statefulProcessors() throws IOException, RestClientException {
115 | /** Arrange */
116 | final String storeName = "same-name";
117 | // registering the schema in the mock schema-registry is not necessary
118 | // schemaRegistryClient.register(
119 | // new TopicNameStrategy().subjectName(topicIn, false, Person.SCHEMA$), Person.SCHEMA$);
120 | // create serde with config to be able to connect to mock schema registry
121 | final SpecificAvroSerde<Person> serde = new SpecificAvroSerde<>(schemaRegistryClient);
122 |
123 | final Map<String, String> schema =
124 | Collections.singletonMap(
125 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
126 | "wat-ever-url-anyway-it-is-mocked");
127 | serde.configure(schema, false);
128 | // get topology
129 | final Topology topology =
130 | StreamProcessingAvro.topologyCountUsersWithSameName(topicIn, topicOut, serde, storeName);
131 | testDriver = new TopologyTestDriver(topology, properties);
132 |
133 | final ConsumerRecordFactory<String, Person> factory =
134 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer());
135 |
136 | final ConsumerRecord<byte[], byte[]> inRecord1 =
137 | factory.create(
138 | topicIn,
139 | "1",
140 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build());
141 |
142 | final ConsumerRecord<byte[], byte[]> inRecord2 =
143 | factory.create(
144 | topicIn,
145 | "2",
146 | Person.newBuilder().setId("id-2").setName("nikita").setLastname("moscow").build());
147 |
148 | /** Act */
149 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2));
150 | final KeyValueStore<String, Long> keyValueStore = testDriver.getKeyValueStore(storeName);
151 | final Long amountOfRecordWithSameName = keyValueStore.get("nikita");
152 |
153 | /** Assert */
154 | assertEquals(Long.valueOf(2), amountOfRecordWithSameName);
155 | }
156 | }
157 |
--------------------------------------------------------------------------------
/streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingLowLvlAvroTest.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
4 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
5 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
6 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
7 | import java.io.IOException;
8 | import java.util.Arrays;
9 | import java.util.Collections;
10 | import java.util.Map;
11 | import java.util.Optional;
12 | import java.util.Properties;
13 | import no.sysco.testing.kafka.streams.avro.Person;
14 | import org.apache.kafka.clients.consumer.ConsumerRecord;
15 | import org.apache.kafka.clients.producer.ProducerRecord;
16 | import org.apache.kafka.common.serialization.Serdes;
17 | import org.apache.kafka.common.serialization.StringDeserializer;
18 | import org.apache.kafka.common.serialization.StringSerializer;
19 | import org.apache.kafka.streams.StreamsConfig;
20 | import org.apache.kafka.streams.Topology;
21 | import org.apache.kafka.streams.TopologyTestDriver;
22 | import org.apache.kafka.streams.state.KeyValueStore;
23 | import org.apache.kafka.streams.test.ConsumerRecordFactory;
24 | import org.junit.After;
25 | import org.junit.Before;
26 | import org.junit.Test;
27 |
28 | import static org.junit.Assert.assertEquals;
29 | import static org.junit.Assert.assertNull;
30 |
31 | public class StreamProcessingLowLvlAvroTest {
32 | private final String topicIn = "topic-in";
33 | private final String topicOut = "topic-out";
34 | private TopologyTestDriver testDriver;
35 | private MockSchemaRegistryClient schemaRegistryClient;
36 | private Properties properties;
37 |
38 | @Before
39 | public void start() {
40 | properties = new Properties();
41 | properties.put(StreamsConfig.CLIENT_ID_CONFIG, "client-id-test-1");
42 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-id-test-5");
43 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9922");
44 | properties.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://whatever:4242");
45 | properties.put(
46 | StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
47 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
48 |
49 | schemaRegistryClient = new MockSchemaRegistryClient();
50 | }
51 |
52 | @After
53 | public void tearDown() {
54 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close);
55 | testDriver = null;
56 | properties = null;
57 | }
58 |
59 | @Test
60 | public void testTopologyLowLvlAvro_statefulProcessors() throws IOException, RestClientException {
61 | /** Arrange */
62 | final String storeName = "same-name";
63 |
64 | // create serde with config to be able to connect to mock schema registry
65 | final SpecificAvroSerde<Person> serde = new SpecificAvroSerde<>(schemaRegistryClient);
66 |
67 | final Map<String, String> schema =
68 | Collections.singletonMap(
69 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
70 | "wat-ever-url-anyway-it-is-mocked");
71 | serde.configure(schema, false);
72 | // get topology
73 | final Topology topology =
74 | StreamProcessingLowLvlAvro.topologyDedupByUserId(topicIn, topicOut, serde, storeName);
75 | testDriver = new TopologyTestDriver(topology, properties);
76 |
77 | final ConsumerRecordFactory<String, Person> factory =
78 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer());
79 |
80 | final ConsumerRecord<byte[], byte[]> inRecord1 =
81 | factory.create(
82 | topicIn,
83 | "id-1",
84 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build());
85 |
86 | /** Act */
87 | testDriver.pipeInput(Collections.singletonList(inRecord1));
88 | final KeyValueStore<String, Person> dedupStore = testDriver.getKeyValueStore(storeName);
89 | final Person person = dedupStore.get("id-1");
90 | final long storedEntries = dedupStore.approximateNumEntries();
91 |
92 | /** Assert */
93 | assertEquals("id-1", person.getId());
94 | assertEquals(1, storedEntries);
95 | }
96 |
97 | @Test
98 | public void testTopologyLowLvlAvro_statefulProcessors_invalidInput()
99 | throws IOException, RestClientException {
100 | /** Arrange */
101 | final String storeName = "same-name";
102 |
103 | // create serde with config to be able to connect to mock schema registry
104 | final SpecificAvroSerde<Person> serde = new SpecificAvroSerde<>(schemaRegistryClient);
105 |
106 | final Map<String, String> schema =
107 | Collections.singletonMap(
108 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
109 | "wat-ever-url-anyway-it-is-mocked");
110 | serde.configure(schema, false);
111 | // get topology
112 | final Topology topology =
113 | StreamProcessingLowLvlAvro.topologyDedupByUserId(topicIn, topicOut, serde, storeName);
114 | testDriver = new TopologyTestDriver(topology, properties);
115 |
116 | final ConsumerRecordFactory<String, Person> factory =
117 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer());
118 |
119 | final ConsumerRecord<byte[], byte[]> inRecord1 =
120 | factory.create(
121 | topicIn,
122 | "invalid-id",
123 | Person.newBuilder().setId("id-1").setName("yohoho").setLastname("pirate").build());
124 |
125 | final ConsumerRecord<byte[], byte[]> inRecord2 =
126 | factory.create(
127 | topicIn,
128 | "id-1",
129 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build());
130 |
131 | final ConsumerRecord<byte[], byte[]> inRecord3 =
132 | factory.create(
133 | topicIn,
134 | "id-1",
135 | Person.newBuilder().setId("id-1").setName("42").setLastname("42").build());
136 |
137 | /** Act */
138 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2, inRecord3));
139 | final KeyValueStore<String, Person> dedupStore = testDriver.getKeyValueStore(storeName);
140 | final Person person = dedupStore.get("id-1");
141 |
142 | final ProducerRecord<String, Person> outRecord1 =
143 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer());
144 | final ProducerRecord<String, Person> outRecord2 =
145 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer());
146 | final ProducerRecord<String, Person> outRecord3 =
147 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer());
148 |
149 | /** Assert */
150 | assertEquals("nikita", outRecord1.value().getName());
151 | assertEquals("id-1", outRecord1.key());
152 | assertEquals("id-1", person.getId());
153 | assertNull(outRecord2);
154 | assertNull(outRecord3);
155 |
156 | assertEquals("zhevnitskiy", dedupStore.get("id-1").getLastname());
157 | assertNull(dedupStore.get("invalid-id"));
158 |
159 | assertEquals(1, dedupStore.approximateNumEntries());
160 | }
161 | }
162 |
--------------------------------------------------------------------------------
/streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingTest.java:
--------------------------------------------------------------------------------
1 | package no.sysco.testing.kafka.streams.topology;
2 |
3 | import java.util.Arrays;
4 | import java.util.Optional;
5 | import java.util.Properties;
6 | import org.apache.kafka.clients.consumer.ConsumerRecord;
7 | import org.apache.kafka.clients.producer.ProducerRecord;
8 | import org.apache.kafka.common.serialization.LongDeserializer;
9 | import org.apache.kafka.common.serialization.Serdes;
10 | import org.apache.kafka.common.serialization.StringDeserializer;
11 | import org.apache.kafka.common.serialization.StringSerializer;
12 | import org.apache.kafka.streams.StreamsConfig;
13 | import org.apache.kafka.streams.Topology;
14 | import org.apache.kafka.streams.TopologyTestDriver;
15 | import org.apache.kafka.streams.state.KeyValueStore;
16 | import org.apache.kafka.streams.test.ConsumerRecordFactory;
17 | import org.junit.After;
18 | import org.junit.Before;
19 | import org.junit.Test;
20 |
21 | import static org.junit.Assert.assertEquals;
22 | import static org.junit.Assert.assertNull;
23 |
24 | public class StreamProcessingTest {
25 | private final String topicIn = "topic-in";
26 | private final String topicOut = "topic-out";
27 | private TopologyTestDriver testDriver;
28 | private Properties properties;
29 |
30 | @Before
31 | public void start() {
32 | properties = new Properties();
33 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "test");
34 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
35 | properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
36 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
37 | }
38 |
39 | @After
40 | public void tearDown() {
41 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close);
42 | testDriver = null;
43 | properties = null;
44 | }
45 |
46 | @Test
47 | public void testTopology_statelessProcessors_Uppercase() {
48 | // Arrange
49 | final Topology topology = StreamProcessing.topologyUpperCase(topicIn, topicOut);
50 | testDriver = new TopologyTestDriver(topology, properties);
51 | final ConsumerRecordFactory<String, String> factory =
52 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), new StringSerializer());
53 | final ConsumerRecord<byte[], byte[]> record1 = factory.create(topicIn, null, "magic");
54 | final ConsumerRecord<byte[], byte[]> record2 = factory.create(topicIn, null, "cigma");
55 |
56 | // Act
57 | testDriver.pipeInput(Arrays.asList(record1, record2));
58 | final ProducerRecord<String, String> outRecord1 =
59 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer());
60 | final ProducerRecord<String, String> outRecord2 =
61 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer());
62 | final ProducerRecord<String, String> outRecord3 =
63 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer());
64 |
65 | // Assert
66 | assertNull(outRecord3);
67 | assertEquals("magic".toUpperCase(), outRecord1.value());
68 | assertEquals("cigma".toUpperCase(), outRecord2.value());
69 | }
70 |
71 | @Test
72 | public void testTopology_statefulProcessors_Anagram() {
73 |
74 | // Arrange
75 | final String storeName = "count-storage";
76 | final Topology topology =
77 | StreamProcessing.topologyCountAnagram(topicIn, topicOut, storeName);
78 | // setup TopologyTestDriver
79 | testDriver = new TopologyTestDriver(topology, properties);
81 | final ConsumerRecordFactory<String, String> factory =
81 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), new StringSerializer());
83 | final ConsumerRecord<byte[], byte[]> record1 = factory.create(topicIn, null, "magic");
84 | final ConsumerRecord<byte[], byte[]> record2 = factory.create(topicIn, null, "cigma");
84 |
85 | // Act
86 | testDriver.pipeInput(Arrays.asList(record1, record2));
88 | final ProducerRecord<String, Long> outRecord1 =
88 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer());
90 | final ProducerRecord<String, Long> outRecord2 =
90 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer());
92 | final ProducerRecord<String, Long> outRecord3 =
92 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer());
93 |
94 | // accessing storage
95 | final KeyValueStore<String, Long> keyValueStore = testDriver.getKeyValueStore(storeName);
96 | final Long amountOfRecordsInStorageByKey = keyValueStore.get("acgim");
97 |
98 | // Assert
99 | assertNull(outRecord3);
100 | assertEquals("acgim", outRecord1.key());
101 | assertEquals("acgim", outRecord2.key());
102 | assertEquals(Long.valueOf(1), outRecord1.value());
103 | assertEquals(Long.valueOf(2), outRecord2.value());
104 | assertEquals(Long.valueOf(2), amountOfRecordsInStorageByKey);
105 | }
106 | }
107 |
--------------------------------------------------------------------------------