├── .gitignore ├── .mvn └── wrapper │ ├── MavenWrapperDownloader.java │ ├── maven-wrapper.jar │ └── maven-wrapper.properties ├── .travis.yml ├── consumer-producer-clients ├── pom.xml ├── readme.md └── src │ ├── main │ └── resources │ │ └── avro │ │ └── event.avsc │ └── test │ ├── java │ └── no │ │ └── sysco │ │ └── testing │ │ └── kafka │ │ └── producerconsumer │ │ ├── AvroProducerConsumerIT.java │ │ └── SimpleProducerConsumerIT.java │ └── resources │ └── log4j.properties ├── data-pipeline ├── data_pipeline.png ├── database.json ├── docker-compose.yml ├── http-materializer │ ├── Dockerfile │ ├── pom.xml │ └── src │ │ ├── main │ │ ├── java │ │ │ └── no │ │ │ │ └── sysco │ │ │ │ └── testing │ │ │ │ └── kafka │ │ │ │ └── pipeline │ │ │ │ └── materializer │ │ │ │ ├── MaterializerApplication.java │ │ │ │ ├── MaterializerConfig.java │ │ │ │ ├── domain │ │ │ │ ├── MessageJsonRepresentation.java │ │ │ │ └── MessageRepresentationTransformer.java │ │ │ │ └── infrastructure │ │ │ │ ├── kafka │ │ │ │ └── KafkaMessageMaterializer.java │ │ │ │ └── service │ │ │ │ ├── DatabaseWebService.java │ │ │ │ ├── DatabaseWebServiceInMemmory.java │ │ │ │ └── DatabaseWebServiceRest.java │ │ └── resources │ │ │ ├── application.conf │ │ │ └── log4j.properties │ │ └── test │ │ └── java │ │ └── no │ │ └── sysco │ │ └── testing │ │ └── kafka │ │ └── pipeline │ │ └── materializer │ │ ├── ApplicationIT.java │ │ └── TestUtils.java ├── http-producer │ ├── Dockerfile │ ├── pom.xml │ └── src │ │ ├── main │ │ ├── java │ │ │ └── no │ │ │ │ └── sysco │ │ │ │ └── testing │ │ │ │ └── kafka │ │ │ │ └── pipeline │ │ │ │ └── producer │ │ │ │ ├── ApplicationHealthCheck.java │ │ │ │ ├── ProducerRestApplication.java │ │ │ │ ├── ProducerRestConfig.java │ │ │ │ ├── domain │ │ │ │ ├── MessageJsonRepresentation.java │ │ │ │ └── MessageRepresentationTransformer.java │ │ │ │ ├── infrastructure │ │ │ │ └── kafka │ │ │ │ │ └── KafkaMessageProducer.java │ │ │ │ └── interfaces │ │ │ │ └── rest │ │ │ │ └── MessageResources.java │ │ └── resources │ │ │ ├── config.yml │ │ │ └── log4j.properties │ │ └── test │ │ └── java │ │ └── no │ │ └── sysco │ │ └── testing │ │ └── kafka │ │ └── pipeline │ │ └── producer │ │ └── ProducerRestApplicationIT.java ├── pom.xml ├── readme.md └── schema │ ├── pom.xml │ └── src │ └── main │ └── resources │ └── avro │ └── message.avsc ├── docker-compose.yml ├── e2e ├── docker-compose.test.yml ├── pom.xml ├── readme.md └── src │ └── test │ ├── java │ └── no │ │ └── sysco │ │ └── testing │ │ └── kafka │ │ └── e2e │ │ ├── ContainersIT.java │ │ ├── ContainersTestBaseIT.java │ │ ├── DockerComposeIT.java │ │ └── representation │ │ └── MessageJsonRepresentation.java │ └── resources │ ├── json-server-database.json │ └── log4j.properties ├── embedded-cluster ├── pom.xml ├── readme.md └── src │ └── main │ └── java │ └── no │ └── sysco │ └── testing │ └── kafka │ └── embedded │ ├── EmbeddedSingleNodeKafkaCluster.java │ ├── KafkaEmbedded.java │ └── ZooKeeperEmbedded.java ├── mvnw ├── mvnw.cmd ├── pom.xml ├── readme.md └── streams-client ├── pom.xml ├── readme.md └── src ├── main ├── java │ └── no │ │ └── sysco │ │ └── testing │ │ └── kafka │ │ └── streams │ │ └── topology │ │ ├── StreamProcessing.java │ │ ├── StreamProcessingAvro.java │ │ └── StreamProcessingLowLvlAvro.java └── resources │ ├── avro │ └── person.avsc │ └── log4j.properties └── test └── java └── no └── sysco └── testing └── kafka └── streams └── topology ├── StreamProcessingAvroTest.java ├── StreamProcessingLowLvlAvroTest.java └── StreamProcessingTest.java /.gitignore: 
-------------------------------------------------------------------------------- 1 | target/ 2 | .idea 3 | *.iml 4 | dependency-reduced-pom.xml -------------------------------------------------------------------------------- /.mvn/wrapper/MavenWrapperDownloader.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2007-present the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import java.io.*; import java.net.*; import java.nio.channels.*; import java.util.Properties; 18 | 19 | public class MavenWrapperDownloader { 20 | 21 | private static final String WRAPPER_VERSION = "0.5.4"; 22 | /** Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. */ 23 | private static final String DEFAULT_DOWNLOAD_URL = 24 | "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" 25 | + WRAPPER_VERSION 26 | + "/maven-wrapper-" 27 | + WRAPPER_VERSION 28 | + ".jar"; 29 | 30 | /** 31 | * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to use 32 | * instead of the default one. 33 | */ 34 | private static final String MAVEN_WRAPPER_PROPERTIES_PATH = 35 | ".mvn/wrapper/maven-wrapper.properties"; 36 | 37 | /** Path where the maven-wrapper.jar will be saved to. */ 38 | private static final String MAVEN_WRAPPER_JAR_PATH = ".mvn/wrapper/maven-wrapper.jar"; 39 | 40 | /** 41 | * Name of the property which should be used to override the default download url for the wrapper. 42 | */ 43 | private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; 44 | 45 | public static void main(String args[]) { 46 | System.out.println("- Downloader started"); 47 | File baseDirectory = new File(args[0]); 48 | System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); 49 | 50 | // If the maven-wrapper.properties exists, read it and check if it contains a custom 51 | // wrapperUrl parameter. 52 | File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); 53 | String url = DEFAULT_DOWNLOAD_URL; 54 | if (mavenWrapperPropertyFile.exists()) { 55 | FileInputStream mavenWrapperPropertyFileInputStream = null; 56 | try { 57 | mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); 58 | Properties mavenWrapperProperties = new Properties(); 59 | mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); 60 | url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); 61 | } catch (IOException e) { 62 | System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); 63 | } finally { 64 | try { 65 | if (mavenWrapperPropertyFileInputStream != null) { 66 | mavenWrapperPropertyFileInputStream.close(); 67 | } 68 | } catch (IOException e) { 69 | // Ignore ...
70 | } 71 | } 72 | } 73 | System.out.println("- Downloading from: " + url); 74 | 75 | File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); 76 | if (!outputFile.getParentFile().exists()) { 77 | if (!outputFile.getParentFile().mkdirs()) { 78 | System.out.println( 79 | "- ERROR creating output directory '" 80 | + outputFile.getParentFile().getAbsolutePath() 81 | + "'"); 82 | } 83 | } 84 | System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); 85 | try { 86 | downloadFileFromURL(url, outputFile); 87 | System.out.println("Done"); 88 | System.exit(0); 89 | } catch (Throwable e) { 90 | System.out.println("- Error downloading"); 91 | e.printStackTrace(); 92 | System.exit(1); 93 | } 94 | } 95 | 96 | private static void downloadFileFromURL(String urlString, File destination) throws Exception { 97 | if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { 98 | String username = System.getenv("MVNW_USERNAME"); 99 | char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); 100 | Authenticator.setDefault( 101 | new Authenticator() { 102 | @Override 103 | protected PasswordAuthentication getPasswordAuthentication() { 104 | return new PasswordAuthentication(username, password); 105 | } 106 | }); 107 | } 108 | URL website = new URL(urlString); 109 | ReadableByteChannel rbc; 110 | rbc = Channels.newChannel(website.openStream()); 111 | FileOutputStream fos = new FileOutputStream(destination); 112 | fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); 113 | fos.close(); 114 | rbc.close(); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysco-middleware/kafka-testing/f263f8c92dad3b176e116dadc77a3263a0e9ec6b/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cache: 3 | directories: 4 | - $HOME/.m2 5 | 6 | language: java 7 | 8 | jdk: 9 | - oraclejdk8 10 | dist: 11 | - trusty 12 | 13 | services: 14 | - docker 15 | 16 | script: 17 | - mvn clean verify -DskipIntegrationTests=false 18 | -------------------------------------------------------------------------------- /consumer-producer-clients/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | kafka 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | 12 | consumer-producer-clients 13 | 14 | 15 | 16 | 17 | org.apache.avro 18 | avro 19 | 20 | 21 | io.confluent 22 | kafka-avro-serializer 23 | 24 | 25 | org.slf4j 26 | slf4j-log4j12 27 | 28 | 29 | org.slf4j 30 | slf4j-api 31 | 32 | 33 | 34 | junit 35 | junit 36 | test 37 | 38 | 39 | no.sysco.testing 40 | embedded-cluster 41 | test 42 | 43 | 44 | org.awaitility 45 | awaitility 46 | test 47 | 48 | 49 | 50 | 51 | 52 | 53 | org.apache.avro 54 | avro-maven-plugin 55 | 56 | 57 | 
generate-sources 58 | 59 | schema 60 | 61 | 62 | 63 | 64 | ${project.basedir}/src/main/resources/avro 65 | String 66 | 67 | 68 | 69 | 70 | org.apache.maven.plugins 71 | maven-failsafe-plugin 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /consumer-producer-clients/readme.md: -------------------------------------------------------------------------------- 1 | # Description 2 | Module contains examples of testing Kafka Producer/Consumer APIs with Kafka cluster in memory. 3 | Testing kafka-client application(s) in isolation. 4 | 5 | `NB!`: Some of client properties have specific requirements about how cluster should be setup. 6 | Example: [processing.guarantee:exactly_once](https://kafka.apache.org/22/documentation/streams/developer-guide/config-streams.html#processing-guarantee) 7 | 8 | ## TODO 9 | Module contains examples of testing Kafka Consumer/Producer APIs with: 10 | - [X] embedded kafka + producer/consumer 11 | - [X] embedded kafka + producer/consumer + avro 12 | - [X] cleanup test 13 | 14 | ## Related Issues 15 | * [issue-#26](https://github.com/confluentinc/kafka-streams-examples/issues/26) 16 | 17 | ## Additional Info 18 | * [scalatest-embedded-kafka](https://github.com/manub/scalatest-embedded-kafka) 19 | * [kafka-it-embedded](https://github.com/asmaier/mini-kafka/blob/master/src/test/java/de/am/KafkaProducerIT.java) 20 | * [embedded-single-node-kafka-cluster](https://www.confluent.io/blog/stream-processing-part-2-testing-your-streaming-application) 21 | -------------------------------------------------------------------------------- /consumer-producer-clients/src/main/resources/avro/event.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "no.sysco.testing.kafka.producerconsumer", 3 | "type": "record", 4 | "name": "Event", 5 | "fields": [ 6 | {"name": "id", "type": "string"}, 7 | {"name": "type", "type": "string"}, 8 | {"name": "context", "type": "string"} 9 | ] 10 | } -------------------------------------------------------------------------------- /consumer-producer-clients/src/test/java/no/sysco/testing/kafka/producerconsumer/AvroProducerConsumerIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.producerconsumer; 2 | 3 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 4 | import io.confluent.kafka.serializers.KafkaAvroDeserializer; 5 | import io.confluent.kafka.serializers.KafkaAvroSerializer; 6 | import java.time.Duration; 7 | import java.util.ArrayList; 8 | import java.util.Collections; 9 | import java.util.Properties; 10 | import java.util.UUID; 11 | import java.util.concurrent.ExecutionException; 12 | import java.util.concurrent.TimeUnit; 13 | import java.util.concurrent.TimeoutException; 14 | import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster; 15 | import org.apache.kafka.clients.consumer.ConsumerConfig; 16 | import org.apache.kafka.clients.consumer.ConsumerRecord; 17 | import org.apache.kafka.clients.consumer.ConsumerRecords; 18 | import org.apache.kafka.clients.consumer.KafkaConsumer; 19 | import org.apache.kafka.clients.producer.KafkaProducer; 20 | import org.apache.kafka.clients.producer.ProducerConfig; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | import org.apache.kafka.clients.producer.RecordMetadata; 23 | import org.apache.kafka.common.serialization.StringDeserializer; 24 | import 
org.apache.kafka.common.serialization.StringSerializer; 25 | import org.junit.ClassRule; 26 | import org.junit.Test; 27 | 28 | import static org.awaitility.Awaitility.await; 29 | import static org.junit.Assert.assertEquals; 30 | import static org.junit.Assert.assertTrue; 31 | import static org.junit.Assert.fail; 32 | 33 | public class AvroProducerConsumerIT { 34 | 35 | @ClassRule 36 | public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster(); 37 | 38 | @Test 39 | public void clusterIsRunning() { 40 | assertTrue(CLUSTER.isRunning()); 41 | } 42 | 43 | @Test 44 | public void testAvroProducer() throws InterruptedException, ExecutionException, TimeoutException { 45 | 46 | String topic = "topic1"; 47 | CLUSTER.createTopic(topic); 48 | 49 | final KafkaProducer producer = new KafkaProducer<>(getProducerProperties()); 50 | // async with callback 51 | producer.send( 52 | new ProducerRecord<>( 53 | topic, 54 | "id-1", 55 | Event.newBuilder().setId("id-1").setType("type-1").setContext("context-1").build()), 56 | ((metadata, exception) -> { 57 | if (exception == null) { 58 | System.out.println("SUCCESS bro"); 59 | } else { 60 | fail(); 61 | } 62 | })); 63 | 64 | // sync 65 | final RecordMetadata recordMetadata = 66 | producer 67 | .send( 68 | new ProducerRecord<>( 69 | topic, 70 | "id-2", 71 | Event.newBuilder() 72 | .setId("id-2") 73 | .setType("type-2") 74 | .setContext("context-2") 75 | .build())) 76 | .get(3, TimeUnit.SECONDS); 77 | 78 | assertTrue(recordMetadata.hasOffset()); 79 | assertTrue(recordMetadata.hasTimestamp()); 80 | } 81 | 82 | @Test 83 | public void testAvroConsumer() throws InterruptedException, ExecutionException, TimeoutException { 84 | 85 | String topic = "topic2"; 86 | CLUSTER.createTopic(topic); 87 | 88 | final KafkaProducer producer = new KafkaProducer<>(getProducerProperties()); 89 | producer 90 | .send( 91 | new ProducerRecord<>( 92 | topic, 93 | "id-3", 94 | Event.newBuilder().setId("id-3").setType("type-3").setContext("context-3").build())) 95 | .get(1, TimeUnit.SECONDS); 96 | 97 | final KafkaConsumer consumer = new KafkaConsumer<>(getConsumerProperties()); 98 | consumer.subscribe(Collections.singletonList(topic)); 99 | 100 | final ArrayList events = new ArrayList<>(); 101 | 102 | await() 103 | .atMost(15, TimeUnit.SECONDS) 104 | .untilAsserted( 105 | () -> { 106 | final ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); 107 | for (final ConsumerRecord record : records) events.add(record.value()); 108 | assertEquals(1, events.size()); 109 | }); 110 | } 111 | 112 | private Properties getProducerProperties() { 113 | final Properties properties = new Properties(); 114 | properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 115 | properties.put(ProducerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString()); 116 | properties.put( 117 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()); 118 | properties.put(ProducerConfig.ACKS_CONFIG, "all"); 119 | properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 120 | properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class); 121 | return properties; 122 | } 123 | 124 | private Properties getConsumerProperties() { 125 | final Properties properties = new Properties(); 126 | properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 127 | properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "client0"); 128 | 
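    // With this configuration KafkaAvroDeserializer returns GenericRecord values. To have it
    // deserialize straight into the generated Event class, a consumer would typically also set
    // KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG ("specific.avro.reader") to true.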
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group0"); 129 | properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 130 | properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class); 131 | properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 132 | properties.put( 133 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()); 134 | return properties; 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /consumer-producer-clients/src/test/java/no/sysco/testing/kafka/producerconsumer/SimpleProducerConsumerIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.producerconsumer; 2 | 3 | import java.time.Duration; 4 | import java.util.ArrayList; 5 | import java.util.Collections; 6 | import java.util.Properties; 7 | import java.util.UUID; 8 | import java.util.concurrent.ExecutionException; 9 | import java.util.concurrent.TimeUnit; 10 | import java.util.concurrent.TimeoutException; 11 | import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster; 12 | import org.apache.kafka.clients.consumer.ConsumerConfig; 13 | import org.apache.kafka.clients.consumer.ConsumerRecord; 14 | import org.apache.kafka.clients.consumer.ConsumerRecords; 15 | import org.apache.kafka.clients.consumer.KafkaConsumer; 16 | import org.apache.kafka.clients.producer.KafkaProducer; 17 | import org.apache.kafka.clients.producer.ProducerConfig; 18 | import org.apache.kafka.clients.producer.ProducerRecord; 19 | import org.apache.kafka.clients.producer.RecordMetadata; 20 | import org.apache.kafka.common.serialization.StringDeserializer; 21 | import org.apache.kafka.common.serialization.StringSerializer; 22 | import org.junit.ClassRule; 23 | import org.junit.Test; 24 | 25 | import static org.awaitility.Awaitility.await; 26 | import static org.junit.Assert.assertEquals; 27 | import static org.junit.Assert.assertTrue; 28 | import static org.junit.Assert.fail; 29 | 30 | public class SimpleProducerConsumerIT { 31 | 32 | @ClassRule 33 | public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster(); 34 | 35 | @Test 36 | public void clusterIsRunning() { 37 | assertTrue(CLUSTER.isRunning()); 38 | } 39 | 40 | @Test 41 | public void testSimpleProducer() 42 | throws InterruptedException, ExecutionException, TimeoutException { 43 | String topic = "topic1"; 44 | CLUSTER.createTopic(topic); 45 | 46 | final KafkaProducer producer = new KafkaProducer<>(getProducerProperties()); 47 | 48 | // async with callback 49 | producer.send( 50 | new ProducerRecord<>(topic, "k1", "v1"), 51 | ((metadata, exception) -> { 52 | if (exception != null) fail(); 53 | })); 54 | 55 | // sync 56 | final RecordMetadata recordMetadata = 57 | producer.send(new ProducerRecord<>(topic, "k2", "v2")).get(3, TimeUnit.SECONDS); 58 | 59 | assertTrue(recordMetadata.hasOffset()); 60 | assertTrue(recordMetadata.hasTimestamp()); 61 | } 62 | 63 | @Test 64 | public void testSimpleConsumer() 65 | throws InterruptedException, ExecutionException, TimeoutException { 66 | 67 | String topic = "topic2"; 68 | CLUSTER.createTopic(topic); 69 | 70 | final KafkaProducer producer = new KafkaProducer<>(getProducerProperties()); 71 | producer.send(new ProducerRecord<>(topic, "k3", "v3")).get(1, TimeUnit.SECONDS); 72 | 73 | final KafkaConsumer consumer = new KafkaConsumer<>(getConsumerProperties()); 74 | 
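    // The poll further below is retried by Awaitility's untilAsserted until the produced record
    // shows up (or the 15-second timeout elapses): a single poll() right after subscribing can
    // legitimately return no records while the consumer is still being assigned its partitions.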
consumer.subscribe(Collections.singletonList(topic)); 75 | 76 | final ArrayList values = new ArrayList<>(); 77 | await() 78 | .atMost(15, TimeUnit.SECONDS) 79 | .untilAsserted( 80 | () -> { 81 | final ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); 82 | for (final ConsumerRecord record : records) 83 | values.add(record.value()); 84 | assertEquals(1, values.size()); 85 | }); 86 | } 87 | 88 | private Properties getProducerProperties() { 89 | final Properties properties = new Properties(); 90 | properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 91 | properties.put(ProducerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString()); 92 | properties.put(ProducerConfig.ACKS_CONFIG, "all"); 93 | properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 94 | properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 95 | return properties; 96 | } 97 | 98 | private Properties getConsumerProperties() { 99 | final Properties properties = new Properties(); 100 | properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 101 | properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "client0"); 102 | properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group0"); 103 | properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 104 | properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 105 | properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 106 | return properties; 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /consumer-producer-clients/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 4 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p [%t] %m (%c)%n 5 | -------------------------------------------------------------------------------- /data-pipeline/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysco-middleware/kafka-testing/f263f8c92dad3b176e116dadc77a3263a0e9ec6b/data-pipeline/data_pipeline.png -------------------------------------------------------------------------------- /data-pipeline/database.json: -------------------------------------------------------------------------------- 1 | { 2 | "messages": [ 3 | { 4 | "id": "1", 5 | "from": "user-id-1", 6 | "to": "user-id-2", 7 | "text": "hello world" 8 | } 9 | ] 10 | } -------------------------------------------------------------------------------- /data-pipeline/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:5.1.1 5 | ports: 6 | - 2181:2181 7 | environment: 8 | ZOOKEEPER_CLIENT_PORT: 2181 9 | ZOOKEEPER_TICK_TIME: 2000 10 | kafka: 11 | image: confluentinc/cp-kafka:5.1.1 12 | ports: 13 | - 9092:9092 14 | - 29092:29092 15 | environment: 16 | KAFKA_BROKER_ID: 1 17 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 18 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 19 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092 20 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 21 | 
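      # Not set in this compose file: clients that use transactions or Kafka Streams with
      # processing.guarantee=exactly_once additionally need the transaction state log to fit a
      # single broker, e.g.:
      #   KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      #   KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1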
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 22 | depends_on: 23 | - zookeeper 24 | 25 | schema-registry: 26 | image: confluentinc/cp-schema-registry:5.1.1 27 | ports: 28 | - 8081:8081 29 | environment: 30 | SCHEMA_REGISTRY_HOST_NAME: localhost 31 | SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 32 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9092 33 | depends_on: 34 | - kafka 35 | 36 | # DATA PIPELINE SERVICES 37 | 38 | # https://github.com/clue/docker-json-server 39 | db-mock: 40 | image: zhenik/json-server 41 | environment: 42 | ID_MAP: id 43 | ports: 44 | - "3000:80" 45 | volumes: 46 | - ./database.json:/data/db.json 47 | 48 | http-producer: 49 | image: zhenik/http-producer:data-pipeline 50 | build: http-producer 51 | environment: 52 | APPLICATION_PORT: 8080 53 | ADMIN_PORT: 8081 54 | KAFKA_BOOTSTRAP_SERVERS: kafka:9092 55 | SCHEMA_REGISTRY_URL: http://schema-registry:8081 56 | SINK_TOPIC: events-message-v1 57 | ports: 58 | - "8080:8080" 59 | depends_on: 60 | - zookeeper 61 | - kafka 62 | - schema-registry 63 | 64 | http-materializer: 65 | image: zhenik/http-materializer:data-pipeline 66 | environment: 67 | KAFKA_BOOTSTRAP_SERVERS: kafka:9092 68 | SCHEMA_REGISTRY_URL: http://schema-registry:8081 69 | SOURCE_TOPIC: events-message-v1 70 | DATABASE_REST_SERVICE_URL: http://db-mock:80/messages 71 | restart: always 72 | depends_on: 73 | - zookeeper 74 | - kafka 75 | - db-mock 76 | - schema-registry -------------------------------------------------------------------------------- /data-pipeline/http-materializer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8 2 | COPY ./target/http-materializer-1.0-SNAPSHOT.jar . 3 | CMD java -jar http-materializer-1.0-SNAPSHOT.jar -------------------------------------------------------------------------------- /data-pipeline/http-materializer/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | data-pipeline 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | 12 | http-materializer 13 | 14 | 15 | 16 | 17 | no.sysco.testing 18 | schema 19 | 20 | 21 | 22 | com.typesafe 23 | config 24 | 25 | 26 | 27 | com.squareup.okhttp3 28 | okhttp 29 | 3.14.0 30 | 31 | 32 | 33 | org.apache.kafka 34 | kafka-streams 35 | 36 | 37 | io.confluent 38 | kafka-streams-avro-serde 39 | 40 | 41 | com.fasterxml.jackson.core 42 | jackson-databind 43 | ${jackson.version} 44 | 45 | 46 | 47 | junit 48 | junit 49 | test 50 | 51 | 52 | no.sysco.testing 53 | embedded-cluster 54 | test 55 | 56 | 57 | 58 | org.slf4j 59 | slf4j-log4j12 60 | 61 | 62 | org.slf4j 63 | slf4j-api 64 | 65 | 66 | com.github.tomakehurst 67 | wiremock-jre8 68 | test 69 | 70 | 71 | org.awaitility 72 | awaitility 73 | test 74 | 75 | 76 | 77 | 78 | 79 | 80 | org.apache.maven.plugins 81 | maven-shade-plugin 82 | 83 | 84 | package 85 | 86 | shade 87 | 88 | 89 | 90 | 92 | 94 | reference.conf 95 | 96 | 98 | no.sysco.testing.kafka.pipeline.materializer.MaterializerApplication 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | org.apache.maven.plugins 109 | maven-failsafe-plugin 110 | 111 | 112 | 113 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/MaterializerApplication.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer; 2 | 3 | import com.typesafe.config.Config; 4 | 
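// The inMemory constructor flag below swaps DatabaseWebServiceRest for DatabaseWebServiceInMemmory,
// so ApplicationIT can run the whole pipeline against embedded Kafka without the json-server container.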
import com.typesafe.config.ConfigFactory; 5 | 6 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageRepresentationTransformer; 7 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.kafka.KafkaMessageMaterializer; 8 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.service.DatabaseWebService; 9 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.service.DatabaseWebServiceInMemmory; 10 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.service.DatabaseWebServiceRest; 11 | 12 | public class MaterializerApplication { 13 | 14 | private final DatabaseWebService dbWebService; 15 | private final MessageRepresentationTransformer transformer; 16 | private final KafkaMessageMaterializer kafkaMessageMaterializer; 17 | 18 | public MaterializerApplication(MaterializerConfig materializerConfig, MessageRepresentationTransformer transformer, boolean inMemory) { 19 | this.dbWebService = 20 | inMemory 21 | ? new DatabaseWebServiceInMemmory() 22 | : new DatabaseWebServiceRest(materializerConfig); 23 | this.transformer = new MessageRepresentationTransformer(); 24 | this.kafkaMessageMaterializer = 25 | new KafkaMessageMaterializer(materializerConfig, dbWebService, transformer); 26 | } 27 | 28 | public static void main(String[] args) { 29 | // typesafe conf load 30 | final Config config = ConfigFactory.load(); 31 | final MaterializerConfig materializerConfig = MaterializerConfig.loadConfig(config); 32 | final MessageRepresentationTransformer messageRepresentationTransformer = 33 | new MessageRepresentationTransformer(); 34 | 35 | final MaterializerApplication materializerApplication = 36 | new MaterializerApplication(materializerConfig,messageRepresentationTransformer, false); 37 | 38 | materializerApplication.start(); 39 | } 40 | 41 | public void stop() { 42 | kafkaMessageMaterializer.stop(); 43 | } 44 | 45 | public void start() { 46 | kafkaMessageMaterializer.run(); 47 | Runtime.getRuntime().addShutdownHook(new Thread(kafkaMessageMaterializer::stop)); 48 | } 49 | 50 | // for test 51 | DatabaseWebService getDbWebService() { 52 | return dbWebService; 53 | } 54 | 55 | KafkaMessageMaterializer getKafkaMessageMaterializer() { 56 | return kafkaMessageMaterializer; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/MaterializerConfig.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer; 2 | 3 | import com.typesafe.config.Config; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | public class MaterializerConfig { 8 | private static Logger log = LoggerFactory.getLogger(MaterializerConfig.class.getName()); 9 | 10 | public final String name; 11 | public final KafkaConfig kafkaConfig; 12 | public final DatabaseRestServiceConfig databaseRestServiceConfig; 13 | 14 | // leave default, to make it testable 15 | MaterializerConfig( 16 | String name, KafkaConfig kafkaConfig, DatabaseRestServiceConfig databaseRestServiceConfig) { 17 | this.name = name; 18 | this.kafkaConfig = kafkaConfig; 19 | this.databaseRestServiceConfig = databaseRestServiceConfig; 20 | } 21 | 22 | public static MaterializerConfig loadConfig(Config config) { 23 | final String name = config.getString("materializer.name"); 24 | 25 | final String bootstrapServers = config.getString("materializer.kafka.bootstrap-servers"); 26 | 
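    // Each of these keys is defined in src/main/resources/application.conf, where the value can be
    // overridden through an environment variable (e.g. bootstrap-servers = ${?KAFKA_BOOTSTRAP_SERVERS}).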
final String schemaRegistryUrl = config.getString("materializer.kafka.schema-registry-url"); 27 | final String sourceTopic = config.getString("materializer.kafka.source-topic"); 28 | 29 | final String dbServiceUrl = config.getString("materializer.database-rest-service.url"); 30 | 31 | final MaterializerConfig materializerConfig = 32 | new MaterializerConfig( 33 | name, 34 | new KafkaConfig(bootstrapServers, schemaRegistryUrl, sourceTopic), 35 | new DatabaseRestServiceConfig(dbServiceUrl)); 36 | 37 | log.info(materializerConfig.toString()); 38 | 39 | return materializerConfig; 40 | } 41 | 42 | @Override 43 | public String toString() { 44 | return "MaterializerConfig{" 45 | + "name='" 46 | + name 47 | + '\'' 48 | + ", kafkaConfig=" 49 | + kafkaConfig 50 | + ", databaseRestServiceConfig=" 51 | + databaseRestServiceConfig 52 | + '}'; 53 | } 54 | 55 | public static final class KafkaConfig { 56 | public final String bootstrapServers; 57 | public final String schemaRegistryUrl; 58 | public final String sourceTopic; 59 | 60 | KafkaConfig(String bootstrapServers, String schemaRegistryUrl, String sourceTopic) { 61 | this.bootstrapServers = bootstrapServers; 62 | this.schemaRegistryUrl = schemaRegistryUrl; 63 | this.sourceTopic = sourceTopic; 64 | } 65 | 66 | @Override 67 | public String toString() { 68 | return "KafkaConfig{" 69 | + "bootstrapServers='" 70 | + bootstrapServers 71 | + '\'' 72 | + ", schemaRegistryUrl='" 73 | + schemaRegistryUrl 74 | + '\'' 75 | + ", sourceTopic='" 76 | + sourceTopic 77 | + '\'' 78 | + '}'; 79 | } 80 | } 81 | 82 | public static final class DatabaseRestServiceConfig { 83 | public final String url; 84 | 85 | DatabaseRestServiceConfig(String url) { 86 | this.url = url; 87 | } 88 | 89 | @Override 90 | public String toString() { 91 | return "DatabaseRestServiceConfig{" + "url='" + url + '\'' + '}'; 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/domain/MessageJsonRepresentation.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.domain; 2 | 3 | public class MessageJsonRepresentation { 4 | 5 | private String id; 6 | private String from; 7 | private String to; 8 | private String text; 9 | 10 | public MessageJsonRepresentation() {} 11 | 12 | public MessageJsonRepresentation(String id, String from, String to, String text) { 13 | this.id = id; 14 | this.from = from; 15 | this.to = to; 16 | this.text = text; 17 | } 18 | 19 | public String getId() { 20 | return id; 21 | } 22 | 23 | public void setId(String id) { 24 | this.id = id; 25 | } 26 | 27 | public String getFrom() { 28 | return from; 29 | } 30 | 31 | public void setFrom(String from) { 32 | this.from = from; 33 | } 34 | 35 | public String getTo() { 36 | return to; 37 | } 38 | 39 | public void setTo(String to) { 40 | this.to = to; 41 | } 42 | 43 | public String getText() { 44 | return text; 45 | } 46 | 47 | public void setText(String text) { 48 | this.text = text; 49 | } 50 | 51 | @Override 52 | public String toString() { 53 | return "MessageJsonRepresentation{" 54 | + "id='" 55 | + id 56 | + '\'' 57 | + ", from='" 58 | + from 59 | + '\'' 60 | + ", to='" 61 | + to 62 | + '\'' 63 | + ", text='" 64 | + text 65 | + '\'' 66 | + '}'; 67 | } 68 | 69 | public String json() { 70 | return "{" 71 | + "\"id\":" 72 | + "\"" 73 | + id 74 | + "\"," 75 | + "\"from\":" 76 | + "\"" 77 | + from 78 | + 
"\"," 79 | + "\"to\":" 80 | + "\"" 81 | + to 82 | + "\"," 83 | + "\"text\":" 84 | + "\"" 85 | + text 86 | + "\"" 87 | + "}"; 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/domain/MessageRepresentationTransformer.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.domain; 2 | 3 | import no.sysco.testing.kafka.pipeline.avro.Message; 4 | 5 | public final class MessageRepresentationTransformer { 6 | public MessageJsonRepresentation transform(final Message message) { 7 | return new MessageJsonRepresentation( 8 | message.getId(), message.getFrom(), message.getTo(), message.getText()); 9 | } 10 | 11 | public Message transform(final MessageJsonRepresentation messageJsonRepresentation) { 12 | return Message.newBuilder() 13 | .setId(messageJsonRepresentation.getId()) 14 | .setFrom(messageJsonRepresentation.getFrom()) 15 | .setTo(messageJsonRepresentation.getTo()) 16 | .setText(messageJsonRepresentation.getText()) 17 | .build(); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/infrastructure/kafka/KafkaMessageMaterializer.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.infrastructure.kafka; 2 | 3 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 4 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde; 5 | import java.util.Collections; 6 | import java.util.Map; 7 | import java.util.Optional; 8 | import java.util.Properties; 9 | 10 | import no.sysco.testing.kafka.pipeline.avro.Message; 11 | import no.sysco.testing.kafka.pipeline.materializer.MaterializerConfig; 12 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageRepresentationTransformer; 13 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.service.DatabaseWebService; 14 | import org.apache.kafka.common.serialization.Serde; 15 | import org.apache.kafka.common.serialization.Serdes; 16 | import org.apache.kafka.streams.KafkaStreams; 17 | import org.apache.kafka.streams.StreamsBuilder; 18 | import org.apache.kafka.streams.StreamsConfig; 19 | import org.apache.kafka.streams.Topology; 20 | import org.apache.kafka.streams.kstream.Consumed; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | public class KafkaMessageMaterializer implements Runnable { 25 | private static final Logger log = LoggerFactory.getLogger(KafkaMessageMaterializer.class.getName()); 26 | 27 | private final MaterializerConfig config; 28 | private final String sourceTopic; 29 | private final DatabaseWebService databaseWebService; 30 | private final KafkaStreams kafkaStreams; 31 | private final MessageRepresentationTransformer transformer; 32 | 33 | public KafkaMessageMaterializer( 34 | final MaterializerConfig config, 35 | final DatabaseWebService databaseWebService, 36 | final MessageRepresentationTransformer transformer) { 37 | 38 | this.config = config; 39 | this.databaseWebService = databaseWebService; 40 | this.transformer = transformer; 41 | this.sourceTopic = config.kafkaConfig.sourceTopic; 42 | 43 | final Properties properties = new Properties(); 44 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, config.name + 
"steam-processing-v1"); 45 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, config.kafkaConfig.bootstrapServers); 46 | properties.put( 47 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 48 | config.kafkaConfig.schemaRegistryUrl); 49 | properties.put( 50 | StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); 51 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class); 52 | this.kafkaStreams = new KafkaStreams(topology(), properties); 53 | kafkaStreams.setUncaughtExceptionHandler((Thread t, Throwable e) -> 54 | log.error(e.getMessage()) 55 | ); 56 | } 57 | 58 | static Topology topology( 59 | final String sourceTopic, 60 | final Serde messageSerdeValue, 61 | final DatabaseWebService databaseWebService, 62 | final MessageRepresentationTransformer transformer) { 63 | final StreamsBuilder builder = new StreamsBuilder(); 64 | 65 | builder.stream(sourceTopic, Consumed.with(Serdes.String(), messageSerdeValue)) 66 | .mapValues(transformer::transform) 67 | .foreach((id, message) -> databaseWebService.saveMessage(message)); 68 | 69 | return builder.build(); 70 | } 71 | 72 | private Topology topology() { 73 | final Map schema = 74 | Collections.singletonMap( 75 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 76 | config.kafkaConfig.schemaRegistryUrl); 77 | final SpecificAvroSerde messageSerde = new SpecificAvroSerde<>(); 78 | messageSerde.configure(schema, false); 79 | return topology(sourceTopic, messageSerde, databaseWebService, transformer); 80 | } 81 | 82 | @Override 83 | public void run() { 84 | kafkaStreams.start(); 85 | } 86 | 87 | public void stop() { 88 | Optional.ofNullable(kafkaStreams).ifPresent(KafkaStreams::close); 89 | } 90 | 91 | public KafkaStreams.State getState() { 92 | return kafkaStreams.state(); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/infrastructure/service/DatabaseWebService.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.infrastructure.service; 2 | 3 | import java.util.List; 4 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageJsonRepresentation; 5 | 6 | public interface DatabaseWebService { 7 | void saveMessage(MessageJsonRepresentation message); 8 | 9 | List getMessages(); 10 | } 11 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/infrastructure/service/DatabaseWebServiceInMemmory.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.infrastructure.service; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageJsonRepresentation; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | public class DatabaseWebServiceInMemmory implements DatabaseWebService { 10 | private static final Logger log = LoggerFactory.getLogger(DatabaseWebServiceInMemmory.class.getName()); 11 | private List messages; 12 | 13 | public DatabaseWebServiceInMemmory() { 14 | this.messages = new ArrayList<>(); 15 | } 16 | 17 | @Override 18 | public void saveMessage(MessageJsonRepresentation message) { 19 | messages.add(message); 20 | 
log.info("Message added:" + message); 21 | } 22 | 23 | @Override 24 | public List getMessages() { 25 | return messages; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/java/no/sysco/testing/kafka/pipeline/materializer/infrastructure/service/DatabaseWebServiceRest.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer.infrastructure.service; 2 | 3 | import java.io.IOException; 4 | import java.util.List; 5 | 6 | import no.sysco.testing.kafka.pipeline.materializer.MaterializerConfig; 7 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageJsonRepresentation; 8 | import okhttp3.MediaType; 9 | import okhttp3.OkHttpClient; 10 | import okhttp3.Request; 11 | import okhttp3.RequestBody; 12 | import okhttp3.Response; 13 | import org.slf4j.Logger; 14 | import org.slf4j.LoggerFactory; 15 | 16 | public class DatabaseWebServiceRest implements DatabaseWebService { 17 | private static final Logger log = LoggerFactory.getLogger(DatabaseWebServiceRest.class.getName()); 18 | private final String url; 19 | private final OkHttpClient client; 20 | private final MediaType JSON = MediaType.get("application/json; charset=utf-8"); 21 | 22 | public DatabaseWebServiceRest(final MaterializerConfig applicationConfig) { 23 | this.url = applicationConfig.databaseRestServiceConfig.url; 24 | this.client = new OkHttpClient(); 25 | log.info("Database REST service created: {}", url); 26 | } 27 | 28 | @Override 29 | public void saveMessage(final MessageJsonRepresentation message) { 30 | RequestBody body = RequestBody.create(JSON, message.json()); 31 | Request request = 32 | new Request.Builder() 33 | .addHeader("Accept", "application/json; charset=utf-8") 34 | .addHeader("Content-Type", "application/json; charset=utf-8") 35 | .url(url) 36 | .post(body) 37 | .build(); 38 | log.info("Request created {}", request); 39 | try (Response response = client.newCall(request).execute()) { 40 | final int statusCode = response.code(); 41 | log.info("Request status {}", statusCode); 42 | if (statusCode != 201) { 43 | log.error("Request failed with status code: {}", statusCode); 44 | throw new RuntimeException("Request failed with status " + statusCode); 45 | } 46 | log.info("Response received successfully: {}", statusCode); 47 | } catch (IOException e) { 48 | e.printStackTrace(); 49 | log.error(e.getMessage()); 50 | throw new RuntimeException("Request failed ", e); 51 | } 52 | } 53 | 54 | // ignored 55 | @Override 56 | public List getMessages() { 57 | return null; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | materializer { 2 | name = "http-materializer" 3 | 4 | kafka { 5 | bootstrap-servers = "127.0.0.1:29092" 6 | bootstrap-servers = ${?KAFKA_BOOTSTRAP_SERVERS} 7 | schema-registry-url: "http://127.0.0.1:8081" 8 | schema-registry-url: ${?SCHEMA_REGISTRY_URL} 9 | source-topic = "events-message-v1" 10 | source-topic = ${?SOURCE_TOPIC} 11 | } 12 | 13 | database-rest-service { 14 | url = "http://127.0.0.1:3000/messages" 15 | url = ${?DATABASE_REST_SERVICE_URL} 16 | } 17 | } -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/main/resources/log4j.properties: 
-------------------------------------------------------------------------------- 1 | # Set root logger level to DEBUG and its only appender to A1. 2 | log4j.rootLogger=INFO, A1 3 | # A1 is set to be a ConsoleAppender. 4 | log4j.appender.A1=org.apache.log4j.ConsoleAppender 5 | # A1 uses PatternLayout. 6 | log4j.appender.A1.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/test/java/no/sysco/testing/kafka/pipeline/materializer/ApplicationIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer; 2 | 3 | import com.github.tomakehurst.wiremock.junit.WireMockRule; 4 | import java.util.concurrent.ExecutionException; 5 | import java.util.concurrent.TimeUnit; 6 | import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster; 7 | import no.sysco.testing.kafka.pipeline.avro.Message; 8 | import no.sysco.testing.kafka.pipeline.materializer.domain.MessageRepresentationTransformer; 9 | import no.sysco.testing.kafka.pipeline.materializer.infrastructure.service.DatabaseWebService; 10 | import org.apache.kafka.clients.producer.KafkaProducer; 11 | import org.apache.kafka.clients.producer.ProducerRecord; 12 | import org.apache.kafka.streams.KafkaStreams; 13 | import org.junit.Before; 14 | import org.junit.ClassRule; 15 | import org.junit.Rule; 16 | import org.junit.Test; 17 | 18 | import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; 19 | import static com.github.tomakehurst.wiremock.client.WireMock.post; 20 | import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; 21 | import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; 22 | import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; 23 | import static com.github.tomakehurst.wiremock.client.WireMock.verify; 24 | import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; 25 | import static org.awaitility.Awaitility.await; 26 | import static org.junit.Assert.assertEquals; 27 | import static org.junit.Assert.assertFalse; 28 | import static org.junit.Assert.assertTrue; 29 | 30 | public class ApplicationIT { 31 | 32 | @ClassRule 33 | public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster(); 34 | 35 | private static final String JSON = "application/json; charset=utf-8"; 36 | @Rule public WireMockRule wireMockRule = new WireMockRule(wireMockConfig().dynamicPort()); 37 | 38 | @Before 39 | public void setup() {} 40 | 41 | @Test 42 | public void send_InMemory_3Msg_success() throws ExecutionException, InterruptedException { 43 | 44 | String topic = "topic1"; 45 | CLUSTER.createTopic(topic); 46 | 47 | // Arrange 48 | // config 49 | final String dbRestServiceUrl = "http://whatever:1234/messages"; 50 | final MaterializerConfig.KafkaConfig kafkaConfig = 51 | new MaterializerConfig.KafkaConfig( 52 | CLUSTER.bootstrapServers(), CLUSTER.schemaRegistryUrl(), topic); 53 | final MaterializerConfig.DatabaseRestServiceConfig dbRestServiceConfig = 54 | new MaterializerConfig.DatabaseRestServiceConfig(dbRestServiceUrl); 55 | final MaterializerConfig testConfig = 56 | new MaterializerConfig("test", kafkaConfig, dbRestServiceConfig); 57 | final MaterializerApplication materializerApplication = 58 | new MaterializerApplication(testConfig, new 
MessageRepresentationTransformer(), true); 59 | 60 | // send 3 records 61 | final KafkaProducer messageProducer = 62 | TestUtils.getMessageProducer(kafkaConfig); 63 | for (int i = 1; i < 4; i++) { 64 | final Message msg = 65 | Message.newBuilder() 66 | .setId(i + "") 67 | .setFrom("from-" + i) 68 | .setTo("to-" + i) 69 | .setText("text-" + i) 70 | .build(); 71 | messageProducer.send(new ProducerRecord<>(topic, msg.getId(), msg)).get(); // blocking 72 | } 73 | 74 | // Act 75 | final DatabaseWebService dbWebService = materializerApplication.getDbWebService(); 76 | // verify that 77 | assertEquals(0, dbWebService.getMessages().size()); 78 | // start app 79 | materializerApplication.start(); 80 | 81 | // Assert 82 | await().atMost(15, TimeUnit.SECONDS).until(() -> dbWebService.getMessages().size() == 3); 83 | 84 | // stop stream app 85 | materializerApplication.stop(); 86 | } 87 | 88 | @Test 89 | public void send_3Msg_success() throws ExecutionException, InterruptedException { 90 | String topic = "topic2"; 91 | CLUSTER.createTopic(topic); 92 | 93 | // Arrange 94 | // config 95 | final String baseUrl = wireMockRule.baseUrl(); 96 | final String dbRestServiceUrl = baseUrl + "/messages"; 97 | final MaterializerConfig.KafkaConfig kafkaConfig = 98 | new MaterializerConfig.KafkaConfig( 99 | CLUSTER.bootstrapServers(), CLUSTER.schemaRegistryUrl(), topic); 100 | final MaterializerConfig.DatabaseRestServiceConfig dbRestServiceConfig = 101 | new MaterializerConfig.DatabaseRestServiceConfig(dbRestServiceUrl); 102 | final MaterializerConfig testConfig = 103 | new MaterializerConfig("test", kafkaConfig, dbRestServiceConfig); 104 | 105 | final MaterializerApplication materializerApplication = 106 | new MaterializerApplication(testConfig, new MessageRepresentationTransformer(), false); 107 | 108 | // wiremock stub 109 | stubFor( 110 | post(urlEqualTo("/messages")) 111 | // .withHeader("Accept", equalTo(JSON)) 112 | // .withHeader("Content-Type", equalTo(JSON)) 113 | .willReturn(aResponse().withStatus(201).withHeader("Content-Type", JSON))); 114 | 115 | // send 3 records 116 | final KafkaProducer messageProducer = 117 | TestUtils.getMessageProducer(kafkaConfig); 118 | for (int i = 1; i < 4; i++) { 119 | final Message msg = 120 | Message.newBuilder() 121 | .setId(i + "") 122 | .setFrom("from-" + i) 123 | .setTo("to-" + i) 124 | .setText("text-" + i) 125 | .build(); 126 | messageProducer.send(new ProducerRecord<>(topic, msg.getId(), msg)).get(); // blocking 127 | } 128 | 129 | // Act 130 | materializerApplication.start(); 131 | 132 | // Assert 133 | await() 134 | .atMost(15, TimeUnit.SECONDS) 135 | .untilAsserted( 136 | () -> 137 | // verify 3 POST requests happen 138 | verify(3, postRequestedFor(urlEqualTo("/messages")))); 139 | 140 | materializerApplication.stop(); 141 | } 142 | 143 | @Test 144 | public void send_1Msg_3times_failAfter_2nd() throws ExecutionException, InterruptedException { 145 | String topic = "topic3"; 146 | CLUSTER.createTopic(topic); 147 | 148 | // Arrange 149 | // config 150 | final String baseUrl = wireMockRule.baseUrl(); 151 | final String dbRestServiceUrl = baseUrl + "/messages"; 152 | final MaterializerConfig.KafkaConfig kafkaConfig = 153 | new MaterializerConfig.KafkaConfig( 154 | CLUSTER.bootstrapServers(), CLUSTER.schemaRegistryUrl(), topic); 155 | final MaterializerConfig.DatabaseRestServiceConfig dbRestServiceConfig = 156 | new MaterializerConfig.DatabaseRestServiceConfig(dbRestServiceUrl); 157 | final MaterializerConfig testConfig = 158 | new MaterializerConfig("test", 
kafkaConfig, dbRestServiceConfig); 159 | 160 | final MaterializerApplication materializerApplication = 161 | new MaterializerApplication(testConfig, new MessageRepresentationTransformer(), false); 162 | 163 | // start app 164 | materializerApplication.start(); 165 | final KafkaStreams.State runningState = 166 | materializerApplication.getKafkaMessageMaterializer().getState(); 167 | 168 | assertTrue(runningState.isRunning()); 169 | 170 | // init producer 171 | final KafkaProducer messageProducer = 172 | TestUtils.getMessageProducer(kafkaConfig); 173 | final Message msg = 174 | Message.newBuilder() 175 | .setId("id-1") 176 | .setFrom("from-1") 177 | .setTo("to-1") 178 | .setText("text-1") 179 | .build(); 180 | 181 | // wiremock stub 182 | stubFor( 183 | post(urlEqualTo("/messages")) 184 | .willReturn(aResponse().withStatus(201).withHeader("Content-Type", JSON))); 185 | // send record 186 | messageProducer.send(new ProducerRecord<>(topic, msg.getId(), msg)).get(); // blocking 187 | 188 | await() 189 | .atMost(15, TimeUnit.SECONDS) 190 | .untilAsserted(() -> verify(1, postRequestedFor(urlEqualTo("/messages")))); 191 | 192 | stubFor( 193 | post(urlEqualTo("/messages")) 194 | .willReturn( 195 | aResponse() 196 | // conflict, entity with id already exist 197 | .withStatus(409) 198 | .withHeader("Content-Type", JSON))); 199 | 200 | // send record again 2nd time 201 | messageProducer.send(new ProducerRecord<>(topic, msg.getId(), msg)).get(); // blocking 202 | // send record again 3rd time 203 | messageProducer.send(new ProducerRecord<>(topic, msg.getId(), msg)).get(); 204 | 205 | await() 206 | .atMost(15, TimeUnit.SECONDS) 207 | // ignore exceptions, otherwise test will fail 208 | .ignoreExceptions() 209 | .untilAsserted( 210 | () -> { 211 | System.out.println( 212 | "STATE: " 213 | + materializerApplication 214 | .getKafkaMessageMaterializer() 215 | .getState() 216 | .isRunning()); 217 | // assert that streams is not running due to exception 218 | assertFalse( 219 | materializerApplication.getKafkaMessageMaterializer().getState().isRunning()); 220 | }); 221 | 222 | // 3 msg produced but stream failed and stopped after 2nd msg was processed -> only 2 request 223 | // were made 224 | verify(2, postRequestedFor(urlEqualTo("/messages"))); 225 | 226 | materializerApplication.stop(); 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /data-pipeline/http-materializer/src/test/java/no/sysco/testing/kafka/pipeline/materializer/TestUtils.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.materializer; 2 | 3 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 4 | import io.confluent.kafka.serializers.KafkaAvroSerializer; 5 | import java.util.Properties; 6 | import java.util.UUID; 7 | import no.sysco.testing.kafka.pipeline.avro.Message; 8 | import org.apache.kafka.clients.producer.KafkaProducer; 9 | import org.apache.kafka.clients.producer.ProducerConfig; 10 | import org.apache.kafka.common.serialization.StringSerializer; 11 | 12 | public class TestUtils { 13 | 14 | public static KafkaProducer getMessageProducer( 15 | MaterializerConfig.KafkaConfig kafkaConfig) { 16 | return new KafkaProducer<>( 17 | getProperties(kafkaConfig.bootstrapServers, kafkaConfig.schemaRegistryUrl)); 18 | } 19 | 20 | public static Properties getProperties(String bootstrapservers, String schemaUrl) { 21 | final Properties properties = new Properties(); 22 | 
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapservers); 23 | properties.put(ProducerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString()); 24 | properties.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaUrl); 25 | properties.put(ProducerConfig.ACKS_CONFIG, "all"); 26 | properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 27 | properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class); 28 | return properties; 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8 2 | COPY ./target/http-producer-1.0-SNAPSHOT.jar . 3 | COPY ./src/main/resources/config.yml . 4 | CMD java -jar http-producer-1.0-SNAPSHOT.jar server config.yml -------------------------------------------------------------------------------- /data-pipeline/http-producer/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | data-pipeline 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | http-producer 12 | 13 | 14 | 1.3.3 15 | 16 | 17 | 18 | 19 | 20 | no.sysco.testing 21 | schema 22 | 23 | 24 | io.dropwizard 25 | dropwizard-core 26 | ${dropwizard.core.version} 27 | 28 | 29 | org.apache.kafka 30 | kafka-clients 31 | 32 | 33 | io.confluent 34 | kafka-avro-serializer 35 | 36 | 37 | 38 | 39 | junit 40 | junit 41 | test 42 | 43 | 44 | org.awaitility 45 | awaitility 46 | test 47 | 48 | 49 | no.sysco.testing 50 | embedded-cluster 51 | test 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | io.dropwizard 65 | dropwizard-testing 66 | ${dropwizard.core.version} 67 | test 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | org.apache.maven.plugins 76 | maven-shade-plugin 77 | 78 | 79 | 80 | package 81 | 82 | shade 83 | 84 | 85 | 86 | 87 | 88 | 90 | 92 | no.sysco.testing.kafka.pipeline.producer.ProducerRestApplication 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | org.apache.maven.plugins 102 | maven-failsafe-plugin 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/ApplicationHealthCheck.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer; 2 | 3 | import com.codahale.metrics.health.HealthCheck; 4 | 5 | public class ApplicationHealthCheck extends HealthCheck { 6 | @Override 7 | protected Result check() { 8 | return Result.healthy(); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/ProducerRestApplication.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer; 2 | 3 | import io.dropwizard.configuration.EnvironmentVariableSubstitutor; 4 | import io.dropwizard.configuration.SubstitutingSourceProvider; 5 | import io.dropwizard.setup.Bootstrap; 6 | import io.dropwizard.setup.Environment; 7 | import no.sysco.testing.kafka.pipeline.producer.domain.MessageRepresentationTransformer; 8 | import no.sysco.testing.kafka.pipeline.producer.infrastructure.kafka.KafkaMessageProducer; 9 | import no.sysco.testing.kafka.pipeline.producer.interfaces.rest.MessageResources; 10 | import 
org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | public class ProducerRestApplication extends io.dropwizard.Application { 14 | 15 | private static Logger log = LoggerFactory.getLogger(ProducerRestApplication.class); 16 | 17 | public static void main(String[] args) throws Exception { 18 | new ProducerRestApplication().run(args); 19 | } 20 | 21 | // enable environment variables 22 | @Override 23 | public void initialize(Bootstrap bootstrap) { 24 | bootstrap.setConfigurationSourceProvider( 25 | new SubstitutingSourceProvider( 26 | bootstrap.getConfigurationSourceProvider(), new EnvironmentVariableSubstitutor(false))); 27 | super.initialize(bootstrap); 28 | } 29 | 30 | @Override 31 | public void run(ProducerRestConfig producerRestConfig, Environment environment) { 32 | log.info("Configuration: {}\n", producerRestConfig); 33 | environment 34 | .healthChecks() 35 | .register(producerRestConfig.getName() + "HealthCheck", new ApplicationHealthCheck()); 36 | 37 | KafkaMessageProducer messageProducer = new KafkaMessageProducer(producerRestConfig); 38 | MessageRepresentationTransformer transformer = new MessageRepresentationTransformer(); 39 | MessageResources messageResources = new MessageResources(messageProducer, transformer); 40 | 41 | // register REST endpoints 42 | environment.jersey().register(messageResources); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/ProducerRestConfig.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer; 2 | 3 | import com.fasterxml.jackson.annotation.JsonProperty; 4 | import io.dropwizard.Configuration; 5 | import javax.validation.Valid; 6 | import javax.validation.constraints.NotNull; 7 | 8 | public class ProducerRestConfig extends Configuration { 9 | 10 | @Valid 11 | @JsonProperty("name") 12 | private String name; 13 | 14 | @Valid 15 | @JsonProperty("kafka") 16 | private KafkaClientFactory kafkaClientFactory; 17 | 18 | public ProducerRestConfig(String name, KafkaClientFactory kafkaClientFactory) { 19 | this.name = name; 20 | this.kafkaClientFactory = kafkaClientFactory; 21 | } 22 | 23 | public ProducerRestConfig() {} 24 | 25 | public String getName() { 26 | return name; 27 | } 28 | 29 | public KafkaClientFactory getKafkaClientFactory() { 30 | return kafkaClientFactory; 31 | } 32 | 33 | @Override 34 | public String toString() { 35 | return "ProducerRestConfig{" 36 | + "name='" 37 | + name 38 | + '\'' 39 | + ", kafkaClientFactory=" 40 | + kafkaClientFactory 41 | + '}'; 42 | } 43 | 44 | public static class KafkaClientFactory { 45 | 46 | @Valid @NotNull private String bootstrapServers; 47 | @Valid @NotNull private String schemaRegistryUrl; 48 | @Valid @NotNull private String sinkTopic; 49 | 50 | public KafkaClientFactory() {} 51 | 52 | public KafkaClientFactory(String bootstrapServers, String schemaRegistryUrl, String sinkTopic) { 53 | this.bootstrapServers = bootstrapServers; 54 | this.schemaRegistryUrl = schemaRegistryUrl; 55 | this.sinkTopic = sinkTopic; 56 | } 57 | 58 | public String getBootstrapServers() { 59 | return bootstrapServers; 60 | } 61 | 62 | public void setBootstrapServers(String bootstrapServers) { 63 | this.bootstrapServers = bootstrapServers; 64 | } 65 | 66 | public String getSinkTopic() { 67 | return sinkTopic; 68 | } 69 | 70 | public void setSinkTopic(String sinkTopic) { 71 | this.sinkTopic = sinkTopic; 72 | } 73 
| 74 | public String getSchemaRegistryUrl() { 75 | return schemaRegistryUrl; 76 | } 77 | 78 | public void setSchemaRegistryUrl(String schemaRegistryUrl) { 79 | this.schemaRegistryUrl = schemaRegistryUrl; 80 | } 81 | 82 | @Override 83 | public String toString() { 84 | return "KafkaClientFactory{" 85 | + "bootstrapServers='" 86 | + bootstrapServers 87 | + '\'' 88 | + ", schemaRegistryUrl='" 89 | + schemaRegistryUrl 90 | + '\'' 91 | + ", sinkTopic='" 92 | + sinkTopic 93 | + '\'' 94 | + '}'; 95 | } 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/domain/MessageJsonRepresentation.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer.domain; 2 | 3 | import com.fasterxml.jackson.annotation.JsonInclude; 4 | 5 | @JsonInclude(JsonInclude.Include.NON_EMPTY) 6 | public class MessageJsonRepresentation { 7 | 8 | private String id; 9 | private String from; 10 | private String to; 11 | private String text; 12 | 13 | public MessageJsonRepresentation() {} 14 | 15 | public MessageJsonRepresentation(String id, String from, String to, String text) { 16 | this.id = id; 17 | this.from = from; 18 | this.to = to; 19 | this.text = text; 20 | } 21 | 22 | public String getId() { 23 | return id; 24 | } 25 | 26 | public void setId(String id) { 27 | this.id = id; 28 | } 29 | 30 | public String getFrom() { 31 | return from; 32 | } 33 | 34 | public void setFrom(String from) { 35 | this.from = from; 36 | } 37 | 38 | public String getTo() { 39 | return to; 40 | } 41 | 42 | public void setTo(String to) { 43 | this.to = to; 44 | } 45 | 46 | public String getText() { 47 | return text; 48 | } 49 | 50 | public void setText(String text) { 51 | this.text = text; 52 | } 53 | 54 | @Override 55 | public String toString() { 56 | return "MessageJsonRepresentation{" 57 | + "id='" 58 | + id 59 | + '\'' 60 | + ", from='" 61 | + from 62 | + '\'' 63 | + ", to='" 64 | + to 65 | + '\'' 66 | + ", text='" 67 | + text 68 | + '\'' 69 | + '}'; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/domain/MessageRepresentationTransformer.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer.domain; 2 | 3 | import no.sysco.testing.kafka.pipeline.avro.Message; 4 | 5 | public final class MessageRepresentationTransformer { 6 | public MessageJsonRepresentation transform(final Message message) { 7 | return new MessageJsonRepresentation( 8 | message.getId(), message.getFrom(), message.getTo(), message.getText()); 9 | } 10 | 11 | public Message transform(final MessageJsonRepresentation messageJsonRepresentation) { 12 | return Message.newBuilder() 13 | .setId(messageJsonRepresentation.getId()) 14 | .setFrom(messageJsonRepresentation.getFrom()) 15 | .setTo(messageJsonRepresentation.getTo()) 16 | .setText(messageJsonRepresentation.getText()) 17 | .build(); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/infrastructure/kafka/KafkaMessageProducer.java: -------------------------------------------------------------------------------- 1 | package 
no.sysco.testing.kafka.pipeline.producer.infrastructure.kafka; 2 | 3 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 4 | import io.confluent.kafka.serializers.KafkaAvroSerializer; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | import java.util.Properties; 8 | import no.sysco.testing.kafka.pipeline.avro.Message; 9 | import no.sysco.testing.kafka.pipeline.producer.ProducerRestConfig; 10 | import org.apache.kafka.clients.producer.KafkaProducer; 11 | import org.apache.kafka.clients.producer.ProducerConfig; 12 | import org.apache.kafka.clients.producer.ProducerRecord; 13 | import org.apache.kafka.common.serialization.StringSerializer; 14 | import org.slf4j.Logger; 15 | import org.slf4j.LoggerFactory; 16 | 17 | public class KafkaMessageProducer { 18 | private static final Logger log = LoggerFactory.getLogger(KafkaMessageProducer.class); 19 | private final KafkaProducer producer; 20 | private final String sinkTopic; 21 | 22 | public KafkaMessageProducer(final ProducerRestConfig producerRestConfig) { 23 | Properties props = new Properties(); 24 | props.put( 25 | ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, 26 | producerRestConfig.getKafkaClientFactory().getBootstrapServers()); 27 | props.put(ProducerConfig.CLIENT_ID_CONFIG, producerRestConfig.getName() + "-producer-id1"); 28 | props.put(ProducerConfig.ACKS_CONFIG, "all"); 29 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); 30 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class); 31 | // props.put(ProducerConfig.RETRIES_CONFIG, 0); 32 | // props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); 33 | // props.put(ProducerConfig.LINGER_MS_CONFIG, 1); 34 | props.put( 35 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 36 | producerRestConfig.getKafkaClientFactory().getSchemaRegistryUrl()); 37 | this.sinkTopic = producerRestConfig.getKafkaClientFactory().getSinkTopic(); 38 | this.producer = new KafkaProducer<>(props); 39 | } 40 | 41 | public void producerMessage(final Message message) { 42 | final ProducerRecord record = 43 | new ProducerRecord<>(sinkTopic, message.getId(), message); 44 | producer.send( 45 | record, 46 | ((metadata, exception) -> { 47 | if (exception == null) { 48 | Map data = new HashMap<>(); 49 | data.put("topic", metadata.topic()); 50 | data.put("partition", metadata.partition()); 51 | data.put("offset", metadata.offset()); 52 | data.put("timestamp", metadata.timestamp()); 53 | log.info(data.toString()); 54 | } else { 55 | exception.printStackTrace(); 56 | } 57 | })); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/java/no/sysco/testing/kafka/pipeline/producer/interfaces/rest/MessageResources.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer.interfaces.rest; 2 | 3 | import javax.ws.rs.Consumes; 4 | import javax.ws.rs.GET; 5 | import javax.ws.rs.POST; 6 | import javax.ws.rs.Path; 7 | import javax.ws.rs.Produces; 8 | import javax.ws.rs.core.MediaType; 9 | import javax.ws.rs.core.Response; 10 | import no.sysco.testing.kafka.pipeline.producer.domain.MessageJsonRepresentation; 11 | import no.sysco.testing.kafka.pipeline.producer.domain.MessageRepresentationTransformer; 12 | import no.sysco.testing.kafka.pipeline.producer.infrastructure.kafka.KafkaMessageProducer; 13 | import org.apache.avro.AvroRuntimeException; 14 | 15 | @Path("messages") 16 | 
@Produces(MediaType.APPLICATION_JSON) 17 | public class MessageResources { 18 | private KafkaMessageProducer messageProducer; 19 | private MessageRepresentationTransformer transformer; 20 | 21 | public MessageResources( 22 | final KafkaMessageProducer messageProducer, 23 | final MessageRepresentationTransformer transformer) { 24 | this.messageProducer = messageProducer; 25 | this.transformer = transformer; 26 | } 27 | 28 | @GET 29 | public Response healthCheck() { 30 | return Response.ok().build(); 31 | } 32 | 33 | @POST 34 | @Consumes(MediaType.APPLICATION_JSON) 35 | public Response produceAsync(final MessageJsonRepresentation messageJsonRepresentation) { 36 | try { 37 | messageProducer.producerMessage(transformer.transform(messageJsonRepresentation)); 38 | return Response.accepted().build(); 39 | } catch (AvroRuntimeException exception) { 40 | return Response.status(400).entity(exception.toString()).build(); 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/resources/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: http-producer 3 | 4 | server: 5 | applicationConnectors: 6 | - type: http 7 | port: ${APPLICATION_PORT:-18080} 8 | adminConnectors: 9 | - type: http 10 | port: ${ADMIN_PORT:-18081} 11 | 12 | kafka: 13 | bootstrapServers: ${KAFKA_BOOTSTRAP_SERVERS:-127.0.0.1:29092} 14 | schemaRegistryUrl: ${SCHEMA_REGISTRY_URL:-http://127.0.0.1:8081} 15 | sinkTopic: ${SINK_TOPIC:-events-message-v1} -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Set root logger level to DEBUG and its only appender to A1. 2 | log4j.rootLogger=INFO, A1 3 | # A1 is set to be a ConsoleAppender. 4 | log4j.appender.A1=org.apache.log4j.ConsoleAppender 5 | # A1 uses PatternLayout. 
6 | log4j.appender.A1.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n -------------------------------------------------------------------------------- /data-pipeline/http-producer/src/test/java/no/sysco/testing/kafka/pipeline/producer/ProducerRestApplicationIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.pipeline.producer; 2 | 3 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 4 | import io.dropwizard.testing.DropwizardTestSupport; 5 | import java.time.Duration; 6 | import java.util.ArrayList; 7 | import java.util.Collections; 8 | import java.util.Properties; 9 | import java.util.UUID; 10 | import java.util.concurrent.TimeUnit; 11 | import javax.ws.rs.client.Client; 12 | import javax.ws.rs.client.Entity; 13 | import javax.ws.rs.core.Response; 14 | import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster; 15 | import no.sysco.testing.kafka.pipeline.avro.Message; 16 | import no.sysco.testing.kafka.pipeline.producer.domain.MessageJsonRepresentation; 17 | import org.apache.kafka.clients.consumer.ConsumerConfig; 18 | import org.apache.kafka.clients.consumer.ConsumerRecord; 19 | import org.apache.kafka.clients.consumer.ConsumerRecords; 20 | import org.apache.kafka.clients.consumer.KafkaConsumer; 21 | import org.apache.kafka.common.serialization.StringDeserializer; 22 | import org.glassfish.jersey.client.JerseyClientBuilder; 23 | import org.junit.AfterClass; 24 | import org.junit.BeforeClass; 25 | import org.junit.ClassRule; 26 | import org.junit.Test; 27 | 28 | import static org.awaitility.Awaitility.await; 29 | import static org.junit.Assert.assertEquals; 30 | 31 | public class ProducerRestApplicationIT { 32 | 33 | @ClassRule 34 | public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster(); 35 | 36 | private static final String topic = "topic1"; 37 | private static ProducerRestConfig producerRestConfig; 38 | // https://www.dropwizard.io/1.3.9/docs/manual/testing.html 39 | private static DropwizardTestSupport SUPPORT; 40 | 41 | @BeforeClass 42 | public static void beforeClass() { 43 | // init -> service is stateless 44 | producerRestConfig = 45 | new ProducerRestConfig( 46 | "test-config-1", 47 | new ProducerRestConfig.KafkaClientFactory( 48 | CLUSTER.bootstrapServers(), CLUSTER.schemaRegistryUrl(), topic)); 49 | SUPPORT = new DropwizardTestSupport<>(ProducerRestApplication.class, producerRestConfig); 50 | SUPPORT.before(); 51 | } 52 | 53 | @AfterClass 54 | public static void afterClass() { 55 | SUPPORT.after(); 56 | producerRestConfig = null; 57 | SUPPORT = null; 58 | } 59 | 60 | @Test 61 | public void sendPostRequest_msgProduced_success() { 62 | Client client = new JerseyClientBuilder().build(); 63 | 64 | Response response = 65 | client 66 | .target(String.format("http://localhost:%d/messages", SUPPORT.getLocalPort())) 67 | .request() 68 | .post(Entity.json(new MessageJsonRepresentation("id-1", "from-1", "to-1", "text-1"))); 69 | 70 | assertEquals(202, response.getStatus()); 71 | 72 | final KafkaConsumer consumer = new KafkaConsumer<>(getConsumerProperties()); 73 | consumer.subscribe(Collections.singletonList(topic)); 74 | final ArrayList messages = new ArrayList<>(); 75 | 76 | await() 77 | .atMost(25, TimeUnit.SECONDS) 78 | .untilAsserted( 79 | () -> { 80 | final ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); 81 | for (final ConsumerRecord record : records) { 82 | 
messages.add(record.value()); 83 | } 84 | assertEquals(1, messages.size()); 85 | }); 86 | } 87 | 88 | @Test 89 | public void sendPostRequest_msgProduced_fail() { 90 | Client client = new JerseyClientBuilder().build(); 91 | 92 | Response response = 93 | client 94 | .target(String.format("http://localhost:%d/messages", SUPPORT.getLocalPort())) 95 | .request() 96 | // cant construct avro record from this payload 97 | .post(Entity.json(new MessageJsonRepresentation(null, null, "null", ""))); 98 | 99 | // bad request 100 | assertEquals(400, response.getStatus()); 101 | 102 | final KafkaConsumer consumer = new KafkaConsumer<>(getConsumerProperties()); 103 | consumer.subscribe(Collections.singletonList(topic)); 104 | final ArrayList messages = new ArrayList<>(); 105 | 106 | await() 107 | .pollDelay(5, TimeUnit.SECONDS) 108 | .untilAsserted( 109 | () -> { 110 | final ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); 111 | for (final ConsumerRecord record : records) { 112 | messages.add(record.value()); 113 | } 114 | assertEquals(0, messages.size()); 115 | }); 116 | } 117 | 118 | private Properties getConsumerProperties() { 119 | final Properties properties = new Properties(); 120 | properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 121 | properties.put( 122 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()); 123 | properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "client-"+ UUID.randomUUID().toString()); 124 | properties.put(ConsumerConfig.GROUP_ID_CONFIG, "gr-" + UUID.randomUUID().toString()); 125 | properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 126 | properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); 127 | properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); 128 | 129 | return properties; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /data-pipeline/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | kafka 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | 12 | data-pipeline 13 | pom 14 | 15 | 16 | http-producer 17 | http-materializer 18 | schema 19 | 20 | 21 | 22 | 23 | 24 | no.sysco.testing 25 | schema 26 | ${project.version} 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /data-pipeline/readme.md: -------------------------------------------------------------------------------- 1 | # Data pipeline 2 | Example of data pipeline. 
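A message POSTed to the http-producer's `/messages` endpoint is produced to the `events-message-v1` Kafka topic as an Avro `Message` and then materialized into the database REST service by the http-materializer. Below is a minimal client sketch; it mirrors the module's own IT tests and assumes the http-producer is running locally on its default port 18080 from `config.yml`.

```java
import javax.ws.rs.client.Client;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.Response;
import org.glassfish.jersey.client.JerseyClientBuilder;

public class ProduceMessageExample {
  public static void main(String[] args) {
    final Client client = new JerseyClientBuilder().build();
    // the producer accepts a JSON representation with id/from/to/text fields
    final Response response = client
        .target("http://localhost:18080/messages")
        .request()
        .post(Entity.json("{\"id\":\"id-1\",\"from\":\"from-1\",\"to\":\"to-1\",\"text\":\"hello\"}"));
    // 202 Accepted means the record was handed off to the Kafka producer
    System.out.println(response.getStatus());
    client.close();
  }
}
```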
3 | ![img](./data_pipeline.png) 4 | 5 | 6 | ## cmds 7 | ``` 8 | # list topics 9 | docker-compose exec kafka kafka-topics --list --zookeeper zookeeper:2181 10 | 11 | # create a topic 12 | docker-compose exec kafka kafka-topics --create --zookeeper zookeeper:2181 --replication-factor 1 --partitions 1 --topic events-message-v1 13 | ``` -------------------------------------------------------------------------------- /data-pipeline/schema/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | data-pipeline 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | 12 | schema 13 | 14 | 15 | 16 | org.apache.avro 17 | avro 18 | 19 | 20 | 21 | 22 | 23 | 24 | org.apache.avro 25 | avro-maven-plugin 26 | 27 | 28 | generate-sources 29 | 30 | schema 31 | 32 | 33 | 34 | 35 | ${project.basedir}/src/main/resources/avro 36 | String 37 | 38 | 39 | 40 | io.confluent 41 | kafka-schema-registry-maven-plugin 42 | 43 | 44 | http://localhost:8081 45 | 46 | 47 | src/main/resources/avro/message.avsc 48 | 49 | 50 | 51 | 52 | test-compatibility 53 | register 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /data-pipeline/schema/src/main/resources/avro/message.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "no.sysco.testing.kafka.pipeline.avro", 3 | "type": "record", 4 | "name": "Message", 5 | "fields": [ 6 | {"name": "id", "type": "string"}, 7 | {"name": "from", "type": "string"}, 8 | {"name": "to", "type": "string"}, 9 | {"name": "text", "type": "string"} 10 | ] 11 | } -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:5.1.2 5 | ports: 6 | - 2181:2181 7 | environment: 8 | ZOOKEEPER_CLIENT_PORT: 2181 9 | ZOOKEEPER_TICK_TIME: 2000 10 | kafka: 11 | image: confluentinc/cp-kafka:5.1.2 12 | ports: 13 | - 9092:9092 14 | - 29092:29092 15 | environment: 16 | KAFKA_BROKER_ID: 1 17 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 18 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 19 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092 20 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 21 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 22 | depends_on: 23 | - zookeeper 24 | schema-registry: 25 | image: confluentinc/cp-schema-registry:5.1.2 26 | ports: 27 | - 8081:8081 28 | environment: 29 | SCHEMA_REGISTRY_HOST_NAME: localhost 30 | SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 31 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9092 32 | depends_on: 33 | - kafka 34 | - zookeeper -------------------------------------------------------------------------------- /e2e/docker-compose.test.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:5.1.1 5 | ports: 6 | - 2181:2181 7 | environment: 8 | ZOOKEEPER_CLIENT_PORT: 2181 9 | ZOOKEEPER_TICK_TIME: 2000 10 | kafka: 11 | image: confluentinc/cp-kafka:5.1.1 12 | ports: 13 | - 9092:9092 14 | - 29092:29092 15 | environment: 16 | KAFKA_BROKER_ID: 1 17 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 18 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 
19 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092 20 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 21 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 22 | depends_on: 23 | - zookeeper 24 | # This "container" is a workaround to pre-create topics 25 | # https://github.com/confluentinc/examples/blob/f854ac008952346ceaba0d1f1a352788f0572c74/microservices-orders/docker-compose.yml#L182-L215 26 | kafka-setup: 27 | image: confluentinc/cp-kafka:5.1.1 28 | hostname: kafka-setup 29 | container_name: kafka-setup 30 | depends_on: 31 | - kafka 32 | - zookeeper 33 | command: "bash -c 'echo Waiting for Kafka to be ready... && \ 34 | cub kafka-ready -b kafka:9092 1 30 && \ 35 | kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic events-message-v1 && \ 36 | sleep 30'" 37 | environment: 38 | # The following settings are listed here only to satisfy the image's requirements. 39 | # We override the image's `command` anyways, hence this container will not start a broker. 40 | KAFKA_BROKER_ID: ignored 41 | KAFKA_ZOOKEEPER_CONNECT: ignored 42 | 43 | schema-registry: 44 | image: confluentinc/cp-schema-registry:5.1.1 45 | ports: 46 | - 8081:8081 47 | environment: 48 | SCHEMA_REGISTRY_HOST_NAME: localhost 49 | SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 50 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9092 51 | depends_on: 52 | - kafka 53 | 54 | # DATA PIPELINE SERVICES 55 | 56 | # https://github.com/clue/docker-json-server 57 | db-mock: 58 | image: zhenik/json-server 59 | environment: 60 | ID_MAP: id 61 | ports: 62 | - "3000:80" 63 | volumes: 64 | - ./src/test/resources/json-server-database.json:/data/db.json 65 | 66 | http-producer: 67 | image: zhenik/http-producer:data-pipeline 68 | build: ../data-pipeline/http-producer 69 | environment: 70 | APPLICATION_PORT: 8080 71 | ADMIN_PORT: 8081 72 | KAFKA_BOOTSTRAP_SERVERS: kafka:9092 73 | SCHEMA_REGISTRY_URL: http://schema-registry:8081 74 | SINK_TOPIC: events-message-v1 75 | ports: 76 | - "8080:8080" 77 | depends_on: 78 | - zookeeper 79 | - kafka 80 | - schema-registry 81 | 82 | http-materializer: 83 | image: zhenik/http-materializer:data-pipeline 84 | build: ../data-pipeline/http-materializer 85 | environment: 86 | KAFKA_BOOTSTRAP_SERVERS: kafka:9092 87 | SCHEMA_REGISTRY_URL: http://schema-registry:8081 88 | SOURCE_TOPIC: events-message-v1 89 | DATABASE_REST_SERVICE_URL: http://db-mock:80/messages 90 | restart: always 91 | depends_on: 92 | - zookeeper 93 | - kafka 94 | - db-mock 95 | - kafka-setup 96 | - schema-registry -------------------------------------------------------------------------------- /e2e/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | kafka 7 | no.sysco.testing 8 | 1.0-SNAPSHOT 9 | 10 | 4.0.0 11 | 12 | e2e 13 | 14 | 15 | 1.11.2 16 | 17 | 18 | 19 | 20 | junit 21 | junit 22 | test 23 | 24 | 25 | org.awaitility 26 | awaitility 27 | test 28 | 29 | 30 | org.testcontainers 31 | kafka 32 | ${testcontainers.version} 33 | test 34 | 35 | 36 | 37 | org.slf4j 38 | slf4j-log4j12 39 | test 40 | 41 | 42 | org.slf4j 43 | slf4j-api 44 | test 45 | 46 | 47 | io.rest-assured 48 | rest-assured 49 | test 50 | 51 | 52 | 53 | com.fasterxml.jackson.core 54 | jackson-databind 55 | test 56 | 57 | 58 | -------------------------------------------------------------------------------- /e2e/readme.md: -------------------------------------------------------------------------------- 1 | # End to end tests (e2e) 2 | 
This module contains IT tests for the whole [data pipeline](../data-pipeline) using [testcontainers-java](https://www.testcontainers.org/). 3 | 4 | ## Approach 1: Set up each container for the data pipeline 5 | ### Topic creation, before tests 6 | * [testcontainers - kafka container](https://www.testcontainers.org/modules/kafka/) 7 | * [testcontainers - execInContainer](https://www.testcontainers.org/features/commands/) 8 | ```java 9 | private static void createTopic(String topicName) { 10 | // kafka container uses with embedded zookeeper 11 | // confluent platform and Kafka compatibility 5.1.x <-> kafka 2.1.x 12 | // kafka 2.1.x require option --zookeeper, later versions use --bootstrap-servers instead 13 | String createTopic = 14 | String.format( 15 | "/usr/bin/kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic %s", 16 | topicName); 17 | try { 18 | final Container.ExecResult execResult = kafka.execInContainer("/bin/sh", "-c", createTopic); 19 | if (execResult.getExitCode() != 0) fail(); 20 | } catch (Exception e) { 21 | e.printStackTrace(); 22 | fail(); 23 | } 24 | } 25 | ``` 26 | 27 | ## Approach 2: Use docker-compose 28 | ### Topic creation, before tests 29 | * [Confluent Platform Utility Belt (cub)](https://docs.confluent.io/current/installation/docker/development.html#confluent-platform-utility-belt-cub) 30 | 31 | To create topics, I run an additional container (NOT used as a Kafka broker). 32 | 33 | ```yaml 34 | # This "container" is a workaround to pre-create topics 35 | # https://github.com/confluentinc/examples/blob/f854ac008952346ceaba0d1f1a352788f0572c74/microservices-orders/docker-compose.yml#L182-L215 36 | kafka-setup: 37 | image: confluentinc/cp-kafka:5.1.2 38 | hostname: kafka-setup 39 | container_name: kafka-setup 40 | depends_on: 41 | - kafka 42 | - zookeeper 43 | command: "bash -c 'echo Waiting for Kafka to be ready... && \ 44 | cub kafka-ready -b kafka:9092 1 30 && \ 45 | kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic events-message-v1 && \ 46 | sleep 60'" 47 | environment: 48 | # The following settings are listed here only to satisfy the image's requirements. 49 | # We override the image's `command` anyways, hence this container will not start a broker.
50 | KAFKA_BROKER_ID: ignored 51 | KAFKA_ZOOKEEPER_CONNECT: ignored 52 | ``` -------------------------------------------------------------------------------- /e2e/src/test/java/no/sysco/testing/kafka/e2e/ContainersIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.e2e; 2 | 3 | import io.restassured.RestAssured; 4 | import io.restassured.http.ContentType; 5 | import no.sysco.testing.kafka.e2e.representation.MessageJsonRepresentation; 6 | import org.junit.Test; 7 | 8 | import java.util.Arrays; 9 | import java.util.List; 10 | import java.util.UUID; 11 | import java.util.concurrent.TimeUnit; 12 | 13 | import static org.awaitility.Awaitility.await; 14 | import static org.junit.Assert.assertNotNull; 15 | import static org.junit.Assert.assertTrue; 16 | 17 | public class ContainersIT extends ContainersTestBaseIT { 18 | 19 | 20 | @Test 21 | public void is_running() { 22 | assertTrue(kafka.isRunning()); 23 | assertTrue(schemaRegistry.isRunning()); 24 | 25 | assertTrue(jsonServer.isRunning()); 26 | assertTrue(httpProducer.isRunning()); 27 | assertTrue(httpMaterializer.isRunning()); 28 | 29 | final List messageJsonRepresentations = 30 | Arrays.asList( 31 | RestAssured.given() 32 | .get(JSON_SERVER_EXPOSED + "/messages") 33 | .then() 34 | .statusCode(200) 35 | .extract() 36 | .as(MessageJsonRepresentation[].class)); 37 | assertTrue(messageJsonRepresentations.size() > 0); 38 | } 39 | 40 | @Test 41 | public void test_data_pipeline_flow_successful() { 42 | String id = UUID.randomUUID().toString(); 43 | String from = UUID.randomUUID().toString(); 44 | String to = UUID.randomUUID().toString(); 45 | String text = UUID.randomUUID().toString(); 46 | 47 | MessageJsonRepresentation messageJsonRepresentation = 48 | new MessageJsonRepresentation(id, from, to, text); 49 | 50 | RestAssured.given() 51 | .contentType(ContentType.JSON) 52 | .body(messageJsonRepresentation) 53 | .post(HTTP_PRODUCER_EXPOSED + "/messages") 54 | .then() 55 | .statusCode(202); 56 | 57 | await() 58 | .atMost(70, TimeUnit.SECONDS) 59 | .untilAsserted( 60 | () -> { 61 | MessageJsonRepresentation jsonPresentation = 62 | RestAssured.given() 63 | .get(JSON_SERVER_EXPOSED + "/messages/" + id) 64 | .then() 65 | // .statusCode(200) 66 | .extract() 67 | .as(MessageJsonRepresentation.class); 68 | 69 | assertNotNull(jsonPresentation); 70 | }); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /e2e/src/test/java/no/sysco/testing/kafka/e2e/ContainersTestBaseIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.e2e; 2 | 3 | import org.testcontainers.containers.*; 4 | import org.testcontainers.containers.wait.strategy.Wait; 5 | import static org.junit.Assert.fail; 6 | 7 | /** 8 | * Base test abstract class contains setup to 9 | * initialize testing environment. 
10 | */ 11 | public abstract class ContainersTestBaseIT { 12 | 13 | static final String TOPIC = "events-message-v1"; 14 | static final String CONFLUENT_PLATFORM_VERSION = "5.1.1"; 15 | 16 | // kafka env 17 | static KafkaContainer kafka; 18 | static String KAFKA_BROKER_INSIDE_DOCKER_ENV; 19 | static GenericContainer schemaRegistry; 20 | static String SCHEMA_REGISTRY_INSIDE_DOCKER_ENV; 21 | 22 | // app env 23 | static GenericContainer jsonServer; 24 | static String JSON_SERVER_INSIDE_DOCKER_ENV; 25 | static String JSON_SERVER_EXPOSED; 26 | 27 | static GenericContainer httpProducer; 28 | static String HTTP_PRODUCER_EXPOSED; 29 | 30 | static GenericContainer httpMaterializer; 31 | 32 | // singleton containers https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/#singleton-containers 33 | static { 34 | final Network commonNetwork = Network.newNetwork(); 35 | setZookeeperAndKafka(commonNetwork); 36 | setSchemaRegistry(commonNetwork); 37 | setJsonServer(commonNetwork); 38 | setHttProducer(commonNetwork); 39 | setHttpMaterializer(commonNetwork); 40 | } 41 | 42 | private static void setZookeeperAndKafka(Network network) { 43 | kafka = new KafkaContainer(CONFLUENT_PLATFORM_VERSION).withNetwork(network); 44 | kafka.start(); 45 | } 46 | 47 | private static void setSchemaRegistry(Network network) { 48 | // get network alias to be able connect other container to env 49 | KAFKA_BROKER_INSIDE_DOCKER_ENV = "PLAINTEXT://" + kafka.getNetworkAliases().get(0) + ":9092"; 50 | schemaRegistry = 51 | new GenericContainer("confluentinc/cp-schema-registry:" + CONFLUENT_PLATFORM_VERSION) 52 | .withExposedPorts(8081) 53 | .withNetwork(network) 54 | .withEnv("SCHEMA_REGISTRY_HOST_NAME", "localhost") // loopback to the container 55 | .withEnv("SCHEMA_REGISTRY_LISTENERS", "http://0.0.0.0:8081") // loopback to the container 56 | .withEnv("SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", KAFKA_BROKER_INSIDE_DOCKER_ENV) 57 | .waitingFor(Wait.forHttp("/subjects").forStatusCode(200)); 58 | schemaRegistry.start(); 59 | SCHEMA_REGISTRY_INSIDE_DOCKER_ENV = 60 | "http://" + schemaRegistry.getNetworkAliases().get(0) + ":8081"; 61 | } 62 | 63 | private static void setJsonServer(Network network) { 64 | jsonServer = 65 | new GenericContainer("zhenik/json-server") 66 | .withExposedPorts(80) 67 | // all containers put in same network 68 | .withNetwork(network) 69 | .withEnv("ID_MAP", "id") 70 | .withNetworkAliases("json-server") 71 | .withClasspathResourceMapping( 72 | "json-server-database.json", "/data/db.json", BindMode.READ_WRITE) 73 | .waitingFor(Wait.forHttp("/").forStatusCode(200)); 74 | 75 | jsonServer.start(); 76 | // provide availability make http calls from localhost against docker env 77 | JSON_SERVER_EXPOSED = "http://"+jsonServer.getContainerIpAddress() + ":" + jsonServer.getMappedPort(80); 78 | JSON_SERVER_INSIDE_DOCKER_ENV = "http://" + jsonServer.getNetworkAliases().get(0) + ":80"; 79 | } 80 | 81 | private static void setHttProducer(Network network) { 82 | httpProducer = 83 | new GenericContainer("zhenik/http-producer:data-pipeline") 84 | .withExposedPorts(8080) 85 | .withNetwork(network) 86 | .withEnv("APPLICATION_PORT", "8080") 87 | .withEnv("KAFKA_BOOTSTRAP_SERVERS", KAFKA_BROKER_INSIDE_DOCKER_ENV) 88 | .withEnv("SCHEMA_REGISTRY_URL", SCHEMA_REGISTRY_INSIDE_DOCKER_ENV) 89 | .withEnv("SINK_TOPIC", TOPIC) 90 | .waitingFor(Wait.forHttp("/messages").forStatusCode(200)); 91 | httpProducer.start(); 92 | 93 | HTTP_PRODUCER_EXPOSED = "http://" + httpProducer.getContainerIpAddress() + ":" + 
httpProducer.getMappedPort(8080); 94 | // HTTP_PRODUCER_EXPOSED = LOCAL_HOST + ":" + httpProducer.getMappedPort(8080); 95 | } 96 | 97 | private static void setHttpMaterializer(Network network) { 98 | 99 | createTopic(TOPIC); 100 | 101 | String messageEndpoint = "http://json-server/messages"; 102 | httpMaterializer = 103 | new GenericContainer("zhenik/http-materializer:data-pipeline") 104 | .withNetwork(network) 105 | .withEnv("KAFKA_BOOTSTRAP_SERVERS", KAFKA_BROKER_INSIDE_DOCKER_ENV) 106 | .withEnv("SCHEMA_REGISTRY_URL", SCHEMA_REGISTRY_INSIDE_DOCKER_ENV) 107 | .withEnv("DATABASE_REST_SERVICE_URL", messageEndpoint) 108 | .withEnv("SOURCE_TOPIC", TOPIC); 109 | httpMaterializer.start(); 110 | } 111 | 112 | private static void createTopic(String topicName) { 113 | // kafka container uses with embedded zookeeper 114 | // confluent platform and Kafka compatibility 5.1.x <-> kafka 2.1.x 115 | // kafka 2.1.x require option --zookeeper, later versions use --bootstrap-servers instead 116 | String createTopic = 117 | String.format( 118 | "/usr/bin/kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic %s", 119 | topicName); 120 | try { 121 | final Container.ExecResult execResult = kafka.execInContainer("/bin/sh", "-c", createTopic); 122 | if (execResult.getExitCode() != 0) fail(); 123 | } catch (Exception e) { 124 | e.printStackTrace(); 125 | fail(); 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /e2e/src/test/java/no/sysco/testing/kafka/e2e/DockerComposeIT.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.e2e; 2 | 3 | import io.restassured.RestAssured; 4 | import io.restassured.http.ContentType; 5 | import java.io.File; 6 | import java.util.Arrays; 7 | import java.util.List; 8 | import java.util.UUID; 9 | import java.util.concurrent.TimeUnit; 10 | import no.sysco.testing.kafka.e2e.representation.MessageJsonRepresentation; 11 | import org.junit.ClassRule; 12 | import org.junit.Test; 13 | import org.testcontainers.containers.DockerComposeContainer; 14 | import org.testcontainers.containers.wait.strategy.Wait; 15 | 16 | import static org.awaitility.Awaitility.await; 17 | import static org.junit.Assert.assertNotNull; 18 | import static org.junit.Assert.assertTrue; 19 | 20 | public class DockerComposeIT { 21 | 22 | private String JSON_SERVER_URL; 23 | private String HTTP_PRODUCER_BASE_URL; 24 | 25 | /** 26 | * Environment container contains composition of containers which are declared 27 | * in docker-compose.test.yml file. Use a local Docker Compose binary. 
28 | * Waiting strategies are applied to `service-name` with suffix `_1` 29 | */ 30 | @ClassRule 31 | public static DockerComposeContainer environment = 32 | new DockerComposeContainer(new File("docker-compose.test.yml")) 33 | .withLocalCompose(true) 34 | .waitingFor("db-mock_1", Wait.forHttp("/").forStatusCode(200)) 35 | .waitingFor("schema-registry_1", Wait.forHttp("/subjects").forStatusCode(200)) 36 | .waitingFor("http-producer_1", Wait.forHttp("/messages").forStatusCode(200)) 37 | .withExposedService("db-mock_1", 80) 38 | .withExposedService("http-producer_1", 8080); 39 | 40 | @Test 41 | public void is_running() { 42 | // System.out.println("HERE: "+environment.getServiceHost("db-mock_1", 80)+":"+environment.getServicePort("db-mock_1", 80)); 43 | JSON_SERVER_URL = "http://" + environment.getServiceHost("db-mock_1", 80) + ":" + environment.getServicePort("db-mock_1", 80); 44 | final List messageJsonRepresentations = 45 | Arrays.asList( 46 | RestAssured.given() 47 | .get(JSON_SERVER_URL + "/messages") 48 | .then() 49 | .statusCode(200) 50 | .extract() 51 | .as(MessageJsonRepresentation[].class)); 52 | assertTrue(messageJsonRepresentations.size() > 0); 53 | } 54 | 55 | @Test 56 | public void test_data_pipeline_flow_successful() { 57 | JSON_SERVER_URL = "http://" + environment.getServiceHost("db-mock_1", 80) + ":" + environment.getServicePort("db-mock_1", 80); 58 | HTTP_PRODUCER_BASE_URL = "http://" + environment.getServiceHost("http-producer_1", 8080) + ":" + environment.getServicePort("http-producer_1", 8080); 59 | 60 | String id = UUID.randomUUID().toString(); 61 | String from = UUID.randomUUID().toString(); 62 | String to = UUID.randomUUID().toString(); 63 | String text = UUID.randomUUID().toString(); 64 | 65 | MessageJsonRepresentation messageJsonRepresentation = 66 | new MessageJsonRepresentation(id, from, to, text); 67 | 68 | RestAssured.given() 69 | .contentType(ContentType.JSON) 70 | .body(messageJsonRepresentation) 71 | .post(HTTP_PRODUCER_BASE_URL + "/messages") 72 | .then() 73 | .statusCode(202); 74 | 75 | await() 76 | .atMost(10, TimeUnit.SECONDS) 77 | .untilAsserted( 78 | () -> { 79 | MessageJsonRepresentation jsonRepresentation = 80 | RestAssured.given() 81 | .get(JSON_SERVER_URL + "/messages/" + id) 82 | .then() 83 | .extract() 84 | .as(MessageJsonRepresentation.class); 85 | 86 | assertNotNull(jsonRepresentation); 87 | }); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /e2e/src/test/java/no/sysco/testing/kafka/e2e/representation/MessageJsonRepresentation.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.e2e.representation; 2 | 3 | public class MessageJsonRepresentation { 4 | 5 | private String id; 6 | private String from; 7 | private String to; 8 | private String text; 9 | 10 | public MessageJsonRepresentation() {} 11 | 12 | public MessageJsonRepresentation(String id, String from, String to, String text) { 13 | this.id = id; 14 | this.from = from; 15 | this.to = to; 16 | this.text = text; 17 | } 18 | 19 | public String getId() { 20 | return id; 21 | } 22 | 23 | public void setId(String id) { 24 | this.id = id; 25 | } 26 | 27 | public String getFrom() { 28 | return from; 29 | } 30 | 31 | public void setFrom(String from) { 32 | this.from = from; 33 | } 34 | 35 | public String getTo() { 36 | return to; 37 | } 38 | 39 | public void setTo(String to) { 40 | this.to = to; 41 | } 42 | 43 | public String getText() { 44 | return text; 45 | } 46 | 47 | public 
void setText(String text) { 48 | this.text = text; 49 | } 50 | 51 | @Override 52 | public String toString() { 53 | return "MessageJsonRepresentation{" 54 | + "id='" 55 | + id 56 | + '\'' 57 | + ", from='" 58 | + from 59 | + '\'' 60 | + ", to='" 61 | + to 62 | + '\'' 63 | + ", text='" 64 | + text 65 | + '\'' 66 | + '}'; 67 | } 68 | 69 | public String json() { 70 | return "{" 71 | + "\"id\":" 72 | + "\"" 73 | + id 74 | + "\"," 75 | + "\"from\":" 76 | + "\"" 77 | + from 78 | + "\"," 79 | + "\"to\":" 80 | + "\"" 81 | + to 82 | + "\"," 83 | + "\"text\":" 84 | + "\"" 85 | + text 86 | + "\"" 87 | + "}"; 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /e2e/src/test/resources/json-server-database.json: -------------------------------------------------------------------------------- 1 | { 2 | "messages": [ 3 | { 4 | "id": "1", 5 | "from": "user-id-1", 6 | "to": "user-id-2", 7 | "text": "hello world" 8 | } 9 | ] 10 | } -------------------------------------------------------------------------------- /e2e/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 4 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p [%t] %m (%c)%n -------------------------------------------------------------------------------- /embedded-cluster/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | kafka 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | embedded-cluster 12 | 13 | 14 | 2.9.0 15 | 16 | 17 | 18 | 19 | org.apache.avro 20 | avro 21 | 22 | 23 | io.confluent 24 | kafka-avro-serializer 25 | 26 | 27 | org.slf4j 28 | slf4j-log4j12 29 | 30 | 31 | org.slf4j 32 | slf4j-api 33 | 34 | 35 | junit 36 | junit 37 | compile 38 | 39 | 40 | org.apache.kafka 41 | kafka_2.11 42 | ${kafka.apis.version} 43 | 44 | 45 | org.apache.curator 46 | curator-test 47 | ${curator.test.version} 48 | 49 | 50 | org.apache.kafka 51 | kafka_2.11 52 | test 53 | ${kafka.apis.version} 54 | 55 | 56 | org.apache.kafka 57 | kafka-clients 58 | ${kafka.apis.version} 59 | test 60 | 61 | 62 | io.confluent 63 | kafka-schema-registry 64 | ${confluent.platform.version} 65 | 66 | 67 | io.confluent 68 | kafka-schema-registry 69 | ${confluent.platform.version} 70 | 71 | tests 72 | 73 | 74 | -------------------------------------------------------------------------------- /embedded-cluster/readme.md: -------------------------------------------------------------------------------- 1 | # Description 2 | Module contains copy of code from [kafka-streams-example](https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java) 3 | There is open issue: extract embedded kafka for testing purpose. 4 | 5 | `EmbeddedSingleNodeKafkaCluster` contains: Zookeeper, KafkaBroker and SchemaRegistry. 
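A minimal usage sketch (the class and topic names below are illustrative; the pattern mirrors how the IT modules in this repository consume the cluster as a JUnit rule):

```java
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import java.util.Properties;
import no.sysco.testing.kafka.embedded.EmbeddedSingleNodeKafkaCluster;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.junit.ClassRule;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

public class ExampleClusterIT {

  // JUnit starts the cluster (ZooKeeper + Kafka broker + Schema Registry)
  // before the test class runs and stops it afterwards
  @ClassRule
  public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();

  @Test
  public void cluster_is_running() {
    assertTrue(CLUSTER.isRunning());

    // endpoints to hand to the Kafka clients under test
    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl());

    // topics can be pre-created explicitly (broker auto-creation is also enabled)
    CLUSTER.createTopic("example-topic-v1");
  }
}
```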
6 | 7 | ## Important notes 8 | * Schema registry at start call some deprecated methods, check logs 9 | * Current support java8 only, because there are reflections usage under the hood 10 | 11 | ## Related Issues 12 | * [issue-#26: Encapsulate EmbeddedSingleNodeKafkaCluster in a seperately-available maven/gradle/sbt dep](https://github.com/confluentinc/kafka-streams-examples/issues/26) 13 | * [KIP-139: Kafka TestKit library](https://cwiki.apache.org/confluence/display/KAFKA/KIP-139%3A+Kafka+TestKit+library) -------------------------------------------------------------------------------- /embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/EmbeddedSingleNodeKafkaCluster.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.embedded; 2 | 3 | import io.confluent.kafka.schemaregistry.RestApp; 4 | import io.confluent.kafka.schemaregistry.avro.AvroCompatibilityLevel; 5 | import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig; 6 | import java.io.IOException; 7 | import java.util.Collections; 8 | import java.util.HashSet; 9 | import java.util.Map; 10 | import java.util.Properties; 11 | import java.util.Set; 12 | import kafka.server.KafkaConfig$; 13 | import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; 14 | import org.apache.kafka.test.TestCondition; 15 | import org.apache.kafka.test.TestUtils; 16 | import org.junit.rules.ExternalResource; 17 | import org.slf4j.Logger; 18 | import org.slf4j.LoggerFactory; 19 | import scala.collection.JavaConverters; 20 | 21 | // https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java 22 | 23 | public class EmbeddedSingleNodeKafkaCluster extends ExternalResource { 24 | private static final Logger log = LoggerFactory.getLogger(EmbeddedSingleNodeKafkaCluster.class); 25 | private static final int DEFAULT_BROKER_PORT = 0; // 0 results in a random port being selected 26 | private static final String KAFKA_SCHEMAS_TOPIC = "_schemas"; 27 | private static final String AVRO_COMPATIBILITY_TYPE = AvroCompatibilityLevel.NONE.name; 28 | 29 | private static final String KAFKASTORE_OPERATION_TIMEOUT_MS = "60000"; 30 | private static final String KAFKASTORE_DEBUG = "true"; 31 | private static final String KAFKASTORE_INIT_TIMEOUT = "90000"; 32 | 33 | private ZooKeeperEmbedded zookeeper; 34 | private KafkaEmbedded broker; 35 | private RestApp schemaRegistry; 36 | private final Properties brokerConfig; 37 | private boolean running; 38 | 39 | /** 40 | * Creates and starts the cluster. 41 | */ 42 | public EmbeddedSingleNodeKafkaCluster() { 43 | this(new Properties()); 44 | } 45 | 46 | /** 47 | * Creates and starts the cluster. 48 | * 49 | * @param brokerConfig Additional broker configuration settings. 50 | */ 51 | public EmbeddedSingleNodeKafkaCluster(final Properties brokerConfig) { 52 | this.brokerConfig = new Properties(); 53 | this.brokerConfig.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS); 54 | this.brokerConfig.putAll(brokerConfig); 55 | } 56 | 57 | /** 58 | * Creates and starts the cluster. 
59 | */ 60 | public void start() throws Exception { 61 | log.debug("Initiating embedded Kafka cluster startup"); 62 | log.debug("Starting a ZooKeeper instance..."); 63 | zookeeper = new ZooKeeperEmbedded(); 64 | log.debug("ZooKeeper instance is running at {}", zookeeper.connectString()); 65 | 66 | final Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper); 67 | log.debug("Starting a Kafka instance on port {} ...", 68 | effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp())); 69 | broker = new KafkaEmbedded(effectiveBrokerConfig); 70 | log.debug("Kafka instance is running at {}, connected to ZooKeeper at {}", 71 | broker.brokerList(), broker.zookeeperConnect()); 72 | 73 | final Properties schemaRegistryProps = new Properties(); 74 | 75 | schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS); 76 | schemaRegistryProps.put(SchemaRegistryConfig.DEBUG_CONFIG, KAFKASTORE_DEBUG); 77 | schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_INIT_TIMEOUT_CONFIG, KAFKASTORE_INIT_TIMEOUT); 78 | 79 | schemaRegistry = new RestApp(0, zookeeperConnect(), KAFKA_SCHEMAS_TOPIC, AVRO_COMPATIBILITY_TYPE, schemaRegistryProps); 80 | schemaRegistry.start(); 81 | running = true; 82 | } 83 | 84 | private Properties effectiveBrokerConfigFrom(final Properties brokerConfig, final ZooKeeperEmbedded zookeeper) { 85 | final Properties effectiveConfig = new Properties(); 86 | effectiveConfig.putAll(brokerConfig); 87 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zookeeper.connectString()); 88 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkSessionTimeoutMsProp(), 30 * 1000); 89 | effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), DEFAULT_BROKER_PORT); 90 | effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectionTimeoutMsProp(), 60 * 1000); 91 | effectiveConfig.put(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), true); 92 | effectiveConfig.put(KafkaConfig$.MODULE$.LogCleanerDedupeBufferSizeProp(), 2 * 1024 * 1024L); 93 | effectiveConfig.put(KafkaConfig$.MODULE$.GroupMinSessionTimeoutMsProp(), 0); 94 | effectiveConfig.put(KafkaConfig$.MODULE$.OffsetsTopicReplicationFactorProp(), (short) 1); 95 | effectiveConfig.put(KafkaConfig$.MODULE$.OffsetsTopicPartitionsProp(), 1); 96 | effectiveConfig.put(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), true); 97 | return effectiveConfig; 98 | } 99 | 100 | @Override 101 | protected void before() throws Exception { 102 | start(); 103 | } 104 | 105 | @Override 106 | protected void after() { 107 | stop(); 108 | } 109 | 110 | /** 111 | * Stops the cluster. 112 | */ 113 | public void stop() { 114 | log.info("Stopping Confluent"); 115 | try { 116 | try { 117 | if (schemaRegistry != null) { 118 | schemaRegistry.stop(); 119 | } 120 | } catch (final Exception fatal) { 121 | throw new RuntimeException(fatal); 122 | } 123 | if (broker != null) { 124 | broker.stop(); 125 | } 126 | try { 127 | if (zookeeper != null) { 128 | zookeeper.stop(); 129 | } 130 | } catch (final IOException fatal) { 131 | throw new RuntimeException(fatal); 132 | } 133 | } finally { 134 | running = false; 135 | } 136 | log.info("Confluent Stopped"); 137 | } 138 | 139 | /** 140 | * This cluster's `bootstrap.servers` value. Example: `127.0.0.1:9092`. 141 | *

142 | * You can use this to tell Kafka Streams applications, Kafka producers, and Kafka consumers (new 143 | * consumer API) how to connect to this cluster. 144 | */ 145 | public String bootstrapServers() { 146 | return broker.brokerList(); 147 | } 148 | 149 | /** 150 | * This cluster's ZK connection string aka `zookeeper.connect` in `hostnameOrIp:port` format. 151 | * Example: `127.0.0.1:2181`. 152 | *

153 | * You can use this to e.g. tell Kafka consumers (old consumer API) how to connect to this 154 | * cluster. 155 | */ 156 | public String zookeeperConnect() { 157 | return zookeeper.connectString(); 158 | } 159 | 160 | /** 161 | * The "schema.registry.url" setting of the schema registry instance. 162 | */ 163 | public String schemaRegistryUrl() { 164 | return schemaRegistry.restConnect; 165 | } 166 | 167 | /** 168 | * Creates a Kafka topic with 1 partition and a replication factor of 1. 169 | * 170 | * @param topic The name of the topic. 171 | */ 172 | public void createTopic(final String topic) { 173 | createTopic(topic, 1, (short) 1, Collections.emptyMap()); 174 | } 175 | 176 | /** 177 | * Creates a Kafka topic with the given parameters. 178 | * 179 | * @param topic The name of the topic. 180 | * @param partitions The number of partitions for this topic. 181 | * @param replication The replication factor for (the partitions of) this topic. 182 | */ 183 | public void createTopic(final String topic, final int partitions, final short replication) { 184 | createTopic(topic, partitions, replication, Collections.emptyMap()); 185 | } 186 | 187 | /** 188 | * Creates a Kafka topic with the given parameters. 189 | * 190 | * @param topic The name of the topic. 191 | * @param partitions The number of partitions for this topic. 192 | * @param replication The replication factor for (partitions of) this topic. 193 | * @param topicConfig Additional topic-level configuration settings. 194 | */ 195 | public void createTopic(final String topic, 196 | final int partitions, 197 | final short replication, 198 | final Map topicConfig) { 199 | broker.createTopic(topic, partitions, replication, topicConfig); 200 | } 201 | 202 | /** 203 | * Deletes multiple topics and blocks until all topics got deleted. 204 | * 205 | * @param timeoutMs the max time to wait for the topics to be deleted (does not block if {@code <= 0}) 206 | * @param topics the name of the topics 207 | */ 208 | public void deleteTopicsAndWait(final long timeoutMs, final String... topics) throws InterruptedException { 209 | for (final String topic : topics) { 210 | try { 211 | broker.deleteTopic(topic); 212 | } catch (final UnknownTopicOrPartitionException expected) { 213 | // indicates (idempotent) success 214 | } 215 | } 216 | 217 | if (timeoutMs > 0) { 218 | TestUtils.waitForCondition(new TopicsDeletedCondition(topics), timeoutMs, "Topics not deleted after " + timeoutMs + " milli seconds."); 219 | } 220 | } 221 | 222 | public boolean isRunning() { 223 | return running; 224 | } 225 | 226 | private final class TopicsDeletedCondition implements TestCondition { 227 | final Set deletedTopics = new HashSet<>(); 228 | 229 | private TopicsDeletedCondition(final String... 
topics) { 230 | Collections.addAll(deletedTopics, topics); 231 | } 232 | 233 | @Override 234 | public boolean conditionMet() { 235 | //TODO once KAFKA-6098 is fixed use AdminClient to verify topics have been deleted 236 | final Set allTopics = new HashSet<>( 237 | JavaConverters.seqAsJavaListConverter(broker.kafkaServer().zkClient().getAllTopicsInCluster()).asJava()); 238 | return !allTopics.removeAll(deletedTopics); 239 | } 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/KafkaEmbedded.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.embedded; 2 | 3 | import java.io.File; 4 | import java.io.IOException; 5 | import java.util.Collections; 6 | import java.util.Map; 7 | import java.util.Properties; 8 | import java.util.concurrent.ExecutionException; 9 | import kafka.server.KafkaConfig; 10 | import kafka.server.KafkaConfig$; 11 | import kafka.server.KafkaServer; 12 | import kafka.utils.TestUtils; 13 | import org.apache.kafka.clients.admin.AdminClient; 14 | import org.apache.kafka.clients.admin.AdminClientConfig; 15 | import org.apache.kafka.clients.admin.NewTopic; 16 | import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; 17 | import org.apache.kafka.common.network.ListenerName; 18 | import org.apache.kafka.common.security.auth.SecurityProtocol; 19 | import org.apache.kafka.common.utils.Time; 20 | import org.junit.rules.TemporaryFolder; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | /** 25 | * Runs an in-memory, "embedded" instance of a Kafka broker, which listens at `127.0.0.1:9092` by 26 | * default. 27 | * 28 | * Requires a running ZooKeeper instance to connect to. By default, it expects a ZooKeeper instance 29 | * running at `127.0.0.1:2181`. You can specify a different ZooKeeper instance by setting the 30 | * `zookeeper.connect` parameter in the broker's configuration. 31 | */ 32 | public class KafkaEmbedded { 33 | 34 | private static final Logger log = LoggerFactory.getLogger(KafkaEmbedded.class); 35 | 36 | private static final String DEFAULT_ZK_CONNECT = "127.0.0.1:2181"; 37 | 38 | private final Properties effectiveConfig; 39 | private final File logDir; 40 | private final TemporaryFolder tmpFolder; 41 | private final KafkaServer kafka; 42 | 43 | /** 44 | * https://github.com/confluentinc/kafka-streams-examples/blob/5.2.0-post/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java 45 | * 46 | * Creates and starts an embedded Kafka broker. 47 | * 48 | * @param config Broker configuration settings. Used to modify, for example, on which port the 49 | * broker should listen to. Note that you cannot change some settings such as 50 | * `log.dirs`, `port`. 
51 | */ 52 | public KafkaEmbedded(final Properties config) throws IOException { 53 | tmpFolder = new TemporaryFolder(); 54 | tmpFolder.create(); 55 | logDir = tmpFolder.newFolder(); 56 | effectiveConfig = effectiveConfigFrom(config); 57 | final boolean loggingEnabled = true; 58 | 59 | final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled); 60 | log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...", 61 | logDir, zookeeperConnect()); 62 | kafka = TestUtils.createServer(kafkaConfig, Time.SYSTEM); 63 | log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...", 64 | brokerList(), zookeeperConnect()); 65 | } 66 | 67 | private Properties effectiveConfigFrom(final Properties initialConfig) throws IOException { 68 | final Properties effectiveConfig = new Properties(); 69 | effectiveConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), 0); 70 | effectiveConfig.put(KafkaConfig$.MODULE$.HostNameProp(), "127.0.0.1"); 71 | effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), "9092"); 72 | effectiveConfig.put(KafkaConfig$.MODULE$.NumPartitionsProp(), 1); 73 | effectiveConfig.put(KafkaConfig$.MODULE$.AutoCreateTopicsEnableProp(), true); 74 | effectiveConfig.put(KafkaConfig$.MODULE$.MessageMaxBytesProp(), 1000000); 75 | effectiveConfig.put(KafkaConfig$.MODULE$.ControlledShutdownEnableProp(), true); 76 | 77 | effectiveConfig.putAll(initialConfig); 78 | effectiveConfig.setProperty(KafkaConfig$.MODULE$.LogDirProp(), logDir.getAbsolutePath()); 79 | return effectiveConfig; 80 | } 81 | 82 | /** 83 | * This broker's `metadata.broker.list` value. Example: `127.0.0.1:9092`. 84 | * 85 | * You can use this to tell Kafka producers and consumers how to connect to this instance. 86 | */ 87 | public String brokerList() { 88 | return String.join(":", kafka.config().hostName(), Integer.toString(kafka.boundPort( 89 | ListenerName.forSecurityProtocol(SecurityProtocol 90 | .PLAINTEXT)))); 91 | } 92 | 93 | 94 | /** 95 | * The ZooKeeper connection string aka `zookeeper.connect`. 96 | */ 97 | public String zookeeperConnect() { 98 | return effectiveConfig.getProperty("zookeeper.connect", DEFAULT_ZK_CONNECT); 99 | } 100 | 101 | /** 102 | * Stop the broker. 103 | */ 104 | public void stop() { 105 | log.debug("Shutting down embedded Kafka broker at {} (with ZK ensemble at {}) ...", 106 | brokerList(), zookeeperConnect()); 107 | kafka.shutdown(); 108 | kafka.awaitShutdown(); 109 | log.debug("Removing temp folder {} with logs.dir at {} ...", tmpFolder, logDir); 110 | tmpFolder.delete(); 111 | log.debug("Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...", 112 | brokerList(), zookeeperConnect()); 113 | } 114 | 115 | /** 116 | * Create a Kafka topic with 1 partition and a replication factor of 1. 117 | * 118 | * @param topic The name of the topic. 119 | */ 120 | public void createTopic(final String topic) { 121 | createTopic(topic, 1, (short) 1, Collections.emptyMap()); 122 | } 123 | 124 | /** 125 | * Create a Kafka topic with the given parameters. 126 | * 127 | * @param topic The name of the topic. 128 | * @param partitions The number of partitions for this topic. 129 | * @param replication The replication factor for (the partitions of) this topic. 130 | */ 131 | public void createTopic(final String topic, final int partitions, final short replication) { 132 | createTopic(topic, partitions, replication, Collections.emptyMap()); 133 | } 134 | 135 | /** 136 | * Create a Kafka topic with the given parameters. 
137 | * 138 | * @param topic The name of the topic. 139 | * @param partitions The number of partitions for this topic. 140 | * @param replication The replication factor for (partitions of) this topic. 141 | * @param topicConfig Additional topic-level configuration settings. 142 | */ 143 | public void createTopic(final String topic, 144 | final int partitions, 145 | final short replication, 146 | final Map topicConfig) { 147 | log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }", 148 | topic, partitions, replication, topicConfig); 149 | 150 | final Properties properties = new Properties(); 151 | properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList()); 152 | 153 | try (final AdminClient adminClient = AdminClient.create(properties)) { 154 | final NewTopic newTopic = new NewTopic(topic, partitions, replication); 155 | newTopic.configs(topicConfig); 156 | adminClient.createTopics(Collections.singleton(newTopic)).all().get(); 157 | } catch (final InterruptedException | ExecutionException fatal) { 158 | throw new RuntimeException(fatal); 159 | } 160 | 161 | } 162 | 163 | /** 164 | * Delete a Kafka topic. 165 | * 166 | * @param topic The name of the topic. 167 | */ 168 | public void deleteTopic(final String topic) { 169 | log.debug("Deleting topic {}", topic); 170 | final Properties properties = new Properties(); 171 | properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList()); 172 | 173 | try (final AdminClient adminClient = AdminClient.create(properties)) { 174 | adminClient.deleteTopics(Collections.singleton(topic)).all().get(); 175 | } catch (final InterruptedException e) { 176 | throw new RuntimeException(e); 177 | } catch (final ExecutionException e) { 178 | if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) { 179 | throw new RuntimeException(e); 180 | } 181 | } 182 | } 183 | 184 | KafkaServer kafkaServer() { 185 | return kafka; 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /embedded-cluster/src/main/java/no/sysco/testing/kafka/embedded/ZooKeeperEmbedded.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.embedded; 2 | 3 | import java.io.IOException; 4 | import org.apache.curator.test.TestingServer; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | /** 9 | * https://github.com/confluentinc/kafka-streams-examples/tree/5.2.0-post/src/test/java/io/confluent/examples/streams/zookeeper 10 | * 11 | * Runs an in-memory, "embedded" instance of a ZooKeeper server. 12 | * 13 | * The ZooKeeper server instance is automatically started when you create a new instance of this class. 14 | */ 15 | public class ZooKeeperEmbedded { 16 | 17 | private static final Logger log = LoggerFactory.getLogger(ZooKeeperEmbedded.class); 18 | 19 | private final TestingServer server; 20 | 21 | /** 22 | * Creates and starts a ZooKeeper instance. 
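   * <p>Start-up and shutdown ordering sketch (illustrative only): create the ZooKeeper instance
   * before any {@code KafkaEmbedded} broker that points at it, and stop the broker first.
   * <pre>{@code
   * ZooKeeperEmbedded zookeeper = new ZooKeeperEmbedded();
   * // ... start brokers with "zookeeper.connect" = zookeeper.connectString() ...
   * // later: stop the brokers, then zookeeper.stop();
   * }</pre>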
23 | * 24 | * @throws Exception 25 | */ 26 | public ZooKeeperEmbedded() throws Exception { 27 | log.debug("Starting embedded ZooKeeper server..."); 28 | this.server = new TestingServer(); 29 | log.debug("Embedded ZooKeeper server at {} uses the temp directory at {}", 30 | server.getConnectString(), server.getTempDirectory()); 31 | } 32 | 33 | public void stop() throws IOException { 34 | log.debug("Shutting down embedded ZooKeeper server at {} ...", server.getConnectString()); 35 | server.close(); 36 | log.debug("Shutdown of embedded ZooKeeper server at {} completed", server.getConnectString()); 37 | } 38 | 39 | /** 40 | * The ZooKeeper connection string aka `zookeeper.connect` in `hostnameOrIp:port` format. 41 | * Example: `127.0.0.1:2181`. 42 | * 43 | * You can use this to e.g. tell Kafka brokers how to connect to this instance. 44 | */ 45 | public String connectString() { 46 | return server.getConnectString(); 47 | } 48 | 49 | /** 50 | * The hostname of the ZooKeeper instance. Example: `127.0.0.1` 51 | */ 52 | public String hostname() { 53 | // "server:1:2:3" -> "server:1:2" 54 | return connectString().substring(0, connectString().lastIndexOf(':')); 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /mvnw: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ---------------------------------------------------------------------------- 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations 18 | # under the License. 19 | # ---------------------------------------------------------------------------- 20 | 21 | # ---------------------------------------------------------------------------- 22 | # Maven2 Start Up Batch script 23 | # 24 | # Required ENV vars: 25 | # ------------------ 26 | # JAVA_HOME - location of a JDK home dir 27 | # 28 | # Optional ENV vars 29 | # ----------------- 30 | # M2_HOME - location of maven2's installed home dir 31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven 32 | # e.g. to debug Maven itself, use 33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files 35 | # ---------------------------------------------------------------------------- 36 | 37 | if [ -z "$MAVEN_SKIP_RC" ] ; then 38 | 39 | if [ -f /etc/mavenrc ] ; then 40 | . /etc/mavenrc 41 | fi 42 | 43 | if [ -f "$HOME/.mavenrc" ] ; then 44 | . "$HOME/.mavenrc" 45 | fi 46 | 47 | fi 48 | 49 | # OS specific support. $var _must_ be set to either true or false. 
50 | cygwin=false; 51 | darwin=false; 52 | mingw=false 53 | case "`uname`" in 54 | CYGWIN*) cygwin=true ;; 55 | MINGW*) mingw=true;; 56 | Darwin*) darwin=true 57 | # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home 58 | # See https://developer.apple.com/library/mac/qa/qa1170/_index.html 59 | if [ -z "$JAVA_HOME" ]; then 60 | if [ -x "/usr/libexec/java_home" ]; then 61 | export JAVA_HOME="`/usr/libexec/java_home`" 62 | else 63 | export JAVA_HOME="/Library/Java/Home" 64 | fi 65 | fi 66 | ;; 67 | esac 68 | 69 | if [ -z "$JAVA_HOME" ] ; then 70 | if [ -r /etc/gentoo-release ] ; then 71 | JAVA_HOME=`java-config --jre-home` 72 | fi 73 | fi 74 | 75 | if [ -z "$M2_HOME" ] ; then 76 | ## resolve links - $0 may be a link to maven's home 77 | PRG="$0" 78 | 79 | # need this for relative symlinks 80 | while [ -h "$PRG" ] ; do 81 | ls=`ls -ld "$PRG"` 82 | link=`expr "$ls" : '.*-> \(.*\)$'` 83 | if expr "$link" : '/.*' > /dev/null; then 84 | PRG="$link" 85 | else 86 | PRG="`dirname "$PRG"`/$link" 87 | fi 88 | done 89 | 90 | saveddir=`pwd` 91 | 92 | M2_HOME=`dirname "$PRG"`/.. 93 | 94 | # make it fully qualified 95 | M2_HOME=`cd "$M2_HOME" && pwd` 96 | 97 | cd "$saveddir" 98 | # echo Using m2 at $M2_HOME 99 | fi 100 | 101 | # For Cygwin, ensure paths are in UNIX format before anything is touched 102 | if $cygwin ; then 103 | [ -n "$M2_HOME" ] && 104 | M2_HOME=`cygpath --unix "$M2_HOME"` 105 | [ -n "$JAVA_HOME" ] && 106 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 107 | [ -n "$CLASSPATH" ] && 108 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"` 109 | fi 110 | 111 | # For Mingw, ensure paths are in UNIX format before anything is touched 112 | if $mingw ; then 113 | [ -n "$M2_HOME" ] && 114 | M2_HOME="`(cd "$M2_HOME"; pwd)`" 115 | [ -n "$JAVA_HOME" ] && 116 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" 117 | fi 118 | 119 | if [ -z "$JAVA_HOME" ]; then 120 | javaExecutable="`which javac`" 121 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then 122 | # readlink(1) is not available as standard on Solaris 10. 123 | readLink=`which readlink` 124 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then 125 | if $darwin ; then 126 | javaHome="`dirname \"$javaExecutable\"`" 127 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" 128 | else 129 | javaExecutable="`readlink -f \"$javaExecutable\"`" 130 | fi 131 | javaHome="`dirname \"$javaExecutable\"`" 132 | javaHome=`expr "$javaHome" : '\(.*\)/bin'` 133 | JAVA_HOME="$javaHome" 134 | export JAVA_HOME 135 | fi 136 | fi 137 | fi 138 | 139 | if [ -z "$JAVACMD" ] ; then 140 | if [ -n "$JAVA_HOME" ] ; then 141 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 142 | # IBM's JDK on AIX uses strange locations for the executables 143 | JAVACMD="$JAVA_HOME/jre/sh/java" 144 | else 145 | JAVACMD="$JAVA_HOME/bin/java" 146 | fi 147 | else 148 | JAVACMD="`which java`" 149 | fi 150 | fi 151 | 152 | if [ ! -x "$JAVACMD" ] ; then 153 | echo "Error: JAVA_HOME is not defined correctly." >&2 154 | echo " We cannot execute $JAVACMD" >&2 155 | exit 1 156 | fi 157 | 158 | if [ -z "$JAVA_HOME" ] ; then 159 | echo "Warning: JAVA_HOME environment variable is not set." 
160 | fi 161 | 162 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher 163 | 164 | # traverses directory structure from process work directory to filesystem root 165 | # first directory with .mvn subdirectory is considered project base directory 166 | find_maven_basedir() { 167 | 168 | if [ -z "$1" ] 169 | then 170 | echo "Path not specified to find_maven_basedir" 171 | return 1 172 | fi 173 | 174 | basedir="$1" 175 | wdir="$1" 176 | while [ "$wdir" != '/' ] ; do 177 | if [ -d "$wdir"/.mvn ] ; then 178 | basedir=$wdir 179 | break 180 | fi 181 | # workaround for JBEAP-8937 (on Solaris 10/Sparc) 182 | if [ -d "${wdir}" ]; then 183 | wdir=`cd "$wdir/.."; pwd` 184 | fi 185 | # end of workaround 186 | done 187 | echo "${basedir}" 188 | } 189 | 190 | # concatenates all lines of a file 191 | concat_lines() { 192 | if [ -f "$1" ]; then 193 | echo "$(tr -s '\n' ' ' < "$1")" 194 | fi 195 | } 196 | 197 | BASE_DIR=`find_maven_basedir "$(pwd)"` 198 | if [ -z "$BASE_DIR" ]; then 199 | exit 1; 200 | fi 201 | 202 | ########################################################################################## 203 | # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 204 | # This allows using the maven wrapper in projects that prohibit checking in binary data. 205 | ########################################################################################## 206 | if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then 207 | if [ "$MVNW_VERBOSE" = true ]; then 208 | echo "Found .mvn/wrapper/maven-wrapper.jar" 209 | fi 210 | else 211 | if [ "$MVNW_VERBOSE" = true ]; then 212 | echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 213 | fi 214 | if [ -n "$MVNW_REPOURL" ]; then 215 | jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar" 216 | else 217 | jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar" 218 | fi 219 | while IFS="=" read key value; do 220 | case "$key" in (wrapperUrl) jarUrl="$value"; break ;; 221 | esac 222 | done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" 223 | if [ "$MVNW_VERBOSE" = true ]; then 224 | echo "Downloading from: $jarUrl" 225 | fi 226 | wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" 227 | if $cygwin; then 228 | wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` 229 | fi 230 | 231 | if command -v wget > /dev/null; then 232 | if [ "$MVNW_VERBOSE" = true ]; then 233 | echo "Found wget ... using wget" 234 | fi 235 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then 236 | wget "$jarUrl" -O "$wrapperJarPath" 237 | else 238 | wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" 239 | fi 240 | elif command -v curl > /dev/null; then 241 | if [ "$MVNW_VERBOSE" = true ]; then 242 | echo "Found curl ... using curl" 243 | fi 244 | if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then 245 | curl -o "$wrapperJarPath" "$jarUrl" -f 246 | else 247 | curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f 248 | fi 249 | 250 | else 251 | if [ "$MVNW_VERBOSE" = true ]; then 252 | echo "Falling back to using Java to download" 253 | fi 254 | javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" 255 | # For Cygwin, switch paths to Windows format before running javac 256 | if $cygwin; then 257 | javaClass=`cygpath --path --windows "$javaClass"` 258 | fi 259 | if [ -e "$javaClass" ]; then 260 | if [ ! 
-e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 261 | if [ "$MVNW_VERBOSE" = true ]; then 262 | echo " - Compiling MavenWrapperDownloader.java ..." 263 | fi 264 | # Compiling the Java class 265 | ("$JAVA_HOME/bin/javac" "$javaClass") 266 | fi 267 | if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 268 | # Running the downloader 269 | if [ "$MVNW_VERBOSE" = true ]; then 270 | echo " - Running MavenWrapperDownloader.java ..." 271 | fi 272 | ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") 273 | fi 274 | fi 275 | fi 276 | fi 277 | ########################################################################################## 278 | # End of extension 279 | ########################################################################################## 280 | 281 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} 282 | if [ "$MVNW_VERBOSE" = true ]; then 283 | echo $MAVEN_PROJECTBASEDIR 284 | fi 285 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" 286 | 287 | # For Cygwin, switch paths to Windows format before running java 288 | if $cygwin; then 289 | [ -n "$M2_HOME" ] && 290 | M2_HOME=`cygpath --path --windows "$M2_HOME"` 291 | [ -n "$JAVA_HOME" ] && 292 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` 293 | [ -n "$CLASSPATH" ] && 294 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"` 295 | [ -n "$MAVEN_PROJECTBASEDIR" ] && 296 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` 297 | fi 298 | 299 | # Provide a "standardized" way to retrieve the CLI args that will 300 | # work with both Windows and non-Windows executions. 301 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" 302 | export MAVEN_CMD_LINE_ARGS 303 | 304 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 305 | 306 | exec "$JAVACMD" \ 307 | $MAVEN_OPTS \ 308 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ 309 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ 310 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" 311 | -------------------------------------------------------------------------------- /mvnw.cmd: -------------------------------------------------------------------------------- 1 | @REM ---------------------------------------------------------------------------- 2 | @REM Licensed to the Apache Software Foundation (ASF) under one 3 | @REM or more contributor license agreements. See the NOTICE file 4 | @REM distributed with this work for additional information 5 | @REM regarding copyright ownership. The ASF licenses this file 6 | @REM to you under the Apache License, Version 2.0 (the 7 | @REM "License"); you may not use this file except in compliance 8 | @REM with the License. You may obtain a copy of the License at 9 | @REM 10 | @REM http://www.apache.org/licenses/LICENSE-2.0 11 | @REM 12 | @REM Unless required by applicable law or agreed to in writing, 13 | @REM software distributed under the License is distributed on an 14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | @REM KIND, either express or implied. See the License for the 16 | @REM specific language governing permissions and limitations 17 | @REM under the License. 
18 | @REM ---------------------------------------------------------------------------- 19 | 20 | @REM ---------------------------------------------------------------------------- 21 | @REM Maven2 Start Up Batch script 22 | @REM 23 | @REM Required ENV vars: 24 | @REM JAVA_HOME - location of a JDK home dir 25 | @REM 26 | @REM Optional ENV vars 27 | @REM M2_HOME - location of maven2's installed home dir 28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands 29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending 30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven 31 | @REM e.g. to debug Maven itself, use 32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files 34 | @REM ---------------------------------------------------------------------------- 35 | 36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' 37 | @echo off 38 | @REM set title of command window 39 | title %0 40 | @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' 41 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% 42 | 43 | @REM set %HOME% to equivalent of $HOME 44 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") 45 | 46 | @REM Execute a user defined script before this one 47 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre 48 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending 49 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" 50 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" 51 | :skipRcPre 52 | 53 | @setlocal 54 | 55 | set ERROR_CODE=0 56 | 57 | @REM To isolate internal variables from possible post scripts, we use another setlocal 58 | @setlocal 59 | 60 | @REM ==== START VALIDATION ==== 61 | if not "%JAVA_HOME%" == "" goto OkJHome 62 | 63 | echo. 64 | echo Error: JAVA_HOME not found in your environment. >&2 65 | echo Please set the JAVA_HOME variable in your environment to match the >&2 66 | echo location of your Java installation. >&2 67 | echo. 68 | goto error 69 | 70 | :OkJHome 71 | if exist "%JAVA_HOME%\bin\java.exe" goto init 72 | 73 | echo. 74 | echo Error: JAVA_HOME is set to an invalid directory. >&2 75 | echo JAVA_HOME = "%JAVA_HOME%" >&2 76 | echo Please set the JAVA_HOME variable in your environment to match the >&2 77 | echo location of your Java installation. >&2 78 | echo. 79 | goto error 80 | 81 | @REM ==== END VALIDATION ==== 82 | 83 | :init 84 | 85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn". 86 | @REM Fallback to current working directory if not found. 87 | 88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% 89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir 90 | 91 | set EXEC_DIR=%CD% 92 | set WDIR=%EXEC_DIR% 93 | :findBaseDir 94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound 95 | cd .. 
96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound 97 | set WDIR=%CD% 98 | goto findBaseDir 99 | 100 | :baseDirFound 101 | set MAVEN_PROJECTBASEDIR=%WDIR% 102 | cd "%EXEC_DIR%" 103 | goto endDetectBaseDir 104 | 105 | :baseDirNotFound 106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR% 107 | cd "%EXEC_DIR%" 108 | 109 | :endDetectBaseDir 110 | 111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig 112 | 113 | @setlocal EnableExtensions EnableDelayedExpansion 114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a 115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% 116 | 117 | :endReadAdditionalConfig 118 | 119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" 120 | set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" 121 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 122 | 123 | set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar" 124 | 125 | FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( 126 | IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B 127 | ) 128 | 129 | @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 130 | @REM This allows using the maven wrapper in projects that prohibit checking in binary data. 131 | if exist %WRAPPER_JAR% ( 132 | if "%MVNW_VERBOSE%" == "true" ( 133 | echo Found %WRAPPER_JAR% 134 | ) 135 | ) else ( 136 | if not "%MVNW_REPOURL%" == "" ( 137 | SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.4/maven-wrapper-0.5.4.jar" 138 | ) 139 | if "%MVNW_VERBOSE%" == "true" ( 140 | echo Couldn't find %WRAPPER_JAR%, downloading it ... 141 | echo Downloading from: %DOWNLOAD_URL% 142 | ) 143 | 144 | powershell -Command "&{"^ 145 | "$webclient = new-object System.Net.WebClient;"^ 146 | "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ 147 | "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ 148 | "}"^ 149 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ 150 | "}" 151 | if "%MVNW_VERBOSE%" == "true" ( 152 | echo Finished downloading %WRAPPER_JAR% 153 | ) 154 | ) 155 | @REM End of extension 156 | 157 | @REM Provide a "standardized" way to retrieve the CLI args that will 158 | @REM work with both Windows and non-Windows executions. 
159 | set MAVEN_CMD_LINE_ARGS=%* 160 | 161 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* 162 | if ERRORLEVEL 1 goto error 163 | goto end 164 | 165 | :error 166 | set ERROR_CODE=1 167 | 168 | :end 169 | @endlocal & set ERROR_CODE=%ERROR_CODE% 170 | 171 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost 172 | @REM check for post script, once with legacy .bat ending and once with .cmd ending 173 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" 174 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" 175 | :skipRcPost 176 | 177 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' 178 | if "%MAVEN_BATCH_PAUSE%" == "on" pause 179 | 180 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% 181 | 182 | exit /B %ERROR_CODE% 183 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | no.sysco.testing 8 | kafka 9 | 1.0-SNAPSHOT 10 | pom 11 | 12 | 13 | streams-client 14 | consumer-producer-clients 15 | embedded-cluster 16 | data-pipeline 17 | e2e 18 | 19 | 20 | 21 | UTF-8 22 | UTF-8 23 | 1.8 24 | 1.8 25 | 26 | 2.1.1 27 | 5.1.1 28 | 29 | 1.8.2 30 | 1.7.25 31 | 3.2.1 32 | 33 | 4.12 34 | 3.1.6 35 | 2.22.0 36 | 3.0.1 37 | 2.9.10.4 38 | 39 | 2.22.1 40 | true 41 | 42 | 43 | 44 | 45 | confluent 46 | http://packages.confluent.io/maven/ 47 | 48 | 49 | 50 | 51 | confluent 52 | Confluent 53 | http://packages.confluent.io/maven/ 54 | 55 | 56 | 57 | 58 | 59 | 60 | zhenik 61 | Nikita Zhevnitskiy 62 | https://github.com/zhenik 63 | Sysco AS 64 | https://sysco.no 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | no.sysco.testing 73 | embedded-cluster 74 | ${project.version} 75 | 76 | 77 | 78 | com.typesafe 79 | config 80 | 1.3.3 81 | 82 | 83 | 84 | org.apache.kafka 85 | kafka-clients 86 | ${kafka.apis.version} 87 | 88 | 89 | org.apache.kafka 90 | kafka-streams 91 | ${kafka.apis.version} 92 | 93 | 94 | 95 | com.fasterxml.jackson.core 96 | jackson-databind 97 | ${jackson.version} 98 | 99 | 100 | 101 | org.apache.avro 102 | avro 103 | ${avro.version} 104 | 105 | 106 | io.confluent 107 | kafka-avro-serializer 108 | ${confluent.platform.version} 109 | 110 | 111 | io.confluent 112 | kafka-streams-avro-serde 113 | ${confluent.platform.version} 114 | 115 | 116 | 117 | org.slf4j 118 | slf4j-log4j12 119 | ${slf4j.logger.version} 120 | 121 | 122 | org.slf4j 123 | slf4j-api 124 | ${slf4j.logger.version} 125 | 126 | 127 | 128 | org.apache.kafka 129 | kafka-streams-test-utils 130 | ${kafka.apis.version} 131 | test 132 | 133 | 134 | junit 135 | junit 136 | ${junit.version} 137 | test 138 | 139 | 140 | com.github.tomakehurst 141 | wiremock-jre8 142 | ${wiremock.version} 143 | test 144 | 145 | 146 | org.awaitility 147 | awaitility 148 | ${awaitility.version} 149 | test 150 | 151 | 152 | io.rest-assured 153 | rest-assured 154 | ${rest-assured.version} 155 | test 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | org.apache.avro 165 | avro-maven-plugin 166 | ${avro.version} 167 | 168 | 169 | io.confluent 170 | kafka-schema-registry-maven-plugin 171 | ${confluent.platform.version} 172 | 173 | 174 | org.apache.maven.plugins 175 | maven-shade-plugin 176 | ${shade.plugin.version} 177 | 178 | 179 | org.apache.maven.plugins 180 | maven-failsafe-plugin 181 | ${failsafe.plugin.version} 182 | 183 | 184 | 185 | 
integration-test 186 | verify 187 | 188 | 189 | 190 | 191 | ${skipIntegrationTests} 192 | 193 | 194 | 195 | 196 | 197 | 198 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://www.travis-ci.org/sysco-middleware/kafka-testing.svg?branch=master)](https://www.travis-ci.org/sysco-middleware/kafka-testing) 2 | # Kafka-clients: writing automated tests 3 | Run all tests: 4 | ``` 5 | ./mvnw clean install -DskipIntegrationTests=false 6 | ``` 7 | 8 | ## Modules and approaches 9 | 1. [streams-client module](./streams-client) contains examples of unit-tests for kafka-streams topologies with [kafka-streams-test-utils](https://kafka.apache.org/21/documentation/streams/developer-guide/testing.html). 10 | Approach covers testing topologies (stateful & stateless processors) with different `serdes` including [avro](https://avro.apache.org/docs/1.8.2/spec.html) and [confluent schema registry](https://docs.confluent.io/current/schema-registry/index.html). 11 | 2. [embedded-kafka-cluster module](./embedded-cluster) is example of kafka-embedded cluster in memory (1 Zookeeper, 1 Kafka broker, 1 Confluent schema registry). Embedded kafka cluster is used for integration test of kafka-client application. 12 | 3. [consumer-producer-clients module](./consumer-producer-clients) contains examples of integration tests with [embedded kafka cluster](./embedded-cluster) and kafka based applications with [Producer/Consumer API](https://kafka.apache.org/documentation) 13 | 4. [data-pipeline module](./data-pipeline) contains examples of integration tests with [embedded kafka cluster](./embedded-cluster), [wire-mock](http://wiremock.org) and kafka based applications with [Streams API](https://kafka.apache.org/documentation/streams/) 14 | 5. [e2e module](./e2e) contains `IT` test for data pipeline, using [testcontainers](https://www.testcontainers.org) 15 | 16 | 17 | ### TODO: 18 | - [ ] Makefile 19 | - [ ] update vers java `8 -> 12` `!NB`: Reflection use -> only java8 currently 20 | - [ ] update vers junit `4 -> 5` 21 | 22 | ### Important notes 23 | - [Confluent Platform and Apache Kafka Compatibility](https://docs.confluent.io/current/installation/versions-interoperability.html#cp-and-apache-kafka-compatibility) 24 | 25 | ### References 26 | - [Apache Kafka. Developer guide. 
Testing](https://kafka.apache.org/20/documentation/streams/developer-guide/testing.html) 27 | - [Getting Your Feet Wet with Stream Processing – Part 2: Testing Your Streaming Application](https://www.confluent.io/blog/stream-processing-part-2-testing-your-streaming-application) -------------------------------------------------------------------------------- /streams-client/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | kafka 8 | no.sysco.testing 9 | 1.0-SNAPSHOT 10 | 11 | streams-client 12 | 13 | 14 | 15 | 16 | org.apache.kafka 17 | kafka-clients 18 | 19 | 20 | org.apache.kafka 21 | kafka-streams 22 | 23 | 24 | 25 | io.confluent 26 | kafka-streams-avro-serde 27 | ${confluent.platform.version} 28 | 29 | 30 | org.apache.avro 31 | avro 32 | 33 | 34 | 35 | org.apache.kafka 36 | kafka-streams-test-utils 37 | test 38 | 39 | 40 | junit 41 | junit 42 | test 43 | 44 | 45 | 46 | org.slf4j 47 | slf4j-log4j12 48 | 49 | 50 | 51 | 52 | 53 | 54 | org.apache.avro 55 | avro-maven-plugin 56 | 57 | 58 | generate-sources 59 | 60 | schema 61 | 62 | 63 | 64 | 65 | ${project.basedir}/src/main/resources/avro 66 | String 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /streams-client/readme.md: -------------------------------------------------------------------------------- 1 | # Description 2 | Module contains examples of testing Kafka Streams APIs with kafka-streams-test-utils. 3 | Testing only streams topologies in isolation. 4 | 5 | ## TODO 6 | - [X] Topology using dsl + local storage + avro schemas 7 | - [X] Topology using processor api + local storage + avro schemas 8 | - [ ] Clean tests + DRY 9 | 10 | ## Related Issues 11 | * [#877 - Passing Schema Registry URL twice to instantiate KafkaAvroSerializer](https://github.com/confluentinc/schema-registry/issues/877) 12 | 13 | ## Additional Info 14 | * More examples [kafka-streams-examples](https://github.com/confluentinc/kafka-streams-examples) 15 | -------------------------------------------------------------------------------- /streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessing.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import java.util.stream.Collectors; 4 | import java.util.stream.Stream; 5 | import org.apache.kafka.common.serialization.Serdes; 6 | import org.apache.kafka.common.utils.Bytes; 7 | import org.apache.kafka.streams.KeyValue; 8 | import org.apache.kafka.streams.StreamsBuilder; 9 | import org.apache.kafka.streams.Topology; 10 | import org.apache.kafka.streams.kstream.Consumed; 11 | import org.apache.kafka.streams.kstream.KStream; 12 | import org.apache.kafka.streams.kstream.Materialized; 13 | import org.apache.kafka.streams.kstream.Produced; 14 | import org.apache.kafka.streams.kstream.ValueMapper; 15 | import org.apache.kafka.streams.state.KeyValueStore; 16 | 17 | public class StreamProcessing { 18 | 19 | // stateless 20 | public static Topology topologyUpperCase(final String sourceTopic, final String sinkTopic) { 21 | final StreamsBuilder streamsBuilder = new StreamsBuilder(); 22 | final KStream sourceStream = 23 | streamsBuilder.stream(sourceTopic, Consumed.with(Serdes.String(), Serdes.String())); 24 | 25 | sourceStream 26 | .mapValues((ValueMapper) String::toUpperCase) 27 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.String())); 28 | return streamsBuilder.build(); 29 | } 30 
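  // Illustrative wiring note (not part of the original class): outside of tests, a topology
  // built by this factory would typically be run with the regular Streams runtime, assuming
  // `props` carries at least `application.id` and `bootstrap.servers`, e.g.:
  //   KafkaStreams streams = new KafkaStreams(topologyUpperCase("topic-in", "topic-out"), props);
  //   streams.start();
  //   Runtime.getRuntime().addShutdownHook(new Thread(streams::close));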
| 31 | // stateful 32 | public static Topology topologyCountAnagram( 33 | final String sourceTopic, 34 | final String sinkTopic, 35 | final String storeName) { 36 | final StreamsBuilder streamsBuilder = new StreamsBuilder(); 37 | final KStream sourceStream = 38 | streamsBuilder.stream(sourceTopic, Consumed.with(Serdes.String(), Serdes.String())); 39 | // 1. [null:"magic"] => ["acgim":"magic"] 40 | // 2. amount with same key 41 | sourceStream 42 | .map( 43 | (key, value) -> { 44 | final String newKey = 45 | Stream.of(value.replaceAll(" ", "").split("")) 46 | .sorted() 47 | .collect(Collectors.joining()); 48 | return KeyValue.pair(newKey, value); 49 | }) 50 | .groupByKey() 51 | .count(Materialized.>as(storeName)) 52 | .toStream() 53 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.Long())); 54 | return streamsBuilder.build(); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessingAvro.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import no.sysco.testing.kafka.streams.avro.Person; 4 | import org.apache.kafka.common.serialization.Serde; 5 | import org.apache.kafka.common.serialization.Serdes; 6 | import org.apache.kafka.common.utils.Bytes; 7 | import org.apache.kafka.streams.StreamsBuilder; 8 | import org.apache.kafka.streams.Topology; 9 | import org.apache.kafka.streams.kstream.Consumed; 10 | import org.apache.kafka.streams.kstream.Materialized; 11 | import org.apache.kafka.streams.kstream.Produced; 12 | import org.apache.kafka.streams.state.KeyValueStore; 13 | 14 | public class StreamProcessingAvro { 15 | 16 | // stateless 17 | public static Topology topologyUpperCase( 18 | final String sourceTopic, 19 | final String sinkTopic, 20 | final Serde personSerdes) { 21 | final StreamsBuilder builder = new StreamsBuilder(); 22 | builder.stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes)) 23 | // .peek((k, v) -> System.out.printf("%s %s %s\n", v.getId(), v.getName(), v.getLastname())) 24 | .mapValues( 25 | person -> 26 | Person.newBuilder() 27 | .setId(person.getId().toUpperCase()) 28 | .setName(person.getName().toUpperCase()) 29 | .setLastname(person.getLastname().toUpperCase()) 30 | .build()) 31 | .to(sinkTopic, Produced.with(Serdes.String(), personSerdes)); 32 | return builder.build(); 33 | } 34 | 35 | // stateful 36 | public static Topology topologyCountUsersWithSameName( 37 | String sourceTopic, 38 | String sinkTopic, 39 | final Serde personSerdes, 40 | final String storeName) { 41 | 42 | final StreamsBuilder builder = new StreamsBuilder(); 43 | builder.stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes)) 44 | .groupBy((key, value) -> value.getName()) 45 | .count(Materialized.>as(storeName)) 46 | .toStream() 47 | .to(sinkTopic, Produced.with(Serdes.String(), Serdes.Long())); 48 | 49 | return builder.build(); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /streams-client/src/main/java/no/sysco/testing/kafka/streams/topology/StreamProcessingLowLvlAvro.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import no.sysco.testing.kafka.streams.avro.Person; 4 | import org.apache.kafka.common.serialization.Serde; 5 | import org.apache.kafka.common.serialization.Serdes; 6 | import 
org.apache.kafka.streams.KeyValue; 7 | import org.apache.kafka.streams.StreamsBuilder; 8 | import org.apache.kafka.streams.Topology; 9 | import org.apache.kafka.streams.kstream.Consumed; 10 | import org.apache.kafka.streams.kstream.Produced; 11 | import org.apache.kafka.streams.kstream.Transformer; 12 | import org.apache.kafka.streams.processor.ProcessorContext; 13 | import org.apache.kafka.streams.state.KeyValueStore; 14 | import org.apache.kafka.streams.state.Stores; 15 | 16 | public class StreamProcessingLowLvlAvro { 17 | 18 | // stateful 19 | public static Topology topologyDedupByUserId( 20 | final String sourceTopic, 21 | final String sinkTopic, 22 | final Serde personSerdes, 23 | final String idStore) { 24 | 25 | final StreamsBuilder builder = new StreamsBuilder(); 26 | builder 27 | .addStateStore( 28 | Stores.keyValueStoreBuilder( 29 | Stores.persistentKeyValueStore(idStore), Serdes.String(), personSerdes)) 30 | .stream(sourceTopic, Consumed.with(Serdes.String(), personSerdes)) 31 | .transform( 32 | () -> 33 | new Transformer>() { 34 | KeyValueStore stateStore; 35 | 36 | @Override 37 | public void init(ProcessorContext context) { 38 | this.stateStore = 39 | (KeyValueStore) context.getStateStore(idStore); 40 | } 41 | 42 | @Override 43 | public KeyValue transform(String key, Person value) { 44 | String id = value.getId(); 45 | if (!id.equals(key)) return null; // some weird 46 | 47 | Person person = stateStore.get(key); 48 | if (person == null) { 49 | // add to store 50 | stateStore.put(key, value); 51 | return KeyValue.pair(key, value); 52 | } else { 53 | return null; 54 | } 55 | } 56 | 57 | @Override 58 | public void close() {} 59 | }, 60 | idStore) 61 | .to(sinkTopic, Produced.with(Serdes.String(), personSerdes)); 62 | 63 | return builder.build(); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /streams-client/src/main/resources/avro/person.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "no.sysco.testing.kafka.streams.avro", 3 | "type": "record", 4 | "name": "Person", 5 | "fields": [ 6 | {"name": "id", "type": "string"}, 7 | {"name": "name", "type": "string"}, 8 | {"name": "lastname", "type": "string"} 9 | ] 10 | } -------------------------------------------------------------------------------- /streams-client/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Set root logger level to DEBUG and its only appender to A1. 2 | log4j.rootLogger=INFO, A1 3 | # A1 is set to be a ConsoleAppender. 4 | log4j.appender.A1=org.apache.log4j.ConsoleAppender 5 | # A1 uses PatternLayout. 
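# (illustrative, not from the original file) with the conversion pattern configured below, a
# typical line looks like:
#   12   [main] INFO  no.sysco.testing.kafka.streams.topology.StreamProcessing  - started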
6 | log4j.appender.A1.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n -------------------------------------------------------------------------------- /streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingAvroTest.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; 4 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException; 5 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 6 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde; 7 | import java.io.IOException; 8 | import java.util.Arrays; 9 | import java.util.Collections; 10 | import java.util.Map; 11 | import java.util.Optional; 12 | import java.util.Properties; 13 | import no.sysco.testing.kafka.streams.avro.Person; 14 | import org.apache.kafka.clients.consumer.ConsumerRecord; 15 | import org.apache.kafka.clients.producer.ProducerRecord; 16 | import org.apache.kafka.common.serialization.Serdes; 17 | import org.apache.kafka.common.serialization.StringDeserializer; 18 | import org.apache.kafka.common.serialization.StringSerializer; 19 | import org.apache.kafka.streams.StreamsConfig; 20 | import org.apache.kafka.streams.Topology; 21 | import org.apache.kafka.streams.TopologyTestDriver; 22 | import org.apache.kafka.streams.state.KeyValueStore; 23 | import org.apache.kafka.streams.test.ConsumerRecordFactory; 24 | import org.junit.After; 25 | import org.junit.Before; 26 | import org.junit.Test; 27 | 28 | import static org.junit.Assert.assertEquals; 29 | 30 | public class StreamProcessingAvroTest { 31 | private final String topicIn = "topic-in"; 32 | private final String topicOut = "topic-out"; 33 | private final String schemaUrl = "http://localhost:8081"; 34 | // http://localhost:8081/subjects/topic-in-value/versions/latest 35 | // only for TopicNameStrategy 36 | private final String mockedUrl = schemaUrl + "/subjects/" + topicIn + "-value/versions/latest"; 37 | private TopologyTestDriver testDriver; 38 | private MockSchemaRegistryClient schemaRegistryClient; 39 | private Properties properties; 40 | 41 | @Before 42 | public void start() { 43 | properties = new Properties(); 44 | properties.put(StreamsConfig.CLIENT_ID_CONFIG, "client-id-test-1"); 45 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-id-test-5"); 46 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9922"); 47 | properties.put( 48 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081"); 49 | properties.put( 50 | StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); 51 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class); 52 | 53 | schemaRegistryClient = new MockSchemaRegistryClient(); 54 | } 55 | 56 | @After 57 | public void tearDown() { 58 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close); 59 | testDriver = null; 60 | properties = null; 61 | } 62 | 63 | @Test 64 | public void testTopologyAvro_statelessProcessors() throws IOException, RestClientException { 65 | 66 | /** Arrange */ 67 | // register schema in mock schema-registry -> not necessary 68 | // schemaRegistryClient.register( 69 | // new TopicNameStrategy().subjectName(topicIn, false, Person.SCHEMA$), Person.SCHEMA$); 70 | // create serde with config to be 
able to connect to mock schema registry 71 | // https://github.com/confluentinc/schema-registry/issues/877 72 | // Passing Schema Registry URL twice to instantiate KafkaAvroSerializer or Serde 73 | final SpecificAvroSerde serde = new SpecificAvroSerde<>(schemaRegistryClient); 74 | 75 | final Map schema = 76 | Collections.singletonMap( 77 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 78 | "wat-ever-url-anyway-it-is-mocked"); 79 | serde.configure(schema, false); 80 | // get topology 81 | final Topology topology = 82 | StreamProcessingAvro.topologyUpperCase(topicIn, topicOut, serde); 83 | testDriver = new TopologyTestDriver(topology, properties); 84 | 85 | final ConsumerRecordFactory factory = 86 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer()); 87 | 88 | final ConsumerRecord inRecord1 = 89 | factory.create( 90 | topicIn, 91 | "1", 92 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build()); 93 | 94 | final ConsumerRecord inRecord2 = 95 | factory.create( 96 | topicIn, 97 | "2", 98 | Person.newBuilder().setId("id-2").setName("vitaly").setLastname("moscow").build()); 99 | 100 | /** Act */ 101 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2)); 102 | final ProducerRecord outRecord1 = 103 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer()); 104 | final ProducerRecord outRecord2 = 105 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer()); 106 | 107 | /** Assert */ 108 | assertEquals("ID-1", outRecord1.value().getId()); 109 | assertEquals("ID-2", outRecord2.value().getId()); 110 | assertEquals("moscow".toUpperCase(), outRecord2.value().getLastname()); 111 | } 112 | 113 | @Test 114 | public void testTopologyAvro_statefulProcessors() throws IOException, RestClientException { 115 | /** Arrange */ 116 | final String storeName = "same-name"; 117 | // register schema in mock schema-registry -> Not - necessary 118 | // schemaRegistryClient.register( 119 | // new TopicNameStrategy().subjectName(topicIn, false, Person.SCHEMA$), Person.SCHEMA$); 120 | // create serde with config to be able to connect to mock schema registry 121 | final SpecificAvroSerde serde = new SpecificAvroSerde<>(schemaRegistryClient); 122 | 123 | final Map schema = 124 | Collections.singletonMap( 125 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 126 | "wat-ever-url-anyway-it-is-mocked"); 127 | serde.configure(schema, false); 128 | // get topology 129 | final Topology topology = 130 | StreamProcessingAvro.topologyCountUsersWithSameName(topicIn, topicOut, serde, storeName); 131 | testDriver = new TopologyTestDriver(topology, properties); 132 | 133 | final ConsumerRecordFactory factory = 134 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer()); 135 | 136 | final ConsumerRecord inRecord1 = 137 | factory.create( 138 | topicIn, 139 | "1", 140 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build()); 141 | 142 | final ConsumerRecord inRecord2 = 143 | factory.create( 144 | topicIn, 145 | "2", 146 | Person.newBuilder().setId("id-2").setName("nikita").setLastname("moscow").build()); 147 | 148 | /** Act */ 149 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2)); 150 | final KeyValueStore keyValueStore = testDriver.getKeyValueStore(storeName); 151 | final Long amountOfRecordWithSameName = keyValueStore.get("nikita"); 152 | 153 | /** Assert */ 154 | assertEquals(Long.valueOf(2), amountOfRecordWithSameName); 155 | } 156 | 
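  // Illustrative follow-up (not in the original test class): the stateful topology also forwards
  // each updated count to the sink topic, so the same driver could read it back, e.g.
  //   testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer());
  // which, for the two records piped above, would yield ("nikita", 1L) and then ("nikita", 2L).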
} 157 | -------------------------------------------------------------------------------- /streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingLowLvlAvroTest.java: -------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; 4 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException; 5 | import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; 6 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde; 7 | import java.io.IOException; 8 | import java.util.Arrays; 9 | import java.util.Collections; 10 | import java.util.Map; 11 | import java.util.Optional; 12 | import java.util.Properties; 13 | import no.sysco.testing.kafka.streams.avro.Person; 14 | import org.apache.kafka.clients.consumer.ConsumerRecord; 15 | import org.apache.kafka.clients.producer.ProducerRecord; 16 | import org.apache.kafka.common.serialization.Serdes; 17 | import org.apache.kafka.common.serialization.StringDeserializer; 18 | import org.apache.kafka.common.serialization.StringSerializer; 19 | import org.apache.kafka.streams.StreamsConfig; 20 | import org.apache.kafka.streams.Topology; 21 | import org.apache.kafka.streams.TopologyTestDriver; 22 | import org.apache.kafka.streams.state.KeyValueStore; 23 | import org.apache.kafka.streams.test.ConsumerRecordFactory; 24 | import org.junit.After; 25 | import org.junit.Before; 26 | import org.junit.Test; 27 | 28 | import static org.junit.Assert.assertEquals; 29 | import static org.junit.Assert.assertNull; 30 | 31 | public class StreamProcessingLowLvlAvroTest { 32 | private final String topicIn = "topic-in"; 33 | private final String topicOut = "topic-out"; 34 | private TopologyTestDriver testDriver; 35 | private MockSchemaRegistryClient schemaRegistryClient; 36 | private Properties properties; 37 | 38 | @Before 39 | public void start() { 40 | properties = new Properties(); 41 | properties.put(StreamsConfig.CLIENT_ID_CONFIG, "client-id-test-1"); 42 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-id-test-5"); 43 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9922"); 44 | properties.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://whatever:4242"); 45 | properties.put( 46 | StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); 47 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class); 48 | 49 | schemaRegistryClient = new MockSchemaRegistryClient(); 50 | } 51 | 52 | @After 53 | public void tearDown() { 54 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close); 55 | testDriver = null; 56 | properties = null; 57 | } 58 | 59 | @Test 60 | public void testTopologyLowLvlAvro_statefulProcessors() throws IOException, RestClientException { 61 | /** Arrange */ 62 | final String storeName = "same-name"; 63 | 64 | // create serde with config to be able to connect to mock schema registry 65 | final SpecificAvroSerde serde = new SpecificAvroSerde<>(schemaRegistryClient); 66 | 67 | final Map schema = 68 | Collections.singletonMap( 69 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 70 | "wat-ever-url-anyway-it-is-mocked"); 71 | serde.configure(schema, false); 72 | // get topology 73 | final Topology topology = 74 | StreamProcessingLowLvlAvro.topologyDedupByUserId(topicIn, topicOut, serde, storeName); 75 | testDriver = new 
TopologyTestDriver(topology, properties); 76 | 77 | final ConsumerRecordFactory factory = 78 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer()); 79 | 80 | final ConsumerRecord inRecord1 = 81 | factory.create( 82 | topicIn, 83 | "id-1", 84 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build()); 85 | 86 | /** Act */ 87 | testDriver.pipeInput(Collections.singletonList(inRecord1)); 88 | final KeyValueStore dedupStore = testDriver.getKeyValueStore(storeName); 89 | final Person person = dedupStore.get("id-1"); 90 | final long l = dedupStore.approximateNumEntries(); 91 | System.out.println("Here : " + l); 92 | 93 | /** Assert */ 94 | assertEquals("id-1", person.getId()); 95 | } 96 | 97 | @Test 98 | public void testTopologyLowLvlAvro_statefulProcessors_invalidInput() 99 | throws IOException, RestClientException { 100 | /** Arrange */ 101 | final String storeName = "same-name"; 102 | 103 | // create serde with config to be able to connect to mock schema registry 104 | final SpecificAvroSerde serde = new SpecificAvroSerde<>(schemaRegistryClient); 105 | 106 | final Map schema = 107 | Collections.singletonMap( 108 | AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, 109 | "wat-ever-url-anyway-it-is-mocked"); 110 | serde.configure(schema, false); 111 | // get topology 112 | final Topology topology = 113 | StreamProcessingLowLvlAvro.topologyDedupByUserId(topicIn, topicOut, serde, storeName); 114 | testDriver = new TopologyTestDriver(topology, properties); 115 | 116 | final ConsumerRecordFactory factory = 117 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), serde.serializer()); 118 | 119 | final ConsumerRecord inRecord1 = 120 | factory.create( 121 | topicIn, 122 | "invalid-id", 123 | Person.newBuilder().setId("id-1").setName("yohoho").setLastname("pirate").build()); 124 | 125 | final ConsumerRecord inRecord2 = 126 | factory.create( 127 | topicIn, 128 | "id-1", 129 | Person.newBuilder().setId("id-1").setName("nikita").setLastname("zhevnitskiy").build()); 130 | 131 | final ConsumerRecord inRecord3 = 132 | factory.create( 133 | topicIn, 134 | "id-1", 135 | Person.newBuilder().setId("id-1").setName("42").setLastname("42").build()); 136 | 137 | /** Act */ 138 | testDriver.pipeInput(Arrays.asList(inRecord1, inRecord2, inRecord3)); 139 | final KeyValueStore dedupStore = testDriver.getKeyValueStore(storeName); 140 | final Person person = dedupStore.get("id-1"); 141 | 142 | final ProducerRecord outRecord1 = 143 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer()); 144 | final ProducerRecord outRecord2 = 145 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer()); 146 | final ProducerRecord outRecord3 = 147 | testDriver.readOutput(topicOut, new StringDeserializer(), serde.deserializer()); 148 | 149 | /** Assert */ 150 | assertEquals("nikita", outRecord1.value().getName()); 151 | assertEquals("id-1", outRecord1.key()); 152 | assertEquals("id-1", person.getId()); 153 | assertNull(outRecord2); 154 | assertNull(outRecord3); 155 | 156 | assertEquals("zhevnitskiy", dedupStore.get("id-1").getLastname()); 157 | assertNull(dedupStore.get("invalid-id")); 158 | 159 | assertEquals(1, dedupStore.approximateNumEntries()); 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /streams-client/src/test/java/no/sysco/testing/kafka/streams/topology/StreamProcessingTest.java: 
-------------------------------------------------------------------------------- 1 | package no.sysco.testing.kafka.streams.topology; 2 | 3 | import java.util.Arrays; 4 | import java.util.Optional; 5 | import java.util.Properties; 6 | import org.apache.kafka.clients.consumer.ConsumerRecord; 7 | import org.apache.kafka.clients.producer.ProducerRecord; 8 | import org.apache.kafka.common.serialization.LongDeserializer; 9 | import org.apache.kafka.common.serialization.Serdes; 10 | import org.apache.kafka.common.serialization.StringDeserializer; 11 | import org.apache.kafka.common.serialization.StringSerializer; 12 | import org.apache.kafka.streams.StreamsConfig; 13 | import org.apache.kafka.streams.Topology; 14 | import org.apache.kafka.streams.TopologyTestDriver; 15 | import org.apache.kafka.streams.state.KeyValueStore; 16 | import org.apache.kafka.streams.test.ConsumerRecordFactory; 17 | import org.junit.After; 18 | import org.junit.Before; 19 | import org.junit.Test; 20 | 21 | import static org.junit.Assert.assertEquals; 22 | import static org.junit.Assert.assertNull; 23 | 24 | public class StreamProcessingTest { 25 | private final String topicIn = "topic-in"; 26 | private final String topicOut = "topic-out"; 27 | private TopologyTestDriver testDriver; 28 | private Properties properties; 29 | 30 | @Before 31 | public void start() { 32 | properties = new Properties(); 33 | properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "test"); 34 | properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); 35 | properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); 36 | properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); 37 | } 38 | 39 | @After 40 | public void tearDown() { 41 | Optional.ofNullable(testDriver).ifPresent(TopologyTestDriver::close); 42 | testDriver = null; 43 | properties = null; 44 | } 45 | 46 | @Test 47 | public void testTopology_statelessProcessors_Uppercase() { 48 | // Arrange 49 | final Topology topology = StreamProcessing.topologyUpperCase(topicIn, topicOut); 50 | testDriver = new TopologyTestDriver(topology, properties); 51 | final ConsumerRecordFactory factory = 52 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), new StringSerializer()); 53 | final ConsumerRecord record1 = factory.create(topicIn, null, "magic"); 54 | final ConsumerRecord record2 = factory.create(topicIn, null, "cigma"); 55 | 56 | // Act 57 | testDriver.pipeInput(Arrays.asList(record1, record2)); 58 | final ProducerRecord outRecord1 = 59 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer()); 60 | final ProducerRecord outRecord2 = 61 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer()); 62 | final ProducerRecord outRecord3 = 63 | testDriver.readOutput(topicOut, new StringDeserializer(), new StringDeserializer()); 64 | 65 | // Assert 66 | assertNull(outRecord3); 67 | assertEquals("magic".toUpperCase(), outRecord1.value()); 68 | assertEquals("cigma".toUpperCase(), outRecord2.value()); 69 | } 70 | 71 | @Test 72 | public void testTopology_statefullProcessors_Anagram() { 73 | 74 | // Arrange 75 | final String storeName = "count-storage"; 76 | final Topology topology = 77 | StreamProcessing.topologyCountAnagram(topicIn, topicOut, storeName); 78 | // setup TopologyTestDriver 79 | testDriver = new TopologyTestDriver(topology, properties); 80 | final ConsumerRecordFactory factory = 81 | new ConsumerRecordFactory<>(topicIn, new StringSerializer(), new 
StringSerializer()); 82 | final ConsumerRecord record1 = factory.create(topicIn, null, "magic"); 83 | final ConsumerRecord record2 = factory.create(topicIn, null, "cigma"); 84 | 85 | // Act 86 | testDriver.pipeInput(Arrays.asList(record1, record2)); 87 | final ProducerRecord outRecord1 = 88 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer()); 89 | final ProducerRecord outRecord2 = 90 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer()); 91 | final ProducerRecord outRecord3 = 92 | testDriver.readOutput(topicOut, new StringDeserializer(), new LongDeserializer()); 93 | 94 | // accessing storage 95 | final KeyValueStore keyValueStore = testDriver.getKeyValueStore(storeName); 96 | final Long amountOfRecordsInStorageByKey = keyValueStore.get("acgim"); 97 | 98 | // Assert 99 | assertNull(outRecord3); 100 | assertEquals("acgim", outRecord1.key()); 101 | assertEquals("acgim", outRecord2.key()); 102 | assertEquals(Long.valueOf(1), outRecord1.value()); 103 | assertEquals(Long.valueOf(2), outRecord2.value()); 104 | assertEquals(Long.valueOf(2), amountOfRecordsInStorageByKey); 105 | } 106 | } 107 | --------------------------------------------------------------------------------