├── kafka
│   ├── build-docker.sh
│   ├── build.sh
│   ├── run-kafka.sh
│   ├── Dockerfile
│   ├── build-docker-kafka-multi-arch.sh
│   └── config
│       └── server.properties
├── deploy-artifacts.sh
├── .gitignore
├── docker-compose-registry.yml
├── eventuate-messaging-kafka-leadership
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   ├── application.properties
│   │   │   │   └── logback.xml
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── coordination
│   │   │                   └── leadership
│   │   │                       └── zookeeper
│   │   │                           └── KafkaLeadershipTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── coordination
│   │                       └── leadership
│   │                           └── zookeeper
│   │                               ├── KafkaLeadershipController.java
│   │                               └── KafkaLeaderSelector.java
│   └── build.gradle
├── gradle
│   ├── wrapper
│   │   ├── gradle-wrapper.jar
│   │   └── gradle-wrapper.properties
│   └── gradlew.bat
├── set-multi-arch-image-env-vars.sh
├── eventuate-messaging-kafka-producer
│   ├── build.gradle
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── eventuate
│                       └── messaging
│                           └── kafka
│                               └── producer
│                                   ├── EventuateKafkaProducerConfigurationProperties.java
│                                   ├── EventuateKafkaPartitioner.java
│                                   └── EventuateKafkaProducer.java
├── eventuate-messaging-kafka-basic-consumer
│   ├── src
│   │   ├── main
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── basic
│   │   │                           └── consumer
│   │   │                               ├── MessageConsumerBacklog.java
│   │   │                               ├── EventuateKafkaConsumerState.java
│   │   │                               ├── ConsumerCallbacks.java
│   │   │                               ├── KafkaMessageProcessorFailedException.java
│   │   │                               ├── KafkaConsumerFactory.java
│   │   │                               ├── BackPressureManagerState.java
│   │   │                               ├── DefaultKafkaConsumerFactory.java
│   │   │                               ├── EventuateKafkaConsumerMessageHandler.java
│   │   │                               ├── BackPressureManagerStateAndActions.java
│   │   │                               ├── BackPressureConfig.java
│   │   │                               ├── ConsumerPropertiesFactory.java
│   │   │                               ├── BackPressureManagerNormalState.java
│   │   │                               ├── BackPressureManager.java
│   │   │                               ├── BackPressureActions.java
│   │   │                               ├── KafkaMessageConsumer.java
│   │   │                               ├── EventuateKafkaConsumerConfigurationProperties.java
│   │   │                               ├── BackPressureManagerPausedState.java
│   │   │                               ├── TopicPartitionOffsets.java
│   │   │                               ├── OffsetTracker.java
│   │   │                               ├── DefaultKafkaMessageConsumer.java
│   │   │                               ├── KafkaMessageProcessor.java
│   │   │                               └── EventuateKafkaConsumer.java
│   │   └── test
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── basic
│   │                               └── consumer
│   │                                   ├── TopicPartitionOffsetsTest.java
│   │                                   ├── OffsetTrackerTest.java
│   │                                   └── BackPressureManagerTest.java
│   └── build.gradle
├── README.md
├── eventuate-messaging-kafka-spring-integration-test
│   ├── src
│   │   └── test
│   │       ├── resources
│   │       │   └── application.properties
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── spring
│   │                               └── basic
│   │                                   └── consumer
│   │                                       ├── EventuateKafkaBasicConsumerSpringWithSwimlanePerTopicTest.java
│   │                                       └── EventuateKafkaBasicConsumerSpringTest.java
│   └── build.gradle
├── eventuate-messaging-kafka-consumer
│   ├── src
│   │   ├── main
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── consumer
│   │   │                           ├── KafkaMessageHandler.java
│   │   │                           ├── TopicPartitionToSwimlaneMapping.java
│   │   │                           ├── ReactiveKafkaMessageHandler.java
│   │   │                           ├── KafkaMessage.java
│   │   │                           ├── KafkaSubscription.java
│   │   │                           ├── OriginalTopicPartitionToSwimlaneMapping.java
│   │   │                           ├── RawKafkaMessage.java
│   │   │                           ├── SwimlaneDispatcherBacklog.java
│   │   │                           ├── SwimlanePerTopicPartition.java
│   │   │                           ├── MultipleSwimlanesPerTopicPartitionMapping.java
│   │   │                           ├── SwimlaneBasedDispatcher.java
│   │   │                           ├── SwimlaneDispatcher.java
│   │   │                           └── MessageConsumerKafkaImpl.java
│   │   └── test
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── consumer
│   │                               ├── SwimlanePerTopicPartitionTest.java
│   │                               ├── MultipleSwimlanesPerTopicPartitionMappingTest.java
│   │                               └── SwimlaneDispatcherTest.java
│   └── build.gradle
├── eventuate-messaging-kafka-common
│   ├── src
│   │   ├── main
│   │   │   ├── java
│   │   │   │   └── io
│   │   │   │       └── eventuate
│   │   │   │           └── messaging
│   │   │   │               └── kafka
│   │   │   │                   └── common
│   │   │   │                       ├── TopicCleaner.java
│   │   │   │                       ├── AggregateTopicMapping.java
│   │   │   │                       ├── EventuateKafkaMultiMessageHeader.java
│   │   │   │                       ├── EventuateKafkaMultiMessagesHeader.java
│   │   │   │                       ├── EventuateBinaryMessageEncoding.java
│   │   │   │                       ├── EventuateKafkaConfigurationProperties.java
│   │   │   │                       ├── EventuateKafkaMultiMessage.java
│   │   │   │                       ├── EventuateKafkaMultiMessages.java
│   │   │   │                       └── KeyValue.java
│   │   │   └── resources
│   │   │       └── sbe-schema.xml
│   │   └── test
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── common
│   │                               ├── sbe
│   │                               │   └── SbeMessageSerdeTest.java
│   │                               └── EventuateKafkaMultiMessageConverterTest.java
│   └── build.gradle
├── eventuate-messaging-kafka-spring-common
│   ├── build.gradle
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── eventuate
│                       └── messaging
│                           └── kafka
│                               └── spring
│                                   └── common
│                                       └── EventuateKafkaPropertiesConfiguration.java
├── eventuate-messaging-kafka-spring-producer
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   └── application.properties
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── spring
│   │   │                           └── producer
│   │   │                               └── EventuateKafkaProducerConfigurationSpringTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── spring
│   │                               └── producer
│   │                                   ├── EventuateKafkaProducerSpringConfigurationProperties.java
│   │                                   └── EventuateKafkaProducerSpringConfigurationPropertiesConfiguration.java
│   └── build.gradle
├── eventuate-messaging-kafka-micronaut-producer
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   └── application.properties
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── micronaut
│   │   │                           └── producer
│   │   │                               └── EventuateKafkaProducerConfigurationTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── micronaut
│   │                               └── producer
│   │                                   ├── EventuateKafkaProducerMicronautConfigurationProperties.java
│   │                                   └── EventuateKafkaProducerConfigurationPropertiesFactory.java
│   └── build.gradle
├── eventuate-messaging-kafka-testcontainers
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   └── logback.xml
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── testcontainers
│   │   │                           ├── EventuateKafkaClusterTest.java
│   │   │                           ├── EventuateKafkaNativeClusterTest.java
│   │   │                           └── EventuateKafkaContainerTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── testcontainers
│   │                               ├── EventuateKafkaNativeCluster.java
│   │                               ├── EventuateKafkaCluster.java
│   │                               ├── EventuateKafkaNativeContainer.java
│   │                               └── EventuateKafkaContainer.java
│   └── build.gradle
├── .circleci
│   ├── save-containers-and-tests.sh
│   └── config.yml
├── eventuate-messaging-kafka-spring-consumer
│   ├── build.gradle
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── eventuate
│                       └── messaging
│                           └── kafka
│                               └── spring
│                                   └── consumer
│                                       ├── KafkaConsumerFactoryConfiguration.java
│                                       └── MessageConsumerKafkaConfiguration.java
├── eventuate-messaging-kafka-micronaut-basic-consumer
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   └── application.properties
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── micronaut
│   │   │                           └── basic
│   │   │                               └── consumer
│   │   │                                   └── EventuateKafkaConsumerConfigurationMicronautTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── micronaut
│   │                               └── basic
│   │                                   └── consumer
│   │                                       ├── EventuateKafkaConsumerConfigurationPropertiesFactory.java
│   │                                       └── EventuateKafkaConsumerMicronautConfigurationProperties.java
│   └── build.gradle
├── eventuate-messaging-kafka-micronaut-integration-test
│   ├── src
│   │   └── test
│   │       ├── resources
│   │       │   └── application.properties
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── basic
│   │                               └── consumer
│   │                                   ├── EventuateKafkaProducerConsumerFactory.java
│   │                                   └── EventuateKafkaBasicConsumerMicronautTest.java
│   └── build.gradle
├── eventuate-messaging-kafka-spring-basic-consumer
│   ├── src
│   │   ├── test
│   │   │   ├── resources
│   │   │   │   └── application.properties
│   │   │   └── java
│   │   │       └── io
│   │   │           └── eventuate
│   │   │               └── messaging
│   │   │                   └── kafka
│   │   │                       └── spring
│   │   │                           └── basic
│   │   │                               └── consumer
│   │   │                                   └── EventuateKafkaConsumerConfigurationSpringTest.java
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── spring
│   │                               └── basic
│   │                                   └── consumer
│   │                                       ├── EventuateKafkaConsumerSpringConfigurationProperties.java
│   │                                       └── EventuateKafkaConsumerSpringConfigurationPropertiesConfiguration.java
│   └── build.gradle
├── .github
│   └── workflows
│       ├── print-container-logs.sh
│       └── build-and-test.yaml
├── LICENSE
├── eventuate-messaging-kafka-micronaut-consumer
│   ├── src
│   │   └── main
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── micronaut
│   │                               └── consumer
│   │                                   ├── KafkaConsumerFactoryFactory.java
│   │                                   └── MessageConsumerKafkaFactory.java
│   └── build.gradle
├── gradle.properties
├── eventuate-messaging-kafka-micronaut-common
│   ├── build.gradle
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── eventuate
│                       └── messaging
│                           └── kafka
│                               └── micronaut
│                                   └── common
│                                       └── EventuateKafkaPropertiesFactory.java
├── eventuate-messaging-kafka-integration-test
│   ├── src
│   │   └── main
│   │       ├── resources
│   │       │   └── application.properties
│   │       └── java
│   │           └── io
│   │               └── eventuate
│   │                   └── messaging
│   │                       └── kafka
│   │                           └── basic
│   │                               └── consumer
│   │                                   └── AbstractEventuateKafkaBasicConsumerTest.java
│   └── build.gradle
├── docker-compose.yml
├── deploy-multi-arch.sh
├── settings.gradle
├── gradlew.bat
└── gradlew
--------------------------------------------------------------------------------
/kafka/build-docker.sh:
--------------------------------------------------------------------------------
1 | docker build -t test-eventuate-kafka .
--------------------------------------------------------------------------------
/deploy-artifacts.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | ./gradlew publishEventuateArtifacts
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .gradle
2 | build/
3 | *.idea/
4 | *.iml
5 | *.log
6 | out
7 | .classpath
8 | .project
9 | .settings
10 | bin
--------------------------------------------------------------------------------
/docker-compose-registry.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   registry:
4 |     image: registry:2
5 |     ports:
6 |       - "5002:5000"
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-leadership/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuatelocal.zookeeper.connection.string=localhost:2181
2 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar: https://raw.githubusercontent.com/eventuate-foundation/eventuate-messaging-kafka/HEAD/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/set-multi-arch-image-env-vars.sh:
--------------------------------------------------------------------------------
1 | export MULTI_ARCH_TAG=test-build-${CIRCLE_SHA1?}
2 | export BUILDX_PUSH_OPTIONS=--push
3 |
4 | export KAFKA_MULTI_ARCH_IMAGE=eventuateio/eventuate-kafka:$MULTI_ARCH_TAG
5 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-producer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   implementation project (":eventuate-messaging-kafka-common")
4 | }
5 | tasks.withType(Test).configureEach {
6 |   useJUnitPlatform()
7 | }
--------------------------------------------------------------------------------
/kafka/build.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | #docker build -t test-eventuateio-local-kafka .
4 |
5 | docker buildx build --platform linux/amd64,linux/arm64 -t 182030608133.dkr.ecr.us-west-1.amazonaws.com/test-repo/eventuate-kafka --push .
6 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/MessageConsumerBacklog.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public interface MessageConsumerBacklog {
4 |   int size();
5 | }
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # An Eventuate project
2 |
3 |
4 |
5 | This project is part of [Eventuate](http://eventuate.io), which is a microservices collaboration platform.
6 |
7 |
8 |
9 | # eventuate-messaging-kafka
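The modules listed above split the library into producer, consumer, and framework-specific wiring. As orientation for the file listings that follow, here is a minimal consumption sketch. It is illustrative only: the subscribe(subscriberId, channels, handler) signature is a hypothetical assumption, since MessageConsumerKafkaImpl's source is not included in this dump; KafkaMessageHandler, KafkaMessage, and KafkaSubscription are listed later in this file.

import java.util.Set;
import io.eventuate.messaging.kafka.consumer.KafkaMessage;
import io.eventuate.messaging.kafka.consumer.KafkaMessageHandler;
import io.eventuate.messaging.kafka.consumer.KafkaSubscription;
import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl;

public class ConsumerUsageSketch {
  // Wiring of messageConsumer omitted; see MessageConsumerKafkaConfiguration in the Spring module.
  void sketch(MessageConsumerKafkaImpl messageConsumer) {
    KafkaMessageHandler handler = (KafkaMessage m) -> System.out.println(m.getPayload());
    // Hypothetical signature: subscriber id, set of channels (topics), handler.
    KafkaSubscription subscription = messageConsumer.subscribe("subscriberId", Set.of("my-topic"), handler);
    subscription.close();
  }
}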
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-integration-test/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuatelocal.kafka.bootstrap.servers=${DOCKER_HOST_IP:localhost}:9092
2 | logging.level.io.eventuate.messaging.kafka.consumer=TRACE
3 | logging.level.io.eventuate.messaging.kafka.basic.consumer=TRACE
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/KafkaMessageHandler.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import java.util.function.Consumer;
4 |
5 | public interface KafkaMessageHandler extends Consumer<KafkaMessage> {
6 |
7 | }
8 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/TopicCleaner.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | public class TopicCleaner {
4 |
5 |   public static String clean(String topic) {
6 |     return topic.replace("$", "_DLR_");
7 |   }
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-common/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   implementation project (":eventuate-messaging-kafka-common")
4 |
5 |   api "org.springframework.boot:spring-boot-starter:$springBootVersion"
6 | }
7 | tasks.withType(Test).configureEach {
8 |   useJUnitPlatform()
9 | }
10 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaConsumerState.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public enum EventuateKafkaConsumerState {
4 |   MESSAGE_HANDLING_FAILED, STARTED, FAILED_TO_START, STOPPED, FAILED, CREATED
5 | }
6 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-producer/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.producer.properties.buffer.memory=1000000
2 | eventuate.local.kafka.producer.properties.value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
3 | not.eventuate.local.kafka.producer.properties.property=N/A
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/ConsumerCallbacks.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public interface ConsumerCallbacks {
4 |   void onTryCommitCallback();
5 |   void onCommitedCallback();
6 |   void onCommitFailedCallback();
7 | }
8 |
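ConsumerCallbacks is a hook into the consumer's offset-commit cycle. A minimal sketch of one way to implement it, for example to count commit outcomes in a test; this class is illustrative and not part of the repo (note that onCommitedCallback is the interface's own spelling):

import java.util.concurrent.atomic.AtomicInteger;
import io.eventuate.messaging.kafka.basic.consumer.ConsumerCallbacks;

public class CountingConsumerCallbacks implements ConsumerCallbacks {
  public final AtomicInteger tried = new AtomicInteger();
  public final AtomicInteger committed = new AtomicInteger();
  public final AtomicInteger failed = new AtomicInteger();

  @Override
  public void onTryCommitCallback() { tried.incrementAndGet(); }

  @Override
  public void onCommitedCallback() { committed.incrementAndGet(); }

  @Override
  public void onCommitFailedCallback() { failed.incrementAndGet(); }
}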
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-producer/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.producer.properties.buffer-memory=1000000
2 | eventuate.local.kafka.producer.properties.value-serializer=org.apache.kafka.common.serialization.ByteArraySerializer
3 | not.eventuate.local.kafka.producer.properties.property=N/A
4 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/AggregateTopicMapping.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | public class AggregateTopicMapping {
4 |
5 |   public static String aggregateTypeToTopic(String aggregateType) {
6 |     return TopicCleaner.clean(aggregateType);
7 |   }
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateKafkaMultiMessageHeader.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | public class EventuateKafkaMultiMessageHeader extends KeyValue {
4 |
5 |   public EventuateKafkaMultiMessageHeader(String key, String value) {
6 |     super(key, value);
7 |   }
8 | }
9 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateKafkaMultiMessagesHeader.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | public class EventuateKafkaMultiMessagesHeader extends KeyValue {
4 |
5 |   public EventuateKafkaMultiMessagesHeader(String key, String value) {
6 |     super(key, value);
7 |   }
8 | }
9 |
--------------------------------------------------------------------------------
/kafka/run-kafka.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | set -e
4 |
5 | EFFECTIVE_KAFKA_CONFIG_DIR=./config
6 |
7 | export KAFKA_LOG_DIRS=${KAFKA_LOG_DIRS:-/tmp/kafka-logs}
8 |
9 | envsubst < /usr/local/kafka-config/server.properties > $EFFECTIVE_KAFKA_CONFIG_DIR/server.properties
10 |
11 | exec bin/kafka-server-start.sh $EFFECTIVE_KAFKA_CONFIG_DIR/server.properties
12 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/TopicPartitionToSwimlaneMapping.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 |
5 | public interface TopicPartitionToSwimlaneMapping {
6 |   Integer toSwimlane(TopicPartition topicPartition, String messageKey);
7 | }
8 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/KafkaMessageProcessorFailedException.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public class KafkaMessageProcessorFailedException extends RuntimeException {
4 |   public KafkaMessageProcessorFailedException(Throwable t) {
5 |     super(t);
6 |   }
7 | }
8 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/KafkaConsumerFactory.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | import java.util.Properties;
4 |
5 | public interface KafkaConsumerFactory {
6 |
7 |   KafkaMessageConsumer makeConsumer(String subscriptionId, Properties consumerProperties);
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/ReactiveKafkaMessageHandler.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import java.util.concurrent.CompletableFuture;
4 | import java.util.function.Function;
5 |
6 | public interface ReactiveKafkaMessageHandler extends Function<KafkaMessage, CompletableFuture<Void>> {
7 |
8 | }
9 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/KafkaMessage.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | public class KafkaMessage {
4 |   private String payload;
5 |
6 |   public KafkaMessage(String payload) {
7 |     this.payload = payload;
8 |   }
9 |
10 |   public String getPayload() {
11 |     return payload;
12 |   }
13 | }
14 |
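Both handler interfaces above are single-method and can be written as lambdas. A minimal sketch; the process method is a placeholder, not part of the repo:

import java.util.concurrent.CompletableFuture;
import io.eventuate.messaging.kafka.consumer.KafkaMessage;
import io.eventuate.messaging.kafka.consumer.KafkaMessageHandler;
import io.eventuate.messaging.kafka.consumer.ReactiveKafkaMessageHandler;

public class HandlerSketch {
  // Blocking handler: runs on the dispatcher's thread.
  KafkaMessageHandler syncHandler = message -> process(message.getPayload());

  // Reactive handler: the returned future completes when processing finishes.
  ReactiveKafkaMessageHandler reactiveHandler =
      message -> CompletableFuture.runAsync(() -> process(message.getPayload()));

  private static void process(String payload) {
    System.out.println(payload); // placeholder
  }
}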
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionSha256Sum=bd71102213493060956ec229d946beee57158dbd89d0e62b91bca0fa2c5f3531
4 | distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-bin.zip
5 | networkTimeout=10000
6 | validateDistributionUrl=true
7 | zipStoreBase=GRADLE_USER_HOME
8 | zipStorePath=wrapper/dists
9 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-leadership/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <appender name="STDOUT" class="ch.qos.logback.classic.ConsoleAppender">
3 |     <encoder>
4 |       <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
5 |     </encoder>
6 |   </appender>
7 |
8 |   <root level="info">
9 |     <appender-ref ref="STDOUT"/>
10 |   </root>
11 | </configuration>
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-testcontainers/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <appender name="STDOUT" class="ch.qos.logback.classic.ConsoleAppender">
3 |     <encoder>
4 |       <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
5 |     </encoder>
6 |   </appender>
7 |
8 |   <root level="info">
9 |     <appender-ref ref="STDOUT"/>
10 |   </root>
11 | </configuration>
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/KafkaSubscription.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | public class KafkaSubscription {
4 |   private Runnable closingCallback;
5 |
6 |   public KafkaSubscription(Runnable closingCallback) {
7 |     this.closingCallback = closingCallback;
8 |   }
9 |
10 |   public void close() {
11 |     closingCallback.run();
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/.circleci/save-containers-and-tests.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | mkdir -p ~/junit /home/circleci/container-logs
4 | docker ps -a > /home/circleci/container-logs/containers.txt
5 | find . -type f -regex ".*/build/test-results/.*xml" -exec cp {} ~/junit/ \;
6 | sudo bash -c 'find /var/lib/docker/containers -name "*-json.log" -exec cp {} /home/circleci/container-logs \;'
7 | sudo bash -c 'find /home/circleci/container-logs -type f -exec chown circleci {} \;'
8 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-consumer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   api project (":eventuate-messaging-kafka-consumer")
4 |   api project (":eventuate-messaging-kafka-spring-basic-consumer")
5 |   api project (":eventuate-messaging-kafka-common")
6 |
7 |   implementation "org.springframework.boot:spring-boot-starter:$springBootVersion"
8 | }
9 | tasks.withType(Test).configureEach {
10 |   useJUnitPlatform()
11 | }
12 |
13 |
14 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManagerState.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 |
5 | import java.util.Set;
6 |
7 | public interface BackPressureManagerState {
8 |   BackPressureManagerStateAndActions update(Set<TopicPartition> allTopicPartitions, int backlog, BackPressureConfig backPressureConfig);
9 | }
10 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-producer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   api project (":eventuate-messaging-kafka-producer")
4 |   api project (':eventuate-messaging-kafka-spring-common')
5 |
6 |   implementation "org.springframework.boot:spring-boot-starter:$springBootVersion"
7 |
8 |   testImplementation "org.springframework.boot:spring-boot-starter-test:$springBootVersion"
9 | }
10 | tasks.withType(Test).configureEach {
11 |   useJUnitPlatform()
12 | }
13 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-basic-consumer/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.consumer.properties.session-timeout-ms=10000
2 | eventuate.local.kafka.consumer.properties.key-serializer=org.apache.kafka.common.serialization.StringSerializer
3 | not.eventuate.local.kafka.consumer.properties.property=N/A
4 | eventuate.local.kafka.consumer.backPressure.low=5
5 | eventuate.local.kafka.consumer.backPressure.high=100
6 | eventuate.local.kafka.consumer.pollTimeout=200
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   implementation 'org.apache.commons:commons-lang3:3.18.0'
4 |   implementation "org.junit.jupiter:junit-jupiter:5.13.4"
5 |   implementation project (':eventuate-messaging-kafka-common')
6 |   implementation "org.slf4j:slf4j-api:$slf4jVersion"
7 |
8 |   testImplementation "org.assertj:assertj-core:$assertjVersion"
9 |
10 | }
11 |
12 | test {
13 |   forkEvery 1
14 |   useJUnitPlatform()
15 | }
16 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/OriginalTopicPartitionToSwimlaneMapping.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 |
5 | public class OriginalTopicPartitionToSwimlaneMapping implements TopicPartitionToSwimlaneMapping {
6 |   @Override
7 |   public Integer toSwimlane(TopicPartition topicPartition, String messageKey) {
8 |     return topicPartition.partition();
9 |   }
10 | }
11 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/DefaultKafkaConsumerFactory.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | import java.util.Properties;
4 |
5 | public class DefaultKafkaConsumerFactory implements KafkaConsumerFactory {
6 |
7 |   @Override
8 |   public KafkaMessageConsumer makeConsumer(String subscriptionId, Properties consumerProperties) {
9 |     return DefaultKafkaMessageConsumer.create(consumerProperties);
10 |   }
11 | }
12 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateBinaryMessageEncoding.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | import java.nio.charset.StandardCharsets;
4 |
5 | public class EventuateBinaryMessageEncoding {
6 |   public static String bytesToString(byte[] bytes) {
7 |     return new String(bytes, StandardCharsets.UTF_8);
8 |   }
9 |
10 |   public static byte[] stringToBytes(String string) {
11 |     return string.getBytes(StandardCharsets.UTF_8);
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaConsumerMessageHandler.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerRecord;
4 |
5 | import java.util.function.BiConsumer;
6 | import java.util.function.BiFunction;
7 |
8 | public interface EventuateKafkaConsumerMessageHandler
9 |         extends BiFunction<ConsumerRecord<String, byte[]>, BiConsumer<Void, Throwable>, MessageConsumerBacklog> {
10 | }
11 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-integration-test/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.consumer.properties.session-timeout-ms=30000
2 | eventuate.local.kafka.consumer.properties.key-serializer=org.apache.kafka.common.serialization.StringSerializer
3 |
4 | eventuate.local.kafka.producer.properties.buffer-memory=1000000
5 | eventuate.local.kafka.producer.properties.value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
6 |
7 | eventuatelocal.kafka.bootstrap.servers=${DOCKER_HOST_IP:localhost}:9092
8 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/RawKafkaMessage.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | public class RawKafkaMessage {
4 |   private final String messageKey;
5 |   private final byte[] payload;
6 |
7 |   public RawKafkaMessage(String messageKey, byte[] payload) {
8 |     this.messageKey = messageKey;
9 |     this.payload = payload;
10 |   }
11 |
12 |   public String getMessageKey() {
13 |     return messageKey;
14 |   }
15 |
16 |   public byte[] getPayload() {
17 |     return payload;
18 |   }
19 | }
20 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-integration-test/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   implementation project (':eventuate-messaging-kafka-integration-test')
4 |   implementation project (':eventuate-messaging-kafka-spring-basic-consumer')
5 |   implementation project (':eventuate-messaging-kafka-spring-producer')
6 |   implementation project (':eventuate-messaging-kafka-spring-common')
7 |
8 |   testImplementation "org.springframework.boot:spring-boot-starter-test:$springBootVersion"
9 | }
10 | tasks.withType(Test).configureEach {
11 |   useJUnitPlatform()
12 | }
13 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: SbeToolPlugin
2 |
3 | dependencies {
4 |   implementation 'org.apache.commons:commons-lang3:3.18.0'
5 |   implementation "org.hamcrest:hamcrest:2.2"
6 |   implementation "org.junit.jupiter:junit-jupiter:5.13.4"
7 |   api "org.apache.kafka:kafka-clients:$kafkaClientVersion"
8 |
9 |   implementation 'uk.co.real-logic:sbe-all:1.32.1'
10 |
11 |   implementation "org.slf4j:slf4j-api:$slf4jVersion"
12 |
13 |   testImplementation "org.hamcrest:hamcrest-library:1.3"
14 | }
15 | tasks.withType(Test).configureEach {
16 |   useJUnitPlatform()
17 | }
18 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-basic-consumer/src/test/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.consumer.properties.session.timeout.ms=10000
2 | eventuate.local.kafka.consumer.properties.key.serializer=org.apache.kafka.common.serialization.StringSerializer
3 | not.eventuate.local.kafka.consumer.properties.property=N/A
4 | eventuate.local.kafka.consumer.backPressure.low=5
5 | eventuate.local.kafka.consumer.backPressure.high=100
6 | eventuate.local.kafka.consumer.pollTimeout=200
7 |
8 | logging.level.io.eventuate.tram=DEBUG
9 |
10 | eventuatelocal.kafka.bootstrap.servers=${DOCKER_HOST_IP:localhost}:9092
11 |
--------------------------------------------------------------------------------
/.github/workflows/print-container-logs.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | CONTAINER_IDS=$(docker ps -a -q)
4 |
5 | for id in $CONTAINER_IDS ; do
6 |   echo "\n--------------------"
7 |   echo "logs of:\n"
8 |   (docker ps -a -f "id=$id" || echo container gone: $id)
9 |   echo "\n"
10 |   (docker logs $id || echo container gone: $id)
11 |   echo "--------------------\n"
12 | done
13 |
14 | mkdir -p ~/container-logs
15 |
16 | docker ps -a > ~/container-logs/containers.txt
17 |
18 | for name in $(docker ps -a --format "{{.Names}}") ; do
19 |   (docker logs $name || echo container gone: $name) > ~/container-logs/${name}.log
20 | done
21 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2020 Eventuate, Inc. All rights reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/SwimlaneDispatcherBacklog.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import io.eventuate.messaging.kafka.basic.consumer.MessageConsumerBacklog;
4 |
5 | import java.util.concurrent.LinkedBlockingQueue;
6 |
7 | public class SwimlaneDispatcherBacklog implements MessageConsumerBacklog {
8 |   private final LinkedBlockingQueue<?> queue;
9 |
10 |   public SwimlaneDispatcherBacklog(LinkedBlockingQueue<?> queue) {
11 |     this.queue = queue;
12 |   }
13 |
14 |   @Override
15 |   public int size() {
16 |     return queue.size();
17 |   }
18 | }
19 |
--------------------------------------------------------------------------------
/kafka/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM amazoncorretto:8u322-al2
2 | ENV KAFKA_VERSION=3.2.3
3 |
4 | RUN yum install -y wget tar gzip nc gettext && \
5 |     (wget -q -O - https://archive.apache.org/dist/kafka/${KAFKA_VERSION}/kafka_2.13-${KAFKA_VERSION}.tgz | tar -xzf - -C /usr/local) && \
6 |     yum clean all && \
7 |     rm -rf /var/cache/yum
8 | WORKDIR /usr/local/kafka_2.13-${KAFKA_VERSION}
9 | EXPOSE 9092
10 | COPY ./config/server.properties /usr/local/kafka-config/server.properties
11 | COPY run-kafka.sh .
12 | CMD ./run-kafka.sh
13 | HEALTHCHECK --interval=1s --retries=30 CMD (nc -z $(echo $KAFKA_LISTENERS | sed -e 's?.*//??' -e 's/:/ /' )) || exit 1
14 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   api project (":eventuate-messaging-kafka-basic-consumer")
4 |   implementation project (":eventuate-messaging-kafka-common")
5 |   api "io.eventuate.common.messaging:eventuate-messaging-partition-management:$eventuateCommonMessagingVersion"
6 |   implementation "org.apache.kafka:kafka-clients:$kafkaClientVersion"
7 |
8 |   testImplementation "io.eventuate.util:eventuate-util-test:$eventuateUtilVersion"
9 |   testImplementation "org.assertj:assertj-core:$assertjVersion"
10 | }
11 | tasks.withType(Test).configureEach {
12 |   useJUnitPlatform()
13 | }
14 |
15 |
16 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManagerStateAndActions.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public class BackPressureManagerStateAndActions {
4 |
5 |   final BackPressureActions actions;
6 |   final BackPressureManagerState state;
7 |
8 |   public BackPressureManagerStateAndActions(BackPressureActions actions, BackPressureManagerState state) {
9 |     this.actions = actions;
10 |     this.state = state;
11 |   }
12 |
13 |   public BackPressureManagerStateAndActions(BackPressureManagerState state) {
14 |     this(BackPressureActions.NONE, state);
15 |   }
16 |
17 | }
18 |
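The back-pressure classes form a small state machine: each state's update returns the next state plus pause/resume actions (see BackPressureConfig just below, and BackPressureManagerNormalState and BackPressureManager later in this dump). A walkthrough sketch, declared in the same package so the package-private fields above are visible; this class is illustrative, not repo code:

package io.eventuate.messaging.kafka.basic.consumer;

import java.util.Collections;
import java.util.Set;

import org.apache.kafka.common.TopicPartition;

public class BackPressureWalkthroughSketch {
  public static void main(String[] args) {
    BackPressureConfig config = new BackPressureConfig(5, 100);
    Set<TopicPartition> partitions = Collections.singleton(new TopicPartition("orders", 0));

    BackPressureManagerState state = new BackPressureManagerNormalState();

    // Backlog at or below the high-water mark (100): stays in the normal state, no actions.
    BackPressureManagerStateAndActions r1 = state.update(partitions, 10, config);

    // Backlog above the high-water mark: transitions to the paused state, whose
    // actions pause all partitions until the backlog drains below the low-water mark (5).
    BackPressureManagerStateAndActions r2 = r1.state.update(partitions, 250, config);
  }
}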
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/SwimlanePerTopicPartition.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 |
5 | import java.util.concurrent.ConcurrentHashMap;
6 |
7 | public class SwimlanePerTopicPartition implements TopicPartitionToSwimlaneMapping {
8 |
9 |   private final ConcurrentHashMap<TopicPartition, Integer> mapping = new ConcurrentHashMap<>();
10 |
11 |   @Override
12 |   public Integer toSwimlane(TopicPartition topicPartition, String messageKey) {
13 |     return mapping.computeIfAbsent(topicPartition, tp -> mapping.size());
14 |   }
15 | }
16 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-basic-consumer/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |   api project (':eventuate-messaging-kafka-basic-consumer')
4 |   api project (':eventuate-messaging-kafka-spring-common')
5 |   implementation "org.springframework.boot:spring-boot-starter:$springBootVersion"
6 |
7 |   testImplementation project (':eventuate-messaging-kafka-spring-producer')
8 |   testImplementation project (':eventuate-messaging-kafka-common')
9 |
10 |   testImplementation "org.springframework.boot:spring-boot-starter-test:$springBootVersion"
11 |   testImplementation "io.eventuate.util:eventuate-util-test:$eventuateUtilVersion"
12 | }
13 | tasks.withType(Test).configureEach {
14 |   useJUnitPlatform()
15 | }
16 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureConfig.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | public class BackPressureConfig {
4 |
5 |   private int low = 0;
6 |   private int high = Integer.MAX_VALUE;
7 |
8 |   public BackPressureConfig() {
9 |   }
10 |
11 |   public BackPressureConfig(int low, int high) {
12 |     this.low = low;
13 |     this.high = high;
14 |   }
15 |
16 |   public int getLow() {
17 |     return low;
18 |   }
19 |
20 |   public void setLow(int low) {
21 |     this.low = low;
22 |   }
23 |
24 |   public int getHigh() {
25 |     return high;
26 |   }
27 |
28 |   public void setHigh(int high) {
29 |     this.high = high;
30 |   }
31 | }
32 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-consumer/src/main/java/io/eventuate/messaging/kafka/micronaut/consumer/KafkaConsumerFactoryFactory.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.micronaut.consumer;
2 |
3 | import javax.inject.Singleton;
4 |
5 | import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory;
6 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory;
7 | import io.micronaut.context.annotation.Factory;
8 | import io.micronaut.context.annotation.Requires;
9 |
10 | @Factory
11 | public class KafkaConsumerFactoryFactory {
12 |   @Singleton
13 |   @Requires(missingBeans = KafkaConsumerFactory.class)
14 |   public KafkaConsumerFactory kafkaConsumerFactory() {
15 |     return new DefaultKafkaConsumerFactory();
16 |   }
17 | }
18 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateKafkaConfigurationProperties.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.common;
2 |
3 | public class EventuateKafkaConfigurationProperties {
4 |
5 |   private String bootstrapServers;
6 |   private long connectionValidationTimeout;
7 |
8 |   public EventuateKafkaConfigurationProperties(String bootstrapServers, long connectionValidationTimeout) {
9 |     this.bootstrapServers = bootstrapServers;
10 |     this.connectionValidationTimeout = connectionValidationTimeout;
11 |   }
12 |
13 |   public String getBootstrapServers() {
14 |     return bootstrapServers;
15 |   }
16 |
17 |   public long getConnectionValidationTimeout() {
18 |     return connectionValidationTimeout;
19 |   }
20 | }
21 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 |
2 | deployUrl=file:///Users/cer/.m2/testdeploy
3 | dockerComposePluginVersion=0.17.7
4 |
5 | eventuateMavenRepoUrl=file:///Users/cer/.m2/testdeploy,https://snapshots.repositories.eventuate.io/repository
6 |
7 | kafkaClientVersion=3.1.1
8 |
9 | springBootVersion=3.4.7
10 | slf4jVersion=1.7.36
11 | micronautVersion=2.4.1
12 |
13 | eventuateUtilVersion=0.19.0.BUILD-SNAPSHOT
14 | eventuateCommonMessagingVersion=0.17.0.BUILD-SNAPSHOT
15 |
16 | eventuatePluginsGradleVersion=0.14.0.BUILD-SNAPSHOT
17 | junitVersion=5.12.2
18 | junitPlatformLauncherVersion=1.12.2
19 | eventuateCommonImageVersion=0.21.0.BUILD-SNAPSHOT
20 | eventuateCommonVersion=0.21.0.BUILD-SNAPSHOT
21 | assertjVersion=3.26.3
22 | testContainersVersion=1.20.1
23 | slf4jApiVersion=2.0.17
24 |
25 | version=0.21.0-SNAPSHOT
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-consumer/src/main/java/io/eventuate/messaging/kafka/spring/consumer/KafkaConsumerFactoryConfiguration.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.spring.consumer;
2 |
3 | import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
4 | import org.springframework.context.annotation.Bean;
5 | import org.springframework.context.annotation.Configuration;
6 | import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory;
7 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory;
8 |
9 | @Configuration
10 | public class KafkaConsumerFactoryConfiguration {
11 |   @Bean
12 |   @ConditionalOnMissingBean
13 |   public KafkaConsumerFactory kafkaConsumerFactory() {
14 |     return new DefaultKafkaConsumerFactory();
15 |   }
16 | }
17 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-common/build.gradle:
--------------------------------------------------------------------------------
1 | plugins {
2 |   id "io.spring.dependency-management" version "1.1.7"
3 | }
4 |
5 |
6 |
7 |
8 | dependencyManagement {
9 |   imports {
10 |     mavenBom "io.micronaut:micronaut-bom:$micronautVersion"
11 |   }
12 | }
13 |
14 | dependencies {
15 |   implementation project (":eventuate-messaging-kafka-common")
16 |
17 |   annotationProcessor "io.micronaut:micronaut-inject-java"
18 |   annotationProcessor "io.micronaut:micronaut-validation"
19 |   annotationProcessor "io.micronaut.configuration:micronaut-openapi"
20 |   implementation "io.micronaut:micronaut-inject"
21 |   implementation "io.micronaut:micronaut-validation"
22 |   implementation "io.micronaut:micronaut-runtime"
23 | }
24 | tasks.withType(Test).configureEach {
25 |   useJUnitPlatform()
26 | }
27 |
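KafkaConsumerFactoryConfiguration (above) registers the default factory only when no other KafkaConsumerFactory bean exists, so an application can swap in its own implementation. A sketch of such an override; the client.id tweak is illustrative, and KafkaConsumerFactory's single abstract method makes a lambda sufficient:

import java.util.Properties;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaMessageConsumer;
import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory;

@Configuration
public class CustomKafkaConsumerFactoryConfiguration {

  // Because the default bean is @ConditionalOnMissingBean, this definition wins.
  @Bean
  public KafkaConsumerFactory kafkaConsumerFactory() {
    return (subscriptionId, consumerProperties) -> {
      Properties tweaked = new Properties();
      tweaked.putAll(consumerProperties);
      tweaked.put("client.id", subscriptionId); // illustrative override
      return DefaultKafkaMessageConsumer.create(tweaked);
    };
  }
}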
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-producer/src/main/java/io/eventuate/messaging/kafka/micronaut/producer/EventuateKafkaProducerMicronautConfigurationProperties.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.micronaut.producer;
2 |
3 | import io.micronaut.context.annotation.ConfigurationProperties;
4 |
5 | import java.util.HashMap;
6 | import java.util.Map;
7 | import java.util.stream.Collectors;
8 |
9 | @ConfigurationProperties("eventuate.local.kafka.producer")
10 | public class EventuateKafkaProducerMicronautConfigurationProperties {
11 |   Map<String, String> properties = new HashMap<>();
12 |
13 |   public Map<String, String> getProperties() {
14 |     return properties
15 |             .entrySet()
16 |             .stream()
17 |             .collect(Collectors.toMap(o -> o.getKey().replace("-", "."), Map.Entry::getValue));
18 |   }
19 | }
20 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-integration-test/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | eventuate.local.kafka.consumer.properties.session.timeout.ms=10000
2 | eventuate.local.kafka.consumer.properties.key.serializer=org.apache.kafka.common.serialization.StringSerializer
3 |
4 | eventuate.local.kafka.producer.properties.buffer.memory=1000000
5 | eventuate.local.kafka.producer.properties.value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
6 |
7 | eventuatelocal.kafka.bootstrap.servers=${DOCKER_HOST_IP:localhost}:9092
8 | logging.level.io.eventuate.local.java.kafka.consumer=TRACE
9 | logging.level.io.eventuate.messaging.kafka.basic.consumer=INFO
10 |
11 | eventuate.local.kafka.consumer.backPressure.low=5
12 | eventuate.local.kafka.consumer.backPressure.high=100
13 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-testcontainers/src/main/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaNativeCluster.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.testcontainers;
2 |
3 | import io.eventuate.common.testcontainers.ReusableNetworkFactory;
4 | import org.testcontainers.containers.Network;
5 |
6 | public class EventuateKafkaNativeCluster {
7 |
8 |   public final Network network;
9 |   public final EventuateKafkaNativeContainer kafka;
10 |
11 |   public EventuateKafkaNativeCluster() {
12 |     this("foofoo");
13 |   }
14 |
15 |   public EventuateKafkaNativeCluster(String networkName) {
16 |     network = ReusableNetworkFactory.createNetwork(networkName);
17 |
18 |     kafka = (EventuateKafkaNativeContainer)new EventuateKafkaNativeContainer()
19 |             .withNetwork(network)
20 |             ;
21 |   }
22 |
23 |
24 | }
25 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: eventuateio/eventuate-zookeeper:$EVENTUATE_COMMON_VERSION
5 |     ports:
6 |       - 2181:2181
7 |     environment:
8 |       ZOOKEEPER_CLIENT_PORT: 2181
9 |   kafka:
10 |     image: ${KAFKA_MULTI_ARCH_IMAGE:-localhost:5002/eventuate-kafka:multi-arch-local-build}
11 |     ports:
12 |       - 9092:9092
13 |     depends_on:
14 |       - zookeeper
15 |     environment:
16 |       KAFKA_LISTENERS: LC://kafka:29092,LX://kafka:9092
17 |       KAFKA_ADVERTISED_LISTENERS: LC://kafka:29092,LX://${DOCKER_HOST_IP:-localhost}:9092
18 |       KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LC:PLAINTEXT,LX:PLAINTEXT
19 |       KAFKA_INTER_BROKER_LISTENER_NAME: LC
20 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
21 |       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
22 |       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 60000
23 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-micronaut-producer/src/main/java/io/eventuate/messaging/kafka/micronaut/producer/EventuateKafkaProducerConfigurationPropertiesFactory.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.micronaut.producer;
2 |
3 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducerConfigurationProperties;
4 | import io.micronaut.context.annotation.Factory;
5 |
6 | import javax.inject.Singleton;
7 |
8 | @Factory
9 | public class EventuateKafkaProducerConfigurationPropertiesFactory {
10 |   @Singleton
11 |   public EventuateKafkaProducerConfigurationProperties eventuateKafkaProducerConfigurationProperties(EventuateKafkaProducerMicronautConfigurationProperties eventuateKafkaProducerMicronautConfigurationProperties) {
12 |     return new EventuateKafkaProducerConfigurationProperties(eventuateKafkaProducerMicronautConfigurationProperties.getProperties());
13 |   }
14 | }
15 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-testcontainers/build.gradle:
--------------------------------------------------------------------------------
1 | dependencies {
2 |   implementation "io.eventuate.common:eventuate-common-testcontainers:$eventuateCommonVersion"
3 |   implementation "org.testcontainers:testcontainers:$testContainersVersion"
4 |   api "org.testcontainers:kafka:$testContainersVersion"
5 |
6 |   implementation "org.springframework.boot:spring-boot-starter-test:$springBootVersion"
7 |   implementation "org.assertj:assertj-core:$assertjVersion"
8 |   testImplementation project(":eventuate-messaging-kafka-spring-consumer")
9 |
10 |
11 | }
12 |
13 | task writeProperties(type: WriteProperties) {
14 |   outputFile "${project.buildDir}/generated/eventuate.messaging.kafka.version.properties"
15 |   property 'version', version
16 | }
17 |
18 |
19 | processResources.from(writeProperties)
20 | tasks.withType(Test).configureEach {
21 |   useJUnitPlatform()
22 | }
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-integration-test/build.gradle:
--------------------------------------------------------------------------------
1 |
2 | dependencies {
3 |
4 |   api project (':eventuate-messaging-kafka-common')
5 |   api project (':eventuate-messaging-kafka-consumer')
6 |   api project (':eventuate-messaging-kafka-producer')
7 |   api "io.eventuate.util:eventuate-util-test:$eventuateUtilVersion"
8 |
9 |   implementation project (":eventuate-messaging-kafka-basic-consumer")
10 |   implementation "io.eventuate.common.messaging:eventuate-messaging-partition-management:$eventuateCommonMessagingVersion"
11 |   implementation "org.junit.jupiter:junit-jupiter:$junitVersion"
12 |
13 |   implementation "org.apache.kafka:kafka-clients:$kafkaClientVersion"
14 |   implementation "org.mockito:mockito-core:4.11.0"
15 |   implementation "org.assertj:assertj-core:3.23.1"
16 |
17 |
18 |
19 | }
20 | tasks.withType(Test).configureEach {
21 |   useJUnitPlatform()
22 | }
23 |
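The testcontainers module wires the Kafka container into a reusable Docker network. A test sketch, assuming EventuateKafkaNativeContainer extends a Testcontainers container class (so that start(), isRunning(), and stop() are available); the test itself is illustrative:

import org.junit.jupiter.api.Test;

import io.eventuate.messaging.kafka.testcontainers.EventuateKafkaNativeCluster;

import static org.assertj.core.api.Assertions.assertThat;

public class KafkaNativeClusterSketchTest {

  @Test
  public void shouldStartKafka() {
    EventuateKafkaNativeCluster cluster = new EventuateKafkaNativeCluster("test-network");
    cluster.kafka.start(); // Testcontainers lifecycle method, assuming a GenericContainer subclass
    assertThat(cluster.kafka.isRunning()).isTrue();
    cluster.kafka.stop();
  }
}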
--------------------------------------------------------------------------------
/kafka/build-docker-kafka-multi-arch.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | SCRIPT_DIR=$(cd $( dirname "${BASH_SOURCE[0]}" ) ; pwd)
4 |
5 | docker-compose -f $SCRIPT_DIR/../docker-compose-registry.yml --project-name eventuate-common-registry up -d registry
6 |
7 | IMAGE="${KAFKA_MULTI_ARCH_IMAGE:-${DOCKER_HOST_NAME:-host.docker.internal}:5002/eventuate-kafka:multi-arch-local-build}"
8 | OPTS="${BUILDX_PUSH_OPTIONS:---output=type=image,push=true,registry.insecure=true}"
9 |
10 | echo IMAGE=$IMAGE
11 | echo OPTS=$OPTS
12 |
13 | docker buildx build --platform linux/amd64,linux/arm64 \
14 |   -t $IMAGE \
15 |   -f $SCRIPT_DIR/Dockerfile \
16 |   $OPTS \
17 |   $SCRIPT_DIR
18 |
19 | echo maybe pull
20 |
21 | if [ "$IMAGE" = "host.docker.internal:5002/eventuate-kafka:multi-arch-local-build" ] ; then
22 |   docker pull localhost:5002/eventuate-kafka:multi-arch-local-build
23 | else
24 |   echo not pulling "$IMAGE"
25 | fi
26 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-leadership/src/main/java/io/eventuate/coordination/leadership/zookeeper/KafkaLeadershipController.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.coordination.leadership.zookeeper;
2 |
3 | import io.eventuate.coordination.leadership.LeadershipController;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | import java.util.concurrent.CountDownLatch;
8 |
9 | public class KafkaLeadershipController implements LeadershipController {
10 |   private Logger logger = LoggerFactory.getLogger(getClass());
11 |
12 |   private CountDownLatch stopCountDownLatch;
13 |
14 |   public KafkaLeadershipController(CountDownLatch stopCountDownLatch) {
15 |     this.stopCountDownLatch = stopCountDownLatch;
16 |   }
17 |
18 |   @Override
19 |   public void stop() {
20 |     logger.info("Stopping leadership controller");
21 |     stopCountDownLatch.countDown();
22 |     logger.info("Stopped leadership controller");
23 |   }
24 | }
25 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-spring-producer/src/main/java/io/eventuate/messaging/kafka/spring/producer/EventuateKafkaProducerSpringConfigurationProperties.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.spring.producer;
2 |
3 | import org.springframework.boot.context.properties.ConfigurationProperties;
4 |
5 | import java.util.HashMap;
6 | import java.util.Map;
7 |
8 | @ConfigurationProperties("eventuate.local.kafka.producer")
9 | public class EventuateKafkaProducerSpringConfigurationProperties {
10 |   Map<String, String> properties = new HashMap<>();
11 |
12 |   public Map<String, String> getProperties() {
13 |     return properties;
14 |   }
15 |
16 |   public void setProperties(Map<String, String> properties) {
17 |     this.properties = properties;
18 |   }
19 |
20 |   public static EventuateKafkaProducerSpringConfigurationProperties empty() {
21 |     return new EventuateKafkaProducerSpringConfigurationProperties();
22 |   }
23 | }
24 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/ConsumerPropertiesFactory.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 |
3 | import java.util.Properties;
4 |
5 | public class ConsumerPropertiesFactory {
6 |   public static Properties makeDefaultConsumerProperties(String bootstrapServers, String subscriberId) {
7 |     Properties consumerProperties = new Properties();
8 |     consumerProperties.put("bootstrap.servers", bootstrapServers);
9 |     consumerProperties.put("group.id", subscriberId);
10 |     consumerProperties.put("enable.auto.commit", "false");
11 |     consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
12 |     consumerProperties.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
13 |     consumerProperties.put("auto.offset.reset", "earliest");
14 |     return consumerProperties;
15 |   }
16 | }
17 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-producer/src/main/java/io/eventuate/messaging/kafka/producer/EventuateKafkaProducerConfigurationProperties.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.producer;
2 |
3 | import java.util.HashMap;
4 | import java.util.Map;
5 |
6 | public class EventuateKafkaProducerConfigurationProperties {
7 |   Map<String, String> properties = new HashMap<>();
8 |
9 |   public EventuateKafkaProducerConfigurationProperties() {
10 |   }
11 |
12 |   public EventuateKafkaProducerConfigurationProperties(Map<String, String> properties) {
13 |     this.properties = properties;
14 |   }
15 |
16 |   public Map<String, String> getProperties() {
17 |     return properties;
18 |   }
19 |
20 |   public void setProperties(Map<String, String> properties) {
21 |     this.properties = properties;
22 |   }
23 |
24 |   public static EventuateKafkaProducerConfigurationProperties empty() {
25 |     return new EventuateKafkaProducerConfigurationProperties();
26 |   }
27 | }
28 |
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/test/java/io/eventuate/messaging/kafka/consumer/SwimlanePerTopicPartitionTest.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 | import org.junit.jupiter.api.Test;
5 |
6 | import static org.junit.jupiter.api.Assertions.*;
7 |
8 | public class SwimlanePerTopicPartitionTest {
9 |
10 |   @Test
11 |   public void shouldComputeSwimlane() {
12 |     SwimlanePerTopicPartition mapping = new SwimlanePerTopicPartition();
13 |     assertEquals(Integer.valueOf(0), mapping.toSwimlane(tp0(), "X"));
14 |     assertEquals(Integer.valueOf(0), mapping.toSwimlane(tp0(), "Y"));
15 |     assertEquals(Integer.valueOf(1), mapping.toSwimlane(tp1(), "Z"));
16 |   }
17 |
18 |   private TopicPartition tp1() {
19 |     return new TopicPartition("x", 1);
20 |   }
21 |
22 |   private TopicPartition tp0() {
23 |     return new TopicPartition("x", 0);
24 |   }
25 |
26 | }
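ConsumerPropertiesFactory (above) supplies the baseline consumer settings; callers can layer overrides on top before handing the Properties to a KafkaConsumerFactory. A sketch; the max.poll.records override is an illustrative use of a standard Kafka consumer setting:

import java.util.Properties;

import io.eventuate.messaging.kafka.basic.consumer.ConsumerPropertiesFactory;
import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory;
import io.eventuate.messaging.kafka.basic.consumer.KafkaMessageConsumer;

public class ConsumerPropertiesSketch {
  public static void main(String[] args) {
    // Defaults: manual commits, earliest offset reset, String keys, byte[] values.
    Properties props = ConsumerPropertiesFactory.makeDefaultConsumerProperties("localhost:9092", "subscriberId");
    props.put("max.poll.records", "100"); // illustrative override

    KafkaMessageConsumer consumer = new DefaultKafkaConsumerFactory().makeConsumer("subscription-1", props);
  }
}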
/bin/bash -e 2 | 3 | BRANCH=$(git rev-parse --abbrev-ref HEAD) 4 | 5 | if [[ $BRANCH == "master" ]] ; then 6 | TARGET_TAG=$(sed -e '/^version=/!d' -e 's/version=//' -e 's/-SNAPSHOT/.BUILD-SNAPSHOT/' < gradle.properties) 7 | elif [[ $BRANCH =~ RELEASE$ ]] ; then 8 | TARGET_TAG=$BRANCH 9 | elif [[ $BRANCH =~ M[0-9]+$ ]] ; then 10 | TARGET_TAG=$BRANCH 11 | elif [[ $BRANCH =~ RC[0-9]+$ ]] ; then 12 | TARGET_TAG=$BRANCH 13 | else 14 | TARGET_TAG=${BRANCH}-${CIRCLE_BUILD_NUM?} 15 | fi 16 | 17 | docker login -u ${DOCKER_USER_ID?} -p ${DOCKER_PASSWORD?} 18 | 19 | retag() { 20 | BASE=$1 21 | IMAGE=${BASE}:${MULTI_ARCH_TAG?} 22 | TARGET_IMAGE=$BASE:$TARGET_TAG 23 | 24 | echo Retagging $IMAGE $TARGET_IMAGE 25 | 26 | SOURCES=$(docker manifest inspect $IMAGE | \ 27 | jq -r '.manifests[].digest | sub("^"; "'${BASE}'@")') 28 | 29 | docker buildx imagetools create -t ${TARGET_IMAGE} $SOURCES 30 | } 31 | 32 | retag "eventuateio/eventuate-kafka" 33 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-producer/src/main/java/io/eventuate/messaging/kafka/spring/producer/EventuateKafkaProducerSpringConfigurationPropertiesConfiguration.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.producer; 2 | 3 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducerConfigurationProperties; 4 | import org.springframework.boot.context.properties.EnableConfigurationProperties; 5 | import org.springframework.context.annotation.Bean; 6 | 7 | @EnableConfigurationProperties(EventuateKafkaProducerSpringConfigurationProperties.class) 8 | public class EventuateKafkaProducerSpringConfigurationPropertiesConfiguration { 9 | 10 | @Bean 11 | public EventuateKafkaProducerConfigurationProperties eventuateKafkaProducerConfigurationProperties(EventuateKafkaProducerSpringConfigurationProperties eventuateKafkaProducerSpringConfigurationProperties) { 12 | return new EventuateKafkaProducerConfigurationProperties(eventuateKafkaProducerSpringConfigurationProperties.getProperties()); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManagerNormalState.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.Set; 6 | 7 | public class BackPressureManagerNormalState implements BackPressureManagerState { 8 | 9 | public static BackPressureManagerStateAndActions transitionTo(Set suspendedPartitions) { 10 | return new BackPressureManagerStateAndActions(BackPressureActions.resume(suspendedPartitions), new BackPressureManagerNormalState()); 11 | } 12 | 13 | @Override 14 | public BackPressureManagerStateAndActions update(Set allTopicPartitions, int backlog, BackPressureConfig backPressureConfig) { 15 | if (backlog > backPressureConfig.getHigh()) { 16 | return BackPressureManagerPausedState.transitionTo(allTopicPartitions); 17 | } else { 18 | return new BackPressureManagerStateAndActions(this); 19 | } 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | rootProject.name="eventuate-messaging-kafka" 2 | 3 | include 
'eventuate-messaging-kafka-basic-consumer' 4 | include 'eventuate-messaging-kafka-spring-basic-consumer' 5 | //include 'eventuate-messaging-kafka-micronaut-basic-consumer' 6 | include 'eventuate-messaging-kafka-common' 7 | include 'eventuate-messaging-kafka-spring-common' 8 | //include 'eventuate-messaging-kafka-micronaut-common' 9 | include 'eventuate-messaging-kafka-consumer' 10 | include 'eventuate-messaging-kafka-spring-consumer' 11 | //include 'eventuate-messaging-kafka-micronaut-consumer' 12 | include 'eventuate-messaging-kafka-producer' 13 | include 'eventuate-messaging-kafka-spring-producer' 14 | //include 'eventuate-messaging-kafka-micronaut-producer' 15 | include 'eventuate-messaging-kafka-integration-test' 16 | include 'eventuate-messaging-kafka-spring-integration-test' 17 | //include 'eventuate-messaging-kafka-micronaut-integration-test' 18 | include 'eventuate-messaging-kafka-testcontainers' 19 | include 'eventuate-messaging-kafka-leadership' 20 | include 'eventuate-messaging-kafka-bom' 21 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManager.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.HashSet; 6 | import java.util.Set; 7 | 8 | public class BackPressureManager { 9 | 10 | private final BackPressureConfig backPressureConfig; 11 | private Set allTopicPartitions = new HashSet<>(); 12 | 13 | private BackPressureManagerState state = new BackPressureManagerNormalState(); 14 | 15 | public BackPressureManager(BackPressureConfig backPressureConfig) { 16 | this.backPressureConfig = backPressureConfig; 17 | } 18 | 19 | public BackPressureActions update(Set topicPartitions, int backlog) { 20 | allTopicPartitions.addAll(topicPartitions); 21 | BackPressureManagerStateAndActions stateAndActions = state.update(allTopicPartitions, backlog, backPressureConfig); 22 | this.state = stateAndActions.state; 23 | return stateAndActions.actions; 24 | } 25 | 26 | 27 | } 28 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/MultipleSwimlanesPerTopicPartitionMapping.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.concurrent.ConcurrentHashMap; 6 | 7 | public class MultipleSwimlanesPerTopicPartitionMapping implements TopicPartitionToSwimlaneMapping { 8 | 9 | private final int swimlanesPerTopicPartition; 10 | 11 | private final ConcurrentHashMap mapping = new ConcurrentHashMap<>(); 12 | 13 | public MultipleSwimlanesPerTopicPartitionMapping(int swimlanesPerTopicPartition) { 14 | this.swimlanesPerTopicPartition = swimlanesPerTopicPartition; 15 | } 16 | 17 | 18 | @Override 19 | public Integer toSwimlane(TopicPartition topicPartition, String messageKey) { 20 | int startingSwimlane = mapping.computeIfAbsent(topicPartition, tp -> mapping.size() * swimlanesPerTopicPartition); 21 | return startingSwimlane + messageKey.hashCode() % swimlanesPerTopicPartition; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- 
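To make the mapping above concrete, here is a small usage sketch. It is not part of the repository: the topic and key names are invented, and it assumes it lives in the same package as MultipleSwimlanesPerTopicPartitionMapping.

import org.apache.kafka.common.TopicPartition;

public class SwimlaneMappingExample {
  public static void main(String[] args) {
    // 4 swimlanes per partition: messages with the same key stay ordered,
    // while different keys from one partition can be processed concurrently.
    MultipleSwimlanesPerTopicPartitionMapping mapping = new MultipleSwimlanesPerTopicPartitionMapping(4);

    TopicPartition p0 = new TopicPartition("orders", 0); // first partition seen -> swimlanes 0..3
    TopicPartition p1 = new TopicPartition("orders", 1); // second partition seen -> swimlanes 4..7

    System.out.println(mapping.toSwimlane(p0, "customer-1")); // some value in [0, 4)
    System.out.println(mapping.toSwimlane(p0, "customer-1")); // same key -> same swimlane
    System.out.println(mapping.toSwimlane(p1, "customer-2")); // some value in [4, 8)
  }
}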
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-producer/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "io.spring.dependency-management" version "1.1.7" 3 | } 4 | 5 | 6 | 7 | 8 | dependencyManagement { 9 | imports { 10 | mavenBom "io.micronaut:micronaut-bom:$micronautVersion" 11 | } 12 | } 13 | 14 | dependencies { 15 | implementation project (":eventuate-messaging-kafka-producer") 16 | 17 | annotationProcessor "io.micronaut:micronaut-inject-java" 18 | annotationProcessor "io.micronaut:micronaut-validation" 19 | annotationProcessor "io.micronaut.configuration:micronaut-openapi" 20 | implementation "io.micronaut:micronaut-inject" 21 | implementation "io.micronaut:micronaut-validation" 22 | implementation "io.micronaut:micronaut-runtime" 23 | 24 | testAnnotationProcessor "io.micronaut:micronaut-inject-java" 25 | testImplementation "org.junit.jupiter:junit-jupiter-api" 26 | testImplementation "io.micronaut.test:micronaut-test-junit5" 27 | testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine" 28 | } 29 | 30 | test { 31 | useJUnitPlatform() 32 | } 33 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureActions.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.Collections; 6 | import java.util.Set; 7 | 8 | public class BackPressureActions { 9 | 10 | public final Set<TopicPartition> pause; 11 | public final Set<TopicPartition> resume; 12 | 13 | public BackPressureActions(Set<TopicPartition> pause, Set<TopicPartition> resume) { 14 | this.pause = pause; 15 | this.resume = resume; 16 | } 17 | 18 | public static final BackPressureActions NONE = new BackPressureActions(Collections.emptySet(), Collections.emptySet()); 19 | 20 | public static BackPressureActions pause(Set<TopicPartition> topicPartitions) { 21 | return new BackPressureActions(topicPartitions, Collections.emptySet()); 22 | } 23 | 24 | public static BackPressureActions resume(Set<TopicPartition> topicPartitions) { 25 | return new BackPressureActions(Collections.emptySet(), topicPartitions); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-basic-consumer/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "io.spring.dependency-management" version "1.1.7" 3 | } 4 | 5 | 6 | 7 | 8 | dependencyManagement { 9 | imports { 10 | mavenBom "io.micronaut:micronaut-bom:$micronautVersion" 11 | } 12 | } 13 | 14 | dependencies { 15 | implementation project (':eventuate-messaging-kafka-basic-consumer') 16 | 17 | annotationProcessor "io.micronaut:micronaut-inject-java" 18 | annotationProcessor "io.micronaut:micronaut-validation" 19 | annotationProcessor "io.micronaut.configuration:micronaut-openapi" 20 | implementation "io.micronaut:micronaut-inject" 21 | implementation "io.micronaut:micronaut-validation" 22 | implementation "io.micronaut:micronaut-runtime" 23 | 24 | testAnnotationProcessor "io.micronaut:micronaut-inject-java" 25 | testImplementation "org.junit.jupiter:junit-jupiter-api" 26 | testImplementation "io.micronaut.test:micronaut-test-junit5" 27 | testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine" 28 | } 29 | 30 | test { 31 | useJUnitPlatform() 32 | } 33 | 
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-producer/src/test/java/io/eventuate/messaging/kafka/micronaut/producer/EventuateKafkaProducerConfigurationTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.producer; 2 | 3 | import io.micronaut.test.annotation.MicronautTest; 4 | import org.junit.jupiter.api.Assertions; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import javax.inject.Inject; 8 | 9 | @MicronautTest(propertySources = "application.properties") 10 | public class EventuateKafkaProducerConfigurationTest { 11 | 12 | @Inject 13 | private EventuateKafkaProducerMicronautConfigurationProperties eventuateKafkaProducerConfigurationProperties; 14 | 15 | @Test 16 | public void testPropertyParsing() { 17 | 18 | Assertions.assertEquals(2, eventuateKafkaProducerConfigurationProperties.getProperties().size()); 19 | 20 | Assertions.assertEquals("1000000", eventuateKafkaProducerConfigurationProperties.getProperties().get("buffer.memory")); 21 | 22 | Assertions.assertEquals("org.apache.kafka.common.serialization.ByteArraySerializer", 23 | eventuateKafkaProducerConfigurationProperties.getProperties().get("value.serializer")); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-consumer/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "io.spring.dependency-management" version "1.1.7" 3 | } 4 | 5 | 6 | 7 | 8 | dependencyManagement { 9 | imports { 10 | mavenBom "io.micronaut:micronaut-bom:$micronautVersion" 11 | } 12 | } 13 | 14 | dependencies { 15 | implementation project (":eventuate-messaging-kafka-common") 16 | implementation project (":eventuate-messaging-kafka-basic-consumer") 17 | implementation project (":eventuate-messaging-kafka-consumer") 18 | implementation project (":eventuate-messaging-kafka-micronaut-common") 19 | implementation project (":eventuate-messaging-kafka-micronaut-basic-consumer") 20 | 21 | annotationProcessor "io.micronaut:micronaut-inject-java" 22 | annotationProcessor "io.micronaut:micronaut-validation" 23 | annotationProcessor "io.micronaut.configuration:micronaut-openapi" 24 | implementation "io.micronaut:micronaut-inject" 25 | implementation "io.micronaut:micronaut-validation" 26 | implementation "io.micronaut:micronaut-runtime" 27 | } 28 | tasks.withType(Test).configureEach { 29 | useJUnitPlatform() 30 | } 31 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-leadership/build.gradle: -------------------------------------------------------------------------------- 1 | 2 | dependencies { 3 | implementation "io.eventuate.common:eventuate-common-coordination-leadership:$eventuateCommonVersion" 4 | implementation "org.slf4j:slf4j-api:1.7.18" 5 | implementation project(":eventuate-messaging-kafka-basic-consumer") 6 | implementation "org.apache.kafka:kafka-clients:$kafkaClientVersion" 7 | 8 | testImplementation project(":eventuate-messaging-kafka-spring-common") 9 | testImplementation project (':eventuate-messaging-kafka-common') 10 | 11 | testImplementation "io.eventuate.common:eventuate-common-coordination-leadership-tests:$eventuateCommonVersion" 12 | testImplementation "io.eventuate.util:eventuate-util-test:$eventuateUtilVersion" 13 | testImplementation "org.springframework.boot:spring-boot-starter-test:$springBootVersion" 14 | 15 
| testImplementation "io.eventuate.common:eventuate-common-testcontainers:$eventuateCommonVersion" 16 | testImplementation project(":eventuate-messaging-kafka-testcontainers") 17 | testImplementation "org.testcontainers:testcontainers:$testContainersVersion" 18 | } 19 | 20 | test { 21 | forkEvery 1 22 | useJUnitPlatform() 23 | } 24 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/test/java/io/eventuate/messaging/kafka/basic/consumer/TopicPartitionOffsetsTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.junit.jupiter.api.Test; 4 | 5 | import java.util.Optional; 6 | 7 | import static org.junit.jupiter.api.Assertions.assertEquals; 8 | import static org.junit.jupiter.api.Assertions.assertFalse; 9 | 10 | public class TopicPartitionOffsetsTest { 11 | 12 | @Test 13 | public void shouldTrackOffsets() { 14 | 15 | TopicPartitionOffsets tpo = new TopicPartitionOffsets(); 16 | 17 | tpo.noteUnprocessed(1); 18 | tpo.noteUnprocessed(2); 19 | tpo.noteUnprocessed(3); 20 | 21 | tpo.noteProcessed(2); 22 | 23 | assertFalse(tpo.offsetToCommit().isPresent()); 24 | 25 | tpo.noteProcessed(1); 26 | 27 | assertEquals(Long.valueOf(2L), tpo.offsetToCommit().get()); 28 | 29 | tpo.noteOffsetCommitted(2); 30 | 31 | assertEquals(Optional.empty(), tpo.offsetToCommit()); 32 | 33 | tpo.noteProcessed(3); 34 | 35 | assertEquals(Long.valueOf(3), tpo.offsetToCommit().get()); 36 | 37 | tpo.noteOffsetCommitted(3); 38 | 39 | assertEquals(Optional.empty(), tpo.offsetToCommit()); 40 | } 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-common/src/main/java/io/eventuate/messaging/kafka/micronaut/common/EventuateKafkaPropertiesFactory.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.common; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 4 | import io.micronaut.context.annotation.Bean; 5 | import io.micronaut.context.annotation.Factory; 6 | import io.micronaut.context.annotation.Requires; 7 | import io.micronaut.context.annotation.Value; 8 | 9 | @Factory 10 | public class EventuateKafkaPropertiesFactory { 11 | 12 | @Bean 13 | public EventuateKafkaConfigurationProperties eventuateKafkaConfigurationProperties(@Value("${eventuatelocal.kafka.bootstrap.servers}") 14 | String bootstrapServers, 15 | @Value("${eventuatelocal.kafka.connection.validation.timeout:1000}") 16 | long connectionValidationTimeout) { 17 | return new EventuateKafkaConfigurationProperties(bootstrapServers, connectionValidationTimeout); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-common/src/main/java/io/eventuate/messaging/kafka/spring/common/EventuateKafkaPropertiesConfiguration.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.common; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 4 | import org.springframework.beans.factory.annotation.Value; 5 | import org.springframework.context.annotation.Bean; 6 | import org.springframework.context.annotation.Configuration; 7 | 8 | @Configuration 9 | public class EventuateKafkaPropertiesConfiguration { 10 | 
11 | 12 | 13 | @Bean 14 | public EventuateKafkaConfigurationProperties eventuateKafkaConfigurationProperties(@Value("${eventuatelocal.kafka.bootstrap.servers}") 15 | String bootstrapServers, 16 | @Value("${eventuatelocal.kafka.connection.validation.timeout:#{1000}}") 17 | long connectionValidationTimeout) { 18 | return new EventuateKafkaConfigurationProperties(bootstrapServers, connectionValidationTimeout); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-basic-consumer/src/main/java/io/eventuate/messaging/kafka/spring/basic/consumer/EventuateKafkaConsumerSpringConfigurationProperties.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.BackPressureConfig; 4 | import org.springframework.boot.context.properties.ConfigurationProperties; 5 | 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | 9 | @ConfigurationProperties("eventuate.local.kafka.consumer") 10 | public class EventuateKafkaConsumerSpringConfigurationProperties { 11 | Map<String, String> properties = new HashMap<>(); 12 | 13 | private BackPressureConfig backPressure = new BackPressureConfig(); 14 | private long pollTimeout = 100; 15 | 16 | public BackPressureConfig getBackPressure() { 17 | return backPressure; 18 | } 19 | 20 | public void setBackPressure(BackPressureConfig backPressure) { 21 | this.backPressure = backPressure; 22 | } 23 | 24 | public long getPollTimeout() { 25 | return pollTimeout; 26 | } 27 | 28 | public void setPollTimeout(long pollTimeout) { 29 | this.pollTimeout = pollTimeout; 30 | } 31 | 32 | public Map<String, String> getProperties() { 33 | return properties; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-producer/src/test/java/io/eventuate/messaging/kafka/spring/producer/EventuateKafkaProducerConfigurationSpringTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.producer; 2 | 3 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducerConfigurationProperties; 4 | import org.junit.jupiter.api.Assertions; 5 | import org.junit.jupiter.api.Test; 6 | import org.springframework.beans.factory.annotation.Autowired; 7 | import org.springframework.boot.test.context.SpringBootTest; 8 | 9 | @SpringBootTest(classes = EventuateKafkaProducerSpringConfigurationPropertiesConfiguration.class) 10 | public class EventuateKafkaProducerConfigurationSpringTest { 11 | 12 | @Autowired 13 | private EventuateKafkaProducerConfigurationProperties eventuateKafkaProducerConfigurationProperties; 14 | 15 | @Test 16 | public void testPropertyParsing() { 17 | 18 | Assertions.assertEquals(2, eventuateKafkaProducerConfigurationProperties.getProperties().size()); 19 | 20 | Assertions.assertEquals("1000000", eventuateKafkaProducerConfigurationProperties.getProperties().get("buffer.memory")); 21 | 22 | Assertions.assertEquals("org.apache.kafka.common.serialization.ByteArraySerializer", 23 | eventuateKafkaProducerConfigurationProperties.getProperties().get("value.serializer")); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-basic-consumer/src/main/java/io/eventuate/messaging/kafka/micronaut/basic/consumer/EventuateKafkaConsumerConfigurationPropertiesFactory.java: 
-------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 4 | import io.micronaut.context.annotation.Factory; 5 | 6 | import javax.inject.Singleton; 7 | 8 | @Factory 9 | public class EventuateKafkaConsumerConfigurationPropertiesFactory { 10 | @Singleton 11 | public EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties(EventuateKafkaConsumerMicronautConfigurationProperties eventuateKafkaConsumerMicronautConfigurationProperties) { 12 | EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties = new EventuateKafkaConsumerConfigurationProperties(eventuateKafkaConsumerMicronautConfigurationProperties.getProperties()); 13 | eventuateKafkaConsumerConfigurationProperties.setBackPressure(eventuateKafkaConsumerMicronautConfigurationProperties.getBackPressure().toBackPressureConfig()); 14 | eventuateKafkaConsumerConfigurationProperties.setPollTimeout(eventuateKafkaConsumerMicronautConfigurationProperties.getPollTimeout()); 15 | return eventuateKafkaConsumerConfigurationProperties; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-integration-test/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "io.spring.dependency-management" version "1.1.7" 3 | } 4 | 5 | 6 | 7 | 8 | dependencyManagement { 9 | imports { 10 | mavenBom "io.micronaut:micronaut-bom:$micronautVersion" 11 | } 12 | } 13 | 14 | dependencies { 15 | implementation project (':eventuate-messaging-kafka-integration-test') 16 | 17 | implementation project (':eventuate-messaging-kafka-micronaut-common') 18 | implementation project (':eventuate-messaging-kafka-micronaut-basic-consumer') 19 | implementation project (':eventuate-messaging-kafka-micronaut-producer') 20 | 21 | annotationProcessor "io.micronaut:micronaut-inject-java" 22 | annotationProcessor "io.micronaut:micronaut-validation" 23 | annotationProcessor "io.micronaut.configuration:micronaut-openapi" 24 | implementation "io.micronaut:micronaut-inject" 25 | implementation "io.micronaut:micronaut-validation" 26 | implementation "io.micronaut:micronaut-runtime" 27 | 28 | testAnnotationProcessor "io.micronaut:micronaut-inject-java" 29 | testImplementation "org.junit.jupiter:junit-jupiter-api" 30 | testImplementation "io.micronaut.test:micronaut-test-junit5" 31 | testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine" 32 | } 33 | 34 | test { 35 | useJUnitPlatform() 36 | } 37 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-basic-consumer/src/main/java/io/eventuate/messaging/kafka/spring/basic/consumer/EventuateKafkaConsumerSpringConfigurationPropertiesConfiguration.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 4 | import org.springframework.boot.context.properties.EnableConfigurationProperties; 5 | import org.springframework.context.annotation.Bean; 6 | 7 | @EnableConfigurationProperties(EventuateKafkaConsumerSpringConfigurationProperties.class) 8 | public class 
EventuateKafkaConsumerSpringConfigurationPropertiesConfiguration { 9 | 10 | @Bean 11 | public EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties(EventuateKafkaConsumerSpringConfigurationProperties eventuateKafkaConsumerSpringConfigurationProperties) { 12 | EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties = new EventuateKafkaConsumerConfigurationProperties(eventuateKafkaConsumerSpringConfigurationProperties.getProperties()); 13 | eventuateKafkaConsumerConfigurationProperties.setBackPressure(eventuateKafkaConsumerSpringConfigurationProperties.getBackPressure()); 14 | eventuateKafkaConsumerConfigurationProperties.setPollTimeout(eventuateKafkaConsumerSpringConfigurationProperties.getPollTimeout()); 15 | return eventuateKafkaConsumerConfigurationProperties; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/KafkaMessageConsumer.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import java.time.Duration; 4 | import java.util.Collection; 5 | import java.util.List; 6 | import java.util.Map; 7 | import java.util.Set; 8 | 9 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 10 | import org.apache.kafka.clients.consumer.ConsumerRecords; 11 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 12 | import org.apache.kafka.common.PartitionInfo; 13 | import org.apache.kafka.common.TopicPartition; 14 | 15 | public interface KafkaMessageConsumer { 16 | 17 | void assign(Collection<TopicPartition> topicPartitions); 18 | 19 | void seekToEnd(Collection<TopicPartition> topicPartitions); 20 | 21 | long position(TopicPartition topicPartition); 22 | 23 | void seek(TopicPartition topicPartition, long position); 24 | 25 | void subscribe(List<String> topics); 26 | 27 | void commitOffsets(Map<TopicPartition, OffsetAndMetadata> offsets); 28 | 29 | List<PartitionInfo> partitionsFor(String topic); 30 | 31 | ConsumerRecords<String, byte[]> poll(Duration duration); 32 | 33 | void pause(Set<TopicPartition> partitions); 34 | 35 | void resume(Set<TopicPartition> partitions); 36 | 37 | void close(); 38 | 39 | void close(Duration duration); 40 | 41 | void subscribe(Collection<String> topics, ConsumerRebalanceListener callback); 42 | } 43 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaConsumerConfigurationProperties.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import java.util.HashMap; 4 | import java.util.Map; 5 | 6 | public class EventuateKafkaConsumerConfigurationProperties { 7 | private Map<String, String> properties = new HashMap<>(); 8 | 9 | private BackPressureConfig backPressure = new BackPressureConfig(); 10 | private long pollTimeout; 11 | 12 | public BackPressureConfig getBackPressure() { 13 | return backPressure; 14 | } 15 | 16 | public void setBackPressure(BackPressureConfig backPressure) { 17 | this.backPressure = backPressure; 18 | } 19 | 20 | public long getPollTimeout() { 21 | return pollTimeout; 22 | } 23 | 24 | public void setPollTimeout(long pollTimeout) { 25 | this.pollTimeout = pollTimeout; 26 | } 27 | public Map<String, String> getProperties() { 28 | return properties; 29 | } 30 | 31 | public EventuateKafkaConsumerConfigurationProperties() { 32 | } 33 | 34 | public EventuateKafkaConsumerConfigurationProperties(Map<String, String> properties) { 35 | this.properties = properties; 36 | } 37 | 38 | public void setProperties(Map<String, String> properties) { 39 | this.properties = properties; 40 | } 41 | 42 | public static EventuateKafkaConsumerConfigurationProperties empty() { 43 | return new EventuateKafkaConsumerConfigurationProperties(); 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-consumer/src/test/java/io/eventuate/messaging/kafka/consumer/MultipleSwimlanesPerTopicPartitionMappingTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | import org.junit.jupiter.api.Test; 5 | 6 | import static org.assertj.core.api.Assertions.assertThat; 7 | 8 | public class MultipleSwimlanesPerTopicPartitionMappingTest { 9 | 10 | @Test 11 | public void shouldMap() { 12 | int swimlanesPerTopicPartition = 4; 13 | 14 | MultipleSwimlanesPerTopicPartitionMapping mapping = new MultipleSwimlanesPerTopicPartitionMapping(swimlanesPerTopicPartition); 15 | 16 | assertSwimlaneWithinRange(mapping, tp0(), "X", 0, swimlanesPerTopicPartition); 17 | assertSwimlaneWithinRange(mapping, tp0(), "Y", 0, swimlanesPerTopicPartition); 18 | assertSwimlaneWithinRange(mapping, tp1(), "Z", swimlanesPerTopicPartition, 2 * swimlanesPerTopicPartition); 19 | 20 | } 21 | 22 | private void assertSwimlaneWithinRange(MultipleSwimlanesPerTopicPartitionMapping mapping, TopicPartition topicPartition, String key, int minInclusive, int maxExclusive) { 23 | assertThat(mapping.toSwimlane(topicPartition, key)).isGreaterThanOrEqualTo(minInclusive).isLessThan(maxExclusive); 24 | } 25 | 26 | 27 | private TopicPartition tp1() { 28 | return new TopicPartition("x", 1); 29 | } 30 | 31 | private TopicPartition tp0() { 32 | return new TopicPartition("x", 0); 33 | } 34 | 35 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManagerPausedState.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | import java.util.HashSet; 6 | import java.util.Set; 7 | 8 | public class BackPressureManagerPausedState implements BackPressureManagerState { 9 | 10 | private Set<TopicPartition> suspendedPartitions; 11 | 12 | public BackPressureManagerPausedState(Set<TopicPartition> pausedTopicPartitions) { 13 | this.suspendedPartitions = new HashSet<>(pausedTopicPartitions); 14 | } 15 | 16 | public static BackPressureManagerStateAndActions transitionTo(Set<TopicPartition> allTopicPartitions) { 17 | return new BackPressureManagerStateAndActions(BackPressureActions.pause(allTopicPartitions), new BackPressureManagerPausedState(allTopicPartitions)); 18 | } 19 | 20 | 21 | @Override 22 | public BackPressureManagerStateAndActions update(Set<TopicPartition> allTopicPartitions, int backlog, BackPressureConfig backPressureConfig) { 23 | if (backlog <= backPressureConfig.getLow()) { 24 | return BackPressureManagerNormalState.transitionTo(suspendedPartitions); 25 | } else { 26 | Set<TopicPartition> toSuspend = new HashSet<>(allTopicPartitions); 27 | toSuspend.removeAll(suspendedPartitions); 28 | suspendedPartitions.addAll(toSuspend); 29 | return new BackPressureManagerStateAndActions(BackPressureActions.pause(toSuspend), this); 30 | } 31 | } 32 | } 33 | 
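Taken together, the two states implement a pause/resume hysteresis: pause all partitions when the in-flight backlog crosses the high watermark, and resume them only after it drains to the low watermark. A minimal sketch of that flow, assuming it lives in the same package as the back-pressure classes above (the topic name is invented):

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

public class BackPressureExample {
  public static void main(String[] args) {
    // Pause when more than 100 messages are in flight; resume once the backlog drains to 5 or fewer.
    BackPressureManager manager = new BackPressureManager(new BackPressureConfig(5, 100));
    Set<TopicPartition> assigned = Collections.singleton(new TopicPartition("orders", 0));

    BackPressureActions actions = manager.update(assigned, 150); // backlog above the high watermark
    System.out.println(actions.pause);  // [orders-0]: stop fetching from these partitions

    actions = manager.update(assigned, 3);                       // backlog below the low watermark
    System.out.println(actions.resume); // [orders-0]: safe to fetch again
  }
}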
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-consumer/src/main/java/io/eventuate/messaging/kafka/micronaut/consumer/MessageConsumerKafkaFactory.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 4 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory; 5 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 6 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 7 | import io.eventuate.messaging.kafka.consumer.OriginalTopicPartitionToSwimlaneMapping; 8 | import io.eventuate.messaging.kafka.consumer.TopicPartitionToSwimlaneMapping; 9 | import io.micronaut.context.annotation.Factory; 10 | 11 | import javax.inject.Singleton; 12 | 13 | @Factory 14 | public class MessageConsumerKafkaFactory { 15 | private final TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping = new OriginalTopicPartitionToSwimlaneMapping(); 16 | 17 | @Singleton 18 | public MessageConsumerKafkaImpl messageConsumerKafka(EventuateKafkaConfigurationProperties props, 19 | EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties, 20 | KafkaConsumerFactory kafkaConsumerFactory) { 21 | return new MessageConsumerKafkaImpl(props.getBootstrapServers(), eventuateKafkaConsumerConfigurationProperties, kafkaConsumerFactory, partitionToSwimLaneMapping); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/test/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaClusterTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | import org.junit.jupiter.api.BeforeAll; 4 | import org.junit.jupiter.api.Test; 5 | import org.testcontainers.lifecycle.Startables; 6 | 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | 10 | import static org.assertj.core.api.Assertions.assertThat; 11 | 12 | public class EventuateKafkaClusterTest { 13 | 14 | private static EventuateKafkaCluster eventuateKafkaCluster; 15 | 16 | @BeforeAll 17 | public static void startCluster() { 18 | eventuateKafkaCluster = new EventuateKafkaCluster("network-" + System.currentTimeMillis()) { 19 | @Override 20 | protected EventuateKafkaContainer makeEventuateKafkaContainer() { 21 | return EventuateKafkaContainer.makeFromDockerfile(zookeeper.getConnectionString()); 22 | } 23 | }.withReuse(false); 24 | 25 | Startables.deepStart(eventuateKafkaCluster.kafka).join(); 26 | } 27 | 28 | @Test 29 | public void shouldStartKafka() { 30 | 31 | } 32 | 33 | @Test 34 | public void shouldRegisterProperties() { 35 | Map properties = new HashMap<>(); 36 | 37 | eventuateKafkaCluster.registerProperties(properties::put); 38 | 39 | assertThat(properties).containsKeys("eventuatelocal.kafka.bootstrap.servers", 40 | "eventuatelocal.zookeeper.connection.string"); 41 | } 42 | 43 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateKafkaMultiMessage.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.common; 2 | 3 | import 
io.eventuate.messaging.kafka.common.sbe.MultiMessageEncoder; 4 | import org.apache.commons.lang3.builder.EqualsBuilder; 5 | import org.apache.commons.lang3.builder.HashCodeBuilder; 6 | 7 | import java.util.Collections; 8 | import java.util.List; 9 | import java.util.Objects; 10 | 11 | public class EventuateKafkaMultiMessage extends KeyValue { 12 | 13 | private List headers; 14 | 15 | public EventuateKafkaMultiMessage(String key, String value) { 16 | this(key, value, Collections.emptyList()); 17 | } 18 | 19 | public EventuateKafkaMultiMessage(String key, String value, List headers) { 20 | super(key, value); 21 | this.headers = headers; 22 | } 23 | 24 | 25 | public List getHeaders() { 26 | return headers; 27 | } 28 | 29 | @Override 30 | public boolean equals(Object o) { 31 | return EqualsBuilder.reflectionEquals(this, o); 32 | } 33 | 34 | @Override 35 | public int hashCode() { 36 | return HashCodeBuilder.reflectionHashCode(this); 37 | } 38 | 39 | @Override 40 | public int estimateSize() { 41 | int headerSize = MultiMessageEncoder.MessagesEncoder.HeadersEncoder.HEADER_SIZE; 42 | int headersSize = KeyValue.estimateSize(headers); 43 | return super.estimateSize() + headerSize + headersSize; 44 | } 45 | 46 | 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/build-and-test.yaml: -------------------------------------------------------------------------------- 1 | name: Build and test 2 | 3 | on: 4 | push: 5 | 6 | jobs: 7 | build-and-test: 8 | 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Checkout source 13 | uses: actions/checkout@v2 14 | 15 | - uses: actions/setup-java@v3 16 | with: 17 | java-version: '11' 18 | java-package: jdk 19 | distribution: corretto 20 | 21 | - name: Setup Gradle 22 | uses: gradle/gradle-build-action@v2 23 | with: 24 | cache-read-only: false 25 | 26 | - name: Build 27 | run: ./gradlew :eventuate-messaging-kafka-testcontainers:clean :eventuate-messaging-kafka-testcontainers:test --tests "io.eventuate.messaging.kafka.testcontainers.EventuateKafkaNativeClusterTest" 28 | 29 | 30 | - name: Save test results 31 | if: ${{ always() }} 32 | uses: actions/upload-artifact@v2 33 | with: 34 | name: test-reports 35 | path: | 36 | **/build/reports 37 | **/build/test-results 38 | 39 | - name: get container logs 40 | run: ./.github/workflows/print-container-logs.sh 41 | if: ${{ always() }} 42 | 43 | - name: Save container logs 44 | if: ${{ always() }} 45 | uses: actions/upload-artifact@v2 46 | with: 47 | name: container-logs 48 | path: ~/container-logs 49 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-basic-consumer/src/test/java/io/eventuate/messaging/kafka/micronaut/basic/consumer/EventuateKafkaConsumerConfigurationMicronautTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 4 | import io.micronaut.test.annotation.MicronautTest; 5 | import org.junit.jupiter.api.Assertions; 6 | import org.junit.jupiter.api.Test; 7 | 8 | import javax.inject.Inject; 9 | 10 | import static org.junit.jupiter.api.Assertions.assertEquals; 11 | 12 | @MicronautTest(propertySources = "application.properties") 13 | public class EventuateKafkaConsumerConfigurationMicronautTest { 14 | 15 | @Inject 16 | private EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties; 17 | 18 | @Test 19 | public void testPropertyParsing() { 20 | 21 | Assertions.assertEquals(2, eventuateKafkaConsumerConfigurationProperties.getProperties().size()); 22 | 23 | Assertions.assertEquals("10000", eventuateKafkaConsumerConfigurationProperties.getProperties().get("session.timeout.ms")); 24 | 25 | Assertions.assertEquals("org.apache.kafka.common.serialization.StringSerializer", 26 | eventuateKafkaConsumerConfigurationProperties.getProperties().get("key.serializer")); 27 | 28 | assertEquals(5, eventuateKafkaConsumerConfigurationProperties.getBackPressure().getLow()); 29 | assertEquals(100, eventuateKafkaConsumerConfigurationProperties.getBackPressure().getHigh()); 30 | assertEquals(200, eventuateKafkaConsumerConfigurationProperties.getPollTimeout()); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/EventuateKafkaMultiMessages.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.common; 2 | 3 | import org.apache.commons.lang3.builder.EqualsBuilder; 4 | import org.apache.commons.lang3.builder.ToStringBuilder; 5 | 6 | import java.util.Collections; 7 | import java.util.List; 8 | import java.util.Objects; 9 | 10 | public class EventuateKafkaMultiMessages { 11 | private List headers; 12 | private List<EventuateKafkaMultiMessage> messages; 13 | 14 | public EventuateKafkaMultiMessages(List<EventuateKafkaMultiMessage> messages) { 15 | this(Collections.emptyList(), messages); 16 | } 17 | 18 | public EventuateKafkaMultiMessages(List headers, List<EventuateKafkaMultiMessage> messages) { 19 | this.headers = headers; 20 | this.messages = messages; 21 | } 22 | 23 | public List getHeaders() { 24 | return headers; 25 | } 26 | 27 | public List<EventuateKafkaMultiMessage> getMessages() { 28 | return messages; 29 | } 30 | 31 | public int estimateSize() { 32 | return KeyValue.estimateSize(headers) + KeyValue.estimateSize(messages); 33 | } 34 | 35 | @Override 36 | public String toString() { 37 | return ToStringBuilder.reflectionToString(this); 38 | } 39 | 40 | @Override 41 | public boolean equals(Object o) { 42 | return EqualsBuilder.reflectionEquals(this, o); 43 | } 44 | 45 | @Override 46 | public int hashCode() { 47 | return Objects.hash(headers, messages); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-basic-consumer/src/test/java/io/eventuate/messaging/kafka/spring/basic/consumer/EventuateKafkaConsumerConfigurationSpringTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 4 | import org.junit.jupiter.api.Test; 5 | import org.springframework.beans.factory.annotation.Autowired; 6 | import org.springframework.boot.test.context.SpringBootTest; 7 | 8 | import static org.junit.jupiter.api.Assertions.assertEquals; 9 | 10 | @SpringBootTest(classes = EventuateKafkaConsumerSpringConfigurationPropertiesConfiguration.class) 11 | public class EventuateKafkaConsumerConfigurationSpringTest { 12 | 13 | @Autowired 14 | private EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties; 15 | 16 | @Test 17 | public void testPropertyParsing() { 18 | 19 | assertEquals(2, eventuateKafkaConsumerConfigurationProperties.getProperties().size()); 20 | 21 | assertEquals("10000", eventuateKafkaConsumerConfigurationProperties.getProperties().get("session.timeout.ms")); 22 | 23 | assertEquals("org.apache.kafka.common.serialization.StringSerializer", 24 | eventuateKafkaConsumerConfigurationProperties.getProperties().get("key.serializer")); 25 | 26 | assertEquals(5, eventuateKafkaConsumerConfigurationProperties.getBackPressure().getLow()); 27 | assertEquals(100, eventuateKafkaConsumerConfigurationProperties.getBackPressure().getHigh()); 28 | assertEquals(200, eventuateKafkaConsumerConfigurationProperties.getPollTimeout()); 29 | } 30 | } 31 | 
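The values asserted above come from the module's src/test/resources/application.properties (not reproduced here). A fragment like the following would satisfy the test; the exact key spelling assumes Spring Boot's relaxed binding, which also accepts forms such as backPressure.low:

eventuate.local.kafka.consumer.properties.session.timeout.ms=10000
eventuate.local.kafka.consumer.properties.key.serializer=org.apache.kafka.common.serialization.StringSerializer
eventuate.local.kafka.consumer.back-pressure.low=5
eventuate.local.kafka.consumer.back-pressure.high=100
eventuate.local.kafka.consumer.poll-timeout=200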
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/main/java/io/eventuate/messaging/kafka/common/KeyValue.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.common; 2 | 3 | import org.apache.commons.lang3.builder.EqualsBuilder; 4 | import org.apache.commons.lang3.builder.ToStringBuilder; 5 | 6 | import java.util.Collection; 7 | import java.util.Objects; 8 | 9 | public class KeyValue { 10 | public static final int ESTIMATED_BYTES_PER_CHAR = 3; // a Java char encodes to at most 3 bytes in UTF-8 11 | public static final int KEY_HEADER_SIZE = 4; 12 | public static final int VALUE_HEADER_SIZE = 4; 13 | 14 | private String key; 15 | private String value; 16 | 17 | public KeyValue(String key, String value) { 18 | this.key = key; 19 | this.value = value; 20 | } 21 | 22 | public String getKey() { 23 | return key; 24 | } 25 | 26 | public String getValue() { 27 | return value; 28 | } 29 | 30 | public int estimateSize() { 31 | int keyLength = estimatedStringSizeInBytes(key); 32 | int valueLength = estimatedStringSizeInBytes(value); 33 | return KEY_HEADER_SIZE + keyLength + VALUE_HEADER_SIZE + valueLength; 34 | } 35 | 36 | public static int estimateSize(Collection<? extends KeyValue> kvs) { 37 | return kvs.stream().mapToInt(KeyValue::estimateSize).sum(); 38 | } 39 | 40 | private int estimatedStringSizeInBytes(String s) { 41 | return s == null ? 0 : s.length() * ESTIMATED_BYTES_PER_CHAR; 42 | } 43 | 44 | @Override 45 | public String toString() { 46 | return ToStringBuilder.reflectionToString(this); 47 | } 48 | 49 | @Override 50 | public boolean equals(Object o) { 51 | return EqualsBuilder.reflectionEquals(this, o); 52 | } 53 | 54 | @Override 55 | public int hashCode() { 56 | return Objects.hash(key, value); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/main/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaCluster.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | import io.eventuate.common.testcontainers.EventuateZookeeperContainer; 4 | import io.eventuate.common.testcontainers.ReusableNetworkFactory; 5 | import org.testcontainers.containers.Network; 6 | 7 | import java.util.function.BiConsumer; 8 | import java.util.function.Supplier; 9 | 10 | public class EventuateKafkaCluster { 11 | 12 | public final Network network; 13 | 14 | public final EventuateZookeeperContainer zookeeper; 15 | 16 | public final EventuateKafkaContainer kafka; 17 | 18 | public EventuateKafkaCluster() { 19 | this("foofoo"); 20 | } 21 | 22 | public EventuateKafkaCluster(String networkName) { 23 | network = ReusableNetworkFactory.createNetwork(networkName); 24 | zookeeper = new EventuateZookeeperContainer().withReuse(true) 25 | .withNetwork(network) 26 | .withNetworkAliases("zookeeper"); 27 | kafka = makeEventuateKafkaContainer() 28 | .dependsOn(zookeeper) 29 | .withNetwork(network) 30 | .withNetworkAliases("kafka") 31 | .withReuse(true); 32 | } 33 | 34 | protected EventuateKafkaContainer makeEventuateKafkaContainer() { 35 | return new EventuateKafkaContainer("zookeeper:2181"); 36 | } 37 | 38 | 39 | public void registerProperties(BiConsumer<String, Supplier<String>> registry) { 40 | zookeeper.registerProperties(registry); 41 | kafka.registerProperties(registry); 42 | } 43 | 44 | public EventuateKafkaCluster withReuse(boolean reuse) { 45 | zookeeper.withReuse(reuse); 46 | kafka.withReuse(reuse); 47 | return this; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-producer/src/main/java/io/eventuate/messaging/kafka/producer/EventuateKafkaPartitioner.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.producer; 2 | 3 | import org.apache.kafka.common.PartitionInfo; 4 | import org.apache.kafka.common.utils.Utils; 5 | 6 | import java.util.List; 7 | import java.util.concurrent.ConcurrentHashMap; 8 | import java.util.concurrent.ConcurrentMap; 9 | import java.util.concurrent.ThreadLocalRandom; 10 | import java.util.concurrent.atomic.AtomicInteger; 11 | import java.util.stream.Collectors; 12 | 13 | public class EventuateKafkaPartitioner { 14 | 15 | private final ConcurrentMap<String, AtomicInteger> topicCounterMap = new ConcurrentHashMap<>(); 16 | 17 | public int partition(String topic, byte[] keyBytes, List<PartitionInfo> partitions) { 18 | int numPartitions = partitions.size(); 19 | if (keyBytes == null) { 20 | int nextValue = nextValue(topic); // unkeyed messages are distributed round-robin 21 | List<PartitionInfo> availablePartitions = partitions.stream().filter(p -> p.leader() != null).collect(Collectors.toList()); 22 | if (availablePartitions.size() > 0) { 23 | int part = Utils.toPositive(nextValue) % availablePartitions.size(); 24 | return availablePartitions.get(part).partition(); 25 | } else { 26 | return Utils.toPositive(nextValue) % numPartitions; 27 | } 28 | } else { 29 | return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; // keyed messages hash to a stable partition 30 | } 31 | } 32 | 33 | private int nextValue(String topic) { 34 | AtomicInteger counter = topicCounterMap.get(topic); 35 | if (null == counter) { 36 | counter = new AtomicInteger(ThreadLocalRandom.current().nextInt()); 37 | AtomicInteger currentCounter = topicCounterMap.putIfAbsent(topic, counter); 38 | if (currentCounter != null) { 39 | counter = currentCounter; 40 | } 41 | } 42 | return counter.getAndIncrement(); 43 | } 44 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/TopicPartitionOffsets.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.commons.lang3.builder.ToStringBuilder; 4 | 5 | import java.util.*; 6 | import java.util.stream.Collectors; 7 | 8 | /** 9 | * Tracks the offsets for a TopicPartition that are being processed or have been processed 10 | */ 11 | public class TopicPartitionOffsets { 12 | 13 | /** 14 | * offsets that are being processed 15 | */ 16 | private SortedSet<Long> unprocessed = new TreeSet<>(); 17 | 18 | /** 19 | * offsets that have been processed 20 | */ 21 | 22 | private Set<Long> processed = new HashSet<>(); 23 | 24 | @Override 25 | public String toString() { 26 | return new ToStringBuilder(this) 27 | .append("unprocessed", unprocessed) 28 | .append("processed", processed) 29 | .toString(); 30 | } 31 | 32 | public void noteUnprocessed(long offset) { 33 | unprocessed.add(offset); 34 | } 35 | 36 | public void noteProcessed(long offset) { 37 | processed.add(offset); 38 | } 39 | 40 | /** 41 | * @return the largest offset that, together with all earlier tracked offsets, has been processed and can therefore be committed 42 | */ 43 | Optional<Long> offsetToCommit() { 44 | Long result = null; 45 | for (long x : unprocessed) { 46 | if (processed.contains(x)) 47 | result = x; 48 | else 49 | break; 50 | } 51 | return Optional.ofNullable(result); 52 | } 53 | 54 | public void noteOffsetCommitted(long offset) { 55 | unprocessed = new TreeSet<>(unprocessed.stream().filter(x -> x > offset).collect(Collectors.toList())); 56 | processed = processed.stream().filter(x -> x > offset).collect(Collectors.toSet()); 57 | } 58 | 59 | public Set<Long> getPending() { 60 | Set<Long> result = new HashSet<>(unprocessed); 61 | result.removeAll(processed); 62 | return result; 63 | } 64 | } 65 | 
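One subtlety before the next test: offsetToCommit() returns the highest fully processed offset, but Kafka's commit convention stores the offset of the next record to consume. The OffsetTracker shown later therefore adds 1 when committing (and subtracts 1 when recording a commit). A tiny illustration, assuming the classes in this package (the topic name is invented):

OffsetTracker tracker = new OffsetTracker();
TopicPartition tp = new TopicPartition("orders", 0);
tracker.noteUnprocessed(tp, 1);
tracker.noteProcessed(tp, 1);
// committable position is offset 2, i.e. "resume reading at offset 2", per Kafka's convention
tracker.offsetsToCommit().forEach((partition, om) -> System.out.println(partition + " -> " + om.offset())); // orders-0 -> 2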
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/test/java/io/eventuate/messaging/kafka/basic/consumer/OffsetTrackerTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 4 | import org.apache.kafka.common.TopicPartition; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import java.util.Map; 8 | 9 | import static java.util.Collections.singletonMap; 10 | import static org.assertj.core.api.Assertions.assertThat; 11 | import static org.junit.jupiter.api.Assertions.assertEquals; 12 | 13 | public class OffsetTrackerTest { 14 | 15 | @Test 16 | public void shouldTrackOffsets() { 17 | 18 | TopicPartition tp = new TopicPartition("x", 0); 19 | 20 | OffsetTracker offsetTracker = new OffsetTracker(); 21 | 22 | offsetTracker.noteUnprocessed(tp, 1); 23 | offsetTracker.noteUnprocessed(tp, 2); 24 | offsetTracker.noteUnprocessed(tp, 3); 25 | 26 | offsetTracker.noteProcessed(tp, 2); 27 | 28 | assertThat(offsetTracker.offsetsToCommit()).isEmpty(); 29 | 30 | offsetTracker.noteProcessed(tp, 1); 31 | 32 | Map<TopicPartition, OffsetAndMetadata> otc2 = offsetTracker.offsetsToCommit(); 33 | 34 | assertEquals(makeExpectedOffsets(tp, 2), otc2); 35 | 36 | offsetTracker.noteOffsetsCommitted(otc2); 37 | 38 | assertThat(offsetTracker.offsetsToCommit()).isEmpty(); 39 | 40 | offsetTracker.noteProcessed(tp, 3); 41 | 42 | Map<TopicPartition, OffsetAndMetadata> otc3 = offsetTracker.offsetsToCommit(); 43 | assertEquals(makeExpectedOffsets(tp, 3), otc3); 44 | 45 | offsetTracker.noteOffsetsCommitted(otc3); 46 | 47 | assertThat(offsetTracker.offsetsToCommit()).isEmpty(); 48 | } 49 | 50 | private Map<TopicPartition, OffsetAndMetadata> makeExpectedOffsets(TopicPartition tp, int offset) { 51 | return singletonMap(tp, new OffsetAndMetadata(offset + 1, "")); 52 | } 53 | 54 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-integration-test/src/test/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaProducerConsumerFactory.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 4 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 5 | import io.eventuate.messaging.kafka.consumer.OriginalTopicPartitionToSwimlaneMapping; 6 | import io.eventuate.messaging.kafka.consumer.TopicPartitionToSwimlaneMapping; 7 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducer; 8 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducerConfigurationProperties; 9 | import io.micronaut.context.annotation.Factory; 10 | 11 | import javax.inject.Singleton; 12 | 13 | @Factory 14 | public class EventuateKafkaProducerConsumerFactory { 15 | private final TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping = new OriginalTopicPartitionToSwimlaneMapping(); 16 | 17 | @Singleton 18 | public EventuateKafkaProducer producer(EventuateKafkaConfigurationProperties kafkaProperties, EventuateKafkaProducerConfigurationProperties producerProperties) { 19 | return new EventuateKafkaProducer(kafkaProperties.getBootstrapServers(), producerProperties); 20 | } 21 | 22 | @Singleton 23 | public MessageConsumerKafkaImpl messageConsumerKafka(EventuateKafkaConfigurationProperties props, 24 | EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties, 25 | KafkaConsumerFactory kafkaConsumerFactory) { 26 | return new MessageConsumerKafkaImpl(props.getBootstrapServers(), eventuateKafkaConsumerConfigurationProperties, kafkaConsumerFactory, partitionToSwimLaneMapping); 27 | } 28 | 29 | @Singleton 30 | public KafkaConsumerFactory kafkaConsumerFactory() { 31 | return new DefaultKafkaConsumerFactory(); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/main/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaNativeContainer.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | 4 | import io.eventuate.common.testcontainers.PropertyProvidingContainer; 5 | import org.jetbrains.annotations.NotNull; 6 | import org.testcontainers.kafka.KafkaContainer; 7 | 8 | import java.util.function.BiConsumer; 9 | import java.util.function.Supplier; 10 | 11 | public class EventuateKafkaNativeContainer extends KafkaContainer implements PropertyProvidingContainer { 12 | 13 | private String firstNetworkAlias; 14 | 15 | public EventuateKafkaNativeContainer() { 16 | super("apache/kafka-native:3.8.0"); 17 | } 18 | 19 | public EventuateKafkaNativeContainer(String imageName) { 20 | super(imageName); 21 | } 22 | 23 | @Override 24 | protected void configure() { 25 | String controllerQuorumVoters = "%s@%s:9094".formatted(getEnvMap().get("KAFKA_NODE_ID"), "localhost"); 26 | withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters); 27 | } 28 | 29 | @Override 30 | public EventuateKafkaNativeContainer withNetworkAliases(String... aliases) { 31 | super.withNetworkAliases(aliases); 32 | this.firstNetworkAlias = aliases[0]; 33 | return this; 34 | } 35 | 36 | @Override 37 | public EventuateKafkaNativeContainer withReuse(boolean reusable) { 38 | return (EventuateKafkaNativeContainer) super.withReuse(reusable); 39 | } 40 | 41 | @NotNull 42 | public String getBootstrapServersForContainer() { 43 | if (firstNetworkAlias == null) 44 | throw new IllegalStateException("First network alias not set"); 45 | return firstNetworkAlias + ":9093"; 46 | } 47 | 48 | @Override 49 | public void registerProperties(BiConsumer<String, Supplier<String>> registry) { 50 | registry.accept("eventuatelocal.kafka.bootstrap.servers", this::getBootstrapServers); 51 | } 52 | 53 | } 54 | 
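For reference, a hypothetical test setup using the container above (a minimal sketch, not from the repository; it assumes the container is attached to a shared Docker network when sibling containers need to reach it, and the alias "kafka" is arbitrary):

EventuateKafkaNativeContainer kafka =
    new EventuateKafkaNativeContainer()
        .withNetworkAliases("kafka")   // also determines the in-network bootstrap address below
        .withReuse(false);
kafka.start();

String fromHost = kafka.getBootstrapServers();                // for clients running on the host
String fromNetwork = kafka.getBootstrapServersForContainer(); // "kafka:9093", for containers on the same network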
kafkaConsumerFactory, partitionToSwimLaneMapping); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-basic-consumer/src/main/java/io/eventuate/messaging/kafka/micronaut/basic/consumer/EventuateKafkaConsumerMicronautConfigurationProperties.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.micronaut.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.BackPressureConfig; 4 | import io.micronaut.context.annotation.ConfigurationProperties; 5 | 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | import java.util.stream.Collectors; 9 | 10 | @ConfigurationProperties("eventuate.local.kafka.consumer") 11 | public class EventuateKafkaConsumerMicronautConfigurationProperties { 12 | Map<String, String> properties = new HashMap<>(); 13 | 14 | private BackPressureProperties backPressure = new BackPressureProperties(); 15 | private long pollTimeout = 100; 16 | 17 | public BackPressureProperties getBackPressure() { 18 | return backPressure; 19 | } 20 | 21 | public void setBackPressure(BackPressureProperties backPressure) { 22 | this.backPressure = backPressure; 23 | } 24 | 25 | public long getPollTimeout() { 26 | return pollTimeout; 27 | } 28 | 29 | public void setPollTimeout(long pollTimeout) { 30 | this.pollTimeout = pollTimeout; 31 | } 32 | public Map<String, String> getProperties() { 33 | return properties 34 | .entrySet() 35 | .stream() 36 | .collect(Collectors.toMap(o -> o.getKey().replace("-", "."), Map.Entry::getValue)); 37 | } 38 | 39 | @ConfigurationProperties("backPressure") 40 | public static class BackPressureProperties { 41 | private int low = 0; 42 | private int high = Integer.MAX_VALUE; 43 | 44 | public int getLow() { 45 | return low; 46 | } 47 | 48 | public void setLow(int low) { 49 | this.low = low; 50 | } 51 | 52 | public int getHigh() { 53 | return high; 54 | } 55 | 56 | public void setHigh(int high) { 57 | this.high = high; 58 | } 59 | 60 | public BackPressureConfig toBackPressureConfig() { 61 | return new BackPressureConfig(low, high); 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/OffsetTracker.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.commons.lang3.builder.ToStringBuilder; 4 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 5 | import org.apache.kafka.common.TopicPartition; 6 | 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | /** 12 | * Keeps track of message offsets that are (a) being processed and (b) have been processed and can be committed 13 | */ 14 | public class OffsetTracker { 15 | 16 | private Map<TopicPartition, TopicPartitionOffsets> state = new HashMap<>(); 17 | 18 | @Override 19 | public String toString() { 20 | return new ToStringBuilder(this) 21 | .append("state", state) 22 | .toString(); 23 | } 24 | 25 | private TopicPartitionOffsets fetch(TopicPartition topicPartition) { 26 | TopicPartitionOffsets tpo = state.get(topicPartition); 27 | if (tpo == null) { 28 | tpo = new TopicPartitionOffsets(); 29 | state.put(topicPartition, tpo); 30 | } 31 | return tpo; 32 | } 33 | void noteUnprocessed(TopicPartition topicPartition, long offset) { 34 | fetch(topicPartition).noteUnprocessed(offset); 35 | } 36 | 37 | void
noteProcessed(TopicPartition topicPartition, long offset) { 38 | fetch(topicPartition).noteProcessed(offset); 39 | } 40 | 41 | private final int OFFSET_ADJUSTMENT = 1; 42 | public Map<TopicPartition, OffsetAndMetadata> offsetsToCommit() { 43 | Map<TopicPartition, OffsetAndMetadata> result = new HashMap<>(); 44 | state.forEach((tp, tpo) -> tpo.offsetToCommit().ifPresent(offset -> result.put(tp, new OffsetAndMetadata(offset + OFFSET_ADJUSTMENT, "")))); 45 | return result; 46 | } 47 | 48 | public void noteOffsetsCommitted(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) { 49 | offsetsToCommit.forEach((tp, om) -> { 50 | fetch(tp).noteOffsetCommitted(om.offset() - OFFSET_ADJUSTMENT); 51 | }); 52 | } 53 | 54 | public Map<TopicPartition, Set<Long>> getPending() { 55 | Map<TopicPartition, Set<Long>> result = new HashMap<>(); 56 | state.forEach((tp, tpo) -> { 57 | Set<Long> pending = tpo.getPending(); 58 | if (!pending.isEmpty()) 59 | result.put(tp, pending); 60 | }); 61 | return result; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/SwimlaneBasedDispatcher.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | import java.util.concurrent.ConcurrentHashMap; 8 | import java.util.concurrent.Executor; 9 | import java.util.function.Consumer; 10 | 11 | public class SwimlaneBasedDispatcher { 12 | 13 | private static final Logger logger = LoggerFactory.getLogger(SwimlaneBasedDispatcher.class); 14 | 15 | private final ConcurrentHashMap<Integer, SwimlaneDispatcher> map = new ConcurrentHashMap<>(); 16 | private final Executor executor; 17 | private final String subscriberId; 18 | 19 | private final TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping; 20 | 21 | public SwimlaneBasedDispatcher(String subscriberId, Executor executor, TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping) { 22 | this.subscriberId = subscriberId; 23 | this.executor = executor; 24 | this.partitionToSwimLaneMapping = partitionToSwimLaneMapping; 25 | } 26 | 27 | public SwimlaneDispatcherBacklog dispatch(RawKafkaMessage message, TopicPartition topicPartition, Consumer<RawKafkaMessage> target) { 28 | Integer swimlane = partitionToSwimLaneMapping.toSwimlane(topicPartition, message.getMessageKey()); 29 | logger.trace("Dispatching to swimlane {} for {}", swimlane, message); 30 | SwimlaneDispatcher swimlaneDispatcher = getOrCreate(swimlane); 31 | return swimlaneDispatcher.dispatch(message, target); 32 | } 33 | 34 | private SwimlaneDispatcher getOrCreate(Integer swimlane) { 35 | SwimlaneDispatcher swimlaneDispatcher = map.get(swimlane); 36 | if (swimlaneDispatcher == null) { 37 | logger.trace("No dispatcher for {} {}.
Attempting to create", subscriberId, swimlane); 38 | swimlaneDispatcher = new SwimlaneDispatcher(subscriberId, swimlane, executor); 39 | SwimlaneDispatcher r = map.putIfAbsent(swimlane, swimlaneDispatcher); 40 | if (r != null) { 41 | logger.trace("Using concurrently created SwimlaneDispatcher for {} {}", subscriberId, swimlane); 42 | swimlaneDispatcher = r; 43 | } else { 44 | logger.trace("Using newly created SwimlaneDispatcher for {} {}", subscriberId, swimlane); 45 | } 46 | } 47 | return swimlaneDispatcher; 48 | } 49 | } 50 | 51 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-micronaut-integration-test/src/test/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaBasicConsumerMicronautTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 4 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 5 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducer; 6 | import io.micronaut.context.annotation.Property; 7 | import io.micronaut.test.annotation.MicronautTest; 8 | import org.junit.jupiter.api.Test; 9 | 10 | import javax.inject.Inject; 11 | 12 | @MicronautTest 13 | @Property(name = "eventuate.local.kafka.consumer.backPressure.high", value = "3") 14 | public class EventuateKafkaBasicConsumerMicronautTest extends AbstractEventuateKafkaBasicConsumerTest { 15 | 16 | @Inject 17 | private EventuateKafkaConfigurationProperties kafkaProperties; 18 | 19 | @Inject 20 | private EventuateKafkaConsumerConfigurationProperties consumerProperties; 21 | 22 | @Inject 23 | private EventuateKafkaProducer producer; 24 | 25 | @Inject 26 | private MessageConsumerKafkaImpl consumer; 27 | 28 | @Inject 29 | private KafkaConsumerFactory kafkaConsumerFactory; 30 | 31 | @Test 32 | @Override 33 | public void shouldStopWhenHandlerThrowsException() { 34 | super.shouldStopWhenHandlerThrowsException(); 35 | } 36 | 37 | @Test 38 | @Override 39 | public void shouldConsumeMessages() { 40 | super.shouldConsumeMessages(); 41 | } 42 | 43 | @Test 44 | @Override 45 | public void shouldConsumeMessagesWithBackPressure() { 46 | super.shouldConsumeMessagesWithBackPressure(); 47 | } 48 | 49 | @Test 50 | @Override 51 | public void shouldConsumeBatchOfMessage() { 52 | super.shouldConsumeBatchOfMessage(); 53 | } 54 | 55 | @Override 56 | protected EventuateKafkaConfigurationProperties getKafkaProperties() { 57 | return kafkaProperties; 58 | } 59 | 60 | @Override 61 | protected EventuateKafkaConsumerConfigurationProperties getConsumerProperties() { 62 | return consumerProperties; 63 | } 64 | 65 | @Override 66 | protected EventuateKafkaProducer getProducer() { 67 | return producer; 68 | } 69 | 70 | @Override 71 | protected MessageConsumerKafkaImpl getConsumer() { 72 | return consumer; 73 | } 74 | 75 | @Override 76 | protected KafkaConsumerFactory getKafkaConsumerFactory() { 77 | return kafkaConsumerFactory; 78 | } 79 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/test/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaNativeClusterTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | import io.eventuate.common.testcontainers.PropertyProvidingContainer; 4 | import 
io.eventuate.messaging.kafka.consumer.KafkaSubscription; 5 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 6 | import io.eventuate.messaging.kafka.spring.consumer.KafkaConsumerFactoryConfiguration; 7 | import io.eventuate.messaging.kafka.spring.consumer.MessageConsumerKafkaConfiguration; 8 | import org.junit.jupiter.api.Test; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | import org.springframework.beans.factory.annotation.Autowired; 12 | import org.springframework.boot.test.context.SpringBootTest; 13 | import org.springframework.context.annotation.Configuration; 14 | import org.springframework.context.annotation.Import; 15 | import org.springframework.test.context.DynamicPropertyRegistry; 16 | import org.springframework.test.context.DynamicPropertySource; 17 | import org.testcontainers.containers.output.Slf4jLogConsumer; 18 | 19 | import java.util.Collections; 20 | import java.util.UUID; 21 | 22 | @SpringBootTest 23 | public class EventuateKafkaNativeClusterTest { 24 | 25 | private final static Logger logger = LoggerFactory.getLogger(EventuateKafkaNativeClusterTest.class); 26 | 27 | private static final EventuateKafkaNativeCluster kafkaCluster = 28 | new EventuateKafkaNativeCluster("network-" + uuid()); 29 | 30 | @DynamicPropertySource 31 | static void registerContainerProperties(DynamicPropertyRegistry registry) { 32 | kafkaCluster.kafka.withLogConsumer(new Slf4jLogConsumer(logger).withPrefix("kafka:")); 33 | 34 | PropertyProvidingContainer.startAndProvideProperties(registry, kafkaCluster.kafka); 35 | } 36 | 37 | @Configuration 38 | @Import({MessageConsumerKafkaConfiguration.class, KafkaConsumerFactoryConfiguration.class}) 39 | public static class Config { 40 | } 41 | 42 | @Autowired 43 | private MessageConsumerKafkaImpl messageConsumerKafka; 44 | 45 | @Test 46 | public void shouldSubscribeToKafka() { 47 | 48 | KafkaSubscription s = messageConsumerKafka.subscribe(uuid(), 49 | Collections.singleton(uuid()), 50 | m -> {}); 51 | 52 | } 53 | 54 | private static String uuid() { 55 | return UUID.randomUUID().toString(); 56 | } 57 | 58 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/main/resources/sbe-schema.xml: -------------------------------------------------------------------------------- [The XML markup of this file was destroyed in this export; only blank numbered lines survived, so they are omitted here. Judging from SbeMessageSerdeTest below, the schema defines the SBE MultiMessage wire format: a message header plus a repeating "messages" group of variable-length key/value pairs.] -------------------------------------------------------------------------------- /eventuate-messaging-kafka-consumer/src/test/java/io/eventuate/messaging/kafka/consumer/SwimlaneDispatcherTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.consumer; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateBinaryMessageEncoding; 4 | import io.eventuate.util.test.async.Eventually; 5 | import org.junit.jupiter.api.Assertions; 6 | import org.junit.jupiter.api.BeforeEach; 7 | import org.junit.jupiter.api.Test; 8 | 9 | import java.util.concurrent.Executors; 10 | import java.util.concurrent.atomic.AtomicInteger; 11 | import java.util.function.Consumer; 12 | 13 | public class SwimlaneDispatcherTest { 14 | 15 | private SwimlaneDispatcher swimlaneDispatcher; 16 | private AtomicInteger numberOfMessagesReceived; 17 | private Consumer<RawKafkaMessage> handler; 18 | 19 | @BeforeEach 20 | public void init() { 21 | swimlaneDispatcher = new
SwimlaneDispatcher("1", 1, Executors.newCachedThreadPool()); 22 | numberOfMessagesReceived = new AtomicInteger(0); 23 | } 24 | 25 | @Test 26 | public void shouldDispatchManyMessages() { 27 | int numberOfMessagesToSend = 5; 28 | 29 | createHandler(); 30 | 31 | sendMessages(numberOfMessagesToSend); 32 | assertMessageReceived(numberOfMessagesToSend); 33 | } 34 | 35 | @Test 36 | public void testShouldRestart() { 37 | int numberOfMessagesToSend = 5; 38 | 39 | createHandler(); 40 | 41 | sendMessages(numberOfMessagesToSend); 42 | assertDispatcherStopped(); 43 | sendMessages(numberOfMessagesToSend); 44 | assertMessageReceived(numberOfMessagesToSend * 2); 45 | } 46 | 47 | private void createHandler() { 48 | handler = msg -> { 49 | numberOfMessagesReceived.incrementAndGet(); 50 | try { 51 | Thread.sleep(50); 52 | } catch (InterruptedException e) { 53 | throw new RuntimeException(e); 54 | } 55 | }; 56 | } 57 | 58 | private void sendMessages(int numberOfMessagesToSend) { 59 | for (int i = 0; i < numberOfMessagesToSend; i++) { 60 | if (i > 0) { 61 | Assertions.assertTrue(swimlaneDispatcher.getRunning()); 62 | } 63 | swimlaneDispatcher.dispatch(new RawKafkaMessage("", EventuateBinaryMessageEncoding.stringToBytes("")), handler); 64 | } 65 | } 66 | 67 | private void assertMessageReceived(int numberOfMessagesToSend) { 68 | Eventually.eventually(() -> Assertions.assertEquals(numberOfMessagesToSend, numberOfMessagesReceived.get())); 69 | } 70 | 71 | private void assertDispatcherStopped() { 72 | Eventually.eventually(() -> Assertions.assertFalse(swimlaneDispatcher.getRunning())); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/test/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaContainerTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | import io.eventuate.common.testcontainers.EventuateZookeeperContainer; 4 | import io.eventuate.common.testcontainers.PropertyProvidingContainer; 5 | import io.eventuate.messaging.kafka.consumer.KafkaSubscription; 6 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 7 | import io.eventuate.messaging.kafka.spring.consumer.KafkaConsumerFactoryConfiguration; 8 | import io.eventuate.messaging.kafka.spring.consumer.MessageConsumerKafkaConfiguration; 9 | import org.junit.jupiter.api.Test; 10 | import org.springframework.beans.factory.annotation.Autowired; 11 | import org.springframework.boot.test.context.SpringBootTest; 12 | import org.springframework.context.annotation.Configuration; 13 | import org.springframework.context.annotation.Import; 14 | import org.springframework.test.context.DynamicPropertyRegistry; 15 | import org.springframework.test.context.DynamicPropertySource; 16 | import org.testcontainers.containers.Network; 17 | 18 | import java.util.Collections; 19 | import java.util.UUID; 20 | 21 | @SpringBootTest(classes = EventuateKafkaContainerTest.Config.class) 22 | public class EventuateKafkaContainerTest { 23 | 24 | public static Network network = Network.newNetwork(); 25 | 26 | public static EventuateZookeeperContainer zookeeper = new EventuateZookeeperContainer().withReuse(true) 27 | .withNetwork(network) 28 | .withNetworkAliases("zookeeper"); 29 | 30 | public static EventuateKafkaContainer kafka = 31 | EventuateKafkaContainer.makeFromDockerfile(zookeeper.getConnectionString()) 32 | .withNetwork(network) 33 | 
.withNetworkAliases("kafka") 34 | .withReuse(true); 35 | 36 | @DynamicPropertySource 37 | static void registerContainerProperties(DynamicPropertyRegistry registry) { 38 | PropertyProvidingContainer.startAndProvideProperties(registry, zookeeper, kafka); 39 | } 40 | 41 | @Configuration 42 | @Import({MessageConsumerKafkaConfiguration.class, KafkaConsumerFactoryConfiguration.class}) 43 | public static class Config { 44 | } 45 | 46 | @Autowired 47 | private MessageConsumerKafkaImpl messageConsumerKafka; 48 | 49 | @Test 50 | public void showStart() { 51 | System.out.println(kafka.getLogs()); 52 | KafkaSubscription s = messageConsumerKafka.subscribe(UUID.randomUUID().toString(), Collections.singleton(UUID.randomUUID().toString()), m -> { 53 | }); 54 | } 55 | 56 | 57 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/DefaultKafkaMessageConsumer.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 4 | import org.apache.kafka.clients.consumer.ConsumerRecords; 5 | import org.apache.kafka.clients.consumer.KafkaConsumer; 6 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 7 | import org.apache.kafka.common.PartitionInfo; 8 | import org.apache.kafka.common.TopicPartition; 9 | 10 | import java.time.Duration; 11 | import java.util.*; 12 | 13 | public class DefaultKafkaMessageConsumer implements KafkaMessageConsumer { 14 | 15 | private final KafkaConsumer<String, byte[]> delegate; 16 | 17 | public static KafkaMessageConsumer create(Properties properties) { 18 | return new DefaultKafkaMessageConsumer(new KafkaConsumer<>(properties)); 19 | } 20 | 21 | private DefaultKafkaMessageConsumer(KafkaConsumer<String, byte[]> delegate) { 22 | this.delegate = delegate; 23 | } 24 | 25 | @Override 26 | public void assign(Collection<TopicPartition> topicPartitions) { 27 | delegate.assign(topicPartitions); 28 | } 29 | 30 | @Override 31 | public void seekToEnd(Collection<TopicPartition> topicPartitions) { 32 | delegate.seekToEnd(topicPartitions); 33 | } 34 | 35 | @Override 36 | public long position(TopicPartition topicPartition) { 37 | return delegate.position(topicPartition); 38 | } 39 | 40 | @Override 41 | public void seek(TopicPartition topicPartition, long position) { 42 | delegate.seek(topicPartition, position); 43 | } 44 | 45 | @Override 46 | public void subscribe(List<String> topics) { 47 | delegate.subscribe(new ArrayList<>(topics)); 48 | } 49 | 50 | @Override 51 | public void subscribe(Collection<String> topics, ConsumerRebalanceListener callback) { 52 | delegate.subscribe(topics, callback); 53 | } 54 | 55 | @Override 56 | public void commitOffsets(Map<TopicPartition, OffsetAndMetadata> offsets) { 57 | delegate.commitSync(offsets); 58 | } 59 | 60 | @Override 61 | public List<PartitionInfo> partitionsFor(String topic) { 62 | return delegate.partitionsFor(topic); 63 | } 64 | 65 | @Override 66 | public ConsumerRecords<String, byte[]> poll(Duration duration) { 67 | return delegate.poll(duration); 68 | } 69 | 70 | @Override 71 | public void pause(Set<TopicPartition> partitions) { 72 | delegate.pause(partitions); 73 | } 74 | 75 | @Override 76 | public void resume(Set<TopicPartition> partitions) { 77 | delegate.resume(partitions); 78 | } 79 | 80 | @Override 81 | public void close() { 82 | delegate.close(); 83 | } 84 | 85 | @Override 86 | public void close(Duration duration) { 87 | delegate.close(duration); 88 | } 89 | 90 | } 91 |
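A note on the commit arithmetic shared by OffsetTrackerTest and OffsetTracker above: Kafka interprets a committed offset as the position of the next record to read, not the last record processed, which is why OffsetTracker adds OFFSET_ADJUSTMENT (1) when building offsets to commit and subtracts it again in noteOffsetsCommitted. The following standalone sketch (not repository code; the topic name, group id, and broker address are assumptions for illustration) shows that convention in a poll/commit loop against the DefaultKafkaMessageConsumer wrapper defined above:

import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaMessageConsumer;
import io.eventuate.messaging.kafka.basic.consumer.KafkaMessageConsumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

public class CommitSemanticsSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
    props.put("group.id", "commit-semantics-sketch"); // hypothetical group id
    props.put("enable.auto.commit", "false");         // we commit manually below
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

    KafkaMessageConsumer consumer = DefaultKafkaMessageConsumer.create(props);
    consumer.subscribe(List.of("example-topic"));     // hypothetical topic

    for (ConsumerRecord<String, byte[]> record : consumer.poll(Duration.ofMillis(500))) {
      // ... process the record ...
      // Commit record.offset() + 1: the committed value names the NEXT record to
      // read, which is exactly the OFFSET_ADJUSTMENT applied by
      // OffsetTracker.offsetsToCommit() and expected by makeExpectedOffsets()
      // in OffsetTrackerTest.
      consumer.commitOffsets(Collections.singletonMap(
          new TopicPartition(record.topic(), record.partition()),
          new OffsetAndMetadata(record.offset() + 1, "")));
    }
    consumer.close();
  }
}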
-------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/test/java/io/eventuate/messaging/kafka/basic/consumer/BackPressureManagerTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | import org.junit.jupiter.api.BeforeEach; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import java.util.Collections; 8 | import java.util.HashSet; 9 | import java.util.Set; 10 | 11 | import static java.util.Collections.emptySet; 12 | import static org.junit.jupiter.api.Assertions.assertEquals; 13 | 14 | public class BackPressureManagerTest { 15 | 16 | private BackPressureConfig config = new BackPressureConfig(); 17 | private BackPressureManager bpm; 18 | private Set<TopicPartition> twoTopicPartitions; 19 | private HashSet<TopicPartition> threeTopicPartitions; 20 | private TopicPartition thirdTopicPartition; 21 | private Set<TopicPartition> setOfThirdTopicPartition; 22 | 23 | @BeforeEach 24 | public void setUp() { 25 | this.config.setLow(5); 26 | this.config.setHigh(10); 27 | this.bpm = new BackPressureManager(config); 28 | this.twoTopicPartitions = new HashSet<>(); 29 | twoTopicPartitions.add(new TopicPartition("x", 0)); 30 | twoTopicPartitions.add(new TopicPartition("x", 1)); 31 | this.threeTopicPartitions = new HashSet<>(twoTopicPartitions); 32 | thirdTopicPartition = new TopicPartition("x", 2); 33 | threeTopicPartitions.add(thirdTopicPartition); 34 | setOfThirdTopicPartition = Collections.singleton(thirdTopicPartition); 35 | } 36 | 37 | @Test 38 | public void shouldNotApplyBackPressure() { 39 | BackPressureActions actions = bpm.update(twoTopicPartitions, 10); 40 | assertEmpty(actions); 41 | } 42 | 43 | private void assertEmpty(BackPressureActions actions) { 44 | assertEquals(emptySet(), actions.pause); 45 | assertEquals(emptySet(), actions.resume); 46 | } 47 | 48 | @Test 49 | public void shouldApplyBackPressure() { 50 | BackPressureActions actions = bpm.update(twoTopicPartitions, 11); 51 | assertActionsEqual(new BackPressureActions(twoTopicPartitions, emptySet()), actions); 52 | 53 | actions = bpm.update(setOfThirdTopicPartition, 11); 54 | assertActionsEqual(new BackPressureActions(setOfThirdTopicPartition, emptySet()), actions); 55 | 56 | actions = bpm.update(setOfThirdTopicPartition, 11); 57 | assertEmpty(actions); 58 | 59 | actions = bpm.update(setOfThirdTopicPartition, 6); 60 | assertEmpty(actions); 61 | 62 | actions = bpm.update(setOfThirdTopicPartition, 5); 63 | assertActionsEqual(new BackPressureActions(emptySet(), threeTopicPartitions), actions); 64 | 65 | actions = bpm.update(twoTopicPartitions, 10); 66 | assertEmpty(actions); 67 | 68 | } 69 | 70 | private void assertActionsEqual(BackPressureActions expected, BackPressureActions actual) { 71 | assertEquals(expected.pause, actual.pause); 72 | assertEquals(expected.resume, actual.resume); 73 | } 74 | 75 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/test/java/io/eventuate/messaging/kafka/common/sbe/SbeMessageSerdeTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.common.sbe; 2 | 3 | import org.agrona.ExpandableArrayBuffer; 4 | import org.junit.jupiter.api.Test; 5 | 6 | import java.util.Arrays; 7 | import java.util.stream.IntStream; 8 | 9 | import static org.junit.jupiter.api.Assertions.assertEquals; 10 | 11 | public
class SbeMessageSerdeTest { 12 | 13 | String[] keys = new String[]{"abcd", "key2"}; 14 | String[] values = new String[]{"Tim", "Value"}; 15 | 16 | @Test 17 | public void shouldDoSomething() { 18 | 19 | ExpandableArrayBuffer buffer = encode(); 20 | 21 | decode(buffer); 22 | } 23 | 24 | private ExpandableArrayBuffer encode() { 25 | ExpandableArrayBuffer buffer = new ExpandableArrayBuffer(1000); 26 | 27 | MessageHeaderEncoder messageHeaderEncoder = new MessageHeaderEncoder(); 28 | 29 | MultiMessageEncoder multiMessageEncoder = new MultiMessageEncoder().wrapAndApplyHeader(buffer, 0, messageHeaderEncoder); 30 | 31 | MultiMessageEncoder.MessagesEncoder messagesEncoder = multiMessageEncoder.messagesCount(keys.length); 32 | 33 | IntStream.range(0, keys.length).forEach(idx -> 34 | messagesEncoder 35 | .next().key(keys[idx]).value(values[idx]) 36 | ); 37 | 38 | 39 | int messageLength = MessageHeaderEncoder.ENCODED_LENGTH + multiMessageEncoder.encodedLength(); 40 | System.out.println("Encoded length=" + messageLength); 41 | 42 | // messageHeaderEncoder.wrap(buffer, 0).magicBytes(0, (short)10); 43 | 44 | ExpandableArrayBuffer buffer1 = new ExpandableArrayBuffer(1000); 45 | buffer1.putBytes(0, buffer, 0, messageLength); 46 | return buffer1; 47 | } 48 | 49 | private void decode(ExpandableArrayBuffer buffer) { 50 | MessageHeaderDecoder messageHeaderDecoder = new MessageHeaderDecoder(); 51 | 52 | messageHeaderDecoder.wrap(buffer, 0); 53 | 54 | final int templateId = messageHeaderDecoder.templateId(); 55 | final int actingBlockLength = messageHeaderDecoder.blockLength(); 56 | final int actingVersion = messageHeaderDecoder.version(); 57 | 58 | 59 | MultiMessageDecoder multiMessageDecoder = new MultiMessageDecoder().wrap(buffer, messageHeaderDecoder.encodedLength(), actingBlockLength, actingVersion); 60 | 61 | MultiMessageDecoder.MessagesDecoder messages = multiMessageDecoder.messages(); 62 | int count = messages.count(); 63 | 64 | assertEquals(keys.length, count); 65 | 66 | for (int i = 0; i < count; i++) { 67 | messages.next(); 68 | int keyLength = messages.keyLength(); 69 | byte[] keyBytes = new byte[keyLength]; 70 | messages.getKey(keyBytes, 0, keyLength); 71 | int valueLength = messages.valueLength(); 72 | byte[] valueBytes = new byte[valueLength]; 73 | messages.getValue(valueBytes, 0, valueLength); 74 | 75 | String key = new String(keyBytes); 76 | String value = new String(valueBytes); 77 | 78 | assertEquals(keys[i], key); 79 | assertEquals(values[i], value); 80 | 81 | 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-integration-test/src/test/java/io/eventuate/messaging/kafka/spring/basic/consumer/EventuateKafkaBasicConsumerSpringWithSwimlanePerTopicTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.AbstractEventuateKafkaBasicConsumerTest; 4 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 5 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory; 6 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 7 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl; 8 | import io.eventuate.messaging.kafka.consumer.SwimlanePerTopicPartition; 9 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducer; 10 | import
org.junit.jupiter.api.Test; 11 | import org.springframework.beans.factory.annotation.Autowired; 12 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 13 | import org.springframework.boot.test.context.SpringBootTest; 14 | import org.springframework.context.annotation.Bean; 15 | import org.springframework.context.annotation.Configuration; 16 | import org.springframework.context.annotation.Import; 17 | 18 | @SpringBootTest(classes = EventuateKafkaBasicConsumerSpringWithSwimlanePerTopicTest.Config.class, 19 | properties = "eventuate.local.kafka.consumer.backPressure.high=3") 20 | public class EventuateKafkaBasicConsumerSpringWithSwimlanePerTopicTest extends AbstractEventuateKafkaBasicConsumerTest { 21 | 22 | @Configuration 23 | @EnableAutoConfiguration 24 | @Import(EventuateKafkaBasicConsumerSpringTest.EventuateKafkaConsumerTestConfiguration.class) 25 | public static class Config { 26 | @Bean 27 | public SwimlanePerTopicPartition swimlanePerTopicPartition() { 28 | return new SwimlanePerTopicPartition(); 29 | } 30 | } 31 | 32 | @Autowired 33 | private EventuateKafkaConfigurationProperties kafkaProperties; 34 | 35 | @Autowired 36 | private EventuateKafkaConsumerConfigurationProperties consumerProperties; 37 | 38 | @Autowired 39 | private EventuateKafkaProducer producer; 40 | 41 | @Autowired 42 | private MessageConsumerKafkaImpl consumer; 43 | 44 | @Autowired 45 | private KafkaConsumerFactory kafkaConsumerFactory; 46 | @Override 47 | protected EventuateKafkaConfigurationProperties getKafkaProperties() { 48 | return kafkaProperties; 49 | } 50 | 51 | @Override 52 | protected EventuateKafkaConsumerConfigurationProperties getConsumerProperties() { 53 | return consumerProperties; 54 | } 55 | 56 | @Override 57 | protected EventuateKafkaProducer getProducer() { 58 | return producer; 59 | } 60 | 61 | @Override 62 | protected MessageConsumerKafkaImpl getConsumer() { 63 | return consumer; 64 | } 65 | 66 | @Override 67 | protected KafkaConsumerFactory getKafkaConsumerFactory() { 68 | return kafkaConsumerFactory; 69 | } 70 | 71 | @Test 72 | @Override 73 | public void shouldConsumeMessages() { 74 | super.shouldConsumeMessages(); 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-producer/src/main/java/io/eventuate/messaging/kafka/producer/EventuateKafkaProducer.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.producer; 2 | 3 | import io.eventuate.messaging.kafka.common.EventuateBinaryMessageEncoding; 4 | import org.apache.kafka.clients.producer.KafkaProducer; 5 | import org.apache.kafka.clients.producer.Producer; 6 | import org.apache.kafka.clients.producer.ProducerRecord; 7 | import org.apache.kafka.common.PartitionInfo; 8 | import org.apache.kafka.common.serialization.StringSerializer; 9 | 10 | import java.time.Duration; 11 | import java.util.List; 12 | import java.util.Properties; 13 | import java.util.concurrent.CompletableFuture; 14 | 15 | public class EventuateKafkaProducer { 16 | 17 | private final Producer<String, byte[]> producer; 18 | private final StringSerializer stringSerializer = new StringSerializer(); 19 | private final EventuateKafkaPartitioner eventuateKafkaPartitioner = new EventuateKafkaPartitioner(); 20 | 21 | public EventuateKafkaProducer(String bootstrapServers, 22 | EventuateKafkaProducerConfigurationProperties eventuateKafkaProducerConfigurationProperties) { 23 | 24 | Properties producerProps = new Properties(); 25 |
producerProps.put("bootstrap.servers", bootstrapServers); 26 | producerProps.put("enable.idempotence", "true"); 27 | producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 28 | producerProps.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer"); 29 | producerProps.putAll(eventuateKafkaProducerConfigurationProperties.getProperties()); 30 | producer = new KafkaProducer<>(producerProps); 31 | } 32 | 33 | public CompletableFuture<?> send(String topic, String key, String body) { 34 | return send(topic, key, EventuateBinaryMessageEncoding.stringToBytes(body)); 35 | } 36 | 37 | public CompletableFuture<?> send(String topic, int partition, String key, String body) { 38 | return send(topic, partition, key, EventuateBinaryMessageEncoding.stringToBytes(body)); 39 | } 40 | 41 | public CompletableFuture<?> send(String topic, String key, byte[] bytes) { 42 | return send(new ProducerRecord<>(topic, key, bytes)); 43 | } 44 | 45 | public CompletableFuture<?> send(String topic, int partition, String key, byte[] bytes) { 46 | return send(new ProducerRecord<>(topic, partition, key, bytes)); 47 | } 48 | 49 | private CompletableFuture<?> send(ProducerRecord<String, byte[]> producerRecord) { 50 | CompletableFuture<Object> result = new CompletableFuture<>(); 51 | producer.send(producerRecord, (metadata, exception) -> { 52 | if (exception == null) 53 | result.complete(metadata); 54 | else 55 | result.completeExceptionally(exception); 56 | }); 57 | 58 | return result; 59 | } 60 | 61 | public int partitionFor(String topic, String key) { 62 | return eventuateKafkaPartitioner.partition(topic, stringSerializer.serialize(topic, key), partitionsFor(topic)); 63 | } 64 | 65 | public List<PartitionInfo> partitionsFor(String topic) { 66 | return producer.partitionsFor(topic); 67 | } 68 | 69 | public void close() { 70 | producer.close(Duration.ofSeconds(1)); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | @rem SPDX-License-Identifier: Apache-2.0 17 | @rem 18 | 19 | @if "%DEBUG%"=="" @echo off 20 | @rem ########################################################################## 21 | @rem 22 | @rem Gradle startup script for Windows 23 | @rem 24 | @rem ########################################################################## 25 | 26 | @rem Set local scope for the variables with windows NT shell 27 | if "%OS%"=="Windows_NT" setlocal 28 | 29 | set DIRNAME=%~dp0 30 | if "%DIRNAME%"=="" set DIRNAME=. 31 | @rem This is normally unused 32 | set APP_BASE_NAME=%~n0 33 | set APP_HOME=%DIRNAME% 34 | 35 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 36 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 37 | 38 | @rem Add default JVM options here.
You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 39 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 40 | 41 | @rem Find java.exe 42 | if defined JAVA_HOME goto findJavaFromJavaHome 43 | 44 | set JAVA_EXE=java.exe 45 | %JAVA_EXE% -version >NUL 2>&1 46 | if %ERRORLEVEL% equ 0 goto execute 47 | 48 | echo. 1>&2 49 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 50 | echo. 1>&2 51 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 52 | echo location of your Java installation. 1>&2 53 | 54 | goto fail 55 | 56 | :findJavaFromJavaHome 57 | set JAVA_HOME=%JAVA_HOME:"=% 58 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 59 | 60 | if exist "%JAVA_EXE%" goto execute 61 | 62 | echo. 1>&2 63 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 64 | echo. 1>&2 65 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 66 | echo location of your Java installation. 1>&2 67 | 68 | goto fail 69 | 70 | :execute 71 | @rem Setup the command line 72 | 73 | set CLASSPATH= 74 | 75 | 76 | @rem Execute Gradle 77 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* 78 | 79 | :end 80 | @rem End local scope for the variables with windows NT shell 81 | if %ERRORLEVEL% equ 0 goto mainEnd 82 | 83 | :fail 84 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 85 | rem the _cmd.exe /c_ return code! 86 | set EXIT_CODE=%ERRORLEVEL% 87 | if %EXIT_CODE% equ 0 set EXIT_CODE=1 88 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% 89 | exit /b %EXIT_CODE% 90 | 91 | :mainEnd 92 | if "%OS%"=="Windows_NT" endlocal 93 | 94 | :omega 95 | -------------------------------------------------------------------------------- /gradle/gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | @rem SPDX-License-Identifier: Apache-2.0 17 | @rem 18 | 19 | @if "%DEBUG%"=="" @echo off 20 | @rem ########################################################################## 21 | @rem 22 | @rem Gradle startup script for Windows 23 | @rem 24 | @rem ########################################################################## 25 | 26 | @rem Set local scope for the variables with windows NT shell 27 | if "%OS%"=="Windows_NT" setlocal 28 | 29 | set DIRNAME=%~dp0 30 | if "%DIRNAME%"=="" set DIRNAME=. 31 | @rem This is normally unused 32 | set APP_BASE_NAME=%~n0 33 | set APP_HOME=%DIRNAME% 34 | 35 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 36 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 37 | 38 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
39 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 40 | 41 | @rem Find java.exe 42 | if defined JAVA_HOME goto findJavaFromJavaHome 43 | 44 | set JAVA_EXE=java.exe 45 | %JAVA_EXE% -version >NUL 2>&1 46 | if %ERRORLEVEL% equ 0 goto execute 47 | 48 | echo. 1>&2 49 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 50 | echo. 1>&2 51 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 52 | echo location of your Java installation. 1>&2 53 | 54 | goto fail 55 | 56 | :findJavaFromJavaHome 57 | set JAVA_HOME=%JAVA_HOME:"=% 58 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 59 | 60 | if exist "%JAVA_EXE%" goto execute 61 | 62 | echo. 1>&2 63 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 64 | echo. 1>&2 65 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 66 | echo location of your Java installation. 1>&2 67 | 68 | goto fail 69 | 70 | :execute 71 | @rem Setup the command line 72 | 73 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 74 | 75 | 76 | @rem Execute Gradle 77 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" ${mainClassName ?: entryPointArgs} %* 78 | 79 | :end 80 | @rem End local scope for the variables with windows NT shell 81 | if %ERRORLEVEL% equ 0 goto mainEnd 82 | 83 | :fail 84 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 85 | rem the _cmd.exe /c_ return code! 86 | set EXIT_CODE=%ERRORLEVEL% 87 | if %EXIT_CODE% equ 0 set EXIT_CODE=1 88 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% 89 | exit /b %EXIT_CODE% 90 | 91 | :mainEnd 92 | if "%OS%"=="Windows_NT" endlocal 93 | 94 | :omega 95 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/SwimlaneDispatcher.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.consumer; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import java.util.concurrent.Executor; 7 | import java.util.concurrent.LinkedBlockingQueue; 8 | import java.util.concurrent.atomic.AtomicBoolean; 9 | import java.util.function.Consumer; 10 | 11 | public class SwimlaneDispatcher { 12 | 13 | private static Logger logger = LoggerFactory.getLogger(SwimlaneDispatcher.class); 14 | 15 | private String subscriberId; 16 | private Integer swimlane; 17 | private Executor executor; 18 | 19 | private final LinkedBlockingQueue<QueuedMessage> queue = new LinkedBlockingQueue<>(); 20 | private AtomicBoolean running = new AtomicBoolean(false); 21 | 22 | private SwimlaneDispatcherBacklog consumerStatus = new SwimlaneDispatcherBacklog(queue); 23 | 24 | public SwimlaneDispatcher(String subscriberId, Integer swimlane, Executor executor) { 25 | this.subscriberId = subscriberId; 26 | this.swimlane = swimlane; 27 | this.executor = executor; 28 | } 29 | 30 | public boolean getRunning() { 31 | return running.get(); 32 | } 33 | 34 | public SwimlaneDispatcherBacklog dispatch(RawKafkaMessage message, Consumer<RawKafkaMessage> messageConsumer) { 35 | synchronized (queue) { 36 | QueuedMessage queuedMessage = new QueuedMessage(message, messageConsumer); 37 | queue.add(queuedMessage); 38 | logger.trace("added message to queue: {} {} {}", subscriberId, swimlane, message); 39 | if (running.compareAndSet(false, true)) { 40 | logger.trace("Stopped - attempting to process newly queued
message: {} {}", subscriberId, swimlane); 41 | processNextQueuedMessage(); 42 | } else 43 | logger.trace("Running - Not attempting to process newly queued message: {} {}", subscriberId, swimlane); 44 | } 45 | return consumerStatus; 46 | } 47 | 48 | private void processNextQueuedMessage() { 49 | executor.execute(this::processQueuedMessage); 50 | } 51 | 52 | class QueuedMessage { 53 | RawKafkaMessage message; 54 | Consumer<RawKafkaMessage> messageConsumer; 55 | 56 | public QueuedMessage(RawKafkaMessage message, Consumer<RawKafkaMessage> messageConsumer) { 57 | this.message = message; 58 | this.messageConsumer = messageConsumer; 59 | } 60 | } 61 | 62 | 63 | public void processQueuedMessage() { 64 | while (true) { 65 | QueuedMessage queuedMessage = getNextMessage(); 66 | if (queuedMessage == null) { 67 | logger.trace("No queued message for {} {}", subscriberId, swimlane); 68 | return; 69 | } else { 70 | logger.trace("Invoking handler for message for {} {} {}", subscriberId, swimlane, queuedMessage.message); 71 | try { 72 | queuedMessage.messageConsumer.accept(queuedMessage.message); 73 | } catch (RuntimeException e) { 74 | logger.error("Exception handling message - terminating", e); 75 | return; 76 | } 77 | } 78 | } 79 | } 80 | 81 | private QueuedMessage getNextMessage() { 82 | QueuedMessage queuedMessage = queue.poll(); 83 | if (queuedMessage != null) 84 | return queuedMessage; 85 | 86 | synchronized (queue) { 87 | queuedMessage = queue.poll(); 88 | if (queuedMessage == null) { 89 | running.compareAndSet(true, false); 90 | } 91 | return queuedMessage; 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/KafkaMessageProcessor.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.basic.consumer; 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerRecord; 4 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 5 | import org.apache.kafka.common.TopicPartition; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.util.HashSet; 10 | import java.util.Map; 11 | import java.util.Set; 12 | import java.util.concurrent.BlockingQueue; 13 | import java.util.concurrent.LinkedBlockingQueue; 14 | import java.util.concurrent.atomic.AtomicReference; 15 | 16 | /** 17 | * Processes a Kafka message and tracks the message offsets that have been successfully processed and can be committed 18 | */ 19 | public class KafkaMessageProcessor { 20 | private final Logger logger = LoggerFactory.getLogger(getClass()); 21 | 22 | private final String subscriberId; 23 | private final EventuateKafkaConsumerMessageHandler handler; 24 | private final OffsetTracker offsetTracker = new OffsetTracker(); 25 | 26 | private final BlockingQueue<ConsumerRecord<String, byte[]>> processedRecords = new LinkedBlockingQueue<>(); 27 | private final AtomicReference<KafkaMessageProcessorFailedException> failed = new AtomicReference<>(); 28 | 29 | private final Set<MessageConsumerBacklog> consumerBacklogs = new HashSet<>(); 30 | 31 | public KafkaMessageProcessor(String subscriberId, EventuateKafkaConsumerMessageHandler handler) { 32 | this.subscriberId = subscriberId; 33 | this.handler = handler; 34 | } 35 | 36 | 37 | public void process(ConsumerRecord<String, byte[]> record) { 38 | throwFailureException(); 39 | offsetTracker.noteUnprocessed(new TopicPartition(record.topic(), record.partition()), record.offset()); 40 | MessageConsumerBacklog consumerBacklog = handler.apply(record, (result, t) -> { 41 |
handleMessagingHandlingOutcome(record, t); 42 | }); 43 | if (consumerBacklog != null) 44 | consumerBacklogs.add(consumerBacklog); 45 | } 46 | 47 | private void handleMessagingHandlingOutcome(ConsumerRecord<String, byte[]> record, Throwable t) { 48 | if (t != null) { 49 | logger.error("Got exception: ", t); 50 | failed.set(new KafkaMessageProcessorFailedException(t)); 51 | } else { 52 | logger.debug("Adding processed record to queue {} {}", subscriberId, record.offset()); 53 | processedRecords.add(record); 54 | } 55 | } 56 | 57 | void throwFailureException() { 58 | if (failed.get() != null) 59 | throw failed.get(); 60 | } 61 | 62 | public Map<TopicPartition, OffsetAndMetadata> offsetsToCommit() { 63 | int count = 0; 64 | while (true) { 65 | ConsumerRecord<String, byte[]> record = processedRecords.poll(); 66 | if (record == null) 67 | break; 68 | count++; 69 | offsetTracker.noteProcessed(new TopicPartition(record.topic(), record.partition()), record.offset()); 70 | } 71 | logger.trace("removed {} {} processed records from queue", subscriberId, count); 72 | return offsetTracker.offsetsToCommit(); 73 | } 74 | 75 | public void noteOffsetsCommitted(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) { 76 | offsetTracker.noteOffsetsCommitted(offsetsToCommit); 77 | } 78 | 79 | public OffsetTracker getPending() { 80 | return offsetTracker; 81 | } 82 | 83 | public int backlog() { 84 | return consumerBacklogs.stream().mapToInt(MessageConsumerBacklog::size).sum(); 85 | } 86 | 87 | } 88 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-testcontainers/src/main/java/io/eventuate/messaging/kafka/testcontainers/EventuateKafkaContainer.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.testcontainers; 2 | 3 | import com.github.dockerjava.api.command.InspectContainerResponse; 4 | import io.eventuate.common.testcontainers.ContainerUtil; 5 | import io.eventuate.common.testcontainers.EventuateGenericContainer; 6 | import io.eventuate.common.testcontainers.PropertyProvidingContainer; 7 | import org.jetbrains.annotations.NotNull; 8 | import org.testcontainers.containers.wait.strategy.Wait; 9 | import org.testcontainers.images.builder.ImageFromDockerfile; 10 | 11 | import java.io.IOException; 12 | import java.nio.file.FileSystems; 13 | import java.nio.file.Path; 14 | import java.util.function.BiConsumer; 15 | import java.util.function.Supplier; 16 | 17 | public class EventuateKafkaContainer extends EventuateGenericContainer<EventuateKafkaContainer> implements PropertyProvidingContainer { 18 | 19 | private final String zookeeperConnect; 20 | 21 | public EventuateKafkaContainer(String zookeeperConnect) { 22 | super(ContainerUtil.findImage("eventuateio/eventuate-kafka", "eventuate.messaging.kafka.version.properties")); 23 | this.zookeeperConnect = zookeeperConnect; 24 | withConfiguration(); 25 | } 26 | 27 | public EventuateKafkaContainer(String image, String zookeeperConnect) { 28 | super(image); 29 | this.zookeeperConnect = zookeeperConnect; 30 | withConfiguration(); 31 | } 32 | 33 | @NotNull 34 | public static EventuateKafkaContainer makeFromDockerfile(String zookeeperConnect) { 35 | return new EventuateKafkaContainer(FileSystems.getDefault().getPath("../kafka/Dockerfile"), zookeeperConnect); 36 | } 37 | 38 | @Override 39 | protected int getPort() { 40 | return 29092; 41 | } 42 | 43 | public EventuateKafkaContainer(Path path, String zookeeperConnect) { 44 | super(new ImageFromDockerfile().withDockerfile(path)); 45 | this.zookeeperConnect = zookeeperConnect; 46 | withConfiguration(); 47 | } 48 | 49 |
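// The listener topology configured below: LC carries in-network and inter-broker
// traffic on kafka:29092, while LX is the listener advertised to the host as
// localhost:9092. Because the host-visible port is only known once the container
// is running, containerIsStarted() re-runs kafka-configs.sh to rewrite the
// advertised LX address to the dynamically mapped port, so host-side clients can
// connect through Testcontainers' port mapping.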
private void withConfiguration() { 50 | withEnv("KAFKA_LISTENERS", "LC://kafka:29092,LX://kafka:9092"); 51 | withEnv("KAFKA_ADVERTISED_LISTENERS", "LC://kafka:29092,LX://localhost:9092"); 52 | withEnv("KAFKA_LISTENER_SECURITY_PROTOCOL_MAP", "LC:PLAINTEXT,LX:PLAINTEXT"); 53 | withEnv("KAFKA_INTER_BROKER_LISTENER_NAME", "LC"); 54 | withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", "1"); 55 | withEnv("KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS", "60000"); 56 | withEnv("KAFKA_ZOOKEEPER_CONNECT", zookeeperConnect); 57 | withExposedPorts(9092); 58 | waitingFor(Wait.forHealthcheck()); 59 | } 60 | 61 | @Override 62 | public void start() { 63 | super.start(); 64 | } 65 | 66 | @Override 67 | public void registerProperties(BiConsumer<String, Supplier<String>> registry) { 68 | registry.accept("eventuatelocal.kafka.bootstrap.servers", 69 | () -> "localhost:%s".formatted(getFirstMappedPort())); 70 | } 71 | 72 | protected void containerIsStarted(InspectContainerResponse containerInfo) { 73 | super.containerIsStarted(containerInfo); 74 | execute("bin/kafka-configs.sh", "--alter", "--bootstrap-server", "kafka:29092", "--entity-type", "brokers", "--entity-name", "0", "--add-config", "advertised.listeners=[LX://localhost:%s,LC://kafka:29092]".formatted(getMappedPort(9092))); 75 | } 76 | 77 | private void execute(String... command) { 78 | ExecResult result; 79 | try { 80 | result = execInContainer(command); 81 | } catch (IOException | InterruptedException e) { 82 | throw new RuntimeException(e); 83 | } 84 | if (result.getExitCode() != 0) { 85 | throw new IllegalStateException(result.toString()); 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-leadership/src/test/java/io/eventuate/coordination/leadership/zookeeper/KafkaLeadershipTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.coordination.leadership.zookeeper; 2 | 3 | import io.eventuate.common.testcontainers.EventuateZookeeperContainer; 4 | import io.eventuate.common.testcontainers.PropertyProvidingContainer; 5 | import io.eventuate.common.testcontainers.ReusableNetworkFactory; 6 | import io.eventuate.coordination.leadership.LeaderSelectedCallback; 7 | import io.eventuate.coordination.leadership.tests.AbstractLeadershipTest; 8 | import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory; 9 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory; 10 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 11 | import io.eventuate.messaging.kafka.spring.common.EventuateKafkaPropertiesConfiguration; 12 | import io.eventuate.messaging.kafka.testcontainers.EventuateKafkaContainer; 13 | import org.junit.jupiter.api.BeforeEach; 14 | import org.springframework.beans.factory.annotation.Autowired; 15 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 16 | import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; 17 | import org.springframework.boot.test.context.SpringBootTest; 18 | import org.springframework.context.annotation.Bean; 19 | import org.springframework.context.annotation.Configuration; 20 | import org.springframework.context.annotation.Import; 21 | import org.springframework.test.context.DynamicPropertyRegistry; 22 | import org.springframework.test.context.DynamicPropertySource; 23 | import org.testcontainers.containers.Network; 24 | 25 | import java.util.Collections; 26 | import java.util.UUID; 27 | 28 |
@SpringBootTest(classes = KafkaLeadershipTest.Config.class) 29 | public class KafkaLeadershipTest extends AbstractLeadershipTest { 30 | 31 | @Configuration 32 | @EnableAutoConfiguration 33 | @Import({EventuateKafkaPropertiesConfiguration.class}) 34 | public static class Config { 35 | 36 | @Bean 37 | @ConditionalOnMissingBean 38 | public KafkaConsumerFactory kafkaConsumerFactory() { 39 | return new DefaultKafkaConsumerFactory(); 40 | } 41 | 42 | } 43 | 44 | @Autowired 45 | private KafkaConsumerFactory kafkaConsumerFactory; 46 | 47 | @Autowired 48 | private EventuateKafkaConfigurationProperties eventuateKafkaConfigurationProperties; 49 | 50 | private static String networkName = "testnetwork"; 51 | public static Network network = ReusableNetworkFactory.createNetwork(networkName); 52 | 53 | public static EventuateZookeeperContainer zookeeper = new EventuateZookeeperContainer().withReuse(true) 54 | .withNetwork(network) 55 | .withNetworkAliases("zookeeper"); 56 | 57 | public static EventuateKafkaContainer kafka = 58 | new EventuateKafkaContainer("eventuateio/eventuate-kafka:0.19.0.BUILD-SNAPSHOT", zookeeper.getConnectionString()) 59 | .withNetwork(network) 60 | .withNetworkAliases("kafka") 61 | .withReuse(true); 62 | 63 | @DynamicPropertySource 64 | static void registerContainerProperties(DynamicPropertyRegistry registry) { 65 | PropertyProvidingContainer.startAndProvideProperties(registry, zookeeper, kafka); 66 | } 67 | 68 | private String lockId; 69 | private int leaderIdx; 70 | 71 | @BeforeEach 72 | public void init() { 73 | leaderIdx = 0; 74 | lockId = "/zk/lock/test/%s".formatted(UUID.randomUUID().toString()); 75 | } 76 | 77 | @Override 78 | protected KafkaLeaderSelector createLeaderSelector(LeaderSelectedCallback leaderSelectedCallback, Runnable leaderRemovedCallback) { 79 | return new KafkaLeaderSelector(lockId, 80 | "random-tbd-leader-id-" + leaderIdx++, 81 | leaderSelectedCallback, 82 | leaderRemovedCallback, 83 | eventuateKafkaConfigurationProperties.getBootstrapServers(), 84 | Collections.emptyMap(), kafkaConsumerFactory); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-leadership/src/main/java/io/eventuate/coordination/leadership/zookeeper/KafkaLeaderSelector.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.coordination.leadership.zookeeper; 2 | 3 | import io.eventuate.coordination.leadership.EventuateLeaderSelector; 4 | import io.eventuate.coordination.leadership.LeaderSelectedCallback; 5 | import io.eventuate.messaging.kafka.basic.consumer.ConsumerPropertiesFactory; 6 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory; 7 | import io.eventuate.messaging.kafka.basic.consumer.KafkaMessageConsumer; 8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | import java.time.Duration; 14 | import java.util.*; 15 | import java.util.concurrent.Executor; 16 | import java.util.concurrent.Executors; 17 | 18 | public class KafkaLeaderSelector implements EventuateLeaderSelector { 19 | private final Logger logger = LoggerFactory.getLogger(getClass()); 20 | 21 | private final KafkaConsumerFactory kafkaConsumerFactory; 22 | private final Map<String, String> consumerProperties; 23 | private final String lockId; 24 | private final String leaderId; 25 | private final LeaderSelectedCallback leaderSelectedCallback;
26 | private final Runnable leaderRemovedCallback; 27 | private final String bootstrapServer; 28 | private volatile KafkaMessageConsumer consumer; 29 | 30 | private final Executor executor = Executors.newCachedThreadPool(); 31 | private Timer timer; 32 | 33 | 34 | public KafkaLeaderSelector(String lockId, 35 | LeaderSelectedCallback leaderSelectedCallback, 36 | Runnable leaderRemovedCallback, 37 | String bootstrapServer, KafkaConsumerFactory kafkaConsumerFactory) { 38 | 39 | this(lockId, UUID.randomUUID().toString(), leaderSelectedCallback, leaderRemovedCallback, bootstrapServer, Collections.emptyMap(), kafkaConsumerFactory); 40 | } 41 | 42 | public KafkaLeaderSelector(String lockId, 43 | String leaderId, 44 | LeaderSelectedCallback leaderSelectedCallback, 45 | Runnable leaderRemovedCallback, String bootstrapServer, Map<String, String> consumerProperties, KafkaConsumerFactory kafkaConsumerFactory) { 46 | this.lockId = lockId; 47 | this.leaderId = leaderId; 48 | this.leaderSelectedCallback = leaderSelectedCallback; 49 | this.leaderRemovedCallback = leaderRemovedCallback; 50 | this.bootstrapServer = bootstrapServer; 51 | this.consumerProperties = consumerProperties; 52 | this.kafkaConsumerFactory = kafkaConsumerFactory; 53 | } 54 | 55 | @Override 56 | public void start() { 57 | logger.info("Starting leader selector: {}", leaderId); 58 | 59 | Properties consumerProperties = ConsumerPropertiesFactory.makeDefaultConsumerProperties(bootstrapServer, lockId); 60 | consumerProperties.putAll(this.consumerProperties); 61 | 62 | consumer = kafkaConsumerFactory.makeConsumer(lockId, consumerProperties); 63 | 64 | ConsumerRebalanceListener rebalanceListener = new ConsumerRebalanceListener() { 65 | 66 | private boolean selected = false; 67 | 68 | @Override 69 | public void onPartitionsRevoked(Collection<TopicPartition> partitions) { 70 | logger.info("Partitions revoked: {}", partitions); 71 | if (partitions.stream().anyMatch(tp -> tp.partition() == 0)) { 72 | executor.execute(this::noteLeadershipRevoked); 73 | } 74 | } 75 | 76 | @Override 77 | public void onPartitionsAssigned(Collection<TopicPartition> partitions) { 78 | logger.info("Partitions assigned: {}", partitions); 79 | if (partitions.stream().anyMatch(tp -> tp.partition() == 0)) { 80 | executor.execute(this::noteLeadershipSelected); 81 | selected = true; 82 | } 83 | 84 | } 85 | 86 | private void noteLeadershipSelected() { 87 | logger.info("Leadership selected {}", leaderId); 88 | leaderSelectedCallback.run(() -> { 89 | if (selected) { 90 | selected = false; 91 | leaderRemovedCallback.run(); 92 | stop(); 93 | start(); 94 | } 95 | }); 96 | } 97 | 98 | private void noteLeadershipRevoked() { 99 | logger.info("Leadership revoked {}", leaderId); 100 | if (selected) { 101 | selected = false; 102 | leaderRemovedCallback.run(); 103 | } 104 | } 105 | }; 106 | 107 | consumer.subscribe(Collections.singletonList("eventuate-leadership-coordination"), rebalanceListener); 108 | 109 | timer = new Timer(); 110 | timer.scheduleAtFixedRate(new TimerTask() { 111 | @Override 112 | public void run() { 113 | synchronized (consumer) { 114 | consumer.poll(Duration.ofMillis(5000)); 115 | } 116 | } 117 | }, 0L, 5000L); 118 | 119 | logger.info("Started leader selector {}", leaderId); 120 | } 121 | 122 | @Override 123 | public void stop() { 124 | logger.info("Closing leader selector, leaderId : {}", leaderId); 125 | timer.cancel(); 126 | synchronized (consumer) { 127 | consumer.close(); 128 | } 129 | logger.info("Closed leader selector, leaderId : {}", leaderId); 130 | } 131 | } 132 |
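KafkaLeaderSelector above piggybacks leader election on Kafka's consumer-group protocol: every candidate subscribes to the eventuate-leadership-coordination topic using the lockId as its group id, and whichever instance is assigned partition 0 during a rebalance becomes the leader. A minimal usage sketch follows (not repository code; the lock id and broker address are assumptions, and it relies on LeaderSelectedCallback being a functional interface whose argument is a handle for voluntarily relinquishing leadership, as suggested by the leaderSelectedCallback.run(() -> {...}) call above):

import io.eventuate.coordination.leadership.LeaderSelectedCallback;
import io.eventuate.coordination.leadership.zookeeper.KafkaLeaderSelector;
import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory;

public class LeaderElectionSketch {
  public static void main(String[] args) {
    // Called when this instance is assigned partition 0; the argument can be
    // invoked later to give up leadership voluntarily.
    LeaderSelectedCallback onSelected = relinquish -> System.out.println("now the leader");
    Runnable onRemoved = () -> System.out.println("leadership lost");

    // The five-argument constructor above generates a random leaderId and uses
    // default consumer properties (see the delegating constructor).
    KafkaLeaderSelector selector = new KafkaLeaderSelector(
        "example-service-lock",        // lockId; doubles as the consumer group id
        onSelected,
        onRemoved,
        "localhost:9092",              // assumed broker address
        new DefaultKafkaConsumerFactory());

    selector.start(); // subscribes and polls on a 5-second timer
    // ... later, on shutdown: selector.stop();
  }
}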
--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | orbs:
3 | #  eventuate-gradle-build-and-test: "eventuate_io/eventuate-gradle-build-and-test@0.2.9"
4 | # Some features used here, e.g. set-multi-arch-image-env-vars.sh, are unsupported by the above orb
5 |   build-and-test:
6 |     jobs:
7 |       build-and-test:
8 |         parameters:
9 |           script:
10 |             description: the script to execute
11 |             type: string
12 |           resource_class:
13 |             description: the resource class
14 |             type: string
15 |             default: 'medium'
16 |           skip_multi_arch_env_vars:
17 |             description: if set don't set multi-arch env vars
18 |             type: string
19 |             default: ''
20 |         machine:
21 |           image: ubuntu-2204:2024.01.1
22 |         resource_class: << parameters.resource_class >>
23 |         working_directory: ~/eventuate-messaging-kafka
24 |         steps:
25 |           - checkout
26 |           - restore_cache:
27 |               keys:
28 |                 - eventuate-messaging-kafka-{{ checksum "gradle.properties" }}-{{ checksum "gradle/wrapper/gradle-wrapper.properties" }}
29 |                 - eventuate-messaging-kafka-
30 |           - run:
31 |               command: |
32 |                 ./gradlew testClasses aggregateJavaDocs
33 |           - save_cache:
34 |               paths:
35 |                 - ~/.gradle
36 |               key: eventuate-messaging-kafka-{{ checksum "gradle.properties" }}-{{ checksum "gradle/wrapper/gradle-wrapper.properties" }}
37 |           - run:
38 |               command: |
39 |                 if [ -z "<< parameters.skip_multi_arch_env_vars >>" ] ; then
40 |                   . set-multi-arch-image-env-vars.sh
41 |                 fi
42 |                 << parameters.script >>
43 |           - run:
44 |               name: Save test results
45 |               command: ./.circleci/save-containers-and-tests.sh
46 |               when: always
47 |           - store_test_results:
48 |               path: ~/junit
49 |           - store_artifacts:
50 |               path: ~/junit
51 |           - store_artifacts:
52 |               path: ~/container-logs
53 | jobs:
54 |   build-multi-arch-images:
55 |     docker:
56 |       - image: cimg/base:stable
57 |     working_directory: ~/eventuate-common
58 |     steps:
59 |       - checkout
60 |       - setup_remote_docker
61 |       - run:
62 |           name: docker buildx build
63 |           command: |
64 |             . set-multi-arch-image-env-vars.sh
65 |             docker context create tls-env
66 |             docker buildx create tls-env --use
67 | 
68 |             # failed to solve: process "/dev/.buildkit_qemu_emulator
69 |             # https://github.com/docker/buildx/issues/493#issuecomment-754834977
70 |             # https://github.com/tonistiigi/binfmt#installing-emulators
71 | 
72 |             docker run --privileged --rm tonistiigi/binfmt:qemu-v6.2.0 --install arm64,arm
73 | 
74 |             ./build-multi-arch-images.sh
75 |   deploy-multi-arch:
76 |     docker:
77 |       - image: cimg/base:stable
78 |     working_directory: ~/eventuate-common
79 |     steps:
80 |       - checkout
81 |       - run:
82 |           command: |
83 |             . set-multi-arch-image-env-vars.sh
84 |             ./deploy-multi-arch.sh
85 | workflows:
86 |   version: 2
87 |   build-test-and-deploy:
88 |     jobs:
89 |       - build-and-test/build-and-test:
90 |           name: build-and-test-intel
91 |           skip_multi_arch_env_vars: "true"
92 |           script: |
93 |             docker context create tls-env
94 |             docker buildx create tls-env --use
95 |             docker run --privileged --rm tonistiigi/binfmt:qemu-v6.2.0 --install arm64,arm
96 |             export DOCKER_HOST_NAME=$(hostname)
97 |             ./build-and-test-all-locally.sh
98 |       - build-and-test/build-and-test:
99 |           name: build-and-test-arm
100 |           resource_class: arm.medium
101 |           skip_multi_arch_env_vars: "true"
102 |           script: |
103 |             docker context create tls-env
104 |             docker buildx create tls-env --use
105 |             docker run --privileged --rm tonistiigi/binfmt:qemu-v6.2.0 --install amd64
106 |             hostname -I
107 |             export DOCKER_HOST_NAME=$(hostname -I | sed -e 's/ .*//g')
108 |             echo $DOCKER_HOST_NAME
109 |             ./build-and-test-all-locally.sh
110 |       - build-multi-arch-images:
111 |           context:
112 |             - publish
113 |           requires:
114 |             - build-and-test-intel
115 |             - build-and-test-arm
116 |       - build-and-test/build-and-test:
117 |           name: test-multi-arch-intel
118 |           script: ./build-and-test-all.sh
119 |           requires:
120 |             - build-multi-arch-images
121 |       - build-and-test/build-and-test:
122 |           name: test-multi-arch-arm
123 |           script: ./build-and-test-all.sh
124 |           resource_class: arm.medium
125 |           requires:
126 |             - build-multi-arch-images
127 |       - deploy-multi-arch:
128 |           context:
129 |             - publish
130 |           requires:
131 |             - test-multi-arch-intel
132 |             - test-multi-arch-arm
133 |       - build-and-test/build-and-test:
134 |           name: deploy-artifacts
135 |           script: ./deploy-artifacts.sh
136 |           context:
137 |             - publish
138 |           requires:
139 |             - test-multi-arch-intel
140 |             - test-multi-arch-arm
141 | 
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-consumer/src/main/java/io/eventuate/messaging/kafka/consumer/MessageConsumerKafkaImpl.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.consumer;
2 | 
3 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumer;
4 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties;
5 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerMessageHandler;
6 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory;
7 | import io.eventuate.messaging.kafka.common.EventuateBinaryMessageEncoding;
8 | import io.eventuate.messaging.kafka.common.EventuateKafkaMultiMessageConverter;
9 | import io.eventuate.messaging.kafka.common.EventuateKafkaMultiMessage;
10 | import io.eventuate.messaging.partitionmanagement.CommonMessageConsumer;
11 | import org.apache.kafka.clients.consumer.ConsumerRecord;
12 | import org.apache.kafka.common.TopicPartition;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 | 
16 | import java.util.*;
17 | import java.util.concurrent.CompletableFuture;
18 | import java.util.concurrent.Executors;
19 | import java.util.function.BiConsumer;
20 | import java.util.stream.Collectors;
21 | 
22 | public class MessageConsumerKafkaImpl implements CommonMessageConsumer {
23 | 
24 |   private final Logger logger = LoggerFactory.getLogger(getClass());
25 | 
26 |   private final String id = UUID.randomUUID().toString();
27 | 
28 |   private final String bootstrapServers;
29 |   private final List<EventuateKafkaConsumer> consumers = new ArrayList<>();
30 |   private final EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties;
31 |   private final KafkaConsumerFactory kafkaConsumerFactory;
32 |   private final EventuateKafkaMultiMessageConverter eventuateKafkaMultiMessageConverter = new EventuateKafkaMultiMessageConverter();
33 |   private final TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping;
34 | 
35 |   public MessageConsumerKafkaImpl(String bootstrapServers,
36 |                                   EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties,
37 |                                   KafkaConsumerFactory kafkaConsumerFactory, TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping) {
38 |     this.bootstrapServers = bootstrapServers;
39 |     this.eventuateKafkaConsumerConfigurationProperties = eventuateKafkaConsumerConfigurationProperties;
40 |     this.kafkaConsumerFactory = kafkaConsumerFactory;
41 |     this.partitionToSwimLaneMapping = partitionToSwimLaneMapping;
42 |   }
43 | 
44 |   public KafkaSubscription subscribe(String subscriberId, Set<String> channels, KafkaMessageHandler handler) {
45 |     return subscribeWithReactiveHandler(subscriberId, channels, kafkaMessage -> {
46 |       handler.accept(kafkaMessage);
47 |       return CompletableFuture.completedFuture(null);
48 |     });
49 |   }
50 | 
51 |   public KafkaSubscription subscribeWithReactiveHandler(String subscriberId, Set<String> channels, ReactiveKafkaMessageHandler handler) {
52 | 
53 |     SwimlaneBasedDispatcher swimlaneBasedDispatcher = new SwimlaneBasedDispatcher(subscriberId, Executors.newCachedThreadPool(), partitionToSwimLaneMapping);
54 | 
55 |     EventuateKafkaConsumerMessageHandler kcHandler = (record, callback) -> {
56 |       if (eventuateKafkaMultiMessageConverter.isMultiMessage(record.value())) {
57 |         return handleBatch(record, swimlaneBasedDispatcher, callback, handler);
58 |       } else {
59 |         return swimlaneBasedDispatcher.dispatch(new RawKafkaMessage(record.key(), record.value()), new TopicPartition(record.topic(), record.partition()), message -> handle(message, callback, handler));
60 |       }
61 |     };
62 | 
63 |     EventuateKafkaConsumer kc = new EventuateKafkaConsumer(subscriberId,
64 |             kcHandler,
65 |             new ArrayList<>(channels),
66 |             bootstrapServers,
67 |             eventuateKafkaConsumerConfigurationProperties,
68 |             kafkaConsumerFactory);
69 | 
70 |     consumers.add(kc);
71 | 
72 |     kc.start();
73 | 
74 |     return new KafkaSubscription(() -> {
75 |       kc.stop();
76 |       consumers.remove(kc);
77 |     });
78 |   }
79 | 
80 |   private SwimlaneDispatcherBacklog handleBatch(ConsumerRecord<String, byte[]> record,
81 |                                                 SwimlaneBasedDispatcher swimlaneBasedDispatcher,
82 |                                                 BiConsumer<Void, Throwable> callback,
83 |                                                 ReactiveKafkaMessageHandler handler) {
84 |     return eventuateKafkaMultiMessageConverter
85 |             .convertBytesToMessages(record.value())
86 |             .getMessages()
87 |             .stream()
88 |             .map(EventuateKafkaMultiMessage::getValue)
89 |             .map(KafkaMessage::new)
90 |             .map(kafkaMessage ->
91 |                     swimlaneBasedDispatcher.dispatch(new RawKafkaMessage(record.key(), record.value()), new TopicPartition(record.topic(), record.partition()), message -> handle(message, callback, handler)))
92 |             .reduce((first, second) -> second)
93 |             .get();
94 |   }
95 | 
96 |   private void handle(RawKafkaMessage message, BiConsumer<Void, Throwable> callback, ReactiveKafkaMessageHandler kafkaMessageHandler) {
97 |     try {
98 |       kafkaMessageHandler
99 |               .apply(new KafkaMessage(EventuateBinaryMessageEncoding.bytesToString(message.getPayload())))
100 |               .get();
101 |     } catch (RuntimeException e) {
102 |       callback.accept(null, e);
103 |       throw e;
104 |     } catch (Throwable e) {
105 |       callback.accept(null, e);
106 |       throw new RuntimeException(e);
107 |     }
108 |     callback.accept(null, null);
109 |   }
110 | 
111 |   @Override
112 |   public void close() {
113 |     consumers.forEach(EventuateKafkaConsumer::stop);
114 |   }
115 | 
116 |   public String getId() {
117 |     return id;
118 |   }
119 | }
120 | 
--------------------------------------------------------------------------------
/kafka/config/server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements.  See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.  You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # see kafka.server.KafkaConfig for additional details and defaults
16 | 
17 | ############################# Server Basics #############################
18 | 
19 | # The id of the broker. This must be set to a unique integer for each broker.
20 | broker.id=0
21 | 
22 | ############################# Socket Server Settings #############################
23 | 
24 | # The address the socket server listens on. It will get the value returned from
25 | # java.net.InetAddress.getCanonicalHostName() if not configured.
26 | #   FORMAT:
27 | #     listeners = security_protocol://host_name:port
28 | #   EXAMPLE:
29 | #     listeners = PLAINTEXT://your.host.name:9092
30 | #listeners=PLAINTEXT://:9092
31 | 
32 | # Hostname and port the broker will advertise to producers and consumers. If not set,
33 | # it uses the value for "listeners" if configured.  Otherwise, it will use the value
34 | # returned from java.net.InetAddress.getCanonicalHostName().
35 | 
36 | # still cached
37 | 
38 | listeners=${KAFKA_LISTENERS}
39 | advertised.listeners=${KAFKA_ADVERTISED_LISTENERS}
40 | listener.security.protocol.map=${KAFKA_LISTENER_SECURITY_PROTOCOL_MAP}
41 | inter.broker.listener.name=${KAFKA_INTER_BROKER_LISTENER_NAME}
42 | offsets.topic.replication.factor=${KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR}
43 | 
44 | 
45 | # The number of threads handling network requests
46 | num.network.threads=3
47 | 
48 | # The number of threads doing disk I/O
49 | num.io.threads=8
50 | 
51 | # The send buffer (SO_SNDBUF) used by the socket server
52 | socket.send.buffer.bytes=102400
53 | 
54 | # The receive buffer (SO_RCVBUF) used by the socket server
55 | socket.receive.buffer.bytes=102400
56 | 
57 | # The maximum size of a request that the socket server will accept (protection against OOM)
58 | socket.request.max.bytes=104857600
59 | 
60 | 
61 | ############################# Log Basics #############################
62 | 
63 | # A comma separated list of directories under which to store log files
64 | log.dirs=${KAFKA_LOG_DIRS}
65 | 
66 | # The default number of log partitions per topic. More partitions allow greater
67 | # parallelism for consumption, but this will also result in more files across
68 | # the brokers.
69 | num.partitions=2
70 | 
71 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
72 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
73 | num.recovery.threads.per.data.dir=1
74 | 
75 | ############################# Log Flush Policy #############################
76 | 
77 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
78 | # the OS cache lazily. The following configurations control the flush of data to disk.
79 | # There are a few important trade-offs here:
80 | #    1. Durability: Unflushed data may be lost if you are not using replication.
81 | #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
82 | #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
83 | # The settings below allow one to configure the flush policy to flush data after a period of time or
84 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
85 | 
86 | # The number of messages to accept before forcing a flush of data to disk
87 | #log.flush.interval.messages=10000
88 | 
89 | # The maximum amount of time a message can sit in a log before we force a flush
90 | #log.flush.interval.ms=1000
91 | 
92 | ############################# Log Retention Policy #############################
93 | 
94 | # The following configurations control the disposal of log segments. The policy can
95 | # be set to delete segments after a period of time, or after a given size has accumulated.
96 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
97 | # from the end of the log.
98 | 
99 | # The minimum age of a log file to be eligible for deletion
100 | log.retention.hours=168
101 | 
102 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
103 | # segments don't drop below log.retention.bytes.
104 | #log.retention.bytes=1073741824
105 | 
106 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
107 | log.segment.bytes=1073741824
108 | 
109 | # The interval at which log segments are checked to see if they can be deleted according
110 | # to the retention policies
111 | log.retention.check.interval.ms=300000
112 | 
113 | ############################# Zookeeper #############################
114 | 
115 | # Zookeeper connection string (see zookeeper docs for details).
116 | # This is a comma separated list of host:port pairs, each corresponding to a zk
117 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
118 | # You can also append an optional chroot string to the urls to specify the
119 | # root directory for all kafka znodes.
120 | zookeeper.connect=${KAFKA_ZOOKEEPER_CONNECT} 121 | 122 | 123 | # Timeout in ms for connecting to zookeeper 124 | zookeeper.connection.timeout.ms=6000 125 | -------------------------------------------------------------------------------- /eventuate-messaging-kafka-spring-integration-test/src/test/java/io/eventuate/messaging/kafka/spring/basic/consumer/EventuateKafkaBasicConsumerSpringTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.spring.basic.consumer; 2 | 3 | import io.eventuate.messaging.kafka.basic.consumer.AbstractEventuateKafkaBasicConsumerTest; 4 | import io.eventuate.messaging.kafka.basic.consumer.DefaultKafkaConsumerFactory; 5 | import io.eventuate.messaging.kafka.basic.consumer.EventuateKafkaConsumerConfigurationProperties; 6 | import io.eventuate.messaging.kafka.basic.consumer.KafkaConsumerFactory; 7 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties; 8 | import io.eventuate.messaging.kafka.consumer.*; 9 | import io.eventuate.messaging.kafka.spring.common.EventuateKafkaPropertiesConfiguration; 10 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducer; 11 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducerConfigurationProperties; 12 | import io.eventuate.messaging.kafka.spring.producer.EventuateKafkaProducerSpringConfigurationPropertiesConfiguration; 13 | import io.eventuate.util.test.async.Eventually; 14 | import org.junit.jupiter.api.Test; 15 | import org.springframework.beans.factory.annotation.Autowired; 16 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration; 17 | import org.springframework.boot.test.context.SpringBootTest; 18 | import org.springframework.context.annotation.Bean; 19 | import org.springframework.context.annotation.Configuration; 20 | import org.springframework.context.annotation.Import; 21 | 22 | import java.util.Collections; 23 | import java.util.concurrent.LinkedBlockingQueue; 24 | import java.util.concurrent.TimeUnit; 25 | 26 | import static org.assertj.core.api.Assertions.assertThat; 27 | import static org.junit.jupiter.api.Assertions.assertEquals; 28 | 29 | @SpringBootTest(classes = EventuateKafkaBasicConsumerSpringTest.EventuateKafkaConsumerTestConfiguration.class, 30 | properties = "eventuate.local.kafka.consumer.backPressure.high=3") 31 | public class EventuateKafkaBasicConsumerSpringTest extends AbstractEventuateKafkaBasicConsumerTest { 32 | 33 | @Configuration 34 | @EnableAutoConfiguration 35 | @Import({EventuateKafkaConsumerSpringConfigurationPropertiesConfiguration.class, 36 | EventuateKafkaProducerSpringConfigurationPropertiesConfiguration.class, 37 | EventuateKafkaPropertiesConfiguration.class}) 38 | public static class EventuateKafkaConsumerTestConfiguration { 39 | 40 | @Autowired(required=false) 41 | private TopicPartitionToSwimlaneMapping partitionToSwimLaneMapping = new OriginalTopicPartitionToSwimlaneMapping(); 42 | 43 | @Bean 44 | public EventuateKafkaProducer producer(EventuateKafkaConfigurationProperties kafkaProperties, 45 | EventuateKafkaProducerConfigurationProperties producerProperties) { 46 | return new EventuateKafkaProducer(kafkaProperties.getBootstrapServers(), producerProperties); 47 | } 48 | 49 | @Bean 50 | public MessageConsumerKafkaImpl messageConsumerKafka(EventuateKafkaConfigurationProperties props, 51 | EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties, 52 | KafkaConsumerFactory kafkaConsumerFactory) { 53 | 
return new MessageConsumerKafkaImpl(props.getBootstrapServers(), eventuateKafkaConsumerConfigurationProperties, kafkaConsumerFactory, partitionToSwimLaneMapping);
54 |     }
55 | 
56 |     @Bean
57 |     public KafkaConsumerFactory kafkaConsumerFactory() {
58 |       return new DefaultKafkaConsumerFactory();
59 |     }
60 |   }
61 | 
62 |   @Autowired
63 |   private EventuateKafkaConfigurationProperties kafkaProperties;
64 | 
65 |   @Autowired
66 |   private EventuateKafkaConsumerConfigurationProperties consumerProperties;
67 | 
68 |   @Autowired
69 |   private EventuateKafkaProducer producer;
70 | 
71 |   @Autowired
72 |   private MessageConsumerKafkaImpl consumer;
73 | 
74 |   @Autowired
75 |   private KafkaConsumerFactory kafkaConsumerFactory;
76 | 
77 |   @Test
78 |   @Override
79 |   public void shouldStopWhenHandlerThrowsException() {
80 |     super.shouldStopWhenHandlerThrowsException();
81 |   }
82 | 
83 |   @Test
84 |   @Override
85 |   public void shouldConsumeMessages() {
86 |     super.shouldConsumeMessages();
87 |   }
88 | 
89 |   @Test
90 |   @Override
91 |   public void shouldConsumeMessagesWithBackPressure() {
92 |     super.shouldConsumeMessagesWithBackPressure();
93 |   }
94 | 
95 |   @Test
96 |   @Override
97 |   public void shouldConsumeBatchOfMessage() {
98 |     super.shouldConsumeBatchOfMessage();
99 |   }
100 | 
101 |   @Override
102 |   protected EventuateKafkaConfigurationProperties getKafkaProperties() {
103 |     return kafkaProperties;
104 |   }
105 | 
106 |   @Override
107 |   protected EventuateKafkaConsumerConfigurationProperties getConsumerProperties() {
108 |     return consumerProperties;
109 |   }
110 | 
111 |   @Override
112 |   protected EventuateKafkaProducer getProducer() {
113 |     return producer;
114 |   }
115 | 
116 |   @Override
117 |   protected MessageConsumerKafkaImpl getConsumer() {
118 |     return consumer;
119 |   }
120 | 
121 |   @Override
122 |   protected KafkaConsumerFactory getKafkaConsumerFactory() {
123 |     return kafkaConsumerFactory;
124 |   }
125 | 
126 |   @Test
127 |   public void shouldSaveOffsets() throws InterruptedException {
128 |     String subscriberId = "subscriber-" + System.currentTimeMillis();
129 |     String topic = "topic-" + System.currentTimeMillis();
130 |     LinkedBlockingQueue<KafkaMessage> messages = new LinkedBlockingQueue<>();
131 | 
132 |     sendMessages(topic);
133 | 
134 |     handler = kafkaMessage -> {
135 |       try {
136 |         TimeUnit.MILLISECONDS.sleep(20);
137 |       } catch (InterruptedException e) {
138 |         throw new RuntimeException(e);
139 |       }
140 |       messages.add(kafkaMessage);
141 |     };
142 | 
143 |     KafkaSubscription subscription = getConsumer().subscribe(subscriberId, Collections.singleton(topic), handler);
144 | 
145 |     Eventually.eventually(() ->
146 |             assertEquals(2, messages.size()));
147 | 
148 |     subscription.close();
149 | 
150 |     messages.clear();
151 | 
152 |     subscription = getConsumer().subscribe(subscriberId, Collections.singleton(topic), handler);
153 |     TimeUnit.SECONDS.sleep(10);
154 | 
155 |     assertThat(messages).isEmpty();
156 | 
157 |   }
158 | 
159 | }
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-integration-test/src/main/java/io/eventuate/messaging/kafka/basic/consumer/AbstractEventuateKafkaBasicConsumerTest.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 | 
3 | import io.eventuate.messaging.kafka.common.EventuateKafkaConfigurationProperties;
4 | import io.eventuate.messaging.kafka.common.EventuateKafkaMultiMessage;
5 | import io.eventuate.messaging.kafka.common.EventuateKafkaMultiMessageConverter;
6 | import io.eventuate.messaging.kafka.consumer.KafkaMessage;
7 | import io.eventuate.messaging.kafka.consumer.KafkaMessageHandler;
8 | import io.eventuate.messaging.kafka.consumer.KafkaSubscription;
9 | import io.eventuate.messaging.kafka.consumer.MessageConsumerKafkaImpl;
10 | import io.eventuate.messaging.kafka.producer.EventuateKafkaProducer;
11 | import io.eventuate.util.test.async.Eventually;
12 | 
13 | import java.util.Arrays;
14 | import java.util.Collections;
15 | import java.util.List;
16 | import java.util.UUID;
17 | import java.util.concurrent.CompletableFuture;
18 | import java.util.concurrent.ExecutionException;
19 | import java.util.concurrent.LinkedBlockingQueue;
20 | import java.util.concurrent.TimeUnit;
21 | import java.util.function.BiConsumer;
22 | 
23 | import static org.junit.jupiter.api.Assertions.assertEquals;
24 | import static org.mockito.Mockito.*;
25 | 
26 | public abstract class AbstractEventuateKafkaBasicConsumerTest {
27 | 
28 |   protected abstract EventuateKafkaConfigurationProperties getKafkaProperties();
29 | 
30 |   protected abstract EventuateKafkaConsumerConfigurationProperties getConsumerProperties();
31 | 
32 |   protected abstract EventuateKafkaProducer getProducer();
33 | 
34 |   protected abstract MessageConsumerKafkaImpl getConsumer();
35 | 
36 |   protected abstract KafkaConsumerFactory getKafkaConsumerFactory();
37 | 
38 |   protected KafkaMessageHandler handler;
39 | 
40 |   public void shouldStopWhenHandlerThrowsException() {
41 |     String subscriberId = "subscriber-" + System.currentTimeMillis();
42 |     String topic = "topic-" + System.currentTimeMillis();
43 | 
44 |     EventuateKafkaConsumerMessageHandler handler = makeExceptionThrowingHandler();
45 | 
46 |     EventuateKafkaConsumer consumer = makeConsumer(subscriberId, topic, handler);
47 | 
48 |     sendMessages(topic);
49 | 
50 |     assertConsumerStopped(consumer);
51 | 
52 |     assertHandlerInvokedAtLeastOnce(handler);
53 |   }
54 | 
55 |   public void shouldConsumeMessages() {
56 |     String subscriberId = "subscriber-" + System.currentTimeMillis();
57 |     String topic = "topic-" + System.currentTimeMillis();
58 |     LinkedBlockingQueue<KafkaMessage> messages = new LinkedBlockingQueue<>();
59 | 
60 |     for (int i = 0; i < 100; i++)
61 |       sendMessages(topic);
62 | 
63 |     handler = kafkaMessage -> {
64 |       try {
65 |         TimeUnit.MILLISECONDS.sleep(20);
66 |       } catch (InterruptedException e) {
67 |         throw new RuntimeException(e);
68 |       }
69 |       messages.add(kafkaMessage);
70 |     };
71 | 
72 |     KafkaSubscription subscription = getConsumer().subscribe(subscriberId, Collections.singleton(topic), handler);
73 | 
74 |     Eventually.eventually(() -> {
75 |       assertEquals(200, messages.size());
76 |     });
77 | 
78 |     subscription.close();
79 | 
80 |   }
81 | 
82 |   public void shouldConsumeMessagesWithBackPressure() {
83 |     String subscriberId = "subscriber-" + System.currentTimeMillis();
84 |     String topic = "topic-" + System.currentTimeMillis();
85 |     LinkedBlockingQueue<KafkaMessage> messages = new LinkedBlockingQueue<>();
86 | 
87 |     for (int i = 0; i < 100; i++)
88 |       sendMessages(topic);
89 | 
90 |     handler = kafkaMessage -> {
91 |       try {
92 |         TimeUnit.MILLISECONDS.sleep(20);
93 |       } catch (InterruptedException e) {
94 |         throw new RuntimeException(e);
95 |       }
96 |       messages.add(kafkaMessage);
97 |     };
98 | 
99 |     KafkaSubscription subscription = getConsumer().subscribe(subscriberId, Collections.singleton(topic), handler);
100 | 
101 |     Eventually.eventually(() -> assertEquals(200, messages.size()));
102 | 
103 |     subscription.close();
104 | 
105 |   }
106 | 
107 | 
108 |   public void shouldConsumeBatchOfMessage() {
109 |     String subscriberId = "subscriber-" + System.currentTimeMillis();
"subscriber-" + System.currentTimeMillis(); 110 | String topic = "topic-" + System.currentTimeMillis(); 111 | 112 | List messages = Arrays.asList(new EventuateKafkaMultiMessage(null, "a"), 113 | new EventuateKafkaMultiMessage(null, "b"), new EventuateKafkaMultiMessage(null, "c")); 114 | 115 | getProducer().send(topic, null, new EventuateKafkaMultiMessageConverter().convertMessagesToBytes(messages)); 116 | 117 | handler = mock(KafkaMessageHandler.class); 118 | 119 | KafkaSubscription subscription = getConsumer().subscribe(subscriberId, Collections.singleton(topic), handler); 120 | 121 | Eventually.eventually(() -> verify(handler, times(3)).accept(any())); 122 | 123 | subscription.close(); 124 | } 125 | 126 | private EventuateKafkaConsumer makeConsumer(String subscriberId, String topic, EventuateKafkaConsumerMessageHandler handler) { 127 | EventuateKafkaConsumer consumer = new EventuateKafkaConsumer(subscriberId, 128 | handler, 129 | Collections.singletonList(topic), 130 | getKafkaProperties().getBootstrapServers(), 131 | getConsumerProperties(), 132 | getKafkaConsumerFactory()); 133 | 134 | consumer.start(); 135 | return consumer; 136 | } 137 | 138 | protected void sendMessages(String topic) { 139 | for (int i = 0 ; i < 2 ; i++) { 140 | try { 141 | getProducer().send(topic, UUID.randomUUID().toString(), "body" + i).get(); 142 | } catch (InterruptedException | ExecutionException e) { 143 | throw new RuntimeException(e); 144 | } 145 | } 146 | } 147 | 148 | private void assertHandlerInvokedAtLeastOnce(EventuateKafkaConsumerMessageHandler handler) { 149 | verify(handler, atLeast(1)).apply(any(), any()); 150 | } 151 | 152 | private EventuateKafkaConsumerMessageHandler makeExceptionThrowingHandler() { 153 | EventuateKafkaConsumerMessageHandler handler = mock(EventuateKafkaConsumerMessageHandler.class); 154 | 155 | doAnswer(invocation -> { 156 | CompletableFuture.runAsync(() -> ((BiConsumer) invocation.getArguments()[1]).accept(null, new RuntimeException("Test is simulating failure"))); 157 | return null; 158 | }).when(handler).apply(any(), any()); 159 | return handler; 160 | } 161 | 162 | private void assertConsumerStopped(EventuateKafkaConsumer consumer) { 163 | Eventually.eventually(() -> assertEquals(EventuateKafkaConsumerState.MESSAGE_HANDLING_FAILED, consumer.getState())); 164 | } 165 | 166 | } -------------------------------------------------------------------------------- /eventuate-messaging-kafka-common/src/test/java/io/eventuate/messaging/kafka/common/EventuateKafkaMultiMessageConverterTest.java: -------------------------------------------------------------------------------- 1 | package io.eventuate.messaging.kafka.common; 2 | 3 | import io.eventuate.messaging.kafka.common.sbe.MessageHeaderEncoder; 4 | import io.eventuate.messaging.kafka.common.sbe.MultiMessageEncoder; 5 | import org.hamcrest.Matcher; 6 | import org.hamcrest.Matchers; 7 | import org.junit.jupiter.api.Assertions; 8 | import org.junit.jupiter.api.Test; 9 | 10 | import java.io.UnsupportedEncodingException; 11 | import java.nio.charset.StandardCharsets; 12 | import java.util.ArrayList; 13 | import java.util.Arrays; 14 | import java.util.Collections; 15 | import java.util.List; 16 | 17 | import static org.hamcrest.MatcherAssert.assertThat; 18 | 19 | public class EventuateKafkaMultiMessageConverterTest { 20 | 21 | private static final List HEADERS = 22 | new ArrayList<>(Arrays.asList(new EventuateKafkaMultiMessagesHeader("commonheader1key", "commonheader1value"), new EventuateKafkaMultiMessagesHeader("commonheader2key", 
"commonheader2value"))); 23 | 24 | private static final List TWO_BYTE_CHARACTER_MESSAGES = Collections.singletonList(new EventuateKafkaMultiMessage("ключ", "значение")); 25 | private static final List THREE_BYTE_CHARACTER_MESSAGES = Collections.singletonList(new EventuateKafkaMultiMessage("罗杰·费德勒真棒!", "德约科维奇摇滚!")); 26 | private static final EventuateKafkaMultiMessage THREE_BYTE_CHARACTER_MESSAGES_0 = THREE_BYTE_CHARACTER_MESSAGES.get(0); 27 | 28 | private static final String MESSAGE_0_KEY = "key1"; 29 | private static final String MESSAGE_0_VALUE = "value1"; 30 | private static final String MESSAGE_1_KEY = "key2"; 31 | private static final String MESSAGE_1_VALUE = "value2"; 32 | 33 | private static final List SIMPLE_MESSAGES = 34 | Arrays.asList(new EventuateKafkaMultiMessage(MESSAGE_0_KEY, MESSAGE_0_VALUE), new EventuateKafkaMultiMessage(MESSAGE_1_KEY, MESSAGE_1_VALUE)); 35 | 36 | private static final List MESSAGES_WITH_HEADERS = 37 | Arrays.asList(new EventuateKafkaMultiMessage(MESSAGE_0_KEY, MESSAGE_0_VALUE, new ArrayList<>(Arrays.asList(new EventuateKafkaMultiMessageHeader("header1key", "header1value"), new EventuateKafkaMultiMessageHeader("header2key", "header2value")))), 38 | new EventuateKafkaMultiMessage(MESSAGE_1_KEY, MESSAGE_1_VALUE, new ArrayList<>(Arrays.asList(new EventuateKafkaMultiMessageHeader("header3key", "header3value"), new EventuateKafkaMultiMessageHeader("header4key", "header4value"))))); 39 | 40 | private static final List EMPTY_MESSAGES = Collections.singletonList(new EventuateKafkaMultiMessage("", "")); 41 | 42 | private static final List NULL_MESSAGES = Collections.singletonList(new EventuateKafkaMultiMessage(null, null)); 43 | 44 | private byte[] serializedMessages; 45 | private int estimatedSize; 46 | 47 | @Test 48 | public void testMessageBuilderSimpleMessages() { 49 | testMessageBuilder(new EventuateKafkaMultiMessages(SIMPLE_MESSAGES)); 50 | } 51 | 52 | @Test 53 | public void testMessageBuilder2ByteCharacterMessages() { 54 | testMessageBuilder(new EventuateKafkaMultiMessages(TWO_BYTE_CHARACTER_MESSAGES)); 55 | assertEstimatedGreaterOrEqualToActual(); 56 | } 57 | 58 | private void assertEstimatedGreaterOrEqualToActual() { 59 | assertThat(estimatedSize, Matchers.greaterThanOrEqualTo(serializedMessages.length)); 60 | } 61 | 62 | @Test 63 | public void testMessageBuilder3ByteCharacterMessages() throws UnsupportedEncodingException { 64 | 65 | assertThat(THREE_BYTE_CHARACTER_MESSAGES_0.getKey().getBytes(StandardCharsets.UTF_8).length, Matchers.greaterThan(THREE_BYTE_CHARACTER_MESSAGES_0.getKey().length() * 2)); 66 | assertThat(THREE_BYTE_CHARACTER_MESSAGES_0.getValue().getBytes(StandardCharsets.UTF_8).length, Matchers.greaterThan(THREE_BYTE_CHARACTER_MESSAGES_0.getValue().length() * 2)); 67 | testMessageBuilder(new EventuateKafkaMultiMessages(THREE_BYTE_CHARACTER_MESSAGES)); 68 | assertEstimatedGreaterOrEqualToActual(); 69 | } 70 | 71 | @Test 72 | public void testMessageBuilderMessagesWithHeaders() { 73 | testMessageBuilder(new EventuateKafkaMultiMessages(HEADERS, MESSAGES_WITH_HEADERS)); 74 | } 75 | 76 | @Test 77 | public void testMessageBuilderNullMessages() { 78 | testMessageBuilder(new EventuateKafkaMultiMessages(NULL_MESSAGES), new EventuateKafkaMultiMessages(EMPTY_MESSAGES)); 79 | } 80 | 81 | @Test 82 | public void testMessageBuilderEmptyMessages() { 83 | testMessageBuilder(new EventuateKafkaMultiMessages(EMPTY_MESSAGES), new EventuateKafkaMultiMessages(EMPTY_MESSAGES)); 84 | } 85 | 86 | @Test 87 | public void testMessageBuilderSizeCheck() { 88 | int 
sizeOfHeaderAndFirstMessage = MessageHeaderEncoder.ENCODED_LENGTH + MultiMessageEncoder.MessagesEncoder.HEADER_SIZE + MultiMessageEncoder.HeadersEncoder.HEADER_SIZE 89 | + SIMPLE_MESSAGES.get(0).estimateSize(); 90 | 91 | EventuateKafkaMultiMessageConverter.MessageBuilder messageBuilder = 92 | new EventuateKafkaMultiMessageConverter.MessageBuilder(sizeOfHeaderAndFirstMessage); 93 | 94 | Assertions.assertTrue(messageBuilder.addMessage(SIMPLE_MESSAGES.get(0))); 95 | Assertions.assertFalse(messageBuilder.addMessage(SIMPLE_MESSAGES.get(1))); 96 | } 97 | 98 | @Test 99 | public void testMessageBuilderHeaderSizeCheck() { 100 | int sizeOfFirstMessage = 101 | KeyValue.KEY_HEADER_SIZE + MESSAGE_0_KEY.length() * KeyValue.ESTIMATED_BYTES_PER_CHAR + 102 | KeyValue.VALUE_HEADER_SIZE + MESSAGE_0_VALUE.length() * KeyValue.ESTIMATED_BYTES_PER_CHAR; 103 | 104 | EventuateKafkaMultiMessageConverter.MessageBuilder messageBuilder = 105 | new EventuateKafkaMultiMessageConverter.MessageBuilder(sizeOfFirstMessage); 106 | 107 | Assertions.assertFalse(messageBuilder.addMessage(SIMPLE_MESSAGES.get(0))); 108 | } 109 | 110 | @Test 111 | public void testMessageConverterSimpleMessages() { 112 | testMessageConverter(new EventuateKafkaMultiMessages(SIMPLE_MESSAGES)); 113 | } 114 | 115 | @Test 116 | public void testMessageConverterNullMessages() { 117 | testMessageConverter(new EventuateKafkaMultiMessages(NULL_MESSAGES), new EventuateKafkaMultiMessages(EMPTY_MESSAGES)); 118 | } 119 | 120 | @Test 121 | public void testMessageConverterEmptyMessages() { 122 | testMessageConverter(new EventuateKafkaMultiMessages(EMPTY_MESSAGES), new EventuateKafkaMultiMessages(EMPTY_MESSAGES)); 123 | } 124 | 125 | public void testMessageBuilder(EventuateKafkaMultiMessages messages) { 126 | testMessageBuilder(messages, messages); 127 | } 128 | 129 | public void testMessageBuilder(EventuateKafkaMultiMessages original, EventuateKafkaMultiMessages result) { 130 | EventuateKafkaMultiMessageConverter.MessageBuilder messageBuilder = new EventuateKafkaMultiMessageConverter.MessageBuilder(1000000); 131 | EventuateKafkaMultiMessageConverter eventuateMultiMessageConverter = new EventuateKafkaMultiMessageConverter(); 132 | 133 | Assertions.assertTrue(messageBuilder.setHeaders(original.getHeaders())); 134 | Assertions.assertTrue(original.getMessages().stream().allMatch(messageBuilder::addMessage)); 135 | 136 | serializedMessages = messageBuilder.toBinaryArray(); 137 | estimatedSize = EventuateKafkaMultiMessageConverter.HEADER_SIZE + original.estimateSize(); 138 | 139 | assertEstimatedGreaterOrEqualToActual(); 140 | 141 | EventuateKafkaMultiMessages deserializedMessages = eventuateMultiMessageConverter.convertBytesToMessages(serializedMessages); 142 | 143 | Assertions.assertEquals(result, deserializedMessages); 144 | } 145 | 146 | public void testMessageConverter(EventuateKafkaMultiMessages messages) { 147 | testMessageConverter(messages, messages); 148 | } 149 | 150 | public void testMessageConverter(EventuateKafkaMultiMessages original, EventuateKafkaMultiMessages result) { 151 | EventuateKafkaMultiMessageConverter eventuateMultiMessageConverter = new EventuateKafkaMultiMessageConverter(); 152 | 153 | byte[] serializedMessages = eventuateMultiMessageConverter.convertMessagesToBytes(original); 154 | 155 | EventuateKafkaMultiMessages deserializedMessages = eventuateMultiMessageConverter.convertBytesToMessages(serializedMessages); 156 | 157 | Assertions.assertEquals(result, deserializedMessages); 158 | } 159 | } 160 | 
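As a quick orientation to the API the tests above exercise: the converter packs several logical messages, plus optional batch-level headers, into a single SBE-encoded Kafka record value, and MessageBuilder enforces a byte budget so a batch never outgrows the producer's maximum record size. A self-contained round-trip sketch follows; the example class and values are hypothetical, not part of the repo.

import java.util.Arrays;
import java.util.List;

public class MultiMessageRoundTripExample { // hypothetical example class

  public static void main(String[] args) {
    EventuateKafkaMultiMessageConverter converter = new EventuateKafkaMultiMessageConverter();

    List<EventuateKafkaMultiMessage> batch = Arrays.asList(
        new EventuateKafkaMultiMessage("key1", "value1"),
        new EventuateKafkaMultiMessage("key2", "value2"));

    // Pack the batch into one record value, then unpack it again.
    byte[] payload = converter.convertMessagesToBytes(batch);
    EventuateKafkaMultiMessages unpacked = converter.convertBytesToMessages(payload);
    System.out.println(unpacked.getMessages().size()); // 2

    // MessageBuilder stops accepting messages once its size budget is exhausted;
    // addMessage returns false when the next message would not fit.
    EventuateKafkaMultiMessageConverter.MessageBuilder builder =
        new EventuateKafkaMultiMessageConverter.MessageBuilder(64); // deliberately tiny budget
    System.out.println(builder.addMessage(new EventuateKafkaMultiMessage("k", "v"))); // likely false
  }
}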
--------------------------------------------------------------------------------
/eventuate-messaging-kafka-basic-consumer/src/main/java/io/eventuate/messaging/kafka/basic/consumer/EventuateKafkaConsumer.java:
--------------------------------------------------------------------------------
1 | package io.eventuate.messaging.kafka.basic.consumer;
2 | 
3 | import org.apache.kafka.clients.consumer.ConsumerRecord;
4 | import org.apache.kafka.clients.consumer.ConsumerRecords;
5 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
6 | import org.apache.kafka.common.PartitionInfo;
7 | import org.apache.kafka.common.TopicPartition;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | 
11 | import java.time.Duration;
12 | import java.time.temporal.ChronoUnit;
13 | import java.util.*;
14 | import java.util.concurrent.atomic.AtomicBoolean;
15 | 
16 | /**
17 |  * A Kafka consumer that manually commits offsets and supports asynchronous message processing
18 |  */
19 | public class EventuateKafkaConsumer {
20 | 
21 |   private static Logger logger = LoggerFactory.getLogger(EventuateKafkaConsumer.class);
22 |   private final String subscriberId;
23 |   private final EventuateKafkaConsumerMessageHandler handler;
24 |   private final List<String> topics;
25 |   private final KafkaConsumerFactory kafkaConsumerFactory;
26 |   private final BackPressureConfig backPressureConfig;
27 |   private final long pollTimeout;
28 |   private AtomicBoolean stopFlag = new AtomicBoolean(false);
29 |   private Properties consumerProperties;
30 |   private volatile EventuateKafkaConsumerState state = EventuateKafkaConsumerState.CREATED;
31 | 
32 |   private volatile boolean closeConsumerOnStop = true;
33 | 
34 |   private Optional<ConsumerCallbacks> consumerCallbacks = Optional.empty();
35 | 
36 |   public EventuateKafkaConsumer(String subscriberId,
37 |                                 EventuateKafkaConsumerMessageHandler handler,
38 |                                 List<String> topics,
39 |                                 String bootstrapServers,
40 |                                 EventuateKafkaConsumerConfigurationProperties eventuateKafkaConsumerConfigurationProperties,
41 |                                 KafkaConsumerFactory kafkaConsumerFactory) {
42 |     this.subscriberId = subscriberId;
43 |     this.handler = handler;
44 |     this.topics = topics;
45 |     this.kafkaConsumerFactory = kafkaConsumerFactory;
46 | 
47 |     this.consumerProperties = ConsumerPropertiesFactory.makeDefaultConsumerProperties(bootstrapServers, subscriberId);
48 |     this.consumerProperties.putAll(eventuateKafkaConsumerConfigurationProperties.getProperties());
49 |     this.backPressureConfig = eventuateKafkaConsumerConfigurationProperties.getBackPressure();
50 |     this.pollTimeout = eventuateKafkaConsumerConfigurationProperties.getPollTimeout();
51 |   }
52 | 
53 |   public void setConsumerCallbacks(Optional<ConsumerCallbacks> consumerCallbacks) {
54 |     this.consumerCallbacks = consumerCallbacks;
55 |   }
56 | 
57 |   public boolean isCloseConsumerOnStop() {
58 |     return closeConsumerOnStop;
59 |   }
60 | 
61 |   public void setCloseConsumerOnStop(boolean closeConsumerOnStop) {
62 |     this.closeConsumerOnStop = closeConsumerOnStop;
63 |   }
64 | 
65 |   public static List<PartitionInfo> verifyTopicExistsBeforeSubscribing(KafkaMessageConsumer consumer, String topic) {
66 |     try {
67 |       logger.debug("Verifying Topic {}", topic);
68 |       List<PartitionInfo> partitions = consumer.partitionsFor(topic);
69 |       logger.debug("Got these partitions {} for Topic {}", partitions, topic);
70 |       return partitions;
71 |     } catch (Throwable e) {
72 |       logger.error("Got exception: ", e);
73 |       throw new RuntimeException(e);
74 |     }
75 |   }
76 | 
77 |   private void maybeCommitOffsets(KafkaMessageConsumer consumer, KafkaMessageProcessor processor) {
78 |     Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = processor.offsetsToCommit();
79 |     if (!offsetsToCommit.isEmpty()) {
80 |       consumerCallbacks.ifPresent(ConsumerCallbacks::onTryCommitCallback);
81 |       logger.debug("Committing offsets {} {}", subscriberId, offsetsToCommit);
82 |       consumer.commitOffsets(offsetsToCommit);
83 |       logger.debug("Committed offsets {}", subscriberId);
84 |       processor.noteOffsetsCommitted(offsetsToCommit);
85 |       consumerCallbacks.ifPresent(ConsumerCallbacks::onCommitedCallback);
86 |     }
87 |   }
88 | 
89 |   public void start() {
90 |     try {
91 |       KafkaMessageConsumer consumer = kafkaConsumerFactory.makeConsumer(subscriberId, consumerProperties);
92 | 
93 |       KafkaMessageProcessor processor = new KafkaMessageProcessor(subscriberId, handler);
94 | 
95 |       BackPressureManager backpressureManager = new BackPressureManager(backPressureConfig);
96 | 
97 |       for (String topic : topics) {
98 |         verifyTopicExistsBeforeSubscribing(consumer, topic);
99 |       }
100 | 
101 |       subscribe(consumer);
102 | 
103 |       new Thread(() -> {
104 | 
105 |         try {
106 |           runPollingLoop(consumer, processor, backpressureManager);
107 | 
108 |           maybeCommitOffsets(consumer, processor);
109 | 
110 |           state = EventuateKafkaConsumerState.STOPPED;
111 | 
112 |           if (closeConsumerOnStop) {
113 |             consumer.close();
114 |           }
115 | 
116 |         } catch (KafkaMessageProcessorFailedException e) {
117 |           // We are done
118 |           logger.trace("Terminating since KafkaMessageProcessorFailedException");
119 |           state = EventuateKafkaConsumerState.MESSAGE_HANDLING_FAILED;
120 |           consumer.close(Duration.of(200, ChronoUnit.MILLIS));
121 |         } catch (Throwable e) {
122 |           logger.error("Got exception: ", e);
123 |           state = EventuateKafkaConsumerState.FAILED;
124 |           consumer.close(Duration.of(200, ChronoUnit.MILLIS));
125 |           throw new RuntimeException(e);
126 |         }
127 |         logger.trace("Stopped in state {}", state);
128 | 
129 |       }, "Eventuate-subscriber-" + subscriberId).start();
130 | 
131 |       state = EventuateKafkaConsumerState.STARTED;
132 | 
133 |     } catch (Exception e) {
134 |       logger.error("Error subscribing", e);
135 |       state = EventuateKafkaConsumerState.FAILED_TO_START;
136 |       throw new RuntimeException(e);
137 |     }
138 |   }
139 | 
140 |   private void subscribe(KafkaMessageConsumer consumer) {
141 |     logger.debug("Subscribing to {} {}", subscriberId, topics);
142 |     consumer.subscribe(topics);
143 |     logger.debug("Subscribed to {} {}", subscriberId, topics);
144 |   }
145 | 
146 |   private void runPollingLoop(KafkaMessageConsumer consumer, KafkaMessageProcessor processor, BackPressureManager backPressureManager) {
147 |     while (!stopFlag.get()) {
148 |       ConsumerRecords<String, byte[]> records = consumer.poll(Duration.of(100, ChronoUnit.MILLIS));
149 |       if (!records.isEmpty())
150 |         logger.debug("Got {} {} records", subscriberId, records.count());
151 | 
152 |       if (records.isEmpty())
153 |         processor.throwFailureException();
154 |       else
155 |         for (ConsumerRecord<String, byte[]> record : records) {
156 |           logger.debug("processing record subscriberId={} tpo=({} {} {}) body={}", subscriberId, record.topic(), record.partition(), record.offset(), record.value());
157 |           if (logger.isDebugEnabled())
158 |             logger.debug("EventuateKafkaAggregateSubscriptions subscriber = %s, offset = %d, key = %s, value = %s".formatted(subscriberId, record.offset(), record.key(), record.value()));
159 |           processor.process(record);
160 |         }
161 |       if (!records.isEmpty())
162 |         logger.debug("Processed {} {} records", subscriberId, records.count());
163 | 
164 |       try {
165 |         maybeCommitOffsets(consumer, processor);
166 |       } catch (Exception e) {
167 |         logger.error("Cannot commit offsets", e);
168 |         consumerCallbacks.ifPresent(ConsumerCallbacks::onCommitFailedCallback);
169 |       }
170 | 
171 |       if (!records.isEmpty())
172 |         logger.debug("To commit {} {}", subscriberId, processor.getPending());
173 | 
174 |       int backlog = processor.backlog();
175 | 
176 |       Set<TopicPartition> topicPartitions = new HashSet<>();
177 |       for (ConsumerRecord<String, byte[]> record : records) {
178 |         topicPartitions.add(new TopicPartition(record.topic(), record.partition()));
179 |       }
180 |       BackPressureActions actions = backPressureManager.update(topicPartitions, backlog);
181 | 
182 |       if (!actions.pause.isEmpty()) {
183 |         logger.info("Subscriber {} pausing {} due to backlog {} > {}", subscriberId, actions.pause, backlog, backPressureConfig.getHigh());
184 |         consumer.pause(actions.pause);
185 |       }
186 | 
187 |       if (!actions.resume.isEmpty()) {
188 |         logger.info("Subscriber {} resuming {} due to backlog {} <= {}", subscriberId, actions.resume, backlog, backPressureConfig.getLow());
189 |         consumer.resume(actions.resume);
190 |       }
191 | 
192 | 
193 |     }
194 |   }
195 | 
196 |   public void stop() {
197 |     stopFlag.set(true);
198 |     // can't call consumer.close(), it is not thread safe,
199 |     // it can produce java.util.ConcurrentModificationException: KafkaConsumer is not safe for multi-threaded access
200 |   }
201 | 
202 |   public EventuateKafkaConsumerState getState() {
203 |     return state;
204 |   }
205 | }
206 | 
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | #
4 | # Copyright © 2015-2021 the original authors.
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | #      https://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 | #
18 | # SPDX-License-Identifier: Apache-2.0
19 | #
20 | 
21 | ##############################################################################
22 | #
23 | #   Gradle start up script for POSIX generated by Gradle.
24 | #
25 | #   Important for running:
26 | #
27 | #   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
28 | #       noncompliant, but you have some other compliant shell such as ksh or
29 | #       bash, then to run this script, type that shell name before the whole
30 | #       command line, like:
31 | #
32 | #           ksh Gradle
33 | #
34 | #       Busybox and similar reduced shells will NOT work, because this script
35 | #       requires all of these POSIX shell features:
36 | #         * functions;
37 | #         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
38 | #           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
39 | #         * compound commands having a testable exit status, especially «case»;
40 | #         * various built-in commands including «command», «set», and «ulimit».
41 | #
42 | #   Important for patching:
43 | #
44 | #   (2) This script targets any POSIX shell, so it avoids extensions provided
45 | #       by Bash, Ksh, etc; in particular arrays are avoided.
46 | # 47 | # The "traditional" practice of packing multiple parameters into a 48 | # space-separated string is a well documented source of bugs and security 49 | # problems, so this is (mostly) avoided, by progressively accumulating 50 | # options in "$@", and eventually passing that to Java. 51 | # 52 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 53 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 54 | # see the in-line comments for details. 55 | # 56 | # There are tweaks for specific operating systems such as AIX, CygWin, 57 | # Darwin, MinGW, and NonStop. 58 | # 59 | # (3) This script is generated from the Groovy template 60 | # https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 61 | # within the Gradle project. 62 | # 63 | # You can find Gradle at https://github.com/gradle/gradle/. 64 | # 65 | ############################################################################## 66 | 67 | # Attempt to set APP_HOME 68 | 69 | # Resolve links: $0 may be a link 70 | app_path=$0 71 | 72 | # Need this for daisy-chained symlinks. 73 | while 74 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 75 | [ -h "$app_path" ] 76 | do 77 | ls=$( ls -ld "$app_path" ) 78 | link=${ls#*' -> '} 79 | case $link in #( 80 | /*) app_path=$link ;; #( 81 | *) app_path=$APP_HOME$link ;; 82 | esac 83 | done 84 | 85 | # This is normally unused 86 | # shellcheck disable=SC2034 87 | APP_BASE_NAME=${0##*/} 88 | # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) 89 | APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit 90 | 91 | # Use the maximum available, or set MAX_FD != -1 to use that value. 92 | MAX_FD=maximum 93 | 94 | warn () { 95 | echo "$*" 96 | } >&2 97 | 98 | die () { 99 | echo 100 | echo "$*" 101 | echo 102 | exit 1 103 | } >&2 104 | 105 | # OS specific support (must be 'true' or 'false'). 106 | cygwin=false 107 | msys=false 108 | darwin=false 109 | nonstop=false 110 | case "$( uname )" in #( 111 | CYGWIN* ) cygwin=true ;; #( 112 | Darwin* ) darwin=true ;; #( 113 | MSYS* | MINGW* ) msys=true ;; #( 114 | NONSTOP* ) nonstop=true ;; 115 | esac 116 | 117 | CLASSPATH="\\\"\\\"" 118 | 119 | 120 | # Determine the Java command to use to start the JVM. 121 | if [ -n "$JAVA_HOME" ] ; then 122 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 123 | # IBM's JDK on AIX uses strange locations for the executables 124 | JAVACMD=$JAVA_HOME/jre/sh/java 125 | else 126 | JAVACMD=$JAVA_HOME/bin/java 127 | fi 128 | if [ ! -x "$JAVACMD" ] ; then 129 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 130 | 131 | Please set the JAVA_HOME variable in your environment to match the 132 | location of your Java installation." 133 | fi 134 | else 135 | JAVACMD=java 136 | if ! command -v java >/dev/null 2>&1 137 | then 138 | die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 139 | 140 | Please set the JAVA_HOME variable in your environment to match the 141 | location of your Java installation." 142 | fi 143 | fi 144 | 145 | # Increase the maximum file descriptors if we can. 146 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 147 | case $MAX_FD in #( 148 | max*) 149 | # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 
150 | # shellcheck disable=SC2039,SC3045 151 | MAX_FD=$( ulimit -H -n ) || 152 | warn "Could not query maximum file descriptor limit" 153 | esac 154 | case $MAX_FD in #( 155 | '' | soft) :;; #( 156 | *) 157 | # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 158 | # shellcheck disable=SC2039,SC3045 159 | ulimit -n "$MAX_FD" || 160 | warn "Could not set maximum file descriptor limit to $MAX_FD" 161 | esac 162 | fi 163 | 164 | # Collect all arguments for the java command, stacking in reverse order: 165 | # * args from the command line 166 | # * the main class name 167 | # * -classpath 168 | # * -D...appname settings 169 | # * --module-path (only if needed) 170 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 171 | 172 | # For Cygwin or MSYS, switch paths to Windows format before running java 173 | if "$cygwin" || "$msys" ; then 174 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 175 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) 176 | 177 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 178 | 179 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 180 | for arg do 181 | if 182 | case $arg in #( 183 | -*) false ;; # don't mess with options #( 184 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 185 | [ -e "$t" ] ;; #( 186 | *) false ;; 187 | esac 188 | then 189 | arg=$( cygpath --path --ignore --mixed "$arg" ) 190 | fi 191 | # Roll the args list around exactly as many times as the number of 192 | # args, so each arg winds up back in the position where it started, but 193 | # possibly modified. 194 | # 195 | # NB: a `for` loop captures its iteration list before it begins, so 196 | # changing the positional parameters here affects neither the number of 197 | # iterations, nor the values presented in `arg`. 198 | shift # remove old arg 199 | set -- "$@" "$arg" # push replacement arg 200 | done 201 | fi 202 | 203 | 204 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 205 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 206 | 207 | # Collect all arguments for the java command: 208 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, 209 | # and any embedded shellness will be escaped. 210 | # * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be 211 | # treated as '${Hostname}' itself on the command line. 212 | 213 | set -- \ 214 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 215 | -classpath "$CLASSPATH" \ 216 | -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ 217 | "$@" 218 | 219 | # Stop when "xargs" is not available. 220 | if ! command -v xargs >/dev/null 2>&1 221 | then 222 | die "xargs is not available" 223 | fi 224 | 225 | # Use "xargs" to parse quoted args. 226 | # 227 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed. 228 | # 229 | # In Bash we could simply go: 230 | # 231 | # readarray ARGS < <( xargs -n1 <<<"$var" ) && 232 | # set -- "${ARGS[@]}" "$@" 233 | # 234 | # but POSIX shell has neither arrays nor command substitution, so instead we 235 | # post-process each arg (as a line of input to sed) to backslash-escape any 236 | # character that might be a shell metacharacter, then use eval to reverse 237 | # that process (while maintaining the separation between arguments), and wrap 238 | # the whole thing up as a single "set" statement. 
239 | # 240 | # This will of course break if any of these variables contains a newline or 241 | # an unmatched quote. 242 | # 243 | 244 | eval "set -- $( 245 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | 246 | xargs -n1 | 247 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | 248 | tr '\n' ' ' 249 | )" '"$@"' 250 | 251 | exec "$JAVACMD" "$@" 252 | --------------------------------------------------------------------------------
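Finally, a sketch tying together the back-pressure pieces used by EventuateKafkaConsumer.runPollingLoop() earlier in this document: the manager compares the backlog of dispatched-but-unprocessed records against configured high/low watermarks and answers which partitions the caller should pause or resume on its consumer. The example class and values are hypothetical, and the assumption that BackPressureConfig's no-arg constructor supplies usable default watermarks is exactly that, an assumption.

import java.util.Set;
import org.apache.kafka.common.TopicPartition;

public class BackPressureExample { // hypothetical example class

  public static void main(String[] args) {
    BackPressureConfig config = new BackPressureConfig();   // assumed default high/low watermarks
    BackPressureManager manager = new BackPressureManager(config);

    // Partitions seen in the most recent poll, and the current handler backlog.
    Set<TopicPartition> partitionsSeenInPoll = Set.of(new TopicPartition("orders", 0));
    int backlog = 500; // records handed to handlers but not yet processed

    // update() returns the pause/resume sets that runPollingLoop() applies
    // via consumer.pause(...) and consumer.resume(...).
    BackPressureActions actions = manager.update(partitionsSeenInPoll, backlog);
    System.out.println("pause: " + actions.pause + ", resume: " + actions.resume);
  }
}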