├── .circleci ├── config.yml ├── publish.sh └── publishOnCommitMsg.sh ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── build.gradle ├── buildSrc ├── build.gradle └── src │ └── main │ └── groovy │ └── com │ └── linkedin │ └── gradle │ └── build │ └── DistributeTask.groovy ├── config └── checkstyle │ └── checkstyle.xml ├── gradle.properties ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── integration-tests ├── build.gradle └── src │ └── test │ ├── java │ └── com │ │ └── linkedin │ │ └── kafka │ │ └── clients │ │ ├── auditing │ │ └── ConfigureAuditorTest.java │ │ ├── consumer │ │ ├── LiKafkaConsumerIntegrationTest.java │ │ ├── LiKafkaConsumerSSLIntegrationTest.java │ │ └── LiKafkaInstrumentedConsumerIntegrationTest.java │ │ ├── largemessage │ │ └── LargeMessageIntegrationTest.java │ │ ├── producer │ │ ├── LiKafkaInstrumentedProducerIntegrationTest.java │ │ ├── LiKafkaProducerIntegrationTest.java │ │ ├── LiKafkaProducerSSLIntegrationTest.java │ │ └── RoundRobinPartitioner.java │ │ └── utils │ │ └── LiKafkaClientsTestUtils.java │ └── resources │ └── log4j.properties ├── kafka-test-harness ├── build.gradle └── src │ ├── main │ └── java │ │ └── com │ │ └── linkedin │ │ └── kafka │ │ └── clients │ │ └── utils │ │ └── tests │ │ ├── AbstractKafkaClientsIntegrationTestHarness.java │ │ ├── AbstractKafkaIntegrationTestHarness.java │ │ ├── AbstractZookeeperTestHarness.java │ │ ├── EmbeddedBroker.java │ │ ├── EmbeddedBrokerBuilder.java │ │ ├── EmbeddedZookeeper.java │ │ ├── KafkaTestUtils.java │ │ └── TestSslUtils.java │ └── test │ ├── java │ └── com │ │ └── linkedin │ │ └── kafka │ │ └── clients │ │ └── utils │ │ └── tests │ │ ├── EmbeddedBrokerTest.java │ │ ├── EmbeddedZookeeperTest.java │ │ └── KafkaIntegrationTestHarnessTest.java │ └── resources │ └── log4j.properties ├── li-apache-kafka-clients ├── build.gradle └── src │ ├── main │ └── java │ │ └── com │ │ └── linkedin │ │ └── kafka │ │ └── clients │ │ ├── annotations │ │ └── InterfaceOrigin.java │ │ ├── auditing │ │ ├── AuditType.java │ │ ├── Auditor.java │ │ ├── LoggingAuditor.java │ │ ├── NoOpAuditor.java │ │ └── abstractimpl │ │ │ ├── AbstractAuditor.java │ │ │ ├── AuditKey.java │ │ │ ├── AuditStats.java │ │ │ └── CountingAuditStats.java │ │ ├── common │ │ ├── ClusterDescriptor.java │ │ ├── ClusterGroupDescriptor.java │ │ ├── FederatedClientCommandCallback.java │ │ ├── FederatedClientCommandType.java │ │ ├── InstrumentedClientLoggingHandler.java │ │ ├── LargeMessageHeaderValue.java │ │ ├── LiKafkaCommonClientConfigs.java │ │ ├── LiKafkaFederatedClient.java │ │ ├── LiKafkaFederatedClientType.java │ │ ├── MetricsProxy.java │ │ ├── PartitionLookupResult.java │ │ └── TopicLookupResult.java │ │ ├── consumer │ │ ├── ConsumerFactory.java │ │ ├── DelegatingConsumer.java │ │ ├── LiKafkaConsumer.java │ │ ├── LiKafkaConsumerBuilder.java │ │ ├── LiKafkaConsumerConfig.java │ │ ├── LiKafkaConsumerImpl.java │ │ ├── LiKafkaConsumerRebalanceListener.java │ │ ├── LiKafkaInstrumentedConsumerImpl.java │ │ ├── LiKafkaOffsetCommitCallback.java │ │ └── LiOffsetResetStrategy.java │ │ ├── largemessage │ │ ├── ConsumerRecordsProcessResult.java │ │ ├── ConsumerRecordsProcessor.java │ │ ├── DefaultSegmentDeserializer.java │ │ ├── DefaultSegmentSerializer.java │ │ ├── DeliveredMessageOffsetTracker.java │ │ ├── LargeMessage.java │ │ ├── LargeMessageBufferPool.java │ │ ├── LargeMessageCallback.java │ │ ├── LargeMessageOffsetTracker.java │ │ ├── LargeMessageSegment.java │ │ ├── 
MessageAssembler.java │ │ ├── MessageAssemblerImpl.java │ │ ├── MessageSplitter.java │ │ ├── MessageSplitterImpl.java │ │ └── errors │ │ │ ├── ConsumerRecordsProcessingException.java │ │ │ ├── InvalidSegmentException.java │ │ │ ├── LargeMessageDroppedException.java │ │ │ ├── LargeMessageException.java │ │ │ ├── LargeMessageSendException.java │ │ │ ├── OffsetNotTrackedException.java │ │ │ ├── RecordProcessingException.java │ │ │ └── SkippableException.java │ │ ├── producer │ │ ├── DelegatingProducer.java │ │ ├── LiKafkaInstrumentedProducerImpl.java │ │ ├── LiKafkaProducer.java │ │ ├── LiKafkaProducerBuilder.java │ │ ├── LiKafkaProducerConfig.java │ │ ├── LiKafkaProducerImpl.java │ │ ├── ProducerFactory.java │ │ └── UUIDFactory.java │ │ └── utils │ │ ├── CloseableLock.java │ │ ├── CompositeCollection.java │ │ ├── CompositeIterator.java │ │ ├── CompositeMap.java │ │ ├── CompositeSet.java │ │ ├── Constants.java │ │ ├── KafkaConsumerLock.java │ │ ├── LiKafkaClientsUtils.java │ │ ├── PrimitiveEncoderDecoder.java │ │ └── QueuedMap.java │ └── test │ ├── java │ └── com │ │ └── linkedin │ │ └── kafka │ │ └── clients │ │ ├── auditing │ │ └── abstractimpl │ │ │ ├── AbstractAuditorTest.java │ │ │ └── CountingAuditStatsTest.java │ │ ├── consumer │ │ └── MockLiKafkaConsumer.java │ │ ├── largemessage │ │ ├── ConsumerRecordsProcessorTest.java │ │ ├── DefaultSegmentDeserializerTest.java │ │ ├── LageMessageCallbackTest.java │ │ ├── LargeMessageBufferPoolTest.java │ │ ├── LargeMessageTest.java │ │ ├── MessageAssemblerTest.java │ │ ├── MessageSplitterTest.java │ │ └── SerializerDeserializerTest.java │ │ ├── producer │ │ └── MockLiKafkaProducer.java │ │ └── utils │ │ ├── CompositeMapTest.java │ │ ├── LiKafkaClientsTestUtils.java │ │ ├── LiKafkaClientsUtilsTest.java │ │ ├── PrimitiveEncoderDecoderTest.java │ │ └── QueuedMapTest.java │ └── resources │ └── log4j.properties ├── semantic-build-versioning.gradle └── settings.gradle /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | 5 | build: 6 | working_directory: ~/workspace 7 | docker: 8 | - image: circleci/openjdk:8-jdk 9 | steps: 10 | - checkout 11 | - restore_cache: 12 | key: dependency-cache-{{ checksum "build.gradle" }} 13 | - run: 14 | command: ./gradlew clean build 15 | - save_cache: 16 | key: dependency-cache-{{ checksum "build.gradle" }} 17 | paths: 18 | - ~/.gradle 19 | - run: 20 | command: mkdir ~/test-results 21 | - run: 22 | command: find ~/workspace -type f -regex ".*/test-results/.*xml" -exec ln {} ~/test-results/ \; 23 | - store_test_results: 24 | path: ~/test-results 25 | - store_artifacts: 26 | path: ~/test-results 27 | - store_artifacts: 28 | path: build/libs 29 | 30 | publish: 31 | working_directory: ~/workspace 32 | docker: 33 | - image: circleci/openjdk:8-jdk 34 | steps: 35 | - checkout 36 | - restore_cache: 37 | key: dependency-cache-{{ checksum "build.gradle" }} 38 | - run: 39 | command: ./gradlew distributeBuild 40 | 41 | workflows: 42 | version: 2 43 | build-and-publish: 44 | jobs: 45 | - build: 46 | filters: 47 | tags: 48 | only: /.*/ 49 | - publish: 50 | requires: 51 | - build 52 | filters: 53 | branches: 54 | ignore: /.*/ 55 | tags: 56 | only: /^[0-9]+\.[0-9]+\.[0-9]+$/ 57 | -------------------------------------------------------------------------------- /.circleci/publish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | currentTag=`git describe --tags` 6 | buildVersion=`grep 
-oP '^[\s]*version[\s]*=[\s]*\K([^\s]*)(?=([\s]*))' gradle.properties` 7 | 8 | echo current tag is $currentTag, build version is $buildVersion 9 | 10 | if [ "x$currentTag" != "x$buildVersion" ]; then 11 | echo "current tag version does not match project version" 12 | exit 1 13 | fi 14 | 15 | echo "Publishing a release" 16 | ./gradlew distributeBuild 17 | -------------------------------------------------------------------------------- /.circleci/publishOnCommitMsg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if git log -1 --pretty=%B | grep "^RELEASE.*"; 4 | then 5 | echo "Publishing a release on commit msg" 6 | ./gradlew distributeBuild 7 | else 8 | echo "Not a release by commit msg" 9 | fi 10 | 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #gradle 2 | .gradle/ 3 | build/ 4 | .mario/ 5 | 6 | #idea 7 | .idea/ 8 | *.iml 9 | *.ipr 10 | *.iws 11 | out/ 12 | 13 | #eclipse 14 | .project 15 | .settings 16 | .classpath 17 | 18 | #vi 19 | *~ 20 | 21 | #osx 22 | .DS_Store 23 | 24 | #mario 25 | .mario/ -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contribution Agreement 2 | ====================== 3 | 4 | As a contributor, you represent that the code you submit is your 5 | original work or that of your employer (in which case you represent you 6 | have the right to bind your employer). By submitting code, you (and, if 7 | applicable, your employer) are licensing the submitted code to LinkedIn 8 | and the open source community subject to the BSD 2-Clause license. 9 | 10 | Responsible Disclosure of Security Vulnerabilities 11 | ================================================== 12 | 13 | Please do not file reports on GitHub for security issues. 14 | Please review the guidelines at https://www.linkedin.com/help/linkedin/answer/62924/security-vulnerabilities?lang=en 15 | 16 | Tips for Getting Your Pull Request Accepted 17 | =========================================== 18 | 19 | 1. Make sure all new features are tested and the tests pass. 20 | 2. Bug fixes must include a test case demonstrating the error they fix. 21 | 3. Open an issue first and seek advice for your change before submitting 22 | a pull request. Large features which have never been discussed are 23 | unlikely to be accepted. 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-CLAUSE LICENSE 2 | 3 | Copyright 2017 LinkedIn Corporation. 4 | All Rights Reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution.
15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2017 LinkedIn Corporation 2 | All Rights Reserved. 3 | 4 | Licensed under the BSD 2-Clause License (the "License"). 5 | See LICENSE in the project root for license information. 6 | 7 | This product includes software developed by The Apache Software 8 | Foundation (http://www.apache.org/). 9 | 10 | This product includes/uses Jackson (http://jackson.codehaus.org/) 11 | Copyright (c) 2007- Tatu Saloranta, tatu.saloranta@iki.fi 12 | License: Apache 2.0 13 | 14 | This product includes/uses SLF4J (http://slf4j.org) 15 | Copyright (c) 2004 QOS.ch 16 | License: MIT 17 | 18 | This product includes/uses TestNG (http://testng.org/) 19 | Copyright (c) 2004 Cedric Beust 20 | License: Apache 2.0 21 | 22 | This product includes/uses BouncyCastle (http://www.bouncycastle.org/) 23 | Copyright (c) 2000 - 2016 The Legion of the Bouncy Castle Inc. 24 | License: MIT 25 | 26 | This product includes/uses Scala (http://www.scala-lang.org/) 27 | Copyright (c) 2002-2016 EPFL 28 | Copyright (c) 2011-2016 Lightbend, Inc. (formerly Typesafe, Inc.) 29 | All rights reserved.
30 | License: BSD -------------------------------------------------------------------------------- /buildSrc/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "java-gradle-plugin" 3 | id "groovy" 4 | id "idea" 5 | } 6 | 7 | repositories { 8 | mavenLocal() 9 | jcenter() 10 | } 11 | 12 | dependencies { 13 | compile gradleApi() 14 | compile localGroovy() 15 | 16 | compile 'org.ajoberstar:gradle-git:1.2.0' 17 | compile group: 'org.apache.httpcomponents', name: 'fluent-hc', version: '4.5.2' 18 | compile('org.jfrog.buildinfo:build-info-extractor-gradle:4.8.1') { 19 | exclude module: 'groovy-all' 20 | } 21 | } -------------------------------------------------------------------------------- /buildSrc/src/main/groovy/com/linkedin/gradle/build/DistributeTask.groovy: -------------------------------------------------------------------------------- 1 | package com.linkedin.gradle.build 2 | 3 | import groovy.json.JsonBuilder 4 | import org.apache.http.client.fluent.Request 5 | import org.apache.http.entity.ContentType 6 | import org.gradle.api.DefaultTask 7 | import org.gradle.api.tasks.TaskAction 8 | import org.jfrog.gradle.plugin.artifactory.dsl.ArtifactoryPluginConvention 9 | 10 | class DistributeTask extends DefaultTask { 11 | 12 | @TaskAction 13 | public void distributeBuild() { 14 | ArtifactoryPluginConvention convention = project.convention.plugins.artifactory 15 | def buildNumber = convention.clientConfig.info.buildNumber 16 | def buildName = convention.clientConfig.info.buildName 17 | def context = convention.clientConfig.publisher.contextUrl 18 | def password = convention.clientConfig.publisher.password 19 | 20 | if (password == null || password.equals("")) { 21 | throw new IllegalArgumentException("password not set") 22 | } 23 | 24 | def body = [ 25 | "publish" : "true", 26 | "overrideExistingFiles": "false", 27 | "async" : "true", 28 | "targetRepo" : "maven", 29 | "sourceRepos" : ["li-apache-kafka-clients"], 30 | "dryRun" : "false" 31 | ] 32 | 33 | def bodyString = new JsonBuilder(body).toString() 34 | 35 | def url = "$context/api/build/distribute/$buildName/$buildNumber" 36 | logger.lifecycle("url {}", url) 37 | def response = Request.Post(url) 38 | .bodyString(bodyString, ContentType.APPLICATION_JSON) 39 | .addHeader("X-JFrog-Art-Api", password) 40 | .execute() 41 | .returnResponse() 42 | 43 | def bout = new ByteArrayOutputStream() 44 | response.getEntity().writeTo(bout); 45 | String errMsg = new String(bout.toByteArray()); 46 | logger.lifecycle("Distribute Response: {} {}", response, errMsg) 47 | 48 | if (!Integer.toString(response.getStatusLine().getStatusCode()).startsWith("2")) { 49 | throw new IOException("http post failed") 50 | } 51 | } 52 | } -------------------------------------------------------------------------------- /gradle.properties: -------------------------------------------------------------------------------- 1 | org.gradle.daemon=false 2 | org.gradle.parallel=false 3 | org.gradle.jvmargs=-Xms512m -Xmx512m 4 | systemProp.sonar.host.url=https://sonarcloud.io -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linkedin/li-apache-kafka-clients/b252f7a8fa7494773a9c6e88ab3545529fabbe28/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- 
/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | zipStoreBase=GRADLE_USER_HOME 4 | zipStorePath=wrapper/dists 5 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-all.zip 6 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Attempt to set APP_HOME 10 | # Resolve links: $0 may be a link 11 | PRG="$0" 12 | # Need this for relative symlinks. 13 | while [ -h "$PRG" ] ; do 14 | ls=`ls -ld "$PRG"` 15 | link=`expr "$ls" : '.*-> \(.*\)$'` 16 | if expr "$link" : '/.*' > /dev/null; then 17 | PRG="$link" 18 | else 19 | PRG=`dirname "$PRG"`"/$link" 20 | fi 21 | done 22 | SAVED="`pwd`" 23 | cd "`dirname \"$PRG\"`/" >/dev/null 24 | APP_HOME="`pwd -P`" 25 | cd "$SAVED" >/dev/null 26 | 27 | APP_NAME="Gradle" 28 | APP_BASE_NAME=`basename "$0"` 29 | 30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 31 | DEFAULT_JVM_OPTS="" 32 | 33 | # Use the maximum available, or set MAX_FD != -1 to use that value. 34 | MAX_FD="maximum" 35 | 36 | warn () { 37 | echo "$*" 38 | } 39 | 40 | die () { 41 | echo 42 | echo "$*" 43 | echo 44 | exit 1 45 | } 46 | 47 | # OS specific support (must be 'true' or 'false'). 48 | cygwin=false 49 | msys=false 50 | darwin=false 51 | nonstop=false 52 | case "`uname`" in 53 | CYGWIN* ) 54 | cygwin=true 55 | ;; 56 | Darwin* ) 57 | darwin=true 58 | ;; 59 | MINGW* ) 60 | msys=true 61 | ;; 62 | NONSTOP* ) 63 | nonstop=true 64 | ;; 65 | esac 66 | 67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 68 | 69 | # Determine the Java command to use to start the JVM. 70 | if [ -n "$JAVA_HOME" ] ; then 71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 72 | # IBM's JDK on AIX uses strange locations for the executables 73 | JAVACMD="$JAVA_HOME/jre/sh/java" 74 | else 75 | JAVACMD="$JAVA_HOME/bin/java" 76 | fi 77 | if [ ! -x "$JAVACMD" ] ; then 78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 79 | 80 | Please set the JAVA_HOME variable in your environment to match the 81 | location of your Java installation." 82 | fi 83 | else 84 | JAVACMD="java" 85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 86 | 87 | Please set the JAVA_HOME variable in your environment to match the 88 | location of your Java installation." 89 | fi 90 | 91 | # Increase the maximum file descriptors if we can. 92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 93 | MAX_FD_LIMIT=`ulimit -H -n` 94 | if [ $? -eq 0 ] ; then 95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 96 | MAX_FD="$MAX_FD_LIMIT" 97 | fi 98 | ulimit -n $MAX_FD 99 | if [ $? 
-ne 0 ] ; then 100 | warn "Could not set maximum file descriptor limit: $MAX_FD" 101 | fi 102 | else 103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 104 | fi 105 | fi 106 | 107 | # For Darwin, add options to specify how the application appears in the dock 108 | if $darwin; then 109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 110 | fi 111 | 112 | # For Cygwin, switch paths to Windows format before running java 113 | if $cygwin ; then 114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 116 | JAVACMD=`cygpath --unix "$JAVACMD"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Escape application args 158 | save () { 159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 160 | echo " " 161 | } 162 | APP_ARGS=$(save "$@") 163 | 164 | # Collect all arguments for the java command, following the shell quoting and substitution rules 165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 166 | 167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong 168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then 169 | cd "$(dirname "$0")" 170 | fi 171 | 172 | exec "$JAVACMD" "$@" 173 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @if "%DEBUG%" == "" @echo off 2 | @rem ########################################################################## 3 | @rem 4 | @rem Gradle startup script for Windows 5 | @rem 6 | @rem ########################################################################## 7 | 8 | @rem Set local scope for the variables with windows NT shell 9 | if "%OS%"=="Windows_NT" setlocal 10 | 11 | set DIRNAME=%~dp0 12 | 
if "%DIRNAME%" == "" set DIRNAME=. 13 | set APP_BASE_NAME=%~n0 14 | set APP_HOME=%DIRNAME% 15 | 16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 17 | set DEFAULT_JVM_OPTS= 18 | 19 | @rem Find java.exe 20 | if defined JAVA_HOME goto findJavaFromJavaHome 21 | 22 | set JAVA_EXE=java.exe 23 | %JAVA_EXE% -version >NUL 2>&1 24 | if "%ERRORLEVEL%" == "0" goto init 25 | 26 | echo. 27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 28 | echo. 29 | echo Please set the JAVA_HOME variable in your environment to match the 30 | echo location of your Java installation. 31 | 32 | goto fail 33 | 34 | :findJavaFromJavaHome 35 | set JAVA_HOME=%JAVA_HOME:"=% 36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 37 | 38 | if exist "%JAVA_EXE%" goto init 39 | 40 | echo. 41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 42 | echo. 43 | echo Please set the JAVA_HOME variable in your environment to match the 44 | echo location of your Java installation. 45 | 46 | goto fail 47 | 48 | :init 49 | @rem Get command-line arguments, handling Windows variants 50 | 51 | if not "%OS%" == "Windows_NT" goto win9xME_args 52 | 53 | :win9xME_args 54 | @rem Slurp the command line arguments. 55 | set CMD_LINE_ARGS= 56 | set _SKIP=2 57 | 58 | :win9xME_args_slurp 59 | if "x%~1" == "x" goto execute 60 | 61 | set CMD_LINE_ARGS=%* 62 | 63 | :execute 64 | @rem Setup the command line 65 | 66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 67 | 68 | @rem Execute Gradle 69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 70 | 71 | :end 72 | @rem End local scope for the variables with windows NT shell 73 | if "%ERRORLEVEL%"=="0" goto mainEnd 74 | 75 | :fail 76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 77 | rem the _cmd.exe /c_ return code! 78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 79 | exit /b 1 80 | 81 | :mainEnd 82 | if "%OS%"=="Windows_NT" endlocal 83 | 84 | :omega 85 | -------------------------------------------------------------------------------- /integration-tests/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. 3 | * Licensed under the BSD 2-Clause License (the "License").
 4 | * See License in the project root for license information. 5 | */ 6 | plugins { 7 | id "java" 8 | id "idea" 9 | } 10 | 11 | dependencies { 12 | testCompile (project(":kafka-test-harness")) { 13 | exclude group: 'org.slf4j', module:'slf4j-log4j12' 14 | } 15 | testCompile ("com.linkedin.mario:mario-vertx:${rootProject.ext.marioVersion}") { 16 | exclude group: "com.linkedin.northguard" 17 | } 18 | // testCompile ("com.linkedin.mario:mario-integration-tests:${rootProject.ext.marioVersion}") { 19 | // exclude group: 'org.apache.kafka' 20 | // } 21 | testCompile "commons-lang:commons-lang:2.6" 22 | } -------------------------------------------------------------------------------- /integration-tests/src/test/java/com/linkedin/kafka/clients/auditing/ConfigureAuditorTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing; 6 | 7 | import com.linkedin.kafka.clients.consumer.LiKafkaConsumer; 8 | import com.linkedin.kafka.clients.consumer.LiKafkaConsumerImpl; 9 | import com.linkedin.kafka.clients.producer.LiKafkaProducer; 10 | import com.linkedin.kafka.clients.producer.LiKafkaProducerConfig; 11 | import com.linkedin.kafka.clients.producer.LiKafkaProducerImpl; 12 | import com.linkedin.kafka.clients.utils.tests.AbstractKafkaClientsIntegrationTestHarness; 13 | import java.util.concurrent.TimeUnit; 14 | import org.testng.annotations.BeforeMethod; 15 | import org.testng.annotations.Test; 16 | 17 | import java.util.Map; 18 | import java.util.Properties; 19 | 20 | import static org.testng.AssertJUnit.assertEquals; 21 | 22 | public class ConfigureAuditorTest extends AbstractKafkaClientsIntegrationTestHarness { 23 | @BeforeMethod 24 | @Override 25 | public void setUp() { 26 | super.setUp(); 27 | } 28 | 29 | @Test 30 | public void testProducerConfigure() { 31 | Properties props = new Properties(); 32 | props.setProperty(LiKafkaProducerConfig.AUDITOR_CLASS_CONFIG, TestAuditor.class.getName()); 33 | 34 | LiKafkaProducer<String, String> producerConfig = createProducer(props); 35 | assertEquals(1, TestAuditor.configureMethodInvocations.get().intValue()); 36 | producerConfig.close(); 37 | 38 | final TestAuditor producerAuditor = new TestAuditor(); 39 | LiKafkaProducer<String, String> producerInstance = new LiKafkaProducerImpl<>(getProducerProperties(new Properties()), 40 | null, null, null, producerAuditor); 41 | assertEquals(1, TestAuditor.configureMethodInvocations.get().intValue()); 42 | producerInstance.close(); 43 | } 44 | 45 | @Test 46 | public void testConsumerConfigure() { 47 | Properties props = new Properties(); 48 | props.setProperty(LiKafkaProducerConfig.AUDITOR_CLASS_CONFIG, TestAuditor.class.getName()); 49 | 50 | LiKafkaConsumer<String, String> producerConfig = createConsumer(props); 51 | assertEquals(1, TestAuditor.configureMethodInvocations.get().intValue()); 52 | producerConfig.close(); 53 | 54 | final TestAuditor consumerAuditor = new TestAuditor(); 55 | LiKafkaConsumerImpl<String, String> producerInstance = new LiKafkaConsumerImpl<>(getConsumerProperties(new Properties()), 56 | null, null, null, consumerAuditor); 57 | assertEquals(1, TestAuditor.configureMethodInvocations.get().intValue()); 58 | producerInstance.close(); 59 | } 60 | 61 | public static class TestAuditor extends NoOpAuditor<String, String> { 62 | private static ThreadLocal<Integer> configureMethodInvocations = ThreadLocal.withInitial(() -> 0); 63 | 64 | @Override 65 | public void configure(Map<String, ?> configs) { 66 | configureMethodInvocations.set(configureMethodInvocations.get() + 1); 67 | } 68 | 69 | @Override 70 | public void close() { 71 | configureMethodInvocations.set(0); 72 | } 73 | 74 | @Override 75 | public void close(long timeout, TimeUnit unit) { 76 | configureMethodInvocations.set(0); 77 | } 78 | 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /integration-tests/src/test/java/com/linkedin/kafka/clients/consumer/LiKafkaConsumerSSLIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import org.apache.kafka.common.security.auth.SecurityProtocol; 10 | import org.testng.Assert; 11 | import org.testng.annotations.Test; 12 | 13 | 14 | @Test 15 | public class LiKafkaConsumerSSLIntegrationTest extends LiKafkaConsumerIntegrationTest { 16 | 17 | private File _trustStoreFile; 18 | 19 | public LiKafkaConsumerSSLIntegrationTest() { 20 | super(); 21 | try { 22 | _trustStoreFile = File.createTempFile("truststore", ".jks"); 23 | } catch (IOException e) { 24 | Assert.fail("Failed to create trust store"); 25 | } 26 | } 27 | 28 | @Override 29 | public File trustStoreFile() { 30 | return _trustStoreFile; 31 | } 32 | 33 | @Override 34 | public SecurityProtocol securityProtocol() { 35 | return SecurityProtocol.SSL; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /integration-tests/src/test/java/com/linkedin/kafka/clients/producer/LiKafkaProducerSSLIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import org.apache.kafka.common.security.auth.SecurityProtocol; 10 | import org.testng.Assert; 11 | 12 | 13 | public class LiKafkaProducerSSLIntegrationTest extends LiKafkaProducerIntegrationTest { 14 | 15 | private File _trustStoreFile; 16 | 17 | public LiKafkaProducerSSLIntegrationTest() { 18 | super(); 19 | try { 20 | _trustStoreFile = File.createTempFile("truststore", ".jks"); 21 | } catch (IOException e) { 22 | Assert.fail("Failed to create trust store"); 23 | } 24 | } 25 | 26 | @Override 27 | public File trustStoreFile() { 28 | return _trustStoreFile; 29 | } 30 | 31 | @Override 32 | public SecurityProtocol securityProtocol() { 33 | return SecurityProtocol.SSL; 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /integration-tests/src/test/java/com/linkedin/kafka/clients/producer/RoundRobinPartitioner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import java.util.List; 8 | import java.util.Map; 9 | import org.apache.kafka.clients.producer.Partitioner; 10 | import org.apache.kafka.common.Cluster; 11 | import org.apache.kafka.common.PartitionInfo; 12 | 13 | 14 | /** 15 | * a partitioner that sends every msg to a different partition, round-robin style 16 | */ 17 | public class RoundRobinPartitioner implements Partitioner { 18 | private int counter = 0; 19 | 20 | @Override 21 | public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { 22 | List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); 23 | int numPartitions = partitions.size(); 24 | int p = counter % numPartitions; 25 | counter++; 26 | return partitions.get(p).partition(); 27 | } 28 | 29 | @Override 30 | public void close() { 31 | 32 | } 33 | 34 | @Override 35 | public void configure(Map<String, ?> configs) { 36 | 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /integration-tests/src/test/java/com/linkedin/kafka/clients/utils/LiKafkaClientsTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import com.linkedin.kafka.clients.largemessage.LargeMessageSegment; 8 | 9 | import java.nio.ByteBuffer; 10 | import java.util.Arrays; 11 | import java.util.Random; 12 | import java.util.UUID; 13 | 14 | import static org.testng.Assert.assertEquals; 15 | 16 | /** 17 | * The util class for unit test. 18 | */ 19 | public class LiKafkaClientsTestUtils { 20 | 21 | private LiKafkaClientsTestUtils() { 22 | } 23 | 24 | public static LargeMessageSegment createLargeMessageSegment(UUID messageId, 25 | int seq, 26 | int numberOfSegments, 27 | int messageSizeInBytes, 28 | int segmentSize) { 29 | byte[] bytes = new byte[segmentSize]; 30 | Arrays.fill(bytes, (byte) seq); 31 | return new LargeMessageSegment(messageId, seq, numberOfSegments, messageSizeInBytes, ByteBuffer.wrap(bytes)); 32 | } 33 | 34 | public static void verifyMessage(byte[] serializedMessage, int messageSizeInBytes, int segmentSize) { 35 | int i = 0; 36 | for (; i < messageSizeInBytes / segmentSize; i++) { 37 | for (int j = 0; j < segmentSize; j++) { 38 | assertEquals(serializedMessage[i * segmentSize + j], (byte) i, "Byte value should match seq."); 39 | } 40 | } 41 | for (int j = 0; j < messageSizeInBytes % segmentSize; j++) { 42 | assertEquals(serializedMessage[i * segmentSize + j], (byte) i, "Byte value should match seq."); 43 | } 44 | } 45 | 46 | public static String getRandomString(int length) { 47 | char[] chars = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; 48 | Random random = new Random(); 49 | StringBuilder stringBuiler = new StringBuilder(); 50 | for (int i = 0; i < length; i++) { 51 | stringBuiler.append(chars[Math.abs(random.nextInt()) % 16]); 52 | } 53 | return stringBuiler.toString(); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /integration-tests/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | # 4 | 5 | log4j.rootLogger=INFO, stdout 6 | 7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 9 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n 10 | 11 | log4j.logger.com.linkedin.kafka.clients=ERROR 12 | log4j.logger.org.apache.kafka=ERROR 13 | 14 | # zkclient can be verbose, during debugging it is common to adjust is separately 15 | log4j.logger.org.I0Itec.zkclient.ZkClient=WARN 16 | log4j.logger.org.apache.zookeeper=WARN 17 | -------------------------------------------------------------------------------- /kafka-test-harness/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. 3 | * Licensed under the BSD 2-Clause License (the "License").
 4 | * See License in the project root for license information. 5 | */ 6 | plugins { 7 | id "java-library" 8 | id "maven-publish" 9 | id "com.jfrog.artifactory" 10 | id "idea" 11 | } 12 | 13 | dependencies { 14 | compile project(':li-apache-kafka-clients') 15 | compile "com.linkedin.kafka:kafka-clients:${rootProject.ext.liKafkaVersion}" 16 | //matches kafka server dependency 17 | compile ("org.apache.zookeeper:zookeeper:3.4.9") { 18 | exclude group: 'org.slf4j', module:'slf4j-log4j12' 19 | } 20 | compile 'com.101tec:zkclient:0.10' 21 | compile 'commons-io:commons-io:2.6' 22 | compile 'org.bouncycastle:bcpkix-jdk15on:1.59' 23 | runtime "com.linkedin.kafka:kafka_2.11:${rootProject.ext.liKafkaVersion}" 24 | } 25 | 26 | publishing { 27 | publications { 28 | java(MavenPublication) { 29 | from components.java 30 | artifact sourcesJar 31 | artifact javadocJar 32 | pom.withXml { 33 | def root = asNode() 34 | root.appendNode('name', 'kafka-test-harness') 35 | root.appendNode('description', 'a test harness for kafka-related integration tests') 36 | root.children().last() + rootProject.ext.pomConfig 37 | } 38 | } 39 | } 40 | } 41 | 42 | artifactoryPublish.dependsOn assemble 43 | artifactoryPublish.dependsOn publishToMavenLocal 44 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/AbstractKafkaClientsIntegrationTestHarness.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import com.linkedin.kafka.clients.consumer.LiKafkaConsumer; 8 | import com.linkedin.kafka.clients.consumer.LiKafkaConsumerConfig; 9 | import com.linkedin.kafka.clients.consumer.LiKafkaConsumerImpl; 10 | import com.linkedin.kafka.clients.largemessage.DefaultSegmentDeserializer; 11 | import com.linkedin.kafka.clients.largemessage.DefaultSegmentSerializer; 12 | import com.linkedin.kafka.clients.producer.LiKafkaProducer; 13 | import com.linkedin.kafka.clients.producer.LiKafkaProducerConfig; 14 | import com.linkedin.kafka.clients.producer.LiKafkaProducerImpl; 15 | import java.io.File; 16 | import java.util.Properties; 17 | import org.apache.kafka.clients.CommonClientConfigs; 18 | import org.apache.kafka.clients.admin.AdminClient; 19 | import org.apache.kafka.clients.admin.AdminClientConfig; 20 | import org.apache.kafka.clients.consumer.Consumer; 21 | import org.apache.kafka.clients.consumer.ConsumerConfig; 22 | import org.apache.kafka.clients.consumer.KafkaConsumer; 23 | import org.apache.kafka.clients.producer.KafkaProducer; 24 | import org.apache.kafka.clients.producer.Producer; 25 | import org.apache.kafka.clients.producer.ProducerConfig; 26 | import org.apache.kafka.common.network.Mode; 27 | import org.apache.kafka.common.security.auth.SecurityProtocol; 28 | import org.apache.kafka.common.serialization.ByteArrayDeserializer; 29 | import org.apache.kafka.common.serialization.ByteArraySerializer; 30 | import org.apache.kafka.common.serialization.StringDeserializer; 31 | import org.apache.kafka.common.serialization.StringSerializer; 32 | 33 | 34 | public abstract class AbstractKafkaClientsIntegrationTestHarness extends AbstractKafkaIntegrationTestHarness { 35 | protected final static int DEFAULT_MAX_SEGMENT_BYTES = 200; 36 | 37 | @Override 38 | public void setUp() { 39 | super.setUp(); 40 | } 41 | 42 | protected LiKafkaProducer createProducer(Properties overrides) { 43 | Properties props = getProducerProperties(overrides); 44 | return new LiKafkaProducerImpl<>(props); 45 | } 46 | 47 | protected Producer createRawProducer() { 48 | Properties props = new Properties(); 49 | props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName()); 50 | props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName()); 51 | Properties finalProducerProps = getProducerProperties(props); 52 | return new KafkaProducer<>(finalProducerProps); 53 | } 54 | 55 | protected Properties getProducerProperties(Properties overrides) { 56 | Properties result = new Properties(); 57 | 58 | //populate defaults 59 | result.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); 60 | result.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); 61 | result.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); 62 | 63 | setSecurityConfigs(result, "producer"); 64 | 65 | result.setProperty(LiKafkaProducerConfig.MAX_MESSAGE_SEGMENT_BYTES_CONFIG, "" + DEFAULT_MAX_SEGMENT_BYTES); 66 | result.setProperty(LiKafkaProducerConfig.SEGMENT_SERIALIZER_CLASS_CONFIG, DefaultSegmentSerializer.class.getCanonicalName()); 67 | 68 | //apply overrides 69 | if (overrides != null) { 70 | result.putAll(overrides); 71 | } 72 | 73 | return result; 74 | } 75 | 76 | protected LiKafkaConsumer createConsumer(Properties overrides) { 77 | 
Properties props = getConsumerProperties(overrides); 78 | return new LiKafkaConsumerImpl<>(props); 79 | } 80 | 81 | protected Consumer createRawConsumer() { 82 | Properties props = new Properties(); 83 | props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getCanonicalName()); 84 | props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getCanonicalName()); 85 | Properties finalConsumerProps = getConsumerProperties(props); 86 | return new KafkaConsumer<>(finalConsumerProps); 87 | } 88 | 89 | protected Properties getConsumerProperties(Properties overrides) { 90 | Properties result = new Properties(); 91 | 92 | //populate defaults 93 | result.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); 94 | result.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testingConsumer"); 95 | result.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName()); 96 | result.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName()); 97 | 98 | setSecurityConfigs(result, "consumer"); 99 | 100 | result.setProperty(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_BUFFER_CAPACITY_CONFIG, "300000"); 101 | result.setProperty(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_EXPIRATION_OFFSET_GAP_CONFIG, "10000"); 102 | result.setProperty(LiKafkaConsumerConfig.EXCEPTION_ON_MESSAGE_DROPPED_CONFIG, "true"); 103 | result.setProperty(LiKafkaConsumerConfig.MAX_TRACKED_MESSAGES_PER_PARTITION_CONFIG, "10000"); 104 | result.setProperty(LiKafkaConsumerConfig.SEGMENT_DESERIALIZER_CLASS_CONFIG, DefaultSegmentDeserializer.class.getCanonicalName()); 105 | 106 | //apply overrides 107 | if (overrides != null) { 108 | result.putAll(overrides); 109 | } 110 | 111 | return result; 112 | } 113 | 114 | protected AdminClient createRawAdminClient(Properties overrides) { 115 | Properties props = getAdminClientProperties(overrides); 116 | return AdminClient.create(props); 117 | } 118 | 119 | protected Properties getAdminClientProperties(Properties overrides) { 120 | Properties result = new Properties(); 121 | 122 | //populate defaults 123 | result.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); 124 | 125 | setSecurityConfigs(result, "adminClient"); 126 | 127 | //apply overrides 128 | if (overrides != null) { 129 | result.putAll(overrides); 130 | } 131 | 132 | return result; 133 | } 134 | 135 | protected void setSecurityConfigs(Properties clientProps, String certAlias) { 136 | SecurityProtocol protocol = securityProtocol(); 137 | if (protocol == SecurityProtocol.SSL) { 138 | File trustStoreFile = trustStoreFile(); 139 | if (trustStoreFile == null) { 140 | throw new AssertionError("ssl set but no trust store provided"); 141 | } 142 | clientProps.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol.name); 143 | try { 144 | clientProps.putAll(TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, certAlias)); 145 | } catch (Exception e) { 146 | throw new IllegalStateException(e); 147 | } 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/AbstractKafkaIntegrationTestHarness.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.io.File; 8 | import java.util.ArrayList; 9 | import java.util.Collections; 10 | import java.util.HashSet; 11 | import java.util.LinkedHashMap; 12 | import java.util.List; 13 | import java.util.Map; 14 | import java.util.Random; 15 | import java.util.Set; 16 | import java.util.StringJoiner; 17 | import org.apache.kafka.common.security.auth.SecurityProtocol; 18 | 19 | 20 | public abstract class AbstractKafkaIntegrationTestHarness extends AbstractZookeeperTestHarness { 21 | private final static Random RANDOM = new Random(); 22 | protected Map _brokers = null; 23 | protected Set _deadBrokers; 24 | protected String _bootstrapUrl; 25 | 26 | @Override 27 | public void setUp() { 28 | super.setUp(); 29 | if (_brokers != null) { 30 | return; 31 | } 32 | 33 | _brokers = new LinkedHashMap<>(); 34 | List> brokerConfigs = buildBrokerConfigs(); 35 | if (brokerConfigs == null || brokerConfigs.isEmpty()) { 36 | throw new AssertionError("Broker configs " + brokerConfigs + " should not be null or empty"); 37 | } 38 | for (Map brokerConfig : brokerConfigs) { 39 | EmbeddedBroker broker = new EmbeddedBroker(brokerConfig); 40 | int id = broker.getId(); 41 | if (_brokers.putIfAbsent(id, broker) != null) { 42 | KafkaTestUtils.quietly(broker::close); //wont be picked up by teardown 43 | throw new IllegalStateException("multiple brokers defined with id " + id); 44 | } 45 | } 46 | 47 | StringJoiner joiner = new StringJoiner(","); 48 | _brokers.values().forEach(broker -> joiner.add(broker.getAddr(securityProtocol()))); 49 | _bootstrapUrl = joiner.toString(); 50 | _deadBrokers = new HashSet<>(); 51 | } 52 | 53 | @Override 54 | public void tearDown() { 55 | try { 56 | if (_brokers != null) { 57 | for (EmbeddedBroker broker : _brokers.values()) { 58 | KafkaTestUtils.quietly(broker::close); 59 | } 60 | _brokers.clear(); 61 | _brokers = null; 62 | } 63 | } finally { 64 | super.tearDown(); 65 | } 66 | } 67 | 68 | protected EmbeddedBroker serverForId(int id) { 69 | return broker(id); 70 | } 71 | 72 | protected EmbeddedBroker broker(int id) { 73 | EmbeddedBroker broker = _brokers.get(id); 74 | if (broker == null) { 75 | throw new IllegalArgumentException("Invalid server id " + id); 76 | } 77 | return broker; 78 | } 79 | 80 | public String bootstrapServers() { 81 | return _bootstrapUrl; 82 | } 83 | 84 | /** 85 | * returns the list of broker configs for all brokers created by this test 86 | * (as determined by clusterSize() 87 | * @return list of broker configs, one config map per broker to be created 88 | */ 89 | protected List> buildBrokerConfigs() { 90 | List> configs = new ArrayList<>(); 91 | for (int i = 0; i < clusterSize(); i++) { 92 | EmbeddedBrokerBuilder builder = new EmbeddedBrokerBuilder(); 93 | builder.zkConnect(zookeeper()); 94 | builder.nodeId(i); 95 | builder.enable(securityProtocol()); 96 | if (securityProtocol() == SecurityProtocol.SSL) { 97 | if (trustStoreFile() != null) { 98 | builder.trustStore(trustStoreFile()); 99 | } 100 | } else { 101 | if (trustStoreFile() != null) { 102 | throw new AssertionError("security protocol not set yet trust store file provided"); 103 | } 104 | } 105 | Map config = builder.buildConfig(); 106 | config.putAll(overridingProps()); 107 | configs.add(config); 108 | } 109 | return configs; 110 | } 111 | 112 | protected SecurityProtocol securityProtocol() { 113 | return SecurityProtocol.PLAINTEXT; 114 | } 115 | 116 | protected File 
trustStoreFile() { 117 | return null; 118 | } 119 | 120 | protected int clusterSize() { 121 | return 1; 122 | } 123 | 124 | protected Map overridingProps() { 125 | return Collections.emptyMap(); 126 | } 127 | 128 | /** 129 | * Kill broker by broker id 130 | * @param id id of broker 131 | * @throws Exception if anything goes wrong 132 | */ 133 | public void killBroker(int id) throws Exception { 134 | EmbeddedBroker broker = _brokers.get(id); 135 | 136 | if (!_deadBrokers.contains(id)) { 137 | broker.shutdown(); 138 | broker.awaitShutdown(); 139 | _deadBrokers.add(id); 140 | } 141 | } 142 | 143 | /** 144 | * Kill a random broker that is not alive. 145 | * 146 | * @return id of broker killed 147 | * @throws Exception if anything goes wrong 148 | */ 149 | public int killRandomBroker() throws Exception { 150 | int index = RANDOM.nextInt(_brokers.size()); 151 | int id = (Integer) _brokers.keySet().toArray()[index]; 152 | killBroker(id); 153 | return id; 154 | } 155 | 156 | /** 157 | * Restart all dead brokers 158 | * @return Returns a list of brokers that were restarted 159 | * @throws Exception all exceptions caused while starting brokers 160 | */ 161 | public List restartDeadBrokers() throws Exception { 162 | List brokersStarted = new ArrayList<>(); 163 | for (int id : _deadBrokers) { 164 | _brokers.get(id).startup(); 165 | brokersStarted.add(id); 166 | } 167 | _deadBrokers.clear(); 168 | return brokersStarted; 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/AbstractZookeeperTestHarness.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | public abstract class AbstractZookeeperTestHarness { 8 | protected EmbeddedZookeeper _zookeeper = null; 9 | 10 | public void setUp() { 11 | if (_zookeeper == null) { 12 | _zookeeper = new EmbeddedZookeeper(); 13 | } 14 | } 15 | 16 | public void tearDown() { 17 | if (_zookeeper != null) { 18 | KafkaTestUtils.quietly(() -> _zookeeper.close()); 19 | _zookeeper = null; 20 | } 21 | } 22 | 23 | protected EmbeddedZookeeper zookeeper() { 24 | return _zookeeper; 25 | } 26 | 27 | protected String zkConnect() { 28 | return zookeeper().getConnectionString(); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/EmbeddedBroker.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.io.File; 8 | import java.lang.reflect.Constructor; 9 | import java.lang.reflect.Method; 10 | import java.net.URI; 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | import org.apache.commons.io.FileUtils; 14 | import org.apache.kafka.common.network.ListenerName; 15 | import org.apache.kafka.common.security.auth.SecurityProtocol; 16 | import org.apache.kafka.common.utils.Time; 17 | 18 | 19 | public class EmbeddedBroker implements AutoCloseable { 20 | private final static Class KAFKA_CONFIG_CLASS; 21 | private final static Class KAFKA_SERVER_CLASS; 22 | private final static Class SCALA_OPTION_CLASS; 23 | private final static Class SCALA_SEQ_CLASS; 24 | private final static Class SCALA_ARRAY_BUFFER_CLASS; 25 | private final static Constructor CONFIG_CTR; 26 | private final static Constructor SERVER_CTR; 27 | private final static Constructor ARR_BUF_CTR; 28 | private final static Method STARTUP_METHOD; 29 | private final static Method SHUTDOWN_METHOD; 30 | private final static Method AWAIT_SHUTDOWN_METHOD; 31 | private final static Object EMPTY_OPTION; 32 | private final static Method BOUND_PORT_METHOD; 33 | 34 | static { 35 | try { 36 | KAFKA_CONFIG_CLASS = Class.forName("kafka.server.KafkaConfig"); 37 | KAFKA_SERVER_CLASS = Class.forName("kafka.server.KafkaServer"); 38 | SCALA_OPTION_CLASS = Class.forName("scala.Option"); 39 | SCALA_SEQ_CLASS = Class.forName("scala.collection.Seq"); 40 | SCALA_ARRAY_BUFFER_CLASS = Class.forName("scala.collection.mutable.ArrayBuffer"); 41 | CONFIG_CTR = KAFKA_CONFIG_CLASS.getConstructor(Map.class); 42 | SERVER_CTR = KAFKA_SERVER_CLASS.getConstructor(KAFKA_CONFIG_CLASS, Time.class, SCALA_OPTION_CLASS, SCALA_SEQ_CLASS); 43 | ARR_BUF_CTR = SCALA_ARRAY_BUFFER_CLASS.getConstructor(); 44 | STARTUP_METHOD = KAFKA_SERVER_CLASS.getMethod("startup"); 45 | SHUTDOWN_METHOD = KAFKA_SERVER_CLASS.getMethod("shutdown"); 46 | AWAIT_SHUTDOWN_METHOD = KAFKA_SERVER_CLASS.getMethod("awaitShutdown"); 47 | BOUND_PORT_METHOD = KAFKA_SERVER_CLASS.getMethod("boundPort", ListenerName.class); 48 | Method emptyOptionMethod = SCALA_OPTION_CLASS.getMethod("empty"); 49 | EMPTY_OPTION = emptyOptionMethod.invoke(null); 50 | } catch (Exception e) { 51 | throw new IllegalStateException(e); 52 | } 53 | } 54 | 55 | private int id; 56 | private File logDir; 57 | private Map ports = new HashMap<>(); 58 | private Map hosts = new HashMap<>(); 59 | private Object serverInstance; 60 | 61 | public EmbeddedBroker(Map config) { 62 | try { 63 | Object configInstance = CONFIG_CTR.newInstance(config); //also validates 64 | parseConfigs(config); 65 | Object emptyArrayBuffer = ARR_BUF_CTR.newInstance(); 66 | serverInstance = SERVER_CTR.newInstance(configInstance, Time.SYSTEM, EMPTY_OPTION, emptyArrayBuffer); 67 | startup(); 68 | ports.replaceAll((securityProtocol, port) -> { 69 | try { 70 | return (Integer) BOUND_PORT_METHOD.invoke(serverInstance, ListenerName.forSecurityProtocol(securityProtocol)); 71 | } catch (Exception e) { 72 | throw new IllegalStateException(e); 73 | } 74 | }); 75 | } catch (Exception e) { 76 | throw new IllegalStateException(e); 77 | } 78 | } 79 | 80 | private void parseConfigs(Map config) { 81 | id = Integer.parseInt((String) config.get("broker.id")); 82 | logDir = new File((String) config.get("log.dir")); 83 | 84 | //bind addresses 85 | String listenersString = (String) config.get("listeners"); 86 | for (String protocolAddr : 
listenersString.split("\\s*,\\s*")) { 87 | try { 88 | URI uri = new URI(protocolAddr.trim()); 89 | SecurityProtocol protocol = SecurityProtocol.forName(uri.getScheme()); 90 | hosts.put(protocol, uri.getHost()); 91 | ports.put(protocol, null); //we get the value after boot 92 | } catch (Exception e) { 93 | throw new IllegalStateException(e); 94 | } 95 | } 96 | } 97 | 98 | public int getId() { 99 | return id; 100 | } 101 | 102 | public String getAddr(SecurityProtocol protocol) { 103 | if (!hosts.containsKey(protocol)) { 104 | return null; 105 | } 106 | return hosts.get(protocol) + ":" + ports.get(protocol); 107 | } 108 | 109 | public String getPlaintextAddr() { 110 | return getAddr(SecurityProtocol.PLAINTEXT); 111 | } 112 | 113 | public String getSslAddr() { 114 | return getAddr(SecurityProtocol.SSL); 115 | } 116 | 117 | public void startup() throws Exception { 118 | STARTUP_METHOD.invoke(serverInstance); 119 | } 120 | 121 | public void shutdown() throws Exception { 122 | SHUTDOWN_METHOD.invoke(serverInstance); 123 | } 124 | 125 | public void awaitShutdown() throws Exception { 126 | AWAIT_SHUTDOWN_METHOD.invoke(serverInstance); 127 | } 128 | 129 | @Override 130 | public void close() throws Exception { 131 | KafkaTestUtils.quietly(this::shutdown); 132 | KafkaTestUtils.quietly(this::awaitShutdown); 133 | KafkaTestUtils.quietly(() -> FileUtils.forceDelete(logDir)); 134 | } 135 | 136 | public static EmbeddedBrokerBuilder newServer() { 137 | return new EmbeddedBrokerBuilder(); 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/EmbeddedZookeeper.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.io.File; 8 | import java.lang.reflect.Constructor; 9 | import java.lang.reflect.Method; 10 | import java.net.InetAddress; 11 | import java.net.InetSocketAddress; 12 | import java.util.concurrent.CountDownLatch; 13 | import org.apache.zookeeper.server.NIOServerCnxnFactory; 14 | import org.apache.zookeeper.server.ServerCnxnFactory; 15 | import org.apache.zookeeper.server.ZooKeeperServer; 16 | 17 | 18 | public class EmbeddedZookeeper implements AutoCloseable { 19 | private File snapshotDir; 20 | private File logDir; 21 | private int tickTime; 22 | private String hostAddress; 23 | private int port; 24 | private ZooKeeperServer zk; 25 | private ServerCnxnFactory cnxnFactory; 26 | private CountDownLatch shutdownLatch = null; 27 | 28 | public EmbeddedZookeeper() { 29 | try { 30 | snapshotDir = KafkaTestUtils.newTempDir(); 31 | logDir = KafkaTestUtils.newTempDir(); 32 | tickTime = 500; 33 | zk = new ZooKeeperServer(snapshotDir, logDir, tickTime); 34 | registerShutdownHandler(zk); 35 | cnxnFactory = new NIOServerCnxnFactory(); 36 | InetAddress localHost = InetAddress.getLocalHost(); 37 | hostAddress = localHost.getHostAddress(); 38 | InetSocketAddress bindAddress = new InetSocketAddress(localHost, port); 39 | cnxnFactory.configure(bindAddress, 0); 40 | cnxnFactory.startup(zk); 41 | port = zk.getClientPort(); 42 | } catch (Exception e) { 43 | throw new IllegalStateException(e); 44 | } 45 | //sanity check 46 | if (zk.getClientPort() != port) { 47 | throw new IllegalStateException(); 48 | } 49 | } 50 | 51 | public String getHostAddress() { 52 | return hostAddress; 53 | } 54 | 55 | public int getPort() { 56 | return port; 57 | } 58 | 59 | public String getConnectionString() { 60 | return hostAddress + ":" + port; 61 | } 62 | 63 | @Override 64 | public void close() throws Exception { 65 | KafkaTestUtils.quietly(() -> zk.shutdown()); 66 | KafkaTestUtils.quietly(() -> cnxnFactory.shutdown()); 67 | if (shutdownLatch != null) { 68 | KafkaTestUtils.quietly(() -> shutdownLatch.await()); 69 | } 70 | } 71 | 72 | /** 73 | * starting with ZK 3.4.9 there's a shutdown handler. 74 | * if one isnt registered ZK will spew errors at shutdown time, even though 75 | * both the handler interface and the method of registering one are not public API. 76 | * see https://issues.apache.org/jira/browse/ZOOKEEPER-2795 77 | * such craftsmanship. much wow. 78 | * @param zk a ZK server instance 79 | * @throws Exception if anything goes wrong 80 | */ 81 | private void registerShutdownHandler(ZooKeeperServer zk) throws Exception { 82 | Class handlerClass; 83 | try { 84 | handlerClass = Class.forName("org.apache.zookeeper.server.ZooKeeperServerShutdownHandler"); 85 | } catch (ClassNotFoundException e) { 86 | //older ZK. 
forget about it 87 | return; 88 | } 89 | Method registerMethod = ZooKeeperServer.class.getDeclaredMethod("registerServerShutdownHandler", handlerClass); 90 | Constructor ctr = handlerClass.getDeclaredConstructor(CountDownLatch.class); 91 | ctr.setAccessible(true); 92 | shutdownLatch = new CountDownLatch(1); 93 | Object handlerInstance = ctr.newInstance(shutdownLatch); 94 | registerMethod.setAccessible(true); 95 | registerMethod.invoke(zk, handlerInstance); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /kafka-test-harness/src/main/java/com/linkedin/kafka/clients/utils/tests/KafkaTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import java.nio.file.Files; 10 | import java.util.ArrayList; 11 | import java.util.Collections; 12 | import java.util.List; 13 | import java.util.Properties; 14 | import java.util.Random; 15 | import java.util.concurrent.TimeUnit; 16 | import java.util.concurrent.atomic.AtomicBoolean; 17 | import org.apache.commons.io.FileUtils; 18 | import org.apache.kafka.clients.admin.AdminClient; 19 | import org.apache.kafka.clients.consumer.KafkaConsumer; 20 | import org.apache.kafka.clients.producer.KafkaProducer; 21 | 22 | 23 | public class KafkaTestUtils { 24 | private final static AtomicBoolean SHUTDOWN_HOOK_INSTALLED = new AtomicBoolean(false); 25 | private final static Thread SHUTDOWN_HOOK; 26 | private final static List FILES_TO_CLEAN_UP = Collections.synchronizedList(new ArrayList<>()); 27 | public final static String EXCEPTION_MESSAGE = "DESERIALIZATION_EXCEPTION_"; 28 | 29 | static { 30 | SHUTDOWN_HOOK = new Thread(() -> { 31 | Exception firstIssue = null; 32 | for (File toCleanUp : FILES_TO_CLEAN_UP) { 33 | if (!toCleanUp.exists()) { 34 | continue; 35 | } 36 | try { 37 | FileUtils.forceDelete(toCleanUp); 38 | } catch (IOException issue) { 39 | if (firstIssue == null) { 40 | firstIssue = issue; 41 | } else { 42 | firstIssue.addSuppressed(issue); 43 | } 44 | } 45 | } 46 | if (firstIssue != null) { 47 | System.err.println("unable to delete one or more files"); 48 | firstIssue.printStackTrace(System.err); 49 | throw new IllegalStateException(firstIssue); 50 | } 51 | }, "KafkaTestUtils cleanup hook"); 52 | SHUTDOWN_HOOK.setUncaughtExceptionHandler((t, e) -> { 53 | System.err.println("thread " + t.getName() + " died to uncaught exception"); 54 | e.printStackTrace(System.err); 55 | }); 56 | } 57 | 58 | private KafkaTestUtils() { 59 | //utility class 60 | } 61 | 62 | public static KafkaProducer vanillaProducerFor(EmbeddedBroker broker) { 63 | String bootstrap = broker.getPlaintextAddr(); 64 | if (bootstrap == null) { 65 | bootstrap = broker.getSslAddr(); 66 | } 67 | 68 | Properties props = new Properties(); 69 | props.put("bootstrap.servers", bootstrap); 70 | props.put("acks", "all"); 71 | props.put("retries", 0); 72 | props.put("batch.size", 16384); 73 | props.put("linger.ms", 1); 74 | props.put("buffer.memory", 1024 * 1024); 75 | props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 76 | props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 77 | 78 | return new KafkaProducer<>(props); 79 | } 80 | 81 | public static KafkaConsumer vanillaConsumerFor(EmbeddedBroker broker) { 82 | String bootstrap = broker.getPlaintextAddr(); 83 | if (bootstrap == null) { 84 | bootstrap = broker.getSslAddr(); 85 | } 86 | 87 | Properties props = new Properties(); 88 | props.put("bootstrap.servers", bootstrap); 89 | props.put("group.id", "test"); 90 | props.put("auto.offset.reset", "earliest"); 91 | props.put("enable.auto.commit", "false"); 92 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 93 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 94 | KafkaConsumer consumer = new KafkaConsumer<>(props); 95 | 96 | return consumer; 97 | } 98 | 99 | public static AdminClient adminClientFor(EmbeddedBroker broker) { 100 | String bootstrap = broker.getPlaintextAddr(); 101 | if (bootstrap == 
null) { 102 | bootstrap = broker.getSslAddr(); 103 | } 104 | 105 | Properties props = new Properties(); 106 | props.put("bootstrap.servers", bootstrap); 107 | 108 | return AdminClient.create(props); 109 | } 110 | 111 | public static File newTempDir() { 112 | try { 113 | return cleanup(Files.createTempDirectory(null).toFile()); 114 | } catch (IOException e) { 115 | throw new IllegalStateException(e); 116 | } 117 | } 118 | 119 | public static File cleanup(File toCleanUp) { 120 | if (SHUTDOWN_HOOK_INSTALLED.compareAndSet(false, true)) { 121 | Runtime.getRuntime().addShutdownHook(SHUTDOWN_HOOK); 122 | } 123 | FILES_TO_CLEAN_UP.add(toCleanUp); 124 | return toCleanUp; 125 | } 126 | 127 | public static void quietly(Task task) { 128 | try { 129 | task.run(); 130 | } catch (Exception e) { 131 | e.printStackTrace(System.err); 132 | } 133 | } 134 | 135 | public static void waitUntil( 136 | String description, 137 | UsableSupplier condition, 138 | long sleepIncrements, 139 | long timeout, TimeUnit timeoutUnit, 140 | boolean allowExceptions 141 | ) throws InterruptedException { 142 | long start = System.currentTimeMillis(); 143 | long deadline = start + timeoutUnit.toMillis(timeout); 144 | long now = start; 145 | while (now < deadline) { 146 | try { 147 | if (condition.get()) { 148 | return; 149 | } 150 | } catch (Exception e) { 151 | if (!allowExceptions) { 152 | throw new RuntimeException(e); 153 | } 154 | } 155 | Thread.sleep(sleepIncrements); 156 | now = System.currentTimeMillis(); 157 | } 158 | throw new IllegalStateException("condition " + description + " did not turn true within " + timeout + " " + timeoutUnit); 159 | } 160 | 161 | public static String getRandomString(int length) { 162 | char[] chars = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; 163 | Random random = new Random(); 164 | StringBuilder stringBuilder = new StringBuilder(); 165 | for (int i = 0; i < length; i++) { 166 | stringBuilder.append(chars[Math.abs(random.nextInt()) % 16]); 167 | } 168 | return stringBuilder.toString(); 169 | } 170 | 171 | public static String getExceptionString(int length) { 172 | StringBuilder stringBuilder = new StringBuilder(); 173 | stringBuilder.append(EXCEPTION_MESSAGE); 174 | for (int i = EXCEPTION_MESSAGE.length(); i < length; i++) { 175 | stringBuilder.append('X'); 176 | } 177 | 178 | return stringBuilder.toString(); 179 | } 180 | 181 | @FunctionalInterface 182 | public interface Task { 183 | void run() throws Exception; 184 | } 185 | 186 | @FunctionalInterface 187 | public interface UsableSupplier { 188 | T get() throws Exception; 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /kafka-test-harness/src/test/java/com/linkedin/kafka/clients/utils/tests/EmbeddedBrokerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
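The waitUntil helper above is the harness's generic polling primitive. A minimal usage sketch follows; the topic name, the AdminClient wiring, and the Boolean condition type are assumptions (this listing appears to have dropped the generic parameter on UsableSupplier), so treat it as an illustration rather than code from this repository.

import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import com.linkedin.kafka.clients.utils.tests.KafkaTestUtils;

public class WaitUntilSketch {
  // Hypothetical test helper: poll until the broker reports the topic, tolerating transient metadata errors.
  static void awaitTopicVisible(AdminClient adminClient, String topic) throws InterruptedException {
    KafkaTestUtils.waitUntil(
        "topic " + topic + " is visible",                               // description used in the failure message
        () -> adminClient.listTopics().names().get().contains(topic),   // condition; may throw while brokers start
        100,                                                            // sleep between checks, in ms
        1, TimeUnit.MINUTES,                                            // overall timeout
        true);                                                          // allowExceptions: swallow and retry
  }
}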
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.time.Duration; 8 | import java.util.Collections; 9 | import java.util.concurrent.TimeUnit; 10 | import org.apache.kafka.clients.admin.AdminClient; 11 | import org.apache.kafka.clients.admin.NewTopic; 12 | import org.apache.kafka.clients.consumer.ConsumerRecords; 13 | import org.apache.kafka.clients.consumer.KafkaConsumer; 14 | import org.apache.kafka.clients.producer.KafkaProducer; 15 | import org.apache.kafka.clients.producer.ProducerRecord; 16 | import org.apache.kafka.clients.producer.RecordMetadata; 17 | import org.testng.Assert; 18 | import org.testng.annotations.Test; 19 | 20 | 21 | public class EmbeddedBrokerTest { 22 | 23 | @Test 24 | public void testSimpleScenario() throws Exception { 25 | try (EmbeddedZookeeper zk = new EmbeddedZookeeper(); 26 | EmbeddedBroker broker = EmbeddedBroker.newServer().zkConnect(zk).enablePlaintext().build(); 27 | AdminClient adminClient = KafkaTestUtils.adminClientFor(broker); 28 | KafkaProducer producer = KafkaTestUtils.vanillaProducerFor(broker); 29 | KafkaConsumer consumer = KafkaTestUtils.vanillaConsumerFor(broker)) { 30 | 31 | String topicName = "topic"; 32 | adminClient.createTopics(Collections.singletonList(new NewTopic(topicName, 1, (short) 1))).all().get(1, TimeUnit.MINUTES); 33 | RecordMetadata md = producer.send(new ProducerRecord<>(topicName, "key", "value")).get(); 34 | Assert.assertNotNull(md); 35 | Assert.assertEquals(0, md.offset()); //1st msg 36 | 37 | consumer.subscribe(Collections.singletonList(topicName)); 38 | ConsumerRecords records = consumer.poll(Duration.ofSeconds(10)); 39 | 40 | Assert.assertEquals(1, records.count()); 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /kafka-test-harness/src/test/java/com/linkedin/kafka/clients/utils/tests/EmbeddedZookeeperTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.net.ConnectException; 8 | import java.net.Socket; 9 | import java.util.UUID; 10 | import java.util.concurrent.TimeUnit; 11 | import org.I0Itec.zkclient.ZkClient; 12 | import org.apache.zookeeper.CreateMode; 13 | import org.testng.Assert; 14 | import org.testng.annotations.Test; 15 | 16 | 17 | public class EmbeddedZookeeperTest { 18 | 19 | @Test 20 | public void testSimpleScenario() throws Exception { 21 | String connectionString; 22 | String host; 23 | int port; 24 | try (EmbeddedZookeeper zk = new EmbeddedZookeeper()) { 25 | connectionString = zk.getConnectionString(); 26 | host = zk.getHostAddress(); 27 | port = zk.getPort(); 28 | Assert.assertEquals(host + ":" + port, connectionString); 29 | ZkClient client = new ZkClient(connectionString); 30 | try { 31 | String path = "/" + UUID.randomUUID().toString(); 32 | client.waitUntilConnected(5, TimeUnit.SECONDS); 33 | client.create(path, "payload", CreateMode.PERSISTENT); 34 | Assert.assertEquals("payload", client.readData(path)); 35 | } finally { 36 | client.close(); 37 | } 38 | } 39 | //now verify shut down 40 | try { 41 | new Socket(host, port); 42 | Assert.fail("expected to fail"); 43 | } catch (ConnectException ignored) { 44 | 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /kafka-test-harness/src/test/java/com/linkedin/kafka/clients/utils/tests/KafkaIntegrationTestHarnessTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils.tests; 6 | 7 | import java.util.List; 8 | import java.util.Set; 9 | import org.testng.Assert; 10 | import org.testng.annotations.AfterTest; 11 | import org.testng.annotations.BeforeTest; 12 | import org.testng.annotations.Test; 13 | 14 | 15 | public class KafkaIntegrationTestHarnessTest { 16 | private static final int CLUSTER_SIZE = 4; 17 | private AbstractKafkaIntegrationTestHarness _kafkaIntegrationTestHarness; 18 | 19 | @BeforeTest 20 | public void setup() { 21 | _kafkaIntegrationTestHarness = new AbstractKafkaIntegrationTestHarness() { 22 | @Override 23 | protected int clusterSize() { 24 | return CLUSTER_SIZE; 25 | } 26 | }; 27 | _kafkaIntegrationTestHarness.setUp(); 28 | } 29 | 30 | @AfterTest 31 | public void teardown() { 32 | _kafkaIntegrationTestHarness.tearDown(); 33 | } 34 | 35 | @Test 36 | public void testKillBroker() throws Exception { 37 | Set brokerIds = _kafkaIntegrationTestHarness._brokers.keySet(); 38 | Assert.assertFalse(brokerIds.isEmpty(), "broker not initialized"); 39 | Assert.assertEquals(brokerIds.size(), CLUSTER_SIZE, "expected cluster size doesn't match the initialized brokers"); 40 | 41 | int killedBrokerId = -1; 42 | for (Integer brokerId : brokerIds) { 43 | killedBrokerId = brokerId; 44 | _kafkaIntegrationTestHarness.killBroker(killedBrokerId); 45 | break; 46 | } 47 | 48 | List restartedBrokers = _kafkaIntegrationTestHarness.restartDeadBrokers(); 49 | Assert.assertEquals(restartedBrokers.size(), 1, "unexpected brokers restarted"); 50 | Assert.assertTrue(restartedBrokers.contains(killedBrokerId), "broker restart is not the broker that was killed"); 51 | } 52 | 53 | @Test 54 | public void testKillRandomBroker() throws Exception { 55 | Set brokerIds = _kafkaIntegrationTestHarness._brokers.keySet(); 56 | Assert.assertFalse(brokerIds.isEmpty(), "broker not initialized"); 57 | Assert.assertEquals(brokerIds.size(), CLUSTER_SIZE, "expected cluster size doesn't match the initialized brokers"); 58 | 59 | int killedBrokerId = _kafkaIntegrationTestHarness.killRandomBroker(); 60 | 61 | List restartedBrokers = _kafkaIntegrationTestHarness.restartDeadBrokers(); 62 | Assert.assertEquals(restartedBrokers.size(), 1, "unexpected brokers restarted"); 63 | Assert.assertTrue(restartedBrokers.contains(killedBrokerId), "broker restart is not the broker that was killed"); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /kafka-test-harness/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | # 4 | 5 | log4j.rootLogger=INFO, stdout 6 | 7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 9 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n 10 | 11 | log4j.logger.com.linkedin.kafka.clients=ERROR 12 | log4j.logger.org.apache.kafka=ERROR 13 | 14 | # zkclient can be verbose; during debugging it is common to adjust it separately 15 | log4j.logger.org.I0Itec.zkclient.ZkClient=WARN 16 | log4j.logger.org.apache.zookeeper=WARN 17 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. 3 | * Licensed under the BSD 2-Clause License (the "License").
 4 | * See License in the project root for license information. 5 | */ 6 | import com.linkedin.gradle.build.DistributeTask 7 | 8 | plugins { 9 | id "java-library" 10 | id "maven-publish" 11 | id "com.jfrog.artifactory" 12 | id "idea" 13 | } 14 | 15 | dependencies { 16 | compile "com.linkedin.kafka:kafka-clients:${rootProject.ext.liKafkaVersion}" 17 | compile "com.linkedin.mario:mario-client-all:${rootProject.ext.marioVersion}" 18 | 19 | testCompile "org.mockito:mockito-core:2.24.0" 20 | } 21 | 22 | publishing { 23 | publications { 24 | java(MavenPublication) { 25 | from components.java 26 | artifact sourcesJar 27 | artifact javadocJar 28 | pom.withXml { 29 | def root = asNode() 30 | root.appendNode('name', 'li-apache-kafka-clients') 31 | root.appendNode('description', 'extended kafka clients') 32 | root.children().last() + rootProject.ext.pomConfig 33 | } 34 | } 35 | } 36 | } 37 | 38 | artifactoryPublish.dependsOn assemble 39 | artifactoryPublish.dependsOn publishToMavenLocal 40 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/annotations/InterfaceOrigin.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.annotations; 6 | 7 | import java.lang.annotation.Documented; 8 | import java.lang.annotation.ElementType; 9 | import java.lang.annotation.Target; 10 | 11 | /** 12 | * Annotations to specify whether the interface is from open source or LiKafkaClients only. 13 | */ 14 | public class InterfaceOrigin { 15 | 16 | /** 17 | * Meaning this method is inherited from Apache Kafka 18 | */ 19 | @Documented 20 | @Target(ElementType.METHOD) 21 | public @interface ApacheKafka { 22 | 23 | } 24 | 25 | /** 26 | * Meaning this method is defined only in LiKafkaClients. 27 | */ 28 | @Documented 29 | @Target(ElementType.METHOD) 30 | public @interface LiKafkaClients { 31 | 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/AuditType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing; 6 | 7 | /** 8 | * The interface of the audit type. 9 | * 10 | * There are three predefined audit types used by LiKafkaProducer and LiKafkaConsumer. Users may define custom audit types. 11 | */ 12 | public interface AuditType { 13 | 14 | String name(); 15 | 16 | public static final AuditType SUCCESS = new AuditType() { 17 | @Override 18 | public String name() { 19 | return "SUCCESS"; 20 | } 21 | 22 | @Override 23 | public String toString() { 24 | return name(); 25 | } 26 | }; 27 | 28 | public static final AuditType FAILURE = new AuditType() { 29 | @Override 30 | public String name() { 31 | return "FAILURE"; 32 | } 33 | 34 | @Override 35 | public String toString() { 36 | return name(); 37 | } 38 | }; 39 | 40 | public static final AuditType ATTEMPT = new AuditType() { 41 | @Override 42 | public String name() { 43 | return "ATTEMPT"; 44 | } 45 | 46 | @Override 47 | public String toString() { 48 | return name(); 49 | } 50 | }; 51 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/Auditor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
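To make the "custom audit types" remark in the AuditType javadoc above concrete, a user-defined type only needs a name. The DROPPED type below is purely illustrative and is not part of this codebase; it simply mirrors the predefined SUCCESS/FAILURE/ATTEMPT constants.

package com.linkedin.kafka.clients.auditing;

// Hypothetical user-defined audit type, analogous to the predefined constants in AuditType.
public final class DroppedAuditType implements AuditType {
  @Override
  public String name() {
    return "DROPPED";
  }

  @Override
  public String toString() {
    return name();
  }
}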
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing; 6 | 7 | import com.linkedin.kafka.clients.producer.LiKafkaProducerConfig; 8 | 9 | import org.apache.kafka.common.Configurable; 10 | import org.apache.kafka.clients.producer.KafkaProducer; 11 | 12 | import java.util.Map; 13 | import java.util.concurrent.TimeUnit; 14 | 15 | /** 16 | * The auditor interface. This interface allows users to implement their own auditing solution. 17 | * 18 | * Notice that the auditor may be used by multiple threads, so the implementation should be thread safe. 19 | */ 20 | public interface Auditor<K, V> extends Configurable { 21 | 22 | /** 23 | * This method will be invoked by LiKafkaProducer on instantiation. 24 | * Notice that if the auditor is used by the producer, there will be an additional 25 | * {@link KafkaProducer KafkaProducer<byte[], byte[]>} object passed in the configuration with the key of 26 | * {@link LiKafkaProducerConfig#CURRENT_PRODUCER}. Users can use this producer to send auditing events to the same Kafka 27 | * cluster the producer is producing to. This is to avoid creating another producer. 28 | * 29 | * @param configs The configurations for the auditor 30 | */ 31 | void configure(Map configs); 32 | 33 | /** 34 | * Start the auditor. 35 | */ 36 | void start(); 37 | 38 | /** 39 | * Get the audit token from the key and value of the record. 40 | * This method helps the producer avoid holding the key and value until the message sending is completed. 41 | * 42 | * @param key the key of the record. 43 | * @param value the value of the record. 44 | * @return the custom audit information. 45 | */ 46 | Object auditToken(K key, V value); 47 | 48 | /** 49 | * Audit the record based on the given information. 50 | * 51 | * @param auditToken The user extracted auditing information. 52 | * @param topic The topic of the record. 53 | * @param timestamp The timestamp of the record. 54 | * @param messageCount The number of messages to record. 55 | * @param bytesCount The number of bytes to record. 56 | * @param auditType The type of the auditing action. 57 | */ 58 | void record(Object auditToken, 59 | String topic, 60 | Long timestamp, 61 | Long messageCount, 62 | Long bytesCount, 63 | AuditType auditType); 64 | 65 | /** 66 | * Close the auditor with timeout. 67 | * This method will be called when the producer is closed with a timeout. 68 | * 69 | * @param timeout the maximum time to wait to close the auditor. 70 | * @param unit The time unit. 71 | */ 72 | void close(long timeout, TimeUnit unit); 73 | 74 | /** 75 | * The LiKafkaProducer and LiKafkaConsumer will call this method when the producer or consumer is closed. 76 | * Close the auditor. 77 | */ 78 | void close(); 79 | 80 | 81 | } 82 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/LoggingAuditor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing; 6 | 7 | import com.linkedin.kafka.clients.auditing.abstractimpl.AbstractAuditor; 8 | import com.linkedin.kafka.clients.auditing.abstractimpl.AuditKey; 9 | import com.linkedin.kafka.clients.auditing.abstractimpl.AuditStats; 10 | import com.linkedin.kafka.clients.auditing.abstractimpl.CountingAuditStats; 11 | import org.apache.kafka.common.utils.Time; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.util.Date; 16 | import java.util.Map; 17 | 18 | /** 19 | * A simple auditor that logs the message count aggregated by time buckets. 20 | */ 21 | public class LoggingAuditor extends AbstractAuditor { 22 | private static final Logger AUDIT_LOG = LoggerFactory.getLogger(LoggingAuditor.class); 23 | 24 | public static final String BUCKET_MS = "auditor.bucket.ms"; 25 | private static final String BUCKET_MS_DEFAULT = "600000"; 26 | 27 | private long _bucketMs = -1L; 28 | 29 | public LoggingAuditor() { 30 | super(); 31 | } 32 | 33 | public LoggingAuditor(String name, Time time) { 34 | super(name, time); 35 | } 36 | 37 | public void printSummary(AuditStats auditStats) { 38 | CountingAuditStats countingAuditStats = (CountingAuditStats) auditStats; 39 | long bucketMs = countingAuditStats.bucketMs(); 40 | Map stats = countingAuditStats.stats(); 41 | for (Map.Entry entry : stats.entrySet()) { 42 | AuditKey auditKey = (AuditKey) entry.getKey(); 43 | CountingAuditStats.AuditingCounts auditingCounts = entry.getValue(); 44 | String start = new Date(auditKey.bucket() * bucketMs).toString(); 45 | String end = new Date(auditKey.bucket() * bucketMs + bucketMs).toString(); 46 | AUDIT_LOG.info("[{} - {}] : {}, {}", start, end, auditKey, auditingCounts); 47 | } 48 | } 49 | 50 | @Override 51 | @SuppressWarnings("unchecked") 52 | public void configure(Map configs) { 53 | super.configure(configs); 54 | _bucketMs = Long.parseLong((String) ((Map) configs).getOrDefault(BUCKET_MS, BUCKET_MS_DEFAULT)); 55 | } 56 | 57 | @Override 58 | public void onTick(AuditStats lastStats) { 59 | printSummary(lastStats); 60 | } 61 | 62 | @Override 63 | public void onClosed(AuditStats currentStats, AuditStats nextStats, long timeout) { 64 | AUDIT_LOG.info("Logging auditing stats on closure..."); 65 | printSummary(currentStats); 66 | printSummary(nextStats); 67 | } 68 | 69 | @Override 70 | protected CountingAuditStats createAuditStats() { 71 | return new CountingAuditStats(_bucketMs); 72 | } 73 | 74 | @Override 75 | protected Object getAuditKey(Object auditToken, 76 | String topic, 77 | Long timestamp, 78 | Long messageCount, 79 | Long sizeInBytes, 80 | AuditType auditType) { 81 | return new AuditKey(topic, timestamp / _bucketMs, auditType); 82 | } 83 | 84 | @Override 85 | public Object auditToken(K key, V value) { 86 | // The logging auditor does not have custom audit information. 87 | return null; 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/NoOpAuditor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing; 6 | 7 | import java.util.Map; 8 | import java.util.concurrent.TimeUnit; 9 | 10 | /** 11 | * The default no-op auditor class. 12 | */ 13 | public class NoOpAuditor implements Auditor { 14 | 15 | @Override 16 | public void configure(Map configs) { 17 | 18 | } 19 | 20 | @Override 21 | public void start() { 22 | 23 | } 24 | 25 | @Override 26 | public Object auditToken(K key, V value) { 27 | return null; 28 | } 29 | 30 | @Override 31 | public void record(Object auditToken, 32 | String topic, 33 | Long timestamp, 34 | Long messageCount, 35 | Long bytesCount, 36 | AuditType auditType) { 37 | 38 | } 39 | 40 | 41 | @Override 42 | public void close(long timeout, TimeUnit unit) { 43 | 44 | } 45 | 46 | @Override 47 | public void close() { 48 | 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/abstractimpl/AuditKey.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing.abstractimpl; 6 | 7 | import com.linkedin.kafka.clients.auditing.AuditType; 8 | 9 | import java.util.Objects; 10 | 11 | /** 12 | * This class is an example of AuditKey implementation. 13 | * The AuditKey we defined here is simply a combination of the topic, bucket and audit type. For different use 14 | * cases, user may want to define a different audit key. 15 | */ 16 | public final class AuditKey { 17 | private final String _topic; 18 | private final Long _bucket; 19 | private final AuditType _auditType; 20 | 21 | public AuditKey(String topic, Long bucket, AuditType auditType) { 22 | _topic = topic; 23 | _bucket = bucket; 24 | _auditType = auditType; 25 | } 26 | 27 | public String topic() { 28 | return _topic; 29 | } 30 | 31 | public Long bucket() { 32 | return _bucket; 33 | } 34 | 35 | public AuditType auditType() { 36 | return _auditType; 37 | } 38 | 39 | @Override 40 | public boolean equals(Object obj) { 41 | if (this == obj) { 42 | return true; 43 | } 44 | if (obj == null || getClass() != obj.getClass()) { 45 | return false; 46 | } 47 | AuditKey auditKey = (AuditKey) obj; 48 | return Objects.equals(_topic, auditKey.topic()) && Objects.equals(_bucket, auditKey.bucket()) 49 | && Objects.equals(_auditType, auditKey.auditType()); 50 | } 51 | 52 | @Override 53 | public int hashCode() { 54 | return Objects.hash(_topic, _bucket, _auditType); 55 | } 56 | 57 | @Override 58 | public String toString() { 59 | return "(" + _topic + ',' + _bucket + ',' + auditType() + ')'; 60 | } 61 | 62 | } 63 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/abstractimpl/AuditStats.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing.abstractimpl; 6 | 7 | /** 8 | * The interface for audit stats. This is used to hold the audit information of a tick in {@link AbstractAuditor}. 9 | * 10 | * Users may provide a different implementation to get different auditing behavior. An example implementation can 11 | * be found in {@link CountingAuditStats}. 12 | * 13 | * The implementation of this interface needs to be thread safe. 14 | */ 15 | public interface AuditStats { 16 | 17 | /** 18 | * The method that records the message for audit. 19 | * 20 | * @param auditKey The audit key for the record. (e.g. combination of topic, key and audit type). 21 | * @param messageCount The number of messages to record. 22 | * @param bytesCount the number of bytes to record. 23 | * 24 | * @throws IllegalStateException Thrown if the audit stats is updated after it is closed. 25 | */ 26 | void update(Object auditKey, long messageCount, long bytesCount) throws IllegalStateException; 27 | 28 | /** 29 | * Close the audit stats. 30 | * 31 | * The implementation needs to ensure that the stats won't be changed after it is closed. 32 | */ 33 | void close(); 34 | } 35 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/auditing/abstractimpl/CountingAuditStats.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing.abstractimpl; 6 | 7 | import java.util.Map; 8 | import java.util.concurrent.ConcurrentHashMap; 9 | import java.util.concurrent.atomic.AtomicInteger; 10 | import java.util.concurrent.atomic.AtomicLong; 11 | 12 | /** 13 | * A class that aggregate the statistics for auditing by simply counting the number of records of different auditing 14 | * types for each topic. 15 | * 16 | * This class is thread safe. 17 | */ 18 | public class CountingAuditStats implements AuditStats { 19 | 20 | private final long _bucketMs; 21 | private final Map _stats; 22 | 23 | // The variables for synchronization on ticks. 24 | private final AtomicInteger _recordingInProgress; 25 | private volatile boolean _closed; 26 | 27 | public CountingAuditStats(long bucketMs) { 28 | _bucketMs = bucketMs; 29 | _stats = new ConcurrentHashMap<>(); 30 | _recordingInProgress = new AtomicInteger(0); 31 | _closed = false; 32 | } 33 | 34 | public long bucketMs() { 35 | return _bucketMs; 36 | } 37 | 38 | public Map stats() { 39 | return _stats; 40 | } 41 | 42 | public void update(Object auditKey, long messageCount, long bytesCount) { 43 | try { 44 | // Increment the counter to claim usage. This is to make sure we do not close an AuditStats that is in use. 45 | _recordingInProgress.incrementAndGet(); 46 | if (_closed) { 47 | throw new IllegalStateException("Stats has been closed. The caller should get the new AuditStats and retry."); 48 | } 49 | 50 | AuditingCounts statsForTopic = _stats.computeIfAbsent(auditKey, v -> new AuditingCounts()); 51 | statsForTopic.recordMessage(messageCount, bytesCount); 52 | } finally { 53 | _recordingInProgress.decrementAndGet(); 54 | } 55 | } 56 | 57 | public void close() { 58 | _closed = true; 59 | // We loop waiting if there is any other threads using this stats. 60 | // This is a spin lock, we should be able to get out of the loop pretty quickly and never end up in a tight loop. 61 | while (_recordingInProgress.get() > 0) { } 62 | } 63 | 64 | /** 65 | * A container class that hosts the messages count and bytes count for each audit key. 66 | */ 67 | public static final class AuditingCounts { 68 | private final AtomicLong _messageCount = new AtomicLong(0); 69 | private final AtomicLong _bytesCount = new AtomicLong(0); 70 | 71 | public void recordMessage(long messageCount, long bytesCount) { 72 | _messageCount.addAndGet(messageCount); 73 | _bytesCount.addAndGet(bytesCount); 74 | } 75 | 76 | public long messageCount() { 77 | return _messageCount.get(); 78 | } 79 | 80 | public long bytesCount() { 81 | return _bytesCount.get(); 82 | } 83 | 84 | @Override 85 | public String toString() { 86 | return "(" + _messageCount.get() + " messages, " + _bytesCount.get() + " bytes)"; 87 | } 88 | } 89 | 90 | } 91 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/ClusterDescriptor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Objects; 8 | 9 | 10 | // A generic descriptor for a cluster 11 | public class ClusterDescriptor { 12 | private final String _name; 13 | private final String _bootstrapUrl; 14 | private final String _zkConnection; 15 | 16 | public ClusterDescriptor(String name, String bootstrapUrl, String zkConnection) { 17 | _name = name; 18 | _bootstrapUrl = bootstrapUrl; 19 | _zkConnection = zkConnection; 20 | } 21 | 22 | public String getName() { 23 | return _name; 24 | } 25 | 26 | public String getBootstrapUrl() { 27 | return _bootstrapUrl; 28 | } 29 | 30 | public String getZkConnection() { 31 | return _zkConnection; 32 | } 33 | 34 | public String toString() { 35 | return _name + " (bootstrap: " + _bootstrapUrl + ", zk: " + _zkConnection + ")"; 36 | } 37 | 38 | @Override 39 | public boolean equals(Object o) { 40 | if (this == o) { 41 | return true; 42 | } 43 | if (o == null || getClass() != o.getClass()) { 44 | return false; 45 | } 46 | ClusterDescriptor other = (ClusterDescriptor) o; 47 | return _name.equals(other.getName()) && _bootstrapUrl.equals(other.getBootstrapUrl()) && 48 | _zkConnection.equals(other.getZkConnection()); 49 | } 50 | 51 | @Override 52 | public int hashCode() { 53 | return Objects.hash(_name, _bootstrapUrl, _zkConnection); 54 | } 55 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/ClusterGroupDescriptor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Objects; 8 | 9 | 10 | // A generic descriptor for a cluster group 11 | public class ClusterGroupDescriptor { 12 | private final String _name; 13 | private final String _environment; 14 | 15 | public ClusterGroupDescriptor(String name, String environment) { 16 | _name = name; 17 | _environment = environment; 18 | } 19 | 20 | public String getName() { 21 | return _name; 22 | } 23 | 24 | public String getEnvironment() { 25 | return _environment; 26 | } 27 | 28 | public String toString() { 29 | return _name + "@" + _environment; 30 | } 31 | 32 | @Override 33 | public boolean equals(Object o) { 34 | if (this == o) { 35 | return true; 36 | } 37 | if (o == null || getClass() != o.getClass()) { 38 | return false; 39 | } 40 | ClusterGroupDescriptor other = (ClusterGroupDescriptor) o; 41 | return Objects.equals(_name, other.getName()) && Objects.equals(_environment, other.getEnvironment()); 42 | } 43 | 44 | @Override 45 | public int hashCode() { 46 | return Objects.hash(_name, _environment); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/FederatedClientCommandCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Map; 8 | import java.util.UUID; 9 | 10 | 11 | // A callback interface for executing commands requested by the metadata service. 12 | // 13 | // A command-specific callback will implement this interface. 14 | public interface FederatedClientCommandCallback { 15 | public FederatedClientCommandType getCommandType(); 16 | 17 | void onReceivingCommand(UUID commandRequestId, Map args); 18 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/FederatedClientCommandType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | 8 | public enum FederatedClientCommandType { 9 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/InstrumentedClientLoggingHandler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 8 | import com.linkedin.mario.client.LoggingHandler; 9 | import org.slf4j.Logger; 10 | 11 | 12 | /** 13 | * logging handler for instrumented clients to log connection issues 14 | * to conductor. 15 | * since conductor is an optional dependency for these clients the 16 | * logging performed is less verbose and emits warnings instead 17 | * of errors (which is what the default conductor client would do) 18 | */ 19 | public class InstrumentedClientLoggingHandler implements LoggingHandler { 20 | private final Logger logger; 21 | private final String clientId; 22 | private final String prefix; 23 | 24 | public InstrumentedClientLoggingHandler(Logger logger, String clientId) { 25 | if (logger == null) { 26 | throw new IllegalArgumentException("must provide logger"); 27 | } 28 | this.logger = logger; 29 | this.clientId = clientId; 30 | this.prefix = this.clientId == null ? "" : ("client " + clientId + " "); 31 | } 32 | 33 | @Override 34 | public void logConnectionFailure( 35 | String lastAttemptedWebsocketUrl, 36 | Throwable issue, 37 | long failureTime, 38 | int numConsecutiveFailures, 39 | long delayToNextAttempt) { 40 | if (numConsecutiveFailures == 1) { 41 | Throwable root = LiKafkaClientsUtils.getRootCause(issue); //could be null 42 | String rootDesc = root == null ? "" : (" (" + root.getMessage() + ")"); 43 | //only log on 1st failure, log a warning, and dont display full stack trace 44 | if (lastAttemptedWebsocketUrl == null || lastAttemptedWebsocketUrl.isEmpty()) { 45 | logger.warn("{}unable to locate conductor{}. will keep retrying in the background", prefix, rootDesc); 46 | } else { 47 | logger.warn("{}unable to open websocket to conductor at {}{}. will keep retrying in the background", 48 | prefix, lastAttemptedWebsocketUrl, rootDesc); 49 | } 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/LargeMessageHeaderValue.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import com.linkedin.kafka.clients.utils.PrimitiveEncoderDecoder; 8 | import java.util.UUID; 9 | 10 | 11 | /** 12 | * This class represents the header value for a large message. 13 | * Every large message header takes up 29 bytes and is structured as follows 14 | * 15 | * | Type | UUID | segmentNumber | numberOfSegments | messageSizeInBytes | 16 | * | 1 byte | 16 bytes | 4 bytes | 4 bytes | 4 bytes | 17 | * 18 | * The Large message header values will be used to support large messages eventually. 19 | * (as opposed to encoding large segment metadata info inside the payload) 20 | */ 21 | public class LargeMessageHeaderValue { 22 | public static final UUID EMPTY_UUID = new UUID(0L, 0L); 23 | public static final int INVALID_SEGMENT_ID = -1; 24 | public static final int INVALID_MESSAGE_SIZE = -1; 25 | 26 | private static final int LEGACY_HEADER_SIZE = 1 + PrimitiveEncoderDecoder.LONG_SIZE + 27 | PrimitiveEncoderDecoder.LONG_SIZE + PrimitiveEncoderDecoder.INT_SIZE + PrimitiveEncoderDecoder.INT_SIZE; 28 | // new field added in LEGACY_V2 - messageSizeInBytes 29 | private static final int LEGACY_V2_HEADER_SIZE = LEGACY_HEADER_SIZE + PrimitiveEncoderDecoder.INT_SIZE; 30 | // same as LEGACY_V2_HEADER_SIZE 31 | private static final int V3_HEADER_SIZE = LEGACY_V2_HEADER_SIZE; 32 | private final byte _type; 33 | private final UUID _uuid; 34 | private final int _segmentNumber; 35 | private final int _numberOfSegments; 36 | private final int _messageSizeInBytes; 37 | 38 | // This indicates that the large message framework is using 39 | // SegmentSerializer/SegmentDeserializer interface to split 40 | // and assemble large message segments. 41 | public static final byte LEGACY = (byte) 0; 42 | // Added new field - messageSizeInBytes to the header value 43 | public static final byte LEGACY_V2 = (byte) 1; 44 | // Added new "type" - In the new version, header-based records 45 | // will be used for large messages and this version tells the 46 | // assembler to not expect any segment metadata(a.k.a payload header) in the payload. 
47 | public static final byte V3 = (byte) 2; 48 | 49 | public LargeMessageHeaderValue(byte type, UUID uuid, int segmentNumber, int numberOfSegments, int messageSizeInBytes) { 50 | _type = type; 51 | _uuid = uuid; 52 | _segmentNumber = segmentNumber; 53 | _numberOfSegments = numberOfSegments; 54 | _messageSizeInBytes = messageSizeInBytes; 55 | } 56 | 57 | public int getMessageSizeInBytes() { 58 | return _messageSizeInBytes; 59 | } 60 | 61 | public int getSegmentNumber() { 62 | return _segmentNumber; 63 | } 64 | 65 | public int getNumberOfSegments() { 66 | return _numberOfSegments; 67 | } 68 | 69 | public UUID getUuid() { 70 | return _uuid; 71 | } 72 | 73 | public byte getType() { 74 | return _type; 75 | } 76 | 77 | public static byte[] toBytes(LargeMessageHeaderValue largeMessageHeaderValue) { 78 | byte[] serialized; 79 | switch (largeMessageHeaderValue.getType()) { 80 | case LEGACY: 81 | serialized = new byte[LEGACY_HEADER_SIZE]; 82 | break; 83 | case LEGACY_V2: 84 | serialized = new byte[LEGACY_V2_HEADER_SIZE]; 85 | break; 86 | default: 87 | serialized = new byte[V3_HEADER_SIZE]; 88 | break; 89 | } 90 | int byteOffset = 0; 91 | serialized[byteOffset] = largeMessageHeaderValue.getType(); 92 | byteOffset += 1; // for type 93 | PrimitiveEncoderDecoder.encodeLong(largeMessageHeaderValue.getUuid().getLeastSignificantBits(), serialized, byteOffset); 94 | byteOffset += PrimitiveEncoderDecoder.LONG_SIZE; // for UUID(least significant bits) 95 | PrimitiveEncoderDecoder.encodeLong(largeMessageHeaderValue.getUuid().getMostSignificantBits(), serialized, byteOffset); 96 | byteOffset += PrimitiveEncoderDecoder.LONG_SIZE; // for UUID(most significant bits) 97 | PrimitiveEncoderDecoder.encodeInt(largeMessageHeaderValue.getSegmentNumber(), serialized, byteOffset); 98 | byteOffset += PrimitiveEncoderDecoder.INT_SIZE; // for segment number 99 | PrimitiveEncoderDecoder.encodeInt(largeMessageHeaderValue.getNumberOfSegments(), serialized, byteOffset); 100 | // maintain compatibility for LEGACY_V2 and V3 101 | if (largeMessageHeaderValue.getType() >= LEGACY_V2) { 102 | byteOffset += PrimitiveEncoderDecoder.INT_SIZE; // for message size 103 | PrimitiveEncoderDecoder.encodeInt(largeMessageHeaderValue.getMessageSizeInBytes(), serialized, byteOffset); 104 | } 105 | return serialized; 106 | } 107 | 108 | public static LargeMessageHeaderValue fromBytes(byte[] bytes) { 109 | int byteOffset = 0; 110 | 111 | byte type = bytes[byteOffset]; 112 | byteOffset += 1; 113 | long leastSignificantBits = PrimitiveEncoderDecoder.decodeLong(bytes, byteOffset); 114 | byteOffset += PrimitiveEncoderDecoder.LONG_SIZE; 115 | long mostSignificantBits = PrimitiveEncoderDecoder.decodeLong(bytes, byteOffset); 116 | byteOffset += PrimitiveEncoderDecoder.LONG_SIZE; 117 | int segmentNumber = PrimitiveEncoderDecoder.decodeInt(bytes, byteOffset); 118 | byteOffset += PrimitiveEncoderDecoder.INT_SIZE; 119 | int numberOfSegments = PrimitiveEncoderDecoder.decodeInt(bytes, byteOffset); 120 | // strict check in case we modify V3_HEADER_SIZE later 121 | if (bytes.length == LEGACY_V2_HEADER_SIZE || bytes.length == V3_HEADER_SIZE) { 122 | byteOffset += PrimitiveEncoderDecoder.INT_SIZE; 123 | int messageSizeInBytes = PrimitiveEncoderDecoder.decodeInt(bytes, byteOffset); 124 | return new LargeMessageHeaderValue(type, new UUID(mostSignificantBits, leastSignificantBits), segmentNumber, numberOfSegments, messageSizeInBytes); 125 | } 126 | return new LargeMessageHeaderValue(type, new UUID(mostSignificantBits, leastSignificantBits), segmentNumber, numberOfSegments,
INVALID_MESSAGE_SIZE); 127 | } 128 | 129 | } 130 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/LiKafkaCommonClientConfigs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
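A small round-trip sketch for the LargeMessageHeaderValue class above, illustrating the 29-byte V3 layout its javadoc describes; the UUID and the segment/size values are arbitrary sample inputs.

import java.util.UUID;
import com.linkedin.kafka.clients.common.LargeMessageHeaderValue;

public class HeaderValueRoundTripSketch {
  public static void main(String[] args) {
    // segment 0 of 3 for a message that is 3000 bytes in total
    LargeMessageHeaderValue original =
        new LargeMessageHeaderValue(LargeMessageHeaderValue.V3, UUID.randomUUID(), 0, 3, 3000);

    byte[] serialized = LargeMessageHeaderValue.toBytes(original);          // 29 bytes for a V3 header
    LargeMessageHeaderValue decoded = LargeMessageHeaderValue.fromBytes(serialized);

    // all fields survive the round trip, including messageSizeInBytes for LEGACY_V2/V3 headers
    System.out.println(decoded.getUuid().equals(original.getUuid()));       // true
    System.out.println(decoded.getSegmentNumber() + "/" + decoded.getNumberOfSegments()); // 0/3
    System.out.println(decoded.getMessageSizeInBytes());                    // 3000
  }
}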
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | 8 | /** 9 | * The common configurations for both LiKafkaFederatedProducer and LiKafkaFederatedConsumer 10 | */ 11 | public final class LiKafkaCommonClientConfigs { 12 | public static final String METADATA_SERVICE_CLIENT_CLASS_CONFIG = "li.metadata.service.client"; 13 | public static final String METADATA_SERVICE_CLIENT_CLASS_DOC = "The metadata service client class"; 14 | 15 | public static final String METADATA_SERVICE_REQUEST_TIMEOUT_MS_CONFIG = "li.metadata.service.request.timeout.ms"; 16 | public static final String METADATA_SERVICE_REQUEST_TIMEOUT_MS_DOC = 17 | "Timeout in milliseconds for requests to the metadata service"; 18 | 19 | public static final String CLUSTER_GROUP_CONFIG = "li.cluster.group"; 20 | public static final String CLUSTER_GROUP_DOC = "The name of the cluster group"; 21 | 22 | public static final String CLUSTER_ENVIRONMENT_CONFIG = "li.cluster.environment"; 23 | public static final String CLUSTER_ENVIRONMENT_DOC = "The location of the cluster group"; 24 | 25 | private LiKafkaCommonClientConfigs() { 26 | // Not called. Just to avoid style check error. 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/LiKafkaFederatedClient.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
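For orientation, the constants above are plain configuration keys; a federated client would typically be handed a map along the following lines. The metadata service client class name and all values shown are placeholders invented for this sketch, not anything defined in this repository.

import java.util.HashMap;
import java.util.Map;
import com.linkedin.kafka.clients.common.LiKafkaCommonClientConfigs;

public class FederatedClientConfigSketch {
  public static Map<String, Object> sampleConfigs() {
    Map<String, Object> configs = new HashMap<>();
    // placeholder implementation class for the metadata service client
    configs.put(LiKafkaCommonClientConfigs.METADATA_SERVICE_CLIENT_CLASS_CONFIG, "com.example.MetadataServiceClientImpl");
    configs.put(LiKafkaCommonClientConfigs.METADATA_SERVICE_REQUEST_TIMEOUT_MS_CONFIG, 30000);
    configs.put(LiKafkaCommonClientConfigs.CLUSTER_GROUP_CONFIG, "my-cluster-group");
    configs.put(LiKafkaCommonClientConfigs.CLUSTER_ENVIRONMENT_CONFIG, "prod");
    return configs;
  }
}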
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | 8 | // A superinterface for LiKafkaFederatedProducerImpl and LiKafkaFederatedConsumerImpl. 9 | public interface LiKafkaFederatedClient { 10 | LiKafkaFederatedClientType getClientType(); 11 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/LiKafkaFederatedClientType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | 8 | public enum LiKafkaFederatedClientType { 9 | FEDERATED_PRODUCER, 10 | FEDERATED_CONSUMER 11 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/MetricsProxy.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Collection; 8 | import java.util.Map; 9 | import java.util.Set; 10 | import org.apache.kafka.common.Metric; 11 | import org.apache.kafka.common.MetricName; 12 | 13 | /** 14 | * this class allows delegating kafka metrics to an underlying delegate 15 | * kafka client, allowing the delegate to be replaced/recreated without 16 | * "invalidating" metrics maps user code may hold on to.
17 | * it is meant to allow user code like the following to continue to "just work":
18 | * 19 | * KafkaClient client = ...
20 | * Map<MetricName, ? extends Metric> metrics = client.getMetrics();
21 | * // ... long time later ...
22 | * //do something with metrics map
23 | * 24 | * while still allowing instrumented clients to replace the underlying kafka client 25 | */ 26 | public abstract class MetricsProxy implements Map { 27 | 28 | /** 29 | * @return the metrics map for the current delegate client, or an empty map if none. 30 | */ 31 | protected abstract Map getMetrics(); 32 | 33 | @Override 34 | public int size() { 35 | return getMetrics().size(); 36 | } 37 | 38 | @Override 39 | public boolean isEmpty() { 40 | return getMetrics().isEmpty(); 41 | } 42 | 43 | @Override 44 | public boolean containsKey(Object key) { 45 | return getMetrics().containsKey(key); 46 | } 47 | 48 | @Override 49 | public boolean containsValue(Object value) { 50 | return getMetrics().containsValue(value); 51 | } 52 | 53 | @Override 54 | public Object get(Object key) { 55 | return getMetrics().get(key); 56 | } 57 | 58 | @Override 59 | public Metric put(Object key, Object value) { 60 | throw new UnsupportedOperationException(); //this collection is immutable in vanilla kafka anyway 61 | } 62 | 63 | @Override 64 | public Object remove(Object key) { 65 | throw new UnsupportedOperationException(); //this collection is immutable in vanilla kafka anyway 66 | } 67 | 68 | @Override 69 | public void putAll(Map m) { 70 | throw new UnsupportedOperationException(); //this collection is immutable in vanilla kafka anyway 71 | } 72 | 73 | @Override 74 | public void clear() { 75 | throw new UnsupportedOperationException(); //this collection is immutable in vanilla kafka anyway 76 | } 77 | 78 | @Override 79 | public Set keySet() { 80 | return getMetrics().keySet(); 81 | } 82 | 83 | @Override 84 | public Collection values() { 85 | return getMetrics().values(); 86 | } 87 | 88 | @Override 89 | public Set entrySet() { 90 | return getMetrics().entrySet(); 91 | } 92 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/PartitionLookupResult.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
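A minimal sketch of the delegation idea the MetricsProxy javadoc above describes: one long-lived proxy is handed out once, while the client behind it can be swapped. The class and field names are illustrative, and the raw Map signature mirrors this listing, which appears to have dropped the generic parameters (MetricName to Metric in the vanilla client).

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import com.linkedin.kafka.clients.common.MetricsProxy;

// Illustrative subclass: exposes the metrics of whichever consumer is currently installed.
public class ConsumerMetricsProxy extends MetricsProxy {
  private volatile Consumer<?, ?> delegate; // may be replaced when the underlying client is recreated

  public void setDelegate(Consumer<?, ?> newDelegate) {
    this.delegate = newDelegate;
  }

  @Override
  protected Map getMetrics() {
    Consumer<?, ?> current = delegate;
    return current == null ? Collections.emptyMap() : current.metrics();
  }
}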
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Collections; 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | import org.apache.kafka.common.TopicPartition; 12 | 13 | 14 | // This contains the result of the location lookup for a set of partitions across multiple clusters in a cluster group. 15 | public class PartitionLookupResult { 16 | private Map> _partitionsByCluster; 17 | private Set _nonexistentTopics; 18 | 19 | public PartitionLookupResult() { 20 | _partitionsByCluster = Collections.emptyMap(); 21 | _nonexistentTopics = Collections.emptySet(); 22 | } 23 | 24 | public PartitionLookupResult(Map> partitionsByCluster, 25 | Set nonexistentTopics) { 26 | _partitionsByCluster = partitionsByCluster; 27 | _nonexistentTopics = nonexistentTopics; 28 | } 29 | 30 | public Map> getPartitionsByCluster() { 31 | return _partitionsByCluster; 32 | } 33 | 34 | public Set getNonexistentTopics() { 35 | return _nonexistentTopics; 36 | } 37 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/common/TopicLookupResult.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.common; 6 | 7 | import java.util.Collections; 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | 12 | // This contains the result of the location lookup for a set of topics across multiple clusters in a cluster group. 13 | public class TopicLookupResult { 14 | private Map> _topicsByCluster; 15 | private Set _nonexistentTopics; 16 | 17 | public TopicLookupResult() { 18 | _topicsByCluster = Collections.emptyMap(); 19 | _nonexistentTopics = Collections.emptySet(); 20 | } 21 | 22 | public TopicLookupResult(Map> topicsByCluster, Set nonexistentTopics) { 23 | _topicsByCluster = topicsByCluster; 24 | _nonexistentTopics = nonexistentTopics; 25 | } 26 | 27 | public Map> getTopicsByCluster() { 28 | return _topicsByCluster; 29 | } 30 | 31 | public Set getNonexistentTopics() { 32 | return _nonexistentTopics; 33 | } 34 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/ConsumerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import java.util.Properties; 8 | import org.apache.kafka.clients.consumer.Consumer; 9 | 10 | 11 | @FunctionalInterface 12 | public interface ConsumerFactory { 13 | Consumer create(Properties base, Properties overrides); 14 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/DelegatingConsumer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import org.apache.kafka.clients.consumer.Consumer; 8 | 9 | 10 | public interface DelegatingConsumer extends Consumer { 11 | Consumer getDelegate(); 12 | } 13 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/LiKafkaConsumerBuilder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import java.util.Collections; 8 | import java.util.Map; 9 | 10 | 11 | class LiKafkaConsumerBuilder { 12 | private Map _configMap; 13 | 14 | public LiKafkaConsumerBuilder() { 15 | this(Collections.emptyMap()); 16 | } 17 | 18 | public LiKafkaConsumerBuilder(Map configMap) { 19 | _configMap = configMap; 20 | } 21 | 22 | public LiKafkaConsumerBuilder setConsumerConfig(Map configMap) { 23 | _configMap = configMap; 24 | return this; 25 | } 26 | 27 | public LiKafkaConsumer build() { 28 | // Serializers and auditor will be created using associated consumer properties. 29 | return new LiKafkaConsumerImpl(_configMap); 30 | } 31 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/LiKafkaConsumerRebalanceListener.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import com.linkedin.kafka.clients.largemessage.ConsumerRecordsProcessor; 8 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | import java.util.Collection; 14 | import java.util.HashSet; 15 | import java.util.Set; 16 | 17 | /** 18 | * The rebalance listener for LiKafkaClients that is large message aware. 19 | */ 20 | class LiKafkaConsumerRebalanceListener implements ConsumerRebalanceListener { 21 | private static final Logger LOG = LoggerFactory.getLogger(LiKafkaConsumerRebalanceListener.class); 22 | private final ConsumerRecordsProcessor _consumerRecordsProcessor; 23 | private final LiKafkaConsumer _consumer; 24 | private final Set _partitionsRemoved; 25 | private final boolean _autoCommitEnabled; 26 | private ConsumerRebalanceListener _userListener; 27 | 28 | LiKafkaConsumerRebalanceListener(ConsumerRecordsProcessor consumerRecordsProcessor, 29 | LiKafkaConsumer consumer, 30 | boolean autoCommitEnabled) { 31 | _consumerRecordsProcessor = consumerRecordsProcessor; 32 | _consumer = consumer; 33 | _partitionsRemoved = new HashSet<>(); 34 | _autoCommitEnabled = autoCommitEnabled; 35 | } 36 | 37 | @Override 38 | public void onPartitionsRevoked(Collection topicPartitions) { 39 | LOG.debug("Consumer rebalancing. Revoked partitions: {}", topicPartitions); 40 | // Record the partitions that might be revoked, if the partitions are really revoked, we need to clean up 41 | // the state. 42 | _partitionsRemoved.clear(); 43 | _partitionsRemoved.addAll(topicPartitions); 44 | 45 | try { 46 | // Fire user listener. 47 | _userListener.onPartitionsRevoked(topicPartitions); 48 | 49 | // Commit offset if auto commit is enabled. 50 | if (_autoCommitEnabled) { 51 | _consumer.commitSync(); 52 | } 53 | } finally { 54 | _consumerRecordsProcessor.clearAllConsumerHighWaterMarks(); 55 | } 56 | } 57 | 58 | @Override 59 | public void onPartitionsAssigned(Collection topicPartitions) { 60 | LOG.debug("Consumer rebalancing. Assigned partitions: {}", topicPartitions); 61 | // Remove the partitions that are assigned back to this consumer 62 | _partitionsRemoved.removeAll(topicPartitions); 63 | for (TopicPartition tp : _partitionsRemoved) { 64 | _consumerRecordsProcessor.clear(tp); 65 | } 66 | // Fire user listener. 67 | _userListener.onPartitionsAssigned(topicPartitions); 68 | } 69 | 70 | public void setUserListener(ConsumerRebalanceListener userListener) { 71 | _userListener = userListener; 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/LiKafkaOffsetCommitCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 8 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 9 | import org.apache.kafka.clients.consumer.OffsetCommitCallback; 10 | import org.apache.kafka.common.TopicPartition; 11 | 12 | import java.util.HashMap; 13 | import java.util.Map; 14 | 15 | /** 16 | * The offset commit callback for LiKafkaConsumer which is large message aware. 17 | */ 18 | class LiKafkaOffsetCommitCallback implements OffsetCommitCallback { 19 | private OffsetCommitCallback _userCallback = null; 20 | 21 | @Override 22 | public void onComplete(Map topicPartitionOffsetAndMetadataMap, Exception e) { 23 | if (_userCallback != null) { 24 | Map userOffsetMap = topicPartitionOffsetAndMetadataMap; 25 | if (topicPartitionOffsetAndMetadataMap != null) { 26 | userOffsetMap = new HashMap<>(); 27 | for (Map.Entry entry : topicPartitionOffsetAndMetadataMap.entrySet()) { 28 | String rawMetadata = entry.getValue().metadata(); 29 | long userOffset = LiKafkaClientsUtils.offsetFromWrappedMetadata(rawMetadata); 30 | String userMetadata = LiKafkaClientsUtils.metadataFromWrappedMetadata(rawMetadata); 31 | userOffsetMap.put(entry.getKey(), new OffsetAndMetadata(userOffset, userMetadata)); 32 | } 33 | } 34 | _userCallback.onComplete(userOffsetMap, e); 35 | } 36 | } 37 | 38 | public void setUserCallback(OffsetCommitCallback userCallback) { 39 | _userCallback = userCallback; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/consumer/LiOffsetResetStrategy.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.consumer; 6 | 7 | /** 8 | * Since enums are essentially final in Java, we are duplicating the values from 9 | * the open-source equivalent {@link org.apache.kafka.clients.consumer.OffsetResetStrategy} 10 | * 11 | * This workaround needs to be provided until apache/kafka can support a closest reset policy, 12 | * aka KIP-320 (https://cwiki.apache.org/confluence/display/KAFKA/KIP-320%3A+Allow+fetchers+to+detect+and+handle+log+truncation) 13 | */ 14 | public enum LiOffsetResetStrategy { 15 | EARLIEST, LATEST, NONE, LICLOSEST 16 | } 17 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/ConsumerRecordsProcessResult.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.largemessage.errors.ConsumerRecordsProcessingException; 8 | import com.linkedin.kafka.clients.largemessage.errors.RecordProcessingException; 9 | import java.util.ArrayList; 10 | import java.util.Collection; 11 | import java.util.Collections; 12 | import java.util.HashMap; 13 | import java.util.List; 14 | import java.util.Map; 15 | import org.apache.kafka.clients.consumer.ConsumerRecord; 16 | import org.apache.kafka.clients.consumer.ConsumerRecords; 17 | import org.apache.kafka.common.TopicPartition; 18 | 19 | 20 | /** 21 | * The process result of ConsumerRecords returned by the open source KafkaConsumer. 22 | * 23 | * It contains the following information: 24 | * 1. The processed consumer records. 25 | * 2. If there were exception in processing, the offsets to skip those problematic messages for each partition. 26 | * 3. The the exception thrown by the last problematic partition. (We just need to throw an exception to the user). 27 | */ 28 | public class ConsumerRecordsProcessResult { 29 | public class OffsetPair { 30 | private final long _currentOffset; 31 | private final long _resumeOffset; 32 | 33 | public OffsetPair(long currentOffset, long resumeOffset) { 34 | _currentOffset = currentOffset; 35 | _resumeOffset = resumeOffset; 36 | } 37 | 38 | public long getCurrentOffset() { 39 | return _currentOffset; 40 | } 41 | 42 | public long getResumeOffset() { 43 | return _resumeOffset; 44 | } 45 | } 46 | 47 | private final Map _offsetPair; 48 | private final List _exceptions; 49 | private final Map _exceptionMap; 50 | private Map>> _processedRecords; 51 | 52 | ConsumerRecordsProcessResult() { 53 | _processedRecords = new HashMap<>(); 54 | _offsetPair = new HashMap<>(); 55 | _exceptions = new ArrayList<>(); 56 | _exceptionMap = new HashMap<>(); 57 | } 58 | 59 | void addRecord(TopicPartition tp, ConsumerRecord record) { 60 | // Only put record into map if it is not null 61 | if (record != null) { 62 | List> list = _processedRecords.computeIfAbsent(tp, k -> new ArrayList<>()); 63 | list.add(record); 64 | } 65 | } 66 | 67 | void recordException(TopicPartition tp, long offset, RuntimeException e) { 68 | RecordProcessingException rpe = new RecordProcessingException(tp, offset, e); 69 | _exceptions.add(rpe); 70 | _exceptionMap.put(tp, rpe); 71 | // The resume offset is the error offset + 1. i.e. if user ignore the exception thrown and poll again, the resuming 72 | // offset should be this one. 
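// For example, an exception at offset 100 is recorded as OffsetPair(currentOffset=100, resumeOffset=101), so a caller that ignores the error and polls again resumes from offset 101.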
73 | _offsetPair.putIfAbsent(tp, new OffsetPair(offset, offset + 1)); 74 | } 75 | 76 | public void clearRecords() { 77 | _processedRecords = null; 78 | } 79 | 80 | public boolean hasError(TopicPartition tp) { 81 | return offsets().containsKey(tp); 82 | } 83 | 84 | /** 85 | * Returns true if any topic-partitions in the collection has an exception 86 | * @param topicPartitions topic partitions to check for errors 87 | * @return true if there's an error stored for any of the topic partitions given 88 | */ 89 | public boolean hasError(Collection topicPartitions) { 90 | if (topicPartitions != null && !topicPartitions.isEmpty()) { 91 | for (TopicPartition tp : topicPartitions) { 92 | if (offsets().containsKey(tp)) { 93 | return true; 94 | } 95 | } 96 | } 97 | return false; 98 | } 99 | 100 | public boolean hasException() { 101 | return !_exceptionMap.isEmpty(); 102 | } 103 | 104 | public ConsumerRecordsProcessingException exception() { 105 | return _exceptions.isEmpty() ? null : new ConsumerRecordsProcessingException(_exceptions); 106 | } 107 | 108 | public ConsumerRecordsProcessingException exception(Collection topicPartitions) { 109 | List recordProcessingExceptions = new ArrayList<>(topicPartitions.size()); 110 | topicPartitions.forEach(tp -> { 111 | if (_exceptionMap.containsKey(tp)) { 112 | recordProcessingExceptions.add(_exceptionMap.get(tp)); 113 | } 114 | }); 115 | 116 | return _exceptions.isEmpty() ? null : new ConsumerRecordsProcessingException(recordProcessingExceptions); 117 | } 118 | 119 | public ConsumerRecords consumerRecords() { 120 | return new ConsumerRecords<>(_processedRecords); 121 | } 122 | 123 | public Map offsets() { 124 | return Collections.unmodifiableMap(_offsetPair); 125 | } 126 | } 127 | 128 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/DefaultSegmentDeserializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import org.apache.kafka.common.serialization.Deserializer; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.nio.ByteBuffer; 12 | import java.util.Map; 13 | import java.util.UUID; 14 | 15 | /** 16 | * Default deserializer for large message segment 17 | */ 18 | public class DefaultSegmentDeserializer implements Deserializer { 19 | private static final Logger LOG = LoggerFactory.getLogger(DefaultSegmentDeserializer.class); 20 | private static final int CHECKSUM_LENGTH = Integer.BYTES; 21 | 22 | @Override 23 | public void configure(Map configs, boolean isKey) { 24 | 25 | } 26 | 27 | @Override 28 | public LargeMessageSegment deserialize(String s, byte[] bytes) { 29 | int headerLength = 1 + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + CHECKSUM_LENGTH; 30 | if (bytes.length < headerLength) { 31 | LOG.debug("Serialized segment size too small, not large message segment."); 32 | return null; 33 | } 34 | ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); 35 | byte version = byteBuffer.get(); 36 | if (version > LargeMessageSegment.CURRENT_VERSION) { 37 | LOG.debug("Serialized version byte is greater than {}. not large message segment.", 38 | LargeMessageSegment.CURRENT_VERSION); 39 | return null; 40 | } 41 | int checksum = byteBuffer.getInt(); 42 | long messageIdMostSignificantBits = byteBuffer.getLong(); 43 | long messageIdLeastSignificantBits = byteBuffer.getLong(); 44 | if (checksum != ((int) (messageIdMostSignificantBits + messageIdLeastSignificantBits))) { 45 | LOG.debug("Serialized segment checksum does not match. not large message segment."); 46 | return null; 47 | } 48 | UUID messageId = new UUID(messageIdMostSignificantBits, messageIdLeastSignificantBits); 49 | int sequenceNumber = byteBuffer.getInt(); //expected to be [0, numberOfSegments) 50 | int numberOfSegments = byteBuffer.getInt(); //expected to be >0 51 | int messageSizeInBytes = byteBuffer.getInt(); //expected to be >= bytes.length - headerLength 52 | if (sequenceNumber < 0 || numberOfSegments <= 0 || sequenceNumber >= numberOfSegments) { 53 | LOG.warn("Serialized segment sequence {} not in [0, {}). treating as regular payload", sequenceNumber, numberOfSegments); 54 | return null; 55 | } 56 | int segmentPayloadSize = bytes.length - headerLength; //how much user data in this record 57 | if (messageSizeInBytes < segmentPayloadSize) { 58 | //there cannot be more data in a single segment than the total size of the assembled msg 59 | LOG.warn("Serialized segment size {} bigger than assembled msg size {}, treating as regular payload", segmentPayloadSize, messageSizeInBytes); 60 | return null; 61 | } 62 | ByteBuffer payload = byteBuffer.slice(); 63 | return new LargeMessageSegment(messageId, sequenceNumber, numberOfSegments, messageSizeInBytes, payload); 64 | } 65 | 66 | @Override 67 | public void close() { 68 | 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/DefaultSegmentSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import org.apache.kafka.common.serialization.Serializer; 8 | 9 | import java.nio.ByteBuffer; 10 | import java.util.Map; 11 | 12 | /** 13 | * The default large message segment serializer. 14 | * The format of the serialized segment is: 15 | * 1 byte - version 16 | * 4 bytes - checksum to determine if bytes is large message segment or not. 17 | * 16 bytes - messageId 18 | * 4 bytes - sequence number 19 | * 4 bytes - number of segments 20 | * 4 bytes - message size in bytes 21 | * X bytes - payload 22 | */ 23 | public class DefaultSegmentSerializer implements Serializer { 24 | // 1 is for version byte; Integer.BYTES is for checksum; SEGMENT_INFO_OVERHEAD is for other metadata 25 | public static final int PAYLOAD_HEADER_OVERHEAD = 1 + Integer.BYTES + LargeMessageSegment.SEGMENT_INFO_OVERHEAD; 26 | 27 | @Override 28 | public void configure(Map configs, boolean isKey) { 29 | 30 | } 31 | 32 | @Override 33 | public byte[] serialize(String s, LargeMessageSegment segment) { 34 | ByteBuffer byteBuffer = ByteBuffer.allocate(PAYLOAD_HEADER_OVERHEAD + segment.payload.limit()); 35 | byteBuffer.put(LargeMessageSegment.CURRENT_VERSION); 36 | byteBuffer.putInt((int) (segment.messageId.getMostSignificantBits() + segment.messageId.getLeastSignificantBits())); 37 | byteBuffer.putLong(segment.messageId.getMostSignificantBits()); 38 | byteBuffer.putLong(segment.messageId.getLeastSignificantBits()); 39 | byteBuffer.putInt(segment.sequenceNumber); 40 | byteBuffer.putInt(segment.numberOfSegments); 41 | byteBuffer.putInt(segment.messageSizeInBytes); 42 | byteBuffer.put(segment.payload); 43 | return byteBuffer.array(); 44 | } 45 | 46 | @Override 47 | public void close() { 48 | 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/LargeMessage.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.largemessage.errors.InvalidSegmentException; 8 | import org.apache.kafka.common.TopicPartition; 9 | 10 | import java.nio.ByteBuffer; 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | import java.util.UUID; 14 | 15 | /** 16 | * The class to buffer incomplete message segments. 17 | */ 18 | public class LargeMessage { 19 | private final Map _segments; 20 | private final int _messageSize; 21 | private final int _numberOfSegments; 22 | private final TopicPartition _tp; 23 | private final UUID _messageId; 24 | private final long _startingOffset; 25 | private long _bufferedBytes; 26 | 27 | LargeMessage(TopicPartition tp, UUID messageId, long startingOffset, int messageSize, int numberOfSegments) { 28 | _messageSize = messageSize; 29 | _numberOfSegments = numberOfSegments; 30 | _segments = new HashMap<>(); 31 | _bufferedBytes = 0; 32 | _tp = tp; 33 | _messageId = messageId; 34 | _startingOffset = startingOffset; 35 | } 36 | 37 | public synchronized long bufferedSizeInBytes() { 38 | return _bufferedBytes; 39 | } 40 | 41 | public synchronized SegmentAddResult addSegment(LargeMessageSegment segment, long offset) { 42 | int seq = segment.sequenceNumber; 43 | int segmentSize = segment.payload.remaining(); 44 | validateSegment(segment); 45 | // Ignore duplicated segment. 46 | if (!_segments.containsKey(seq)) { 47 | _segments.put(seq, segment.payload); 48 | _bufferedBytes += segmentSize; 49 | if (_segments.size() == _numberOfSegments) { 50 | // If we have got all the segments, assemble the original serialized message. 51 | return new SegmentAddResult(assembleMessage(), segmentSize, _startingOffset); 52 | } 53 | } else { 54 | // duplicate segment 55 | return new SegmentAddResult(null, 0, _startingOffset); 56 | } 57 | // The segment is buffered, but it did not complete a large message. 58 | return new SegmentAddResult(null, segmentSize, _startingOffset); 59 | } 60 | 61 | public TopicPartition topicPartition() { 62 | return _tp; 63 | } 64 | 65 | public UUID messageId() { 66 | return _messageId; 67 | } 68 | 69 | public long startingOffset() { 70 | return _startingOffset; 71 | } 72 | 73 | public String toString() { 74 | return String.format("[TopicPartition:%s, UUID:%s, NumberOfSegments:%d, MessageSize:%d, BufferedBytes:%d]", 75 | _tp, _messageId, _numberOfSegments, _messageSize, _bufferedBytes); 76 | } 77 | 78 | private void validateSegment(LargeMessageSegment segment) { 79 | int segmentSize = segment.payload.remaining(); 80 | int seq = segment.sequenceNumber; 81 | 82 | if (segmentSize <= 0) { 83 | throw new InvalidSegmentException("Invalid segment: " + segment + ". Segment size should be greater than 0."); 84 | } 85 | 86 | if (_messageSize != segment.messageSizeInBytes 87 | || _numberOfSegments != segment.numberOfSegments) { 88 | throw new InvalidSegmentException("Detected UUID conflict. Segment: " + segment); 89 | } 90 | 91 | if (!_segments.containsKey(seq) && _bufferedBytes + segmentSize > _messageSize) { 92 | throw new InvalidSegmentException("Invalid segment: " + segment + ". Segments have more bytes than the " + 93 | "message has. 
Message size =" + _messageSize + ", segments total bytes = " + 94 | (_bufferedBytes + segmentSize)); 95 | } 96 | } 97 | 98 | private byte[] assembleMessage() { 99 | if (_bufferedBytes != _messageSize) { 100 | throw new InvalidSegmentException("Buffered bytes in the message should equal to message size." 101 | + " Buffered bytes = " + _bufferedBytes + "message size = " + _messageSize); 102 | } 103 | byte[] serializedMessage = new byte[_messageSize]; 104 | int segmentStart = 0; 105 | for (int i = 0; i < _numberOfSegments; i++) { 106 | ByteBuffer payload = _segments.get(i); 107 | int payloadSize = payload.remaining(); 108 | payload.get(serializedMessage, segmentStart, payloadSize); 109 | segmentStart += payloadSize; 110 | } 111 | assert (segmentStart == _messageSize); 112 | return serializedMessage; 113 | } 114 | 115 | /** 116 | * This is the container class to return the result of a segment addition. 117 | */ 118 | class SegmentAddResult { 119 | private final byte[] _serializedMessage; 120 | private final long _startingOffset; 121 | private final int _bytesAdded; 122 | 123 | SegmentAddResult(byte[] serializedMessage, int bytesAdded, long startingOffset) { 124 | _serializedMessage = serializedMessage; 125 | _bytesAdded = bytesAdded; 126 | _startingOffset = startingOffset; 127 | } 128 | 129 | /** 130 | * Return the completed large message in its serialized bytes format. 131 | * 132 | * @return The assembled serialized message if a large message is completed, otherwise null. 133 | */ 134 | byte[] serializedMessage() { 135 | return _serializedMessage; 136 | } 137 | 138 | /** 139 | * @return The size of segment in bytes that has been added to the buffer. It does not count for duplicate segment. 140 | */ 141 | int bytesAdded() { 142 | return _bytesAdded; 143 | } 144 | 145 | /** 146 | * @return the offset of the first segment in this large message. 147 | */ 148 | long startingOffset() { 149 | return _startingOffset; 150 | } 151 | } 152 | 153 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/LargeMessageCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.largemessage.errors.LargeMessageSendException; 8 | import org.apache.kafka.clients.producer.Callback; 9 | import org.apache.kafka.clients.producer.RecordMetadata; 10 | 11 | /** 12 | * This is the callback class for large message. It works in the following way: 13 | * 1. It holds the original callback provided by user. 14 | * 2. The user callback will only fire once When all the segments of the large messages are acked. If all the segments 15 | * are sent successfully, the user callback will receive no exception. If exceptions are received by several 16 | * segments, the user callback will receive the first exception. 17 | * Because the callback will only be called by one single thread, no synchronization is needed. 18 | */ 19 | public class LargeMessageCallback implements Callback { 20 | private final int _numSegments; 21 | private final Callback _userCallback; 22 | private int _acksReceived; 23 | private int _segmentsSent; 24 | private Exception _exception; 25 | 26 | public LargeMessageCallback(int numSegments, Callback userCallback) { 27 | _numSegments = numSegments; 28 | _acksReceived = 0; 29 | _segmentsSent = 0; 30 | _userCallback = userCallback; 31 | _exception = null; 32 | } 33 | 34 | @Override 35 | public void onCompletion(RecordMetadata recordMetadata, Exception e) { 36 | // The callback will only be fired once. 37 | _acksReceived++; 38 | 39 | // Set exception to be the first exception 40 | if (e != null && _exception == null) { 41 | _exception = e; 42 | } 43 | if (e == null) { 44 | _segmentsSent++; 45 | } 46 | // Invoke user callback when receive the last callback of the large message. 47 | if (_acksReceived == _numSegments) { 48 | if (_exception == null) { 49 | _userCallback.onCompletion(recordMetadata, null); 50 | } else { 51 | _userCallback.onCompletion( 52 | null, 53 | new LargeMessageSendException(String.format("Error when sending large message. Sent %d of %d segments.", 54 | _segmentsSent, _numSegments), _exception) 55 | ); 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/LargeMessageOffsetTracker.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.utils.QueuedMap; 8 | import org.apache.kafka.common.TopicPartition; 9 | 10 | import java.util.ArrayList; 11 | import java.util.Collections; 12 | import java.util.HashMap; 13 | import java.util.List; 14 | import java.util.Map; 15 | import java.util.UUID; 16 | 17 | /** 18 | * The class keeps track of the safe offset for each partition. 19 | */ 20 | public class LargeMessageOffsetTracker { 21 | 22 | private final Map> _offsetMap; 23 | 24 | LargeMessageOffsetTracker() { 25 | _offsetMap = new HashMap<>(); 26 | } 27 | 28 | Map safeOffsets() { 29 | if (_offsetMap.size() == 0) { 30 | return Collections.emptyMap(); 31 | } 32 | 33 | Map safeOffsetMap = new HashMap<>(); 34 | for (TopicPartition tp : _offsetMap.keySet()) { 35 | QueuedMap offsetMapForPartition = _offsetMap.get(tp); 36 | UUID eldest = offsetMapForPartition.getEldestKey(); 37 | if (eldest != null) { 38 | safeOffsetMap.put(tp, offsetMapForPartition.get(eldest)); 39 | } 40 | } 41 | return safeOffsetMap; 42 | } 43 | 44 | long safeOffset(TopicPartition tp) { 45 | long offset = Long.MAX_VALUE; 46 | QueuedMap offsetMapForPartition = _offsetMap.get(tp); 47 | if (offsetMapForPartition != null) { 48 | UUID eldest = offsetMapForPartition.getEldestKey(); 49 | if (eldest != null) { 50 | offset = offsetMapForPartition.get(eldest); 51 | } 52 | } 53 | return offset; 54 | } 55 | 56 | void untrackMessage(TopicPartition tp, UUID messageId) { 57 | QueuedMap offsetMapForPartition = _offsetMap.get(tp); 58 | if (offsetMapForPartition != null) { 59 | offsetMapForPartition.remove(messageId); 60 | } else { 61 | throw new IllegalStateException("Could not find message " + messageId + " in partition " + tp 62 | + ". This should not happen because a completed large message must have been tracked by offset " 63 | + "tracker!"); 64 | } 65 | } 66 | 67 | void maybeTrackMessage(TopicPartition tp, UUID messageId, long offset) { 68 | QueuedMap offsetMapForPartition = getAndMaybePutOffsetMapForPartition(tp); 69 | if (offsetMapForPartition.get(messageId) == null) { 70 | offsetMapForPartition.put(messageId, offset); 71 | } 72 | } 73 | 74 | List expireMessageUntilOffset(TopicPartition tp, long offset) { 75 | List expired = new ArrayList<>(); 76 | QueuedMap offsetMapForPartition = _offsetMap.get(tp); 77 | UUID eldest = offsetMapForPartition == null ? null : offsetMapForPartition.getEldestKey(); 78 | while (eldest != null && offsetMapForPartition.get(eldest) < offset) { 79 | expired.add(eldest); 80 | offsetMapForPartition.remove(eldest); 81 | eldest = offsetMapForPartition.getEldestKey(); 82 | } 83 | return expired; 84 | } 85 | 86 | void clear() { 87 | _offsetMap.clear(); 88 | } 89 | 90 | void clear(TopicPartition tp) { 91 | _offsetMap.remove(tp); 92 | } 93 | 94 | private QueuedMap getAndMaybePutOffsetMapForPartition(TopicPartition tp) { 95 | QueuedMap offsetMapForPartition = _offsetMap.get(tp); 96 | if (offsetMapForPartition == null) { 97 | offsetMapForPartition = new QueuedMap<>(); 98 | _offsetMap.put(tp, offsetMapForPartition); 99 | } 100 | return offsetMapForPartition; 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/LargeMessageSegment.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. 
Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.common.LargeMessageHeaderValue; 8 | import com.linkedin.kafka.clients.largemessage.errors.InvalidSegmentException; 9 | import java.nio.ByteBuffer; 10 | import java.util.Arrays; 11 | import java.util.UUID; 12 | 13 | /** 14 | * The class that holds a large message segment. 15 | *

16 | * Each large message segment contains the following information:
17 | * <ul>
18 | *   <li>MessageId: The message id of the original large message.</li>
19 | *   <li>SequenceNumber: The sequence number of the segment.</li>
20 | *   <li>NumberOfSegments: The total number of segments the original large message has.</li>
21 | *   <li>MessageSizeInBytes: The size of the original large message in bytes.</li>
22 | *   <li>payload: The payload ByteBuffer of the segment.</li>
23 | * </ul>
24 | * 25 | * Please notice that it is not guaranteed that the payload ByteBuffer has a dedicated underlying byte array. To 26 | * get a dedicated byte array representation of the payload, {@link #payloadArray()} method should be called. 27 | * 28 | */ 29 | public class LargeMessageSegment { 30 | public final UUID messageId; 31 | public final int sequenceNumber; 32 | public final int numberOfSegments; 33 | public final int messageSizeInBytes; 34 | public final ByteBuffer payload; 35 | // The segment information over head bytes when serialize. 36 | public static final int SEGMENT_INFO_OVERHEAD = 16 + Integer.BYTES + Integer.BYTES + Integer.BYTES; 37 | // TODO: migrate to V3 version(i.e. changing CURRENT_VERSION to V3) when bumping major version 38 | // In the new major version, we shall only uses record headers for LM support instead of using both payload header and record header 39 | public static final byte CURRENT_VERSION = LargeMessageHeaderValue.LEGACY; 40 | 41 | public LargeMessageSegment(UUID messageId, 42 | int sequenceNumber, 43 | int numberOfSegments, 44 | int messageSizeInBytes, 45 | ByteBuffer payload) { 46 | this.messageId = messageId; 47 | this.sequenceNumber = sequenceNumber; 48 | this.numberOfSegments = numberOfSegments; 49 | this.messageSizeInBytes = messageSizeInBytes; 50 | this.payload = payload; 51 | } 52 | 53 | /** 54 | * Notice that the payload as a ByteBuffer does not guarantee to have a dedicated underlying byte array. So calling 55 | * {@code payload.array()} will not always give the payload byte array. This method should be called if user wants 56 | * to have the payload byte array. 57 | * 58 | * @return The payload as a byte array. 59 | */ 60 | public byte[] payloadArray() { 61 | if (payload.arrayOffset() == 0 && payload.limit() == payload.array().length) { 62 | return payload.array(); 63 | } else { 64 | return Arrays.copyOfRange(payload.array(), payload.arrayOffset(), payload.arrayOffset() + payload.limit()); 65 | } 66 | } 67 | 68 | public void sanityCheck() throws InvalidSegmentException { 69 | if (messageId == null) { 70 | throw new InvalidSegmentException("Message Id can not be null"); 71 | } 72 | if (messageSizeInBytes < 0) { 73 | throw new InvalidSegmentException("message size (" + messageSizeInBytes + ") should be >= 0"); 74 | } 75 | if (payload == null) { 76 | throw new InvalidSegmentException("payload cannot be null"); 77 | } 78 | //this tries to handle cases where payload has not been flipped/rewound 79 | long dataSize = payload.position() > 0 ? 
payload.position() : payload.limit(); 80 | if (dataSize > messageSizeInBytes) { 81 | throw new InvalidSegmentException("segment size (" + dataSize + ") should not be larger than message size (" + messageSizeInBytes + ")"); 82 | } 83 | if (numberOfSegments <= 0) { 84 | throw new InvalidSegmentException("number of segments should be > 0, instead is " + numberOfSegments); 85 | } 86 | if (sequenceNumber < 0 || sequenceNumber > numberOfSegments - 1) { 87 | throw new InvalidSegmentException("Sequence number " + sequenceNumber + " should fall between [0," + (numberOfSegments - 1) + "]."); 88 | } 89 | } 90 | 91 | @Override 92 | public String toString() { 93 | return "[messageId=" + messageId + ",seq=" + sequenceNumber + ",numSegs=" + numberOfSegments + ",messageSize=" + 94 | messageSizeInBytes + ",payloadSize=" + payload.limit() + "]"; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/MessageAssembler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import org.apache.kafka.common.TopicPartition; 8 | import org.apache.kafka.common.header.Header; 9 | 10 | 11 | /** 12 | * The interface of the assembler on the consumer side to re-assemble the message segments created by 13 | * {@link MessageSplitter}. The message assembler is also responsible for keeping track of the safe offset to commit 14 | * for a partition (see {@link #safeOffset}) 15 | */ 16 | public interface MessageAssembler { 17 | 18 | /** 19 | * Assemble the message segments into the original message. 20 | * When the segment provided completes an original message, the original message will be returned. Otherwise it 21 | * returns null. 22 | * 23 | * @param tp the partition of this segment. 24 | * @param offset the offset of this segment. 25 | * @param segmentBytes a message segment in byte array format created by {@link MessageSplitter} 26 | * @return The assemble result if a message is successfully assembled, otherwise null. 27 | */ 28 | // TODO: this method should be marked as deprecated for the next major version bump. 29 | AssembleResult assemble(TopicPartition tp, long offset, byte[] segmentBytes); 30 | 31 | /** 32 | * Assemble the message segments into the original message. 33 | * When the segment provided completes an original message, the original message will be returned. Otherwise it 34 | * returns null. 35 | * 36 | * @param tp the partition of this segment. 37 | * @param offset the offset of this segment. 38 | * @param segmentBytes a message segment in byte array format created by {@link MessageSplitter} 39 | * @param header record header of this segment 40 | * @return The assemble result if a message is successfully assembled, otherwise null. 41 | */ 42 | AssembleResult assemble(TopicPartition tp, long offset, byte[] segmentBytes, Header header); 43 | /** 44 | * Get the safe offset for a particular partition. When the safe offset of a partition is not available, Long.MAX_VALUE 45 | * will be returned. This will also expire any large messages that cannot be assembled if the interval between 46 | * the uncompleted large message offset and the currentPosition is larger than some threshold, for example 47 | * {@link com.linkedin.kafka.clients.consumer.LiKafkaConsumerConfig#MESSAGE_ASSEMBLER_EXPIRATION_OFFSET_GAP_CONFIG} 48 | * 49 | * @param tp the topic partition to get the safe offset for. 50 | * @param currentPosition the current position of the consumer for the specified tp. If the current position 51 | * is not known, the highest known consumed position can be used, with correspondingly less 52 | * accuracy in expiring messages. 53 | * @return the safe offset. 54 | */ 55 | long safeOffset(TopicPartition tp, long currentPosition); 56 | 57 | /** 58 | * Clean up all the state in the message assembler. 59 | */ 60 | void clear(); 61 | 62 | /** 63 | * Clear the state of a partition. 64 | * 65 | * @param tp the partition to clear state for. 66 | */ 67 | void clear(TopicPartition tp); 68 | 69 | /** 70 | * Close the assembler. 
71 | */ 72 | void close(); 73 | 74 | 75 | 76 | class AssembleResult { 77 | public static final byte[] INCOMPLETE_RESULT = new byte[0]; 78 | private final byte[] _messageBytes; 79 | private final long _messageStartingOffset; 80 | private final long _messageEndingOffset; 81 | 82 | AssembleResult(byte[] messageBytes, long startingOffset, long endingOffset) { 83 | _messageBytes = messageBytes; 84 | _messageStartingOffset = startingOffset; 85 | _messageEndingOffset = endingOffset; 86 | } 87 | 88 | public byte[] messageBytes() { 89 | return _messageBytes; 90 | } 91 | 92 | public long messageStartingOffset() { 93 | return _messageStartingOffset; 94 | } 95 | 96 | public long messageEndingOffset() { 97 | return _messageEndingOffset; 98 | } 99 | 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/MessageAssemblerImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.common.LargeMessageHeaderValue; 8 | import com.linkedin.kafka.clients.utils.Constants; 9 | import java.nio.ByteBuffer; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.apache.kafka.common.header.Header; 12 | import org.apache.kafka.common.serialization.Deserializer; 13 | 14 | import static com.linkedin.kafka.clients.largemessage.MessageAssembler.AssembleResult.INCOMPLETE_RESULT; 15 | 16 | 17 | /** 18 | * The implementation of {@link MessageAssembler} 19 | */ 20 | public class MessageAssemblerImpl implements MessageAssembler { 21 | private final LargeMessageBufferPool _messagePool; 22 | private final Deserializer _segmentDeserializer; 23 | 24 | public MessageAssemblerImpl(long bufferCapacity, 25 | long expirationOffsetGap, 26 | boolean exceptionOnMessageDropped, 27 | Deserializer segmentDeserializer) { 28 | _messagePool = new LargeMessageBufferPool(bufferCapacity, expirationOffsetGap, exceptionOnMessageDropped); 29 | _segmentDeserializer = segmentDeserializer; 30 | } 31 | 32 | public MessageAssemblerImpl(long bufferCapacity, 33 | long expirationOffsetGap, 34 | boolean exceptionOnMessageDropped) { 35 | this(bufferCapacity, expirationOffsetGap, exceptionOnMessageDropped, null); 36 | } 37 | 38 | @Deprecated 39 | public MessageAssemblerImpl(long bufferCapacity, 40 | long expirationOffsetGap, 41 | boolean exceptionOnMessageDropped, 42 | Deserializer segmentDeserializer, 43 | @SuppressWarnings("unused") boolean treatInvalidMessageSegmentsAsPayload) { 44 | _messagePool = new LargeMessageBufferPool(bufferCapacity, expirationOffsetGap, exceptionOnMessageDropped); 45 | _segmentDeserializer = segmentDeserializer; 46 | } 47 | 48 | @Override 49 | public AssembleResult assemble(TopicPartition tp, long offset, byte[] segmentBytes) { 50 | if (segmentBytes == null) { 51 | return new AssembleResult(null, offset, offset); 52 | } 53 | 54 | LargeMessageSegment segment = _segmentDeserializer.deserialize(tp.topic(), segmentBytes); 55 | return assembleSegment(tp, offset, segmentBytes, segment); 56 | } 57 | 58 | @Override 59 | public AssembleResult assemble(TopicPartition tp, 60 | long offset, 61 | byte[] segmentBytes, 62 | Header header) { 63 | if (segmentBytes == null) { 64 | return new AssembleResult(null, offset, offset); 65 | } 66 | 67 | // no LM record header or wrong key, use default assemble() 68 | if (header == null || !header.key().equals(Constants.LARGE_MESSAGE_HEADER)) { 69 | return assemble(tp, offset, segmentBytes); 70 | } 71 | // retrieve segment header 72 | LargeMessageHeaderValue largeMessageHeader = LargeMessageHeaderValue.fromBytes(header.value()); 73 | // check version, if it is older than V3, still use assembler with payload header 74 | if (largeMessageHeader.getType() < LargeMessageHeaderValue.V3) { 75 | return assemble(tp, offset, segmentBytes); 76 | } 77 | ByteBuffer payload = ByteBuffer.wrap(segmentBytes); 78 | 79 | // create segment 80 | LargeMessageSegment segment = new LargeMessageSegment(largeMessageHeader.getUuid(), largeMessageHeader.getSegmentNumber(), 81 | largeMessageHeader.getNumberOfSegments(), largeMessageHeader.getMessageSizeInBytes(), payload); 82 | return assembleSegment(tp, offset, segmentBytes, segment); 83 | } 84 | 85 | @Override 86 | public long safeOffset(TopicPartition tp, long currentPosition) { 87 | return _messagePool.safeOffset(tp, currentPosition); 88 | } 89 | 90 | @Override 91 | 
public void clear() { 92 | _messagePool.clear(); 93 | } 94 | 95 | @Override 96 | public void clear(TopicPartition tp) { 97 | _messagePool.clear(tp); 98 | } 99 | 100 | @Override 101 | public void close() { 102 | } 103 | 104 | // A helper method to avoid duplicate codes for assembling a segment 105 | private AssembleResult assembleSegment(TopicPartition tp, 106 | long offset, 107 | byte[] segmentBytes, 108 | LargeMessageSegment segment) { 109 | if (segment == null) { 110 | //not a segment 111 | return new AssembleResult(segmentBytes, offset, offset); 112 | } else { 113 | //sanity-check the segment 114 | segment.sanityCheck(); 115 | 116 | // Return immediately if it is a single segment message. 117 | if (segment.numberOfSegments == 1) { 118 | return new AssembleResult(segment.payloadArray(), offset, offset); 119 | } else { 120 | LargeMessage.SegmentAddResult result = _messagePool.tryCompleteMessage(tp, offset, segment); 121 | return new AssembleResult(result.serializedMessage() == null ? INCOMPLETE_RESULT : result.serializedMessage(), result.startingOffset(), offset); 122 | } 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/ConsumerRecordsProcessingException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | import java.util.Collections; 8 | import java.util.Iterator; 9 | import java.util.List; 10 | 11 | 12 | public class ConsumerRecordsProcessingException extends RuntimeException { 13 | private final List _recordProcessingExceptions; 14 | 15 | public ConsumerRecordsProcessingException(List exceptions) { 16 | super(String.format("Received exception when processing messages for %d partitions.", exceptions.size()), exceptions.get(0)); 17 | Iterator exceptionIterator = exceptions.iterator(); 18 | // skip the first exception. 19 | exceptionIterator.next(); 20 | while (exceptionIterator.hasNext()) { 21 | addSuppressed(exceptionIterator.next()); 22 | } 23 | _recordProcessingExceptions = exceptions; 24 | } 25 | 26 | public List recordProcessingExceptions() { 27 | return Collections.unmodifiableList(_recordProcessingExceptions); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/InvalidSegmentException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | /** 8 | * This exception is thrown when a {@link com.linkedin.kafka.clients.largemessage.LargeMessageSegment} contains 9 | * invalid fields. This exception can also be thrown when several individually valid segments cannot be assembled to 10 | * a complete large message. 11 | */ 12 | public class InvalidSegmentException extends LargeMessageException { 13 | 14 | public InvalidSegmentException(String message) { 15 | super(message); 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/LargeMessageDroppedException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | /** 8 | * Thrown when a message is dropped from the message pool because the buffer is full. 9 | */ 10 | public class LargeMessageDroppedException extends LargeMessageException { 11 | 12 | public LargeMessageDroppedException(String message) { 13 | super(message); 14 | } 15 | 16 | } 17 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/LargeMessageException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | import org.apache.kafka.common.KafkaException; 8 | 9 | /** 10 | * Exceptions for large messages. 11 | */ 12 | public abstract class LargeMessageException extends KafkaException { 13 | 14 | private static final long serialVersionUID = 1L; 15 | 16 | public LargeMessageException(String message, Throwable cause) { 17 | super(message, cause); 18 | } 19 | 20 | public LargeMessageException(String message) { 21 | super(message); 22 | } 23 | 24 | public LargeMessageException(Throwable cause) { 25 | super(cause); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/LargeMessageSendException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | /** 8 | * The exception is thrown when a large message send failed. 9 | */ 10 | public class LargeMessageSendException extends LargeMessageException { 11 | 12 | public LargeMessageSendException(String message, Throwable cause) { 13 | super(message, cause); 14 | } 15 | 16 | public LargeMessageSendException(String message) { 17 | super(message); 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/OffsetNotTrackedException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | import org.apache.kafka.common.TopicPartition; 8 | 9 | 10 | /** 11 | * Thrown when an offset is not tracked by the consumer. LiKafkaConsumerImpl keeps track of all the messages it has 12 | * consumed to support large message aware seek(). This exception indicates that the user is trying to seek back 13 | * to an offset that is less than the earliest offset the consumer has ever consumed. 14 | */ 15 | public class OffsetNotTrackedException extends LargeMessageException { 16 | private final TopicPartition _topicPartition; 17 | private final long _offset; 18 | 19 | public OffsetNotTrackedException(TopicPartition topicPartition, long offset, String message) { 20 | super(message); 21 | _topicPartition = topicPartition; 22 | _offset = offset; 23 | } 24 | 25 | public TopicPartition getTopicPartition() { 26 | return _topicPartition; 27 | } 28 | 29 | public long getOffset() { 30 | return _offset; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/RecordProcessingException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | import org.apache.kafka.common.TopicPartition; 8 | 9 | 10 | /** 11 | * An exception indicating that an error was encountered while processing a consumer record. 12 | */ 13 | public class RecordProcessingException extends RuntimeException { 14 | private final TopicPartition _topicPartition; 15 | private final long _offset; 16 | 17 | public RecordProcessingException(TopicPartition tp, long offset, Throwable cause) { 18 | super(cause); 19 | _topicPartition = tp; 20 | _offset = offset; 21 | } 22 | 23 | public TopicPartition topicPartition() { 24 | return _topicPartition; 25 | } 26 | 27 | public long offset() { 28 | return _offset; 29 | } 30 | 31 | @Override 32 | public synchronized Throwable fillInStackTrace() { 33 | return this; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/largemessage/errors/SkippableException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage.errors; 6 | 7 | public class SkippableException extends RuntimeException { 8 | 9 | private static final long serialVersionUID = 1L; 10 | 11 | public SkippableException() { 12 | super(); 13 | } 14 | 15 | public SkippableException(String message, Throwable cause) { 16 | super(message, cause); 17 | } 18 | 19 | public SkippableException(String message) { 20 | super(message); 21 | } 22 | 23 | public SkippableException(Throwable cause) { 24 | super(cause); 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/producer/DelegatingProducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import org.apache.kafka.clients.producer.Producer; 8 | 9 | 10 | public interface DelegatingProducer extends Producer { 11 | Producer getDelegate(); 12 | } 13 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/producer/LiKafkaProducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import com.linkedin.kafka.clients.annotations.InterfaceOrigin; 8 | import java.util.concurrent.TimeUnit; 9 | import org.apache.kafka.clients.producer.Producer; 10 | 11 | 12 | /** 13 | * The general producer interface that allows pluggable serializers and deserializers. 14 | * LiKafkaProducer has the same interface as open source {@link Producer}. We define the interface separately to allow 15 | * future extensions. 16 | * @see LiKafkaProducerImpl 17 | */ 18 | public interface LiKafkaProducer extends Producer { 19 | /** 20 | * Flush any accumulated records from the producer. If the flush does not complete within the timeout, an exception is thrown. 21 | * If the underlying producer does not support bounded flush, this method defaults to {@link #flush()} 22 | * TODO: This API is added as a HOTFIX until the API change is available in apache/kafka 23 | */ 24 | @InterfaceOrigin.LiKafkaClients 25 | void flush(long timeout, TimeUnit timeUnit); 26 | } 27 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/producer/LiKafkaProducerBuilder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import java.util.Collections; 8 | import java.util.Map; 9 | 10 | 11 | // A class that builds a LiKafkaProducer using the passed-in producer configs. This is supposed to be used by 12 | // the federated producer to create per-cluster producers. 13 | class LiKafkaProducerBuilder { 14 | private Map _configMap; 15 | 16 | public LiKafkaProducerBuilder() { 17 | this(Collections.emptyMap()); 18 | } 19 | 20 | public LiKafkaProducerBuilder(Map configMap) { 21 | _configMap = configMap; 22 | } 23 | 24 | public LiKafkaProducerBuilder setProducerConfig(Map configMap) { 25 | _configMap = configMap; 26 | return this; 27 | } 28 | 29 | public LiKafkaProducer build() { 30 | // Serializers and auditor will be created using associated producer properties. 31 | return new LiKafkaProducerImpl(_configMap); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/producer/ProducerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import java.util.Properties; 8 | import org.apache.kafka.clients.producer.Producer; 9 | 10 | 11 | @FunctionalInterface 12 | public interface ProducerFactory { 13 | Producer create(Properties base, Properties overrides); 14 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/producer/UUIDFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 8 | import java.util.Map; 9 | import java.util.UUID; 10 | import org.apache.kafka.clients.producer.ProducerRecord; 11 | import org.apache.kafka.common.Configurable; 12 | 13 | 14 | /** 15 | * The factory class used to generate message UUIDs. 16 | */ 17 | public interface UUIDFactory<K, V> extends Configurable { 18 | 19 | /** 20 | * @return a non-null UUID 21 | */ 22 | UUID createUuid(); 23 | 24 | /** 25 | * @param record the producer record for which a UUID should be generated or derived 26 | * @return a UUID based on the producer record. 27 | */ 28 | UUID getUuid(ProducerRecord<K, V> record); 29 | 30 | /** 31 | * The default implementation of UUIDFactory. 32 | */ 33 | class DefaultUUIDFactory<K, V> implements UUIDFactory<K, V> { 34 | 35 | @Override 36 | public void configure(Map<String, ?> configs) { 37 | 38 | } 39 | 40 | @Override 41 | public UUID createUuid() { 42 | return LiKafkaClientsUtils.randomUUID(); 43 | } 44 | 45 | @Override 46 | public UUID getUuid(ProducerRecord<K, V> record) { 47 | return LiKafkaClientsUtils.randomUUID(); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/CloseableLock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.concurrent.locks.Lock; 8 | 9 | 10 | /** 11 | * a handy decorator around java locks that allows them to be used with try-with-resources expressions 12 | */ 13 | public class CloseableLock implements AutoCloseable { 14 | private final Lock lock; 15 | 16 | public CloseableLock(Lock lock) { 17 | this.lock = lock; 18 | this.lock.lock(); 19 | } 20 | 21 | @Override 22 | public void close() { 23 | lock.unlock(); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/CompositeCollection.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
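A brief usage sketch (not part of the repository) of the try-with-resources pattern that CloseableLock above enables; the guarded counter is purely illustrative.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.linkedin.kafka.clients.utils.CloseableLock;

public class CloseableLockExample {
  private final Lock lock = new ReentrantLock();
  private long counter = 0;

  public void increment() {
    // The CloseableLock constructor acquires the lock; close() releases it when the
    // try block exits, even if the guarded code throws.
    try (CloseableLock ignored = new CloseableLock(lock)) {
      counter++;
    }
  }
}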
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.Collection; 8 | import java.util.Iterator; 9 | 10 | /** 11 | * quick and simple unmodifiable implementation of a collection on top of a pair of other collections. 12 | * @param value type 13 | */ 14 | public class CompositeCollection implements Collection { 15 | private final Collection a; 16 | private final Collection b; 17 | 18 | public CompositeCollection(Collection a, Collection b) { 19 | if (a == null || b == null) { 20 | throw new IllegalArgumentException("arguments must not be null"); 21 | } 22 | this.a = a; 23 | this.b = b; 24 | } 25 | 26 | @Override 27 | public int size() { 28 | return a.size() + b.size(); 29 | } 30 | 31 | @Override 32 | public boolean isEmpty() { 33 | return a.isEmpty() && b.isEmpty(); 34 | } 35 | 36 | @Override 37 | public boolean contains(Object o) { 38 | return a.contains(o) || b.contains(o); 39 | } 40 | 41 | @Override 42 | public Iterator iterator() { 43 | return new CompositeIterator<>(a.iterator(), b.iterator()); 44 | } 45 | 46 | @Override 47 | public Object[] toArray() { 48 | throw new UnsupportedOperationException(); 49 | } 50 | 51 | @Override 52 | public U[] toArray(U[] a) { 53 | throw new UnsupportedOperationException(); 54 | } 55 | 56 | @Override 57 | public boolean add(T t) { 58 | throw new UnsupportedOperationException(); 59 | } 60 | 61 | @Override 62 | public boolean remove(Object o) { 63 | throw new UnsupportedOperationException(); 64 | } 65 | 66 | @Override 67 | public boolean containsAll(Collection c) { 68 | throw new UnsupportedOperationException(); 69 | } 70 | 71 | @Override 72 | public boolean addAll(Collection c) { 73 | throw new UnsupportedOperationException(); 74 | } 75 | 76 | @Override 77 | public boolean removeAll(Collection c) { 78 | throw new UnsupportedOperationException(); 79 | } 80 | 81 | @Override 82 | public boolean retainAll(Collection c) { 83 | throw new UnsupportedOperationException(); 84 | } 85 | 86 | @Override 87 | public void clear() { 88 | throw new UnsupportedOperationException(); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/CompositeIterator.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.Iterator; 8 | 9 | /** 10 | * quick and simple unmodifiable implementation of an iterator on top of a pair of other iterators. 11 | * @param value type 12 | */ 13 | public class CompositeIterator implements Iterator { 14 | private final Iterator a; 15 | private final Iterator b; 16 | 17 | public CompositeIterator(Iterator a, Iterator b) { 18 | if (a == null || b == null) { 19 | throw new IllegalArgumentException("arguments must not be null"); 20 | } 21 | this.a = a; 22 | this.b = b; 23 | } 24 | 25 | @Override 26 | public boolean hasNext() { 27 | return a.hasNext() || b.hasNext(); 28 | } 29 | 30 | @Override 31 | public T next() { 32 | if (a.hasNext()) { 33 | return a.next(); 34 | } 35 | return b.next(); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/CompositeMap.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.Collection; 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | 12 | /** 13 | * quick and simple unmodifiable implementation of a map on top of a pair of other maps. 14 | * note that this class is meant for a particular use case (composition of kafka client metrics) 15 | * and is written to be fast over perfectly correct 16 | * @param key type 17 | * @param value type 18 | */ 19 | public class CompositeMap implements Map { 20 | private final Map a; 21 | private final Map b; 22 | 23 | public CompositeMap(Map a, Map b) { 24 | if (a == null || a.isEmpty() || b == null || b.isEmpty()) { 25 | throw new IllegalArgumentException("arguments must be non empty"); 26 | } 27 | this.a = a; 28 | this.b = b; 29 | } 30 | 31 | @Override 32 | public int size() { 33 | return a.size() + b.size(); //we assume they're foreign 34 | } 35 | 36 | @Override 37 | public boolean isEmpty() { 38 | return a.isEmpty() && b.isEmpty(); 39 | } 40 | 41 | @Override 42 | public boolean containsKey(Object key) { 43 | return a.containsKey(key) || b.containsKey(key); 44 | } 45 | 46 | @Override 47 | public boolean containsValue(Object value) { 48 | return a.containsValue(value) || b.containsValue(value); 49 | } 50 | 51 | @Override 52 | public V get(Object key) { 53 | //this assumes no map container a null value (and is faster than a containsKey + get) 54 | V v = a.get(key); 55 | if (v != null) { 56 | return v; 57 | } 58 | return b.get(key); 59 | } 60 | 61 | @Override 62 | public V put(K key, V value) { 63 | throw new UnsupportedOperationException(); 64 | } 65 | 66 | @Override 67 | public V remove(Object key) { 68 | throw new UnsupportedOperationException(); 69 | } 70 | 71 | @Override 72 | public void putAll(Map m) { 73 | throw new UnsupportedOperationException(); 74 | } 75 | 76 | @Override 77 | public void clear() { 78 | throw new UnsupportedOperationException(); 79 | } 80 | 81 | @Override 82 | public Set keySet() { 83 | return new CompositeSet<>(a.keySet(), b.keySet()); 84 | } 85 | 86 | @Override 87 | public Collection values() { 88 | return new CompositeCollection<>(a.values(), b.values()); 89 | } 90 | 91 | @Override 92 | public Set> entrySet() { 93 | return new CompositeSet<>(a.entrySet(), b.entrySet()); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/CompositeSet.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
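As a rough illustration of how the read-only composite view above behaves, here is a small sketch (not from the repository); the metric names are made up for the example.

import java.util.HashMap;
import java.util.Map;
import com.linkedin.kafka.clients.utils.CompositeMap;

public class CompositeMapExample {
  public static void main(String[] args) {
    Map<String, Integer> producerMetrics = new HashMap<>();
    producerMetrics.put("record-send-rate", 1);
    Map<String, Integer> trackerMetrics = new HashMap<>();
    trackerMetrics.put("assembly-buffer-size", 2);

    // An unmodifiable view over both maps; lookups check the first map, then the second.
    Map<String, Integer> combined = new CompositeMap<>(producerMetrics, trackerMetrics);
    System.out.println(combined.size());                      // 2
    System.out.println(combined.get("assembly-buffer-size")); // 2
    // combined.put("x", 3) would throw UnsupportedOperationException.
  }
}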
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.Collection; 8 | import java.util.Iterator; 9 | import java.util.Set; 10 | 11 | /** 12 | * quick and simple unmodifiable implementation of a set on top of a pair of other sets. 13 | * note that this class is meant for a particular use case (composition of kafka client metrics) 14 | * and is written to be fast over perfectly correct 15 | * @param value type 16 | */ 17 | public class CompositeSet implements Set { 18 | private final Set a; 19 | private final Set b; 20 | 21 | public CompositeSet(Set a, Set b) { 22 | if (a == null || b == null) { 23 | throw new IllegalArgumentException("arguments must not be null"); 24 | } 25 | this.a = a; 26 | this.b = b; 27 | } 28 | 29 | @Override 30 | public int size() { 31 | return a.size() + b.size(); //we assume they're foreign, for perf reasons 32 | } 33 | 34 | @Override 35 | public boolean isEmpty() { 36 | return a.isEmpty() && b.isEmpty(); 37 | } 38 | 39 | @Override 40 | public boolean contains(Object o) { 41 | return a.contains(o) || b.contains(o); 42 | } 43 | 44 | @Override 45 | public Iterator iterator() { 46 | return new CompositeIterator<>(a.iterator(), b.iterator()); 47 | } 48 | 49 | @Override 50 | public Object[] toArray() { 51 | throw new UnsupportedOperationException(); 52 | } 53 | 54 | @Override 55 | public U[] toArray(U[] a) { 56 | throw new UnsupportedOperationException(); 57 | } 58 | 59 | @Override 60 | public boolean add(T t) { 61 | throw new UnsupportedOperationException(); 62 | } 63 | 64 | @Override 65 | public boolean remove(Object o) { 66 | throw new UnsupportedOperationException(); 67 | } 68 | 69 | @Override 70 | public boolean containsAll(Collection c) { 71 | throw new UnsupportedOperationException(); 72 | } 73 | 74 | @Override 75 | public boolean addAll(Collection c) { 76 | throw new UnsupportedOperationException(); 77 | } 78 | 79 | @Override 80 | public boolean retainAll(Collection c) { 81 | throw new UnsupportedOperationException(); 82 | } 83 | 84 | @Override 85 | public boolean removeAll(Collection c) { 86 | throw new UnsupportedOperationException(); 87 | } 88 | 89 | @Override 90 | public void clear() { 91 | throw new UnsupportedOperationException(); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/Constants.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | public class Constants { 8 | // Record header keys reserved by li-apache-kafka-clients (timestamp, large-message, and safe-offset headers) 9 | public static final String TIMESTAMP_HEADER = "_t"; 10 | public static final String LARGE_MESSAGE_HEADER = "_lm"; 11 | public static final String SAFE_OFFSET_HEADER = "_so"; 12 | 13 | /** 14 | * Avoid instantiating the constants class 15 | */ 16 | private Constants() { 17 | 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/KafkaConsumerLock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.ConcurrentModificationException; 8 | import java.util.concurrent.TimeUnit; 9 | import java.util.concurrent.atomic.AtomicInteger; 10 | import java.util.concurrent.atomic.AtomicReference; 11 | import java.util.concurrent.locks.Condition; 12 | import java.util.concurrent.locks.Lock; 13 | 14 | 15 | /** 16 | * a "lock" that implements kafka consumer locking semantics.
17 | * note that kafka consumer locking semantics are not good for general purpose locking :-) 18 | */ 19 | public class KafkaConsumerLock implements Lock { 20 | 21 | //the thread that currently holds the lock (==is operating on the consumer). null for none 22 | private AtomicReference ownerThread = new AtomicReference<>(null); 23 | //"depth" (number of times acquired) for current owner thread. this is to provide reenterance support 24 | private AtomicInteger refCount = new AtomicInteger(0); 25 | 26 | @Override 27 | public void lock() { 28 | if (!tryLock()) { 29 | Thread owner = ownerThread.get(); //may be null if we got unscheduled. this is just best effort for logging. 30 | throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access. " 31 | + "competing thread is " + (owner != null ? owner.getName() : "unknown")); 32 | } 33 | } 34 | 35 | @Override 36 | public void lockInterruptibly() throws InterruptedException { 37 | throw new UnsupportedOperationException("not implemented (yet?)"); 38 | } 39 | 40 | @Override 41 | public boolean tryLock() { 42 | Thread current = Thread.currentThread(); 43 | Thread owner = ownerThread.get(); 44 | if (owner != current && !ownerThread.compareAndSet(null, current)) { 45 | //we lost 46 | return false; 47 | } 48 | refCount.incrementAndGet(); 49 | return true; 50 | } 51 | 52 | @Override 53 | public boolean tryLock(long time, TimeUnit unit) throws InterruptedException { 54 | throw new UnsupportedOperationException("not implemented (yet?)"); 55 | } 56 | 57 | @Override 58 | public void unlock() { 59 | if (refCount.decrementAndGet() == 0) { 60 | ownerThread.set(null); 61 | } 62 | } 63 | 64 | @Override 65 | public Condition newCondition() { 66 | throw new UnsupportedOperationException("not implemented (yet?)"); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/PrimitiveEncoderDecoder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
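A short sketch (not part of the repository) of the fail-fast semantics described above: the owning thread can re-acquire the lock, while another thread gets an immediate failure instead of blocking.

import com.linkedin.kafka.clients.utils.KafkaConsumerLock;

public class KafkaConsumerLockExample {
  public static void main(String[] args) throws InterruptedException {
    KafkaConsumerLock lock = new KafkaConsumerLock();

    lock.lock(); // the owner may lock() again (reentrant); unlock() must be called once per lock()
    Thread other = new Thread(() -> {
      // From a different thread tryLock() returns false rather than blocking,
      // and lock() would throw ConcurrentModificationException, mirroring
      // KafkaConsumer's single-threaded access check.
      System.out.println("acquired by other thread? " + lock.tryLock()); // false
    });
    other.start();
    other.join();
    lock.unlock();
  }
}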
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | /** 8 | * Class to encode/decode primitive integer/long types. 9 | */ 10 | public class PrimitiveEncoderDecoder { 11 | // The number of bytes for a long variable 12 | public static final int LONG_SIZE = Long.SIZE / Byte.SIZE; 13 | public static final int INT_SIZE = Integer.SIZE / Byte.SIZE; 14 | 15 | /** 16 | * Avoid instantiating PrimitiveEncoderDecoder class 17 | */ 18 | private PrimitiveEncoderDecoder() { 19 | 20 | } 21 | 22 | /** 23 | * Encodes a long value into a {@link PrimitiveEncoderDecoder#LONG_SIZE} byte array 24 | * @param value value to be encoded 25 | * @param output output where encoded data will be stored 26 | * @param pos position in output where value will be encoded starting from 27 | */ 28 | public static void encodeLong(long value, byte[] output, int pos) { 29 | if (output == null) { 30 | throw new IllegalArgumentException("The input result cannot be null"); 31 | } 32 | 33 | if (pos < 0) { 34 | throw new IllegalArgumentException("position cannot be less than zero"); 35 | } 36 | 37 | if (output.length < pos + LONG_SIZE) { 38 | throw new IllegalArgumentException( 39 | String.format("Not adequate bytes available to encode the long value(array length = %d, pos = %d", output.length, pos) 40 | ); 41 | } 42 | 43 | for (int i = LONG_SIZE - 1; i >= 0; i--) { 44 | output[pos + i] = (byte) (value & 0xffL); 45 | value >>= 8; 46 | } 47 | } 48 | 49 | /** 50 | * Encodes a long value into a newly created byte[] and returns it. 51 | * @param value value to be encoded 52 | * @return encoded form of value 53 | */ 54 | public static byte[] encodeLong(long value) { 55 | byte[] data = new byte[LONG_SIZE]; 56 | encodeLong(value, data, 0); 57 | return data; 58 | } 59 | 60 | /** 61 | * Encodes a int value into a {@link PrimitiveEncoderDecoder#INT_SIZE} byte array 62 | * @param value value to be encoded 63 | * @param output destination to be encoded into 64 | * @param pos position in destination where encoded value will start 65 | */ 66 | public static void encodeInt(int value, byte[] output, int pos) { 67 | if (output == null) { 68 | throw new IllegalArgumentException("The input result cannot be null"); 69 | } 70 | 71 | if (pos < 0) { 72 | throw new IllegalArgumentException("position cannot be less than zero"); 73 | } 74 | 75 | if (output.length < pos + INT_SIZE) { 76 | throw new IllegalArgumentException( 77 | String.format("Not adequate bytes available to encode the int value(array length = %d, pos = %d", output.length, pos) 78 | ); 79 | } 80 | 81 | output[pos] = (byte) (value >> 24); 82 | output[pos + 1] = (byte) (value >> 16); 83 | output[pos + 2] = (byte) (value >> 8); 84 | output[pos + 3] = (byte) value; 85 | } 86 | 87 | /** 88 | * Encodes a int value int a newly created byte[] and returns it 89 | * @param value value to be encoded 90 | * @return encoded value 91 | */ 92 | public static byte[] encodeInt(int value) { 93 | byte[] data = new byte[INT_SIZE]; 94 | encodeInt(value, data, 0); 95 | return data; 96 | } 97 | 98 | 99 | /** 100 | * Decodes {@link PrimitiveEncoderDecoder#LONG_SIZE} bytes from offset in the input byte array 101 | * @param input where to read encoded form from 102 | * @param pos position in input to start reading from 103 | * @return a decoded long 104 | */ 105 | public static long decodeLong(byte[] input, int pos) { 106 | sanityCheck(input, pos, LONG_SIZE); 107 | 108 | return (input[pos] & 0xFFL) << 56 109 | | (input[pos + 1] & 0xFFL) << 48 110 | | 
(input[pos + 2] & 0xFFL) << 40 111 | | (input[pos + 3] & 0xFFL) << 32 112 | | (input[pos + 4] & 0xFFL) << 24 113 | | (input[pos + 5] & 0xFFL) << 16 114 | | (input[pos + 6] & 0xFFL) << 8 115 | | (input[pos + 7] & 0xFFL); 116 | } 117 | 118 | 119 | /** 120 | * Decodes {@link PrimitiveEncoderDecoder#INT_SIZE} bytes from offset in the input byte array 121 | * @param input where to read encoded form from 122 | * @param pos position in input to start reading from 123 | * @return a decoded int 124 | */ 125 | public static int decodeInt(byte[] input, int pos) { 126 | sanityCheck(input, pos, INT_SIZE); 127 | 128 | return input[pos] << 24 | (input[pos + 1] & 0xFF) << 16 | (input[pos + 2] & 0xFF) << 8 | (input[pos + 3] & 0xFF); 129 | } 130 | 131 | private static void sanityCheck(byte[] input, int pos, int dataSize) { 132 | if (input == null) { 133 | throw new IllegalArgumentException("bytes cannot be null"); 134 | } 135 | 136 | if (pos < 0) { 137 | throw new IllegalArgumentException("position cannot be less than zero"); 138 | } 139 | 140 | if (input.length < pos + dataSize) { 141 | throw new IllegalArgumentException( 142 | String.format("Not adequate bytes available in the input array(array length = %d, pos = %d)", input.length, pos) 143 | ); 144 | } 145 | } 146 | } -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/main/java/com/linkedin/kafka/clients/utils/QueuedMap.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
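A minimal round-trip sketch (not from the repository) using the encoder/decoder above; the values are arbitrary.

import com.linkedin.kafka.clients.utils.PrimitiveEncoderDecoder;

public class PrimitiveEncoderDecoderExample {
  public static void main(String[] args) {
    // Longs and ints are written big-endian (most significant byte first).
    byte[] longBytes = PrimitiveEncoderDecoder.encodeLong(1234567890123L); // 8 bytes
    long decodedLong = PrimitiveEncoderDecoder.decodeLong(longBytes, 0);

    // Pack an int followed by a long into one buffer using explicit offsets.
    byte[] buffer = new byte[PrimitiveEncoderDecoder.INT_SIZE + PrimitiveEncoderDecoder.LONG_SIZE];
    PrimitiveEncoderDecoder.encodeInt(42, buffer, 0);
    PrimitiveEncoderDecoder.encodeLong(decodedLong, buffer, PrimitiveEncoderDecoder.INT_SIZE);
    int decodedInt = PrimitiveEncoderDecoder.decodeInt(buffer, 0);

    System.out.println(decodedLong + " " + decodedInt); // 1234567890123 42
  }
}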
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.concurrent.ConcurrentHashMap; 8 | 9 | /** 10 | * A util class that helps maintain the insertion order of keys to a Java map. This class allows O(1) removal of keys 11 | * based on their insertion order. This class is not thread safe. 12 | *

13 | * There are some classes in the Java library that supports similar function but not exactly what we need. 14 | * A TreeMap sorts the entries by key and can get the smallest key in O(1), but since our key is message ID so we will 15 | * need an additional data structure to maintain the order. 16 | * A QueueMap maintains the insertion order but can only remove the eldest key when inserting a new entry into the 17 | * map. 18 | */ 19 | public class QueuedMap { 20 | private ConcurrentHashMap _map; 21 | private DoublyLinkedList _queue; 22 | 23 | public QueuedMap() { 24 | _map = new ConcurrentHashMap<>(); 25 | _queue = new DoublyLinkedList(); 26 | } 27 | 28 | /** 29 | * Put a key-value pair into the map. 30 | * 31 | * @param k key 32 | * @param v value 33 | */ 34 | public void put(K k, V v) { 35 | remove(k); 36 | MapValue mapValue = new MapValue(v); 37 | mapValue.node = _queue.addKey(k); 38 | _map.put(k, mapValue); 39 | } 40 | 41 | /** 42 | * Get value based on key. 43 | * 44 | * @param k key 45 | * @return value associated with the key if it exists, otherwise returns null. 46 | */ 47 | public V get(K k) { 48 | MapValue mapValue = _map.get(k); 49 | return mapValue == null ? null : mapValue.value; 50 | } 51 | 52 | /** 53 | * Remove a key-value pair from the map. 54 | * 55 | * @param k key 56 | * @return value associated with the key if it exists, otherwise returns null. 57 | */ 58 | public V remove(K k) { 59 | MapValue mapValue = _map.remove(k); 60 | if (mapValue != null) { 61 | _queue.remove(mapValue.node); 62 | } 63 | return mapValue == null ? null : mapValue.value; 64 | } 65 | 66 | /** 67 | * Get the eldest key inserted into the map. 68 | * 69 | * @return the eldest key in the map. 70 | */ 71 | public K getEldestKey() { 72 | ListNode node = _queue.peek(); 73 | return node == null ? null : node.key; 74 | 75 | } 76 | 77 | public int size() { 78 | return _map.size(); 79 | } 80 | 81 | public void clear() { 82 | _map.clear(); 83 | _queue = new DoublyLinkedList(); 84 | } 85 | 86 | // Helper classes 87 | private class MapValue { 88 | V value; 89 | ListNode node; 90 | 91 | public MapValue(V v) { 92 | value = v; 93 | } 94 | } 95 | 96 | private class ListNode { 97 | ListNode prev; 98 | ListNode next; 99 | K key; 100 | 101 | public ListNode(K k) { 102 | prev = null; 103 | next = null; 104 | key = k; 105 | } 106 | } 107 | 108 | /** 109 | * We need to have a raw doubly linked list to allow O(1) deletion from the middle of the list. 110 | * Java LinkedList does not expose the node in the list so the removal will be O(n). 
111 | */ 112 | private class DoublyLinkedList { 113 | private ListNode _head; 114 | private ListNode _tail; 115 | private int _size; 116 | 117 | public DoublyLinkedList() { 118 | _head = null; 119 | _tail = null; 120 | _size = 0; 121 | } 122 | 123 | public synchronized void add(ListNode node) { 124 | if (_head == null) { 125 | _head = node; 126 | _tail = node; 127 | node.prev = null; 128 | node.next = null; 129 | } else { 130 | _tail.next = node; 131 | node.prev = _tail; 132 | node.next = null; 133 | _tail = node; 134 | } 135 | _size++; 136 | } 137 | 138 | public synchronized ListNode addKey(K key) { 139 | ListNode node = new ListNode(key); 140 | add(node); 141 | return node; 142 | } 143 | 144 | public synchronized ListNode peek() { 145 | return _head; 146 | } 147 | 148 | public synchronized void remove(ListNode node) { 149 | if (node != _head) { 150 | node.prev.next = node.next; 151 | } else { 152 | _head = node.next; 153 | } 154 | if (node != _tail) { 155 | node.next.prev = node.prev; 156 | } else { 157 | _tail = node.prev; 158 | } 159 | 160 | node.next = null; 161 | node.prev = null; 162 | _size -= 1; 163 | } 164 | 165 | public synchronized int size() { 166 | return _size; 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/auditing/abstractimpl/CountingAuditStatsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
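A small usage sketch (not part of the repository) of the insertion-order map above; the keys stand in for message IDs.

import com.linkedin.kafka.clients.utils.QueuedMap;

public class QueuedMapExample {
  public static void main(String[] args) {
    QueuedMap<Long, String> pending = new QueuedMap<>();
    pending.put(101L, "first");
    pending.put(102L, "second");
    pending.put(103L, "third");

    System.out.println(pending.getEldestKey()); // 101 (oldest key still present)
    pending.remove(101L);                       // O(1) removal regardless of position
    System.out.println(pending.getEldestKey()); // 102

    // Re-inserting an existing key moves it to the back of the insertion order.
    pending.put(102L, "second again");
    System.out.println(pending.getEldestKey()); // 103
  }
}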
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.auditing.abstractimpl; 6 | 7 | import com.linkedin.kafka.clients.auditing.AuditType; 8 | import org.testng.annotations.Test; 9 | 10 | import java.util.Map; 11 | 12 | import static org.testng.Assert.assertEquals; 13 | import static org.testng.Assert.assertTrue; 14 | 15 | /** 16 | * The unit test for AuditStats 17 | */ 18 | public class CountingAuditStatsTest { 19 | 20 | private static final int NUM_THREAD = 5; 21 | private static final int NUM_TIMESTAMPS = 10000; 22 | private static final long BUCKET_MS = 1000L; 23 | private static final String[] TOPICS = {"topic0", "topic1", "topic2"}; 24 | private static final AuditType[] AUDIT_TYPES = {AuditType.SUCCESS, AuditType.FAILURE, AuditType.ATTEMPT}; 25 | 26 | @Test 27 | public void testRecord() { 28 | Thread[] recorders = new Thread[NUM_THREAD]; 29 | CountingAuditStats stats = new CountingAuditStats(BUCKET_MS); 30 | 31 | for (int i = 0; i < NUM_THREAD; i++) { 32 | recorders[i] = new Recorder(stats); 33 | recorders[i].start(); 34 | } 35 | 36 | for (int i = 0; i < NUM_THREAD; i++) { 37 | try { 38 | recorders[i].join(); 39 | } catch (InterruptedException e) { 40 | e.printStackTrace(); 41 | } 42 | } 43 | 44 | Map counters = stats.stats(); 45 | long numBuckets = (NUM_TIMESTAMPS + BUCKET_MS - 1) / BUCKET_MS; 46 | long expectedNumMessages = NUM_TIMESTAMPS / numBuckets * NUM_THREAD; 47 | for (int typeIndex = 0; typeIndex < AUDIT_TYPES.length; typeIndex++) { 48 | for (int topicIndex = 0; topicIndex < TOPICS.length; topicIndex++) { 49 | for (long bucketIndex = 0; bucketIndex < numBuckets; bucketIndex++) { 50 | AuditKey auditKey = 51 | new AuditKey(TOPICS[topicIndex], bucketIndex, AUDIT_TYPES[typeIndex]); 52 | assertTrue(counters.containsKey(auditKey)); 53 | assertEquals(counters.get(auditKey).messageCount(), expectedNumMessages, 54 | "The topic should have " + expectedNumMessages + " messages in the bucket"); 55 | assertEquals(counters.get(auditKey).bytesCount(), 2 * expectedNumMessages, 56 | "The topic should have " + 2 * expectedNumMessages + " messages in the bucket"); 57 | } 58 | } 59 | } 60 | } 61 | 62 | private class Recorder extends Thread { 63 | private final CountingAuditStats _stats; 64 | 65 | Recorder(CountingAuditStats stats) { 66 | _stats = stats; 67 | } 68 | 69 | @Override 70 | public void run() { 71 | for (int typeIndex = 0; typeIndex < AUDIT_TYPES.length; typeIndex++) { 72 | for (int topicIndex = 0; topicIndex < TOPICS.length; topicIndex++) { 73 | for (long timestamp = 0; timestamp < NUM_TIMESTAMPS; timestamp++) { 74 | AuditKey auditKey = 75 | new AuditKey(TOPICS[topicIndex], timestamp / BUCKET_MS, AUDIT_TYPES[typeIndex]); 76 | _stats.update(auditKey, 1, 2); 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/largemessage/DefaultSegmentDeserializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import java.nio.ByteBuffer; 8 | import java.util.Arrays; 9 | import java.util.UUID; 10 | import org.testng.Assert; 11 | import org.testng.annotations.Test; 12 | 13 | 14 | public class DefaultSegmentDeserializerTest { 15 | 16 | @Test 17 | public void testZeroChecksum() { 18 | DefaultSegmentSerializer segmentSerializer = new DefaultSegmentSerializer(); 19 | 20 | //doctor a UUID such that the projected checksum is 0 21 | long a = (Long.MAX_VALUE / 2) - 1; 22 | long b = (Long.MAX_VALUE / 2) + 3; 23 | int checksum = (int) (a + b); 24 | 25 | Assert.assertEquals(checksum, 0, "projected checksum should be 0. instead was " + checksum); 26 | 27 | UUID msgId = new UUID(a, b); 28 | byte[] payload = new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; 29 | LargeMessageSegment segment = new LargeMessageSegment(msgId, 0, 1, 10, ByteBuffer.wrap(payload)); 30 | 31 | byte[] serialized = segmentSerializer.serialize("topic", segment); 32 | 33 | DefaultSegmentDeserializer segmentDeserializer = new DefaultSegmentDeserializer(); 34 | 35 | LargeMessageSegment deserialized = segmentDeserializer.deserialize("topic", serialized); 36 | 37 | Assert.assertNotNull(deserialized); 38 | Assert.assertEquals(deserialized.messageId, msgId); 39 | Assert.assertTrue(Arrays.equals(payload, deserialized.payloadArray())); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/largemessage/LageMessageCallbackTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.largemessage.errors.LargeMessageSendException; 8 | import org.apache.kafka.clients.producer.Callback; 9 | import org.apache.kafka.clients.producer.RecordMetadata; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.testng.annotations.Test; 12 | 13 | import java.util.concurrent.atomic.AtomicInteger; 14 | 15 | import static org.testng.AssertJUnit.assertEquals; 16 | import static org.testng.AssertJUnit.assertTrue; 17 | 18 | /** 19 | * The unit test for large message callback. 20 | */ 21 | public class LageMessageCallbackTest { 22 | private final int numSegments = 10; 23 | 24 | @Test 25 | public void testLargeMessageCallbackWithoutException() { 26 | final AtomicInteger callbackFired = new AtomicInteger(0); 27 | LargeMessageCallback callback = new LargeMessageCallback(numSegments, new Callback() { 28 | @Override 29 | public void onCompletion(RecordMetadata recordMetadata, Exception e) { 30 | callbackFired.incrementAndGet(); 31 | assertEquals("No exception should be there.", e, null); 32 | } 33 | }); 34 | 35 | for (int i = 0; i < numSegments - 1; i++) { 36 | callback.onCompletion(new RecordMetadata(new TopicPartition("topic", 0), 0L, 0L, 0L, 0L, 0, 0), null); 37 | assertTrue("The user callback should not be fired.", callbackFired.get() == 0); 38 | } 39 | callback.onCompletion(new RecordMetadata(new TopicPartition("topic", 0), 0L, 0L, 0L, 0L, 0, 0), null); 40 | assertTrue("The user callback should not be fired.", callbackFired.get() == 1); 41 | } 42 | 43 | @Test 44 | public void testLargeMessageCallbackWithException() { 45 | final AtomicInteger callbackFired = new AtomicInteger(0); 46 | final Exception e1 = new LargeMessageSendException("Exception 1"); 47 | final Exception e2 = new LargeMessageSendException("Exception 2"); 48 | LargeMessageCallback callback = new LargeMessageCallback(numSegments, new Callback() { 49 | @Override 50 | public void onCompletion(RecordMetadata recordMetadata, Exception e) { 51 | callbackFired.incrementAndGet(); 52 | assertTrue("The exception should be e1", e.getCause() == e1); 53 | assertEquals("Error when sending large message. Sent 8 of 10 segments.", e.getMessage()); 54 | } 55 | }); 56 | 57 | for (int i = 0; i < numSegments - 1; i++) { 58 | Exception e = null; 59 | if (i == 3) { 60 | e = e1; 61 | } 62 | callback.onCompletion(new RecordMetadata(new TopicPartition("topic", 0), 0L, 0L, 0L, 0L, 0, 0), e); 63 | assertTrue("The user callback should not be fired.", callbackFired.get() == 0); 64 | } 65 | callback.onCompletion(new RecordMetadata(new TopicPartition("topic", 0), 0L, 0L, 0L, 0L, 0, 0), e2); 66 | assertTrue("The user callback should not be fired.", callbackFired.get() == 1); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/largemessage/LargeMessageTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.largemessage.errors.InvalidSegmentException; 8 | import com.linkedin.kafka.clients.utils.LiKafkaClientsTestUtils; 9 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.testng.annotations.Test; 12 | 13 | import java.util.UUID; 14 | 15 | import static org.testng.Assert.assertEquals; 16 | import static org.testng.Assert.assertTrue; 17 | import static org.testng.Assert.fail; 18 | 19 | /** 20 | * Unit test for incomplete message. 21 | */ 22 | public class LargeMessageTest { 23 | private final int messageSizeInBytes = 15; 24 | private final int numberOfSegments = 2; 25 | private final UUID messageId = LiKafkaClientsUtils.randomUUID(); 26 | 27 | @Test 28 | public void testIncompleteMessage() { 29 | LargeMessage message = new LargeMessage(new TopicPartition("topic", 0), 30 | LiKafkaClientsUtils.randomUUID(), 31 | 0L, 32 | messageSizeInBytes, 33 | numberOfSegments); 34 | 35 | LargeMessageSegment segment0 = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments, messageSizeInBytes, 10); 36 | LargeMessageSegment segment1 = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 1, numberOfSegments, messageSizeInBytes, 5); 37 | 38 | byte[] serializedMessage = message.addSegment(segment1, 1).serializedMessage(); 39 | assert serializedMessage == null; 40 | 41 | assertEquals(message.bufferedSizeInBytes(), 5, "5 bytes should be buffered"); 42 | serializedMessage = message.addSegment(segment0, 1).serializedMessage(); 43 | assert serializedMessage != null; 44 | assertEquals(message.bufferedSizeInBytes(), 15, "15 bytes should be buffered"); 45 | assert serializedMessage.length == messageSizeInBytes; 46 | 47 | // verify the bytes. 
48 | LiKafkaClientsTestUtils.verifyMessage(serializedMessage, messageSizeInBytes, 10); 49 | } 50 | 51 | @Test(expectedExceptions = InvalidSegmentException.class) 52 | public void testZeroLengthSegment() { 53 | LargeMessage message = new LargeMessage(new TopicPartition("topic", 0), 54 | LiKafkaClientsUtils.randomUUID(), 55 | 0L, 56 | messageSizeInBytes, 57 | numberOfSegments); 58 | 59 | LargeMessageSegment zeroLengthSegment = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments, messageSizeInBytes, 0); 60 | message.addSegment(zeroLengthSegment, 0); 61 | } 62 | 63 | @Test(expectedExceptions = InvalidSegmentException.class) 64 | public void testSegmentTotalSizeGreaterThanMesssageSize() { 65 | LargeMessage message = new LargeMessage(new TopicPartition("topic", 0), 66 | LiKafkaClientsUtils.randomUUID(), 67 | 0L, 68 | messageSizeInBytes, 69 | numberOfSegments); 70 | 71 | LargeMessageSegment segment0 = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments, messageSizeInBytes, 10); 72 | LargeMessageSegment segment1 = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 1, numberOfSegments, messageSizeInBytes, 10); 73 | message.addSegment(segment0, 0); 74 | message.addSegment(segment1, 1); 75 | 76 | } 77 | 78 | @Test 79 | public void testConflictUUID() { 80 | LargeMessage message = new LargeMessage(new TopicPartition("topic", 0), 81 | LiKafkaClientsUtils.randomUUID(), 82 | 0L, 83 | messageSizeInBytes, 84 | numberOfSegments); 85 | 86 | LargeMessageSegment segment = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments, messageSizeInBytes, 10); 87 | message.addSegment(segment, 0); 88 | segment = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments + 1, messageSizeInBytes, 10); 89 | try { 90 | message.addSegment(segment, numberOfSegments + 1); 91 | fail("Should throw exception."); 92 | } catch (Throwable t) { 93 | assertTrue(t.getMessage().startsWith("Detected UUID conflict")); 94 | } 95 | 96 | segment = LiKafkaClientsTestUtils.createLargeMessageSegment(messageId, 0, numberOfSegments, messageSizeInBytes + 1, 10); 97 | try { 98 | message.addSegment(segment, numberOfSegments); 99 | fail("Should throw exception."); 100 | } catch (Throwable t) { 101 | assertTrue(t.getMessage().startsWith("Detected UUID conflict")); 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/largemessage/MessageSplitterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.producer.UUIDFactory; 8 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 9 | import com.linkedin.kafka.clients.utils.LiKafkaClientsTestUtils; 10 | import org.apache.kafka.clients.producer.ProducerRecord; 11 | import org.apache.kafka.common.TopicPartition; 12 | import org.apache.kafka.common.serialization.Deserializer; 13 | import org.apache.kafka.common.serialization.Serializer; 14 | import org.apache.kafka.common.serialization.StringDeserializer; 15 | import org.apache.kafka.common.serialization.StringSerializer; 16 | import org.testng.annotations.Test; 17 | 18 | import java.util.List; 19 | import java.util.UUID; 20 | 21 | import static org.testng.Assert.assertEquals; 22 | 23 | /** 24 | * Unit test for message splitter. 25 | */ 26 | public class MessageSplitterTest { 27 | @Test 28 | public void testSplit() { 29 | TopicPartition tp = new TopicPartition("topic", 0); 30 | UUID id = LiKafkaClientsUtils.randomUUID(); 31 | String message = LiKafkaClientsTestUtils.getRandomString(1000); 32 | Serializer stringSerializer = new StringSerializer(); 33 | Deserializer stringDeserializer = new StringDeserializer(); 34 | Serializer segmentSerializer = new DefaultSegmentSerializer(); 35 | Deserializer segmentDeserializer = new DefaultSegmentDeserializer(); 36 | MessageSplitter splitter = new MessageSplitterImpl(200, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>()); 37 | 38 | byte[] serializedMessage = stringSerializer.serialize("topic", message); 39 | List> records = splitter.split("topic", id, serializedMessage); 40 | assertEquals(records.size(), 5, "Should have 6 segments."); 41 | MessageAssembler assembler = new MessageAssemblerImpl(10000, 10000, true, segmentDeserializer); 42 | String assembledMessage = null; 43 | UUID uuid = null; 44 | for (int i = 0; i < records.size(); i++) { 45 | ProducerRecord record = records.get(i); 46 | LargeMessageSegment segment = segmentDeserializer.deserialize("topic", record.value()); 47 | if (uuid == null) { 48 | uuid = segment.messageId; 49 | } else { 50 | assertEquals(segment.messageId, uuid, "messageId should match."); 51 | } 52 | assertEquals(segment.numberOfSegments, 5, "segment number should be 5"); 53 | assertEquals(segment.messageSizeInBytes, serializedMessage.length, "message size should the same"); 54 | assertEquals(segment.sequenceNumber, i, "SequenceNumber should match"); 55 | 56 | assembledMessage = stringDeserializer.deserialize(null, assembler.assemble(tp, i, record.value()).messageBytes()); 57 | } 58 | assertEquals(assembledMessage, message, "messages should match."); 59 | } 60 | 61 | } 62 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/largemessage/SerializerDeserializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.largemessage; 6 | 7 | import com.linkedin.kafka.clients.utils.LiKafkaClientsTestUtils; 8 | import com.linkedin.kafka.clients.utils.LiKafkaClientsUtils; 9 | import org.apache.kafka.common.serialization.Deserializer; 10 | import org.apache.kafka.common.serialization.Serializer; 11 | import org.apache.kafka.common.serialization.StringDeserializer; 12 | import org.apache.kafka.common.serialization.StringSerializer; 13 | import org.testng.annotations.Test; 14 | 15 | import java.nio.ByteBuffer; 16 | 17 | import static org.testng.Assert.assertEquals; 18 | 19 | /** 20 | * Test for DefaultSegmentSerializer and DefaultSegmentDeserializer. 21 | */ 22 | public class SerializerDeserializerTest { 23 | 24 | @Test 25 | public void testSerde() { 26 | Serializer stringSerializer = new StringSerializer(); 27 | Deserializer stringDeserializer = new StringDeserializer(); 28 | Serializer segmentSerializer = new DefaultSegmentSerializer(); 29 | Deserializer segmentDeserializer = new DefaultSegmentDeserializer(); 30 | 31 | String s = LiKafkaClientsTestUtils.getRandomString(100); 32 | assertEquals(s.length(), 100); 33 | byte[] stringBytes = stringSerializer.serialize("topic", s); 34 | assertEquals(stringBytes.length, 100); 35 | LargeMessageSegment segment = 36 | new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, stringBytes.length, ByteBuffer.wrap(stringBytes)); 37 | // String bytes + segment header 38 | byte[] serializedSegment = segmentSerializer.serialize("topic", segment); 39 | assertEquals(serializedSegment.length, 1 + stringBytes.length + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + 4); 40 | 41 | LargeMessageSegment deserializedSegment = segmentDeserializer.deserialize("topic", serializedSegment); 42 | assertEquals(deserializedSegment.messageId, segment.messageId); 43 | assertEquals(deserializedSegment.messageSizeInBytes, segment.messageSizeInBytes); 44 | assertEquals(deserializedSegment.numberOfSegments, segment.numberOfSegments); 45 | assertEquals(deserializedSegment.sequenceNumber, segment.sequenceNumber); 46 | assertEquals(deserializedSegment.payload.limit(), 100); 47 | String deserializedString = stringDeserializer.deserialize("topic", deserializedSegment.payloadArray()); 48 | assertEquals(deserializedString.length(), s.length()); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/producer/MockLiKafkaProducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.producer; 6 | 7 | import java.util.List; 8 | import java.util.Map; 9 | import java.util.concurrent.Future; 10 | import java.util.concurrent.TimeUnit; 11 | 12 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 13 | import org.apache.kafka.clients.producer.Callback; 14 | import org.apache.kafka.clients.producer.MockProducer; 15 | import org.apache.kafka.clients.producer.ProducerRecord; 16 | import org.apache.kafka.clients.producer.RecordMetadata; 17 | import org.apache.kafka.common.Metric; 18 | import org.apache.kafka.common.MetricName; 19 | import org.apache.kafka.common.PartitionInfo; 20 | import org.apache.kafka.common.TopicPartition; 21 | import org.apache.kafka.common.errors.ProducerFencedException; 22 | import org.apache.kafka.common.serialization.ByteArraySerializer; 23 | 24 | 25 | // Mock LiKafkaProducer with raw byte key and value 26 | public class MockLiKafkaProducer implements LiKafkaProducer { 27 | private MockProducer _delegate; 28 | 29 | public MockLiKafkaProducer() { 30 | _delegate = new MockProducer<>(false, new ByteArraySerializer(), new ByteArraySerializer()); 31 | } 32 | 33 | @Override 34 | public Future send(ProducerRecord producerRecord) { 35 | return send(producerRecord, null); 36 | } 37 | 38 | @Override 39 | public Future send(ProducerRecord producerRecord, Callback callback) { 40 | return _delegate.send(producerRecord, callback); 41 | } 42 | 43 | @Override 44 | public void flush() { 45 | flush(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); 46 | } 47 | 48 | @Override 49 | public void flush(long timeout, TimeUnit timeUnit) { 50 | // Timeout is not really tested here. 51 | _delegate.flush(); 52 | } 53 | 54 | @Override 55 | public List partitionsFor(String topic) { 56 | return _delegate.partitionsFor(topic); 57 | } 58 | 59 | @Override 60 | public Map metrics() { 61 | throw new UnsupportedOperationException("Not implemented yet"); 62 | } 63 | 64 | @Override 65 | public void close() { 66 | close(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); 67 | } 68 | 69 | @Override 70 | public void close(long timeout, TimeUnit timeUnit) { 71 | _delegate.close(timeout, timeUnit); 72 | } 73 | 74 | @Override 75 | public void initTransactions() { 76 | throw new UnsupportedOperationException("Not supported"); 77 | } 78 | 79 | @Override 80 | public void beginTransaction() throws ProducerFencedException { 81 | throw new UnsupportedOperationException("Not supported"); 82 | } 83 | 84 | @Override 85 | public void sendOffsetsToTransaction(Map offsets, 86 | String consumerGroupId) throws ProducerFencedException { 87 | throw new UnsupportedOperationException("Not supported"); 88 | } 89 | 90 | @Override 91 | public void commitTransaction() throws ProducerFencedException { 92 | throw new UnsupportedOperationException("Not supported"); 93 | } 94 | 95 | @Override 96 | public void abortTransaction() throws ProducerFencedException { 97 | throw new UnsupportedOperationException("Not supported"); 98 | } 99 | 100 | public MockProducer getDelegate() { 101 | return _delegate; 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/utils/CompositeMapTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.ArrayList; 8 | import java.util.Arrays; 9 | import java.util.HashMap; 10 | import java.util.HashSet; 11 | import java.util.List; 12 | import java.util.Map; 13 | import java.util.Set; 14 | import org.testng.Assert; 15 | import org.testng.annotations.Test; 16 | 17 | 18 | public class CompositeMapTest { 19 | 20 | @Test 21 | public void testIterations() throws Exception { 22 | Map a = new HashMap<>(); 23 | a.put("a", 1); 24 | a.put("b", 2); 25 | Map b = new HashMap<>(); 26 | b.put("c", 3); 27 | 28 | CompositeMap composite = new CompositeMap<>(a, b); 29 | 30 | Set keys = new HashSet<>(); 31 | Set values = new HashSet<>(); 32 | List> entries = new ArrayList<>(); 33 | for (Map.Entry entry : composite.entrySet()) { 34 | entries.add(entry); 35 | keys.add(entry.getKey()); 36 | values.add(entry.getValue()); 37 | } 38 | Assert.assertEquals(entries.size(), 3); 39 | Assert.assertEquals(entries.size(), 3); 40 | Assert.assertEquals(keys, new HashSet<>(Arrays.asList("a", "b", "c"))); 41 | Assert.assertEquals(values, new HashSet<>(Arrays.asList(3, 2, 1))); 42 | 43 | keys.clear(); 44 | for (String key : composite.keySet()) { 45 | keys.add(key); 46 | } 47 | Assert.assertEquals(keys, new HashSet<>(Arrays.asList("a", "b", "c"))); 48 | 49 | values.clear(); 50 | for (Integer value : composite.values()) { 51 | values.add(value); 52 | } 53 | Assert.assertEquals(values, new HashSet<>(Arrays.asList(3, 2, 1))); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/utils/LiKafkaClientsTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import com.linkedin.kafka.clients.largemessage.LargeMessageSegment; 8 | 9 | import java.nio.ByteBuffer; 10 | import java.util.Arrays; 11 | import java.util.Random; 12 | import java.util.UUID; 13 | 14 | import static org.testng.Assert.assertEquals; 15 | 16 | /** 17 | * The util class for unit test. 18 | */ 19 | public class LiKafkaClientsTestUtils { 20 | 21 | private LiKafkaClientsTestUtils() { 22 | } 23 | 24 | public static LargeMessageSegment createLargeMessageSegment(UUID messageId, 25 | int seq, 26 | int numberOfSegments, 27 | int messageSizeInBytes, 28 | int segmentSize) { 29 | byte[] bytes = new byte[segmentSize]; 30 | Arrays.fill(bytes, (byte) seq); 31 | return new LargeMessageSegment(messageId, seq, numberOfSegments, messageSizeInBytes, ByteBuffer.wrap(bytes)); 32 | } 33 | 34 | public static void verifyMessage(byte[] serializedMessage, int messageSizeInBytes, int segmentSize) { 35 | int i = 0; 36 | for (; i < messageSizeInBytes / segmentSize; i++) { 37 | for (int j = 0; j < segmentSize; j++) { 38 | assertEquals(serializedMessage[i * segmentSize + j], (byte) i, "Byte value should match seq."); 39 | } 40 | } 41 | for (int j = 0; j < messageSizeInBytes % segmentSize; j++) { 42 | assertEquals(serializedMessage[i * segmentSize + j], (byte) i, "Byte value should match seq."); 43 | } 44 | } 45 | 46 | public static String getRandomString(int length) { 47 | char[] chars = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; 48 | Random random = new Random(); 49 | StringBuilder stringBuiler = new StringBuilder(); 50 | for (int i = 0; i < length; i++) { 51 | stringBuiler.append(chars[Math.abs(random.nextInt()) % 16]); 52 | } 53 | return stringBuiler.toString(); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/utils/LiKafkaClientsUtilsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import java.util.Map; 8 | import org.testng.Assert; 9 | import org.testng.annotations.Test; 10 | 11 | 12 | public class LiKafkaClientsUtilsTest { 13 | 14 | @Test 15 | public void testGetKnownLibraryVersions() throws Exception { 16 | Map<String, String> versions = LiKafkaClientsUtils.getKnownLibraryVersions(); 17 | Assert.assertEquals(versions.size(), 3); 18 | Assert.assertNotNull(versions.get("com.linkedin.kafka")); 19 | Assert.assertNotNull(versions.get("com.linkedin.mario")); 20 | // if we run from an IDE we won't get a version for "com.linkedin.kafka.clients" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/utils/PrimitiveEncoderDecoderTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import org.testng.Assert; 8 | import org.testng.annotations.Test; 9 | 10 | 11 | public class PrimitiveEncoderDecoderTest { 12 | private void verifyLong(long x) { 13 | byte[] data = new byte[8]; 14 | PrimitiveEncoderDecoder.encodeLong(x, data, 0); 15 | Assert.assertEquals(data.length, 8); 16 | Assert.assertEquals(PrimitiveEncoderDecoder.decodeLong(data, 0), x); 17 | Assert.assertEquals(PrimitiveEncoderDecoder.decodeLong(PrimitiveEncoderDecoder.encodeLong(x), 0), x); 18 | } 19 | 20 | private void verifyInt(int x) { 21 | byte[] data = new byte[4]; 22 | PrimitiveEncoderDecoder.encodeInt(x, data, 0); 23 | Assert.assertEquals(data.length, 4); 24 | Assert.assertEquals(PrimitiveEncoderDecoder.decodeInt(data, 0), x); 25 | Assert.assertEquals(PrimitiveEncoderDecoder.decodeInt(PrimitiveEncoderDecoder.encodeInt(x), 0), x); 26 | } 27 | 28 | @Test 29 | public void testEncodeDecodeLong() { 30 | verifyLong(Long.MIN_VALUE); 31 | verifyLong(Long.MAX_VALUE); 32 | verifyLong(-1L); 33 | verifyLong(0L); 34 | verifyLong(1L); 35 | verifyLong(1000000000L); 36 | verifyLong(-1000000000L); 37 | verifyLong(System.currentTimeMillis()); 38 | 39 | verifyInt(Integer.MIN_VALUE); 40 | verifyInt(Integer.MAX_VALUE); 41 | verifyInt(-1); 42 | verifyInt(0); 43 | verifyInt(1); 44 | verifyInt(1000000000); 45 | verifyInt(-1000000000); 46 | } 47 | 48 | @Test(expectedExceptions = {IllegalArgumentException.class}) 49 | public void testInsufficientDataForDecodeLong() { 50 | long value = 100; 51 | byte[] serialized = PrimitiveEncoderDecoder.encodeLong(value); 52 | Assert.assertNotNull(serialized); 53 | Assert.assertEquals(serialized.length, 8); 54 | Assert.assertNotEquals(PrimitiveEncoderDecoder.decodeLong(serialized, 1), value); 55 | } 56 | 57 | @Test(expectedExceptions = {IllegalArgumentException.class}) 58 | public void testInsufficientDataForDecodeInt() { 59 | int value = 100; 60 | byte[] serialized = PrimitiveEncoderDecoder.encodeInt(value); 61 | Assert.assertNotNull(serialized); 62 | Assert.assertEquals(serialized.length, 4); 63 | Assert.assertNotEquals(PrimitiveEncoderDecoder.decodeLong(serialized, 1), value); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/java/com/linkedin/kafka/clients/utils/QueuedMapTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | package com.linkedin.kafka.clients.utils; 6 | 7 | import org.testng.annotations.Test; 8 | 9 | import static org.testng.Assert.assertEquals; 10 | 11 | /** 12 | * Unit test for QueuedMap 13 | */ 14 | public class QueuedMapTest { 15 | 16 | @Test 17 | public void testPutAndGet() { 18 | QueuedMap map = new QueuedMap<>(); 19 | assertEquals(map.getEldestKey(), null, "Empty map should return null for eldest key."); 20 | assertEquals(map.size(), 0, "Map size should be 0"); 21 | map.put(0, 0); 22 | assertEquals(map.size(), 1, "Map size should be 1 now"); 23 | assertEquals((int) map.get(0), 0, "0 should be in the map now"); 24 | assertEquals((int) map.getEldestKey(), 0, "Eldest key should be 0"); 25 | } 26 | 27 | @Test 28 | public void testRemove() { 29 | QueuedMap map = new QueuedMap<>(); 30 | map.put(0, 0); 31 | // Remove non-exist key 32 | map.remove(1); 33 | assertEquals(map.size(), 1, "Map size should be 1"); 34 | assertEquals((int) map.get(0), 0, "0 should be in the map"); 35 | assertEquals((int) map.getEldestKey(), 0, "Eldest key should be 0"); 36 | // Remove exist key 37 | map.remove(0); 38 | assertEquals(map.get(0), null, "0 should be in the map now"); 39 | assertEquals(map.getEldestKey(), null, "Empty map should return null for eldest key."); 40 | assertEquals(map.size(), 0, "Map size should be 0"); 41 | } 42 | 43 | @Test 44 | public void testEldestKey() { 45 | QueuedMap map = new QueuedMap<>(); 46 | assert map.getEldestKey() == null; 47 | map.put(0, 0); 48 | map.put(1, 1); 49 | assertEquals((int) map.getEldestKey(), 0, "Eldest key should be 0"); 50 | map.remove(0); 51 | assertEquals((int) map.getEldestKey(), 1, "Eldest key should be 1 now"); 52 | assertEquals(map.get(0), null, "0 should have been removed."); 53 | map.put(0, 0); 54 | assertEquals((int) map.getEldestKey(), 1, "Eldest key should be 1 now"); 55 | map.put(1, 1); 56 | assertEquals((int) map.getEldestKey(), 0, "Eldest key should be 0 now"); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /li-apache-kafka-clients/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | # 4 | 5 | log4j.rootLogger=INFO, stdout 6 | 7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 9 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n 10 | 11 | log4j.logger.com.linkedin.kafka.clients=ERROR 12 | log4j.logger.org.apache.kafka=ERROR 13 | 14 | # zkclient can be verbose; during debugging it is common to adjust it separately 15 | log4j.logger.org.I0Itec.zkclient.ZkClient=WARN 16 | log4j.logger.org.apache.zookeeper=WARN 17 | -------------------------------------------------------------------------------- /semantic-build-versioning.gradle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linkedin/li-apache-kafka-clients/b252f7a8fa7494773a9c6e88ab3545529fabbe28/semantic-build-versioning.gradle -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License").
 See License in the project root for license information. 3 | */ 4 | 5 | import org.gradle.util.GradleVersion 6 | 7 | buildscript { 8 | repositories { 9 | maven { 10 | url 'https://plugins.gradle.org/m2/' 11 | } 12 | } 13 | dependencies { 14 | classpath 'gradle.plugin.net.vivin:gradle-semantic-build-versioning:4.0.0' 15 | } 16 | } 17 | 18 | apply plugin: 'net.vivin.gradle-semantic-build-versioning' 19 | 20 | //otherwise it defaults to the folder name 21 | rootProject.name = 'li-apache-kafka-clients' 22 | 23 | include 'li-apache-kafka-clients', 'kafka-test-harness', 'integration-tests' 24 | 25 | def gradleVer = GradleVersion.current() 26 | if (gradleVer < GradleVersion.version("5.2.1")) { 27 | logger.error("this build requires gradle >= 5.2.1. current detection version is " + gradleVer + ". use newer gradle or (better yet) use the wrapper") 28 | } --------------------------------------------------------------------------------