├── .fossa.yml ├── .github └── workflows │ └── ci_build_test.yaml ├── .gitignore ├── .gitlab-ci.yml ├── LICENSE ├── README.md ├── build.sh ├── ci ├── Dockerfile.kafka-connect-splunk ├── Jenkinsfile ├── export_data.py ├── fix_hosts.sh ├── kafka-bastion.dockerfile ├── kafka-data-gen.dockerfile ├── kafka_cluster_gen.py ├── kafka_orca_gen.py ├── orca_create_splunk.py ├── perf.py ├── run_bastion.sh ├── run_data_gen.sh ├── run_kafka_connect.sh ├── setup_splunk_hec.sh └── splunk_cloud_ci.sh ├── config ├── connect-distributed-quickstart.properties └── connect-distributed.properties ├── dependency-reduced-pom.xml ├── pom.xml ├── sonar-project.properties ├── src ├── changes ├── main │ ├── java │ │ └── com │ │ │ └── splunk │ │ │ ├── hecclient │ │ │ ├── ConcurrentHec.java │ │ │ ├── DoubleSerializer.java │ │ │ ├── Event.java │ │ │ ├── EventBatch.java │ │ │ ├── Hec.java │ │ │ ├── HecAckPollResponse.java │ │ │ ├── HecAckPoller.java │ │ │ ├── HecChannel.java │ │ │ ├── HecConfig.java │ │ │ ├── HecEmptyEventException.java │ │ │ ├── HecException.java │ │ │ ├── HecInf.java │ │ │ ├── HecNullEventException.java │ │ │ ├── HecURIBuilder.java │ │ │ ├── HttpClientBuilder.java │ │ │ ├── Indexer.java │ │ │ ├── IndexerInf.java │ │ │ ├── JsonEvent.java │ │ │ ├── JsonEventBatch.java │ │ │ ├── LoadBalancer.java │ │ │ ├── LoadBalancerInf.java │ │ │ ├── Poller.java │ │ │ ├── PollerCallback.java │ │ │ ├── PostResponse.java │ │ │ ├── RawEvent.java │ │ │ ├── RawEventBatch.java │ │ │ ├── ResponsePoller.java │ │ │ ├── examples │ │ │ │ ├── HecExample.java │ │ │ │ ├── HecPerf.java │ │ │ │ ├── HecPerfConfig.java │ │ │ │ └── PrintIt.java │ │ │ └── package-info.java │ │ │ └── kafka │ │ │ └── connect │ │ │ ├── AbstractClientWrapper.java │ │ │ ├── HecClientWrapper.java │ │ │ ├── JacksonStructModule.java │ │ │ ├── KafkaRecordTracker.java │ │ │ ├── SplunkSinkConnector.java │ │ │ ├── SplunkSinkConnectorConfig.java │ │ │ ├── SplunkSinkRecord.java │ │ │ ├── SplunkSinkTask.java │ │ │ └── VersionUtils.java │ └── resources │ │ └── version.properties └── test │ ├── java │ └── com │ │ └── splunk │ │ ├── hecclient │ │ ├── CloseableHttpClientMock.java │ │ ├── CloseableHttpResponseMock.java │ │ ├── ConcurrentHecTest.java │ │ ├── DoubleSerializerTest.java │ │ ├── HecAckPollResponseTest.java │ │ ├── HecAckPollerTest.java │ │ ├── HecChannelTest.java │ │ ├── HecConfigTest.java │ │ ├── HecTest.java │ │ ├── HecURIBuilderTest.java │ │ ├── HttpClientBuilderTest.java │ │ ├── HttpEntityMock.java │ │ ├── IndexerMock.java │ │ ├── IndexerTest.java │ │ ├── JsonEvenBatchTest.java │ │ ├── JsonEventTest.java │ │ ├── LoadBalancerMock.java │ │ ├── LoadBalancerTest.java │ │ ├── PollerCallbackMock.java │ │ ├── PollerMock.java │ │ ├── PostResponseTest.java │ │ ├── RawEventBatchTest.java │ │ ├── RawEventTest.java │ │ ├── ResponsePollerTest.java │ │ ├── StatusLineMock.java │ │ └── UnitUtil.java │ │ └── kafka │ │ └── connect │ │ ├── ConfigProfile.java │ │ ├── HecMock.java │ │ ├── KafkaRecordTrackerTest.java │ │ ├── MockHecClientWrapper.java │ │ ├── SplunkSinkConnecterTest.java │ │ ├── SplunkSinkConnectorConfigTest.java │ │ ├── SplunkSinkRecordTest.java │ │ ├── SplunkSinkTaskTest.java │ │ ├── StructEventTest.java │ │ ├── UnitUtil.java │ │ └── VersionUtilsTest.java │ └── resources │ ├── keystoretest.jks │ ├── keystoretest.p12 │ ├── log4j2.xml │ └── testversion.properties ├── target └── site │ └── jacoco │ ├── jacoco.csv │ └── jacoco.xml └── test ├── README.md ├── config.sh ├── config.yaml ├── conftest.py ├── lib ├── commonkafka.py ├── commonsplunk.py ├── connect_params.py 
├── connector.template ├── connector_upgrade.py ├── data_gen.py ├── eventproducer_connector_upgrade.py └── helper.py ├── logging.conf ├── pytest.ini ├── requirements.txt └── testcases ├── test_configurations.py ├── test_crud.py ├── test_data_enrichment.py └── test_data_onboarding.py
/.fossa.yml:
--------------------------------------------------------------------------------
1 | version: 3
2 | server: https://app.fossa.com
3 | project:
4 |   id: "kafka-connect-splunk"
5 |   team: "TA-Automation"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Compiled class file
3 | *.class
4 |
5 | # Log file
6 | *.log
7 |
8 | # BlueJ files
9 | *.ctxt
10 |
11 | # Mobile Tools for Java (J2ME)
12 | .mtj.tmp/
13 |
14 | # Package Files #
15 | *.jar
16 | *.war
17 | *.ear
18 | *.zip
19 | *.tar.gz
20 | *.rar
21 |
22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
23 | hs_err_pid*
24 | target/*
25 | .idea/*
26 | .DS_Store
27 |
28 | splunk-kafka-connect/
29 | pom.xml.versionsBackup
30 | .classpath
31 | .project
32 | *.iml
33 | .settings/
34 |
35 | **/__pycache__/*
36 | test/venv/
37 | venv
38 |
39 | !target/site/
40 | target/site/jacoco/*.html
41 | target/site/jacoco/*/*
42 | target/site/*.exec
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | include:
2 |   - file: /cicd/sonarqube-3.0.1.yml
3 |     project: ci-cd/templates
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # variables
4 | kafkaversion=2.0.0
5 | builddir=/tmp/splunk-kafka-connect-build/splunk-kafka-connect
6 |
7 | githash=`git rev-parse --short HEAD 2>/dev/null | sed "s/\(.*\)/@\1/"` # get current git hash
8 | gitbranch=`git rev-parse --abbrev-ref HEAD` # get current git branch
9 | gitversion=`git describe --abbrev=0 --tags 2>/dev/null` # returns the latest tag from current commit
10 | jarversion=${gitversion}
11 |
12 | # if no version found from git tag, it is a dev build
13 | if [[ -z "$gitversion" ]]; then
14 |     gitversion="dev"
15 |     jarversion=${gitversion}-SNAPSHOT
16 | fi
17 |
18 | packagename=splunk-kafka-connect-${gitversion}.tar.gz
19 |
20 | # record git info in version.properties file under resources folder
21 | resourcedir='src/main/resources'
22 | /bin/rm -f ${resourcedir}/version.properties
23 | echo githash=${githash} >> ${resourcedir}/version.properties
24 | echo gitbranch=${gitbranch} >> ${resourcedir}/version.properties
25 | echo gitversion=${gitversion} >> ${resourcedir}/version.properties
26 |
27 |
28 | curdir=`pwd`
29 |
30 | /bin/rm -rf ${builddir}
31 | mkdir -p ${builddir}/connectors
32 | mkdir -p ${builddir}/bin
33 | mkdir -p ${builddir}/config
34 | mkdir -p ${builddir}/libs
35 |
36 | # Build the package
37 | echo "Building the connector package ..."
38 | mvn versions:set -DnewVersion=${jarversion}
39 | mvn package > /dev/null 2>&1
40 |
41 | # Copy over the package
42 | echo "Copy over splunk-kafka-connect jar ..."
43 | cp target/splunk-kafka-connect-${jarversion}.jar ${builddir}/connectors
44 | cp config/* ${builddir}/config
45 | cp README.md ${builddir}
46 | cp LICENSE ${builddir}
47 |
48 | # Download kafka
49 | echo "Downloading kafka_2.11-${kafkaversion} ..."
50 | wget -q https://archive.apache.org/dist/kafka/${kafkaversion}/kafka_2.11-${kafkaversion}.tgz -P ${builddir}
51 | cd ${builddir} && tar xzf kafka_2.11-${kafkaversion}.tgz
52 |
53 | # Copy over kafka connect runtime
54 | echo "Copy over kafka connect runtime ..."
55 | cp kafka_2.11-${kafkaversion}/bin/connect-distributed.sh ${builddir}/bin
56 | cp kafka_2.11-${kafkaversion}/bin/kafka-run-class.sh ${builddir}/bin
57 | cp kafka_2.11-${kafkaversion}/config/connect-log4j.properties ${builddir}/config
58 | cp kafka_2.11-${kafkaversion}/libs/*.jar ${builddir}/libs
59 |
60 | # Clean up
61 | echo "Clean up ..."
62 | /bin/rm -rf kafka_2.11-${kafkaversion}
63 | /bin/rm -f kafka_2.11-${kafkaversion}.tgz
64 |
65 | # Package up
66 | echo "Package ${packagename} ..."
67 | cd .. && tar czf ${packagename} splunk-kafka-connect
68 |
69 | echo "Copy package ${packagename} to ${curdir} ..."
70 | cp ${packagename} ${curdir}
71 |
72 | /bin/rm -rf splunk-kafka-connect ${packagename}
73 | echo "Done with build & packaging"
74 |
75 | echo
76 |
77 | cat << EOP
78 | To run splunk-kafka-connect, follow these steps:
79 | 1. untar the package: tar xzf splunk-kafka-connect.tar.gz
80 | 2. configure config/connect-distributed.properties according to your env
81 | 3. run: bash bin/connect-distributed.sh config/connect-distributed.properties
82 | 4. use the Kafka Connect REST API to create data collection tasks
83 | EOP
--------------------------------------------------------------------------------
/ci/Dockerfile.kafka-connect-splunk:
--------------------------------------------------------------------------------
1 | FROM openkbs/jdk11-mvn-py3
2 |
3 | ARG ssh_prv_key
4 | ARG ssh_pub_key
5 | ENV kafkaversion=2.5.0
6 | ENV ESERV_HOME=/tmp
7 |
8 | RUN mkdir -p /kafka-connect/kafka
9 | RUN mkdir /kafka-connect/logs
10 |
11 | RUN apt-get update && apt-get install -y \
12 |     git openssh-client openssl musl-dev curl
13 |
14 | RUN wget -q https://bootstrap.pypa.io/get-pip.py -P / && python get-pip.py && pip install requests && pip install psutil
15 |
16 | RUN wget -q http://apache.mirrors.hoobly.com/kafka/${kafkaversion}/kafka_2.12-${kafkaversion}.tgz -P / && \
17 |     tar -xf kafka_2.12-${kafkaversion}.tgz -C /kafka-connect/kafka --strip-components 1 && rm -f kafka_2.12-${kafkaversion}.tgz
18 |
19 | RUN ssh-keygen -f ${ESERV_HOME}/id_rsa -t rsa -N '' && \
20 |     cp ${ESERV_HOME}/id_rsa.pub ${ESERV_HOME}/authorized_keys && \
21 |     chmod 640 ${ESERV_HOME}/authorized_keys
22 |
23 | WORKDIR /kafka-connect
24 |
25 | ADD run_kafka_connect.sh /kafka-connect/run_kafka_connect.sh
26 | ADD config.yaml /kafka-connect/config.yaml
27 |
28 | EXPOSE 9092 8083
29 | CMD ["/bin/bash", "-c", "/kafka-connect/run_kafka_connect.sh"]
--------------------------------------------------------------------------------
/ci/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env groovy
2 |
3 | @Library('jenkinstools@master') _
4 |
5 | import com.splunk.jenkins.DockerRequest;
6 | import com.splunk.tool.plugin.docker.extension.BadDockerExitCode;
7 |
8 | def dockerReq = new DockerRequest(steps,
9 |                                   currentBuild,
10 |                                   env,
11 |                                   [userId: "10777",
12 |                                    imageName: "repo.splunk.com/splunk/gdi/splunk-ubuntu-python3:latest",
13 |                                    repoName: "git@github.com:splunk/kafka-connect-splunk.git",
14 |                                    runner: "yarn",
15 |                                    remotePath: "/build"])
16 |
17 |
18 | withSplunkWrapNode("master") {
19 |     def imageName = "repo.splunk.com/splunk/gdi/kafka-connect-splunk:build-${env.BUILD_NUMBER}"
20 |
21 |     try {
22 |         stage("Build
splunk cloud stack") { 23 | println "=============build splunk cloud stack==============" 24 | withCredentials([file(credentialsId: 'gdi_connectors_orca', variable: 'ORCA_CREDENTIALS')]) { 25 | sh "tar -ovxf $ORCA_CREDENTIALS"; 26 | splunkPrepareAndCheckOut request: dockerReq, 27 | files: "${WORKSPACE}/.orca, ${WORKSPACE}/.ssh"; 28 | } 29 | splunkRunScript request:dockerReq, 30 | script: 31 | """ 32 | chmod +x ci/splunk_cloud_ci.sh && sh ci/splunk_cloud_ci.sh 33 | """; 34 | } 35 | stage('Build image') { 36 | println "=============build image==============" 37 | withCredentials([file(credentialsId: 'gdi_connectors_orca', variable: 'ORCA_CREDENTIALS')]) { 38 | sh "tar -ovxf $ORCA_CREDENTIALS"; 39 | splunkPrepareAndCheckOut request: dockerReq, 40 | files: "${WORKSPACE}/.orca, ${WORKSPACE}/.ssh"; 41 | } 42 | splunkDockerImageBuild workDir: "/build/kafka-connect-splunk/ci", 43 | imageName: imageName, 44 | dockerfilePath: "Dockerfile.kafka-connect-splunk" 45 | } 46 | stage('Run integration tests') { 47 | println "=============run kafka and integration tests==============" 48 | splunkDockerStart imageName: imageName, 49 | mountBindings: '/var/run/docker.sock:/var/run/docker.sock:ro', 50 | script: 51 | """ 52 | /bin/bash -c /kafka-connect/run_kafka_connect.sh 53 | """; 54 | } 55 | } 56 | catch (BadDockerExitCode e) { 57 | currentBuild.result = "FAILURE"; 58 | echo "Exception Caught: ${e.getMessage()}"; 59 | echo "Stack Trace: ${e.printStackTrace()}"; 60 | } 61 | catch (Exception e) { 62 | currentBuild.result = "FAILURE"; 63 | echo "Exception Caught: ${e.getMessage()}"; 64 | echo "Stack Trace: ${e.printStackTrace()}"; 65 | } 66 | finally { 67 | println "=============delete splunk cloud stack==============" 68 | splunkCopyFromDocker files: "ci/stack_id", 69 | imageName: imageName, 70 | remotePath: "/build/kafka-connect-splunk"; 71 | archiveArtifacts allowEmptyArchive: true, artifacts: "target/stack_id"; 72 | withCredentials([file(credentialsId: 'gdi_connectors_orca', variable: 'ORCA_CREDENTIALS')]) { 73 | sh '''#!/bin/bash 74 | tar -ovxf $ORCA_CREDENTIALS 75 | mkdir -p ~/.orca 76 | cp -r .orca/* ~/.orca 77 | chmod 600 ~/.orca/*id_rsa 78 | chown -R 10777 ~/.orca 79 | pip install splunk_orca==1.1.0 -i https://repo.splunk.com/artifactory/api/pypi/pypi/simple --upgrade 80 | stack_id=`cat target/stack_id` 81 | python -m splunk_orca --cloud cloudworks destroy $stack_id 82 | ''' 83 | } 84 | steps.cleanWs(); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /ci/fix_hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function fix_hosts() { 4 | if [ -f /etc/hosts2 ] && [ ! 
-f /fixed_host ]; then 5 | cat /etc/hosts2 >> /etc/hosts 6 | touch /fixed_host 7 | fi 8 | } 9 | 10 | while : 11 | do 12 | fix_hosts 13 | sleep 1 14 | done 15 | -------------------------------------------------------------------------------- /ci/kafka-bastion.dockerfile: -------------------------------------------------------------------------------- 1 | FROM anapsix/alpine-java:8_jdk 2 | 3 | RUN apk update && apk upgrade && apk add git && apk add openssh && apk add openssl && apk add curl && apk add python 4 | 5 | ENV kafkaversion=0.11.0.2 6 | RUN wget -q https://archive.apache.org/dist/kafka/${kafkaversion}/kafka_2.11-${kafkaversion}.tgz -P / && cd / && tar xzf kafka_2.11-${kafkaversion}.tgz && rm -f kafka_2.11-${kafkaversion}.tgz 7 | 8 | RUN wget -q https://bootstrap.pypa.io/get-pip.py -P / && python get-pip.py && pip install requests && pip install python-dateutil 9 | 10 | RUN mkdir -p /root/.ssh 11 | ADD id_rsa /root/.ssh/id_rsa 12 | RUN chmod 600 /root/.ssh/id_rsa 13 | 14 | ADD id_rsa.pub /root/.ssh/id_rsa.pub 15 | ADD known_hosts /root/.ssh/known_hosts 16 | 17 | RUN mkdir -p /kafka-bastion/ 18 | WORKDIR /kafka-bastion 19 | 20 | ADD run_bastion.sh /kafka-bastion/run_bastion.sh 21 | 22 | CMD ["/bin/bash", "-c", "/kafka-bastion/run_bastion.sh"] 23 | -------------------------------------------------------------------------------- /ci/kafka-data-gen.dockerfile: -------------------------------------------------------------------------------- 1 | FROM anapsix/alpine-java:8_jdk 2 | 3 | RUN apk update && apk upgrade && apk add git && apk add openssh && apk add openssl 4 | 5 | RUN mkdir -p /bin/gradle 6 | 7 | ENV GRADLE_VERSION=4.3.1 8 | ENV GRADLE_HOME=/bin/gradle/gradle-${GRADLE_VERSION} 9 | ENV PATH=${PATH}:${GRADLE_HOME}/bin 10 | 11 | RUN wget -q https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip -P /bin/gradle \ 12 | && cd /bin/gradle && unzip gradle-${GRADLE_VERSION}-bin.zip \ 13 | && rm gradle-${GRADLE_VERSION}-bin.zip 14 | 15 | RUN mkdir -p /kafka-data-gen 16 | WORKDIR /kafka-data-gen 17 | 18 | RUN mkdir -p /root/.ssh 19 | ADD known_hosts /root/.ssh/known_hosts 20 | 21 | ADD fix_hosts.sh /fix_hosts.sh 22 | ADD run_data_gen.sh /kafka-data-gen/run_data_gen.sh 23 | 24 | CMD ["/bin/bash", "-c", "/kafka-data-gen/run_data_gen.sh"] 25 | -------------------------------------------------------------------------------- /ci/orca_create_splunk.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | import os 4 | import logging 5 | import subprocess 6 | import json 7 | import jsonpath 8 | import sys 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | _env_var = os.environ 13 | 14 | 15 | def create_cloud_stack(): 16 | cmd = f"python3 -m splunk_orca --cloud cloudworks --printer json create --splunk-version {_env_var['SPLUNK_VERSION']}" 17 | try: 18 | proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, 19 | stderr=subprocess.STDOUT) 20 | output, error = proc.communicate() 21 | logger.info(output) 22 | data = json.loads(output) 23 | stack_id = jsonpath.jsonpath(data, '$..stack_id')[0] 24 | if error: 25 | logger.error(error.strip()) 26 | logger.info(f'The stack [{stack_id}] is Creating.') 27 | return stack_id 28 | except OSError as e: 29 | logger.error(e) 30 | 31 | 32 | def get_status(stack_id): 33 | cmd = f"python3 -m splunk_orca --cloud cloudworks --printer json show containers --deployment-id {stack_id}" 34 | 35 | try: 36 | proc = subprocess.Popen(cmd, 
shell=True, stdout=subprocess.PIPE,
37 |                                 stderr=subprocess.STDOUT)
38 |         output, error = proc.communicate()
39 |         data = json.loads(output)
40 |         status = jsonpath.jsonpath(data, '$..status')[0]
41 |         if error:
42 |             logger.error(error.strip())
43 |         return status
44 |     except OSError as e:
45 |         logger.error(e)
46 |
47 |
48 | def wait_until_stack_ready(stack_id):
49 |     t_end = time.time() + 3600
50 |     while time.time() < t_end:
51 |         status = get_status(stack_id)
52 |         if status == 'READY':
53 |             logger.info(f'The stack [{stack_id}] is Ready to use.')
54 |             return
55 |     logger.error(f"Time out when creating Splunk cloud stack: {stack_id}")
56 |
57 |
58 | if __name__ == '__main__':
59 |     stack_id = create_cloud_stack()
60 |     wait_until_stack_ready(stack_id)
61 |     sys.stdout.write(stack_id)
--------------------------------------------------------------------------------
/ci/run_bastion.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curdir=`pwd`
4 | git clone git@github.com:splunk/kafka-connect-splunk.git
5 | branch=${KAFKA_CONNECT_BRANCH:-develop}
6 | cd kafka-connect-splunk && git checkout ${branch}
7 |
8 | duration=${SLEEP:-600}
9 | sleep ${duration}
10 |
11 | bash ${curdir}/kafka-connect-splunk/ci/fix_hosts.sh > /tmp/fixhosts 2>&1 &
12 |
13 | python ${curdir}/kafka-connect-splunk/ci/perf.py
14 |
15 | tail -f /dev/null
--------------------------------------------------------------------------------
/ci/run_data_gen.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | git clone https://github.com/dtregonning/kafka-data-gen.git
4 | cd kafka-data-gen && gradle install
5 |
6 | sleep 600
7 |
8 | bash /fix_hosts.sh > /tmp/fixhosts 2>&1 &
9 |
10 | KAFKA_DATA_GEN_SIZE=${KAFKA_DATA_GEN_SIZE:-1}
11 | KAFKA_DATA_GEN_SIZE=$(($KAFKA_DATA_GEN_SIZE - 1))
12 |
13 | # gen an exec shell script on the fly
14 |
15 | echo "#!/bin/bash" > do_run_data_gen.sh
16 | echo "while :" >> do_run_data_gen.sh
17 | echo "do" >> do_run_data_gen.sh
18 | echo "    java -Xmx${JVM_MAX_HEAP:-4G} -Xms${JVM_MIN_HEAP:-512M} -jar build/libs/kafka-data-gen.jar -message-count ${MESSAGE_COUNT} -message-size ${MESSAGE_SIZE} -topic ${KAFKA_TOPIC} -bootstrap.servers ${KAFKA_BOOTSTRAP_SERVERS} -eps ${EPS}" >> do_run_data_gen.sh
19 | echo "    sleep 1" >> do_run_data_gen.sh
20 | echo "done" >> do_run_data_gen.sh
21 |
22 | chmod +x do_run_data_gen.sh
23 |
24 | for i in `seq ${KAFKA_DATA_GEN_SIZE}`
25 | do
26 |     bash do_run_data_gen.sh &
27 | done
28 |
29 | bash do_run_data_gen.sh
--------------------------------------------------------------------------------
/ci/run_kafka_connect.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CI_KAFKA_TOPIC=test-datagen
4 | # Checkout, build and run kafka-connect-splunk in flight
5 |
6 | curdir=`pwd`
7 | git clone https://github.com/splunk/kafka-connect-splunk.git
8 |
9 | branch=${KAFKA_CONNECT_BRANCH:-develop}
10 | # build the package
11 | cd kafka-connect-splunk && git checkout ${branch} && bash build.sh
12 | cd /kafka-connect
13 | cp kafka-connect-splunk/target/splunk-kafka-connect-v*.jar /kafka-connect/
14 | yes | cp -rf config.yaml kafka-connect-splunk/test/config.yaml
15 |
16 | sed -i 's/plugin.path=connectors/plugin.path=\/kafka-connect/' /kafka-connect/kafka-connect-splunk/config/connect-distributed-quickstart.properties
17 | sed -i 's/key.converter=org.apache.kafka.connect.storage.StringConverter/key.converter=org.apache.kafka.connect.json.JsonConverter/' /kafka-connect/kafka-connect-splunk/config/connect-distributed-quickstart.properties
18 | sed -i 's/value.converter=org.apache.kafka.connect.storage.StringConverter/value.converter=org.apache.kafka.connect.json.JsonConverter/' /kafka-connect/kafka-connect-splunk/config/connect-distributed-quickstart.properties
19 |
20 | #debug=${KAFKA_CONNECT_LOGGING:-DEBUG}
21 | #echo "log4j.logger.com.splunk=${debug}" >> config/connect-log4j.properties
22 |
23 | cd kafka
24 |
25 | echo "Start ZooKeeper"
26 | bin/zookeeper-server-start.sh config/zookeeper.properties > /kafka-connect/logs/zookeeper.txt 2>&1 &
27 |
28 | echo "Start kafka server"
29 | bin/kafka-server-start.sh config/server.properties > /kafka-connect/logs/kafka.txt 2>&1 &
30 |
31 | echo "Run connect"
32 | ./bin/connect-distributed.sh /kafka-connect/kafka-connect-splunk/config/connect-distributed-quickstart.properties > /kafka-connect/logs/kafka_connect.txt 2>&1 &
33 |
34 | echo "-----------------run integration tests-----------------"
35 | cd /kafka-connect/kafka-connect-splunk/test
36 | pip install virtualenv
37 | virtualenv venv
38 | source venv/bin/activate
39 | pip install -r requirements.txt
40 | pytest
--------------------------------------------------------------------------------
/ci/setup_splunk_hec.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | echo "=============setup splunk HEC=============="
4 |
5 | CI_SPLUNK_HOST=$1
6 |
7 | echo "Enable HEC services ..."
8 | curl -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} -k https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/nobody/splunk_httpinput/data/inputs/http/http/enable
9 |
10 | echo "Create new HEC token ..."
11 | curl -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} -k -d "name=splunk_hec_token&token=${CI_SPLUNK_HEC_TOKEN}" https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/nobody/splunk_httpinput/data/inputs/http
12 |
13 | echo "Enable HEC new-token ..."
14 | curl -k -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/admin/splunk_httpinput/data/inputs/http/splunk_hec_token/enable
15 |
16 | echo "Create new HEC token with ack ..."
17 | curl -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} -k -d "name=splunk_hec_token_ack&token=${CI_SPLUNK_HEC_TOKEN_ACK}&useACK=1" https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/nobody/splunk_httpinput/data/inputs/http
18 |
19 | echo "Enable HEC new-token ..."
20 | curl -k -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/admin/splunk_httpinput/data/inputs/http/splunk_hec_token_ack/enable
21 |
22 | echo "Setup Indexes ..."
23 | curl -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} -k -d "name=${CI_INDEX_EVENTS}&datatype=event" https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/-/search/data/indexes
24 | curl -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} -k -d "name=${CI_KAFKA_HEADER_INDEX}&datatype=event" https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/servicesNS/-/search/data/indexes
25 |
26 |
27 | curl -k -X POST -u ${CI_SPLUNK_USERNAME}:${CI_SPLUNK_PASSWORD} https://$CI_SPLUNK_HOST:${CI_SPLUNK_PORT}/services/server/control/restart
--------------------------------------------------------------------------------
/ci/splunk_cloud_ci.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | pip3 install splunk_orca==1.1.0 -i https://repo.splunk.com/artifactory/api/pypi/pypi/simple --upgrade
4 | splunk_orca --version
5 | pip3 install -r test/requirements.txt
6 | STACK_ID=`python3 ci/orca_create_splunk.py`
7 | CI_SPLUNK_HOST="$STACK_ID.stg.splunkcloud.com"
8 |
9 | chmod +x ci/setup_splunk_hec.sh && sh ci/setup_splunk_hec.sh $CI_SPLUNK_HOST
10 | echo "-----------------update config.yaml-----------------"
11 | sed -i "s/splunkd_url: https:\/\/127.0.0.1:8089/splunkd_url: https:\/\/$CI_SPLUNK_HOST:8089/g" /build/kafka-connect-splunk/test/config.yaml
12 | sed -i "s/splunk_hec_url: https:\/\/127.0.0.1:8088/splunk_hec_url: https:\/\/$CI_SPLUNK_HOST:8088/g" /build/kafka-connect-splunk/test/config.yaml
13 | sed -i "s/splunk_password: helloworld/splunk_password: ${CI_SPLUNK_PASSWORD}/g" /build/kafka-connect-splunk/test/config.yaml
14 |
15 | cp /build/kafka-connect-splunk/test/config.yaml /build/kafka-connect-splunk/ci/config.yaml
16 | echo "${STACK_ID}" > /build/kafka-connect-splunk/ci/stack_id
--------------------------------------------------------------------------------
/config/connect-distributed-quickstart.properties:
--------------------------------------------------------------------------------
1 | # These are defaults. This file just demonstrates how to override some settings.
2 | bootstrap.servers=localhost:9092
3 |
4 | # The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
5 | # need to configure these based on the format they want their data in when loaded from or stored into Kafka
6 | #key.converter=org.apache.kafka.connect.json.JsonConverter
7 | #value.converter=org.apache.kafka.connect.json.JsonConverter
8 |
9 | key.converter=org.apache.kafka.connect.storage.StringConverter
10 | value.converter=org.apache.kafka.connect.storage.StringConverter
11 |
12 | key.converter.schemas.enable=false
13 | value.converter.schemas.enable=false
14 |
15 | # The internal converter used for offsets and config data is configurable and must be specified, but most users will
16 | # always want to use the built-in default. Offset and config data is never visible outside of Copycat in this format.
17 | internal.key.converter=org.apache.kafka.connect.json.JsonConverter
18 | internal.value.converter=org.apache.kafka.connect.json.JsonConverter
19 |
20 | internal.key.converter.schemas.enable=false
21 | internal.value.converter.schemas.enable=false
22 |
23 | # Flush much faster (10s) than normal, which is useful for testing/debugging
24 | offset.flush.interval.ms=10000
25 |
26 | plugin.path=connectors/
27 |
28 | group.id=kafka-connect-splunk-hec-sink
29 | config.storage.topic=__kafka-connect-splunk-task-configs
30 | config.storage.replication.factor=1
31 |
32 | offset.storage.topic=__kafka-connect-splunk-offsets
33 | offset.storage.replication.factor=1
34 | offset.storage.partitions=1
35 |
36 | status.storage.topic=__kafka-connect-splunk-statuses
37 | status.storage.replication.factor=1
38 | status.storage.partitions=1
39 |
40 |
41 | #ssl.key.password=
42 | #
43 | #ssl.keystore.type=JKS
44 | #ssl.keystore.location=
45 | #ssl.keystore.password=
46 | #
47 | #ssl.truststore.type=
48 | #ssl.truststore.password=
49 | #ssl.truststore.location=
50 | #
51 | #sasl.kerberos.service.name=
52 | #security.protocol=
53 | #
54 | #ssl.enabled.protocols=
55 | #ssl.protocol=TLS
56 | #ssl.provider=
57 | #
58 | #sasl.kerberos.kinit.cmd=
59 | #
60 | #
61 | #ssl.cipher.suites=
62 | #ssl.endpoint.identification.algorithm=
63 | #ssl.keymanager.algorithm=SunX509
64 | #ssl.trustmanager.algorithm=PKIX
65 |
66 | rest.advertised.host.name=localhost
67 | rest.host.name=localhost
--------------------------------------------------------------------------------
/config/connect-distributed.properties:
--------------------------------------------------------------------------------
1 | # These are defaults. This file just demonstrates how to override some settings.
2 | bootstrap.servers=
3 |
4 | # The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
5 | # need to configure these based on the format they want their data in when loaded from or stored into Kafka
6 | #key.converter=org.apache.kafka.connect.json.JsonConverter
7 | #value.converter=org.apache.kafka.connect.json.JsonConverter
8 |
9 | key.converter=org.apache.kafka.connect.storage.StringConverter
10 | value.converter=org.apache.kafka.connect.storage.StringConverter
11 |
12 | key.converter.schemas.enable=false
13 | value.converter.schemas.enable=false
14 |
15 | # The internal converter used for offsets and config data is configurable and must be specified, but most users will
16 | # always want to use the built-in default. Offset and config data is never visible outside of Copycat in this format.
17 | internal.key.converter=org.apache.kafka.connect.json.JsonConverter 18 | internal.value.converter=org.apache.kafka.connect.json.JsonConverter 19 | 20 | internal.key.converter.schemas.enable=false 21 | internal.value.converter.schemas.enable=false 22 | 23 | # Flush much faster (10s) than normal, which is useful for testing/debugging 24 | offset.flush.interval.ms=10000 25 | 26 | plugin.path=connectors/ 27 | 28 | group.id=kafka-connect-splunk-hec-sink 29 | config.storage.topic=__kafka-connect-splunk-task-configs 30 | config.storage.replication.factor=3 31 | 32 | offset.storage.topic=__kafka-connect-splunk-offsets 33 | offset.storage.replication.factor=3 34 | offset.storage.partitions=25 35 | 36 | status.storage.topic=__kafka-connect-splunk-statuses 37 | status.storage.replication.factor=3 38 | status.storage.partitions=5 39 | 40 | 41 | #ssl.key.password= 42 | # 43 | #ssl.keystore.type=JKS 44 | #ssl.keystore.location= 45 | #ssl.keystore.password= 46 | # 47 | #ssl.truststore.type= 48 | #ssl.truststore.password= 49 | #ssl.truststore.location= 50 | # 51 | #sasl.kerberos.service.name= 52 | #security.protocol= 53 | # 54 | #ssl.enabled.protocols= 55 | #ssl.protocol=TLS 56 | #ssl.provider= 57 | # 58 | #sasl.kerberos.kinit.cmd= 59 | # 60 | # 61 | #ssl.cipher.suites= 62 | #ssl.endpoint.identification.algorithm= 63 | #ssl.keymanager.algorithm=SunX509 64 | #ssl.trustmanager.algorithm=PKIX 65 | 66 | # rest.advertised.host.name=localhost 67 | # rest.host.name=localhost 68 | -------------------------------------------------------------------------------- /sonar-project.properties: -------------------------------------------------------------------------------- 1 | sonar.projectKey=github-mirrors.kafka-connect-splunk 2 | sonar.sources=src/main/java/ 3 | sonar.language=java 4 | sonar.java.binaries=. 5 | sonar.exclusions=src/java/test/**,**/examples/** 6 | sonar.java.coveragePlugin=jacoco 7 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/ConcurrentHec.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
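The quickstart and production variants of connect-distributed.properties above differ mainly in replication factors and partition counts; in both cases the worker is started first and data collection tasks are then created through the Kafka Connect REST interface, as build.sh step 4 notes. A minimal sketch of that call in Java, assuming a worker listening on localhost:8083; the connector name and splunk.* property values in the body are illustrative placeholders, with SplunkSinkConnectorConfig.java being the authoritative reference for the supported keys:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class CreateConnectorExample {
    public static void main(String[] args) throws Exception {
        // Illustrative connector config; adjust topic, HEC URI and token to your environment.
        String body = "{\"name\": \"splunk-sink\", \"config\": {"
                + "\"connector.class\": \"com.splunk.kafka.connect.SplunkSinkConnector\","
                + "\"topics\": \"test-datagen\","
                + "\"splunk.hec.uri\": \"https://localhost:8088\","
                + "\"splunk.hec.token\": \"00000000-0000-0000-0000-000000000000\"}}";
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8083/connectors"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}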
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import org.slf4j.Logger;
19 | import org.slf4j.LoggerFactory;
20 |
21 | import java.util.ArrayList;
22 | import java.util.Arrays;
23 | import java.util.List;
24 | import java.util.concurrent.*;
25 |
26 | public class ConcurrentHec implements HecInf {
27 |     private static final Logger log = LoggerFactory.getLogger(ConcurrentHec.class);
28 |
29 |     private LinkedBlockingQueue<EventBatch> batches;
30 |     private ExecutorService executorService;
31 |     private List<Hec> hecs;
32 |     private PollerCallback pollerCallback;
33 |     private volatile boolean stopped;
34 |
35 |     public ConcurrentHec(int numberOfThreads, boolean useAck, HecConfig config, PollerCallback cb) {
36 |         this(numberOfThreads, useAck, config, cb, new LoadBalancer(config, null));
37 |     }
38 |
39 |     public ConcurrentHec(int numberOfThreads, boolean useAck, HecConfig config, PollerCallback cb, LoadBalancerInf loadBalancer) {
40 |         batches = new LinkedBlockingQueue<>(config.getConcurrentHecQueueCapacity());
41 |         ThreadFactory e = (Runnable r) -> new Thread(r, "Concurrent-HEC-worker");
42 |         executorService = Executors.newFixedThreadPool(numberOfThreads, e);
43 |         initHec(numberOfThreads, useAck, config, cb, loadBalancer);
44 |         loadBalancer.setHttpClient(hecs.get(0).getHttpClient());
45 |         pollerCallback = cb;
46 |         stopped = false;
47 |
48 |         for (int i = 0; i < numberOfThreads; i++) {
49 |             final int id = i;
50 |             Runnable r = () -> {
51 |                 run(id);
52 |             };
53 |             executorService.submit(r);
54 |         }
55 |     }
56 |
57 |     @Override
58 |     public final void send(final EventBatch batch) {
59 |         try {
60 |             boolean offerSuccess = batches.offer(batch, 1000, TimeUnit.MILLISECONDS);
61 |             if (!offerSuccess) {
62 |                 log.warn("Linked blocking queue is full (size = {}) for event batch = {}, failed to offer batch into queue", batches.size(), batch.getUUID());
63 |                 throw new HecException("linked blocking event queue is full, failed to offer batch into queue");
64 |             }
65 |         } catch (InterruptedException ex) {
66 |             throw new HecException("failed to offer batch into queue", ex);
67 |         }
68 |     }
69 |
70 |     @Override
71 |     public final void close() {
72 |         if (stopped) {
73 |             return;
74 |         }
75 |
76 |         stopped = true;
77 |         executorService.shutdown();
78 |     }
79 |
80 |     private void run(int id) {
81 |         // Note, never exit this function unless a shutdown, otherwise the worker thread will be gone.
82 |         final Hec hec = hecs.get(id);
83 |         while (!stopped) {
84 |             EventBatch batch;
85 |             try {
86 |                 batch = batches.poll(1, TimeUnit.SECONDS);
87 |             } catch (InterruptedException ex) {
88 |                 continue;
89 |             }
90 |
91 |             if (batch != null) {
92 |                 send(hec, batch);
93 |             }
94 |         }
95 |         hec.close();
96 |     }
97 |
98 |     private void send(final Hec hec, final EventBatch batch) {
99 |         try {
100 |             hec.send(batch);
101 |         } catch (Exception ex) {
102 |             batch.fail();
103 |             pollerCallback.onEventFailure(Arrays.asList(batch), ex);
104 |             log.error("sending batch to splunk encountered error", ex);
105 |         }
106 |     }
107 |
108 |     private void initHec(int count, boolean useAck, HecConfig config, PollerCallback cb, LoadBalancerInf loadBalancer) {
109 |         config.setTotalChannels(Math.max(config.getTotalChannels() / count, 1));
110 |         hecs = new ArrayList<>();
111 |         for (int i = 0; i < count; i++) {
112 |             if (useAck) {
113 |                 hecs.add(Hec.newHecWithAck(config, cb, loadBalancer));
114 |             } else {
115 |                 hecs.add(Hec.newHecWithoutAck(config, cb, loadBalancer));
116 |             }
117 |         }
118 |     }
119 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/DoubleSerializer.java:
--------------------------------------------------------------------------------
1 | package com.splunk.hecclient;
2 |
3 | import com.fasterxml.jackson.core.JsonGenerator;
4 | import com.fasterxml.jackson.core.JsonProcessingException;
5 | import com.fasterxml.jackson.databind.JsonSerializer;
6 | import com.fasterxml.jackson.databind.SerializerProvider;
7 |
8 | import java.io.IOException;
9 | import java.math.BigDecimal;
10 | import java.math.RoundingMode;
11 |
12 | /*
13 |  * Copyright 2017 Splunk, Inc..
14 |  *
15 |  * Licensed under the Apache License, Version 2.0 (the "License");
16 |  * you may not use this file except in compliance with the License.
17 |  * You may obtain a copy of the License at
18 |  *
19 |  *     http://www.apache.org/licenses/LICENSE-2.0
20 |  *
21 |  * Unless required by applicable law or agreed to in writing, software
22 |  * distributed under the License is distributed on an "AS IS" BASIS,
23 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 |  * See the License for the specific language governing permissions and
25 |  * limitations under the License.
26 |  */
27 | public class DoubleSerializer extends JsonSerializer<Double> {
28 |     @Override
29 |     public void serialize(Double value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
30 |         String d = BigDecimal.valueOf(value).setScale(6, RoundingMode.HALF_UP).toPlainString();
31 |         jgen.writeNumber(d);
32 |     }
33 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/HecAckPollResponse.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
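ConcurrentHec, completed above, is a bounded queue fronting a fixed pool of Hec workers: send() offers with a one-second timeout and fails loudly when the queue stays full, while each worker polls with a one-second timeout so it can re-check the volatile stopped flag after close(). A minimal usage sketch, assuming HecConfig exposes a (uris, token) constructor; the URL and token are placeholders:

import com.splunk.hecclient.*;

import java.util.Arrays;
import java.util.List;

public final class ConcurrentHecExample {
    public static void main(String[] args) {
        // Assumed constructor: list of HEC endpoints plus a token.
        HecConfig config = new HecConfig(Arrays.asList("https://localhost:8088"), "my-hec-token");
        PollerCallback callback = new PollerCallback() {
            @Override
            public void onEventCommitted(List<EventBatch> committed) {
                System.out.println("committed " + committed.size() + " batch(es)");
            }
            @Override
            public void onEventFailure(List<EventBatch> failed, Exception ex) {
                System.err.println("failed " + failed.size() + " batch(es): " + ex.getMessage());
            }
        };
        HecInf hec = new ConcurrentHec(2, true, config, callback); // 2 workers, ack polling enabled
        EventBatch batch = new JsonEventBatch();
        batch.add(new JsonEvent("hello from kafka-connect-splunk", null));
        hec.send(batch); // enqueues only; a worker thread performs the actual HTTP POST
        hec.close();
    }
}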
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import java.util.*;
19 |
20 | final class HecAckPollResponse {
21 |     // {"acks":{"1":true,"2":true,"3":true,"4":false,"5":false,"6":false}}
22 |     private final SortedMap<String, Boolean> acks = new TreeMap<>();
23 |
24 |     public Collection<Long> getSuccessIds() {
25 |         Set<Long> successful = new HashSet<>();
26 |         for (Map.Entry<String, Boolean> e: acks.entrySet()) {
27 |             if (e.getValue()) { // was 'true' in json, meaning it succeeded
28 |                 successful.add(Long.parseLong(e.getKey()));
29 |             }
30 |         }
31 |         return successful;
32 |     }
33 |
34 |     public Map<String, Boolean> getAcks() {
35 |         return acks;
36 |     }
37 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/HecChannel.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import org.apache.http.client.methods.HttpUriRequest;
19 |
20 | import java.util.HashMap;
21 | import java.util.Map;
22 |
23 | final class HecChannel {
24 |     private String id;
25 |     private Map<String, String> chField;
26 |     private IndexerInf indexer;
27 |     private boolean isAvailable;
28 |
29 |     public HecChannel(IndexerInf idx) {
30 |         id = newChannelId();
31 |         indexer = idx;
32 |         isAvailable = true;
33 |     }
34 |
35 |     public IndexerInf getIndexer() {
36 |         return indexer;
37 |     }
38 |
39 |     public String getId() {
40 |         return id;
41 |     }
42 |
43 |     public HecChannel setTracking(boolean trackChannel) {
44 |         if (trackChannel) {
45 |             enableTracking();
46 |         } else {
47 |             disableTracking();
48 |         }
49 |
50 |         return this;
51 |     }
52 |
53 |     public void setId() { id = newChannelId(); }
54 |
55 |     public void setAvailable(boolean isAvailable) { this.isAvailable = isAvailable; }
56 |
57 |     public void send(final EventBatch batch) {
58 |         if (chField != null) {
59 |             batch.addExtraFields(chField);
60 |         }
61 |         indexer.send(batch);
62 |     }
63 |
64 |     // for convenience
65 |     public String executeHttpRequest(final HttpUriRequest req) {
66 |         return indexer.executeHttpRequest(req);
67 |     }
68 |
69 |     public boolean hasBackPressure() { return indexer.hasBackPressure(); }
70 |
71 |     public boolean isNotAvailable() { return !isAvailable; }
72 |
73 |     @Override
74 |     public boolean equals(Object obj) {
75 |         if (obj == null) {
76 |             return false;
77 |         }
78 |
79 |         if (obj instanceof HecChannel) {
80 |             HecChannel ch = (HecChannel) obj;
81 |             return id.equals(ch.getId());
82 |         }
83 |         return false;
84 |     }
85 |
86 |     @Override
87 |     public int hashCode() {
88 |         return id.hashCode();
89 |     }
90 |
91 |     @Override
92 |     public String toString() {
93 |         return id;
94 |     }
95 |
96 |     private HecChannel enableTracking() {
97 |         if (chField == null) {
98 |             chField = new HashMap<>();
99 |             chField.put("hec-channel", id);
100 |         }
101 |         return this;
102 |     }
103 |
104 |     private HecChannel disableTracking() {
105 |         chField = null;
106 |         return this;
107 |     }
108 |
109 |     private static
String newChannelId() { 110 | return java.util.UUID.randomUUID().toString(); 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/HecEmptyEventException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | /** 19 | * HecEmptyEventException is an exception which is triggered during the creation of an Event(JsonEvent or RawEvent) 20 | * when Event is created with an empty String (""). 21 | * 22 | * @version 1.1.0 23 | * @since 1.1.0 24 | */ 25 | public class HecEmptyEventException extends HecException { 26 | private static final long serialVersionUID = 34L; 27 | 28 | public HecEmptyEventException(String message) { 29 | super(message); 30 | } 31 | 32 | public HecEmptyEventException(String message, Throwable cause) { 33 | super(message, cause); 34 | } 35 | } -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/HecException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2018 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | public class HecException extends RuntimeException { 19 | private static final long serialVersionUID = 34L; 20 | 21 | public HecException(String message) { 22 | super(message); 23 | } 24 | 25 | public HecException(String message, Throwable cause) { 26 | super(message, cause); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/HecInf.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
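Per the Javadoc on HecEmptyEventException above (and its sibling HecNullEventException, listed just below), event construction fails fast: creating a JsonEvent or RawEvent from an empty string raises HecEmptyEventException, while missing data raises HecNullEventException, and both extend HecException, so callers can catch narrowly or broadly. A small illustrative sketch:

import com.splunk.hecclient.*;

public final class EventValidationExample {
    public static void main(String[] args) {
        try {
            new JsonEvent("", null); // empty payload is rejected at construction time
        } catch (HecEmptyEventException ex) {
            System.err.println("empty event: " + ex.getMessage());
        } catch (HecException ex) {
            System.err.println("other HEC client error: " + ex.getMessage());
        }
    }
}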
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | public interface HecInf { 19 | void send(final EventBatch batch); 20 | void close(); 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/HecNullEventException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | /** 19 | * HecNullEventException is an exception which is triggered during the creation of an Event(JsonEvent or RawEvent) 20 | * with no Event Data. 21 | * 22 | * @version 1.1.0 23 | * @since 1.1.0 24 | */ 25 | public class HecNullEventException extends HecException { 26 | private static final long serialVersionUID = 34L; 27 | 28 | public HecNullEventException(String message) { 29 | super(message); 30 | } 31 | 32 | public HecNullEventException(String message, Throwable cause) { 33 | super(message, cause); 34 | } 35 | } -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/HecURIBuilder.java: -------------------------------------------------------------------------------- 1 | package com.splunk.hecclient; 2 | 3 | import org.apache.http.Consts; 4 | import org.apache.http.client.utils.URIBuilder; 5 | import org.apache.http.client.utils.URLEncodedUtils; 6 | 7 | import java.net.URI; 8 | import java.net.URISyntaxException; 9 | 10 | public class HecURIBuilder { 11 | public static final String AUTO_EXTRACT_TIMESTAMP_PARAMETER = "auto_extract_timestamp"; 12 | 13 | private final String baseUrl; 14 | private final HecConfig hecConfig; 15 | 16 | public HecURIBuilder(String baseUrl, HecConfig hecConfig) { 17 | this.baseUrl = baseUrl; 18 | this.hecConfig = hecConfig; 19 | } 20 | 21 | public URI getURI(String endpoint) { 22 | try { 23 | URIBuilder uriBuilder = new URIBuilder(baseUrl); 24 | int idx = endpoint.indexOf('?'); 25 | if (idx == -1) { 26 | // json endpoint 27 | uriBuilder = uriBuilder.setPath(endpoint); 28 | } else { 29 | // in case of raw endpoint, the endpoint will be in form "/services/collector/raw?index=xxx&source=xxx" 30 | // extract the path and params via a split on '?' 
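            //   Illustrative expectation: with baseUrl "https://localhost:8088" and
            //   autoExtractTimestamp=true in HecConfig, getURI("/services/collector/raw?index=main")
            //   should yield
            //   https://localhost:8088/services/collector/raw?index=main&auto_extract_timestamp=true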
31 |                 uriBuilder = uriBuilder.setPath(endpoint.substring(0, idx));
32 |                 uriBuilder = uriBuilder.setParameters(URLEncodedUtils.parse(endpoint.substring(idx+1), Consts.UTF_8));
33 |             }
34 |
35 |             if (hecConfig.getAutoExtractTimestamp() != null) {
36 |                 uriBuilder.addParameter(AUTO_EXTRACT_TIMESTAMP_PARAMETER, hecConfig.getAutoExtractTimestamp().toString());
37 |             }
38 |             return uriBuilder.build();
39 |         } catch (URISyntaxException e) {
40 |             throw new RuntimeException(e);
41 |         }
42 |     }
43 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/IndexerInf.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import org.apache.http.Header;
19 | import org.apache.http.client.methods.HttpUriRequest;
20 |
21 | interface IndexerInf {
22 |     boolean send(final EventBatch batch);
23 |     String executeHttpRequest(final HttpUriRequest req);
24 |     boolean hasBackPressure();
25 |     String getBaseUrl();
26 |     Header[] getHeaders();
27 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/JsonEvent.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import com.fasterxml.jackson.annotation.JsonInclude;
19 |
20 | import java.util.HashMap;
21 | import java.util.Map;
22 |
23 | /**
24 |  * JSONEvent is used as the Object to represent Splunk events when the /services/collector/event HEC endpoint is to
25 |  * be used for Splunk ingestion.
26 |  * <p>
27 |  * This class contains overridden methods from Event which will allow adding extra fields to events,
28 |  * retrieving extra fields, converting the JsonEvent object to a String and converting the JsonEvent object into a byte
29 |  * representation.
30 |  * @see Event
31 |  * @version 1.0
32 |  * @since 1.0
33 |  */
34 | @JsonInclude(JsonInclude.Include.NON_NULL)
35 | public final class JsonEvent extends Event {
36 |     private Map<String, String> fields;
37 |
38 |     /**
39 |      * Creates a new json event.
40 |      *
41 |      * @param data Object representation of the event itself without all the extras. Event Data Only
42 |      * @param tied Object representation of the entire Record being constructed into an Event.
43 |      *
44 |      * @since 1.0
45 |      * @see Event
46 |      */
47 |     public JsonEvent(Object data, Object tied) {
48 |         super(data, tied);
49 |     }
50 |
51 |     /**
52 |      * Creates a new json event with default values.
53 |      *
54 |      * @since 1.0
55 |      */
56 |     JsonEvent() {
57 |     }
58 |
59 |     /**
60 |      * ExtraFields consist of custom fields used for enriching events to be bundled in with the base Event. This can
61 |      * be used to categorize certain events, allowing flexible searching on this field after ingestion into Splunk.
62 |      * This differs from the setFields method as it will append any extra fields to the existing ones rather than replacing them.
63 |      *
64 |      * @param extraFields Object representation of the event with associated meta-data.
65 |      * @return Current representation of JsonEvent.
66 |      * @see JsonEvent
67 |      * @since 1.0
68 |      */
69 |     @Override
70 |     public JsonEvent addFields(final Map<String, String> extraFields) {
71 |         if (extraFields == null || extraFields.isEmpty()) {
72 |             return this;
73 |         }
74 |
75 |         if (fields == null) {
76 |             fields = new HashMap<>();
77 |         }
78 |
79 |         fields.putAll(extraFields);
80 |         invalidate();
81 |
82 |         return this;
83 |     }
84 |
85 |     /**
86 |      * ExtraFields consist of custom fields used for enriching events to be bundled in with the base Event. This can
87 |      * be used to categorize certain events, allowing flexible searching on this field after ingestion into Splunk.
88 |      * This differs from the addFields method as it will replace any fields that are currently associated to this object.
89 |      *
90 |      * @param extraFields Object representation of the event with associated meta-data.
91 |      * @return Current representation of JsonEvent.
92 |      * @see JsonEvent
93 |      * @since 1.0
94 |      */
95 |     @Override
96 |     public JsonEvent setFields(final Map<String, String> extraFields) {
97 |         fields = extraFields;
98 |         invalidate();
99 |         return this;
100 |     }
101 |
102 |     /**
103 |      * ExtraFields consist of custom fields used for enriching events to be bundled in with the base Event. This can
104 |      * be used to categorize certain events, allowing flexible searching on this field after ingestion into Splunk.
105 |      *
106 |      * @return Map representation of fields
107 |      * @see Map
108 |      * @since 1.0
109 |      */
110 |     @Override
111 |     public Map<String, String> getFields() {
112 |         return fields;
113 |     }
114 |
115 |     /**
116 |      * Using ObjectMapper the JsonEvent is serialized to a String and returned.
117 |      *
118 |      * @return Serialized String representation of JsonEvent including all variables in superclass Event.
119 |      *
120 |      * @throws HecException
121 |      * @see com.fasterxml.jackson.databind.ObjectMapper
122 |      * @since 1.0
123 |      */
124 |     @Override
125 |     public String toString() {
126 |         try {
127 |             return jsonMapper.writeValueAsString(this);
128 |         } catch (Exception ex) {
129 |             log.error("failed to json serialize JsonEvent", ex);
130 |             throw new HecException("failed to json serialize JsonEvent", ex);
131 |         }
132 |     }
133 |
134 |     /**
135 |      * Checks to ensure the byte representation of the Event has not already been calculated. If so, it will return
136 |      * what is already in variable bytes. Otherwise the ObjectMapper through annotations will serialize the
137 |      * JsonEvent object.
138 |      *
139 |      * @return Serialized byte array representation of JsonEvent including all variables in superclass Event. Will return the
140 |      * value already contained in bytes if it is not null for the Event.
141 |      *
142 |      * @throws HecException
143 |      * @see com.fasterxml.jackson.databind.ObjectMapper
144 |      * @since 1.0
145 |      */
146 |     @Override
147 |     public byte[] getBytes() {
148 |         if (bytes != null) {
149 |             return bytes;
150 |         }
151 |
152 |         try {
153 |             bytes = jsonMapper.writeValueAsBytes(this);
154 |         } catch (Exception ex) {
155 |             log.error("Invalid json event", ex);
156 |             throw new HecException("Failed to json marshal the event", ex);
157 |         }
158 |         return bytes;
159 |     }
160 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/JsonEventBatch.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
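A short enrichment sketch using only the JsonEvent methods documented above; the field name and payload are illustrative:

import com.splunk.hecclient.JsonEvent;

import java.util.HashMap;
import java.util.Map;

public final class JsonEventExample {
    public static void main(String[] args) {
        JsonEvent event = new JsonEvent("{\"status\": 200}", null);
        Map<String, String> extra = new HashMap<>();
        extra.put("kafka_topic", "test-datagen"); // illustrative enrichment field
        event.addFields(extra);                   // appends and invalidates cached bytes
        System.out.println(event);                // Jackson-serialized JSON string
        byte[] wire = event.getBytes();           // computed once, then cached
        System.out.println(wire.length + " bytes");
    }
}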
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import org.apache.commons.lang3.builder.HashCodeBuilder;
19 |
20 | public final class JsonEventBatch extends EventBatch {
21 |     public static final String ENDPOINT = "/services/collector/event";
22 |     public static final String CONTENT_TYPE = "application/json; profile=urn:splunk:event:1.0; charset=utf-8";
23 |
24 |     @Override
25 |     public void add(Event event) {
26 |         if (event instanceof JsonEvent) {
27 |             events.add(event);
28 |             len += event.length();
29 |         } else {
30 |             throw new HecException("only JsonEvent can be added to JsonEventBatch");
31 |         }
32 |     }
33 |
34 |     @Override
35 |     public final String getRestEndpoint() {
36 |         return ENDPOINT;
37 |     }
38 |
39 |     @Override
40 |     public String getContentType() {
41 |         return CONTENT_TYPE;
42 |     }
43 |
44 |     @Override
45 |     public EventBatch createFromThis() {
46 |         return new JsonEventBatch();
47 |     }
48 |
49 |     @Override
50 |     public int hashCode() {
51 |         return new HashCodeBuilder()
52 |             .append(ENDPOINT)
53 |             .toHashCode();
54 |     }
55 |
56 |     @Override
57 |     public boolean equals(Object obj) {
58 |         if (obj instanceof JsonEventBatch) {
59 |             // all JsonEventBatch instances target the same endpoint, consistent with hashCode()
60 |             return true;
61 |         }
62 |         return false;
63 |     }
64 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/LoadBalancerInf.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.splunk.hecclient;
17 |
18 | import org.apache.http.impl.client.CloseableHttpClient;
19 |
20 | public interface LoadBalancerInf {
21 |     void add(String indexerUrl, HecChannel channel);
22 |     void remove(HecChannel channel);
23 |     void send(final EventBatch batch);
24 |     int size();
25 |     void setHttpClient(CloseableHttpClient httpClient);
26 |     void close();
27 | }
--------------------------------------------------------------------------------
/src/main/java/com/splunk/hecclient/Poller.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2017 Splunk, Inc..
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
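JsonEventBatch above enforces endpoint homogeneity: add() accepts only JsonEvent, since the whole batch is posted to /services/collector/event. A sketch of that contract, assuming RawEvent mirrors JsonEvent's (data, tied) constructor:

import com.splunk.hecclient.*;

public final class BatchTypeExample {
    public static void main(String[] args) {
        EventBatch batch = new JsonEventBatch();
        batch.add(new JsonEvent("{\"status\": 200}", null)); // accepted
        try {
            batch.add(new RawEvent("1,2,3", null)); // assumed ctor; any non-JsonEvent is rejected
        } catch (HecException ex) {
            System.err.println(ex.getMessage()); // "only JsonEvent can be added to JsonEventBatch"
        }
    }
}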
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | public interface Poller { 19 | void start(); 20 | void stop(); 21 | void add(HecChannel channel, EventBatch batch, String response); 22 | void fail(HecChannel channel, EventBatch batch, Exception ex); 23 | void stickySessionHandler(HecChannel channel); 24 | void setStickySessionToTrue(); 25 | // minimum load channel 26 | HecChannel getMinLoadChannel(); 27 | long getTotalOutstandingEventBatches(); 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/PollerCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import java.util.List; 19 | 20 | // Implementations of PollerCallback must be thread-safe, since the callbacks 21 | // may be invoked from multiple threads 22 | public interface PollerCallback { 23 | void onEventFailure(final List<EventBatch> failure, Exception ex); 24 | void onEventCommitted(final List<EventBatch> committed); 25 | } 26 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/PostResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
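[Editor's note] A hedged sketch, not part of the repository, of a thread-safe PollerCallback as required by the comment above; AtomicLong keeps the counters safe when the poller invokes callbacks from several threads. The PrintIt example class further down in this dump follows the same pattern.

    package com.splunk.hecclient;

    import java.util.List;
    import java.util.concurrent.atomic.AtomicLong;

    // Sketch only: lock-free counters make the callback safe under concurrent use.
    public class CountingCallbackSketch implements PollerCallback {
        private final AtomicLong committed = new AtomicLong();
        private final AtomicLong failed = new AtomicLong();

        @Override
        public void onEventFailure(List<EventBatch> batches, Exception ex) {
            failed.addAndGet(batches.size());
        }

        @Override
        public void onEventCommitted(List<EventBatch> batches) {
            committed.addAndGet(batches.size());
        }
    }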
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import com.fasterxml.jackson.annotation.JsonIgnoreProperties; 19 | 20 | @JsonIgnoreProperties(ignoreUnknown = true) 21 | final class PostResponse { 22 | // {"text":"Success","code":0,"ackId":7} 23 | private String text; 24 | private int code = -1; 25 | private long ackId = -1; 26 | 27 | PostResponse() { 28 | } 29 | 30 | public boolean isSucceed() { 31 | return code == 0; 32 | } 33 | 34 | public String getText() { 35 | return text; 36 | } 37 | 38 | public long getAckId() { 39 | return ackId; 40 | } 41 | 42 | public PostResponse setCode(int c) { 43 | code = c; 44 | return this; 45 | } 46 | 47 | public PostResponse setText(String t) { 48 | text = t; 49 | return this; 50 | } 51 | 52 | public PostResponse setAckId(long id) { 53 | ackId = id; 54 | return this; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/RawEvent.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import java.io.UnsupportedEncodingException; 19 | import java.nio.charset.StandardCharsets; 20 | 21 | /** 22 | * RawEvent is used as the object to represent Splunk events when the /services/collector/raw HEC endpoint is to be 23 | * used for Splunk ingestion. 24 | *

25 | * This class contains overridden methods from Event which will allow serialization of the RawEvent object into a 26 | * String or byte array. 27 | * @see Event 28 | * @version 1.0 29 | * @since 1.0 30 | */ 31 | public final class RawEvent extends Event { 32 | public RawEvent(Object data, Object tied) { 33 | super(data, tied); 34 | // by default disable carriage return line breaker 35 | setLineBreaker(""); 36 | } 37 | 38 | /** 39 | * Checks to see if a byte representation of RawEvent has already been calculated. If so this value is returned. 40 | * Next, a series of type checks determines the format of the data that was used to create the raw 41 | * event. If it is a String, it is converted to bytes. If it is already a byte array, it is returned as is. 42 | * Otherwise the data is assumed to be a JSON object and is serialized to bytes. 43 | * 44 | * @return Serialized byte array representation of RawEvent including all variables in superclass Event. Will return the 45 | * value already contained in bytes if it is not null for the Event. 46 | * 47 | * @throws HecException 48 | * @see com.fasterxml.jackson.databind.ObjectMapper 49 | * @since 1.0 50 | */ 51 | 52 | @Override 53 | public byte[] getBytes() { 54 | if (bytes != null) { 55 | return bytes; 56 | } 57 | 58 | if (event instanceof String) { 59 | String s = (String) event; 60 | bytes = s.getBytes(StandardCharsets.UTF_8); 61 | } else if (event instanceof byte[]) { 62 | bytes = (byte[]) event; 63 | } else { 64 | // JSON object 65 | try { 66 | bytes = jsonMapper.writeValueAsBytes(event); 67 | } catch (Exception ex) { 68 | log.error("Invalid json data", ex); 69 | throw new HecException("Failed to json marshal the data", ex); 70 | } 71 | } 72 | return bytes; 73 | } 74 | 75 | /** 76 | * Sets the value of the line breaker. The line breaker is used to add a separator value that is streamed along 77 | * with the event into Splunk. This line breaker value can then be used in conjunction with the Splunk configurable 78 | * LINE_BREAKER setting to 79 | * break events. 80 | * 81 | * @return Current representation of RawEvent. 82 | * 83 | * @since 1.0 84 | */ 85 | public final Event setLineBreaker(final String breaker) { 86 | if (breaker != null) { 87 | this.lineBreaker = breaker; 88 | } 89 | return this; 90 | } 91 | 92 | /** 93 | * Raw event is serialized to a String and returned. 94 | * 95 | * @return String representation of RawEvent including all variables in superclass Event. 96 | * 97 | * @throws HecException 98 | * @since 1.0 99 | */ 100 | @Override 101 | public String toString() { 102 | return new String(getBytes(), StandardCharsets.UTF_8); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/RawEventBatch.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
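[Editor's note] A hedged sketch, not part of the repository: raw events carrying an explicit line breaker, grouped under batch-level metadata via the Builder defined in RawEventBatch just below. All names come from this dump; the index and sourcetype values are illustrative.

    package com.splunk.hecclient;

    // Sketch only: setLineBreaker re-enables the separator (disabled by default in
    // the constructor) so Splunk's LINE_BREAKER setting can split the raw stream.
    public class RawEventSketch {
        public static void main(String[] args) {
            RawEvent evt = new RawEvent("my raw message", null);
            evt.setLineBreaker("\n");
            byte[] payload = evt.getBytes();   // UTF-8 bytes of the String event
            System.out.println(payload.length);

            // Metadata set on the builder applies to the whole batch.
            RawEventBatch batch = RawEventBatch.factory()
                    .setIndex("main")
                    .setSourcetype("example-sourcetype")
                    .build();
            batch.add(evt);
            // -> /services/collector/raw?index=main&sourcetype=example-sourcetype
            System.out.println(batch.getRestEndpoint());
        }
    }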
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.commons.lang3.builder.EqualsBuilder; 19 | import org.apache.commons.lang3.builder.HashCodeBuilder; 20 | import org.apache.http.client.utils.URIBuilder; 21 | 22 | public final class RawEventBatch extends EventBatch { 23 | public static final String ENDPOINT = "/services/collector/raw"; 24 | public static final String CONTENT_TYPE = "text/plain; profile=urn:splunk:event:1.0; charset=utf-8"; 25 | 26 | private String index; 27 | private String source; 28 | private String sourcetype; 29 | private String host; 30 | private long time = -1; 31 | 32 | // index, source, etc. metadata applies to the whole raw batch 33 | public RawEventBatch(String index, String source, String sourcetype, String host, long time) { 34 | this.index = index; 35 | this.source = source; 36 | this.sourcetype = sourcetype; 37 | this.host = host; 38 | this.time = time; 39 | } 40 | 41 | public String getIndex() { 42 | return index; 43 | } 44 | 45 | public String getSource() { 46 | return source; 47 | } 48 | 49 | public String getSourcetype() { 50 | return sourcetype; 51 | } 52 | 53 | public String getHost() { 54 | return host; 55 | } 56 | 57 | public long getTime() { 58 | return time; 59 | } 60 | 61 | public static Builder factory() { 62 | return new Builder(); 63 | } 64 | 65 | public static final class Builder { 66 | private String index; 67 | private String source; 68 | private String sourcetype; 69 | private String host; 70 | private long time = -1; 71 | 72 | public Builder setIndex(final String index) { 73 | this.index = index; 74 | return this; 75 | } 76 | 77 | public Builder setSource(final String source) { 78 | this.source = source; 79 | return this; 80 | } 81 | 82 | public Builder setSourcetype(final String sourcetype) { 83 | this.sourcetype = sourcetype; 84 | return this; 85 | } 86 | 87 | public Builder setHost(final String host) { 88 | this.host = host; 89 | return this; 90 | } 91 | 92 | public Builder setTime(final long time) { 93 | this.time = time; 94 | return this; 95 | } 96 | 97 | public RawEventBatch build() { 98 | return new RawEventBatch(index, source, sourcetype, host, time); 99 | } 100 | } 101 | 102 | @Override 103 | public void add(Event event) throws HecException { 104 | if (event instanceof RawEvent) { 105 | events.add(event); 106 | len += event.length(); 107 | } else { 108 | throw new HecException("only a RawEvent can be added to a RawEventBatch"); 109 | } 110 | } 111 | 112 | @Override 113 | public final String getRestEndpoint() { 114 | return ENDPOINT + getMetadataParams(); 115 | } 116 | 117 | @Override 118 | public String getContentType() { 119 | return CONTENT_TYPE; 120 | } 121 | 122 | @Override 123 | public EventBatch createFromThis() { 124 | return new Builder() 125 | .setIndex(index) 126 | .setSource(source) 127 | .setSourcetype(sourcetype) 128 | .setHost(host) 129 | .build(); 130 | } 131 | 132 | private String getMetadataParams() { 133 | URIBuilder params = new URIBuilder(); 134 | putIfPresent(index, "index", params); 135 | putIfPresent(sourcetype, "sourcetype", params); 136 | putIfPresent(source, "source", params); 137 | putIfPresent(host, "host", params); 138 | 139 | if (time != -1) { 140 | params.addParameter("time", String.valueOf(time)); 141 | } 142 | 143 | return params.toString(); 144 | } 145 | 146 | private static void putIfPresent(String val, String tag, URIBuilder params) { 147 | if (val != null && !val.isEmpty()) {
148 | params.addParameter(tag, val); 149 | } 150 | } 151 | 152 | @Override 153 | public int hashCode() { 154 | return new HashCodeBuilder() 155 | .append(index) 156 | .append(sourcetype) 157 | .append(source) 158 | .append(host) 159 | .toHashCode(); 160 | } 161 | 162 | @Override 163 | public boolean equals(Object obj) { 164 | if (obj instanceof RawEventBatch) { 165 | final RawEventBatch other = (RawEventBatch) obj; 166 | return new EqualsBuilder() 167 | .append(index, other.index) 168 | .append(sourcetype, other.sourcetype) 169 | .append(source, other.source) 170 | .append(host, other.host) 171 | .isEquals(); 172 | } 173 | return false; 174 | } 175 | } -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/ResponsePoller.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import org.slf4j.Logger; 20 | import org.slf4j.LoggerFactory; 21 | 22 | import java.util.Arrays; 23 | 24 | public final class ResponsePoller implements Poller { 25 | private static final Logger log = LoggerFactory.getLogger(ResponsePoller.class); 26 | private static final ObjectMapper jsonMapper = new ObjectMapper(); 27 | 28 | private PollerCallback callback; 29 | 30 | public ResponsePoller(PollerCallback callback) { 31 | this.callback = callback; 32 | } 33 | 34 | @Override 35 | public void stickySessionHandler(HecChannel channel) { 36 | // Only required while acknowledgement=true 37 | } 38 | 39 | @Override 40 | public void start() { 41 | // Only required while acknowledgement=true 42 | } 43 | 44 | @Override 45 | public void stop() { 46 | // Only required while acknowledgement=true 47 | } 48 | 49 | @Override 50 | public void fail(HecChannel channel, EventBatch batch, Exception ex) { 51 | batch.fail(); 52 | if (callback != null) { 53 | callback.onEventFailure(Arrays.asList(batch), ex); 54 | } 55 | } 56 | 57 | @Override 58 | public long getTotalOutstandingEventBatches() { 59 | return 0; 60 | } 61 | 62 | @Override 63 | public HecChannel getMinLoadChannel() { 64 | return null; 65 | } 66 | 67 | @Override 68 | public void add(HecChannel channel, EventBatch batch, String resp) { 69 | try { 70 | PostResponse response = jsonMapper.readValue(resp, PostResponse.class); 71 | if (!response.isSucceed()) { 72 | fail(channel, batch, new HecException(response.getText())); 73 | return; 74 | } 75 | if (response.getText().equals("Invalid data format")) { 76 | log.warn("Invalid Splunk HEC data format. Ignoring events. 
channel={} index={} events={}", channel, channel.getIndexer(), batch.toString()); 77 | } 78 | } catch (Exception ex) { 79 | log.error("failed to parse response={}", resp, ex); 80 | fail(channel, batch, ex); 81 | return; 82 | } 83 | 84 | batch.commit(); 85 | if (callback != null) { 86 | callback.onEventCommitted(Arrays.asList(batch)); 87 | } 88 | } 89 | 90 | public void setStickySessionToTrue() {} 91 | } 92 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/examples/HecExample.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient.examples; 17 | 18 | import com.splunk.hecclient.*; 19 | import org.apache.http.impl.client.CloseableHttpClient; 20 | import org.slf4j.Logger; 21 | import org.slf4j.LoggerFactory; 22 | 23 | import java.util.Arrays; 24 | import java.util.List; 25 | import java.util.concurrent.TimeUnit; 26 | 27 | public final class HecExample { 28 | public static void main(String[] args) { 29 | Logger log = LoggerFactory.getLogger(HecExample.class); 30 | 31 | List<String> uris = Arrays.asList("https://localhost:8088"); 32 | String tokenWithAck = "936AF219-CF36-4C8C-AA0C-FD9793A0F4D4"; 33 | HecConfig config = new HecConfig(uris, tokenWithAck); 34 | config.setAckPollInterval(10) 35 | .setEventBatchTimeout(60) 36 | .setDisableSSLCertVerification(true) 37 | .setHttpKeepAlive(true) 38 | .setMaxHttpConnectionPerChannel(4); 39 | 40 | CloseableHttpClient httpClient = Hec.createHttpClient(config); 41 | Poller poller = Hec.createPoller(config, new PrintIt()); 42 | 43 | // Json 44 | int n = 100000; 45 | int m = 100; 46 | 47 | Hec hec = new Hec(config, httpClient, poller, new LoadBalancer(config, httpClient)); 48 | Thread jsonThr = new Thread(new Runnable() { 49 | public void run() { 50 | for (int j = 0; j < n; j++) { 51 | EventBatch batch = new JsonEventBatch(); 52 | for (int i = 0; i < m; i++) { 53 | Event evt = new JsonEvent("my message: " + (m * j + i), null); 54 | evt.setSourcetype("test-json-event"); 55 | batch.add(evt); 56 | } 57 | log.info("json batch: " + j); 58 | hec.send(batch); 59 | } 60 | } 61 | }); 62 | jsonThr.start(); 63 | 64 | // raw 65 | Hec rawHec = new Hec(config, httpClient, poller, new LoadBalancer(config, httpClient)); 66 | Thread rawThr = new Thread(new Runnable() { 67 | public void run() { 68 | for (int j = 0; j < n; j++) { 69 | EventBatch batch = new RawEventBatch("main", null, "test-raw-event", null, -1); 70 | for (int i = 0; i < m; i++) { 71 | Event evt = new RawEvent("my raw message: " + (m * j + i) + "\n", null); 72 | evt.setSourcetype("test-raw-event"); 73 | batch.add(evt); 74 | } 75 | log.info("raw batch: " + j); 76 | rawHec.send(batch); 77 | } 78 | } 79 | }); 80 | rawThr.start(); 81 | 82 | try { 83 | TimeUnit.SECONDS.sleep(6000); 84 | } catch (InterruptedException ex) { 85 | } 86 | 87 | hec.close(); 88 |
rawHec.close(); 89 | log.info("Done"); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/examples/HecPerfConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient.examples; 17 | 18 | import com.splunk.hecclient.HecConfig; 19 | 20 | public final class HecPerfConfig { 21 | private HecConfig config; 22 | private int concurrency; 23 | private int clientPoolSize; 24 | private int iterations; 25 | 26 | public HecPerfConfig(HecConfig config, int concurrency, int clientPoolSize, int iterations) { 27 | this.config = config; 28 | this.concurrency = concurrency; 29 | this.clientPoolSize = clientPoolSize; 30 | this.iterations = iterations; 31 | } 32 | 33 | public HecConfig getHecClientConfig() { 34 | return config; 35 | } 36 | 37 | public int getConcurrency() { 38 | return concurrency; 39 | } 40 | 41 | public int getClientPoolSize() { 42 | return clientPoolSize; 43 | } 44 | 45 | public int getIterations() { 46 | return iterations; 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/examples/PrintIt.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
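[Editor's note] ResponsePoller.add() above deserializes each HEC reply into a PostResponse. A hedged sketch of that parse step, not part of the repository; it assumes same-package access, since PostResponse is package-private, and uses the reply shape documented in PostResponse itself.

    package com.splunk.hecclient;

    import com.fasterxml.jackson.databind.ObjectMapper;

    // Sketch only: Jackson binds the HEC JSON reply onto PostResponse; unknown
    // fields are ignored thanks to @JsonIgnoreProperties(ignoreUnknown = true).
    public class PostResponseParseSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            PostResponse resp = mapper.readValue(
                "{\"text\":\"Success\",\"code\":0,\"ackId\":7}", PostResponse.class);
            System.out.println(resp.isSucceed());  // true (code == 0)
            System.out.println(resp.getAckId());   // 7
        }
    }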
15 | */ 16 | package com.splunk.hecclient.examples; 17 | 18 | import com.splunk.hecclient.EventBatch; 19 | import com.splunk.hecclient.PollerCallback; 20 | 21 | import java.util.List; 22 | import java.util.concurrent.atomic.AtomicLong; 23 | 24 | public final class PrintIt implements PollerCallback { 25 | private AtomicLong eventsFailed = new AtomicLong(0); 26 | private AtomicLong events = new AtomicLong(0); 27 | 28 | @Override 29 | public void onEventFailure(List<EventBatch> batches, Exception ex) { 30 | eventsFailed.addAndGet(batches.size()); 31 | System.out.println("Failed: " + eventsFailed.get()); 32 | } 33 | 34 | @Override 35 | public void onEventCommitted(List<EventBatch> batches) { 36 | events.addAndGet(batches.size()); 37 | System.out.println("committed: " + events.get()); 38 | } 39 | 40 | public long getTotalEventsHandled() { 41 | return eventsFailed.get() + events.get(); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/hecclient/package-info.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | /** 18 | * The hecclient lib is an HTTP Event Collector (HEC) library for Splunk which mainly covers the following features: 19 | * 1. Post events to the /event HEC endpoint 20 | * a. When the HEC token has "useAck = true" enabled, the library will do ACK polling and call callbacks provided by clients for acknowledged or failed events. 21 | * b. When the HEC token has "useAck = false", the library acts as a plain HTTP client; however, it still calls callbacks provided by clients for POSTed or failed events. 22 | * c. It supports source, sourcetype, index, etc. metadata overrides by clients. 23 | * d. It supports event data enrichment. 24 | * 2. Post events to the /raw HEC endpoint 25 | * a. When the HEC token has "useAck = true" enabled, the library will do ACK polling and call callbacks provided by clients for acknowledged or failed events. 26 | * b. When the HEC token has "useAck = false", the library acts as a plain HTTP client; however, it still calls callbacks provided by clients for POSTed or failed events. 27 | * c. It supports source, sourcetype, index, etc. metadata overrides by clients. 28 | * d. It doesn't support event data enrichment in this mode. 29 | * 3. Concurrent HEC client support 30 | * 4. HTTP connection pooling and reuse 31 | * 5.
Cookies, sticky sessions, etc. are reused automatically together with pooled HTTP connections 32 | */ 33 | package com.splunk.hecclient; 34 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/kafka/connect/AbstractClientWrapper.java: -------------------------------------------------------------------------------- 1 | package com.splunk.kafka.connect; 2 | 3 | import org.apache.http.impl.client.CloseableHttpClient; 4 | 5 | import com.splunk.hecclient.HecConfig; 6 | 7 | public abstract class AbstractClientWrapper { 8 | abstract CloseableHttpClient getClient(HecConfig config); 9 | } 10 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/kafka/connect/HecClientWrapper.java: -------------------------------------------------------------------------------- 1 | package com.splunk.kafka.connect; 2 | 3 | import org.apache.http.impl.client.CloseableHttpClient; 4 | 5 | import com.splunk.hecclient.Hec; 6 | import com.splunk.hecclient.HecConfig; 7 | 8 | public class HecClientWrapper extends AbstractClientWrapper { 9 | 10 | @Override 11 | CloseableHttpClient getClient(HecConfig config) { 12 | return Hec.createHttpClient(config); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/kafka/connect/JacksonStructModule.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
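[Editor's note] AbstractClientWrapper exists so that HTTP client construction can be swapped out, which is what makes the connector testable. A hedged sketch, not part of the repository, of a test double that hands back a canned client (for example the CloseableHttpClientMock defined in the test sources later in this dump):

    package com.splunk.kafka.connect;

    import org.apache.http.impl.client.CloseableHttpClient;

    import com.splunk.hecclient.HecConfig;

    // Sketch only: ignores the config and returns a pre-built client, so tests can
    // inject a mock in place of Hec.createHttpClient(config).
    public class FixedClientWrapperSketch extends AbstractClientWrapper {
        private final CloseableHttpClient client;

        FixedClientWrapperSketch(CloseableHttpClient client) {
            this.client = client;
        }

        @Override
        CloseableHttpClient getClient(HecConfig config) {
            return client;  // same client regardless of config
        }
    }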
15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import com.fasterxml.jackson.core.JsonGenerator; 19 | import com.fasterxml.jackson.databind.JsonSerializer; 20 | import com.fasterxml.jackson.databind.SerializerProvider; 21 | import com.fasterxml.jackson.databind.module.SimpleModule; 22 | import org.apache.kafka.connect.data.Field; 23 | import org.apache.kafka.connect.data.Struct; 24 | 25 | import java.io.IOException; 26 | import java.util.LinkedHashMap; 27 | import java.util.Map; 28 | 29 | 30 | public class JacksonStructModule extends SimpleModule { 31 | private static final long serialVersionUID = 123431242; 32 | 33 | public JacksonStructModule() { 34 | addSerializer(Struct.class, new StructSerializer()); 35 | } 36 | 37 | static class StructSerializer extends JsonSerializer<Struct> { 38 | @Override 39 | public void serialize(Struct struct, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException { 40 | final Map<String, Object> result = new LinkedHashMap<>(struct.schema().fields().size()); 41 | for (Field field : struct.schema().fields()) { 42 | result.put(field.name(), struct.get(field)); 43 | } 44 | jsonGenerator.writeObject(result); 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/com/splunk/kafka/connect/SplunkSinkRecord.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2018 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import org.apache.commons.lang3.builder.HashCodeBuilder; 19 | import org.apache.commons.lang3.builder.EqualsBuilder; 20 | import org.apache.kafka.connect.header.Header; 21 | import org.apache.kafka.connect.header.Headers; 22 | import org.apache.kafka.connect.sink.SinkRecord; 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | /** 27 | * SplunkSinkRecord provides helper functionality to enable Header support for Splunk Connect for Kafka, namely 28 | * header introspection and comparison. 29 | *
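[Editor's note] A hedged sketch, not part of the repository: registering JacksonStructModule on an ObjectMapper so Kafka Connect Struct values serialize as plain JSON objects keyed by field name, as the serializer above produces. The schema and field values are illustrative.

    package com.splunk.kafka.connect;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;
    import org.apache.kafka.connect.data.Struct;

    // Sketch only: without the module, Jackson would serialize Struct's bean
    // properties; with it, the Struct becomes {"name":"kafka"}.
    public class StructModuleSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            mapper.registerModule(new JacksonStructModule());

            Schema schema = SchemaBuilder.struct().field("name", Schema.STRING_SCHEMA).build();
            Struct struct = new Struct(schema).put("name", "kafka");
            System.out.println(mapper.writeValueAsString(struct)); // {"name":"kafka"}
        }
    }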

30 | * 31 | * @version 1.1.0 32 | * @since 1.1.0 33 | */ 34 | public class SplunkSinkRecord { 35 | private static final Logger log = LoggerFactory.getLogger(SplunkSinkRecord.class); 36 | Headers headers; 37 | SplunkSinkConnectorConfig connectorConfig; 38 | String splunkHeaderIndex = ""; 39 | String splunkHeaderHost = ""; 40 | String splunkHeaderSource = ""; 41 | String splunkHeaderSourcetype = ""; 42 | 43 | public SplunkSinkRecord() {} 44 | 45 | /** 46 | * Creates a new Kafka header utility object. Takes a Kafka SinkRecord and the Splunk Sink Connector configuration 47 | * and creates the object based on the Headers included with the Kafka record. 48 | * 49 | * @param record Kafka SinkRecord to be introspected and headers retrieved from. 50 | * @param connectorConfig Splunk Connector configuration used to determine headers of importance 51 | * @version 1.1.0 52 | * @since 1.1.0 53 | */ 54 | public SplunkSinkRecord(SinkRecord record, SplunkSinkConnectorConfig connectorConfig) { 55 | this.connectorConfig = connectorConfig; 56 | this.headers = record.headers(); 57 | if(this.headers != null) { 58 | setMetadataValues(); 59 | } 60 | } 61 | 62 | /** 63 | * compareRecordHeaders compares a SinkRecord's header values against the values that have already populated this 64 | * Kafka header utility object. This is used when batching events with the same metadata values while using the /raw 65 | * endpoint in Splunk. 66 | * 67 | * @param record Kafka SinkRecord to be introspected and headers retrieved from. 68 | * @version 1.1.0 69 | * @since 1.1.0 70 | */ 71 | protected boolean compareRecordHeaders(SinkRecord record) { 72 | headers = record.headers(); 73 | 74 | Header indexHeader = headers.lastWithName(connectorConfig.headerIndex); 75 | Header hostHeader = headers.lastWithName(connectorConfig.headerHost); 76 | Header sourceHeader = headers.lastWithName(connectorConfig.headerSource); 77 | Header sourcetypeHeader = headers.lastWithName(connectorConfig.headerSourcetype); 78 | 79 | String index = ""; 80 | String host = ""; 81 | String source = ""; 82 | String sourcetype = ""; 83 | 84 | if(indexHeader != null) { 85 | index = indexHeader.value().toString(); 86 | } 87 | if(hostHeader != null) { 88 | host = hostHeader.value().toString(); 89 | } 90 | if(sourceHeader != null) { 91 | source = sourceHeader.value().toString(); 92 | } 93 | if(sourcetypeHeader != null) { 94 | sourcetype = sourcetypeHeader.value().toString(); 95 | } 96 | 97 | return splunkHeaderIndex.equals(index) && splunkHeaderHost.equals(host) && 98 | splunkHeaderSource.equals(source) && splunkHeaderSourcetype.equals(sourcetype); 99 | } 100 | 101 | private void setMetadataValues() { 102 | Header indexHeader = this.headers.lastWithName(connectorConfig.headerIndex); 103 | Header hostHeader = this.headers.lastWithName(connectorConfig.headerHost); 104 | Header sourceHeader = this.headers.lastWithName(connectorConfig.headerSource); 105 | Header sourcetypeHeader = this.headers.lastWithName(connectorConfig.headerSourcetype); 106 | 107 | if(indexHeader != null) { 108 | splunkHeaderIndex = indexHeader.value().toString(); 109 | } 110 | if(hostHeader != null) { 111 | splunkHeaderHost = hostHeader.value().toString(); 112 | } 113 | if(sourceHeader != null) { 114 | splunkHeaderSource = sourceHeader.value().toString(); 115 | } 116 | if(sourcetypeHeader != null) { 117 | splunkHeaderSourcetype = sourcetypeHeader.value().toString(); 118 | } 119 | } 120 | 121 | public String id() { 122 | String separator = "$$$"; 123 | return new StringBuilder() 124 |
.append(splunkHeaderIndex) 125 | .append(separator) 126 | .append(splunkHeaderHost) 127 | .append(separator) 128 | .append(splunkHeaderSource) 129 | .append(separator) 130 | .append(splunkHeaderSourcetype) 131 | .toString(); 132 | } 133 | 134 | @Override 135 | public int hashCode() { 136 | return new HashCodeBuilder() 137 | .append(splunkHeaderIndex) 138 | .append(splunkHeaderHost) 139 | .append(splunkHeaderSource) 140 | .append(splunkHeaderSourcetype) 141 | .toHashCode(); 142 | } 143 | 144 | @Override 145 | public boolean equals(Object obj) { 146 | if (obj instanceof SplunkSinkRecord) { 147 | final SplunkSinkRecord other = (SplunkSinkRecord) obj; 148 | return id().equals(other.id()); 149 | } 150 | return false; 151 | } 152 | 153 | public Headers getHeaders() { 154 | return headers; 155 | } 156 | 157 | public String getSplunkHeaderIndex() { 158 | return splunkHeaderIndex; 159 | } 160 | 161 | public String getSplunkHeaderHost() { 162 | return splunkHeaderHost; 163 | } 164 | 165 | public String getSplunkHeaderSource() { 166 | return splunkHeaderSource; 167 | } 168 | 169 | public String getSplunkHeaderSourcetype() { 170 | return splunkHeaderSourcetype; 171 | } 172 | } -------------------------------------------------------------------------------- /src/main/java/com/splunk/kafka/connect/VersionUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import java.io.BufferedReader; 19 | import java.io.IOException; 20 | import java.io.InputStream; 21 | import java.io.InputStreamReader; 22 | import java.util.ArrayList; 23 | import java.util.List; 24 | 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | 28 | public final class VersionUtils { 29 | private static final String VERSION_PROPERTIES_FILE = "/version.properties"; 30 | private static final String DEFAULT_VERSION = "dev"; 31 | private static final String DEFAULT_APP_NAME = "Splunk Connect for Kafka"; 32 | private static final Logger log = LoggerFactory.getLogger(VersionUtils.class); 33 | 34 | /** 35 | * Returns the version string that is set in the version.properties 36 | * resource file for this application 37 | * 38 | * @return version string 39 | */ 40 | public static String getVersionString() { 41 | List<String> properties = readVersionProperties(); 42 | 43 | return getVersionFromProperties(properties); 44 | } 45 | 46 | /** 47 | * Returns the application name as a string 48 | * 49 | * @return app name string 50 | */ 51 | public static String getAppName() { 52 | return DEFAULT_APP_NAME; 53 | } 54 | 55 | /** 56 | * Returns the version string extracted from the list of properties. 57 | * If no version string exists, returns the default version.
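[Editor's note] A hedged sketch, not part of the repository: because records whose headers carry identical Splunk metadata produce equal id() strings, id() can serve as a grouping key when assembling /raw batches. The groupByMetadata helper, and the records and config inputs, are hypothetical illustrations, not connector code.

    package com.splunk.kafka.connect;

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.kafka.connect.sink.SinkRecord;

    // Sketch only: buckets records by the "index$$$host$$$source$$$sourcetype" key.
    public class HeaderGroupingSketch {
        static Map<String, List<SinkRecord>> groupByMetadata(
                Collection<SinkRecord> records, SplunkSinkConnectorConfig config) {
            Map<String, List<SinkRecord>> groups = new HashMap<>();
            for (SinkRecord record : records) {
                String key = new SplunkSinkRecord(record, config).id();
                groups.computeIfAbsent(key, k -> new ArrayList<>()).add(record);
            }
            return groups;
        }
    }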
58 | * 59 | * @param properties list of git properties 60 | * @return the version string 61 | */ 62 | public static String getVersionFromProperties(List<String> properties) { 63 | String versionStr = DEFAULT_VERSION; 64 | 65 | if (properties == null) { 66 | return versionStr; 67 | } 68 | 69 | for (String item : properties) { 70 | String[] res = item.split("gitversion="); 71 | if (res.length > 1) { 72 | versionStr = res[1].trim(); 73 | log.debug("found git version string={} in version.properties file", versionStr); 74 | break; 75 | } 76 | } 77 | 78 | return versionStr; 79 | } 80 | 81 | /** 82 | * Returns a list of properties by reading version properties file 83 | * 84 | * @return list of properties 85 | */ 86 | public static List<String> readVersionProperties() { 87 | return readResourceFile(VERSION_PROPERTIES_FILE); 88 | } 89 | 90 | /** 91 | * Returns a list of properties by reading given resource file 92 | * Each line in the file is an item of the list 93 | * 94 | * @param resourceFileName name of the resource file 95 | * @return list of properties 96 | */ 97 | public static List<String> readResourceFile(String resourceFileName) { 98 | List<String> properties = new ArrayList<>(); 99 | 100 | try { 101 | InputStream in = VersionUtils.class.getResourceAsStream(resourceFileName); 102 | 103 | // if the resource file can't be found, return an empty list 104 | if (in == null) { 105 | return properties; 106 | } 107 | 108 | BufferedReader reader = new BufferedReader(new InputStreamReader(in)); 109 | String line; 110 | 111 | while ((line = reader.readLine()) != null) { 112 | properties.add(line); 113 | } 114 | 115 | // close the BufferedReader when we're done 116 | reader.close(); 117 | } catch (IOException ex) { 118 | log.error("Failed to read properties file {}.", VERSION_PROPERTIES_FILE, ex); 119 | return properties; 120 | } 121 | 122 | return properties; 123 | } 124 | 125 | } -------------------------------------------------------------------------------- /src/main/resources/version.properties: -------------------------------------------------------------------------------- 1 | githash= 2 | gitbranch=release/2.2.x 3 | gitversion=v2.2.2 4 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/CloseableHttpClientMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
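[Editor's note] A small worked example, not part of the repository, of the gitversion parsing shown above; the property values are illustrative. Lines without a "gitversion=" prefix split into a single-element array and are skipped.

    package com.splunk.kafka.connect;

    import java.util.Arrays;

    // Sketch only: exercises getVersionFromProperties on sample property lines.
    public class VersionParseSketch {
        public static void main(String[] args) {
            System.out.println(VersionUtils.getVersionFromProperties(
                Arrays.asList("githash=@1a2b3c", "gitversion=v2.2.2"))); // v2.2.2
            System.out.println(VersionUtils.getVersionFromProperties(null)); // dev
        }
    }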
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.HttpHost; 19 | import org.apache.http.HttpRequest; 20 | import org.apache.http.client.methods.CloseableHttpResponse; 21 | import org.apache.http.conn.*; 22 | import org.apache.http.impl.client.CloseableHttpClient; 23 | import org.apache.http.params.*; 24 | import org.apache.http.protocol.HttpContext; 25 | 26 | import java.io.IOException; 27 | 28 | @SuppressWarnings("deprecation") 29 | public class CloseableHttpClientMock extends CloseableHttpClient { 30 | public static final String SUCCESS = "{\"text\":\"Success\",\"code\":0,\"ackId\":2}"; 31 | public static final String SERVER_BUSY = "{\"text\":\"Server busy\",\"code\":1}"; 32 | public static final String NO_DATA_ERROR = "{\"text\":\"No data\",\"code\":5}"; 33 | public static final String INVALID_DATA_FORMAT = "{\"text\":\"Invalid data format\",\"code\":6}"; 34 | public static final String INVALID_TOKEN = "{\"text\":\"Invalid token\",\"code\":4}"; 35 | public static final String INVALID_INDEX = "{\"text\":\"Incorrect index\",\"code\":4,\"invalid-event-number\":1}"; 36 | public static final String EXCEPTION = "exception"; 37 | 38 | private String resp = ""; 39 | private boolean throwOnClose = false; 40 | private boolean throwOnGetContent = false; 41 | 42 | protected CloseableHttpResponse doExecute(HttpHost target, HttpRequest request, 43 | HttpContext context) throws IOException { 44 | if (resp.equals(EXCEPTION)) { 45 | throw new IOException("mocked up"); 46 | } 47 | 48 | if (resp.equals(SUCCESS)) { 49 | return createResponse(resp, 200); 50 | } else if (resp.equals(SERVER_BUSY)) { 51 | return createResponse(resp, 503); 52 | } else if (resp.equals(NO_DATA_ERROR)) { 53 | return createResponse(resp, 400); 54 | } else if (resp.equals(INVALID_TOKEN)) { 55 | return createResponse(resp, 400); 56 | } else if (resp.equals(INVALID_INDEX)) { 57 | return createResponse(resp, 400); 58 | } else { 59 | return createResponse(SUCCESS, 201); 60 | } 61 | } 62 | 63 | private CloseableHttpResponse createResponse(String content, int statusCode) { 64 | HttpEntityMock entity = new HttpEntityMock(); 65 | entity.setThrowOnGetContent(throwOnGetContent); 66 | entity.setContent(content); 67 | 68 | StatusLineMock status = new StatusLineMock(statusCode); 69 | 70 | CloseableHttpResponseMock resp = new CloseableHttpResponseMock(); 71 | resp.setThrowOnClose(throwOnClose); 72 | resp.setEntity(entity); 73 | resp.setStatusLine(status); 74 | return resp; 75 | } 76 | 77 | public CloseableHttpClientMock setResponse(final String resp) { 78 | this.resp = resp; 79 | return this; 80 | } 81 | 82 | public CloseableHttpClientMock setThrowOnClose(final boolean th) { 83 | this.throwOnClose = th; 84 | return this; 85 | } 86 | 87 | public CloseableHttpClientMock setThrowOnGetContent(final boolean th) { 88 | this.throwOnGetContent = th; 89 | return this; 90 | } 91 | 92 | 93 | @Override 94 | @Deprecated 95 | public ClientConnectionManager getConnectionManager() { 96 | return null; 97 | } 98 | 99 | @Override 100 | @Deprecated 101 | public HttpParams getParams() { 102 | return null; 103 | } 104 | 105 | @Override 106 | public void close() throws IOException { 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/CloseableHttpResponseMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc..
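[Editor's note] A hedged sketch, not part of the repository, showing how a test steers the mock above; each canned constant drives doExecute() to a fixed status, so HEC client behavior can be exercised without a server.

    package com.splunk.hecclient;

    // Sketch only: the last setResponse() call decides what doExecute() returns.
    public class ClientMockUsageSketch {
        public static void main(String[] args) {
            CloseableHttpClientMock client = new CloseableHttpClientMock();
            client.setResponse(CloseableHttpClientMock.SUCCESS);     // -> HTTP 200 with ack body
            client.setResponse(CloseableHttpClientMock.SERVER_BUSY); // -> HTTP 503
            client.setResponse(CloseableHttpClientMock.EXCEPTION);   // -> doExecute throws IOException
        }
    }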
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.*; 19 | import org.apache.http.client.methods.CloseableHttpResponse; 20 | import org.apache.http.impl.client.CloseableHttpClient; 21 | import org.apache.http.params.*; 22 | 23 | import java.io.IOException; 24 | import java.util.Locale; 25 | 26 | @SuppressWarnings("deprecation") 27 | public class CloseableHttpResponseMock implements CloseableHttpResponse { 28 | private StatusLine statusLine; 29 | private HttpEntity entity; 30 | private boolean throwOnClose = false; 31 | 32 | public CloseableHttpResponseMock setThrowOnClose(boolean th) { 33 | throwOnClose = th; 34 | return this; 35 | } 36 | @Override 37 | public void close() throws IOException { 38 | if (throwOnClose) { 39 | throw new IOException("mockup"); 40 | } 41 | } 42 | 43 | @Override 44 | public StatusLine getStatusLine() { 45 | return statusLine; 46 | } 47 | 48 | @Override 49 | public void setStatusLine(final StatusLine statusline) { 50 | this.statusLine = statusline; 51 | } 52 | 53 | @Override 54 | public void setStatusLine(final ProtocolVersion ver, final int code) { 55 | } 56 | 57 | @Override 58 | public void setStatusLine(final ProtocolVersion ver, final int code, final String reason) { 59 | } 60 | 61 | @Override 62 | public void setStatusCode(final int code) throws IllegalStateException { 63 | } 64 | 65 | @Override 66 | public void setReasonPhrase(final String reason) throws IllegalStateException { 67 | } 68 | 69 | @Override 70 | public HttpEntity getEntity() { 71 | return entity; 72 | } 73 | 74 | @Override 75 | public void setEntity(final HttpEntity entity) { 76 | this.entity = entity; 77 | } 78 | 79 | @Override 80 | public Locale getLocale() { 81 | return null; 82 | } 83 | 84 | @Override 85 | public void setLocale(final Locale loc) { 86 | } 87 | 88 | @Override 89 | public ProtocolVersion getProtocolVersion() { 90 | return null; 91 | } 92 | 93 | @Override 94 | public boolean containsHeader(final String name) { 95 | return true; 96 | } 97 | 98 | @Override 99 | public Header[] getHeaders(final String name) { 100 | return null; 101 | } 102 | 103 | @Override 104 | public Header getFirstHeader(final String name) { 105 | return null; 106 | } 107 | 108 | @Override 109 | public Header getLastHeader(final String name) { 110 | return null; 111 | } 112 | 113 | @Override 114 | public Header[] getAllHeaders() { 115 | return null; 116 | } 117 | 118 | @Override 119 | public void addHeader(final Header header) { 120 | } 121 | 122 | @Override 123 | public void addHeader(final String name, final String value) { 124 | } 125 | 126 | @Override 127 | public void setHeader(final Header header) { 128 | } 129 | 130 | @Override 131 | public void setHeader(final String name, final String value) { 132 | } 133 | 134 | @Override 135 | public void setHeaders(final Header[] headers) { 136 | } 137 | 138 | @Override 139 | public void removeHeader(final Header header) { 140 | } 141 | 142 | @Override 143 
| public void removeHeaders(final String name) { 144 | } 145 | 146 | @Override 147 | public HeaderIterator headerIterator() { 148 | return null; 149 | } 150 | 151 | @Override 152 | public HeaderIterator headerIterator(final String name) { 153 | return null; 154 | } 155 | 156 | @Override 157 | @Deprecated 158 | public HttpParams getParams() { 159 | return null; 160 | } 161 | 162 | @Override 163 | @Deprecated 164 | public void setParams(final HttpParams params) { 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/ConcurrentHecTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.junit.Assert; 19 | import org.junit.Test; 20 | import org.slf4j.Logger; 21 | import org.slf4j.LoggerFactory; 22 | 23 | public class ConcurrentHecTest { 24 | @Test 25 | public void create() { 26 | HecConfig config = UnitUtil.createHecConfig(); 27 | HecInf hec = new ConcurrentHec(1, true, config, null); 28 | Assert.assertNotNull(hec); 29 | hec.close(); 30 | 31 | hec = new ConcurrentHec(1, false, config, null, new LoadBalancerMock()); 32 | Assert.assertNotNull(hec); 33 | hec.close(); 34 | hec.close(); 35 | } 36 | 37 | @Test 38 | public void send() { 39 | HecConfig config = UnitUtil.createHecConfig(); 40 | LoadBalancerMock lb = new LoadBalancerMock(); 41 | HecInf hec = new ConcurrentHec(1, true, config,null, lb); 42 | hec.send(UnitUtil.createBatch()); 43 | UnitUtil.milliSleep(20); 44 | Assert.assertEquals(1, lb.getBatches().size()); 45 | Assert.assertEquals(1, lb.getBatches().get(0).getEvents().size()); 46 | Assert.assertEquals("ni", lb.getBatches().get(0).getEvents().get(0).getEvent()); 47 | hec.close(); 48 | hec.close(); 49 | } 50 | 51 | @Test 52 | public void sendWithFailure() { 53 | HecConfig config = UnitUtil.createHecConfig(); 54 | LoadBalancerMock lb = new LoadBalancerMock(); 55 | PollerCallbackMock poller = new PollerCallbackMock(); 56 | lb.setThrowOnSend(true); 57 | HecInf hec = new ConcurrentHec(1, true, config, poller, lb); 58 | hec.send(UnitUtil.createBatch()); 59 | UnitUtil.milliSleep(20); 60 | Assert.assertEquals(0, lb.getBatches().size()); 61 | Assert.assertEquals(1, poller.getFailed().size()); 62 | Assert.assertTrue(poller.getFailed().get(0).isFailed()); 63 | Assert.assertEquals(1, poller.getFailed().get(0).getFailureCount()); 64 | hec.close(); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/DoubleSerializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import com.fasterxml.jackson.core.JsonProcessingException; 19 | import com.fasterxml.jackson.databind.ObjectMapper; 20 | import com.fasterxml.jackson.databind.annotation.JsonSerialize; 21 | import org.junit.Assert; 22 | import org.junit.Test; 23 | 24 | public class DoubleSerializerTest { 25 | @JsonSerialize(using = DoubleSerializer.class) 26 | private Double d; 27 | 28 | void setD(Double d) { 29 | this.d = d; 30 | } 31 | 32 | @Test 33 | public void serialize() throws JsonProcessingException { 34 | ObjectMapper mapper = new ObjectMapper(); 35 | byte[] bytes = mapper.writeValueAsBytes(this); 36 | Assert.assertEquals("{\"d\":null}", new String(bytes)); 37 | 38 | d = 10000.0; 39 | bytes = mapper.writeValueAsBytes(this); 40 | Assert.assertEquals("{\"d\":10000.000000}", new String(bytes)); 41 | 42 | d = 10000.123456; 43 | bytes = mapper.writeValueAsBytes(this); 44 | Assert.assertEquals("{\"d\":10000.123456}", new String(bytes)); 45 | 46 | d = 10000.123456789; 47 | bytes = mapper.writeValueAsBytes(this); 48 | Assert.assertEquals("{\"d\":10000.123457}", new String(bytes)); 49 | 50 | d = 10000.123456189; 51 | bytes = mapper.writeValueAsBytes(this); 52 | Assert.assertEquals("{\"d\":10000.123456}", new String(bytes)); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HecAckPollResponseTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import org.junit.Assert; 20 | import org.junit.Test; 21 | 22 | import java.io.IOException; 23 | import java.util.Collection; 24 | import java.util.Map; 25 | 26 | public class HecAckPollResponseTest { 27 | private static final ObjectMapper jsonMapper = new ObjectMapper(); 28 | 29 | @Test 30 | public void getSuccessIds() { 31 | HecAckPollResponse resp = getResponse(); 32 | Collection<Long> succeed = resp.getSuccessIds(); 33 | Assert.assertEquals(3, succeed.size()); 34 | for (int i = 1; i < 4; i++) { 35 | Assert.assertTrue(succeed.contains(Long.valueOf(i))); 36 | } 37 | } 38 | 39 | @Test 40 | public void getAcks() { 41 | HecAckPollResponse resp = getResponse(); 42 | Map<String, Boolean> acks = resp.getAcks(); 43 | Assert.assertEquals(6, acks.size()); 44 | for (int i = 1; i < 4; i++) { 45 | Assert.assertEquals(acks.get(String.valueOf(i)), true); 46 | } 47 | 48 | for (int i = 4; i < 7; i++) { 49 | Assert.assertEquals(acks.get(String.valueOf(i)), false); 50 | } 51 | } 52 | 53 | private HecAckPollResponse getResponse() { 54 | String acks = "{\"acks\":{\"1\":true,\"2\":true,\"3\":true,\"4\":false,\"5\":false,\"6\":false}}"; 55 | try { 56 | return jsonMapper.readValue(acks, HecAckPollResponse.class); 57 | } catch (IOException ex) { 58 | Assert.fail("failed to deserialize from acks"); 59 | throw new HecException("failed to deserialize from acks", ex); 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HecChannelTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.client.methods.HttpPost; 19 | import org.apache.http.client.methods.HttpUriRequest; 20 | import org.junit.Assert; 21 | import org.junit.Test; 22 | 23 | import java.util.List; 24 | 25 | public class HecChannelTest { 26 | @Test 27 | public void getterSetter() { 28 | IndexerInf indexer = new IndexerMock(); 29 | HecChannel ch = new HecChannel(indexer); 30 | IndexerInf idx = ch.getIndexer(); 31 | Assert.assertSame(indexer, idx); 32 | 33 | String id = ch.getId(); 34 | Assert.assertNotNull(id); 35 | Assert.assertFalse(id.isEmpty()); 36 | 37 | Assert.assertEquals(id, ch.toString()); 38 | Assert.assertNotNull(ch.hashCode()); 39 | 40 | Assert.assertFalse(ch.isNotAvailable()); 41 | ch.setAvailable(true); 42 | Assert.assertFalse(ch.isNotAvailable()); 43 | ch.setAvailable(false); 44 | Assert.assertTrue(ch.isNotAvailable()); 45 | 46 | ch.setId(); 47 | String newId = ch.getId(); 48 | Assert.assertNotNull(newId); 49 | Assert.assertFalse(newId.isEmpty()); 50 | Assert.assertNotEquals(id, newId); 51 | } 52 | 53 | @Test 54 | public void setTracking() { 55 | IndexerInf indexer = new IndexerMock(); 56 | HecChannel ch = new HecChannel(indexer); 57 | 58 | // enable channel tracking 59 | ch.setTracking(true); 60 | 61 | // we do it again to cover more branch 62 | ch.setTracking(true); 63 | EventBatch batch = new JsonEventBatch(); 64 | Event event = new JsonEvent("ni", "hao"); 65 | batch.add(event); 66 | ch.send(batch); 67 | Assert.assertEquals(ch.getId(), event.getFields().get("hec-channel")); 68 | 69 | // disable channel tracking 70 | ch.setTracking(false); 71 | // we do it again to cover more branch 72 | ch.setTracking(false); 73 | batch = new JsonEventBatch(); 74 | event = new JsonEvent("ni", "hao"); 75 | batch.add(event); 76 | ch.send(batch); 77 | Assert.assertNull(event.getFields()); 78 | } 79 | 80 | @Test 81 | public void send() { 82 | IndexerMock indexer = new IndexerMock(); 83 | HecChannel ch = new HecChannel(indexer); 84 | 85 | EventBatch batch = new JsonEventBatch(); 86 | Event event = new JsonEvent("ni", "hao"); 87 | batch.add(event); 88 | ch.send(batch); 89 | 90 | List<EventBatch> batches = indexer.getBatches(); 91 | Assert.assertEquals(1, batches.size()); 92 | Assert.assertEquals(1, batches.get(0).getEvents().size()); 93 | Assert.assertEquals("ni", batches.get(0).getEvents().get(0).getEvent()); 94 | Assert.assertEquals("hao", batches.get(0).getEvents().get(0).getTied()); 95 | } 96 | 97 | @Test 98 | public void executeHttpRequest() { 99 | HttpUriRequest req = new HttpPost(); 100 | IndexerMock indexer = new IndexerMock(); 101 | HecChannel ch = new HecChannel(indexer); 102 | String res = ch.executeHttpRequest(req); 103 | Assert.assertNull(res); 104 | List<HttpUriRequest> reqs = indexer.getRequests(); 105 | Assert.assertEquals(1, reqs.size()); 106 | Assert.assertEquals(req, reqs.get(0)); 107 | } 108 | 109 | @Test 110 | public void eq() { 111 | HecChannel lhsCh = new HecChannel(null); 112 | HecChannel rhsCh = new HecChannel(null); 113 | Assert.assertFalse(lhsCh.equals(rhsCh)); 114 | 115 | Object copy = lhsCh; 116 | Assert.assertTrue(lhsCh.equals(copy)); 117 | 118 | Assert.assertFalse(lhsCh.equals(null)); 119 | Assert.assertFalse(lhsCh.equals(lhsCh.getId())); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HecConfigTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright
2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.junit.Assert; 19 | import org.junit.Test; 20 | 21 | import java.util.Arrays; 22 | import java.util.List; 23 | 24 | public class HecConfigTest { 25 | @Test 26 | public void getterSetter() { 27 | String uri = "https://dummy:8088"; 28 | String token = "mytoken"; 29 | HecConfig config = new HecConfig(Arrays.asList(uri), token); 30 | 31 | List<String> uris = config.getUris(); 32 | Assert.assertEquals(1, uris.size()); 33 | Assert.assertEquals(uri, uris.get(0)); 34 | Assert.assertEquals(token, config.getToken()); 35 | 36 | config.setAckPollInterval(1) 37 | .setDisableSSLCertVerification(true) 38 | .setHttpKeepAlive(false) 39 | .setSocketSendBufferSize(2) 40 | .setSocketTimeout(3) 41 | .setMaxHttpConnectionPerChannel(4) 42 | .setTotalChannels(5) 43 | .setAckPollThreads(6) 44 | .setEnableChannelTracking(true) 45 | .setEventBatchTimeout(7) 46 | .setTrustStorePath("test") 47 | .setTrustStoreType("PKCS12") 48 | .setTrustStorePassword("pass") 49 | .setHasCustomTrustStore(true) 50 | .setBackoffThresholdSeconds(10) 51 | .setlbPollInterval(120); 52 | 53 | Assert.assertTrue(config.getDisableSSLCertVerification()); 54 | Assert.assertTrue(config.getEnableChannelTracking()); 55 | Assert.assertFalse(config.getHttpKeepAlive()); 56 | Assert.assertEquals(1, config.getAckPollInterval()); 57 | Assert.assertEquals(2, config.getSocketSendBufferSize()); 58 | Assert.assertEquals(3, config.getSocketTimeout()); 59 | Assert.assertEquals(4, config.getMaxHttpConnectionPerChannel()); 60 | Assert.assertEquals(5, config.getTotalChannels()); 61 | Assert.assertEquals(6, config.getAckPollThreads()); 62 | Assert.assertEquals(7, config.getEventBatchTimeout()); 63 | Assert.assertEquals("test", config.getTrustStorePath()); 64 | Assert.assertEquals("PKCS12", config.getTrustStoreType()); 65 | Assert.assertEquals("pass", config.getTrustStorePassword()); 66 | Assert.assertEquals(10000, config.getBackoffThresholdSeconds()); // setter takes seconds, getter reports milliseconds 67 | Assert.assertEquals(120000, config.getlbPollInterval()); // likewise converted to milliseconds 68 | Assert.assertTrue(config.getHasCustomTrustStore()); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HecTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
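Distilled from HecConfigTest above, a usage sketch of the fluent HecConfig API; the endpoint URL and token are placeholders. The milliseconds comment only restates what the assertions above establish.

```java
import com.splunk.hecclient.HecConfig;
import java.util.Arrays;

public class HecConfigSketch {
    public static void main(String[] args) {
        HecConfig config = new HecConfig(Arrays.asList("https://hec.example.com:8088"), "placeholder-token")
                .setTotalChannels(4)
                .setAckPollInterval(10)
                .setBackoffThresholdSeconds(60);
        // Setters take seconds; the getter reports milliseconds (60 -> 60000),
        // exactly as HecConfigTest asserts for 10 -> 10000.
        System.out.println(config.getBackoffThresholdSeconds());
    }
}
```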
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.junit.Assert; 19 | import org.junit.Test; 20 | 21 | public class HecTest { 22 | @Test 23 | public void create() { 24 | HecConfig config = UnitUtil.createHecConfig(); 25 | Hec hec = Hec.newHecWithAck(config, null); 26 | Assert.assertNotNull(hec); 27 | 28 | hec = Hec.newHecWithAck(config, Hec.createHttpClient(config), null); 29 | Assert.assertNotNull(hec); 30 | 31 | hec = Hec.newHecWithAck(config, null, new LoadBalancerMock()); 32 | Assert.assertNotNull(hec); 33 | 34 | hec = Hec.newHecWithoutAck(config, null); 35 | Assert.assertNotNull(hec); 36 | 37 | hec = Hec.newHecWithoutAck(config, Hec.createHttpClient(config), null); 38 | Assert.assertNotNull(hec); 39 | 40 | hec = Hec.newHecWithoutAck(config, null, new LoadBalancerMock()); 41 | Assert.assertNotNull(hec); 42 | } 43 | 44 | @Test 45 | public void sendEmptyBatch() { 46 | LoadBalancerMock lb = new LoadBalancerMock(); 47 | HecConfig config = UnitUtil.createHecConfig(); 48 | Poller pm = new PollerMock(); 49 | Hec hec = new Hec(config, Hec.createHttpClient(config), pm, lb); 50 | hec.send(new JsonEventBatch()); 51 | Assert.assertEquals(0, lb.getBatches().size()); 52 | } 53 | 54 | @Test 55 | public void send() { 56 | LoadBalancerMock lb = new LoadBalancerMock(); 57 | HecConfig config = UnitUtil.createHecConfig(); 58 | Poller pm = new PollerMock(); 59 | Hec hec = new Hec(config, Hec.createHttpClient(config), pm, lb); 60 | hec.send(UnitUtil.createBatch()); 61 | Assert.assertEquals(1, lb.getBatches().size()); 62 | } 63 | 64 | @Test 65 | public void close() { 66 | LoadBalancerMock lb = new LoadBalancerMock(); 67 | HecConfig config = UnitUtil.createHecConfig(); 68 | PollerMock pm = new PollerMock(); 69 | Hec hec = new Hec(config, Hec.createHttpClient(config), pm, lb); 70 | Assert.assertTrue(pm.isStarted()); 71 | 72 | hec.close(); 73 | Assert.assertFalse(pm.isStarted()); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HecURIBuilderTest.java: -------------------------------------------------------------------------------- 1 | package com.splunk.hecclient; 2 | 3 | import org.junit.Assert; 4 | import org.junit.Test; 5 | 6 | import java.net.URI; 7 | import java.util.Collections; 8 | 9 | import static com.splunk.hecclient.JsonEventBatch.ENDPOINT; 10 | 11 | public class HecURIBuilderTest { 12 | private static final String RAW_ENDPOINT = "/services/collector/raw?index=main&source=source"; 13 | private static final String BASE_URL = "https://localhost:8088"; 14 | private static final String TOKEN = "mytoken"; 15 | 16 | @Test 17 | public void testDefaultValues() { 18 | { 19 | HecConfig hecConfig = new HecConfig(Collections.emptyList(), TOKEN); 20 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 21 | 22 | URI uri = builder.getURI(ENDPOINT); 23 | 24 | Assert.assertEquals("https://localhost:8088/services/collector/event", uri.toString()); 25 | } 26 | { 27 | HecConfig hecConfig = new HecConfig(Collections.emptyList(), TOKEN); 28 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 29 | 30 | URI uri = builder.getURI(RAW_ENDPOINT); 31 | 32 | Assert.assertEquals("https://localhost:8088/services/collector/raw?index=main&source=source", uri.toString()); 33 | } 34 | } 35 | 36 | @Test 37 | public void testAutoExtractTimestamp() { 38 | { 39 | HecConfig hecConfig = new 
HecConfig(Collections.emptyList(), TOKEN) 40 | .setAutoExtractTimestamp(true); 41 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 42 | 43 | URI uri = builder.getURI(ENDPOINT); 44 | 45 | Assert.assertEquals("https://localhost:8088/services/collector/event?" + 46 | HecURIBuilder.AUTO_EXTRACT_TIMESTAMP_PARAMETER + "=true", 47 | uri.toString()); 48 | } 49 | { 50 | HecConfig hecConfig = new HecConfig(Collections.emptyList(), TOKEN) 51 | .setAutoExtractTimestamp(false); 52 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 53 | 54 | URI uri = builder.getURI(ENDPOINT); 55 | 56 | Assert.assertEquals("https://localhost:8088/services/collector/event?" + 57 | HecURIBuilder.AUTO_EXTRACT_TIMESTAMP_PARAMETER + "=false", 58 | uri.toString()); 59 | } 60 | { 61 | HecConfig hecConfig = new HecConfig(Collections.emptyList(), TOKEN) 62 | .setAutoExtractTimestamp(false); 63 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 64 | 65 | URI uri = builder.getURI(RAW_ENDPOINT); 66 | 67 | Assert.assertEquals("https://localhost:8088/services/collector/raw?index=main&source=source&" + 68 | HecURIBuilder.AUTO_EXTRACT_TIMESTAMP_PARAMETER + "=false", 69 | uri.toString()); 70 | } 71 | { 72 | HecConfig hecConfig = new HecConfig(Collections.emptyList(), TOKEN) 73 | .setAutoExtractTimestamp(true); 74 | HecURIBuilder builder = new HecURIBuilder(BASE_URL, hecConfig); 75 | 76 | URI uri = builder.getURI(RAW_ENDPOINT); 77 | 78 | Assert.assertEquals("https://localhost:8088/services/collector/raw?index=main&source=source&" + 79 | HecURIBuilder.AUTO_EXTRACT_TIMESTAMP_PARAMETER + "=true", 80 | uri.toString()); 81 | } 82 | } 83 | } -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HttpClientBuilderTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
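Pulling the HecURIBuilderTest cases above into a runnable sketch: the builder appends the auto-extract-timestamp query parameter only when it has been explicitly configured, and otherwise returns the bare endpoint. The base URL and token are placeholders; the parameter name is assumed to resolve to HEC's auto_extract_timestamp flag, per the constant used in the tests.

```java
import com.splunk.hecclient.HecConfig;
import com.splunk.hecclient.HecURIBuilder;
import com.splunk.hecclient.JsonEventBatch;
import java.net.URI;
import java.util.Collections;

public class UriSketch {
    public static void main(String[] args) {
        HecConfig config = new HecConfig(Collections.emptyList(), "placeholder-token")
                .setAutoExtractTimestamp(true);
        HecURIBuilder builder = new HecURIBuilder("https://hec.example.com:8088", config);
        // The event endpoint plus the timestamp-extraction flag; leaving the
        // flag unconfigured would yield the endpoint with no query string.
        URI uri = builder.getURI(JsonEventBatch.ENDPOINT);
        System.out.println(uri);
    }
}
```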
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.impl.client.CloseableHttpClient; 19 | import org.junit.Assert; 20 | import org.junit.Test; 21 | 22 | 23 | public class HttpClientBuilderTest { 24 | @Test 25 | public void buildUnsecure() { 26 | HttpClientBuilder builder = new HttpClientBuilder(); 27 | CloseableHttpClient client = builder.setMaxConnectionPoolSizePerDestination(1) 28 | .setMaxConnectionPoolSize(2) 29 | .setSocketSendBufferSize(1024) 30 | .setSocketTimeout(120) 31 | .setDisableSSLCertVerification(true) 32 | .build(); 33 | Assert.assertNotNull(client); 34 | } 35 | 36 | @Test 37 | public void buildSecureDefault() { 38 | HttpClientBuilder builder = new HttpClientBuilder(); 39 | CloseableHttpClient client = builder.setMaxConnectionPoolSizePerDestination(1) 40 | .setMaxConnectionPoolSize(2) 41 | .setSocketSendBufferSize(1024) 42 | .setSocketTimeout(120) 43 | .setDisableSSLCertVerification(false) 44 | .build(); 45 | Assert.assertNotNull(client); 46 | } 47 | @Test 48 | public void buildSecureCustomKeystore() { 49 | HttpClientBuilder builder = new HttpClientBuilder(); 50 | CloseableHttpClient client = builder.setMaxConnectionPoolSizePerDestination(1) 51 | .setMaxConnectionPoolSize(2) 52 | .setSocketSendBufferSize(1024) 53 | .setSocketTimeout(120) 54 | .setDisableSSLCertVerification(false) 55 | .setSslContext(Hec.loadCustomSSLContext("./src/test/resources/keystoretest.jks", "JKS", "Notchangeme")) 56 | .build(); 57 | Assert.assertNotNull(client); 58 | } 59 | @Test 60 | public void buildSecureCustomKeystorePkcs12() { 61 | HttpClientBuilder builder = new HttpClientBuilder(); 62 | CloseableHttpClient client = builder.setMaxConnectionPoolSizePerDestination(1) 63 | .setMaxConnectionPoolSize(2) 64 | .setSocketSendBufferSize(1024) 65 | .setSocketTimeout(120) 66 | .setDisableSSLCertVerification(false) 67 | .setSslContext(Hec.loadCustomSSLContext("./src/test/resources/keystoretest.p12", "PKCS12", "Notchangeme")) 68 | .build(); 69 | Assert.assertNotNull(client); 70 | } 71 | 72 | @Test 73 | public void buildDefault() { 74 | HttpClientBuilder builder = new HttpClientBuilder(); 75 | CloseableHttpClient client = builder.build(); 76 | Assert.assertNotNull(client); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/HttpEntityMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
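The custom-keystore tests above reduce to the following sketch; the truststore path and password here are placeholders, while the JKS and PKCS12 store types mirror the fixtures shipped under src/test/resources.

```java
import com.splunk.hecclient.Hec;
import com.splunk.hecclient.HttpClientBuilder;
import org.apache.http.impl.client.CloseableHttpClient;

public class TlsClientSketch {
    public static void main(String[] args) {
        // Keep certificate verification on and trust a custom store instead
        // of disabling TLS checks altogether.
        CloseableHttpClient client = new HttpClientBuilder()
                .setMaxConnectionPoolSizePerDestination(4)
                .setMaxConnectionPoolSize(16)
                .setSocketTimeout(60)
                .setDisableSSLCertVerification(false)
                .setSslContext(Hec.loadCustomSSLContext("/etc/ssl/hec-truststore.jks", "JKS", "changeit"))
                .build();
        System.out.println("client built: " + (client != null));
    }
}
```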
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.HttpEntity; 19 | import org.apache.http.entity.AbstractHttpEntity; 20 | import java.io.*; 21 | 22 | public class HttpEntityMock extends AbstractHttpEntity { 23 | private String content = ""; 24 | private boolean throwOnGet = false; 25 | 26 | public HttpEntityMock setContent(final String content) { 27 | this.content = content; 28 | return this; 29 | } 30 | 31 | public HttpEntityMock setThrowOnGetContent(boolean th) { 32 | throwOnGet = th; 33 | return this; 34 | } 35 | 36 | @Override 37 | public boolean isRepeatable() { 38 | return true; 39 | } 40 | 41 | @Override 42 | public long getContentLength() { 43 | return content.length(); 44 | } 45 | 46 | @Override 47 | public boolean isStreaming() { 48 | return false; 49 | } 50 | 51 | @Override 52 | public InputStream getContent() throws IOException, UnsupportedOperationException { 53 | if (throwOnGet) { 54 | throw new IOException("mocked up"); 55 | } 56 | return new ByteArrayInputStream(content.getBytes()); 57 | } 58 | 59 | @Override 60 | public void writeTo(OutputStream outstream) throws IOException { 61 | outstream.write(content.getBytes()); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/IndexerMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.Header; 19 | import org.apache.http.client.methods.HttpUriRequest; 20 | 21 | import java.util.ArrayList; 22 | import java.util.List; 23 | 24 | public final class IndexerMock implements IndexerInf { 25 | private List<EventBatch> batches = new ArrayList<>(); 26 | private List<HttpUriRequest> requests = new ArrayList<>(); 27 | private String response; 28 | private boolean backPressure = false; 29 | 30 | @Override 31 | public boolean send(final EventBatch batch) { 32 | batches.add(batch); 33 | return true; 34 | } 35 | 36 | @Override 37 | public String executeHttpRequest(final HttpUriRequest req) { 38 | requests.add(req); 39 | return response; 40 | } 41 | 42 | @Override 43 | public String getBaseUrl() { 44 | return ""; 45 | } 46 | 47 | @Override 48 | public Header[] getHeaders() { 49 | return null; 50 | } 51 | 52 | @Override 53 | public boolean hasBackPressure() { 54 | return backPressure; 55 | } 56 | 57 | public IndexerMock setBackPressure(boolean backPressure) { 58 | this.backPressure = backPressure; 59 | return this; 60 | } 61 | 62 | public List<EventBatch> getBatches() { 63 | return batches; 64 | } 65 | 66 | public List<HttpUriRequest> getRequests() { 67 | return requests; 68 | } 69 | 70 | public IndexerMock setResponse(String response) { 71 | this.response = response; 72 | return this; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/LoadBalancerMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.impl.client.CloseableHttpClient; 19 | 20 | import java.util.ArrayList; 21 | import java.util.List; 22 | 23 | public class LoadBalancerMock implements LoadBalancerInf { 24 | private List<EventBatch> batches = new ArrayList<>(); 25 | private boolean throwOnSend = false; 26 | 27 | @Override 28 | public void add(String indexerUrl, HecChannel channel) { 29 | } 30 | 31 | public void remove(HecChannel channel) { 32 | } 33 | 34 | public void send(final EventBatch batch) { 35 | if (throwOnSend) { 36 | throw new HecException("mocked up"); 37 | } 38 | batches.add(batch); 39 | } 40 | 41 | public LoadBalancerMock setThrowOnSend(boolean throwOnSend) { 42 | this.throwOnSend = throwOnSend; 43 | return this; 44 | } 45 | 46 | public int size() { 47 | return 0; 48 | } 49 | 50 | @Override 51 | public void setHttpClient(CloseableHttpClient httpClient) { 52 | } 53 | 54 | @Override 55 | public void close() { 56 | } 57 | 58 | public List<EventBatch> getBatches() { 59 | return batches; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/PollerCallbackMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc..
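The mocks above exist to capture traffic rather than send it; a condensed restatement of the HecChannelTest send() flow shows the pattern, using the test-package mocks defined in this listing. The payload strings are arbitrary.

```java
import com.splunk.hecclient.EventBatch;
import com.splunk.hecclient.HecChannel;
import com.splunk.hecclient.IndexerMock;
import com.splunk.hecclient.JsonEvent;
import com.splunk.hecclient.JsonEventBatch;

public class CaptureSketch {
    public static void main(String[] args) {
        // IndexerMock records batches instead of posting them, so a test can
        // assert on exactly what HecChannel forwarded.
        IndexerMock indexer = new IndexerMock();
        HecChannel channel = new HecChannel(indexer);
        EventBatch batch = new JsonEventBatch();
        batch.add(new JsonEvent("some-payload", null));
        channel.send(batch);
        System.out.println(indexer.getBatches().size()); // 1
    }
}
```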
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import java.util.ArrayList; 19 | import java.util.List; 20 | import java.util.concurrent.ConcurrentLinkedQueue; 21 | 22 | public class PollerCallbackMock implements PollerCallback { 23 | private ConcurrentLinkedQueue<EventBatch> failed = new ConcurrentLinkedQueue<>(); 24 | private ConcurrentLinkedQueue<EventBatch> committed = new ConcurrentLinkedQueue<>(); 25 | 26 | public void onEventFailure(final List<EventBatch> failure, Exception ex) { 27 | failed.addAll(failure); 28 | } 29 | 30 | public void onEventCommitted(final List<EventBatch> commit) { 31 | committed.addAll(commit); 32 | } 33 | 34 | public List<EventBatch> getFailed() { 35 | List<EventBatch> results = new ArrayList<>(); 36 | results.addAll(failed); 37 | return results; 38 | } 39 | 40 | public List<EventBatch> getCommitted() { 41 | List<EventBatch> results = new ArrayList<>(); 42 | results.addAll(committed); 43 | return results; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/PollerMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import java.util.ArrayList; 19 | import java.util.HashMap; 20 | import java.util.List; 21 | import java.util.Map; 22 | 23 | public class PollerMock implements Poller { 24 | private boolean started; 25 | private HecChannel channel; 26 | private EventBatch batch; 27 | private EventBatch failedBatch; 28 | private String response; 29 | private Exception exception; 30 | 31 | @Override 32 | public void start() { 33 | started = true; 34 | } 35 | 36 | @Override 37 | public void stop() { 38 | started = false; 39 | } 40 | 41 | @Override 42 | public void fail(HecChannel channel, EventBatch batch, Exception ex) { 43 | this.channel = channel; 44 | this.failedBatch = batch; 45 | this.exception = ex; 46 | } 47 | 48 | @Override 49 | public long getTotalOutstandingEventBatches() { 50 | return 0; 51 | } 52 | 53 | @Override 54 | public HecChannel getMinLoadChannel() { 55 | return null; 56 | } 57 | 58 | @Override 59 | public void add(HecChannel channel, EventBatch batch, String resp) { 60 | this.channel = channel; 61 | this.batch = batch; 62 | this.response = resp; 63 | } 64 | 65 | @Override 66 | public void stickySessionHandler(HecChannel channel) { 67 | // Not required for mock 68 | } 69 | 70 | public boolean isStarted() { 71 | return started; 72 | } 73 | 74 | public HecChannel getChannel() { 75 | return channel; 76 | } 77 | 78 | public EventBatch getBatch() { 79 | return batch; 80 | } 81 | 82 | public EventBatch getFailedBatch() { 83 | return failedBatch; 84 | } 85 | 86 | public Exception getException() { 87 | return exception; 88 | } 89 | 90 | public String getResponse() { 91 | return response; 92 | } 93 | public void setStickySessionToTrue() { 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/PostResponseTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
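PollerMock's isStarted() flag is what HecTest earlier in this listing uses to pin down the Hec lifecycle: constructing a Hec starts its poller, and close() stops it. A sketch under those same assumptions, with a placeholder endpoint and token; the empty Kerberos principal mirrors UnitUtil.createHecConfig.

```java
import com.splunk.hecclient.Hec;
import com.splunk.hecclient.HecConfig;
import com.splunk.hecclient.LoadBalancerMock;
import com.splunk.hecclient.PollerMock;
import java.util.Arrays;

public class LifecycleSketch {
    public static void main(String[] args) {
        HecConfig config = new HecConfig(Arrays.asList("https://hec.example.com:8088"), "placeholder-token")
                .setKerberosPrincipal("");
        PollerMock poller = new PollerMock();
        LoadBalancerMock lb = new LoadBalancerMock();
        // The constructor starts the poller; close() stops it.
        Hec hec = new Hec(config, Hec.createHttpClient(config), poller, lb);
        System.out.println(poller.isStarted()); // true
        hec.close();
        System.out.println(poller.isStarted()); // false
    }
}
```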
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import org.junit.Assert; 20 | import org.junit.Test; 21 | 22 | import java.io.IOException; 23 | 24 | public class PostResponseTest { 25 | private static final ObjectMapper jsonMapper = new ObjectMapper(); 26 | 27 | @Test 28 | public void isSucceed() { 29 | PostResponse resp = getResponse(true); 30 | Assert.assertTrue(resp.isSucceed()); 31 | 32 | resp = getResponse(false); 33 | Assert.assertFalse(resp.isSucceed()); 34 | } 35 | 36 | @Test 37 | public void getText() { 38 | PostResponse resp = getResponse(true); 39 | Assert.assertEquals("Success", resp.getText()); 40 | } 41 | 42 | @Test 43 | public void getAckId() { 44 | PostResponse resp = getResponse(true); 45 | Assert.assertEquals(7, resp.getAckId()); 46 | } 47 | 48 | @Test 49 | public void getterSetter() { 50 | PostResponse resp = new PostResponse(); 51 | resp.setCode(0); 52 | Assert.assertTrue(resp.isSucceed()); 53 | 54 | resp.setText("Failed"); 55 | Assert.assertEquals("Failed", resp.getText()); 56 | 57 | resp.setAckId(100); 58 | Assert.assertEquals(100, resp.getAckId()); 59 | } 60 | 61 | private PostResponse getResponse(boolean success) { 62 | String resp; 63 | if (success) { 64 | resp = "{\"text\":\"Success\",\"code\":0,\"ackId\":7}"; 65 | } else { 66 | resp = "{\"text\":\"Failed\",\"code\":-10}"; 67 | } 68 | 69 | try { 70 | return jsonMapper.readValue(resp, PostResponse.class); 71 | } catch (IOException ex) { 72 | Assert.assertTrue("failed to deserialize from acks", false); 73 | throw new HecException("failed to deserialize from acks", ex); 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/RawEventBatchTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
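For reference, the two HEC reply shapes the PostResponseTest fixtures above use, bound through the same Jackson path; per the assertions, code 0 reads as success and may carry an ackId, while the failure fixture uses code -10.

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.splunk.hecclient.PostResponse;

public class PostResponseSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Success carries an ackId when acknowledgements are enabled.
        PostResponse ok = mapper.readValue("{\"text\":\"Success\",\"code\":0,\"ackId\":7}", PostResponse.class);
        PostResponse bad = mapper.readValue("{\"text\":\"Failed\",\"code\":-10}", PostResponse.class);
        System.out.println(ok.isSucceed() + " ack=" + ok.getAckId()); // true ack=7
        System.out.println(bad.isSucceed());                          // false
    }
}
```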
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.HttpEntity; 19 | import org.junit.Assert; 20 | import org.junit.Test; 21 | 22 | import java.io.ByteArrayInputStream; 23 | import java.io.ByteArrayOutputStream; 24 | import java.io.IOException; 25 | import java.util.List; 26 | import java.util.zip.GZIPInputStream; 27 | 28 | public class RawEventBatchTest { 29 | @Test 30 | public void add() { 31 | Event event = new RawEvent("ni", "hao"); 32 | EventBatch batch = RawEventBatch.factory().build(); 33 | batch.add(event); 34 | List<Event> events = batch.getEvents(); 35 | Assert.assertEquals(events.size(), 1); 36 | Event eventGot = events.get(0); 37 | Assert.assertEquals(event.getEvent(), eventGot.getEvent()); 38 | Assert.assertEquals(event.getTied(), eventGot.getTied()); 39 | } 40 | 41 | @Test(expected = HecException.class) 42 | public void addWithFailure() { 43 | Event event = new JsonEvent("ni", "hao"); 44 | EventBatch batch = RawEventBatch.factory().build(); 45 | batch.add(event); 46 | } 47 | 48 | @Test 49 | public void getRestEndpoint() { 50 | // Without metadata 51 | EventBatch batch = RawEventBatch.factory().build(); 52 | Assert.assertEquals(batch.getRestEndpoint(), RawEventBatch.ENDPOINT); 53 | 54 | // With all metadata 55 | EventBatch rawBatch = RawEventBatch.factory() 56 | .setHost("localhost") 57 | .setSource("source") 58 | .setSourcetype("sourcetype") 59 | .setTime(1000) 60 | .setIndex("index") 61 | .build(); 62 | String endpoint = rawBatch.getRestEndpoint(); 63 | Assert.assertTrue(endpoint.contains("index=index")); 64 | Assert.assertTrue(endpoint.contains("host=localhost")); 65 | Assert.assertTrue(endpoint.contains("source=source")); 66 | Assert.assertTrue(endpoint.contains("sourcetype=sourcetype")); 67 | Assert.assertTrue(endpoint.contains("time=1000")); 68 | 69 | // With partial metadata 70 | EventBatch rawBatchPartial = RawEventBatch.factory() 71 | .setHost("localhost") 72 | .setIndex("index") 73 | .setSource("") 74 | .build(); 75 | endpoint = rawBatchPartial.getRestEndpoint(); 76 | Assert.assertTrue(endpoint.contains("index=index")); 77 | Assert.assertTrue(endpoint.contains("host=localhost")); 78 | Assert.assertFalse(endpoint.contains("source=")); 79 | Assert.assertFalse(endpoint.contains("sourcetype=")); 80 | Assert.assertFalse(endpoint.contains("time=")); 81 | } 82 | 83 | @Test 84 | public void getContentType() { 85 | EventBatch batch = RawEventBatch.factory().build(); 86 | Assert.assertEquals(batch.getContentType(), RawEventBatch.CONTENT_TYPE); 87 | } 88 | 89 | @Test 90 | public void createFromThis() { 91 | EventBatch batch = RawEventBatch.factory().build(); 92 | EventBatch rawBatch = batch.createFromThis(); 93 | Assert.assertNotNull(rawBatch); 94 | Assert.assertTrue(rawBatch instanceof RawEventBatch); 95 | } 96 | 97 | @Test 98 | public void getter() { 99 | RawEventBatch batch = RawEventBatch.factory() 100 | .setSource("source") 101 | .setIndex("index") 102 | .setSourcetype("sourcetype") 103 | .setHost("host") 104 | .setTime(1) 105 | .build(); 106 | Assert.assertEquals("source", batch.getSource()); 107 | Assert.assertEquals("sourcetype", batch.getSourcetype()); 108 | Assert.assertEquals("index", batch.getIndex()); 109 | Assert.assertEquals(1, batch.getTime()); 110 | } 111 | 112 | @Test 113 | public void checkEquals() { 114 | RawEventBatch batchOne = RawEventBatch.factory() 115 | .setSource("source3") 116 | .setIndex("idx1") 117 | .setSourcetype("sourcetype2") 118 | .setHost("host4") 119 | .build(); 120 | 121 | RawEventBatch batchTwo = RawEventBatch.factory()
122 | .setSource("source") 123 | .setIndex("idx") 124 | .setSourcetype("1sourcetype2") 125 | .setHost("3host4") 126 | .build(); 127 | 128 | Assert.assertFalse(batchOne.equals(batchTwo)); 129 | } 130 | 131 | @Test 132 | public void testGZIPCompressionForRaw() { 133 | EventBatch batch = RawEventBatch.factory().build(); 134 | batch.setEnableCompression(true); 135 | Assert.assertTrue(batch.isEnableCompression()); 136 | Event event = new RawEvent("hello world! hello world! hello world!", null); 137 | batch.add(event); 138 | HttpEntity entity = batch.getHttpEntityTemplate(); 139 | byte[] data = new byte[1024]; 140 | try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { 141 | entity.writeTo(out); 142 | String expected = "hello world! hello world! hello world!"; 143 | ByteArrayInputStream bis = new ByteArrayInputStream(out.toByteArray()); 144 | GZIPInputStream gis = new GZIPInputStream(bis); 145 | int read = gis.read(data, 0, data.length); 146 | gis.close(); 147 | bis.close(); 148 | 149 | // Decode the bytes into a String 150 | String ori = new String(data, 0, read, "UTF-8"); 151 | Assert.assertEquals(expected, ori); 152 | } catch (IOException ex) { 153 | Assert.assertTrue("failed to compress and decompress the data", false); 154 | throw new HecException("failed to compress and decompress the data", ex); 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/ResponsePollerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
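The getRestEndpoint() assertions above boil down to this sketch: metadata set on the builder becomes query parameters on the raw-collector endpoint, and unset or empty fields are simply omitted. The index, host, and sourcetype values here are placeholders.

```java
import com.splunk.hecclient.RawEventBatch;

public class RawEndpointSketch {
    public static void main(String[] args) {
        RawEventBatch batch = RawEventBatch.factory()
                .setIndex("main")
                .setHost("web-01")
                .setSourcetype("access_combined")
                .build();
        // Prints the raw endpoint with index, host, and sourcetype as query
        // parameters; source and time were never set, so they do not appear.
        System.out.println(batch.getRestEndpoint());
    }
}
```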
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.junit.Assert; 19 | import org.junit.Test; 20 | 21 | public class ResponsePollerTest { 22 | @Test 23 | public void start() { 24 | ResponsePoller poller = new ResponsePoller(null); 25 | poller.start(); 26 | } 27 | 28 | @Test 29 | public void stop() { 30 | ResponsePoller poller = new ResponsePoller(null); 31 | poller.stop(); 32 | } 33 | 34 | @Test 35 | public void fail() { 36 | PollerCallbackMock cb = new PollerCallbackMock(); 37 | ResponsePoller poller = new ResponsePoller(cb); 38 | EventBatch batch = UnitUtil.createBatch(); 39 | poller.fail(null, batch, null); 40 | Assert.assertTrue(batch.isFailed()); 41 | Assert.assertEquals(1, cb.getFailed().size()); 42 | } 43 | 44 | @Test 45 | public void getTotalOutstandingEventBatches() { 46 | ResponsePoller poller = new ResponsePoller(null); 47 | Assert.assertEquals(0, poller.getTotalOutstandingEventBatches()); 48 | } 49 | 50 | @Test 51 | public void getMinLoadChannel() { 52 | ResponsePoller poller = new ResponsePoller(null); 53 | Assert.assertNull(poller.getMinLoadChannel()); 54 | } 55 | 56 | @Test 57 | public void addFailedBatch() { 58 | PollerCallbackMock cb = new PollerCallbackMock(); 59 | ResponsePoller poller = new ResponsePoller(cb); 60 | EventBatch batch = UnitUtil.createBatch(); 61 | String resp = "{\"text\":\"Failed\",\"code\":-10}"; 62 | poller.add(null, batch, resp); 63 | Assert.assertTrue(batch.isFailed()); 64 | Assert.assertEquals(1, cb.getFailed().size()); 65 | 66 | // without callback 67 | poller = new ResponsePoller(null); 68 | batch = UnitUtil.createBatch(); 69 | resp = "{\"text\":\"Failed\",\"code\":-10}"; 70 | poller.add(null, batch, resp); 71 | Assert.assertTrue(batch.isFailed()); 72 | } 73 | 74 | @Test 75 | public void addSuccessfulBatch() { 76 | PollerCallbackMock cb = new PollerCallbackMock(); 77 | ResponsePoller poller = new ResponsePoller(cb); 78 | 79 | EventBatch batch = UnitUtil.createBatch(); 80 | String resp = "{\"text\":\"Success\",\"code\":0}"; 81 | poller.add(null, batch, resp); 82 | Assert.assertTrue(batch.isCommitted()); 83 | Assert.assertEquals(1, cb.getCommitted().size()); 84 | 85 | // without callback 86 | poller = new ResponsePoller(null); 87 | batch = UnitUtil.createBatch(); 88 | resp = "{\"text\":\"Success\",\"code\":0}"; 89 | poller.add(null, batch, resp); 90 | Assert.assertTrue(batch.isCommitted()); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/StatusLineMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
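Condensing ResponsePollerTest above: ResponsePoller parses the HEC reply inline, committing the batch and firing onEventCommitted on code 0, and failing it otherwise (the tests use -10 for the failure path). The payload string is arbitrary.

```java
import com.splunk.hecclient.EventBatch;
import com.splunk.hecclient.JsonEvent;
import com.splunk.hecclient.JsonEventBatch;
import com.splunk.hecclient.PollerCallbackMock;
import com.splunk.hecclient.ResponsePoller;

public class ResponsePollerSketch {
    public static void main(String[] args) {
        PollerCallbackMock callback = new PollerCallbackMock();
        ResponsePoller poller = new ResponsePoller(callback);
        EventBatch batch = new JsonEventBatch();
        batch.add(new JsonEvent("some-payload", null));
        // A code-0 reply commits the batch and notifies the callback.
        poller.add(null, batch, "{\"text\":\"Success\",\"code\":0}");
        System.out.println(batch.isCommitted());            // true
        System.out.println(callback.getCommitted().size()); // 1
    }
}
```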
15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.apache.http.ProtocolVersion; 19 | import org.apache.http.StatusLine; 20 | 21 | public class StatusLineMock implements StatusLine { 22 | private int status; 23 | 24 | public StatusLineMock(int status) { 25 | this.status = status; 26 | } 27 | 28 | public ProtocolVersion getProtocolVersion() { 29 | return new ProtocolVersion("http", 1, 1); 30 | } 31 | 32 | public int getStatusCode() { 33 | return status; 34 | } 35 | 36 | public String getReasonPhrase() { 37 | return "POST"; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/hecclient/UnitUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.hecclient; 17 | 18 | import org.junit.Assert; 19 | 20 | import java.io.IOException; 21 | import java.io.InputStream; 22 | import java.util.Arrays; 23 | import java.util.concurrent.TimeUnit; 24 | 25 | public class UnitUtil { 26 | public static HecConfig createHecConfig() { 27 | return new HecConfig(Arrays.asList("https://dummyhost:8088"), "token") 28 | .setKerberosPrincipal(""); 29 | } 30 | 31 | public static EventBatch createBatch() { 32 | EventBatch batch = new JsonEventBatch(); 33 | Event event = new JsonEvent("ni", "hao"); 34 | batch.add(event); 35 | return batch; 36 | } 37 | 38 | public static EventBatch createMultiBatch(int count) { 39 | EventBatch batch = new JsonEventBatch(); 40 | for (int i = 0; i < count; i++) { 41 | Event event = new JsonEvent("ni-" + i, "hao-" + i); 42 | batch.add(event); 43 | } 44 | return batch; 45 | } 46 | 47 | public static EventBatch createRawEventBatch() { 48 | Event event = new RawEvent("ni", "hao"); 49 | EventBatch batch = RawEventBatch.factory().build(); 50 | batch.add(event); 51 | return batch; 52 | } 53 | 54 | public static void milliSleep(long milliseconds) { 55 | try { 56 | TimeUnit.MILLISECONDS.sleep(milliseconds); 57 | } catch (InterruptedException ex) { 58 | } 59 | } 60 | 61 | public static int read(final InputStream stream, byte[] data) { 62 | int siz = 0; 63 | while (true) { 64 | try { 65 | int read = stream.read(data, siz, data.length - siz); 66 | if (read < 0) { 67 | break; 68 | } 69 | siz += read; 70 | } catch (IOException ex) { 71 | Assert.assertTrue("failed to read from stream", false); 72 | throw new HecException("failed to read from stream", ex); 73 | } 74 | } 75 | return siz; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/HecMock.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import com.splunk.hecclient.EventBatch; 19 | import com.splunk.hecclient.HecException; 20 | import com.splunk.hecclient.HecInf; 21 | 22 | import java.util.ArrayList; 23 | import java.util.Arrays; 24 | import java.util.List; 25 | 26 | public class HecMock implements HecInf { 27 | static final String success = "success"; 28 | static final String successAndThenFailure = "successAndThenFailure"; 29 | static final String failure = "failure"; 30 | 31 | private List<EventBatch> batches; 32 | private SplunkSinkTask task; 33 | private String sentResult = "success"; 34 | 35 | public HecMock(SplunkSinkTask task) { 36 | this.task = task; 37 | this.batches = new ArrayList<>(); 38 | } 39 | 40 | @Override 41 | public void close() { 42 | } 43 | 44 | @Override 45 | public void send(final EventBatch batch) { 46 | batches.add(batch); 47 | if (sentResult.equals(success)) { 48 | batch.commit(); 49 | task.onEventCommitted(Arrays.asList(batch)); 50 | } else if (sentResult.equals(failure)) { 51 | batch.fail(); 52 | task.onEventFailure(Arrays.asList(batch), new HecException("mockup")); 53 | } else { // successAndThenFailure is treated the same as failure here 54 | batch.fail(); 55 | task.onEventFailure(Arrays.asList(batch), new HecException("mockup")); 56 | } 57 | } 58 | 59 | public void setSendReturnResult(final String result) { 60 | sentResult = result; 61 | } 62 | 63 | public List<EventBatch> getBatches() { 64 | return batches; 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/KafkaRecordTrackerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import com.splunk.hecclient.EventBatch; 19 | import com.splunk.hecclient.UnitUtil; 20 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 21 | import org.apache.kafka.common.TopicPartition; 22 | import org.apache.kafka.connect.sink.SinkRecord; 23 | import org.junit.Assert; 24 | import org.junit.Test; 25 | 26 | import java.util.ArrayList; 27 | import java.util.Collection; 28 | import java.util.List; 29 | import java.util.Map; 30 | import java.util.concurrent.ExecutionException; 31 | import java.util.concurrent.ExecutorService; 32 | import java.util.concurrent.Executors; 33 | import java.util.concurrent.Future; 34 | 35 | public class KafkaRecordTrackerTest { 36 | @Test 37 | public void addFailedEventBatch() { 38 | EventBatch batch = UnitUtil.createBatch(); 39 | batch.fail(); 40 | batch.getEvents().get(0).setTied(createSinkRecord(1)); 41 | KafkaRecordTracker tracker = new KafkaRecordTracker(); 42 | tracker.open(createTopicPartitionList()); 43 | tracker.addFailedEventBatch(batch); 44 | Collection<SinkRecord> failed = tracker.getAndRemoveFailedRecords(); 45 | Assert.assertEquals(1, failed.size()); 46 | } 47 | 48 | @Test(expected = RuntimeException.class) 49 | public void addNonFailedEventBatch() { 50 | EventBatch batch = UnitUtil.createBatch(); 51 | KafkaRecordTracker tracker = new KafkaRecordTracker(); 52 | tracker.addFailedEventBatch(batch); 53 | } 54 | 55 | @Test 56 | public void removeEventBatchMultiThread() { 57 | List<EventBatch> batches = new ArrayList<>(); 58 | KafkaRecordTracker tracker = new KafkaRecordTracker(); 59 | tracker.open(createTopicPartitionList(500)); 60 | 61 | for (int i = 0; i < 100; i++) { 62 | EventBatch batch = UnitUtil.createMultiBatch(500); 63 | for (int j = 0; j < 500; j++) { 64 | batch.getEvents().get(j).setTied(createSinkRecord(j, i * 1000 + j)); 65 | } 66 | batch.commit(); 67 | batches.add(batch); 68 | tracker.addEventBatch(batch); 69 | } 70 | 71 | Assert.assertEquals(50000, tracker.totalEvents()); 72 | ExecutorService executorService = Executors.newFixedThreadPool(2); 73 | try { 74 | Future<?> first = executorService.submit(() -> tracker.removeAckedEventBatches(batches)); 75 | Future<?> second = executorService.submit(() -> tracker.removeAckedEventBatches(batches)); 76 | 77 | first.get(); 78 | second.get(); 79 | } catch (ExecutionException | InterruptedException e) { 80 | throw new RuntimeException(e); 81 | } finally { 82 | executorService.shutdown(); 83 | } 84 | 85 | Assert.assertEquals(0, tracker.totalEvents()); 86 | } 87 | @Test 88 | public void addEventBatch() { 89 | List<EventBatch> batches = new ArrayList<>(); 90 | KafkaRecordTracker tracker = new KafkaRecordTracker(); 91 | for (int i = 0; i < 3; i++) { 92 | EventBatch batch = UnitUtil.createBatch(); 93 | batch.getEvents().get(0).setTied(createSinkRecord(i)); 94 | batches.add(batch); 95 | tracker.open(createTopicPartitionList()); 96 | tracker.addEventBatch(batch); 97 | } 98 | Map<TopicPartition, OffsetAndMetadata> offsets = tracker.computeOffsets(); 99 | Assert.assertTrue(offsets.isEmpty()); 100 | 101 | batches.get(0).commit(); 102 | tracker.removeAckedEventBatches(batches); 103 | offsets = tracker.computeOffsets(); 104 | Assert.assertEquals(1, offsets.size()); 105 | 106 | batches.get(2).commit(); 107 | tracker.removeAckedEventBatches(batches); 108 | offsets = tracker.computeOffsets(); 109 | Assert.assertEquals(1, offsets.size()); 110 | 111 | batches.get(1).commit(); 112 | tracker.removeAckedEventBatches(batches); 113 | offsets = tracker.computeOffsets(); 114 | Assert.assertEquals(1, offsets.size()); 115 | 116 |
offsets = tracker.computeOffsets(); 117 | Assert.assertEquals(1, offsets.size()); 118 | 119 | } 120 | 121 | @Test 122 | public void addEventBatchWithNonSinkRecord() { 123 | KafkaRecordTracker tracker = new KafkaRecordTracker(); 124 | for (int i = 0; i < 3; i++) { 125 | EventBatch batch = UnitUtil.createBatch(); 126 | batch.getEvents().get(0).setTied(""); 127 | batch.commit(); 128 | tracker.addEventBatch(batch); 129 | } 130 | Map<TopicPartition, OffsetAndMetadata> offsets = tracker.computeOffsets(); 131 | Assert.assertEquals(0, offsets.size()); 132 | } 133 | 134 | private SinkRecord createSinkRecord(long offset) { 135 | return new SinkRecord("t", 1, null, null, null, "ni, hao", offset); 136 | } 137 | 138 | private List<TopicPartition> createTopicPartitionList() { 139 | ArrayList<TopicPartition> tps = new ArrayList<>(); 140 | tps.add(new TopicPartition("t", 1)); 141 | return tps; 142 | } 143 | 144 | private SinkRecord createSinkRecord(int partition, long offset) { 145 | return new SinkRecord("t", partition, null, null, null, "ni, hao", offset); 146 | } 147 | 148 | private List<TopicPartition> createTopicPartitionList(int number) { 149 | ArrayList<TopicPartition> tps = new ArrayList<>(); 150 | for (int i = 0; i < number; i++) { 151 | tps.add(new TopicPartition("t", i)); 152 | } 153 | return tps; 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/MockHecClientWrapper.java: -------------------------------------------------------------------------------- 1 | package com.splunk.kafka.connect; 2 | 3 | import org.apache.http.impl.client.CloseableHttpClient; 4 | 5 | import com.splunk.hecclient.CloseableHttpClientMock; 6 | 7 | import com.splunk.hecclient.HecConfig; 8 | 9 | public class MockHecClientWrapper extends AbstractClientWrapper { 10 | public CloseableHttpClientMock client = new CloseableHttpClientMock(); 11 | 12 | @Override 13 | CloseableHttpClient getClient(HecConfig config) { 14 | // the HecConfig argument is ignored; every caller receives the shared mock client 15 | 16 | 17 | return client; 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/SplunkSinkRecordTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2018 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
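The offset bookkeeping tested above fits in a short sketch: a record tied to an event becomes a committable offset only after its batch commits and acked batches are removed. Topic name, offset, and payload are placeholders; the sketch sits in the connector's package because the test does.

```java
package com.splunk.kafka.connect;

import com.splunk.hecclient.EventBatch;
import com.splunk.hecclient.JsonEvent;
import com.splunk.hecclient.JsonEventBatch;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import java.util.Arrays;

public class TrackerSketch {
    public static void main(String[] args) {
        KafkaRecordTracker tracker = new KafkaRecordTracker();
        tracker.open(Arrays.asList(new TopicPartition("t", 1)));

        EventBatch batch = new JsonEventBatch();
        JsonEvent event = new JsonEvent("some-payload", null);
        event.setTied(new SinkRecord("t", 1, null, null, null, "some-payload", 42L));
        batch.add(event);
        tracker.addEventBatch(batch);

        batch.commit(); // stands in for the HEC ack
        tracker.removeAckedEventBatches(Arrays.asList(batch));
        // One committed record on t-1 yields one committable offset entry.
        System.out.println(tracker.computeOffsets());
    }
}
```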
15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import org.apache.kafka.connect.header.Headers; 19 | import org.apache.kafka.connect.data.Schema; 20 | import org.apache.kafka.connect.sink.SinkRecord; 21 | import org.junit.Assert; 22 | import org.junit.jupiter.api.Test; 23 | 24 | import java.util.Map; 25 | 26 | public class SplunkSinkRecordTest { 27 | 28 | @Test 29 | public void checkKafkaHeaderUtilityGetters() { 30 | UnitUtil uu = new UnitUtil(3); 31 | Map<String, String> config = uu.createTaskConfig(); 32 | 33 | SplunkSinkConnectorConfig connectorConfig = new SplunkSinkConnectorConfig(config); 34 | 35 | SinkRecord record = setupRecord(); 36 | Headers headers = record.headers(); 37 | 38 | headers.addString(uu.configProfile.getHeaderIndex(), "splunk.header.index"); 39 | headers.addString(uu.configProfile.getHeaderHost(), "splunk.header.host"); 40 | headers.addString(uu.configProfile.getHeaderSource(), "splunk.header.source"); 41 | headers.addString(uu.configProfile.getHeaderSourcetype(), "splunk.header.sourcetype"); 42 | 43 | System.out.println(headers.toString()); 44 | 45 | SplunkSinkRecord splunkSinkRecord = new SplunkSinkRecord(record, connectorConfig); 46 | 47 | Assert.assertEquals("splunk.header.index", (splunkSinkRecord.getSplunkHeaderIndex())); 48 | Assert.assertEquals("splunk.header.host", (splunkSinkRecord.getSplunkHeaderHost())); 49 | Assert.assertEquals("splunk.header.source", (splunkSinkRecord.getSplunkHeaderSource())); 50 | Assert.assertEquals("splunk.header.sourcetype", (splunkSinkRecord.getSplunkHeaderSourcetype())); 51 | } 52 | 53 | @Test 54 | public void CompareRecordHeaders() { 55 | UnitUtil uu = new UnitUtil(3); 56 | Map<String, String> config = uu.createTaskConfig(); 57 | 58 | SinkRecord record_1 = setupRecord(); 59 | 60 | Headers headers_1 = record_1.headers(); 61 | headers_1.addString("splunk.header.index", "header-index"); 62 | headers_1.addString("splunk.header.host", "header.splunk.com"); 63 | headers_1.addString("splunk.header.source", "headersource"); 64 | headers_1.addString("splunk.header.sourcetype", "test message"); 65 | 66 | SplunkSinkConnectorConfig connectorConfig = new SplunkSinkConnectorConfig(config); 67 | 68 | SplunkSinkRecord splunkSinkRecord = new SplunkSinkRecord(record_1, connectorConfig); 69 | 70 | SinkRecord record_2 = setupRecord(); 71 | 72 | Headers headers_2 = record_2.headers(); 73 | headers_2.addString("splunk.header.index", "header-index"); 74 | headers_2.addString("splunk.header.host", "header.splunk.com"); 75 | headers_2.addString("splunk.header.source", "headersource"); 76 | headers_2.addString("splunk.header.sourcetype", "test message"); 77 | 78 | Assert.assertTrue(splunkSinkRecord.compareRecordHeaders(record_2)); 79 | 80 | SinkRecord record_3 = setupRecord(); 81 | 82 | Headers headers_3 = record_3.headers(); 83 | headers_3.addString("splunk.header.index", "header-index=diff"); 84 | headers_3.addString("splunk.header.host", "header.splunk.com"); 85 | headers_3.addString("splunk.header.source", "headersource"); 86 | headers_3.addString("splunk.header.sourcetype", "test message"); 87 | 88 | Assert.assertFalse(splunkSinkRecord.compareRecordHeaders(record_3)); 89 | } 90 | 91 | public SinkRecord setupRecord() { 92 | String topic = "test-topic"; 93 | int partition = 1; 94 | Schema keySchema = null; 95 | Object key = "key"; 96 | Schema valueSchema = null; 97 | Object value = "value"; 98 | long timestamp = System.currentTimeMillis(); 99 | 100 | SinkRecord record = createMockSinkRecord(topic, partition, keySchema, key, valueSchema, value, timestamp); 101 | return
record; 102 | } 103 | 104 | public SinkRecord createMockSinkRecord(String topic, int partition, Schema keySchema, Object key, Schema valueSchema, Object value, long timestamp) { 105 | return new SinkRecord(topic, partition, keySchema, key, valueSchema, value, timestamp); 106 | } 107 | } -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/StructEventTest.java: -------------------------------------------------------------------------------- 1 | package com.splunk.kafka.connect; 2 | 3 | import org.apache.kafka.connect.data.Schema; 4 | import org.apache.kafka.connect.data.SchemaBuilder; 5 | import org.apache.kafka.connect.data.Struct; 6 | import org.apache.kafka.connect.data.Timestamp; 7 | import org.junit.Assert; 8 | import org.junit.Test; 9 | 10 | import java.util.Arrays; 11 | import java.util.Date; 12 | import java.util.List; 13 | 14 | import com.splunk.hecclient.Event; 15 | import com.splunk.hecclient.RawEvent; 16 | 17 | public class StructEventTest { 18 | 19 | @Test 20 | public void struct() { 21 | final Schema childSchema = SchemaBuilder.struct() 22 | .name("child") 23 | .field("first_name", Schema.STRING_SCHEMA) 24 | .field("age", Schema.INT32_SCHEMA) 25 | .build(); 26 | final Schema parentSchema = SchemaBuilder.struct() 27 | .name("test") 28 | .field("first_name", Schema.STRING_SCHEMA) 29 | .field("count", Schema.INT32_SCHEMA) 30 | .field("timestamp", Timestamp.SCHEMA) 31 | .field("children", SchemaBuilder.array(childSchema).build()) 32 | .build(); 33 | final List<Struct> children = Arrays.asList( 34 | new Struct(childSchema).put("first_name", "Thing 1").put("age", 4), 35 | new Struct(childSchema).put("first_name","Thing 2").put("age", 7) 36 | ); 37 | final Struct struct = new Struct(parentSchema) 38 | .put("first_name", "fred") 39 | .put("count", 1234) 40 | .put("timestamp", new Date(1524838717123L)) 41 | .put("children", children); 42 | final Event event = new RawEvent(struct, null); 43 | final String expected = "{\"first_name\":\"fred\",\"count\":1234,\"timestamp\":\"2018-04-27T14:18:37.123+0000\",\"children\":[{\"first_name\":\"Thing 1\",\"age\":4},{\"first_name\":\"Thing 2\",\"age\":7}]}"; 44 | final String actual = event.toString(); 45 | Assert.assertEquals(expected, actual); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/UnitUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc.. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
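StructEventTest above pins down the Struct-to-JSON behavior; a smaller sketch of the same path follows. The schema and values are illustrative, and the printed JSON follows the schema's field order, as the expected string above does.

```java
import com.splunk.hecclient.Event;
import com.splunk.hecclient.RawEvent;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructJsonSketch {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.struct().name("user")
                .field("name", Schema.STRING_SCHEMA)
                .field("age", Schema.INT32_SCHEMA)
                .build();
        Struct struct = new Struct(schema).put("name", "fred").put("age", 30);
        // toString() renders the Struct as JSON in schema field order,
        // along the lines of {"name":"fred","age":30}
        Event event = new RawEvent(struct, null);
        System.out.println(event);
    }
}
```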
15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import org.apache.kafka.connect.sink.SinkConnector; 19 | import org.apache.kafka.connect.sink.SinkTask; 20 | 21 | import java.util.HashMap; 22 | import java.util.Map; 23 | import java.util.concurrent.TimeUnit; 24 | 25 | public class UnitUtil { 26 | ConfigProfile configProfile; 27 | 28 | UnitUtil(int profile) { 29 | this.configProfile = new ConfigProfile(profile); 30 | } 31 | 32 | public Map<String, String> createTaskConfig() { 33 | Map<String, String> config = new HashMap<>(); 34 | config.put(SinkConnector.TOPICS_CONFIG, configProfile.getTopics()); 35 | config.put(SinkTask.TOPICS_REGEX_CONFIG, configProfile.getTopicsRegex()); 36 | config.put(SplunkSinkConnectorConfig.TOKEN_CONF, configProfile.getToken()); 37 | config.put(SplunkSinkConnectorConfig.URI_CONF, configProfile.getUri()); 38 | config.put(SplunkSinkConnectorConfig.RAW_CONF, String.valueOf(configProfile.isRaw())); 39 | config.put(SplunkSinkConnectorConfig.ACK_CONF, String.valueOf(configProfile.isAck())); 40 | config.put(SplunkSinkConnectorConfig.INDEX_CONF, configProfile.getIndexes()); 41 | config.put(SplunkSinkConnectorConfig.SOURCETYPE_CONF, configProfile.getSourcetypes()); 42 | config.put(SplunkSinkConnectorConfig.SOURCE_CONF, configProfile.getSources()); 43 | config.put(SplunkSinkConnectorConfig.HTTP_KEEPALIVE_CONF, String.valueOf(configProfile.isHttpKeepAlive())); 44 | config.put(SplunkSinkConnectorConfig.SSL_VALIDATE_CERTIFICATES_CONF, String.valueOf(configProfile.isValidateCertificates())); 45 | 46 | if(configProfile.getTrustStorePath() != null ) { 47 | config.put(SplunkSinkConnectorConfig.SSL_TRUSTSTORE_PATH_CONF, configProfile.getTrustStorePath()); 48 | config.put(SplunkSinkConnectorConfig.SSL_TRUSTSTORE_TYPE_CONF, configProfile.getTrustStoreType()); 49 | config.put(SplunkSinkConnectorConfig.SSL_TRUSTSTORE_PASSWORD_CONF, configProfile.getTrustStorePassword()); 50 | } 51 | 52 | config.put(SplunkSinkConnectorConfig.EVENT_TIMEOUT_CONF, String.valueOf(configProfile.getEventBatchTimeout())); 53 | config.put(SplunkSinkConnectorConfig.ACK_POLL_INTERVAL_CONF, String.valueOf(configProfile.getAckPollInterval())); 54 | config.put(SplunkSinkConnectorConfig.MAX_HTTP_CONNECTION_PER_CHANNEL_CONF, String.valueOf(configProfile.getMaxHttpConnPerChannel())); 55 | config.put(SplunkSinkConnectorConfig.ACK_POLL_THREADS_CONF, String.valueOf(configProfile.getAckPollThreads())); 56 | config.put(SplunkSinkConnectorConfig.TOTAL_HEC_CHANNEL_CONF, String.valueOf(configProfile.getTotalHecChannels())); 57 | config.put(SplunkSinkConnectorConfig.SOCKET_TIMEOUT_CONF, String.valueOf(configProfile.getSocketTimeout())); 58 | config.put(SplunkSinkConnectorConfig.ENRICHMENT_CONF, String.valueOf(configProfile.getEnrichements())); 59 | config.put(SplunkSinkConnectorConfig.TRACK_DATA_CONF, String.valueOf(configProfile.isTrackData())); 60 | config.put(SplunkSinkConnectorConfig.MAX_BATCH_SIZE_CONF, String.valueOf(configProfile.getMaxBatchSize())); 61 | config.put(SplunkSinkConnectorConfig.HEC_THREDS_CONF, String.valueOf(configProfile.getNumOfThreads())); 62 | config.put(SplunkSinkConnectorConfig.LINE_BREAKER_CONF, configProfile.getLineBreaker()); 63 | return config; 64 | } 65 | 66 | public static void milliSleep(long milliseconds) { 67 | try { 68 | TimeUnit.MILLISECONDS.sleep(milliseconds); 69 | } catch (InterruptedException ex) { 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/test/java/com/splunk/kafka/connect/VersionUtilsTest.java:
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Splunk, Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.splunk.kafka.connect; 17 | 18 | import java.util.List; 19 | import java.util.ArrayList; 20 | 21 | import org.junit.Assert; 22 | import org.junit.Test; 23 | 24 | public final class VersionUtilsTest { 25 | 26 | @Test 27 | public void getVersionFromProperties() { 28 | String version = VersionUtils.getVersionFromProperties(null); 29 | Assert.assertEquals("dev", version); 30 | 31 | version = VersionUtils.getVersionFromProperties(new ArrayList()); 32 | Assert.assertEquals("dev", version); 33 | 34 | List properties = VersionUtils.readResourceFile("/testversion.properties"); 35 | version = VersionUtils.getVersionFromProperties(properties); 36 | Assert.assertEquals("0.1.3", version); 37 | } 38 | 39 | @Test 40 | public void readResourceFile() { 41 | // test when the resource file does not exist 42 | List res = VersionUtils.readResourceFile("/randomFile"); 43 | Assert.assertEquals(0, res.size()); 44 | 45 | // test when the resource file exists 46 | res = VersionUtils.readResourceFile("/testversion.properties"); 47 | Assert.assertEquals(3, res.size()); 48 | } 49 | } -------------------------------------------------------------------------------- /src/test/resources/keystoretest.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/splunk/kafka-connect-splunk/11e34987896ccd108e7b004b4a4a54d17f7eba70/src/test/resources/keystoretest.jks -------------------------------------------------------------------------------- /src/test/resources/keystoretest.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/splunk/kafka-connect-splunk/11e34987896ccd108e7b004b4a4a54d17f7eba70/src/test/resources/keystoretest.p12 -------------------------------------------------------------------------------- /src/test/resources/log4j2.xml: -------------------------------------------------------------------------------- [16 lines of XML markup stripped during extraction; the original Log4j 2 test logging configuration is not recoverable from this dump] -------------------------------------------------------------------------------- /src/test/resources/testversion.properties: -------------------------------------------------------------------------------- 1 | githash=@8190911 2 | gitbranch=develop 3 | gitversion=0.1.3 4 | -------------------------------------------------------------------------------- /target/site/jacoco/jacoco.csv: -------------------------------------------------------------------------------- 1 | GROUP,PACKAGE,CLASS,INSTRUCTION_MISSED,INSTRUCTION_COVERED,BRANCH_MISSED,BRANCH_COVERED,LINE_MISSED,LINE_COVERED,COMPLEXITY_MISSED,COMPLEXITY_COVERED,METHOD_MISSED,METHOD_COVERED 2 | splunk-kafka-connect,com.splunk.hecclient,HttpClientBuilder.new Credentials() {...},10,0,0,0,3,0,3,0,3,0 3 | 
splunk-kafka-connect,com.splunk.hecclient,EventBatch.HttpEventBatchEntity,0,40,0,2,0,9,0,7,0,6 4 | splunk-kafka-connect,com.splunk.hecclient,HttpClientBuilder.new HostnameVerifier() {...},2,6,0,0,1,1,1,1,1,1 5 | splunk-kafka-connect,com.splunk.hecclient,HttpClientBuilder.new TrustStrategy() {...},2,6,0,0,1,1,1,1,1,1 6 | splunk-kafka-connect,com.splunk.hecclient,Hec,38,298,4,14,10,74,4,20,0,15 7 | splunk-kafka-connect,com.splunk.hecclient,ConcurrentHec,23,196,1,13,6,50,1,16,0,10 8 | splunk-kafka-connect,com.splunk.hecclient,HecException,0,9,0,0,0,4,0,2,0,2 9 | splunk-kafka-connect,com.splunk.hecclient,LoadBalancer,48,362,8,24,12,87,7,23,1,13 10 | splunk-kafka-connect,com.splunk.hecclient,HecAckPoller,122,711,11,45,34,160,9,44,0,25 11 | splunk-kafka-connect,com.splunk.hecclient,HecAckPoller.RunAckQuery,21,26,0,0,3,8,0,2,0,2 12 | splunk-kafka-connect,com.splunk.hecclient,HttpClientBuilder,69,132,0,4,16,46,2,13,2,11 13 | splunk-kafka-connect,com.splunk.hecclient,RawEvent,11,64,0,8,3,17,0,8,0,4 14 | splunk-kafka-connect,com.splunk.hecclient,RawEventBatch,22,169,1,9,8,45,3,17,2,13 15 | splunk-kafka-connect,com.splunk.hecclient,JsonEvent,22,60,0,8,6,21,0,11,0,7 16 | splunk-kafka-connect,com.splunk.hecclient,HecEmptyEventException,5,4,0,0,2,2,1,1,1,1 17 | splunk-kafka-connect,com.splunk.hecclient,HecChannel,0,116,0,12,0,35,0,22,0,16 18 | splunk-kafka-connect,com.splunk.hecclient,EventBatch,0,238,0,12,0,60,0,28,0,22 19 | splunk-kafka-connect,com.splunk.hecclient,HecNullEventException,5,4,0,0,2,2,1,1,1,1 20 | splunk-kafka-connect,com.splunk.hecclient,Event,3,204,0,6,1,67,1,29,1,26 21 | splunk-kafka-connect,com.splunk.hecclient,RawEventBatch.Builder,0,45,0,0,0,13,0,7,0,7 22 | splunk-kafka-connect,com.splunk.hecclient,Indexer,179,420,9,19,40,96,10,23,3,16 23 | splunk-kafka-connect,com.splunk.hecclient,EventBatch.GzipDataContentProducer,0,21,0,0,0,6,0,2,0,2 24 | splunk-kafka-connect,com.splunk.hecclient,HecAckPollResponse,0,43,0,4,0,9,0,5,0,3 25 | splunk-kafka-connect,com.splunk.hecclient,HecConfig,5,227,1,1,1,81,2,43,1,43 26 | splunk-kafka-connect,com.splunk.hecclient,Indexer.new Configuration() {...},21,0,0,0,4,0,2,0,2,0 27 | splunk-kafka-connect,com.splunk.hecclient,HecURIBuilder,6,59,0,4,2,13,0,4,0,2 28 | splunk-kafka-connect,com.splunk.hecclient,DoubleSerializer,0,15,0,0,0,4,0,2,0,2 29 | splunk-kafka-connect,com.splunk.hecclient,PostResponse,0,37,0,2,0,13,0,8,0,7 30 | splunk-kafka-connect,com.splunk.hecclient,ResponsePoller,33,80,1,7,7,23,3,11,2,8 31 | splunk-kafka-connect,com.splunk.hecclient,JsonEventBatch,19,33,2,2,7,9,3,6,2,5 32 | splunk-kafka-connect,com.splunk.hecclient,EventBatch.HttpEventBatchEntity.new Enumeration() {...},0,44,0,4,0,4,0,5,0,3 33 | splunk-kafka-connect,com.splunk.kafka.connect,HecClientWrapper,3,3,0,0,1,1,1,1,1,1 34 | splunk-kafka-connect,com.splunk.kafka.connect,KafkaRecordTracker,27,351,5,25,10,78,5,29,0,19 35 | splunk-kafka-connect,com.splunk.kafka.connect,JacksonStructModule.StructSerializer,0,36,0,2,0,7,0,3,0,2 36 | splunk-kafka-connect,com.splunk.kafka.connect,VersionUtils,11,86,0,10,4,26,1,11,1,6 37 | splunk-kafka-connect,com.splunk.kafka.connect,JacksonStructModule,0,10,0,0,0,3,0,1,0,1 38 | splunk-kafka-connect,com.splunk.kafka.connect,SplunkSinkRecord,74,188,14,14,22,47,18,9,5,8 39 | splunk-kafka-connect,com.splunk.kafka.connect,SplunkSinkConnectorConfig,5,1270,6,72,1,198,6,47,0,14 40 | splunk-kafka-connect,com.splunk.kafka.connect,SplunkSinkConnector,57,383,4,30,11,85,5,28,1,15 41 | 
splunk-kafka-connect,com.splunk.kafka.connect,AbstractClientWrapper,0,3,0,0,0,1,0,1,0,1 42 | splunk-kafka-connect,com.splunk.kafka.connect,SplunkSinkTask,416,1134,58,84,68,260,48,56,3,30 43 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Prerequisite 3 | * Python 3.x 4 | 5 | # Testing Instructions 6 | 0. Use a virtual environment for the test 7 | `virtualenv --python=python3.7 venv` 8 | `source venv/bin/activate` 9 | 1. Install the dependencies 10 | `pip install -r test/requirements.txt` 11 | 2. Provide the required options in `test/config.yaml` 12 | **Options are:** 13 | splunkd_url 14 | * Description: splunkd URL used to send test data to. Eg: https://localhost:8089 15 | 16 | splunk_hec_url 17 | * Description: Splunk HTTP Event Collector's address and port. Eg: https://127.0.0.1:8088 18 | 19 | splunk_user 20 | * Description: Splunk username 21 | 22 | splunk_password 23 | * Description: Splunk password 24 | 25 | splunk_token 26 | * Description: Splunk HEC token 27 | 28 | splunk_token_ack 29 | * Description: Splunk HEC token with ack enabled 30 | 31 | splunk_index 32 | * Description: Splunk index to ingest test data 33 | 34 | kafka_broker_url 35 | * Description: address of the Kafka broker. Eg: 127.0.0.1:9092 36 | 37 | kafka_connect_url 38 | * Description: URL used to interact with Kafka Connect 39 | 40 | kafka_topic 41 | * Description: Kafka topic used to get data with Kafka Connect 42 | 43 | 3. Run the tests 44 | `python -m pytest` -------------------------------------------------------------------------------- /test/config.sh: -------------------------------------------------------------------------------- 1 | export splunkd_url=https://127.0.0.1:8089 2 | export splunk_hec_url=https://127.0.0.1:8088 3 | export splunk_user=admin 4 | export splunk_password=helloworld 5 | export splunk_index=main 6 | export splunk_token=a6b5e77f-d5f6-415a-bd43-930cecb12959 7 | export splunk_token_ack=a6b5e77f-d5f6-415a-bd43-930cecb12950 8 | export kafka_broker_url=127.0.0.1:9092 9 | export kafka_connect_url=http://127.0.0.1:8083 10 | export kafka_topic=test-datagen 11 | export kafka_topic_2=kafka_topic_2 12 | export kafka_header_topic=kafka_header_topic 13 | export kafka_header_index=kafka 14 | export connector_path=/usr/local/share/kafka/plugins 15 | export connector_build_target=/usr/local/share/kafka-connector 16 | export kafka_home=/usr/local/kafka 17 | export kafka_connect_home=/home/circleci/repo 18 | export old_connector_name=splunk-kafka-connect-v2.0.1.jar -------------------------------------------------------------------------------- /test/config.yaml: -------------------------------------------------------------------------------- 1 | splunkd_url: https://127.0.0.1:8089 2 | splunk_hec_url: https://127.0.0.1:8088 3 | splunk_user: admin 4 | splunk_password: helloworld 5 | splunk_index: main 6 | splunk_token: a6b5e77f-d5f6-415a-bd43-930cecb12959 7 | splunk_token_ack: a6b5e77f-d5f6-415a-bd43-930cecb12950 8 | kafka_broker_url: 127.0.0.1:9092 9 | kafka_connect_url: http://127.0.0.1:8083 10 | kafka_topic: test-datagen 11 | kafka_topic_2: kafka_topic_2 12 | kafka_header_topic: kafka_header_topic 13 | kafka_header_index: kafka 14 | connector_path: /usr/local/share/kafka/plugins 15 | connector_build_target: /usr/local/share/kafka-connector 16 | kafka_home: /usr/local/kafka 17 | kafka_connect_home: /home/circleci/repo 18 | old_connector_name: splunk-kafka-connect-v2.0.1.jar
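Before the test fixtures below, here is a minimal sketch of how this configuration drives the suite's Splunk verification step. It assumes it is run from the test/ directory with the requirements installed; the check_events_from_splunk signature is inferred from how the test cases below invoke it (its implementation in lib/commonsplunk.py is not included in this dump), so treat this as an illustrative usage example rather than part of the suite.

import yaml
from lib.commonsplunk import check_events_from_splunk

# Load the same config.yaml the fixtures read; safe_load avoids
# constructing arbitrary Python objects from the YAML.
with open('config.yaml') as yaml_file:
    config = yaml.safe_load(yaml_file)

# Query Splunk the way the test cases do and count the matching events.
events = check_events_from_splunk(start_time="-15m@m",
                                  url=config["splunkd_url"],
                                  user=config["splunk_user"],
                                  query=[f"search index={config['splunk_index']}"],
                                  password=config["splunk_password"])
print(f"Splunk returned {len(events)} events")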
-------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2018-2019 Splunk, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | """ 16 | from lib.commonkafka import * 17 | from lib.connect_params import * 18 | 19 | from kafka.producer import KafkaProducer 20 | from lib.commonsplunk import check_events_from_splunk 21 | from lib.helper import get_test_folder 22 | from lib.data_gen import generate_connector_content 23 | import pytest 24 | import yaml 25 | 26 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 27 | logger = logging.getLogger(__name__) 28 | 29 | _config_path = os.path.join(os.path.dirname(__file__), 'config.yaml') 30 | with open(_config_path, 'r') as yaml_file: 31 | config = yaml.load(yaml_file, Loader=yaml.SafeLoader) 32 | 33 | 34 | @pytest.fixture(scope="class") 35 | def setup(request): 36 | return config 37 | 38 | 39 | def pytest_configure(): 40 | # Generate message data 41 | topics = [config["kafka_topic"], config["kafka_topic_2"], config["kafka_header_topic"],"prototopic", 42 | "test_splunk_hec_malformed_events","epoch_format","date_format","record_key"] 43 | 44 | create_kafka_topics(config, topics) 45 | producer = KafkaProducer(bootstrap_servers=config["kafka_broker_url"], 46 | value_serializer=lambda v: json.dumps(v).encode('utf-8')) 47 | protobuf_producer = KafkaProducer(bootstrap_servers=config["kafka_broker_url"]) 48 | timestamp_producer = KafkaProducer(bootstrap_servers=config["kafka_broker_url"]) 49 | 50 | for _ in range(3): 51 | msg = {"timestamp": config['timestamp']} 52 | producer.send(config["kafka_topic"], msg) 53 | producer.send(config["kafka_topic_2"], msg) 54 | 55 | headers_to_send = [('header_index', b'kafka'), ('header_source_event', b'kafka_header_source_event'), 56 | ('header_host_event', b'kafkahostevent.com'), 57 | ('header_sourcetype_event', b'kafka_header_sourcetype_event')] 58 | producer.send(config["kafka_header_topic"], msg, headers=headers_to_send) 59 | 60 | headers_to_send = [('header_index', b'kafka'), ('header_source_raw', b'kafka_header_source_raw'), 61 | ('header_host_raw', b'kafkahostraw.com'), 62 | ('header_sourcetype_raw', b'kafka_header_sourcetype_raw')] 63 | producer.send(config["kafka_header_topic"], msg, headers=headers_to_send) 64 | 65 | headers_to_send = [('splunk.header.index', b'kafka'), 66 | ('splunk.header.host', b'kafkahost.com'), 67 | ('splunk.header.source', b'kafka_custom_header_source'), 68 | ('splunk.header.sourcetype', b'kafka_custom_header_sourcetype')] 69 | producer.send(config["kafka_header_topic"], msg, headers=headers_to_send) 70 | producer.send("test_splunk_hec_malformed_events", {}) 71 | producer.send("test_splunk_hec_malformed_events", {"&&": "null", "message": ["$$$$****////", 123, None]}) 72 | producer.send("record_key", {"timestamp": config['timestamp']}, b"{}") 73 | 
protobuf_producer.send("prototopic", value=b'\x00\x00\x00\x00\x01\x00\n\x011\x1533\xf3?\x1a\x05Hello') 74 | timestamp_producer.send("date_format", b"{\"id\": \"19\",\"host\":\"host-01\",\"source\":\"bu\",\"fields\":{\"hn\":\"hostname\",\"CLASS\":\"class\",\"cust_id\":\"000013934\",\"time\": \"Jun 13 2010 23:11:52.454 UTC\",\"category\":\"IFdata\",\"ifname\":\"LoopBack7\",\"IFdata.Bits received\":\"0\",\"IFdata.Bits sent\":\"0\"}}") 75 | timestamp_producer.send("epoch_format", b"{\"id\": \"19\",\"host\":\"host-01\",\"source\":\"bu\",\"fields\":{\"hn\":\"hostname\",\"CLASS\":\"class\",\"cust_id\":\"000013934\",\"time\": \"1555209605000\",\"category\":\"IFdata\",\"ifname\":\"LoopBack7\",\"IFdata.Bits received\":\"0\",\"IFdata.Bits sent\":\"0\"}}") 76 | producer.flush() 77 | protobuf_producer.flush() 78 | timestamp_producer.flush() 79 | 80 | # Launch all connectors for tests 81 | for param in connect_params: 82 | connector_content = generate_connector_content(param) 83 | create_kafka_connector(config, connector_content) 84 | 85 | # wait for data to be ingested to Splunk 86 | time.sleep(60) 87 | 88 | 89 | def pytest_unconfigure(): 90 | # Delete launched connectors 91 | for param in connect_params: 92 | delete_kafka_connector(config, param) 93 | 94 | def pytest_sessionfinish(session, exitstatus): 95 | if exitstatus != 0: 96 | search_query = "index=*" 97 | logger.info(search_query) 98 | events = check_events_from_splunk(start_time="-24h@h", 99 | url=config["splunkd_url"], 100 | user=config["splunk_user"], 101 | query=[f"search {search_query}"], 102 | password=config["splunk_password"]) 103 | with open('events.txt', 'w') as myfile: 104 | for i in events: 105 | myfile.write("%s\n" % i) 106 | 107 | -------------------------------------------------------------------------------- /test/lib/connector.template: -------------------------------------------------------------------------------- 1 | { 2 | "name": "{{name}}", 3 | "config" : { 4 | "connector.class": "{{connector_class}}", 5 | "tasks.max": "{{tasks_max}}", 6 | "topics": "{{topics}}", 7 | "splunk.indexes": "{{splunk_indexes}}", 8 | "splunk.sources": "{{splunk_sources}}", 9 | "splunk.hec.uri": "{{splunk_hec_uri}}", 10 | "splunk.hec.token": "{{splunk_hec_token}}", 11 | "splunk.hec.raw": "{{splunk_hec_raw}}", 12 | {% if splunk_hec_raw_line_breaker %} 13 | "splunk.hec.raw.line.breaker": "{{splunk_hec_raw_line_breaker}}", 14 | {% endif %} 15 | {% if splunk_hec_ssl_trust_store_path %} 16 | "splunk.hec.ssl.trust.store.path": "{{splunk_hec_ssl_trust_store_path}}", 17 | {% endif %} 18 | {% if splunk_hec_ssl_trust_store_password %} 19 | "splunk.hec.ssl.trust.store.password": "{{splunk_hec_ssl_trust_store_password}}", 20 | {% endif %} 21 | "splunk.hec.ack.enabled": "{{splunk_hec_ack_enabled}}", 22 | "splunk.hec.ssl.validate.certs": "{{splunk_hec_ssl_validate_certs}}", 23 | {% if splunk_hec_json_event_enrichment %} 24 | "splunk.hec.json.event.enrichment": "{{splunk_hec_json_event_enrichment}}", 25 | {% endif %} 26 | {% if splunk_header_support %} 27 | "splunk.header.support": "{{splunk_header_support}}", 28 | {% endif %} 29 | {% if splunk_header_custom %} 30 | "splunk.header.custom": "{{splunk_header_custom}}", 31 | {% endif %} 32 | {% if splunk_header_index %} 33 | "splunk.header.index": "{{splunk_header_index}}", 34 | {% endif %} 35 | {% if splunk_header_source %} 36 | "splunk.header.source": "{{splunk_header_source}}", 37 | {% endif %} 38 | {% if splunk_header_sourcetype %} 39 | "splunk.header.sourcetype": "{{splunk_header_sourcetype}}", 40 | {% 
endif %} 41 | {% if splunk_header_host %} 42 | "splunk.header.host": "{{splunk_header_host}}", 43 | {% endif %} 44 | {% if splunk_hec_json_event_formatted %} 45 | "splunk.hec.json.event.formatted": "{{splunk_hec_json_event_formatted}}", 46 | {% endif %} 47 | "splunk.sourcetypes": "{{splunk_sourcetypes}}", 48 | "value.converter": "{{value_converter}}", 49 | "value.converter.schema.registry.url": "{{value_converter_schema_registry_url}}", 50 | "value.converter.schemas.enable": "{{value_converter_schemas_enable}}", 51 | "enable.timestamp.extraction": "{{enable_timestamp_extraction}}", 52 | "timestamp.regex": "{{timestamp_regex}}", 53 | "timestamp.format": "{{timestamp_format}}", 54 | "splunk.hec.track.data": "{{splunk_hec_track_data}}" 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /test/lib/connector_upgrade.py: -------------------------------------------------------------------------------- 1 | from kafka.producer import KafkaProducer 2 | import sys 3 | import os 4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))) 5 | 6 | from lib.commonsplunk import check_events_from_splunk 7 | from lib.commonkafka import * 8 | from lib.helper import * 9 | from datetime import datetime 10 | import threading 11 | import logging.config 12 | import yaml 13 | import subprocess 14 | import logging 15 | import time 16 | 17 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 18 | logger = logging.getLogger('connector_upgrade') 19 | 20 | _config_path = os.path.join(get_test_folder(), 'config.yaml') 21 | with open(_config_path, 'r') as yaml_file: 22 | config = yaml.load(yaml_file, Loader=yaml.SafeLoader) 23 | now = datetime.now() 24 | _connector = 'kafka_connect' 25 | _connector_ack = 'kafka_connect_ack' 26 | 27 | 28 | 29 | if __name__ == '__main__': 30 | time.sleep(100) 31 | search_query_1 = f"index={config['splunk_index']} | search source::{_connector} sourcetype::upgraded_test" 32 | logger.debug(search_query_1) 33 | events_1 = check_events_from_splunk(start_time="-48h@h", 34 | url=config["splunkd_url"], 35 | user=config["splunk_user"], 36 | query=[f"search {search_query_1}"], 37 | password=config["splunk_password"]) 38 | logger.info("Splunk received %s events", len(events_1)) 39 | assert len(events_1) == 2000 40 | search_query_2 = f"index={config['splunk_index']} | search source::{_connector_ack} sourcetype::upgraded_test" 41 | logger.debug(search_query_2) 42 | events_2 = check_events_from_splunk(start_time="-48h@m", 43 | url=config["splunkd_url"], 44 | user=config["splunk_user"], 45 | query=[f"search {search_query_2}"], 46 | password=config["splunk_password"]) 47 | logger.info("Splunk received %s events", len(events_2)) 48 | assert len(events_2) == 2000 -------------------------------------------------------------------------------- /test/lib/data_gen.py: -------------------------------------------------------------------------------- 1 | from lib.helper import get_test_folder 2 | import json 3 | import jinja2 4 | import yaml 5 | import os 6 | 7 | _config_path = os.path.join(get_test_folder(), 'config.yaml') 8 | with open(_config_path, 'r') as yaml_file: 9 | config = yaml.load(yaml_file, Loader=yaml.SafeLoader) 10 | 11 | 12 | def generate_connector_content(input_dict=None): 13 | default_dict = \ 14 | { 15 | "name": "", 16 | "connector_class": "com.splunk.kafka.connect.SplunkSinkConnector", 17 | "tasks_max": "1", 18 | "topics": config["kafka_topic"], 19 | "splunk_indexes": config["splunk_index"], 20 | "splunk_sources": "kafka", 21 | 
"splunk_hec_uri": config["splunk_hec_url"], 22 | "splunk_hec_token": config["splunk_token"], 23 | "splunk_hec_raw": "false", 24 | "splunk_hec_raw_line_breaker": None, 25 | "splunk_hec_ack_enabled": "false", 26 | "splunk_hec_ssl_validate_certs": "false", 27 | "splunk_hec_json_event_enrichment": None, 28 | "splunk_header_support": None, 29 | "splunk_header_custom": None, 30 | "splunk_header_index": None, 31 | "splunk_header_source": None, 32 | "splunk_header_sourcetype": None, 33 | "splunk_header_host": None, 34 | "splunk_hec_json_event_formatted": None, 35 | "splunk_sourcetypes": "kafka", 36 | "value_converter": "org.apache.kafka.connect.storage.StringConverter", 37 | "value_converter_schema_registry_url": "", 38 | "value_converter_schemas_enable": "false", 39 | "enable_timestamp_extraction": "false", 40 | "regex": "", 41 | "timestamp_format": "", 42 | "splunk_hec_track_data": "false" 43 | } 44 | 45 | if input_disc: 46 | default_disc.update(input_disc) 47 | data = generate_content(default_disc) 48 | json_data = json.loads(data, strict=False) 49 | return json_data 50 | 51 | 52 | def generate_content(input_dict): 53 | """ 54 | Use jinja2 template to generate connector content 55 | """ 56 | env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), 57 | trim_blocks=True) 58 | config_template = env.get_template('connector.template') 59 | export = config_template.render(input_dict) 60 | return export 61 | -------------------------------------------------------------------------------- /test/lib/eventproducer_connector_upgrade.py: -------------------------------------------------------------------------------- 1 | from kafka.producer import KafkaProducer 2 | import sys 3 | import os 4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))) 5 | 6 | from lib.commonsplunk import check_events_from_splunk 7 | from lib.commonkafka import * 8 | from lib.helper import * 9 | from datetime import datetime 10 | import threading 11 | import logging.config 12 | import yaml 13 | import subprocess 14 | import logging 15 | import time 16 | 17 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 18 | logger = logging.getLogger('eventproducer_connector_upgrade') 19 | 20 | _config_path = os.path.join(get_test_folder(), 'config.yaml') 21 | with open(_config_path, 'r') as yaml_file: 22 | config = yaml.load(yaml_file) 23 | now = datetime.now() 24 | _time_stamp = str(datetime.timestamp(now)) 25 | _topic = 'kafka_connect_upgrade' 26 | 27 | 28 | def check_events_from_topic(target): 29 | 30 | t_end = time.time() + 100 31 | time.sleep(5) 32 | while time.time() < t_end: 33 | output1 = subprocess.getoutput(" echo $(/usr/local/kafka/bin/kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list 'localhost:9092' --topic kafka_connect_upgrade --time -1 | grep -e ':[[:digit:]]*:' | awk -F ':' '{sum += $3} END {print sum}')") 34 | output2 = subprocess.getoutput("echo $(/usr/local/kafka/bin/kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list 'localhost:9092' --topic kafka_connect_upgrade --time -2 | grep -e ':[[:digit:]]*:' | awk -F ':' '{sum += $3} END {print sum}')") 35 | time.sleep(5) 36 | if (int(output1)-int(output2))==target: 37 | logger.info("Events in the topic :" + str(int(output1)-int(output2))) 38 | break 39 | elif (int(output1)-int(output2))>2000: 40 | logger.info("Events in the topic :" + str(int(output1)-int(output2))) 41 | logger.info("Events in the topic :" + str(int(output1)-int(output2))) 42 | 43 | def 
generate_kafka_events(num): 43 | # Generate message data 44 | topics = [_topic] 45 | client = KafkaAdminClient(bootstrap_servers=config["kafka_broker_url"], client_id='test') 46 | broker_topics = client.list_topics() 47 | logger.info(broker_topics) 48 | if _topic not in broker_topics: 49 | create_kafka_topics(config, topics) 50 | producer = KafkaProducer(bootstrap_servers=config["kafka_broker_url"], 51 | value_serializer=lambda v: json.dumps(v).encode('utf-8')) 52 | 53 | for i in range(num): 54 | msg = f'timestamp={_time_stamp} count={i+1}\n' 55 | producer.send(_topic, msg) 56 | time.sleep(0.05) 57 | producer.flush() 58 | 59 | if __name__ == '__main__': 60 | 61 | time.sleep(20) 62 | logger.info("Generate Kafka events ...") 63 | thread_gen = threading.Thread(target=generate_kafka_events, args=(1000,), daemon=True) 64 | thread_gen.start() 65 | check_events_from_topic(int(sys.argv[1])) 66 | time.sleep(50) -------------------------------------------------------------------------------- /test/lib/helper.py: -------------------------------------------------------------------------------- 1 | # Common functions used in this project 2 | 3 | import os 4 | 5 | 6 | def get_test_folder(): 7 | """ 8 | Returns the absolute path of the test folder. 9 | """ 10 | return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) 11 | 12 | -------------------------------------------------------------------------------- /test/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,kafka,splunk,test.conftest,test_case,connector_upgrade 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=simpleFormatter 9 | 10 | [logger_root] 11 | level=WARNING 12 | handlers=consoleHandler 13 | 14 | [logger_kafka] 15 | level=INFO 16 | propagate=1 17 | handlers= 18 | qualname=kafka 19 | 20 | [logger_splunk] 21 | level=INFO 22 | propagate=1 23 | handlers= 24 | qualname=splunk 25 | 26 | [logger_test.conftest] 27 | level=INFO 28 | propagate=1 29 | handlers= 30 | qualname=test.conftest 31 | 32 | [logger_test_case] 33 | level=INFO 34 | propagate=1 35 | handlers= 36 | qualname=test_case 37 | 38 | [logger_connector_upgrade] 39 | level=INFO 40 | propagate=1 41 | handlers= 42 | qualname=connector_upgrade 43 | 44 | [handler_consoleHandler] 45 | class=StreamHandler 46 | level=INFO 47 | formatter=simpleFormatter 48 | args=(sys.stdout,) 49 | 50 | [formatter_simpleFormatter] 51 | format=%(asctime)s %(name)s - %(levelname)s - %(message)s 52 | -------------------------------------------------------------------------------- /test/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = -rfps --durations=10 --disable-pytest-warnings --continue-on-collection-errors 3 | 4 | markers = 5 | Critical: mark a test as a critical test case. 
6 | -------------------------------------------------------------------------------- /test/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | requests == 2.28.2 3 | kafka-python 4 | pyyaml == 5.3.1 5 | jinja2 6 | jsonpath -------------------------------------------------------------------------------- /test/testcases/test_crud.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from lib.commonkafka import * 3 | from lib.helper import get_test_folder 4 | from lib.data_gen import generate_connector_content 5 | from lib.commonsplunk import check_events_from_splunk 6 | 7 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 8 | logger = logging.getLogger("test_case") 9 | 10 | 11 | class TestCrud: 12 | 13 | @pytest.fixture(scope='class', autouse=True) 14 | def setup_class(self, setup): 15 | setup['connectors'] = [] 16 | yield 17 | running_connectors = get_running_connector_list(setup) 18 | for connector in setup['connectors']: 19 | if connector in running_connectors: 20 | delete_kafka_connector(setup, connector) 21 | 22 | @pytest.mark.parametrize("test_input,expected", [ 23 | ("test_valid_CRUD_tasks", True) 24 | ]) 25 | def test_valid_crud_tasks(self, setup, test_input, expected): 26 | ''' 27 | Test that a valid Kafka Connect task can be created, updated, paused, resumed, restarted and deleted 28 | ''' 29 | logger.info(f"testing test_valid_CRUD_tasks input={test_input} expected={expected} ") 30 | 31 | # defining a connector definition dict for the parameters to be sent to the API 32 | connector_definition = { 33 | "name": "kafka-connect-splunk", 34 | "config": { 35 | "connector.class": "com.splunk.kafka.connect.SplunkSinkConnector", 36 | "tasks.max": "3", 37 | "topics": setup["kafka_topic"], 38 | "splunk.indexes": setup["splunk_index"], 39 | "splunk.hec.uri": setup["splunk_hec_url"], 40 | "splunk.hec.token": setup["splunk_token"], 41 | "splunk.hec.raw": "false", 42 | "splunk.hec.ack.enabled": "false", 43 | "splunk.hec.ssl.validate.certs": "false" 44 | } 45 | } 46 | 47 | # Validate create task 48 | assert create_kafka_connector(setup, connector_definition, success=expected) == expected 49 | setup['connectors'].append("kafka-connect-splunk") 50 | 51 | # updating the definition to use 5 tasks instead of 3 52 | connector_definition = { 53 | "name": "kafka-connect-splunk", 54 | "config": { 55 | "connector.class": "com.splunk.kafka.connect.SplunkSinkConnector", 56 | "tasks.max": "5", 57 | "topics": setup["kafka_topic"], 58 | "splunk.indexes": setup["splunk_index"], 59 | "splunk.hec.uri": setup["splunk_hec_url"], 60 | "splunk.hec.token": setup["splunk_token"], 61 | "splunk.hec.raw": "false", 62 | "splunk.hec.ack.enabled": "false", 63 | "splunk.hec.ssl.validate.certs": "false" 64 | } 65 | } 66 | 67 | # Validate update task 68 | assert update_kafka_connector(setup, connector_definition) == expected 69 | 70 | # Validate get tasks 71 | tasks = get_kafka_connector_tasks(setup, connector_definition, 10) 72 | assert tasks == int(connector_definition["config"]["tasks.max"]) 73 | 74 | # Validate pause task 75 | assert pause_kafka_connector(setup, connector_definition) == expected 76 | 77 | # Validate resume task 78 | assert resume_kafka_connector(setup, connector_definition) == expected 79 | 80 | # Validate restart task 81 | assert restart_kafka_connector(setup, connector_definition) == expected 82 | 83 | # Validate delete task 84 | assert delete_kafka_connector(setup, 
connector_definition) == expected 85 | 86 | @pytest.mark.parametrize("test_case, config_input, expected", [ 87 | ("test_invalid_tasks_max", {"name": "test_invalid_tasks_max", "tasks_max": "dummy-string"}, False), 88 | ("test_invalid_splunk_hec_raw", {"name": "test_invalid_splunk_hec_raw", "splunk_hec_raw": "disable"}, False), 89 | ("test_invalid_topics", {"name": "test_invalid_topics", "topics": ""}, False) 90 | ]) 91 | def test_invalid_crud_tasks(self, setup, test_case, config_input, expected): 92 | ''' 93 | Test that an invalid Kafka Connect task cannot be created 94 | ''' 95 | logger.info(f"testing {test_case} input={config_input} expected={expected} ") 96 | 97 | connector_definition_invalid_tasks = generate_connector_content(config_input) 98 | setup['connectors'].append(test_case) 99 | 100 | assert create_kafka_connector(setup, connector_definition_invalid_tasks, success=expected) == expected 101 | 102 | @pytest.mark.parametrize("test_case, config_input, expected", [ 103 | ("event_enrichment_non_key_value", {"name": "event_enrichment_non_key_value", 104 | "splunk_hec_json_event_enrichment": "testing-testing non KV"}, False), 105 | ("event_enrichment_non_key_value_3_tasks", {"name": "event_enrichment_non_key_value_3_tasks", 106 | "tasks_max": "3", 107 | "splunk_hec_json_event_enrichment": "testing-testing non KV"}, False), 108 | ("event_enrichment_not_separated_by_commas", {"name": "event_enrichment_not_separated_by_commas", 109 | "splunk_hec_json_event_enrichment": "key1=value1 key2=value2"}, False), 110 | ("event_enrichment_not_separated_by_commas_3_tasks", {"name": "event_enrichment_not_separated_by_commas_3_tasks", 111 | "tasks_max": "3", 112 | "splunk_hec_json_event_enrichment": "key1=value1 key2=value2"}, False) 113 | 114 | ]) 115 | def test_invalid_crud_event_enrichment_tasks(self, setup, test_case, config_input, expected): 116 | ''' 117 | Test that an invalid event-enrichment Kafka Connect task may be created, but its task status should be FAILED 118 | and no data should enter Splunk 119 | ''' 120 | logger.info(f"testing {test_case} input={config_input}") 121 | 122 | connector_definition_invalid_tasks = generate_connector_content(config_input) 123 | setup['connectors'].append(test_case) 124 | 125 | assert create_kafka_connector(setup, connector_definition_invalid_tasks) == expected -------------------------------------------------------------------------------- /test/testcases/test_data_enrichment.py: -------------------------------------------------------------------------------- 1 | from lib.commonkafka import * 2 | from lib.commonsplunk import check_events_from_splunk 3 | from lib.helper import get_test_folder 4 | import pytest 5 | import re 6 | 7 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 8 | logger = logging.getLogger("test_case") 9 | 10 | 11 | class TestDataEnrichment: 12 | 13 | @pytest.mark.parametrize("test_scenario,test_input,expected", [ 14 | ("supplement", "¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ", 3), 15 | ("extendedA", "ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ", 3), 16 | ("extendedB", "ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏƐƑƒƓƔƕƖƗƘƙƚƛƜƝƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƻƼƽƾƿǀǁǂǃDŽDždžLJLjljNJNjnjǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZDzdzǴǵǶǷǸǹǺǻǼǽǾǿȀȁȂȃȄȅȆȇȈȉȊȋȌȍȎȏȐȑȒȓȔȕȖȗȘșȚțȜȝȞȟȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀɁɂɃɄɅɆɇɈɉɊɋɌɍɎɏ", 3), 17 | ("IPAextensions", 
"ɐɑɒɓɔɕɖɗɘəɚɛɜɝɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀʁʂʃʄʅʆʇʈʉʊʋʌʍʎʏʐʑʒʓʔʕʖʗʘʙʚʛʜʝʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯ", 3), 18 | ("spaceModifier", "ʰʱʲʳʴʵʶʷʸʹʺʻʼʽʾʿˀˁ˂˃˄˅ˆˇˈˉˊˋˌˍˎˏːˑ˒˓˔˕˖˗˘˙˚˛˜˝˞˟ˠˡˢˣˤ˥˦˧˨˩˪˫ˬ˭ˮ˯˰˱˲˳˴˵˶˷˸˹˺˻˼˽˾˿", 3), 19 | # Skipping this test. because it is special character, I cannot get it to work in search 20 | # ("diacriticalMarks", "̀ ́ ̂ ̃ ̄ ̅ ̆ ̇ ̈ ̉ ̊ ̋ ̌ ̍ ̎ ̏ ̐ ̑ ̒ ̓ ̔ ̕ ̖ ̗ ̘ ̙ ̚ ̛ ̜ ̝ ̞ ̟ ̠ ̡ ̢ ̣ ̤ ̥ ̦ ̧ ̨ ̩ ̪ ̫ ̬ ̭ ̮ ̯ ̰ ̱ ̲ ̳ ̴ ̵ ̶ ̷ ̸ ̹ ̺ ̻ ̼ ̽ ̾ ̿ ̀ ́ ͂ ̓ ̈́ ͅ ͆ ͇ ͈ ͉ ͊ ͋ ͌ ͍ ͎ ͏ ͐ ͑ ͒ ͓ ͔ ͕ ͖ ͗ ͘ ͙ ͚ ͛ ͜ ͝ ͞ ͟ ͠ ͡ ͢ ͣ ͤ ͥ ͦ ͧ ͨ ͩ ͪ ͫ ͬ ͭ ͮ ͯ", 1), 21 | ("greek", "ͰͱͲͳʹ͵Ͷͷ͸͹ͺͻͼͽ;Ϳ΀΁΂΃΄΅Ά·ΈΉΊ΋Ό΍ΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡ΢ΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώϏϐϑϒϓϔϕϖϗϘϙϚϛϜϝϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵ϶ϷϸϹϺϻϼϽϾϿ", 3) 22 | ]) 23 | def test_data_enrichment_latin1(self, setup, test_scenario, test_input, expected): 24 | logger.info(f"testing {test_scenario} input={test_input} expected={expected} event(s)") 25 | search_query = f"index={setup['splunk_index']} | search timestamp=\"{setup['timestamp']}\" chars::{test_input}" 26 | logger.info(search_query) 27 | events = check_events_from_splunk(start_time="-1h@h", 28 | url=setup["splunkd_url"], 29 | user=setup["splunk_user"], 30 | query=[f"search {search_query}"], 31 | password=setup["splunk_password"]) 32 | logger.info("Splunk received %s events in the last hour", 33 | len(events)) 34 | assert len(events) == expected 35 | 36 | @pytest.mark.parametrize("test_case, test_input, expected", [ 37 | ("line_breaking_of_raw_data", "kafka:topic_test_break_raw", 1), 38 | ("line_breaking_of_event_data", "kafka:topic_test_break_event", 3) 39 | ]) 40 | def test_line_breaking_configuration(self, setup, test_case, test_input, expected): 41 | logger.info(f"testing {test_case} expected={expected} ") 42 | search_query = f"index={setup['splunk_index']} | search timestamp=\"{setup['timestamp']}\" source::{test_input}" 43 | logger.info(search_query) 44 | events = check_events_from_splunk(start_time="-1h@h", 45 | url=setup["splunkd_url"], 46 | user=setup["splunk_user"], 47 | query=[f"search {search_query}"], 48 | password=setup["splunk_password"]) 49 | logger.info("Splunk received %s events in the last hour", 50 | len(events)) 51 | assert len(events) == expected 52 | if test_case == 'line_breaking_of_raw_data': 53 | event_raw_data = events[0]['_raw'].strip() 54 | # Replace the white spaces since Splunk 8.0.2 and 8.0.3 behave differently 55 | actual_raw_data = re.sub(r'\s+', '', event_raw_data) 56 | expected_data = "{\"timestamp\":\"%s\"}######" \ 57 | "{\"timestamp\":\"%s\"}######" \ 58 | "{\"timestamp\":\"%s\"}######" % ( 59 | setup["timestamp"], setup["timestamp"], setup["timestamp"]) 60 | assert actual_raw_data == expected_data, \ 61 | f'\nActual value: \n{actual_raw_data} \ndoes not match expected value: \n{expected_data}' 62 | 63 | @pytest.mark.parametrize("test_scenario, test_input, expected", [ 64 | ("record_key_extraction", "sourcetype::track_record_key", "{}"), 65 | ]) 66 | def test_record_key_data_enrichment(self, setup, test_scenario, test_input, expected): 67 | logger.info(f"testing {test_scenario} input={test_input} expected={expected} event(s)") 68 | search_query = f"index={setup['splunk_index']} | search {test_input} | fields *" 69 | logger.info(search_query) 70 | events = check_events_from_splunk(start_time="-15m@m", 71 | url=setup["splunkd_url"], 72 | user=setup["splunk_user"], 73 | query=[f"search {search_query}"], 74 | password=setup["splunk_password"]) 75 | logger.info("Splunk received %s events in the last hour", len(events)) 76 | 77 | 
if len(events) == 1: 78 | assert events[0]["kafka_record_key"] == expected 79 | else: 80 | assert False, "No event found or duplicate events found" 81 | -------------------------------------------------------------------------------- /test/testcases/test_data_onboarding.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from lib.commonsplunk import check_events_from_splunk 3 | from lib.commonkafka import * 4 | from lib.helper import get_test_folder 5 | 6 | logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf")) 7 | logger = logging.getLogger("test_case") 8 | 9 | 10 | class TestDataOnboarding: 11 | 12 | @pytest.mark.parametrize("test_scenario, test_input, expected", [ 13 | ("raw_endpoint_no_ack", "sourcetype::raw_data-no-ack", 1), 14 | ("raw_endpoint_with_ack", "sourcetype::raw_data-ack", 1), 15 | ("event-endpoint-no-ack", "chars::data-onboarding-event-endpoint-no-ack", 3), 16 | ("event-endpoint-ack", "chars::data-onboarding-event-endpoint-ack", 3), 17 | ]) 18 | def test_data_onboarding(self, setup, test_scenario, test_input, expected): 19 | logger.info(f"testing {test_scenario} input={test_input} expected={expected} event(s)") 20 | search_query = f"index={setup['splunk_index']} | search timestamp=\"{setup['timestamp']}\" {test_input}" 21 | logger.info(search_query) 22 | events = check_events_from_splunk(start_time="-15m@m", 23 | url=setup["splunkd_url"], 24 | user=setup["splunk_user"], 25 | query=[f"search {search_query}"], 26 | password=setup["splunk_password"]) 27 | logger.info("Splunk received %s events in the last 15 minutes", len(events)) 28 | assert len(events) == expected 29 | 30 | # @pytest.mark.parametrize("test_scenario, test_input, expected", [ 31 | # ("protobuf", "sourcetype::protobuf", 1), 32 | # ]) 33 | # def test_proto_data_onboarding(self, setup, test_scenario, test_input, expected): 34 | # logger.info(f"testing {test_scenario} input={test_input} expected={expected} event(s)") 35 | # search_query = f"index={setup['splunk_index']} | search {test_input}" 36 | # logger.info(search_query) 37 | # events = check_events_from_splunk(start_time="-15m@m", 38 | # url=setup["splunkd_url"], 39 | # user=setup["splunk_user"], 40 | # query=[f"search {search_query}"], 41 | # password=setup["splunk_password"]) 42 | # logger.info("Splunk received %s events in the last hour", len(events)) 43 | # assert len(events) == expected 44 | 45 | @pytest.mark.parametrize("test_scenario, test_input, expected", [ 46 | ("date_format", "latest=1365209605.000 sourcetype::date_format", "2010-06-13T23:11:52.454+00:00"), 47 | ("epoch_format", "latest=1565209605.000 sourcetype::epoch_format", "2019-04-14T02:40:05.000+00:00"), 48 | ]) 49 | def test_extracted_timestamp_data_onboarding_date_format(self, setup, test_scenario, test_input, expected): 50 | logger.info(f"testing {test_scenario} input={test_input} expected={expected} event(s)") 51 | search_query = f"index={setup['splunk_index']} {test_input}" 52 | logger.info(search_query) 53 | events = check_events_from_splunk(start_time="-15m@m", 54 | url=setup["splunkd_url"], 55 | user=setup["splunk_user"], 56 | query=[f"search {search_query}"], 57 | password=setup["splunk_password"]) 58 | logger.info("Splunk received %s events in the last 15 minutes", len(events)) 59 | if len(events) == 1: 60 | assert events[0]["_time"] == expected 61 | else: 62 | assert False, "No event found or duplicate events found" 63 | --------------------------------------------------------------------------------
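To close the loop on what these CRUD tests exercise: the sketch below creates the same connector that test_valid_crud_tasks defines, by posting it straight to the Kafka Connect REST API (POST /connectors). The literal values are taken from test/config.yaml above. The suite's own create_kafka_connector helper lives in test/lib/commonkafka.py, which this dump does not include, so this is an assumed equivalent of that helper, not its actual implementation.

import requests

# Connector definition mirroring the one built in test_crud.py; the URI,
# token, index, and topic values come from test/config.yaml above.
definition = {
    "name": "kafka-connect-splunk",
    "config": {
        "connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
        "tasks.max": "3",
        "topics": "test-datagen",
        "splunk.indexes": "main",
        "splunk.hec.uri": "https://127.0.0.1:8088",
        "splunk.hec.token": "a6b5e77f-d5f6-415a-bd43-930cecb12959",
        "splunk.hec.raw": "false",
        "splunk.hec.ack.enabled": "false",
        "splunk.hec.ssl.validate.certs": "false",
    },
}

# Kafka Connect accepts new connectors at POST /connectors and returns 201 on
# success; a later GET /connectors/<name>/status shows the task states.
resp = requests.post("http://127.0.0.1:8083/connectors", json=definition)
resp.raise_for_status()
print(resp.json())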