├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── build.gradle
├── gradle.properties
├── settings.gradle
└── src
    ├── main
    │   └── scala
    │       └── com
    │           └── landoop
    │               └── kafka
    │                   └── testing
    │                       ├── ConnectSamples.scala
    │                       ├── EmbeddedConnect.scala
    │                       ├── KCluster.scala
    │                       ├── PortProvider.scala
    │                       └── SchemaRegistryService.scala
    └── test
        ├── resources
        │   └── log4j.properties
        └── scala
            └── com
                └── landoop
                    └── kafka
                        └── testing
                            ├── BasicTest.scala
                            └── ClusterTestingCapabilities.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | .iml
2 | *.log
3 | .cache
4 | *.class
5 | .history
6 | .scala_dependencies
7 | dist/
8 | .idea/
9 | build/
10 | target/
11 | .gradle/
12 | lib_managed/
13 | src_managed/
14 | project/boot/
15 | .worksheet.lib/
16 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | scala:
3 | - 2.11.8
4 |
5 | jdk:
6 | - oraclejdk8
7 |
8 | # sudo: true
9 |
10 | # Enable if you want to use gradlew
11 | #before_install:
12 | # - chmod +x gradlew
13 |
14 | # If you omit install, travis will always run gradle assemble
15 | install: echo "skip 'gradle assembly'"
16 |
17 | script:
18 | - gradle clean build
19 |
20 | cache:
21 | directories:
22 | - $HOME/.gradle/caches/
23 | - $HOME/.gradle/wrapper/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [Build Status](https://travis-ci.org/Landoop/kafka-testing)
2 | 
3 | [Maven Central (kafka-testing_2.11)](http://search.maven.org/#search%7Cga%7C1%7Ckafka-testing_2.11)
4 | [Maven Central (kafka-testing_2.12)](http://search.maven.org/#search%7Cga%7C1%7Ckafka-testing_2.12)
5 |
6 | # Kafka Unit Testing
7 |
8 | Lets you start and stop an embedded `one or more Kafka brokers + a ZooKeeper instance + a Schema Registry instance + a Kafka Connect instance` for unit testing applications that communicate with Kafka.
9 |
10 | ## Versions
11 |
12 | | kafka-testing | Kafka broker | Zookeeper | Schema Registry | Kafka Connect |
13 | |---------------|---------------------------|-----------| ----------------| --------------|
14 | | 0.1 | kafka_2.11 : 0.10.2.0 | 3.4.6 | 3.2.0 | 3.2.0 |
15 | | 0.2 | kafka_2.11 : 0.10.2.1-cp2 | 3.4.6 | 3.2.2 | 3.2.2 |
16 | | 0.2 | kafka_2.12 : 0.10.2.1 | 3.4.6 | 3.2.2 | 3.2.2 |
17 | | 2.1 | 1.1 | 3.4.6 | 4.1.0 | 1.1.0 |
18 |
19 | ## Maven central
20 |
21 | ```xml
22 | <dependency>
23 |     <groupId>com.landoop</groupId>
24 |     <artifactId>kafka-testing_2.11</artifactId>
25 |     <version>2.1</version>
26 | </dependency>
27 | ```
28 |
29 | ```gradle
30 | compile 'com.landoop:kafka-testing_2.11:2.1'
31 |
32 | ```
33 |
34 | ```sbt
35 | libraryDependencies += "com.landoop" %% "kafka-testing" % "2.1"
36 | ```
37 |
38 | ### Required additional dependencies
39 | This library also requires the Apache Kafka test JARs on the classpath (they are needed by `KCluster`):
40 | ```sbt
41 | libraryDependencies ++= Seq(
42 | "org.apache.kafka" %% "kafka" % "1.1.0" % Compile classifier "test",
43 | "org.apache.kafka" %% "kafka" % "1.1.0" % Compile,
44 | "org.apache.kafka" % "kafka-clients" % "1.1.0" % Compile classifier "test",
45 | "org.apache.kafka" % "kafka-clients" % "1.1.0" % Compile
46 | )
47 | ```
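For Gradle users, a roughly equivalent set of dependencies (using the same `test@jar` classifier notation that this project's own `build.gradle` relies on; the coordinates simply mirror the sbt snippet above) would look like:

```gradle
dependencies {
    // regular Kafka artifacts
    compile "org.apache.kafka:kafka_2.11:1.1.0"
    compile "org.apache.kafka:kafka-clients:1.1.0"
    // matching Kafka test jars needed by KCluster (note the 'test' classifier)
    compile "org.apache.kafka:kafka_2.11:1.1.0:test@jar"
    compile "org.apache.kafka:kafka-clients:1.1.0:test@jar"
}
```
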
48 | ## Using it
49 |
50 | ```scala
51 | val kafkaCluster: KCluster = new KCluster()
52 |
53 | //get kafka brokers
54 | val brokers = kafkaCluster.BrokersList
55 |
56 | //get schema registry client
57 | val schemaRegistryClient = kafkaCluster.SchemaRegistryService.get.restClient
58 |
59 |
60 | //get schema registry endpoint
61 | val schemaRegistryEndpoint = kafkaCluster.SchemaRegistryService.get.Endpoint
62 |
63 | //get Zookeeper Client
64 | val zkClient = kafkaCluster.ZKClient
65 |
66 | //start connect
67 | kafkaCluster.startEmbeddedConnect(...)
68 | ```
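
Putting it together, below is a minimal, self-contained sketch (the `ExampleUsage` object and the `my-topic` name are illustrative) that boots the cluster, starts an embedded Connect worker using the `ConnectSamples` properties shipped with the library, produces a record and then tears everything down:

```scala
import java.util.Properties

import com.landoop.kafka.testing.{ConnectSamples, KCluster}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

object ExampleUsage extends App {
  // one broker + ZooKeeper + Schema Registry, all in-process
  val cluster = new KCluster()
  try {
    // optionally start an embedded Kafka Connect worker with the bundled sample file connector
    cluster.startEmbeddedConnect(
      ConnectSamples.workerProperties(cluster.BrokersList, cluster.SchemaRegistryService.get.Endpoint),
      List(ConnectSamples.sourceProperties)
    )

    // a plain producer pointed at the embedded brokers
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.BrokersList)
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    val producer = new KafkaProducer[String, String](props)
    producer.send(new ProducerRecord("my-topic", "key", "value")).get()
    producer.close()
  } finally {
    // stops Connect (if started), the Schema Registry, the brokers and ZooKeeper
    cluster.close()
  }
}
```

This project's own tests follow the same pattern through the `ClusterTestingCapabilities` trait under `src/test`, which additionally provides ready-made producer and consumer properties.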
69 |
70 | ## License
71 |
72 | ```
73 | Copyright 2017 Landoop
74 |
75 | Licensed under the Apache License, Version 2.0 (the "License");
76 | you may not use this file except in compliance with the License.
77 | You may obtain a copy of the License at
78 |
79 | http://www.apache.org/licenses/LICENSE-2.0
80 |
81 | Unless required by applicable law or agreed to in writing, software
82 | distributed under the License is distributed on an "AS IS" BASIS,
83 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
84 | See the License for the specific language governing permissions and
85 | limitations under the License.
86 | ```
87 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | jcenter()
4 | maven {
5 | url 'https://plugins.gradle.org/m2/'
6 | }
7 | }
8 | dependencies {
9 | classpath 'com.github.maiflai:gradle-scalatest:0.21'
10 | classpath 'io.codearte.gradle.nexus:gradle-nexus-staging-plugin:0.11.0'
11 | classpath 'net.researchgate:gradle-release:2.7.0'
12 | }
13 | }
14 |
15 | apply plugin: 'signing'
16 | apply plugin: 'io.codearte.nexus-staging'
17 | apply plugin: 'net.researchgate.release'
18 |
19 |
20 | allprojects {
21 | group = 'com.landoop'
22 | version = version
23 |     description = "Kafka Unit Testing: Embedded broker, ZK, SR, Connect services"
24 |
25 | apply plugin: 'scala'
26 | apply plugin: 'maven'
27 | apply plugin: 'com.github.maiflai.scalatest'
28 | sourceCompatibility = 1.8
29 | targetCompatibility = 1.8
30 |
31 | ext {
32 | scalaMajorVersion = '2.11'
33 | scala = '2.11.11'
34 | scalaCheck = '1.13.5'
35 | kafkaVersion = "2.0.0"
36 | confluentVersion = "5.0.0"
37 | scalaTest = '3.0.3'
38 | junitVersion = '4.12'
39 | mockitoVersion = '2.8.47'
40 | avroVersion = '1.8.1'
41 | avro4sVersion = "1.6.2"
42 | scalaLoggingVersion = '3.7.2'
43 | bouncycastleVersion = "1.57"
44 | apacheDirectoryVersion = "2.0.0-M24"
45 | }
46 |
47 | repositories {
48 | mavenLocal()
49 | mavenCentral()
50 | maven { url "http://repo.typesafe.com/typesafe/releases/" }
51 | maven { url "http://packages.confluent.io/maven/" }
52 | }
53 |
54 | configurations {
55 | provided
56 | compile.extendsFrom provided
57 | }
58 |
59 | dependencies {
60 |
61 | compile "org.scala-lang:scala-library:$scala"
62 | compile "com.typesafe.scala-logging:scala-logging_$scalaMajorVersion:$scalaLoggingVersion"
63 | compile("io.confluent:kafka-schema-registry:$confluentVersion")
64 |
65 | compile("io.confluent:kafka-schema-registry-client:$confluentVersion") {
66 | exclude group: "com.fasterxml.jackson.core"
67 |             exclude module: "jackson-databind"
68 | }
69 | compile("org.apache.kafka:kafka-clients:$kafkaVersion")
70 | compile("io.confluent:kafka-avro-serializer:$confluentVersion")
71 | compile("org.apache.avro:avro:$avroVersion")
72 |
73 | compile("org.apache.kafka:kafka_$scalaMajorVersion:$kafkaVersion")
74 | compile("org.apache.kafka:kafka_$scalaMajorVersion:$kafkaVersion:test@jar")
75 | compile("org.apache.kafka:kafka-clients:$kafkaVersion:test@jar")
76 | compile("io.confluent:common-config:$confluentVersion")
77 | compile("io.confluent:common-utils:$confluentVersion")
78 | compile("io.confluent:rest-utils:$confluentVersion")
79 | compile("org.apache.kafka:connect-runtime:$kafkaVersion")
80 |
81 | compile("junit:junit:$junitVersion")
82 | compile("org.bouncycastle:bcpkix-jdk15on:$bouncycastleVersion")
83 | compile("org.apache.directory.server:apacheds-core-api:$apacheDirectoryVersion") {
84 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
85 | }
86 | compile("org.apache.directory.server:apacheds-interceptor-kerberos:$apacheDirectoryVersion") {
87 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
88 | }
89 | compile("org.apache.directory.server:apacheds-protocol-shared:$apacheDirectoryVersion") {
90 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
91 | }
92 | compile("org.apache.directory.server:apacheds-protocol-kerberos:$apacheDirectoryVersion") {
93 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
94 | }
95 | compile("org.apache.directory.server:apacheds-protocol-ldap:$apacheDirectoryVersion") {
96 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
97 | }
98 | compile("org.apache.directory.server:apacheds-ldif-partition:$apacheDirectoryVersion") {
99 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
100 | }
101 | compile("org.apache.directory.server:apacheds-mavibot-partition:$apacheDirectoryVersion") {
102 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
103 | }
104 | compile("org.apache.directory.server:apacheds-jdbm-partition:$apacheDirectoryVersion") {
105 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
106 | }
107 | compile("org.apache.directory.server:apacheds-all:$apacheDirectoryVersion") {
108 | exclude group: "org.apache.directory.shared", module: "shared-ldap-schema"
109 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
110 | }
111 | compile("org.apache.directory.server:apacheds-server-integ:$apacheDirectoryVersion") {
112 | exclude group: "org.apache.directory.shared", module: "shared-ldap-schema"
113 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
114 | }
115 | compile("org.apache.directory.server:apacheds-core-integ:$apacheDirectoryVersion") {
116 | exclude group: "org.apache.directory.shared", module: "shared-ldap-schema"
117 | exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
118 | }
119 |
120 |
121 | testCompile "org.mockito:mockito-core:$mockitoVersion"
122 | testCompile "org.scalacheck:scalacheck_$scalaMajorVersion:$scalaCheck"
123 | testCompile "org.scalatest:scalatest_$scalaMajorVersion:$scalaTest"
124 | testCompile "junit:junit:$junitVersion"
125 | testCompile "org.apache.curator:curator-test:3.1.0"
126 | testCompile 'org.powermock:powermock-module-junit4:1.6.5'
127 | testCompile 'org.pegdown:pegdown:1.1.0'
128 | }
129 |
130 | test {
131 | maxParallelForks = 1
132 | minHeapSize '256m'
133 | maxHeapSize '2048m'
134 | systemProperty 'keystore', projectDir.canonicalPath + "/src/test/resources/stc_keystore.jks"
135 | systemProperty 'truststore', projectDir.canonicalPath + "/src/test/resources/stc_truststore.jks"
136 | }
137 |
138 | task testJar(type: Jar, dependsOn: testClasses) {
139 | baseName = "test-${project.archivesBaseName}"
140 | from sourceSets.test.output
141 | }
142 |
143 | configurations {
144 | tests
145 | }
146 |
147 | task sourcesJar(type: Jar) {
148 | classifier = 'sources'
149 | from sourceSets.main.allSource
150 | }
151 |
152 | task javadocJar(type: Jar) {
153 | classifier = 'javadoc'
154 | from javadoc
155 | }
156 |
157 | task scaladocJar(type: Jar) {
158 | classifier = 'scaladoc'
159 | from '../LICENSE'
160 | from scaladoc
161 | }
162 |
163 | tasks.withType(Tar) {
164 | compression Compression.GZIP
165 | extension = 'tgz'
166 | }
167 |
168 | artifacts {
169 | archives javadocJar, scaladocJar, sourcesJar
170 | }
171 |
172 | task compile(dependsOn: 'compileScala')
173 |
174 | signing {
175 | required { gradle.taskGraph.hasTask("uploadArchives") }
176 | sign configurations.archives
177 | }
178 |
179 | // OSSRH publication
180 | if (project.hasProperty('release')) {
181 | uploadArchives {
182 | repositories {
183 | mavenDeployer {
184 | // POM signature
185 | beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
186 | // Target repository
187 | repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2/") {
188 | authentication(userName: ossrhUsername, password: ossrhPassword)
189 | }
190 | pom.project {
191 | name project.name
192 | description project.description
193 | packaging 'jar'
194 | url 'https://github.com/landoop/kafka-testing'
195 |
196 | scm {
197 | connection 'scm:git:https://github.com/landoop/kafka-testing.git'
198 | developerConnection 'scm:git:git@github.com:landoop/kafka-testing.git'
199 | url 'https://github.com/landoop/kafka-testing.git'
200 | }
201 |
202 | licenses {
203 | license {
204 | name 'Apache License 2.0'
205 | url 'http://www.apache.org/licenses/LICENSE-2.0.html'
206 | distribution 'repo'
207 | }
208 | }
209 |
210 | developers {
211 | developer {
212 | id = 'stheppi'
213 | name = 'Stefan Bocutiu'
214 | email = 'stefan@landoop.com'
215 | }
216 | developer {
217 | id = 'Antwnis'
218 | name = 'Antonios Chalkiopoulos'
219 | email = 'antonios@landoop.com'
220 | }
221 | }
222 | }
223 | }
224 | }
225 | }
226 |
227 | nexusStaging {
228 | packageGroup = project.getGroup()
229 | username = ossrhUsername
230 | password = ossrhPassword
231 | }
232 | }
233 | }
234 |
235 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | version=2.1
2 |
3 | ossrhUsername=you
4 | ossrhPassword=me
5 |
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'kafka-testing_2.11'
2 |
--------------------------------------------------------------------------------
/src/main/scala/com/landoop/kafka/testing/ConnectSamples.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import java.util
4 | import java.util.Properties
5 |
6 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig
7 | import org.apache.kafka.connect.runtime.{ConnectorConfig, WorkerConfig}
8 |
9 | import scala.collection.JavaConverters._
10 |
11 | object ConnectSamples {
12 |
13 |   def workerConfig(bootstrapServers: String, schemaRegistryUrl: String): util.Map[String, AnyRef] = Map(
14 |     DistributedConfig.GROUP_ID_CONFIG -> "testing-group-id",
15 |     WorkerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
16 |     WorkerConfig.KEY_CONVERTER_CLASS_CONFIG -> "org.apache.kafka.connect.json.JsonConverter",
17 |     WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG -> "org.apache.kafka.connect.json.JsonConverter",
18 |     WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG -> "com.qubole.streamx.ByteArrayConverter", // duplicate key: this entry overrides the JsonConverter value converter above
19 |
20 | DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG -> "connect-offsets",
21 | DistributedConfig.CONFIG_TOPIC_CONFIG -> "connect-configs",
22 | DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG -> "connect-status",
23 | WorkerConfig.INTERNAL_KEY_CONVERTER_CLASS_CONFIG -> "org.apache.kafka.connect.json.JsonConverter",
24 | WorkerConfig.INTERNAL_VALUE_CONVERTER_CLASS_CONFIG -> "org.apache.kafka.connect.json.JsonConverter",
25 | "schema.registry.url" -> schemaRegistryUrl
26 | ).asInstanceOf[Map[String, AnyRef]].asJava
27 |
28 | val sourceConfig: util.Map[String, AnyRef] = Map(
29 | ConnectorConfig.NAME_CONFIG -> "file-source-connector",
30 | ConnectorConfig.CONNECTOR_CLASS_CONFIG -> "org.apache.kafka.connect.file.FileStreamSourceConnector",
31 | ConnectorConfig.TASKS_MAX_CONFIG -> "1",
32 | "topic" -> "file-topic",
33 | "file" -> "/var/log/*"
34 | ).asInstanceOf[Map[String, AnyRef]].asJava
35 |
36 |   def workerProperties(bootstrapServers: String, schemaRegistryUrl: String): Properties = {
37 |     val props = new Properties()
38 |     props.putAll(workerConfig(bootstrapServers, schemaRegistryUrl))
39 | props
40 | }
41 |
42 | val sourceProperties: Properties = {
43 | val props = new Properties()
44 | props.putAll(sourceConfig)
45 | props
46 | }
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/src/main/scala/com/landoop/kafka/testing/EmbeddedConnect.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import org.apache.kafka.common.utils.SystemTime
4 | import org.apache.kafka.common.utils.Time
5 | import org.apache.kafka.common.utils.Utils
6 | import org.apache.kafka.connect.runtime.{ConnectorConfig, Herder, Worker}
7 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig
8 | import org.apache.kafka.connect.runtime.distributed.DistributedHerder
9 | import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo
10 | import org.apache.kafka.connect.storage._
11 | import org.apache.kafka.connect.util.FutureCallback
12 | import java.util.Properties
13 | import java.util.UUID
14 | import java.util.concurrent.CountDownLatch
15 | import java.util.concurrent.ExecutionException
16 | import java.util.concurrent.TimeUnit
17 | import java.util.concurrent.TimeoutException
18 | import java.util.concurrent.atomic.AtomicBoolean
19 | import scala.collection.JavaConversions._
20 | import com.typesafe.scalalogging.StrictLogging
21 | import org.apache.kafka.connect.runtime.isolation.Plugins
22 |
23 | /**
24 | * Embedded Kafka Connect server as per KIP-26
25 | */
26 | case class EmbeddedConnect(workerConfig: Properties, connectorConfigs: List[Properties]) extends StrictLogging {
27 |
28 | private val REQUEST_TIMEOUT_MS = 120000
29 | private val startLatch: CountDownLatch = new CountDownLatch(1)
30 | private val shutdown: AtomicBoolean = new AtomicBoolean(false)
31 | private val stopLatch: CountDownLatch = new CountDownLatch(1)
32 |
33 | private var worker: Worker = _
34 | private var herder: DistributedHerder = _
35 |
36 | // ConnectEmbedded - throws Exception
37 | val time: Time = new SystemTime()
38 | val config: DistributedConfig = new DistributedConfig(Utils.propsToStringMap(workerConfig))
39 |
40 | val offsetBackingStore: KafkaOffsetBackingStore = new KafkaOffsetBackingStore()
41 | offsetBackingStore.configure(config)
42 |   // we don't have an advertised REST URL for the worker, so fall back to a fairly random id
43 |   val workerId: String = UUID.randomUUID().toString
44 |   logger.debug(s"Connect distributed config: $config")
45 | worker = new Worker(workerId, time, new Plugins(Map.empty[String, String]), config, offsetBackingStore)
46 |
47 | val statusBackingStore: StatusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter)
48 | statusBackingStore.configure(config)
49 |
50 | val configBackingStore: ConfigBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter, config, worker.configTransformer())
51 |
52 | //advertisedUrl = "" as we don't have the rest server - hopefully this will not break anything
53 |   herder = new DistributedHerder(config, time, worker, "KafkaCluster1", statusBackingStore, configBackingStore, "")
54 |
55 | def start(): Unit = {
56 | try {
57 | logger.info("Kafka ConnectEmbedded starting")
58 |
59 | sys.ShutdownHookThread {
60 | logger.info("exiting")
61 | try {
62 | startLatch.await()
63 | EmbeddedConnect.this.stop()
64 | } catch {
65 | case e: InterruptedException => logger.error("Interrupted in shutdown hook while waiting for Kafka Connect startup to finish");
66 | }
67 | }
68 | worker.start()
69 | herder.start()
70 |
71 | logger.info("Kafka ConnectEmbedded started")
72 |
73 | connectorConfigs.foreach { connectorConfig: Properties =>
74 | val callback = new FutureCallback[Herder.Created[ConnectorInfo]]()
75 | val name = connectorConfig.getProperty(ConnectorConfig.NAME_CONFIG)
76 | herder.putConnectorConfig(name, Utils.propsToStringMap(connectorConfig), true, callback)
77 | callback.get(REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)
78 | }
79 |
80 | } catch {
81 | case e: InterruptedException => logger.error("Starting interrupted ", e)
82 | case e: ExecutionException => logger.error("Submitting connector config failed", e.getCause)
83 | case e: TimeoutException => logger.error("Submitting connector config timed out", e)
84 | } finally {
85 | startLatch.countDown()
86 | }
87 | }
88 |
89 | def stop(): Unit = {
90 | try {
91 | val wasShuttingDown = shutdown.getAndSet(true)
92 | if (!wasShuttingDown) {
93 | logger.info("Kafka ConnectEmbedded stopping")
94 | herder.stop()
95 | worker.stop()
96 | logger.info("Kafka ConnectEmbedded stopped")
97 | }
98 | } finally {
99 | stopLatch.countDown()
100 | }
101 | }
102 |
103 | def awaitStop(): Unit = {
104 | try {
105 | stopLatch.await()
106 | } catch {
107 | case e: InterruptedException => logger.error("Interrupted waiting for Kafka Connect to shutdown")
108 | }
109 | }
110 |
111 | }
--------------------------------------------------------------------------------
/src/main/scala/com/landoop/kafka/testing/KCluster.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import java.util.Properties
4 |
5 | import io.confluent.kafka.schemaregistry.avro.AvroCompatibilityLevel
6 | import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig
7 | import kafka.admin.{AdminUtils, RackAwareMode}
8 | import kafka.server.{KafkaConfig, KafkaServer}
9 | import kafka.utils.{CoreUtils, TestUtils, ZkUtils}
10 | import kafka.zk.EmbeddedZookeeper
11 | import org.I0Itec.zkclient.ZkClient
12 | import org.apache.kafka.common.security.auth.SecurityProtocol
13 | import org.apache.kafka.common.utils.SystemTime
14 |
15 | import scala.collection.immutable.IndexedSeq
16 |
17 | /**
18 |  * Test harness that runs a real, local Kafka cluster in-process. This is essentially Kafka's
19 |  * ZookeeperTestHarness and KafkaServerTestHarness traits combined, with the addition of an
20 |  * optional Schema Registry and embedded Kafka Connect. Defaults to a 1-ZK, 1-broker cluster.
21 |  *
22 |  * NOTE: the code makes sure requests to localhost / 0.0.0.0 / 127.0.0.1 do not go through an HTTP proxy
23 | */
24 | sealed class KCluster(brokersNumber: Int = 1,
25 | schemaRegistryEnabled: Boolean = true,
26 | avroCompatibilityLevel: AvroCompatibilityLevel = AvroCompatibilityLevel.NONE) extends AutoCloseable {
27 |
28 | private val Zookeeper = new EmbeddedZookeeper
29 | val ZookeeperConnection = s"localhost:${Zookeeper.port}"
30 |
31 | var Connect: EmbeddedConnect = _
32 | var kafkaConnectEnabled: Boolean = false
33 |
34 | private val ZookeeperUtils = ZkUtils.apply(
35 | ZookeeperConnection,
36 | KCluster.ZKSessionTimeout,
37 | KCluster.ZKConnectionTimeout,
38 | setZkAcls()) // true or false doesn't matter because the schema registry Kafka principal is the same as the
39 | // Kafka broker principal, so ACLs won't make any difference. The principals are the same because
40 | // ZooKeeper, Kafka, and the Schema Registry are run in the same process during testing and hence share
41 | // the same JAAS configuration file. Read comments in ASLClusterTestHarness.java for more details.
42 |
43 | val ZKClient: ZkClient = ZookeeperUtils.zkClient
44 |
45 | val BrokersConfig: IndexedSeq[KafkaConfig] = (1 to brokersNumber).map(i => getKafkaConfig(i))
46 |
47 | val Brokers: IndexedSeq[KafkaServer] = BrokersConfig.map(TestUtils.createServer(_, new SystemTime()))
48 |
49 | //val BrokersPort: IndexedSeq[Int] = Brokers.map(_.boundPort(SecurityProtocol.PLAINTEXT))
50 |
51 | val BrokersList: String = TestUtils.getBrokerListStrFromServers(Brokers, getSecurityProtocol)
52 |
53 | val SchemaRegistryService: Option[SchemaRegistryService] = {
54 | if (schemaRegistryEnabled) {
55 | val schemaRegPort = PortProvider.one
56 |       val schemaRegService = new SchemaRegistryService(schemaRegPort,
57 | ZookeeperConnection,
58 | KCluster.KAFKASTORE_TOPIC,
59 | avroCompatibilityLevel,
60 | true)
61 |
62 |       Some(schemaRegService)
63 | } else {
64 | None
65 | }
66 | }
67 |
68 |
69 | private def setZkAcls() = {
70 | getSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT ||
71 | getSecurityProtocol == SecurityProtocol.SASL_SSL
72 | }
73 |
74 | def createTopic(topicName: String, partitions: Int = 1, replication: Int = 1): Unit = {
75 | AdminUtils.createTopic(ZookeeperUtils, topicName, partitions, replication, new Properties, RackAwareMode.Enforced)
76 | }
77 |
78 | def startEmbeddedConnect(workerConfig: Properties, connectorConfigs: List[Properties]): Unit = {
79 | kafkaConnectEnabled = true
80 | Connect = EmbeddedConnect(workerConfig, connectorConfigs)
81 | Connect.start()
82 | }
83 |
84 | private def buildBrokers() = {
85 |     (1 to brokersNumber).map(getKafkaConfig)
86 | .map { config =>
87 | val server = TestUtils.createServer(config, new SystemTime)
88 | (config, server)
89 | }.unzip
90 | }
91 |
92 |
93 | private def injectProperties(props: Properties): Unit = {
94 | props.setProperty("auto.create.topics.enable", "true")
95 | props.setProperty("num.partitions", "1")
96 | /*val folder = new File("kafka.cluster")
97 | if (!folder.exists())
98 | folder.mkdir()
99 |
100 | val logDir = new File("kafka.cluster", UUID.randomUUID().toString)
101 | logDir.mkdir()
102 |
103 | props.setProperty("log.dir", logDir.getAbsolutePath)*/
104 | }
105 |
106 | private def getKafkaConfig(brokerId: Int): KafkaConfig = {
107 | val props: Properties = TestUtils.createBrokerConfig(
108 | brokerId,
109 | ZookeeperConnection,
110 | enableControlledShutdown = false,
111 | enableDeleteTopic = false,
112 | TestUtils.RandomPort,
113 | interBrokerSecurityProtocol = None,
114 | trustStoreFile = None,
115 | KCluster.EMPTY_SASL_PROPERTIES,
116 | enablePlaintext = true,
117 | enableSaslPlaintext = false,
118 | TestUtils.RandomPort,
119 | enableSsl = false,
120 | TestUtils.RandomPort,
121 | enableSaslSsl = false,
122 | TestUtils.RandomPort,
123 | None)
124 | injectProperties(props)
125 | KafkaConfig.fromProps(props)
126 | }
127 |
128 | private def getSecurityProtocol = SecurityProtocol.PLAINTEXT
129 |
130 |
131 | def close(): Unit = {
132 | if (kafkaConnectEnabled) {
133 | Connect.stop()
134 | }
135 | SchemaRegistryService.foreach(_.close())
136 | Brokers.foreach { server =>
137 | server.shutdown
138 | CoreUtils.delete(server.config.logDirs)
139 | }
140 |
141 | ZookeeperUtils.close()
142 | Zookeeper.shutdown()
143 | }
144 | }
145 |
146 |
147 | object KCluster {
148 | val DEFAULT_NUM_BROKERS = 1
149 | val KAFKASTORE_TOPIC = SchemaRegistryConfig.DEFAULT_KAFKASTORE_TOPIC
150 | val EMPTY_SASL_PROPERTIES: Option[Properties] = None
151 |
152 | System.setProperty("http.nonProxyHosts", "localhost|0.0.0.0|127.0.0.1")
153 |
154 | // a larger connection timeout is required for SASL tests
155 | val ZKConnectionTimeout = 30000
156 |
157 | // SASL connections tend to take longer.
158 | val ZKSessionTimeout = 6000
159 |
160 | }
--------------------------------------------------------------------------------
/src/main/scala/com/landoop/kafka/testing/PortProvider.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import java.net.{InetAddress, ServerSocket}
4 |
5 | object PortProvider {
6 |   def apply(count: Int): Vector[Int] = {
7 | (1 to count).map { _ =>
8 | val serverSocket = new ServerSocket(0, 0, InetAddress.getLocalHost)
9 | val port = serverSocket.getLocalPort
10 | serverSocket.close()
11 | port
12 | }.toVector
13 | }
14 |
15 |   def one: Int = apply(1).head
16 | }
17 |
--------------------------------------------------------------------------------
/src/main/scala/com/landoop/kafka/testing/SchemaRegistryService.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import java.net.{Socket, SocketException}
4 | import java.util.Properties
5 |
6 | import com.typesafe.scalalogging.StrictLogging
7 | import io.confluent.kafka.schemaregistry.avro.AvroCompatibilityLevel
8 | import io.confluent.kafka.schemaregistry.client.rest.RestService
9 | import io.confluent.kafka.schemaregistry.rest.{SchemaRegistryConfig, SchemaRegistryRestApplication}
10 | import io.confluent.kafka.schemaregistry.storage.{SchemaRegistry, SchemaRegistryIdentity}
11 | import org.eclipse.jetty.server.Server
12 |
13 | class SchemaRegistryService(val port: Int,
14 | val zookeeperConnection: String,
15 | val kafkaTopic: String,
16 | val avroCompatibilityLevel: AvroCompatibilityLevel,
17 | val masterEligibility: Boolean) extends StrictLogging {
18 |
19 | private val app = new SchemaRegistryRestApplication({
20 | val prop = new Properties
21 | prop.setProperty("port", port.asInstanceOf[Integer].toString)
22 | prop.setProperty(SchemaRegistryConfig.KAFKASTORE_CONNECTION_URL_CONFIG, zookeeperConnection)
23 | prop.put(SchemaRegistryConfig.KAFKASTORE_TOPIC_CONFIG, kafkaTopic)
24 | prop.put(SchemaRegistryConfig.COMPATIBILITY_CONFIG, avroCompatibilityLevel.toString)
25 | prop.put(SchemaRegistryConfig.MASTER_ELIGIBILITY, masterEligibility.asInstanceOf[AnyRef])
26 | prop
27 | })
28 |
29 | val restServer = startServer(port)
30 |
31 | var Endpoint: String = getEndpoint(restServer)
32 |
33 | val restClient = new RestService(Endpoint)
34 |
35 | def startServer(port: Int, retries: Int = 5): Option[Server] = {
36 | var retry = retries > 0
37 | var restServer: Option[Server] = None
38 | if (retry) {
39 | if (isPortInUse(port)) {
40 | logger.info(s"Schema Registry Port $port is already in use")
41 | Thread.sleep(2000)
42 |         restServer = startServer(port, retries - 1)
43 | } else {
44 | restServer = Some(app.createServer)
45 | restServer.get.start()
46 | }
47 | }
48 | restServer
49 | }
50 |
51 | def getEndpoint(restServer: Option[Server]): String = {
52 | if (restServer.isDefined) {
53 | val uri = restServer.get.getURI.toString
54 | if (uri.endsWith("/")) {
55 | uri.substring(0, uri.length - 1)
56 | } else {
57 | uri
58 | }
59 | } else ""
60 | }
61 |
62 | private def isPortInUse(port: Integer): Boolean = try {
63 | new Socket("127.0.0.1", port).close()
64 | true
65 | }
66 | catch {
67 | case e: SocketException => false
68 | }
69 |
70 | def close() {
71 | if (restServer.isDefined) {
72 | restServer.get.stop()
73 | restServer.get.join()
74 | }
75 | }
76 |
77 | def isMaster: Boolean = app.schemaRegistry.isMaster
78 |
79 | def setMaster(schemaRegistryIdentity: SchemaRegistryIdentity): Unit =
80 | app.schemaRegistry.setMaster(schemaRegistryIdentity)
81 |
82 | def myIdentity: SchemaRegistryIdentity = app.schemaRegistry.myIdentity
83 |
84 | def masterIdentity: SchemaRegistryIdentity = app.schemaRegistry.masterIdentity
85 |
86 | def schemaRegistry: SchemaRegistry = app.schemaRegistry
87 | }
88 |
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright 2017 Datamountaineer.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
16 |
17 | # suppress inspection "UnusedProperty" for whole file
18 | log4j.rootLogger=INFO,stdout
19 |
20 | #stdout
21 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
22 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
23 | log4j.appender.stdout.layout.conversionPattern=%d{ISO8601} %-5p [%t] [%c] [%M:%L] %m%n
24 |
--------------------------------------------------------------------------------
/src/test/scala/com/landoop/kafka/testing/BasicTest.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import org.apache.avro.Schema
4 | import org.apache.avro.generic.GenericData
5 | import org.apache.kafka.clients.producer.ProducerRecord
6 |
7 | class BasicTest extends ClusterTestingCapabilities {
8 |
9 | private val createAvroRecord = {
10 | val userSchema = "{\"namespace\": \"example.avro\", \"type\": \"record\", " + "\"name\": \"User\"," + "\"fields\": [{\"name\": \"name\", \"type\": \"string\"}]}"
11 | val parser = new Schema.Parser
12 | val schema = parser.parse(userSchema)
13 | val avroRecord = new GenericData.Record(schema)
14 | avroRecord.put("name", "testUser")
15 | avroRecord
16 | }
17 |
18 | "KCluster" should {
19 | "start up and be able to handle avro records being sent " in {
20 | val topic = "testAvro" + System.currentTimeMillis()
21 | val avroRecord = createAvroRecord
22 | val objects = Array[AnyRef](avroRecord)
23 | val producerProps = stringAvroProducerProps
24 | val producer = createProducer[String, Any](producerProps)
25 |
26 | for (o <- objects) {
27 | val message = new ProducerRecord[String, Any](topic, o)
28 | producer.send(message)
29 | }
30 | val consumerProps = stringAvroConsumerProps()
31 | val consumer = createStringAvroConsumer(consumerProps)
32 | val records = consumeStringAvro(consumer, topic, objects.length)
33 | objects.toSeq shouldBe records
34 | }
35 |
36 | "write and read avro records" in {
37 | val topic = "testAvro" + System.currentTimeMillis()
38 | val avroRecord = createAvroRecord
39 | val objects = Array[Any](avroRecord, true, 130, 345L, 1.23f, 2.34d, "abc", "def".getBytes)
40 | val producerProps = stringAvroProducerProps
41 | val producer = createProducer[String, Any](producerProps)
42 | for (o <- objects) {
43 | producer.send(new ProducerRecord[String, Any](topic, o))
44 | }
45 | val consumerProps = stringAvroConsumerProps("group" + System.currentTimeMillis())
46 | val consumer = createStringAvroConsumer(consumerProps)
47 | val records = consumeStringAvro(consumer, topic, objects.length)
48 | objects.deep shouldBe records.toArray.deep
49 | }
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/src/test/scala/com/landoop/kafka/testing/ClusterTestingCapabilities.scala:
--------------------------------------------------------------------------------
1 | package com.landoop.kafka.testing
2 |
3 | import java.lang.management.ManagementFactory
4 | import java.net.{Socket, SocketException}
5 | import java.rmi.registry.{LocateRegistry, Registry}
6 | import java.rmi.server.UnicastRemoteObject
7 | import java.util
8 | import java.util.Properties
9 | import javax.management.remote.{JMXConnectorServer, JMXConnectorServerFactory, JMXServiceURL}
10 |
11 | import com.typesafe.scalalogging.StrictLogging
12 | import io.confluent.kafka.serializers.{KafkaAvroDeserializer, KafkaAvroSerializer}
13 | import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecord, KafkaConsumer}
14 | import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
15 | import org.apache.kafka.common.serialization._
16 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
17 |
18 | import scala.collection.JavaConversions._
19 | import scala.collection.JavaConverters._
20 | import scala.collection.mutable
21 |
22 | trait ClusterTestingCapabilities extends WordSpec with Matchers with BeforeAndAfterAll with StrictLogging {
23 |
24 | System.setProperty("http.nonProxyHosts", "localhost|0.0.0.0|127.0.0.1")
25 |
26 | val SCHEMA_REGISTRY_URL = "schema.registry.url"
27 |
28 | var registry: Registry = _
29 | val kafkaCluster: KCluster = new KCluster()
30 |
31 | var jmxConnectorServer: Option[JMXConnectorServer] = None
32 |
33 | def startEmbeddedConnect(workerConfig: Properties, connectorConfigs: List[Properties]): Unit = {
34 | kafkaCluster.startEmbeddedConnect(workerConfig, connectorConfigs)
35 | }
36 |
37 | def isPortInUse(port: Integer): Boolean = {
38 | try {
39 | new Socket("127.0.0.1", port).close()
40 | true
41 | }
42 | catch {
43 | case e: SocketException => false
44 | }
45 | }
46 |
47 | protected override def afterAll(): Unit = {
48 | logger.info("Cleaning embedded cluster. Server = " + jmxConnectorServer)
49 | try {
50 | if (jmxConnectorServer.isDefined) {
51 | jmxConnectorServer.get.stop()
52 | }
53 | if (Option(registry).isDefined) {
54 | registry.list().foreach { s =>
55 | registry.unbind(s)
56 | }
57 | UnicastRemoteObject.unexportObject(registry, true)
58 | }
59 | kafkaCluster.close()
60 | } catch {
61 | case e: Throwable =>
62 | logger.error(
63 | s"""|
64 | | ERROR in closing Embedded Kafka cluster $e
65 | """.stripMargin)
66 | }
67 | }
68 |
69 | /**
70 | * Run this method to enable JMX statistics across all embedded apps
71 | *
72 | * @param port - The JMX port to enable RMI stats
73 | */
74 | def loadJMXAgent(port: Int, retries: Int = 10): Unit = {
75 | var retry = retries > 0
76 | if (retry) {
77 | if (isPortInUse(port)) {
78 | logger.info(s"JMX Port $port already in use")
79 | Thread.sleep(2000)
80 | loadJMXAgent(port, retries - 1)
81 | } else {
82 | logger.info(s"Starting JMX Port of embedded Kafka system $port")
83 | registry = LocateRegistry.createRegistry(port)
84 | val env = mutable.Map[String, String]()
85 | env += ("com.sun.management.jmxremote.authenticate" -> "false")
86 | env += ("com.sun.management.jmxremote.ssl" -> "false")
87 | val jmxServiceURL = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://:" + port + "/jmxrmi")
88 | val mbeanServer = ManagementFactory.getPlatformMBeanServer
89 | jmxConnectorServer = Some(JMXConnectorServerFactory.newJMXConnectorServer(jmxServiceURL, env.asJava, mbeanServer))
90 | jmxConnectorServer.get.start()
91 | retry = false
92 | Thread.sleep(2000)
93 | }
94 | }
95 | if (retries == 0) {
96 | logger.error(
97 | """|
98 | | Could not load JMX agent
99 | """.stripMargin)
100 | }
101 | }
102 |
103 | /** Helpful Producers **/
104 | def avroAvroProducerProps: Properties = {
105 | val props = new Properties
106 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
107 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
108 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
109 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
110 | props
111 | }
112 |
113 | def intAvroProducerProps: Properties = {
114 | val props = new Properties
115 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[IntegerSerializer])
116 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
117 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
118 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
119 | props
120 | }
121 |
122 | def getAvroProducerProps[T <: Serializer[_]](ser: Class[T]): Properties = {
123 | val props = new Properties
124 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ser)
125 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
126 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
127 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
128 | props
129 | }
130 |
131 | def stringAvroProducerProps: Properties = {
132 | val props = new Properties
133 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
134 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
135 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
136 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
137 | props
138 | }
139 |
140 |
141 | def avroStringProducerProps: Properties = {
142 | val props = new Properties
143 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
144 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
145 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
146 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
147 | props
148 | }
149 |
150 | def stringstringProducerProps: Properties = {
151 | val props = new Properties
152 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
153 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
154 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.BrokersList)
155 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
156 | props
157 | }
158 |
159 | def createProducer[K, T](props: Properties): KafkaProducer[K, T] = new KafkaProducer[K, T](props)
160 |
161 | /** Helpful Consumers **/
162 | def stringAvroConsumerProps(group: String = "stringAvroGroup"): Properties = {
163 | val props = new Properties
164 | props.put("bootstrap.servers", kafkaCluster.BrokersList)
165 | props.put("group.id", group)
166 | props.put("session.timeout.ms", "6000") // default value of group.min.session.timeout.ms.
167 | props.put("heartbeat.interval.ms", "2000")
168 | props.put("enable.auto.commit", "false")
169 | props.put("auto.offset.reset", "earliest")
170 | props.put("key.deserializer", classOf[StringDeserializer])
171 | props.put("value.deserializer", classOf[KafkaAvroDeserializer])
172 | props.put(SCHEMA_REGISTRY_URL, kafkaCluster.SchemaRegistryService.get.Endpoint)
173 | props
174 | }
175 |
176 | def stringstringConsumerProps(group: String = "stringstringGroup"): Properties = {
177 | val props = new Properties
178 | props.put("bootstrap.servers", kafkaCluster.BrokersList)
179 | props.put("group.id", group)
180 | props.put("session.timeout.ms", "6000") // default value of group.min.session.timeout.ms.
181 | props.put("heartbeat.interval.ms", "2000")
182 | props.put("enable.auto.commit", "false")
183 | props.put("auto.offset.reset", "earliest")
184 | props.put("key.deserializer", classOf[StringDeserializer])
185 | props.put("value.deserializer", classOf[StringDeserializer])
186 | props
187 | }
188 |
189 | def bytesbytesConsumerProps(group: String = "bytes2bytesGroup"): Properties = {
190 | val props = new Properties
191 | props.put("bootstrap.servers", kafkaCluster.BrokersList)
192 | props.put("group.id", group)
193 | props.put("session.timeout.ms", "6000") // default value of group.min.session.timeout.ms.
194 | props.put("heartbeat.interval.ms", "2000")
195 | props.put("auto.commit.interval.ms", "1000")
196 | props.put("auto.offset.reset", "earliest")
197 | props.put("key.deserializer", classOf[BytesDeserializer])
198 | props.put("value.deserializer", classOf[BytesDeserializer])
199 | props
200 | }
201 |
202 | def createStringAvroConsumer(props: Properties): KafkaConsumer[String, AnyRef] = {
203 | new KafkaConsumer[String, AnyRef](props)
204 | }
205 |
206 | /** Consume **/
207 | def consumeStringAvro(consumer: Consumer[String, AnyRef], topic: String, numMessages: Int): Seq[AnyRef] = {
208 |
209 | consumer.subscribe(util.Arrays.asList(topic))
210 |
211 | def accum(records: Seq[AnyRef]): Seq[AnyRef] = {
212 | if (records.size < numMessages) {
213 | val consumedRecords = consumer.poll(1000)
214 | accum(consumedRecords.foldLeft(records) { case (acc, r) =>
215 | acc :+ r.value()
216 | })
217 | } else {
218 | consumer.close()
219 | records
220 | }
221 | }
222 |
223 | accum(Vector.empty)
224 | }
225 |
226 | def consumeRecords[K, V](consumer: Consumer[K, V], topic: String): Iterator[ConsumerRecord[K, V]] = {
227 | consumer.subscribe(util.Arrays.asList(topic))
228 | val result = Iterator.continually {
229 | consumer.poll(1000)
230 | }.flatten
231 | result
232 | }
233 |
234 | }
235 |
--------------------------------------------------------------------------------