├── project
├── build.properties
├── plugins.sbt
└── Build.scala
├── version.sbt
├── .travis.yml
├── .gitignore
├── src
├── main
│ ├── resources
│ │ └── logback.xml
│ ├── scala-2.11
│ │ └── com
│ │ │ └── tuplejump
│ │ │ └── embedded
│ │ │ └── kafka
│ │ │ └── Logging.scala
│ ├── scala-2.10
│ │ └── com
│ │ │ └── tuplejump
│ │ │ └── embedded
│ │ │ └── kafka
│ │ │ └── Logging.scala
│ └── scala
│ │ └── com
│ │ └── tuplejump
│ │ └── embedded
│ │ └── kafka
│ │ ├── Events.scala
│ │ ├── package.scala
│ │ ├── Actors.scala
│ │ ├── Assertions.scala
│ │ ├── EmbeddedIO.scala
│ │ ├── EmbeddedZookeeper.scala
│ │ ├── Settings.scala
│ │ └── EmbeddedKafka.scala
├── test
│ └── scala
│ │ └── com
│ │ └── tuplejump
│ │ └── embedded
│ │ └── kafka
│ │ ├── AbstractSpec.scala
│ │ └── SimpleConsumer.scala
└── it
│ └── scala
│ └── com
│ └── tuplejump
│ └── embedded
│ └── kafka
│ └── InitialSpec.scala
├── README.md
├── scalastyle-config.xml
└── LICENSE
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 0.13.11
--------------------------------------------------------------------------------
/version.sbt:
--------------------------------------------------------------------------------
1 |
2 | version in ThisBuild := "0.0.6"
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | jdk:
3 | - oraclejdk8
4 | sudo: false
5 | scala:
6 | - 2.10.5
7 | - 2.11.7
8 |
9 | script:
10 | - "sbt ++$TRAVIS_SCALA_VERSION -Dtravis=true test"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | .cache/
3 |
4 | # SBT
5 | lib_managed
6 | .history
7 | .lib/
8 | dist/*
9 | lib_managed/
10 | src_managed/
11 | target
12 | **/target/
13 | lib_managed/
14 | src_managed/
15 | project/project
16 | project/boot/
17 | project/plugins/project/
18 |
19 | # Mac
20 | .DS_Store
21 |
22 | # Log files
23 | /logs
24 | *.log
25 | checkpoint
26 |
27 | # IntelliJ
28 | .idea
29 | .idea_modules
30 | *.iml
31 |
32 | # Worksheets (Eclipse or IntelliJ)
33 | *.sc
34 |
35 | # Eclipse
36 | .cache*
37 | .classpath
38 | .project
39 | .scala_dependencies
40 | .settings
41 | .target
42 | .worksheet
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [%level] [%date{ISO8601}] [%logger]: %msg%n
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/src/main/scala-2.11/com/tuplejump/embedded/kafka/Logging.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import com.typesafe.scalalogging.StrictLogging
20 |
/** Scala 2.11 variant: provides `logger` via scala-logging's macro-based StrictLogging. */
trait Logging extends StrictLogging
22 |
--------------------------------------------------------------------------------
/src/main/scala-2.10/com/tuplejump/embedded/kafka/Logging.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import org.slf4j.LoggerFactory
20 |
/** Scala 2.10 variant: provides a plain SLF4J `logger` (no scala-logging macros on 2.10 here). */
trait Logging {

  // Logger named after the concrete class that mixes this trait in.
  protected val logger = LoggerFactory.getLogger(getClass.getName)

}
26 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.github.gseitz" % "sbt-release" % "0.8.5")
2 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.5.0")
3 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.1.8")
4 | addSbtPlugin("org.brianmckenna" % "sbt-wartremover" % "0.14")
5 | addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.3.3")
6 | addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.6.0")
7 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "1.5.0")
8 | addSbtPlugin("com.sksamuel.sbt-versions" % "sbt-versions" % "0.2.0")
9 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "1.1")
10 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.0")
11 | //addSbtPlugin("com.scalapenos" % "sbt-prompt" % "0.2.1")
12 | //addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "0.8.5")
--------------------------------------------------------------------------------
/src/test/scala/com/tuplejump/embedded/kafka/AbstractSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import org.scalatest._
20 |
/** Basic unit test abstraction. */
trait AbstractSpec extends WordSpecLike with Matchers with BeforeAndAfter with BeforeAndAfterAll

/** Functional/integration test abstraction using ScalaTest's FreeSpec style. */
trait FunctionalSpec extends Suite with FreeSpecLike with Matchers with BeforeAndAfter with BeforeAndAfterAll
25 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/Events.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
/** Message protocol used by the embedded Kafka actors. */
object Events {
  // Marker for schedulable tasks sent to publisher actors.
  sealed trait Task extends Serializable
  // Triggers one publish cycle.
  case object PublishTask extends Task

  // Carrier of a batch of data items to publish.
  sealed trait Bufferable[A] extends Serializable {
    def data: Iterable[A]
  }
  // NOTE(review): does not extend Bufferable — confirm whether that is intended.
  final case class SampleEvent(data: AnyRef*)
  /** Publish `data` to the given Kafka `topic` (consumed by KafkaPublisher). */
  final case class PublishTo[A](topic: String, data: A*) extends Bufferable[A]
}
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Embedded Kafka[](http://travis-ci.org/tuplejump/embedded-kafka)
2 | Embedded Kafka for demos, testing and quick prototyping. Much is taken right from the Apache Spark Streaming Kafka test files.
3 |
4 | ## Features
5 |
6 | - Quick and easy prototyping, testing
7 | - Safe demos - not reliant on network
8 | - Compatible with Apache Kafka: 0.9.0.0, 0.9.0.1
9 | - Compatible with Scala 2.10 and 2.11
10 | - Embedded Zookeeper which starts automatically when starting Kafka
11 | - Simple Kafka consumer
12 |
13 | ## Roadmap
14 |
15 | - Improved and updated consumers
16 | - Validation
17 | - Additional and improved configuration
18 | - Documentation
19 | - And much more
20 |
21 | ## Scala Version
22 | This project uses Scala 2.11 by default. To build, test, or publish against Scala 2.10 instead, run:
23 |
24 | sbt -Dscala.version=2.10.6
25 | sbt -Dscala.version=2.10.6 test
26 | sbt -Dscala.version=2.10.6 publish-local
27 |
28 | ### Cross Build
29 | Cross build both Scala versions by starting sbt then running these or other SBT tasks:
30 |
31 | + publish-local
32 | + test
33 |
34 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded
18 |
package object kafka {

  // Default ZooKeeper client port.
  final val DefaultZkPort: Int = 2181

  // Default Kafka broker port.
  final val DefaultKafkaPort: Int = 9092

  final val DefaultHostString: String = "127.0.0.1" //DefaultHost.getHostAddress

  // "127.0.0.1:2181"
  final val DefaultZookeeperConnect: String = s"$DefaultHostString:$DefaultZkPort"

  // "127.0.0.1:9092"
  final val DefaultKafkaConnect: String = s"$DefaultHostString:$DefaultKafkaPort"

  // Randomized so repeated test runs do not collide on stored consumer-group offsets.
  final val DefaultGroupId: String = s"consumer-${scala.util.Random.nextInt(10000)}"

}
35 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/Actors.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import scala.reflect.ClassTag
20 | import scala.util.Try
21 | import akka.actor.Actor
22 | import akka.actor.Props
23 | import kafka.producer.{ProducerConfig, KeyedMessage, Producer}
24 |
// only string so far
//TODO error handling
/** Actor that forwards `Events.PublishTo` batches to the underlying (old-API) Kafka producer. */
class KafkaPublisher[K,V : ClassTag](producer: Producer[K,V]) extends Actor {

  // Best-effort producer close when the actor stops; failures are swallowed.
  override def postStop(): Unit = Try(producer.close())

  def receive: Actor.Receive = {
    // NOTE(review): the type argument is erased at runtime, so this matches any PublishTo.
    case e: Events.PublishTo[V] => publish(e)
  }

  // Sends each data element as its own keyless KeyedMessage on `e.topic`.
  private def publish(e: Events.PublishTo[V]): Unit =
    producer.send(e.data.toArray.map { new KeyedMessage[K,V](e.topic, _) }: _*)

}


object KafkaPublisher {

  /** Props for a String/String publisher built from the given producer config. */
  def props(producerConfig: ProducerConfig): Props = {
    val producer = new Producer[String,String](producerConfig)
    Props(new KafkaPublisher(producer))
  }
}
48 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/Assertions.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.util.concurrent.TimeoutException
20 |
21 | import scala.annotation.tailrec
22 | import scala.util.control.NonFatal
23 |
/**
 * Simple helper assertions.
 */
trait Assertions {

  /**
   * Repeatedly evaluates `func` until it succeeds or `timeout` milliseconds elapse,
   * sleeping `interval` milliseconds between failed attempts.
   *
   * @param timeout  total time budget in milliseconds
   * @param interval sleep between attempts in milliseconds
   * @return the first successful result of `func`
   * @throws java.util.concurrent.TimeoutException if no attempt succeeds within `timeout`;
   *         the last failure is attached as the cause (previously it was discarded,
   *         keeping only its message).
   */
  def eventually[T](timeout: Long, interval: Long)(func: => T): T = {
    def makeAttempt(): Either[Throwable, T] = {
      try Right(func) catch {
        case NonFatal(e) => Left(e)
      }
    }

    val startTime = System.currentTimeMillis()
    @tailrec
    def tryAgain(attempt: Int): T = {
      makeAttempt() match {
        case Right(result) => result
        case Left(e) =>
          val duration = System.currentTimeMillis() - startTime
          if (duration < timeout) {
            Thread.sleep(interval)
          } else {
            // Preserve the underlying failure for debugging instead of dropping it.
            val te = new TimeoutException(e.getMessage)
            te.initCause(e)
            throw te
          }

          tryAgain(attempt + 1)
      }
    }

    tryAgain(1)
  }

}
57 |
--------------------------------------------------------------------------------
/src/test/scala/com/tuplejump/embedded/kafka/SimpleConsumer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.util.Properties
20 | import java.util.concurrent.{CountDownLatch, Executors}
21 |
22 | import scala.util.Try
23 | import kafka.serializer.StringDecoder
24 | import kafka.consumer.{ Consumer, ConsumerConfig }
25 |
/**
 * Very simple consumer of a single Kafka topic. Counts the latch down once
 * per consumed message.
 * TODO this is the old consumer. Update to new consumer.
 */
class SimpleConsumer(
    val latch: CountDownLatch,
    consumerConfig: Map[String, String],
    topic: String,
    groupId: String,
    partitions: Int,
    numThreads: Int) {

  val connector = Consumer.create(createConsumerConfig)

  val streams = connector
    .createMessageStreams(Map(topic -> partitions), new StringDecoder(), new StringDecoder())
    .get(topic)

  val executor = Executors.newFixedThreadPool(numThreads)

  for (stream <- streams) {
    executor.submit(new Runnable() {
      def run(): Unit = {
        for (s <- stream) {
          // Fix: advance the iterator. The previous loop called `hasNext`
          // without ever calling `next()`, which counted the latch down
          // without actually consuming any messages from the stream.
          val iter = s.iterator()
          while (iter.hasNext()) {
            iter.next()
            latch.countDown()
          }
        }
      }
    })
  }

  // Builds the old-consumer config from the provided key/value map.
  private def createConsumerConfig: ConsumerConfig = {
    import scala.collection.JavaConverters._
    val props = new Properties()
    props.putAll(consumerConfig.asJava)
    new ConsumerConfig(props)
  }

  /** Best-effort shutdown of both the Kafka connector and the worker pool. */
  def shutdown(): Unit = Try {
    connector.shutdown()
    executor.shutdown()
  }
}
70 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/EmbeddedIO.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.io.{ File => JFile }
20 |
21 | import scala.util.Try
22 | import org.apache.commons.io.FileUtils
23 |
/** File-system helpers for embedded Kafka/ZooKeeper: log and temp dirs deleted on JVM exit. */
object EmbeddedIO extends Logging {

  // Absolute paths scheduled for deletion by shutdown hooks.
  private val shutdownDeletePaths = new scala.collection.mutable.HashSet[String]()

  // Root "./logs" dir: cleaned and recreated once at startup.
  val logsDir = new JFile(".", "logs")
  dirSetup(new JFile(logsDir.getAbsolutePath))

  /** Creates tmp dirs that automatically get deleted on shutdown. */
  def createTempDir(tmpName: String): JFile =
    dirSetup(new JFile(logsDir, tmpName))

  private def dirSetup(dir: JFile): JFile = {
    // Fix: delete only the target dir, not the whole logs root. The previous
    // code deleted `logsDir` recursively on every call, so creating a second
    // temp dir (e.g. zk-data) wiped out the first (e.g. zk-snapshots).
    if (dir.exists()) deleteRecursively(dir)
    // mkdirs (not mkdir) so missing parents are created as well.
    dir.mkdirs

    logger.info(s"Created dir ${dir.getAbsolutePath.replace("./", "")}")

    registerShutdownDeleteDir(dir)

    sys.runtime.addShutdownHook(new Thread("delete temp dir " + dir) {
      override def run(): Unit = {
        // Skip if an ancestor is already registered: deleting the root covers this dir.
        if (!hasRootAsShutdownDeleteDir(dir)) deleteRecursively(dir)
      }
    })
    dir
  }

  // Records a dir for deletion at shutdown.
  protected def registerShutdownDeleteDir(file: JFile) {
    shutdownDeletePaths.synchronized {
      shutdownDeletePaths += file.getAbsolutePath
    }
  }

  // True when some OTHER registered path is a strict ancestor of `file`.
  private def hasRootAsShutdownDeleteDir(file: JFile): Boolean = {
    val absolutePath = file.getAbsolutePath
    shutdownDeletePaths.synchronized {
      shutdownDeletePaths.exists { path =>
        !absolutePath.equals(path) && absolutePath.startsWith(path)
      }
    }
  }

  // Best-effort recursive delete; null-safe, errors swallowed.
  protected def deleteRecursively(delete: JFile): Unit =
    for {
      file <- Option(delete)
    } Try(FileUtils.deleteDirectory(file))
}
71 |
--------------------------------------------------------------------------------
/src/it/scala/com/tuplejump/embedded/kafka/InitialSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.util.concurrent.{TimeUnit, CountDownLatch}
20 |
21 | import org.apache.kafka.common.serialization.StringDeserializer
22 | import org.scalatest.concurrent.Eventually
23 | import org.scalatest.concurrent.PatienceConfiguration.Timeout
24 | import org.scalatest.time.{Millis, Span}
25 |
/** Integration test: boots embedded ZooKeeper + Kafka, then round-trips messages. */
class InitialSpec extends AbstractSpec with Eventually with Logging {

  // Upper bound for each `eventually` condition below.
  private val timeout = Timeout(Span(10000, Millis))

  "Initially, EmbeddedKafka" must {
    val kafka = new EmbeddedKafka()
    val topic = "test"
    val total = 1000
    // Counted down once per consumed message; reaches 0 when all arrive.
    val latch = new CountDownLatch(total)

    "start embedded zookeeper and embedded kafka" in {
      kafka.isRunning should be (false)
      kafka.start()
      eventually(timeout)(kafka.isRunning)
    }
    "create a topic" in {
      kafka.createTopic(topic, 1, 1)
    }
    "publish messages to the embedded kafka instance" in {
      val config = kafka.consumerConfig(
        group = "some.group",
        kafkaConnect = kafka.kafkaConfig.hostName + ":" + kafka.kafkaConfig.port,
        zkConnect = kafka.kafkaConfig.zkConnect,
        offsetPolicy = "largest",//latest with new consumer
        autoCommitEnabled = true,
        kDeserializer = classOf[StringDeserializer],
        vDeserializer = classOf[StringDeserializer])
      val consumer = new SimpleConsumer(latch, config, topic, "consumer.group", 1, 1)

      val batch1 = for (n <- 0 until total) yield s"message-test-$n"

      logger.info(s"Publishing ${batch1.size} messages...")

      kafka.sendMessages(topic, batch1)
      // Wait up to 10s for the consumer to drain every message before asserting.
      latch.await(10000, TimeUnit.MILLISECONDS)
      latch.getCount should be (0)

      consumer.shutdown()
    }
    "shut down relatively cleanly for now" in {
      kafka.shutdown()
      eventually(timeout)(!kafka.isRunning)
    }
  }
}
71 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/EmbeddedZookeeper.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.io.{File => JFile}
20 | import java.net.InetSocketAddress
21 | import java.util.concurrent.atomic.AtomicReference
22 |
23 | import scala.util.Try
24 | import org.I0Itec.zkclient.exception.ZkMarshallingError
25 | import org.I0Itec.zkclient.serialize.ZkSerializer
26 | import org.apache.zookeeper.server.{ NIOServerCnxnFactory, ZooKeeperServer }
27 |
28 | /**
29 | * Implements a simple standalone ZooKeeperServer.
30 | * To create a ZooKeeper client object, the application needs to pass a
31 | * connection string containing a comma separated list of host:port pairs,
32 | * each corresponding to a ZooKeeper server.
33 | *
34 | * Session establishment is asynchronous. This constructor will initiate
35 | * connection to the server and return immediately - potentially (usually)
36 | * before the session is fully established. The watcher argument specifies
37 | * the watcher that will be notified of any changes in state. This
38 | * notification can come at any point before or after the constructor call
39 | * has returned.
40 | *
41 | * The instantiated ZooKeeper client object will pick an arbitrary server
42 | * from the connectString and attempt to connect to it. If establishment of
43 | * the connection fails, another server in the connect string will be tried
44 | * (the order is non-deterministic, as we random shuffle the list), until a
45 | * connection is established. The client will continue attempts until the
46 | * session is explicitly closed.
47 | */
class EmbeddedZookeeper(val connectTo: String,
    val tickTime: Int,
    snapDir: JFile,
    dataDir: JFile) extends Assertions with Logging {

  logger.info("Starting Zookeeper")

  // Connection factory accepting client connections; None until start().
  private val _factory = new AtomicReference[Option[NIOServerCnxnFactory]](None)

  // The standalone server instance; None until start().
  private val _zookeeper = new AtomicReference[Option[ZooKeeperServer]](None)

  def isRunning: Boolean = _zookeeper.get exists (_.isRunning)

  /** Returns the server, lazily starting it on first access. */
  def zookeeper: ZooKeeperServer = _zookeeper.get.getOrElse {
    // Fix: messages previously referred to "EmbeddedKafka"/"Kafka server",
    // a copy-paste error that made diagnostics misleading.
    logger.info("Attempt to call server before starting EmbeddedZookeeper instance. Starting automatically...")
    start()
    eventually(5000, 500)(assert(isRunning))

    _zookeeper.get.getOrElse(throw new IllegalStateException("ZooKeeper server not initialized."))
  }

  /** Starts only one. */
  def start(): Unit = {
    val server = new ZooKeeperServer(snapDir, dataDir, tickTime)
    _zookeeper.set(Some(server))

    // connectTo is "host:port".
    val (ip, port) = {
      val splits = connectTo.split(":")
      (splits(0), splits(1).toInt)
    }

    val f = new NIOServerCnxnFactory()
    f.configure(new InetSocketAddress(ip, port), 16)
    f.startup(server)

    _factory.set(Some(f))

    logger.info(s"ZooKeeperServer isRunning: $isRunning")
  }

  /** Shuts down the connection factory first, then the server; both refs reset to None. */
  def shutdown(): Unit = {
    logger.info(s"Shutting down ZK NIOServerCnxnFactory.")

    for (v <- _factory.get) v.shutdown()
    _factory.set(None)

    for (v <- _zookeeper.get) {
      Try(v.shutdown())
      //awaitCond(!v.isRunning, 2000.millis)
      logger.info(s"ZooKeeper server shut down.")
    }
    _zookeeper.set(None)
  }
}
102 |
/** ZkSerializer that round-trips Strings as UTF-8 bytes; null bytes deserialize to "". */
object DefaultStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data: Object): Array[Byte] =
    data match {
      case s: String => s.getBytes("UTF-8")
      case _         => throw new ZkMarshallingError(s"Unsupported type '${data.getClass}'")
    }

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes: Array[Byte]): Object =
    Option(bytes).map(new String(_, "UTF-8")).getOrElse("") //ick
}
117 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/Settings.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.util.Properties
20 |
21 | import scala.collection.JavaConverters._
22 |
23 | /**
24 | * TODO remove hard coded, defaults, add config.
25 | * * Replication configurations *
26 | * "num.replica.fetchers" -> "4",
27 | * "replica.fetch.max.bytes=1048576
28 | * "replica.fetch.wait.max.ms=500
29 | * "replica.high.watermark.checkpoint.interval.ms=5000
30 | * "replica.socket.timeout.ms=30000
31 | * "replica.socket.receive.buffer.bytes=65536
32 | * "replica.lag.time.max.ms=10000
33 |
34 | * "controller.socket.timeout.ms=30000
35 | * "controller.message.queue.size=10
36 |
37 | * * Log configuration *
38 | * "num.partitions" -> "8",
39 | * "message.max.bytes" -> "1000000",
40 | * "auto.create.topics.enable" -> "true",
41 | * "log.index.interval.bytes" -> "4096",
42 | * "log.index.size.max.bytes" -> "10485760",
43 | * "log.retention.hours" -> "168",
44 | * "log.flush.interval.ms" -> "10000",
45 | * "log.flush.interval.messages" -> "20000",
46 | * "log.flush.scheduler.interval.ms" -> "2000",
47 | * "log.roll.hours" -> "168",
48 | * "log.retention.check.interval.ms" -> "300000",
49 | * "log.segment.bytes" -> "1073741824",
50 |
51 | * * ZK configuration *
52 | * "zookeeper.connection.timeout.ms" -> "6000",
53 | * "zookeeper.sync.time.ms" -> "2000",
54 |
55 | * * Socket server configuration *
56 | * "num.io.threads" -> "8",
57 | * "num.network.threads" -> "8",
58 | * "socket.request.max.bytes" -> "104857600",
59 | * "socket.receive.buffer.bytes" -> "1048576",
60 | * "socket.send.buffer.bytes" -> "1048576",
61 | * "queued.max.requests" -> "16",
62 | * "fetch.purgatory.purge.interval.requests" -> "100",
63 | * "producer.purgatory.purge.interval.requests" -> "100"
64 | */
trait Settings {

  /** Broker properties for an embedded Kafka server rooted at `logDir`. */
  def brokerConfig(kafkaConnect: String, zkConnect: String, logDir: String): Map[String, String] = {
    // First host of the first host:port pair in the connect string. //TODO cleanup
    val seed = kafkaConnect.split(",").head.split(":").head
    Map(
      //"broker.id" -> "0",
      "host.name" -> seed,
      "metadata.broker.list" -> kafkaConnect,
      "advertised.host.name" -> seed,
      "advertised.port" -> "9092",
      "log.dir" -> logDir,
      "log.dirs" -> logDir,
      "zookeeper.connect" -> zkConnect,
      "replica.high.watermark.checkpoint.interval.ms" -> "5000",
      "log.flush.interval.messages" -> "1",
      "replica.socket.timeout.ms" -> "1500",
      "controlled.shutdown.enable" -> "true")
  }

  /** Producer properties using the given key/value serializer classes. */
  def producerConfig(kafkaConnect: String, kSerializer: Class[_], vSerializer: Class[_]): Map[String, String] =
    Map(
      "metadata.broker.list" -> kafkaConnect,//bug? fails if not set, warns if is
      "bootstrap.servers" -> kafkaConnect,
      "key.serializer" -> kSerializer.getName,
      "value.serializer" -> vSerializer.getName)

  /** offsetPolicy = latest,earliest,none */
  def consumerConfig(group: String,
      kafkaConnect: String,
      zkConnect: String,
      offsetPolicy: String,
      autoCommitEnabled: Boolean,
      kDeserializer: Class[_],
      vDeserializer: Class[_]): Map[String, String] =
    Map(
      "bootstrap.servers" -> kafkaConnect,
      "group.id" -> group,
      "auto.offset.reset" -> offsetPolicy,
      "enable.auto.commit" -> autoCommitEnabled.toString,
      "key.deserializer" -> kDeserializer.getName,
      "value.deserializer" -> vDeserializer.getName,
      "zookeeper.connect" -> zkConnect)

  /** Alias of [[consumerConfig]] kept for API symmetry. */
  def kafkaParams(group: String,
      kafkaConnect: String,
      zkConnect: String,
      offsetPolicy: String,
      autoCommitEnabled: Boolean,
      kDeserializer: Class[_],
      vDeserializer: Class[_]): Map[String, String] =
    consumerConfig(group, kafkaConnect, zkConnect, offsetPolicy, autoCommitEnabled, kDeserializer, vDeserializer)

  /** Copies a String map into a java.util.Properties instance. */
  def mapToProps(values: Map[String, String]): Properties = {
    val props = new Properties()
    values.foreach { case (k, v) => props.setProperty(k, v) }
    props
  }
}
126 |
--------------------------------------------------------------------------------
/scalastyle-config.xml:
--------------------------------------------------------------------------------
1 |
2 | Scalastyle standard configuration
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/src/main/scala/com/tuplejump/embedded/kafka/EmbeddedKafka.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Tuplejump
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.tuplejump.embedded.kafka
18 |
19 | import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean }
20 |
21 | import org.apache.kafka.common.serialization.StringSerializer
22 | import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer}
23 | import kafka.admin.AdminUtils
24 | import kafka.producer.ProducerConfig
25 | import kafka.server.{ KafkaConfig, KafkaServer }
26 | import org.I0Itec.zkclient.ZkClient
27 |
/**
 * An embedded Kafka broker backed by an embedded Zookeeper instance, for use
 * in tests and local development.
 *
 * Lifecycle: [[start]] boots Zookeeper, a ZkClient and the broker;
 * [[shutdown]] tears them down in reverse order. A JVM shutdown hook ensures
 * teardown even if the owner never calls [[shutdown]].
 *
 * @param kafkaConnect host:port the Kafka broker binds to
 * @param zkConnect    host:port the Zookeeper server binds to
 */
final class EmbeddedKafka(kafkaConnect: String, zkConnect: String)
  extends Settings with Assertions with Logging {

  def this() = this(DefaultKafkaConnect, DefaultZookeeperConnect)

  /** Should an error occur, make sure it shuts down. */
  sys.runtime.addShutdownHook(new Thread("Shutting down embedded kafka") {
    override def run(): Unit = shutdown()
  })

  val logDir = EmbeddedIO.logsDir
  val snapDir = EmbeddedIO.createTempDir("zk-snapshots")
  val dataDir = EmbeddedIO.createTempDir("zk-data")

  val config = brokerConfig(kafkaConnect, zkConnect, dataDir.getAbsolutePath)

  val kafkaConfig: KafkaConfig = new KafkaConfig(mapToProps(config))

  /** hard-coded for Strings only so far. Roadmap: making configurable. */
  val producerConfig: ProducerConfig = new ProducerConfig(mapToProps(
    super.producerConfig(kafkaConnect, classOf[StringSerializer], classOf[StringSerializer])
  ))

  // All mutable lifecycle state is held in atomics so that the JVM shutdown
  // hook thread and the owning thread can read and update it safely.
  private val _isRunning = new AtomicBoolean(false)

  private val _zookeeper = new AtomicReference[Option[EmbeddedZookeeper]](None)

  private val _zkClient = new AtomicReference[Option[ZkClient]](None)

  private val _server = new AtomicReference[Option[KafkaServer]](None)

  private val _producer = new AtomicReference[Option[KafkaProducer[String, String]]](None)

  /** Returns the broker, lazily starting the embedded instance on first access. */
  def server: KafkaServer = _server.get.getOrElse {
    logger.info("Attempt to call server before starting EmbeddedKafka instance. Starting automatically...")
    start()
    eventually(5000, 500)(assert(isRunning, "Kafka must be running."))
    _server.get.getOrElse(throw new IllegalStateException("Kafka server not initialized."))
  }

  /** True once Zookeeper is up, the broker ref is set and the running flag is set. */
  def isRunning: Boolean = {
    _zookeeper.get.exists(_.isRunning) && _server.get.isDefined && _isRunning.get() // a better way?
  }

  /** Returns a String/String producer, created on first access. Requires a running broker. */
  def producer: KafkaProducer[String, String] = _producer.get.getOrElse {
    require(isRunning, "Attempt to call producer before starting EmbeddedKafka instance. Call EmbeddedKafka.start() first.")
    val p = try new KafkaProducer[String, String](producerConfig.props.props) catch {
      case e: Throwable => logger.error(s"Unable to create producer.", e); throw e
    }
    _producer.set(Some(p))
    p
  }

  /** Starts the embedded Zookeeper server and Kafka brokers. */
  def start(): Unit = {
    require(_zookeeper.get.forall(!_.isRunning), "Zookeeper should not be running prior to calling 'start'.")
    require(_server.get.isEmpty, "KafkaServer should not be running prior to calling 'start'.")

    val zk = new EmbeddedZookeeper(zkConnect, 6000, snapDir, dataDir)
    zk.start()
    eventually(5000, 500) {
      require(zk.isRunning, "Zookeeper must be started before proceeding with setup.")
      _zookeeper.set(Some(zk))
    }

    logger.info("Starting ZkClient")
    _zkClient.set(Some(new ZkClient(zkConnect, 6000, 60000, DefaultStringSerializer)))

    logger.info("Starting KafkaServer")
    _server.set(Some(new KafkaServer(kafkaConfig)))
    server.startup()

    _isRunning.set(true) // TODO add a test
  }

  /** Creates a Kafka topic and waits until it is propagated to the cluster. */
  def createTopic(topic: String, numPartitions: Int, replicationFactor: Int): Unit = {
    AdminUtils.createTopic(server.zkUtils, topic, numPartitions, replicationFactor)
    eventually(10000, 1000) {
      assert(AdminUtils.topicExists(server.zkUtils, topic))
      logger.info(s"Topic [$topic] created.")
    }
  }

  /** Send the messages to the Kafka broker, each message repeated per its mapped frequency. */
  def sendMessages(topic: String, messageToFreq: Map[String, Int]): Unit = {
    val messages = messageToFreq.flatMap { case (s, freq) => Seq.fill(freq)(s) }.toArray
    sendMessages(topic, messages)
  }

  /** Send the array of messages to the Kafka broker */
  def sendMessages(topic: String, messages: Iterable[String]): Unit =
    for {
      message <- messages
    } producer.send(new ProducerRecord[String, String](topic, message))

  /* TODO with a key and as [K,V] and Array[Byte]. Currently unused. */
  private val toRecord = (topic: String, group: Option[String], message: String) => group match {
    case Some(k) => new ProducerRecord[String, String](topic, k, message)
    case _ => new ProducerRecord[String, String](topic, message)
  }

  /** Shuts down the embedded servers. Safe to call more than once. */
  def shutdown(): Unit = try {
    logger.info(s"Shutting down Kafka server.")

    // Fix: clear the running flag up front so `isRunning` reports false during
    // and after teardown — previously it was left stale at `true` forever.
    _isRunning.set(false)

    for (v <- _producer.get) v.close()
    for (v <- _server.get) {
      //https://issues.apache.org/jira/browse/KAFKA-1887
      v.kafkaController.shutdown()
      v.shutdown()
      v.awaitShutdown()
    }

    for (v <- _zkClient.get) v.close()

    for (v <- _zookeeper.get) v.shutdown()

    _producer.set(None)
    _server.set(None)
    _zkClient.set(None)
    _zookeeper.set(None)
  } catch {
    case e: Throwable =>
      logger.error("Error shutting down.", e)
  }
}
156 |
--------------------------------------------------------------------------------
/project/Build.scala:
--------------------------------------------------------------------------------
1 |
2 | import scala.language.postfixOps
3 | import sbt.Keys._
4 | import sbt._
5 | import de.heikoseeberger.sbtheader.AutomateHeaderPlugin
6 |
/** Root build definition: a single project with cross-version dependencies. */
object Build extends sbt.Build {

  /** Dependencies shared by every cross-build target. */
  private val coreDependencies = Seq(
    "org.apache.kafka" %% "kafka" % "0.9.0.1",
    "ch.qos.logback" % "logback-classic" % "1.0.7",
    "commons-io" % "commons-io" % "2.4",
    "org.scalacheck" %% "scalacheck" % "1.12.5" % "test, it",
    "org.scalatest" %% "scalatest" % "2.2.5" % "test, it")

  /** Dependencies that differ between the 2.10 and 2.11 builds. */
  private def versionedDependencies(sv: String) =
    CrossVersion.partialVersion(sv) match {
      case Some((2, minor)) if minor < 11 => Seq(
        "com.typesafe.akka" %% "akka-actor" % "2.3.14",
        "org.slf4j" % "slf4j-api" % "1.7.13")
      case _ => Seq(
        "com.typesafe.akka" %% "akka-actor" % "2.4.1",
        "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0")
    }

  lazy val root = project.in(file("."))
    .settings(Settings.common ++ Settings.publishSettings)
    .settings(Seq(libraryDependencies ++=
      coreDependencies ++ versionedDependencies(scalaVersion.value)))
    .enablePlugins(AutomateHeaderPlugin) configs IntegrationTest

}
29 |
/**
 * Reusable sbt settings for the embedded-kafka build.
 *
 * NOTE(review): this was `object Settings extends sbt.Build`, which made sbt
 * treat it as a second build definition alongside `object Build`. It only
 * holds settings, so the extends clause was removed; all names it uses come
 * from the file-level `sbt._` / `sbt.Keys._` imports.
 */
object Settings {
  import de.heikoseeberger.sbtheader.HeaderPlugin
  import de.heikoseeberger.sbtheader.license.Apache2_0
  import wartremover.WartRemover.autoImport._
  import com.typesafe.tools.mima.plugin.MimaKeys._
  import com.typesafe.tools.mima.plugin.MimaPlugin._
  import org.scalastyle.sbt.ScalastylePlugin._
  import scoverage.ScoverageKeys

  val versionStatus = settingKey[Unit]("The Scala version used in cross-build reapply for '+ package', '+ publish'.")

  final val javaVersion = scala.util.Properties.javaVersion

  // e.g. "1.8.0_66" -> "1.8"; assumes the "1.x.0_yy" version string format.
  final val javaBinaryVersion = javaVersion.dropRight(5)

  lazy val buildSettings = Seq(
    name := "embedded-kafka",
    organization := "com.tuplejump",
    licenses += ("Apache-2.0", url("http://www.apache.org/licenses/LICENSE-2.0")),
    homepage := Some(url("http://github.com/tuplejump/embedded-kafka")),

    // NOTE(review): the XML literals below were reconstructed from the visible
    // text (they were garbled in transit) — confirm before publishing.
    pomExtra := (
      <scm>
        <url>git@github.com:tuplejump/embedded-kafka.git</url>
        <connection>scm:git:git@github.com:tuplejump/embedded-kafka.git</connection>
      </scm>
      <developers>
        <developer>
          <id>helena</id>
          <name>Helena Edelson</name>
        </developer>
      </developers>),

    crossScalaVersions := Seq("2.11.7", "2.10.6"),

    crossVersion := CrossVersion.binary,

    scalaVersion := sys.props.getOrElse("scala.version", crossScalaVersions.value.head),

    versionStatus := {
      println(s"Scala: ${scalaVersion.value} Java: $javaVersion")
    },

    HeaderPlugin.autoImport.headers := Map(
      "scala" -> Apache2_0("2016", "Tuplejump"),
      "conf" -> Apache2_0("2016", "Tuplejump", "#")
    )
  )

  val encoding = Seq("-encoding", "UTF-8")

  lazy val common = buildSettings ++ testSettings ++ styleSettings ++ Seq(

    cancelable in Global := true,

    crossPaths in ThisBuild := true,

    logBuffered in Compile := false,
    logBuffered in Test := false,

    outputStrategy := Some(StdoutOutput),

    ScoverageKeys.coverageHighlighting := true,

    aggregate in update := false,

    updateOptions := updateOptions.value.withCachedResolution(true),

    incOptions := incOptions.value.withNameHashing(true),

    scalacOptions ++= encoding ++ Seq(
      "-Xfatal-warnings",
      "-deprecation",
      "-feature",
      "-language:_",
      "-unchecked",
      "-Xlint",
      "-Yno-adapted-args",
      "-Ywarn-dead-code" // N.B. doesn't work well with the ??? hole
      /* "-Ywarn-numeric-widen",
         "-Ywarn-value-discard") */
    ),

    scalacOptions ++= (
      CrossVersion.partialVersion(scalaVersion.value) match {
        case Some((2, minor)) if minor < 11 => Seq.empty
        case _ => Seq("-Ywarn-unused-import")
      }),

    javacOptions ++= encoding ++ Seq(
      "-source", javaBinaryVersion,
      "-target", javaBinaryVersion,
      // Fix: was "-Xmx1G" — a bare JVM flag is not a valid javac option.
      // "-J-Xmx1G" is javac's spelling for forwarding it to the compiler JVM;
      // verify against the sbt javac invocation mode in use.
      "-J-Xmx1G",
      "-Xlint:unchecked",
      "-Xlint:deprecation"
    ),

    autoCompilerPlugins := true,
    autoAPIMappings := true,
    //exportJars := true

    ivyScala := ivyScala.value map {
      _.copy(overrideScalaVersion = true)
    },

    ivyLoggingLevel in ThisBuild := UpdateLogging.Quiet,

    evictionWarningOptions in update := EvictionWarningOptions.default
      .withWarnTransitiveEvictions(false)
      .withWarnDirectEvictions(false)
      .withWarnScalaVersionEviction(false),

    // (a duplicate `crossPaths in ThisBuild := true` was removed; set once above)

    wartremoverErrors in (Compile, compile) :=
      (CrossVersion.partialVersion(scalaVersion.value) match {
        case Some((2, minor)) if minor < 11 =>
          Seq.empty[wartremover.Wart]
        case _ =>
          Warts.allBut(
            Wart.Any, //todo exclude actor
            Wart.Throw,
            Wart.DefaultArguments,
            Wart.NonUnitStatements,
            Wart.Nothing,
            Wart.ToString)
      }),

    parallelExecution in ThisBuild := false,
    parallelExecution in Global := false,

    publishMavenStyle := false,

    managedSourceDirectories := (scalaSource in Compile).value :: Nil,
    unmanagedSourceDirectories := (scalaSource in Compile).value :: Nil
  )

  val compileScalastyle = taskKey[Unit]("compileScalastyle")

  val testScalastyle = taskKey[Unit]("testScalastyle")

  lazy val styleSettings = Seq(
    testScalastyle := org.scalastyle.sbt.ScalastylePlugin.scalastyle.in(Test).toTask("").value,
    scalastyleFailOnError := true,
    compileScalastyle := org.scalastyle.sbt.ScalastylePlugin.scalastyle.in(Compile).toTask("").value
  )

  val testConfigs = inConfig(Test)(Defaults.testTasks) ++ inConfig(IntegrationTest)(Defaults.itSettings)

  val testOptionSettings = Seq(
    Tests.Argument(TestFrameworks.ScalaTest, "-oDF")
  )

  lazy val testSettings = testConfigs ++ Seq(
    parallelExecution in Test := false,
    parallelExecution in IntegrationTest := false,
    testOptions in Test ++= testOptionSettings,
    testOptions in IntegrationTest ++= testOptionSettings,
    fork in Test := true,
    fork in IntegrationTest := true,
    (compile in IntegrationTest) <<= (compile in Test, compile in IntegrationTest) map { (_, c) => c },
    (internalDependencyClasspath in IntegrationTest) <<= Classpaths.concat(internalDependencyClasspath in IntegrationTest, exportedProducts in Test)
  )

  lazy val mimaSettings = mimaDefaultSettings ++ Seq(
    previousArtifact := None,
    binaryIssueFilters ++= Seq.empty
  )

  lazy val publishSettings = Seq(
    publishMavenStyle := true,
    publishTo := {
      val nexus = "https://oss.sonatype.org/"
      if (isSnapshot.value)
        Some("snapshots" at nexus + "content/repositories/snapshots")
      else
        Some("releases" at nexus + "service/local/staging/deploy/maven2")
    },
    publishArtifact in Test := false,
    pomIncludeRepository := { _ => false }
  )
}
212 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------