├── .gitignore ├── .travis.yml ├── LICENSE ├── NOTICE ├── README.md ├── build.sbt ├── project ├── CentralRequirementsPlugin.scala ├── Dependencies.scala ├── build.properties └── plugins.sbt ├── src ├── main │ └── scala │ │ ├── kafka │ │ └── consumer │ │ │ └── TopicOffsetConsumerConnector.scala │ │ └── mutatis │ │ ├── DecodedEvent.scala │ │ ├── Init.scala │ │ └── mutatis.scala └── test │ └── scala │ ├── common │ └── KafkaTestHelper.scala │ └── xenomorph │ ├── ConsumerSpec.scala │ ├── EmbeddedKafkaBuilder.scala │ ├── IntegrationSpec.scala │ ├── ProducerSpec.scala │ ├── UnitSpec.scala │ └── example.scala └── version.sbt /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .idea_modules 3 | target 4 | project/boot 5 | *.swp 6 | project.vim 7 | tags 8 | .lib 9 | *~ 10 | *# 11 | .DS_Store 12 | .history 13 | .cache 14 | .classpath 15 | .project 16 | .settings 17 | release-notes 18 | logs 19 | project/project 20 | project/target 21 | target 22 | lib_managed 23 | src_managed 24 | project/boot 25 | tmp 26 | .history 27 | dist 28 | .DS_Store 29 | .cache 30 | .settings 31 | .classpath 32 | bin 33 | .class 34 | .project 35 | .ivy2 36 | gpg.sbt 37 | 38 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: scala 2 | 3 | matrix: 4 | include: 5 | - jdk: oraclejdk8 6 | scala: 2.11.8 7 | 8 | branches: 9 | only: 10 | - master 11 | 12 | before_script: 13 | - "if [ $TRAVIS_PULL_REQUEST = 'false' ]; then git checkout -qf $TRAVIS_BRANCH; fi" 14 | 15 | script: 16 | - | 17 | if [ $TRAVIS_PULL_REQUEST = 'false' ]; then 18 | if [ $RELEASE_ON_PUSH = 'false' ]; then 19 | sbt test coverageReport 20 | else 21 | sbt ++$TRAVIS_SCALA_VERSION 'release with-defaults' 22 | fi 23 | else 24 | sbt test coverageReport 25 | fi 26 | 27 | cache: 28 | directories: 29 | - $HOME/.ivy2/cache 30 | - 
$HOME/.sbt/boot/scala-$TRAVIS_SCALA_VERSION 31 | 32 | after_success: 33 | - find $HOME/.sbt -name "*.lock" | xargs rm 34 | - find $HOME/.ivy2 -name "ivydata-*.properties" | xargs rm 35 | 36 | env: 37 | global: 38 | - secure: "k9RZkT/qlkpJfqy3OQLobD3GXKhdc4ayDUQ+9uTZ9E5c0NSbeFCMWfLByG7WgyZif25ZHbW8drPXdY/WVS85gH3SVHsqeUSV4I+wmiHoFbae/zE+3OYplo24LItujxV19bLisiVeYABR6Kvg96V42Iv+aC/4s0Iq4tUIh1+INvniXeyX+Wu9tTAW30aZ1z5SQaayZDytrwNGLRFdxLAc7aIhtEpeYbEr7llW2BP2hDfS2viXm9ibXTVwWVJT1aTdm8ENTBVNczNKGrLjnpVydU8wwWuv5qzPGKAxWkhFkQiwsY3GFsRQUX+8O4FhFZD9a/bz9QVixHDxdzSG6O/EjtPtCydv2f/9s0MM4SycdJHeweCXgkLTH0WmzeMicT8cAaZNW4wB6+2Q9BggA0RovQDf8u3ROBnNY4mi4RjAxJkZWM0btYpcr8mV0/YMEvRnwFuM1p9BYVXtKNji4UBF/4DUlbzUGELr09GqhbTIzjYW9ZuQ3WTYFNl20jjtsj6wWkjqRoqYF+MWSEDYfOtHgEDRQ5dZPokefsaMQ7tkCylr0ZECtpE0m6ZDAeZ7pOeiVLiuOmtOiDvoAPDLBJdJormSKWAppb43JXfnvwzH5h3TrDMuO+vckjVWYNzFlCWG14Q0+XaReWk3OPIr5o11Jo/uwrpZPNZE0l/VRWCg14s=" 39 | - secure: "n3ZN9b4KuWO4vP2w0SpmrDydLmY/Z9adINccjrrK5hBCLmtzWLLil7AekBk0WZEL0gxqpQ5vpAlnVlN8Krn/KxyxvL7G0y93gK3oGU2s/a514pE49MGwpAk0JOEAYZyBoZ/MxrYhWccv+P6HFxbDskEKqB5ymA56VjKH/xdVX5vU7/wEi9RHoqXVun2Fxn+u/mSGOtZ+SGpfD2KpBWsLW0gq7VDsuHKX8NcvH7HSgGzibe4aH6joLCWCv51kO9+tJF0x9NAnnlDzskIP4AsNi8CCMofqHwZaqrzVToS/eaoyWBOfPLd+LisbX5++Yj4u5u8kpdHKFZo0U3dwqeoZ5j4CTUeQPef4vhurhSyrXzpM7T5OurhuxWcvBKVLaXWHa1n8/CSA/OINM14EadVop57YWiveFfHtgnLuDZtZribFR6HgG5N+BRwT8UJNxpFgfnRzfGdcdALMlDTkc4jqyImO+KfGtQef7avNbuxE9257RmwgRo/U8OuGDfh8BJDLa3LWQClorE/II8528L/lfEcvyc3UiIPgEF/zJqHARlo4ywNh7PD1GeaNXi2UIfbBbs/nBy62ccPspLIIOq8wG1kyqk236qyYDA7Wa8w4tGg8J0KpT4OSVRHs97nQ7zD1oBWF4XyYD71Hhjz1ip8OwshWiUkuezDplbvXStBmQUU=" 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner]. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | ============================================================== 2 | mutatis 3 | Copyright (C) 2017 Verizon. 4 | ============================================================== 5 | 6 | This product includes software developed by 7 | Verizon (www.verizon.com; www.github.com/Verizon). 
8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mutatis 2 | 3 | [![Build Status](https://travis-ci.org/Verizon/mutatis.svg?branch=master)](https://travis-ci.org/Verizon/mutatis) 4 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.verizon.mutatis/core_2.11/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.verizon.mutatis/core_2.11) 5 | 6 | Scalaz Streams wrapper for Kafka Producer and Consumer 7 | 8 | ## Getting Started 9 | 10 | These instructions will get a copy of the project up and running on your local machine for development and testing purposes. 11 | 12 | ### Prerequisites 13 | 14 | Set up kafka server. To do this, download and start Zookeeper and Kafka, as per https://kafka.apache.org/082/documentation.html#quickstart 15 | 16 | ### Installing 17 | 18 | * create a topic called "test8" with 8 partitions: 19 | ``` 20 | ./bin/kafka-topics.sh --create \ 21 | --zookeeper localhost:2181 \ 22 | --replication-factor 1 \ 23 | --partitions 8 \ 24 | --topic test8 25 | ``` 26 | * Start producer - notice one message is produced every 100 milliseconds: 27 | ``` 28 | sbt "test:run-main mutatis.ExampleProducer" 29 | ``` 30 | * Start consumer - notice one message takes 250 milliseconds to process, one consumer is not enough: 31 | ``` 32 | sbt "test:run-main mutatis.ExampleConsumer" 33 | ``` 34 | 35 | See `src/test/scala/mutatis/example.scala` for a complete example 36 | 37 | ## Running the tests 38 | 39 | `sbt test` 40 | 41 | ## Built With 42 | 43 | * [Kafka](https://kafka.apache.org/082/documentation.html) - A distributed streaming platform 44 | * [Scalaz](https://github.com/scalaz/scalaz) - An extension to the core Scala library for functional programming. 45 | 46 | ## Versioning 47 | 48 | We use [SemVer](http://semver.org/) for versioning. 
For the versions available, see the [tags on this repository](./tags). 49 | 50 | ## Authors 51 | 52 | * https://github.com/dougkang 53 | * https://github.com/haripriyamurthy 54 | * https://github.com/rolandomanrique 55 | * https://github.com/kothari-pk 56 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License. 15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | import verizon.build._ 18 | import Dependencies._ 19 | 20 | libraryDependencies ++= Seq( 21 | Kafka, 22 | KafkaTest, 23 | Journal, 24 | ScalaTest, 25 | Scalaz, 26 | ScalazStream 27 | ) 28 | 29 | organization in Global := "io.verizon.mutatis" 30 | 31 | coverageHighlighting := true 32 | 33 | parallelExecution in Test := false 34 | -------------------------------------------------------------------------------- /project/CentralRequirementsPlugin.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 
3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License. 15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | package verizon.build 18 | 19 | import sbt._, Keys._ 20 | import xerial.sbt.Sonatype.autoImport.sonatypeProfileName 21 | 22 | object CentralRequirementsPlugin extends AutoPlugin { 23 | 24 | override def trigger = allRequirements 25 | 26 | override def requires = RigPlugin 27 | 28 | override lazy val projectSettings = Seq( 29 | publishTo := Some("releases" at "https://oss.sonatype.org/service/local/staging/deploy/maven2"), 30 | sonatypeProfileName := "io.verizon", 31 | pomExtra in Global := { 32 | 33 | 34 | fadeddata 35 | Dustin Withers 36 | https://github.com/fadeddata 37 | 38 | 39 | haripriyamurthy 40 | Haripriya Murthy 41 | https://github.com/haripriyamurthy 42 | 43 | 44 | kothari-pk 45 | Prateek Kothari 46 | https://github.com/kothari-pk 47 | 48 | 49 | rolandomanrique 50 | Rolando Manrique 51 | https://github.com/rolandomanrique 52 | 53 | 54 | dougkang 55 | Doug Kang 56 | https://github.com/dougkang 57 | 58 | 59 | }, 60 | licenses := Seq("Apache-2.0" -> url("https://www.apache.org/licenses/LICENSE-2.0.html")), 61 | homepage := Some(url("http://verizon.github.io/mutatis/")), 62 | scmInfo := Some(ScmInfo(url("https://github.com/verizon/mutatis"), 63 | "git@github.com:verizon/mutatis.git")) 64 | ) 65 | } 
-------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License. 15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | import sbt._ 18 | import Keys._ 19 | 20 | object Dependencies { 21 | val Kafka = "org.apache.kafka" %% "kafka" % "0.8.2.2" 22 | val KafkaTest = "org.apache.kafka" %% "kafka" % "0.8.2.2" classifier "test" 23 | val Journal = "com.verizon.journal" %% "core" % "2.2.0" 24 | val ScalaTest = "org.scalatest" %% "scalatest" % "2.2.6" % "test" 25 | val Scalaz = "org.scalaz" %% "scalaz-core" % "7.1.7" 26 | val ScalazStream = "org.scalaz.stream" %% "scalaz-stream" % "0.7.3a" 27 | } 28 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.15 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | //: 
---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License. 15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | addSbtPlugin("io.verizon.build" % "sbt-rig" % "4.0.36") 18 | -------------------------------------------------------------------------------- /src/main/scala/kafka/consumer/TopicOffsetConsumerConnector.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License. 
15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | package kafka.consumer 18 | 19 | import kafka.common.TopicAndPartition 20 | 21 | /* Deliberately lives in the kafka.consumer package (not mutatis) so it can reach ZookeeperConsumerConnector.commitOffsetToZooKeeper, which is only visible inside this package. Wraps the high-level consumer connector and adds manual, per-partition offset commits to ZooKeeper. NOTE(review): the second constructor argument `true` is presumably the enableFetcher flag — confirm against the Kafka 0.8.2 sources. */ class TopicOffsetConsumerConnector(consumerConfig: ConsumerConfig) { 22 | private val zkConnector = new ZookeeperConsumerConnector(consumerConfig, true) 23 | /* Plain ConsumerConnector view handed to callers for stream creation and shutdown. */ val consumerConnector: ConsumerConnector = zkConnector 24 | 25 | /* Writes `offset` for `topicPartition` straight to ZooKeeper via the wrapped connector. */ def commitOffset(topicPartition: TopicAndPartition, offset: Long): Unit = { 26 | zkConnector.commitOffsetToZooKeeper(topicPartition, offset) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/scala/mutatis/DecodedEvent.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License.
15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | package mutatis 18 | 19 | import kafka.message.MessageAndMetadata 20 | 21 | /* Value wrapper around one consumed Kafka record. `key` and `message` are vals, so the decoded key and payload are extracted exactly once at construction time; the full MessageAndMetadata (topic, partition, offset) remains available for offset tracking by the consumer machinery. */ case class DecodedEvent[K, M](messageAndMetadata: MessageAndMetadata[K, M]) { 22 | val key: K = messageAndMetadata.key() 23 | val message: M = messageAndMetadata.message() 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/mutatis/Init.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License.
15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | package mutatis 18 | 19 | import java.util.concurrent.atomic.AtomicBoolean 20 | 21 | import kafka.consumer.KafkaStream 22 | import kafka.message.MessageAndMetadata 23 | 24 | /* Internal wiring handle produced by the package-level init(...): the opened Kafka streams, a callback that records a message's offset for the background committer, a hook that shuts down the underlying consumer connector, and the flag that terminates the committer loop when set to true. */ private case class Init[K, V]( 25 | streams: Seq[KafkaStream[K, V]], 26 | commitOffset: (MessageAndMetadata[K, V]) => Unit, 27 | shutdown: () => Unit, 28 | stopCommitter: AtomicBoolean) 29 | 30 | -------------------------------------------------------------------------------- /src/main/scala/mutatis/mutatis.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License. 6 | //: You may obtain a copy of the License at 7 | //: 8 | //: http://www.apache.org/licenses/LICENSE-2.0 9 | //: 10 | //: Unless required by applicable law or agreed to in writing, software 11 | //: distributed under the License is distributed on an "AS IS" BASIS, 12 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | //: See the License for the specific language governing permissions and 14 | //: limitations under the License.
15 | //: 16 | //: ---------------------------------------------------------------------------- 17 | import java.util.concurrent.atomic.AtomicBoolean 18 | import java.util.concurrent._ 19 | 20 | import journal.Logger 21 | import kafka.common.TopicAndPartition 22 | import kafka.consumer._ 23 | import kafka.message.MessageAndMetadata 24 | import kafka.producer.async.DefaultEventHandler 25 | import kafka.producer._ 26 | import kafka.serializer.{Decoder, Encoder, NullEncoder} 27 | import kafka.utils.Utils 28 | 29 | import scala.concurrent.duration.{Duration, _} 30 | import scalaz.concurrent._ 31 | import scalaz.stream.Process._ 32 | import scalaz.stream._ 33 | import scalaz.stream.time.awakeEvery 34 | import scalaz.{-\/, \/-} 35 | 36 | /* scalaz-stream wrappers around Kafka 0.8's high-level consumer and producer APIs. */ package object mutatis { 37 | 38 | val log = Logger[this.type] 39 | /* Single daemon thread backing the periodic offset committer; implicit so awakeEvery below schedules on it. */ private implicit val pool: ScheduledExecutorService = 40 | Executors.newSingleThreadScheduledExecutor(daemonThreads("mutatis-committer")) 41 | 42 | /* ThreadFactory yielding named daemon threads so the committer cannot keep the JVM alive on shutdown. */ private def daemonThreads(name: String) = new ThreadFactory { 43 | def newThread(r: Runnable) = { 44 | val t = Executors.defaultThreadFactory.newThread(r) 45 | t.setDaemon(true) 46 | t.setName(name) 47 | t 48 | } 49 | } 50 | 51 | /* Consumer-side wiring: forces auto.commit.enable=false so offsets are committed only by our background loop, opens `numStreams` streams over `topic`, and starts a loop that flushes recorded offsets to ZooKeeper once immediately and then every `refreshTime`. */ private def init[K, V]( 52 | consumerConfig: ConsumerConfig, 53 | topic: String, 54 | keyDecoder: Decoder[K], 55 | messageDecoder: Decoder[V], 56 | numStreams: Int, 57 | refreshTime: Duration = 2.minutes) = { 58 | val props = consumerConfig.props.props 59 | props.setProperty("auto.commit.enable", "false") 60 | val fixedCC = new ConsumerConfig(props) 61 | 62 | val tocc = new TopicOffsetConsumerConnector(fixedCC) 63 | val consumerConnector: ConsumerConnector = tocc.consumerConnector 64 | val filterSpec = Whitelist(topic) 65 | 66 | val streams = 67 | consumerConnector.createMessageStreamsByFilter(filterSpec, numStreams, keyDecoder, messageDecoder) 68 | 69 | val commitOffsetMap = new ConcurrentHashMap[TopicAndPartition, Long]() 70 | 71 | /* Records (does not persist) the newest offset seen per partition; the background loop does the ZooKeeper write. NOTE(review): this stores msg.offset rather than msg.offset + 1 — under Kafka's committed-offset convention the last processed message may be redelivered after a restart; confirm at-least-once semantics are intended. */ val commitOffset: MessageAndMetadata[K, V] => Unit = msg => { 72 |
commitOffsetMap.put(TopicAndPartition(msg.topic, msg.partition), msg.offset) 73 | () 74 | } 75 | 76 | val stopCommitter: AtomicBoolean = new AtomicBoolean(false) 77 | 78 | (Process.eval(commit(tocc, commitOffsetMap)) ++ awakeEvery(refreshTime) 79 | .evalMap[Task, Unit](_ => commit(tocc, commitOffsetMap))).run 80 | .runAsyncInterruptibly(_ => (), stopCommitter) 81 | 82 | val shutdown: () => Unit = tocc.consumerConnector.shutdown 83 | Init(streams, commitOffset, shutdown, stopCommitter) 84 | } 85 | 86 | /* Public consumer entry point: emits one inner Process per Kafka stream (run them concurrently for throughput). The bracket's release only stops the offset committer; the connector itself is shut down from within streamConsumer when an inner stream terminates. */ def consumer[K, V]( 87 | consumerConfig: ConsumerConfig, 88 | topic: String, 89 | keyDecoder: Decoder[K], 90 | messageDecoder: Decoder[V], 91 | numStreams: Int, 92 | refreshTime: Duration = 2.minutes): Process[Task, Process[Task, DecodedEvent[K, V]]] = { 93 | Process 94 | .bracket[Task, Init[K, V], Process[Task, DecodedEvent[K, V]]]( 95 | Task.delay(init(consumerConfig, topic, keyDecoder, messageDecoder, numStreams, refreshTime)))(i => 96 | eval_(Task.delay(i.stopCommitter.set(true)))) { init => 97 | Process 98 | .emitAll(init.streams) 99 | .map { stream => 100 | streamConsumer(init.commitOffset, init.shutdown)(stream) 101 | } 102 | } 103 | } 104 | 105 | /* Flushes every recorded partition offset to ZooKeeper. Failures are logged and swallowed so a transient ZooKeeper error does not kill the periodic committer loop. */ private def commit( 106 | tocc: TopicOffsetConsumerConnector, 107 | commitOffsetMap: ConcurrentHashMap[TopicAndPartition, Long]): Task[Unit] = 108 | Task { 109 | val commitOffsetsIter = commitOffsetMap.entrySet().iterator() 110 | while (commitOffsetsIter.hasNext) { 111 | val commitOffset = commitOffsetsIter.next() 112 | tocc.commitOffset(commitOffset.getKey, commitOffset.getValue) 113 | } 114 | }.attempt.map { 115 | case -\/(e) => log.error("commit failed", e) 116 | case \/-(_) => () 117 | } 118 | 119 | /* Turns one KafkaStream into a Process that emits each decoded record and, via onComplete after each emit, records the record's offset for the committer. The bracket's release shuts down the whole consumer connector when this stream terminates. */ private def streamConsumer[K, V](commitOffset: MessageAndMetadata[K, V] => Unit, shutdown: () => Unit)( 120 | stream: KafkaStream[K, V]): Process[Task, DecodedEvent[K, V]] = { 121 | Process 122 | .bracket[Task, ConsumerIterator[K, V], DecodedEvent[K, V]](Task.delay(stream.iterator())) { consumer => 123 |
eval_(Task.delay(shutdown())) 124 | } { consumer => 125 | val begin = eval_(Task delay { 126 | log.info(s"${Thread.currentThread()} - Start pulling records from Kafka.") 127 | }) 128 | 129 | val process: Process[Task, DecodedEvent[K, V]] = 130 | syncPoll(DecodedEvent(consumer.next)).flatMap { message => 131 | Process 132 | .emit(message) 133 | .onComplete(commit(commitOffset, message.messageAndMetadata).drain) 134 | } 135 | 136 | begin ++ process 137 | } 138 | } 139 | 140 | /* Lifts the offset-recording callback into a one-step Process, with debug logging of the offset being recorded. */ private def commit[K, V]( 141 | commit: MessageAndMetadata[K, V] => Unit, 142 | msg: MessageAndMetadata[K, V]): Process[Task, Unit] = 143 | Process eval Task.delay { 144 | log.debug( 145 | s"${Thread.currentThread()} - Committing offset=${msg.offset} topic=${msg.topic} partition=${msg.partition}") 146 | commit(msg) 147 | } 148 | 149 | /* Endlessly repeats the blocking by-name call (here: `consumer.next`) as a Task-backed stream. */ private def syncPoll[K, V](blockingTask: => DecodedEvent[K, V]): Process[Task, DecodedEvent[K, V]] = { 150 | val t = Task.delay(blockingTask) 151 | 152 | Process repeatEval t 153 | } 154 | 155 | /* Sink that sends un-keyed values to `topic`; the underlying producer is closed when the stream completes or fails. */ def producer[V](cfg: ProducerConfig, topic: String, msgEncoder: Encoder[V]): Sink[Task, V] = { 156 | 157 | val prod = producer(cfg, None, msgEncoder) 158 | 159 | sink 160 | .lift[Task, V] { v => 161 | Task.delay[Unit] { 162 | log.debug(s"${Thread.currentThread()} sending event for value=$v") 163 | 164 | prod.send(new KeyedMessage(topic, v)) 165 | } 166 | } 167 | .onComplete { 168 | Process.suspend { 169 | log.info("End of stream.
Closing producer") 170 | prod.close 171 | Process.halt 172 | } 173 | } 174 | } 175 | 176 | /* Keyed variant of the producer sink: each (key, value) pair is sent to `topic`; the producer is closed when the stream terminates. */ def producer[K, V]( 177 | cfg: ProducerConfig, 178 | topic: String, 179 | keyEncoder: Encoder[K], 180 | msgEncoder: Encoder[V]): Sink[Task, (K, V)] = { 181 | 182 | val prod = producer(cfg, Some(keyEncoder), msgEncoder) 183 | 184 | sink 185 | .lift[Task, (K, V)] { 186 | case (k, v) => 187 | Task.delay[Unit] { 188 | log.debug(s"${Thread.currentThread()} sending event for key=$k and value=$v") 189 | 190 | prod.send(new KeyedMessage(topic, k, v)) 191 | } 192 | } 193 | .onComplete { 194 | Process.suspend { 195 | log.info("End of stream. Closing producer") 196 | prod.close 197 | Process.halt 198 | } 199 | } 200 | } 201 | 202 | /* Builds the raw Kafka producer with an explicit DefaultEventHandler so typed Encoder instances can be passed directly (NullEncoder when no key encoder is supplied) instead of the reflective, properties-configured encoders. */ private def producer[K, V]( 203 | cfg: ProducerConfig, 204 | keyEncoder: Option[Encoder[K]], 205 | msgEncoder: Encoder[V]): Producer[K, V] = { 206 | new Producer[K, V]( 207 | cfg, 208 | new DefaultEventHandler[K, V]( 209 | cfg, 210 | Utils.createObject[Partitioner](cfg.partitionerClass, cfg.props), 211 | msgEncoder, 212 | keyEncoder.getOrElse(new NullEncoder[K]()), 213 | new ProducerPool(cfg)) 214 | ) 215 | } 216 | 217 | /* Adds Process.bracket (acquire / use / release): `release` is attached with onComplete, so it runs on normal termination, failure, or kill. */ implicit class ProcessObjectSyntax(val self: Process.type) extends AnyVal { 218 | 219 | def bracket[F[_], A, O](req: F[A])(release: A => Process[F, Nothing])( 220 | rcv: A => Process[F, O]): Process[F, O] = { 221 | 222 | Process.await(req) { a => 223 | rcv(a) onComplete release(a) 224 | } 225 | } 226 | } 227 | 228 | } 229 | -------------------------------------------------------------------------------- /src/test/scala/common/KafkaTestHelper.scala: -------------------------------------------------------------------------------- 1 | //: ---------------------------------------------------------------------------- 2 | //: Copyright (C) 2017 Verizon. All Rights Reserved. 3 | //: 4 | //: Licensed under the Apache License, Version 2.0 (the "License"); 5 | //: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package common

import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.zookeeper.data.Stat

/** Helpers for inspecting consumer-group offsets stored in ZooKeeper during tests. */
object KafkaTestHelper {

  /** ZooKeeper directory under which the given group's offsets for `topic` live. */
  def offsetDir(group: String, topic: String) = {
    val dirs = new ZKGroupTopicDirs(group, topic)
    dirs.consumerOffsetDir
  }

  /** Raw read of a ZooKeeper node: its string payload together with its Stat metadata. */
  def readZkNode(zkClient: ZkClient, path: String): (String, Stat) =
    ZkUtils.readData(zkClient, path)

  /** Committed offset for one partition, as the raw string stored in ZooKeeper. */
  def getOffset(zkClient: ZkClient, group: String, topic: String, partition: Int) = {
    val node = s"${offsetDir(group, topic)}/$partition"
    readZkNode(zkClient, node)._1
  }

}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

import java.util.concurrent.atomic.AtomicReference

import kafka.producer._
import kafka.serializer.{Decoder, DefaultDecoder, StringDecoder}
import scala.concurrent.duration._
import scalaz.{-\/, \/-}
import scalaz.stream._
import scalaz.concurrent.Task
import common.KafkaTestHelper._

/**
 * Behavioural tests for the mutatis consumer: ordering, periodic offset
 * commits, and decoder-failure propagation, all against an embedded broker.
 */
class ConsumerSpec extends UnitSpec with EmbeddedKafkaBuilder {
  type Bytes = Array[Byte]

  val bytesDecoder = new DefaultDecoder()
  val stringDecoder = new StringDecoder()

  // Sixteen ordered (key, payload) pairs used as the produced fixture.
  val data: List[(Int, String)] = (1 to 16).toList.map { num =>
    num -> s"test-message-$num"
  }

  val messages: List[KeyedMessage[Array[Byte], Array[Byte]]] = data.map {
    case (num, message) =>
      new KeyedMessage(topic, num.toString.getBytes(), message.getBytes())
  }

  // Publish the whole fixture to the embedded broker.
  def produce(): Unit = {
    val producer: Producer[Array[Byte], Array[Byte]] = new Producer(producerConfig)
    producer.send(messages: _*)
    producer.close()
  }

  // FIX: the store was previously a suite-level field shared by all tests.
  // ScalaTest runs every test of a suite on the same instance, so results
  // leaked between tests and the first assertion depended on test order.
  // Each test now builds its own store.
  private def newDataStore(): AtomicReference[List[String]] =
    new AtomicReference[List[String]](List.empty[String])

  "Consumer should" - {
    "should consume message in order produced" in {
      produce()
      val dataStore = newDataStore()

      // Append each consumed payload (reversed, to prove decode happened here).
      val dataStoreSink: Sink[Task, DecodedEvent[Bytes, String]] = sink.lift { s =>
        Task.delay {
          val seen = dataStore.get() // renamed from `data` to avoid shadowing the fixture
          dataStore.set(seen :+ s.message.reverse)
          ()
        }
      }

      consumer[Bytes, String](consumerConfig, topic, bytesDecoder, stringDecoder, 1).flatMap { s =>
        s through dataStoreSink
      }.take(messages.size).runLog.attempt.run

      dataStore.get shouldEqual data.map(_._2.reverse)
    }

    "should consume message in order produced and commit periodically" in {
      produce()
      val dataStore = newDataStore()

      // Slow consumer (200ms/message) so the 150ms committer fires mid-stream.
      val dataStoreSink: Sink[Task, DecodedEvent[Bytes, String]] = sink.lift { s =>
        Task.delay {
          val seen = dataStore.get()
          dataStore.set(seen :+ s.message.reverse)
          Thread.sleep(200)
          ()
        }
      }

      consumer[Bytes, String](consumerConfig, topic, bytesDecoder, stringDecoder, 1, 150.millisecond).flatMap { s =>
        s through dataStoreSink
      }.take(messages.size).runLog.attempt.run

      // Why 14 if there we are taking 16 messages
      // 1) Offset are 0 indexed. Now, why 15 :)
      // 2) The connection is closed immediately after the last message is consumed.
      //    This gives no time to commit the last one
      getOffset(zkClient, groupId, topic, 0) shouldBe "14"
    }

    "should handle exceptions in decoder" in {
      produce()

      // Decoder that throws NumberFormatException on non-numeric payloads.
      val badDecoder = new Decoder[String] {
        override def fromBytes(bytes: Bytes): String = new String(bytes, "UTF8").toInt.toString
      }

      val seq = consumer[Bytes, String](consumerConfig, topic, bytesDecoder, badDecoder, 1)
        .flatMap(a => a)
        .take(1)
        .runLog
        .attempt
        .run

      seq match {
        case \/-(_) => fail("should not be success")
        case -\/(e) => e shouldBe a[NumberFormatException]
      }
    }
  }
}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

import java.util.UUID

import kafka.admin.TopicCommand
import kafka.consumer.ConsumerConfig
import kafka.producer._
import kafka.server._
import org.I0Itec.zkclient.ZkClient
import org.scalatest._
import kafka.utils._
import kafka.zk._

/**
 * Mixin that boots a fresh embedded ZooKeeper + single Kafka broker around
 * every test, creates a one-partition topic, and exposes ready-made consumer
 * and producer configurations.  Start-up order matters: ZK first, then the
 * broker, then the topic, then the client configs.
 */
trait EmbeddedKafkaBuilder extends BeforeAndAfterEach { this: Suite =>
  val brokerId = 0
  val topic = "topic"
  val zkConnect = TestZKUtils.zookeeperConnect

  // Populated in beforeEach, torn down in afterEach.
  var kafkaServer: KafkaServer = _
  var zkClient: ZkClient = _
  var zkServer: EmbeddedZookeeper = _
  var consumerConfig: ConsumerConfig = _
  var producerConfig: ProducerConfig = _
  // Fresh group per suite instance so offsets from previous runs can't leak in.
  val groupId = UUID.randomUUID().toString

  override def beforeEach(): Unit = {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper(zkConnect)
    zkClient = new ZkClient(zkServer.connectString, 30000, 30000, ZKStringSerializer)

    // setup Broker
    val port = TestUtils.choosePort
    val props = TestUtils.createBrokerConfig(brokerId, port, enableControlledShutdown = true)

    val config = new KafkaConfig(props)
    val mock = new MockTime()
    kafkaServer = TestUtils.createServer(config, mock)

    // create topic (single partition, single replica — matches the specs'
    // assumption that all messages land on partition 0)
    val arguments =
      Array("--topic", topic, "--partitions", "1", "--replication-factor", "1")
    TopicCommand.createTopic(zkClient, new TopicCommand.TopicCommandOptions(arguments))

    // Block until the broker knows about the new topic (up to 5s) so tests
    // don't race topic creation.
    TestUtils.waitUntilMetadataIsPropagated(List(kafkaServer), topic, 0, 5000)

    // consumerTimeout = -1 means consumers block forever waiting for messages.
    val consumerProperties =
      TestUtils.createConsumerProperties(
        zkConnect = zkServer.connectString,
        groupId = groupId,
        consumerId = UUID.randomUUID().toString,
        consumerTimeout = -1)

    consumerConfig = new ConsumerConfig(consumerProperties)

    // setup producer
    val properties = TestUtils.getProducerConfig("localhost:" + port)
    producerConfig = new ProducerConfig(properties)

    super.beforeEach() // To be stackable, must call super.beforeEach
  }

  override def afterEach(): Unit = {
    try super.afterEach() // To be stackable, must call super.afterEach
    finally {
      // Tear down in reverse dependency order: broker, then ZK client/server.
      kafkaServer.shutdown()
      zkClient.close()
      zkServer.shutdown()
    }
  }
}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

import kafka.serializer.{DefaultDecoder, StringDecoder, StringEncoder}

import scalaz.concurrent.Task
import scalaz.stream.Process

/**
 * Round trip through the library's own API: publish with the mutatis producer
 * sink, then read the same events back with the mutatis consumer.
 */
class IntegrationSpec extends UnitSpec with EmbeddedKafkaBuilder {

  "Producer should produce events that can be consumed by the mutatis consumer" in {
    val data = List("a", "b", "c")

    // Push every element through the producer sink and run it to completion.
    val publish =
      Process.emitAll(data) to producer[String](cfg = producerConfig, topic = topic, msgEncoder = new StringEncoder)
    publish.runLog.run

    // Pull back exactly as many events as were published.
    val consumed =
      consumer[Array[Byte], String](consumerConfig, topic, new DefaultDecoder(), new StringDecoder(), 1)
        .flatMap(identity)
        .take(data.size)
        .runLog
        .run

    consumed.toList.map(_.message) shouldEqual data
  }
}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

import kafka.consumer.{Consumer, Whitelist}
import kafka.serializer._

import scalaz.concurrent.Task
import scalaz.{-\/, \/-}
import scalaz.stream._

/**
 * Tests for the mutatis producer sink: happy-path publishing (verified with a
 * plain Kafka consumer) and failure propagation when encoding blows up.
 */
class ProducerSpec extends UnitSpec with EmbeddedKafkaBuilder {

  val data = List("a", "b", "c")
  val dataP: Process[Task, String] = Process.emitAll(data)

  "Producer should" - {
    "produce events that can be consumed" in {

      val sink = dataP to producer[String](cfg = producerConfig, topic = topic, msgEncoder = new StringEncoder)
      sink.runLog.run

      // Read back with the raw Kafka high-level consumer, independent of mutatis.
      val consumer = Consumer.create(consumerConfig)
      val records = consumer
        .createMessageStreamsByFilter[String, String](Whitelist(topic), 1, new StringDecoder, new StringDecoder)
        .flatMap { stream =>
          stream.iterator.take(3).map(_.message)
        }

      records shouldEqual (data)
    }

    // FIX: this test previously sat OUTSIDE the "Producer should" block (a
    // misplaced closing brace), unlike the structure used in ConsumerSpec.
    // It is now nested with its sibling.
    "stop processing and return when the data cannot be encoded" in {
      val badEncoder = new Encoder[String] {
        override def toBytes(bytes: String): Array[Byte] = throw new RuntimeException("oh no")
      }

      val sink = dataP to producer[String](cfg = producerConfig, topic = topic, msgEncoder = badEncoder)
      // The encoder failure must surface as a -\/ from the stream, not hang or
      // be swallowed.
      sink.runLog.attempt.run match {
        case -\/(e) => e.getMessage shouldEqual ("oh no")
        case \/-(_) => fail("should not be success")
      }
    }
  }
}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

import org.scalatest._

/**
 * Common base class for all specs in this project: FreeSpec style
 * (`"..." - { "..." in { ... } }`) with `should*` matchers plus the
 * Option/Inside/Inspectors helpers available everywhere.
 */
abstract class UnitSpec extends FreeSpec with Matchers with OptionValues with Inside with Inspectors
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package mutatis

/**
 * Goal for this example is to demonstrate use case where a producer generates events faster than consumers can
 * process each event so it is required to load balance across multiple consumers to handle the load.
 *
 * 1. Set up kafka server:
 *    Download and start zk and kafka per https://kafka.apache.org/082/documentation.html#quickstart
 *
 * 2. create a topic called "test8" with 8 partitions:
 *    ./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 8 --topic test8
 *
 * 3. Start producer - notice one message is produced every 100 milliseconds:
 *    sbt "test:run-main mutatis.ExampleProducer"
 *
 * 4. Start consumer - notice one message takes 250 milliseconds to process, one consumer is not enough:
 *    sbt "test:run-main mutatis.ExampleConsumer"
 *
 * Consideration when setting up number of partitions, consumers, and streams per consumer:
 *  - Number of partitions represent the max number of streams you can have across all consumers
 *  - Above we created a topic with 8 streams (update config.numberOfTopics as needed)
 *  - Consumers are set up to have 2 concurrent streams (update config.streams to change it)
 *  - Per above, max number of consumers you can have is 4 (8 topics divided by 2 streams per consumer)
 *  - A fifth consumer will not receive any messages from kafka because there will be no partitions to assign to it
 *  - Killing one of the 4 active consumer will trigger rebalance and the 5th consumer will pick up the slack
 */
import kafka.serializer.{Decoder, Encoder}
import kafka.consumer.ConsumerConfig
import java.util.Properties
import java.util.concurrent.{ExecutorService, Executors, ScheduledExecutorService}

import kafka.producer.ProducerConfig

import scalaz.concurrent.{Strategy, Task}
import scalaz.stream.{merge, time, Sink}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.postfixOps

/** Emits one message (the elapsed Duration) every 100ms into the example topic, forever. */
object ExampleProducer extends App {
  import config._

  implicit val pool: ScheduledExecutorService = Executors.newScheduledThreadPool(3, Executors.defaultThreadFactory())
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(pool)
  implicit val S: Strategy = Strategy.Executor(pool)

  // awakeEvery produces the elapsed time itself, which doubles as the payload.
  val sink: Sink[Task, Duration] = mutatis.producer(producerCfg, topic, encoder)
  val producer = time.awakeEvery(100 milliseconds) to sink
  producer.run.run
}

/**
 * Consumes the example topic with `config.streams` concurrent inner streams,
 * simulating a slow (250ms/message) worker.  Start several instances to see
 * partition rebalancing in action.
 */
object ExampleConsumer extends App {
  import config._

  implicit val pool: ExecutorService = Executors.newFixedThreadPool(streams, Executors.defaultThreadFactory())
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(pool)
  implicit val S: Strategy = Strategy.Executor(pool)

  val consumer = merge.mergeN(streams)(
    // Map the internal stream to worker.process so that we commit to kafka only after processing is done
    mutatis
      .consumer(consumerCfg, topic, keyDecoder, decoder, streams)
      .map(stream =>
        stream.evalMap { event =>
          // this will happen BEFORE commit to kafka
          worker.process(event)
        })
  )(S)

  val consumerWithPostProcessing = consumer.map(_ => () /* this will happen AFTER commit to kafka */ )

  consumerWithPostProcessing.run.run
}

/** Shared knobs for both example apps: broker/ZK addresses, topic, codecs. */
object config {
  val producerProps = new Properties()
  producerProps.put("metadata.broker.list", "localhost:9092")
  producerProps.put("acks", "all")
  producerProps.put("retries", "0")
  producerProps.put("batch.size", "16384")
  producerProps.put("linger.ms", "1")
  producerProps.put("buffer.memory", "33554432")
  producerProps.put("serializer.class", "kafka.serializer.DefaultEncoder")
  val producerCfg = new ProducerConfig(producerProps)

  val consumerProps = new Properties()
  consumerProps.put("zookeeper.connect", "localhost:2181")
  consumerProps.put("auto.offset.reset", "smallest")
  consumerProps.put("consumer.timeout.ms", "60000")
  // Very long auto-commit interval: mutatis manages commits itself.
  consumerProps.put("auto.commit.interval.ms", "5000000")
  consumerProps.put("group.id", "test1")
  val consumerCfg = new ConsumerConfig(consumerProps)

  // Concurrent streams per consumer instance.
  val streams = 2

  // Must match the partition count the topic was created with (step 2 above).
  val numberOfTopics = 8

  val topic = "test8"

  // Payload codec: Duration serialized as its toString characters.
  val encoder = new Encoder[Duration] {
    override def toBytes(t: Duration): Array[Byte] = t.toString.toCharArray.map(_.toByte)
  }

  val decoder = new Decoder[String] {
    override def fromBytes(bytes: Array[Byte]) = new String(bytes.map(_.toChar))
  }

  // Key codec: millis modulo partition count, so messages spread evenly.
  val keyEncoder = new Encoder[Duration] {
    override def toBytes(t: Duration): Array[Byte] = BigInt(t.toMillis % numberOfTopics).toByteArray
  }

  val keyDecoder = new Decoder[Int] {
    override def fromBytes(bytes: Array[Byte]) = BigInt(bytes).toInt
  }
}

/** Simulated slow message handler: 250ms of "work" then a trace line. */
object worker {

  def process(event: DecodedEvent[Int, String]): Task[Unit] = Task.delay {
    Thread.sleep(250)
    println(
      s"thread=${Thread.currentThread.getName} topic=${event.messageAndMetadata.topic} " +
        s"partition=${event.messageAndMetadata.partition} key=${event.key} msg=${event.message}")
  }

}
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//:     http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
// Version shared by all modules.  CI runs `sbt 'release with-defaults'` on
// master (see .travis.yml), which presumably bumps this file automatically —
// avoid editing it by hand.
version in ThisBuild := "0.2.0-SNAPSHOT"