├── .gitignore ├── LICENSE ├── README.md ├── build.sbt ├── images ├── app_stream_flow.gif ├── consumer_stream_creation_slow.gif ├── producer_stream_creation_slow.gif └── template-overview.png ├── project ├── Dependencies.scala ├── Versions.scala ├── build.properties └── plugins.sbt └── src ├── main ├── resources │ ├── application.conf │ └── logback.xml └── scala │ └── com │ └── omearac │ ├── Main.scala │ ├── consumers │ ├── ConsumerStream.scala │ ├── ConsumerStreamManager.scala │ ├── DataConsumer.scala │ └── EventConsumer.scala │ ├── http │ ├── HttpService.scala │ └── routes │ │ ├── ConsumerCommands.scala │ │ └── ProducerCommands.scala │ ├── producers │ ├── DataProducer.scala │ ├── EventProducer.scala │ ├── ProducerStream.scala │ └── ProducerStreamManager.scala │ ├── settings │ └── Settings.scala │ └── shared │ ├── AkkaStreams.scala │ ├── EventSourcing.scala │ ├── JsonMessageConversion.scala │ └── Messages.scala └── test ├── resources ├── application.conf └── logback-test.xml └── scala └── akka ├── HTTPInterfaceSpec.scala ├── SettingsSpec.scala └── kafka ├── ConsumerStreamManagerSpec.scala ├── ConsumerStreamSpec.scala ├── DataConsumerSpec.scala ├── DataProducerSpec.scala ├── EventConsumerSpec.scala ├── EventProducerSpec.scala ├── ProducerStreamManagerSpec.scala └── ProducerStreamSpec.scala /.gitignore: -------------------------------------------------------------------------------- 1 | #sbt 2 | project/project 3 | project/target 4 | target 5 | 6 | #IntelliJ 7 | .idea 8 | .idea_modules 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Reactive Kafka Microservice Template 2 | =================== 3 | 4 | 5 | This project is an example of how to use Apache Kafka as the main method of communication between microservices using the Akka Streams library [Reactive-Kafka](https://github.com/akka/reactive-kafka). 
No domain/business logic is implemented here; the idea is to use this as a template and customise it for your own microservices. This template provides examples of the following:

- Creating consumer and producer Akka Streams using Reactive-Kafka
- Batch committing the Kafka Offsets of consumed messages
- Integrating the consumer and producer streams with Akka Actors using backpressure
- Akka HTTP frontend API for manually starting/stopping the consumer streams
- JSON (un)marshalling to/from Scala case classes
- Local Event Sourcing using Akka EventBus
- Logging
- Akka HTTP, Actor, and Stream unit and integration testing

![alt tag](https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/master/images/template-overview.png)


Overview
-------------

The core function of this microservice example is to publish example messages to Kafka from an Akka Actor which acts as a Source for an Akka Stream by making use of backpressure. We call this producer Actor the "data" producer. Another Akka Stream is connected to that same Kafka topic to consume the messages off the queue. This second stream sends the messages to an Akka Actor (the "data" consumer Actor) which acts as a Sink for the Akka Stream. Once this Sink Actor receives the messages, he simply prints the results to the console.

During application startup, the streams are created, which triggers local events to be published to the ActorSystem's built-in EventBus. An "event" producer Actor subscribed to that local EventBus receives these Events and publishes them via another Akka Stream to Kafka. Again, there is a consumer stream subscribed to this event topic on Kafka, whose "event" consumer Actor Sink then prints the results to the console.

Finally, there is an Akka HTTP interface which allows the user to tell the "data" producer Actor to publish messages to Kafka, as well as start and stop both the "data" consumer stream and the "event" consumer stream.

Thus, in total there are 4 Akka Streams which are materialised: two consuming Streams which pull data from the Kafka Message Broker, and two publishing Streams which push messages to the Kafka Message Broker.

The following diagram shows the 4 Akka Streams which are created (in green), along with the Akka Actors which serve as the Sinks (for consumer Streams) and Sources (for producer Streams). The Akka Actors and Akka Streams are integrated using `Source.queue(_, OverflowStrategy.backpressure)` for the producer Streams and `Sink.actorRefWithAck` for the consumer Streams so that we have full backpressure both ways as per the Akka [documentation](http://doc.akka.io/docs/akka/2.4.16/scala/stream/stream-integrations.html). A rough code sketch of this wiring is given at the end of this Overview.

There are two Kafka topics, **TestDataChannel** and **AppEventChannel**, on the Kafka message broker. The (case class) messages `KafkaMessage` and `ExampleAppEvent` are converted to JSON and published/consumed to/from their respective Kafka topics.

![alt tag](https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/master/images/app_stream_flow.gif)
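Here is a rough, self-contained sketch of that actor/stream integration pattern (independent of Kafka; the actor name, buffer size, and element type are illustrative, though the `STREAM_INIT`/`OK`/`STREAM_DONE` handshake matches what `ConsumerStream.createStreamSink` uses):

```
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}

object WiringSketch extends App {
  implicit val system = ActorSystem("wiring-sketch")
  implicit val materializer = ActorMaterializer()

  //Consumer side: the actor is the Sink. The stream sends STREAM_INIT first,
  //expects an "OK" ack after every element, and sends STREAM_DONE on completion.
  class ConsumerActor extends Actor {
    def receive: Receive = {
      case "STREAM_INIT" => sender() ! "OK"
      case "STREAM_DONE" => //stream completed
      case msg => println(s"consumed: $msg"); sender() ! "OK"
    }
  }
  val consumerActor: ActorRef = system.actorOf(Props(new ConsumerActor))
  val sink = Sink.actorRefWithAck[String](consumerActor, "STREAM_INIT", "OK", "STREAM_DONE")

  //Producer side: materialising Source.queue yields a SourceQueueWithComplete
  //which a producer actor can offer messages to; backpressure is applied when
  //the buffer (here an arbitrary 100 elements) is full.
  val queue = Source.queue[String](100, OverflowStrategy.backpressure).to(sink).run()
  queue.offer("this element flows to the consumer actor")
}
```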
----------


Stream Creation Patterns
-------------
### Creating a Consumer Stream
The process of creating a consumer Stream involves the following steps, as illustrated in the animation below. The ConsumerStreamManager is an Actor responsible for the lifecycle of the consumer Akka Streams, i.e. he creates, runs and can terminate them. A consumer Actor serves as the message endpoint (the Sink) of a Stream. Once a Consumer Actor (who will act as the Stream Sink) and the ConsumerStreamManager are created:


1. An `InitializeConsumerStream` message is sent to the manager with the reference to the Consumer Actor and the message type (case class) the consumed JSON data from Kafka must be converted to
2. Upon receiving this message, the StreamManager initialises the Akka Stream with the Reactive-Kafka library, using information from the `application.conf` settings file.
3. Once the stream has started, the message type and stream reference are saved to a collection so the Stream can be terminated on command if necessary. The StreamManager then tells the Consumer Actor the stream has materialised by sending it an `ActivatedConsumerStream(Topic)` message and also emits an event to the local EventBus associated to the ActorSystem (a sketch of this local event mechanism is given at the end of this section).
4. Upon receiving the `ActivatedConsumerStream` message from the ConsumerStreamManager, the Consumer Actor saves the address of the ConsumerStreamManager. The Actor stays in its "non-consuming" state until it receives the `STREAM_INIT` message from the Akka Stream, in which case the Actor changes state to consuming mode via Akka's `become` method. The reference to the ConsumerStreamManager is kept in order to manually terminate the stream (which can be done by the Akka HTTP front end as described below).

![alt tag](https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/master/images/consumer_stream_creation_slow.gif)


### Creating a Producer Stream
The process of creating a producer Stream is essentially the same concept as creating the Consumer Stream, except now the Producer Actor is the Akka Stream Source. Again, we create an instance of the ProducerStreamManager. This Actor is responsible for the lifecycle of the producer Akka Streams, i.e. he creates and runs them. Once a Producer Actor and the ProducerStreamManager are created:

1. An `InitializeProducerStream` message is sent to the manager with the reference to the Producer Actor and the message type (case class) that will be converted to JSON before being published to Kafka
2. Upon receiving this message, the StreamManager initialises the Akka Stream with the Reactive-Kafka library, using information from the `application.conf` settings file.
3. Once the stream has started, the StreamManager tells the Producer Actor the stream has materialised and passes it the stream's reference in the message `ActivatedProducerStream(streamRef, Topic)`. An event describing what has occurred is then published to the local EventBus associated to the ActorSystem.
4. Upon receiving the `ActivatedProducerStream` message from the ProducerStreamManager, the Producer Actor changes state to publishing mode via Akka's `become` method.

![alt tag](https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/master/images/producer_stream_creation_slow.gif)
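The local EventBus mechanism mentioned in step 3 is plain Akka: events are case classes published to the ActorSystem's event stream, and `EventProducer` subscribes to `ExampleAppEvent` and forwards whatever it receives to the AppEventChannel Kafka topic. A minimal sketch of the mechanism (the event fields and names here are illustrative; in the template the publishing side is wrapped by the `publishLocalEvent` helper of the `EventSourcing` trait):

```
import akka.actor.{Actor, ActorSystem, Props}

//Illustrative stand-in for one of the template's local event case classes
case class ExampleAppEvent(timetag: String, senderPath: String, eventMessage: String)

object EventBusSketch extends App {
  val system = ActorSystem("eventbus-sketch")

  //Any actor can subscribe to an event class on the system-wide event stream,
  //exactly as EventProducer does in its preStart()
  class Listener extends Actor {
    override def preStart(): Unit =
      context.system.eventStream.subscribe(self, classOf[ExampleAppEvent])
    def receive: Receive = {
      case ev: ExampleAppEvent => println(s"got local event: $ev")
    }
  }
  system.actorOf(Props(new Listener))

  //Publishing a local event: in the template this is what happens when a
  //stream manager reports that a stream has materialised
  system.eventStream.publish(ExampleAppEvent("12:00:00", "/user/sketch", "stream activated"))
}
```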
Running
-------------

### Setting up Apache Kafka and Zookeeper

There are many online tutorials and guides on how to install and set up Apache Kafka, so only the basics are explained here. To use [Apache Kafka](https://kafka.apache.org/downloads), a running [Apache Zookeeper](https://zookeeper.apache.org/releases.html) server instance is required.
The Kafka download comes with a Zookeeper server automatically, as described in the installation guide/tutorial for Reactive-Kafka [here](https://vanwilgenburg.wordpress.com/2016/09/19/getting-started-with-akka-stream-kafka/). In my setups, I usually install each component separately: first download and install Zookeeper and Kafka as described by their respective installation guides. This project currently runs with Kafka 0.10.0.1 and Zookeeper 3.4.8 (but should work with newer releases of Zookeeper).

>**Note:**
>
>Kafka does not currently come bundled with a nice web UI like some other message broker technologies, so this tutorial will show how to create Kafka topics via a Terminal. [Kafka Manager](https://github.com/yahoo/kafka-manager) is a nice UI tool I can recommend which runs a little server that connects to Kafka and allows you to visualise/create/delete Kafka brokers, topics, partitions, logs...etc.

Then start the Zookeeper server instance from Terminal:
```
>zkServer start
```
Now start a Kafka instance by pointing it to a properties file that was installed by default:
```
>kafka-server-start /usr/local/etc/kafka/server.properties
```

> **Note:**
>
> If the Kafka server returns an error that it cannot find the Zookeeper instance, verify that the server.properties and server.default.properties files in ``/usr/local/etc/kafka`` have ``zookeeper.connect=localhost:2181`` as the Zookeeper server location.

To create new Topics in Kafka enter

```
>kafka-topics --zookeeper localhost:2181 --create --topic MyBrandNewKafkaTopic --replication-factor 1 --partitions 5
```

To check the topics:
```
>kafka-topics --describe --zookeeper localhost:2181
```
which should show your newly created MyBrandNewKafkaTopic. With these details sorted out, we can now run our application.

### Running the Application

We assume the user has already configured Apache Kafka to be running locally. The two Topics we will be publishing/consuming to/from are "**TestDataChannel**" and "**AppEventChannel**", as expressed in the application.conf resource file. Furthermore, an additional two channels which are used for "End-to-End" testing are called "**TempChannel1**" and "**TempChannel2**", as expressed in the `reference.conf` resource file. These 4 topics must be created with **5** partitions each. For an introduction to setting up and running Apache Kafka, see the previous section.
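For example, assuming Zookeeper is running at `localhost:2181` as above, the four required topics can be created as follows:

```
>kafka-topics --zookeeper localhost:2181 --create --topic TestDataChannel --replication-factor 1 --partitions 5
>kafka-topics --zookeeper localhost:2181 --create --topic AppEventChannel --replication-factor 1 --partitions 5
>kafka-topics --zookeeper localhost:2181 --create --topic TempChannel1 --replication-factor 1 --partitions 5
>kafka-topics --zookeeper localhost:2181 --create --topic TempChannel2 --replication-factor 1 --partitions 5
```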
To run from Terminal:
```
// From root of project directory which contains the build.sbt file
>sbt run
```
The `Main.scala` application starts all 4 Streams during initialisation, along with the Akka HTTP interface. The user is first prompted to select the port the server is to be hosted at.

    // From root of project directory which contains the build.sbt file
    >sbt run
    [info] Running example.Main
    Specify the TCP port you want to host the HTTP server at (e.g. 8001, 8080..etc)?
    Hit Return when finished: 8080

Once running, navigate to (or cURL) the following endpoints to start/stop the consumer streams and to produce 'n' messages.

    /data_consumer/start
    /data_consumer/stop
    /event_consumer/start
    /event_consumer/stop
    /data_producer/produce/n

>**Example:**
>
>Running the `Main.scala` app and selecting port 8080 for the Akka HTTP server in the Terminal, we can make the producer Actor produce 50 messages to Kafka:
>
> `curl localhost:8080/data_producer/produce/50`
>
> returns the response
>
> `50 messages Produced as Ordered, Boss!`
>
> then in the Terminal window which is running the `Main.scala` app, we see the messages have been consumed from Kafka by both the Event Stream and Data Stream:
>
> >ExampleAppEvent(15:01:17:20:59:52.579,akka://akka-reactive-kafka-app/user/dataProducer#1828074211,MessagesPublished(50))
>KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,1)
> KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,2)
> KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,9)
> KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,15)
> KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,23)
>KafkaMessage(15:01:17:20:59:52.579, send me to kafka, yo!,3)
> .
> .

>In separate Terminals you can now run additional instances of the application and see that these new Kafka consumer streams (`DataConsumer` and `EventConsumer`) will share the messages which are published to the topics "**TestDataChannel**" and "**AppEventChannel**".


Testing
-------------

```
// From root of project directory which contains the build.sbt file
>sbt test
```


Author & License
-------------
If you have any questions about this project please email Corey OMeara @ lastname.firstname@gmail.com.
This code is open source software licensed under the Apache 2.0 [License](http://www.apache.org/licenses/LICENSE-2.0.html). For additional information please see the LICENSE file.
-------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import Dependencies._ 2 | 3 | name := "reactive-kafka-microservice-template" 4 | 5 | version := "1.0" 6 | 7 | scalaVersion := "2.11.8" 8 | 9 | libraryDependencies ++= Seq( 10 | akka_http_core, 11 | akka_http_testkit, 12 | akka_testkit, 13 | akka_slf4j, 14 | kafka, 15 | logback, 16 | log4j_over_slf4j, 17 | io_spray, 18 | play_json, 19 | scalatest 20 | ) 21 | 22 | //Run tests Sequentially 23 | parallelExecution in Test := false -------------------------------------------------------------------------------- /images/app_stream_flow.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/ca85d340138d85e341854b6df6a8108ac7de37ff/images/app_stream_flow.gif -------------------------------------------------------------------------------- /images/consumer_stream_creation_slow.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/ca85d340138d85e341854b6df6a8108ac7de37ff/images/consumer_stream_creation_slow.gif -------------------------------------------------------------------------------- /images/producer_stream_creation_slow.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/ca85d340138d85e341854b6df6a8108ac7de37ff/images/producer_stream_creation_slow.gif -------------------------------------------------------------------------------- /images/template-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omearac/reactive-kafka-microservice-template/ca85d340138d85e341854b6df6a8108ac7de37ff/images/template-overview.png -------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | object Dependencies { 4 | val kafka = "com.typesafe.akka" %% "akka-stream-kafka" % Versions.akka_kafka 5 | 6 | val logback = "ch.qos.logback" % "logback-classic" % Versions.logback 7 | 8 | val log4j_over_slf4j = "org.slf4j" % "log4j-over-slf4j" % Versions.log4j_over_slf4j 9 | 10 | val akka_slf4j = "com.typesafe.akka" %% "akka-slf4j" % Versions.akka 11 | 12 | val scalatest = "org.scalatest" %% "scalatest" % Versions.scalatest % "test" 13 | 14 | val akka_testkit = "com.typesafe.akka" %% "akka-testkit" % Versions.akka % "test" 15 | 16 | val play_json = "com.typesafe.play" % "play-json_2.11" % Versions.play_json 17 | 18 | val akka_http_core = "com.typesafe.akka" %% "akka-http-core" % Versions.akka_http 19 | 20 | val akka_http_testkit = "com.typesafe.akka" %% "akka-http-testkit" % Versions.akka_http 21 | 22 | val io_spray = "io.spray" %% "spray-json" % Versions.io_spray 23 | } 24 | -------------------------------------------------------------------------------- /project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | 3 | val akka = "2.4.16" 4 | 5 | val akka_http = "10.0.1" 6 | 7 | val akka_kafka = "0.13" 8 | 9 | val logback = "1.1.3" 10 | 11 | val log4j_over_slf4j = "1.7.12" 12 | 13 | val io_spray = "1.3.2" 14 | 15 | val 
play_json = "2.4.0-M2" 16 | 17 | val scalatest = "3.0.1" 18 | } 19 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version = 0.13.8 -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | logLevel := Level.Warn -------------------------------------------------------------------------------- /src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | http { 2 | host = "0.0.0.0", 3 | //port = 8080 4 | } 5 | 6 | akka { 7 | # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs 8 | # to STDOUT) 9 | loggers = ["akka.event.slf4j.Slf4jLogger"] 10 | 11 | # Log level used by the configured loggers (see "loggers") as soon 12 | # as they have been started; before that, see "stdout-loglevel" 13 | # Options: OFF, ERROR, WARNING, INFO, DEBUG 14 | loglevel = "ERROR" 15 | 16 | # Log level for the very basic logger activated during ActorSystem startup. 17 | # This logger prints the log messages to stdout (System.out). 18 | # Options: OFF, ERROR, WARNING, INFO, DEBUG 19 | //stdout-loglevel = "DEBUG" 20 | 21 | # Filter of log events that is used by the LoggingAdapter before 22 | # publishToKafka log events to the eventStream. 23 | logging-filter = "akka.event.DefaultLoggingFilter" 24 | 25 | #Dead-letter logging settings 26 | #log-dead-letters = off 27 | #log-dead-letters-during-shutdown = off 28 | 29 | kafka { 30 | # Properties for akka.kafka.ConsumerSettings can be 31 | # defined in this section or a configuration section with 32 | # the same layout. 33 | consumer { 34 | 35 | //The number of unique Kafka consumers consuming from individual topics 36 | num-consumers = "2" 37 | c1 { 38 | bootstrap-servers = "localhost:9092" 39 | groupId = "group1" 40 | subscription-topic = "TestDataChannel" 41 | 42 | #MessageType to convert internal case class from to JSON 43 | message-type = "KafkaMessage" 44 | 45 | # Tuning property of scheduled polls. 46 | poll-interval = 50ms 47 | 48 | # Tuning property of the `KafkaConsumer.poll` parameter. 49 | # Note that non-zero value means that blocking of the thread that 50 | # is executing the stage will be blocked. 51 | poll-timeout = 50ms 52 | 53 | # The stage will be await outstanding offset commit requests before 54 | # shutting down, but if that takes longer than this timeout it will 55 | # stop forcefully. 56 | stop-timeout = 30s 57 | 58 | # How long to wait for `KafkaConsumer.close` 59 | close-timeout = 20s 60 | 61 | # If offset commit requests are not completed within this timeout 62 | # the returned Future is completed `TimeoutException`. 63 | commit-timeout = 15s 64 | 65 | # If the KafkaConsumer can't connect to the broker the poll will be 66 | # aborted after this timeout. The KafkaConsumerActor will throw 67 | # org.apache.kafka.common.errors.WakeupException, which can be handled 68 | # with Actor supervision strategy. 69 | wakeup-timeout = 10s 70 | 71 | # Fully qualified config path which holds the dispatcher configuration 72 | # to be used by the KafkaConsumerActor. Some blocking may occur. 73 | use-dispatcher = "akka.kafka.default-dispatcher" 74 | 75 | # Properties defined by org.apache.kafka.clients.consumers.ConsumerConfig 76 | # can be defined in this configuration section. 
--------------------------------------------------------------------------------
/src/main/resources/application.conf:
--------------------------------------------------------------------------------
http {
  host = "0.0.0.0",
  //port = 8080
}

akka {
  # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs
  # to STDOUT)
  loggers = ["akka.event.slf4j.Slf4jLogger"]

  # Log level used by the configured loggers (see "loggers") as soon
  # as they have been started; before that, see "stdout-loglevel"
  # Options: OFF, ERROR, WARNING, INFO, DEBUG
  loglevel = "ERROR"

  # Log level for the very basic logger activated during ActorSystem startup.
  # This logger prints the log messages to stdout (System.out).
  # Options: OFF, ERROR, WARNING, INFO, DEBUG
  //stdout-loglevel = "DEBUG"

  # Filter of log events that is used by the LoggingAdapter before
  # publishing log events to the eventStream.
  logging-filter = "akka.event.DefaultLoggingFilter"

  #Dead-letter logging settings
  #log-dead-letters = off
  #log-dead-letters-during-shutdown = off

  kafka {
    # Properties for akka.kafka.ConsumerSettings can be
    # defined in this section or a configuration section with
    # the same layout.
    consumer {

      //The number of unique Kafka consumers consuming from individual topics
      num-consumers = "2"
      c1 {
        bootstrap-servers = "localhost:9092"
        groupId = "group1"
        subscription-topic = "TestDataChannel"

        #MessageType to convert the internal case class to/from JSON
        message-type = "KafkaMessage"

        # Tuning property of scheduled polls.
        poll-interval = 50ms

        # Tuning property of the `KafkaConsumer.poll` parameter.
        # Note that a non-zero value means that the thread executing
        # the stage will be blocked.
        poll-timeout = 50ms

        # The stage will await outstanding offset commit requests before
        # shutting down, but if that takes longer than this timeout it will
        # stop forcefully.
        stop-timeout = 30s

        # How long to wait for `KafkaConsumer.close`
        close-timeout = 20s

        # If offset commit requests are not completed within this timeout
        # the returned Future is completed with a `TimeoutException`.
        commit-timeout = 15s

        # If the KafkaConsumer can't connect to the broker the poll will be
        # aborted after this timeout. The KafkaConsumerActor will throw
        # org.apache.kafka.common.errors.WakeupException, which can be handled
        # with Actor supervision strategy.
        wakeup-timeout = 10s

        # Fully qualified config path which holds the dispatcher configuration
        # to be used by the KafkaConsumerActor. Some blocking may occur.
        use-dispatcher = "akka.kafka.default-dispatcher"

        # Properties defined by org.apache.kafka.clients.consumer.ConsumerConfig
        # can be defined in this configuration section.
        kafka-clients {
          enable.auto.commit = false
          #auto.commit.interval.ms = 10000
        }
      }

      c2 {
        bootstrap-servers = "localhost:9092"
        groupId = "group2"
        subscription-topic = "AppEventChannel"
        message-type = "ExampleAppEvent"
        poll-interval = 50ms
        poll-timeout = 50ms
        stop-timeout = 30s
        close-timeout = 20s
        commit-timeout = 15s
        wakeup-timeout = 10s
        use-dispatcher = "akka.kafka.default-dispatcher"
        kafka-clients {
          enable.auto.commit = false
          #auto.commit.interval.ms = 10000
        }
      }
    }

    # Properties for akka.kafka.ProducerSettings can be
    # defined in this section or a configuration section with
    # the same layout.
    producer {
      //The number of unique Kafka producers producing to individual topics
      num-producers = "2"

      p1 {
        bootstrap-servers = "localhost:9092"
        publish-topic = "TestDataChannel"

        #MessageType to convert the internal case class to/from JSON
        message-type = "KafkaMessage"

        # Tuning parameter of how many sends that can run in parallel.
        parallelism = 100

        # How long to wait for `KafkaProducer.close`
        close-timeout = 60s

        # Fully qualified config path which holds the dispatcher configuration
        # to be used by the producer stages. Some blocking may occur.
        # When this value is empty, the dispatcher configured for the stream
        # will be used.
        use-dispatcher = "akka.kafka.default-dispatcher"

        request.required.acks = "1"
        //Below is necessary for having multiple consumers consume the same topic
        //num.partitions has to be >= num of consumers
        //auto.offset.reset = "smallest"
        num.partitions = "5"
      }

      p2 {
        bootstrap-servers = "localhost:9092"
        message-type = "ExampleAppEvent"
        publish-topic = "AppEventChannel"
        parallelism = 100
        close-timeout = 60s
        use-dispatcher = "akka.kafka.default-dispatcher"
        request.required.acks = "1"
        num.partitions = "5"
      }
    }
  }
}
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
<configuration>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>[%highlight(%-5level)] %cyan(%logger{5}): %msg %n</pattern>
        </encoder>
    </appender>
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
    </root>
</configuration>
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/Main.scala:
--------------------------------------------------------------------------------
package com.omearac

import akka.actor.{ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import com.omearac.consumers.ConsumerStreamManager.InitializeConsumerStream
import com.omearac.consumers.{ConsumerStreamManager, DataConsumer, EventConsumer}
import com.omearac.http.HttpService
import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
import com.omearac.producers.{DataProducer, EventProducer, ProducerStreamManager}
import com.omearac.settings.Settings
import com.omearac.shared.AkkaStreams
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.io.StdIn

/**
 * This starts the Reactive Kafka Microservice Template
 */

object Main extends App with HttpService with AkkaStreams {
  implicit val system = ActorSystem("akka-reactive-kafka-app")
  val log = Logging(system, this.getClass.getName)

  //Start the akka-http server and listen for http requests
  val akkaHttpServer = startAkkaHTTPServer()

  //Create the Producer Stream Manager and Consumer Stream Manager
  val producerStreamManager = system.actorOf(Props(new ProducerStreamManager), "producerStreamManager")
  val consumerStreamManager = system.actorOf(Props(new ConsumerStreamManager), "consumerStreamManager")

  //Create actor to publish event messages to kafka stream.
  val eventProducer = system.actorOf(EventProducer.props, "eventProducer")
  producerStreamManager ! InitializeProducerStream(eventProducer, ExampleAppEvent)

  //Create actor to consume event messages from kafka stream.
  val eventConsumer = system.actorOf(EventConsumer.props, "eventConsumer")
  consumerStreamManager ! InitializeConsumerStream(eventConsumer, ExampleAppEvent)

  //Create actor to publish data messages to kafka stream.
  val dataProducer = system.actorOf(DataProducer.props, "dataProducer")
  producerStreamManager ! InitializeProducerStream(dataProducer, KafkaMessage)

  //Create actor to consume data messages from kafka stream.
  val dataConsumer = system.actorOf(DataConsumer.props, "dataConsumer")
  consumerStreamManager ! InitializeConsumerStream(dataConsumer, KafkaMessage)

  //Register the shutdown hook
  shutdownApplication()

  private def startAkkaHTTPServer(): Future[ServerBinding] = {
    val settings = Settings(system).Http
    val host = settings.host

    println(s"Specify the TCP port you want to host the HTTP server at (e.g. 8001, 8080..etc)? \nHit Return when finished:")
    val portNum = StdIn.readInt()

    println(s"Waiting for http requests at http://$host:$portNum/")
    Http().bindAndHandle(routes, host, portNum)
  }

  private def shutdownApplication(): Unit = {
    scala.sys.addShutdownHook({
      println("Terminating the Application...")
      akkaHttpServer.flatMap(_.unbind())
      system.terminate()
      Await.result(system.whenTerminated, 30 seconds)
      println("Application Terminated")
    })
  }
}



--------------------------------------------------------------------------------
/src/main/scala/com/omearac/consumers/ConsumerStream.scala:
--------------------------------------------------------------------------------
package com.omearac.consumers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerMessage, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.{Flow, Sink}
import com.omearac.shared.EventMessages.FailedMessageConversion
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future

/**
 * This trait defines the functions for creating the consumer stream components, as well
 * as functions which are used in the stream Flow.
20 | */ 21 | 22 | trait ConsumerStream extends AkkaStreams with EventSourcing { 23 | implicit val system: ActorSystem 24 | def self: ActorRef 25 | 26 | 27 | def createStreamSink(consumerActorSink : ActorRef) = { 28 | Sink.actorRefWithAck(consumerActorSink, "STREAM_INIT", "OK", "STREAM_DONE") 29 | } 30 | 31 | def createStreamSource(consumerProperties: Map[String,String]) = { 32 | val kafkaMBAddress = consumerProperties("bootstrap-servers") 33 | val groupID = consumerProperties("groupId") 34 | val topicSubscription = consumerProperties("subscription-topic") 35 | val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer) 36 | .withBootstrapServers(kafkaMBAddress) 37 | .withGroupId(groupID) 38 | .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") 39 | 40 | Consumer.committableSource(consumerSettings, Subscriptions.topics(topicSubscription)) 41 | } 42 | 43 | def createStreamFlow[msgType: Conversion] = { 44 | Flow[ConsumerMessage.CommittableMessage[Array[Byte], String]] 45 | .map(msg => (msg.committableOffset, Conversion[msgType].convertFromJson(msg.record.value))) 46 | //Publish the conversion error event messages returned from the JSONConversion 47 | .map (tuple => publishConversionErrors[msgType](tuple)) 48 | .filter(result => result.isRight) 49 | .map(test => test.right.get) 50 | //Group the commit offsets and correctly converted messages for more efficient Kafka commits 51 | .batch(max = 20, tuple => (CommittableOffsetBatch.empty.updated(tuple._1), ArrayBuffer[msgType](tuple._2))) 52 | {(tupleOfCommitOffsetAndMsgs, tuple) => 53 | (tupleOfCommitOffsetAndMsgs._1.updated(tuple._1), tupleOfCommitOffsetAndMsgs._2 :+ tuple._2) 54 | } 55 | //Take the first element of the tuple (set of commit numbers) to add to kafka commit log and then return the collection of grouped case class messages 56 | .mapAsync(4)(tupleOfCommitOffsetAndMsgs => commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs)) 57 | .map(msgGroup => msgGroup._2) 58 | } 59 | 60 | def commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs: (ConsumerMessage.CommittableOffsetBatch, ArrayBuffer[msgType])) = Future { 61 | (tupleOfCommitOffsetAndMsgs._1.commitScaladsl(), tupleOfCommitOffsetAndMsgs._2) 62 | } 63 | 64 | def publishConversionErrors[msgType](tupleOfCommitOffsetAndConversionResults: (ConsumerMessage.CommittableOffset, Either[FailedMessageConversion,msgType])) 65 | : Either[Unit,(ConsumerMessage.CommittableOffset,msgType)] = { 66 | 67 | if (tupleOfCommitOffsetAndConversionResults._2.isLeft) { 68 | 69 | //Publish a local event that there was a failure in conversion 70 | publishLocalEvent(tupleOfCommitOffsetAndConversionResults._2.left.get) 71 | 72 | //Commit the Kafka Offset to acknowledge that the message was consumed 73 | Left(tupleOfCommitOffsetAndConversionResults._1.commitScaladsl()) 74 | } 75 | else 76 | Right(tupleOfCommitOffsetAndConversionResults._1,tupleOfCommitOffsetAndConversionResults._2.right.get) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/consumers/ConsumerStreamManager.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.consumers 2 | 3 | import akka.actor._ 4 | import akka.event.Logging 5 | import akka.kafka.scaladsl.Consumer.Control 6 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 7 | import com.omearac.settings.Settings 8 | import 
com.omearac.shared.EventMessages.{ActivatedConsumerStream, TerminatedConsumerStream} 9 | import com.omearac.shared.JsonMessageConversion.Conversion 10 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage} 11 | 12 | import scala.collection.mutable 13 | 14 | 15 | /** 16 | * This actor is responsible for creating and terminating the consuming akka-kafka streams. 17 | * Upon receiving an InitializeConsumerStream message with a corresponding message type 18 | * (KafkaMessage or ExampleAppEvent) and consumer sink actor reference, this manager initializes the stream, 19 | * sends an ActivatedConsumerStream message to the sink actor and finally publishes a local event to the 20 | * Akka Event Bus. 21 | * 22 | * When this actor receives a TerminateConsumerStream message along with an associated kafka topic, he gets 23 | * the corresponding stream reference from its activeConsumerStreams collection and then shuts down the stream. 24 | */ 25 | 26 | object ConsumerStreamManager { 27 | 28 | //Command Messages 29 | case class InitializeConsumerStream(consumerActorRef: ActorRef, msgType: Any) 30 | 31 | case class TerminateConsumerStream(kafkaTopic: String) 32 | 33 | def props: Props = Props(new ConsumerStreamManager) 34 | 35 | } 36 | 37 | class ConsumerStreamManager extends Actor with ConsumerStream { 38 | implicit val system = context.system 39 | val log = Logging(system, this.getClass.getName) 40 | 41 | //Once the stream is created, we store its reference and associated kafka topic so we can shut it down on command 42 | var activeConsumerStreams: mutable.Map[String, Control] = mutable.Map() 43 | 44 | //Get Kafka Consumer Config Settings 45 | val settings = Settings(system).KafkaConsumers 46 | 47 | def receive: Receive = { 48 | case InitializeConsumerStream(consumerActorRef, KafkaMessage) => 49 | 50 | //Get consumer properties corresponding to that which subscribes to message type KafkaMessage 51 | val consumerProperties = settings.KafkaConsumerInfo("KafkaMessage") 52 | startConsumerStream[KafkaMessage](consumerActorRef, consumerProperties) 53 | 54 | case InitializeConsumerStream(consumerActorRef, ExampleAppEvent) => 55 | 56 | //Get consumer properties corresponding to that which subscribes to the message type ExampleAppEvent 57 | val consumerProperties = settings.KafkaConsumerInfo("ExampleAppEvent") 58 | startConsumerStream[ExampleAppEvent](consumerActorRef, consumerProperties) 59 | 60 | case TerminateConsumerStream(kafkaTopic) => terminateConsumerStream(sender, kafkaTopic) 61 | 62 | case other => log.error(s"Consumer Stream Manager got unknown message: $other") 63 | } 64 | 65 | 66 | def startConsumerStream[msgType: Conversion](consumerActorSink: ActorRef, consumerProperties: Map[String, String]) = { 67 | val streamSource = createStreamSource(consumerProperties) 68 | val streamFlow = createStreamFlow[msgType] 69 | val streamSink = createStreamSink(consumerActorSink) 70 | val consumerStream = streamSource.via(streamFlow).to(streamSink).run() 71 | 72 | //Add the active consumer stream reference and topic to the active stream collection 73 | val kafkaTopic = consumerProperties("subscription-topic") 74 | activeConsumerStreams += kafkaTopic -> consumerStream 75 | 76 | //Tell the consumer actor sink the stream has been started for the kafka topic and publish the event 77 | consumerActorSink ! 
ActivatedConsumerStream(kafkaTopic) 78 | publishLocalEvent(ActivatedConsumerStream(kafkaTopic)) 79 | } 80 | 81 | 82 | def terminateConsumerStream(consumerActorSink: ActorRef, kafkaTopic: String) = { 83 | try { 84 | println(s"ConsumerStreamManager got TerminateStream command for topic: $kafkaTopic. Terminating stream...") 85 | val stream = activeConsumerStreams(kafkaTopic) 86 | val stopped = stream.stop 87 | 88 | stopped.onComplete { 89 | case _ => 90 | stream.shutdown() 91 | 92 | //Remove the topic name from activeConsumerStreams collection 93 | activeConsumerStreams -= kafkaTopic 94 | 95 | //Publish an app event that the stream was killed. The stream will send an onComplete message to the Sink 96 | publishLocalEvent(TerminatedConsumerStream(kafkaTopic)) 97 | println(s"Terminated stream for topic: $kafkaTopic.") 98 | } 99 | } 100 | catch { 101 | case e: NoSuchElementException => 102 | consumerActorSink ! "STREAM_DONE" 103 | log.info(s"Stream Consumer in consuming mode but no stream to consume from: ($consumerActorSink,$kafkaTopic)") 104 | case e: Exception => log.error(s"Exception during manual termination of the Consumer Stream for topic $kafkaTopic : $e") 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/consumers/DataConsumer.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.consumers 2 | 3 | import akka.actor._ 4 | import akka.event.Logging 5 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 6 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} 7 | import com.omearac.settings.Settings 8 | import com.omearac.shared.EventMessages.ActivatedConsumerStream 9 | import com.omearac.shared.EventSourcing 10 | import com.omearac.shared.KafkaMessages.KafkaMessage 11 | 12 | import scala.collection.mutable.ArrayBuffer 13 | 14 | 15 | /** 16 | * This actor serves as a Sink for the kafka stream that is created by the ConsumerStreamManager. 17 | * The corresponding stream converts the json from the kafka topic TestDataChannel to the message type KafkaMessage. 18 | * Once this actor receives a batch of such messages he prints them out. 19 | * 20 | * This actor can be started and stopped manually from the HTTP interface, and in doing so, changes between receiving 21 | * states. 22 | */ 23 | 24 | object DataConsumer { 25 | 26 | //Command Messages 27 | case object ManuallyInitializeStream 28 | 29 | case object ManuallyTerminateStream 30 | 31 | //Document Messages 32 | case class ConsumerActorReply(message: String) 33 | 34 | def props: Props = Props(new DataConsumer) 35 | } 36 | 37 | class DataConsumer extends Actor with EventSourcing { 38 | implicit val system = context.system 39 | val log = Logging(system, this.getClass.getName) 40 | 41 | //Once stream is started by manager, we save the actor ref of the manager 42 | var consumerStreamManager: ActorRef = null 43 | 44 | //Get Kafka Topic 45 | val kafkaTopic = Settings(system).KafkaConsumers.KafkaConsumerInfo("KafkaMessage")("subscription-topic") 46 | 47 | def receive: Receive = { 48 | 49 | case ActivatedConsumerStream(_) => consumerStreamManager = sender() 50 | 51 | case "STREAM_INIT" => 52 | sender() ! "OK" 53 | println("Data Consumer entered consuming state!") 54 | context.become(consumingData) 55 | 56 | case ManuallyTerminateStream => sender() ! 
ConsumerActorReply("Data Consumer Stream Already Stopped") 57 | 58 | case ManuallyInitializeStream => 59 | consumerStreamManager ! InitializeConsumerStream(self, KafkaMessage) 60 | sender() ! ConsumerActorReply("Data Consumer Stream Started") 61 | 62 | case other => log.error("Data Consumer got unknown message while in idle:" + other) 63 | } 64 | 65 | def consumingData: Receive = { 66 | case ActivatedConsumerStream(_) => consumerStreamManager = sender() 67 | 68 | case consumerMessageBatch: ArrayBuffer[_] => 69 | sender() ! "OK" 70 | consumerMessageBatch.foreach(println) 71 | 72 | case "STREAM_DONE" => 73 | context.become(receive) 74 | 75 | case ManuallyInitializeStream => sender() ! ConsumerActorReply("Data Consumer Already Started") 76 | 77 | case ManuallyTerminateStream => 78 | consumerStreamManager ! TerminateConsumerStream(kafkaTopic) 79 | sender() ! ConsumerActorReply("Data Consumer Stream Stopped") 80 | 81 | case other => log.error("Data Consumer got Unknown message while in consuming " + other) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/consumers/EventConsumer.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.consumers 2 | 3 | import akka.actor.{Actor, ActorRef, Props} 4 | import akka.event.Logging 5 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 6 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} 7 | import com.omearac.settings.Settings 8 | import com.omearac.shared.EventMessages.ActivatedConsumerStream 9 | import com.omearac.shared.EventSourcing 10 | import com.omearac.shared.KafkaMessages.ExampleAppEvent 11 | 12 | import scala.collection.mutable.ArrayBuffer 13 | 14 | 15 | /** 16 | * This actor serves as a Sink for the kafka stream that is created by the ConsumerStreamManager. 17 | * The corresponding stream converts the json from the kafka topic AppEventChannel to the message type ExampleAppEvent. 18 | * Once this actor receives a batch of such messages he prints them out. 19 | * 20 | * This actor can be started and stopped manually from the HTTP interface, and in doing so, changes between receiving 21 | * states. 22 | */ 23 | 24 | object EventConsumer { 25 | 26 | def props: Props = Props(new EventConsumer) 27 | } 28 | 29 | class EventConsumer extends Actor with EventSourcing { 30 | implicit val system = context.system 31 | val log = Logging(system, this.getClass.getName) 32 | 33 | //Once stream is started by manager, we save the actor ref of the manager 34 | var consumerStreamManager: ActorRef = null 35 | 36 | //Get Kafka Topic 37 | val kafkaTopic = Settings(system).KafkaConsumers.KafkaConsumerInfo("ExampleAppEvent")("subscription-topic") 38 | 39 | def receive: Receive = { 40 | case InitializeConsumerStream(_, ExampleAppEvent) => 41 | consumerStreamManager ! InitializeConsumerStream(self, ExampleAppEvent) 42 | 43 | case ActivatedConsumerStream(_) => consumerStreamManager = sender() 44 | 45 | case "STREAM_INIT" => 46 | sender() ! "OK" 47 | println("Event Consumer entered consuming state!") 48 | context.become(consumingEvents) 49 | 50 | case ManuallyTerminateStream => sender() ! ConsumerActorReply("Event Consumer Stream Already Stopped") 51 | 52 | case ManuallyInitializeStream => 53 | consumerStreamManager ! InitializeConsumerStream(self, ExampleAppEvent) 54 | sender() ! 
ConsumerActorReply("Event Consumer Stream Started") 55 | 56 | case other => log.error("Event Consumer got unknown message while in idle:" + other) 57 | } 58 | 59 | def consumingEvents: Receive = { 60 | case ActivatedConsumerStream(_) => consumerStreamManager = sender() 61 | 62 | case consumerMessageBatch: ArrayBuffer[_] => 63 | sender() ! "OK" 64 | consumerMessageBatch.foreach(println) 65 | 66 | case "STREAM_DONE" => 67 | context.become(receive) 68 | 69 | case ManuallyInitializeStream => sender() ! ConsumerActorReply("Event Consumer Already Started") 70 | 71 | case ManuallyTerminateStream => 72 | consumerStreamManager ! TerminateConsumerStream(kafkaTopic) 73 | sender() ! ConsumerActorReply("Event Consumer Stream Stopped") 74 | 75 | case other => log.error("Event Consumer got unknown message while in consuming: " + other) 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/http/HttpService.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.http 2 | 3 | import akka.http.scaladsl.server.Directives._ 4 | import com.omearac.http.routes.{ConsumerCommands, ProducerCommands} 5 | 6 | 7 | trait HttpService extends ConsumerCommands with ProducerCommands { 8 | //Joining the Http Routes 9 | def routes = producerHttpCommands ~ dataConsumerHttpCommands ~ eventConsumerHttpCommands 10 | } 11 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/http/routes/ConsumerCommands.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.http.routes 2 | 3 | import akka.actor.ActorRef 4 | import akka.event.LoggingAdapter 5 | import akka.http.scaladsl.model.StatusCodes 6 | import akka.http.scaladsl.server.Directives._ 7 | import akka.http.scaladsl.server._ 8 | import akka.pattern.ask 9 | import akka.util.Timeout 10 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} 11 | 12 | import scala.concurrent.duration._ 13 | 14 | /** 15 | * This trait defines the HTTP API for starting and stopping the Data and Event Consumer Streams 16 | */ 17 | 18 | trait ConsumerCommands { 19 | def dataConsumer: ActorRef 20 | 21 | def eventConsumer: ActorRef 22 | 23 | def log: LoggingAdapter 24 | 25 | val dataConsumerHttpCommands: Route = pathPrefix("data_consumer") { 26 | implicit val timeout = Timeout(10 seconds) 27 | path("stop") { 28 | get { 29 | onSuccess(dataConsumer ? ManuallyTerminateStream) { 30 | case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message); 31 | case _ => complete(StatusCodes.InternalServerError) 32 | } 33 | } 34 | } ~ 35 | path("start") { 36 | get { 37 | onSuccess(dataConsumer ? ManuallyInitializeStream) { 38 | case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message) 39 | case _ => complete(StatusCodes.InternalServerError) 40 | } 41 | } 42 | } 43 | } 44 | 45 | val eventConsumerHttpCommands: Route = pathPrefix("event_consumer") { 46 | implicit val timeout = Timeout(10 seconds) 47 | path("stop") { 48 | get { 49 | onSuccess(eventConsumer ? ManuallyTerminateStream) { 50 | case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message); 51 | case _ => complete(StatusCodes.InternalServerError) 52 | } 53 | } 54 | } ~ 55 | path("start") { 56 | get { 57 | onSuccess(eventConsumer ? 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/http/HttpService.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.http
2 | 
3 | import akka.http.scaladsl.server.Directives._
4 | import com.omearac.http.routes.{ConsumerCommands, ProducerCommands}
5 | 
6 | 
7 | trait HttpService extends ConsumerCommands with ProducerCommands {
8 |   //Joining the Http Routes
9 |   def routes = producerHttpCommands ~ dataConsumerHttpCommands ~ eventConsumerHttpCommands
10 | }
11 | 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/http/routes/ConsumerCommands.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.http.routes
2 | 
3 | import akka.actor.ActorRef
4 | import akka.event.LoggingAdapter
5 | import akka.http.scaladsl.model.StatusCodes
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.http.scaladsl.server._
8 | import akka.pattern.ask
9 | import akka.util.Timeout
10 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream}
11 | 
12 | import scala.concurrent.duration._
13 | 
14 | /**
15 |  * This trait defines the HTTP API for starting and stopping the Data and Event Consumer Streams
16 |  */
17 | 
18 | trait ConsumerCommands {
19 |   def dataConsumer: ActorRef
20 | 
21 |   def eventConsumer: ActorRef
22 | 
23 |   def log: LoggingAdapter
24 | 
25 |   val dataConsumerHttpCommands: Route = pathPrefix("data_consumer") {
26 |     implicit val timeout = Timeout(10 seconds)
27 |     path("stop") {
28 |       get {
29 |         onSuccess(dataConsumer ? ManuallyTerminateStream) {
30 |           case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
31 |           case _ => complete(StatusCodes.InternalServerError)
32 |         }
33 |       }
34 |     } ~
35 |       path("start") {
36 |         get {
37 |           onSuccess(dataConsumer ? ManuallyInitializeStream) {
38 |             case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
39 |             case _ => complete(StatusCodes.InternalServerError)
40 |           }
41 |         }
42 |       }
43 |   }
44 | 
45 |   val eventConsumerHttpCommands: Route = pathPrefix("event_consumer") {
46 |     implicit val timeout = Timeout(10 seconds)
47 |     path("stop") {
48 |       get {
49 |         onSuccess(eventConsumer ? ManuallyTerminateStream) {
50 |           case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
51 |           case _ => complete(StatusCodes.InternalServerError)
52 |         }
53 |       }
54 |     } ~
55 |       path("start") {
56 |         get {
57 |           onSuccess(eventConsumer ? ManuallyInitializeStream) {
58 |             case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
59 |             case _ => complete(StatusCodes.InternalServerError)
60 |           }
61 |         }
62 |       }
63 |   }
64 | 
65 | }
66 | 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/http/routes/ProducerCommands.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.http.routes
2 | 
3 | import akka.actor.ActorRef
4 | import akka.event.LoggingAdapter
5 | import akka.http.scaladsl.model.StatusCodes
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.http.scaladsl.server.Route
8 | import akka.pattern.ask
9 | import akka.util.Timeout
10 | import com.omearac.producers.DataProducer.PublishMessages
11 | import com.omearac.shared.EventMessages.MessagesPublished
12 | 
13 | import scala.concurrent.duration._
14 | 
15 | 
16 | /**
17 |  * This trait defines the HTTP API for telling the DataProducer to publish data messages to Kafka via the Stream
18 |  */
19 | 
20 | trait ProducerCommands {
21 |   def log: LoggingAdapter
22 |   def dataProducer: ActorRef
23 | 
24 |   val producerHttpCommands: Route = pathPrefix("data_producer") {
25 |     implicit val timeout = Timeout(10 seconds)
26 |     path("produce" / IntNumber) { numOfMessagesToProduce =>
27 |       get {
28 |         onSuccess(dataProducer ? PublishMessages(numOfMessagesToProduce)) {
29 |           case MessagesPublished(numberOfMessages) => complete(StatusCodes.OK, numberOfMessages + " messages Produced as Ordered, Boss!")
30 |           case _ => complete(StatusCodes.InternalServerError)
31 |         }
32 |       }
33 |     }
34 |   }
35 | }
36 | 
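With the service bound to the host and port from application.conf (localhost:8080 in the test config), the route groups above can be exercised from any HTTP client. A minimal akka-http client sketch (illustrative only; not a file from this repo):

    import akka.actor.ActorSystem
    import akka.http.scaladsl.Http
    import akka.http.scaladsl.model.HttpRequest
    import akka.stream.ActorMaterializer

    object RouteSmokeTest extends App {
      implicit val system = ActorSystem("route-smoke-test")
      implicit val materializer = ActorMaterializer()
      import system.dispatcher

      val base = "http://localhost:8080"  //assumed host/port
      val calls = Seq(s"$base/data_consumer/start", s"$base/data_producer/produce/10", s"$base/data_consumer/stop")

      //Fire the GETs and print each response status
      calls.foreach { uri =>
        Http().singleRequest(HttpRequest(uri = uri)).foreach(resp => println(s"$uri -> ${resp.status}"))
      }
    }
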
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/producers/DataProducer.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.producers
2 | 
3 | import akka.actor._
4 | import akka.event.Logging
5 | import akka.stream.scaladsl.SourceQueueWithComplete
6 | import com.omearac.producers.DataProducer.PublishMessages
7 | import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
8 | import com.omearac.shared.EventSourcing
9 | import com.omearac.shared.KafkaMessages.KafkaMessage
10 | 
11 | /**
12 |  * This actor publishes 'KafkaMessage's to the Kafka topic TestDataChannel. The idea would be that another microservice is subscribed to
13 |  * the TestDataChannel topic and can then react to data messages this microservice emits.
14 |  * This actor gets the stream connection reference from the ProducerStreamManager such that when it gets a
15 |  * PublishMessages command message from the HTTP interface, it will create KafkaMessages and then offer/send them to the stream.
16 |  */
17 | 
18 | object DataProducer {
19 | 
20 |   //Command Messages
21 |   case class PublishMessages(numberOfMessages: Int)
22 | 
23 |   def props: Props = Props(new DataProducer)
24 | 
25 | }
26 | 
27 | class DataProducer extends Actor with EventSourcing {
28 | 
29 |   import context._
30 | 
31 |   implicit val system = context.system
32 |   val log = Logging(system, this.getClass.getName)
33 | 
34 |   var producerStream: SourceQueueWithComplete[Any] = null
35 | 
36 |   def receive: Receive = {
37 |     case ActivatedProducerStream(streamRef, kafkaTopic) =>
38 |       producerStream = streamRef
39 |       become(publishData)
40 |     case msg: PublishMessages => if (producerStream == null) self ! msg
41 |     case other => log.error("DataProducer got the unknown message while in idle: " + other)
42 |   }
43 | 
44 |   def publishData: Receive = {
45 |     case PublishMessages(numberOfMessages) =>
46 |       for (i <- 1 to numberOfMessages) {
47 |         val myPublishableMessage = KafkaMessage(timetag, " send me to kafka, yo!", i)
48 |         producerStream.offer(myPublishableMessage)
49 |       }
50 | 
51 |       //Tell the akka-http front end that messages were sent
52 |       sender() ! MessagesPublished(numberOfMessages)
53 |       publishLocalEvent(MessagesPublished(numberOfMessages))
54 |     case other => log.error("DataProducer got the unknown message while producing: " + other)
55 |   }
56 | }
57 | 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/producers/EventProducer.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.producers
2 | 
3 | import akka.actor.{Actor, Props}
4 | import akka.event.Logging
5 | import akka.stream.scaladsl.SourceQueueWithComplete
6 | import com.omearac.shared.EventMessages.ActivatedProducerStream
7 | import com.omearac.shared.EventSourcing
8 | import com.omearac.shared.KafkaMessages.ExampleAppEvent
9 | 
10 | 
11 | /**
12 |  * This actor receives local app events called "ExampleAppEvent"s, which are initially published to the
13 |  * "internal" Akka Event Bus and to which it is subscribed. It then publishes the event messages to the Kafka
14 |  * topic called AppEventChannel. The idea would be that another microservice is subscribed to
15 |  * the AppEventChannel topic and can then react to events this microservice emits.
16 |  */
17 | 
18 | object EventProducer {
19 | 
20 |   def props: Props = Props(new EventProducer)
21 | }
22 | 
23 | class EventProducer extends Actor with EventSourcing {
24 | 
25 |   import context._
26 | 
27 |   implicit val system = context.system
28 |   val log = Logging(system, this.getClass.getName)
29 | 
30 |   var producerStream: SourceQueueWithComplete[Any] = null
31 |   val subscribedMessageTypes = Seq(classOf[ExampleAppEvent])
32 | 
33 |   override def preStart(): Unit = {
34 |     super.preStart()
35 |     subscribedMessageTypes.foreach(system.eventStream.subscribe(self, _))
36 |   }
37 | 
38 |   override def postStop(): Unit = {
39 |     subscribedMessageTypes.foreach(system.eventStream.unsubscribe(self, _))
40 |     super.postStop()
41 |   }
42 | 
43 |   def receive: Receive = {
44 |     case ActivatedProducerStream(streamRef, _) =>
45 |       producerStream = streamRef
46 |       become(publishEvent)
47 | 
48 |     case msg: ExampleAppEvent => if (producerStream == null) self ! 
msg else producerStream.offer(msg) 49 | case other => log.error("EventProducer got the unknown message while in idle: " + other) 50 | } 51 | 52 | def publishEvent: Receive = { 53 | case msg: ExampleAppEvent => producerStream.offer(msg) 54 | case other => log.error("EventProducer got the unknown message while producing: " + other) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/producers/ProducerStream.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.producers 2 | 3 | import akka.actor.{ActorRef, ActorSystem} 4 | import akka.kafka.ProducerSettings 5 | import akka.kafka.scaladsl.Producer 6 | import akka.stream.OverflowStrategy 7 | import akka.stream.scaladsl.{Flow, Source} 8 | import com.omearac.shared.JsonMessageConversion.Conversion 9 | import com.omearac.shared.{AkkaStreams, EventSourcing} 10 | import org.apache.kafka.clients.producer.ProducerRecord 11 | import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer} 12 | 13 | /** 14 | * This trait defines the functions for creating the producer stream components. 15 | */ 16 | 17 | trait ProducerStream extends AkkaStreams with EventSourcing { 18 | implicit val system: ActorSystem 19 | def self: ActorRef 20 | 21 | def createStreamSource[msgType] = { 22 | Source.queue[msgType](Int.MaxValue,OverflowStrategy.backpressure) 23 | } 24 | 25 | def createStreamSink(producerProperties: Map[String, String]) = { 26 | val kafkaMBAddress = producerProperties("bootstrap-servers") 27 | val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer).withBootstrapServers(kafkaMBAddress) 28 | 29 | Producer.plainSink(producerSettings) 30 | } 31 | 32 | def createStreamFlow[msgType: Conversion](producerProperties: Map[String, String]) = { 33 | val numberOfPartitions = producerProperties("num.partitions").toInt -1 34 | val topicToPublish = producerProperties("publish-topic") 35 | val rand = new scala.util.Random 36 | val range = 0 to numberOfPartitions 37 | 38 | Flow[msgType].map { msg => 39 | val partition = range(rand.nextInt(range.length)) 40 | val stringJSONMessage = Conversion[msgType].convertToJson(msg) 41 | new ProducerRecord[Array[Byte], String](topicToPublish, partition, null, stringJSONMessage) 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/producers/ProducerStreamManager.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.producers 2 | 3 | import akka.actor._ 4 | import com.omearac.producers.ProducerStreamManager.InitializeProducerStream 5 | import com.omearac.settings.Settings 6 | import com.omearac.shared.EventMessages.ActivatedProducerStream 7 | import com.omearac.shared.JsonMessageConversion.Conversion 8 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage} 9 | 10 | /** 11 | * This actor is responsible for creating and terminating the publishing akka-kafka streams. 12 | * Upon receiving an InitializeProducerStream message with a corresponding message type 13 | * (KafkaMessage or ExampleAppEvent) and producer source actor reference, this manager initializes the stream, 14 | * sends an ActivatedProducerStream message to the source actor and finally publishes a local event to the 15 | * Akka Event Bus. 
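 * As an illustrative sketch (using the props and messages defined in this repo), the wiring looks roughly like:
 * {{{
 *   val streamManager = system.actorOf(ProducerStreamManager.props)
 *   val dataProducer = system.actorOf(DataProducer.props)
 *   streamManager ! InitializeProducerStream(dataProducer, KafkaMessage)
 *   //dataProducer then receives ActivatedProducerStream(streamRef, topic) and offers messages to streamRef
 * }}}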
16 | */ 17 | 18 | object ProducerStreamManager { 19 | 20 | //CommandMessage 21 | case class InitializeProducerStream(producerActorRef: ActorRef, msgType: Any) 22 | 23 | def props: Props = Props(new ProducerStreamManager) 24 | } 25 | 26 | class ProducerStreamManager extends Actor with ProducerStream { 27 | implicit val system = context.system 28 | 29 | //Get Kafka Producer Config Settings 30 | val settings = Settings(system).KafkaProducers 31 | 32 | //Edit this receive method with any new Streamed message types 33 | def receive: Receive = { 34 | case InitializeProducerStream(producerActorRef, KafkaMessage) => { 35 | 36 | //Get producer properties 37 | val producerProperties = settings.KafkaProducerInfo("KafkaMessage") 38 | startProducerStream[KafkaMessage](producerActorRef, producerProperties) 39 | } 40 | case InitializeProducerStream(producerActorRef, ExampleAppEvent) => { 41 | 42 | //Get producer properties 43 | val producerProperties = settings.KafkaProducerInfo("ExampleAppEvent") 44 | startProducerStream[ExampleAppEvent](producerActorRef, producerProperties) 45 | } 46 | case other => println(s"Producer Stream Manager got unknown message: $other") 47 | } 48 | 49 | 50 | def startProducerStream[msgType: Conversion](producerActorSource: ActorRef, producerProperties: Map[String, String]) = { 51 | val streamSource = createStreamSource[msgType] 52 | val streamFlow = createStreamFlow[msgType](producerProperties) 53 | val streamSink = createStreamSink(producerProperties) 54 | val producerStream = streamSource.via(streamFlow).to(streamSink).run() 55 | 56 | //Send the completed stream reference to the actor who wants to publish to it 57 | val kafkaTopic = producerProperties("publish-topic") 58 | producerActorSource ! ActivatedProducerStream(producerStream, kafkaTopic) 59 | publishLocalEvent(ActivatedProducerStream(producerStream, kafkaTopic)) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/scala/com/omearac/settings/Settings.scala: -------------------------------------------------------------------------------- 1 | package com.omearac.settings 2 | 3 | import akka.actor.ActorSystem 4 | 5 | /** 6 | * In this class we read in the application.conf configuration file to get the various consumer/producer settings 7 | * as well as the akka-http host 8 | */ 9 | 10 | class Settings(system:ActorSystem) { 11 | object Http { 12 | val host = system.settings.config.getString("http.host") 13 | } 14 | object KafkaProducers { 15 | val numberOfProducers = system.settings.config.getInt("akka.kafka.producer.num-producers") 16 | 17 | //TODO: We only have one bootstrap server (kafka broker) at the moment so we get one IP below) 18 | val KafkaProducerInfo: Map[String, Map[String,String]] = (for (i <- 1 to numberOfProducers) yield { 19 | val kafkaMessageType = system.settings.config.getString(s"akka.kafka.producer.p$i.message-type") 20 | val kafkaMessageBrokerIP = system.settings.config.getString(s"akka.kafka.producer.p$i.bootstrap-servers") 21 | val kafkaTopic = system.settings.config.getString(s"akka.kafka.producer.p$i.publish-topic") 22 | val numberOfPartitions = system.settings.config.getString(s"akka.kafka.producer.p$i.num.partitions") 23 | kafkaMessageType -> Map("bootstrap-servers" -> kafkaMessageBrokerIP, "publish-topic" -> kafkaTopic, "num.partitions" -> numberOfPartitions) 24 | }).toMap 25 | } 26 | 27 | object KafkaConsumers { 28 | val numberOfConsumers = system.settings.config.getInt("akka.kafka.consumer.num-consumers") 29 | 30 | //TODO: We only 
have one bootstrap server (kafka broker) at the moment, so we get one IP below
31 |     val KafkaConsumerInfo: Map[String, Map[String,String]] = (for (i <- 1 to numberOfConsumers) yield {
32 |       val kafkaMessageType = system.settings.config.getString(s"akka.kafka.consumer.c$i.message-type")
33 |       val kafkaMessageBrokerIP = system.settings.config.getString(s"akka.kafka.consumer.c$i.bootstrap-servers")
34 |       val kafkaTopic = system.settings.config.getString(s"akka.kafka.consumer.c$i.subscription-topic")
35 |       val groupId = system.settings.config.getString(s"akka.kafka.consumer.c$i.groupId")
36 |       kafkaMessageType -> Map("bootstrap-servers" -> kafkaMessageBrokerIP, "subscription-topic" -> kafkaTopic, "groupId" -> groupId)
37 |     }).toMap
38 |   }
39 | }
40 | 
41 | object Settings {
42 |   def apply(system: ActorSystem) = new Settings(system)
43 | }
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/shared/AkkaStreams.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.shared
2 | 
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | 
6 | /**
7 |  * This trait contains the components required to materialize and run Akka Streams
8 |  */
9 | 
10 | trait AkkaStreams {
11 |   implicit val system: ActorSystem
12 |   implicit def executionContext = system.dispatcher
13 |   implicit def materializer = ActorMaterializer()
14 | }
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/shared/EventSourcing.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.shared
2 | 
3 | import java.util.Date
4 | 
5 | import akka.actor.{ActorRef, ActorSystem}
6 | import akka.serialization._
7 | import com.omearac.shared.EventMessages.EventMessage
8 | import com.omearac.shared.KafkaMessages.ExampleAppEvent
9 | 
10 | /**
11 |  * This trait converts EventMessages to ExampleAppEvents and defines the method for publishing them to the local
12 |  * Akka Event Bus. The conversion occurs since we eventually publish the ExampleAppEvents to Kafka via a stream once
13 |  * they're picked up from the local bus.
14 |  */
15 | 
16 | trait EventSourcing {
17 |   implicit val system: ActorSystem
18 |   val dateFormat = new java.text.SimpleDateFormat("dd:MM:yy:HH:mm:ss.SSS")
19 |   def timetag = dateFormat.format(new Date(System.currentTimeMillis())) //a def, so every event gets a fresh timestamp
20 |   def self: ActorRef
21 | 
22 | 
23 |   def publishLocalEvent(msg: EventMessage): Unit = {
24 |     val exampleAppEvent = ExampleAppEvent(timetag, Serialization.serializedActorPath(self), msg.toString)
25 |     system.eventStream.publish(exampleAppEvent)
26 |   }
27 | }
28 | 
29 | 
30 | 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/shared/JsonMessageConversion.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.shared
2 | 
3 | import akka.util.Timeout
4 | import com.omearac.shared.EventMessages.FailedMessageConversion
5 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
6 | import play.api.libs.json.Json
7 | import spray.json._
8 | 
9 | import scala.concurrent.duration._
10 | 
11 | 
12 | /**
13 |  * Here we define a typeclass which converts case class messages to/from JSON.
14 |  * Currently, we can convert KafkaMessage and ExampleAppEvent messages to/from JSON.
15 |  * Any additional case class types need to have conversion methods defined here.
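 * As an illustrative sketch, a conversion for a hypothetical new case class would follow the same shape as the
 * two implicit objects below:
 * {{{
 *   case class StatusUpdate(time: String, status: String)
 *
 *   implicit object StatusUpdateConversion extends Conversion[StatusUpdate] {
 *     implicit val format = jsonFormat2(StatusUpdate)
 *     def convertFromJson(msg: String): Either[FailedMessageConversion, StatusUpdate] =
 *       try Right(msg.parseJson.convertTo[StatusUpdate])
 *       catch { case e: Exception => Left(FailedMessageConversion("kafkaTopic", msg, "to: StatusUpdate")) }
 *     def convertToJson(msg: StatusUpdate): String = msg.toJson.toString
 *   }
 * }}}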
16 |  */
17 | 
18 | 
19 | object JsonMessageConversion {
20 |   implicit val resolveTimeout = Timeout(3 seconds)
21 | 
22 |   trait Conversion[T] {
23 |     def convertFromJson(msg: String): Either[FailedMessageConversion, T]
24 |     def convertToJson(msg: T): String
25 |   }
26 | 
27 |   //Here is where we create implicit objects for each Message Type you wish to convert to/from JSON
28 |   object Conversion extends DefaultJsonProtocol {
29 | 
30 |     implicit object KafkaMessageConversions extends Conversion[KafkaMessage] {
31 |       implicit val json3 = jsonFormat3(KafkaMessage)
32 | 
33 |       /**
34 |        * Converts the JSON string from the CommittableMessage to the KafkaMessage case class
35 |        * @param msg is the JSON string to be converted to the KafkaMessage case class
36 |        * @return either a FailedMessageConversion (if conversion fails) or a KafkaMessage
37 |        */
38 |       def convertFromJson(msg: String): Either[FailedMessageConversion, KafkaMessage] = {
39 |         try {
40 |           Right(msg.parseJson.convertTo[KafkaMessage])
41 |         }
42 |         catch {
43 |           case e: Exception => Left(FailedMessageConversion("kafkaTopic", msg, "to: KafkaMessage"))
44 |         }
45 |       }
46 |       def convertToJson(msg: KafkaMessage) = {
47 |         implicit val writes = Json.writes[KafkaMessage]
48 |         Json.toJson(msg).toString
49 |       }
50 |     }
51 | 
52 |     implicit object ExampleAppEventConversion extends Conversion[ExampleAppEvent] {
53 |       implicit val json3 = jsonFormat3(ExampleAppEvent)
54 | 
55 |       /**
56 |        * Converts the JSON string from the CommittableMessage to the ExampleAppEvent case class
57 |        * @param msg is the JSON string to be converted to the ExampleAppEvent case class
58 |        * @return either a FailedMessageConversion (if conversion fails) or an ExampleAppEvent
59 |        */
60 |       def convertFromJson(msg: String): Either[FailedMessageConversion, ExampleAppEvent] = {
61 |         try {
62 |           Right(msg.parseJson.convertTo[ExampleAppEvent])
63 |         }
64 |         catch {
65 |           case e: Exception => Left(FailedMessageConversion("kafkaTopic", msg, "to: ExampleAppEvent"))
66 |         }
67 |       }
68 |       def convertToJson(msg: ExampleAppEvent) = {
69 |         implicit val writes = Json.writes[ExampleAppEvent]
70 |         Json.toJson(msg).toString
71 |       }
72 |     }
73 | 
74 |     //Adding some sweet sweet syntactic sugar
75 |     def apply[T: Conversion] : Conversion[T] = implicitly
76 |   }
77 | }
78 | 
79 | 
--------------------------------------------------------------------------------
/src/main/scala/com/omearac/shared/Messages.scala:
--------------------------------------------------------------------------------
1 | package com.omearac.shared
2 | 
3 | import akka.stream.scaladsl.SourceQueueWithComplete
4 | 
5 | /**
6 |  * EventMessages are those which are emitted throughout the application, and KafkaMessages are those which
7 |  * are converted to/from JSON to be published/consumed to/from Kafka.
8 |  * The EventMessages are converted to ExampleAppEvents when they are published.
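 * As an illustrative sketch, a new event type would be declared below alongside the others and then published
 * from any actor mixing in EventSourcing:
 * {{{
 *   case class StreamRestarted(kafkaTopic: String) extends EventMessage
 *   publishLocalEvent(StreamRestarted("TempChannel1"))
 * }}}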
9 |  */
10 | 
11 | object EventMessages {
12 |   abstract class EventMessage
13 |   case class ActivatedConsumerStream(kafkaTopic: String) extends EventMessage
14 |   case class TerminatedConsumerStream(kafkaTopic: String) extends EventMessage
15 |   case class ActivatedProducerStream[msgType](producerStream: SourceQueueWithComplete[msgType], kafkaTopic: String) extends EventMessage
16 |   case class MessagesPublished(numberOfMessages: Int) extends EventMessage
17 |   case class FailedMessageConversion(kafkaTopic: String, msg: String, msgType: String) extends EventMessage
18 | }
19 | 
20 | object KafkaMessages {
21 |   case class KafkaMessage(time: String, subject: String, item: Int)
22 |   case class ExampleAppEvent(time: String, senderID: String, eventType: String)
23 | }
--------------------------------------------------------------------------------
/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | http {
2 |   host = "0.0.0.0"
3 |   port = "8080"
4 | }
5 | 
6 | akka {
7 |   loggers = ["akka.testkit.TestEventListener"]
8 |   #loggers = ["akka.event.slf4j.Slf4jLogger"]
9 |   loglevel = "DEBUG"
10 |   logging-filter = "akka.event.DefaultLoggingFilter"
11 | 
12 |   kafka {
13 |     consumer {
14 |       num-consumers = "2"
15 |       c1 {
16 |         bootstrap-servers = "localhost:9092"
17 |         groupId = "group1"
18 |         subscription-topic = "TempChannel1"
19 |         message-type = "KafkaMessage"
20 |         poll-interval = 50ms
21 |         poll-timeout = 50ms
22 |         stop-timeout = 30s
23 |         close-timeout = 20s
24 |         commit-timeout = 15s
25 |         wakeup-timeout = 10s
26 |         use-dispatcher = "akka.kafka.default-dispatcher"
27 |         kafka-clients {
28 |           enable.auto.commit = false
29 |         }
30 |       }
31 | 
32 |       c2 {
33 |         bootstrap-servers = "localhost:9092"
34 |         groupId = "group2"
35 |         subscription-topic = "TempChannel2"
36 |         message-type = "ExampleAppEvent"
37 |         poll-interval = 50ms
38 |         poll-timeout = 50ms
39 |         stop-timeout = 30s
40 |         close-timeout = 20s
41 |         commit-timeout = 15s
42 |         wakeup-timeout = 10s
43 |         use-dispatcher = "akka.kafka.default-dispatcher"
44 |         kafka-clients {
45 |           enable.auto.commit = false
46 |         }
47 |       }
48 |     }
49 | 
50 |     producer {
51 |       num-producers = "2"
52 | 
53 |       p1 {
54 |         bootstrap-servers = "localhost:9092"
55 |         publish-topic = "TempChannel1"
56 |         message-type = "KafkaMessage"
57 |         parallelism = 100
58 |         close-timeout = 60s
59 |         use-dispatcher = "akka.kafka.default-dispatcher"
60 | 
61 |         request.required.acks = "1"
62 |         num.partitions = "5"
63 |       }
64 | 
65 |       p2 {
66 |         bootstrap-servers = "localhost:9092"
67 |         message-type = "ExampleAppEvent"
68 |         publish-topic = "TempChannel2"
69 |         parallelism = 100
70 |         close-timeout = 60s
71 |         use-dispatcher = "akka.kafka.default-dispatcher"
72 |         request.required.acks = "1"
73 |         num.partitions = "5"
74 |       }
75 |     }
76 |   }
77 | }
78 | 
79 | 
--------------------------------------------------------------------------------
/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
3 |         <encoder><pattern>[%highlight(%-5level)] %cyan(%logger{5}): %msg %n</pattern></encoder>
4 |     </appender>
5 | 
6 |     <root level="DEBUG">
7 |         <appender-ref ref="STDOUT"/>
8 |     </root>
9 | </configuration>
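The c1/c2 and p1/p2 blocks above are exactly what Settings.scala folds into its message-type keyed maps; a quick usage sketch under this test config:

    val consumerInfo = Settings(system).KafkaConsumers.KafkaConsumerInfo
    consumerInfo("KafkaMessage")("subscription-topic")   //"TempChannel1"
    consumerInfo("ExampleAppEvent")("groupId")           //"group2"
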
--------------------------------------------------------------------------------
/src/test/scala/akka/HTTPInterfaceSpec.scala:
--------------------------------------------------------------------------------
1 | package akka
2 | 
3 | import akka.event.Logging
4 | import akka.http.scaladsl.testkit.ScalatestRouteTest
5 | import akka.stream.QueueOfferResult
6 | import akka.stream.QueueOfferResult.Enqueued
7 | import akka.stream.scaladsl.SourceQueueWithComplete
8 | import akka.testkit.{TestActorRef, TestProbe}
9 | import com.omearac.consumers.{DataConsumer, EventConsumer}
10 | import com.omearac.http.routes.{ConsumerCommands, ProducerCommands}
11 | import com.omearac.producers.DataProducer
12 | import org.scalatest.{Matchers, WordSpec}
13 | 
14 | import scala.concurrent.Future
15 | 
16 | 
17 | class HTTPInterfaceSpec extends WordSpec
18 |   with Matchers with ScalatestRouteTest
19 |   with ConsumerCommands with ProducerCommands {
20 | 
21 |   val log = Logging(system, this.getClass.getName)
22 | 
23 |   //Mocks for DataConsumer Tests
24 |   val dataConsumer = TestActorRef(new DataConsumer)
25 |   val manager = TestProbe()
26 |   dataConsumer.underlyingActor.consumerStreamManager = manager.ref
27 | 
28 |   //Mocks for EventConsumer Tests
29 |   val eventConsumer = TestActorRef(new EventConsumer)
30 |   eventConsumer.underlyingActor.consumerStreamManager = manager.ref
31 | 
32 |   //Mocks for DataProducer Tests
33 |   val dataProducer = TestActorRef(new DataProducer)
34 |   val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
35 |     override def complete(): Unit = println("complete")
36 | 
37 |     override def fail(ex: Throwable): Unit = println("fail")
38 | 
39 |     override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }
40 | 
41 |     override def watchCompletion(): Future[Done] = Future { Done }
42 |   }
43 | 
44 | 
45 |   "The HTTP interface to control the DataConsumerStream" should {
46 |     "return an Already Stopped message for GET requests to /data_consumer/stop" in {
47 |       Get("/data_consumer/stop") ~> dataConsumerHttpCommands ~> check {
48 |         responseAs[String] shouldEqual "Data Consumer Stream Already Stopped"
49 |       }
50 |     }
51 | 
52 |     "return a Stream Started response for GET requests to /data_consumer/start" in {
53 |       Get("/data_consumer/start") ~> dataConsumerHttpCommands ~> check {
54 |         responseAs[String] shouldEqual "Data Consumer Stream Started"
55 |       }
56 |     }
57 |   }
58 | 
59 |   "The HTTP interface to control the EventConsumerStream" should {
60 |     "return an Already Stopped message for GET requests to /event_consumer/stop" in {
61 |       Get("/event_consumer/stop") ~> eventConsumerHttpCommands ~> check {
62 |         responseAs[String] shouldEqual "Event Consumer Stream Already Stopped"
63 |       }
64 |     }
65 | 
66 |     "return a Stream Started response for GET requests to /event_consumer/start" in {
67 |       Get("/event_consumer/start") ~> eventConsumerHttpCommands ~> check {
68 |         responseAs[String] shouldEqual "Event Consumer Stream Started"
69 |       }
70 |     }
71 |   }
72 | 
73 |   "The HTTP interface to tell the DataProducer Actor to publish messages to Kafka" should {
74 |     "return a Messages Produced message for GET requests to /data_producer/produce/10" in {
75 |       dataProducer.underlyingActor.producerStream = mockProducerStream
76 |       val producing = dataProducer.underlyingActor.publishData
77 |       dataProducer.underlyingActor.context.become(producing)
78 | 
79 |       Get("/data_producer/produce/10") ~> producerHttpCommands ~> check {
80 |         responseAs[String] shouldEqual "10 messages Produced as Ordered, Boss!"
81 | } 82 | } 83 | } 84 | } 85 | 86 | -------------------------------------------------------------------------------- /src/test/scala/akka/SettingsSpec.scala: -------------------------------------------------------------------------------- 1 | package akka 2 | 3 | import akka.actor.ActorSystem 4 | import com.omearac.settings.Settings 5 | import org.scalatest._ 6 | 7 | 8 | class SettingsSpec extends WordSpecLike with Matchers { 9 | val system = ActorSystem("SettingsSpec") 10 | val settings = Settings(system) 11 | 12 | "Consumer Settings" must { 13 | "read in correct values from config" in { 14 | settings.Http.host should ===("0.0.0.0") 15 | settings.KafkaConsumers.numberOfConsumers should ===(2) 16 | val consumerSettings = settings.KafkaConsumers.KafkaConsumerInfo 17 | 18 | val consumerA = consumerSettings("KafkaMessage") 19 | consumerA("bootstrap-servers") should ===("localhost:9092") 20 | consumerA("subscription-topic") should ===("TempChannel1") 21 | consumerA("groupId") should ===("group1") 22 | 23 | val consumerB = consumerSettings("ExampleAppEvent") 24 | consumerB("bootstrap-servers") should ===("localhost:9092") 25 | consumerB("subscription-topic") should ===("TempChannel2") 26 | consumerB("groupId") should ===("group2") 27 | } 28 | } 29 | 30 | "Producer Settings" must { 31 | "read in correct values from config" in { 32 | settings.KafkaProducers.numberOfProducers should ===(2) 33 | val producerSettings = settings.KafkaProducers.KafkaProducerInfo 34 | 35 | val producerA = producerSettings("KafkaMessage") 36 | producerA("bootstrap-servers") should ===("localhost:9092") 37 | producerA("publish-topic") should ===("TempChannel1") 38 | producerA("num.partitions") should ===("5") 39 | 40 | val producerB = producerSettings("ExampleAppEvent") 41 | producerB("bootstrap-servers") should ===("localhost:9092") 42 | producerB("publish-topic") should ===("TempChannel2") 43 | producerB("num.partitions") should ===("5") 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/ConsumerStreamManagerSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import akka.actor.ActorSystem 4 | import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit, TestProbe} 5 | import com.omearac.consumers.ConsumerStreamManager 6 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 7 | import com.omearac.shared.AkkaStreams 8 | import com.omearac.shared.EventMessages.{ActivatedConsumerStream, TerminatedConsumerStream} 9 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage} 10 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 11 | 12 | import scala.collection.mutable.ArrayBuffer 13 | 14 | 15 | class ConsumerStreamManagerSpec extends TestKit(ActorSystem("ConsumerStreamManagerSpec")) 16 | with DefaultTimeout with ImplicitSender 17 | with WordSpecLike with Matchers with BeforeAndAfterAll 18 | with AkkaStreams { 19 | 20 | val testConsumerStreamManager = TestActorRef(new ConsumerStreamManager) 21 | val consumerStreamManagerActor = testConsumerStreamManager.underlyingActor 22 | 23 | //Create an test event listener for the local message bus 24 | val testEventListener = TestProbe() 25 | system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent]) 26 | 27 | 28 | override def afterAll: Unit = { 29 | shutdown() 30 | } 31 | 32 | 33 | "Sending 
InitializeConsumerStream(self, KafkaMessage) to ConsumerStreamManager" should {
34 |     "initialize the stream for that particular message type, return ActivatedConsumerStream(\"TempChannel1\") and produce a local event" in {
35 |       testConsumerStreamManager ! InitializeConsumerStream(self, KafkaMessage)
36 |       val eventMsg = ActivatedConsumerStream("TempChannel1")
37 |       val expectedMsgs = Seq(eventMsg, "STREAM_INIT")
38 |       var receivedMsgs = ArrayBuffer[Any]()
39 | 
40 |       while (receivedMsgs.length < expectedMsgs.length) {
41 |         expectMsgPF() {
42 |           case msg: ActivatedConsumerStream => receivedMsgs += msg
43 |           case "STREAM_INIT" => receivedMsgs += "STREAM_INIT"
44 |         }
45 |       }
46 |       testEventListener.expectMsgPF() {
47 |         case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
48 |       }
49 |     }
50 |   }
51 | 
52 |   "Sending InitializeConsumerStream(self, ExampleAppEvent) to ConsumerStreamManager" should {
53 |     "initialize the stream for that particular message type, return ActivatedConsumerStream(\"TempChannel2\") and produce a local event" in {
54 |       testConsumerStreamManager ! InitializeConsumerStream(self, ExampleAppEvent)
55 |       val eventMsg = ActivatedConsumerStream("TempChannel2")
56 |       val expectedMsgs = Seq(eventMsg, "STREAM_INIT")
57 |       var receivedMsgs = ArrayBuffer[Any]()
58 | 
59 |       while (receivedMsgs.length < expectedMsgs.length) {
60 |         expectMsgPF() {
61 |           case msg: ActivatedConsumerStream => receivedMsgs += msg
62 |           case "STREAM_INIT" => receivedMsgs += "STREAM_INIT"
63 |         }
64 |       }
65 |       testEventListener.expectMsgPF() {
66 |         case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
67 |       }
68 |     }
69 |   }
70 | 
71 |   "Sending TerminateConsumerStream(\"TempChannel1\") to ConsumerStreamManager" should {
72 |     "terminate the stream for that particular message type associated with the channel; " +
73 |       "the Stream will return a \"STREAM_DONE\" msg and then a TerminatedConsumerStream(\"TempChannel1\") event will be produced locally" in {
74 |       //First resetting the internal state of the manager that keeps the active channels during testing
75 |       consumerStreamManagerActor.activeConsumerStreams -= "TempChannel1"
76 |       consumerStreamManagerActor.activeConsumerStreams -= "TempChannel2"
77 | 
78 |       testConsumerStreamManager ! InitializeConsumerStream(self, KafkaMessage)
79 |       val eventMsg = ActivatedConsumerStream("TempChannel1")
80 |       val expectedMsgs = Seq(eventMsg, "STREAM_INIT")
81 |       var receivedMsgs = ArrayBuffer[Any]()
82 | 
83 |       while (receivedMsgs.length < expectedMsgs.length) {
84 |         expectMsgPF() {
85 |           case msg: ActivatedConsumerStream => receivedMsgs += msg
86 |           case "STREAM_INIT" => receivedMsgs += "STREAM_INIT"
87 |         }
88 |       }
89 |       consumerStreamManagerActor.activeConsumerStreams.size shouldBe 1
90 |       testEventListener.expectMsgPF() {
91 |         case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
92 |       }
93 | 
94 |       testConsumerStreamManager ! 
TerminateConsumerStream("TempChannel1") 95 | while (receivedMsgs.length < expectedMsgs.length + 1) { 96 | expectMsgPF() { 97 | case "STREAM_DONE" => receivedMsgs += "STREAM_DONE" 98 | case "STREAM_INIT" => () //do nothing since this is left over from the previous message test 99 | } 100 | } 101 | Thread.sleep(500) 102 | consumerStreamManagerActor.activeConsumerStreams.size shouldBe 0 103 | val resultMessage2 = TerminatedConsumerStream("TempChannel1") 104 | testEventListener.expectMsgPF() { 105 | case ExampleAppEvent(_, _, m) => if (m == resultMessage2.toString) () else fail() 106 | } 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/ConsumerStreamSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import akka.actor.ActorSystem 4 | import akka.stream.scaladsl.{Flow, Sink, Source} 5 | import akka.testkit._ 6 | import com.omearac.consumers.ConsumerStreamManager.InitializeConsumerStream 7 | import com.omearac.consumers.{ConsumerStream, ConsumerStreamManager} 8 | import com.omearac.producers.ProducerStream 9 | import com.omearac.settings.Settings 10 | import com.omearac.shared.JsonMessageConversion.Conversion 11 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage} 12 | import org.apache.kafka.clients.producer.ProducerRecord 13 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 14 | 15 | import scala.collection.mutable.ArrayBuffer 16 | import scala.concurrent.duration._ 17 | 18 | 19 | class ConsumerStreamSpec extends TestKit(ActorSystem("ConsumerStreamSpec")) 20 | with DefaultTimeout with ImplicitSender 21 | with WordSpecLike with Matchers with BeforeAndAfterAll 22 | with ConsumerStream with ProducerStream { 23 | 24 | //Create an test event listener for the local message bus 25 | val testEventListener = TestProbe() 26 | system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent]) 27 | 28 | //Stuff for testing the stream 29 | val consumerSettings = Settings(system).KafkaConsumers 30 | val producerSettings = Settings(system).KafkaProducers 31 | val testConsumerStreamManager = TestActorRef(new ConsumerStreamManager) 32 | val probe = TestProbe() 33 | 34 | override def afterAll: Unit = { 35 | shutdown() 36 | } 37 | 38 | 39 | "Consuming KafkaMessages in JSON from from Kafka" should { 40 | "be converted to KafkaMessages and all of them then obtained by the Stream Sink " in { 41 | 42 | //Creating KafkaMessage Consumer Stream Components 43 | val consumerProps = consumerSettings.KafkaConsumerInfo("KafkaMessage") 44 | val consumerSource = createStreamSource(consumerProps) 45 | val consumerFlow = createStreamFlow[KafkaMessage] 46 | val consumerSink = Sink.actorRef(probe.ref, "DONE") 47 | consumerSource.via(consumerFlow).runWith(consumerSink) 48 | 49 | //Creating collection of received messages to compare sent ones to 50 | var receivedKafkaMsgs = ArrayBuffer[Any]() 51 | 52 | //Publish some test messages 53 | val numOfMessages = 10 54 | val kafkaMsgs = for {i <- 1 to numOfMessages} yield KafkaMessage("sometime", "somestuff", i) 55 | val producerProps = producerSettings.KafkaProducerInfo("KafkaMessage") 56 | val producerSource = Source(kafkaMsgs) 57 | val producerFlow = createStreamFlow[KafkaMessage](producerProps) 58 | val producerSink = createStreamSink(producerProps) 59 | producerSource.via(producerFlow).runWith(producerSink) 60 | 61 | while (receivedKafkaMsgs.length < kafkaMsgs.length) { 62 | 
62 |         probe.expectMsgPF(5 seconds) {
63 |           case msgBatch: ArrayBuffer[_] => for (msg <- msgBatch) {
64 |             if (kafkaMsgs.contains(msg)) {
65 |               receivedKafkaMsgs += msg
66 |               ()
67 |             } else fail()
68 |           }
69 |           case "complete" => ()
70 |           case other => println("Unknown Message: " + other); ()
71 |         }
72 |       }
73 |     }
74 |   }
75 | 
76 |   "Consuming ExampleAppEvent messages in JSON from Kafka" should {
77 |     "be converted to ExampleAppEvents and all of them then obtained by the Stream Sink " in {
78 | 
79 |       //Creating ExampleAppEvent Consumer Stream Components
80 |       val consumerProps = consumerSettings.KafkaConsumerInfo("ExampleAppEvent")
81 |       val consumerSource = createStreamSource(consumerProps)
82 |       val consumerFlow = createStreamFlow[ExampleAppEvent]
83 |       val consumerSink = Sink.actorRef(probe.ref, "DONE")
84 |       consumerSource.via(consumerFlow).runWith(consumerSink)
85 | 
86 |       //Creating collection of received messages to compare sent ones to
87 |       var receivedEventMsgs = ArrayBuffer[Any]()
88 | 
89 |       //Publish some test messages
90 |       val numOfMessages = 10
91 |       val eventMsgs = for {i <- 1 to numOfMessages} yield ExampleAppEvent("sometime", "senderID", s"Event number $i/$numOfMessages occurred")
92 |       val producerProps = producerSettings.KafkaProducerInfo("ExampleAppEvent")
93 |       val producerSource = Source(eventMsgs)
94 |       val producerFlow = createStreamFlow[ExampleAppEvent](producerProps)
95 |       val producerSink = createStreamSink(producerProps)
96 |       producerSource.via(producerFlow).runWith(producerSink)
97 | 
98 |       while (receivedEventMsgs.length < eventMsgs.length) {
99 |         probe.expectMsgPF(5 seconds) {
100 |           case msgBatch: ArrayBuffer[_] => for (msg <- msgBatch) {
101 |             if (eventMsgs.contains(msg)) {
102 |               receivedEventMsgs += msg
103 |               ()
104 |             } else fail()
105 |           }
106 |           case "complete" => ()
107 |         }
108 |       }
109 |     }
110 |   }
111 | 
112 |   "Consuming KafkaMessage messages in JSON from the Kafka ExampleAppEventChannel" should {
113 |     "fail to be converted to ExampleAppEvent messages and hence nothing should be obtained by the Stream Sink " in {
114 | 
115 |       //Manually creating a producer stream with a custom Flow which sends the messages to the wrong topic
116 |       val numOfMessages = 10
117 |       val kafkaMsgs = for {i <- 1 to numOfMessages} yield KafkaMessage("sometime", "somestuff", i)
118 |       val producerProps = producerSettings.KafkaProducerInfo("ExampleAppEvent")
119 |       val producerSource = Source(kafkaMsgs)
120 |       val producerFlow = Flow[KafkaMessage].map { msg =>
121 |         val stringJSONMessage = Conversion[KafkaMessage].convertToJson(msg)
122 |         val topicToPublish = "TempChannel2"
123 |         new ProducerRecord[Array[Byte], String](topicToPublish, 0, null, stringJSONMessage)
124 |       }
125 |       val producerSink = createStreamSink(producerProps)
126 |       producerSource.via(producerFlow).runWith(producerSink)
127 | 
128 |       //Creating collection of received messages to compare sent ones to
129 |       var receivedEventMsgs = ArrayBuffer[String]()
130 | 
131 |       //Start a normal consumer stream which will fail to convert the published messages
132 |       testConsumerStreamManager ! 
InitializeConsumerStream(self, ExampleAppEvent) 133 | 134 | //Using the already materialized and ran ConsumerStream 135 | while (receivedEventMsgs.length < kafkaMsgs.length) { 136 | testEventListener.expectMsgPF(5 seconds) { 137 | case ExampleAppEvent(_, _, msg) => 138 | if (msg contains "FailedMessageConversion") { 139 | receivedEventMsgs += msg 140 | } else if (msg contains "ActivatedConsumerStream") () else fail() 141 | } 142 | } 143 | } 144 | } 145 | } -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/DataConsumerSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import akka.actor.{Actor, ActorSystem, Props} 4 | import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit} 5 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 6 | import com.omearac.consumers.DataConsumer 7 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} 8 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 9 | 10 | import scala.collection.mutable.ArrayBuffer 11 | 12 | 13 | class DataConsumerSpec extends TestKit(ActorSystem("DataConsumerSpec")) 14 | with DefaultTimeout with ImplicitSender 15 | with WordSpecLike with Matchers with BeforeAndAfterAll { 16 | 17 | //Creating the Actors 18 | val testConsumer = TestActorRef(new DataConsumer) 19 | val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager") 20 | 21 | override def afterAll: Unit = { 22 | shutdown() 23 | } 24 | 25 | class MockStreamAndManager extends Actor { 26 | val receive: Receive = { 27 | case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT" 28 | case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE" 29 | } 30 | } 31 | 32 | 33 | "Sending ManuallyTerminateStream to DataConsumer in receive state" should { 34 | "return a Stream Already Stopped reply " in { 35 | testConsumer ! ManuallyTerminateStream 36 | expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) 37 | } 38 | } 39 | 40 | "Sending ManuallyInitializeStream to DataConsumer in receive state" should { 41 | "forward the message to the ConsumerStreamManager and change state to consuming" in { 42 | testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager 43 | testConsumer ! ManuallyInitializeStream 44 | expectMsg(ConsumerActorReply("Data Consumer Stream Started")) 45 | //Now check for state change 46 | Thread.sleep(750) 47 | testConsumer ! ManuallyInitializeStream 48 | expectMsg(ConsumerActorReply("Data Consumer Already Started")) 49 | } 50 | } 51 | 52 | "Sending STREAM_DONE to DataConsumer while in consuming state" should { 53 | "change state to idle state" in { 54 | val consuming = testConsumer.underlyingActor.consumingData 55 | testConsumer.underlyingActor.context.become(consuming) 56 | testConsumer ! "STREAM_DONE" 57 | //Now check for state change 58 | Thread.sleep(750) 59 | testConsumer ! ManuallyTerminateStream 60 | expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) 61 | } 62 | } 63 | "Sending ManuallyTerminateStream to DataConsumer while in consuming state" should { 64 | "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in { 65 | val consuming = testConsumer.underlyingActor.consumingData 66 | testConsumer.underlyingActor.context.become(consuming) 67 | testConsumer ! 
ManuallyTerminateStream 68 | expectMsg(ConsumerActorReply("Data Consumer Stream Stopped")) 69 | //Now check for state change 70 | Thread.sleep(750) 71 | testConsumer ! ManuallyTerminateStream 72 | expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) 73 | } 74 | } 75 | 76 | "Sending ConsumerMessageBatch message" should { 77 | "reply OK" in { 78 | val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1") 79 | val consuming = testConsumer.underlyingActor.consumingData 80 | testConsumer.underlyingActor.context.become(consuming) 81 | testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager 82 | testConsumer ! msgBatch 83 | expectMsg("OK") 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/DataProducerSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import akka.Done 4 | import akka.actor.ActorSystem 5 | import akka.stream.QueueOfferResult 6 | import akka.stream.QueueOfferResult.Enqueued 7 | import akka.stream.scaladsl.SourceQueueWithComplete 8 | import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe} 9 | import com.omearac.producers.DataProducer 10 | import com.omearac.producers.DataProducer.PublishMessages 11 | import com.omearac.shared.AkkaStreams 12 | import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished} 13 | import com.omearac.shared.KafkaMessages.ExampleAppEvent 14 | import com.typesafe.config.ConfigFactory 15 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 16 | 17 | import scala.concurrent.Future 18 | 19 | 20 | class DataProducerSpec extends TestKit(ActorSystem("DataProducerSpec", ConfigFactory.parseString( 21 | """ 22 | akka.loggers = ["akka.testkit.TestEventListener"] """))) 23 | with DefaultTimeout with ImplicitSender 24 | with WordSpecLike with Matchers with BeforeAndAfterAll 25 | with AkkaStreams { 26 | 27 | val testProducer = TestActorRef(new DataProducer) 28 | val producerActor = testProducer.underlyingActor 29 | 30 | val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] { 31 | override def complete(): Unit = println("complete") 32 | 33 | override def fail(ex: Throwable): Unit = println("fail") 34 | 35 | override def offer(elem: Any): Future[QueueOfferResult] = Future { 36 | Enqueued 37 | } 38 | 39 | override def watchCompletion(): Future[Done] = Future { 40 | Done 41 | } 42 | } 43 | 44 | override def afterAll: Unit = { 45 | shutdown() 46 | } 47 | 48 | //Create an test event listener for the local message bus 49 | val testEventListener = TestProbe() 50 | system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent]) 51 | 52 | 53 | "Sending ActivatedProducerStream to DataProducer in receive state" should { 54 | "save the stream ref and change state to producing " in { 55 | testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic") 56 | Thread.sleep(500) 57 | producerActor.producerStream should be(mockProducerStream) 58 | EventFilter.error(message = "DataProducer got the unknown message while producing: testMessage", occurrences = 1) intercept { 59 | testProducer ! 
"testMessage" 60 | } 61 | } 62 | } 63 | 64 | "Sending PublishMessages(number: Int) to DataProducer in publishData state" should { 65 | "return MessagesPublished(number: Int) and publish the local event " in { 66 | val producing = producerActor.publishData 67 | producerActor.context.become(producing) 68 | producerActor.producerStream = mockProducerStream 69 | val resultMessage = MessagesPublished(5) 70 | testProducer ! PublishMessages(5) 71 | expectMsg(resultMessage) 72 | testEventListener.expectMsgPF() { 73 | case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail() 74 | } 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/EventConsumerSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import akka.actor.{Actor, ActorSystem, Props} 4 | import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit} 5 | import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} 6 | import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} 7 | import com.omearac.consumers.EventConsumer 8 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 9 | 10 | import scala.collection.mutable.ArrayBuffer 11 | 12 | 13 | class EventConsumerSpec extends TestKit(ActorSystem("EventConsumerSpec")) 14 | with DefaultTimeout with ImplicitSender 15 | with WordSpecLike with Matchers with BeforeAndAfterAll { 16 | 17 | //Creating the Actors 18 | val testConsumer = TestActorRef(new EventConsumer) 19 | val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager") 20 | 21 | override def afterAll: Unit = { 22 | shutdown() 23 | } 24 | 25 | class MockStreamAndManager extends Actor { 26 | val receive: Receive = { 27 | case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT" 28 | case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE" 29 | } 30 | } 31 | 32 | 33 | "Sending ManuallyTerminateStream to EventConsumer in receive state" should { 34 | "return a Stream Already Stopped reply " in { 35 | testConsumer ! ManuallyTerminateStream 36 | expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) 37 | } 38 | } 39 | 40 | "Sending ManuallyInitializeStream to EventConsumer in receive state" should { 41 | "forward the message to the ConsumerStreamManager and change state to consuming" in { 42 | testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager 43 | testConsumer ! ManuallyInitializeStream 44 | expectMsg(ConsumerActorReply("Event Consumer Stream Started")) 45 | //Now check for state change 46 | Thread.sleep(750) 47 | testConsumer ! ManuallyInitializeStream 48 | expectMsg(ConsumerActorReply("Event Consumer Already Started")) 49 | } 50 | } 51 | 52 | "Sending STREAM_DONE to EventConsumer while in consuming state" should { 53 | "change state to idle state" in { 54 | val consuming = testConsumer.underlyingActor.consumingEvents 55 | testConsumer.underlyingActor.context.become(consuming) 56 | testConsumer ! "STREAM_DONE" 57 | //Now check for state change 58 | Thread.sleep(750) 59 | testConsumer ! 
ManuallyTerminateStream 60 | expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) 61 | } 62 | } 63 | "Sending ManuallyTerminateStream to EventConsumer while in consuming state" should { 64 | "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in { 65 | val consuming = testConsumer.underlyingActor.consumingEvents 66 | testConsumer.underlyingActor.context.become(consuming) 67 | testConsumer ! ManuallyTerminateStream 68 | expectMsg(ConsumerActorReply("Event Consumer Stream Stopped")) 69 | //Now check for state change 70 | Thread.sleep(750) 71 | testConsumer ! ManuallyTerminateStream 72 | expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) 73 | } 74 | } 75 | 76 | "Sending ConsumerMessageBatch message" should { 77 | "reply OK" in { 78 | val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1") 79 | val consuming = testConsumer.underlyingActor.consumingEvents 80 | testConsumer.underlyingActor.context.become(consuming) 81 | testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager 82 | testConsumer ! msgBatch 83 | expectMsg("OK") 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/test/scala/akka/kafka/EventProducerSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.kafka 2 | 3 | import java.util.Date 4 | 5 | import akka.Done 6 | import akka.actor.ActorSystem 7 | import akka.serialization.Serialization 8 | import akka.stream.QueueOfferResult 9 | import akka.stream.QueueOfferResult.Enqueued 10 | import akka.stream.scaladsl.SourceQueueWithComplete 11 | import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe} 12 | import com.omearac.producers.EventProducer 13 | import com.omearac.shared.AkkaStreams 14 | import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished} 15 | import com.omearac.shared.KafkaMessages.ExampleAppEvent 16 | import com.typesafe.config.ConfigFactory 17 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} 18 | 19 | import scala.concurrent.Future 20 | 21 | 22 | class EventProducerSpec extends TestKit(ActorSystem("EventProducerSpec",ConfigFactory.parseString(""" 23 | akka.loggers = ["akka.testkit.TestEventListener"] """))) 24 | with DefaultTimeout with ImplicitSender 25 | with WordSpecLike with Matchers with BeforeAndAfterAll 26 | with AkkaStreams { 27 | 28 | val testProducer = TestActorRef(new EventProducer) 29 | val producerActor = testProducer.underlyingActor 30 | val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] { 31 | override def complete(): Unit = println("complete") 32 | 33 | override def fail(ex: Throwable): Unit = println("fail") 34 | 35 | override def offer(elem: Any): Future[QueueOfferResult] = Future{Enqueued} 36 | 37 | override def watchCompletion(): Future[Done] = Future{Done} 38 | } 39 | 40 | override def afterAll: Unit = { 41 | shutdown() 42 | } 43 | 44 | //Create an test event listener for the local message bus 45 | val testEventListener = TestProbe() 46 | system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent]) 47 | 48 | 49 | "Sending ActivatedProducerStream to EventProducer in receive state" should { 50 | "save the stream ref and change state to producing " in { 51 | testProducer ! 
ActivatedProducerStream(mockProducerStream, "TestTopic")
52 |       Thread.sleep(500)
53 |       producerActor.producerStream should be(mockProducerStream)
54 |       EventFilter.error(message = "EventProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
55 |         testProducer ! "testMessage"
56 |       }
57 |     }
58 |   }
59 | 
60 |   "Sending ExampleAppEvent to system bus while EventProducer is in publishEvent state" should {
61 |     "offer the ExampleAppEvent to the stream " in {
62 |       val producingState = producerActor.publishEvent
63 |       producerActor.context.become(producingState)
64 |       producerActor.producerStream = mockProducerStream
65 |       val dateFormat = new java.text.SimpleDateFormat("dd:MM:yy:HH:mm:ss.SSS")
66 |       lazy val timetag = dateFormat.format(new Date(System.currentTimeMillis()))
67 |       val eventMsg = MessagesPublished(5)
68 |       val testMessage = ExampleAppEvent(timetag, Serialization.serializedActorPath(self), eventMsg.toString)
69 |       system.eventStream.publish(testMessage)
70 |       testEventListener.expectMsgPF() {
71 |         case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
72 |       }
73 |     }
74 |   }
75 | }
76 | 
--------------------------------------------------------------------------------
/src/test/scala/akka/kafka/ProducerStreamManagerSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.kafka
2 | 
3 | import akka.actor.ActorSystem
4 | import akka.stream.scaladsl.SourceQueueWithComplete
5 | import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit, TestProbe}
6 | import com.omearac.producers.ProducerStreamManager
7 | import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
8 | import com.omearac.shared.AkkaStreams
9 | import com.omearac.shared.EventMessages.ActivatedProducerStream
10 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
11 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
12 | 
13 | 
14 | class ProducerStreamManagerSpec extends TestKit(ActorSystem("ProducerStreamManagerSpec"))
15 |   with DefaultTimeout with ImplicitSender
16 |   with WordSpecLike with Matchers with BeforeAndAfterAll
17 |   with AkkaStreams {
18 | 
19 |   val testProducerStreamManager = TestActorRef(new ProducerStreamManager)
20 |   val producerStreamManagerActor = testProducerStreamManager.underlyingActor
21 | 
22 |   //Create a test event listener for the local message bus
23 |   val testEventListener = TestProbe()
24 |   system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])
25 | 
26 |   override def afterAll: Unit = {
27 |     shutdown()
28 |   }
29 | 
30 | 
31 |   "Sending InitializeProducerStream(self, KafkaMessage) to ProducerStreamManager" should {
32 |     "initialize the stream for that particular message type, return ActivatedProducerStream(streamRef, \"TempChannel1\") and produce a local event" in {
33 |       testProducerStreamManager ! InitializeProducerStream(self, KafkaMessage)
34 |       Thread.sleep(500)
35 |       var streamRef: SourceQueueWithComplete[Any] = null
36 |       expectMsgPF() {
37 |         case ActivatedProducerStream(sr, kt) => if (kt == "TempChannel1") {
38 |           streamRef = sr; ()
39 |         } else fail()
40 |       }
41 | 
42 |       Thread.sleep(500)
43 |       val resultMessage = ActivatedProducerStream(streamRef, "TempChannel1")
44 |       testEventListener.expectMsgPF() {
45 |         case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
46 |       }
47 |     }
48 |   }
49 | 
50 |   "Sending InitializeProducerStream(self, ExampleAppEvent) to ProducerStreamManager" should {
51 |     "initialize the stream for that particular message type, return ActivatedProducerStream(streamRef, \"TempChannel2\") and produce a local event" in {
52 |       testProducerStreamManager ! InitializeProducerStream(self, ExampleAppEvent)
53 |       Thread.sleep(500)
54 |       var streamRef: SourceQueueWithComplete[Any] = null
55 |       expectMsgPF() {
56 |         case ActivatedProducerStream(sr, kt) => if (kt == "TempChannel2") {
57 |           streamRef = sr; ()
58 |         } else fail()
59 |       }
60 | 
61 |       Thread.sleep(500)
62 |       val resultMessage = ActivatedProducerStream(streamRef, "TempChannel2")
63 |       testEventListener.expectMsgPF() {
64 |         case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
65 |       }
66 |     }
67 |   }
68 | }
69 | 
--------------------------------------------------------------------------------
/src/test/scala/akka/kafka/ProducerStreamSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.kafka
2 | 
3 | import akka.actor.ActorSystem
4 | import akka.stream.scaladsl.{Sink, Source}
5 | import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit, TestProbe}
6 | import com.omearac.consumers.ConsumerStream
7 | import com.omearac.producers.ProducerStream
8 | import com.omearac.settings.Settings
9 | import com.omearac.shared.JsonMessageConversion.Conversion
10 | import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
11 | import org.apache.kafka.clients.producer.ProducerRecord
12 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
13 | 
14 | 
15 | class ProducerStreamSpec extends TestKit(ActorSystem("ProducerStreamSpec"))
16 |   with DefaultTimeout with ImplicitSender
17 |   with WordSpecLike with Matchers with BeforeAndAfterAll
18 |   with ConsumerStream with ProducerStream {
19 | 
20 |   val settings = Settings(system).KafkaProducers
21 |   val probe = TestProbe()
22 | 
23 |   override def afterAll: Unit = {
24 |     shutdown()
25 |   }
26 | 
27 |   "Sending KafkaMessages to the KafkaMessage producerStream" should {
28 |     "be converted to JSON and obtained by the Stream Sink " in {
29 | 
30 |       //Creating Producer Stream Components for publishing KafkaMessages
31 |       val producerProps = settings.KafkaProducerInfo("KafkaMessage")
32 |       val numOfMessages = 50
33 |       val kafkaMsgs = for { i <- 0 to numOfMessages} yield KafkaMessage("sometime", "somestuff", i)
34 |       val producerSource = Source(kafkaMsgs)
35 |       val producerFlow = createStreamFlow[KafkaMessage](producerProps)
36 |       val producerSink = Sink.actorRef(probe.ref, "complete")
37 | 
38 |       val jsonKafkaMsgs = for { msg <- kafkaMsgs} yield Conversion[KafkaMessage].convertToJson(msg)
39 | 
40 |       producerSource.via(producerFlow).runWith(producerSink)
41 |       for (i <- 0 to jsonKafkaMsgs.length) {
42 |         probe.expectMsgPF() {
43 |           case m: ProducerRecord[_,_] => if (jsonKafkaMsgs.contains(m.value())) () else fail()
44 |           case "complete" => ()
45 |         }
46 |       }
47 |     }
48 |   }
49 | 
50 |   "Sending ExampleAppEvent messages to the EventMessage producerStream" should {
51 |     "be converted to JSON and obtained by the Stream Sink " in {
52 | 
53 |       //Creating Producer Stream Components for publishing ExampleAppEvent messages
54 |       val producerProps = settings.KafkaProducerInfo("ExampleAppEvent")
55 |       val numOfMessages = 50
56 |       val eventMsgs = for { i <- 0 to numOfMessages} yield ExampleAppEvent("sometime", "senderID", s"Event number $i occurred")
57 | 
58 |       val producerSource = Source(eventMsgs)
59 |       val producerFlow = createStreamFlow[ExampleAppEvent](producerProps)
60 |       val producerSink = Sink.actorRef(probe.ref, "complete")
61 | 
62 |       val jsonAppEventMsgs = for { msg <- eventMsgs} yield Conversion[ExampleAppEvent].convertToJson(msg)
63 |       producerSource.via(producerFlow).runWith(producerSink)
64 |       for (i <- 0 to jsonAppEventMsgs.length) {
65 |         probe.expectMsgPF() {
66 |           case m: ProducerRecord[_,_] => if (jsonAppEventMsgs.contains(m.value())) () else fail()
67 |           case "complete" => ()
68 |         }
69 |       }
70 |     }
71 |   }
72 | }
73 | 
--------------------------------------------------------------------------------
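Taken together, the specs above mirror the wiring an application entry point performs: create the stream managers, create the producer and consumer actors, and hand the actor refs to the managers. A minimal end-to-end sketch (illustrative only; the repository's actual bootstrap lives in src/main/scala/com/omearac/Main.scala, which is not reproduced in this dump):

    import akka.actor.{ActorSystem, Props}
    import com.omearac.consumers.ConsumerStreamManager.InitializeConsumerStream
    import com.omearac.consumers.{ConsumerStreamManager, DataConsumer}
    import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
    import com.omearac.producers.{DataProducer, ProducerStreamManager}
    import com.omearac.shared.KafkaMessages.KafkaMessage

    object WiringSketch extends App {
      implicit val system = ActorSystem("example")

      //The managers own the akka-kafka streams; the producer/consumer actors are the stream endpoints
      val producerStreamManager = system.actorOf(Props(new ProducerStreamManager))
      val consumerStreamManager = system.actorOf(Props(new ConsumerStreamManager))
      val dataProducer = system.actorOf(Props(new DataProducer))
      val dataConsumer = system.actorOf(Props(new DataConsumer))

      //KafkaMessage selects the KafkaMessage-typed config blocks (p1/c1 in the test application.conf)
      producerStreamManager ! InitializeProducerStream(dataProducer, KafkaMessage)
      consumerStreamManager ! InitializeConsumerStream(dataConsumer, KafkaMessage)
    }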