├── .gitignore
├── LICENSE
├── README.md
├── build.sbt
├── img
├── chain.png
├── consumer.png
├── direct_exchange.png
├── fanout_exchange.png
├── flow.png
├── producer.png
├── queue.png
├── runnable_flow.png
├── sink.png
├── source.png
└── topic_exchange.png
├── project
├── build.properties
└── plugins.sbt
└── src
└── test
├── resources
├── application.conf
├── csv
│ └── starwars.csv
├── logback.xml
└── routes
└── scala
└── com
└── github
└── dnvriend
└── streams
├── TestSpec.scala
├── actorpublisher
└── AkkaPublisherSubscriberTest.scala
├── collection
└── SourceFromCollectionTest.scala
├── customstage
├── CustomStreamProcessingTest.scala
├── Ex1IdentityStageTest.scala
├── Ex2CustomMapTest.scala
├── Ex3CustomFilterTest.scala
└── Ex4StatefulStageTest.scala
├── failure
└── FailureTest.scala
├── flow
├── AkkaStreamsTest.scala
├── FlowErrorTest.scala
├── OverflowStrategyTest.scala
├── RunnableFlowTest.scala
└── SimpleFlowTest.scala
├── graph
└── FlowTest.scala
├── http
├── StreamingClient.scala
└── StreamingClientTest.scala
├── io
└── FileIOTest.scala
├── nesting
└── FlatteningStagesTest.scala
├── sink
├── ActorRefWithAckTest.scala
└── ActorSubscriberTest.scala
├── source
├── FailedSource.scala
├── QueueSourceTest.scala
└── SourceTest.scala
├── stage
├── async
│ ├── MapAsyncStageTest.scala
│ └── MapAsyncUnorderedStageTest.scala
├── fanout
│ └── BroadcastStageTest.scala
├── simple
│ ├── CollectStageTest.scala
│ ├── DropStageTest.scala
│ ├── DropWhileStageTest.scala
│ ├── FilterStageTest.scala
│ ├── FoldStageTest.scala
│ ├── GroupByTest.scala
│ ├── GroupedStageTest.scala
│ ├── MapAsyncStageTest.scala
│ ├── MapConcatTest.scala
│ ├── MapStageTest.scala
│ ├── RecoverStageTest.scala
│ ├── ScanStageTest.scala
│ ├── TakeStageTest.scala
│ └── TakeWhileStageTest.scala
└── timer
│ └── TakeWithinStageTest.scala
├── streammaterialization
└── StreamMaterializationTest.scala
└── util
├── ClasspathResources.scala
└── InputCustomer.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | /RUNNING_PID
2 | /logs/
3 | /project/*-shim.sbt
4 | /project/project/
5 | /project/target/
6 | /target/
7 | *.iml
8 | .idea/
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Introduction to Akka Streams
2 | This project is for studying purposes only. It contains a lot of information I shamelessly copied from the Internet and will
3 | never be published apart from GitHub. It contains a lot of try-outs regarding the akka-streams project and serves as a proofing
4 | ground for testing out Akka Streams, the reactive-streams standard and the interoperability between all libraries and components that
5 | will support the akka-streams standard. In my humble opinion the standard will be ground-breaking in how engineers design enterprise
6 | solutions, and will finally provide an open standard that allows several systems to operate reactively.
7 |
8 | > Stream processing is a different paradigm to the Actor Model or to Future composition, therefore it may take some
9 | > careful study of this subject until you feel familiar with the tools and techniques.
10 | -- Akka Streams Documentation
11 |
12 | ## Documentation
13 | - [Akka Streams Documentation](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/scala.html)
14 | - [Quick Start - Reactive Tweets](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/scala/stream-quickstart.html#stream-quickstart-scala)
15 | - [Akka Streams API](http://doc.akka.io/api/akka-stream-and-http-experimental/1.0-RC2/)
16 | - [Design Principles behind Reactive Streams](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/stream-design.html#stream-design)
17 | - [Streams Cookbook](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/scala/stream-cookbook.html#stream-cookbook-scala)
18 | - [Overview of built-in stages and their semantics](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/stages-overview.html#stages-overview)
19 | - [Integrating with Actors, external services and reactive streams](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/scala/stream-integrations.html)
20 | - [Reactive Streams](http://www.reactive-streams.org/)
21 |
22 | ## Blogs
23 | - [Reactive Streams 1.0.0 interview Viktor Klang](http://scalatimes.us2.list-manage2.com/track/click?u=ba834c562d82d9aba5eaf90ba&id=f0c61fc8e5&e=9e9b2b2bbe)
24 | - [Bryan Gilbert - RANDOM.NEXT](http://bryangilbert.com/blog/2015/02/04/akka-reactive-streams/)
25 | - [Jon Brisbin - The Reactive Streams Project: Tribalism as a Force for Good](http://jbrisbin.com/post/82994020622/the-reactive-streams-project-tribalism-as-a-force)
26 | - [Adam Warski - Reactive Queue with Akka Reactive Streams](http://www.warski.org/blog/2014/06/reactive-queue-with-akka-reactive-streams/)
27 | - [Boldradius - Introduction to Akka Streams](http://boldradius.com/blog-post/VS0NpTAAADAACs_E/introduction-to-akka-streams)
28 | - [Scraping Reddit with Akka Streams 1.0](https://github.com/pkinsky/akka-streams-example)
29 | - [Backpressure in action with websockets and akka-streams](http://www.smartjava.org/content/backpressure-action-websockets-and-akka-streams)
30 |
31 | ## Testing
32 | - [Testing Streams](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC1/scala/stream-testkit.html)
33 |
34 | ## Slides
35 | - [Konrad Malawski - Reactive Streams / Akka Streams - GeeCON Prague 2014](http://www.slideshare.net/ktoso/reactive-streams-akka-streams-geecon-prague-2014)
36 | - [Reactive Streams and RabbitMQ](http://www.slideshare.net/mkiedys/reactive-streams-and-rabbitmq)
37 |
38 | ## Github
39 | - [ScalaConsultants Team Blog - Akka Streams and RabbitMQ](http://blog.scalac.io/2014/06/23/akka-streams-and-rabbitmq.html)
40 | - [Reactive RabbitMq Activator Template](https://github.com/jczuchnowski/rabbitmq-akka-stream#master)
41 | - [Iterators - Reactive Microservices](https://github.com/theiterators/reactive-microservices)
42 | - [Iterators - Akka HTTP microservice example](https://github.com/theiterators/akka-http-microservice)
43 |
44 | ## Activator Template
45 | - [Akka Streams Activator Template](http://www.typesafe.com/activator/template/akka-stream-scala)
46 |
47 | ## Video
48 | - [Mathias Doenitz](https://twitter.com/sirthias) - [Parleys - Scala Days Amsterdam 2015 - The reactive streams implementation landscape](https://www.parleys.com/tutorial/the-reactive-streams-implementation-landscape)
49 | - [Youtube - Konrad Malawski - Fresh from the oven - ScalarConf Warsaw 2015](https://www.youtube.com/watch?v=WnTSuYL4_wU)
50 | - [Youtube - Dr. Roland Kuhn - Akka Stream and Akka HTTP reactive web toolkit](https://www.parleys.com/tutorial/akka-http-reactive-web-toolkit)
51 | - [Youtube - Introducing Reactive Streams](https://www.youtube.com/watch?v=khmVMvlP_QA)
52 | - [Youtube - Spray and Akka HTTP](https://www.youtube.com/watch?v=o5PUDI4qi10)
53 | - [Youtube - Reactive Stream Processing with Akka Streams](https://www.youtube.com/watch?v=XCP6zg46utU)
54 | - [Youtube - Netflix JavaScript Talks - Async JavaScript with Reactive Extensions](https://www.youtube.com/watch?v=XRYN2xt11Ek)
55 | - [Youtube - Asynchronous Programming at Netflix](https://www.youtube.com/watch?v=gawmdhCNy-A)
56 | - [Youtube - Building Reactive applications with Spring Reactor and the Reactive Streams standard](https://www.youtube.com/watch?v=AvwZEWu5PPc)
57 | - [Youtube - Typesafe - Play All Day: Reactive Streams and Play 3.0](https://www.youtube.com/watch?v=0i0RRZvARkM)
58 | - [Youtube - Dr. Roland Kuhn - Reactive Streams: Handling Data-Flows the Reactive Way](https://www.youtube.com/watch?v=oUnfAcwDQr4)
59 | - [Youtube - Reactive Streams - Tim Harper](https://www.youtube.com/watch?v=xJn2kMHUl6s)
60 | - [Youtube - Reactive microservices with Reactor - Stephane Maldini](https://www.youtube.com/watch?v=lzkBo3YTvAQ)
61 | - [Youtube - Reactive Stream Processing with kafka-rx](https://www.youtube.com/watch?v=S-Ynyel9pkk)
62 | - [Youtube - Technology Hour - Implementing the Reactive Manifesto with Akka - Adam Warski](https://www.youtube.com/watch?v=LXEhQPEupX8)
63 | - [What does it mean to be Reactive? - Erik Meijer](https://www.youtube.com/watch?v=sTSQlYX5DU0)
64 | - [Typesafe - Going Reactive in Java with Typesafe Reactive Platform](https://www.youtube.com/watch?v=y70Z5S2eSIo)
65 | - [Typesafe - Deep Dive into the Typesafe Reactive Platform - Akka and Scala](https://www.youtube.com/watch?v=fMWzKEN6uTY)
66 | - [Typesafe - Deep Dive into the Typesafe Reactive Platform - Activator and Play](https://www.youtube.com/watch?v=EJl9mQ0051g)
67 | - [Typesafe - What Have The Monads Ever Done For Us with Dick Wall](https://www.youtube.com/watch?v=2IYNPUp751g)
68 | - [Typesafe - Deep Dive into the Typesafe Reactive Platform - Ecosystem and Tools](https://www.youtube.com/watch?v=3nNerwsqrQI)
69 |
70 | ## Stream Materialization
71 | When constructing flows and graphs in Akka Streams think of them as preparing a blueprint, an execution plan. Stream materialization is the process of taking a stream description (the graph) and allocating all the necessary resources it needs in order to run. In the case of Akka Streams this often means starting up Actors which power the processing, but is not restricted to that - it could also mean opening files or socket connections etc. – depending on what the stream needs.
72 |
73 | Materialization is triggered at so-called "terminal operations". Most notably this includes the various forms of the `run()` and `runWith()` methods defined on flow elements as well as a small number of special syntactic sugars for running with well-known sinks, such as `runForeach(el => ...)` (an alias for `runWith(Sink.foreach(el => ...))`).
74 |
75 | Reusing instances of linear computation stages (`Source`, `Sink`, `Flow`) inside `FlowGraphs` is legal, yet will usually materialize that stage multiple times. An alternative is to pass existing graphs—of any shape—into the factory method that produces a new graph `FlowGraph.closed(topHeadSink, bottomHeadSink) { implicit builder => ...}` The difference between these approaches is that importing using `b.add(...)` ignores the materialized value of the imported graph while importing via the factory method allows its inclusion, and reuses the materialized Actors.
76 |
77 | # Akka stream extensions
78 | > Streamz is a resource combinator library for scalaz-stream. It allows `Process` instances to consume from and produce to external sources and sinks.
79 |
80 | - [Martin Krasser - Streamz](https://github.com/krasserm/streamz)
81 |
82 | > Develop generic Sources/Flows/Sinks not provided out-of-the-box by Akka-Stream. It supports Postgres, Elasticsearch, Shapeless and AWS.
83 |
84 | - [MfgLabs - Akka Stream Extensions](http://mfglabs.github.io/akka-stream-extensions/)
85 |
86 | # Nice projects with Akka Streams
87 | > A playground of video processing examples in Akka streams and Scala.
88 |
89 | - [Josh Suereth - Streamerz](https://github.com/jsuereth/streamerz)
90 |
91 | > Sample Play application using Akka actors to stream tweets over websockets.
92 |
93 | - [Eric Mittelhammer - Reactive Tweets](https://github.com/ericmittelhammer/reactive-tweets)
94 |
95 | ## Reactive Kafka
96 | > Reactive Streams wrapper for Apache Kafka. -- [Reactive Kafka](https://github.com/softwaremill/reactive-kafka)
97 |
98 | - [Apache Kafka](http://kafka.apache.org/)
99 | - [GitHub - Reactive Kafka](https://github.com/softwaremill/reactive-kafka)
100 |
101 | *Note:* You will need a configured [Apache Kafka](http://kafka.apache.org/) and [Apache Zookeeper](https://zookeeper.apache.org/).
102 |
103 | ```scala
104 | import akka.actor.ActorSystem
105 | import akka.stream.ActorFlowMaterializer
106 | import akka.stream.scaladsl.{Sink, Source}
107 | import com.softwaremill.react.kafka.ReactiveKafka
108 |
109 | implicit val materializer = ActorFlowMaterializer()
110 | implicit val actorSystem = ActorSystem("ReactiveKafka")
111 |
112 | val kafka = new ReactiveKafka(host = "localhost:9092", zooKeeperHost = "localhost:2181")
113 | val publisher = kafka.consume("lowercaseStrings", "groupName", new StringDecoder())
114 | val subscriber = kafka.publish("uppercaseStrings", "groupName", new StringEncoder())
115 |
116 |
117 | Source(publisher).map(_.toUpperCase).to(Sink(subscriber)).run()
118 | ```
119 |
120 | ## Reactive Rabbit
121 | > Reactive Streams driver for AMQP protocol. Powered by RabbitMQ library. -- [Reactive Rabbit](https://github.com/ScalaConsultants/reactive-rabbit)
122 |
123 | - [RabbitMq](https://www.rabbitmq.com/)
124 | - [GitHub - Reactive Rabbit](https://github.com/ScalaConsultants/reactive-rabbit)
125 | - [Activator Template - RabbitMQ Akka Stream](https://github.com/jczuchnowski/rabbitmq-akka-stream#master)
126 |
127 | Note: You will need a RabbitMQ instance and a configured `reactive-rabbit` connection, see the [reference.conf](https://github.com/ScalaConsultants/reactive-rabbit/blob/master/src/main/resources/reference.conf) for more information. Better yet, fire up [Typesafe Activator](https://www.typesafe.com/get-started) and try out the [RabbitMQ Akka Stream](https://github.com/jczuchnowski/rabbitmq-akka-stream) Activator Template.
128 |
129 | ```scala
130 | import akka.actor.ActorSystem
131 | import akka.stream.ActorFlowMaterializer
132 | import akka.stream.scaladsl.{Sink, Source}
133 | import io.scalac.amqp.Connection
134 |
135 | // streaming invoices to Accounting Department
136 | val connection = Connection()
137 | val queue = connection.consume(queue = "invoices")
138 | val exchange = connection
139 | .publish(
140 | exchange = "accounting_department",
141 | routingKey = "invoices"
142 | )
143 |
144 | implicit val system = ActorSystem()
145 | implicit val materializer = ActorFlowMaterializer()
146 |
147 | // (queue) ~> (sink)
148 | Source(queue).map(_.message).to(Sink(exchange)).run()
149 | ```
150 |
151 | # RabbitMQ
152 | > RabbitMQ is open source message broker software (sometimes called message-oriented middleware) that implements the Advanced Message Queuing Protocol (AMQP). The RabbitMQ server is written in the Erlang programming language and is built on the Open Telecom Platform framework for clustering and failover. Client libraries to interface with the broker are available for all major programming languages. -- [Wikipedia](http://en.wikipedia.org/wiki/RabbitMQ)
153 |
154 | - [RabbitMQ Website](http://www.rabbitmq.com/)
155 | - [RabbitMQ Simulator](http://tryrabbitmq.com/)
156 |
157 | ## Blogs
158 | - [LostTechies - RabbitMQ: Exchange Types](https://lostechies.com/derekgreer/2012/03/28/rabbitmq-for-windows-exchange-types/)
159 |
160 | ## Concepts
161 | * *Exchange:* This is the initial destination for all published messages and the entity in charge of applying routing rules for these messages to reach their destinations. Exchanges control the routing of messages to queues. Each exchange type defines a specific routing algorithm which the server uses to determine which bound queues a published message should be routed to. Routing rules include the following: direct (point-to-point), topic (publish-subscribe) and fanout (multicast).
162 | * *Queue:* This is the final destination for messages ready to be consumed. A single message can be copied and can reach multiple queues if the exchange's routing rule says so. RabbitMQ contains a special exchange, the *default exchange* (a.k.a. *nameless exchange*) with an empty string as its name. When a queue is declared, that new queue will automatically be bound to that *default exchange*, using the queue name as the *routing key*. This means that you can send messages using an empty string for the exchange name which will use the default exchange, but use the queue name for the routing-key. This way the bind will filter out messages for the queue and only those messages will be sent to the queue.
163 | * *Binding:* This is a virtual connection between an exchange and a queue that enables messages to flow from the former to the latter. A routing key can be associated with a binding in relation to the exchange routing rule. A binding is a relationship between an exchange and a queue. This can be simply read as: the queue is interested in messages from this exchange. A bind can have a *binding key* set. The meaning of a binding key depends on the exchange type it is configured to. Fanout exchanges will ignore this value.
164 |
165 | ## RabbitMQ Messaging Model
166 | The core idea in the messaging model in RabbitMQ is that the producer never sends any messages directly to a queue. Actually, quite often the producer doesn't even know if a message will be delivered to any queue at all.
167 |
168 | Instead, the producer can only send messages to an *exchange*. An exchange is a very simple thing. On one side it receives messages from producers and the other side it pushes them to queues or other exchanges. The exchange must know exactly what to do with a message it receives. Should it be appended to a particular queue? Should it be appended to many queues? Or should it get discarded. The rules for that are defined by the exchange type.
169 |
170 | There are a few exchange types available: *direct* (point-to-point), *topic* (publish-subscribe) and *fanout* (multicast).
171 |
172 | ## Fanout Exchange
173 | The *fanout exchange* is very simple. As you can probably guess from the name, it just broadcasts all the messages it receives to all the queues it knows. It does nothing with *routing keys* and only does mindless broadcasting, not very exciting.
174 |
175 | The Fanout exchange type routes messages to all bound queues indiscriminately. If a routing key is provided, it will simply be ignored. The following illustrates how the fanout exchange type works:
176 |
177 | 
178 |
179 | When using the fanout exchange type, different queues can be declared to handle messages in different ways. For instance, a message indicating a customer order has been placed might be received by one queue whose consumers fulfill the order, another whose consumers update a read-only history of orders, and yet another whose consumers record the order for reporting purposes.
180 |
181 | ## Direct Exchange
182 | The *direct exchange* routing algorithm is also very simple - a message goes to the queues whose binding key exactly matches the routing key of the message. Also not very exciting. It is legal to have multiple direct bindings with several different *binding keys*. Eg, having three bindings from an exchange with keys 'red', 'green', 'yellow' will route only messages with the *routing key* 'red', 'green' and 'yellow' to the queue, all other messages will be discarded! It is also possible to route the same message with two bindings with the same binding key to two queues. In that case the direct exchange will act like a broadcaster.
183 |
184 | The Direct exchange type routes messages with a routing key equal to the routing key declared by the binding queue. Messages sent to the exchange with a routing key that has no binding will be dropped and will never reach a queue, ever! The following illustrates how the direct exchange type works:
185 |
186 | 
187 |
188 | The Direct exchange type is useful when you would like to distinguish messages published to the same exchange using a simple string identifier. Every queue is automatically bound to a *default exchange* (a.k.a. *nameless exchange*) using a routing key equal to the queue name. This default exchange is declared as a Direct exchange.
189 |
190 | ## Topic Exchange
191 | Messages sent to a *topic exchange* can't have an arbitrary routing_key - it must be a list of words, delimited by dots. The words can be anything, but usually they specify some features connected to the message. A few valid routing key examples: "stock.usd.nyse", "nyse.vmw", "quick.orange.rabbit". There can be as many words in the routing key as you like, up to the limit of 255 bytes.
192 |
193 | The binding key must also be in the same form. The logic behind the topic exchange is similar to a direct one - a message sent with a particular routing key will be delivered to all the queues that are bound with a matching binding key. However there are two important special cases for binding keys:
194 |
195 | * `*` (star) can substitute for exactly one word.
196 | * `#` (hash) can substitute for zero or more words.
197 |
198 | The topic exchange is powerful and can behave like other exchanges. For example, the fanout exchange does a simple broadcast. The direct exchange can act like a topic exchange when two bindings with the same binding key are configured to two queues; then a message with that routing key will be sent to both queues. In the case of a topic exchange, when a queue is bound with the "#" (hash) binding key it will receive all the messages, regardless of the routing key — just like a fanout exchange. So a topic exchange can also behave like a fanout exchange when configured with a single "#" (hash).
199 |
200 | For example, consider a routing key that consists of three words (two dots). The first word in the routing key will describe a celerity, the second a colour and the third a species: "&lt;celerity&gt;.&lt;colour&gt;.&lt;species&gt;".
201 |
202 | We created three bindings: Q1 is bound with binding key "*.orange.*" and Q2 with "*.*.rabbit" and "lazy.#".
203 |
204 | These bindings can be summarised as:
205 |
206 | * Q1 is interested in all the orange animals.
207 | * Q2 wants to hear everything about rabbits, and everything about lazy animals.
208 |
209 | A message with a routing key set to "quick.orange.rabbit" will be delivered to both queues. Message "lazy.orange.elephant" also will go to both of them. On the other hand "quick.orange.fox" will only go to the first queue, and "lazy.brown.fox" only to the second. "lazy.pink.rabbit" will be delivered to the second queue only once, even though it matches two bindings. "quick.brown.fox" doesn't match any binding so it will be discarded.
210 |
211 | What happens if we break our contract and send a message with one or four words, like "orange" or "quick.orange.male.rabbit"? Well, these messages won't match any bindings and will be lost.
212 |
213 | On the other hand "lazy.orange.male.rabbit", even though it has four words, will match the last binding and will be delivered to the second queue.
214 |
215 | When special characters "*" (star) and "#" (hash) aren't used in bindings, the topic exchange will behave just like a direct one.
216 |
217 | The Topic exchange type routes messages to queues whose routing key matches all, or a portion of a routing key. With topic exchanges, messages are published with routing keys containing a series of words separated by a dot (e.g. “word1.word2.word3”). Queues binding to a topic exchange supply a matching pattern for the server to use when routing the message. Patterns may contain an asterisk (“*”) to match a word in a specific position of the routing key, or a hash (“#”) to match zero or more words. For example, a message published with a routing key of “honda.civic.navy” would match queues bound with “honda.civic.navy”, “*.civic.*”, “honda.#”, or “#”, but would not match “honda.accord.navy”, “honda.accord.silver”, “*.accord.*”, or “ford.#”. The following illustrates how the topic exchange type works:
218 |
219 | 
220 |
221 | The Topic exchange type is useful for directing messages based on multiple categories (e.g. product type and shipping preference), or for routing messages originating from multiple sources (e.g. logs containing an application name and severity level).
222 |
223 | ## Docker
224 | - [library/rabbitmq](https://registry.hub.docker.com/u/library/rabbitmq/)
225 |
226 | ## GitHub
227 | - [RabbitMQ](https://github.com/docker-library/docs/tree/master/rabbitmq)
228 |
229 | # Apache ActiveMQ
230 | > Apache ActiveMQ is an open source message broker written in Java together with a full Java Message Service (JMS) client. It provides "Enterprise Features" which in this case means fostering the communication from more than one client or server. Supported clients include Java via JMS 1.1 as well as several other "cross language" clients. The communication is managed with features such as computer clustering and ability to use any database as a JMS persistence provider besides virtual memory, cache, and journal persistency. -- [Wikipedia](http://en.wikipedia.org/wiki/Apache_ActiveMQ)
231 |
232 | # RabbitMQ vs ActiveMQ
233 | > RabbitMQ is an AMQP broker, while ActiveMQ is a JMS one. I suggest you read the AMQP [Wikipedia](http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol) article to get an idea of the concepts used in AMQP, which are different than the ones you're familiar in JMS. One of the main difference is that in AMQP a producer sends to an exchange `without knowing the actual message distribution strategy` while in JMS the producer targets either a `queue` or a `topic` (thus being aware of the type of message routing in place). So it's hard to tell what's done better or worse, as the semantics are very different between JMS and AMQP. -- [Stackoverflow](http://stackoverflow.com/questions/7044157/switching-from-activemq-to-rabbitmq)
234 |
235 | > RabbitMQ's queues and exchanges are all configured via the AMQP protocol so a client library allows you to configure all your destinations and their behavior. ActiveMQ requires specific destination configuration because the JMS spec doesn't cover any of the administration side of things. Besides that, RabbitMQ's system configuration is Erlang-esque, while ActiveMQ is usually configured in XML. So you'll have to get used to the {tuple} and <> lovely syntax. RabbitMQ is usually installed with OS packages, -- edit (or the `[library/rabbitmq](https://registry.hub.docker.com/u/library/rabbitmq/)` Docker image) -- while ActiveMQ distributions are archives you drop anywhere (or Maven deps you embed into something else). -- [Stackoverflow](http://stackoverflow.com/questions/7044157/switching-from-activemq-to-rabbitmq)
236 |
237 | > RabbitMQ’s provisioning capabilities make it the perfect communication bus for anyone building a distributed application, particularly one that leverages cloud-based resources and rapid deployment.
238 | -- [RabbitMQ in Action](http://www.manning.com/videla/)
239 |
240 | ## RabbitMQ Video
241 | - [Youtube - RabbitMQ is the new king](https://www.youtube.com/watch?v=kA8rPIDa388)
242 | - [Youtube - RabbitMQ: Message that Just Works (Part 1)](https://www.youtube.com/watch?v=ABGMjX4K0D8)
243 | - [Youtube - RabbitMQ: Message that Just Works (Part 2)](https://www.youtube.com/watch?v=puMLEy5kk2s)
244 | - [Youtube - RabbitMQ: Message that Just Works (Part 3)](https://www.youtube.com/watch?v=bUA0fMJGQBE)
245 | - [Youtube - RabbitMQ: Message that Just Works (Part 4)](https://www.youtube.com/watch?v=LWVYaaBH3NY)
246 | - [Youtube - Reliable Messaging With RabbitMQ](https://www.youtube.com/watch?v=XjuiZM7JzPw)
247 | - [Youtube - What RabbitMQ Can Do For You](https://www.youtube.com/watch?v=4lDSwfrfM-I)
248 |
249 | ## Blogs
250 | - [Getting Cirrius - Node-Webkit - an example of AngularJS using AMQP.](http://www.gettingcirrius.com/2013/10/node-webkit-example-of-angularjs-using.html)
251 |
252 | # Apache Qpid
253 | > Apache Qpid, an open-source (Apache 2.0 licensed) messaging system, implements the Advanced Message Queuing Protocol. It provides transaction management, queuing, distribution, security, management, clustering, federation and heterogeneous multi-platform support.
254 | -- [Wikipedia](http://en.wikipedia.org/wiki/Apache_Qpid)
255 |
256 | # JMS
257 | > The Java Message Service (JMS) API is a Java Message Oriented Middleware (MOM) API for sending messages between two or more clients. JMS is a part of the Java Platform, Enterprise Edition, and is defined by a specification developed under the Java Community Process as JSR 914. It is a messaging standard that allows application components based on the Java Enterprise Edition (Java EE) to create, send, receive, and read messages. It allows the communication between different components of a distributed application to be loosely coupled, reliable, and asynchronous.
258 | -- [Wikipedia](http://en.wikipedia.org/wiki/Java_Message_Service)
259 |
260 | > JMS attempted to solve the lock-in and interoperability problem by providing a common Java API that hides the actual interface to the individual vendor MQ products.
261 | -- [RabbitMQ in Action](http://www.manning.com/videla/)
262 |
263 | # AMQP
264 | > The Advanced Message Queuing Protocol (AMQP) is an open standard application layer protocol for message-oriented middleware. The defining features of AMQP are message orientation, queuing, routing (including point-to-point and publish-and-subscribe), reliability and security.
265 | -- [Wikipedia](http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol)
266 |
267 | -- [AMQP is the IP of business systems](https://www.youtube.com/watch?v=SXZJau292Uw)
268 |
269 | - [Youtube - Understanding AMQP 1.0](https://www.youtube.com/watch?v=SXZJau292Uw)
270 | - [Youtube - Advanced Message Queuing Protocol](https://www.youtube.com/watch?v=lz-aofC3nkU)
271 |
272 | # MQTT
273 | > MQTT (formerly Message Queue Telemetry Transport) is a publish-subscribe based "light weight" messaging protocol for use on top of the TCP/IP protocol. It is designed for connections with remote locations where a "small code footprint" is required and/or network bandwidth is limited. The Publish-Subscribe messaging pattern requires a message broker. The broker is responsible for distributing messages to interested clients based on the topic of a message. Andy Stanford-Clark and Arlen Nipper of Cirrus Link Solutions authored the first version of the protocol in 1999.
274 | -- [Wikipedia](http://en.wikipedia.org/wiki/MQTT)
275 |
276 | # STOMP
277 | > Simple (or Streaming) Text Oriented Message Protocol (STOMP), formerly known as TTMP, is a simple text-based protocol, designed for working with message-oriented middleware. It provides an interoperable wire format that allows STOMP clients to talk with any message broker supporting the protocol. It is thus language-agnostic, meaning a broker developed for one programming language or platform can receive communications from client software developed in another language.
278 | -- [Wikipedia](http://en.wikipedia.org/wiki/Streaming_Text_Oriented_Messaging_Protocol)
279 |
280 | # XMPP
281 | > Extensible Messaging and Presence Protocol (XMPP) is a communications protocol for message-oriented middleware based on XML (Extensible Markup Language). It enables the near-real-time exchange of structured yet extensible data between any two or more network entities. The protocol was originally named Jabber, and was developed by the Jabber open-source community in 1999 for near real-time instant messaging (IM), presence information, and contact list maintenance. Designed to be extensible, the protocol has also been used for publish-subscribe systems, signalling for VoIP, video, file transfer, gaming, Internet of Things (IoT) applications such as the smart grid, and social networking services.
282 | -- [Wikipedia](http://en.wikipedia.org/wiki/XMPP)
283 |
284 | # Slick with Reactive Streams Support
285 | > Slick is a modern database query and access library for Scala. It allows you to work with stored data almost as if you were using Scala collections while at the same time giving you full control over when a database access happens and which data is transferred. You can write your database queries in Scala instead of SQL, thus profiting from the static checking, compile-time safety and compositionality of Scala. Slick features an extensible query compiler which can generate code for different backends.
286 | -- [Slick](http://slick.typesafe.com/)
287 |
288 | - [Slick 3.0 Streaming](http://slick.typesafe.com/doc/3.0.0/dbio.html#streaming)
289 |
290 | ## Books
291 | - [Protocol specification](https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf)
292 |
293 | ## Blogs
294 | - [InfoQ - Slick 3: Reactive Streams for Asynchronous Database Access in Scala](http://www.infoq.com/news/2015/05/slick3?utm_content=buffer52e7c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer)
295 |
296 | ## Apache Zookeeper
297 | > Apache ZooKeeper is an effort to develop and maintain an open-source server which enables highly reliable distributed coordination. -- [Apache Zookeeper](https://zookeeper.apache.org/)
298 |
299 | - [Apache Zookeeper Documentation](https://zookeeper.apache.org/doc/trunk/)
300 |
301 | ## Apache Kafka
302 | > Apache Kafka is publish-subscribe messaging rethought as a distributed commit log. -- [Apache Kafka](http://kafka.apache.org/)
303 |
304 | - [Apache Kafka Documentation](http://kafka.apache.org/documentation.html)
305 |
306 | ## ElasticMQ
307 | > ElasticMQ is a message queue system, offering an actor-based Scala and an SQS-compatible REST (query) interface.
308 | -- [ElasticMQ](https://github.com/adamw/elasticmq)
309 |
310 | ## Amazon SQS
311 | > Amazon Simple Queue Service (SQS) is a fast, reliable, scalable, fully managed message queuing service. SQS makes it simple and cost-effective to decouple the components of a cloud application. You can use SQS to transmit any volume of data, at any level of throughput, without losing messages or requiring other services to be always available.
312 | -- [Amazon SQS](http://aws.amazon.com/sqs/)
313 |
314 | ## Slick
315 | - [Activator Template - Hello Slick](https://github.com/typesafehub/activator-hello-slick#slick-3.0)
316 | - [Activator Template - Slick Plain SQL](https://github.com/typesafehub/activator-slick-plainsql)
317 |
318 | # MongoDB
319 | > MongoDB (from “humongous”) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster.
320 | -- [library/mongo](https://registry.hub.docker.com/u/library/mongo/)
321 |
322 | - [library/mongo](https://registry.hub.docker.com/u/library/mongo/)
323 |
324 | ## Tepkin
325 | > Reactive MongoDB Driver for Scala and Java built on top of Akka IO and Akka Streams.
326 | -- [Tepkin](https://github.com/fehmicansaglam/tepkin)
327 |
328 | - [GitHub](https://github.com/fehmicansaglam/tepkin)
329 |
330 | # Apache Cassandra
331 | > Apache Cassandra is an open source distributed database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. Cassandra offers robust support for clusters spanning multiple datacenters, with asynchronous masterless replication allowing low latency operations for all clients.
332 | -- [library/cassandra](https://registry.hub.docker.com/u/library/cassandra/)
333 |
334 | - [library/cassandra](https://registry.hub.docker.com/u/library/cassandra/)
335 |
336 | ## Akka Persistence Cassandra
337 | > Replicated Akka Persistence journal and snapshot store backed by Apache Cassandra.
338 | -- [Akka Persistence Cassandra](https://github.com/krasserm/akka-persistence-cassandra/)
339 |
340 | - [Akka Persistence Cassandra](https://github.com/krasserm/akka-persistence-cassandra/)
341 |
342 | # Introduction
343 | ## Blocking
344 | Blocking typically occurs in a "pull" based system. These systems pull data as required. The problem is that when
345 | there is no data to pull, they often block the thread which is inefficient.
346 |
347 | I find it best to think about these problems in terms of plumbing. In our pull scenario, we have a series of pipes
348 | connected to a water source. We put a pump on the end of the pipes that will pull water through the pipes and empty it
349 | out at our destination. The problem here is that when we run out of water, the pump doesn't know there is a problem
350 | and continues to try to pull water. Do this long enough and your pump will burn out.
351 |
352 | ## Back Pressure
353 | In a "push" based system, it is possible for the producer to create more data than the consumer can handle which can
354 | cause the consumer to crash.
355 |
356 | Our push scenario moves the pump to the other end. Now we are pumping water into our pipes which then flows into a sink
357 | at the other end. The pump can be triggered by a float so it only works when there is water to pump. The problem is the
358 | sink is not capped. This means that when we fill it up, the water just overflows and the pump keeps pumping. Also not good.
359 |
360 | ## Akka Streams
361 | What we need is a system which puts a pump at the water source and also puts a cap on the sink. This means that the
362 | pump at the source will only run when there is water to pump, and meanwhile the sink will fill up and because it is
363 | capped it will create back pressure when it is full. The back pressure can trigger the pump to stop pumping again.
364 |
365 | This is exactly what Akka Streams does for us. In fact, if you look at the terminology for Akka Streams you will see
366 | that it lays it out in the exact terms I have been using. The most basic stream in Akka Streams consists of two parts:
367 | A Source and a Sink.
368 |
369 | ## Source
370 | 
371 |
372 | A Source is the input to the stream. It is from here that all the data will flow. Each Source has a single output channel
373 | and no input channel. Data flows from the Source, through the output channel, and into whatever might be connected to that
374 | Source. Examples of Sources could include a database query, an http request, or even something as simple as a random
375 | number generator. In our analogy, this is our water source, which is connected to our pump. It is drawing water from a
376 | reservoir and pushing it through our pipes.
377 |
378 | ## Sink
379 | 
380 |
381 | A Sink is the endpoint for the stream. The data from the stream will eventually find its way to the Sink. A Sink has a
382 | single input channel and no output channel. Data flows into the input channel and collects in the Sink. Examples of Sink
383 | behavior could include writing to a database, writing to a file, or aggregating data in memory. This is the capped sink
384 | in our analogy. Water is flowing through the pipes and eventually collecting in our sink.
385 |
386 | ## Runnable Flow
387 | 
388 |
389 | If you connect a Source to a Sink you get a Runnable Flow. This is the most basic complete form you can make in Akka Streams.
390 | Your stream is ready to use and data will now flow through it. Until you connect both a Source and a Sink, the data can not flow.
391 | Again, looking to our analogy, if you have a water source and a pump, but nowhere to pump the water to, then you don't have a
392 | complete system. Conversely, if you have a sink, but no water to pump into it, then again it isn't a complete system.
393 | Only when you connect the two do you get a complete system.
394 |
395 | ## Flow
396 | 
397 |
398 | While you can do a lot with just a Source and a Sink, things get more interesting when you add a Flow into the mix.
399 | A Flow can be used to apply transformations to the data coming out of a Source before putting it into a Sink. The Flow
400 | then has a single input channel and a single output channel. This allows it to be connected to both a Source and a Sink.
401 | Connecting a Flow to just a Source gives you a new Source. Connecting a Flow to just a Sink gives you a new Sink.
402 | Connecting a Source, Flow and Sink gives you a Runnable Flow. For our analogy this is the equivalent of putting a bend
403 | in the pipes, or perhaps narrowing or widening the pipes to change the flow rate. You are providing some way to alter the
404 | flow of the water.
405 |
406 | ## A Chain
407 | 
408 |
409 | Because Flows have both an input and an output you can chain them together allowing data to flow from a single Source,
410 | through multiple Flows and finally into the Sink.
411 |
412 | A Runnable Flow, no matter how complex, includes all the facilities for back pressure. Data flows through the system one
413 | way, but requests for additional data flow back through the system in the other direction. Under the hood, the Sink
414 | sends a request back through the Flows to the Source. This request notifies the Source that the Sink is ready to handle
415 | some more data. The Source will then push a set amount of data through the Flows into the Sink. The Sink will then
416 | process this data and when it has finished it will send another request for more data. This means that if the Sink gets
417 | backed up, then the time between those requests will increase and the necessary back pressure is generated.
418 |
419 | # akka-http
420 | > Akka HTTP is a stream-based, fully asynchronous, low-overhead HTTP/1.1 client/server implemented on top of Akka Streams.
421 |
422 | ## Documentation
423 | - [Akka Stream & Akka HTTP](http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC2/scala.html)
424 |
425 | ## Blogs
426 | - [SmartJava - Building a REST service in Scala with Akka HTTP, Akka Streams and reactive mongo](http://www.smartjava.org/content/building-rest-service-scala-akka-http-akka-streams-and-reactive-mongo)
427 |
428 | ## Video
429 | - [Youtube - Akka HTTP — The What, Why and How](https://www.youtube.com/watch?v=y_slPbktLr0)
430 |
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | name := "intro-to-akka-streams"
2 |
3 | organization := "com.github.dnvriend"
4 |
5 | version := "1.0.0"
6 |
7 | scalaVersion := "2.11.8"
8 |
9 | val akkaVersion = "2.4.17" // single version shared by every akka-* module below
10 | val httpVersion = "10.0.5" // akka-http is released on its own version line, separate from akka itself
11 |
12 | libraryDependencies += "com.typesafe.akka" %% "akka-actor" % akkaVersion
13 | libraryDependencies += "com.typesafe.akka" %% "akka-stream" % akkaVersion
14 | libraryDependencies += "com.typesafe.akka" %% "akka-http-core" % httpVersion
15 | libraryDependencies += "com.typesafe.akka" %% "akka-http-spray-json" % httpVersion
16 | libraryDependencies += "com.typesafe.akka" %% "akka-slf4j" % akkaVersion
17 | libraryDependencies += "ch.qos.logback" % "logback-classic" % "1.2.3"
18 |
19 | libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % akkaVersion % Test
20 | libraryDependencies += "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion % Test
21 | libraryDependencies += "org.typelevel" %% "scalaz-scalatest" % "1.1.2" % Test
22 | libraryDependencies += "org.scalatestplus.play" %% "scalatestplus-play" % "2.0.0" % Test
23 |
24 | fork in Test := true // forked JVM required for the javaOptions below to take effect
25 |
26 | javaOptions in Test ++= Seq("-Xms30m","-Xmx30m") // NOTE(review): 30m heap looks deliberately small, presumably to exercise stream memory behavior — confirm
27 |
28 | parallelExecution in Test := false // NOTE(review): likely because suites share a fixed HTTP port — confirm
29 |
30 | licenses +=("Apache-2.0", url("http://opensource.org/licenses/apache2.0.php"))
31 |
32 | // enable scala code formatting //
33 | import scalariform.formatter.preferences._
34 | import com.typesafe.sbt.SbtScalariform
35 |
36 | // Scalariform settings
37 | SbtScalariform.autoImport.scalariformPreferences := SbtScalariform.autoImport.scalariformPreferences.value
38 | .setPreference(AlignSingleLineCaseStatements, true)
39 | .setPreference(AlignSingleLineCaseStatements.MaxArrowIndent, 100)
40 | .setPreference(DoubleIndentClassDeclaration, true)
41 |
42 | // enable updating file headers //
43 | import de.heikoseeberger.sbtheader.license.Apache2_0
44 |
45 | headers := Map(
46 | "scala" -> Apache2_0("2016", "Dennis Vriend"),
47 | "conf" -> Apache2_0("2016", "Dennis Vriend", "#")
48 | )
49 |
50 | // configure build info //
51 | // build info configuration //
52 | buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion)
53 |
54 | buildInfoPackage := "com.github.dnvriend"
55 |
56 | // enable plugins //
57 | enablePlugins(AutomateHeaderPlugin)
58 | enablePlugins(BuildInfoPlugin)
59 | enablePlugins(PlayScala) // project is built as a Play application (see sbt-plugin in project/plugins.sbt)
60 | disablePlugins(PlayLayoutPlugin) // keep the standard sbt src/main|test layout instead of Play's app/ layout
--------------------------------------------------------------------------------
/img/chain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/chain.png
--------------------------------------------------------------------------------
/img/consumer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/consumer.png
--------------------------------------------------------------------------------
/img/direct_exchange.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/direct_exchange.png
--------------------------------------------------------------------------------
/img/fanout_exchange.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/fanout_exchange.png
--------------------------------------------------------------------------------
/img/flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/flow.png
--------------------------------------------------------------------------------
/img/producer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/producer.png
--------------------------------------------------------------------------------
/img/queue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/queue.png
--------------------------------------------------------------------------------
/img/runnable_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/runnable_flow.png
--------------------------------------------------------------------------------
/img/sink.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/sink.png
--------------------------------------------------------------------------------
/img/source.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/source.png
--------------------------------------------------------------------------------
/img/topic_exchange.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/img/topic_exchange.png
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=0.13.15
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | // to format scala source code
2 | addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.6.0")
3 |
4 | // enable updating file headers eg. for copyright
5 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "1.8.0")
6 |
7 | // generates Scala source from your build definitions //
8 | // see: https://github.com/sbt/sbt-buildinfo
9 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.5.0")
10 |
11 | // enable playframework
12 | addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.5.14")
13 |
--------------------------------------------------------------------------------
/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | # Copyright 2016 Dennis Vriend
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | akka {
16 | stdout-loglevel = off // defaults to WARNING can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown
17 | log-dead-letters-during-shutdown = on
18 | loglevel = debug
19 | log-dead-letters = on
20 | log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started
21 |
22 | loggers = ["akka.event.slf4j.Slf4jLogger"]
23 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
24 |
25 | actor {
26 | debug {
27 | receive = off // log all messages sent to an actor if that actors receive method is a LoggingReceive
28 | autoreceive = off // log all special messages like Kill, PoisonPill etc sent to all actors
29 | lifecycle = off // log all actor lifecycle events of all actors
30 | fsm = off // enable logging of all events, transitions and timers of FSM Actors that extend LoggingFSM
31 | event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/src/test/resources/csv/starwars.csv:
--------------------------------------------------------------------------------
1 | firstname,lastname
2 | darth,vader
3 | leia,organa
4 | luke,skywalker
5 | han,solo
6 | boba,fett
7 | obi-wan,kenobi
8 | darth,maul
9 | darth,sidious
10 | padme,amidala
11 | lando,calrissian
12 | mace,windu
13 |
--------------------------------------------------------------------------------
/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | debug
7 |
8 |
9 | %date{ISO8601} - %logger -> %-5level[%thread] %logger{0} - %msg%n
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/src/test/resources/routes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dnvriend/intro-to-akka-streams/37d10e0705f9ec3c496cabc11fc4e75e1a63648b/src/test/resources/routes
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/TestSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams
18 |
19 | import akka.NotUsed
20 | import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
21 | import akka.event.{ Logging, LoggingAdapter }
22 | import akka.stream.Materializer
23 | import akka.stream.scaladsl.Source
24 | import akka.stream.testkit.TestSubscriber
25 | import akka.stream.testkit.scaladsl.TestSink
26 | import akka.testkit.TestProbe
27 | import akka.util.Timeout
28 | import com.github.dnvriend.streams.util.ClasspathResources
29 | import org.scalatest._
30 | import org.scalatest.concurrent.{ Eventually, ScalaFutures }
31 | import org.scalatestplus.play.guice.GuiceOneServerPerSuite
32 | import play.api.inject.BindingKey
33 | import play.api.libs.json.{ Format, Json }
34 | import play.api.test.WsTestClient
35 |
36 | import scala.collection.immutable._
37 | import scala.concurrent.duration._
38 | import scala.concurrent.{ ExecutionContext, Future }
39 | import scala.reflect.ClassTag
40 | import scala.util.Try
41 |
/** Immutable value object used as a test payload throughout the suite. */
final case class Person(firstName: String, age: Int)

object Person {
  // Play-JSON (de)serializer, macro-derived from the case class fields.
  implicit val format: Format[Person] = Json.format[Person]
}
47 |
/**
 * Base class for every test in this suite. Boots a Guice-backed Play server
 * (via `GuiceOneServerPerSuite`) and pulls the ActorSystem, ExecutionContext
 * and Materializer out of the application's injector, so individual tests can
 * run Akka Streams against a live application context.
 */
class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  /** Resolves a component of type `A` from the running application's injector. */
  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  /** Resolves a component of type `A` bound under the given qualifier `name`. */
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  // Generous patience for `futureValue`: streams in these tests sleep deliberately.
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  /** A fresh random UUID string, handy as a unique identifier in tests. */
  def id: String = java.util.UUID.randomUUID().toString

  /** Converts a Future to a Try by blocking up to the implicit PatienceConfig. */
  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  /** Runs a source into a TestSink probe and hands the probe to `f` for assertions. */
  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  /** Supplies `f` with an infinite source counting up from `start`. */
  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  /** Materializes `xs` as a source into a TestSink probe and hands the probe to `f`. */
  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  /** Poison-pills each actor and blocks until its termination is observed. */
  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
}
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/actorpublisher/AkkaPublisherSubscriberTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.actorpublisher
18 |
19 | import akka.{ NotUsed, Done }
20 | import akka.actor.{ ActorLogging, Props }
21 | import akka.stream.actor.ActorPublisherMessage._
22 | import akka.stream.actor.ActorSubscriberMessage.{ OnComplete, OnError, OnNext }
23 | import akka.stream.actor.{ ActorPublisher, ActorSubscriber, MaxInFlightRequestStrategy }
24 | import akka.stream.scaladsl._
25 | import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, ClosedShape, Supervision }
26 | import com.github.dnvriend.streams.TestSpec
27 |
28 | import scala.concurrent.Future
29 |
30 | /**
31 | * A simple stream publisher that generates numbers every time a `Request` for demand has been received.
32 | * The life cycle state of the subscription is tracked with the following boolean members:
33 | *
34 | * - isActive
35 | * - isCompleted
36 | * - isErrorEmitted
 37 |  * - isCanceled
38 | *
39 | *
40 | * see: http://doc.akka.io/api/akka-stream-and-http-experimental/1.0-RC2/#akka.stream.actor.ActorPublisher
41 | */
class NumberPublisher(delay: Long = 50) extends ActorPublisher[Long] with ActorLogging {
  // Emit the numbers 0..stopAt (inclusive), then complete the stream.
  val stopAt = 25
  var counter: Long = 0L

  override def receive = {
    // A subscriber signals demand with a `Request` message; `totalDemand` is the
    // accumulated, not-yet-fulfilled demand tracked by ActorPublisher.
    case Request(demand) if totalDemand > 0 && isActive ⇒
      log.info("[Request]: demand: {}", demand)
      if (counter <= stopAt)
        try {
          // Emit while there is outstanding demand, but never past `stopAt`:
          // the previous unbounded `1L to totalDemand` loop could overshoot the
          // stop marker within a single batch, and completion only happened on
          // a *later* Request message.
          while (isActive && totalDemand > 0 && counter <= stopAt) {
            log.info("Foreach: {}", counter)
            Thread.sleep(delay) // to slow logging down
            onNext(counter)
            counter += 1
          }
          // Complete eagerly once the last number has been emitted.
          if (counter > stopAt) {
            log.info(s"$stopAt reached, stopping")
            onComplete()
          }
        } catch {
          case t: Throwable ⇒
            // You can terminate the stream with failure by calling `onError`.
            // After that you are not allowed to call `onNext`, `onError` and `onComplete`.
            log.error(t, "")
            onError(t)
            stop()
        }
      else {
        log.info(s"$stopAt reached, stopping")
        onComplete()
      }

    case Cancel ⇒
      log.info("[Cancel]")
      // When the stream subscriber cancels the subscription the Cancel message
      // is delivered to this actor. After that subsequent calls to `onNext` will be ignored.
      onComplete()

    case SubscriptionTimeoutExceeded ⇒
      log.info("[SubscriptionTimeoutExceeded]")
      onComplete()
      stop()

    case m ⇒ log.info("[!!!DROPPING!!!]: {}", m)
  }

  def stop(): Unit = {
    // Stopping the actor completes the stream, unless it was already
    // terminated with failure, completed or canceled.
    context.stop(self)
  }

  override def preStart(): Unit = {
    log.info("Starting")
    super.preStart()
  }

  override def postStop(): Unit = {
    log.info("Stopping")
    super.postStop()
  }
}
102 |
103 | /**
104 | * A stream subscriber with full control of stream back pressure.
105 | * It will receive the following messages from the stream:
106 | *
107 | * - ActorSubscriberMessage.OnNext
108 | * - ActorSubscriberMessage.OnComplete
109 | * - ActorSubscriberMessage.OnError
110 | *
111 | *
112 | * It can also receive other, non-stream messages, in the same way as any actor.
113 | */
class NumberSubscriber(maxInFlight: Int = 1, f: Long ⇒ Unit) extends ActorSubscriber with ActorLogging {
  // Number of elements currently being processed; read by the request strategy.
  var inFlight = 0

  // requestStrategy controls stream back pressure. After each incoming message the ActorSubscriber
  // will automatically invoke `RequestStrategy.requestDemand` and propagate
  // the returned demand to the stream.
  override protected def requestStrategy =
    // Requests up to `max` while taking the number of messages that have been
    // queued internally or delegated to other actors into account; the concrete
    // subclass reports that number via `inFlightInternally`.
    new MaxInFlightRequestStrategy(max = maxInFlight) {
      override def inFlightInternally = inFlight
    }

  override def receive = {
    case OnNext(msg: Long) ⇒
      inFlight += 1
      Thread.sleep(100) // do some heavy computing :)
      log.info("[OnNext]: {}, inflight: {}", msg, inFlight)
      f(msg)
      inFlight -= 1

    case OnComplete ⇒
      log.info("[OnComplete]")
      stop()

    // `OnError` is a case class carrying the failure cause. The previous bare
    // `case OnError ⇒` pattern matched the companion *object* and therefore
    // never fired; stream failures fell through to the catch-all below.
    case OnError(cause) ⇒
      log.error(cause, "[OnError]")
      stop()

    case m ⇒ log.info("[!!!DROPPING!!!]: {}", m)
  }

  def stop(): Unit = {
    // Stopping the actor completes the stream, unless it was already
    // terminated with failure, completed or canceled.
    context.stop(self)
  }

  override def preStart(): Unit = {
    log.info("Starting")
    super.preStart()
  }

  override def postStop(): Unit = {
    log.info("Stopping")
    super.postStop()
  }
}
164 |
class AkkaPublisherSubscriberTest extends TestSpec {

  /**
   * A println sink: prints every element and materializes a Future[Done]
   * that completes when the stream completes.
   */
  val printlnSink: Sink[AnyRef, Future[Done]] = Sink.foreach(println)

  /**
   * The GraphDSL that will be reused; it is a simple broadcast, splitting the flow into 2:
   * NumberPublisher ~> Broadcast ~> (NumberSubscriber, `sink`).
   * The graph materializes the value of `sink` (a Future[Done] for printlnSink).
   */
  def graph(sink: Sink[AnyRef, Future[Done]], f: Long ⇒ Unit) = RunnableGraph.fromGraph(
    GraphDSL.create(sink) { implicit b ⇒ sink ⇒
      import GraphDSL.Implicits._
      // NOTE(review): the element type of `src` is inferred here; the wiring
      // relies on it conforming to the Broadcast[AnyRef] junction below.
      val src = Source.actorPublisher(Props(new NumberPublisher()))
      // backpressuring subscriber with max-in-flight of 1, side-effecting via `f`
      val numberSink = Sink.actorSubscriber(Props(new NumberSubscriber(1, f)))
      val bcast = b.add(Broadcast[AnyRef](2))

      src ~> bcast ~> numberSink
      bcast ~> sink
      ClosedShape
    }
  )

  "NumberProducer" should "count some time" in {
    // the default demand is 4, and will not ask for more
    Source.actorPublisher(Props(new NumberPublisher))
      .runForeach(println)
      .futureValue
  }

  it should "use a subscriber to supply backpressure" in {
    // the no-op callback means the subscriber only paces the stream
    graph(printlnSink, x ⇒ ()).run().futureValue
  }

  it should "throws an exception when count == 10, println continues" in {
    // note, the actor crashes and will be stopped, but the println sink will continue
    graph(printlnSink, x ⇒ if (x == 10) throw new RuntimeException("10 reached")).run().futureValue
  }

  // should use actor supervision for this.. TBC
  ignore should "throw an exception but the actor will recover, the message will be dropped though" in {
    val decider: Supervision.Decider = {
      case _ ⇒ Supervision.Restart
    }
    // shadows the suite-level materializer so `run()` uses the restarting strategy
    implicit val mat = ActorMaterializer(ActorMaterializerSettings(system).withSupervisionStrategy(decider))
    graph(printlnSink, x ⇒ if (x == 10) throw new RuntimeException("10 reached")).run().futureValue
  }
}
213 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/collection/SourceFromCollectionTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.collection
18 |
19 | import com.github.dnvriend.streams.TestSpec
20 |
/**
 * Shows how `Source(collection)` behaves for the common immutable collections,
 * including the Reactive Streams rule that `null` elements are illegal.
 * Probe calls are chained fluently; every probe method returns the probe itself.
 */
class SourceFromCollectionTest extends TestSpec {
  "null" should "throw NullPointerException" in {
    // Reactive Streams rule 2.13: a stream must never carry null.
    intercept[NullPointerException] {
      fromCollection[Nothing](null) { tp ⇒
        tp.request(1).expectComplete()
      }
    }.getMessage should include("Element must not be null, rule 2.13")
  }

  "Option" should "when empty, complete" in {
    fromCollection(Option.empty[String].toList) { tp ⇒
      tp.request(1).expectComplete()
    }
  }

  it should "emit a single element then complete" in {
    fromCollection(Option("a").toList) { tp ⇒
      tp.request(1).expectNext("a").expectComplete()
    }
  }

  "List" should "when empty, complete" in {
    // three spellings of the empty list, same outcome
    fromCollection(List.empty[String]) { tp ⇒
      tp.request(1).expectComplete()
    }

    fromCollection[Nothing](List()) { tp ⇒
      tp.request(1).expectComplete()
    }

    fromCollection[Nothing](Nil) { tp ⇒
      tp.request(1).expectComplete()
    }
  }

  it should "emit onError when processing list of null values" in {
    fromCollection(List.fill(3)(null)) { tp ⇒
      tp.request(1)
      tp.expectError().getMessage should include("Element must not be null, rule 2.13")
    }
  }

  it should "emit onError when processing list containing null values" in {
    // valid elements are delivered until the null is reached
    fromCollection(List("a", null, "b")) { tp ⇒
      tp.request(1).expectNext("a").request(1)
      tp.expectError().getMessage should include("Element must not be null, rule 2.13")
    }
  }

  it should "emit three elements then complete" in {
    fromCollection(List("a", "b", "c")) { tp ⇒
      tp.request(1).expectNext("a")
        .request(1).expectNext("b")
        .request(1).expectNext("c")
        .expectComplete()
    }
  }

  "Vector" should "when empty, complete" in {
    fromCollection(Vector.empty[String]) { tp ⇒
      tp.request(1).expectComplete()
    }
  }

  it should "emit three elements then complete" in {
    fromCollection(Vector("a", "b", "c")) { tp ⇒
      tp.request(1).expectNext("a")
        .request(1).expectNext("b")
        .request(1).expectNext("c")
        .expectComplete()
    }
  }

  "Set" should "when empty, complete" in {
    fromCollection(Set.empty[String]) { tp ⇒
      tp.request(1).expectComplete()
    }
  }

  it should "emit three elements then complete" in {
    // NOTE(review): relies on small immutable Sets iterating in insertion order
    fromCollection(Set("a", "b", "c")) { tp ⇒
      tp.request(1).expectNext("a")
        .request(1).expectNext("b")
        .request(1).expectNext("c")
        .expectComplete()
    }
  }
}
129 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/customstage/CustomStreamProcessingTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.customstage
18 |
19 | import akka.NotUsed
20 | import akka.actor.ActorSystem
21 | import akka.stream.scaladsl.Source
22 | import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }
23 | import akka.stream.testkit.TestSubscriber
24 | import akka.stream.testkit.scaladsl.TestSink
25 | import akka.stream._
26 | import com.github.dnvriend.streams.TestSpec
27 |
28 | import scala.concurrent.Future
29 | import scala.concurrent.duration.FiniteDuration
30 | import scala.concurrent.duration._
31 |
/**
 * Drives the CustomNumbersSource graph stage with a TestSink probe, exercising
 * single-element demand, batched demand, and immediate cancellation.
 */
class CustomStreamProcessingTest extends TestSpec {

  "Custom Number Source" should "" in {
    // element-at-a-time demand
    CustomNumbersSource.withTestProbe() { tp ⇒
      tp.request(1).expectNext(1)
        .request(1).expectNext(2)
        .cancel()
        .expectNoMsg(100.millis)
    }

    // a batch of two
    CustomNumbersSource.withTestProbe() { tp ⇒
      tp.request(2)
        .expectNext(1)
        .expectNext(2)
        .cancel()
        .expectNoMsg(100.millis)
    }

    // a batch of three
    CustomNumbersSource.withTestProbe() { tp ⇒
      tp.request(3)
        .expectNext(1)
        .expectNext(2)
        .expectNext(3)
        .cancel()
        .expectNoMsg(100.millis)
    }

    // cancel before any demand: nothing may be emitted
    CustomNumbersSource.withTestProbe() { tp ⇒
      tp.cancel().expectNoMsg(100.millis)
    }
  }
}
67 |
/**
 * A custom Source graph stage that emits 1, 2, 3, ... on demand.
 * The stream is unbounded; termination is driven by downstream (cancel/take).
 */
class CustomNumbersSource extends GraphStage[SourceShape[Int]] {
  val out: Outlet[Int] = Outlet("NumbersSource")
  override val shape: SourceShape[Int] = SourceShape(out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      // Next number to emit; mutable state is safe inside a GraphStageLogic
      // because each materialization gets its own instance.
      private var nextNumber = 1

      setHandler(out, new OutHandler {
        // Each downstream pull emits the current number and advances the state.
        // `complete(out)` / `fail(out, ex)` would end the stream here instead.
        override def onPull(): Unit = {
          val emitted = nextNumber
          nextNumber += 1
          push(out, emitted)
        }
        // Downstream cancellation is handled by the default OutHandler behavior.
      })
    }
}
object CustomNumbersSource {
  /**
   * Wraps the graph stage as a reusable `Source[Int, NotUsed]`; the resulting
   * source can be combined and run like any built-in source, e.g.
   * `CustomNumbersSource().take(10).runFold(0)(_ + _)`.
   */
  def apply()(implicit mat: Materializer): Source[Int, NotUsed] =
    Source.fromGraph(new CustomNumbersSource)

  /**
   * Runs the source into a TestSink and hands the probe to `f`, with all
   * probe expectations bounded by the `within` deadline.
   */
  def withTestProbe(within: FiniteDuration = 10.seconds)(f: TestSubscriber.Probe[Int] ⇒ Unit)(implicit system: ActorSystem, mat: Materializer): Unit = {
    val probe = apply().runWith(TestSink.probe[Int])
    f(probe.within(within)(probe))
  }
}
112 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/customstage/Ex1IdentityStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.customstage
18 |
19 | import akka.stream.impl.fusing.GraphStages
20 | import akka.stream.stage._
21 | import akka.stream.testkit.scaladsl.TestSink
22 | import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
23 | import com.github.dnvriend.streams.TestSpec
24 |
class Ex1IdentityStageTest extends TestSpec {

  /**
   * Custom transformation stages are for processing logic not covered by the
   * built-in stages, see:
   * http://doc.akka.io/docs/akka-stream-and-http-experimental/2.0.1/stages-overview.html
   *
   * The tests below implement the simplest possible stage — identity, which
   * forwards every element unchanged — in three different ways, and finally
   * show the stage that akka-streams already ships.
   */

  "CustomIdentityStage" should "be implemented with a PushPullStage" in {

    /**
     * PushPullStage is the most elementary transformation stage: it consumes
     * elements of type `A` and emits elements of type `A`.
     */
    class IdentityViaPushPull[A] extends PushPullStage[A, A] {
      // Upstream pushed an element while downstream has demand: forward it as-is.
      override def onPush(elem: A, ctx: Context[A]): SyncDirective =
        ctx.push(elem)

      // Downstream asks for more: propagate the demand upstream unchanged.
      override def onPull(ctx: Context[A]): SyncDirective =
        ctx.pull()
    }

    // `transform()` takes a stage factory `() => Stage`; a TestSink probe
    // drives demand and asserts on the emitted elements.
    withIterator() { src ⇒
      val probe = src.take(2)
        .transform(() ⇒ new IdentityViaPushPull[Int])
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(0, 1)
      probe.expectComplete()
    }
  }

  it should "also be implemented using the PushStage" in {
    /**
     * When `onPull` would only call `ctx.pull()`, extend `PushStage` instead.
     * This is not just cosmetic: `onPull` is final on `PushStage`, which lets
     * the runtime rely on the pass-through behavior and process elements faster.
     */
    class IdentityViaPush[A] extends PushStage[A, A] {
      override def onPush(elem: A, ctx: Context[A]): SyncDirective =
        ctx.push(elem)
    }

    withIterator() { src ⇒
      val probe = src.transform(() ⇒ new IdentityViaPush[Int])
        .take(2)
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(0, 1)
      probe.expectComplete()
    }
  }

  it should "also be implemented as a GraphStage" in {
    /**
     * `GraphStage` builds an indivisible processing stage with any number of
     * input/output ports and allows state to be kept inside it safely — the
     * counterpart of composing stages via GraphDSL.create().
     */
    class IdentityViaGraphStage[A] extends GraphStage[FlowShape[A, A]] {
      val in = Inlet[A]("Identity.in")
      val out = Outlet[A]("Identity.out")

      override def shape: FlowShape[A, A] = FlowShape.of(in, out)

      override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
        setHandler(in, new InHandler {
          // forward whatever arrives
          override def onPush(): Unit = push(out, grab(in))
        })
        setHandler(out, new OutHandler {
          // forward the demand
          override def onPull(): Unit = pull(in)
        })
      }
    }

    withIterator() { src ⇒
      val probe = src.take(2)
        .via(new IdentityViaGraphStage[Int])
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(0, 1)
      probe.expectComplete()
    }
  }

  it should "already be implemented in the akka stream API" in {
    // akka-streams ships an identity stage out of the box
    withIterator() { src ⇒
      val probe = src.take(2)
        .via(GraphStages.identity)
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(0, 1)
      probe.expectComplete()
    }
  }
}
167 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/customstage/Ex2CustomMapTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.customstage
18 |
19 | import akka.stream.{ Outlet, Inlet, Attributes, FlowShape }
20 | import akka.stream.stage._
21 | import akka.stream.testkit.scaladsl.TestSink
22 | import com.github.dnvriend.streams.TestSpec
23 |
class Ex2CustomMapTest extends TestSpec {

  "CustomMapStage" should "be implemented with a PushPullStage" in {

    /**
     * A one-to-one transformation: every incoming `A` is converted to a `B`
     * with the supplied function and forwarded downstream.
     *
     * A PushPullStage has two "input ports" (the `onPush`/`onPull` callbacks)
     * and two "output ports" (the `ctx.push`/`ctx.pull` methods); each callback
     * must invoke exactly one output port. This stage wires them as:
     *
     * +---------------------------------+
     * |  onPush(in,ctx)   ctx.push(out) |
     * O-------------> f(in) -->---------O
     * |                                 |
     * O-------------<------<------------O
     * |  ctx.pull()       onPull(ctx)   |
     * +---------------------------------+
     *
     * A map is just a function `f: A => B`, hence the shape of this stage.
     */
    class MapViaPushPull[A, B](f: A ⇒ B) extends PushPullStage[A, B] {
      override def onPush(elem: A, ctx: Context[B]): SyncDirective =
        ctx.push(f(elem)) // transform and emit while downstream has demand

      override def onPull(ctx: Context[B]): SyncDirective =
        ctx.pull() // ask upstream for the next element
    }

    // `transform()` takes a stage factory `() => Stage`; a TestSink probe
    // drives demand and asserts on the transformed elements.
    withIterator(1) { src ⇒
      val probe = src.transform(() ⇒ new MapViaPushPull[Int, Int](_ * 2))
        .take(2)
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(2, 4)
      probe.expectComplete()
    }
  }

  it should "also be implemented using the PushStage" in {
    /**
     * When `onPull` would only propagate the demand upstream, `PushStage`
     * (where `onPull` is final) is the better fit — same conceptual wiring,
     * less code, and the runtime can optimize the pass-through pull.
     */
    class MapViaPush[A, B](f: A ⇒ B) extends PushStage[A, B] {
      override def onPush(elem: A, ctx: Context[B]): SyncDirective =
        ctx.push(f(elem))
    }

    withIterator(1) { src ⇒
      val probe = src.transform(() ⇒ new MapViaPush[Int, Int](_ * 2))
        .take(2)
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(2, 4)
      probe.expectComplete()
    }
  }

  it should "also be implemented as a GraphStage" in {
    // The GraphStage formulation: one inlet, one outlet, transform on push.
    class MapViaGraphStage[A, B](f: A ⇒ B) extends GraphStage[FlowShape[A, B]] {
      val in = Inlet[A]("Map.in")
      val out = Outlet[B]("Map.out")

      override def shape: FlowShape[A, B] = FlowShape.of(in, out)

      override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
        setHandler(in, new InHandler {
          override def onPush(): Unit = push(out, f(grab(in)))
        })
        setHandler(out, new OutHandler {
          override def onPull(): Unit = pull(in)
        })
      }
    }

    withIterator(1) { src ⇒
      val probe = src.via(new MapViaGraphStage[Int, Int](_ * 2))
        .take(2)
        .runWith(TestSink.probe[Int])
      probe.request(Int.MaxValue)
      probe.expectNext(2, 4)
      probe.expectComplete()
    }
  }
}
149 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/customstage/Ex3CustomFilterTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.customstage
18 |
19 | import akka.stream.stage._
20 | import akka.stream.testkit.scaladsl.TestSink
21 | import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
22 | import com.github.dnvriend.streams.TestSpec
23 |
class Ex3CustomFilterTest extends TestSpec {
  "CustomFilterStage" should "be implemented with a PushPullStage" in {

    /**
     * A custom filter stage: if the given predicate matches the current element, the element will be
     * forwarded/propagated downstream, otherwise we return the "ball" to our upstream so that we get a new element.
     *
     * This behavior is achieved by modifying the `CustomMapStage` from the `Ex2CustomMapTest` example by adding a
     * conditional in the `onPush` handler and deciding between a `ctx.pull()` or `ctx.push(elem)` call, resulting
     * in the following conceptual wiring:
     *
     *  +---------------------------------+
     *  | onPush(in,ctx)    ctx.push(out) |
     * O----+----> if p(in) -->--------O
     *  |   |      if !p(in)              |
     * O--<-v--------<------<------------O
     *  |  ctx.pull()        onPull(ctx)  |
     *  +---------------------------------+
     */
    class CustomFilterStage[A](p: A ⇒ Boolean) extends PushPullStage[A, A] {
      // forward matching elements; for non-matching elements immediately ask upstream for the next one
      override def onPush(elem: A, ctx: Context[A]): SyncDirective =
        if (p(elem)) ctx.push(elem) else ctx.pull()

      override def onPull(ctx: Context[A]): SyncDirective =
        ctx.pull() // request for more elements from upstream (other stages before us)
    }

    /**
     * To use the custom transformation stage, call `transform()` on a `Flow` or `Source`
     * which takes a factory function returning a Stage: `f: () => Stage`
     *
     * In the example below we use a TestSink probe as the Sink that generates demand and
     * does assertions.
     */
    withIterator(1) { src ⇒
      src.transform(() ⇒ new CustomFilterStage(_ % 2 == 0))
        .take(5)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(2, 4, 6, 8, 10)
        .expectComplete()
    }
  }

  it should "also be implemented using the PushStage" in {
    /**
     * When the stage just propagates the pull upwards to the `previous` stage, it is not necessary to override
     * the onPull handler at all. Such transformations are better off extending `PushStage`. The conceptual
     * wiring stays the same.
     */

    class CustomFilterStage[A](p: A ⇒ Boolean) extends PushStage[A, A] {
      override def onPush(elem: A, ctx: Context[A]): SyncDirective =
        if (p(elem)) ctx.push(elem) else ctx.pull()
    }

    /**
     * To use the custom transformation stage, call `transform()` on a `Flow` or `Source`
     * which takes a factory function returning a Stage: `f: () => Stage`
     *
     * In the example below we use a TestSink probe as the Sink that generates demand and
     * does assertions.
     */
    withIterator(1) { src ⇒
      src.transform(() ⇒ new CustomFilterStage(_ % 2 == 0))
        .take(5)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(2, 4, 6, 8, 10)
        .expectComplete()
    }
  }

  it should "also be implemented as a GraphStage" in {
    // GraphStage replaces the deprecated Push/Pull stages; ports are declared explicitly
    class CustomFilterStage[A](p: A ⇒ Boolean) extends GraphStage[FlowShape[A, A]] {
      val in = Inlet[A]("Filter.in")
      val out = Outlet[A]("Filter.out")

      override def shape = FlowShape.of(in, out)

      override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
        setHandler(in, new InHandler {
          override def onPush(): Unit = {
            val elem: A = grab(in)
            // forward matching elements downstream, otherwise ask upstream for the next element
            if (p(elem)) push(out, elem) else pull(in)
          }
        })

        setHandler(out, new OutHandler {
          // downstream demand is simply propagated upstream
          override def onPull(): Unit = pull(in)
        })
      }
    }

    withIterator(1) { src ⇒
      src.via(new CustomFilterStage(_ % 2 == 0))
        .take(5)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(2, 4, 6, 8, 10)
        .expectComplete()
    }
  }
}
135 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/customstage/Ex4StatefulStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.customstage
18 |
19 | import akka.stream.stage._
20 | import akka.stream.testkit.scaladsl.TestSink
21 | import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
22 | import com.github.dnvriend.streams.TestSpec
23 |
class Ex4StatefulStageTest extends TestSpec {

  /**
   * `StatefulStage` is a `PushPullStage` that provides convenience methods to make some things easier.
   * The behavior is defined in `StageState` instances.
   *
   * The `initial` behavior is specified by subclassing `StatefulStage` and implementing
   * the `initial` method.
   *
   * The behavior can be changed by using become. Use `emit` or `emitAndFinish` to push more than one element
   * from StageState.onPush or StageState.onPull.
   *
   * Use `terminationEmit` to push final elements from `onUpstreamFinish` or `onUpstreamFailure`.
   *
   * NOTE(review): `StatefulStage` (like the other 1.x-style stages) is deprecated in favor of
   * `GraphStage` — the last test below shows the `GraphStage` equivalent.
   */

  "CustomDuplicatorStage" should "be implemented with a StatefulStage" in {

    /**
     * The custom duplicator stage does exactly what you think: it emits two elements
     * when one is received. Lets test it:
     */
    class CustomDuplicatorStage[A]() extends StatefulStage[A, A] {
      override def initial: StageState[A, A] = new StageState[A, A] {
        // `emit` pushes the whole iterator downstream, one element per unit of demand
        override def onPush(elem: A, ctx: Context[A]): SyncDirective =
          emit(List(elem, elem).iterator, ctx)
      }
    }

    /**
     * To use the custom transformation stage, call `transform()` on a `Flow` or `Source`
     * which takes a factory function returning a Stage: `f: () => Stage`
     *
     * In the example below we use a TestSink probe as the Sink that generates demand and
     * does assertions.
     */
    withIterator(1) { src ⇒
      src.take(2)
        .transform(() ⇒ new CustomDuplicatorStage)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(1, 1, 2, 2)
        .expectComplete()
    }
  }

  it should "be used for other stateful things also" in {
    // Accumulates the sums of the even and the uneven numbers seen so far. Whenever the
    // predicate `p` matches the current element, the accumulated (sumEven, sumUnEven) pair
    // is pushed downstream and both sums are reset; note that the triggering element itself
    // is NOT added to either sum.
    class SumEvenAndUnevenNumbersCollector(p: Int ⇒ Boolean) extends StatefulStage[Int, (Int, Int)] {
      var sumEven: Int = 0
      var sumUnEven: Int = 0

      override def initial: StageState[Int, (Int, Int)] = new StageState[Int, (Int, Int)] {
        override def onPush(elem: Int, ctx: Context[(Int, Int)]): SyncDirective =
          if (p(elem)) {
            // flush the accumulated sums and start over
            val tuple = (sumEven, sumUnEven)
            sumEven = 0
            sumUnEven = 0
            ctx.push(tuple)
          } else {
            if (elem % 2 != 0) {
              sumUnEven = sumUnEven + elem
              ctx.pull()
            } else {
              sumEven = sumEven + elem
              ctx.pull()
            }
          }
      }
    }

    /**
     * For elements 1..20 with a trigger on multiples of 10:
     *  - 1..9:   evens 2+4+6+8       = 20, odds 1+3+5+7+9     = 25 -> (20, 25) pushed at 10
     *  - 11..19: evens 12+14+16+18   = 60, odds 11+13+15+17+19 = 75 -> (60, 75) pushed at 20
     */
    withIterator(1) { src ⇒
      src.take(20)
        .transform(() ⇒ new SumEvenAndUnevenNumbersCollector(_ % 10 == 0)) // emit every 10 elements
        .runWith(TestSink.probe[(Int, Int)])
        .request(Int.MaxValue)
        .expectNext((20, 25), (60, 75))
        .expectComplete()
    }
  }

  it should "be implemented as a GraphShape" in {
    // as the StatefulStage will be deprecated, let's look at how to handle state in a GraphShape

    class CustomDuplicatorStage[A]() extends GraphStage[FlowShape[A, A]] {

      val in = Inlet[A]("Duplicator.in")
      val out = Outlet[A]("Duplicator.out")

      override def shape: FlowShape[A, A] = FlowShape.of(in, out)

      override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
        // note: all mutable state must be inside the GraphStageLogic
        // holds the element that still has to be emitted a second time
        var lastElem: Option[A] = None

        setHandler(in, new InHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            lastElem = Some(elem)
            push(out, elem) // first copy goes out immediately; the second one on the next pull
          }

          override def onUpstreamFinish(): Unit = {
            // flush the pending duplicate (if any) before completing downstream
            if (lastElem.isDefined) emit(out, lastElem.get)
            complete(out)
          }
        })

        setHandler(out, new OutHandler {
          override def onPull(): Unit = {
            if (lastElem.isDefined) {
              // emit the pending duplicate before requesting a new element
              push(out, lastElem.get)
              lastElem = None
            } else {
              pull(in)
            }
          }
        })
      }
    }

    withIterator(1) { src ⇒
      src.take(2)
        .via(new CustomDuplicatorStage)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(1, 1, 2, 2)
        .expectComplete()
    }
  }
}
160 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/failure/FailureTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.failure
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
22 | import scala.concurrent.Future
23 |
class FailureTest extends TestSpec {
  /** Maps every element to an already-failed Future; used to make a `mapAsync` stage fail the stream. */
  def failedFuture: (Any) ⇒ Future[Nothing] = (_: Any) ⇒ Future.failed(new Throwable("Failure"))

  // NOTE(review): this suite starts with `it should ...` without a preceding subject clause;
  // FlatSpec normally requires a `"subject" should ...` or `behavior of` first — presumably
  // TestSpec provides one; verify.
  it should "fail a stream with a Future.failed" in {
    withIterator() { src ⇒
      src.take(5)
        .mapAsync(1)(failedFuture)
        // no element can ever reach the probe (the element type here is Nothing), so the
        // probe's type parameter is irrelevant; `Any` replaces the misleading `Seq[Int]`
        .runWith(TestSink.probe[Any])
        .request(Int.MaxValue) // Int.MaxValue for consistency with the sibling suites
        .expectError()
    }
  }

  it should "fail the resulting future with a Future.failed" in {
    withIterator() { src ⇒
      // the materialized Future[Done] of runForeach carries the stream failure
      src.take(5).mapAsync(1)(failedFuture).runForeach(_ ⇒ ()).toTry should be a 'failure
    }
  }

  /** Throws synchronously for every element; used to make a stage fail the stream. */
  def throwException = (_: Any) ⇒ throw new RuntimeException("Failure")

  it should "fail a stream when throwing an Exception" in {
    withIterator() { src ⇒
      src.take(5)
        .map(throwException)
        .runWith(TestSink.probe[Any])
        .request(Int.MaxValue)
        .expectError()
    }
  }

  it should "fail the resulting future when throwing an Exception" in {
    withIterator() { src ⇒
      src.take(5).mapAsync(1)(throwException).runForeach(_ ⇒ ()).toTry should be a 'failure
    }
  }
}
61 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/flow/AkkaStreamsTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.flow
18 |
19 | import akka.{ Done, NotUsed }
20 | import akka.stream.scaladsl._
21 | import akka.stream.testkit.scaladsl._
22 | import com.github.dnvriend.streams.util.{ OutputCustomer, InputCustomer }
23 | import com.github.dnvriend.streams.TestSpec
24 |
25 | import scala.collection.immutable
26 | import scala.concurrent.Future
27 |
class AkkaStreamsTest extends TestSpec {
  /**
   * The Source: generates 100 input customers with random first and last names.
   */
  lazy val inputCustomersSource: Source[InputCustomer, NotUsed] = Source((1 to 100).map(_ ⇒ InputCustomer.random()))

  /**
   * The Flow: normalizes an InputCustomer into an OutputCustomer by splitting the name
   * on a single space; customers whose name is not exactly two words are dropped.
   */
  lazy val normalizeFlow: Flow[InputCustomer, OutputCustomer, NotUsed] = Flow[InputCustomer].mapConcat { customer ⇒
    customer.name.split(" ") match {
      case Array(firstName, lastName) ⇒ immutable.Seq(OutputCustomer(firstName, lastName))
      case _                          ⇒ immutable.Seq.empty[OutputCustomer]
    }
  }

  /**
   * The Sink: logs every OutputCustomer via the test logger.
   */
  lazy val writeCustomersSink = Sink.foreach[OutputCustomer] { customer ⇒
    log.info("Customer: {}", customer)
  }

  "The Akka Stream Chain" should "execute normally" in {
    val done: Future[Done] = inputCustomersSource.via(normalizeFlow).runWith(writeCustomersSink)
    done.toTry should be a 'success
  }

  it should "process 100 customers" in {
    // every random customer has a two-word name, so all 100 pass the normalization
    var processed = 0
    val countingSink = Sink.foreach[OutputCustomer] { _ ⇒
      processed += 1
    }
    inputCustomersSource.via(normalizeFlow).runWith(countingSink).toTry should be a 'success
    processed shouldBe 100
  }

  it should "transform a customer" in {
    val firstCustomer = inputCustomersSource
      .via(normalizeFlow)
      .runWith(TestSink.probe[OutputCustomer])
      .request(1)
      .expectNext()
    firstCustomer match {
      case OutputCustomer(_, _) ⇒ // any normalized customer is acceptable
      case u                    ⇒ fail("Unexpected: " + u)
    }
  }

  // Testing Streams
  // see: http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0-RC1/scala/stream-testkit.html
  "Probe Sink" should "be testable" in {
    // A probe sink (TestSubscriber.Probe) gives manual control over demand and
    // lets us assert on the elements that arrive downstream.
    val probe = Source(1 to 4)
      .filter(_ % 2 == 0)
      .map(_ * 2)
      .runWith(TestSink.probe[Int])
    probe
      .request(2)
      .expectNext(4, 8)
      .expectComplete()
  }

  "Probe Source" should "be testable" in {
    // A probe source (TestPublisher.Probe) lets us assert on demand and control
    // when the stream completes or fails.
    val publisher = TestSource.probe[Int]
      .toMat(Sink.cancelled)(Keep.left)
      .run()
    publisher.expectCancellation()
  }

  "Source" should "be created from Range" in {
    val probe = Source(1 to 2)
      .map(identity)
      .runWith(TestSink.probe[Int])
    probe.request(2)
      .expectNext(1, 2)
      .expectComplete()
  }

  it should "be created from a List" in {
    val probe = Source(List(1, 2)).runWith(TestSink.probe[Int])
    probe.request(2)
      .expectNext(1, 2)
      .expectComplete()
  }

  it should "be created from a Vector" in {
    val probe = Source(Vector(1, 2)).runWith(TestSink.probe[Int])
    probe.request(2)
      .expectNext(1, 2)
      .expectComplete()
  }
}
124 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/flow/FlowErrorTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.flow
18 |
19 | import akka.stream.ActorAttributes.supervisionStrategy
20 | import akka.stream.Supervision.resumingDecider
21 | import akka.stream.scaladsl._
22 | import akka.stream.testkit.scaladsl._
23 | import com.github.dnvriend.streams.TestSpec
24 |
25 | import scala.concurrent.Future
26 | import scala.util.{ Failure, Success, Try }
27 |
class FlowErrorTest extends TestSpec {

  // NOTE(review): placeholder test with an empty name and empty body; either implement it
  // (e.g. the commented-out scenario below) or remove it.
  "Error stream" should "" in {
  }

  // it should "stop the stream" in {
  //   Source(Future[String](throw new RuntimeException("Test")))
  //     .withAttributes(supervisionStrategy(resumingDecider))
  //     .map { x => println(x); x }
  //     .runWith(TestSink.probe[String])
  //     .request(1)
  //     .expectError()
  // }

  it should "resume with no result for the failed future" in {
    val t = new RuntimeException("Test")
    Source(List(1, 2, 3))
      .log("before")
      .mapAsync(3) { x ⇒
        Future {
          if (x == 2) throw t else x
        }
      }
      // the resuming decider drops the element whose future failed and continues the stream
      .withAttributes(supervisionStrategy(resumingDecider))
      .log("after")
      .runWith(TestSink.probe[Int])
      .request(4)
      /* it will drop the failed future so no marble there
         (1) (2) (3)
         [ mapAync ]
         (1)    (3)
       */
      .expectNext(1)
      .expectNext(3)
      .expectComplete()
  }

  it should "resume and return results for all values" in {
    val t = new RuntimeException("Test")
    Source(List(1, 2, 3))
      .log("before")
      .mapAsync(1) { x ⇒
        // wrap the result in Try and recover the failure inside the future itself,
        // so the stage never sees a failed future and no supervision is needed
        Future {
          if (x == 2) throw t else Try(x)
        }.recover {
          case t: Throwable ⇒
            Failure(t)
        }
      }
      //      .withAttributes(supervisionStrategy(resumingDecider))
      .log("after")
      .runWith(TestSink.probe[Try[Int]])
      .request(4)
      /* The future will return a Future[Try[T]], which can be recovered
         so all marbles are there
         (1)    (2)    (3)
         [    mapAync     ]
         (S(1)) (F(t)) (S(3))
       */
      .expectNext(Success(1))
      .expectNext(Failure(t))
      .expectNext(Success(3))
      .expectComplete()
  }
}
93 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/flow/OverflowStrategyTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.flow
18 |
19 | import akka.stream.OverflowStrategy
20 | import akka.stream.scaladsl.Source
21 | import com.github.dnvriend.streams.TestSpec
22 |
23 | import scala.concurrent.Future
24 |
class OverflowStrategyTest extends TestSpec {

  // source emitting the numbers 1..10
  val toTenSrc = Source(1 to 10)

  /**
   * Runs the 1..10 source through a buffer with the given overflow strategy and
   * returns the number of elements that reached the sink.
   */
  def overFlowStrategy(overflowStrategy: OverflowStrategy, name: String, buffer: Int = 1): Future[Int] =
    toTenSrc.log(name).buffer(buffer, overflowStrategy).log("after_buffer").runFold(0)((c, _) ⇒ c + 1)

  /**
   * OverflowStrategy.backpressure:
   *
   * If the buffer is full when a new element is available
   * this strategy backpressures the upstream publisher until
   * space becomes available in the buffer.
   *
   * Note: No elements will be dropped
   */
  "OverflowStrategyTest" should "OverflowStrategy.backpressure" in {
    overFlowStrategy(OverflowStrategy.backpressure, "backpressure").futureValue shouldBe 10
  }

  /**
   * OverflowStrategy.dropHead:
   *
   * If the buffer is full when a new element arrives,
   * drops the oldest element from the buffer to make space for
   * the new element.
   *
   * Note: Some elements could be dropped
   */
  it should "OverflowStrategy.dropHead" in {
    overFlowStrategy(OverflowStrategy.dropHead, "dropHead").futureValue should be <= 10
  }

  /**
   * OverflowStrategy.dropTail:
   *
   * If the buffer is full when a new element arrives,
   * drops the youngest element from the buffer to make space for
   * the new element.
   *
   * Note: Some elements could be dropped
   */
  it should "OverflowStrategy.dropTail" in {
    overFlowStrategy(OverflowStrategy.dropTail, "dropTail").futureValue should be <= 10
  }

  /**
   * OverflowStrategy.dropBuffer:
   *
   * If the buffer is full when a new element arrives,
   * drops all the buffered elements to make space for the new element.
   */
  it should "OverflowStrategy.dropBuffer" in {
    overFlowStrategy(OverflowStrategy.dropBuffer, "dropBuffer").futureValue should be <= 10
  }

  /**
   * OverflowStrategy.fail:
   *
   * If the buffer is full when a new element is available
   * this strategy completes the stream with failure.
   */
  it should "OverflowStrategy.fail" in {
    // NOTE(review): a buffer size of 0 is most likely rejected by akka-stream itself with an
    // IllegalArgumentException (a RuntimeException) at graph-construction time, so this test
    // probably passes without ever exercising OverflowStrategy.fail — TODO confirm, and
    // consider buffer = 1 with a slow consumer instead.
    intercept[RuntimeException] {
      overFlowStrategy(OverflowStrategy.fail, "fail", buffer = 0).futureValue
    }
  }
}
93 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/flow/RunnableFlowTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.flow
18 |
19 | import akka.stream.scaladsl._
20 | import akka.{ Done, NotUsed }
21 | import com.github.dnvriend.streams.TestSpec
22 |
23 | import scala.concurrent.Future
24 |
class RunnableFlowTest extends TestSpec {
  /**
   * A Flow can be attached to a Source (yielding a composite source) or prepended to a
   * Sink (yielding a new sink). A stream that has both a source and a sink is represented
   * by the RunnableGraph type: a complete, immutable blueprint that is ready to run.
   *
   * No data flows through a blueprint until it is materialized, i.e. until the resources
   * that execute it (in Akka Streams usually actors) have been allocated.
   *
   * Because blueprints are just immutable descriptions they are thread-safe and freely
   * shareable: one actor can prepare the work and another can materialize it elsewhere.
   */

  "RunnableFlow" should "be defined" in {
    val numbers: Source[Int, NotUsed] = Source(1 to 10)
    val summingSink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)

    // connect the Source to the Sink; Keep.right selects the sink's materialized
    // value (the fold result) as the value of the whole blueprint
    val blueprint: RunnableGraph[Future[Int]] = numbers.toMat(summingSink)(Keep.right)

    // materialize the blueprint: the materializer spins up the actor network that
    // implements the model and hands back the fold sink's Future
    val sum: Future[Int] = blueprint.run()

    // a second, independent pipeline fed by the result of the first
    Source.fromFuture(sum).map(_ * 2).runWith(Sink.foreach(println))

    sum.futureValue shouldBe 55
  }

  /**
   * Materializing a RunnableGraph[T] yields a value of type T. Every processing stage
   * can produce a materialized value; `toMat` together with Keep.left/Keep.right/Keep.both
   * decides which of them the combined stream exposes. In the example above the fold sink
   * materializes a Future holding the result of folding over the stream.
   *
   * `runWith` is a convenience: a Sink runs with a supplied Source, a Source with a
   * supplied Sink, and a Flow with both — keeping the supplied stage's materialized value.
   */

  /**
   * The Source and Sink companion objects offer many ways to construct stream endpoints;
   * the tests below show some of the most useful ones.
   */

  "Sources" should "be created" in {
    // from an Iterable
    val fromIterable: Source[Int, NotUsed] = Source(List(1, 2, 3))

    // from a Future
    val fromFuture: Source[String, NotUsed] = Source.fromFuture(Future.successful("Hello Streams!"))

    // from a single element
    val singleElement: Source[String, NotUsed] = Source.single("only one element")

    // an empty source
    val emptySource: Source[String, NotUsed] = Source.empty[String]
  }

  "Sinks" should "be created" in {
    // folds over the stream; the final result is the materialized Future
    val foldingSink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)

    // materializes a Future holding the first element of the stream
    val headSink: Sink[Int, Future[Int]] = Sink.head[Int]

    // consumes the stream, discarding every element
    val ignoringSink: Sink[Any, Future[Done]] = Sink.ignore

    // runs a side effect for every element
    val printingSink: Sink[String, Future[Done]] = Sink.foreach[String](println(_))
  }

  /**
   * There are several equivalent ways to wire the parts of a stream together.
   */

  "Streams" should "be wired up from different parts" in {
    // explicitly connect a Source, a Flow and a Sink
    val explicitlyWired: RunnableGraph[NotUsed] =
      Source(1 to 6)
        .via(Flow[Int].map(_ * 2))
        .to(Sink.foreach(println(_)))

    // start from a Source with the transformation already attached
    val doubledSource = Source(1 to 6).map(_ * 2)
    val fromSource: RunnableGraph[NotUsed] =
      doubledSource.to(Sink.foreach(println(_)))

    // start from a Sink that already contains the transformation
    val doublingSink: Sink[Int, NotUsed] = Flow[Int].map(_ * 2).to(Sink.foreach(println(_)))
    val fromSink: RunnableGraph[NotUsed] =
      Source(1 to 6).to(doublingSink)
  }

}
155 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/flow/SimpleFlowTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.flow
18 |
19 | import akka.stream.{ OverflowStrategy, SourceShape }
20 | import akka.stream.scaladsl.{ Concat, Framing, GraphDSL, Merge, RunnableGraph, Sink, Source }
21 | import akka.util.ByteString
22 | import com.github.dnvriend.streams.{ Person, TestSpec }
23 | import com.github.dnvriend.streams.flow.SimpleFlowTest.StarWars
24 | import play.api.libs.json.Json
25 |
26 | import scala.collection.immutable.{ Iterable, Seq }
27 | import scala.concurrent.Future
28 |
object SimpleFlowTest {

  // Simple first/last name pair used by the csv-parsing test in SimpleFlowTest.
  final case class StarWars(first: String, last: String)

}
34 |
class SimpleFlowTest extends TestSpec {

  it should "mapAsync with odd number of parallelism" in {
    // parallelism (5) larger than the number of elements (3) is perfectly fine
    Source(1 to 3).mapAsync(5)(i ⇒ Future(i * 2))
      .runWith(Sink.seq).futureValue shouldBe Seq(2, 4, 6)
  }

  it should "zip with an index" in {
    // statefulMapConcat: the outer function runs once per materialization and may
    // close over mutable state; here it pairs every element with a 1-based index
    Source(Seq("a", "b")).statefulMapConcat { () ⇒
      var index = 0L
      def next: Long = {
        index += 1
        index
      }
      (string) ⇒ Iterable((string, next))
    }.take(10).runWith(Sink.seq).futureValue shouldBe Seq(("a", 1), ("b", 2))

    // the same effect, expressed by zipping with an incrementing iterator source
    Source(List("a", "b", "c"))
      .zip(Source.fromIterator(() ⇒ Iterator from 1))
      .runWith(Sink.seq).futureValue shouldBe Seq(("a", 1), ("b", 2), ("c", 3))
  }

  it should "emit only odd numbers" in {
    // `next` increments first and skips even counter values, so the emitted indices are
    // the odd numbers 3, 5, 7, ... (the counter starts at 1).
    // Fixed: the original version only ran `runForeach(println)` and ignored the resulting
    // Future, so the test asserted nothing; collect the elements and assert on them instead.
    Source.fromIterator(() ⇒ Iterator from 0).statefulMapConcat { () ⇒
      var index = 1L
      def next: Long = {
        index += 1L
        if (index % 2 != 0) index else next
      }
      (elem) ⇒ Iterable((elem, next))
    }.take(10).runWith(Sink.seq).futureValue shouldBe Seq(
      (0, 3L), (1, 5L), (2, 7L), (3, 9L), (4, 11L),
      (5, 13L), (6, 15L), (7, 17L), (8, 19L), (9, 21L)
    )
  }

  it should "create tuples" in {
    // (head, second element) of every inner list
    Source(List(List("a", "b"), List("c", "d")))
      .flatMapConcat { xs ⇒
        Source(xs).take(1).zip(Source(xs).drop(1))
      }.runWith(Sink.seq).futureValue shouldBe Seq(("a", "b"), ("c", "d"))
  }

  it should "parse some csv from the classpath" in withByteStringSource("csv/starwars.csv") { src ⇒
    src.via(Framing.delimiter(ByteString("\n"), Int.MaxValue)) // Int.MaxValue for consistency with the sibling suites
      .map(_.utf8String)
      .drop(1) // skip the csv header line
      .map(_.split(",").toList)
      .flatMapConcat { xs ⇒
        Source(xs).take(1).zip(Source(xs).drop(1))
      }.map(StarWars.tupled)
      .runWith(Sink.seq).futureValue shouldBe Seq(
        StarWars("darth", "vader"),
        StarWars("leia", "organa"),
        StarWars("luke", "skywalker"),
        StarWars("han", "solo"),
        StarWars("boba", "fett"),
        StarWars("obi-wan", "kenobi"),
        StarWars("darth", "maul"),
        StarWars("darth", "sidious"),
        StarWars("padme", "amidala"),
        StarWars("lando", "calrissian"),
        StarWars("mace", "windu")
      )
  }

  it should "concat" in {
    Source(List(1, 2)).concat(Source(List(3, 4)))
      .runWith(Sink.seq).futureValue shouldBe Seq(1, 2, 3, 4)
  }

  it should "merge" in {
    // NOTE: despite the test name this uses Concat, which emits the entire first source
    // before the second; the infinite Source.repeat is truncated by take(4)
    Source.fromGraph(GraphDSL.create() { implicit b ⇒
      import GraphDSL.Implicits._
      val merge = b.add(Concat[Int](2))
      Source.single(1) ~> merge
      Source.repeat(5) ~> merge
      SourceShape(merge.out)
    }).take(4).runWith(Sink.seq).futureValue shouldBe Seq(1, 5, 5, 5)

    // the same wiring without the GraphDSL
    Source.single(1).concat(Source.repeat(5))
      .take(4).runWith(Sink.seq).futureValue shouldBe Seq(1, 5, 5, 5)
  }

  it should "unfold" in {
    import scala.concurrent.duration._
    // every tick expands to the finite sequence 1, 2, 3 produced by unfold
    Source.tick(0.seconds, 500.millis, 0).flatMapConcat { _ ⇒
      Source.unfold(0) { (e) ⇒
        val next = e + 1
        if (next > 3) None else Some((next, next))
      }
    }.take(6).runWith(Sink.seq).futureValue shouldBe Seq(1, 2, 3, 1, 2, 3)
  }
}
128 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/graph/FlowTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.graph
18 |
19 | import akka.stream.ClosedShape
20 | import akka.stream.scaladsl._
21 | import akka.{ Done, NotUsed }
22 | import com.github.dnvriend.streams.TestSpec
23 |
24 | import scala.concurrent.Future
25 |
class FlowTest extends TestSpec {

  /**
   * Topology exercised by the first test (fan-out, then fan-in):
   *
   * / -- f2 --\
   * (in) --- (bcast) (merge) -- f3 -- (out)
   * | -- f4 -- /
   */

  // completes when the stream finishes, discarding all elements
  val ignoreSink: Sink[Int, Future[Done]] = Sink.ignore
  // materializes the first element that arrives at the sink
  val resultSink: Sink[Int, Future[Int]] = Sink.head[Int]
  // counts how many elements arrive at the sink
  val foldSink: Sink[AnyRef, Future[Long]] = Sink.fold(0L) { (c, _) ⇒ c + 1 }
  // single-element source: emits just the number 1
  val in: Source[Int, NotUsed] = Source(1 to 1)

  "SimpleFlow" should "receive single scalar number" in {
    // the graph's materialized value is that of `resultSink` (a Future[Int])
    val g = RunnableGraph.fromGraph(
      GraphDSL.create(resultSink) { implicit builder: GraphDSL.Builder[Future[Int]] ⇒ out ⇒
        import GraphDSL.Implicits._
        val bcast = builder.add(Broadcast[Int](2))
        val merge = builder.add(Merge[Int](2))

        val f1 = Flow[Int].map(_ + 10).log("f1")
        val f2 = Flow[Int].map(_ + 20).log("f2")
        val f3 = Flow[Int].map(_ + 30).log("f3")
        val f4 = Flow[Int].map(_ + 40).log("f4")

        in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
        bcast ~> f4 ~> merge
        ClosedShape
      }
    )
    // Sink.head keeps only the first element reaching `out`:
    // 1 + 10 (f1) + 20 (f2 branch arrives first) + 30 (f3) = 61
    g.run().futureValue shouldBe 61
  }

  "Graphs" should "return materialized values when a component has been added to it" in {
    // NOTE(review): the graphs below are constructed but never run; this test only
    // demonstrates how the materialized-value type grows with each added component.

    // no component passed to create(): the graph materializes NotUsed
    val xx: RunnableGraph[NotUsed] = RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
      import GraphDSL.Implicits._
      val src = Source(1 to 5)
      val snk = Sink.ignore
      src ~> snk
      ClosedShape
    })

    // one component passed in: the graph materializes that sink's Future[Done]
    val snk = Sink.ignore
    val xy: RunnableGraph[Future[Done]] = RunnableGraph.fromGraph(GraphDSL.create(snk) { implicit b ⇒ (snk) ⇒
      import GraphDSL.Implicits._
      val src = Source(1 to 5)
      src ~> snk
      ClosedShape
    })

    // two components combined with Keep.both: a pair of futures
    val snk2 = Sink.ignore
    val xz: RunnableGraph[(Future[Done], Future[Done])] = RunnableGraph.fromGraph(GraphDSL.create(snk, snk2)(Keep.both) { implicit b ⇒ (s, s2) ⇒
      import GraphDSL.Implicits._
      val src = Source(1 to 5)
      val bcast = b.add(Broadcast[Int](2, false))
      src ~> bcast
      bcast ~> s
      bcast ~> s2
      ClosedShape
    })

    // three components combined with an explicit tupling function
    val snk3 = Sink.ignore
    val zz: RunnableGraph[(Future[Done], Future[Done], Future[Done])] = RunnableGraph.fromGraph(GraphDSL.create(snk, snk2, snk3)((_, _, _)) { implicit b ⇒ (s, s2, s3) ⇒
      import GraphDSL.Implicits._
      val src = Source(1 to 5)
      val bcast = b.add(Broadcast[Int](3, false))
      src ~> bcast
      bcast ~> s
      bcast ~> s2
      bcast ~> s3
      ClosedShape
    })

    // and so on, and so forth...
  }
}
102 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/http/StreamingClient.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.http
18 |
19 | import akka.actor.ActorSystem
20 | import akka.http.scaladsl.Http
21 | import akka.http.scaladsl.model.HttpMethods._
22 | import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
23 | import akka.stream.Materializer
24 | import akka.stream.scaladsl._
25 |
26 | import scala.concurrent.Future
27 |
object StreamingClient {
  /**
   * Sends a single GET request for `path` over a freshly opened outgoing
   * connection to `host:port` and returns the first (and only) response.
   */
  def doGet(host: String, port: Int, path: String)(implicit system: ActorSystem, mat: Materializer): Future[HttpResponse] = {
    val request = HttpRequest(GET, uri = path)
    val connection: Flow[HttpRequest, HttpResponse, Future[Http.OutgoingConnection]] = Http().outgoingConnection(host, port)
    Source.single(request).via(connection).runWith(Sink.head[HttpResponse])
  }
}
35 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/http/StreamingClientTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.http
18 |
19 | import com.github.dnvriend.streams.TestSpec
20 |
21 | // see: https://github.com/abrighton/akka-http-test/blob/master/src/main/scala/akkahttp/test/TestClient.scala
class StreamingClientTest extends TestSpec {

  "StreamingClient" should "Get from google" in {
    // NOTE(review): this test depends on live network access to www.google.com.
    // FIX: the original only printed the response and asserted nothing, so it
    // could never fail. Google may answer 200 or a redirect depending on region,
    // so any success or redirection status is accepted.
    val resp = StreamingClient.doGet("www.google.com", 80, "/").futureValue
    println(resp)
    (resp.status.isSuccess() || resp.status.isRedirection()) shouldBe true
  }

}
30 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/io/FileIOTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.io
18 |
19 | import akka.stream.scaladsl.{ Sink, Source }
20 | import com.github.dnvriend.streams.TestSpec
21 |
class FileIOTest extends TestSpec {
  // Closed set of events an import process can emit.
  trait Foo
  case class ImportStarted(fileName: String, processId: String) extends Foo
  case class ImportFinished(a: String = "") extends Foo
  case class ImportFailed(t: Throwable) extends Foo
  case class NestedType2(a: String = "") extends Foo
  case class NestedType1(b: String = "") extends Foo
  case class RootType(c: String = "") extends Foo

  case class ImportFileCommand(processId: String = "abcdefg", fileName: String = "fileName.xml")
  it should "import" in {
    // import process

    // Fake unmarshaller: emits a fixed sequence of events for one file.
    def unmarshaller(fileName: String, processId: String) =
      Source(List(ImportStarted(fileName, processId), NestedType2(), NestedType1(), RootType(), ImportFinished()))

    Source(List.fill(1)(ImportFileCommand()))
      .flatMapConcat { cmd ⇒
        unmarshaller(cmd.fileName, cmd.processId)
          .map {
            // uncomment the next case to simulate a failure halfway through the import
            // case _: NestedType2 ⇒ throw new RuntimeException("error")
            case e ⇒ e
          }
      }
      .recover {
        // recover turns a stream failure into a terminal ImportFailed element
        case t: Throwable ⇒ ImportFailed(t)
      }
      .runWith(Sink.seq).futureValue should matchPattern {
      // failure scenario: the stream is truncated right after the failing element
      case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), ImportFailed(_)) ⇒
      // happy path: all five events arrive in order
      case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), NestedType1(_), RootType(_), ImportFinished(_)) ⇒
    }
  }
}
55 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/nesting/FlatteningStagesTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.nesting
18 |
19 | import akka.stream.scaladsl.Source
20 | import com.github.dnvriend.streams.TestSpec
21 |
22 | import scala.concurrent.Future
23 |
class FlatteningStagesTest extends TestSpec {

  // flatMapConcat materializes one sub-stream per element and emits their
  // outputs strictly in order, so 1, 2, 3 become 2, 3, 4.
  it should "flatten and concat all sub-streams and output the result" in withIterator(1) { src ⇒
    src.take(3)
      .flatMapConcat(element ⇒ Source.fromFuture(Future(element)).map(_ + 1))
      .testProbe { probe ⇒
        probe.request(Long.MaxValue)
        probe.expectNext(2, 3, 4)
        probe.expectComplete()
      }
  }
}
36 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/sink/ActorRefWithAckTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.sink
18 |
19 | import akka.actor.{ Actor, ActorRef, Props }
20 | import akka.stream.scaladsl.{ Sink, Source }
21 | import akka.stream.testkit.TestPublisher
22 | import akka.stream.testkit.scaladsl.TestSource
23 | import akka.testkit.TestProbe
24 | import com.github.dnvriend.streams.TestSpec
25 | import scala.concurrent.duration._
26 |
27 | import scala.reflect.ClassTag
28 |
29 | // see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
// see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
object ActorRefWithAckTest {
  final val InitMessage = "start"
  final val CompleteMessage = "done"
  final val AckMessage = "ack"

  /**
   * Relays every message it receives to `ref`. The init message and payload
   * elements are acknowledged back to the sender so the ack-based sink keeps
   * pushing; the completion message is relayed without an ack because nothing
   * more will follow it.
   */
  class Forwarder(ref: ActorRef) extends Actor {
    def receive = {
      case msg @ `CompleteMessage` ⇒
        ref forward msg
      case msg ⇒ // InitMessage and payload elements: ack, then relay
        sender() ! AckMessage
        ref forward msg
    }
  }
}
48 |
class ActorRefWithAckTest extends TestSpec {
  import ActorRefWithAckTest._
  // Creates an actor of runtime class A, passing the probe's ref as its constructor argument.
  def createActor[A: ClassTag](testProbeRef: ActorRef): ActorRef =
    system.actorOf(Props(implicitly[ClassTag[A]].runtimeClass, testProbeRef))

  // Runs a finite stream of `xs` into an ack-based actor sink backed by a Forwarder,
  // then hands the observing TestProbe to `f`; the forwarder is always stopped afterwards.
  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    Source(xs.toList).runWith(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage))
    try f(tp) finally killActors(ref)
  }

  // Same as withForwarder, but elements are pushed manually through a TestPublisher probe.
  def withTestPublisher[A](f: (TestPublisher.Probe[A], TestProbe, ActorRef) ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    val pub: TestPublisher.Probe[A] = TestSource.probe[A].to(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage)).run()
    try f(pub, tp, ref) finally killActors(ref)
  }

  it should "send the elements to the ActorRef" in {
    // which means that the forwarder actor that acts as a sink
    // will initially receive an InitMessage
    // next it will receive each `payload` element, here 1, 2 and 3,
    // finally the forwarder will receive the CompletedMessage, stating that
    // the producer completes the stream because there are no more elements (a finite stream)
    withForwarder(1, 2, 3) { tp ⇒
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      tp.expectMsg(2)
      tp.expectMsg(3)
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "send the elements to the ActorRef manually 1, 2 and 3" in {
    withTestPublisher[Int] { (pub, tp, _) ⇒
      // the sink sends InitMessage before the first element arrives
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)

      pub.sendNext(2)
      tp.expectMsg(2)

      pub.sendNext(3)
      tp.expectMsg(3)

      // completing the publisher makes the sink emit CompleteMessage
      pub.sendComplete()
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "cancel stream when actor terminates" in {
    withTestPublisher[Int] { (pub, tp, ref) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      // stopping the sink actor must propagate upstream as a cancellation
      killActors(ref)
      pub.expectCancellation()
    }
  }
}
112 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/sink/ActorSubscriberTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.sink
18 |
19 | import akka.Done
20 | import akka.actor.Actor.Receive
21 | import akka.actor.{ ActorRef, Props }
22 | import akka.event.LoggingReceive
23 | import akka.stream.actor.ActorSubscriberMessage.{ OnComplete, OnError, OnNext }
24 | import akka.stream.actor.{ ActorSubscriber, OneByOneRequestStrategy, RequestStrategy }
25 | import akka.stream.scaladsl.{ Sink, Source }
26 | import akka.stream.testkit.TestPublisher
27 | import akka.stream.testkit.scaladsl.TestSource
28 | import akka.testkit.TestProbe
29 | import com.github.dnvriend.streams.TestSpec
30 | import com.github.dnvriend.streams.sink.ActorSubscriberTest.TestActorSubscriber
31 |
32 | import scala.concurrent.Future
33 | import scala.reflect.ClassTag
34 |
object ActorSubscriberTest {
  final val OnNextMessage = "onNext"
  final val OnCompleteMessage = "onComplete"
  final val OnErrorMessage = "onError"

  /**
   * ActorSubscriber that requests elements one at a time and translates each
   * stream signal into a plain string message sent to `ref`.
   */
  class TestActorSubscriber(ref: ActorRef) extends ActorSubscriber {
    override protected val requestStrategy: RequestStrategy = OneByOneRequestStrategy
    override def receive: Receive = LoggingReceive {
      case OnNext(_)  ⇒ ref ! OnNextMessage
      case OnComplete ⇒ ref ! OnCompleteMessage
      case OnError(_) ⇒ ref ! OnErrorMessage
    }
  }
}
49 |
50 | //class ActorSubscriberTest extends TestSpec {
51 | // def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
52 | // val tp = TestProbe()
53 | // val ref = new TestActorSubscriber(tp.ref)
54 | // Source(xs.toList).to(Sink.actorSubscriber(Props())).mapMaterializedValue(_ ⇒ Future.successful[Done]).run()
55 | // try f(tp) finally killActors(ref)
56 | // }
57 | //
58 | //}
59 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/source/FailedSource.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.source
18 |
19 | import akka.Done
20 | import akka.stream.OverflowStrategy
21 | import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
22 | import com.github.dnvriend.streams.TestSpec
23 |
24 | import scala.concurrent.Future
25 | import scala.concurrent.duration._
26 | import scala.collection.immutable._
27 |
class FailedSource extends TestSpec {
  it should "fail the stream" in {
    // Source.failed emits no elements and immediately fails the stream
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    // materializes both the queue (to drive the source) and the sink's completion future
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) = Source.queue[Int](1, OverflowStrategy.dropNew)
      .toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    // "stop" is turned into a failing sub-stream; any other element passes through.
    // NOTE(review): the `Future[Done]` ascription here is a pattern type-test that is
    // erased at runtime — Sink.seq actually materializes a Future[Seq[String]], which is
    // why `done.futureValue shouldBe List(...)` below works. Consider fixing the type.
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    // NOTE(review): Thread.sleep as a start-up wait is a test smell — it adds 3s per
    // test; offers are presumably already safe right after run() — verify and remove.
    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    // same pipeline as above, but this time the failing "stop" element is offered,
    // so the materialized future completes with a failure
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    // NOTE(review): another 3s sleep used as a crude start-up wait — verify and remove
    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }

}
68 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/source/QueueSourceTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.source
18 |
19 | import akka.stream.OverflowStrategy
20 | import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
21 | import com.github.dnvriend.streams.TestSpec
22 |
23 | import scala.collection.immutable._
24 | import scala.concurrent.Future
25 |
class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    // Materialize a queue-backed source together with a Sink.seq that
    // collects everything flowing through the stream.
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    // offer the three elements; each offer must be accepted by the queue
    for (element ← List("a", "b", "c")) {
      queue.offer(element).toTry should be a 'success
    }

    // completing the queue ends the stream, which completes the sink's future
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // the sink preserves the insertion order of the offered elements
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
}
44 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/source/SourceTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.source
18 |
19 | import akka.stream.scaladsl.{ Concat, Merge, Sink, Source }
20 | import com.github.dnvriend.streams.TestSpec
21 |
class SourceTest extends TestSpec {
  // local alias so assertions always mean the immutable Seq
  type Seq[A] = scala.collection.immutable.Seq[A]

  // Takes multiple streams and outputs one stream formed from the input streams
  // by first emitting all of the elements from the first stream and then emitting
  // all of the elements from the second stream, etc.
  it should "concat three sources" in {
    Source.combine(
      Source.single("Hello"),
      Source.repeat("World").take(5),
      Source.single("!")
    )(Concat(_)).runWith(Sink.seq)
      .futureValue shouldBe Seq("Hello", "World", "World", "World", "World", "World", "!")
  }

  // Merge several streams, taking elements as they arrive from input streams
  // picking randomly when several have elements ready
  it should "merge three sources" in {
    val xs = Source.combine(
      Source.single("Hello"),
      Source.repeat("World").take(5),
      Source.single("!")
    )(Merge(_)).runWith(Sink.seq)
      .futureValue
    // merge order is nondeterministic, so only element counts are asserted
    xs should contain allOf ("Hello", "!", "World")
    xs.count(_ == "World") shouldBe 5
    xs.count(_ == "!") shouldBe 1
    xs.count(_ == "Hello") shouldBe 1
  }

  it should "emit a single element" in {
    // no failure occurs, so recover never fires and "Foo" passes through
    Source.single("Foo")
      .recover { case t: Throwable ⇒ "Bar" }
      .runWith(Sink.seq).futureValue shouldBe Seq("Foo")
  }

  it should "recover from an exception after materialization" in {
    // the iterator is only evaluated while the stream runs, so the throw becomes
    // a stream failure that recover can replace with "Bar"
    Source.fromIterator(() ⇒ Iterator(throw new RuntimeException("")))
      .recover { case t: Throwable ⇒ "Bar" }
      .runWith(Sink.seq).futureValue shouldBe Seq("Bar")
  }

  it should "recover a failed stream when exception in a stage" in {
    // the map stage fails the stream; recover emits the fallback element
    Source.single("Foo")
      .map { e ⇒ throw new RuntimeException(""); e }
      .recover { case t: Throwable ⇒ "Bar" }
      .runWith(Sink.seq).futureValue shouldBe Seq("Bar")
  }

  // cause, exception gets evaluated before materialization of the graph
  it should "not recover from an exception in the source" in {
    intercept[RuntimeException] { // thus an exception is thrown up the stack; the exception is not propagated
      Source.single(throw new RuntimeException(""))
        .recover { case t: Throwable ⇒ "Bar" }
        .runWith(Sink.seq).toTry should be a 'failure
    }
  }

  //  it should "do a map-reduce operation just like Hadoop or Spark" in {
  //    def substr(xs: Array[String]) =
  //      Source(xs.toList)
  //        .groupBy(1000, identity)
  //        .map(_ → 1)
  //        .reduce((l, r) ⇒ (l._1, l._2 + r._2))
  //        .mergeSubstreams
  //    //
  //    Source.single("Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, 'and what is the use of a book,' thought Alice 'without pictures or conversations?'\nSo she was considering in her own mind (as well as she could, for the hot day made her feel very sleepy and stupid), whether the pleasure of making a daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly a White Rabbit with pink eyes ran close by her.\nThere was nothing so very remarkable in that; nor did Alice think it so very much out of the way to hear the Rabbit say to itself, 'Oh dear! Oh dear! I shall be late!' (when she thought it over afterwards, it occurred to her that she ought to have wondered at this, but at the time it all seemed quite natural); but when the Rabbit actually took a watch out of its waistcoat-pocket, and looked at it, and then hurried on, Alice started to her feet, for it flashed across her mind that she had never before seen a rabbit with either a waistcoat-pocket, or a watch to take out of it, and burning with curiosity, she ran across the field after it, and fortunately was just in time to see it pop down a large rabbit-hole under the hedge.\nIn another moment down went Alice after it, never once considering how in the world she was to get out again.\nThe rabbit-hole went straight on like a tunnel for some way, and then dipped suddenly down, so suddenly that Alice had not a moment to think about stopping herself before she found herself falling down a very deep well.\nEither the well was very deep, or she fell very slowly, for she had plenty of time as she went down to look about her and to wonder what was going to happen next. First, she tried to look down and make out what she was coming to, but it was too dark to see anything; then she looked at the sides of the well, and noticed that they were filled with cupboards and book-shelves; here and there she saw maps and pictures hung upon pegs. She took down a jar from one of the shelves as she passed; it was labelled 'ORANGE MARMALADE', but to her great disappointment it was empty: she did not like to drop the jar for fear of killing somebody, so managed to put it into one of the cupboards as she fell past it.\n'Well!' thought Alice to herself, 'after such a fall as this, I shall think nothing of tumbling down stairs! How brave they'll all think me at home! Why, I wouldn't say anything about it, even if I fell off the top of the house!' (Which was very likely true.)\nDown, down, down. Would the fall never come to an end! 'I wonder how many miles I've fallen by this time?' she said aloud. 'I must be getting somewhere near the centre of the earth. Let me see: that would be four thousand miles down, I think—' (for, you see, Alice had learnt several things of this sort in her lessons in the schoolroom, and though this was not a very good opportunity for showing off her knowledge, as there was no one to listen to her, still it was good practice to say it over) '—yes, that's about the right distance—but then I wonder what Latitude or Longitude I've got to?' (Alice had no idea what Latitude was, or Longitude either, but thought they were nice grand words to say.)\nPresently she began again. 'I wonder if I shall fall right through the earth! How funny it'll seem to come out among the people that walk with their heads downward! The Antipathies, I think—' (she was rather glad there was no one listening, this time, as it didn't sound at all the right word) '—but I shall have to ask them what the name of the country is, you know. Please, Ma'am, is this New Zealand or Australia?' (and she tried to curtsey as she spoke—fancy curtseying as you're falling through the air! Do you think you could manage it?) 'And what an ignorant little girl she'll think me for asking! No, it'll never do to ask: perhaps I shall see it written up somewhere.'")
  //      .map(_.split(" "))
  //      .flatMapConcat(xs ⇒ substr(xs))
  //      .filter { case (k, v) ⇒ k == "Alice" } // get Alice
  //      .runWith(Sink.seq)
  //      .futureValue shouldBe ""
  //  }
}
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/async/MapAsyncStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.async
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
22 | import scala.concurrent.Future
23 |
class MapAsyncStageTest extends TestSpec {
  /**
   * `mapAsync` transforms the stream by applying the given function to every element
   * as it passes through this stage. The function returns a `Future`, and the value of
   * that Future is emitted downstream. The first argument of `mapAsync` sets how many
   * Futures may run in parallel; although they may complete in any order, elements are
   * emitted downstream in the same order they were received from upstream.
   *
   * If the mapping function throws, or the `Future` completes with a failure, the
   * supervision decision determines the outcome: [[akka.stream.Supervision.Stop]]
   * fails the stream, while [[akka.stream.Supervision.Resume]] and
   * [[akka.stream.Supervision.Restart]] drop the element and let the stream continue.
   *
   * - Emits when: the Future returned for the next element in sequence completes
   * - Backpressures when: the parallelism limit is reached and downstream
   *   backpressures, or the first Future has not yet completed
   * - Completes when: upstream completes, all Futures have completed and all elements
   *   have been emitted
   * - Cancels when: downstream cancels
   */

  "MapAsync" should "transform the stream by applying the function to each element" in {
    withIterator() { source =>
      source
        .take(3)
        .mapAsync(2)(element => Future(element * 2))
        .runWith(TestSink.probe[Int])
        .request(4)
        .expectNext(0, 2, 4)
        .expectComplete()
    }
  }
}
61 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/async/MapAsyncUnorderedStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.async
18 |
19 | import akka.stream.scaladsl.Source
20 | import akka.stream.testkit.scaladsl.TestSink
21 | import com.github.dnvriend.streams.TestSpec
22 |
23 | import scala.concurrent.Future
24 |
class MapAsyncUnorderedStageTest extends TestSpec {
  /**
   * `mapAsyncUnordered` transforms the stream by applying the given function to every
   * element as it passes through this stage. The function returns a `Future`, and the
   * value of that Future is emitted downstream.
   *
   * As many Futures as there are requested elements from downstream may run in
   * parallel, and each result is emitted downstream as soon as it is ready — so
   * elements may arrive downstream in a different order than they were received
   * from upstream.
   *
   * If the mapping function throws, or the `Future` completes with a failure, the
   * supervision decision determines the outcome: [[akka.stream.Supervision.Stop]]
   * fails the stream, while [[akka.stream.Supervision.Resume]] and
   * [[akka.stream.Supervision.Restart]] drop the element and let the stream continue.
   *
   * - Emits when: any of the Futures returned by the provided function completes
   * - Backpressures when: the parallelism limit is reached and downstream backpressures
   * - Completes when: upstream completes, all Futures have completed and all elements
   *   have been emitted
   * - Cancels when: downstream cancels
   */

  "MapAsyncUnordered" should "transform the stream by applying the function to each element" in {
    withIterator() { source =>
      source
        .take(10)
        .mapAsyncUnordered(4)(element => Future(element * 2))
        .runWith(TestSink.probe[Int])
        .request(11)
        .expectNextUnordered(0, 2, 4, 6, 8, 10, 12, 14, 16, 18)
        .expectComplete()
    }
  }
}
60 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/fanout/BroadcastStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.fanout
18 |
19 | import com.github.dnvriend.streams.TestSpec
20 |
class BroadcastStageTest extends TestSpec {
  // TODO(review): empty placeholder — no tests have been written yet for the
  // Broadcast fan-out stage. Either add specs (e.g. using GraphDSL with a
  // Broadcast junction and two TestSink probes) or remove this stub.
}
24 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/CollectStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class CollectStageTest extends TestSpec {
  /**
   * `collect` applies the given partial function to each element for which the
   * function is defined as the element passes through this stage; elements for
   * which the partial function is not defined are filtered out.
   *
   * - Emits when: the provided partial function is defined for the element
   * - Backpressures when: the partial function is defined for the element and
   *   downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  it should "emit only elements on which the partial function is defined" in {
    withIterator() { source =>
      source
        .take(10)
        .collect { case n if n < 5 => n }
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 1, 2, 3, 4)
        .expectComplete()
    }
  }

  it should "transform the stream by applying the partial function" in {
    withIterator() { source =>
      source
        .take(10)
        .collect {
          // elements >= 8 are not covered by the partial function and are dropped
          case n if n < 5            => n.toString
          case n if n >= 5 && n < 8  => (n * 2).toString
        }
        .runWith(TestSink.probe[String])
        .request(Int.MaxValue)
        .expectNext("0", "1", "2", "3", "4", "10", "12", "14")
        .expectComplete()
    }
  }

  it should "transform the stream by applying the partial function for each element" in {
    withIterator() { source =>
      source
        .take(10)
        .collect {
          // total function: the wildcard case makes every element pass through
          case n if n < 5            => n.toString
          case n if n >= 5 && n < 8  => (n * 2).toString
          case _                     => "UNKNOWN"
        }
        .runWith(TestSink.probe[String])
        .request(Int.MaxValue)
        .expectNext("0", "1", "2", "3", "4", "10", "12", "14", "UNKNOWN", "UNKNOWN")
        .expectComplete()
    }
  }
}
76 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/DropStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class DropStageTest extends TestSpec {
  /**
   * `drop` discards the given number of elements at the beginning of the stream;
   * if `n` is zero or negative, no elements are dropped.
   *
   * - Emits when: the specified number of elements has already been dropped
   * - Backpressures when: the specified number of elements has been dropped and
   *   downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "Drop" should "discard the given number of elements at the beginning of the stream" in {
    withIterator() { source =>
      source
        .take(10)
        .drop(5)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(5, 6, 7, 8, 9)
        .expectComplete()
    }
  }
}
44 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/DropWhileStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class DropWhileStageTest extends TestSpec {
  /**
   * `dropWhile` discards elements at the beginning of the stream while the predicate
   * holds; once the predicate returns false for the first time, every subsequent
   * element is passed through.
   *
   * - Emits when: the predicate has returned false, and for all following elements
   * - Backpressures when: the predicate has returned false and downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "DropWhile" should "discard elements while the predicate is true, else it emits elements" in {
    withIterator() { source =>
      source
        .take(10)
        .dropWhile(n => n < 5)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(5, 6, 7, 8, 9)
        .expectComplete()
    }
  }
}
45 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/FilterStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class FilterStageTest extends TestSpec {
  /**
   * `filter` only passes on those elements that satisfy the given predicate.
   *
   * - Emits when: the given predicate returns true for the element
   * - Backpressures when: the given predicate returns true for the element and
   *   downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "Filter a sequence of numbers for even numbers" should "emit only even numbers" in {
    withIterator() { source =>
      source
        .take(10)
        .filter(n => n % 2 == 0)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 2, 4, 6, 8)
        .expectComplete()
    }
  }
}
43 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/FoldStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class FoldStageTest extends TestSpec {
  /**
   * `fold` is similar to `scan`, but emits its single result only when upstream
   * completes, after which the stage itself completes. It applies the given function
   * to the current accumulator and the next value, yielding the next accumulator.
   *
   * If the function throws and the supervision decision is
   * [[akka.stream.Supervision.Restart]], the accumulator restarts at `zero` and the
   * stream continues.
   *
   * - Emits when: upstream completes
   * - Backpressures when: downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "Fold" should "emit only an element when the upstream completes" in {
    withIterator() { source =>
      source
        .take(4)
        .fold(0)((count, _) => count + 1) // counts the elements; ignores their values
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(4)
        .expectComplete()
    }
  }
}
49 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/GroupByTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import com.github.dnvriend.streams.TestSpec
20 | import org.scalatest.Ignore
21 |
@Ignore
class GroupByTest extends TestSpec {
  // NOTE(review): work in progress — the sketch below partitions the first ten
  // elements into three key groups via `groupBy`, but the trailing `.map()` call
  // is incomplete (no argument), so the code is commented out and the whole suite
  // is @Ignore'd until the test is finished.
  // it should "partition incoming elements" in {
  //   withIterator() { src ⇒
  //     src
  //       .take(10)
  //       .groupBy(10, {
  //         case e if e < 2 ⇒ "lt2"
  //         case e if e >= 2 && e < 5 ⇒ "gtoe2orlt5"
  //         case e ⇒ "gtoe5"
  //       })
  //       .map()
  //   }
  // }
}
37 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/GroupedStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class GroupedStageTest extends TestSpec {

  /**
   * `grouped` chunks up the stream into groups of the given size; the last group may
   * be smaller than requested because of end-of-stream.
   *
   * `n` must be positive, otherwise an IllegalArgumentException is thrown.
   *
   * - Emits when: the specified number of elements has been accumulated or upstream
   *   completed
   * - Backpressures when: a group has been assembled and downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "Grouping a stream of numbers in sequences of three" should "result in two sequences" in {
    withIterator() { source =>
      source
        .take(5)
        .grouped(3)
        .runWith(TestSink.probe[Seq[Int]])
        .request(Int.MaxValue)
        .expectNext(List(0, 1, 2), List(3, 4)) // five elements -> one full group, one partial
        .expectComplete()
    }
  }
}
47 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/MapAsyncStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
22 | import scala.concurrent.Future
23 |
class MapAsyncStageTest extends TestSpec {

  /**
   * `mapAsync` transforms the stream by applying the given function to each element
   * as it passes through this stage; the function returns a `Future`, and the value
   * of that Future is emitted downstream. Both tests below use parallelism 1, so at
   * most one Future is in flight at a time.
   *
   * - Emits when: the Future returned for the next element in sequence completes
   * - Backpressures when: the parallelism limit is reached and downstream backpressures
   * - Completes when: upstream completes and all Futures have completed
   * - Cancels when: downstream cancels
   */

  it should "transform the stream by applying the function to each element" in {
    withIterator() { src ⇒
      src.take(3)
        .mapAsync(1)(e ⇒ Future.successful(e * 2))
        .runWith(TestSink.probe[Int])
        .request(Integer.MAX_VALUE)
        .expectNext(0, 2, 4)
        .expectComplete()
    }
  }

  // A failed Future fails the stream with the default (stopping) supervision.
  it should "emit an Error when the Future completes with a failure" in {
    withIterator() { src ⇒
      src.take(3)
        .mapAsync(1)(_ ⇒ Future.failed(new RuntimeException("")))
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectError()
    }
  }
}
57 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/MapConcatTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class MapConcatTest extends TestSpec {
  /**
   * `mapConcat` transforms each input element into an `Iterable` of output elements
   * that is then flattened into the output stream.
   *
   * The returned `Iterable` MUST NOT contain `null` values, as they are illegal as
   * stream elements according to the Reactive Streams specification.
   *
   * - Emits when: the mapping function returns an element or there are still
   *   remaining elements from the previously calculated collection
   * - Backpressures when: downstream backpressures or there are still remaining
   *   elements from the previously calculated collection
   * - Completes when: upstream completes and all remaining elements have been emitted
   * - Cancels when: downstream cancels
   */

  "MapConcat" should "transform each input element into an 'iterable' of output elements that is then flattened into the output stream" in {
    withIterator() { source =>
      source
        .take(3)
        .mapConcat(n => List(n, n, n)) // each element is tripled
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 0, 0, 1, 1, 1, 2, 2, 2)
        .expectComplete()
    }
  }

  it should "flatten two lists" in {
    withIterator() { source =>
      source
        .take(5)
        .grouped(3)           // List(0,1,2), List(3,4)
        .mapConcat(identity)  // ... flattened back to the original elements
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 1, 2, 3, 4)
        .expectComplete()
    }
  }

  it should "flatten two sequences" in {
    // NOTE(review): this only prints the substreams' elements; the resulting
    // Future is returned to withIterator without any assertion on the output.
    withIterator() { source =>
      source
        .take(10)
        .splitWhen(n => n < 3)
        .concatSubstreams
        .runForeach(println)
    }
  }
}
73 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/MapStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class MapStageTest extends TestSpec {
  /**
   * `map` transforms the stream by applying the given function to each element as it
   * passes through this stage.
   *
   * - Emits when: the mapping function returns an element
   * - Backpressures when: downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  it should "transform the stream by applying the function to each element" in {
    withIterator() { source =>
      source
        .take(3)
        .map(n => n * 2)
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 2, 4)
        .expectComplete()
    }
  }

  // An exception thrown in the mapping function fails the stream with the
  // default (stopping) supervision.
  it should "emit an Error when the map throws an Exception" in {
    withIterator() { source =>
      source
        .take(3)
        .map(_ => throw new RuntimeException(""))
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectError()
    }
  }
}
54 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/RecoverStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class RecoverStageTest extends TestSpec {
  /**
   * `recover` allows sending a last element on failure and gracefully completing
   * the stream.
   *
   * Note: since the underlying failure signal (onError) arrives out-of-band, it may
   * jump over elements still in flight. This stage can recover the failure signal,
   * but not the skipped elements, which will be dropped.
   *
   * - Emits when: an element is available from upstream, or upstream has failed and
   *   the partial function returns an element
   * - Backpressures when: downstream backpressures
   * - Completes when: upstream completes, or upstream failed with an exception the
   *   partial function can handle
   * - Cancels when: downstream cancels
   */

  "Recover" should "emits / forward received elements for non-error messages / normal operation" in {
    withIterator() { source =>
      source
        .take(3)
        .recover { case _: RuntimeException => 1000 } // never triggered: no failure occurs
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 1, 2)
        .expectComplete()
    }
  }

  it should "emit 1000 when the stream fails thus recover the last element, afterwards the stream completes" in {
    withIterator() { source =>
      source
        .take(3)
        .collect {
          case 1 => throw new RuntimeException("Forced exception") // fail on the second element
          case n => n
        }
        .recover { case _: RuntimeException => 1000 }
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNextUnordered(0, 1000) // the failure may overtake in-flight elements
        .expectComplete()
    }
  }
}
67 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/ScanStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class ScanStageTest extends TestSpec {

  /**
   * `scan` is similar to `fold` but is not a terminal operation: it first emits its
   * current value, which starts at `zero`, and then applies the current and next
   * value to the given function, emitting the next current value.
   *
   * If the function throws and the supervision decision is
   * [[akka.stream.Supervision.Restart]], the current value restarts at `zero` and
   * the stream continues.
   *
   * - Emits when: the function scanning the element returns a new element
   * - Backpressures when: downstream backpressures
   * - Completes when: upstream completes
   * - Cancels when: downstream cancels
   */

  "Scan" should "do the same as fold, but emits the next current value to the stream" in {
    withIterator() { source =>
      source
        .take(4)
        .scan(0)((count, _) => count + 1) // running count: zero is emitted first
        .runWith(TestSink.probe[Int])
        .request(Int.MaxValue)
        .expectNext(0, 1, 2, 3, 4)
        .expectComplete()
    }
  }
}
51 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/TakeStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class TakeStageTest extends TestSpec {
  /**
   * `take(n)` stops processing and cancels upstream once `n` elements have
   * been emitted. Because of input buffering, upstream may already have
   * produced elements that will never reach downstream of this stage.
   *
   * With `n` zero or negative the stream completes without emitting anything.
   *
   * - Emits when: fewer than `n` elements have been emitted so far
   * - Backpressures when: downstream backpressures
   * - Completes when: `n` elements have been taken or upstream completes
   * - Cancels when: `n` elements have been taken or downstream cancels
   */

  "Take" should "emit only 'n' number of elements and then complete" in {
    withIterator() { src ⇒
      val firstThree = src.take(3)
      firstThree
        .runWith(TestSink.probe[Int])
        .request(Integer.MAX_VALUE)
        .expectNext(0, 1, 2)
        .expectComplete()
    }
  }
}
48 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/simple/TakeWhileStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.simple
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
class TakeWhileStageTest extends TestSpec {
  /**
   * `takeWhile(p)` stops processing and cancels upstream the first time the
   * predicate returns false. Because of input buffering, upstream may already
   * have produced elements that will never reach downstream of this stage.
   *
   * If the predicate is false for the very first element, the stream completes
   * without emitting anything.
   *
   * - Emits when: the predicate holds
   * - Backpressures when: downstream backpressures
   * - Completes when: the predicate returned false or upstream completes
   * - Cancels when: the predicate returned false or downstream cancels
   */

  "TakeWhile" should "emit elements while the predicate is true, and completes when the predicate is false" in {
    withIterator() { src ⇒
      // pass elements until the first one that is >= 5
      val belowFive: Int ⇒ Boolean = n ⇒ n < 5
      src.takeWhile(belowFive)
        .runWith(TestSink.probe[Int])
        .request(Integer.MAX_VALUE)
        .expectNext(0, 1, 2, 3, 4)
        .expectComplete()
    }
  }
}
48 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/stage/timer/TakeWithinStageTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.stage.timer
18 |
19 | import akka.stream.testkit.scaladsl.TestSink
20 | import com.github.dnvriend.streams.TestSpec
21 |
22 | import scala.concurrent.duration._
23 |
class TakeWithinStageTest extends TestSpec {
  /**
   * `takeWithin(d)` stops processing and cancels upstream once the duration
   * `d` has elapsed. Because of input buffering, upstream may already have
   * produced elements that will never reach downstream of this stage.
   *
   * Can be combined with `take` to additionally cap the number of elements
   * inside the window.
   *
   * - Emits when: an upstream element arrives
   * - Backpressures when: downstream backpressures
   * - Completes when: upstream completes or the timer fires
   * - Cancels when: downstream cancels or the timer fires
   */

  "TakeWithin" should "take elements in the duration window, when the window has passed, the stream completes" in {
    withIterator() { src ⇒
      // NOTE(review): this assertion is timing-sensitive — it expects exactly
      // five elements through a 500ms window with a 200ms delay per element,
      // which can be flaky on a loaded machine; confirm it is stable on CI.
      val slowIdentity: Int ⇒ Int = { n ⇒ Thread.sleep(200); n }
      src.takeWithin(500.millis)
        .map(slowIdentity)
        .runWith(TestSink.probe[Int])
        .request(5)
        .expectNext(0, 1, 2, 3, 4)
        .expectComplete()
    }
  }
}
51 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/streammaterialization/StreamMaterializationTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.streammaterialization
18 |
19 | import akka.Done
20 | import akka.stream.scaladsl.Sink
21 | import com.github.dnvriend.streams.TestSpec
22 |
class StreamMaterializationTest extends TestSpec {

  /**
   * Constructing flows and graphs in Akka Streams only prepares a blueprint —
   * an execution plan. Materialization is the step that takes that stream
   * description (the graph) and allocates whatever resources it needs to run.
   *
   * For Akka Streams that usually means starting the Actors that power the
   * processing, but it is not limited to that — it may also mean opening
   * files or socket connections, depending on what the stream needs.
   */

  /**
   * Materialization is triggered by "terminal operations": the various run()
   * and runWith() methods on flow elements, plus a few syntactic sugars for
   * well-known sinks such as runForeach(el => ) (an alias for
   * runWith(Sink.foreach(el => )).
   */

  "Stream Materialization" should "be triggered using runFold" in {
    withIterator() { src ⇒
      // counting fold: ignores the element, increments the accumulator
      val count = src.take(10).runFold(0)((acc, _) ⇒ acc + 1)
      count.futureValue shouldBe 10
    }
  }

  it should "be triggered using runWith" in {
    withIterator() { src ⇒
      val done = src.take(10).runForeach(_ ⇒ ())
      done.futureValue shouldBe Done
    }
  }

  it should "be triggered using runWith (which takes a sink shape)" in {
    withIterator() { src ⇒
      val discarding = Sink.foreach[Int](_ ⇒ ())
      src.take(10)
        .runWith(discarding)
        .futureValue shouldBe Done
    }
  }
}
64 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/util/ClasspathResources.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.util
18 |
19 | import java.io.InputStream
20 |
21 | import akka.NotUsed
22 | import akka.stream.IOResult
23 | import akka.stream.scaladsl.{ Source, StreamConverters }
24 | import akka.util.ByteString
25 |
26 | import scala.concurrent.Future
27 | import scala.io.{ Source ⇒ ScalaIOSource }
28 | import scala.util.Try
29 | import scala.xml.pull.{ XMLEvent, XMLEventReader }
30 |
trait ClasspathResources {

  /**
   * Opens the named classpath resource, applies `f` to the stream and always
   * closes it afterwards. Close failures are deliberately ignored (best
   * effort) so they cannot mask an exception thrown by `f`.
   */
  def withInputStream[T](fileName: String)(f: InputStream ⇒ T): T = {
    val is = fromClasspathAsStream(fileName)
    try {
      f(is)
    } finally {
      Try(is.close())
    }
  }

  /** Applies `f` to an XMLEventReader over the named classpath resource. */
  def withXMLEventReader[T](fileName: String)(f: XMLEventReader ⇒ T): T =
    withInputStream(fileName) { is ⇒
      f(new XMLEventReader(ScalaIOSource.fromInputStream(is)))
    }

  /** Applies `f` to a Source of XMLEvents read from the named classpath resource. */
  def withXMLEventSource[T](fileName: String)(f: Source[XMLEvent, NotUsed] ⇒ T): T =
    withXMLEventReader(fileName) { reader ⇒
      f(Source.fromIterator(() ⇒ reader))
    }

  /** Applies `f` to a ByteString Source backed by the named classpath resource. */
  def withByteStringSource[T](fileName: String)(f: Source[ByteString, Future[IOResult]] ⇒ T): T =
    withInputStream(fileName) { inputStream ⇒
      f(StreamConverters.fromInputStream(() ⇒ inputStream))
    }

  /** Drains the stream and returns its contents as a String (platform default charset). */
  def streamToString(is: InputStream): String =
    ScalaIOSource.fromInputStream(is).mkString

  /** Reads the named classpath resource fully into a String. */
  def fromClasspathAsString(fileName: String): String =
    streamToString(fromClasspathAsStream(fileName))

  /**
   * Opens the named resource from the classpath.
   *
   * `ClassLoader.getResourceAsStream` returns `null` when the resource does
   * not exist, which previously surfaced as a NullPointerException far from
   * the cause; fail fast with an exception that names the missing resource.
   *
   * @throws java.io.FileNotFoundException if the resource is not on the classpath
   */
  def fromClasspathAsStream(fileName: String): InputStream =
    Option(getClass.getClassLoader.getResourceAsStream(fileName))
      .getOrElse(throw new java.io.FileNotFoundException(s"Resource not found on classpath: $fileName"))

}
66 |
--------------------------------------------------------------------------------
/src/test/scala/com/github/dnvriend/streams/util/InputCustomer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2016 Dennis Vriend
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.github.dnvriend.streams.util
18 |
19 | import scala.util.Random
20 |
/** A raw customer record holding an unparsed full name. */
case class InputCustomer(name: String)

/** A customer record after the full name has been split into parts. */
case class OutputCustomer(firstName: String, lastName: String)

object InputCustomer {
  /**
   * Generates a random customer for test fixtures, e.g.
   * `InputCustomer("FirstName42 LastName817")`.
   *
   * Explicit return type added: public members should not rely on inference;
   * parentheses kept because the method is not pure (uses Random).
   */
  def random(): InputCustomer =
    InputCustomer(s"FirstName${Random.nextInt(1000)} LastName${Random.nextInt(1000)}")
}
27 |
--------------------------------------------------------------------------------