├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .scalafmt.conf ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── RELEASING.md ├── build.sbt ├── project ├── build.properties └── plugins.sbt └── src ├── main ├── java │ └── akka │ │ └── stream │ │ └── contrib │ │ ├── DirectoryChanges.java │ │ └── FileTailSource.java ├── resources │ └── reference.conf └── scala │ └── akka │ └── stream │ └── contrib │ ├── Accumulate.scala │ ├── AccumulateWhileUnchanged.scala │ ├── DelayFlow.scala │ ├── FeedbackLoop.scala │ ├── Implicits.scala │ ├── IntervalBasedRateLimiter.scala │ ├── KeepAliveConcat.scala │ ├── LastElement.scala │ ├── MergeByIndex.scala │ ├── PagedSource.scala │ ├── PartitionWith.scala │ ├── PassThroughFlow.scala │ ├── Pulse.scala │ ├── Retry.scala │ ├── Sample.scala │ ├── SourceGen.scala │ ├── SourceRepeatEval.scala │ ├── TimeWindow.scala │ ├── Timed.scala │ ├── TokenThrottle.scala │ ├── UnfoldFlow.scala │ ├── Valve.scala │ ├── ZipInputStreamSource.scala │ └── latencyTimer.scala └── test ├── java └── akka │ └── stream │ └── contrib │ ├── DirectoryChangesTest.java │ └── FileTailSourceTest.java └── scala └── akka └── stream └── contrib ├── AccumulateSpec.scala ├── AccumulateWhileUnchangedSpec.scala ├── BaseStreamSpec.scala ├── DelayFlowSpec.scala ├── FeedbackLoopSpec.scala ├── IntervalBasedRateLimiterSpec.scala ├── KeepAliveConcatSpec.scala ├── LastElementSpec.scala ├── LatencyTimerSpec.scala ├── MergeByIndexSpec.scala ├── PagedSourceSpec.scala ├── PartitionWithSpec.scala ├── PassThroughFlowSpec.scala ├── PulseSpec.scala ├── RetrySpec.scala ├── SampleSpec.scala ├── SourceRepeatEvalSpec.scala ├── TimeWindowSpec.scala ├── TimedSpec.scala ├── TokenThrottleSpec.scala ├── UnfoldFlowSpec.scala ├── ValveSpec.scala └── ZipInputStreamSourceSpec.scala /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | - main 9 | tags-ignore: 10 | - v* 11 | 12 | jobs: 13 | build: 14 | name: Code Style, Tests, Documentation 15 | runs-on: ubuntu-20.04 16 | env: 17 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v2 22 | 23 | - name: Set up JDK 11 24 | uses: coursier/setup-action@v1.1.2 25 | with: 26 | jvm: adopt:11 27 | 28 | - name: Cache Coursier cache 29 | uses: coursier/cache-action@v6.3 30 | 31 | - name: "Code style, tests, documentation" 32 | run: sbt "scalafmtCheckAll; scalafmtSbtCheck; test; doc" 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea* 2 | *.env 3 | *.log 4 | *.iml 5 | target/ 6 | /.target/ 7 | .DS_Store 8 | .cache* 9 | .classpath 10 | .project 11 | .settings 12 | .tmpBin/ 13 | *.sublime-project 14 | /bin/ 15 | ext-lib-src/ 16 | .ensime 17 | .ensime_cache/ 18 | moquette_store.mapdb 19 | moquette_store.mapdb.p 20 | moquette_store.mapdb.t 21 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = 1.4.0 2 | 3 | style = defaultWithAlign 4 | 5 | align.tokens = [off] 6 | danglingParentheses = true 7 | docstrings = JavaDoc 8 | indentOperator = spray 9 | maxColumn = 120 10 | rewrite.rules = [RedundantBraces, RedundantParens, SortImports] 11 | unindentTopLevelOperators = true 12 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This software is licensed under the Apache 2 license, quoted below.
2 | 
3 | Copyright 2016 Lightbend Inc. [http://www.lightbend.com]
4 | 
5 | Licensed under the Apache License, Version 2.0 (the "License"); you may not
6 | use this file except in compliance with the License. You may obtain a copy of
7 | the License at
8 | 
9 | [http://www.apache.org/licenses/LICENSE-2.0]
10 | 
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 | License for the specific language governing permissions and limitations under
15 | the License.
16 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Akka Stream Contrib
2 | ===================
3 | 
4 | This project provides a home to Akka Streams add-ons which do not fit into the core Akka Streams module. There can be several reasons for an add-on not to be included in the core module, such as:
5 | 
6 | * the functionality is not considered to match the purpose of the core module
7 | * it is an experiment or requires iterations with user feedback before it can be included in the stable core module
8 | * it requires a faster release cycle
9 | 
10 | **This repository is not released as a binary artifact; it is shared only as sources.**
11 | 
12 | Caveat Emptor
13 | -------------
14 | 
15 | Components in this project are not required to stay binary compatible between releases. Breaking API changes may be introduced without notice as we refine and simplify based on your feedback. A module may be dropped in any release without prior deprecation. The Lightbend subscription does not cover support for these modules.
--------------------------------------------------------------------------------
/RELEASING.md:
--------------------------------------------------------------------------------
1 | We do not publish binaries for this repository.
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | organization := "com.typesafe.akka"
2 | name := "akka-stream-contrib"
3 | 
4 | crossScalaVersions := Seq("2.13.0")
5 | scalaVersion := crossScalaVersions.value.head
6 | 
7 | val AkkaVersion = "2.6.0"
8 | 
9 | resolvers += "Akka library repository".at("https://repo.akka.io/maven")
10 | 
11 | libraryDependencies ++= Seq(
12 |   "com.typesafe.akka" %% "akka-stream" % AkkaVersion,
13 |   "com.typesafe.akka" %% "akka-stream-testkit" % AkkaVersion % Test,
14 |   "junit" % "junit" % "4.12" % Test, // Common Public License 1.0
15 |   "com.novocode" % "junit-interface" % "0.11" % Test, // BSD-like
16 |   "com.google.jimfs" % "jimfs" % "1.1" % Test, // ApacheV2
17 |   "org.scalatest" %% "scalatest" % "3.1.0" % Test, // ApacheV2
18 |   "org.scalamock" %% "scalamock" % "4.4.0" % Test, // ApacheV2
19 |   "com.miguno.akka" %% "akka-mock-scheduler" % "0.5.5" % Test // ApacheV2
20 | )
21 | 
22 | organizationName := "Lightbend Inc."
23 | organizationHomepage := Some(url("http://www.lightbend.com")) 24 | homepage := Some(url("https://github.com/akka/akka-stream-contrib")) 25 | licenses := Seq(("Apache License, Version 2.0", url("http://www.apache.org/licenses/LICENSE-2.0"))) 26 | scmInfo := Some( 27 | ScmInfo(url("https://github.com/akka/akka-stream-contrib"), "git@github.com:akka/akka-stream-contrib.git") 28 | ) 29 | developers += Developer("contributors", 30 | "Contributors", 31 | "https://gitter.im/akka/dev", 32 | url("https://github.com/akka/akka-stream-contrib/graphs/contributors")) 33 | 34 | scalacOptions ++= 35 | Seq("-encoding", "UTF-8", "-feature", "-unchecked", "-deprecation", "-Xlint") ++ ( 36 | if (scalaVersion.value startsWith "2.13.") 37 | Seq( 38 | "-Wdead-code", 39 | "-Wnumeric-widen", 40 | "-Xsource:2.14" 41 | ) 42 | else 43 | Seq( 44 | //"-Xfatal-warnings", 45 | "-Xlint", 46 | "-Yno-adapted-args", 47 | "-Ywarn-dead-code", 48 | "-Ywarn-numeric-widen", 49 | "-Xfuture" 50 | ) 51 | ) 52 | 53 | // By default scalatest futures time out in 150 ms, dilate that to 600ms. 54 | // This should not impact the total test time as we don't expect to hit this 55 | // timeout, and indeed it doesn't appear to. 56 | Test / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-F", "4") 57 | 58 | // show full stack traces and test case durations 59 | Test / testOptions += Tests.Argument("-oDF") 60 | 61 | // -v Log "test run started" / "test started" / "test run finished" events on log level "info" instead of "debug". 62 | // -a Show stack traces and exception class name for AssertionErrors. 63 | testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") 64 | 65 | enablePlugins(AutomateHeaderPlugin) 66 | headerLicense := Some(HeaderLicense.Custom(s"Copyright (C) 2016 Lightbend Inc. ")) 67 | scalafmtOnCompile := true 68 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.6.2 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.6.5") 2 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6") 3 | -------------------------------------------------------------------------------- /src/main/java/akka/stream/contrib/DirectoryChanges.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib; 6 | 7 | import akka.NotUsed; 8 | import akka.japi.Pair; 9 | import akka.stream.Attributes; 10 | import akka.stream.Outlet; 11 | import akka.stream.SourceShape; 12 | import akka.stream.javadsl.Source; 13 | import akka.stream.stage.AbstractOutHandler; 14 | import akka.stream.stage.GraphStage; 15 | import akka.stream.stage.GraphStageLogic; 16 | import akka.stream.stage.TimerGraphStageLogic; 17 | 18 | import com.sun.nio.file.SensitivityWatchEventModifier; 19 | 20 | import scala.Tuple2; 21 | import scala.concurrent.duration.FiniteDuration; 22 | 23 | import java.io.IOException; 24 | import java.nio.file.*; 25 | import java.util.ArrayDeque; 26 | import java.util.Queue; 27 | 28 | import static java.nio.file.StandardWatchEventKinds.*; 29 | 30 | /** 31 | * Watches a file system directory and streams change events from it. 
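 *
 * For illustration only, a minimal usage sketch (the watched directory, the interval, and the
 * {@code system} reference below are assumptions, not part of this stage):
 * <pre>{@code
 * Source<Pair<Path, DirectoryChanges.Change>, NotUsed> changes =
 *   DirectoryChanges.create(FileSystems.getDefault().getPath("/tmp/watched"),
 *                           new FiniteDuration(1, TimeUnit.SECONDS), 1000);
 * changes.runForeach(pair -> System.out.println(pair.first() + ": " + pair.second()), system);
 * }</pre>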
32 |  *
33 |  * Note that the JDK watcher is notoriously slow on some platforms (up to 1 second after the event actually happened on OS X, for example)
34 |  *
35 |  * @deprecated since 0.10, use Alpakka's implementation instead https://developer.lightbend.com/docs/alpakka/current/file.html#listing-directory-contents
36 |  */
37 | @Deprecated
38 | public final class DirectoryChanges extends GraphStage<SourceShape<Pair<Path, DirectoryChanges.Change>>> {
39 | 
40 |   public enum Change {
41 |     Modification,
42 |     Creation,
43 |     Deletion
44 |   }
45 | 
46 |   private final static Attributes DEFAULT_ATTRIBUTES = Attributes.name("DirectoryChanges");
47 | 
48 |   private final Path directoryPath;
49 |   private final FiniteDuration pollInterval;
50 |   private final int maxBufferSize;
51 |   private final Outlet<Pair<Path, Change>> out = Outlet.create("DirectoryChanges.out");
52 |   private final SourceShape<Pair<Path, Change>> shape = SourceShape.of(out);
53 | 
54 |   /**
55 |    * @param directoryPath Directory to watch
56 |    * @param pollInterval  Interval between polls of the JDK watch service when a pull comes in and there were no changes; if
57 |    *                      the JDK implementation is slow, lowering this will not help
58 |    * @param maxBufferSize Maximum number of buffered directory changes before the stage fails
59 |    */
60 |   public DirectoryChanges(Path directoryPath, FiniteDuration pollInterval, int maxBufferSize) {
61 |     this.directoryPath = directoryPath;
62 |     this.pollInterval = pollInterval;
63 |     this.maxBufferSize = maxBufferSize;
64 |   }
65 | 
66 |   @Override
67 |   public SourceShape<Pair<Path, Change>> shape() {
68 |     return shape;
69 |   }
70 | 
71 |   @Override
72 |   public Attributes initialAttributes() {
73 |     return DEFAULT_ATTRIBUTES;
74 |   }
75 | 
76 |   @Override
77 |   public GraphStageLogic createLogic(Attributes inheritedAttributes) throws IOException {
78 |     if (!Files.exists(directoryPath)) throw new IllegalArgumentException("The path: '" + directoryPath + "' does not exist");
79 |     if (!Files.isDirectory(directoryPath)) throw new IllegalArgumentException("The path '" + directoryPath + "' is not a directory");
80 | 
81 |     return new TimerGraphStageLogic(shape) {
82 |       private final Queue<Pair<Path, Change>> buffer = new ArrayDeque<>();
83 |       private final WatchService service = directoryPath.getFileSystem().newWatchService();
84 |       private final WatchKey watchKey = directoryPath.register(
85 |         service,
86 |         new WatchEvent.Kind[] { ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE, OVERFLOW },
87 |         // this is com.sun internal, but the service is useless on OSX without it
88 |         SensitivityWatchEventModifier.HIGH
89 |       );
90 | 
91 |       {
92 |         setHandler(out, new AbstractOutHandler(){
93 | 
94 |           @Override
95 |           public void onPull() throws Exception {
96 |             if (!buffer.isEmpty()) {
97 |               pushHead();
98 |             } else {
99 |               doPoll();
100 |               if (!buffer.isEmpty()) {
101 |                 pushHead();
102 |               } else {
103 |                 schedulePoll();
104 |               }
105 |             }
106 |           }
107 |         });
108 |       }
109 | 
110 |       @Override
111 |       public void onTimer(Object timerKey) {
112 |         if (!isClosed(out)) {
113 |           doPoll();
114 |           if (!buffer.isEmpty()) {
115 |             pushHead();
116 |           } else {
117 |             schedulePoll();
118 |           }
119 |         }
120 |       }
121 | 
122 |       @Override
123 |       public void postStop() {
124 |         try {
125 |           if (watchKey.isValid()) watchKey.cancel();
126 |           service.close();
127 |         } catch (Exception ex) {
128 |           // Remove when #21168 is in a release
129 |           throw new RuntimeException(ex);
130 |         }
131 |       }
132 | 
133 |       private void pushHead() {
134 |         final Pair<Path, Change> head = buffer.poll();
135 |         if (head != null) {
136 |           push(out, head);
137 |         }
138 |       }
139 | 
140 |       private void schedulePoll() {
141 |         scheduleOnce("poll", pollInterval);
142 |       }
143 | 
144 |       private void doPoll()
{
145 |         try {
146 |           for (WatchEvent<?> event: watchKey.pollEvents()) {
147 |             final WatchEvent.Kind<?> kind = event.kind();
148 | 
149 |             if (OVERFLOW.equals(kind)) {
150 |               // overflow means that some file system change events may have been missed,
151 |               // that may be ok for some scenarios but to make sure it does not pass unnoticed we fail the stage
152 |               failStage(new RuntimeException("Overflow from watch service: '" + directoryPath + "'"));
153 | 
154 |             } else {
155 |               // if it's not an overflow it must be a Path event
156 |               @SuppressWarnings("unchecked")
157 |               final Path path = (Path) event.context();
158 |               final Path absolutePath = directoryPath.resolve(path);
159 |               final Change change = kindToChange(kind);
160 | 
161 |               buffer.add(new Pair<>(absolutePath, change));
162 |               if (buffer.size() > maxBufferSize) {
163 |                 failStage(new RuntimeException("Max event buffer size " +
164 |                   maxBufferSize + " reached for " + absolutePath));
165 |               }
166 |             }
167 | 
168 |           }
169 |         } finally {
170 |           if (!watchKey.reset()) {
171 |             // directory no longer accessible
172 |             completeStage();
173 |           }
174 |         }
175 |       }
176 | 
177 | 
178 | 
179 |       // convert from the parametrized API to our much nicer API enum
180 |       private Change kindToChange(WatchEvent.Kind<?> kind) {
181 |         final Change change;
182 |         if (kind.equals(ENTRY_CREATE)) {
183 |           change = Change.Creation;
184 |         } else if (kind.equals(ENTRY_DELETE)) {
185 |           change = Change.Deletion;
186 |         } else if (kind.equals(ENTRY_MODIFY)) {
187 |           change = Change.Modification;
188 |         } else {
189 |           throw new RuntimeException("Unexpected kind of event received from watch service for path '" +
190 |             directoryPath + "': " + kind);
191 |         }
192 |         return change;
193 |       }
194 | 
195 | 
196 |     };
197 |   }
198 | 
199 |   @Override
200 |   public String toString() {
201 |     return "DirectoryChanges(" + directoryPath + ')';
202 |   }
203 | 
204 | 
205 |   // factory methods
206 | 
207 |   /**
208 |    * Java API
209 |    *
210 |    * @param directoryPath Directory to watch
211 |    * @param pollInterval  Interval between polls of the JDK watch service when a pull comes in and there were no changes; if
212 |    *                      the JDK implementation is slow, lowering this will not help
213 |    * @param maxBufferSize Maximum number of buffered directory changes before the stage fails
214 |    */
215 |   public static Source<Pair<Path, Change>, NotUsed> create(Path directoryPath, FiniteDuration pollInterval, int maxBufferSize) {
216 |     return Source.fromGraph(new DirectoryChanges(directoryPath, pollInterval, maxBufferSize));
217 |   }
218 | 
219 |   /**
220 |    * Scala API
221 |    *
222 |    * @param directoryPath Directory to watch
223 |    * @param pollInterval  Interval between polls of the JDK watch service when a pull comes in and there were no changes; if
224 |    *                      the JDK implementation is slow, lowering this will not help
225 |    * @param maxBufferSize Maximum number of buffered directory changes before the stage fails
226 |    */
227 |   public static akka.stream.scaladsl.Source<Tuple2<Path, Change>, NotUsed> apply(Path directoryPath, FiniteDuration pollInterval, int maxBufferSize) {
228 |     return create(directoryPath, pollInterval, maxBufferSize)
229 |       .map((Pair<Path, Change> pair) -> Tuple2.apply(pair.first(), pair.second()))
230 |       .asScala();
231 |   }
232 | 
233 | }
--------------------------------------------------------------------------------
/src/main/java/akka/stream/contrib/FileTailSource.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 |  */
4 | 
5 | package akka.stream.contrib;
6 | 
7 | import akka.NotUsed;
8 | import akka.stream.Attributes;
9 | import akka.stream.Outlet;
10 | import akka.stream.SourceShape;
11 | import akka.stream.javadsl.Source;
12 | import akka.stream.stage.*;
13 | import akka.util.ByteString;
14 | import scala.concurrent.duration.FiniteDuration;
15 | import scala.util.Failure;
16 | import scala.util.Success;
17 | import scala.util.Try;
18 | 
19 | import java.io.IOException;
20 | import java.nio.ByteBuffer;
21 | import java.nio.channels.AsynchronousFileChannel;
22 | import java.nio.channels.CompletionHandler;
23 | import java.nio.file.Files;
24 | import java.nio.file.Path;
25 | import java.nio.file.StandardOpenOption;
26 | 
27 | /**
28 |  * Read the entire contents of a file, and then when the end is reached, keep reading
29 |  * newly appended data. Like the unix command `tail -f`.
30 |  *
31 |  * Aborting the stage can be done by combining with a [[akka.stream.KillSwitch]]
32 |  * @deprecated since 0.10, use Alpakka's implementation instead https://developer.lightbend.com/docs/alpakka/current/file.html#tailing-a-file-into-a-stream
33 |  */
34 | @Deprecated
35 | public final class FileTailSource extends GraphStage<SourceShape<ByteString>> {
36 | 
37 |   private final Path path;
38 |   private final int maxChunkSize;
39 |   private final long startingPosition;
40 |   private final FiniteDuration pollingInterval;
41 |   private final Outlet<ByteString> out = Outlet.create("FileTailSource.out");
42 |   private final SourceShape<ByteString> shape = SourceShape.of(out);
43 | 
44 |   // this is stateless, so can be shared among instances
45 |   private static final CompletionHandler<Integer, AsyncCallback<Try<Integer>>> completionHandler = new CompletionHandler<Integer, AsyncCallback<Try<Integer>>>() {
46 |     @Override
47 |     public void completed(Integer result, AsyncCallback<Try<Integer>> attachment) {
48 |       attachment.invoke(new Success<>(result));
49 |     }
50 | 
51 |     @Override
52 |     public void failed(Throwable exc, AsyncCallback<Try<Integer>> attachment) {
53 |       attachment.invoke(new Failure<>(exc));
54 |     }
55 |   };
56 | 
57 |   public FileTailSource(Path path, int maxChunkSize, long startingPosition, FiniteDuration pollingInterval) {
58 |     this.path = path;
59 |     this.maxChunkSize = maxChunkSize;
60 |     this.startingPosition = startingPosition;
61 |     this.pollingInterval = pollingInterval;
62 |   }
63 | 
64 |   @Override
65 |   public SourceShape<ByteString> shape() {
66 |     return shape;
67 |   }
68 | 
69 |   @Override
70 |   public GraphStageLogic createLogic(Attributes inheritedAttributes) throws IOException {
71 |     if (!Files.exists(path)) throw new IllegalArgumentException("Path '" + path + "' does not exist");
72 |     if (Files.isDirectory(path)) throw new IllegalArgumentException("Path '" + path + "' cannot be tailed, it is a directory");
73 |     if (!Files.isReadable(path)) throw new IllegalArgumentException("No read permission for '" + path + "'");
74 | 
75 |     return new TimerGraphStageLogic(shape) {
76 |       private final ByteBuffer buffer = ByteBuffer.allocate(maxChunkSize);
77 |       private final AsynchronousFileChannel channel = AsynchronousFileChannel.open(path, StandardOpenOption.READ);
78 | 
79 |       private long position = startingPosition;
80 |       private AsyncCallback<Try<Integer>> chunkCallback;
81 | 
82 |       {
83 |         setHandler(out, new AbstractOutHandler() {
84 |           @Override
85 |           public void onPull() throws Exception {
86 |             doPull();
87 |           }
88 |         });
89 |       }
90 | 
91 |       @Override
92 |       public void preStart() {
93 |         chunkCallback = createAsyncCallback((tryInteger) -> {
94 |           if (tryInteger.isSuccess()) {
95 |             int readBytes = tryInteger.get();
96 |             if (readBytes > 0) {
97 |               buffer.flip();
98 |               push(out, ByteString.fromByteBuffer(buffer));
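              // the buffer was flipped for reading above; advance the file position by the bytes
              // just emitted so the next read continues where this chunk ended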
99 |               position += readBytes;
100 |               buffer.clear();
101 |             } else {
102 |               // hit end, try again in a while
103 |               scheduleOnce("poll", pollingInterval);
104 |             }
105 | 
106 |           } else {
107 |             failStage(tryInteger.failed().get());
108 |           }
109 | 
110 |         });
111 |       }
112 | 
113 |       @Override
114 |       public void onTimer(Object timerKey) {
115 |         doPull();
116 |       }
117 | 
118 | 
119 |       private void doPull() {
120 |         channel.read(buffer, position, chunkCallback, completionHandler);
121 |       }
122 | 
123 |       @Override
124 |       public void postStop() {
125 |         try {
126 |           if (channel.isOpen()) channel.close();
127 |         } catch(Exception ex) {
128 |           // Remove when #21168 is fixed
129 |           throw new RuntimeException(ex);
130 |         }
131 |       }
132 |     };
133 |   }
134 | 
135 | 
136 |   // factory methods
137 | 
138 |   /**
139 |    * Java API:
140 |    *
141 |    * Read the entire contents of a file, and then when the end is reached, keep reading
142 |    * newly appended data. Like the unix command `tail -f`.
143 |    *
144 |    * Aborting the stage can be done by combining with a [[akka.stream.KillSwitch]]
145 |    *
146 |    * @param path a file path to tail
147 |    * @param maxChunkSize The max emitted size of the `ByteString`s
148 |    * @param startingPosition Offset into the file to start reading
149 |    * @param pollingInterval When the end has been reached, look for new content with this interval
150 |    */
151 |   public static Source<ByteString, NotUsed> create(Path path, int maxChunkSize, long startingPosition, FiniteDuration pollingInterval) {
152 |     return Source.fromGraph(new FileTailSource(path, maxChunkSize, startingPosition, pollingInterval));
153 |   }
154 | 
155 |   /**
156 |    * Scala API:
157 |    *
158 |    * Read the entire contents of a file, and then when the end is reached, keep reading
159 |    * newly appended data. Like the unix command `tail -f`.
160 |    *
161 |    * Aborting the stage can be done by combining with a [[akka.stream.KillSwitch]]
162 |    *
163 |    * @param path a file path to tail
164 |    * @param maxChunkSize The max emitted size of the `ByteString`s
165 |    * @param startingPosition Offset into the file to start reading
166 |    * @param pollingInterval When the end has been reached, look for new content with this interval
167 |    */
168 |   public static akka.stream.scaladsl.Source<ByteString, NotUsed> apply(Path path, int maxChunkSize, long startingPosition, FiniteDuration pollingInterval) {
169 |     return create(path, maxChunkSize, startingPosition, pollingInterval).asScala();
170 |   }
171 | 
172 | }
--------------------------------------------------------------------------------
/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | ###################################
2 | # Akka Stream Contrib Config File #
3 | ###################################
4 | 
5 | # This is the reference config file that contains all the default settings.
6 | # Make your edits/overrides in your application.conf.
7 | 
8 | akka {
9 |   stream {
10 |     contrib {
11 |       retry-timeout = 5.seconds
12 |     }
13 |   }
14 | }
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Accumulate.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.japi.function 8 | import akka.stream.{Attributes, FlowShape, Inlet, Outlet} 9 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} 10 | 11 | /** 12 | * This companion defines a factory for [[Accumulate]] instances, see [[Accumulate.apply]]. 13 | */ 14 | object Accumulate { 15 | 16 | /** 17 | * Factory for [[Accumulate]] instances. 18 | * 19 | * @param zero zero value for folding 20 | * @param f binary operation for folding 21 | * @tparam A input type 22 | * @tparam B output type 23 | * @return [[Accumulate]] instance 24 | */ 25 | @deprecated("Use scan and drop(1) instead", since = "0.5") 26 | def apply[A, B](zero: B)(f: (B, A) => B): Accumulate[A, B] = new Accumulate(zero)(f) 27 | 28 | /** 29 | * Java API: Factory for [[Accumulate]] instances. 30 | * 31 | * @param zero zero value for folding 32 | * @param f binary operation for folding 33 | * @tparam A input type 34 | * @tparam B output type 35 | * @return [[Accumulate]] instance 36 | */ 37 | @deprecated("Use scan and drop(1) instead", since = "0.5") 38 | def create[A, B](zero: B, f: function.Function2[B, A, B]): Accumulate[A, B] = new Accumulate(zero)(f.apply) 39 | } 40 | 41 | /** 42 | * This stage emits folded values like `scan`, but the first element emitted is not the zero value but the result of 43 | * applying the given function to the given zero value and the first pushed element. 44 | * 45 | * @param zero zero value for folding 46 | * @param f binary operation for folding 47 | * @tparam A input type 48 | * @tparam B output type 49 | */ 50 | @deprecated("Use scan and drop(1) instead", since = "0.5") 51 | final class Accumulate[A, B] private (zero: B)(f: (B, A) => B) extends GraphStage[FlowShape[A, B]] { 52 | 53 | override val shape = FlowShape(Inlet[A]("accumulate.in"), Outlet[B]("accumulate.out")) 54 | 55 | override def createLogic(attributes: Attributes) = new GraphStageLogic(shape) { 56 | import shape._ 57 | 58 | private var acc = zero 59 | 60 | setHandler(in, new InHandler { 61 | override def onPush() = { 62 | acc = f(acc, grab(in)) 63 | push(out, acc) 64 | } 65 | }) 66 | 67 | setHandler(out, new OutHandler { 68 | override def onPull() = pull(in) 69 | }) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/AccumulateWhileUnchanged.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.japi.function 8 | import akka.stream.stage.{GraphStage, InHandler, OutHandler, TimerGraphStageLogic} 9 | import akka.stream.{Attributes, FlowShape, Inlet, Outlet} 10 | 11 | import scala.collection.immutable 12 | import scala.concurrent.duration.FiniteDuration 13 | 14 | object AccumulateWhileUnchanged { 15 | 16 | /** 17 | * Factory for [[AccumulateWhileUnchanged]] instances 18 | * 19 | * @param propertyExtractor a function to extract the observed element property 20 | * @param maxElements maximum number of elements to accumulate before emitting, if defined. 21 | * @param maxDuration maximum duration to accumulate elements before emitting, if defined. 
22 | * @tparam Element type of accumulated elements 23 | * @tparam Property type of the observed property 24 | * @return [[AccumulateWhileUnchanged]] instance 25 | */ 26 | def apply[Element, Property](propertyExtractor: Element => Property, 27 | maxElements: Option[Int] = None, 28 | maxDuration: Option[FiniteDuration] = None) = 29 | new AccumulateWhileUnchanged(propertyExtractor, maxElements, maxDuration) 30 | 31 | /** 32 | * Java API: Factory for [[AccumulateWhileUnchanged]] instances 33 | * 34 | * @param propertyExtractor a function to extract the observed element property 35 | * @param maxElements maximum number of elements to accumulate before emitting, if defined. 36 | * @param maxDuration maximum duration to accumulate elements before emitting, if defined. 37 | * @tparam Element type of accumulated elements 38 | * @tparam Property type of the observed property 39 | * @return [[AccumulateWhileUnchanged]] instance 40 | */ 41 | def create[Element, Property](propertyExtractor: function.Function[Element, Property], 42 | maxElements: Option[Int] = None, 43 | maxDuration: Option[FiniteDuration] = None) = 44 | new AccumulateWhileUnchanged(propertyExtractor.apply, maxElements, maxDuration) 45 | } 46 | 47 | /** 48 | * Accumulates elements of type [[Element]] while a property extracted with [[propertyExtractor]] remains unchanged, 49 | * emits an accumulated sequence when the property changes, maxElements is reached or maxDuration has passed. 50 | * 51 | * @param propertyExtractor a function to extract the observed element property 52 | * @param maxElements maximum number of elements to accumulate before emitting, if defined. 53 | * @param maxDuration maximum duration to accumulate elements before emitting, if defined. 54 | * @tparam Element type of accumulated elements 55 | * @tparam Property type of the observed property 56 | */ 57 | final class AccumulateWhileUnchanged[Element, Property](propertyExtractor: Element => Property, 58 | maxElements: Option[Int] = None, 59 | maxDuration: Option[FiniteDuration] = None) 60 | extends GraphStage[FlowShape[Element, immutable.Seq[Element]]] { 61 | 62 | val in = Inlet[Element]("AccumulateWhileUnchanged.in") 63 | val out = Outlet[immutable.Seq[Element]]("AccumulateWhileUnchanged.out") 64 | 65 | override def shape = FlowShape.of(in, out) 66 | 67 | override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { 68 | 69 | private var currentState: Option[Property] = None 70 | private var nbElements: Int = 0 71 | private var downstreamWaiting = false 72 | private val buffer = Vector.newBuilder[Element] 73 | 74 | setHandlers( 75 | in, 76 | out, 77 | new InHandler with OutHandler { 78 | 79 | override def onPush(): Unit = { 80 | val nextElement = grab(in) 81 | val nextState = propertyExtractor(nextElement) 82 | 83 | if (currentState.isEmpty) currentState = Some(nextState) 84 | 85 | (currentState, maxElements) match { 86 | case (Some(`nextState`), None) => stash(nextElement) 87 | case (Some(`nextState`), Some(max)) if nbElements < max => stash(nextElement) 88 | case _ => pushResults(Some(nextElement), Some(nextState)) 89 | } 90 | } 91 | 92 | override def onPull(): Unit = { 93 | downstreamWaiting = true 94 | if (!hasBeenPulled(in)) { 95 | pull(in) 96 | } 97 | } 98 | 99 | override def onUpstreamFinish(): Unit = { 100 | val result = buffer.result() 101 | if (result.nonEmpty) { 102 | emit(out, result) 103 | } 104 | completeStage() 105 | } 106 | 107 | private def stash(nextElement: Element) = { 108 | buffer += nextElement 109 | nbElements 
+= 1
110 |           if (downstreamWaiting) pull(in)
111 |         }
112 |       }
113 |     )
114 | 
115 |     override def preStart(): Unit = {
116 |       super.preStart()
117 |       maxDuration match {
118 |         case Some(max) => schedulePeriodically(None, max)
119 |         case None => ()
120 |       }
121 |     }
122 |     override def postStop(): Unit =
123 |       buffer.clear()
124 | 
125 |     override protected def onTimer(timerKey: Any): Unit =
126 |       pushResults(None, None)
127 | 
128 |     private def pushResults(nextElement: Option[Element], nextState: Option[Property]): Unit = {
129 |       if (!isAvailable(out)) {
130 |         require(nextElement.isEmpty, s"pushResults: not available, would drop nextElement=$nextElement")
131 |         return
132 |       }
133 | 
134 |       val result = buffer.result()
135 |       buffer.clear()
136 |       nbElements = 0
137 | 
138 |       if (result.nonEmpty) {
139 |         push(out, result)
140 |         downstreamWaiting = false
141 |       }
142 | 
143 |       nextElement match {
144 |         case Some(next) =>
145 |           buffer += next
146 |           nbElements += 1
147 |         case None => ()
148 |       }
149 | 
150 |       currentState = nextState
151 |     }
152 |   }
153 | }
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/DelayFlow.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 |  */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.NotUsed
8 | import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage
9 | import akka.stream.Attributes
10 | import akka.stream.contrib.DelayFlow.DelayStrategy
11 | import akka.stream.scaladsl.Flow
12 | import akka.stream.stage._
13 | import scala.concurrent.duration._
14 | 
15 | object DelayFlow {
16 | 
17 |   /**
18 |    * Flow with fixed delay for each element.
19 |    * @param fixedDelay value of the delay
20 |    */
21 |   def apply[T](fixedDelay: FiniteDuration): Flow[T, T, NotUsed] =
22 |     if (fixedDelay <= Duration.Zero)
23 |       Flow[T]
24 |     else
25 |       DelayFlow[T](() => DelayStrategy.fixedDelay(fixedDelay))
26 | 
27 |   /**
28 |    * Flow for universal delay management, allows managing the delay through a [[DelayStrategy]].
29 |    * It determines the delay for each ongoing element by invoking `DelayStrategy.nextDelay(elem: T): FiniteDuration`.
30 |    * Implementing your own [[DelayStrategy]] gives you the flexibility to manage the delay value depending on the incoming elements.
31 |    * It is important to notice that a [[DelayStrategy]] can be stateful.
32 |    * There are also predefined strategies, see the [[DelayStrategy]] companion object's methods.
33 |    *
34 |    *
35 |    * For example:
36 |    * {{{
37 |    * //delay, infinitely increasing by `1 second` on every Failure
38 |    * new DelayStrategy[Try[AnyRef]]{
39 |    *   var delay = Duration.Zero
40 |    *   override def nextDelay(elem: Try[AnyRef]): FiniteDuration = {
41 |    *     if(elem.isFailure){
42 |    *       delay += (1 second)
43 |    *     }
44 |    *     delay
45 |    *   }
46 |    * }
47 |    * }}}
48 |    * @param strategySupplier creates new [[DelayStrategy]] object for each materialization
49 |    * @see [[DelayStrategy]]
50 |    */
51 |   def apply[T](strategySupplier: () => DelayStrategy[_ >: T]): Flow[T, T, NotUsed] =
52 |     Flow.fromGraph(new DelayFlow[T](strategySupplier))
53 | 
54 |   object DelayStrategy {
55 | 
56 |     /**
57 |      * Fixed delay strategy, always returns constant delay for any element.
58 |      * @param delay value of the delay
59 |      */
60 |     def fixedDelay(delay: FiniteDuration): DelayStrategy[Any] = new DelayStrategy[Any] {
61 |       override def nextDelay(elem: Any): FiniteDuration = delay
62 |     }
63 | 
64 |     /**
65 |      * Strategy with linearly increasing delay.
66 |      * It starts with `initialDelay` for each element,
67 |      * increases by `increaseStep` every time `needsIncrease` returns `true`, up to `maxDelay`;
68 |      * when `needsIncrease` returns `false` it resets to `initialDelay`.
69 |      * @param increaseStep step by which delay is increased
70 |      * @param needsIncrease if `true` delay increases, if `false` delay resets to `initialDelay`
71 |      * @param initialDelay initial delay for each of elements
72 |      * @param maxDelay limits maximum delay
73 |      */
74 |     def linearIncreasingDelay[T](increaseStep: FiniteDuration,
75 |                                  needsIncrease: T => Boolean,
76 |                                  initialDelay: FiniteDuration = Duration.Zero,
77 |                                  maxDelay: Duration = Duration.Inf): DelayStrategy[T] = {
78 |       require(increaseStep > Duration.Zero, "Increase step must be positive")
79 |       require(maxDelay > initialDelay, "Max delay must be bigger than initial delay")
80 | 
81 |       new DelayStrategy[T] {
82 | 
83 |         private var delay = initialDelay
84 | 
85 |         override def nextDelay(elem: T): FiniteDuration = {
86 |           if (needsIncrease(elem)) {
87 |             val next = delay + increaseStep
88 |             if (next < maxDelay) {
89 |               delay = next
90 |             } else {
91 |               delay = maxDelay.asInstanceOf[FiniteDuration]
92 |             }
93 | 
94 |           } else {
95 |             delay = initialDelay
96 |           }
97 |           delay
98 |         }
99 | 
100 |       }
101 | 
102 |     }
103 | 
104 |   }
105 | 
106 |   /**
107 |    * Allows managing the delay and can be stateful to compute the delay for any sequence of elements;
108 |    * all elements go through `nextDelay()`, updating state and returning the delay for each element.
109 |    */
110 |   trait DelayStrategy[T] {
111 | 
112 |     /**
113 |      * Returns delay for ongoing element, `Duration.Zero` means passing without delay
114 |      */
115 |     def nextDelay(elem: T): FiniteDuration
116 | 
117 |   }
118 | 
119 | }
120 | 
121 | /**
122 |  * Flow stage for universal delay management, allows managing the delay through a [[DelayStrategy]].
123 |  * It determines the delay for each ongoing element by invoking `DelayStrategy.nextDelay(elem: T): FiniteDuration`.
124 |  * Implementing your own [[DelayStrategy]] gives you the flexibility to manage the delay value depending on the incoming elements.
125 |  * It is important to notice that a [[DelayStrategy]] can be stateful.
126 |  * There are also predefined strategies, see the [[DelayStrategy]] companion object's methods.
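 *
 * For example, an illustrative sketch (the imports and the `Try[Int]` element type are
 * assumptions) that grows the delay linearly while failures keep arriving and resets it on
 * success:
 * {{{
 * import scala.concurrent.duration._
 * import scala.util.Try
 *
 * val delayed = DelayFlow[Try[Int]](() =>
 *   DelayStrategy.linearIncreasingDelay[Try[Int]](1.second, _.isFailure, maxDelay = 30.seconds))
 * }}}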
127 | * @param strategySupplier creates new [[DelayStrategy]] object for each materialization 128 | * @see [[DelayStrategy]] 129 | */ 130 | final class DelayFlow[T](strategySupplier: () => DelayStrategy[_ >: T]) extends SimpleLinearGraphStage[T] { 131 | 132 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = 133 | new TimerGraphStageLogic(shape) with InHandler with OutHandler { 134 | 135 | private case object DelayTimerKey 136 | 137 | private val strategy = strategySupplier() 138 | 139 | private var delayedElem: AnyRef = _ 140 | 141 | override def onPush(): Unit = { 142 | val elem = grab(in) 143 | val delay = strategy.nextDelay(elem) 144 | if (delay <= Duration.Zero) { 145 | push(out, elem) 146 | } else { 147 | delayedElem = elem.asInstanceOf[AnyRef] 148 | scheduleOnce(DelayTimerKey, delay) 149 | } 150 | } 151 | 152 | override def onPull(): Unit = 153 | pull(in) 154 | 155 | override def onTimer(timerKey: Any): Unit = { 156 | push(out, delayedElem.asInstanceOf[T]) 157 | delayedElem = null 158 | if (isClosed(in)) { 159 | completeStage() 160 | } 161 | } 162 | 163 | override def onUpstreamFinish(): Unit = 164 | if (!isTimerActive(DelayTimerKey)) { 165 | completeStage() 166 | } 167 | 168 | setHandler(out, this) 169 | setHandler(in, this) 170 | } 171 | 172 | } 173 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/FeedbackLoop.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.scaladsl.{Flow, GraphDSL, Keep, MergePreferred} 8 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} 9 | import akka.stream.{Attributes, FanOutShape2, FlowShape, Graph, Inlet, Outlet, OverflowStrategy} 10 | 11 | object FeedbackLoop { 12 | 13 | /** 14 | * Feeds `forwardFlow`'s first output [[O0]] via `feedbackArc` back to `forwardFlow`'s input. 15 | * To prevent deadlocks, `feedbackArc` is never backpressured. Instead, elements emitted by 16 | * the `feedbackArc` are buffered and the resulting flow fails if that buffer overflows. 17 | */ 18 | def apply[I, O0, O, M1, M2, M](forwardFlow: Graph[FanOutShape2[I, O0, O], M1], 19 | feedbackArc: Graph[FlowShape[O0, I], M2], 20 | feedbackBufferSize: Int)(combineMat: (M1, M2) => M): Flow[I, O, M] = 21 | Flow.fromGraph(GraphDSL.create(forwardFlow, feedbackArc)(combineMat) { implicit builder => (fw, fb) => 22 | { 23 | import GraphDSL.Implicits._ 24 | 25 | val merge = builder.add(MergePreferred[I](1)) 26 | merge.out ~> fw.in 27 | 28 | // Feed forwardFlow's first output to feedbackArc, but do not signal feedbackArc's cancellation to forwardFlow. 29 | // Failure or completion will propagate to forwardFlow from the other end of the feedbackArc. 30 | fb <~ ignoreAfterDownstreamFinish[O0] <~ fw.out0 31 | 32 | // Feed feedbackArc back to forwardFlow, but never backpressure feedbackArc. 33 | // To that end, use an intermediate buffer that fails on overflow. 
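        // (the buffer size bounds the number of in-flight feedback elements; overflowing it
        // fails the stream rather than deadlocking it)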
34 |         merge.preferred <~ Flow[I].buffer(feedbackBufferSize, OverflowStrategy.fail) <~ fb
35 | 
36 |         FlowShape(merge.in(0), fw.out1)
37 |       }
38 |     })
39 | 
40 |   object Implicits {
41 |     implicit class FanOut2Ops[I, O0, O, M1](val fanOut: Graph[FanOutShape2[I, O0, O], M1]) extends AnyVal {
42 |       def feedbackViaMat[M2, M](feedbackArc: Graph[FlowShape[O0, I], M2],
43 |                                 feedbackBufferSize: Int)(combineMat: (M1, M2) => M): Flow[I, O, M] =
44 |         FeedbackLoop(fanOut, feedbackArc, feedbackBufferSize)(combineMat)
45 | 
46 |       def feedbackVia[M2](feedbackArc: Graph[FlowShape[O0, I], M2], feedbackBufferSize: Int): Flow[I, O, M1] =
47 |         feedbackViaMat(feedbackArc, feedbackBufferSize)(Keep.left)
48 |     }
49 | 
50 |     implicit class FeedbackShapeOps[I, O, M](val fanOut: Graph[FanOutShape2[I, I, O], M]) extends AnyVal {
51 |       def feedback(feedbackBufferSize: Int): Flow[I, O, M] =
52 |         fanOut.feedbackVia(Flow[I], feedbackBufferSize)
53 |     }
54 |   }
55 | 
56 |   /**
57 |    * Flow that passes all upstream elements downstream and after downstream completes,
58 |    * keeps consuming (and ignoring) all upstream elements.
59 |    */
60 |   private def ignoreAfterDownstreamFinish[T]: GraphStage[FlowShape[T, T]] =
61 |     new GraphStage[FlowShape[T, T]] {
62 |       override val shape: FlowShape[T, T] = FlowShape(Inlet("in"), Outlet("out"))
63 | 
64 |       override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
65 |         var downstreamFinished = false
66 | 
67 |         setHandler(
68 |           shape.out,
69 |           new OutHandler {
70 |             override def onPull(): Unit = pull(shape.in)
71 | 
72 |             override def onDownstreamFinish(): Unit = {
73 |               downstreamFinished = true
74 |               if (!hasBeenPulled(shape.in)) {
75 |                 pull(shape.in)
76 |               }
77 |             }
78 |           }
79 |         )
80 | 
81 |         setHandler(shape.in, new InHandler {
82 |           def onPush(): Unit = {
83 |             val elem = grab(shape.in)
84 |             if (downstreamFinished) {
85 |               pull(shape.in)
86 |             } else {
87 |               push(shape.out, elem)
88 |             }
89 |           }
90 |         })
91 |       }
92 |     }
93 | }
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Implicits.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 |  */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.Done
8 | import akka.stream.contrib.LatencyTimer.TimedResult
9 | import akka.stream.{Graph, SinkShape}
10 | import akka.stream.scaladsl.Flow
11 | import akka.stream.scaladsl.Source
12 | 
13 | import scala.concurrent.Future
14 | import scala.concurrent.duration.FiniteDuration
15 | 
16 | /**
17 |  * Additional [[akka.stream.scaladsl.Source]] and [[akka.stream.scaladsl.Flow]] operators.
18 |  */
19 | object Implicits {
20 | 
21 |   /**
22 |    * Provides time measurement utilities on Stream elements.
23 |    *
24 |    * See [[Timed]]
25 |    */
26 |   implicit class TimedSourceDsl[I, Mat](val source: Source[I, Mat]) extends AnyVal {
27 | 
28 |     /**
29 |      * Measures time from receiving the first element and completion events - one for each subscriber of this `Source`.
30 |      */
31 |     def timed[O, Mat2](measuredOps: Source[I, Mat] ⇒ Source[O, Mat2],
32 |                        onComplete: FiniteDuration ⇒ Unit): Source[O, Mat2] =
33 |       Timed.timed[I, O, Mat, Mat2](source, measuredOps, onComplete)
34 | 
35 |     /**
36 |      * Measures rolling interval between immediately subsequent `matching(i: I)` elements.
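     *
     * For example (illustrative only):
     * {{{
     * import akka.stream.contrib.Implicits._
     *
     * Source(1 to 10).timedIntervalBetween(_ % 2 == 0, interval => println(s"even every $interval"))
     * }}}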
37 |      */
38 |     def timedIntervalBetween(matching: I ⇒ Boolean, onInterval: FiniteDuration ⇒ Unit): Source[I, Mat] =
39 |       Timed.timedIntervalBetween[I, Mat](source, matching, onInterval)
40 |   }
41 | 
42 |   /**
43 |    * Provides time measurement utilities on Stream elements.
44 |    *
45 |    * See [[Timed]]
46 |    */
47 |   implicit class TimedFlowDsl[I, O, Mat](val flow: Flow[I, O, Mat]) extends AnyVal {
48 | 
49 |     /**
50 |      * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
51 |      */
52 |     def timed[Out, Mat2](measuredOps: Flow[I, O, Mat] ⇒ Flow[I, Out, Mat2],
53 |                          onComplete: FiniteDuration ⇒ Unit): Flow[I, Out, Mat2] =
54 |       Timed.timed[I, O, Out, Mat, Mat2](flow, measuredOps, onComplete)
55 | 
56 |     /**
57 |      * Measures rolling interval between immediately subsequent `matching(o: O)` elements.
58 |      */
59 |     def timedIntervalBetween(matching: O ⇒ Boolean, onInterval: FiniteDuration ⇒ Unit): Flow[I, O, Mat] =
60 |       Timed.timedIntervalBetween[I, O, Mat](flow, matching, onInterval)
61 |   }
62 | 
63 |   /**
64 |    * Provides latency measurement utilities on Stream elements.
65 |    *
66 |    * See [[LatencyTimer]]
67 |    */
68 |   implicit class MeteredFlowDsl[I, O, Mat](val flow: Flow[I, O, Mat]) extends AnyVal {
69 | 
70 |     /**
71 |      * Wraps a given flow and measures the time between input and output. The second parameter is the function which is called for each measured element.
72 |      * The [[TimedResult]] contains the result of the wrapped flow as well in case some logic has to be done.
73 |      *
74 |      * Important Note: the wrapped flow must preserve the order, otherwise timing will be wrong.
75 |      *
76 |      * @param resultFunction side-effect function which gets called with the result
77 |      * @return Flow of the same shape as the wrapped flow
78 |      */
79 |     def measureLatency(resultFunction: TimedResult[O] ⇒ Unit): Flow[I, O, Mat] =
80 |       LatencyTimer(flow, resultFunction)
81 | 
82 |     /**
83 |      * Wraps a given flow and measures the time between input and output. The measured result is pushed to a dedicated sink.
84 |      * The [[TimedResult]] contains the result of the wrapped flow as well in case some logic has to be done.
85 |      *
86 |      * Important Note: the wrapped flow must preserve the order, otherwise timing will be wrong.
87 |      *
88 |      * @param sink a sink which will handle the [[TimedResult]]
89 |      * @return Flow of the same shape as the wrapped flow
90 |      */
91 |     def measureLatency(sink: Graph[SinkShape[TimedResult[O]], Future[Done]]): Flow[I, O, Mat] =
92 |       LatencyTimer(flow, sink)
93 |   }
94 | 
95 | }
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/IntervalBasedRateLimiter.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 |  */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.NotUsed
8 | import akka.stream._
9 | import akka.stream.scaladsl.Flow
10 | 
11 | import scala.collection.immutable
12 | import scala.concurrent.duration._
13 | 
14 | object IntervalBasedRateLimiter {
15 | 
16 |   /**
17 |    * Specialized type of rate limiter which emits batches of elements (with size limited by the `maxBatchSize` parameter)
18 |    * with a minimum time interval of `minInterval`.
19 |    *
20 |    * Because the next emit is scheduled only after the current batch has been pushed downstream, the effective throughput,
21 |    * depending on the minimal interval length, may never reach the maximum allowed one.
22 |    * You can minimize these delays by sending bigger batches less often.
23 |    *
24 |    * @param minInterval  minimal pause to be kept before pushing the next batch downstream. Should be >= 10 milliseconds.
25 |    * @param maxBatchSize maximum number of elements to send in a single batch
26 |    * @tparam T type of element
27 |    */
28 |   def create[T](minInterval: FiniteDuration, maxBatchSize: Int): Graph[FlowShape[T, immutable.Seq[T]], NotUsed] =
29 |     Flow[T].groupedWithin(maxBatchSize, minInterval).via(DelayFlow[immutable.Seq[T]](minInterval))
30 | 
31 | }
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/KeepAliveConcat.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2016 Lightbend Inc.
3 |  */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream._
8 | import akka.stream.stage._
9 | 
10 | import scala.collection.JavaConverters._
11 | import scala.concurrent.duration.FiniteDuration
12 | 
13 | /**
14 |  * Sends elements from buffer if upstream does not emit for a configured amount of time. In other words, this
15 |  * stage attempts to maintain a base rate of emitted elements towards the downstream using elements from upstream.
16 |  *
17 |  * If upstream emits new elements until the accumulated elements in the buffer exceed the specified keep-alive
18 |  * failover size, then the base rate is no longer maintained until we reach another period without
19 |  * elements from upstream.
20 |  *
21 |  * The keep alive period is the keep alive failover size times the interval.
22 |  *
23 |  * '''Emits when''' upstream emits an element or if the upstream was idle for the configured period
24 |  *
25 |  * '''Backpressures when''' downstream backpressures
26 |  *
27 |  * '''Completes when''' upstream completes
28 |  *
29 |  * '''Cancels when''' downstream cancels
30 |  *
31 |  * @see [[akka.stream.scaladsl.FlowOps#keepAlive]]
32 |  * @see [[akka.stream.scaladsl.FlowOps#expand]]
33 |  */
34 | final case class KeepAliveConcat[T](keepAliveFailoverSize: Int, interval: FiniteDuration, extrapolate: T ⇒ Seq[T])
35 |     extends GraphStage[FlowShape[T, T]] {
36 | 
37 |   require(keepAliveFailoverSize > 0, "The buffer keep alive failover size must be greater than 0.")
38 | 
39 |   val in = Inlet[T]("KeepAliveConcat.in")
40 |   val out = Outlet[T]("KeepAliveConcat.out")
41 | 
42 |   override val shape = FlowShape(in, out)
43 | 
44 |   override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
45 |     new TimerGraphStageLogic(shape) with InHandler with OutHandler {
46 | 
47 |       private val buffer = new java.util.ArrayDeque[T](keepAliveFailoverSize)
48 | 
49 |       override def preStart(): Unit = {
50 |         schedulePeriodically(None, interval)
51 |         pull(in)
52 |       }
53 | 
54 |       override def onPush(): Unit = {
55 |         val elem = grab(in)
56 |         if (buffer.size() < keepAliveFailoverSize) buffer.addAll(extrapolate(elem).asJava)
57 |         else buffer.addLast(elem)
58 | 
59 |         if (isAvailable(out) && !buffer.isEmpty) push(out, buffer.removeFirst())
60 |         else pull(in)
61 |       }
62 | 
63 |       override def onPull(): Unit =
64 |         if (isClosed(in)) {
65 |           if (buffer.isEmpty) completeStage()
66 |           else push(out, buffer.removeFirst())
67 |         } else if (buffer.size() > keepAliveFailoverSize) {
68 |           push(out, buffer.removeFirst())
69 |         } else if (!hasBeenPulled(in)) {
70 |           pull(in)
71 |         }
72 | 
73 |       override def onTimer(timerKey: Any) =
74 |         if (isAvailable(out) && !buffer.isEmpty) push(out, buffer.removeFirst())
75 | 
76 |       override def
onUpstreamFinish(): Unit = 77 | if (buffer.isEmpty) completeStage() 78 | 79 | setHandlers(in, out, this) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/LastElement.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue, InHandler, OutHandler} 8 | import akka.stream.{Attributes, FlowShape, Inlet, Outlet} 9 | import scala.concurrent.{Future, Promise} 10 | 11 | /** 12 | * This companion defines a factory for [[LastElement]] instances, see [[LastElement.apply]]. 13 | */ 14 | object LastElement { 15 | 16 | /** 17 | * Factory for [[LastElement]] instances. 18 | * 19 | * @tparam A input and output type 20 | * @return [[LastElement]] instance 21 | */ 22 | def apply[A](): LastElement[A] = new LastElement 23 | 24 | /** 25 | * Java API: Factory for [[LastElement]] instances. 26 | * 27 | * @tparam A input and output type 28 | * @return [[LastElement]] instance 29 | */ 30 | def create[A](): LastElement[A] = new LastElement 31 | } 32 | 33 | /** 34 | * This stage materializes to the last element pushed before upstream completion, if any, thereby recovering from any 35 | * failure. Pushed elements are just passed along. 36 | * 37 | * @tparam A input and output type 38 | */ 39 | final class LastElement[A] private extends GraphStageWithMaterializedValue[FlowShape[A, A], Future[Option[A]]] { 40 | 41 | override val shape = FlowShape(Inlet[A]("lastElement.in"), Outlet[A]("lastElement.out")) 42 | 43 | override def createLogicAndMaterializedValue(attributes: Attributes) = { 44 | 45 | val matValue = Promise[Option[A]]() 46 | 47 | val logic = new GraphStageLogic(shape) { 48 | import shape._ 49 | 50 | private var currentElement = Option.empty[A] 51 | 52 | setHandler( 53 | in, 54 | new InHandler { 55 | override def onPush() = { 56 | val element = grab(in) 57 | currentElement = Some(element) 58 | push(out, element) 59 | } 60 | 61 | override def onUpstreamFinish() = { 62 | matValue.success(currentElement) 63 | super.onUpstreamFinish() 64 | } 65 | 66 | override def onUpstreamFailure(t: Throwable) = { 67 | matValue.success(currentElement) 68 | super.onUpstreamFinish() 69 | } 70 | } 71 | ) 72 | 73 | setHandler(out, new OutHandler { 74 | override def onPull() = pull(in) 75 | }) 76 | } 77 | 78 | (logic, matValue.future) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/MergeByIndex.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.impl.Stages.DefaultAttributes 8 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} 9 | import akka.stream.{Attributes, Inlet, Outlet, UniformFanInShape} 10 | 11 | import scala.collection.{immutable, mutable} 12 | 13 | /** 14 | * Merges multiple incoming inputs based on a total ordering extracted from the elements. 15 | * 16 | * Merging is done by keeping track of an expected next index value (starting with zero and monotonically increasing). 17 | * The merging is able to cope with gaps in the index sequence that emerge from filtering out elements before merging. 18 | * This stage buffers up to inputPorts elements before emitting. 
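 *
 * For example (illustrative; `evens` and `odds` are assumed to be `Source[(String, Long), NotUsed]`
 * streams whose elements carry a shared, increasing index):
 * {{{
 * val merged = Source.combine(evens, odds)(MergeByIndex[String](_))
 * }}}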
19 | * 20 | * '''Emits when''' one input element e consumed has index(e) being the next expected index, or when 21 | * all input ports have provided elements; in this case, an index omission is assumed, and the element with the lowest 22 | * index is emitted 23 | * 24 | * '''Backpressures when''' downstream backpressures, and one element from each input port has been buffered 25 | * 26 | * '''Completes when''' all upstreams complete and all buffered elements were emitted 27 | * 28 | * '''Cancels when''' downstream cancels 29 | * 30 | * '''Errors when''' the index sequence isn't strict monotonically increasing (e.g. if it contains duplicate index values) 31 | */ 32 | object MergeByIndex { 33 | 34 | /** 35 | * Creates a merge-by-index stage for tuples with second component of type Long (e.g. as created by .zipWithIndex) 36 | * 37 | * @param inputPorts number of inputs to merge 38 | * @tparam T type of the first tuple component 39 | * @return a merge stage that orders tuples by their second component. 40 | */ 41 | def apply[T](inputPorts: Int): MergeByIndex[(T, Long)] = new MergeByIndex[(T, Long)](inputPorts, _._2) 42 | 43 | /** 44 | * Creates a merge-by-index stage where the index of elements is determined by the index function. 45 | * 46 | * @param inputPorts the number of inputs to merge 47 | * @param index extractor function yielding the index of an element 48 | * @tparam T type of elements to merge 49 | * @return a merge stage that orders tuples by their index as specified by the index extractor function. 50 | */ 51 | def apply[T](inputPorts: Int, index: T => Long): MergeByIndex[T] = new MergeByIndex[T](inputPorts, index) 52 | } 53 | 54 | /** 55 | * A merge stage that respects element ordering as given by their index. 56 | * 57 | * @param inputPorts number of inputs to merge 58 | * @param index extractor function yielding the index of an element 59 | * @tparam T type of elements to merge 60 | */ 61 | final class MergeByIndex[T](val inputPorts: Int, index: T => Long) extends GraphStage[UniformFanInShape[T, T]] { 62 | 63 | // one input might seem counter intuitive but saves us from special handling in other places 64 | require(inputPorts >= 1, "A MergeByIndex must have one or more input ports") 65 | 66 | val in: immutable.IndexedSeq[Inlet[T]] = Vector.tabulate(inputPorts)(i => Inlet[T]("MergeByIndex.in" + i)) 67 | val out: Outlet[T] = Outlet[T]("MergeByIndex.out") 68 | 69 | override def initialAttributes: Attributes = DefaultAttributes.merge 70 | override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*) 71 | 72 | private type QueueT = (T, Long, Int) 73 | 74 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { 75 | 76 | // keeps track of the next expected index 77 | private var expectedIndex: Long = 0L 78 | 79 | // buffers pulled elements in order of their index 80 | private val buffer = mutable.PriorityQueue.empty(Ordering.by((i: QueueT) => i._2).reverse) 81 | 82 | // keeps track of closed inlets that have elements in the buffer - relevant for correctly handling index omissions 83 | // when upstreams start to complete 84 | private val bufferedClosedInlets = mutable.BitSet.empty 85 | 86 | // keeps track of the maximum expected buffer length - relevant for handling index omissions 87 | private var maxBufferLength = inputPorts 88 | 89 | override def preStart(): Unit = { 90 | var ix = 0 91 | while (ix < in.size) { 92 | tryPull(in(ix)) 93 | ix += 1 94 | } 95 | } 96 | 97 | private def maybeEmit(): Unit = { 98 | if 
(buffer.nonEmpty) {
99 | if (buffer.head._2 == expectedIndex) {
100 | emitAndPull(buffer.dequeue())
101 | } else if (elementsFromAllInletsBuffered) {
102 | // if all inlets pushed elements and we didn't find the expected index, we know this is an index omission.
103 | // it is therefore fine and necessary to emit the element with the smallest index seen.
104 | emitAndPull(buffer.dequeue())
105 | }
106 | }
107 | 
108 | if (noMoreElementsExpected) completeStage()
109 | }
110 | 
111 | private def elementsFromAllInletsBuffered = buffer.length == maxBufferLength
112 | 
113 | private def updateMaxBufferLength(): Unit =
114 | // needs to account for open inlets and elements of closed inlets that are already buffered.
115 | // only when this many elements are in the buffer have we received an element from each upstream, making it safe
116 | // to deduce an index omission.
117 | maxBufferLength = in.count(i => !isClosed(i)) + bufferedClosedInlets.size
118 | 
119 | private def noMoreElementsExpected = maxBufferLength == 0
120 | 
121 | private def emitAndPull(queueElem: QueueT): Unit = {
122 | val (elem, index, inletIndex) = queueElem
123 | val inlet = in(inletIndex)
124 | verifyElementIndex(index, inlet)
125 | push(out, elem)
126 | if (!isClosed(inlet)) {
127 | pull(inlet)
128 | } else {
129 | // Optimization: check this only if inlet is closed.
130 | if (bufferedClosedInlets.contains(inletIndex)) {
131 | // in case this inlet was closed and an element was buffered, it isn't any more now.
132 | bufferedClosedInlets.remove(inletIndex)
133 | updateMaxBufferLength()
134 | }
135 | }
136 | expectedIndex = index + 1
137 | }
138 | 
139 | private def verifyElementIndex(elemIndex: Long, in: Inlet[T]): Unit =
140 | if (elemIndex < expectedIndex)
141 | throw new IllegalArgumentException(
142 | s"Index sequence is non-monotonic: element received from ${in.s} with index $elemIndex is smaller than the currently expected index $expectedIndex"
143 | )
144 | 
145 | {
146 | var ix = 0
147 | while (ix < in.size) {
148 | val i = in(ix)
149 | setHandler(i, createInHandler(ix, i))
150 | ix += 1
151 | }
152 | }
153 | 
154 | private def createInHandler(pos: Int, in: Inlet[T]) = new InHandler {
155 | override def onPush(): Unit = {
156 | val elem = grab(in)
157 | val elemIndex = index(elem)
158 | verifyElementIndex(elemIndex, in)
159 | buffer.enqueue((elem, elemIndex, pos))
160 | if (isAvailable(out)) maybeEmit()
161 | }
162 | 
163 | override def onUpstreamFinish(): Unit = {
164 | // for properly handling index omissions we need to remember how many elements from closed inlets are buffered.
165 | if (buffer.exists(_._3 == pos)) bufferedClosedInlets.add(pos)
166 | updateMaxBufferLength()
167 | if (isAvailable(out)) maybeEmit() // a finished upstream may unblock emitting.
168 | else if (noMoreElementsExpected) completeStage()
169 | }
170 | }
171 | 
172 | setHandler(out, new OutHandler {
173 | override def onPull(): Unit = maybeEmit()
174 | })
175 | }
176 | 
177 | override def toString = "MergeByIndex"
178 | }
179 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/PagedSource.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.NotUsed
8 | import akka.japi.function
9 | import akka.stream.scaladsl.Source
10 | 
11 | import scala.collection.immutable
12 | import scala.concurrent.{ExecutionContext, Future}
13 | 
14 | /**
15 | * Defines a factory for PagedSource.
16 | *
17 | * @define pageSourceFactory
18 | * PagedSource is a Source streaming items from a paged API.
19 | * The paged API is accessed with a page key and returns data.
20 | * This data contains a list of items and optional information about the key of the next page.
21 | *
22 | */
23 | object PagedSource {
24 | 
25 | type PagedSource[T] = Source[T, NotUsed]
26 | 
27 | case class Page[T, K](items: immutable.Iterable[T], nextKey: Option[K])
28 | 
29 | /**
30 | * PagedSource factory.
31 | *
32 | * $pageSourceFactory
33 | *
34 | * @param firstKey key of first page
35 | * @param f map page key to Future of page data
36 | * @param executor execution context for futures
37 | * @tparam T type of page items
38 | * @tparam K type of page keys
39 | */
40 | def apply[T, K](firstKey: K)(f: K => Future[Page[T, K]])(implicit executor: ExecutionContext): PagedSource[T] = {
41 | val pageSource: PagedSource[Page[T, K]] =
42 | Source.unfoldAsync[Option[K], Page[T, K]](Some(firstKey)) { key =>
43 | val pageFuture: Future[Page[T, K]] = key match {
44 | case Some(k) => f(k)
45 | case None => Future.successful(Page(immutable.Seq.empty, None))
46 | }
47 | pageFuture.map {
48 | case nonEmptyPage @ Page(items, nextKey) if items.nonEmpty => Some(nextKey -> nonEmptyPage)
49 | case _ => None
50 | }
51 | }
52 | pageSource.flatMapConcat(page => Source(page.items))
53 | }
54 | 
55 | /**
56 | * Java API: PagedSource factory.
57 | *
58 | * $pageSourceFactory
59 | *
60 | * @param firstKey key of first page
61 | * @param f map page key to Future of page data
62 | * @param executor execution context for futures
63 | * @tparam T type of page items
64 | * @tparam K type of page keys
65 | * @return [[PagedSource]] instance
66 | */
67 | def create[T, K](firstKey: K,
68 | f: function.Function[K, Future[Page[T, K]]],
69 | executor: ExecutionContext): PagedSource[T] =
70 | PagedSource[T, K](firstKey)(f.apply)(executor)
71 | 
72 | }
73 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/PartitionWith.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.japi.function
8 | import akka.stream.scaladsl.{GraphDSL, Keep}
9 | import akka.stream.{Attributes, FanOutShape2, FlowShape, Graph}
10 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
11 | 
12 | /**
13 | * This companion defines a factory for [[PartitionWith]] instances, see [[PartitionWith.apply]].
14 | */
15 | object PartitionWith {
16 | 
17 | /**
18 | * Factory for [[PartitionWith]] instances.
19 | *
20 | * @param p partition function
21 | * @param eagerCancel when `false` (the default), cancel after all downstreams have cancelled.
22 | * When `true`, cancel as soon as any downstream cancels and complete the remaining downstreams
23 | * @tparam In input type
24 | * @tparam Out0 left output type
25 | * @tparam Out1 right output type
26 | * @return [[PartitionWith]] instance
27 | */
28 | def apply[In, Out0, Out1](p: In => Either[Out0, Out1], eagerCancel: Boolean = false): PartitionWith[In, Out0, Out1] =
29 | new PartitionWith(p, eagerCancel)
30 | 
31 | /**
32 | * Java API: Factory for [[PartitionWith]] instances.
33 | *
34 | * @param p partition function
35 | * @param eagerCancel when `false` (the default), cancel after all downstreams have cancelled.
36 | * When `true`, cancel as soon as any downstream cancels and complete the remaining downstreams
37 | * @tparam In input type
38 | * @tparam Out0 left output type
39 | * @tparam Out1 right output type
40 | * @return [[PartitionWith]] instance
41 | */
42 | def create[In, Out0, Out1](p: function.Function[In, Either[Out0, Out1]],
43 | eagerCancel: Boolean = false): PartitionWith[In, Out0, Out1] =
44 | new PartitionWith(p.apply, eagerCancel)
45 | 
46 | object Implicits {
47 | implicit final class FlowGraphOps[In, Out, M](val flowGraph: Graph[FlowShape[In, Out], M]) extends AnyVal {
48 | 
49 | /**
50 | * Partition the output of the decorated flow according to the given partition function.
51 | */
52 | def partitionWith[Out0, Out1](p: Out => Either[Out0, Out1]): Graph[FanOutShape2[In, Out0, Out1], M] =
53 | GraphDSL.create(flowGraph, PartitionWith(p))(Keep.left) { implicit builder => (flow, fanOut) =>
54 | {
55 | import GraphDSL.Implicits._
56 | flow.out ~> fanOut.in
57 | new FanOutShape2(flow.in, fanOut.out0, fanOut.out1)
58 | }
59 | }
60 | }
61 | }
62 | }
63 | 
64 | /**
65 | * This stage partitions its input between 2 outlets,
66 | * transforming each element to the left or right output type
67 | * according to the given partition function.
68 | *
69 | * @param p partition function
70 | * @param eagerCancel when `false` (the default), cancel after all downstreams have cancelled.
71 | * When `true`, cancel as soon as any downstream cancels and complete the remaining downstreams
72 | * @tparam In input type
73 | * @tparam Out0 left output type
74 | * @tparam Out1 right output type
75 | */
76 | final class PartitionWith[In, Out0, Out1] private (p: In => Either[Out0, Out1], eagerCancel: Boolean)
77 | extends GraphStage[FanOutShape2[In, Out0, Out1]] {
78 | 
79 | override val shape = new FanOutShape2[In, Out0, Out1]("partitionWith")
80 | 
81 | override def createLogic(attributes: Attributes) = new GraphStageLogic(shape) {
82 | import shape._
83 | 
84 | private var pending: Either[Out0, Out1] = null
85 | private var activeDownstreamCount = 2
86 | 
87 | setHandler(
88 | in,
89 | new InHandler {
90 | override def onPush() = {
91 | val elem = grab(in)
92 | p(elem) match {
93 | case Left(o) if isAvailable(out0) =>
94 | push(out0, o)
95 | if (isAvailable(out1))
96 | pull(in)
97 | case Right(o) if isAvailable(out1) =>
98 | push(out1, o)
99 | if (isAvailable(out0))
100 | pull(in)
101 | case either =>
102 | pending = either
103 | }
104 | }
105 | 
106 | override def onUpstreamFinish() =
107 | if (pending eq null)
108 | completeStage()
109 | }
110 | )
111 | 
112 | setHandler(
113 | out0,
114 | new OutHandler {
115 | override def onPull() =
116 | if (pending ne null) pending.left.foreach { o =>
117 | push(out0, o)
118 | if (isClosed(in)) completeStage()
119 | else {
120 | pending = null
121 | if (isAvailable(out1))
122 | pull(in)
123 | }
124 | } else if (!hasBeenPulled(in)) pull(in)
125 | 
126 | override def onDownstreamFinish(): Unit =
127 | downstreamFinished()
128 | }
129 | )
130 | 
131 | setHandler(
132 | out1,
133 | new OutHandler {
134 | override def onPull() =
135 | if (pending ne null) pending.right.foreach { o =>
136 | push(out1, o)
137 | if (isClosed(in)) completeStage()
138 | else {
139 | pending = null
140 | if (isAvailable(out0))
141 | pull(in)
142 | }
143 | } else if (!hasBeenPulled(in)) pull(in)
144 | 
145 | override def onDownstreamFinish(): Unit =
146 | downstreamFinished()
147 | }
148 | )
149 | 
150 | private def downstreamFinished(): Unit = {
151 | activeDownstreamCount -= 1
152 | if (eagerCancel) {
153 | completeStage()
154 | } else if (activeDownstreamCount == 0) {
155 | cancel(in)
156 | }
157 | }
158 | }
159 | }
160 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/PassThroughFlow.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.NotUsed
8 | import akka.stream._
9 | import akka.stream.scaladsl._
10 | 
11 | /**
12 | * Given `Flow[In, Out]` and `f: (In, Out) => Result`,
13 | * transforms original flow into a `Flow[In, Result]`, i.e.
14 | * for every element `e` signalled from upstream, emits `f(e, flow(e))`.
15 | *
16 | * Has an overloaded factory that fixes `f` to be a tuple constructor, i.e.
17 | * for every element `e` signalled from upstream, emits a tuple `(e, flow(e))`.
18 | *
19 | * IMPORTANT!
20 | * This flow combinator is only guaranteed to work correctly on flows
21 | * that behave like classic total functions, meaning that
22 | * they should be one-to-one functions that don't
23 | * reorder, drop, or inject new elements.
24 | * In the future these restrictions may be lifted,
25 | * for now please refer to the following resources for more:
26 | * - [[https://github.com/akka/akka-stream-contrib/pull/142#discussion_r228875614]]
27 | * - [[https://github.com/akka/akka/issues/15957]] and linked issues
28 | * - [[https://discuss.lightbend.com/t/passing-stream-elements-into-and-over-flow/2536]] and linked resources
29 | *
30 | * Applies no internal buffering / flow control etc.
31 | *
32 | * '''Emits when''' upstream emits an element
33 | *
34 | * '''Backpressures when''' downstream backpressures
35 | *
36 | * '''Completes when''' upstream completes
37 | *
38 | * '''Cancels when''' downstream cancels
39 | *
40 | * Examples:
41 | *
42 | * 1. Consuming from Kafka: allows business logic to stay unaware of commits:
43 | *
44 | * {{{
45 | * val logic: Flow[CommittableMessage[String, Array[Byte]], ProcessingResult, NotUsed] =
46 | * Flow[CommittableMessage[String, Array[Byte]]]
47 | * .map(m => process(m.record))
48 | *
49 | * // Used like this:
50 | * Consumer
51 | * .committableSource(settings, Subscriptions.topics("my-topic"))
52 | * .via(PassThroughFlow(logic))
53 | * .map { case (committableMessage, processingResult) =>
54 | * // decide to commit or not based on `processingResult`
55 | * }
56 | *
57 | * // or:
58 | *
59 | * Consumer
60 | * .committableSource(settings, Subscriptions.topics("my-topic"))
61 | * .via(PassThroughFlow(logic, Keep.left)) // process messages but return original elements
62 | * .mapAsync(1)(_.committableOffset.commitScaladsl())
63 | * }}}
64 | *
65 | * 2. Logging HTTP request-response based on some rule
66 | * {{{
67 | * // assuming Akka HTTP entities:
68 | * val route: Route = ???
69 | *
70 | * // Route has an implicit conversion to Flow[HttpRequest, HttpResponse]
71 | *
72 | * Flow[HttpRequest]
73 | * .map { r =>
74 | * // don't log web crawlers' requests
75 | * if (userAgent(r) != "google-bot") {
76 | * logRequest(r)
77 | * }
78 | * r
79 | * }
80 | * .via(PassThroughFlow(route)) // req => (req, resp)
81 | * .map { case (req, resp) =>
82 | * // don't log responses to web crawlers
83 | * if (userAgent(req) != "google-bot") {
84 | * logResponse(resp)
85 | * }
86 | * resp
87 | * }
88 | * }}}
89 | */
90 | object PassThroughFlow {
91 | def apply[I, O](processingFlow: Flow[I, O, NotUsed]): Graph[FlowShape[I, (I, O)], NotUsed] =
92 | apply[I, O, (I, O)](processingFlow, Keep.both)
93 | 
94 | def apply[I, O, R](processingFlow: Flow[I, O, NotUsed], output: (I, O) => R): Graph[FlowShape[I, R], NotUsed] =
95 | Flow.fromGraph(GraphDSL.create() { implicit builder =>
96 | import GraphDSL.Implicits._
97 | 
98 | val broadcast: UniformFanOutShape[I, I] =
99 | builder.add(Broadcast[I](2))
100 | 
101 | val zip: FanInShape2[I, O, R] =
102 | builder.add(ZipWith[I, O, R]((left, right) => output(left, right)))
103 | 
104 | // format: off
105 | broadcast.out(0) ~> zip.in0
106 | broadcast.out(1) ~> processingFlow ~> zip.in1
107 | // format: on
108 | 
109 | FlowShape(broadcast.in, zip.out)
110 | })
111 | }
112 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Pulse.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream.Attributes
8 | import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage
9 | import akka.stream.stage._
10 | 
11 | import scala.concurrent.duration.FiniteDuration
12 | 
13 | /**
14 | * Signals demand only once every [[interval]] (''pulse'') and then back-pressures. The requested element is emitted downstream if there is demand.
15 | *
16 | * It can be used to implement simple time-window processing
17 | * where data is aggregated for a predefined amount of time and the computed aggregate is emitted once per that time.
18 | * See [[TimeWindow]]
19 | *
20 | * @param interval ''pulse'' period
21 | * @param initiallyOpen if `true` - emits the first available element before ''pulsing''
22 | * @tparam T type of element
23 | */
24 | final class Pulse[T](val interval: FiniteDuration, val initiallyOpen: Boolean = false)
25 | extends SimpleLinearGraphStage[T] {
26 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
27 | new TimerGraphStageLogic(shape) with InHandler with OutHandler {
28 | 
29 | setHandlers(in, out, this)
30 | 
31 | override def preStart(): Unit = if (!initiallyOpen) startPulsing()
32 | override def onPush(): Unit = if (isAvailable(out)) push(out, grab(in))
33 | override def onPull(): Unit = if (!pulsing) {
34 | pull(in)
35 | startPulsing()
36 | }
37 | 
38 | override protected def onTimer(timerKey: Any): Unit =
39 | if (isAvailable(out) && !isClosed(in) && !hasBeenPulled(in)) pull(in)
40 | 
41 | private def startPulsing() = {
42 | pulsing = true
43 | schedulePeriodically("PulseTimer", interval)
44 | }
45 | private var pulsing = false
46 | }
47 | 
48 | override def toString = "Pulse"
49 | }
50 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Sample.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import java.util.concurrent.ThreadLocalRandom
8 | 
9 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
10 | import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
11 | 
12 | object Sample {
13 | 
14 | /**
15 | *
16 | * returns every nth element
17 | *
18 | * @param nth must be > 0
19 | * @tparam T
20 | * @return
21 | */
22 | def apply[T](nth: Int): Sample[T] = Sample[T](() => nth)
23 | 
24 | /**
25 | *
26 | * randomly samples a stream
27 | *
28 | * @param maxStep must be > 0, default 1000; the random step will be between 1 (inclusive) and maxStep (inclusive)
29 | * @tparam T
30 | * @return
31 | */
32 | def random[T](maxStep: Int = 1000): Sample[T] = {
33 | require(maxStep > 0, "max step for random sampling must be > 0")
34 | Sample[T](() => ThreadLocalRandom.current().nextInt(maxStep) + 1)
35 | }
36 | }
37 | 
38 | /**
39 | * supports sampling on a stream
40 | *
41 | * @param next a function that returns the next sample position
42 | * @tparam T
43 | */
44 | case class Sample[T](next: () => Int) extends GraphStage[FlowShape[T, T]] {
45 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
46 | new GraphStageLogic(shape) with InHandler with OutHandler {
47 | var step = getNextStep()
48 | var counter = 0
49 | 
50 | def onPull(): Unit =
51 | pull(in)
52 | 
53 | def onPush(): Unit = {
54 | counter += 1
55 | if (counter >= step) {
56 | counter = 0
57 | step = getNextStep()
58 | push(out, grab(in))
59 | } else {
60 | pull(in)
61 | }
62 | }
63 | 
64 | private def getNextStep(): Long = {
65 | val nextStep = next()
66 | require(nextStep > 0, s"sampling step should be a positive value: ${nextStep}")
67 | nextStep
68 | }
69 | 
70 | setHandlers(in, out, this)
71 | }
72 | 
73 | val in = Inlet[T]("Sample-in")
74 | val out = Outlet[T]("Sample-out")
75 | override val shape = FlowShape(in, out)
76 | }
77 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/SourceGen.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream._
8 | import akka.stream.scaladsl._
9 | import akka.stream.stage._
10 | import akka.util.Timeout
11 | 
12 | /**
13 | * Source factory methods are placed here
14 | */
15 | object SourceGen {
16 | 
17 | /**
18 | * Create a `Source` that will unfold a value of type `S` by
19 | * passing it through a flow. The flow should emit a
20 | * pair of the next state `S` and output elements of type `E`.
21 | * Source completes when the flow completes.
22 | *
23 | * The `timeout` parameter specifies how long to wait, after the
24 | * user-provided inner flow cancels its upstream, for it to also
25 | * cancel its downstream (either as graceful completion or as a
26 | * failure, which is propagated).
27 | * If the inner flow fails to complete or fail its downstream in time, the stage is
28 | * failed with an IllegalStateException.
29 | *
30 | * IMPORTANT CAVEAT:
31 | * The given flow must not change the number of elements passing through it (i.e. it should output
32 | * exactly one element for every received element). Ignoring this will have unpredictable results,
33 | * and may result in a deadlock.
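 | *
 | * A minimal sketch (the timeout value and the one-to-one inner flow are illustrative
 | * assumptions): emitting the natural numbers by threading the counter state through the flow:
 | * {{{
 | * import akka.NotUsed
 | * import akka.stream.scaladsl.{Flow, Source}
 | * import akka.util.Timeout
 | * import scala.concurrent.duration._
 | *
 | * implicit val timeout: Timeout = Timeout(3.seconds)
 | * // the flow emits (next state, element) pairs, exactly one per input
 | * val naturals: Source[Long, NotUsed] =
 | *   SourceGen.unfoldFlow(0L)(Flow[Long].map(n => (n + 1, n)))
 | * }}}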
34 | */
35 | def unfoldFlow[S, E, M](seed: S)(flow: Graph[FlowShape[S, (S, E)], M])(implicit timeout: Timeout): Source[E, M] = {
36 | 
37 | val generateUnfoldFlowGraphStageLogic = (shape: FanOutShape2[(S, E), S, E]) =>
38 | new UnfoldFlowGraphStageLogic[(S, E), S, E](shape, seed, timeout) {
39 | setHandler(nextElem, new InHandler {
40 | override def onPush() = {
41 | val (s, e) = grab(nextElem)
42 | pending = s
43 | push(output, e)
44 | pushedToCycle = false
45 | }
46 | })
47 | }
48 | 
49 | unfoldFlowGraph(new FanOut2unfoldingStage(generateUnfoldFlowGraphStageLogic), flow)
50 | }
51 | 
52 | /**
53 | * Create a `Source` that will unfold a value of type `S` by
54 | * passing it through a flow. The flow should emit an output
55 | * value of type `O`, that when fed to the unfolding function,
56 | * generates a pair of the next state `S` and output elements of type `E`.
57 | *
58 | * The `timeout` parameter specifies how long to wait, after the
59 | * user-provided inner flow cancels its upstream, for it to also
60 | * cancel its downstream (either as graceful completion or as a
61 | * failure, which is propagated).
62 | * If the inner flow fails to complete or fail its downstream in time, the stage is
63 | * failed with an IllegalStateException.
64 | *
65 | * IMPORTANT CAVEAT:
66 | * The given flow must not change the number of elements passing through it (i.e. it should output
67 | * exactly one element for every received element). Ignoring this will have unpredictable results,
68 | * and may result in a deadlock.
69 | */
70 | def unfoldFlowWith[E, S, O, M](seed: S, flow: Graph[FlowShape[S, O], M])(
71 | unfoldWith: O => Option[(S, E)]
72 | )(implicit timeout: Timeout): Source[E, M] = {
73 | 
74 | val generateUnfoldFlowGraphStageLogic = (shape: FanOutShape2[O, S, E]) =>
75 | new UnfoldFlowGraphStageLogic[O, S, E](shape, seed, timeout) {
76 | setHandler(
77 | nextElem,
78 | new InHandler {
79 | override def onPush() = {
80 | val o = grab(nextElem)
81 | unfoldWith(o) match {
82 | case None => completeStage()
83 | case Some((s, e)) => {
84 | pending = s
85 | push(output, e)
86 | pushedToCycle = false
87 | }
88 | }
89 | }
90 | }
91 | )
92 | }
93 | 
94 | unfoldFlowGraph(new FanOut2unfoldingStage(generateUnfoldFlowGraphStageLogic), flow)
95 | }
96 | 
97 | /** INTERNAL API */
98 | private[akka] def unfoldFlowGraph[E, S, O, M](fanOut2Stage: GraphStage[FanOutShape2[O, S, E]],
99 | flow: Graph[FlowShape[S, O], M]): Source[E, M] =
100 | Source.fromGraph(GraphDSL.create(flow) { implicit b =>
101 | { f =>
102 | {
103 | import GraphDSL.Implicits._
104 | 
105 | val fo2 = b.add(fanOut2Stage)
106 | fo2.out0 ~> f ~> fo2.in
107 | SourceShape(fo2.out1)
108 | }
109 | }
110 | })
111 | }
112 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/SourceRepeatEval.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import java.util.concurrent.atomic.AtomicBoolean
8 | import akka.actor.Cancellable
9 | import akka.stream.Attributes
10 | import akka.stream.impl.Unfold
11 | import akka.stream.scaladsl.Source
12 | 
13 | /**
14 | * Create a `Source` that will output elements of type `A`
15 | * given a "producer" function
16 | *
17 | * Examples:
18 | *
19 | * stream of current times:
20 | *
21 | * {{{
22 | * SourceRepeatEval(() => System.currentTimeMillis)
23 | * }}}
24 | *
25 | * stream of random numbers:
26 | *
27 | * {{{
28 | * SourceRepeatEval(() => Random.nextInt)
29 | * }}}
30 | *
31 | * Behavior is the same as in
32 | * {{{
33 | * Source.repeat(()).map(_ => genElement())
34 | * }}}
35 | *
36 | * Supports cancellation via materialized `Cancellable`.
37 | */
38 | object SourceRepeatEval {
39 | def apply[A](genElement: () => A): Source[A, Cancellable] = {
40 | val c: Cancellable = new Cancellable {
41 | private val stopped: AtomicBoolean = new AtomicBoolean(false)
42 | override def cancel(): Boolean = stopped.compareAndSet(false, true)
43 | override def isCancelled: Boolean = stopped.get()
44 | }
45 | 
46 | def nextStep: Unit => Option[(Unit, A)] = { _ =>
47 | {
48 | if (c.isCancelled) {
49 | None
50 | } else {
51 | Some(() -> genElement())
52 | }
53 | }
54 | }
55 | 
56 | Source
57 | .fromGraph(new Unfold[Unit, A]((), nextStep))
58 | .withAttributes(Attributes.name("repeat-eval"))
59 | .mapMaterializedValue(_ => c)
60 | }
61 | }
62 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/TimeWindow.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.NotUsed
8 | import akka.stream.scaladsl.Flow
9 | 
10 | import scala.concurrent.duration.FiniteDuration
11 | 
12 | object TimeWindow {
13 | 
14 | /**
15 | * Aggregates data for a predefined amount of time. The computed aggregate is emitted after the window expires, after which a new window starts.
16 | *
17 | * For example:
18 | * {{{
19 | * Source.tick(0.seconds, 100.milliseconds, 1)
20 | * .via(TimeWindow(500.milliseconds, eager = false)(identity[Int])(_ + _))
21 | * }}}
22 | * will emit the sum of 1s every 500 ms - so you could expect a stream of 5s if timers were ideal.
23 | * If ''eager'' were true in this example, you would observe an initial 1 with no delay, followed by the stream of sums.
24 | *
25 | * @param of window duration
26 | * @param eager if ''true'' emits the very first seed with no delay, otherwise the first element emitted is the result of the first time-window aggregation
27 | * @param seed provides the initial state when a new window starts
28 | * @param aggregate produces updated aggregate
29 | * @tparam A type of incoming element
30 | * @tparam S type of outgoing (aggregated) element
31 | * @return flow implementing time-window aggregation
32 | */
33 | def apply[A, S](of: FiniteDuration, eager: Boolean = true)(seed: A => S)(aggregate: (S, A) ⇒ S): Flow[A, S, NotUsed] =
34 | Flow[A].conflateWithSeed(seed)(aggregate).via(new Pulse(of, eager))
35 | }
36 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Timed.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import java.util.concurrent.atomic.AtomicLong
8 | 
9 | import akka.stream.Attributes
10 | import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage
11 | import akka.stream.scaladsl.{Flow, Source}
12 | import akka.stream.stage._
13 | 
14 | import scala.concurrent.duration._
15 | 
16 | /**
17 | * Provides operations needed to implement the `timed` DSL
18 | */
19 | trait TimedOps {
20 | 
21 | import Timed._
22 | 
23 | /**
24 | *
25 | *
26 | * Measures the time elapsed between receiving the first element and the completion event - one for each subscriber of this `Flow`.
27 | */
28 | def timed[I, O, Mat, Mat2](source: Source[I, Mat],
29 | measuredOps: Source[I, Mat] ⇒ Source[O, Mat2],
30 | onComplete: FiniteDuration ⇒ Unit): Source[O, Mat2] = {
31 | val ctx = new TimedFlowContext
32 | 
33 | val startTimed = Flow[I].via(new StartTimed(ctx)).named("startTimed")
34 | val stopTimed = Flow[O].via(new StopTimed(ctx, onComplete)).named("stopTimed")
35 | 
36 | measuredOps(source.via(startTimed)).via(stopTimed)
37 | }
38 | 
39 | /**
40 | *
41 | *
42 | * Measures the time elapsed between receiving the first element and the completion event - one for each subscriber of this `Flow`.
43 | */
44 | def timed[I, O, Out, Mat, Mat2](flow: Flow[I, O, Mat],
45 | measuredOps: Flow[I, O, Mat] ⇒ Flow[I, Out, Mat2],
46 | onComplete: FiniteDuration ⇒ Unit): Flow[I, Out, Mat2] = {
47 | // todo is there any other way to provide this for Flow, without duplicating impl?
48 | // they do share a super-type (FlowOps), but all operations of FlowOps return path-dependent types
49 | val ctx = new TimedFlowContext
50 | 
51 | val startTimed = Flow[O].via(new StartTimed(ctx)).named("startTimed")
52 | val stopTimed = Flow[Out].via(new StopTimed(ctx, onComplete)).named("stopTimed")
53 | 
54 | measuredOps(flow.via(startTimed)).via(stopTimed)
55 | }
56 | 
57 | }
58 | 
59 | /**
60 | *
61 | *
62 | * Provides operations needed to implement the `timedIntervalBetween` DSL
63 | */
64 | trait TimedIntervalBetweenOps {
65 | 
66 | import Timed._
67 | 
68 | /**
69 | * Measures the rolling interval between immediately subsequent `matching(o: O)` elements.
70 | */
71 | def timedIntervalBetween[O, Mat](source: Source[O, Mat],
72 | matching: O ⇒ Boolean,
73 | onInterval: FiniteDuration ⇒ Unit): Source[O, Mat] = {
74 | val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval")
75 | source.via(timedInterval)
76 | }
77 | 
78 | /**
79 | * Measures the rolling interval between immediately subsequent `matching(o: O)` elements.
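 | *
 | * A usage sketch (treating every 10th element as the marker is an illustrative assumption):
 | * {{{
 | * import akka.NotUsed
 | * import akka.stream.scaladsl.Flow
 | *
 | * val instrumented: Flow[Int, Int, NotUsed] =
 | *   Timed.timedIntervalBetween[Int, Int, NotUsed](
 | *     Flow[Int],
 | *     matching = (i: Int) => i % 10 == 0,
 | *     onInterval = d => println(s"10 elements took $d"))
 | * }}}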
80 | */ 81 | def timedIntervalBetween[I, O, Mat](flow: Flow[I, O, Mat], 82 | matching: O ⇒ Boolean, 83 | onInterval: FiniteDuration ⇒ Unit): Flow[I, O, Mat] = { 84 | val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval") 85 | flow.via(timedInterval) 86 | } 87 | } 88 | 89 | object Timed extends TimedOps with TimedIntervalBetweenOps { 90 | 91 | // todo needs java DSL 92 | 93 | final class TimedFlowContext { 94 | import scala.concurrent.duration._ 95 | 96 | private val _start = new AtomicLong 97 | private val _stop = new AtomicLong 98 | 99 | def start(): Unit = 100 | _start.compareAndSet(0, System.nanoTime()) 101 | 102 | def stop(): FiniteDuration = { 103 | _stop.compareAndSet(0, System.nanoTime()) 104 | compareStartAndStop() 105 | } 106 | 107 | private def compareStartAndStop(): FiniteDuration = { 108 | val stp = _stop.get 109 | if (stp <= 0) Duration.Zero 110 | else (stp - _start.get).nanos 111 | } 112 | } 113 | 114 | final class StartTimed[T](timedContext: TimedFlowContext) extends SimpleLinearGraphStage[T] { 115 | 116 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = 117 | new GraphStageLogic(shape) with InHandler with OutHandler { 118 | 119 | private var started = false 120 | 121 | override def onPush(): Unit = { 122 | if (!started) { 123 | timedContext.start() 124 | started = true 125 | } 126 | push(out, grab(in)) 127 | } 128 | 129 | override def onPull(): Unit = pull(in) 130 | 131 | setHandlers(in, out, this) 132 | } 133 | } 134 | 135 | final class StopTimed[T](timedContext: TimedFlowContext, _onComplete: FiniteDuration ⇒ Unit) 136 | extends SimpleLinearGraphStage[T] { 137 | 138 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = 139 | new GraphStageLogic(shape) with InHandler with OutHandler { 140 | 141 | override def onPush(): Unit = push(out, grab(in)) 142 | 143 | override def onPull(): Unit = pull(in) 144 | 145 | override def onUpstreamFailure(cause: Throwable): Unit = { 146 | stopTime() 147 | failStage(cause) 148 | } 149 | 150 | override def onUpstreamFinish(): Unit = { 151 | stopTime() 152 | completeStage() 153 | } 154 | 155 | private def stopTime(): Unit = { 156 | val d = timedContext.stop() 157 | _onComplete(d) 158 | } 159 | 160 | setHandlers(in, out, this) 161 | } 162 | } 163 | 164 | final class TimedInterval[T](matching: T ⇒ Boolean, onInterval: FiniteDuration ⇒ Unit) 165 | extends SimpleLinearGraphStage[T] { 166 | 167 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = 168 | new GraphStageLogic(shape) with InHandler with OutHandler { 169 | 170 | private var prevNanos = 0L 171 | private var matched = 0L 172 | 173 | override def onPush(): Unit = { 174 | val elem = grab(in) 175 | if (matching(elem)) { 176 | val d = updateInterval(elem) 177 | 178 | if (matched > 1) 179 | onInterval(d) 180 | } 181 | push(out, elem) 182 | } 183 | 184 | override def onPull(): Unit = pull(in) 185 | 186 | private def updateInterval(in: T): FiniteDuration = { 187 | matched += 1 188 | val nowNanos = System.nanoTime() 189 | val d = nowNanos - prevNanos 190 | prevNanos = nowNanos 191 | d.nanoseconds 192 | } 193 | 194 | setHandlers(in, out, this) 195 | } 196 | 197 | } 198 | 199 | } 200 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/TokenThrottle.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream.scaladsl.GraphDSL
8 | import akka.stream.{Attributes, FanInShape2, FlowShape, Graph, Inlet, Outlet, SourceShape}
9 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
10 | import akka.util.OptionVal
11 | import akka.util.OptionVal._
12 | 
13 | /**
14 | * Throttles a flow based on tokens consumed from a provided token source. The flow emits elements only if
15 | * the needed amount of tokens for the next element is available. The amount of tokens needed is specified by a cost
16 | * function.
17 | *
18 | * Tokens are consumed as needed, and pre-fetched once the internal token buffer is empty. Also, an initial fetch is
19 | * performed to initially fill the internal token buffer.
20 | *
21 | * '''Emits when''' an input element e is available and costCalculation(e) tokens are available
22 | *
23 | * '''Backpressures when''' downstream backpressures, or when not enough tokens are available for the next element. For
24 | * the token input: when no tokens are needed (tokens are pulled as needed)
25 | *
26 | * '''Completes when''' upstream completes, and also when token source completes and all internally stored tokens were
27 | * consumed (best effort; also completes when the next element cost is higher than the available tokens)
28 | *
29 | * '''Cancels when''' downstream cancels
30 | */
31 | object TokenThrottle {
32 | 
33 | /**
34 | * Creates a token-based throttling flow.
35 | *
36 | * @param tokenSource source of tokens to use for controlling throughput
37 | * @param costCalculation cost function that determines the cost of a given element
38 | * @tparam A element type
39 | * @tparam M materialized value of token source
40 | * @return simple token throttle graph
41 | */
42 | def apply[A, M](tokenSource: Graph[SourceShape[Long], M])(costCalculation: A => Long): Graph[FlowShape[A, A], M] =
43 | GraphDSL.create(tokenSource) { implicit b => tokens =>
44 | import GraphDSL.Implicits._
45 | val throttle = b.add(new TokenThrottle[A](costCalculation))
46 | tokens ~> throttle.in1
47 | FlowShape(throttle.in0, throttle.out)
48 | }
49 | }
50 | 
51 | /**
52 | * Graph stage for token-based throttling. Use it directly for constructing more complex graphs.
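 | *
 | * A usage sketch for the factory above (the tick-based token source and per-byte cost are
 | * illustrative assumptions):
 | * {{{
 | * import akka.stream.scaladsl.Source
 | * import akka.util.ByteString
 | * import scala.concurrent.duration._
 | *
 | * // grant one token every 100 milliseconds; each element costs one token per byte
 | * val throttled = Source(List(ByteString("ab"), ByteString("cdef")))
 | *   .via(TokenThrottle(Source.tick(100.millis, 100.millis, 1L))((bs: ByteString) => bs.size.toLong))
 | * }}}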
53 | * 54 | * @param costCalculation cost function that determines the cost of a given element 55 | * @tparam A element type 56 | */ 57 | final class TokenThrottle[A](costCalculation: A => Long) extends GraphStage[FanInShape2[A, Long, A]] { 58 | override def initialAttributes: Attributes = Attributes.name("TokenThrottle") 59 | 60 | override val shape: FanInShape2[A, Long, A] = new FanInShape2[A, Long, A]("TokenThrottle") 61 | 62 | def out: Outlet[A] = shape.out 63 | 64 | val elemsIn: Inlet[A] = shape.in0 65 | val tokensIn: Inlet[Long] = shape.in1 66 | 67 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { 68 | 69 | var tokens: Long = 0 70 | 71 | var buffer: OptionVal[A] = none 72 | 73 | var cost: Long = -1 // invariant: buffer.isDefined => cost == costCalculation(buffer.get) 74 | 75 | var tokensCompleted = false 76 | 77 | var elemsCompleted = false 78 | 79 | private def maybeEmit(): Unit = 80 | if (buffer.isDefined) { 81 | if (tokens >= cost) { 82 | if (isAvailable(out)) { 83 | tokens -= cost 84 | push(out, buffer.get) 85 | buffer = none 86 | pullNextOrComplete() 87 | } 88 | } else { 89 | askForTokensOrComplete() 90 | } 91 | } 92 | 93 | private def pullNextOrComplete(): Unit = 94 | if (elemsCompleted || tokensExhausted) { 95 | completeStage() 96 | } else { 97 | pull(elemsIn) 98 | if (tokens == 0) askForTokens() // note that !tokensExhausted && tokens == 0 implies !tokensCompleted 99 | } 100 | 101 | private def tokensExhausted = tokensCompleted && tokens == 0 102 | 103 | private def nextElementTooExpensive = buffer.isDefined && cost > tokens 104 | 105 | private def askForTokensOrComplete(): Unit = if (!tokensCompleted) askForTokens() else completeStage() 106 | 107 | private def askForTokens(): Unit = if (!hasBeenPulled(tokensIn)) pull(tokensIn) 108 | 109 | override def preStart(): Unit = { 110 | pull(elemsIn) 111 | pull(tokensIn) 112 | } 113 | 114 | setHandler( 115 | elemsIn, 116 | new InHandler { 117 | override def onPush(): Unit = { 118 | val elem = grab(elemsIn) 119 | buffer = Some(elem) 120 | cost = costCalculation(elem) // pre-compute cost here to call cost function only once per element 121 | if (cost < 0) throw new IllegalArgumentException("Cost must be non-negative") 122 | maybeEmit() 123 | } 124 | 125 | override def onUpstreamFinish(): Unit = { 126 | if (buffer.isEmpty) completeStage() 127 | elemsCompleted = true 128 | } 129 | } 130 | ) 131 | 132 | setHandler( 133 | tokensIn, 134 | new InHandler { 135 | override def onPush(): Unit = { 136 | tokens += grab(tokensIn) 137 | maybeEmit() 138 | } 139 | 140 | override def onUpstreamFinish(): Unit = { 141 | if (tokens == 0 || nextElementTooExpensive) completeStage() 142 | tokensCompleted = true 143 | } 144 | } 145 | ) 146 | 147 | setHandler(out, new OutHandler { 148 | override def onPull(): Unit = maybeEmit() 149 | }) 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/UnfoldFlow.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream.{Attributes, FanOutShape2}
8 | import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler}
9 | import akka.util.Timeout
10 | 
11 | /** INTERNAL API */
12 | private[akka] abstract class UnfoldFlowGraphStageLogic[O, S, E] private[stream] (shape: FanOutShape2[O, S, E],
13 | seed: S,
14 | timeout: Timeout)
15 | extends GraphStageLogic(shape) {
16 | 
17 | val feedback = shape.out0
18 | val output = shape.out1
19 | val nextElem = shape.in
20 | 
21 | var pending: S = seed
22 | var pushedToCycle = false
23 | 
24 | setHandler(
25 | feedback,
26 | new OutHandler {
27 | override def onPull() = if (!pushedToCycle && isAvailable(output)) {
28 | push(feedback, pending)
29 | pending = null.asInstanceOf[S]
30 | pushedToCycle = true
31 | }
32 | 
33 | override def onDownstreamFinish() =
34 | // Do nothing until `timeout`, to try to intercept completion as downstream,
35 | // but cancel the stream after the timeout if the inlet has not closed, to prevent a deadlock.
36 | materializer.scheduleOnce(
37 | timeout.duration,
38 | new Runnable {
39 | override def run() =
40 | getAsyncCallback[Unit] { _ =>
41 | if (!isClosed(nextElem)) {
42 | failStage(
43 | new IllegalStateException(
44 | s"unfoldFlow source's inner flow canceled only upstream, while downstream remained available for $timeout"
45 | )
46 | )
47 | }
48 | }.invoke(())
49 | }
50 | )
51 | }
52 | )
53 | 
54 | setHandler(
55 | output,
56 | new OutHandler {
57 | override def onPull() = {
58 | pull(nextElem)
59 | if (!pushedToCycle && isAvailable(feedback)) {
60 | push(feedback, pending)
61 | pending = null.asInstanceOf[S]
62 | pushedToCycle = true
63 | }
64 | }
65 | }
66 | )
67 | }
68 | 
69 | /** INTERNAL API */
70 | private[akka] class FanOut2unfoldingStage[O, S, E] private[stream] (
71 | generateGraphStageLogic: FanOutShape2[O, S, E] => UnfoldFlowGraphStageLogic[O, S, E]
72 | ) extends GraphStage[FanOutShape2[O, S, E]] {
73 | override val shape = new FanOutShape2[O, S, E]("unfoldFlow")
74 | override def createLogic(attributes: Attributes) = generateGraphStageLogic(shape)
75 | }
76 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/Valve.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue, InHandler, OutHandler}
8 | import akka.stream._
9 | import akka.stream.contrib.SwitchMode.{Close, Open}
10 | 
11 | import scala.concurrent.{Future, Promise}
12 | 
13 | /**
14 | * Pause/resume a flow
15 | */
16 | sealed trait ValveSwitch {
17 | 
18 | /**
19 | * Change the state of the valve
20 | *
21 | * @param mode expected mode to switch on
22 | * @return A future that completes with true if the mode did change and false if it already was in the requested mode
23 | */
24 | def flip(mode: SwitchMode): Future[Boolean]
25 | 
26 | /**
27 | * Obtain the state of the valve
28 | *
29 | * @return A future that completes with [[SwitchMode]] to indicate the current state of the valve
30 | */
31 | def getMode(): Future[SwitchMode]
32 | }
33 | 
34 | object Valve {
35 | 
36 | /**
37 | * Factory for [[Valve]] instances.
38 | *
39 | */
40 | def apply[A](): Valve[A] = Valve[A](SwitchMode.Open)
41 | 
42 | /**
43 | * Java API: Factory for [[Valve]] instances.
44 | *
45 | */
46 | def create[A](): Valve[A] = Valve[A](SwitchMode.Open)
47 | 
48 | /**
49 | * Factory for [[Valve]] instances.
50 | *
51 | */
52 | def apply[A](mode: SwitchMode): Valve[A] = new Valve[A](mode)
53 | 
54 | /**
55 | * Java API: Factory for [[Valve]] instances.
56 | *
57 | */
58 | def create[A](mode: SwitchMode): Valve[A] = Valve[A](mode)
59 | 
60 | }
61 | 
62 | /**
63 | * Materializes into a [[Future]] of [[ValveSwitch]], which provides the method `flip` that stops or restarts the flow of elements passing through the stage. As long as the valve is closed it will backpressure.
64 | *
65 | * Note that closing the valve could result in one element being buffered inside the stage, and if the stream completes or fails while being closed, that element may be lost.
66 | *
67 | * @param mode state of the valve at the startup of the flow (by default Open)
68 | */
69 | final class Valve[A](mode: SwitchMode) extends GraphStageWithMaterializedValue[FlowShape[A, A], Future[ValveSwitch]] {
70 | 
71 | val in: Inlet[A] = Inlet[A]("valve.in")
72 | 
73 | val out: Outlet[A] = Outlet[A]("valve.out")
74 | 
75 | override val shape = FlowShape(in, out)
76 | 
77 | override def createLogicAndMaterializedValue(
78 | inheritedAttributes: Attributes
79 | ): (GraphStageLogic, Future[ValveSwitch]) = {
80 | val logic = new ValveGraphStageLogic(shape, mode)
81 | (logic, logic.promise.future)
82 | }
83 | 
84 | private class ValveGraphStageLogic(shape: Shape, var mode: SwitchMode)
85 | extends GraphStageLogic(shape)
86 | with InHandler
87 | with OutHandler {
88 | 
89 | val promise = Promise[ValveSwitch]
90 | 
91 | private val switch = new ValveSwitch {
92 | 
93 | val flipCallback = getAsyncCallback[(SwitchMode, Promise[Boolean])] {
94 | case (flipToMode, promise) =>
95 | val succeed = mode match {
96 | case _ if flipToMode == mode => false
97 | 
98 | case Open =>
99 | mode = SwitchMode.Close
100 | true
101 | 
102 | case Close =>
103 | if (isAvailable(in)) {
104 | push(out, grab(in))
105 | } else if (isAvailable(out) && !hasBeenPulled(in)) {
106 | pull(in)
107 | }
108 | 
109 | mode = SwitchMode.Open
110 | true
111 | }
112 | 
113 | promise.success(succeed)
114 | }
115 | 
116 | val getModeCallback = getAsyncCallback[Promise[SwitchMode]](_.success(mode))
117 | 
118 | override def flip(flipToMode: SwitchMode): Future[Boolean] = {
119 | val promise = Promise[Boolean]()
120 | flipCallback.invoke((flipToMode, promise))
121 | promise.future
122 | }
123 | 
124 | override def getMode(): Future[SwitchMode] = {
125 | val promise = Promise[SwitchMode]()
126 | implicit val ec = materializer.executionContext
127 | getModeCallback
128 | .invokeWithFeedback(promise)
129 | .flatMap(_ => promise.future)
130 | }
131 | }
132 | 
133 | setHandlers(in, out, this)
134 | 
135 | override def onPush(): Unit =
136 | if (isOpen) {
137 | push(out, grab(in))
138 | }
139 | 
140 | override def onPull(): Unit =
141 | if (isOpen) {
142 | pull(in)
143 | }
144 | 
145 | private def isOpen = mode == SwitchMode.Open
146 | 
147 | override def preStart() =
148 | promise.success(switch)
149 | }
150 | 
151 | }
152 | 
153 | trait SwitchMode
154 | 
155 | object SwitchMode {
156 | 
157 | case object Open extends SwitchMode
158 | 
159 | case object Close extends SwitchMode
160 | }
161 | 
--------------------------------------------------------------------------------
/src/main/scala/akka/stream/contrib/ZipInputStreamSource.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2016 Lightbend Inc.
3 | */
4 | 
5 | package akka.stream.contrib
6 | 
7 | import java.util.zip.{ZipEntry, ZipInputStream}
8 | import akka.stream.Attributes.{name, InputBuffer}
9 | import akka.stream.contrib.ZipInputStreamSource.ZipEntryData
10 | import akka.stream.impl.Stages.DefaultAttributes.IODispatcher
11 | import akka.stream.scaladsl.Source
12 | import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue, OutHandler}
13 | import akka.stream.{Attributes, Outlet, SourceShape}
14 | import akka.util.ByteString
15 | import akka.util.ByteString.ByteString1C
16 | import scala.annotation.tailrec
17 | import scala.collection.immutable
18 | import scala.concurrent.{Future, Promise}
19 | import scala.util.control.NonFatal
20 | 
21 | /**
22 | * This companion defines a factory for [[ZipInputStreamSource]] instances,
23 | * see [[ZipInputStreamSource.apply]].
24 | */
25 | object ZipInputStreamSource {
26 | 
27 | final val DefaultChunkSize = 8192
28 | final val DefaultAllowedZipExtensions = immutable.Seq(".zip")
29 | 
30 | /**
31 | * Data type for zip entries.
32 | *
33 | * @param name file name
34 | * @param creationTime The last modification time of the entry in milliseconds
35 | * since the epoch, or -1 if not specified
36 | */
37 | final case class ZipEntryData(name: String, creationTime: Long)
38 | 
39 | /**
40 | * Factory for [[ZipInputStreamSource]] instances wrapped
41 | * into [[Source]].
42 | *
43 | * @param in a function that builds a [[ZipInputStream]]
44 | * @param chunkSize the size of the chunks
45 | * @param allowedZipExtensions collection of allowed extensions for zipped containers,
46 | * such as zip or jar files. Only the ".zip" extension is allowed by default.
47 | * @return [[ZipInputStreamSource]] instance
48 | */
49 | def apply(
50 | in: () => ZipInputStream,
51 | chunkSize: Int = DefaultChunkSize,
52 | allowedZipExtensions: immutable.Seq[String] = DefaultAllowedZipExtensions
53 | ): Source[(ZipEntryData, ByteString), Future[Long]] =
54 | Source
55 | .fromGraph(new ZipInputStreamSource(in, chunkSize, allowedZipExtensions))
56 | .withAttributes(name("zipInputStreamSource") and IODispatcher)
57 | 
58 | /**
59 | * Java API: Factory for [[ZipInputStreamSource]] instances wrapped
60 | * into [[Source]].
61 | *
62 | * @param in a function that builds a [[ZipInputStream]]
63 | * @param chunkSize the size of the chunks
64 | * @param allowedZipExtensions collection of allowed extensions for zipped containers,
65 | * such as zip or jar files. Only the ".zip" extension is allowed by default.
66 | * @return [[ZipInputStreamSource]] instance
67 | */
68 | def create(
69 | in: Function0[ZipInputStream],
70 | chunkSize: Int = DefaultChunkSize,
71 | allowedZipExtensions: immutable.Seq[String] = DefaultAllowedZipExtensions
72 | ): Source[(ZipEntryData, ByteString), Future[Long]] =
73 | Source
74 | .fromGraph(new ZipInputStreamSource(in, chunkSize, allowedZipExtensions))
75 | .withAttributes(name("zipInputStreamSource") and IODispatcher)
76 | }
77 | 
78 | /**
79 | * A stage that works as a [[Source]] of data chunks extracted from
80 | * zip files. In addition to regular files, the zip file might contain
81 | * directories and other zip files. Every chunk is a tuple of [[ZipEntryData]]
82 | * and [[ByteString]], where the former carries basic info about the file from
83 | * which the bytes come and the latter carries those bytes. This stage
84 | * materializes to the total amount of read bytes.
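 | *
 | * A usage sketch (the archive path and an implicit materializer in scope are assumptions of
 | * this example):
 | * {{{
 | * import java.io.FileInputStream
 | * import java.util.zip.ZipInputStream
 | * import akka.stream.scaladsl.{Keep, Sink}
 | * import scala.concurrent.Future
 | *
 | * val totalBytes: Future[Long] =
 | *   ZipInputStreamSource(() => new ZipInputStream(new FileInputStream("archive.zip")))
 | *     .toMat(Sink.foreach { case (entry, chunk) =>
 | *       println(s"${entry.name}: ${chunk.size} bytes")
 | *     })(Keep.left)
 | *     .run()
 | * }}}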
85 | * 86 | * @param in a function that builds a [[ZipInputStream]] 87 | * @param chunkSize the size of the chunks 88 | */ 89 | final class ZipInputStreamSource private (in: () => ZipInputStream, 90 | chunkSize: Int, 91 | allowedZipExtensions: immutable.Seq[String]) 92 | extends GraphStageWithMaterializedValue[SourceShape[(ZipEntryData, ByteString)], Future[Long]] { 93 | 94 | val matValue = Promise[Long]() 95 | 96 | override val shape = 97 | SourceShape(Outlet[(ZipEntryData, ByteString)]("zipInputStreamSource.out")) 98 | 99 | override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { 100 | 101 | val InputBuffer(initialBuffer, maxBuffer) = 102 | inheritedAttributes.getAttribute(classOf[InputBuffer], InputBuffer(16, 16)) 103 | 104 | val logic = new GraphStageLogic(shape) { 105 | import shape._ 106 | 107 | private var is: ZipInputStream = null 108 | private var readBytesTotal: Long = 0L 109 | private var buffer: Vector[(ZipEntryData, ByteString)] = Vector.empty 110 | private var eof: Boolean = false 111 | private var currentEntry: Option[ZipEntry] = None 112 | private var currentStreams: Seq[ZipInputStream] = Seq() 113 | 114 | override def preStart(): Unit = { 115 | super.preStart() 116 | try { 117 | is = in() 118 | currentStreams = List(is) 119 | fillBuffer(initialBuffer) 120 | } catch { 121 | case NonFatal(ex) => 122 | matValue.failure(ex) 123 | failStage(ex) 124 | } 125 | } 126 | 127 | override def postStop(): Unit = { 128 | if (is != null) { 129 | is.close() 130 | } 131 | super.postStop() 132 | } 133 | 134 | setHandler( 135 | out, 136 | new OutHandler { 137 | 138 | override def onPull(): Unit = { 139 | fillBuffer(maxBuffer) 140 | buffer match { 141 | case Seq() => 142 | finalize() 143 | case head +: Seq() => 144 | push(out, head) 145 | finalize() 146 | case head +: tail => 147 | push(out, head) 148 | buffer = tail 149 | } 150 | def finalize() = 151 | try { 152 | is.close() 153 | } finally { 154 | matValue.success(readBytesTotal) 155 | complete(out) 156 | } 157 | } 158 | 159 | override def onDownstreamFinish(): Unit = 160 | try { 161 | is.close() 162 | } finally { 163 | matValue.success(readBytesTotal) 164 | super.onDownstreamFinish() 165 | } 166 | } 167 | ) // end of handler 168 | 169 | @tailrec private def nextEntry(streams: Seq[ZipInputStream]): (Option[ZipEntry], Seq[ZipInputStream]) = 170 | streams match { 171 | case Seq() => (None, streams) 172 | case (z :: zs) => 173 | val entry = Option(z.getNextEntry) 174 | entry match { 175 | case None => 176 | nextEntry(zs) 177 | case Some(e) if isZipFile(e) => 178 | nextEntry(new ZipInputStream(z) +: streams) 179 | case Some(e) if e.isDirectory => 180 | nextEntry(streams) 181 | case _ => 182 | (entry, streams) 183 | } 184 | } 185 | 186 | private def isZipFile(e: ZipEntry) = { 187 | val name = e.getName.toLowerCase 188 | allowedZipExtensions.exists(name.endsWith) 189 | } 190 | 191 | /** BLOCKING I/O READ */ 192 | private def readChunk() = { 193 | def read(arr: Array[Byte]) = currentStreams.headOption.flatMap { stream => 194 | val readBytes = stream.read(arr) 195 | if (readBytes == -1) { 196 | val (entry, streams) = nextEntry(currentStreams) 197 | currentStreams = streams 198 | currentEntry = entry 199 | entry.map(zipEntry => { 200 | (zipEntry, streams.head.read(arr)) 201 | }) 202 | } else { 203 | Some((currentEntry.get, readBytes)) 204 | } 205 | } 206 | val arr = Array.ofDim[Byte](chunkSize) 207 | read(arr) match { 208 | case None => 209 | eof = true 210 | case Some((entry, readBytes)) => 211 | readBytesTotal += readBytes 
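 | // the running total accumulated above is what this stage materializes as Future[Long]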
212 | val entryData = ZipEntryData(entry.getName, entry.getTime) 213 | val chunk = 214 | if (readBytes == chunkSize) 215 | ByteString1C(arr) 216 | else 217 | ByteString1C(arr).take(readBytes) 218 | buffer :+= ((entryData, chunk)) 219 | } 220 | } // readChunk 221 | 222 | private def fillBuffer(size: Int) = 223 | while (buffer.length < size && !eof) { 224 | readChunk() 225 | } 226 | 227 | } 228 | 229 | (logic, matValue.future) 230 | } 231 | } 232 | -------------------------------------------------------------------------------- /src/main/scala/akka/stream/contrib/latencyTimer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import java.time.Clock 8 | import java.time.temporal.ChronoUnit 9 | 10 | import akka.Done 11 | import akka.annotation.InternalApi 12 | import akka.stream._ 13 | import akka.stream.scaladsl.{Flow, GraphDSL, Sink} 14 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} 15 | import akka.stream.contrib.LatencyTimer._ 16 | 17 | import scala.concurrent.Future 18 | import scala.concurrent.duration._ 19 | 20 | private final class LatencyTimerStartStage[InOut](private val clock: Clock) extends GraphStage[TimerStartShape[InOut]] { 21 | 22 | private[contrib] val in = Inlet.create[InOut]("LatencyTimerStart.In") 23 | private[contrib] val out = Outlet.create[InOut]("LatencyTimerStart.Out") 24 | private[contrib] val outTimerContext = Outlet.create[TimerContext]("LatencyTimerStart.OutTimerContext") 25 | 26 | override val shape: TimerStartShape[InOut] = TimerStartShape(in, out, outTimerContext) 27 | 28 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { 29 | 30 | setHandler(in, new InHandler { 31 | override def onPush(): Unit = { 32 | emit(outTimerContext, TimerContext(clock)) 33 | push(out, grab(in)) 34 | } 35 | }) 36 | 37 | private[this] val outHandler = new OutHandler { 38 | override def onPull(): Unit = if (!hasBeenPulled(in)) tryPull(in) 39 | } 40 | 41 | setHandler(out, outHandler) 42 | setHandler(outTimerContext, outHandler) 43 | } 44 | } 45 | 46 | private final class LatencyTimerEndStage[InOut] extends GraphStage[TimerEndShape[InOut]] { 47 | 48 | private[contrib] val in = Inlet.create[InOut]("LatencyTimerEnd.In") 49 | private[contrib] val inTimerContext = Inlet.create[TimerContext]("LatencyTimerEnd.InTimerContext") 50 | private[contrib] val out = Outlet.create[InOut]("LatencyTimerEnd.Out") 51 | private[contrib] val outTimedResult = Outlet.create[TimedResult[InOut]]("LatencyTimerEnd.TimedResult") 52 | 53 | override val shape: TimerEndShape[InOut] = TimerEndShape(in, inTimerContext, out, outTimedResult) 54 | 55 | override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { 56 | 57 | private[this] var timerContext = None: Option[TimerContext] 58 | 59 | setHandler( 60 | in, 61 | new InHandler { 62 | override def onPush(): Unit = { 63 | val element = grab(in) 64 | emit(outTimedResult, TimedResult(element, timerContext.get.stop())) 65 | timerContext = None 66 | push(out, element) 67 | } 68 | } 69 | ) 70 | 71 | setHandler(inTimerContext, new InHandler { 72 | override def onPush(): Unit = timerContext = Some(grab(inTimerContext)) 73 | }) 74 | 75 | setHandler(out, new OutHandler { 76 | override def onPull(): Unit = tryPull(in) 77 | }) 78 | 79 | setHandler(outTimedResult, new OutHandler { 80 | override def onPull(): Unit = 
tryPull(inTimerContext) 81 | }) 82 | } 83 | } 84 | 85 | /** 86 | * {{{ 87 | * +------------+ 88 | * | | ~> InOut 89 | * InOut ~> | TimerStart | 90 | * | | ~> TimerContext 91 | * +------------+ 92 | * }}} 93 | */ 94 | private final case class TimerStartShape[InOut](in: Inlet[InOut], 95 | out: Outlet[InOut], 96 | outTimerContext: Outlet[TimerContext]) 97 | extends Shape { 98 | override val inlets: collection.immutable.Seq[Inlet[_]] = in :: Nil 99 | override val outlets: collection.immutable.Seq[Outlet[_]] = out :: outTimerContext :: Nil 100 | 101 | override def deepCopy(): TimerStartShape[InOut] = 102 | TimerStartShape(in.carbonCopy(), out.carbonCopy(), outTimerContext.carbonCopy()) 103 | } 104 | 105 | /** 106 | * {{{ 107 | * +------------+ 108 | * InOut ~> | | ~> InOut 109 | * | TimerEnd | 110 | * TimerContext ~> | | ~> TimedResult[InOut, FiniteDuration] 111 | * +------------+ 112 | * }}} 113 | */ 114 | private final case class TimerEndShape[InOut](in: Inlet[InOut], 115 | inTimerContext: Inlet[TimerContext], 116 | out: Outlet[InOut], 117 | outTimedResult: Outlet[TimedResult[InOut]]) 118 | extends Shape { 119 | override val inlets: collection.immutable.Seq[Inlet[_]] = in :: inTimerContext :: Nil 120 | override val outlets: collection.immutable.Seq[Outlet[_]] = out :: outTimedResult :: Nil 121 | 122 | override def deepCopy(): TimerEndShape[InOut] = 123 | TimerEndShape(in.carbonCopy(), inTimerContext.carbonCopy(), out.carbonCopy(), outTimedResult.carbonCopy()) 124 | } 125 | 126 | private[contrib] object LatencyTimerStartStage { 127 | def apply[T](clock: Clock): LatencyTimerStartStage[T] = new LatencyTimerStartStage[T](clock) 128 | } 129 | 130 | private[contrib] object LatencyTimerEndStage { 131 | def apply[T](): LatencyTimerEndStage[T] = new LatencyTimerEndStage[T]() 132 | } 133 | 134 | object LatencyTimer { 135 | 136 | private[contrib] case class TimerContext(clock: Clock) { 137 | private val started = clock.instant() 138 | 139 | def stop(): FiniteDuration = FiniteDuration(started.until(clock.instant(), ChronoUnit.NANOS), NANOSECONDS) 140 | } 141 | 142 | case class TimedResult[T](outcome: T, measuredTime: FiniteDuration) 143 | 144 | @InternalApi // mainly for testing (mock Clock) 145 | private[contrib] def createGraph[I, O, Mat]( 146 | flow: Flow[I, O, Mat], 147 | sink: Graph[SinkShape[TimedResult[O]], Future[Done]] 148 | )(clock: Clock): Flow[I, O, Mat] = { 149 | val graph = GraphDSL.create(flow) { implicit builder: GraphDSL.Builder[Mat] => 150 | import GraphDSL.Implicits._ 151 | fl => 152 | // Junctions need to be created from blueprint via `builder.add(...)` 153 | val startTimer = builder.add(LatencyTimerStartStage[I](clock)) 154 | val endTimer = builder.add(LatencyTimerEndStage[O]()) 155 | 156 | // @formatter:off 157 | startTimer.out ~> fl ~> endTimer.in 158 | startTimer.outTimerContext ~> endTimer.inTimerContext; endTimer.outTimedResult ~> sink 159 | // @formatter:on 160 | FlowShape(startTimer.in, endTimer.out) 161 | } 162 | Flow.fromGraph(graph) 163 | } 164 | 165 | /** 166 | * Wraps a given flow and measures the time between input and output. The measured result is pushed to a dedicated sink. 167 | * The [[TimedResult]] contains the result of the wrapped flow as well in case some logic has to be done. 168 | * 169 | * Important Note: the wrapped flow must preserve the order, otherwise timing will be wrong. 170 | * 171 | * Consider bringing [[akka.stream.contrib.Implicits]] into scope for DSL support. 
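 | *
 | * A usage sketch (the doubling flow and println sink are illustrative assumptions):
 | * {{{
 | * import akka.NotUsed
 | * import akka.stream.scaladsl.{Flow, Sink}
 | * import akka.stream.contrib.LatencyTimer.TimedResult
 | *
 | * val measured: Flow[Int, Int, NotUsed] =
 | *   LatencyTimer(Flow[Int].map(_ * 2),
 | *     Sink.foreach[TimedResult[Int]](r => println(s"took ${r.measuredTime}")))
 | * }}}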
172 | * 173 | * @param flow the flow which will be measured 174 | * @param sink a sink which will handle the [[TimedResult]] 175 | * @tparam I input-type of the wrapped flow 176 | * @tparam O output-type of the wrapped flow 177 | * @tparam Mat materialized-type of the wrapped flow 178 | * @return Flow of the same shape as the wrapped flow 179 | */ 180 | def apply[I, O, Mat](flow: Flow[I, O, Mat], sink: Graph[SinkShape[TimedResult[O]], Future[Done]]): Flow[I, O, Mat] = 181 | createGraph(flow, sink)(Clock.systemDefaultZone()) 182 | 183 | /** 184 | * Wraps a given flow and measures the time between its input and output. The given function is called for each measured element. 185 | * The [[TimedResult]] also carries the output element of the wrapped flow, in case further processing is needed. 186 | * 187 | * Important note: the wrapped flow must preserve element order, otherwise measurements will be attributed to the wrong elements. 188 | * 189 | * Consider bringing [[akka.stream.contrib.Implicits]] into scope for DSL support. 190 | * 191 | * @param flow the flow which will be measured 192 | * @param resultFunction side-effecting function which gets called with each result 193 | * @tparam I input-type of the wrapped flow 194 | * @tparam O output-type of the wrapped flow 195 | * @tparam Mat materialized-type of the wrapped flow 196 | * @return Flow of the same shape as the wrapped flow 197 | */ 198 | def apply[I, O, Mat](flow: Flow[I, O, Mat], resultFunction: TimedResult[O] => Unit): Flow[I, O, Mat] = 199 | this(flow, Sink.foreach[TimedResult[O]](resultFunction)) 200 | } 201 | -------------------------------------------------------------------------------- /src/test/java/akka/stream/contrib/DirectoryChangesTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib; 6 | 7 | import akka.actor.ActorSystem; 8 | import akka.japi.Pair; 9 | import akka.stream.ActorMaterializer; 10 | import akka.stream.Materializer; 11 | import akka.stream.javadsl.Sink; 12 | import akka.stream.testkit.TestSubscriber; 13 | import akka.testkit.TestKit; 14 | import com.google.common.jimfs.Configuration; 15 | import com.google.common.jimfs.Jimfs; 16 | import com.google.common.jimfs.WatchServiceConfiguration; 17 | import org.junit.After; 18 | import org.junit.Before; 19 | import org.junit.Test; 20 | import scala.concurrent.duration.Duration; 21 | import scala.concurrent.duration.FiniteDuration; 22 | 23 | import java.nio.file.FileSystem; 24 | import java.nio.file.Files; 25 | import java.nio.file.Path; 26 | import java.util.ArrayList; 27 | import java.util.List; 28 | import java.util.concurrent.TimeUnit; 29 | import java.util.stream.Stream; 30 | 31 | import static org.junit.Assert.assertEquals; 32 | 33 | public class DirectoryChangesTest { 34 | 35 | private ActorSystem system; 36 | private Materializer materializer; 37 | private FileSystem fs; 38 | private Path testDir; 39 | 40 | @Before 41 | public void setup() throws Exception { 42 | system = ActorSystem.create(); 43 | materializer = ActorMaterializer.create(system); 44 | 45 | fs = Jimfs.newFileSystem( 46 | Configuration.forCurrentPlatform() 47 | .toBuilder() 48 | .setWatchServiceConfiguration(WatchServiceConfiguration.polling(10, TimeUnit.MILLISECONDS)) 49 | .build() 50 | ); 51 | 52 | testDir = fs.getPath("testdir"); 53 | 54 | Files.createDirectory(testDir); 55 | } 56 | 57 | 58 | @Test 59 | public void sourceShouldEmitOnDirectoryChanges() throws Exception { 60 | final TestSubscriber.Probe<Pair<Path, DirectoryChanges.Change>> probe = TestSubscriber.probe(system); 61 | 62 | DirectoryChanges.create(testDir, FiniteDuration.create(250, TimeUnit.MILLISECONDS), 200) 63 | .runWith(Sink.fromSubscriber(probe), materializer); 64 | 65 | probe.request(1); 66 | 67 | final Path createdFile = Files.createFile(testDir.resolve("test1file1.sample")); 68 | 69 | final Pair<Path, DirectoryChanges.Change> pair1 = probe.expectNext(); 70 | assertEquals(pair1.second(), DirectoryChanges.Change.Creation); 71 | assertEquals(pair1.first(), createdFile); 72 | 73 | Files.write(createdFile, "Some data".getBytes()); 74 | 75 | final Pair<Path, DirectoryChanges.Change> pair2 = probe.requestNext(); 76 | assertEquals(pair2.second(), DirectoryChanges.Change.Modification); 77 | assertEquals(pair2.first(), createdFile); 78 | 79 | Files.delete(createdFile); 80 | 81 | final Pair<Path, DirectoryChanges.Change> pair3 = probe.requestNext(); 82 | assertEquals(pair3.second(), DirectoryChanges.Change.Deletion); 83 | assertEquals(pair3.first(), createdFile); 84 | 85 | probe.cancel(); 86 | } 87 | 88 | 89 | @Test 90 | public void emitMultipleChanges() throws Exception { 91 | final TestSubscriber.Probe<Pair<Path, DirectoryChanges.Change>> probe = 92 | TestSubscriber.<Pair<Path, DirectoryChanges.Change>>probe(system); 93 | 94 | final int numberOfChanges = 50; 95 | 96 | DirectoryChanges.create( 97 | testDir, 98 | FiniteDuration.create(250, TimeUnit.MILLISECONDS), 99 | numberOfChanges * 2 100 | ).runWith(Sink.fromSubscriber(probe), materializer); 101 | 102 | probe.request(numberOfChanges); 103 | 104 | final int halfRequested = numberOfChanges / 2; 105 | final List<Path> files = new ArrayList<>(); 106 | 107 | for (int i = 0; i < halfRequested; i++) { 108 | final Path file = Files.createFile(testDir.resolve("test2files" + i)); 109 | files.add(file); 110 | } 111 | 112 | for (int i = 0; i < halfRequested; i++) { 113 | probe.expectNext(); 114 | } 115 | 116 | for (int i = 0; i < halfRequested; i++) { 117 | Files.delete(files.get(i)); 118 | } 119 | 
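// each file deletion above should arrive as its own change event, consuming demand from the request made up front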
120 | for (int i = 0; i < halfRequested; i++) { 121 | probe.expectNext(); 122 | } 123 | 124 | probe.cancel(); 125 | } 126 | 127 | @After 128 | public void tearDown() throws Exception { 129 | TestKit.shutdownActorSystem(system, Duration.create("20 seconds"), true); 130 | fs.close(); 131 | } 132 | 133 | } 134 | -------------------------------------------------------------------------------- /src/test/java/akka/stream/contrib/FileTailSourceTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib; 6 | 7 | import akka.NotUsed; 8 | import akka.actor.ActorSystem; 9 | import akka.japi.Pair; 10 | import akka.stream.*; 11 | import akka.stream.javadsl.Framing; 12 | import akka.stream.javadsl.Keep; 13 | import akka.stream.javadsl.Sink; 14 | import akka.stream.javadsl.Source; 15 | import akka.stream.testkit.TestSubscriber; 16 | import akka.testkit.TestKit; 17 | import akka.util.ByteString; 18 | import com.google.common.jimfs.Configuration; 19 | import com.google.common.jimfs.Jimfs; 20 | import org.junit.After; 21 | import org.junit.Before; 22 | import org.junit.Test; 23 | 24 | import static java.nio.charset.StandardCharsets.UTF_8; 25 | import static java.nio.file.StandardOpenOption.APPEND; 26 | import static java.nio.file.StandardOpenOption.WRITE; 27 | import static org.junit.Assert.*; 28 | import scala.concurrent.duration.FiniteDuration; 29 | 30 | import java.nio.charset.StandardCharsets; 31 | import java.nio.file.*; 32 | import java.util.concurrent.CompletionStage; 33 | import java.util.concurrent.TimeUnit; 34 | 35 | 36 | public class FileTailSourceTest { 37 | 38 | private FileSystem fs; 39 | private ActorSystem system; 40 | private Materializer materializer; 41 | 42 | @Before 43 | public void setup() { 44 | fs = Jimfs.newFileSystem(Configuration.unix()); 45 | system = ActorSystem.create(); 46 | materializer = ActorMaterializer.create(system); 47 | } 48 | 49 | 50 | 51 | @Test 52 | public void canReadAnEntireFile() throws Exception { 53 | final Path path = fs.getPath("/file"); 54 | final String dataInFile = "a\nb\nc\nd"; 55 | Files.write(path, dataInFile.getBytes(UTF_8)); 56 | 57 | final Source<ByteString, NotUsed> source = FileTailSource.create( 58 | path, 59 | 8192, // chunk size 60 | 0, // starting position 61 | FiniteDuration.create(250, TimeUnit.MILLISECONDS)); 62 | 63 | final TestSubscriber.Probe<ByteString> subscriber = TestSubscriber.probe(system); 64 | 65 | final UniqueKillSwitch killSwitch = 66 | source.viaMat(KillSwitches.single(), Keep.right()) 67 | .to(Sink.fromSubscriber(subscriber)) 68 | .run(materializer); 69 | 70 | ByteString result = subscriber.requestNext(); 71 | assertEquals(dataInFile, result.utf8String()); 72 | 73 | killSwitch.shutdown(); 74 | subscriber.expectComplete(); 75 | 76 | } 77 | 78 | @Test 79 | public void willReadNewLinesAppendedAfterReadingTheInitialContents() throws Exception { 80 | final Path path = fs.getPath("/file"); 81 | Files.write(path, "a\n".getBytes(UTF_8)); 82 | 83 | final Source<ByteString, NotUsed> source = FileTailSource.create( 84 | path, 85 | 8192, // chunk size 86 | 0, // starting position 87 | FiniteDuration.create(250, TimeUnit.MILLISECONDS)); 88 | 89 | final TestSubscriber.Probe<ByteString> subscriber = TestSubscriber.probe(system); 90 | 91 | final UniqueKillSwitch killSwitch = 92 | source.viaMat(KillSwitches.single(), Keep.right()) 93 | .to(Sink.fromSubscriber(subscriber)) 94 | .run(materializer); 95 | 96 | ByteString result1 = subscriber.requestNext(); 97 | assertEquals("a\n", 
result1.utf8String()); 98 | 99 | subscriber.request(1); 100 | Files.write(path, "b\n".getBytes(UTF_8), WRITE, APPEND); 101 | assertEquals("b\n", subscriber.expectNext().utf8String()); 102 | 103 | Files.write(path, "c\n".getBytes(UTF_8), WRITE, APPEND); 104 | subscriber.request(1); 105 | assertEquals("c\n", subscriber.expectNext().utf8String()); 106 | 107 | killSwitch.shutdown(); 108 | subscriber.expectComplete(); 109 | } 110 | 111 | @After 112 | public void tearDown() throws Exception { 113 | fs.close(); 114 | fs = null; 115 | TestKit.shutdownActorSystem(system, FiniteDuration.create(10, TimeUnit.SECONDS), true); 116 | system = null; 117 | materializer = null; 118 | } 119 | 120 | 121 | // small sample of usage, tails the file at the first argument path 122 | public static void main(String... args) { 123 | if(args.length != 1) throw new IllegalArgumentException("Usage: FileTailSourceTest [path]"); 124 | 125 | ActorSystem system = ActorSystem.create(); 126 | Materializer materializer = ActorMaterializer.create(system); 127 | 128 | FileSystem fs = FileSystems.getDefault(); 129 | Source<ByteString, NotUsed> source = FileTailSource.create( 130 | fs.getPath(args[0]), 131 | 8192, // chunk size 132 | 0, // starting position 133 | FiniteDuration.create(250, TimeUnit.MILLISECONDS)); 134 | 135 | source.via(Framing.delimiter(ByteString.fromString("\n"), 8192)) 136 | .map(bytes -> bytes.utf8String()) 137 | .runForeach((line) -> System.out.println(line), materializer); 138 | 139 | } 140 | 141 | } 142 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/AccumulateSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.scaladsl.{Keep, Source} 8 | import akka.stream.testkit.scaladsl.{TestSink, TestSource} 9 | 10 | class AccumulateSpec extends BaseStreamSpec { 11 | 12 | "Accumulate" should { 13 | "emit folded values starting with the result of applying the given function to the given zero and the first pushed element" in { 14 | val (source, sink) = TestSource 15 | .probe[Int] 16 | .via(Accumulate(0)(_ + _)) 17 | .toMat(TestSink.probe)(Keep.both) 18 | .run() 19 | sink.request(99) 20 | source.sendNext(1) 21 | source.sendNext(2) 22 | source.sendNext(3) 23 | sink.expectNext(1, 3, 6) 24 | source.sendComplete() 25 | sink.expectComplete() 26 | } 27 | 28 | "not emit any value for an empty source" in { 29 | Source(Vector.empty[Int]) 30 | .via(Accumulate(0)(_ + _)) 31 | .runWith(TestSink.probe) 32 | .request(99) 33 | .expectComplete() 34 | } 35 | 36 | "fail on upstream failure" in { 37 | val (source, sink) = TestSource 38 | .probe[Int] 39 | .via(Accumulate(0)(_ + _)) 40 | .toMat(TestSink.probe)(Keep.both) 41 | .run() 42 | sink.request(99) 43 | source.sendError(new Exception) 44 | sink.expectError() 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/AccumulateWhileUnchangedSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.scaladsl.{Keep, Source} 8 | import akka.stream.testkit.scaladsl.{TestSink, TestSource} 9 | 10 | import scala.collection.immutable 11 | import scala.collection.immutable.Seq 12 | import scala.concurrent.duration._ 13 | 14 | class AccumulateWhileUnchangedSpec extends BaseStreamSpec { 15 | 16 | "AccumulateWhileUnchanged" should { 17 | 18 | "emit accumulated elements when the given property changes" in { 19 | val sink = Source(SampleElements.All) 20 | .via(AccumulateWhileUnchanged(_.value)) 21 | .toMat(TestSink.probe)(Keep.right) 22 | .run() 23 | 24 | sink.request(42) 25 | sink.expectNext(SampleElements.Ones, SampleElements.Twos, SampleElements.Threes) 26 | sink.expectComplete() 27 | } 28 | 29 | "not emit any value for an empty source" in { 30 | Source 31 | .empty[Element] 32 | .via(AccumulateWhileUnchanged(_.value)) 33 | .runWith(TestSink.probe) 34 | .request(42) 35 | .expectComplete() 36 | } 37 | 38 | "fail on upstream failure" in { 39 | val (source, sink) = TestSource 40 | .probe[Element] 41 | .via(AccumulateWhileUnchanged(_.value)) 42 | .toMat(TestSink.probe)(Keep.both) 43 | .run() 44 | sink.request(42) 45 | source.sendError(new Exception) 46 | sink.expectError() 47 | } 48 | 49 | "used with maxElements" should { 50 | "emit after maxElements or when the property changes" in { 51 | val sink = Source(SampleElements.All) 52 | .via(AccumulateWhileUnchanged(_.value, Some(2))) 53 | .toMat(TestSink.probe)(Keep.right) 54 | .run() 55 | 56 | sink.request(42) 57 | sink.expectNext(Seq(SampleElements.E11, SampleElements.E21)) 58 | sink.expectNext(Seq(SampleElements.E31)) 59 | sink.expectNext(Seq(SampleElements.E42, SampleElements.E52)) 60 | sink.expectNext(Seq(SampleElements.E63)) 61 | sink.expectComplete() 62 | } 63 | } 64 | 65 | "used with maxDuration" should { 66 | "emit after maxDuration or when the property changes" in { 67 | val (src, sink) = TestSource 68 | .probe[Element] 69 | .via(AccumulateWhileUnchanged(_.value, maxDuration = Some(500.millis))) 70 | .toMat(TestSink.probe[Seq[Element]])(Keep.both) 71 | .run() 72 | 73 | sink.request(42) 74 | SampleElements.Ones.foreach(src.sendNext) 75 | sink.expectNoMsg(300.millis) 76 | sink.expectNext(SampleElements.Ones) 77 | src.sendComplete() 78 | sink.expectComplete() 79 | } 80 | 81 | "emit after maxDuration with backpressure" in { 82 | val (src, sink) = TestSource 83 | .probe[Element] 84 | .via(AccumulateWhileUnchanged(_.value, maxDuration = Some(100.millis))) 85 | .toMat(TestSink.probe[Seq[Element]])(Keep.both) 86 | .run() 87 | 88 | // Pull/Push Ones without backpressure 89 | sink.request(1) 90 | SampleElements.Ones.foreach(src.sendNext) 91 | sink.expectNext(SampleElements.Ones) 92 | 93 | // Make more input data available without downstream demand for it 94 | SampleElements.Twos.foreach(src.sendNext) 95 | // Wait for longer than maxDuration so the timer expires and Twos are pushed 96 | sink.expectNoMsg(200.millis) 97 | SampleElements.Threes.foreach(src.sendNext) 98 | // Wait for longer than maxDuration so the timer expires. 99 | // Threes can't be pushed yet since there is no demand. 
100 | sink.expectNoMsg(200.millis) 101 | 102 | // Verify all expected messages arrive at sink 103 | sink.request(2) 104 | sink.expectNext(SampleElements.Twos) 105 | sink.expectNext(SampleElements.Threes) 106 | 107 | src.sendComplete() 108 | sink.expectComplete() 109 | } 110 | 111 | "emit after maxDuration with long wait" in { 112 | val (src, sink) = TestSource 113 | .probe[Element] 114 | .via(AccumulateWhileUnchanged(_.value, maxDuration = Some(100.millis))) 115 | .toMat(TestSink.probe[Seq[Element]])(Keep.both) 116 | .run() 117 | 118 | // Pull/Push Ones without backpressure 119 | sink.request(1) 120 | SampleElements.Ones.foreach(src.sendNext) 121 | sink.expectNext(SampleElements.Ones) 122 | 123 | // Ask for more data, but wait long enough for the timer to expire before providing it 124 | sink.request(1) 125 | sink.expectNoMsg(200.millis) 126 | 127 | // Elements made available together should be grouped together 128 | SampleElements.Twos.foreach(src.sendNext) 129 | sink.expectNext(SampleElements.Twos) 130 | 131 | src.sendComplete() 132 | sink.expectComplete() 133 | } 134 | } 135 | 136 | "used with maxElements and maxDuration" should { 137 | "emit without dropping" in { 138 | val (src, sink) = TestSource 139 | .probe[Element] 140 | .via(AccumulateWhileUnchanged(_.value, maxElements = Some(2), maxDuration = Some(500.millis))) 141 | .toMat(TestSink.probe[Seq[Element]])(Keep.both) 142 | .run() 143 | 144 | SampleElements.Twos.foreach(src.sendNext) 145 | sink.request(1) 146 | sink.expectNext(SampleElements.Twos) 147 | 148 | SampleElements.Ones.foreach(src.sendNext) 149 | sink.request(1) 150 | sink.expectNext(SampleElements.Ones.take(2)) 151 | 152 | // Complete should return last element of Ones immediately 153 | src.sendComplete() 154 | sink.request(1) 155 | sink.expectNext(Seq(SampleElements.Ones(2))) 156 | sink.expectComplete() 157 | } 158 | } 159 | } 160 | } 161 | 162 | case class Element(id: Int, value: Int) 163 | 164 | object SampleElements { 165 | 166 | val E11 = Element(1, 1) 167 | val E21 = Element(2, 1) 168 | val E31 = Element(3, 1) 169 | val E42 = Element(4, 2) 170 | val E52 = Element(5, 2) 171 | val E63 = Element(6, 3) 172 | 173 | val Ones = immutable.Seq(E11, E21, E31) 174 | val Twos = immutable.Seq(E42, E52) 175 | val Threes = immutable.Seq(E63) 176 | 177 | val All = Ones ++ Twos ++ Threes 178 | } 179 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/BaseStreamSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.actor.ActorSystem 8 | import akka.stream.ActorMaterializer 9 | import com.typesafe.config.{Config, ConfigFactory} 10 | import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec} 11 | 12 | import scala.concurrent.Await 13 | import scala.concurrent.duration.DurationInt 14 | 15 | trait BaseStreamSpec extends WordSpec with Matchers with BeforeAndAfterAll { 16 | 17 | protected implicit val system = { 18 | def systemConfig = 19 | config.withFallback(ConfigFactory.load()) 20 | ActorSystem("default", systemConfig) 21 | } 22 | 23 | protected implicit val mat = ActorMaterializer() 24 | 25 | override protected def afterAll() = { 26 | Await.ready(system.terminate(), 42.seconds) 27 | super.afterAll() 28 | } 29 | 30 | protected def config: Config = ConfigFactory.empty() 31 | } 32 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/DelayFlowSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.contrib.DelayFlow.DelayStrategy 8 | import akka.stream.scaladsl.Source 9 | import scala.language.postfixOps 10 | import akka.stream.testkit.scaladsl.TestSink 11 | import scala.concurrent.duration._ 12 | import akka.testkit._ 13 | 14 | class DelayFlowSpec extends BaseStreamSpec { 15 | 16 | "DelayFlow" should { 17 | 18 | "work with empty source" in { 19 | Source 20 | .empty[Int] 21 | .via(DelayFlow(Duration.Zero)) 22 | .runWith(TestSink.probe) 23 | .request(1) 24 | .expectComplete() 25 | } 26 | 27 | "work with fixed delay" in { 28 | 29 | val fixedDelay = 1 second 30 | 31 | val elems = 1 to 10 32 | 33 | val probe = Source(elems) 34 | .map(_ => System.nanoTime()) 35 | .via(DelayFlow[Long](fixedDelay)) 36 | .map(start => System.nanoTime() - start) 37 | .runWith(TestSink.probe) 38 | 39 | elems.foreach(_ => { 40 | val next = probe 41 | .request(1) 42 | .expectNext(fixedDelay + fixedDelay.dilated) 43 | next should be >= fixedDelay.toNanos 44 | }) 45 | 46 | probe.expectComplete() 47 | 48 | } 49 | 50 | "work without delay" in { 51 | 52 | val elems = Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 0) 53 | 54 | Source(elems) 55 | .via(DelayFlow[Int](Duration.Zero)) 56 | .runWith(TestSink.probe) 57 | .request(elems.size) 58 | .expectNextN(elems) 59 | .expectComplete() 60 | } 61 | 62 | "work with linear increasing delay" in { 63 | 64 | val elems = 1 to 10 65 | val step = 1 second 66 | val initial = 1 second 67 | val max = 5 seconds 68 | 69 | def incWhile(i: (Int, Long)): Boolean = i._1 < 7 70 | 71 | val probe = Source(elems) 72 | .map(e => (e, System.nanoTime())) 73 | .via( 74 | DelayFlow[(Int, Long)]( 75 | () => 76 | DelayStrategy 77 | .linearIncreasingDelay(step, incWhile, initial, max) 78 | ) 79 | ) 80 | .map(start => System.nanoTime() - start._2) 81 | .runWith(TestSink.probe) 82 | 83 | elems.foreach( 84 | e => 85 | if (incWhile((e, 1L))) { 86 | val afterIncrease = initial + e * step 87 | val delay = if (afterIncrease < max) { 88 | afterIncrease 89 | } else { 90 | max 91 | } 92 | val next = probe 93 | .request(1) 94 | .expectNext(delay + delay.dilated) 95 | next should be >= delay.toNanos 96 | } else { 97 | val next = probe 98 | .request(1) 99 | .expectNext(initial + initial.dilated) 100 | next should be >= initial.toNanos 101 | } 102 | ) 103 | 104 | probe.expectComplete() 105 | 106 | } 107 | 108 | } 109 | 110 | } 111 | 
-------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/FeedbackLoopSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.{Attributes, OverflowStrategy} 8 | import akka.stream.scaladsl.{Flow, Keep, Source} 9 | import akka.stream.testkit.scaladsl.{TestSink, TestSource} 10 | import scala.concurrent.duration._ 11 | 12 | class FeedbackLoopSpec extends BaseStreamSpec { 13 | 14 | "Feedback" should { 15 | "not deadlock on slow downstream" in { 16 | val N = 1000 17 | 18 | val forwardFlow = PartitionWith((i: Int) => if (i % 2 == 1) Left(i / 2) else Right(i)) 19 | val feedbackArc = Flow[Int] 20 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, N / 2)(Keep.none) 21 | 22 | val sub = Source(1 to N) 23 | .via(testedFlow) 24 | .via(Flow[Int].delay(1.millis, OverflowStrategy.backpressure)) 25 | .toMat(TestSink.probe[Int])(Keep.right) 26 | .run() 27 | 28 | sub.request(n = N.toLong) 29 | sub.expectNextN(N.toLong) 30 | } 31 | 32 | "not deadlock on slow feedback arc" in { 33 | val bufferSize = 8 34 | 35 | val forwardFlow = PartitionWith((i: Long) => if (i % 2 == 1) Left(i / 2) else Right(i)) 36 | val feedbackArc = Flow[Long] 37 | .delay(1.millis, OverflowStrategy.backpressure) 38 | .withAttributes(Attributes.inputBuffer(bufferSize, bufferSize)) 39 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, bufferSize)(Keep.none) 40 | 41 | val (pub, sub) = TestSource 42 | .probe[Long] 43 | .via(testedFlow) 44 | .toMat(TestSink.probe[Long])(Keep.both) 45 | .run() 46 | 47 | val N = 1000L 48 | 49 | sub.request(n = N) 50 | for (i <- 1L to N) { 51 | pub.sendNext(i) 52 | } 53 | 54 | sub.expectNextN(N) 55 | } 56 | 57 | "fail on too many messages in the feedback arc" in { 58 | val forwardFlow = PartitionWith((i: Long) => if (i > 0) Left(i - 1) else Right(i)) 59 | val feedbackArc = Flow[Long].mapConcat(i => List(i, i)) 60 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, 1000)(Keep.none) 61 | 62 | val (pub, sub) = TestSource 63 | .probe[Long] 64 | .via(testedFlow) 65 | .toMat(TestSink.probe[Long])(Keep.both) 66 | .run() 67 | 68 | sub.request(n = 1) 69 | pub.sendNext(1000L) 70 | sub.expectError() 71 | } 72 | 73 | "fail on forward flow failure" in { 74 | import PartitionWith.Implicits._ 75 | 76 | val forwardFlow = Flow[Long] 77 | .mapConcat(i => if (i > 0L) List(Left(i - 1), Right(i)) else throw new IllegalArgumentException(i.toString)) 78 | .partitionWith(identity) 79 | val feedbackArc = Flow[Long] 80 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, 1)(Keep.none) 81 | 82 | val (pub, sub) = TestSource 83 | .probe[Long] 84 | .via(testedFlow) 85 | .toMat(TestSink.probe[Long])(Keep.both) 86 | .run() 87 | 88 | sub.request(n = 10) 89 | pub.sendNext(5L) 90 | sub.expectNext(5L, 4L, 3L, 2L, 1L) 91 | val error = sub.expectError() 92 | assert(error.asInstanceOf[IllegalArgumentException].getMessage == 0L.toString) 93 | } 94 | 95 | "fail on feedback arc failure" in { 96 | import PartitionWith.Implicits._ 97 | 98 | val forwardFlow = Flow[Long] 99 | .mapConcat(i => if (i > 0L) List(Left(i - 1), Right(i)) else List(Right(i))) 100 | .partitionWith(identity) 101 | val feedbackArc = Flow[Long].map(i => if (i > 2L) i else throw new IllegalArgumentException(i.toString)) 102 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, 1)(Keep.none) 103 | 104 | val (pub, sub) = TestSource 105 | .probe[Long] 
106 | .via(testedFlow) 107 | .toMat(TestSink.probe[Long])(Keep.both) 108 | .run() 109 | 110 | sub.request(n = 10) 111 | pub.sendNext(5L) 112 | sub.expectNext(5L, 4L, 3L) 113 | val error = sub.expectError() 114 | assert(error.asInstanceOf[IllegalArgumentException].getMessage == 2L.toString) 115 | } 116 | 117 | "fail on upstream failure" in { 118 | import PartitionWith.Implicits._ 119 | import FeedbackLoop.Implicits._ 120 | 121 | val forwardFlow = Flow[Long] 122 | .mapConcat(i => if (i > 0L) List(Left(i - 1), Right(i)) else List(Right(i))) 123 | .partitionWith(identity) 124 | val testedFlow = forwardFlow.feedback(1) 125 | 126 | val (pub, sub) = TestSource 127 | .probe[Long] 128 | .via(testedFlow) 129 | .toMat(TestSink.probe[Long])(Keep.both) 130 | .run() 131 | 132 | sub.request(n = 10) 133 | pub.sendNext(5L) 134 | sub.expectNext(5L, 4L, 3L, 2L, 1L, 0L) 135 | pub.sendError(new IllegalArgumentException("foo")) 136 | val error = sub.expectError() 137 | assert(error.asInstanceOf[IllegalArgumentException].getMessage == "foo") 138 | } 139 | 140 | "keep working after feedback arc completes" in { 141 | import PartitionWith.Implicits._ 142 | 143 | val forwardFlow = Flow[Long] 144 | .mapConcat { i => 145 | if (i > 0L) List(Left(i - 1), Right(i)) else List(Right(i)) 146 | } 147 | .partitionWith(identity) 148 | val feedbackArc = Flow[Long].take(5) 149 | val testedFlow = FeedbackLoop(forwardFlow, feedbackArc, 1)(Keep.none) 150 | 151 | val (pub, sub) = TestSource 152 | .probe[Long] 153 | .via(testedFlow) 154 | .toMat(TestSink.probe[Long])(Keep.both) 155 | .run() 156 | 157 | sub.request(n = 10) 158 | pub.sendNext(10L) 159 | sub.expectNext(10L, 9L, 8L, 7L, 6L, 5L) 160 | pub.sendNext(4L) 161 | sub.expectNext(4L) 162 | pub.sendNext(3L) 163 | sub.expectNext(3L) 164 | } 165 | 166 | "be able to compute the Fibonacci sequence" in { 167 | import PartitionWith.Implicits._ 168 | import FeedbackLoop.Implicits._ 169 | 170 | val forwardFlow = Flow[(Int, Int)] 171 | .mapConcat { case pair @ (n, _) => Right(n) :: Left(pair) :: Nil } 172 | .partitionWith(identity) 173 | val feedbackArc = Flow[(Int, Int)] 174 | .map { case (n, n1) => (n1, n + n1) } 175 | val testedFlow = forwardFlow.feedbackVia(feedbackArc, 1) 176 | 177 | val (pub, sub) = TestSource 178 | .probe[(Int, Int)] 179 | .via(testedFlow) 180 | .toMat(TestSink.probe[Int])(Keep.both) 181 | .run() 182 | 183 | sub.request(n = 20) 184 | pub.sendNext((0, 1)) 185 | sub.expectNextN(List(0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181)) 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/IntervalBasedRateLimiterSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.NotUsed 8 | import akka.stream.ThrottleMode 9 | import akka.stream.scaladsl.Source 10 | import akka.stream.testkit.TestSubscriber.{OnComplete, OnNext} 11 | import akka.stream.testkit.scaladsl.TestSink 12 | import org.scalatest.Matchers 13 | 14 | import scala.concurrent.duration.{FiniteDuration, _} 15 | 16 | class IntervalBasedRateLimiterSpec extends IntervalBasedThrottlerTestKit { 17 | 18 | "IntervalBasedRateLimiter" should { 19 | "limit rate of messages" when { 20 | "frequency is low (1 element per 500ms)" in testCase(source = infiniteSource, 21 | numOfElements = 6, 22 | maxBatchSize = 1, 23 | minInterval = 500.millis) 24 | 25 | "frequency is medium (10 elements per 100ms)" in testCase(source = infiniteSource, 26 | numOfElements = 300, 27 | maxBatchSize = 10, 28 | minInterval = 100.millis) 29 | 30 | "frequency is moderate (20 elements per 100ms)" in testCase(source = infiniteSource, 31 | numOfElements = 600, 32 | maxBatchSize = 20, 33 | minInterval = 100.millis) 34 | 35 | "frequency is moderate (200 elements per 1000ms)" in testCase(source = infiniteSource, 36 | numOfElements = 600, 37 | maxBatchSize = 200, 38 | minInterval = 1000.millis) 39 | 40 | "frequency is high (200 elements per 100ms)" in testCase(source = infiniteSource, 41 | numOfElements = 6000, 42 | maxBatchSize = 200, 43 | minInterval = 100.millis) 44 | 45 | "frequency is high (2 000 elements per 1 000ms)" in testCase(source = infiniteSource, 46 | numOfElements = 6000, 47 | maxBatchSize = 2000, 48 | minInterval = 1000.millis) 49 | 50 | "frequency is very high (50 000 elements per 1 000ms)" in testCase(source = infiniteSource, 51 | numOfElements = 150000, 52 | maxBatchSize = 50000, 53 | minInterval = 1000.millis) 54 | 55 | "source is slow" in testCase(source = slowInfiniteSource(300.millis), 56 | numOfElements = 10, 57 | maxBatchSize = 1, 58 | minInterval = 100.millis) 59 | } 60 | } 61 | 62 | } 63 | 64 | trait IntervalBasedThrottlerTestKit extends BaseStreamSpec { 65 | this: Matchers => 66 | 67 | type Batch = Seq[Int] 68 | 69 | def testCase(source: => Source[Int, _], numOfElements: Int, maxBatchSize: Int, minInterval: FiniteDuration): Unit = { 70 | 71 | val flow = source 72 | .take(numOfElements.toLong) 73 | .via(IntervalBasedRateLimiter.create[Int](minInterval, maxBatchSize)) 74 | .map { batch => 75 | (System.currentTimeMillis(), batch) 76 | } 77 | .runWith(TestSink.probe[(Long, Seq[Int])]) 78 | 79 | val (timestamps, batches) = { 80 | 81 | def collectTimestampsAndBatches(acc: List[(Long, Batch)]): List[(Long, Batch)] = { 82 | flow.request(1) 83 | flow.expectEventPF { 84 | case OnNext(batch: (Long, Batch)) => 85 | collectTimestampsAndBatches(batch :: acc) 86 | case OnComplete | _ => 87 | acc.reverse 88 | } 89 | } 90 | 91 | collectTimestampsAndBatches(Nil) 92 | }.unzip 93 | 94 | val intervals: Seq[FiniteDuration] = timestamps 95 | .sliding(2, 1) 96 | .map { 97 | case List(first, second) => (second - first).millis 98 | } 99 | .toList 100 | 101 | intervals.foreach { 102 | _ should be >= minInterval 103 | } 104 | 105 | batches.flatten should contain theSameElementsInOrderAs (1 to numOfElements).inclusive 106 | batches.size should (be(numOfElements / maxBatchSize) or be(numOfElements / maxBatchSize + 1)) 107 | } 108 | 109 | protected def infiniteSource: Source[Int, NotUsed] = Source(Stream.from(1, 1)) 110 | 111 | protected def slowInfiniteSource(pushDelay: FiniteDuration): Source[Int, NotUsed] = 112 | infiniteSource.throttle(1, pushDelay, 1, ThrottleMode.shaping) 113 | 
114 | } 115 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/KeepAliveConcatSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.scaladsl.{Sink, Source} 8 | import akka.stream.testkit.{TestPublisher, TestSubscriber} 9 | import akka.stream.{ActorMaterializer, ActorMaterializerSettings} 10 | 11 | import scala.concurrent.Await 12 | import scala.concurrent.duration._ 13 | 14 | class KeepAliveConcatSpec extends BaseStreamSpec { 15 | 16 | val settings = ActorMaterializerSettings(system) 17 | .withInputBuffer(initialSize = 2, maxSize = 16) 18 | 19 | implicit val materializer = ActorMaterializer(settings) 20 | 21 | val sampleSource = Source((1 to 10).grouped(3).toVector) 22 | val expand = (lst: IndexedSeq[Int]) ⇒ lst.toList.map(Vector(_)) 23 | 24 | "keepAliveConcat" must { 25 | 26 | "not emit additional elements if upstream is fast enough" in { 27 | Await 28 | .result(sampleSource 29 | .via(KeepAliveConcat(5, 1.second, expand)) 30 | .grouped(1000) 31 | .runWith(Sink.head), 32 | 3.seconds) 33 | .flatten should ===(1 to 10) 34 | } 35 | 36 | "emit elements periodically after silent periods" in { 37 | val sourceWithIdleGap = Source((1 to 5).grouped(3).toList) ++ 38 | Source((6 to 10).grouped(3).toList).initialDelay(2.second) 39 | 40 | Await 41 | .result(sourceWithIdleGap 42 | .via(KeepAliveConcat(5, 0.6.seconds, expand)) 43 | .grouped(1000) 44 | .runWith(Sink.head), 45 | 3.seconds) 46 | .flatten should ===(1 to 10) 47 | } 48 | 49 | "immediately pull upstream" in { 50 | val upstream = TestPublisher.probe[Vector[Int]]() 51 | val downstream = TestSubscriber.probe[Vector[Int]]() 52 | 53 | Source.fromPublisher(upstream).via(KeepAliveConcat(2, 1.second, expand)).runWith(Sink.fromSubscriber(downstream)) 54 | 55 | downstream.request(1) 56 | 57 | upstream.sendNext(Vector(1)) 58 | downstream.expectNext(Vector(1)) 59 | 60 | upstream.sendComplete() 61 | downstream.expectComplete() 62 | } 63 | 64 | "immediately pull upstream after busy period" in { 65 | val upstream = TestPublisher.probe[IndexedSeq[Int]]() 66 | val downstream = TestSubscriber.probe[IndexedSeq[Int]]() 67 | 68 | (sampleSource ++ Source.fromPublisher(upstream)) 69 | .via(KeepAliveConcat(2, 1.second, expand)) 70 | .runWith(Sink.fromSubscriber(downstream)) 71 | 72 | downstream.request(10) 73 | downstream.expectNextN((1 to 3).grouped(1).toVector ++ (4 to 10).grouped(3).toVector) 74 | 75 | downstream.request(1) 76 | 77 | upstream.sendNext(Vector(1)) 78 | downstream.expectNext(Vector(1)) 79 | 80 | upstream.sendComplete() 81 | downstream.expectComplete() 82 | } 83 | 84 | "work if timer fires before initial request after busy period" in { 85 | val upstream = TestPublisher.probe[IndexedSeq[Int]]() 86 | val downstream = TestSubscriber.probe[IndexedSeq[Int]]() 87 | 88 | (sampleSource ++ Source.fromPublisher(upstream)) 89 | .via(KeepAliveConcat(2, 1.second, expand)) 90 | .runWith(Sink.fromSubscriber(downstream)) 91 | 92 | downstream.request(10) 93 | downstream.expectNextN((1 to 3).grouped(1).toVector ++ (4 to 10).grouped(3).toVector) 94 | 95 | downstream.expectNoMsg(1.5.second) 96 | downstream.request(1) 97 | 98 | upstream.sendComplete() 99 | downstream.expectComplete() 100 | } 101 | 102 | } 103 | 104 | } 105 | -------------------------------------------------------------------------------- 
/src/test/scala/akka/stream/contrib/LastElementSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.stream.scaladsl.{Keep, Sink, Source} 8 | import akka.stream.testkit.scaladsl.TestSink 9 | import akka.testkit.TestDuration 10 | import scala.concurrent.duration.DurationInt 11 | import scala.concurrent.{Await, Future} 12 | 13 | class LastElementSpec extends BaseStreamSpec { 14 | 15 | "A stream via LastElement" should { 16 | "materialize to the last element emitted by a finite nonempty successful source" in { 17 | val (lastElement, probe) = Source(Vector(1, 2, 3)) 18 | .viaMat(LastElement())(Keep.right) 19 | .toMat(TestSink.probe)(Keep.both) 20 | .run() 21 | probe 22 | .request(3) 23 | .expectNext(1, 2, 3) 24 | .expectComplete() 25 | Await.result(lastElement, 1.second.dilated) shouldBe Some(3) 26 | } 27 | 28 | "materialize to `None` for an empty successful source" in { 29 | val (lastElement, probe) = Source(Vector.empty[Int]) 30 | .viaMat(LastElement())(Keep.right) 31 | .toMat(TestSink.probe)(Keep.both) 32 | .run() 33 | probe 34 | .request(3) 35 | .expectComplete() 36 | Await.result(lastElement, 1.second.dilated) shouldBe None 37 | } 38 | 39 | "materialize to the last element emitted by a source before it failed" in { 40 | import system.dispatcher 41 | val (lastElement, lastEmitted) = Source 42 | .fromIterator(() => Iterator.iterate(1)(n => if (n >= 3) sys.error("FAILURE") else n + 1)) 43 | .viaMat(LastElement())(Keep.right) 44 | .toMat(Sink.fold[Option[Int], Int](None)((_, o) => Some(o)))(Keep.both) 45 | .run() 46 | val Vector(l1, l2) = Await.result(Future.sequence(Vector(lastElement, lastEmitted)), 1.second.dilated) 47 | l1 shouldBe l2 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/LatencyTimerSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import java.time.Clock 8 | import java.time.temporal.ChronoUnit 9 | 10 | import akka.actor.ActorSystem 11 | import akka.stream.contrib.LatencyTimer.TimedResult 12 | import akka.stream.scaladsl.{Flow, Keep, Sink} 13 | import akka.stream.testkit.scaladsl.TestSource 14 | import akka.stream.{ActorAttributes, Supervision} 15 | import org.scalamock.scalatest.MockFactory 16 | import org.scalatest.featurespec.AnyFeatureSpec 17 | import org.scalatest.matchers.should.Matchers 18 | import org.scalatest.{GivenWhenThen, OptionValues} 19 | 20 | import scala.concurrent.duration._ 21 | import scala.concurrent.{Await, ExecutionContextExecutor} 22 | 23 | import akka.stream.contrib.Implicits._ 24 | 25 | class LatencyTimerSpec extends AnyFeatureSpec with GivenWhenThen with Matchers with MockFactory with OptionValues { 26 | 27 | implicit val system: ActorSystem = ActorSystem() 28 | implicit val executionContext: ExecutionContextExecutor = system.dispatcher 29 | 30 | /** 31 | * 32 | */ 33 | Scenario("LatencyTimer measures time correctly and continues with the wrapped flow") { 34 | val clockMock = mock[Clock] 35 | val baseInstant = Clock.systemDefaultZone().instant() 36 | var timerResult: FiniteDuration = null 37 | 38 | val TimePassed = 100.millis 39 | 40 | Given(s"a Flow stage that 'waits' for $TimePassed") 41 | val testFlow = Flow.fromFunction[String, String] { s => 42 | s"hello $s" 43 | } 44 | val measuredFlow = LatencyTimer.createGraph( 45 | testFlow, 46 | Sink.foreach[TimedResult[String]](x => timerResult = x.measuredTime) 47 | )(clockMock) 48 | 49 | (clockMock.instant _).expects().returns(baseInstant) 50 | (clockMock.instant _).expects().returns(baseInstant.plus(TimePassed.length, ChronoUnit.MILLIS)) 51 | 52 | When("running the stream with one element") 53 | val (sourceProbe, materializedValue) = TestSource.probe[String].via(measuredFlow).toMat(Sink.head)(Keep.both).run() 54 | sourceProbe.sendNext("timer") 55 | sourceProbe.sendComplete() 56 | Await.result(materializedValue, 1.seconds) 57 | 58 | Then(s"the measured time is $TimePassed") 59 | timerResult should equal(TimePassed) 60 | 61 | And("the wrapped flow materializes to the correct value") 62 | materializedValue.value.value.get should equal("hello timer") 63 | } 64 | 65 | /** 66 | * 67 | */ 68 | Scenario("LatencyTimer handles diverting Flows") { 69 | val clockMock = mock[Clock] 70 | val baseInstant = Clock.systemDefaultZone().instant() 71 | var timerResult: FiniteDuration = null 72 | 73 | val TimePassedFirst = 100.millis 74 | val TimePassedSecondDiverted = 10.millis 75 | val TimePassedThird = 50.millis 76 | 77 | Given("an Integer-Flow that diverts for element == 2") 78 | val divertingTestFlow = Flow 79 | .fromFunction[Int, Int] { i => 80 | i 81 | } 82 | .divertTo(Sink.ignore, i => i == 2) 83 | val measuredFlow = LatencyTimer.createGraph( 84 | divertingTestFlow, 85 | Sink.foreach[TimedResult[Int]](x => timerResult = x.measuredTime) 86 | )(clockMock) 87 | 88 | // first element 89 | (clockMock.instant _).expects().returns(baseInstant) 90 | (clockMock.instant _).expects().returns(baseInstant.plus(TimePassedFirst.length, ChronoUnit.MILLIS)) 91 | // second element 92 | (clockMock.instant _) 93 | .expects() 94 | .returns(baseInstant) // (only the start is recorded: the stop clock is never called for the diverted element) 95 | // third element 96 | (clockMock.instant _).expects().returns(baseInstant) 97 | (clockMock.instant _).expects().returns(baseInstant.plus(TimePassedThird.length, ChronoUnit.MILLIS)) 98 | 99 | When("running 
the stream with three elements, measuring the time") 100 | val (sourceProbe, materializedValue) = TestSource.probe[Int].via(measuredFlow).toMat(Sink.last)(Keep.both).run() 101 | sourceProbe.sendNext(1) 102 | sourceProbe.sendNext(2) 103 | sourceProbe.sendNext(3) 104 | sourceProbe.sendComplete() 105 | Await.result(materializedValue, 1.second) 106 | 107 | Then(s"the measured time is $TimePassedThird, which corresponds to the last element sent") 108 | timerResult should equal(TimePassedThird) 109 | 110 | And("the wrapped flow materializes to the correct value") 111 | materializedValue.value.value.get should equal(3) 112 | } 113 | 114 | /** 115 | * In the DSL we cannot mock out the Clock instance, hence we test the DSL separately 116 | */ 117 | Scenario("LatencyTimer DSL works as expected") { 118 | When("running a stream with a flow-stage called by the DSL") 119 | val measuredFlow = Flow 120 | .fromFunction[String, String](s => s"hello $s") 121 | .measureLatency(Sink.ignore) 122 | val (sourceProbe, materializedValue) = TestSource 123 | .probe[String] 124 | .via(measuredFlow) 125 | .toMat(Sink.head)(Keep.both) 126 | .run() 127 | sourceProbe.sendNext("timer") 128 | sourceProbe.sendComplete() 129 | Await.result(materializedValue, 1.second) 130 | 131 | Then("the wrapped flow materializes to the correct value") 132 | materializedValue.value.value.get should equal("hello timer") 133 | } 134 | 135 | Scenario("LatencyTimer handles errors") { 136 | val clockMock = mock[Clock] 137 | val baseInstant = Clock.systemDefaultZone().instant() 138 | var timerResult: FiniteDuration = null 139 | 140 | val TimePassedFirst = 100.millis 141 | val TimePassedSecondDiverted = 10.millis 142 | val TimePassedThird = 50.millis 143 | 144 | Given("a resume restart policy") 145 | val decider: Supervision.Decider = _ => Supervision.Resume 146 | 147 | // first element 148 | (clockMock.instant _).expects().returns(baseInstant) 149 | (clockMock.instant _).expects().returns(baseInstant.plus(TimePassedFirst.length, ChronoUnit.MILLIS)) 150 | // second element (only the start is recorded: the stop clock is never called due to resume) 151 | (clockMock.instant _).expects().returns(baseInstant) 152 | // third element 153 | (clockMock.instant _).expects().returns(baseInstant) 154 | (clockMock.instant _).expects().returns(baseInstant.plus(TimePassedThird.length, ChronoUnit.MILLIS)) 155 | 156 | When("running a stream that throws an error") 157 | val flow = Flow.fromFunction[Int, Int](i => if (i == 2) throw new RuntimeException("foobar") else i) 158 | val measuredFlow = 159 | LatencyTimer.createGraph(flow, Sink.foreach[TimedResult[Int]](x => timerResult = x.measuredTime))(clockMock) 160 | val (sourceProbe, materializedValue) = TestSource 161 | .probe[Int] 162 | .via(measuredFlow) 163 | .withAttributes(ActorAttributes.supervisionStrategy(decider)) 164 | .toMat(Sink.last)(Keep.both) 165 | .run() 166 | sourceProbe.sendNext(1) 167 | sourceProbe.sendNext(2) 168 | sourceProbe.sendNext(3) 169 | sourceProbe.sendComplete() 170 | Await.result(materializedValue, 1.second) 171 | 172 | Then("the wrapped flow materializes to the correct value") 173 | materializedValue.value.value.get should equal(3) 174 | 175 | And(s"the measured time is $TimePassedThird, which belongs to the element sent after resuming") 176 | timerResult should equal(TimePassedThird) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/MergeByIndexSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 
2016 Lightbend Inc. 3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.NotUsed 8 | import akka.actor.ActorSystem 9 | import akka.stream.scaladsl.{Flow, GraphDSL, Partition, RunnableGraph, Sink, Source} 10 | import akka.stream.testkit.{TestPublisher, TestSubscriber} 11 | import akka.stream.{ActorMaterializer, ClosedShape, FlowShape, OverflowStrategy} 12 | import org.scalatest.concurrent.ScalaFutures 13 | import org.scalatest.{MustMatchers, WordSpec} 14 | 15 | import scala.concurrent.duration._ 16 | import scala.util.Random 17 | 18 | //noinspection TypeAnnotation 19 | class MergeByIndexSpec extends WordSpec with MustMatchers with ScalaFutures { 20 | implicit val system = ActorSystem("merge-by-index") 21 | implicit val mat = ActorMaterializer() 22 | implicit override val patienceConfig = PatienceConfig(5.seconds) 23 | 24 | "MergeByIndex" should { 25 | 26 | "pull from all upstreams" in new ThreeWayMergeGraph { 27 | // expecting subscriptions is already performed in ThreeWayMergeGraph 28 | completeAll() 29 | } 30 | 31 | "merge elements in order" in new ThreeWayMergeGraph { 32 | in1.sendNext(1L) 33 | in2.sendNext(0L) 34 | requestedNextOne() mustBe 0L 35 | requestedNextOne() mustBe 1L 36 | completeAll() 37 | } 38 | 39 | "wait for missing index if not all inputs are available" in new ThreeWayMergeGraph { 40 | in1.sendNext(1L) 41 | noNextOne() // waits for index 0. 42 | 43 | in3.sendNext(2L) 44 | out.expectNoMessage(50.millis) // still waits for index 0. 45 | 46 | in2.sendNext(0L) 47 | nextOne() mustBe 0L // emits index 0. 48 | requestedNextOne() mustBe 1L // emits index 1. 49 | requestedNextOne() mustBe 2L // emits index 2. 50 | completeAll() 51 | } 52 | 53 | "emit element on index omission" in new ThreeWayMergeGraph { 54 | in1.sendNext(1L) 55 | noNextOne() // waits for index 0. 56 | 57 | in2.sendNext(2L) 58 | in3.sendNext(3L) 59 | nextOne() mustBe 1L // emits index 1 as we now know there's a gap. 60 | completeAll() 61 | } 62 | 63 | "complete only when all inputs complete" in new ThreeWayMergeGraph { 64 | in3.sendNext(1L) 65 | in2.sendNext(0L) 66 | in1.sendNext(2L) 67 | 68 | requestedNextOne() 69 | requestedNextOne() 70 | requestedNextOne() 71 | 72 | in2.sendComplete() 73 | out.expectNoMessage(50.millis) 74 | 75 | in1.sendComplete() 76 | out.expectNoMessage(50.millis) 77 | 78 | in3.sendComplete() 79 | out.expectComplete() 80 | } 81 | 82 | "error if index sequence is non-monotonic" in new ThreeWayMergeGraph { 83 | in1.sendNext(1L) 84 | in2.sendNext(0L) 85 | requestedNextOne() mustBe 0L 86 | requestedNextOne() mustBe 1L 87 | 88 | in3.sendNext(1L) 89 | out.expectError() mustBe a[IllegalArgumentException] 90 | } 91 | 92 | "emit element on completion after index omission" in new ThreeWayMergeGraph { 93 | in1.sendNext(2L) 94 | in2.sendNext(1L) // index 0 is omitted 95 | in3.sendComplete() // ... 
which the stage should now infer due to completion of in3 96 | 97 | requestedNextOne() mustBe 1L 98 | requestedNextOne() mustBe 2L 99 | in2.sendComplete() 100 | in1.sendComplete() 101 | 102 | out.expectComplete() 103 | } 104 | 105 | "always merge items in order" in { 106 | // using poor man's property-based testing 107 | val inputLength = 1000 108 | val testRepetitions = 50 109 | val branchCount = 20 110 | 111 | val flow = Flow.fromGraph(GraphDSL.create() { implicit builder: GraphDSL.Builder[NotUsed] => 112 | import GraphDSL.Implicits._ 113 | 114 | val partition = builder.add(Partition[Long](branchCount, _ => Random.nextInt(branchCount))) 115 | val merge = builder.add(MergeByIndex(branchCount, identity[Long])) 116 | val buffer = Flow[Long].buffer(3 * inputLength / branchCount, OverflowStrategy.backpressure) 117 | 118 | for (_ <- 1 to branchCount) partition ~> buffer ~> merge 119 | 120 | FlowShape(partition.in, merge.out) 121 | }) 122 | 123 | for (_ <- 1 to testRepetitions) { 124 | val input = List.tabulate(inputLength)(_.toLong).filterNot(_ => Random.nextInt(5) == 0) // add random gaps 125 | val output = Source(input).via(flow).runWith(Sink.seq).futureValue 126 | output mustBe input 127 | } 128 | } 129 | } 130 | 131 | trait ThreeWayMergeGraph { 132 | val pub1 = TestPublisher.manualProbe[Long]() 133 | val pub2 = TestPublisher.manualProbe[Long]() 134 | val pub3 = TestPublisher.manualProbe[Long]() 135 | val out = TestSubscriber.manualProbe[Long]() 136 | 137 | val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder: GraphDSL.Builder[NotUsed] => 138 | import GraphDSL.Implicits._ 139 | val in1 = Source.fromPublisher(pub1) 140 | val in2 = Source.fromPublisher(pub2) 141 | val in3 = Source.fromPublisher(pub3) 142 | val outSink = Sink.fromSubscriber(out) 143 | 144 | val merge = builder.add(MergeByIndex(3, identity[Long])) 145 | 146 | in1 ~> merge ~> outSink 147 | in2 ~> merge 148 | in3 ~> merge 149 | ClosedShape 150 | }) 151 | 152 | graph.run() 153 | 154 | val in1 = pub1.expectSubscription() 155 | val in2 = pub2.expectSubscription() 156 | val in3 = pub3.expectSubscription() 157 | 158 | val subscription = out.expectSubscription() 159 | 160 | def requestedNextOne(): Long = { 161 | subscription.request(1) 162 | out.expectNext() 163 | } 164 | 165 | def nextOne(): Long = out.expectNext() 166 | 167 | def noNextOne(): Unit = { 168 | subscription.request(1) 169 | out.expectNoMessage(50.millis) 170 | } 171 | 172 | def completeAll(): Unit = { 173 | in1.sendComplete() 174 | in2.sendComplete() 175 | in3.sendComplete() 176 | } 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/PagedSourceSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import org.scalatest.concurrent.ScalaFutures 8 | 9 | import PagedSourceSpec._ 10 | import akka.stream.scaladsl.Sink 11 | 12 | import scala.concurrent.Future 13 | 14 | object PagedSourceSpec { 15 | 16 | import scala.concurrent.ExecutionContext.Implicits.global 17 | 18 | case class MultiplesOfTwo(size: Option[Int] = None) { 19 | 20 | val itemsPerPage = 2 21 | 22 | def page(key: Int): Future[PagedSource.Page[Int, Int]] = 23 | Future { 24 | val indices = key * itemsPerPage until (key + 1) * itemsPerPage 25 | val filteredIndices = size match { 26 | case Some(sz) => indices.filter(_ < sz) 27 | case None => indices 28 | } 29 | PagedSource.Page(filteredIndices.map(_ * 2), Some(key + 1)) 30 | } 31 | } 32 | 33 | object IndexedStringPages { 34 | def page(key: Int): List[String] = key match { 35 | case 1 => List("a", "b", "c") 36 | case 2 => List("d", "e") 37 | case _ => Nil 38 | } 39 | } 40 | 41 | object LinkedIntPages { 42 | def page(key: String): (List[Int], String) = key match { 43 | case "first" => (List(1, 2), "second") 44 | case "second" => (List(3, 4, 5), "") 45 | case _ => (List(6), "") 46 | } 47 | } 48 | 49 | } 50 | 51 | class PagedSourceSpec extends BaseStreamSpec with ScalaFutures { 52 | 53 | import scala.concurrent.ExecutionContext.Implicits.global 54 | 55 | "PagedSource - MultiplesOfTwo" should { 56 | "return the items in the proper order" in { 57 | val source = PagedSource(0)(MultiplesOfTwo().page(_)) 58 | 59 | val result = source.take(3).runWith(Sink.seq) 60 | whenReady(result) { a => 61 | a shouldBe List(0, 2, 4) 62 | } 63 | } 64 | 65 | "return not more items than available" in { 66 | val source = PagedSource(0)(MultiplesOfTwo(Some(4)).page(_)) 67 | 68 | val result = source.take(10).runWith(Sink.seq) 69 | whenReady(result) { a => 70 | a.length shouldBe 4 71 | } 72 | } 73 | } 74 | 75 | "PagedSource - IndexedStringPages" should { 76 | val source = PagedSource[String, Int](1)(i => Future(PagedSource.Page(IndexedStringPages.page(i), Some(i + 1)))) 77 | "return the items in the proper order" in { 78 | val result = source.take(4).runWith(Sink.seq) 79 | whenReady(result) { a => 80 | a shouldBe Seq("a", "b", "c", "d") 81 | } 82 | } 83 | "close the stream when an empty page is received" in { 84 | val result = source.runWith(Sink.seq) 85 | whenReady(result) { a => 86 | a shouldBe Seq("a", "b", "c", "d", "e") 87 | } 88 | } 89 | } 90 | 91 | "PagedSource - LinkedIntPages" should { 92 | val source = PagedSource[Int, String]("first") { key => 93 | val (items, next) = LinkedIntPages.page(key) 94 | Future(PagedSource.Page(items, if (next.isEmpty) None else Some(next))) 95 | } 96 | "return the items in the proper order" in { 97 | val result = source.take(4).runWith(Sink.seq) 98 | whenReady(result) { a => 99 | a shouldBe Seq(1, 2, 3, 4) 100 | } 101 | } 102 | "close the stream when an empty link is received" in { 103 | val result = source.runWith(Sink.seq) 104 | whenReady(result) { a => 105 | a shouldBe Seq(1, 2, 3, 4, 5) 106 | } 107 | } 108 | } 109 | 110 | } 111 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/PartitionWithSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
3 | */ 4 | 5 | package akka.stream.contrib 6 | 7 | import akka.NotUsed 8 | import akka.stream.scaladsl._ 9 | import akka.stream.{ClosedShape, FanInShape2, FanOutShape2, FlowShape, Graph} 10 | import akka.stream.testkit.scaladsl.{TestSink, TestSource} 11 | 12 | class PartitionWithSpec extends BaseStreamSpec { 13 | 14 | private def fanOutAndIn[I, X, Y, O, M](fanOutGraph: Graph[FanOutShape2[I, X, Y], M], 15 | fanInGraph: Graph[FanInShape2[X, Y, O], NotUsed]): Flow[I, O, M] = 16 | Flow.fromGraph(GraphDSL.create(fanOutGraph, fanInGraph)(Keep.left) { implicit builder => (fanOut, fanIn) => 17 | import GraphDSL.Implicits._ 18 | 19 | fanOut.out0 ~> fanIn.in0 20 | fanOut.out1 ~> fanIn.in1 21 | 22 | FlowShape(fanOut.in, fanIn.out) 23 | }) 24 | 25 | private def zipFanOut[I, O1, O2, M](fanOutGraph: Graph[FanOutShape2[I, O1, O2], M]): Flow[I, (O1, O2), M] = 26 | fanOutAndIn(fanOutGraph, Zip[O1, O2]) 27 | 28 | private def mergeFanOut[I, O, M](fanOutGraph: Graph[FanOutShape2[I, O, O], M]): Flow[I, O, M] = 29 | Flow.fromGraph(GraphDSL.create(fanOutGraph) { implicit builder => fanOut => 30 | import GraphDSL.Implicits._ 31 | 32 | val mrg = builder.add(Merge[O](2)) 33 | 34 | fanOut.out0 ~> mrg.in(0) 35 | fanOut.out1 ~> mrg.in(1) 36 | 37 | FlowShape(fanOut.in, mrg.out) 38 | }) 39 | 40 | val flow = mergeFanOut(PartitionWith[Int, Int, Int] { 41 | case i if i % 2 == 0 => Left(i / 2) 42 | case i => Right(i * 3 - 1) 43 | }) 44 | 45 | "PartitionWith" should { 46 | "partition ints according to their parity" in { 47 | 48 | val (source, sink) = TestSource 49 | .probe[Int] 50 | .via(flow) 51 | .toMat(TestSink.probe)(Keep.both) 52 | .run() 53 | sink.request(99) 54 | source.sendNext(1) 55 | source.sendNext(2) 56 | source.sendNext(3) 57 | sink.expectNext(2, 1, 8) 58 | source.sendComplete() 59 | sink.expectComplete() 60 | } 61 | 62 | "not emit any value for an empty source" in { 63 | Source(Vector.empty[Int]) 64 | .via(flow) 65 | .runWith(TestSink.probe) 66 | .request(99) 67 | .expectComplete() 68 | } 69 | 70 | "fail on upstream failure" in { 71 | val (source, sink) = TestSource 72 | .probe[Int] 73 | .via(flow) 74 | .toMat(TestSink.probe)(Keep.both) 75 | .run() 76 | sink.request(99) 77 | source.sendError(new Exception) 78 | sink.expectError() 79 | } 80 | 81 | "allow flow of values of one partition even when the other outlet was not pulled" in { 82 | val source = TestSource.probe[Int] 83 | val sink0 = TestSink.probe[Int] 84 | val sink1 = TestSink.probe[Int] 85 | 86 | val graph = GraphDSL.create(source, sink0, sink1)(Tuple3.apply) { implicit b => (src, snk0, snk1) => 87 | import GraphDSL.Implicits._ 88 | 89 | val pw = b.add(PartitionWith[Int, Int, Int] { 90 | case i if i % 2 == 0 => Left(i) 91 | case i => Right(i) 92 | }) 93 | 94 | src.out ~> pw.in 95 | pw.out0 ~> snk0.in 96 | pw.out1 ~> snk1.in 97 | 98 | ClosedShape 99 | } 100 | val (pub, sub1, sub2) = RunnableGraph.fromGraph(graph).run() 101 | 102 | sub1.request(10) 103 | (1 to 10).foreach(i => pub.sendNext(2 * i)) 104 | sub1.expectNext(2, 4, 6, 8, 10, 12, 14, 16, 18, 20) 105 | 106 | sub2.request(10) 107 | (1 to 10).foreach(i => pub.sendNext(2 * i + 1)) 108 | sub2.expectNext(3, 5, 7, 9, 11, 13, 15, 17, 19, 21) 109 | } 110 | 111 | "with eagerCancel=false (the default), continue after cancellation of one of the downstreams" in { 112 | val source = TestSource.probe[Int] 113 | val sink0 = TestSink.probe[Int] 114 | val sink1 = TestSink.probe[Int] 115 | 116 | val graph = GraphDSL.create(source, sink0, sink1)(Tuple3.apply) { implicit b => (src, snk0, snk1) => 117 | import 
GraphDSL.Implicits._ 118 | 119 | val partition = b.add(PartitionWith[Int, Int, Int](i => if (i % 2 == 0) Left(i) else Right(i))) 120 | 121 | src.out ~> partition.in 122 | partition.out0 ~> snk0.in 123 | partition.out1 ~> snk1.in 124 | 125 | ClosedShape 126 | } 127 | val (pub, sub0, sub1) = RunnableGraph.fromGraph(graph).run() 128 | 129 | sub1.request(n = 1) 130 | sub0.cancel() 131 | pub.sendNext(5) 132 | sub1.expectNext(5) 133 | } 134 | 135 | "with eagerCancel=true, cancel and complete the other downstream after cancellation of one of the downstreams" in { 136 | val source = TestSource.probe[Int] 137 | val sink0 = TestSink.probe[Int] 138 | val sink1 = TestSink.probe[Int] 139 | 140 | val graph = GraphDSL.create(source, sink0, sink1)(Tuple3.apply) { implicit b => (src, snk0, snk1) => 141 | import GraphDSL.Implicits._ 142 | 143 | val partition = 144 | b.add(PartitionWith[Int, Int, Int](i => if (i % 2 == 0) Left(i) else Right(i), eagerCancel = true)) 145 | 146 | src.out ~> partition.in 147 | partition.out0 ~> snk0.in 148 | partition.out1 ~> snk1.in 149 | 150 | ClosedShape 151 | } 152 | val (pub, sub0, sub1) = RunnableGraph.fromGraph(graph).run() 153 | 154 | sub1.request(n = 1) 155 | sub0.cancel() 156 | pub.expectCancellation() 157 | sub1.expectComplete() 158 | } 159 | } 160 | 161 | "partitionWith extension method" should { 162 | "be callable on Flow and partition its output" in { 163 | import PartitionWith.Implicits._ 164 | 165 | val flow = zipFanOut(Flow[Int].partitionWith(i => if (i % 2 == 0) Left(-i) else Right(i))) 166 | 167 | val (source, sink) = TestSource 168 | .probe[Int] 169 | .via(flow) 170 | .toMat(TestSink.probe)(Keep.both) 171 | .run() 172 | 173 | sink.request(5) 174 | (1 to 10).foreach(source.sendNext) 175 | sink.expectNextN(List((-2, 1), (-4, 3), (-6, 5), (-8, 7), (-10, 9))) 176 | } 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/test/scala/akka/stream/contrib/PassThroughFlowSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Lightbend Inc. 
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/PulseSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.testkit.TestDuration
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._

class PulseSpec extends BaseStreamSpec with ScalaFutures {
  private val pulseInterval = 20.milliseconds

  "Pulse Stage" should {

    "signal demand once every interval" in {
      val (probe, future) = TestSource
        .probe[Int]
        .via(new Pulse[Int](pulseInterval.dilated))
        .toMat(Sink.seq)(Keep.both)
        .run()

      probe.sendNext(1)
      probe.expectNoMsg(pulseInterval)
      probe.sendNext(2)
      probe.expectNoMsg(pulseInterval)
      probe.sendComplete()

      whenReady(future) {
        _ should contain inOrderOnly (1, 2)
      }
    }

    "keep backpressure if there is no demand from downstream" in {
      val elements = 1 to 10

      val probe = Source(elements)
        .via(new Pulse[Int](pulseInterval.dilated))
        .runWith(TestSink.probe)

      probe.ensureSubscription()
      // let's waste some time without a demand and let pulse run its timer
      probe.expectNoMsg(pulseInterval * 10)

      probe.request(elements.length.toLong)
      elements.foreach(probe.expectNext)
    }

  }

  "An initially-opened Pulse Stage" should {

    "emit the first available element" in {
      val future = Source
        .repeat(1)
        .via(new Pulse[Int](pulseInterval.dilated, initiallyOpen = true))
        .initialTimeout(2.milliseconds.dilated)
        .runWith(Sink.headOption)

      whenReady(future) {
        _ shouldBe Some(1)
      }
    }

    "signal demand once every interval" in {
      val (probe, future) = TestSource
        .probe[Int]
        .via(new Pulse[Int](pulseInterval.dilated, initiallyOpen = true))
        .toMat(Sink.seq)(Keep.both)
        .run()

      probe.sendNext(1)
      probe.expectNoMsg(pulseInterval)
      probe.sendNext(2)
      probe.expectNoMsg(pulseInterval)
      probe.sendComplete()

      whenReady(future) {
        _ should contain inOrderOnly (1, 2)
      }
    }

  }
}
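// A minimal usage sketch of Pulse: limit a hot source to at most one element per
// interval by gating demand rather than buffering.
//
//   import scala.concurrent.duration._
//   Source(1 to 100)
//     .via(new Pulse[Int](1.second))
//     .runForeach(println) // prints roughly one element per second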
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/SampleSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

class SampleSpec extends WordSpec with Matchers {
  private implicit val system = ActorSystem("SampleTest")
  private implicit val materializer = ActorMaterializer()

  "Sample Stage" should {
    "return every Nth element in the stream" in {
      val list = 1 to 1000
      val source = Source.fromIterator[Int](() => list.iterator)

      for (n <- 1 to 100) {
        val future = source.via(Sample(n)).runWith(Sink.seq)
        val expected = list.filter(_ % n == 0).toList

        Await.result(future, 3 seconds) should ===(expected)
      }
    }

    "return elements randomly" in {
      // a fake random step: increases by 1 on every invocation
      var num = 0
      val mockRandom = () => {
        num += 1
        num
      }

      val future = Source
        .fromIterator[Int](() => (1 to 10).iterator)
        .via(Sample(mockRandom))
        .runWith(Sink.seq)

      // steps 1, 2, 3, 4 select the 1st, 3rd, 6th and 10th elements
      Await.result(future, 3 seconds) should ===(1 :: 3 :: 6 :: 10 :: Nil)
    }

    "throw an exception when the next step is <= 0" in {
      intercept[IllegalArgumentException] {
        Await.result(Source.empty.via(Sample(() => 0)).runWith(Sink.seq), 3 seconds)
      }

      intercept[IllegalArgumentException] {
        Await.result(Source.empty.via(Sample(() => -1)).runWith(Sink.seq), 3 seconds)
      }
    }

    "throw an exception when the max random step is <= 0" in {
      intercept[IllegalArgumentException] {
        Await.result(Source.empty.via(Sample.random(0)).runWith(Sink.seq), 3 seconds)
      }
    }
  }
}
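// A minimal usage sketch of Sample, using only the overloads exercised above:
//
//   Source(1 to 10000).via(Sample(100)).runWith(Sink.seq)        // every 100th element
//   Source(1 to 10000).via(Sample.random(100)).runWith(Sink.seq) // random gaps of 1..100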
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/SourceRepeatEvalSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import java.util.concurrent.atomic.AtomicInteger
import scala.util.Random
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink

class SourceRepeatEvalSpec extends BaseStreamSpec {

  "SourceRepeatEval" should {
    "generate elements" in {
      val int = new AtomicInteger(0)

      val probe = SourceRepeatEval(() => int.getAndIncrement())
        .take(10)
        .toMat(TestSink.probe)(Keep.right)
        .run()

      assert(probe.request(10).expectNextN(10) == (0 until 10))

      assert(int.get() == 10)
    }

    "support cancellation" in {
      val (c, probe) = SourceRepeatEval(() => Random.nextInt())
        .toMat(TestSink.probe)(Keep.both)
        .run()

      probe.requestNext()
      probe.requestNext()
      probe.requestNext()

      c.cancel()

      probe.request(1)
      probe.expectComplete()
    }

    "report correct cancellation state" in {
      val int = new AtomicInteger(0)

      val (c, probe) = SourceRepeatEval(() => int.getAndIncrement())
        .toMat(TestSink.probe)(Keep.both)
        .run()

      assert(probe.requestNext() == 0)
      assert(!c.isCancelled)

      assert(c.cancel())

      probe.request(1).expectComplete()

      assert(c.isCancelled)
      assert(!c.cancel())
    }
  }
}
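// A minimal usage sketch of SourceRepeatEval: re-evaluate a thunk for every
// downstream pull, with a materialized handle for cancellation (as exercised above):
//
//   val (cancellable, done) = SourceRepeatEval(() => System.currentTimeMillis())
//     .toMat(Sink.foreach(println))(Keep.both)
//     .run()
//   // later: cancellable.cancel() completes the stream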
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/TimeWindowSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import java.util.concurrent.ThreadFactory

import akka.event.LoggingAdapter
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink
import com.miguno.akka.testing.{MockScheduler, VirtualTime}
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.duration._

class AkkaMockScheduler extends {
  val time = new VirtualTime
} with MockScheduler(time) {
  def this(config: Config, adapter: LoggingAdapter, tf: ThreadFactory) = this()
}

class TimeWindowSpec extends BaseStreamSpec {

  override def config = ConfigFactory.parseString(s"""
    |akka.scheduler.implementation = ${classOf[AkkaMockScheduler].getName}
    """.stripMargin)

  private val timeWindow = 100.millis
  private val epsilonTime = 10.millis

  private val scheduler = system.scheduler.asInstanceOf[AkkaMockScheduler]

  "TimeWindow flow" should {
    "aggregate data for a predefined amount of time" in {
      val summingWindow = TimeWindow(timeWindow, eager = false)(identity[Int])(_ + _)

      val sub = Source
        .repeat(1)
        .via(summingWindow)
        .runWith(TestSink.probe)

      sub.request(2)

      sub.expectNoMsg(timeWindow + epsilonTime)
      scheduler.time.advance(timeWindow + epsilonTime)
      scheduler.tick()
      sub.expectNext()

      sub.expectNoMsg(timeWindow + epsilonTime)
      scheduler.time.advance(timeWindow + epsilonTime)
      scheduler.tick()
      sub.expectNext()
    }

    "emit the first seed if eager" in {
      val summingWindow = TimeWindow(timeWindow, eager = true)(identity[Int])(_ + _)

      val sub = Source
        .repeat(1)
        .via(summingWindow)
        .runWith(TestSink.probe)

      sub.request(2)

      sub.expectNext()

      sub.expectNoMsg(timeWindow + epsilonTime)
      scheduler.time.advance(timeWindow + epsilonTime)
      scheduler.tick()
      sub.expectNext()
    }
  }
}
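// A minimal usage sketch of TimeWindow, matching the signature exercised above:
// sum measurements into 1-second buckets; with eager = true the first value is
// emitted immediately. `measurements` is an illustrative Source[Int, _]:
//
//   import scala.concurrent.duration._
//   val perSecondSums = TimeWindow(1.second, eager = false)(identity[Int])(_ + _)
//   measurements.via(perSecondSums).runForeach(println)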
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/TimedSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.testkit.TestSubscriber
import akka.testkit.TestProbe
import org.reactivestreams.{Publisher, Subscriber}

import scala.concurrent.duration.{Duration, FiniteDuration}

class TimedSpec extends BaseStreamSpec {

  "Timed Source" should {

    import akka.stream.contrib.Implicits.TimedSourceDsl

    "measure the time between elements matching a predicate" in {
      val testActor = TestProbe()

      val measureBetweenEvery = 5
      val n = 20

      val printInfo = (interval: Duration) ⇒ {
        testActor.ref ! interval
        info(s"Measured interval between $measureBetweenEvery elements was: $interval")
      }

      val source = Source(1 to n).timedIntervalBetween(_ % measureBetweenEvery == 0, printInfo)

      source.runWith(Sink.ignore)
      (1 until n / measureBetweenEvery) foreach { _ =>
        testActor.expectMsgType[FiniteDuration]
      }
    }

    "measure the time it takes from start to completion, by wrapping operations" in {
      val testActor = TestProbe()

      val n = 50
      val printInfo = (d: FiniteDuration) ⇒ {
        testActor.ref ! d
        info(s"Processing $n elements took $d")
      }

      Source(1 to n).timed(_.map(identity), onComplete = printInfo).runWith(Sink.ignore)
      testActor.expectMsgType[FiniteDuration]
    }

  }

  "Timed Flow" should {

    import akka.stream.contrib.Implicits.TimedFlowDsl

    "measure the time between elements matching a predicate" in {
      val probe = TestProbe()

      val flow: Flow[Int, Long, _] = Flow[Int].map(_.toLong).timedIntervalBetween(in ⇒ in % 2 == 1, d ⇒ probe.ref ! d)

      val c1 = TestSubscriber.manualProbe[Long]()
      Source(List(1, 2, 3)).via(flow).runWith(Sink.fromSubscriber(c1))

      val s = c1.expectSubscription()
      s.request(100)
      c1.expectNext(1L)
      c1.expectNext(2L)
      c1.expectNext(3L)
      c1.expectComplete()

      val duration = probe.expectMsgType[Duration]
      info(s"Got duration (first): $duration")
    }

    "measure the time from start to completion, by wrapping operations" in {
      val probe = TestProbe()

      // making sure the types come out as expected
      val flow: Flow[Int, String, _] =
        Flow[Int].timed(_.map(_.toDouble).map(_.toInt).map(_.toString), duration ⇒ probe.ref ! duration).map {
          s: String ⇒
            s + "!"
        }

      val (flowIn: Subscriber[Int], flowOut: Publisher[String]) =
        flow.runWith(Source.asSubscriber[Int], Sink.asPublisher[String](false))

      val c1 = TestSubscriber.manualProbe[String]()
      flowOut.subscribe(c1) // Publisher.subscribe returns Unit, so there is nothing to bind here

      val p = Source(0 to 100).runWith(Sink.asPublisher(false))
      p.subscribe(flowIn)

      val s = c1.expectSubscription()
      s.request(200)
      0 to 100 foreach { i ⇒
        c1.expectNext(i.toString + "!")
      }
      c1.expectComplete()

      val duration = probe.expectMsgType[Duration]
      info(s"Took: $duration")
    }
  }
}
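// A minimal usage sketch of the Timed DSL, using only the methods exercised above.
// `expensiveStep` and `log` are illustrative placeholders:
//
//   import akka.stream.contrib.Implicits.TimedSourceDsl
//   Source(1 to 1000)
//     .timed(_.map(expensiveStep), onComplete = d => log.info(s"pipeline took $d"))
//     .runWith(Sink.ignore)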
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/TokenThrottleSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{MustMatchers, WordSpec}

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._

class TokenThrottleSpec extends WordSpec with MustMatchers with ScalaFutures {

  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val ec: ExecutionContextExecutor = system.dispatcher

  "token throttle" should {

    "let elements pass only when tokens are available" in {
      val (elems, tokens, out) = throttledGraph

      tokens.sendNext(2)
      elems.sendNext(1)
      out.requestNext() mustBe 1
      elems.sendNext(2)
      out.requestNext() mustBe 2
      elems.sendNext(3)
      an[AssertionError] mustBe thrownBy(out.requestNext(100.millis)) // expect the element to be blocked
      tokens.sendNext(1)
      out.requestNext() mustBe 3
    }

    "ask for tokens only when tokens are needed" in {
      val tokenAsked = new AtomicInteger()
      val tokens = Source.repeat(10L).take(20).alsoTo(Sink.foreach(_ => tokenAsked.incrementAndGet()))

      Source
        .repeat(1)
        .take(25)
        .via(TokenThrottle(tokens)(_ => 1))
        .runWith(Sink.ignore)
        .futureValue

      tokenAsked.get() mustBe 3
    }

    "consume tokens according to cost" in {
      val tokenAsked = new AtomicInteger()
      val tokens = Source.repeat(1L).alsoTo(Sink.foreach(_ => tokenAsked.incrementAndGet()))

      val sum = Source
        .fromIterator(() => Stream.from(1, 1).iterator)
        .take(40)
        .via(TokenThrottle(tokens)(_.toLong))
        .runWith(Sink.fold(0)(_ + _))
        .futureValue

      tokenAsked.get() mustBe sum
    }

    "complete when all tokens are consumed" in {
      val (elems, tokens, out) = throttledGraph

      tokens.sendNext(2)
      elems.sendNext(1)
      out.requestNext() mustBe 1
      tokens.sendComplete()

      elems.sendNext(2)
      out.requestNext() mustBe 2
      out.expectComplete()
    }

    "complete when elements are consumed" in {

      val (elems, tokens, out) = throttledGraph

      tokens.sendNext(10)
      elems.sendNext(1)
      out.requestNext() mustBe 1
      elems.sendNext(2)
      out.requestNext() mustBe 2
      elems.sendComplete()
      out.expectComplete()
    }

    "complete if an element is buffered and the token source completes with too few remaining tokens" in {
      val ((elems, tokens), out) = TestSource
        .probe[Int]
        .viaMat(TokenThrottle(TestSource.probe[Long])(_ => 5))(Keep.both)
        .toMat(TestSink.probe)(Keep.both)
        .run()

      tokens.sendNext(8)
      elems.sendNext(1)
      elems.sendNext(2)
      out.requestNext() mustBe 1
      tokens.sendComplete()
      out.expectComplete()
    }

    "ask for tokens to satisfy the current item's cost even if downstream has not yet requested" in {
      val ((elems, tokens), out) = TestSource
        .probe[Int]
        .viaMat(TokenThrottle(TestSource.probe[Long])(_ => 100))(Keep.both)
        .toMat(TestSink.probe)(Keep.both)
        .run()

      elems.sendNext(1)
      for (_ <- 1 to 100) {
        if (tokens.pending == 0) tokens.expectRequest()
        tokens.pending mustBe >=(1L)
        tokens.sendNext(1)
      }
      out.requestNext() mustBe 1
    }
  }

  def throttledGraph: (TestPublisher.Probe[Int], TestPublisher.Probe[Long], TestSubscriber.Probe[Int]) = {
    val ((elems, tokens), out) = TestSource
      .probe[Int]
      .viaMat(TokenThrottle(TestSource.probe[Long])(_ => 1))(Keep.both)
      .toMat(TestSink.probe)(Keep.both)
      .run()
    (elems, tokens, out)
  }
}
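// A minimal usage sketch of TokenThrottle: pace elements against an external
// token source, paying a per-element cost. `requests` and `weight` are
// illustrative placeholders:
//
//   import scala.concurrent.duration._
//   val tokens = Source.tick(1.second, 1.second, 1L) // one token per second
//   requests
//     .via(TokenThrottle(tokens)(req => req.weight))
//     .runWith(Sink.ignore)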
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/ValveSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import akka.actor.ActorSystem
import akka.pattern.after
import akka.stream.{ActorMaterializer, StreamDetachedException}
import akka.stream.contrib.SwitchMode.{Close, Open}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.testkit.scaladsl._
import org.scalatest._
import org.scalatest.Matchers._
import org.scalatest.concurrent.ScalaFutures
import scala.language.postfixOps

import scala.concurrent.duration._

class ValveSpec extends WordSpec with ScalaFutures {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = materializer.executionContext

  "A closed valve" should {

    "emit only 3 elements into a sequence when the valve is switched to open" in {

      val (switchFut, seq) = Source(1 to 3)
        .viaMat(new Valve(SwitchMode.Close))(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        after(100 millis, system.scheduler) {
          switch.flip(Open)
        }.futureValue shouldBe true

        seq.futureValue should contain inOrder (1, 2, 3)
      }
    }

    "emit only 5 elements when the valve is switched to open" in {
      val (switchFut, probe) = Source(1 to 5)
        .viaMat(new Valve(SwitchMode.Close))(Keep.right)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        probe.request(2)
        probe.expectNoMsg(100 millis)

        whenReady(switch.flip(Open)) {
          _ shouldBe true
        }

        probe.expectNext shouldBe 1
        probe.expectNext shouldBe 2

        probe.request(3)
        probe.expectNext shouldBe 3
        probe.expectNext shouldBe 4
        probe.expectNext shouldBe 5

        probe.expectComplete()
      }
    }

    "emit only 3 elements when the valve is switched to open/close/open" in {
      val ((sourceProbe, switchFut), sinkProbe) = TestSource
        .probe[Int]
        .viaMat(Valve())(Keep.both)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        sinkProbe.request(1)
        whenReady(switch.flip(Close)) {
          _ shouldBe true
        }
        sourceProbe.sendNext(1)
        sinkProbe.expectNoMsg(100 millis)

        whenReady(switch.flip(Open)) {
          _ shouldBe true
        }
        sinkProbe.expectNext shouldEqual 1

        whenReady(switch.flip(Close)) {
          _ shouldBe true
        }
        whenReady(switch.flip(Open)) {
          _ shouldBe true
        }
        sinkProbe.expectNoMsg(100 millis)

        sinkProbe.request(1)
        sinkProbe.request(1)
        sourceProbe.sendNext(2)
        sourceProbe.sendNext(3)
        sourceProbe.sendComplete()

        sinkProbe.expectNext shouldBe 2
        sinkProbe.expectNext shouldBe 3

        sinkProbe.expectComplete()
      }
    }
    "return false when the valve is already closed" in {
      val (switchFut, probe) = Source(1 to 5)
        .viaMat(Valve(SwitchMode.Close))(Keep.right)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        whenReady(switch.flip(Close)) { element =>
          element should be(false)
        }
        whenReady(switch.flip(Close)) { element =>
          element should be(false)
        }
      }
    }

    "emit nothing when the source is empty" in {
      val (switch, seq) = Source.empty
        .viaMat(Valve(SwitchMode.Close))(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(seq, timeout(200 millis)) {
        _ shouldBe empty
      }
    }

    "emit nothing when the source is failing" in {
      val (switch, seq) = Source
        .failed(new IllegalArgumentException("Fake exception"))
        .viaMat(Valve(SwitchMode.Close))(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(seq.failed) { e =>
        e shouldBe an[IllegalArgumentException]
      }
    }

    "not pull elements again when opened and closed and re-opened" in {

      val (probe, switchFut, resultFuture) = TestSource
        .probe[Int]
        .viaMat(Valve(SwitchMode.Close))(Keep.both)
        .toMat(Sink.head)((l, r) => (l._1, l._2, r))
        .run()

      whenReady(switchFut) { switch =>
        val result = for {
          _ <- switch.flip(SwitchMode.Open)
          _ <- switch.flip(SwitchMode.Close)
          _ <- switch.flip(SwitchMode.Open)
          _ = probe.sendNext(1)
          _ = probe.sendComplete()
          r <- resultFuture
        } yield r

        whenReady(result) {
          _ shouldBe 1
        }
      }
    }

    "be in closed state" in {
      val (switchFut, seq) = Source(1 to 3)
        .viaMat(new Valve(SwitchMode.Close))(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        whenReady(switch.getMode()) {
          _ shouldBe Close
        }
      }
    }

  }

  "An opened valve" should {

    "emit 5 elements after it has been closed and reopened" in {
      val (switchFut, probe) = Source(1 to 5)
        .viaMat(Valve())(Keep.right)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        probe.request(2)
        probe.expectNext() shouldBe 1
        probe.expectNext() shouldBe 2

        whenReady(switch.flip(Close)) {
          _ shouldBe true
        }

        probe.request(1)
        probe.expectNoMsg(100 millis)

        whenReady(switch.flip(Open)) {
          _ shouldBe true
        }
        probe.expectNext() shouldBe 3

        probe.request(2)
        probe.expectNext() shouldBe 4
        probe.expectNext() shouldBe 5

        probe.expectComplete()
      }
    }

    "return false when the valve is already opened" in {
      val (switchFut, probe) = Source(1 to 5)
        .viaMat(Valve())(Keep.right)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()

      whenReady(switchFut) { switch =>
        whenReady(switch.flip(Open)) {
          _ shouldBe false
        }
        whenReady(switch.flip(Open)) {
          _ shouldBe false
        }
      }
    }

    "emit only 3 elements into a sequence" in {

      val (switch, seq) = Source(1 to 3)
        .viaMat(Valve())(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(seq, timeout(200 millis)) {
        _ should contain inOrder (1, 2, 3)
      }
    }

    "emit nothing when the source is empty" in {
      val (switch, seq) = Source.empty
        .viaMat(Valve())(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(seq, timeout(200 millis)) {
        _ shouldBe empty
      }

    }

    "emit nothing when the source is failing" in {
      val (switch, seq) = Source
        .failed(new IllegalArgumentException("Fake exception"))
        .viaMat(Valve())(Keep.right)
        .toMat(Sink.seq)(Keep.both)
        .run()

      whenReady(seq.failed) { e =>
        e shouldBe an[IllegalArgumentException]
      }
    }

    "not pull elements again when closed and re-opened" in {

      val (probe, switchFut, resultFuture) = TestSource
        .probe[Int]
        .viaMat(Valve())(Keep.both)
        .toMat(Sink.head)((l, r) => (l._1, l._2, r))
        .run()

      whenReady(switchFut) { switch =>
        val result = for {
          _ <- switch.flip(SwitchMode.Close)
          _ <- switch.flip(SwitchMode.Open)
          _ = probe.sendNext(1)
          _ = probe.sendComplete()
          r <- resultFuture
        } yield r

        whenReady(result) {
          _ shouldBe 1
        }
      }
    }

    "be in open state" in {
      val (switchFut, probe) = Source(1 to 5)
        .viaMat(Valve())(Keep.right)
        .toMat(TestSink.probe[Int])(Keep.both)
        .run()
      whenReady(switchFut) { switch =>
        whenReady(switch.getMode()) {
          _ shouldBe Open
        }
      }
    }

  }

  "A completed valve" should {

    "fail to report its mode" in {

      val (switchFut, terminatedFut) = Source.empty
        .viaMat(new Valve(SwitchMode.Close))(Keep.right)
        .toMat(Sink.ignore)(Keep.both)
        .run()

      whenReady(switchFut.zip(terminatedFut)) {
        case (switch, _) =>
          after(100 millis, system.scheduler) {
            switch.getMode
          }.failed.futureValue shouldBe a[StreamDetachedException]
      }
    }
  }

}
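// A minimal usage sketch of Valve: pause and resume a stream from the outside via
// the materialized switch. `events` and `handle` are illustrative placeholders:
//
//   val (switchFut, done) = events
//     .viaMat(Valve())(Keep.right)
//     .toMat(Sink.foreach(handle))(Keep.both)
//     .run()
//   switchFut.foreach(_.flip(SwitchMode.Close)) // pause; flip(SwitchMode.Open) resumes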
--------------------------------------------------------------------------------
/src/test/scala/akka/stream/contrib/ZipInputStreamSourceSpec.scala:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2016 Lightbend Inc.
 */

package akka.stream.contrib

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.zip.{ZipEntry, ZipInputStream, ZipOutputStream}
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestDuration
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

class ZipInputStreamSourceSpec extends BaseStreamSpec {

  "A ZipInputStreamSource" should {
    "emit as many chunks as files when files fit in the chunks" in {
      check(numFiles = 3)
    }

    "emit more chunks than files when files don't fit in the chunks" in {
      check(numFiles = 3, chunkSize = 256)
    }

    "emit an extra chunk per file when the distribution is not exact" in {
      check(numFiles = 3, chunkSize = 250)
    }

    "materialize to 0 bytes when the zip is empty" in {
      check(numFiles = 0)
    }

    "fail when the zip cannot be read and fail the materialized value" in {
      val (ex, probe) = ZipInputStreamSource(() => throw new Exception)
        .toMat(TestSink.probe)(Keep.both)
        .run()
      probe
        .expectSubscriptionAndError()

      intercept[Exception] {
        Await.result(ex, 1.second.dilated)
      }
    }
  }

  private def check(numFiles: Int, fileSize: Int = 1024, chunkSize: Int = 1024) = {
    val expectedSeq =
      Vector.fill(numFiles)(loremIpsum.take(fileSize).toVector.grouped(chunkSize).toVector).flatten
    val nChunksPerFile = fileSize / chunkSize + (if (fileSize % chunkSize != 0) 1 else 0)
    val totalRequested = if (numFiles > 0) numFiles.toLong * nChunksPerFile else 1

    val (totalBytesRead, probe) =
      ZipInputStreamSource(() => new ZipInputStream(sampleZipFile(numFiles, fileSize)), chunkSize)
        .map { case (_, bs) => bs.toVector }
        .toMat(TestSink.probe)(Keep.both)
        .run()
    probe
      .request(totalRequested)
      .expectNextN(expectedSeq)
      .expectComplete()

    Await.result(totalBytesRead, 1.second.dilated) shouldBe (numFiles * fileSize)
  }

  private def sampleZipFile(numFiles: Int, sizePerFile: Int = 1024, dirRatio: Int = 4): ByteArrayInputStream =
    withZos { zos =>
      (1 to numFiles).foreach(i1 => {
        val dirName = if (i1 > dirRatio) s"directory_${i1 / dirRatio}/" else ""
        if (i1 > dirRatio && i1 % dirRatio == 1) {
          zos.putNextEntry(new ZipEntry(dirName)) // new directory entry
          zos.closeEntry()
        }
        val entryName = s"${dirName}file_$i1"
        if (i1 % 2 != 0) {
          zos.putNextEntry(new ZipEntry(entryName))
          zos.write(sampleFile(sizePerFile))
        } else { // nested zip
          zos.putNextEntry(new ZipEntry(s"$entryName.zip"))
          val bais = sampleZipFile(1, sizePerFile)
          val arr = new Array[Byte](bais.available)
          bais.read(arr)
          zos.write(arr)
          bais.close()
        }
        zos.closeEntry()
      })
    }

  private def withZos(f: ZipOutputStream => Unit) = {
    val baos = new ByteArrayOutputStream()
    val zos = new ZipOutputStream(baos)
    try {
      f(zos)
      zos.finish()
      new ByteArrayInputStream(baos.toByteArray)
    } finally {
      if (zos != null)
        zos.close()
    }
  }

  private def sampleFile(size: Int) = loremIpsum.take(size).toArray

  private def loremIpsum: Stream[Byte] =
    Stream.concat("""|Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent auctor imperdiet
                     |velit, eu dapibus nisl dapibus vitae. Sed quam lacus, fringilla posuere ligula at,
                     |aliquet laoreet nulla. Aliquam id fermentum justo. Aliquam et massa consequat,
                     |pellentesque dolor nec, gravida libero. Phasellus elit eros, finibus eget
                     |sollicitudin ac, consectetur sed ante. Etiam ornare lacus blandit nisi gravida
                     |accumsan. Sed in lorem arcu. Vivamus et eleifend ligula. Maecenas ut commodo ante.
                     |Suspendisse sit amet placerat arcu, porttitor sagittis velit. Quisque gravida mi a
                     |porttitor ornare. Cras lorem nisl, sollicitudin vitae odio at, vehicula maximus
                     |mauris. Sed ac purus ac turpis pellentesque cursus ac eget est. Pellentesque
                     |habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
                     |""".stripMargin.toCharArray.map(_.toByte)) #::: loremIpsum

}
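// A minimal usage sketch of ZipInputStreamSource, using only the shape exercised
// above: stream a zip archive as (entry, chunk) pairs; the materialized Future[Long]
// reports the total bytes read. `in` and `process` are illustrative placeholders:
//
//   val bytesRead = ZipInputStreamSource(() => new ZipInputStream(in))
//     .to(Sink.foreach { case (entry, chunk) => process(entry, chunk) })
//     .run()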
Sed quam lacus, fringilla posuere ligula at, 108 | |aliquet laoreet nulla. Aliquam id fermentum justo. Aliquam et massa consequat, 109 | |pellentesque dolor nec, gravida libero. Phasellus elit eros, finibus eget 110 | |sollicitudin ac, consectetur sed ante. Etiam ornare lacus blandit nisi gravida 111 | |accumsan. Sed in lorem arcu. Vivamus et eleifend ligula. Maecenas ut commodo ante. 112 | |Suspendisse sit amet placerat arcu, porttitor sagittis velit. Quisque gravida mi a 113 | |porttitor ornare. Cras lorem nisl, sollicitudin vitae odio at, vehicula maximus 114 | |mauris. Sed ac purus ac turpis pellentesque cursus ac eget est. Pellentesque 115 | |habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. 116 | |""".stripMargin.toCharArray.map(_.toByte)) #::: loremIpsum 117 | 118 | } 119 | --------------------------------------------------------------------------------