├── .gitignore ├── LICENSE ├── README.md ├── bench └── src │ └── test │ └── scala │ └── com │ └── phaller │ └── rasync │ └── bench │ ├── fpbenchmarks.scala │ └── rabenchmarks.scala ├── build.sbt ├── core └── src │ ├── main │ └── scala │ │ └── com │ │ └── phaller │ │ └── rasync │ │ ├── cell │ │ ├── Cell.scala │ │ ├── CellCompleter.scala │ │ ├── callbackRunnable.scala │ │ └── outcome.scala │ │ ├── lattice │ │ ├── Key.scala │ │ ├── Lattice.scala │ │ ├── lattices │ │ │ ├── NaturalNumberLattice.scala │ │ │ └── PowerSetLattice.scala │ │ └── updater.scala │ │ ├── pool │ │ ├── HandlerPool.scala │ │ └── scheduldingStrategy.scala │ │ └── util │ │ └── Counter.scala │ └── test │ ├── java │ └── pureness │ │ └── Demo.java │ └── scala │ └── com │ └── phaller │ └── rasync │ └── test │ ├── Backoff.scala │ ├── BaseSuite.scala │ ├── ExceptionSuite.scala │ ├── InternalBaseSuite.scala │ ├── KeyResolutionSuite.scala │ ├── LatticeSuite.scala │ ├── LazySuite.scala │ ├── MixedKeyResolutionSuite.scala │ ├── PoolSuite.scala │ ├── PowerSetLatticeSuite.scala │ ├── PsSuite.scala │ ├── SequentialSuite.scala │ ├── completerFactory.scala │ ├── immutability │ └── ImmutabilityDemo.scala │ ├── lattice │ ├── Immutability.scala │ ├── IntUpdater.scala │ └── Purity.scala │ └── opal │ ├── ImmutabilityAnalysis.scala │ ├── OPALSuite.scala │ ├── PurityAnalysis.scala │ └── ifds │ ├── AbstractIFDSAnalysis.scala │ ├── IFDSProperty.scala │ └── TestTaintAnalysis.scala ├── monte-carlo-npv └── src │ └── main │ └── scala │ └── com │ └── phaller │ └── rasync │ └── npv │ ├── Distribution.scala │ ├── MonteCarloNpv.scala │ ├── NetPresentValue.scala │ ├── NpvCellTask.scala │ ├── NpvTask.scala │ └── StatsCollector.scala ├── project ├── Dependencies.scala ├── Util.scala ├── build.properties └── plugins.sbt ├── sandbox └── parallelsum.scala └── shippable.yml /.gitignore: -------------------------------------------------------------------------------- 1 | lib/target/ 2 | project/target/ 3 | target/ 4 | reactive-async.iws 5 | 
reactive-async.ipr 6 | .idea 7 | *~ 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015-2018 Philipp Haller 2 | Copyright (c) 2016 Simon Geries 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Reactive Async 2 | 3 | Reactive Async is a concurrent programming model, which decouples 4 | concurrent computations using so-called *cells*, shared locations 5 | which generalize 6 | [futures](https://en.wikipedia.org/wiki/Futures_and_promises) as well 7 | as deterministic abstractions such as 8 | [LVars](https://hackage.haskell.org/package/lvish). Compared to 9 | previously proposed programming models Reactive Async provides (a) a 10 | fallback mechanism for the case where no computation ever computes the 11 | value of a given cell, and (b) explicit and optimized handling of 12 | *cyclic dependencies* between cells. In this repository you find a 13 | complete implementation of the Reactive Async programming model in and 14 | for Scala. 15 | 16 | Talks: 17 | 18 | - Talk at Scala Days 2016: [video](https://www.youtube.com/watch?v=S9xxhyDYoZk), 19 | [slides](https://speakerdeck.com/phaller/programming-with-futures-lattices-and-quiescence) 20 | 21 | - Talk at ACM SIGPLAN Scala Symposium 2016: 22 | [slides](https://speakerdeck.com/phaller/reactive-async-expressive-deterministic-concurrency) 23 | 24 | - Talk at ISSTA 2020: 25 | [video](https://www.youtube.com/watch?v=ejueBIa6FBY&t=1313) 26 | 27 | Papers and thesis: 28 | 29 | - Dominik Helm, Florian Kübler, Jan Thomas Kölzer, Philipp Haller, Michael Eichberg, Guido Salvaneschi and Mira Mezini. 30 | [A programming model for semi-implicit parallelization of static analyses](http://www.csc.kth.se/~phaller/doc/helm20-issta.pdf). 31 | Proc. ACM SIGSOFT International Symposium on Software Testing and Analysis. ACM, 2020. [[ACM DL](https://dl.acm.org/doi/10.1145/3395363.3397367)] 32 | 33 | - Philipp Haller, Simon Geries, Michael Eichberg, and Guido Salvaneschi. 
34 | [Reactive Async: Expressive Deterministic Concurrency](http://www.csc.kth.se/~phaller/doc/haller16-scala.pdf). 35 | Proc. ACM SIGPLAN Scala Symposium. ACM, 2016. [[ACM DL](http://dl.acm.org/citation.cfm?id=2998396)] 36 | 37 | - Master's thesis: Simon Geries. [Reactive Async: Safety and efficiency 38 | of new abstractions for reactive, asynchronous 39 | programming](http://urn.kb.se/resolve?urn=urn%3Anbn%3Ase%3Akth%3Adiva-191330). KTH, 40 | School of Computer Science and Communication (CSC). 2016. 41 | 42 | ## Contributing 43 | 44 | Reactive Async is published under the [BSD 2-Clause 45 | License](https://opensource.org/licenses/BSD-2-Clause) (see file 46 | `LICENSE` in the project's root directory). Contributions submitted 47 | using the normal means to contribute to the project--such as pull 48 | requests and patches--indicate the contributors' assent for inclusion 49 | of that software in the canonical version under the project's license. 50 | 51 | ## Building 52 | 53 | Building Reactive Async requires 54 | [sbt](http://www.scala-sbt.org). Follow these steps: 55 | 56 | ``` 57 | $ sbt 58 | > project core 59 | > compile 60 | ``` 61 | 62 | To package the Reactive Async library into a `jar` file use `package` 63 | instead of `compile`. 64 | 65 | ## Testing 66 | 67 | The test suite (based on [ScalaTest](http://www.scalatest.org)) is run 68 | as follows: 69 | 70 | ``` 71 | $ sbt 72 | > project core 73 | > test 74 | ``` 75 | 76 | ## Benchmarking 77 | 78 | ### Microbenchmarks 79 | 80 | The microbenchmarks (based on 81 | [ScalaMeter](https://scalameter.github.io)) are run as follows: 82 | 83 | ``` 84 | $ sbt 85 | > project bench 86 | > test 87 | ``` 88 | 89 | Note that this consumes a fair amount of memory. Thus, it might be 90 | necessary to increase the JVM's maximum heap size before starting sbt. 
91 | -------------------------------------------------------------------------------- /bench/src/test/scala/com/phaller/rasync/bench/fpbenchmarks.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package bench 3 | 4 | import scala.concurrent.Promise 5 | import scala.annotation.tailrec 6 | import org.scalameter.api._ 7 | import org.scalameter.picklers.noPickler._ 8 | 9 | object FuturesAndPromisesBenchmarks extends PerformanceTest.Microbenchmark { 10 | /* configuration */ 11 | override def executor = LocalExecutor( 12 | new Executor.Warmer.Default, 13 | Aggregator.min, 14 | new Measurer.Default) 15 | override def reporter = new LoggingReporter 16 | override def persistor = Persistor.None 17 | 18 | val nrOfPromises = 100000 19 | val size = Gen.single("Number Of Promises")(nrOfPromises) 20 | 21 | /* creation of promises */ 22 | performance of "Promises" in { 23 | measure method "creating" in { 24 | using(size) config ( 25 | exec.benchRuns -> 9) in { 26 | r => for (i <- 1 to r) Promise[Int]() 27 | } 28 | } 29 | } 30 | 31 | /* creation and completion of futures */ 32 | performance of "Promises" in { 33 | measure method "creating and completing" in { 34 | using(size) config ( 35 | exec.benchRuns -> 9) in { 36 | r => 37 | for (i <- 1 to r) { 38 | val p = Promise[Int] 39 | p.success(1) 40 | } 41 | } 42 | } 43 | } 44 | 45 | /* refinement of promises */ 46 | performance of "Promises" in { 47 | measure method "refinement" in { 48 | using(Gen.unit(s"$nrOfPromises promises")) config ( 49 | exec.benchRuns -> 9) in { 50 | (Unit) => 51 | { 52 | var i = 0 53 | val promises = createListPromises(nrOfPromises, List.empty) 54 | for (p <- promises) { 55 | i = i + 1 56 | p.success(i) 57 | } 58 | } 59 | } 60 | } 61 | } 62 | 63 | @tailrec 64 | def createListPromises(amount: Int, promises: List[Promise[Int]]): List[Promise[Int]] = { 65 | val p = Promise[Int] 66 | if (amount == 0) p :: promises 67 | else 
createListPromises(amount - 1, p :: promises) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /bench/src/test/scala/com/phaller/rasync/bench/rabenchmarks.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package bench 3 | 4 | import com.phaller.rasync.cell.CellCompleter 5 | import com.phaller.rasync.lattice.lattices.{ NaturalNumberKey, NaturalNumberLattice } 6 | import com.phaller.rasync.pool.HandlerPool 7 | import lattice.Lattice 8 | import org.scalameter.api._ 9 | import org.scalameter.picklers.noPickler._ 10 | 11 | import scala.concurrent.{ Await, Promise } 12 | import scala.concurrent.duration._ 13 | 14 | object ReactiveAsyncBenchmarks extends PerformanceTest.Microbenchmark { 15 | /* configuration */ 16 | override def executor = LocalExecutor( 17 | new Executor.Warmer.Default, 18 | Aggregator.min, 19 | new Measurer.Default) 20 | override def reporter = new LoggingReporter 21 | override def persistor = Persistor.None 22 | 23 | val nrOfCells = 100000 24 | val nrOfThreads = 8 25 | val size = Gen.single(s"$nrOfCells cells")(nrOfCells) 26 | 27 | /* lattice instance for cells */ 28 | implicit val naturalNumberLattice: Lattice[Int] = new NaturalNumberLattice 29 | 30 | /* creation of cells/cell completers */ 31 | performance of "Cells" in { 32 | measure method "creating" in { 33 | using(size) config ( 34 | exec.benchRuns -> 9) in { 35 | r => 36 | { 37 | implicit val pool = new HandlerPool(NaturalNumberKey, nrOfThreads) 38 | for (i <- 1 to r) 39 | pool.execute(() => { CellCompleter[Int, Null]() }: Unit) 40 | waitUntilQuiescent(pool) 41 | } 42 | } 43 | } 44 | } 45 | 46 | /* completion of cells */ 47 | performance of "Cells" in { 48 | measure method "create and putFinal" in { 49 | using(size) config ( 50 | exec.benchRuns -> 9) in { 51 | r => 52 | { 53 | implicit val pool = new HandlerPool(NaturalNumberKey, nrOfThreads) 54 | for (i <- 1 to r) { 55 | 
pool.execute(() => { 56 | val cellCompleter = CellCompleter[Int, Null]() 57 | cellCompleter.putFinal(1) 58 | }) 59 | } 60 | waitUntilQuiescent(pool) 61 | } 62 | } 63 | } 64 | } 65 | 66 | performance of "Cells" in { 67 | measure method "putNext" in { 68 | using(Gen.unit(s"$nrOfCells cells")) config ( 69 | exec.benchRuns -> 9) in { 70 | (Unit) => 71 | implicit val pool = new HandlerPool(NaturalNumberKey, nrOfThreads) 72 | val cellCompleter = CellCompleter[Int, Null]() 73 | for (i <- 1 to nrOfCells) pool.execute(() => cellCompleter.putNext(i)) 74 | waitUntilQuiescent(pool) 75 | } 76 | } 77 | } 78 | 79 | def waitUntilQuiescent(pool: HandlerPool[_, _]): Unit = { 80 | val p = Promise[Boolean] 81 | pool.onQuiescent { () => 82 | p.success(true) 83 | } 84 | Await.ready(p.future, 30.seconds) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import Dependencies._ // see project/Dependencies.scala 2 | import Util._ // see project/Util.scala 3 | 4 | val buildVersion = "0.2.1-SNAPSHOT" 5 | organization in ThisBuild := "com.phaller" 6 | licenses in ThisBuild += ("BSD 2-Clause", url("http://opensource.org/licenses/BSD-2-Clause")) 7 | 8 | def commonSettings = Seq( 9 | version in ThisBuild := buildVersion, 10 | scalaVersion := buildScalaVersion, 11 | logBuffered := false, 12 | parallelExecution in Test := false, 13 | resolvers in ThisBuild += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots" 14 | ) 15 | 16 | def noPublish = Seq( 17 | publish := {}, 18 | publishLocal := {} 19 | ) 20 | 21 | lazy val core: Project = (project in file("core")). 22 | settings(commonSettings: _*). 
23 | settings( 24 | name := "reactive-async", 25 | libraryDependencies += scalaTest, 26 | libraryDependencies += opalCommon, 27 | libraryDependencies += opalTAC, 28 | scalacOptions += "-feature" 29 | ) 30 | 31 | lazy val npv: Project = (project in file("monte-carlo-npv")). 32 | settings(commonSettings: _*). 33 | settings( 34 | name := "reactive-async-npv", 35 | scalacOptions += "-feature", 36 | skip in publish := true 37 | ). 38 | dependsOn(core) 39 | 40 | lazy val Benchmark = config("bench") extend Test 41 | 42 | lazy val bench: Project = (project in file("bench")). 43 | settings(commonSettings: _*). 44 | settings( 45 | name := "reactive-async-bench", 46 | libraryDependencies += scalaTest, 47 | libraryDependencies += opalCommon, 48 | // libraryDependencies += opalAI % Test, 49 | libraryDependencies += scalaMeter, 50 | testFrameworks += new TestFramework("org.scalameter.ScalaMeterFramework"), 51 | skip in publish := true 52 | ).configs( 53 | Benchmark 54 | ).settings( 55 | inConfig(Benchmark)(Defaults.testSettings): _* 56 | ). 57 | dependsOn(core) 58 | 59 | javaOptions in ThisBuild ++= Seq("-Xmx27G", "-Xms1024m", "-XX:ThreadStackSize=2048") 60 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/cell/CellCompleter.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.cell 2 | 3 | import com.phaller.rasync.lattice.Updater 4 | import com.phaller.rasync.pool.HandlerPool 5 | 6 | import scala.util.{ Failure, Try } 7 | 8 | /** 9 | * Interface trait for programmatically completing a cell. Analogous to `Promise[V]`. 10 | */ 11 | private[rasync] trait CellCompleter[V, E >: Null] { 12 | 13 | /** 14 | * The cell associated with this completer. 15 | */ 16 | val cell: Cell[V, E] 17 | 18 | /** A method to call */ 19 | private[rasync] val init: (Cell[V, E]) => Outcome[V] 20 | 21 | /** 22 | * Update `this` cells value with `x` and freeze it. 
23 | * The new value of `this` cell is determined by its updater. 24 | */ 25 | def putFinal(x: V): Unit 26 | 27 | /** 28 | * Update `this` cells value with `x`. 29 | * The new value of `this` cell is determined by its updater. 30 | */ 31 | def putNext(x: V): Unit 32 | 33 | /** 34 | * Update `this` cells value with `x`. If `isFinal` is `true`, the 35 | * cell will be frozen. 36 | * The new value of `this` cell is determined by its updater. 37 | */ 38 | def put(x: V, isFinal: Boolean = false): Unit 39 | 40 | /** Complete the cell without changing its value. */ 41 | def freeze(): Unit 42 | 43 | def putFailure(e: Failure[V]): Unit 44 | 45 | private[rasync] def tryNewState(value: V): Unit 46 | private[rasync] def tryComplete(value: Try[V], dontCall: Option[Seq[Cell[V, E]]]): Unit 47 | 48 | /** 49 | * Run code for `this` cell sequentially. 50 | */ 51 | private[rasync] def sequential(f: () => _, prio: Int): Unit 52 | } 53 | 54 | object CellCompleter { 55 | /** 56 | * Create a completer for a cell holding values of type `V` 57 | * given a `HandlerPool` and a `Key[V]`. 58 | */ 59 | def apply[V, E >: Null](init: (Cell[V, E]) => Outcome[V] = (_: Cell[V, E]) => NoOutcome, sequential: Boolean = false, entity: E = null)(implicit updater: Updater[V], pool: HandlerPool[V, E]): CellCompleter[V, E] = { 60 | val impl = 61 | if (sequential) new SequentialCellImpl[V, E](pool, updater, init, entity) 62 | else new ConcurrentCellImpl[V, E](pool, updater, init, entity) 63 | pool.register(impl) 64 | impl 65 | } 66 | 67 | /** 68 | * Create a cell completer which is already completed with value `result`. 69 | * 70 | * Note: there is no `K` type parameter, since we always use type 71 | * `DefaultKey[V]`, no other key would make sense. 
72 | */ 73 | def completed[V, E >: Null](result: V, entity: E = null)(implicit updater: Updater[V], pool: HandlerPool[V, E]): CellCompleter[V, E] = { 74 | val impl = new ConcurrentCellImpl[V, E](pool, updater, _ => NoOutcome, entity) 75 | pool.register(impl) 76 | impl.putFinal(result) 77 | impl 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/cell/callbackRunnable.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package cell 3 | 4 | import java.util.concurrent.atomic.AtomicReference 5 | 6 | import pool.HandlerPool 7 | import scala.annotation.tailrec 8 | 9 | import scala.concurrent.OnCompleteRunnable 10 | import scala.util.{ Failure, Success, Try } 11 | 12 | import com.phaller.rasync.util.Counter 13 | 14 | /** 15 | * CallbackRunnables are tasks that need to be run, when a value of a cell changes, that 16 | * some completer depends on. 17 | * 18 | * CallbackRunnables store information about the involved cells and the callback to 19 | * be run. 20 | */ 21 | private[rasync] abstract class CallbackRunnable[V, E >: Null] extends Runnable with OnCompleteRunnable { 22 | protected val pool: HandlerPool[V, E] 23 | 24 | protected val dependentCompleter: CellCompleter[V, E] 25 | 26 | /** The callback to be called. It retrieves an updated value of otherCell and returns an Outcome for dependentCompleter. 
*/ 27 | protected val callback: Iterable[(Cell[V, E], Try[ValueOutcome[V]])] ⇒ Outcome[V] 28 | 29 | protected val updatedDependees = new AtomicReference[Set[Cell[V, E]]](Set.empty) 30 | protected var prio: Int = Int.MaxValue 31 | 32 | @tailrec 33 | final def addUpdate(other: Cell[V, E]): Unit = { 34 | val oldUpdatedDependees = updatedDependees.get 35 | val newUpdatedDependees = oldUpdatedDependees + other 36 | if (updatedDependees.compareAndSet(oldUpdatedDependees, newUpdatedDependees)) { 37 | 38 | // We store a priority for sequential execution of this callbackRunnable. 39 | // It is set to the highest priority found among the dependees that are 40 | // part of this update. 41 | // This computation of prio is not thread-safe but this does not matter for 42 | // priorities are no hard requirement anyway. 43 | prio = Math.min(prio, pool.schedulingStrategy.calcPriority(dependentCompleter.cell, other, other.getState())) 44 | 45 | // The first incoming update (since the last execution) starts this runnable. 46 | // Other cells might still be added to updatedDependees concurrently, the runnable 47 | // will collect all updates and forward them altogether. 48 | if (oldUpdatedDependees.isEmpty) { 49 | pool.execute(this, prio) 50 | Counter.inc("CallbackRunnable.addUpdate.triggerExecution") 51 | } else { 52 | Counter.inc("CallbackRunnable.addUpdate.aggregations") 53 | } 54 | } else addUpdate(other) // retry 55 | } 56 | 57 | /** 58 | * Call the callback and update dependentCompleter according to the callback's result. 59 | * This method is implemented by `ConcurrentCallbackRunnable` and `SequentialCalllbackRunnable`, 60 | * where the latter implementation ensures that the callback is run sequentially. 
61 | */ 62 | override def run(): Unit 63 | 64 | protected def callCallback(): Unit = { 65 | if (!dependentCompleter.cell.isComplete) { 66 | 67 | try { 68 | // Remove all updates from the list of updates that need to be handled – they will now be handled 69 | val dependees = updatedDependees.getAndSet(Set.empty) 70 | val propagations = dependees.iterator.map(c ⇒ (c, c.getState())).toIterable 71 | 72 | val depsRemoved = // see below for depsRemoved 73 | callback(propagations) match { 74 | case NextOutcome(v) ⇒ 75 | dependentCompleter.putNext(v) 76 | false 77 | case FinalOutcome(v) ⇒ 78 | dependentCompleter.putFinal(v) 79 | true 80 | case FreezeOutcome ⇒ 81 | dependentCompleter.freeze() 82 | true 83 | case NoOutcome ⇒ 84 | // Do not change the value of the cell 85 | // but remove all dependees that have had 86 | // a final value from the lists of dependees. 87 | false 88 | } 89 | // if the dependency has not been removed yet, 90 | // we can remove it, if a FinalOutcome has been propagted 91 | // or a Failuare has been propagated, i.e. the dependee had been completed 92 | // and cannot change later 93 | if (!depsRemoved) { 94 | val toRemove = propagations.iterator.filter({ 95 | case (_, Success(NextOutcome(_))) ⇒ false 96 | case _ ⇒ true 97 | }).map(_._1).toIterable 98 | dependentCompleter.cell.removeDependeeCells(toRemove) 99 | } 100 | } catch { 101 | // An exception thrown in a callback is stored as the final value for the depender 102 | case e: Exception ⇒ 103 | dependentCompleter.putFailure(Failure(e)) 104 | } 105 | } 106 | } 107 | } 108 | 109 | /** 110 | * Run a callback concurrently, if a value in a cell changes. 
111 | */ 112 | private[rasync] class ConcurrentCallbackRunnable[V, E >: Null](override val pool: HandlerPool[V, E], override val dependentCompleter: CellCompleter[V, E], override val callback: Iterable[(Cell[V, E], Try[ValueOutcome[V]])] ⇒ Outcome[V]) extends CallbackRunnable[V, E] { 113 | override def run(): Unit = 114 | callCallback() 115 | } 116 | 117 | /** 118 | * Run a callback sequentially (for a dependent cell), if a value in another cell changes. 119 | */ 120 | private[rasync] class SequentialCallbackRunnable[V, E >: Null](override val pool: HandlerPool[V, E], override val dependentCompleter: CellCompleter[V, E], override val callback: Iterable[(Cell[V, E], Try[ValueOutcome[V]])] ⇒ Outcome[V]) extends CallbackRunnable[V, E] { 121 | override def run(): Unit = 122 | dependentCompleter.sequential(callCallback _, prio) 123 | } 124 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/cell/outcome.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.cell 2 | 3 | /** 4 | * Use this trait in callbacks to return the new value of a cell. 5 | * `NextOutcome(v)` and `FinalOutcome(v)` put the value `v` into 6 | * the cell; in the latter case the cell is completed. 7 | * Use `NoOutcome` to indicate that no progress is possible. 
8 | */ 9 | sealed trait Outcome[+V] 10 | sealed trait FreezeOutcome[+V] extends Outcome[V] 11 | sealed trait IntermediateOutcome[+V] extends Outcome[V] 12 | sealed trait ValueOutcome[+V] extends Outcome[V] { 13 | val value: V 14 | } 15 | final case class NextOutcome[+V](override val value: V) extends ValueOutcome[V] with IntermediateOutcome[V] 16 | final case class FinalOutcome[+V](override val value: V) extends ValueOutcome[V] with FreezeOutcome[V] 17 | case object NoOutcome extends IntermediateOutcome[Nothing] 18 | case object FreezeOutcome extends FreezeOutcome[Nothing] 19 | 20 | object Outcome { 21 | 22 | /** Returns a `NextOutcome(value)` or `FinalOutcome(value)` object, depending on `isFinal`. */ 23 | def apply[V](value: V, isFinal: Boolean = false): ValueOutcome[V] = 24 | if (isFinal) FinalOutcome(value) 25 | else NextOutcome(value) 26 | 27 | /** Returns a `NextOutcome(value)` or `FinalOutcome(value)` object, depending on `isFinal`. */ 28 | def apply[V](e: (V, Boolean)): ValueOutcome[V] = 29 | if (e._2) FinalOutcome(e._1) 30 | else NextOutcome(e._1) 31 | 32 | /** 33 | * Returns a `NextOutcome`, `FinalOutcome`, or `NoOutcome` object. 34 | * 35 | * If `valueOpt` is `None`, `NoOutcome` is returned. Otherwise, a `NextOutcome` or 36 | * `FinalOutcome` is returned depending on `isFinal`. 37 | * 38 | * @param valueOpt Option of a new value. 39 | * @param isFinal Indicates if the value is final. 40 | * @return Returns a `NextOutcome`, `FinalOutcome`, or `NoOutcome` object. 41 | */ 42 | def apply[V](valueOpt: Option[V], isFinal: Boolean): Outcome[V] = 43 | valueOpt.map(value => if (isFinal) FinalOutcome(value) else NextOutcome(value)).getOrElse(NoOutcome) 44 | 45 | /** 46 | * Returns a `NextOutcome`, `FinalOutcome`, or `NoOutcome` object. 47 | * 48 | * If `valueOpt` is `None`, `NoOutcome` is returned. Otherwise, a `NextOutcome` or 49 | * `FinalOutcome` is returned depending on the boolean parameter. 
50 | * 51 | * @param valueOpt Option of a new value, a pair of a value V and a boolean to indicate if it is final. 52 | * @return Returns a `NextOutcome`, `FinalOutcome`, or `NoOutcome` object. 53 | */ 54 | def apply[V](valueOpt: Option[(V, Boolean)]): Outcome[V] = valueOpt match { 55 | case Some((v, false)) => NextOutcome(v) 56 | case Some((v, true)) => FinalOutcome(v) 57 | case None => NoOutcome 58 | } 59 | 60 | /** Match outcomes. */ 61 | def unapply[V](outcome: Outcome[V]): Option[(V, Boolean)] = outcome match { 62 | case FinalOutcome(v) => Some(v, true) 63 | case NextOutcome(v) => Some(v, false) 64 | case _ => None 65 | } 66 | 67 | /** Match non-empty outcomes. */ 68 | def unapply[V](outcome: ValueOutcome[V]): (V, Boolean) = outcome match { 69 | case FinalOutcome(v) => (v, true) 70 | case NextOutcome(v) => (v, false) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/lattice/Key.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package lattice 3 | 4 | import com.phaller.rasync.cell.Cell 5 | 6 | trait Key[V, E >: Null] { 7 | def resolve(cells: Iterable[Cell[V, E]]): Iterable[(Cell[V, E], V)] 8 | def fallback(cells: Iterable[Cell[V, E]]): Iterable[(Cell[V, E], V)] 9 | } 10 | 11 | class DefaultKey[V, E >: Null] extends Key[V, E] { 12 | 13 | def resolve(cells: Iterable[Cell[V, E]]): Iterable[(Cell[V, E], V)] = { 14 | cells.map(cell => (cell, cell.getResult())) 15 | } 16 | 17 | def fallback(cells: Iterable[Cell[V, E]]): Iterable[(Cell[V, E], V)] = { 18 | cells.map(cell => (cell, cell.getResult())) 19 | } 20 | 21 | } 22 | 23 | object DefaultKey { 24 | def apply[V]: DefaultKey[V, Null] = new DefaultKey[V, Null]() 25 | } 26 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/lattice/Lattice.scala: 
-------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package lattice 3 | 4 | import scala.annotation.implicitNotFound 5 | 6 | trait PartialOrderingWithBottom[V] extends PartialOrdering[V] { 7 | /** 8 | * Result of comparing x with operand y. Returns None if operands are not comparable. If operands are comparable, returns Some(r) where 9 | * r < 0 iff x < y 10 | * r == 0 iff x == y 11 | * r > 0 iff x > y 12 | */ 13 | override def tryCompare(x: V, y: V): Option[Int] = 14 | if (lt(x, y)) Some(-1) 15 | else if (gt(x, y)) Some(1) 16 | else if (equiv(x, y)) Some(0) 17 | else None 18 | 19 | val bottom: V 20 | } 21 | 22 | object PartialOrderingWithBottom { 23 | def trivial[T >: Null]: PartialOrderingWithBottom[T] = { 24 | new PartialOrderingWithBottom[T] { 25 | override val bottom: T = null 26 | override def lteq(v1: T, v2: T): Boolean = 27 | (v1 == bottom) || (v1 == v2) 28 | } 29 | } 30 | } 31 | 32 | @implicitNotFound("type ${V} does not have a Lattice instance") 33 | trait Lattice[V] extends PartialOrderingWithBottom[V] { 34 | /** 35 | * Return the join of v1 and v2 wrt. the lattice. 
36 | */ 37 | def join(v1: V, v2: V): V 38 | 39 | override def lteq(v1: V, v2: V): Boolean = { 40 | join(v1, v2) == v2 41 | } 42 | 43 | override def gteq(v1: V, v2: V): Boolean = { 44 | join(v1, v2) == v1 45 | } 46 | } 47 | 48 | object Lattice { 49 | implicit def pair[T](implicit lattice: Lattice[T]): Lattice[(T, T)] = { 50 | new Lattice[(T, T)] { 51 | def join(v1: (T, T), v2: (T, T)): (T, T) = 52 | (lattice.join(v1._1, v2._1), lattice.join(v1._2, v2._2)) 53 | val bottom: (T, T) = 54 | (lattice.bottom, lattice.bottom) 55 | override def lteq(v1: (T, T), v2: (T, T)): Boolean = 56 | lattice.lteq(v1._1, v2._1) && lattice.lteq(v1._2, v2._2) 57 | override def gteq(v1: (T, T), v2: (T, T)): Boolean = 58 | lattice.gteq(v1._1, v2._1) && lattice.gteq(v1._2, v2._2) 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/lattice/lattices/NaturalNumberLattice.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.lattice.lattices 2 | 3 | import com.phaller.rasync.lattice.{ DefaultKey, Lattice } 4 | 5 | object NaturalNumberKey extends DefaultKey[Int, Null] 6 | 7 | class NaturalNumberLattice extends Lattice[Int] { 8 | override def join(v1: Int, v2: Int): Int = { 9 | if (v2 > v1) v2 10 | else v1 11 | } 12 | 13 | override def lteq(v1: Int, v2: Int): Boolean = v1 <= v2 14 | override def gteq(v1: Int, v2: Int): Boolean = v1 >= v2 15 | override def lt(v1: Int, v2: Int): Boolean = v1 < v2 16 | override def gt(v1: Int, v2: Int): Boolean = v1 > v2 17 | override def tryCompare(x: Int, y: Int): Option[Int] = Some(x - y) 18 | 19 | override val bottom: Int = 0 20 | } 21 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/lattice/lattices/PowerSetLattice.scala: -------------------------------------------------------------------------------- 1 | package 
com.phaller.rasync.lattice 2 | package lattices 3 | 4 | class PowerSetLattice[T] extends Lattice[Set[T]] { 5 | 6 | def join(left: Set[T], right: Set[T]): Set[T] = 7 | left ++ right 8 | 9 | val bottom: Set[T] = 10 | Set[T]() 11 | 12 | } 13 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/lattice/updater.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package lattice 3 | 4 | /** 5 | * An updater defines, how to react to a value that is being put to a cell. 6 | * Given a `current` value of the cell a a `next` value, the `update` method 7 | * returns the new value of the cell of a "combination" of `current` and `next`. 8 | * 9 | * The `bottom` value of an updater defines, what the initial value of a cell is. 10 | */ 11 | trait Updater[V] { 12 | val bottom: V 13 | def update(current: V, next: V): V 14 | } 15 | 16 | /** 17 | * AggregationUpdaters are built on lattices and compute new values as `join` of 18 | * all incoming ("next") values. Therefore, AggregationUpdaters do not throw exceptions 19 | * as the `join` of two lattice values is always defined. 20 | * 21 | * The initial value is the bottom value of the lattice. 22 | */ 23 | class AggregationUpdater[V](val lattice: Lattice[V]) extends Updater[V] { 24 | override val bottom: V = lattice.bottom 25 | override def update(current: V, next: V): V = lattice.join(current, next) 26 | } 27 | 28 | /** 29 | * MonotonicUpdaters are built on partial orderings. The incoming ("next") value is 30 | * used as the new value for the cell, as long as the update is monotonic. 31 | * Otherwise a NotMonotonicException is thrown. 32 | * 33 | * The initial value is the bottom value of the partial ordering. 
34 | */ 35 | class MonotonicUpdater[V](val partialOrderingWithBottom: PartialOrderingWithBottom[V]) extends Updater[V] { 36 | override val bottom: V = partialOrderingWithBottom.bottom 37 | 38 | override def update(current: V, next: V): V = 39 | if (partialOrderingWithBottom.lteq(current, next)) next 40 | else throw NotMonotonicException(current, next) 41 | } 42 | 43 | object Updater { 44 | // (implicitely) convert a lattice to its canonic alaggregation updater 45 | implicit def latticeToUpdater[T](implicit lattice: Lattice[T]): Updater[T] = 46 | new AggregationUpdater[T](lattice) 47 | 48 | // convert a lattice to its canonical monotonic updater 49 | def partialOrderingToUpdater[T](implicit partialOrderingWithBottom: PartialOrderingWithBottom[T]): Updater[T] = 50 | new MonotonicUpdater[T](partialOrderingWithBottom) 51 | 52 | // create an updater for pairs of values. 53 | def pair[T](implicit updater: Updater[T]): Updater[(T, T)] = new Updater[(T, T)] { 54 | override def update(current: (T, T), next: (T, T)): (T, T) = 55 | (updater.update(current._1, next._1), updater.update(current._2, next._2)) 56 | 57 | override val bottom: (T, T) = 58 | (updater.asInstanceOf[PartialOrderingWithBottom[T]].bottom, updater.asInstanceOf[PartialOrderingWithBottom[T]].bottom) 59 | } 60 | } 61 | 62 | final case class NotMonotonicException[D](current: D, next: D) extends IllegalStateException( 63 | s"Violation of ordering with current $current and next $next!") 64 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/pool/HandlerPool.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package pool 3 | 4 | import java.util.concurrent._ 5 | import java.util.concurrent.atomic.AtomicReference 6 | 7 | import cell.{ ConcurrentCellImpl, _ } 8 | import lattice.{ DefaultKey, Key, Updater } 9 | import org.opalj.graphs._ 10 | 11 | import 
scala.annotation.tailrec 12 | import scala.collection.JavaConverters._ 13 | import scala.concurrent.{ Future, Promise } 14 | import scala.util.{ Failure, Success } 15 | import scala.util.control.NonFatal 16 | 17 | /* Need to have reference equality for CAS. 18 | */ 19 | private class PoolState(val handlers: List[() => Unit] = List(), val submittedTasks: Int = 0) { 20 | def isQuiescent(): Boolean = 21 | submittedTasks == 0 22 | } 23 | 24 | class HandlerPool[V, E >: Null]( 25 | val key: Key[V, E] = new DefaultKey[V, E](), 26 | val parallelism: Int = Runtime.getRuntime.availableProcessors(), 27 | unhandledExceptionHandler: Throwable => Unit = _.printStackTrace(), 28 | val schedulingStrategy: SchedulingStrategy[V, E] = new DefaultScheduling[V, E]) { 29 | 30 | private val pool: AbstractExecutorService = 31 | schedulingStrategy match { 32 | case _: DefaultScheduling[_, _] => new ForkJoinPool(parallelism) 33 | case _ => new ThreadPoolExecutor(parallelism, parallelism, Int.MaxValue, TimeUnit.NANOSECONDS, new PriorityBlockingQueue[Runnable]()) 34 | } 35 | 36 | private val poolState = new AtomicReference[PoolState](new PoolState) 37 | 38 | private val cellsNotDone = new ConcurrentLinkedQueue[Cell[_, _]]() 39 | 40 | abstract class PriorityRunnable(val priority: Int) extends Runnable with Comparable[Runnable] { 41 | override def compareTo(t: Runnable): Int = { 42 | val p = t match { 43 | case runnable: PriorityRunnable => runnable.priority 44 | case _ => 1 45 | } 46 | priority - p 47 | } 48 | } 49 | 50 | /** 51 | * Returns a new cell in this HandlerPool. 52 | * 53 | * Creates a new cell with the given key. The `init` method is used to 54 | * determine an initial value for that cell and to set up dependencies via `whenNext`. 55 | * It gets called, when the cell is awaited, either directly by the triggerExecution method 56 | * of the HandlerPool or if a cell that depends on this cell is awaited. 
57 | * 58 | * @param init A callback to return the initial value for this cell and to set up dependencies. 59 | * @param updater The updater used to update the value of this cell. 60 | * @return Returns a cell. 61 | */ 62 | def mkCell(init: (Cell[V, E]) => Outcome[V], entity: E = null)(implicit updater: Updater[V]): Cell[V, E] = { 63 | val impl = new ConcurrentCellImpl[V, E](this, updater, init, entity) 64 | this.register(impl) 65 | impl 66 | } 67 | 68 | def mkSequentialCell(init: (Cell[V, E]) => Outcome[V], entity: E = null)(implicit updater: Updater[V]): Cell[V, E] = { 69 | val impl = new SequentialCellImpl[V, E](this, updater, init, entity) 70 | this.register(impl) 71 | impl 72 | } 73 | 74 | /** 75 | * Returns a new cell in this HandlerPool. 76 | * 77 | * Creates a new, completed cell with value `v`. 78 | * 79 | * @param updater The updater used to update the value of this cell. 80 | * @return Returns a cell with value `v`. 81 | */ 82 | def mkCompletedCell(result: V, entity: E)(implicit updater: Updater[V]): Cell[V, E] = { 83 | CellCompleter.completed(result, entity)(updater, this).cell 84 | } 85 | 86 | /** 87 | * Add an event handler that is called, when the pool reaches quiescence. 88 | * 89 | * The `handler` is called once after the pool reaches quiescence the first time 90 | * after it has been added. 91 | */ 92 | @tailrec 93 | final def onQuiescent(handler: () => Unit): Unit = { 94 | val state = poolState.get() 95 | if (state.isQuiescent) { 96 | execute(new Runnable { def run(): Unit = handler() }, 0) 97 | } else { 98 | val newState = new PoolState(handler :: state.handlers, state.submittedTasks) 99 | val success = poolState.compareAndSet(state, newState) 100 | if (!success) 101 | onQuiescent(handler) 102 | } 103 | } 104 | 105 | /** 106 | * Register a cell with this HandlerPool. 107 | * 108 | * @param cell The cell. 
109 | */ 110 | def register(cell: Cell[V, E]): Unit = 111 | cellsNotDone.add(cell) 112 | 113 | /** 114 | * Deregister a cell from this HandlerPool. 115 | * 116 | * @param cell The cell. 117 | */ 118 | def deregister(cell: Cell[V, E]): Unit = 119 | cellsNotDone.remove(cell) 120 | 121 | /** 122 | * Remove all completed cells from cellsNotDone. Cells are not removed on deregister, but when the queue is 123 | * queried. 124 | */ 125 | private def deregisterCompletedCells(): Unit = { 126 | cellsNotDone.removeIf(_.isComplete) 127 | } 128 | 129 | /** Returns all non-completed cells, when quiescence is reached. */ 130 | def quiescentIncompleteCells: Future[Iterable[Cell[_, _]]] = { 131 | val p = Promise[Iterable[Cell[_, _]]] 132 | this.onQuiescent { () => 133 | deregisterCompletedCells() 134 | p.success(cellsNotDone.asScala) 135 | } 136 | p.future 137 | } 138 | 139 | /** 140 | * Wait for a quiescent state. 141 | * Afterwards, resolve all cells without dependencies with the respective 142 | * `fallback` value calculated by it's `Key`. 143 | * Also, resolve cycles without dependencies (cSCCs) using the respective `Key`'s `resolve` method. 144 | * Both might lead to computations on other cells being triggered. 145 | * If more cells are unresolved, recursively wait for resolution. 146 | * 147 | * @return The future is set once the resolve is finished and the quiescent state is reached. 148 | * The boolean parameter indicates if cycles have been resolved or not. 
149 | */ 150 | def quiescentResolveCell: Future[Unit] = { 151 | val p = Promise[Unit] 152 | this.onQuiescent { () => 153 | deregisterCompletedCells() 154 | 155 | val activeCells = this.cellsNotDone.asScala 156 | .filter(_.tasksActive()) 157 | .asInstanceOf[Iterable[Cell[V, E]]] 158 | 159 | val independent = activeCells.filter(_.isIndependent()) 160 | val waitAgain: Boolean = 161 | if (independent.nonEmpty) { 162 | // Resolve independent cells with fallback values 163 | resolveIndependent(independent) 164 | } else { 165 | // Otherwise, find and resolve closed strongly connected components and resolve them. 166 | 167 | // Find closed strongly connected component (cell) 168 | if (activeCells.isEmpty) { 169 | false 170 | } else { 171 | val cSCCs = closedSCCs[Cell[V, E]](activeCells, (cell: Cell[V, E]) => cell.cellDependencies) 172 | cSCCs 173 | .map(resolveCycle) // resolve all cSCCs 174 | .exists(b => b) // return if any resolution took place 175 | } 176 | } 177 | 178 | // Wait again for quiescent state. It's possible that other tasks where scheduled while 179 | // resolving the cells. 180 | if (waitAgain) { 181 | p.completeWith(quiescentResolveCell) 182 | } else { 183 | p.success(()) 184 | } 185 | } 186 | p.future 187 | } 188 | 189 | /** 190 | * Resolves a cycle of unfinished cells via the key's `resolve` method. 191 | */ 192 | private def resolveCycle(cells: Iterable[Cell[V, E]]): Boolean = 193 | resolve(cells, key.resolve) 194 | 195 | /** 196 | * Resolves a cell with default value with the key's `fallback` method. 197 | */ 198 | private def resolveIndependent(cells: Iterable[Cell[V, E]]): Boolean = 199 | resolve(cells, key.fallback) 200 | 201 | /** Resolve all cells with the associated value. 
*/ 202 | private def resolve(cells: Iterable[Cell[V, E]], k: (Iterable[Cell[V, E]]) => Iterable[(Cell[V, E], V)]): Boolean = { 203 | try { 204 | val results = k(cells) 205 | val dontCall = results.map(_._1).toSeq 206 | for ((c, v) <- results) { 207 | val res = Success(v) 208 | execute(new Runnable { 209 | override def run(): Unit = { 210 | // resolve each cell with the given value 211 | // but do not propagate among the cells in the same set (i.e. the same cSCC) 212 | c.resolveWithValue(res, dontCall) 213 | } 214 | }, schedulingStrategy.calcPriority(c, res)) 215 | } 216 | results.nonEmpty 217 | } catch { 218 | case e: Exception => 219 | // if an exception occurs, resolve all cells with a failure 220 | val f = Failure(e) 221 | val dontCall = cells.toSeq 222 | cells.foreach(c => 223 | execute(() => c.resolveWithValue(f, dontCall), schedulingStrategy.calcPriority(c, f))) 224 | cells.nonEmpty 225 | } 226 | } 227 | 228 | /** 229 | * Increase the number of submitted tasks. 230 | * Change the PoolState accordingly. 231 | */ 232 | private def incSubmittedTasks(): Unit = { 233 | var submitSuccess = false 234 | while (!submitSuccess) { 235 | val state = poolState.get() 236 | val newState = new PoolState(state.handlers, state.submittedTasks + 1) 237 | submitSuccess = poolState.compareAndSet(state, newState) 238 | } 239 | } 240 | 241 | /** 242 | * Decrease the number of submitted tasks and run registered handlers, if quiescent. 243 | * Change the PoolState accordingly. 
244 | */ 245 | private def decSubmittedTasks(i: Int = 1): Unit = { 246 | var success = false 247 | var handlersToRun: Option[List[() => Unit]] = None 248 | 249 | while (!success) { // reapeat until compareAndSet succeeded 250 | val state = poolState.get() 251 | if (state.submittedTasks > i) { 252 | // we can simply decrease the counter 253 | handlersToRun = None 254 | val newState = new PoolState(state.handlers, state.submittedTasks - i) 255 | success = poolState.compareAndSet(state, newState) 256 | } else if (state.submittedTasks == i) { 257 | // counter will drop to zero, so we need to call quiescent handlers later 258 | handlersToRun = Some(state.handlers) 259 | // a fresh state without any quiescent handler attached – those get called at most once! 260 | val newState = new PoolState() 261 | success = poolState.compareAndSet(state, newState) 262 | } else { 263 | throw new Exception("BOOM") 264 | } 265 | } 266 | // run all handler that have been attached at the time quiescence was reached 267 | if (handlersToRun.nonEmpty) { 268 | handlersToRun.get.foreach { handler => 269 | execute(new Runnable { 270 | def run(): Unit = handler() 271 | }, 0) // TODO set a priority 272 | } 273 | } 274 | } 275 | 276 | // Shouldn't we use: 277 | //def execute(f : => Unit) : Unit = 278 | // execute(new Runnable{def run() : Unit = f}) 279 | 280 | def execute(fun: () => Unit, priority: Int): Unit = 281 | execute(new Runnable { def run(): Unit = fun() }, priority) 282 | 283 | def execute(task: Runnable, priority: Int = 0): Unit = { 284 | // Submit task to the pool 285 | incSubmittedTasks() 286 | 287 | // Run the task 288 | try { 289 | pool.execute(new PriorityRunnable(priority) { 290 | def run(): Unit = { 291 | try { 292 | task.run() 293 | } catch { 294 | case NonFatal(e) => 295 | unhandledExceptionHandler(e) 296 | } finally { 297 | decSubmittedTasks() 298 | } 299 | } 300 | }) 301 | } catch { 302 | // If pool.execute() failed, we need to count down now. 
303 | // (Normally, decSubmittedTasks is called after task.run()) 304 | case e: Exception => 305 | decSubmittedTasks() 306 | throw e 307 | } 308 | } 309 | 310 | /** 311 | * If a cell is triggered, it's `init` method is 312 | * run to both get an initial (or possibly final) value 313 | * and to set up dependencies (via whenNext/whenComplete). 314 | * All dependees automatically get triggered. 315 | * 316 | * @param cell The cell that is triggered. 317 | */ 318 | private[rasync] def triggerExecution(cell: Cell[V, E], priority: Int = 0): Unit = { 319 | if (cell.setTasksActive()) 320 | // if the cell's state has successfully been changed, schedule further computations 321 | execute(() => { 322 | val completer = cell.completer 323 | try { 324 | val outcome = completer.init(cell) // call the init method 325 | outcome match { 326 | case Outcome(v, isFinal) => completer.put(v, isFinal) 327 | case NoOutcome => /* don't do anything */ 328 | } 329 | } catch { 330 | case e: Exception => completer.putFailure(Failure(e)) 331 | } 332 | }, priority) 333 | } 334 | 335 | /** 336 | * Possibly initiates an orderly shutdown in which previously 337 | * submitted tasks are executed, but no new tasks are accepted. 338 | * This method should only be called, when the pool is quiescent. 339 | */ 340 | def shutdown(): Unit = 341 | pool.shutdown() 342 | 343 | /** 344 | * Waits for quiescence, then shuts the pool down. 
345 | */ 346 | def onQuiescenceShutdown(): Unit = 347 | this.onQuiescent(() => pool.shutdown()) 348 | 349 | def reportFailure(t: Throwable): Unit = 350 | t.printStackTrace() 351 | 352 | } 353 | 354 | object HandlerPool { 355 | def apply[V]: HandlerPool[V, Null] = new HandlerPool[V, Null]() 356 | def apply[V](key: Key[V, Null]): HandlerPool[V, Null] = new HandlerPool[V, Null](key) 357 | } 358 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/pool/scheduldingStrategy.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.pool 2 | 3 | import com.phaller.rasync.cell.Cell 4 | import com.phaller.rasync.cell.ValueOutcome 5 | 6 | import scala.util.Try 7 | 8 | /** 9 | * A scheduling strategy defines priorities for dependency callbacks 10 | * and cell resolution. 11 | * 12 | * Whenever a dependency is triggered due to a change of a dependee cell, 13 | * a task is added to the handler pool. Those tasks will eventually be picked 14 | * up and executed, potentielly concurrently to other tasks. 15 | * Each task added to the pool is assigned a priority as defined by a 16 | * SchedulingStrategy, where tasks with lower priorities are more likely 17 | * to be executed earlier. (Note that due to concurrent execution there is 18 | * no guarantee for any order of execution. Use sequential callbacks to 19 | * avoid concurrent execution of certain tasks.) 20 | * 21 | * If a tasks has been created because of a change in a dependee cell `other` being propgated to 22 | * a `dependentCell`, `SchedulingStrategy.calcPriority(dependentCell, other)` is called to 23 | * obtain a priority. 24 | * If a tasks has been created to complete a `cell` via a `Key`, 25 | * `SchedulingStrategy.calcPriority(cell)` is called. 
26 | */ 27 | trait SchedulingStrategy[V, E >: Null] { 28 | def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int 29 | def calcPriority(dependentCell: Cell[V, E], value: Try[V]): Int 30 | override def toString: String = this.getClass.getSimpleName 31 | } 32 | 33 | object SchedulingStrategy { 34 | def apply[V, E >: Null](s: SchedulingStrategy.type): SchedulingStrategy[V, E] = { 35 | s.getClass.asInstanceOf[Class[SchedulingStrategy[V, E]]].newInstance() 36 | } 37 | } 38 | 39 | /** All tasks are of equal priority. */ 40 | class DefaultScheduling[V, E >: Null] extends SchedulingStrategy[V, E] { 41 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 0 42 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 0 43 | } 44 | 45 | class SourcesWithManyTargetsFirst[V, E >: Null] extends SchedulingStrategy[V, E] { 46 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 47 | -other.numDependentCells 48 | 49 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 50 | -cell.numDependentCells 51 | } 52 | 53 | class SourcesWithManyTargetsLast[V, E >: Null] extends SchedulingStrategy[V, E] { 54 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 55 | other.numDependentCells 56 | 57 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 58 | cell.numDependentCells 59 | } 60 | 61 | class SourcesWithManySourcesFirst[V, E >: Null] extends SchedulingStrategy[V, E] { 62 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 63 | -other.numDependencies 64 | 65 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 66 | -cell.numDependencies 67 | } 68 | 69 | class SourcesWithManySourcesLast[V, E >: Null] extends SchedulingStrategy[V, E] { 70 | override def 
calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 71 | other.numDependencies 72 | 73 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 74 | cell.numDependencies 75 | } 76 | 77 | class TargetsWithManySourcesFirst[V, E >: Null] extends SchedulingStrategy[V, E] { 78 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 79 | -dependentCell.numDependencies 80 | 81 | override def calcPriority(cell: Cell[V, E], value: Try[V]): Int = 0 82 | } 83 | 84 | class TargetsWithManySourcesLast[V, E >: Null] extends SchedulingStrategy[V, E] { 85 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 86 | dependentCell.numDependencies 87 | 88 | override def calcPriority(dependentCell: Cell[V, E], value: Try[V]): Int = 0 89 | } 90 | 91 | class TargetsWithManyTargetsFirst[V, E >: Null] extends SchedulingStrategy[V, E] { 92 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 93 | -dependentCell.numDependentCells // fixed: was numDependencies (copy-paste from TargetsWithManySourcesFirst); this class ranks by outgoing dependents, cf. TargetsWithManyTargetsLast 94 | 95 | override def calcPriority(dependentCell: Cell[V, E], value: Try[V]): Int = 0 96 | } 97 | 98 | class TargetsWithManyTargetsLast[V, E >: Null] extends SchedulingStrategy[V, E] { 99 | override def calcPriority(dependentCell: Cell[V, E], other: Cell[V, E], value: Try[ValueOutcome[V]]): Int = 100 | dependentCell.numDependentCells 101 | 102 | override def calcPriority(dependentCell: Cell[V, E], value: Try[V]): Int = 0 103 | } 104 | -------------------------------------------------------------------------------- /core/src/main/scala/com/phaller/rasync/util/Counter.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.util 2 | 3 | import java.util.concurrent.ConcurrentLinkedQueue 4 | import java.util.concurrent.atomic.AtomicLong 5 | 6 | import scala.collection.concurrent.TrieMap 7 | 8 | 
object Counter { 9 | 10 | private val profilingCounter = TrieMap.empty[String, AtomicLong] 11 | 12 | def inc(key: String, n: Int = 1): Long = { 13 | profilingCounter.getOrElseUpdate(key, new AtomicLong(0)).addAndGet(n.toLong) 14 | } 15 | 16 | def dec(key: String, n: Int = 1): Long = { 17 | profilingCounter.getOrElseUpdate(key, new AtomicLong(0)).addAndGet(-n.toLong) 18 | } 19 | 20 | def get(key: String): Long = { 21 | profilingCounter.getOrElse(key, new AtomicLong(0)).get() 22 | } 23 | 24 | def reset(): Unit = { 25 | profilingCounter.clear() 26 | } 27 | 28 | override def toString: String = { 29 | val counters = s"\tCounters\n" + 30 | f"\t\t${"Number"}%10s ${"Name"}%-90s\n" + 31 | f"\t\t${"------"}%10s ${"----"}%-90s\n" + 32 | profilingCounter.toSeq.sortBy(x ⇒ x._1).map { x ⇒ 33 | f"\t\t${x._2.get}%10s ${x._1}%-90s\n" 34 | }.mkString 35 | 36 | counters 37 | } 38 | } 39 | 40 | class Statstics[T] { 41 | 42 | private val data: ConcurrentLinkedQueue[T] = new ConcurrentLinkedQueue[T]() 43 | 44 | def add(x: T) = data.add(x) 45 | 46 | def reset(): Unit = { 47 | data.clear() 48 | } 49 | 50 | def toArray(): Array[T] = data.toArray.asInstanceOf[Array[T]] 51 | 52 | override def toString: String = { 53 | data.toArray.asInstanceOf[Array[T]].mkString("\n") 54 | } 55 | } 56 | 57 | object InOutStats extends Statstics[(Int, Int)] 58 | -------------------------------------------------------------------------------- /core/src/test/java/pureness/Demo.java: -------------------------------------------------------------------------------- 1 | /* BSD 2-Clause License: 2 | * Copyright (c) 2009 - 2015 3 | * Software Technology Group 4 | * Department of Computer Science 5 | * Technische Universität Darmstadt 6 | * All rights reserved. 
7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * - Redistributions of source code must retain the above copyright notice, 12 | * this list of conditions and the following disclaimer. 13 | * - Redistributions in binary form must reproduce the above copyright notice, 14 | * this list of conditions and the following disclaimer in the documentation 15 | * and/or other materials provided with the distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 | * POSSIBILITY OF SUCH DAMAGE. 28 | */ 29 | package pureness; 30 | 31 | /** 32 | * Some Demo code to test/demonstrate the complexity related to calculating the purity of 33 | * methods in the presence of mutual recursive methods. 
34 | * 35 | * @author Michael Eichberg 36 | * 37 | */ 38 | class Demo { 39 | 40 | private static final int myValue = -1; /* effectivelyFinal */ 41 | 42 | private Demo() {/* empty */ 43 | } 44 | 45 | public Demo identity() { 46 | return this; 47 | } 48 | 49 | public static int pureThoughItUsesField(int i, int j) { 50 | return i % j * myValue; 51 | } 52 | 53 | public static int pureThoughItUsesField2(int i, int j) { 54 | return i * j * myValue; 55 | } 56 | 57 | public static int simplyPure(int i, int j) { 58 | return i % 3 == 0 ? simplyPure(i, 0) : simplyPure(0, j); 59 | } 60 | 61 | public static int impure(int i) { 62 | return (int) (i * System.nanoTime()); 63 | } 64 | 65 | // 66 | // The following two methods are mutually dependent and are pure. 67 | // 68 | static int foo(int i) { 69 | return i < 0 ? i : bar(i - 10); 70 | } 71 | 72 | static int bar(int i) { 73 | return i % 2 == 0 ? i : foo(i - 1); 74 | } 75 | 76 | // The following methods are not directly involved in a 77 | // mutually recursive dependency, but require information about a set of 78 | // mutually recursive dependent methods. 79 | static int fooBar(int i) { // also observed by other methods 80 | return foo(i) + bar(i); 81 | } 82 | 83 | static int barFoo(int i) { 84 | return foo(i) + bar(i); // not observed 85 | } 86 | 87 | // The following two methods are mutually dependent and use an impure method. 88 | // 89 | 90 | static int npfoo(int i) { 91 | return i < 0 ? simplyPure(i, 0) : npbar(i - 10); 92 | } 93 | 94 | static int npbar(int i) { 95 | return i % 2 == 0 ? impure(i) : foo(i - 1); 96 | } 97 | 98 | // 99 | // All three methods are actually pure but have a dependency on each other... 100 | // 101 | static int m1(int i) { 102 | return i < 0 ? i : m2(i - 10); 103 | } 104 | 105 | static int m2(int i) { 106 | return i % 2 == 0 ? i : m3(i - 1); 107 | } 108 | 109 | static int m3(int i) { 110 | return i % 4 == 0 ? 
i : m1(i - 1); 111 | } 112 | 113 | // All three methods are depending on each other, but they are NOT pure, because 114 | // one calls an impure method. 115 | // 116 | 117 | static int m1np(int i) { 118 | return i < 0 ? i : m2np(i - 10); 119 | } 120 | 121 | static int m2np(int i) { 122 | return i % 2 == 0 ? i : m3np(i - 1); 123 | } 124 | 125 | static int m3np(int i) { 126 | int k = m1(i); 127 | int j = m1np(k - 1); 128 | return impure(j); 129 | } 130 | 131 | // The following method is pure, but only if we know the pureness of the target method 132 | // which we don't know if do not analyze the JDK! 133 | // 134 | 135 | static int cpure(int i) { 136 | return Math.abs(i) * 21; 137 | } 138 | 139 | static int cpureCallee(int i) { 140 | return cpure(i / 21); 141 | } 142 | 143 | static int cpureCalleeCallee1(int i) { 144 | return cpureCallee(i / 21); 145 | } 146 | 147 | static int cpureCalleeCallee2(int i) { 148 | return cpureCallee(i / 21); 149 | } 150 | 151 | static int cpureCalleeCalleeCallee(int i) { 152 | return cpureCalleeCallee1(i / 21) * cpureCalleeCallee2(i / 21); 153 | } 154 | 155 | static int cpureCalleeCalleeCalleCallee(int i) { 156 | return cpureCalleeCalleeCallee(1299); 157 | } 158 | 159 | // All methods are involved in multiple cycles of dependent methods 160 | // one calls an impure method. 161 | // 162 | 163 | static int mm1(int i) { 164 | return i < 0 ? i : mm2(i - 10); 165 | } 166 | 167 | static int mm2(int i) { 168 | return i % 2 == 0 ? mm1(-i) : mm3(i - 1); 169 | } 170 | 171 | static int mm3(int i) { 172 | int j = m3(i); 173 | int k = mm2(j); 174 | return m1np(k); 175 | } 176 | 177 | // Two cycles connected by a "weak link" (fooBar) 178 | // 179 | 180 | static int cm1(int i) { 181 | return i < 0 ? i : cm2(i - 10); 182 | } 183 | 184 | static int cm2(int i) { 185 | return i % 2 == 0 ? cm1(-i) : fooBar(i - 1); 186 | } 187 | 188 | // A classical strongly connected component 189 | // 190 | static int scc0(int i) { 191 | return i < 0 ? 
scc2(i - 10) : scc1(i - 111); 192 | } 193 | 194 | static int scc1(int i) { 195 | return i % 2 == 0 ? 32424 : scc3(i - 1); 196 | } 197 | 198 | static int scc2(int i) { 199 | return i % 2 == 0 ? 1001 : scc3(i - 3); 200 | } 201 | 202 | static int scc3(int i) { 203 | return scc0(12121 / i); 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/Backoff.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.test 2 | 3 | // Exponential backoff 4 | // Taken, and adapted from ChemistrySet, see: https://github.com/aturon/ChemistrySet 5 | 6 | private object Backoff { 7 | val maxCount: Int = 14 8 | var procs = Runtime.getRuntime.availableProcessors 9 | def apply() = new Backoff 10 | } 11 | private final class Backoff { 12 | import Backoff._ 13 | 14 | var seed: Long = Thread.currentThread.getId 15 | var count = 0 16 | 17 | def getCount() = count 18 | 19 | // compute6 from j.u.c. 
20 | private def noop(times: Int = 1): Int = { 21 | var seed: Int = 1 22 | var n: Int = times 23 | while (seed == 1 || n > 0) { // need to inspect seed or is optimized away 24 | seed = seed ^ (seed << 1) 25 | seed = seed ^ (seed >>> 3) 26 | seed = seed ^ (seed << 10) 27 | n -= 1 28 | } 29 | seed 30 | } 31 | 32 | def once() { 33 | if (count == 0) 34 | count = 1 35 | else { 36 | seed = Random.nextSeed(seed) 37 | noop(Random.scale(seed, (procs - 1) << (count + 2))) 38 | if (count < maxCount) count += 1 39 | } 40 | } 41 | } 42 | 43 | // an unsynchronized, but thread-varying RNG 44 | private final class Random(var seed: Long = 1) { 45 | def nextSeed { 46 | seed = Random.nextSeed(seed) 47 | } 48 | 49 | def next(max: Int): Int = { 50 | nextSeed 51 | Random.scale(seed, max) 52 | } 53 | } 54 | private object Random { 55 | def nextSeed(oldSeed: Long): Long = { 56 | var seed = oldSeed 57 | seed = seed ^ (seed << 13) 58 | seed = seed ^ (seed >>> 7) 59 | seed = seed ^ (seed << 17) 60 | seed 61 | } 62 | def scale(seed: Long, max: Int): Int = { 63 | if (max <= 0) max else { 64 | val r = (seed % max).toInt 65 | if (r < 0) -r else r 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/ExceptionSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import scala.language.implicitConversions 5 | import java.util.concurrent.{ CountDownLatch, TimeUnit } 6 | 7 | import scala.concurrent.duration._ 8 | import com.phaller.rasync.lattice._ 9 | import com.phaller.rasync.pool.HandlerPool 10 | import com.phaller.rasync.cell._ 11 | import com.phaller.rasync.lattice.lattices.{ NaturalNumberKey, NaturalNumberLattice } 12 | import org.scalatest.FunSuite 13 | 14 | import scala.concurrent.Await 15 | import scala.util.{ Failure, Success } 16 | 17 | class ExceptionSuite extends FunSuite { 18 | /* 19 | * This 
Suite contains test cases, where exceptions are thrown 20 | * - when initializing cells, 21 | * - in dependency callbacks 22 | * - in a Key's methods (fallback, resolve) 23 | * 24 | * It also tests, whether completed cells (with Success or Failure) 25 | * behave correclty wrt. exceptions in dependencies and keys. 26 | */ 27 | 28 | implicit val naturalNumberUpdater: Updater[Int] = Updater.latticeToUpdater(new NaturalNumberLattice) 29 | implicit def strToIntKey(s: String): NaturalNumberKey.type = NaturalNumberKey 30 | 31 | test("exception in init") { 32 | // If the init method throws an exception e, 33 | // the cell is completed with Failure(e) 34 | val latch = new CountDownLatch(1) 35 | val pool = HandlerPool[Int](NaturalNumberKey) 36 | val cell = pool.mkCell(_ => { 37 | throw new Exception("foo") 38 | }) 39 | 40 | // cell should be completed as a consequence 41 | cell.onComplete(_ => latch.countDown()) 42 | 43 | assert(!cell.isComplete) 44 | cell.trigger() 45 | 46 | // wait for cell to be completed 47 | latch.await() 48 | 49 | assert(cell.isComplete) 50 | // check, if cell has been completed with an exception 51 | cell.getTry() match { 52 | case Success(_) => assert(false) 53 | case Failure(e) => assert(e.getMessage == "foo") 54 | } 55 | 56 | pool.shutdown() 57 | } 58 | 59 | test("exception in concurrent callback") { 60 | // If the callback thrown an exception e, 61 | // the dependent cell is completed with Failure e 62 | val latch = new CountDownLatch(1) 63 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](NaturalNumberKey) 64 | val c0 = CellCompleter() 65 | val cell = pool.mkCell(c => { 66 | // build up dependency, throw exeption, if c0's value changes 67 | c.when(c0.cell)(_ => throw new Exception("foo")) 68 | NoOutcome 69 | }) 70 | 71 | // cell should be completed as a consequence 72 | cell.onComplete(_ => latch.countDown()) 73 | cell.trigger() 74 | 75 | assert(!cell.isComplete) 76 | 77 | // trigger dependency, s.t. 
callback is called 78 | c0.putFinal(1) 79 | 80 | // wait for cell to be completed 81 | latch.await() 82 | 83 | assert(cell.isComplete) 84 | // check, if cell has been completed with an exception 85 | cell.getTry() match { 86 | case Success(_) => assert(false) 87 | case Failure(e) => assert(e.getMessage == "foo") 88 | } 89 | 90 | pool.shutdown() 91 | } 92 | 93 | test("exception in sequential callback") { 94 | // If the callback thrown an exception e, 95 | // the dependent cell is completed with Failure e 96 | val latch = new CountDownLatch(1) 97 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](NaturalNumberKey) 98 | val c0 = CellCompleter() 99 | val cell = pool.mkSequentialCell(c => { 100 | // build up dependency, throw exeption, if c0's value changes 101 | c.when(c0.cell)(_ => throw new Exception("foo")) 102 | NoOutcome 103 | }) 104 | 105 | // cell should be completed as a consequence 106 | cell.onComplete(_ => latch.countDown()) 107 | cell.trigger() 108 | 109 | assert(!cell.isComplete) 110 | 111 | // trigger dependency, s.t. callback is called 112 | c0.putFinal(1) 113 | 114 | // wait for cell to be completed 115 | latch.await() 116 | 117 | assert(cell.isComplete) 118 | // check, if cell has been completed with an exception 119 | cell.getTry() match { 120 | case Success(_) => assert(false) 121 | case Failure(e) => assert(e.getMessage == "foo") 122 | } 123 | 124 | pool.shutdown() 125 | } 126 | 127 | test("exception in Key.resolve") { 128 | // If Key.resolved is called for cSSC of cells c 129 | // and throws an exception e, all cells c are completed 130 | // with Failure(e). 
131 | 132 | // Define a key that throws exceptions 133 | object ExceptionKey extends Key[Int, Null] { 134 | override def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = 135 | throw new Exception("foo") 136 | 137 | override def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = 138 | throw new Exception("bar") 139 | } 140 | 141 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](ExceptionKey) 142 | val c0 = CellCompleter() 143 | val c1 = CellCompleter() 144 | val c2 = CellCompleter() 145 | val c3 = CellCompleter() 146 | val c4 = CellCompleter() 147 | 148 | // Create a cSSC 149 | c1.cell.when(c0.cell)(_ => NoOutcome) 150 | c2.cell.when(c1.cell)(_ => NoOutcome) 151 | c3.cell.when(c1.cell)(_ => NoOutcome) 152 | c4.cell.when(c2.cell)(_ => NoOutcome) 153 | c4.cell.when(c3.cell)(_ => NoOutcome) 154 | c0.cell.when(c4.cell)(_ => NoOutcome) 155 | 156 | // wait for the cycle to be resolved by ExceptionKey.resolve 157 | Await.ready(pool.quiescentResolveCell, 2.seconds) 158 | 159 | pool.onQuiescenceShutdown() 160 | 161 | // check for exceptions in all cells of the cycle 162 | for (c ← List(c0, c1, c2, c3, c4)) 163 | c.cell.getTry() match { 164 | case Success(_) => assert(false) 165 | case Failure(e) => assert(e.getMessage == "foo") 166 | } 167 | } 168 | 169 | test("exception in Key.fallback") { 170 | // If Key.fallback is called for a cell c 171 | // and throws an exception e, c is completed 172 | // with Failure(e). 173 | // A depedent cell receives the failure. 
174 | 175 | // Define a key that throws exceptions 176 | object ExceptionKey extends Key[Int, Null] { 177 | override def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = 178 | throw new Exception("foo") 179 | 180 | override def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = 181 | throw new Exception("bar") 182 | } 183 | 184 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](ExceptionKey) 185 | val triggerLatch = new CountDownLatch(2) 186 | val c0 = CellCompleter[Int, Null](_ => { triggerLatch.countDown(); NoOutcome }) 187 | val c1 = CellCompleter[Int, Null](_ => { triggerLatch.countDown(); NoOutcome }) 188 | 189 | c1.cell.trigger() 190 | c0.cell.trigger() 191 | triggerLatch.await() 192 | 193 | // Create a dependency, c1 "recover" from the exception in c0 by completing with 10 194 | c1.cell.when(c0.cell)(_ => FinalOutcome(10)) 195 | 196 | // wait for c0 to be resolved by ExceptionKey.fallback 197 | Await.ready(pool.quiescentResolveCell, 2.seconds) 198 | 199 | // c0 should have been resolved with Failure("bar") 200 | c0.cell.getTry() match { 201 | case Success(_) => assert(false) 202 | case Failure(e) => assert(e.getMessage == "bar") 203 | } 204 | // c1 should have ignored this failure and contain 10 205 | assert(c1.cell.isComplete) 206 | assert(c1.cell.getResult() == 10) 207 | 208 | pool.shutdown() 209 | } 210 | 211 | test("exception after freeze") { 212 | // after a cell has been completed, an exception in one 213 | // of its callbacks should be ignored 214 | val latch = new CountDownLatch(1) 215 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](NaturalNumberKey) 216 | val c0 = CellCompleter() 217 | val c1 = CellCompleter() 218 | val c2 = CellCompleter() 219 | 220 | // Create a dependency, c1 "recover" from the exception in c0 by completing with 10 221 | c2.cell.when(c0.cell)(_ => FinalOutcome(10)) 222 | c2.cell.when(c1.cell)(_ => throw new Exception("BOOM")) 
223 | c2.cell.onComplete(_ => latch.countDown()) 224 | 225 | // trigger completion of cell2 226 | c0.putFinal(0) 227 | // wait for completion of cell2 228 | latch.await(2, TimeUnit.SECONDS) 229 | // trigger an exception-throwing callback (this should be ignored) 230 | c1.putFinal(1) 231 | 232 | pool.onQuiescent(() => { 233 | // c2 should have been completed after c0.putFinal(…), 234 | // so the exception should not matter 235 | assert(c2.cell.isComplete) 236 | assert(c2.cell.getResult() == 10) 237 | 238 | pool.shutdown() 239 | }) 240 | } 241 | 242 | test("put after completion with exception") { 243 | // after a cell has been completed with an exception, 244 | // any subsequent put should be ignored. 245 | val latch = new CountDownLatch(1) 246 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](NaturalNumberKey) 247 | val c0 = CellCompleter() 248 | val c1 = CellCompleter() 249 | val c2 = CellCompleter() 250 | 251 | // Create dependencies 252 | c2.cell.when(c0.cell)(_ => FinalOutcome(10)) 253 | c2.cell.when(c1.cell)(_ => throw new Exception("foo")) 254 | c2.cell.onComplete(_ => latch.countDown()) 255 | 256 | c1.putFinal(1) 257 | latch.await(2, TimeUnit.SECONDS) 258 | c0.putFinal(0) 259 | 260 | pool.onQuiescent(() => { 261 | 262 | // c2 should have been completed after c1.putFinal(…), 263 | // so the FinalOutcome(10) should be ignored 264 | assert(c2.cell.isComplete) 265 | c2.cell.getTry() match { 266 | case Success(_) => assert(false) 267 | case Failure(e) => assert(e.getMessage == "foo") 268 | } 269 | 270 | pool.shutdown() 271 | }) 272 | 273 | } 274 | 275 | test("do not catch fatal exception") { 276 | // If an instance of Error is thrown, 277 | // this will not be used as value for 278 | // the respective cell.
279 | val latch1 = new CountDownLatch(1) 280 | val latch2 = new CountDownLatch(1) 281 | implicit val pool: HandlerPool[Int, Null] = new HandlerPool[Int, Null](NaturalNumberKey, unhandledExceptionHandler = _ => latch1.countDown()) 282 | val c0 = CellCompleter() 283 | val cell = pool.mkCell(c => { 284 | // build up dependency, throw error, if c0's value changes 285 | c.when(c0.cell)(_ => throw new Error("It's OK, if I am not caught. See description")) 286 | NoOutcome 287 | }) 288 | 289 | cell.trigger() 290 | 291 | // trigger dependency, s.t. callback is called 292 | // this causes the error to be thrown. 293 | // This should not be handled internally. 294 | c0.putFinal(1) 295 | // error should be caught by exception handler 296 | latch1.await() 297 | 298 | assert(!cell.isComplete) 299 | cell.completer.putFinal(10) 300 | // cell should be completed 301 | cell.onComplete(_ => latch2.countDown()) 302 | 303 | // wait for cell to be completed 304 | latch2.await() 305 | 306 | assert(cell.isComplete) 307 | // check, if cell has been completed with the put value (not with the Error) 308 | assert(cell.getResult() == 10) 309 | 310 | pool.shutdown() 311 | } 312 | 313 | } 314 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/InternalBaseSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import cell._ 5 | import com.phaller.rasync.lattice.Updater 6 | import org.scalatest.FunSuite 7 | import pool.HandlerPool 8 | import lattice.IntUpdater 9 | 10 | import scala.util.Try 11 | 12 | class InternalBaseSuite extends FunSuite { 13 | 14 | implicit val stringIntUpdater: Updater[Int] = new IntUpdater 15 | 16 | def if10thenFinal20(updates: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = 17 | ifXthenFinalY(10, 20)(updates) 18 | 19 | def ifXthenFinalY(x: Int, y: Int)(upd: Iterable[(Cell[Int, Null],
Try[ValueOutcome[Int]])]): Outcome[Int] = { 20 | val c = upd.head._2 21 | if (c.get.value == x) FinalOutcome(y) else NoOutcome 22 | } 23 | 24 | test("cellDependencies: By adding dependencies") { 25 | implicit val pool = new HandlerPool[Int, Null] 26 | val completer1 = CellCompleter[Int, Null]() 27 | val completer2 = CellCompleter[Int, Null]() 28 | val cell1 = completer1.cell 29 | val cell2 = completer2.cell 30 | cell1.when(cell2)(if10thenFinal20) 31 | cell1.when(cell2)(if10thenFinal20) 32 | 33 | assert(cell1.numDependencies == 1) 34 | assert(cell2.numDependencies == 0) 35 | } 36 | 37 | test("cellDependencies: By removing dependencies") { 38 | implicit val pool = new HandlerPool[Int, Null] 39 | val completer1 = CellCompleter[Int, Null]() 40 | val completer2 = CellCompleter[Int, Null]() 41 | val cell1 = completer1.cell 42 | val cell2 = completer2.cell 43 | cell1.when(cell2)(if10thenFinal20) 44 | cell1.when(cell2)(if10thenFinal20) 45 | 46 | completer1.putFinal(0) 47 | 48 | pool.onQuiescent(() => { 49 | assert(cell1.numDependencies == 0) 50 | assert(cell2.numDependencies == 0) 51 | }) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/KeyResolutionSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.test 2 | 3 | import java.util.concurrent.CountDownLatch 4 | 5 | import com.phaller.rasync.cell._ 6 | import com.phaller.rasync.lattice._ 7 | import com.phaller.rasync.pool.HandlerPool 8 | import com.phaller.rasync.test.lattice.IntUpdater 9 | import org.scalatest.FunSuite 10 | 11 | import scala.concurrent.Await 12 | import scala.concurrent.duration._ 13 | import scala.util.{ Failure, Success, Try } 14 | 15 | /** 16 | * Tests where cycles or independent cells 17 | * need to be resolved via a Key.
18 | * This test suite contains cycles that only consist 19 | * of a single type of Cells and do not mix 20 | * SequentialCells and ConcurrentCells. 21 | * For the mixed case, see MixedKeyResolutionSuite 22 | */ 23 | abstract class KeyResolutionSuite extends FunSuite with CompleterFactory { 24 | def forwardAsNext(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = { 25 | val c = upd.head._2 26 | NextOutcome(c.get.value) 27 | } 28 | 29 | implicit val intUpdater: Updater[Int] = new IntUpdater 30 | 31 | test("DefaultKey.resolve") { 32 | val k = new DefaultKey[Int, Null] 33 | implicit val pool = HandlerPool(k) 34 | val completer1 = mkCompleter[Int] 35 | val completer2 = mkCompleter[Int] 36 | completer1.cell.when(completer2.cell)(forwardAsNext) 37 | completer2.cell.when(completer1.cell)(forwardAsNext) 38 | completer1.putNext(5) 39 | Await.ready(pool.quiescentResolveCell, 2.seconds) 40 | assert(completer1.cell.isComplete) 41 | assert(completer2.cell.isComplete) 42 | assert(completer1.cell.getResult() == 5) 43 | assert(completer2.cell.getResult() == 5) 44 | pool.shutdown() 45 | } 46 | 47 | test("DefaultKey.fallback") { 48 | val k = DefaultKey[Int] 49 | implicit val pool = new HandlerPool[Int, Null](k) 50 | val completer1 = mkCompleter[Int] 51 | completer1.cell.trigger() 52 | completer1.putNext(5) 53 | Await.ready(pool.quiescentResolveCell, 2.seconds) 54 | assert(completer1.cell.isComplete) 55 | assert(completer1.cell.getResult() == 5) 56 | pool.shutdown() 57 | } 58 | 59 | test("DefaultKey.fallback with additional depender") { 60 | val k = DefaultKey[Int] 61 | implicit val pool = HandlerPool(k) 62 | val completer1 = mkCompleter[Int] 63 | val completer2 = mkCompleter[Int] 64 | completer2.cell.when(completer1.cell)(_ => FinalOutcome(10)) 65 | completer2.cell.trigger() 66 | completer1.putNext(5) 67 | Await.ready(pool.quiescentResolveCell, 2.seconds) 68 | assert(completer1.cell.isComplete) 69 | assert(completer1.cell.getResult() == 5) 70 |
assert(completer2.cell.isComplete) 71 | assert(completer2.cell.getResult() == 10) 72 | pool.shutdown() 73 | } 74 | 75 | test("when: cSCC with constant resolution") { 76 | val latch = new CountDownLatch(4) 77 | 78 | object ConstantKey extends Key[Int, Null] { 79 | val RESOLVEDINCYCLE = 5 80 | val RESOLVEDASINDPENDENT = 10 81 | 82 | override def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDINCYCLE)) 83 | 84 | override def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDASINDPENDENT)) 85 | } 86 | 87 | implicit val pool = HandlerPool(ConstantKey) 88 | 89 | val completer1 = mkCompleter[Int] 90 | val cell1 = completer1.cell 91 | val completer2 = mkCompleter[Int] 92 | val cell2 = completer2.cell 93 | val completer3 = mkCompleter[Int] 94 | val cell3 = completer3.cell 95 | val completer4 = mkCompleter[Int] 96 | val cell4 = completer4.cell 97 | 98 | // set unwanted values: 99 | completer1.putNext(-1) 100 | completer2.putNext(-1) 101 | completer3.putNext(-1) 102 | completer4.putNext(-1) 103 | 104 | // create a cSCC, assert that none of the callbacks get called again. 
105 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 106 | case FinalOutcome(_) => 107 | NoOutcome 108 | case NextOutcome(-1) => 109 | NoOutcome 110 | case _ => 111 | assert(false) 112 | NextOutcome(-2) 113 | } 114 | 115 | cell1.when(cell2)(c) 116 | cell1.when(cell3)(c) 117 | cell2.when(cell4)(c) 118 | cell3.when(cell4)(c) 119 | cell4.when(cell1)(c) 120 | 121 | for (c <- List(cell1, cell2, cell3, cell4)) 122 | c.onComplete { 123 | case Success(v) => 124 | assert(v === ConstantKey.RESOLVEDINCYCLE) 125 | assert(c.numDependencies === 0) 126 | latch.countDown() 127 | case Failure(e) => 128 | assert(false) 129 | latch.countDown() 130 | } 131 | 132 | // resolve cells 133 | val fut = pool.quiescentResolveCell 134 | Await.result(fut, 2.seconds) 135 | latch.await() 136 | 137 | pool.onQuiescenceShutdown() 138 | } 139 | 140 | test("when: cSCC with default resolution") { 141 | val latch = new CountDownLatch(4) 142 | 143 | implicit val pool = new HandlerPool[Int, Null] 144 | 145 | val completer1 = mkCompleter[Int] 146 | val cell1 = completer1.cell 147 | val completer2 = mkCompleter[Int] 148 | val cell2 = completer2.cell 149 | val completer3 = mkCompleter[Int] 150 | val cell3 = completer3.cell 151 | val completer4 = mkCompleter[Int] 152 | val cell4 = completer4.cell 153 | 154 | // set unwanted values: 155 | completer1.putNext(-1) 156 | completer2.putNext(-1) 157 | completer3.putNext(-1) 158 | completer4.putNext(-1) 159 | 160 | // create a cSCC, assert that none of the callbacks get called again. 
161 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 162 | case FinalOutcome(_) => 163 | NoOutcome 164 | case NextOutcome(-1) => 165 | NoOutcome 166 | case _ => 167 | assert(false) 168 | NextOutcome(-2) 169 | } 170 | 171 | cell1.when(cell2)(c) 172 | cell1.when(cell3)(c) 173 | cell2.when(cell4)(c) 174 | cell3.when(cell4)(c) 175 | cell4.when(cell1)(c) 176 | 177 | for (c <- List(cell1, cell2, cell3, cell4)) 178 | c.onComplete { 179 | case Success(v) => 180 | assert(v === -1) 181 | assert(c.numDependencies === 0) 182 | latch.countDown() 183 | case Failure(e) => 184 | assert(false) 185 | latch.countDown() 186 | } 187 | 188 | // resolve cells 189 | val fut = pool.quiescentResolveCell 190 | Await.result(fut, 2.seconds) 191 | latch.await() 192 | 193 | pool.onQuiescenceShutdown() 194 | } 195 | 196 | test("when: cycle with default resolution") { 197 | sealed trait Value 198 | case object Bottom extends Value 199 | case object ShouldNotHappen extends Value 200 | 201 | implicit object ValueUpdater extends Updater[Value] { 202 | override def update(v1: Value, v2: Value): Value = v2 203 | override val bottom: Value = Bottom 204 | } 205 | 206 | implicit val pool: HandlerPool[Value, Null] = HandlerPool[Value] 207 | 208 | for (i <- 1 to 100) { 209 | val completer1 = mkCompleter[Value] 210 | val completer2 = mkCompleter[Value] 211 | val cell1 = completer1.cell 212 | val cell2 = completer2.cell 213 | 214 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 215 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 216 | 217 | val fut = pool.quiescentResolveCell 218 | Await.ready(fut, 1.minutes) 219 | 220 | assert(cell1.getResult() != ShouldNotHappen) 221 | assert(cell2.getResult() != ShouldNotHappen) 222 | } 223 | 224 | pool.onQuiescenceShutdown() 225 | } 226 | 227 | test("when: cycle with constant resolution") { 228 | sealed trait Value 229 | case object Bottom extends Value 230 | case object OK extends Value 231 | case 
object ShouldNotHappen extends Value 232 | 233 | implicit object ValueUpdater extends Updater[Value] { 234 | override def update(v1: Value, v2: Value): Value = if (v1 == Bottom) v2 else v1 // TODO or throw? 235 | override val bottom: Value = Bottom 236 | } 237 | 238 | object TheKey extends DefaultKey[Value, Null] { 239 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 240 | cells.map(cell => (cell, OK)) 241 | } 242 | } 243 | 244 | implicit val pool = HandlerPool[Value](TheKey) 245 | 246 | for (i <- 1 to 100) { 247 | val completer1 = mkCompleter[Value] 248 | val completer2 = mkCompleter[Value] 249 | val cell1 = completer1.cell 250 | val cell2 = completer2.cell 251 | 252 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 253 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 254 | 255 | val fut = pool.quiescentResolveCell 256 | Await.ready(fut, 1.minutes) 257 | 258 | assert(cell1.getResult() == OK) 259 | assert(cell2.getResult() == OK) 260 | } 261 | 262 | pool.onQuiescenceShutdown() 263 | } 264 | 265 | test("whenNext: cycle with additional outgoing dep") { 266 | sealed trait Value 267 | case object Bottom extends Value 268 | case object Resolved extends Value 269 | case object Fallback extends Value 270 | case object OK extends Value 271 | case object ShouldNotHappen extends Value 272 | 273 | implicit object ValueUpdater extends Updater[Value] { 274 | override def update(v1: Value, v2: Value): Value = v2 275 | override val bottom: Value = Bottom 276 | } 277 | 278 | object TheKey extends Key[Value, Null] { 279 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 280 | cells.map(cell => (cell, Resolved)) 281 | } 282 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 283 | cells.map(cell => (cell, Fallback)) 284 | } 285 | } 286 | 287 | implicit val pool = HandlerPool[Value](TheKey) 288 | val completer1 = 
mkCompleter[Value] 289 | val completer2 = mkCompleter[Value] 290 | val cell1 = completer1.cell 291 | val cell2 = completer2.cell 292 | val out = mkCompleter[Value] 293 | 294 | // let `cell1` and `cell2` form a cycle 295 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 296 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 297 | 298 | // the cycle is dependent on incoming information from `out` 299 | cell2.when(out.cell)(_ => NextOutcome(ShouldNotHappen)) 300 | 301 | // resolve the independent cell `out` and the cycle 302 | val fut = pool.quiescentResolveCell 303 | Await.ready(fut, 1.minutes) 304 | 305 | pool.onQuiescenceShutdown() 306 | 307 | assert(cell1.getResult() != ShouldNotHappen) 308 | assert(cell2.getResult() != ShouldNotHappen) 309 | assert(out.cell.getResult() == Fallback) 310 | } 311 | 312 | test("whenNext: cycle with additional incoming dep") { 313 | sealed trait Value 314 | case object Bottom extends Value 315 | case object Dummy extends Value 316 | case object Resolved extends Value 317 | case object OK extends Value 318 | case object ShouldNotHappen extends Value 319 | 320 | implicit object ValueUpdater extends Updater[Value] { 321 | override def update(v1: Value, v2: Value): Value = v2 322 | override val bottom: Value = Bottom 323 | } 324 | 325 | object TheKey extends Key[Value, Null] { 326 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 327 | cells.map(cell => (cell, Resolved)) 328 | } 329 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 330 | Seq() 331 | } 332 | } 333 | 334 | implicit val pool = HandlerPool[Value](TheKey) 335 | val completer1 = mkCompleter[Value] 336 | val completer2 = mkCompleter[Value] 337 | val cell1 = completer1.cell 338 | val cell2 = completer2.cell 339 | val in = mkCompleter[Value] 340 | in.putNext(Dummy) 341 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 342 | cell2.when(cell1)(_ => 
NextOutcome(ShouldNotHappen)) 343 | in.putNext(ShouldNotHappen) 344 | in.cell.when(cell1)(_ => FinalOutcome(OK)) 345 | 346 | val fut = pool.quiescentResolveCell 347 | Await.ready(fut, 1.minutes) 348 | 349 | pool.onQuiescenceShutdown() 350 | 351 | assert(cell1.getResult() != ShouldNotHappen) 352 | assert(cell2.getResult() != ShouldNotHappen) 353 | assert(in.cell.getResult() == OK) 354 | } 355 | } 356 | 357 | class ConcurrentKeyResolutionSuite extends KeyResolutionSuite with ConcurrentCompleterFactory 358 | 359 | class SequentialKeyResolutionSuite extends KeyResolutionSuite with SequentialCompleterFactory 360 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/LatticeSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import com.phaller.rasync.lattice._ 5 | import com.phaller.rasync.lattice.lattices.NaturalNumberLattice 6 | import org.scalatest.FunSuite 7 | 8 | class LatticeSuite extends FunSuite { 9 | test("lteq 1") { 10 | val l = new NaturalNumberLattice 11 | assert(l.lteq(1, 2)) 12 | } 13 | 14 | test("lteq 2") { 15 | val l = new NaturalNumberLattice 16 | assert(l.lteq(2, 2)) 17 | } 18 | 19 | test("lt 1") { 20 | val l = new NaturalNumberLattice 21 | assert(l.lt(1, 2)) 22 | } 23 | 24 | test("lt 2") { 25 | val l = new NaturalNumberLattice 26 | assert(!l.lt(2, 1)) 27 | } 28 | 29 | test("gteq 1") { 30 | val l = new NaturalNumberLattice 31 | assert(l.gteq(3, 2)) 32 | } 33 | 34 | test("gteq 2") { 35 | val l = new NaturalNumberLattice 36 | assert(l.gteq(2, 2)) 37 | } 38 | 39 | test("gt 1") { 40 | val l = new NaturalNumberLattice 41 | assert(!l.gt(1, 2)) 42 | } 43 | 44 | test("gt 2") { 45 | val l = new NaturalNumberLattice 46 | assert(l.gt(2, 1)) 47 | } 48 | 49 | test("tryCompare 1") { 50 | val l = new NaturalNumberLattice 51 | assert(l.tryCompare(1, 2).get < 0) 52 | } 53 | 54 | test("tryCompare 2") { 
55 | val l = new NaturalNumberLattice 56 | assert(l.tryCompare(2, 2).get == 0) 57 | } 58 | 59 | test("tryCompare 3") { 60 | val l = new NaturalNumberLattice 61 | assert(l.tryCompare(2, 1).get > 0) 62 | } 63 | 64 | test("join") { 65 | val l = new NaturalNumberLattice 66 | assert(l.join(1, 2) == 2) 67 | } 68 | 69 | test("trivial 1") { 70 | object A 71 | object B 72 | val l = PartialOrderingWithBottom.trivial[AnyRef] 73 | assert(l.lt(null, A)) 74 | assert(l.lteq(null, A)) 75 | assert(!l.gt(null, A)) 76 | assert(!l.gteq(null, A)) 77 | assert(l.tryCompare(A, A).get == 0) 78 | assert(l.tryCompare(A, null).get > 0) 79 | assert(l.tryCompare(null, A).get < 0) 80 | 81 | assert(!l.lt(A, B)) 82 | assert(!l.lteq(A, B)) 83 | assert(!l.gt(A, B)) 84 | assert(!l.gteq(A, B)) 85 | assert(l.tryCompare(A, B).isEmpty) 86 | } 87 | 88 | test("client defined lattice 1") { 89 | sealed trait Value 90 | case object Bottom extends Value 91 | case object A extends Value 92 | case object B extends Value 93 | case object Top extends Value 94 | 95 | object L extends Lattice[Value] { 96 | override def join(v1: Value, v2: Value): Value = 97 | if (v1 == Bottom) v2 98 | else if (v2 == Bottom) v1 99 | else if (v1 == v2) v1 100 | else Top 101 | 102 | override val bottom: Value = Bottom 103 | } 104 | 105 | assert(L.lt(L.bottom, A)) 106 | assert(L.lteq(L.bottom, A)) 107 | assert(!L.gt(L.bottom, A)) 108 | assert(!L.gteq(L.bottom, A)) 109 | assert(L.tryCompare(A, A).get == 0) 110 | assert(L.tryCompare(L.bottom, A).get < 0) 111 | 112 | assert(!L.lt(A, B)) 113 | assert(!L.lteq(A, B)) 114 | assert(!L.gt(A, B)) 115 | assert(!L.gteq(A, B)) 116 | assert(L.tryCompare(A, B).isEmpty) 117 | 118 | assert(L.join(A, B) == Top) 119 | assert(L.join(Bottom, A) == A) 120 | assert(L.join(B, Bottom) == B) 121 | assert(L.join(A, Top) == Top) 122 | assert(L.join(Top, B) == Top) 123 | assert(L.join(Bottom, Top) == Top) 124 | assert(L.join(Top, Bottom) == Top) 125 | assert(L.join(A, A) == A) 126 | } 127 | 128 | 
test("PurityUpdater: successful updated") { 129 | val updater = Updater.partialOrderingToUpdater(Purity.PurityOrdering) 130 | 131 | val purity = updater.update(UnknownPurity, Pure) 132 | assert(purity == Pure) 133 | 134 | val newPurity = updater.update(purity, Pure) 135 | assert(newPurity == Pure) 136 | } 137 | 138 | test("PurityUpdater: failed updates") { 139 | val updater = Updater.partialOrderingToUpdater(Purity.PurityOrdering) 140 | 141 | try { 142 | val newPurity = updater.update(Impure, Pure) 143 | assert(false) 144 | } catch { 145 | case lve: NotMonotonicException[_] => assert(true) 146 | case e: Exception => assert(false) 147 | } 148 | 149 | try { 150 | val newPurity = updater.update(Pure, Impure) 151 | assert(false) 152 | } catch { 153 | case lve: NotMonotonicException[_] => assert(true) 154 | case e: Exception => assert(false) 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/LazySuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import java.util.concurrent.CountDownLatch 5 | 6 | import com.phaller.rasync.cell.{ Cell, FinalOutcome, NextOutcome, NoOutcome } 7 | import com.phaller.rasync.lattice.{ DefaultKey, Updater } 8 | import com.phaller.rasync.pool.HandlerPool 9 | import com.phaller.rasync.test.lattice.{ IntUpdater, StringIntKey } 10 | import org.scalatest.FunSuite 11 | 12 | import scala.concurrent.Await 13 | import scala.concurrent.duration._ 14 | 15 | class LazySuite extends FunSuite { 16 | 17 | implicit val stringIntUpdater: Updater[Int] = new IntUpdater 18 | 19 | test("lazy init") { 20 | val latch = new CountDownLatch(1) 21 | val pool = new HandlerPool[Int, Null] 22 | val cell = pool.mkCell(_ => { 23 | FinalOutcome(1) 24 | }) 25 | cell.onComplete(_ => latch.countDown()) 26 | 27 | assert(!cell.isComplete) 28 | cell.trigger() 29 | 30 | latch.await() 
31 | 32 | assert(cell.isComplete) 33 | assert(cell.getResult() == 1) 34 | 35 | pool.shutdown() 36 | } 37 | 38 | test("trigger dependees") { 39 | val latch = new CountDownLatch(2) 40 | val pool = new HandlerPool[Int, Null] 41 | 42 | var cell1: Cell[Int, Null] = null 43 | var cell2: Cell[Int, Null] = null 44 | 45 | cell1 = pool.mkCell(_ => { 46 | FinalOutcome(1) 47 | }) 48 | 49 | cell2 = pool.mkCell(_ => { 50 | cell2.when(cell1)(it => { 51 | if (it.head._2.get.isInstanceOf[FinalOutcome[_]]) FinalOutcome(3) 52 | else NoOutcome 53 | }) 54 | NextOutcome(2) 55 | }) 56 | 57 | cell1.onComplete(_ => latch.countDown()) 58 | cell2.onComplete(_ => latch.countDown()) 59 | 60 | assert(!cell1.isComplete) 61 | assert(!cell2.isComplete) 62 | cell2.trigger() 63 | 64 | latch.await() 65 | 66 | assert(cell1.isComplete) 67 | assert(cell1.getResult() == 1) 68 | 69 | assert(cell2.isComplete) 70 | assert(cell2.getResult() == 3) 71 | 72 | pool.shutdown() 73 | } 74 | 75 | test("do not trigger unneeded cells") { 76 | val latch = new CountDownLatch(1) 77 | val pool = new HandlerPool[Int, Null] 78 | 79 | var cell1: Cell[Int, Null] = null 80 | var cell2: Cell[Int, Null] = null 81 | 82 | cell1 = pool.mkCell(_ => { 83 | assert(false) 84 | FinalOutcome(-11) 85 | }) 86 | 87 | cell2 = pool.mkCell(_ => { 88 | FinalOutcome(2) 89 | }) 90 | 91 | cell2.onComplete(_ => latch.countDown()) 92 | 93 | cell2.trigger() 94 | 95 | latch.await() 96 | 97 | assert(!cell1.isComplete) 98 | assert(cell1.getResult() == 0) 99 | 100 | pool.shutdown() 101 | } 102 | 103 | test("cycle deps") { 104 | val latch1 = new CountDownLatch(2) 105 | val latch2 = new CountDownLatch(2) 106 | val pool = new HandlerPool[Int, Null] 107 | 108 | var cell1: Cell[Int, Null] = null 109 | var cell2: Cell[Int, Null] = null 110 | 111 | cell1 = pool.mkCell(_ => { 112 | cell1.when(cell2)(it => { 113 | if (it.head._2.get.isInstanceOf[FinalOutcome[_]]) FinalOutcome(3) 114 | else NoOutcome 115 | 116 | }) 117 | NextOutcome(1) 118 | }) 119 | 120 | cell2 = 
pool.mkCell(_ => { 121 | cell2.when(cell1)(it => { 122 | if (it.head._2.get.isInstanceOf[FinalOutcome[_]]) FinalOutcome(3) 123 | else NoOutcome 124 | }) 125 | NextOutcome(2) 126 | }) 127 | 128 | cell1.onNext(_ => latch1.countDown()) 129 | cell2.onNext(_ => latch1.countDown()) 130 | 131 | cell1.onComplete(_ => latch2.countDown()) 132 | cell2.onComplete(_ => latch2.countDown()) 133 | 134 | cell2.trigger() 135 | latch1.await() 136 | 137 | val fut = pool.quiescentResolveCell 138 | Await.ready(fut, 2.seconds) 139 | 140 | latch2.await() 141 | 142 | assert(cell1.isComplete) 143 | assert(cell1.getResult() == 1) 144 | 145 | assert(cell2.isComplete) 146 | assert(cell2.getResult() == 2) 147 | 148 | pool.shutdown() 149 | } 150 | 151 | test("cycle deps with incoming dep") { 152 | val latch1 = new CountDownLatch(2) 153 | val latch2 = new CountDownLatch(3) 154 | val pool = new HandlerPool[Int, Null] 155 | 156 | var cell1: Cell[Int, Null] = null 157 | var cell2: Cell[Int, Null] = null 158 | var cell3: Cell[Int, Null] = null 159 | 160 | cell1 = pool.mkCell(_ => { 161 | cell1.when(cell2)(_ => NextOutcome(-1)) 162 | NextOutcome(101) 163 | }) 164 | 165 | cell2 = pool.mkCell(_ => { 166 | cell2.when(cell1)(_ => NextOutcome(-1)) 167 | NextOutcome(102) 168 | }) 169 | 170 | cell3 = pool.mkCell(_ => { 171 | cell3.when(cell1)(_ => FinalOutcome(103)) 172 | NextOutcome(-1) 173 | }) 174 | 175 | cell1.onNext(_ => latch1.countDown()) 176 | cell2.onNext(_ => latch1.countDown()) 177 | 178 | cell1.onComplete(_ => latch2.countDown()) 179 | cell2.onComplete(_ => latch2.countDown()) 180 | cell3.onComplete(_ => latch2.countDown()) 181 | 182 | assert(!cell1.isComplete) 183 | assert(!cell2.isComplete) 184 | 185 | cell3.trigger() 186 | latch1.await() 187 | 188 | val fut = pool.quiescentResolveCell 189 | Await.ready(fut, 2.seconds) 190 | 191 | latch2.await() 192 | 193 | assert(cell3.isComplete) 194 | assert(cell3.getResult() === 103) 195 | 196 | pool.shutdown() 197 | } 198 | 199 | test("cycle deps with 
incoming dep, resolve cycle first") { 200 | val theKey = new DefaultKey[Int, Null]() 201 | 202 | val latch1 = new CountDownLatch(2) 203 | val latch2 = new CountDownLatch(2) 204 | val latch3 = new CountDownLatch(1) 205 | val pool = new HandlerPool[Int, Null](theKey) 206 | 207 | var cell1: Cell[Int, Null] = null 208 | var cell2: Cell[Int, Null] = null 209 | var cell3: Cell[Int, Null] = null 210 | 211 | cell1 = pool.mkCell(c => { 212 | c.when(cell2)(_ => { 213 | NextOutcome(-111) 214 | }) 215 | NextOutcome(11) 216 | }) 217 | 218 | cell2 = pool.mkCell(c => { 219 | c.when(cell1)(_ => { 220 | NextOutcome(-222) 221 | }) 222 | NextOutcome(22) 223 | }) 224 | 225 | cell1.onNext(_ => latch1.countDown()) 226 | cell2.onNext(_ => latch1.countDown()) 227 | 228 | cell1.onComplete(_ => latch2.countDown()) 229 | cell2.onComplete(_ => latch2.countDown()) 230 | 231 | cell2.trigger() 232 | latch1.await() 233 | 234 | val fut = pool.quiescentResolveCell 235 | Await.ready(fut, 2.seconds) 236 | 237 | latch2.await() 238 | 239 | cell3 = pool.mkCell(c => { 240 | c.when(cell1)(_ => { 241 | FinalOutcome(333) 242 | }) 243 | NextOutcome(-3) 244 | }) 245 | 246 | cell3.onComplete(_ => latch3.countDown()) 247 | cell3.trigger() 248 | 249 | latch3.await() 250 | 251 | assert(cell3.isComplete) 252 | assert(cell3.getResult() === 333) 253 | 254 | pool.shutdown() 255 | } 256 | 257 | test("cycle does not get resolved, if not triggered") { 258 | val pool = new HandlerPool[Int, Null] 259 | var c1: Cell[Int, Null] = null 260 | var c2: Cell[Int, Null] = null 261 | c1 = pool.mkCell(_ => { 262 | c1.when(c2)(_ => FinalOutcome(-2)) 263 | FinalOutcome(-1) 264 | }) 265 | c2 = pool.mkCell(_ => { 266 | c2.when(c1)(_ => FinalOutcome(-2)) 267 | FinalOutcome(-1) 268 | }) 269 | 270 | val fut2 = pool.quiescentResolveCell 271 | Await.ready(fut2, 2.seconds) 272 | 273 | assert(c1.getResult() == 0) 274 | assert(!c1.isComplete) 275 | assert(c2.getResult() == 0) 276 | assert(!c2.isComplete) 277 | 278 | pool.shutdown() 279 | } 280 
| // 281 | test("cell does not get resolved, if not triggered") { 282 | val pool = new HandlerPool[Int, Null] 283 | val c = pool.mkCell(_ => FinalOutcome(-1)) 284 | 285 | val fut2 = pool.quiescentResolveCell 286 | Await.ready(fut2, 2.seconds) 287 | 288 | assert(c.getResult() == 0) 289 | assert(!c.isComplete) 290 | 291 | pool.shutdown() 292 | } 293 | // 294 | test("cell gets resolved, if triggered") { 295 | val pool = new HandlerPool[Int, Null](new StringIntKey("")) 296 | val cell = pool.mkCell(_ => { 297 | NextOutcome(-1) 298 | }) 299 | cell.trigger() 300 | 301 | val fut2 = pool.quiescentResolveCell 302 | Await.ready(fut2, 2.seconds) 303 | 304 | assert(cell.isComplete) // cell should be completed with a fallback value 305 | assert(cell.getResult() == 1) // StringIntKey sets cell to fallback value `1`. 306 | 307 | pool.shutdown() 308 | } 309 | } 310 | 311 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/MixedKeyResolutionSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.test 2 | 3 | import java.util.concurrent.CountDownLatch 4 | 5 | import com.phaller.rasync.cell._ 6 | import com.phaller.rasync.lattice.{ DefaultKey, Key, Updater } 7 | import com.phaller.rasync.pool.HandlerPool 8 | import com.phaller.rasync.test.lattice.IntUpdater 9 | import org.scalatest.FunSuite 10 | 11 | import scala.concurrent.Await 12 | import scala.concurrent.duration._ 13 | import scala.util.{ Failure, Success, Try } 14 | 15 | /** 16 | * Tests where cylces or independent cells 17 | * need to be resolved via a Key. 18 | * This tests contains cycles that only constist 19 | * of a single type of Cells and do not mix 20 | * SequentialCells and ConcurrentCells. 
21 | * For the unmixed case, see KeyResolutionSuite. 22 | */ 23 | class MixedKeyResolutionSuite extends FunSuite with MixedCompleterFactory { 24 | def forwardAsNext[E >: Null](upd: Iterable[(Cell[Int, E], Try[ValueOutcome[Int]])]): Outcome[Int] = { 25 | val c = upd.head._2 26 | NextOutcome(c.get.value) 27 | } 28 | 29 | implicit val intUpdater: Updater[Int] = new IntUpdater 30 | 31 | test("DefaultKey.resolve 1") { 32 | val k = new DefaultKey[Int, Null] 33 | implicit val pool = new HandlerPool[Int, Null](k) 34 | val completer1 = mkSeqCompleter[Int, Null] 35 | val completer2 = mkConCompleter[Int, Null] 36 | completer1.cell.when(completer2.cell)(forwardAsNext) 37 | completer2.cell.when(completer1.cell)(forwardAsNext) 38 | completer1.putNext(5) 39 | Await.ready(pool.quiescentResolveCell, 2.seconds) 40 | assert(completer1.cell.isComplete) 41 | assert(completer2.cell.isComplete) 42 | assert(completer1.cell.getResult() == 5) 43 | assert(completer2.cell.getResult() == 5) 44 | pool.shutdown() 45 | } 46 | 47 | test("DefaultKey.resolve 2") { 48 | val k = new DefaultKey[Int, Null] 49 | implicit val pool = new HandlerPool[Int, Null](k) 50 | val completer1 = mkConCompleter[Int, Null] 51 | val completer2 = mkSeqCompleter[Int, Null] 52 | completer1.cell.when(completer2.cell)(forwardAsNext) 53 | completer2.cell.when(completer1.cell)(forwardAsNext) 54 | completer1.putNext(5) 55 | Await.ready(pool.quiescentResolveCell, 2.seconds) 56 | assert(completer1.cell.isComplete) 57 | assert(completer2.cell.isComplete) 58 | assert(completer1.cell.getResult() == 5) 59 | assert(completer2.cell.getResult() == 5) 60 | pool.shutdown() 61 | } 62 | 63 | test("when: cSCC with constant resolution 1") { 64 | val latch = new CountDownLatch(4) 65 | 66 | object ConstantKey extends Key[Int, Null] { 67 | val RESOLVEDINCYCLE = 5 68 | val RESOLVEDASINDPENDENT = 10 69 | 70 | override def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDINCYCLE)) 71 | 72 | 
override def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDASINDPENDENT)) 73 | } 74 | 75 | implicit val pool = new HandlerPool[Int, Null](ConstantKey) 76 | 77 | val completer1 = mkConCompleter[Int, Null] 78 | val cell1 = completer1.cell 79 | val completer2 = mkConCompleter[Int, Null] 80 | val cell2 = completer2.cell 81 | val completer3 = mkSeqCompleter[Int, Null] 82 | val cell3 = completer3.cell 83 | val completer4 = mkSeqCompleter[Int, Null] 84 | val cell4 = completer4.cell 85 | 86 | // set unwanted values: 87 | completer1.putNext(-1) 88 | completer2.putNext(-1) 89 | completer3.putNext(-1) 90 | completer4.putNext(-1) 91 | 92 | // create a cSCC, assert that none of the callbacks get called again. 93 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 94 | case FinalOutcome(_) => 95 | NoOutcome 96 | case NextOutcome(-1) => 97 | NoOutcome 98 | case _ => 99 | assert(false) 100 | NextOutcome(-2) 101 | } 102 | 103 | cell1.when(cell2)(c) 104 | cell1.when(cell3)(c) 105 | cell2.when(cell4)(c) 106 | cell3.when(cell4)(c) 107 | cell4.when(cell1)(c) 108 | 109 | for (c <- List(cell1, cell2, cell3, cell4)) 110 | c.onComplete { 111 | case Success(v) => 112 | assert(v === ConstantKey.RESOLVEDINCYCLE) 113 | assert(c.numDependencies === 0) 114 | latch.countDown() 115 | case Failure(e) => 116 | assert(false) 117 | latch.countDown() 118 | } 119 | 120 | // resolve cells 121 | val fut = pool.quiescentResolveCell 122 | Await.result(fut, 2.seconds) 123 | latch.await() 124 | 125 | pool.onQuiescenceShutdown() 126 | } 127 | 128 | test("when: cSCC with constant resolution 2") { 129 | val latch = new CountDownLatch(4) 130 | 131 | object ConstantKey extends Key[Int, Null] { 132 | val RESOLVEDINCYCLE = 5 133 | val RESOLVEDASINDPENDENT = 10 134 | 135 | override def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDINCYCLE)) 136 | 137 | 
override def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] = cells.map((_, RESOLVEDASINDPENDENT)) 138 | } 139 | 140 | implicit val pool = new HandlerPool[Int, Null](ConstantKey) 141 | 142 | val completer1 = mkConCompleter[Int, Null] 143 | val cell1 = completer1.cell 144 | val completer2 = mkSeqCompleter[Int, Null] 145 | val cell2 = completer2.cell 146 | val completer3 = mkSeqCompleter[Int, Null] 147 | val cell3 = completer3.cell 148 | val completer4 = mkConCompleter[Int, Null] 149 | val cell4 = completer4.cell 150 | 151 | // set unwanted values: 152 | completer1.putNext(-1) 153 | completer2.putNext(-1) 154 | completer3.putNext(-1) 155 | completer4.putNext(-1) 156 | 157 | // create a cSCC, assert that none of the callbacks get called again. 158 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 159 | case FinalOutcome(_) => 160 | NoOutcome 161 | case NextOutcome(-1) => 162 | NoOutcome 163 | case _ => 164 | assert(false) 165 | NextOutcome(-2) 166 | } 167 | 168 | cell1.when(cell2)(c) 169 | cell1.when(cell3)(c) 170 | cell2.when(cell4)(c) 171 | cell3.when(cell4)(c) 172 | cell4.when(cell1)(c) 173 | 174 | for (c <- List(cell1, cell2, cell3, cell4)) 175 | c.onComplete { 176 | case Success(v) => 177 | assert(v === ConstantKey.RESOLVEDINCYCLE) 178 | assert(c.numDependencies === 0) 179 | latch.countDown() 180 | case Failure(e) => 181 | assert(false) 182 | latch.countDown() 183 | } 184 | 185 | // resolve cells 186 | val fut = pool.quiescentResolveCell 187 | Await.result(fut, 2.seconds) 188 | latch.await() 189 | 190 | pool.onQuiescenceShutdown() 191 | } 192 | 193 | test("when: cSCC with default resolution 1") { 194 | val latch = new CountDownLatch(4) 195 | 196 | implicit val pool = new HandlerPool[Int, Null] 197 | 198 | val completer1 = mkSeqCompleter[Int, Null] 199 | val cell1 = completer1.cell 200 | val completer2 = mkConCompleter[Int, Null] 201 | val cell2 = completer2.cell 202 | val 
completer3 = mkSeqCompleter[Int, Null] 203 | val cell3 = completer3.cell 204 | val completer4 = mkConCompleter[Int, Null] 205 | val cell4 = completer4.cell 206 | 207 | // set unwanted values: 208 | completer1.putNext(-1) 209 | completer2.putNext(-1) 210 | completer3.putNext(-1) 211 | completer4.putNext(-1) 212 | 213 | // create a cSCC, assert that none of the callbacks get called again. 214 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 215 | case FinalOutcome(_) => 216 | NoOutcome 217 | case NextOutcome(-1) => 218 | NoOutcome 219 | case _ => 220 | assert(false) 221 | NextOutcome(-2) 222 | } 223 | 224 | cell1.when(cell2)(c) 225 | cell1.when(cell3)(c) 226 | cell2.when(cell4)(c) 227 | cell3.when(cell4)(c) 228 | cell4.when(cell1)(c) 229 | 230 | for (c <- List(cell1, cell2, cell3, cell4)) 231 | c.onComplete { 232 | case Success(v) => 233 | assert(v === -1) 234 | assert(c.numDependencies === 0) 235 | latch.countDown() 236 | case Failure(e) => 237 | assert(false) 238 | latch.countDown() 239 | } 240 | 241 | // resolve cells 242 | val fut = pool.quiescentResolveCell 243 | Await.result(fut, 2.seconds) 244 | latch.await() 245 | 246 | pool.onQuiescenceShutdown() 247 | } 248 | 249 | test("when: cSCC with default resolution 2") { 250 | val latch = new CountDownLatch(4) 251 | 252 | implicit val pool = new HandlerPool[Int, Null] 253 | 254 | val completer1 = mkConCompleter[Int, Null] 255 | val cell1 = completer1.cell 256 | val completer2 = mkConCompleter[Int, Null] 257 | val cell2 = completer2.cell 258 | val completer3 = mkSeqCompleter[Int, Null] 259 | val cell3 = completer3.cell 260 | val completer4 = mkConCompleter[Int, Null] 261 | val cell4 = completer4.cell 262 | 263 | // set unwanted values: 264 | completer1.putNext(-1) 265 | completer2.putNext(-1) 266 | completer3.putNext(-1) 267 | completer4.putNext(-1) 268 | 269 | // create a cSCC, assert that none of the callbacks get called again. 
270 | def c(upd: Iterable[(Cell[Int, Null], Try[ValueOutcome[Int]])]): Outcome[Int] = upd.head._2.get match { 271 | case FinalOutcome(_) => 272 | NoOutcome 273 | case NextOutcome(-1) => 274 | NoOutcome 275 | case _ => 276 | assert(false) 277 | NextOutcome(-2) 278 | } 279 | 280 | cell1.when(cell2)(c) 281 | cell1.when(cell3)(c) 282 | cell2.when(cell4)(c) 283 | cell3.when(cell4)(c) 284 | cell4.when(cell1)(c) 285 | 286 | for (c <- List(cell1, cell2, cell3, cell4)) 287 | c.onComplete { 288 | case Success(v) => 289 | assert(v === -1) 290 | assert(c.numDependencies === 0) 291 | latch.countDown() 292 | case Failure(e) => 293 | assert(false) 294 | latch.countDown() 295 | } 296 | 297 | // resolve cells 298 | val fut = pool.quiescentResolveCell 299 | Await.result(fut, 2.seconds) 300 | latch.await() 301 | 302 | pool.onQuiescenceShutdown() 303 | } 304 | 305 | test("when: cycle with default resolution 1") { 306 | sealed trait Value 307 | case object Bottom extends Value 308 | case object ShouldNotHappen extends Value 309 | 310 | implicit object ValueUpdater extends Updater[Value] { 311 | override def update(v1: Value, v2: Value): Value = v2 312 | override val bottom: Value = Bottom 313 | } 314 | 315 | implicit val pool: HandlerPool[Value, Null] = new HandlerPool[Value, Null] 316 | 317 | for (i <- 1 to 100) { 318 | val completer1 = mkConCompleter[Value, Null] 319 | val completer2 = mkSeqCompleter[Value, Null] 320 | val cell1 = completer1.cell 321 | val cell2 = completer2.cell 322 | 323 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 324 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 325 | 326 | val fut = pool.quiescentResolveCell 327 | Await.ready(fut, 1.minutes) 328 | 329 | assert(cell1.getResult() != ShouldNotHappen) 330 | assert(cell2.getResult() != ShouldNotHappen) 331 | } 332 | 333 | pool.onQuiescenceShutdown() 334 | } 335 | 336 | test("when: cycle with default resolution 2") { 337 | sealed trait Value 338 | case object Bottom extends Value 339 | case object 
ShouldNotHappen extends Value 340 | 341 | implicit object ValueUpdater extends Updater[Value] { 342 | override def update(v1: Value, v2: Value): Value = v2 343 | override val bottom: Value = Bottom 344 | } 345 | 346 | implicit val pool: HandlerPool[Value, Null] = new HandlerPool[Value, Null] 347 | 348 | for (i <- 1 to 100) { 349 | val completer1 = mkSeqCompleter[Value, Null] 350 | val completer2 = mkConCompleter[Value, Null] 351 | val cell1 = completer1.cell 352 | val cell2 = completer2.cell 353 | 354 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 355 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 356 | 357 | val fut = pool.quiescentResolveCell 358 | Await.ready(fut, 1.minutes) 359 | 360 | assert(cell1.getResult() != ShouldNotHappen) 361 | assert(cell2.getResult() != ShouldNotHappen) 362 | } 363 | 364 | pool.onQuiescenceShutdown() 365 | } 366 | 367 | test("when: cycle with constant resolution 1") { 368 | sealed trait Value 369 | case object Bottom extends Value 370 | case object OK extends Value 371 | case object ShouldNotHappen extends Value 372 | 373 | implicit object ValueUpdater extends Updater[Value] { 374 | override def update(v1: Value, v2: Value): Value = if (v1 == Bottom) v2 else v1 // TODO or throw? 
375 | override val bottom: Value = Bottom 376 | } 377 | 378 | object TheKey extends DefaultKey[Value, Null] { 379 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 380 | cells.map(cell => (cell, OK)) 381 | } 382 | } 383 | 384 | implicit val pool = new HandlerPool[Value, Null](TheKey) 385 | 386 | for (i <- 1 to 100) { 387 | val completer1 = mkConCompleter[Value, Null] 388 | val completer2 = mkSeqCompleter[Value, Null] 389 | val cell1 = completer1.cell 390 | val cell2 = completer2.cell 391 | 392 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 393 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 394 | 395 | val fut = pool.quiescentResolveCell 396 | Await.ready(fut, 1.minutes) 397 | 398 | assert(cell1.getResult() == OK) 399 | assert(cell2.getResult() == OK) 400 | } 401 | 402 | pool.onQuiescenceShutdown() 403 | } 404 | 405 | test("when: cycle with constant resolution 2") { 406 | sealed trait Value 407 | case object Bottom extends Value 408 | case object OK extends Value 409 | case object ShouldNotHappen extends Value 410 | 411 | implicit object ValueUpdater extends Updater[Value] { 412 | override def update(v1: Value, v2: Value): Value = if (v1 == Bottom) v2 else v1 // TODO or throw? 
413 | override val bottom: Value = Bottom 414 | } 415 | 416 | object TheKey extends DefaultKey[Value, Null] { 417 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 418 | cells.map(cell => (cell, OK)) 419 | } 420 | } 421 | 422 | implicit val pool = new HandlerPool[Value, Null](TheKey) 423 | 424 | for (i <- 1 to 100) { 425 | val completer1 = mkSeqCompleter[Value, Null] 426 | val completer2 = mkConCompleter[Value, Null] 427 | val cell1 = completer1.cell 428 | val cell2 = completer2.cell 429 | 430 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 431 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 432 | 433 | val fut = pool.quiescentResolveCell 434 | Await.ready(fut, 1.minutes) 435 | 436 | assert(cell1.getResult() == OK) 437 | assert(cell2.getResult() == OK) 438 | } 439 | 440 | pool.onQuiescenceShutdown() 441 | } 442 | 443 | test("whenNext: cycle with additional outgoing dep 1") { 444 | sealed trait Value 445 | case object Bottom extends Value 446 | case object Resolved extends Value 447 | case object Fallback extends Value 448 | case object OK extends Value 449 | case object ShouldNotHappen extends Value 450 | 451 | implicit object ValueUpdater extends Updater[Value] { 452 | override def update(v1: Value, v2: Value): Value = v2 453 | override val bottom: Value = Bottom 454 | } 455 | 456 | object TheKey extends DefaultKey[Value, Null] { 457 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 458 | cells.map(cell => (cell, Resolved)) 459 | } 460 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 461 | cells.map(cell => (cell, Fallback)) 462 | } 463 | } 464 | 465 | implicit val pool = new HandlerPool[Value, Null](TheKey) 466 | val completer1 = mkConCompleter[Value, Null] 467 | val completer2 = mkConCompleter[Value, Null] 468 | val cell1 = completer1.cell 469 | val cell2 = completer2.cell 470 | val out = 
mkSeqCompleter[Value, Null] 471 | 472 | // let `cell1` and `cell2` form a cycle 473 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 474 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 475 | 476 | // the cycle is dependent on incoming information from `out` 477 | cell2.when(out.cell)(_ => NextOutcome(ShouldNotHappen)) 478 | 479 | // resolve the independent cell `out` and the cycle 480 | val fut = pool.quiescentResolveCell 481 | Await.ready(fut, 1.minutes) 482 | 483 | pool.onQuiescenceShutdown() 484 | 485 | assert(cell1.getResult() != ShouldNotHappen) 486 | assert(cell2.getResult() != ShouldNotHappen) 487 | assert(out.cell.getResult() == Fallback) 488 | } 489 | 490 | test("whenNext: cycle with additional outgoing dep 2") { 491 | sealed trait Value 492 | case object Bottom extends Value 493 | case object Resolved extends Value 494 | case object Fallback extends Value 495 | case object OK extends Value 496 | case object ShouldNotHappen extends Value 497 | 498 | implicit object ValueUpdater extends Updater[Value] { 499 | override def update(v1: Value, v2: Value): Value = v2 500 | override val bottom: Value = Bottom 501 | } 502 | 503 | object TheKey extends DefaultKey[Value, Null] { 504 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 505 | cells.map(cell => (cell, Resolved)) 506 | } 507 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 508 | cells.map(cell => (cell, Fallback)) 509 | } 510 | } 511 | 512 | implicit val pool = new HandlerPool[Value, Null](TheKey) 513 | val completer1 = mkSeqCompleter[Value, Null] 514 | val completer2 = mkConCompleter[Value, Null] 515 | val cell1 = completer1.cell 516 | val cell2 = completer2.cell 517 | val out = mkSeqCompleter[Value, Null] 518 | 519 | // let `cell1` and `cell2` form a cycle 520 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 521 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 522 | 523 | // 
the cycle is dependent on incoming information from `out` 524 | cell2.when(out.cell)(_ => NextOutcome(ShouldNotHappen)) 525 | 526 | // resolve the independent cell `out` and the cycle 527 | val fut = pool.quiescentResolveCell 528 | Await.ready(fut, 1.minutes) 529 | 530 | pool.onQuiescenceShutdown() 531 | 532 | assert(cell1.getResult() != ShouldNotHappen) 533 | assert(cell2.getResult() != ShouldNotHappen) 534 | assert(out.cell.getResult() == Fallback) 535 | } 536 | 537 | test("whenNext: cycle with additional incoming dep 1") { 538 | sealed trait Value 539 | case object Bottom extends Value 540 | case object Dummy extends Value 541 | case object Resolved extends Value 542 | case object OK extends Value 543 | case object ShouldNotHappen extends Value 544 | 545 | implicit object ValueUpdater extends Updater[Value] { 546 | override def update(v1: Value, v2: Value): Value = v2 547 | override val bottom: Value = Bottom 548 | } 549 | 550 | object TheKey extends DefaultKey[Value, Null] { 551 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 552 | cells.map(cell => (cell, Resolved)) 553 | } 554 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 555 | Seq() 556 | } 557 | } 558 | 559 | implicit val pool = new HandlerPool[Value, Null](TheKey) 560 | val completer1 = mkSeqCompleter[Value, Null] 561 | val completer2 = mkSeqCompleter[Value, Null] 562 | val cell1 = completer1.cell 563 | val cell2 = completer2.cell 564 | val in = mkConCompleter[Value, Null] 565 | in.putNext(Dummy) 566 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 567 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 568 | in.putNext(ShouldNotHappen) 569 | in.cell.when(cell1)(_ => FinalOutcome(OK)) 570 | 571 | val fut = pool.quiescentResolveCell 572 | Await.ready(fut, 1.minutes) 573 | 574 | pool.onQuiescenceShutdown() 575 | 576 | assert(cell1.getResult() != ShouldNotHappen) 577 | assert(cell2.getResult() 
!= ShouldNotHappen) 578 | assert(in.cell.getResult() == OK) 579 | } 580 | 581 | test("whenNext: cycle with additional incoming dep 2") { 582 | sealed trait Value 583 | case object Bottom extends Value 584 | case object Dummy extends Value 585 | case object Resolved extends Value 586 | case object OK extends Value 587 | case object ShouldNotHappen extends Value 588 | 589 | implicit object ValueUpdater extends Updater[Value] { 590 | override def update(v1: Value, v2: Value): Value = v2 591 | override val bottom: Value = Bottom 592 | } 593 | 594 | object TheKey extends Key[Value, Null] { 595 | override def resolve(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 596 | cells.map(cell => (cell, Resolved)) 597 | } 598 | override def fallback(cells: Iterable[Cell[Value, Null]]): Iterable[(Cell[Value, Null], Value)] = { 599 | Seq() 600 | } 601 | } 602 | 603 | implicit val pool = new HandlerPool[Value, Null](TheKey) 604 | val completer1 = mkConCompleter[Value, Null] 605 | val completer2 = mkConCompleter[Value, Null] 606 | val cell1 = completer1.cell 607 | val cell2 = completer2.cell 608 | val in = mkSeqCompleter[Value, Null] 609 | in.putNext(Dummy) 610 | cell1.when(cell2)(_ => NextOutcome(ShouldNotHappen)) 611 | cell2.when(cell1)(_ => NextOutcome(ShouldNotHappen)) 612 | in.putNext(ShouldNotHappen) 613 | in.cell.when(cell1)(_ => FinalOutcome(OK)) 614 | 615 | val fut = pool.quiescentResolveCell 616 | Await.ready(fut, 1.minutes) 617 | 618 | pool.onQuiescenceShutdown() 619 | 620 | assert(cell1.getResult() != ShouldNotHappen) 621 | assert(cell2.getResult() != ShouldNotHappen) 622 | assert(in.cell.getResult() == OK) 623 | } 624 | 625 | } 626 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/PoolSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import java.util.concurrent.{ 
ConcurrentHashMap, CountDownLatch } 5 | 6 | import com.phaller.rasync.cell.{ Cell, CellCompleter } 7 | import org.scalatest.FunSuite 8 | 9 | import scala.concurrent.{ Await, Promise } 10 | import scala.concurrent.duration._ 11 | import com.phaller.rasync.lattice.Updater 12 | import com.phaller.rasync.pool.HandlerPool 13 | import com.phaller.rasync.test.lattice.{ IntUpdater, StringIntKey } 14 | 15 | class PoolSuite extends FunSuite { 16 | test("onQuiescent") { 17 | val pool = HandlerPool[Int] 18 | 19 | var i = 0 20 | while (i < 10000) { 21 | val p1 = Promise[Boolean]() 22 | val p2 = Promise[Boolean]() 23 | pool.execute { () => { p1.success(true) }: Unit } 24 | pool.onQuiescent { () => p2.success(true) } 25 | try { 26 | Await.result(p2.future, 1.seconds) 27 | } catch { 28 | case t: Throwable => 29 | assert(false, s"failure after $i iterations") 30 | } 31 | i += 1 32 | } 33 | 34 | pool.shutdown() 35 | } 36 | 37 | test("register cells concurrently") { 38 | implicit val stringIntUpdater: Updater[Int] = new IntUpdater 39 | 40 | implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s")) 41 | var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]() 42 | for (_ <- 1 to 1000) { 43 | pool.execute(() => { 44 | val completer = CellCompleter[Int, Null]() 45 | completer.cell.trigger() 46 | regCells.put(completer.cell, completer.cell) 47 | () 48 | }) 49 | } 50 | val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback 51 | Await.ready(fut, 5.seconds) 52 | 53 | regCells.values().removeIf(_.getResult() != 0) 54 | assert(regCells.size === 0) 55 | } 56 | 57 | test("register cells concurrently 2") { 58 | implicit val stringIntUpdater: Updater[Int] = new IntUpdater 59 | 60 | implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s")) 61 | var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]() 62 | for (_ <- 1 to 1000) { 63 | pool.execute(() => { 64 | val completer = CellCompleter[Int, Null]() 65 | 
regCells.put(completer.cell, completer.cell) 66 | () 67 | }) 68 | } 69 | val fut = pool.quiescentResolveCell // cells were never triggered, so resolution leaves them at bottom; this only awaits quiescent registration 70 | Await.ready(fut, 5.seconds) 71 | 72 | assert(regCells.size === 1000) 73 | } 74 | 75 | test("handler pool quiescence") { 76 | implicit val pool = new HandlerPool[Int, Null] 77 | val latch = new CountDownLatch(1) 78 | val latch2 = new CountDownLatch(1) 79 | pool.execute { () => latch.await() } 80 | pool.onQuiescent { () => latch2.countDown() } 81 | latch.countDown() 82 | 83 | latch2.await() 84 | assert(true) 85 | 86 | pool.onQuiescenceShutdown() 87 | } 88 | 89 | } 90 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/PowerSetLatticeSuite.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync 2 | package test 3 | 4 | import org.scalatest.FunSuite 5 | import com.phaller.rasync.lattice.Lattice 6 | import com.phaller.rasync.lattice.lattices.PowerSetLattice 7 | 8 | object Util { 9 | 10 | def joinOfTwoElements[T](elem1: T, elem2: T)(lattice: Lattice[T]): T = { 11 | lattice.join(elem1, elem2) 12 | } 13 | 14 | def joinOfTwoElements2[T](elem1: T, elem2: T)(implicit lattice: Lattice[T]): T = { 15 | lattice.join(elem1, elem2) 16 | } 17 | 18 | def joinOfTwoElements3[T: Lattice](elem1: T, elem2: T): T = { 19 | val lattice = implicitly[Lattice[T]] 20 | lattice.join(elem1, elem2) 21 | } 22 | 23 | } 24 | 25 | class PowerSetLatticeSuite extends FunSuite { 26 | 27 | implicit def mkLattice[T]: PowerSetLattice[T] = 28 | new PowerSetLattice[T] 29 | 30 | test("join using lattice") { 31 | val powerSetLattice = new PowerSetLattice[Int] 32 | val elem1 = Set(1, 2) 33 | val elem2 = Set(4, 6) 34 | val result = Util.joinOfTwoElements(elem1, elem2)(powerSetLattice) 35 | assert(result == Set(1, 2, 4, 6)) 36 | } 37 | 38 | test("join using implicit lattice") { 39 | implicit val powerSetLattice = 
new PowerSetLattice[Int] 40 | val elem1 = Set(1, 2) 41 | val elem2 = Set(4, 6) 42 | val result = Util.joinOfTwoElements2(elem1, elem2) 43 | assert(result == Set(1, 2, 4, 6)) 44 | } 45 | 46 | test("join using implicit lattice 2") { 47 | // type checker knows: PowerSetLattice[T] <: Lattice[Set[T]] 48 | // type checker knows: calling mkLattice[Int] returns PowerSetLattice[Int] <: Lattice[Set[Int]] 49 | val elem1 = Set(1, 2) 50 | val elem2 = Set(4, 6) 51 | val result = Util.joinOfTwoElements2(elem1, elem2) /* (mkLattice[Int]) */ 52 | assert(result == Set(1, 2, 4, 6)) 53 | } 54 | 55 | test("join using implicit lattice 3") { 56 | // type checker knows: PowerSetLattice[T] <: Lattice[Set[T]] 57 | // type checker knows: calling mkLattice[Int] returns PowerSetLattice[Int] <: Lattice[Set[Int]] 58 | val elem1 = Set(1, 2) 59 | val elem2 = Set(4, 6) 60 | val result = Util.joinOfTwoElements3(elem1, elem2) /* (mkLattice[Int]) */ 61 | assert(result == Set(1, 2, 4, 6)) 62 | } 63 | 64 | // does not compile, because there is no type class instance for type Int 65 | /*test("join using implicit lattice 4") { 66 | import PowerSetLattice._ 67 | 68 | val elem1 = 2 69 | val elem2 = 4 70 | val result = Util.joinOfTwoElements3(elem1, elem2) 71 | }*/ 72 | 73 | } 74 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/PsSuite.scala: -------------------------------------------------------------------------------- 1 | //package com.phaller.rasync 2 | //package test 3 | // 4 | //import com.phaller.rasync.lattice.{ Key, Updater } 5 | //import lattice._ 6 | //import org.scalatest.FunSuite 7 | // 8 | //import scala.concurrent.duration._ 9 | //import scala.concurrent.Await 10 | // 11 | //class PsSuite extends FunSuite { 12 | // 13 | // implicit val stringIntUpdater: Updater[Int] = new StringIntUpdater 14 | // 15 | // test("cell dependency on itself whenNextSequential") { 16 | // implicit val pool = new HandlerPool 17 | // val 
completer1 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 18 | // val cell1 = completer1.cell 19 | // 20 | // cell1.trigger() 21 | // completer1.putNext(10) 22 | // 23 | // cell1.whenNextSequential(cell1, _ => { 24 | // NoOutcome 25 | // }) 26 | // 27 | // var fut = pool.quiescentResolveCycles 28 | // Await.ready(fut, 2.seconds) 29 | // 30 | // Thread.sleep(200) 31 | // 32 | // fut = pool.quiescentResolveDefaults 33 | // Await.ready(fut, 2.seconds) 34 | // } 35 | // 36 | // test("cell dependency on itself whenNext") { 37 | // implicit val pool = new HandlerPool(parallelism = 1) 38 | // val completer1 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 39 | // val completer2 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 40 | // val cell1 = completer1.cell 41 | // val cell2 = completer2.cell 42 | // 43 | // val completer10 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 44 | // val completer20 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 45 | // val cell10 = completer10.cell 46 | // val cell20 = completer20.cell 47 | // 48 | // completer2.putNext(1) 49 | // cell2.whenNext(cell1, x => { 50 | // if (x == 42) { 51 | // completer2.putFinal(43) 52 | // } 53 | // NoOutcome 54 | // }) 55 | // 56 | // completer20.putNext(1) 57 | // cell20.whenNextSequential(cell10, x => { 58 | // if (x == 10) { 59 | // completer20.putFinal(43) 60 | // } 61 | // NoOutcome 62 | // }) 63 | // 64 | // completer1.putNext(10) 65 | // 66 | // cell1.whenNext(cell1, _ => { 67 | // NoOutcome 68 | // }) 69 | // 70 | // var fut = pool.quiescentResolveCycles 71 | // Await.ready(fut, 2.seconds) 72 | // 73 | // Thread.sleep(200) 74 | // 75 | // fut = pool.quiescentResolveDefaults 76 | // Await.ready(fut, 10.seconds) 77 | // } 78 | // 79 | // class ReactivePropertyStoreKey extends Key[Int] { 80 | // override def resolve[K <: Key[Int]](cells: 
Iterable[Cell[K, Int]]): Iterable[(Cell[K, Int], Int)] = { 81 | // cells.map((_, 42)) 82 | // } 83 | // 84 | // override def fallback[K <: Key[Int]](cells: Iterable[Cell[K, Int]]): Iterable[(Cell[K, Int], Int)] = { 85 | // cells.map(cell ⇒ (cell, cell.getResult())) 86 | // } 87 | // 88 | // override def toString = "ReactivePropertyStoreKey" 89 | // } 90 | // 91 | // test("cell dependency on itself whenNextSequential using fallback only") { 92 | // implicit val pool = new HandlerPool(parallelism = 8) 93 | // val completer1 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 94 | // val cell1 = completer1.cell 95 | // 96 | // cell1.trigger() 97 | // completer1.putNext(10) 98 | // 99 | // cell1.whenNextSequential(cell1, _ => { 100 | // NoOutcome 101 | // }) 102 | // 103 | // val fut = pool.quiescentResolveDefaults 104 | // Await.ready(fut, 2.seconds) 105 | // } 106 | // 107 | // test("HandlerPool must be able to interrupt") { 108 | // implicit val pool = new HandlerPool(parallelism = 8) 109 | // val completer1 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 110 | // val completer2 = CellCompleter[ReactivePropertyStoreKey, Int](new ReactivePropertyStoreKey()) 111 | // val cell1 = completer1.cell 112 | // val cell2 = completer2.cell 113 | // 114 | // cell2.whenNextSequential(cell1, v => { 115 | // NextOutcome(v) 116 | // }) 117 | // 118 | // pool.interrupt() 119 | // Thread.sleep(200) 120 | // completer1.putNext(10) 121 | // 122 | // assert(cell2.getResult() == 0) 123 | // 124 | // pool.resume() 125 | // 126 | // val fut = pool.quiescentResolveDefaults 127 | // Await.ready(fut, 2.seconds) 128 | // 129 | // assert(cell2.getResult() == 10) 130 | // } 131 | // 132 | //} 133 | -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/SequentialSuite.scala: -------------------------------------------------------------------------------- 1 | package 
com.phaller.rasync.test

import java.util.concurrent.{ CountDownLatch, TimeUnit }
import java.util.concurrent.atomic.AtomicInteger

import com.phaller.rasync.cell.Outcome
import com.phaller.rasync.lattice.lattices.NaturalNumberKey
import com.phaller.rasync.lattice.{ DefaultKey, Lattice, Updater }
import com.phaller.rasync.pool.HandlerPool
import com.phaller.rasync.test.lattice.IntUpdater
import org.scalatest.FunSuite

/** Verify that callbacks of SequentialCells do not run concurrently. */
class SequentialSuite extends FunSuite with SequentialCompleterFactory {
  implicit val intUpdater: Updater[Int] = new IntUpdater

  test("when: calling sequentially") {
    val n = 1000

    // Number of callbacks currently executing; must never exceed 1 for a
    // sequential cell.
    val runningCallbacks = new AtomicInteger(0)
    val latch = new CountDownLatch(1)
    val random = new scala.util.Random()

    implicit val pool = HandlerPool[Int](NaturalNumberKey)
    val completer1 = mkCompleter[Int]
    val cell1 = completer1.cell

    // Release the latch once cell1 is completed. Registered once, before the
    // loop: the original registered n identical callbacks, but a
    // CountDownLatch(1) only needs a single countDown.
    cell1.onComplete(_ => {
      latch.countDown()
    })

    for (i <- 1 to n) { // create n predecessors
      val tmpCompleter = mkCompleter[Int]

      // let cell1 depend on the predecessor tmpCompleter
      cell1.when(tmpCompleter.cell)(it => {
        val x = it.head._2
        // If callbacks ever ran concurrently, two interleaved increments
        // would make the counter observable at a value != 1.
        assert(runningCallbacks.incrementAndGet() == 1)
        Thread.`yield`()
        try {
          Thread.sleep(random.nextInt(3))
        } catch {
          case _: InterruptedException => /* ignore */
        }
        assert(runningCallbacks.decrementAndGet() == 0)
        // Complete cell1 once the predecessor carrying value n has fired.
        Outcome(x.get.value * n, x.get.value == n)
      })

      pool.execute(() => tmpCompleter.putFinal(i))
    }

    assert(latch.await(10, TimeUnit.SECONDS))

    assert(cell1.getResult() == n * n)

    pool.onQuiescenceShutdown()
  }

  test("when: state") {
    // cell1 has deps to 1000 cells. All callbacks
    // share a counter (i.e. state) that must not be
    // incremented concurrently
    val n = 1000
    var count = Set[Int]()

    class PowerSetLattice[T] extends Lattice[Set[T]] {

      def join(left: Set[T], right: Set[T]): Set[T] =
        left ++ right

      val bottom: Set[T] =
        Set[T]()

    }

    implicit val theUpdater: Updater[Set[Int]] = Updater.latticeToUpdater(new PowerSetLattice[Int])

    val latch = new CountDownLatch(1)
    val random = new scala.util.Random()

    val theKey = DefaultKey[Set[Int]]
    implicit val pool = HandlerPool[Set[Int]](theKey)
    val completer1 = mkCompleter[Set[Int]]
    val cell1 = completer1.cell

    cell1.onComplete(_ => {
      latch.countDown()
    })

    for (i <- 1 to n) {
      val completer2 = mkCompleter[Set[Int]]
      cell1.when(completer2.cell)(_ => {
        // Unsynchronized read-modify-write of shared state: only safe if
        // callbacks of a sequential cell never run concurrently.
        count = count ++ Set(count.size)
        Thread.`yield`()
        try {
          Thread.sleep(random.nextInt(3))
        } catch {
          case _: InterruptedException => /* ignore */
        }
        Outcome(count, count.size == n)
      })
      pool.execute(() => completer2.putNext(Set(i)))
    }

    // Use a timeout so a lost completion fails the test instead of blocking
    // the suite forever (consistent with the first test, which awaits 10s).
    assert(latch.await(10, TimeUnit.SECONDS))

    assert(cell1.getResult().size == n)

    pool.onQuiescenceShutdown()
  }

}
-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/completerFactory.scala: --------------------------------------------------------------------------------
package com.phaller.rasync.test

import com.phaller.rasync.cell.CellCompleter
import com.phaller.rasync.lattice.Updater
import com.phaller.rasync.pool.HandlerPool

/**
 * Abstracts over how test suites obtain cell completers, so the same suite
 * can run against both concurrent and sequential cells.
 */
trait CompleterFactory {
  /** Create a completer for value type V, optionally attached to entity e. */
  def mkCompleter[V, E >: Null](implicit updater: Updater[V], pool: HandlerPool[V, E], e: E = null): CellCompleter[V, E]
  /** Convenience variant for cells without an entity. */
  def mkCompleter[V](implicit updater: Updater[V], pool: HandlerPool[V, Null]): CellCompleter[V, Null] = mkCompleter[V, Null]
}

trait
ConcurrentCompleterFactory extends CompleterFactory {
  /** Creates concurrent (non-sequential) cell completers. */
  override def mkCompleter[V, E >: Null](implicit updater: Updater[V], pool: HandlerPool[V, E], e: E = null): CellCompleter[V, E] =
    CellCompleter(entity = e)(updater, pool)
}

/** Creates sequential cell completers, whose callbacks never run concurrently. */
trait SequentialCompleterFactory extends CompleterFactory {
  override def mkCompleter[V, E >: Null](implicit updater: Updater[V], pool: HandlerPool[V, E], e: E = null): CellCompleter[V, E] =
    CellCompleter(entity = e, sequential = true)(updater, pool)
}

/** Provides both sequential and concurrent completers side by side, for mixed suites. */
trait MixedCompleterFactory {
  def mkSeqCompleter[V, E >: Null](implicit updater: Updater[V], pool: HandlerPool[V, E], e: E = null): CellCompleter[V, E] =
    CellCompleter[V, E](sequential = true, entity = e)(updater, pool)
  def mkConCompleter[V, E >: Null](implicit updater: Updater[V], pool: HandlerPool[V, E], e: E = null): CellCompleter[V, E] =
    CellCompleter[V, E](entity = e)(updater, pool)

  // Entity-less convenience variants.
  def mkSeqCompleter[V](implicit updater: Updater[V], pool: HandlerPool[V, Null]): CellCompleter[V, Null] = mkSeqCompleter[V, Null]
  def mkConCompleter[V](implicit updater: Updater[V], pool: HandlerPool[V, Null]): CellCompleter[V, Null] = mkConCompleter[V, Null]
}
-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/immutability/ImmutabilityDemo.scala: --------------------------------------------------------------------------------
package com.phaller.rasync.test.immutability

// The following mutabilities are defined w.r.t. the case that the class hierarchy is closed.

// Each declaration is annotated with its expected Object (O:) and Type (T:)
// immutability, as computed by the analysis under test.
abstract class Root( final val l: Int) // O: Immutable // T: Mutable

class SRootI extends Root(1) // O: Immutable // T: Conditionally Immutable

class SRootM(var j: Int) extends Root(1) // O: Mutable // T: Mutable

class SSRootMN extends SRootM(-1) // O: Mutable // T: Mutable

class SSRootICM( final val o: SRootM) extends SRootI // O: Conditionally Immutable // T: Conditionally Immutable

class SRootI_EF extends Root(1) { // [If we have an analysis for effectively final fields…] // O: Immutable // T: Immutable
  // otherwise: // O: Mutable // T: Mutable

  // The scala compiler does not generate a setter for the following private field.
  private[this] var x: Int = 100

  override def toString: String = "SRootI with effectively final field"
}

class SSRootI_EF_I extends SRootI_EF // O: Immutable // T: Immutable

final class SRootII extends Root(1) // O: Immutable // T: Immutable

class X(val i: Int) // O: Immutable // T: Mutable

class Y(var j: Int) extends X(j) // O: Mutable // T: Mutable

class U { // O: ConditionallyImmutable // T: ConditionallyImmutable
  val x: X = new X(10)
}

class V {
  val u: U = new U
}

/*
RESULTS WHEN USING APPLICATION MODE (CLOSED CLASS HIERARCHY)
[info] immutability.Root => ImmutableObject => MutableType
[info] immutability.SRootI => ImmutableObject => ConditionallyImmutableType
[info] immutability.SRootII => ImmutableObject => ImmutableType
[info] immutability.SSRootICM => ConditionallyImmutableObject => ConditionallyImmutableType
[info] immutability.SRootI_EF => MutableObjectByAnalysis => MutableType
[info] immutability.SRootM => MutableObjectByAnalysis => MutableType
[info] immutability.SSRootI_EF_I => MutableObjectByAnalysis => MutableType
[info] immutability.SSRootMN => MutableObjectByAnalysis => MutableType

RESULTS WHEN
USING LIBRARY WITH CLOSED PACKAGES ASSUMPTION
[info] immutability.Root => ImmutableObject => MutableType
[info] immutability.SRootI => ImmutableObject => MutableType (*)
[info] immutability.SRootII => ImmutableObject => ImmutableType
[info] immutability.SSRootICM => ConditionallyImmutableObject => MutableType(*)
[info] immutability.SRootI_EF => MutableObjectByAnalysis => MutableType
[info] immutability.SRootM => MutableObjectByAnalysis => MutableType
[info] immutability.SSRootI_EF_I => MutableObjectByAnalysis => MutableType
[info] immutability.SSRootMN => MutableObjectByAnalysis => MutableType
*/
-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/lattice/Immutability.scala: --------------------------------------------------------------------------------
package com.phaller.rasync
package test
package lattice

import com.phaller.rasync.cell.Cell
import com.phaller.rasync.lattice.{ Key, Lattice }

/**
 * Key for immutability cells. Cyclic dependencies resolve to
 * ConditionallyImmutable if any member of the cycle is conditionally
 * immutable, otherwise to Immutable; independent cells fall back to Immutable.
 */
object ImmutabilityKey extends Key[Immutability, Null] {

  def resolve(cells: Iterable[Cell[Immutability, Null]]): Iterable[(Cell[Immutability, Null], Immutability)] = {
    // A cycle is only as strong as its weakest member: a single
    // conditionally immutable cell taints the whole cycle.
    if (cells.exists(_.getResult() == ConditionallyImmutable))
      cells.map(c => (c, ConditionallyImmutable))
    else
      cells.map(c => (c, Immutable))
  }

  def fallback(cells: Iterable[Cell[Immutability, Null]]): Iterable[(Cell[Immutability, Null], Immutability)] = {
    cells.map(c => (c, Immutable))
  }

  override def toString = "Immutability"
}

// The immutability lattice: Immutable <= ConditionallyImmutable <= Mutable.
sealed trait Immutability
case object Mutable extends Immutability
case object ConditionallyImmutable extends Immutability
case object Immutable extends Immutability

object Immutability {

  implicit object ImmutabilityLattice extends
Lattice[Immutability] {
    /** Join yields the greater of the two values w.r.t. lteq. */
    override def join(v1: Immutability, v2: Immutability): Immutability =
      if (lteq(v2, v1)) v1 else v2

    /** Ordering: Immutable <= ConditionallyImmutable <= Mutable; equal values are related. */
    override def lteq(lhs: Immutability, rhs: Immutability): Boolean =
      lhs == rhs || lhs == Immutable ||
        (lhs == ConditionallyImmutable && rhs != Immutable)

    override val bottom: Immutability = Immutable
  }
}
-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/lattice/IntUpdater.scala: --------------------------------------------------------------------------------
package com.phaller.rasync.test.lattice

import com.phaller.rasync.cell.Cell
import com.phaller.rasync.lattice.{ Key, Updater }

import scala.language.implicitConversions

/**
 * Int-valued key whose toString is the given name.
 * Cyclic dependencies resolve to 0; independent cells fall back to 1.
 */
class StringIntKey(s: String) extends Key[Int, Null] {
  def resolve(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] =
    cells.map(c => (c, 0))

  def fallback(cells: Iterable[Cell[Int, Null]]): Iterable[(Cell[Int, Null], Int)] =
    cells.map(c => (c, 1))

  override def toString = s
}

object StringIntKey {
  /** Allows plain strings to be used where a StringIntKey is expected. */
  implicit def strToIntKey(s: String): StringIntKey =
    new StringIntKey(s)
}

/** Updater over Int that always adopts the incoming value; bottom is 0. */
class IntUpdater extends Updater[Int] {
  override def update(v1: Int, v2: Int): Int =
    if (v1 != v2) v2 else v1

  override val bottom: Int = 0
}

-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/lattice/Purity.scala: --------------------------------------------------------------------------------
package com.phaller.rasync
package test

import com.phaller.rasync.cell.Cell
import com.phaller.rasync.lattice._

/** Key for purity cells: both cycle resolution and fallback yield Pure. */
object PurityKey extends Key[Purity, Null] {

  def resolve(cells:
Iterable[Cell[Purity, Null]]): Iterable[(Cell[Purity, Null], Purity)] = {
    cells.map(c => (c, Pure))
  }

  def fallback(cells: Iterable[Cell[Purity, Null]]): Iterable[(Cell[Purity, Null], Purity)] = {
    cells.map(c => (c, Pure))
  }

  override def toString = "Purity"
}

// Flat lattice: UnknownPurity is bottom; Pure and Impure are incomparable.
sealed trait Purity
case object UnknownPurity extends Purity
case object Pure extends Purity
case object Impure extends Purity

object Purity {
  implicit object PurityOrdering extends PartialOrderingWithBottom[Purity] {
    /** lteq holds only from bottom (UnknownPurity) or between equal values. */
    override def lteq(v1: Purity, v2: Purity): Boolean =
      v1 == UnknownPurity || v1 == v2

    override val bottom: Purity = UnknownPurity
  }
}
-------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/opal/ImmutabilityAnalysis.scala: --------------------------------------------------------------------------------
//package com.phaller.rasync
//package test
//package opal
//
//import java.net.URL
//
//import org.opalj.fpcf._
//
//import scala.collection.JavaConverters._
//
//import scala.concurrent.Await
//import scala.concurrent.duration._
//
//import org.opalj.br.{ Field, ClassFile, ObjectType }
//import org.opalj.br.analyses.{ BasicReport, DefaultOneStepAnalysis, Project, PropertyStoreKey }
//import org.opalj.br.analyses.TypeExtensibilityKey
//import org.opalj.fpcf.analyses.FieldMutabilityAnalysis
//import org.opalj.fpcf.properties.FieldMutability
//
//object ImmutabilityAnalysis extends DefaultOneStepAnalysis {
//
//  override def doAnalyze(
//    project: Project[URL],
//    parameters: Seq[String] = List.empty,
//    isInterrupted: () ⇒ Boolean): BasicReport = {
//    // Run ClassExtensibilityAnalysis
//    val projectStore =
project.get(PropertyStoreKey) 28 | // val manager = project.get(FPCFAnalysesManagerKey) 29 | // //manager.runAll( 30 | // //FieldMutabilityAnalysis 31 | // // REPLACED ObjectImmutabilityAnalysis 32 | // // REPLACED TypeImmutabilityAnalysis 33 | // //) 34 | // 35 | // val startTime = System.currentTimeMillis // Used for measuring execution time 36 | // 37 | // // 1. Initialization of key data structures (two cell(completer) per class file) 38 | // // One for Object Immutability and one for Type Immutability. 39 | // implicit val pool = new HandlerPool() 40 | // 41 | // // classFileToObjectTypeCellCompleter._1 = ObjectImmutability 42 | // // classFileToObjectTypeCellCompleter._2 = TypeImmutability 43 | // var classFileToObjectTypeCellCompleter = 44 | // Map.empty[ClassFile, (CellCompleter[ImmutabilityKey.type, Immutability], CellCompleter[ImmutabilityKey.type, Immutability])] 45 | // for { 46 | // classFile <- project.allProjectClassFiles 47 | // } { 48 | // val cellCompleter1 = CellCompleter[ImmutabilityKey.type, Immutability](ImmutabilityKey) 49 | // val cellCompleter2 = CellCompleter[ImmutabilityKey.type, Immutability](ImmutabilityKey) 50 | // classFileToObjectTypeCellCompleter = classFileToObjectTypeCellCompleter + ((classFile, (cellCompleter1, cellCompleter2))) 51 | // } 52 | // 53 | // val middleTime = System.currentTimeMillis 54 | // 55 | // // java.lang.Object is by definition immutable 56 | // val objectClassFileOption = project.classFile(ObjectType.Object) 57 | // objectClassFileOption.foreach { cf => 58 | // classFileToObjectTypeCellCompleter(cf)._1.putFinal(Immutable) 59 | // classFileToObjectTypeCellCompleter(cf)._2.putFinal(Mutable) 60 | // } 61 | // 62 | // // All interfaces are by definition immutable 63 | // val allInterfaces = project.allProjectClassFiles.par.filter(cf => cf.isInterfaceDeclaration).toList 64 | // allInterfaces.foreach(cf => classFileToObjectTypeCellCompleter(cf)._1.putFinal(Immutable)) 65 | // 66 | // val classHierarchy = 
project.classHierarchy 67 | // import classHierarchy.allSubtypes 68 | // import classHierarchy.rootTypes 69 | // import classHierarchy.isInterface 70 | // // All classes that do not have complete superclass information are mutable 71 | // // due to the lack of knowledge. 72 | // val typesForWhichItMayBePossibleToComputeTheMutability = allSubtypes(ObjectType.Object, reflexive = true) 73 | // val unexpectedRootTypes = rootTypes.filter(rt ⇒ (rt ne ObjectType.Object) && !isInterface(rt).isNo) 74 | // unexpectedRootTypes.map(rt ⇒ allSubtypes(rt, reflexive = true)).flatten.view. 75 | // filter(ot ⇒ !typesForWhichItMayBePossibleToComputeTheMutability.contains(ot)). 76 | // foreach(ot ⇒ project.classFile(ot) foreach { cf ⇒ 77 | // classFileToObjectTypeCellCompleter(cf)._1.putFinal(Mutable) 78 | // }) 79 | // 80 | // // 2. trigger analyses 81 | // for { 82 | // classFile <- project.allProjectClassFiles.par 83 | // } { 84 | // pool.execute(() => { 85 | // if (!classFileToObjectTypeCellCompleter(classFile)._1.cell.isComplete) 86 | // objectImmutabilityAnalysis(project, classFileToObjectTypeCellCompleter, manager, classFile) 87 | // }) 88 | // pool.execute(() => { 89 | // if (!classFileToObjectTypeCellCompleter(classFile)._2.cell.isComplete) 90 | // typeImmutabilityAnalysis(project, classFileToObjectTypeCellCompleter, manager, classFile) 91 | // }) 92 | // } 93 | // pool.whileQuiescentResolveDefault 94 | // pool.shutdown() 95 | // 96 | // val endTime = System.currentTimeMillis 97 | // 98 | // val setupTime = middleTime - startTime 99 | // val analysisTime = endTime - middleTime 100 | // val combinedTime = endTime - startTime 101 | // 102 | // /* Fixes the results so the output looks good */ 103 | // val resultClassFiles = project.allProjectClassFiles.par.filter(!allInterfaces.contains(_)) 104 | // val mutableClassFilesInfo = for { 105 | // cf <- resultClassFiles if (classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() == Mutable) 106 | // } yield (cf.thisType.toJava + " 
=> " + 107 | // classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() + "Object => " + 108 | // classFileToObjectTypeCellCompleter(cf)._2.cell.getResult() + "Type") 109 | // 110 | // val immutableClassFilesInfo = for { 111 | // cf <- resultClassFiles if (classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() == Immutable) 112 | // } yield (cf.thisType.toJava + " => " + 113 | // classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() + "Object => " + 114 | // classFileToObjectTypeCellCompleter(cf)._2.cell.getResult() + "Type") 115 | // 116 | // val conditionallyImmutableClassFilesInfo = for { 117 | // cf <- resultClassFiles if (classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() == ConditionallyImmutable) 118 | // } yield (cf.thisType.toJava + " => " + 119 | // classFileToObjectTypeCellCompleter(cf)._1.cell.getResult() + "Object => " + 120 | // classFileToObjectTypeCellCompleter(cf)._2.cell.getResult() + "Type") 121 | // 122 | // val sortedClassFilesInfo = (immutableClassFilesInfo.toList.sorted ++ 123 | // conditionallyImmutableClassFilesInfo.toList.sorted ++ mutableClassFilesInfo.toList.sorted) 124 | // BasicReport(sortedClassFilesInfo.mkString("\n") + 125 | // s"\nSETUP TIME: $setupTime" + 126 | // s"\nANALYIS TIME: $analysisTime" + 127 | // s"\nCOMBINED TIME: $combinedTime") 128 | // } 129 | // 130 | // /** 131 | // * This function is used for the tests, to not run the external analyses several times. 132 | // * 133 | // * @param project 134 | // * @param manager 135 | // * @return 136 | // */ 137 | // def analyzeWithoutClassExtensibilityAndFieldMutabilityAnalysis( 138 | // project: Project[URL], 139 | // manager: FPCFAnalysesManager): BasicReport = { 140 | // // 1. Initialization of key data structures (two cell(completer) per class file) 141 | // // One for Object Immutability and one for Type Immutability. 
142 | // implicit val pool = new HandlerPool() 143 | // 144 | // // classFileToObjectTypeCellCompleter._1 = ObjectImmutability 145 | // // classFileToObjectTypeCellCompleter._2 = TypeImmutability 146 | // var classFileToObjectTypeCellCompleter = 147 | // Map.empty[ClassFile, (CellCompleter[ImmutabilityKey.type, Immutability], CellCompleter[ImmutabilityKey.type, Immutability])] 148 | // for { 149 | // classFile <- project.allProjectClassFiles 150 | // } { 151 | // val cellCompleter1 = CellCompleter[ImmutabilityKey.type, Immutability](ImmutabilityKey) 152 | // val cellCompleter2 = CellCompleter[ImmutabilityKey.type, Immutability](ImmutabilityKey) 153 | // classFileToObjectTypeCellCompleter = classFileToObjectTypeCellCompleter + ((classFile, (cellCompleter1, cellCompleter2))) 154 | // } 155 | // 156 | // // java.lang.Object is by definition immutable 157 | // val objectClassFileOption = project.classFile(ObjectType.Object) 158 | // objectClassFileOption.foreach { cf => 159 | // classFileToObjectTypeCellCompleter(cf)._1.putFinal(Immutable) 160 | // classFileToObjectTypeCellCompleter(cf)._2.putFinal(Mutable) // Should the TypeImmutability be Mutable? 161 | // } 162 | // 163 | // // All interfaces are by definition immutable 164 | // val allInterfaces = project.allProjectClassFiles.par.filter(cf => cf.isInterfaceDeclaration).toList 165 | // allInterfaces.foreach(cf => classFileToObjectTypeCellCompleter(cf)._1.putFinal(Immutable)) 166 | // 167 | // val classHierarchy = project.classHierarchy 168 | // import classHierarchy.allSubtypes 169 | // import classHierarchy.rootTypes 170 | // import classHierarchy.isInterface 171 | // // All classes that do not have complete superclass information are mutable 172 | // // due to the lack of knowledge. 
173 | // val typesForWhichItMayBePossibleToComputeTheMutability = allSubtypes(ObjectType.Object, reflexive = true) 174 | // val unexpectedRootTypes = rootTypes.filter(rt ⇒ (rt ne ObjectType.Object) && !isInterface(rt).isNo) 175 | // unexpectedRootTypes.map(rt ⇒ allSubtypes(rt, reflexive = true)).flatten.view. 176 | // filter(ot ⇒ !typesForWhichItMayBePossibleToComputeTheMutability.contains(ot)). 177 | // foreach(ot ⇒ project.classFile(ot) foreach { cf ⇒ 178 | // classFileToObjectTypeCellCompleter(cf)._1.putFinal(Mutable) 179 | // }) 180 | // 181 | // // 2. trigger analyses 182 | // for { 183 | // classFile <- project.allProjectClassFiles.par 184 | // } { 185 | // pool.execute(() => { 186 | // if (!classFileToObjectTypeCellCompleter(classFile)._1.cell.isComplete) 187 | // objectImmutabilityAnalysis(project, classFileToObjectTypeCellCompleter, manager, classFile) 188 | // }) 189 | // pool.execute(() => { 190 | // if (!classFileToObjectTypeCellCompleter(classFile)._2.cell.isComplete) 191 | // typeImmutabilityAnalysis(project, classFileToObjectTypeCellCompleter, manager, classFile) 192 | // }) 193 | // } 194 | // pool.whileQuiescentResolveCell 195 | // pool.shutdown() 196 | // 197 | // /* Fixes the results so the output looks good */ 198 | // val mutableClassFilesInfo = for { 199 | // (cf, (objImmutability, typeImmutability)) <- classFileToObjectTypeCellCompleter if (objImmutability.cell.getResult() == Mutable) 200 | // } yield (cf.thisType.toJava + " => " + objImmutability.cell.getResult() + "Object => " + typeImmutability.cell.getResult() + "Type") 201 | // 202 | // val immutableClassFilesInfo = for { 203 | // (cf, (objImmutability, typeImmutability)) <- classFileToObjectTypeCellCompleter if (objImmutability.cell.getResult() == Immutable) 204 | // } yield (cf.thisType.toJava + " => " + objImmutability.cell.getResult() + "Object => " + typeImmutability.cell.getResult() + "Type") 205 | // 206 | // val conditionallyImmutableClassFilesInfo = for { 207 | // (cf, 
(objImmutability, typeImmutability)) <- classFileToObjectTypeCellCompleter if (objImmutability.cell.getResult() == ConditionallyImmutable) 208 | // } yield (cf.thisType.toJava + " => " + objImmutability.cell.getResult() + "Object => " + typeImmutability.cell.getResult() + "Type") 209 | // 210 | // val sortedClassFilesInfo = (immutableClassFilesInfo.toList.sorted ++ 211 | // conditionallyImmutableClassFilesInfo.toList.sorted ++ mutableClassFilesInfo.toList.sorted) 212 | // BasicReport(sortedClassFilesInfo) 213 | // } 214 | // 215 | // /** 216 | // * Determines a class files' ObjectImmutability. 217 | // */ 218 | // def objectImmutabilityAnalysis( 219 | // project: Project[URL], 220 | // classFileToObjectTypeCellCompleter: Map[ClassFile, (CellCompleter[ImmutabilityKey.type, Immutability], CellCompleter[ImmutabilityKey.type, Immutability])], 221 | // manager: FPCFAnalysesManager, 222 | // cf: ClassFile): Unit = { 223 | // val cellCompleter = classFileToObjectTypeCellCompleter(cf)._1 224 | // 225 | // val classHierarchy = project.classHierarchy 226 | // val directSuperTypes = classHierarchy.directSupertypes(cf.thisType) 227 | // 228 | // // Check fields to determine ObjectImmutability 229 | // val nonFinalInstanceFields = cf.fields.collect { case f if !f.isStatic && !f.isFinal => f } 230 | // 231 | // if (!nonFinalInstanceFields.isEmpty) 232 | // cellCompleter.putFinal(Mutable) 233 | // 234 | // // If the cell hasn't already been completed with an ObjectImmutability, then it is 235 | // // dependent on FieldMutability and its superclasses 236 | // if (!cellCompleter.cell.isComplete) { 237 | // if (cf.fields.exists(f => !f.isStatic && f.fieldType.isArrayType)) 238 | // cellCompleter.putNext(ConditionallyImmutable) 239 | // else { 240 | // val fieldTypes: Set[ObjectType] = 241 | // cf.fields.collect { 242 | // case f if !f.isStatic && f.fieldType.isObjectType => f.fieldType.asObjectType 243 | // }.toSet 244 | // 245 | // val hasUnresolvableDependencies = 246 | // 
fieldTypes.exists { t => 247 | // project.classFile(t) match { 248 | // case None => true /* we have an unresolved dependency */ 249 | // case Some(classFile) => false /* do nothing */ 250 | // } 251 | // } 252 | // 253 | // if (hasUnresolvableDependencies) 254 | // cellCompleter.putNext(ConditionallyImmutable) 255 | // else { 256 | // val finalInstanceFields = cf.fields.collect { case f if !f.isStatic && f.isFinal => f } 257 | // finalInstanceFields.foreach { f => 258 | // if (f.fieldType.isObjectType) { 259 | // project.classFile(f.fieldType.asObjectType) match { 260 | // case Some(classFile) => 261 | // val fieldTypeCell = classFileToObjectTypeCellCompleter(classFile)._2.cell 262 | // cellCompleter.cell.whenNext( 263 | // fieldTypeCell, 264 | // (fieldImm: Immutability) => fieldImm match { 265 | // case Mutable | ConditionallyImmutable => NextOutcome(ConditionallyImmutable) 266 | // case Immutable => NoOutcome 267 | // }) 268 | // case None => /* Do nothing */ 269 | // } 270 | // } 271 | // } 272 | // } 273 | // } 274 | // // Check with superclass to determine ObjectImmutability 275 | // val directSuperClasses = directSuperTypes. 276 | // filter(superType => project.classFile(superType) != None). 277 | // map(superType => project.classFile(superType).get) 278 | // 279 | // directSuperClasses foreach { superClass => 280 | // cellCompleter.cell.whenNext( 281 | // classFileToObjectTypeCellCompleter(superClass)._1.cell, 282 | // (imm: Immutability) => imm match { 283 | // case Immutable => NoOutcome 284 | // case Mutable => FinalOutcome(Mutable) 285 | // case ConditionallyImmutable => NextOutcome(ConditionallyImmutable) 286 | // }) 287 | // } 288 | // } 289 | // } 290 | // 291 | // /** 292 | // * Determines a class files' TypeImmutability. 
293 | // */ 294 | // def typeImmutabilityAnalysis( 295 | // project: Project[URL], 296 | // classFileToObjectTypeCellCompleter: Map[ClassFile, (CellCompleter[ImmutabilityKey.type, Immutability], CellCompleter[ImmutabilityKey.type, Immutability])], 297 | // manager: FPCFAnalysesManager, 298 | // cf: ClassFile): Unit = { 299 | // val typeExtensibility = project.get(TypeExtensibilityKey) 300 | // val cellCompleter = classFileToObjectTypeCellCompleter(cf)._2 301 | // val isExtensible = typeExtensibility(cf.thisType) 302 | // if (isExtensible.isYesOrUnknown) 303 | // cellCompleter.putFinal(Mutable) 304 | // 305 | // val classHierarchy = project.classHierarchy 306 | // val directSubtypes = classHierarchy.directSubtypesOf(cf.thisType) 307 | // 308 | // if (!cellCompleter.cell.isComplete) { 309 | // // If this class file doesn't have subtypes, then the TypeImmutability is the same as 310 | // // the ObjectImmutability 311 | // if (cf.isFinal || directSubtypes.isEmpty) { 312 | // cellCompleter.cell.whenNext( 313 | // classFileToObjectTypeCellCompleter(cf)._1.cell, 314 | // _ match { 315 | // case Immutable => NoOutcome 316 | // case Mutable => FinalOutcome(Mutable) 317 | // case ConditionallyImmutable => NextOutcome(ConditionallyImmutable) 318 | // }) 319 | // } else { 320 | // val unavailableSubtype = directSubtypes.find(t ⇒ project.classFile(t).isEmpty) 321 | // if (unavailableSubtype.isDefined) 322 | // cellCompleter.putFinal(Mutable) 323 | // 324 | // if (!cellCompleter.cell.isComplete) { 325 | // // Check subclasses to determine TypeImmutability 326 | // val directSubclasses = directSubtypes map { subtype ⇒ project.classFile(subtype).get } 327 | // directSubclasses foreach { subclass => 328 | // cellCompleter.cell.whenNext( 329 | // classFileToObjectTypeCellCompleter(subclass)._2.cell, 330 | // _ match { 331 | // case Immutable => NoOutcome 332 | // case Mutable => FinalOutcome(Mutable) 333 | // case ConditionallyImmutable => NextOutcome(ConditionallyImmutable) 334 | 
package com.phaller.rasync
package test
package opal

import org.scalatest.FunSuite

import org.opalj.br.analyses.Project

import java.io.File

/**
 * Integration tests for [[PurityAnalysis]], run against the compiled test
 * sources (pureness/Demo.java) located in the "core" project directory.
 */
class OPALSuite extends FunSuite {

  test("purity analysis with Demo.java: pure methods") {
    val file = new File("core")
    val lib = Project(file)

    val report = PurityAnalysis.doAnalyze(lib, List.empty, () => false).toConsoleString.split("\n")

    // Methods expected to be reported as pure; each entry must occur
    // verbatim as one line of the report.
    val pureMethods = List(
      "pureness.Demo{ public static int pureThoughItUsesField(int,int) }",
      "pureness.Demo{ public static int pureThoughItUsesField2(int,int) }",
      "pureness.Demo{ public static int simplyPure(int,int) }",
      "pureness.Demo{ static int foo(int) }",
      "pureness.Demo{ static int bar(int) }",
      "pureness.Demo{ static int fooBar(int) }",
      "pureness.Demo{ static int barFoo(int) }",
      "pureness.Demo{ static int m1(int) }",
      "pureness.Demo{ static int m2(int) }",
      "pureness.Demo{ static int m3(int) }",
      "pureness.Demo{ static int cm1(int) }",
      "pureness.Demo{ static int cm2(int) }",
      "pureness.Demo{ static int scc0(int) }",
      "pureness.Demo{ static int scc1(int) }",
      "pureness.Demo{ static int scc2(int) }",
      "pureness.Demo{ static int scc3(int) }")

    // Every expected pure method must be present in the report.
    val missing = pureMethods.filter(!report.contains(_))

    assert(missing.isEmpty, report.mkString("\n"))
  }

  test("purity analysis with Demo.java: impure methods") {
    val file = new File("core")
    val lib = Project(file)

    val report = PurityAnalysis.doAnalyze(lib, List.empty, () => false).toConsoleString.split("\n")

    // Methods that must NOT be classified as pure. Note: these entries are
    // bare signatures, whereas report lines are fully qualified
    // ("pureness.Demo{ ... }").
    val impureMethods = List(
      "public static int impure(int)",
      "static int npfoo(int)",
      "static int npbar(int)",
      "static int mm1(int)",
      "static int mm2(int)",
      "static int mm3(int)",
      "static int m1np(int)",
      "static int m2np(int)",
      "static int m3np(int)",
      "static int cpure(int)",
      "static int cpureCallee(int)",
      "static int cpureCalleeCallee1(int)",
      "static int cpureCalleeCallee2(int)",
      "static int cpureCalleeCalleeCallee(int)",
      "static int cpureCalleeCalleeCalleeCallee(int)")

    // BUG FIX: the previous check used `report.contains(_)`, i.e. exact
    // element equality of a bare signature against fully qualified report
    // lines -- that can never match, so the assertion was vacuously true.
    // Use substring search to detect impure methods wrongly reported pure.
    val wronglyPure = impureMethods.filter(m => report.exists(_.contains(m)))

    assert(wronglyPure.isEmpty, report.mkString("\n"))
  }

  /*test("ImmutabilityAnalysis: Concurrency") {
    val file = new File("lib")
    val lib = Project(file)

    val manager = lib.get(FPCFAnalysesManagerKey)
    manager.run(ClassExtensibilityAnalysis)
    manager.runAll(
      FieldMutabilityAnalysis
    )

    // Compare every next result received from the same analysis to `report`
    val report = ImmutabilityAnalysis.analyzeWithoutClassExtensibilityAndFieldMutabilityAnalysis(lib, manager).toConsoleString.split("\n")

    for (i <- 0 to 1000) {
      // Next result
      val newReport = ImmutabilityAnalysis.analyzeWithoutClassExtensibilityAndFieldMutabilityAnalysis(lib, manager).toConsoleString.split("\n")

      // Differs between the elements in `report` and `newReport`.
      // If they have the exact same elements, `finalRes` should be an
      // empty list.
      val finalRes = report.filterNot(newReport.toSet)

      assert(finalRes.isEmpty)
    }
  }*/

}
// A scheduling strategy tailored to the purity analysis: updates that carry
// a final Impure result are propagated with the highest priority, because
// they allow dependent cells to be completed (as Impure) right away.
object PurityStrategy extends SchedulingStrategy[Purity, Null] {

  /** Dependency updates carrying a final Impure outcome run first (-1); all others last (1). */
  override def calcPriority(dependentCell: Cell[Purity, Null], other: Cell[Purity, Null], value: Try[ValueOutcome[Purity]]): Int =
    value match {
      case scala.util.Success(FinalOutcome(Impure)) => -1
      case _ => 1
    }

  /** Completions with the value Pure (priority 0) are scheduled after all other completions (-1). */
  override def calcPriority(dependentCell: Cell[Purity, Null], value: Try[Purity]): Int =
    if (value == scala.util.Success(Pure)) 0 else -1
}
/**
 * Benchmark entry point: runs the purity analysis on the project given as
 * the last command-line argument, once as a warm-up and then five times per
 * scheduling strategy, printing the measured wall-clock time of each run.
 */
override def main(args: Array[String]): Unit = {
  val lib = Project(new java.io.File(args(args.length - 1))) //JRELibraryFolder.getAbsolutePath))

  println("Heap size: " + Runtime.getRuntime().maxMemory())

  // Warm-up run; its report is intentionally discarded (the previous code
  // bound it to an unused local).
  schedulingStrategy = new DefaultScheduling[Purity, Null]
  PerformanceEvaluation.time {
    PurityAnalysis.doAnalyze(lib.recreate(), List.empty, () => false)
  } { t => println(s"DefaultScheduling(Warmup),${t.timeSpan}") }

  for {
    scheduling <- List(
      new DefaultScheduling[Purity, Null],
      new SourcesWithManyTargetsFirst[Purity, Null],
      new SourcesWithManyTargetsLast[Purity, Null],
      new TargetsWithManySourcesFirst[Purity, Null],
      new TargetsWithManySourcesLast[Purity, Null],
      new TargetsWithManyTargetsFirst[Purity, Null],
      new TargetsWithManyTargetsLast[Purity, Null],
      new SourcesWithManySourcesFirst[Purity, Null],
      new SourcesWithManySourcesLast[Purity, Null],
      PurityStrategy)
    i <- 0 until 5 // repetitions per strategy
  } {
    // Recreate the project for every run so that no cached analysis state
    // leaks between measurements.
    val p = lib.recreate()
    schedulingStrategy = scheduling
    PerformanceEvaluation.time {
      val report = PurityAnalysis.doAnalyze(p, List.empty, () => false)
    } { t => println(s"$scheduling,${t.timeSpan}") }
    //println(report.toConsoleString.split("\n").slice(0, 2).mkString("\n"))
  }
}

// Strategy used by doAnalyze; assigned by main before each benchmark run.
var schedulingStrategy: SchedulingStrategy[Purity, Null] = null

/**
 * Runs the purity analysis on all project methods.
 *
 * Sets up one reactive-async cell per method, triggers all of them in
 * parallel, waits for quiescence (cycles are resolved via PurityKey) and
 * reports all methods whose cell ended up with the value Pure.
 */
override def doAnalyze(
  project: Project[URL],
  parameters: Seq[String] = List.empty,
  isInterrupted: () => Boolean): BasicReport = {

  val startTime = System.currentTimeMillis // Used for measuring execution time
  // 1. Initialization of key data structures (one cell(completer) per method)
  implicit val pool: HandlerPool[Purity, Null] = new HandlerPool(key = PurityKey, parallelism = 10, schedulingStrategy = schedulingStrategy)
  var methodToCell = Map.empty[Method, Cell[Purity, Null]]
  for {
    classFile <- project.allProjectClassFiles
    method <- classFile.methods
  } {
    val cell = pool.mkCell(_ => {
      analyze(project, methodToCell, classFile, method)
    })(Updater.partialOrderingToUpdater)
    methodToCell = methodToCell + ((method, cell))
  }

  val middleTime = System.currentTimeMillis

  // 2. Trigger all analyses in parallel.
  for {
    classFile <- project.allProjectClassFiles.par
    method <- classFile.methods
  } {
    methodToCell(method).trigger()
  }
  val fut = pool.quiescentResolveCell
  Await.ready(fut, 30.minutes)
  pool.shutdown()

  val endTime = System.currentTimeMillis

  val setupTime = middleTime - startTime
  val analysisTime = endTime - middleTime
  val combinedTime = endTime - startTime

  // Collect all methods whose cell carries the final value Pure.
  val pureMethods = methodToCell.filter(_._2.getResult() match {
    case Pure => true
    case _ => false
  }).keys

  val pureMethodsInfo = pureMethods.map(m => m.toJava).toList.sorted

  // Typo fix: the report previously printed "ANALYIS TIME".
  BasicReport(s"pure methods analysis:\nPURE=${pureMethods.size}\n\n" + pureMethodsInfo.mkString("\n") +
    s"\nSETUP TIME: $setupTime" +
    s"\nANALYSIS TIME: $analysisTime" +
    s"\nCOMBINED TIME: $combinedTime")
}

/**
 * Determines the purity of the given method.
 *
 * Returns FinalOutcome(Impure) as soon as a body-less method, an object
 * parameter, or an impure instruction is encountered. Otherwise the method
 * is Pure if it calls nothing, or provisionally UnknownPurity with
 * dependencies registered (callback [[c]]) on all callees' cells.
 */
def analyze(
  project: Project[URL],
  methodToCell: Map[Method, Cell[Purity, Null]],
  classFile: ClassFile,
  method: Method): Outcome[Purity] = {
  import project.nonVirtualCall

  val cell = methodToCell(method)

  if ( // Due to a lack of knowledge, we classify all native methods or methods that
    // belong to a library (and hence lack the body) as impure...
    method.body.isEmpty /*HERE: method.isNative || "isLibraryMethod(method)"*/ ||
    // for simplicity we are just focusing on methods that do not take objects as parameters
    method.parameterTypes.exists(!_.isBaseType)) {
    return FinalOutcome(Impure)
  }

  val dependencies = scala.collection.mutable.Set.empty[Method]
  val declaringClassType = classFile.thisType
  val methodDescriptor = method.descriptor
  val methodName = method.name
  val body = method.body.get
  val instructions = body.instructions
  val maxPC = instructions.size

  var currentPC = 0
  while (currentPC < maxPC) {
    val instruction = instructions(currentPC)

    (instruction.opcode: @scala.annotation.switch) match {
      case GETSTATIC.opcode =>
        val GETSTATIC(declaringClass, fieldName, fieldType) = instruction
        import project.resolveFieldReference
        resolveFieldReference(declaringClass, fieldName, fieldType) match {

          case Some(field) if field.isFinal => NoOutcome
          /* Nothing to do; constants do not impede purity! */

          // case Some(field) if field.isPrivate /*&& field.isNonFinal*/ =>
          //   check if the field is effectively final

          case _ =>
            return FinalOutcome(Impure);
        }

      case INVOKESPECIAL.opcode | INVOKESTATIC.opcode => instruction match {

        case MethodInvocationInstruction(`declaringClassType`, _, `methodName`, `methodDescriptor`) =>
        // We have a self-recursive call; such calls do not influence
        // the computation of the method's purity and are ignored.
        // Let's continue with the evaluation of the next instruction.

        case mii: NonVirtualMethodInvocationInstruction =>

          nonVirtualCall(method.classFile.thisType, mii) match {

            case Success(callee) =>
              /* Recall that self-recursive calls are handled earlier! */
              dependencies.add(callee)

            case _ /* Empty or Failure */ =>
              // We know nothing about the target method (it is not
              // found in the scope of the current project).
              return FinalOutcome(Impure)
          }

      }

      case NEW.opcode |
        GETFIELD.opcode |
        PUTFIELD.opcode | PUTSTATIC.opcode |
        NEWARRAY.opcode | MULTIANEWARRAY.opcode | ANEWARRAY.opcode |
        AALOAD.opcode | AASTORE.opcode |
        BALOAD.opcode | BASTORE.opcode |
        CALOAD.opcode | CASTORE.opcode |
        SALOAD.opcode | SASTORE.opcode |
        IALOAD.opcode | IASTORE.opcode |
        LALOAD.opcode | LASTORE.opcode |
        DALOAD.opcode | DASTORE.opcode |
        FALOAD.opcode | FASTORE.opcode |
        ARRAYLENGTH.opcode |
        MONITORENTER.opcode | MONITOREXIT.opcode |
        INVOKEDYNAMIC.opcode | INVOKEVIRTUAL.opcode | INVOKEINTERFACE.opcode =>
        return FinalOutcome(Impure)

      case _ =>
      /* All other instructions (IFs, Load/Stores, Arith., etc.) are pure. */
    }
    currentPC = body.pcOfNextInstruction(currentPC)
  }

  // Every method that is not identified as being impure is (conditionally) pure.
  if (dependencies.isEmpty) {
    FinalOutcome(Pure)
  } else {
    // Register callback `c` on all callee cells; the own outcome stays
    // provisional until the dependees are resolved.
    cell.when(dependencies.map(methodToCell))(c)
    NextOutcome(UnknownPurity) // == NoOutcome
  }
}

/**
 * Callback for dependee updates: if any dependee became finally Impure (or
 * failed), the dependent cell becomes Impure; otherwise nothing new is known.
 */
def c(v: Iterable[(Cell[Purity, Null], Try[ValueOutcome[Purity]])]): Outcome[Purity] = {
  // If any dependee is Impure, the dependent Cell is impure.
  // Otherwise, we do not know anything new.
  // Exceptions are treated like Impure results (NOTE(review): failures are
  // folded into Impure rather than rethrown here -- confirm intended).
  if (v.collectFirst({
    case (_, scala.util.Success(FinalOutcome(Impure))) => true
    case (_, scala.util.Failure(_)) => true
  }).isDefined)
    FinalOutcome(Impure)
  else NoOutcome
}
*/ 14 | type V = DUVar[KnownTypedValue] 15 | 16 | def flows: Map[AbstractIFDSAnalysis.Statement, Set[DataFlowFact]] 17 | 18 | override def equals(that: Any): Boolean = that match { 19 | case other: IFDSProperty[DataFlowFact] ⇒ flows == other.flows 20 | case _ ⇒ false 21 | } 22 | 23 | override def hashCode(): Int = flows.hashCode() 24 | } -------------------------------------------------------------------------------- /core/src/test/scala/com/phaller/rasync/test/opal/ifds/TestTaintAnalysis.scala: -------------------------------------------------------------------------------- 1 | /* BSD 2-Clause License - see OPAL/LICENSE for details. */ 2 | package com.phaller.rasync.test.opal.ifds 3 | 4 | import java.io.File 5 | import java.net.URL 6 | 7 | import com.phaller.rasync.pool._ 8 | 9 | import org.opalj.collection.immutable.RefArray 10 | import org.opalj.br.DeclaredMethod 11 | import org.opalj.br.ObjectType 12 | import org.opalj.br.Method 13 | import org.opalj.br.analyses.SomeProject 14 | import org.opalj.br.analyses.Project 15 | import org.opalj.fpcf.{ PropertyKey, PropertyStore, PropertyStoreContext } 16 | import org.opalj.fpcf.seq.PKESequentialPropertyStore 17 | import org.opalj.bytecode.JRELibraryFolder 18 | import org.opalj.log.LogContext 19 | import org.opalj.tac._ 20 | import org.opalj.util.{ Nanoseconds, PerformanceEvaluation } 21 | import org.scalatest.FunSuite 22 | import scala.collection.immutable.ListSet 23 | import scala.concurrent.Await 24 | import scala.concurrent.duration.Duration 25 | 26 | import com.phaller.rasync.test.opal.ifds.AbstractIFDSAnalysis.Statement 27 | import com.phaller.rasync.test.opal.ifds.AbstractIFDSAnalysis.V 28 | import com.phaller.rasync.util.Counter 29 | import org.opalj.bytecode 30 | 31 | import org.opalj.br.fpcf.PropertyStoreKey 32 | import org.opalj.br.fpcf.FPCFAnalysesManagerKey 33 | import org.opalj.ai.domain.l0.PrimitiveTACAIDomain 34 | import org.opalj.tac.fpcf.analyses.cg.CallGraphDeserializerScheduler 35 | 36 | trait Fact 
extends AbstractIFDSFact 37 | 38 | case class Variable(index: Int) extends Fact 39 | //case class ArrayElement(index: Int, element: Int) extends Fact 40 | case class StaticField(classType: ObjectType, fieldName: String) extends Fact 41 | case class InstanceField(index: Int, classType: ObjectType, fieldName: String) extends Fact 42 | case class FlowFact(flow: ListSet[Method]) extends Fact { 43 | override val hashCode: Int = { 44 | // HERE, a foldLeft introduces a lot of overhead due to (un)boxing. 45 | var r = 1 46 | flow.foreach(f ⇒ r = (r + f.hashCode()) * 31) 47 | r 48 | } 49 | } 50 | 51 | case object NullFact extends Fact with AbstractIFDSNullFact 52 | 53 | /** 54 | * A simple IFDS taint analysis. 55 | * 56 | * @author Dominik Helm 57 | */ 58 | class TestTaintAnalysis( 59 | parallelism: Int = Runtime.getRuntime.availableProcessors(), 60 | scheduling: SchedulingStrategy[IFDSProperty[Fact], (DeclaredMethod, Fact)])( 61 | implicit 62 | val project: SomeProject) extends AbstractIFDSAnalysis[Fact](parallelism, scheduling)(project) { 63 | 64 | override val property: IFDSPropertyMetaInformation[Fact] = Taint 65 | 66 | override def waitForCompletion(duration: Duration = Duration("10h")): Unit = { 67 | val fut = pool.quiescentResolveCell 68 | Await.ready(fut, duration) 69 | pool.shutdown() 70 | } 71 | 72 | // Methods below have not been changed when migrating the code to RA 73 | 74 | override def createProperty(result: Map[Statement, Set[Fact]]): IFDSProperty[Fact] = { 75 | new Taint(result) 76 | } 77 | 78 | override def normalFlow(stmt: Statement, succ: Statement, in: Set[Fact]): Set[Fact] = 79 | stmt.stmt.astID match { 80 | case Assignment.ASTID ⇒ 81 | handleAssignment(stmt, stmt.stmt.asAssignment.expr, in) 82 | /*case ArrayStore.ASTID ⇒ 83 | val store = stmt.stmt.asArrayStore 84 | val definedBy = store.arrayRef.asVar.definedBy 85 | val index = getConstValue(store.index, stmt.code) 86 | if (isTainted(store.value, in)) 87 | if (index.isDefined) // Taint known array 
index 88 | // Instead of using an iterator, we are going to use internal iteration 89 | // in ++ definedBy.iterator.map(ArrayElement(_, index.get)) 90 | definedBy.foldLeft(in) { (c, n) ⇒ c + ArrayElement(n, index.get) } 91 | else // Taint whole array if index is unknown 92 | // Instead of using an iterator, we are going to use internal iteration: 93 | // in ++ definedBy.iterator.map(Variable) 94 | definedBy.foldLeft(in) { (c, n) ⇒ c + Variable(n) } 95 | else in*/ 96 | case PutStatic.ASTID ⇒ 97 | val put = stmt.stmt.asPutStatic 98 | if (isTainted(put.value, in)) in + StaticField(put.declaringClass, put.name) 99 | else in 100 | /*case PutField.ASTID ⇒ 101 | val put = stmt.stmt.asPutField 102 | if (isTainted(put.value, in)) in + StaticField(put.declaringClass, put.name) 103 | else in*/ 104 | case PutField.ASTID ⇒ 105 | val put = stmt.stmt.asPutField 106 | val definedBy = put.objRef.asVar.definedBy 107 | if (isTainted(put.value, in)) 108 | definedBy.foldLeft(in) { (in, defSite) ⇒ 109 | in + InstanceField(defSite, put.declaringClass, put.name) 110 | } 111 | else in 112 | case _ ⇒ in 113 | } 114 | 115 | /** 116 | * Returns true if the expression contains a taint. 117 | */ 118 | def isTainted(expr: Expr[V], in: Set[Fact]): Boolean = { 119 | expr.isVar && in.exists { 120 | case Variable(index) ⇒ expr.asVar.definedBy.contains(index) 121 | //case ArrayElement(index, _) ⇒ expr.asVar.definedBy.contains(index) 122 | case InstanceField(index, _, _) ⇒ expr.asVar.definedBy.contains(index) 123 | case _ ⇒ false 124 | } 125 | } 126 | 127 | /** 128 | * Returns the constant int value of an expression if it exists, None otherwise. 129 | */ 130 | /*def getConstValue(expr: Expr[V], code: Array[Stmt[V]]): Option[Int] = { 131 | if (expr.isIntConst) Some(expr.asIntConst.value) 132 | else if (expr.isVar) { 133 | // TODO The following looks optimizable! 
134 | val constVals = expr.asVar.definedBy.iterator.map[Option[Int]] { idx ⇒ 135 | if (idx >= 0) { 136 | val stmt = code(idx) 137 | if (stmt.astID == Assignment.ASTID && stmt.asAssignment.expr.isIntConst) 138 | Some(stmt.asAssignment.expr.asIntConst.value) 139 | else 140 | None 141 | } else None 142 | }.toIterable 143 | if (constVals.forall(option ⇒ option.isDefined && option.get == constVals.head.get)) 144 | constVals.head 145 | else None 146 | } else None 147 | }*/ 148 | 149 | def handleAssignment(stmt: Statement, expr: Expr[V], in: Set[Fact]): Set[Fact] = 150 | expr.astID match { 151 | case Var.ASTID ⇒ 152 | val newTaint = in.collect { 153 | case Variable(index) if expr.asVar.definedBy.contains(index) ⇒ 154 | Some(Variable(stmt.index)) 155 | /*case ArrayElement(index, taintIndex) if expr.asVar.definedBy.contains(index) ⇒ 156 | Some(ArrayElement(stmt.index, taintIndex))*/ 157 | case _ ⇒ None 158 | }.flatten 159 | in ++ newTaint 160 | /*case ArrayLoad.ASTID ⇒ 161 | val load = expr.asArrayLoad 162 | if (in.exists { 163 | // The specific array element may be tainted 164 | case ArrayElement(index, taintedIndex) ⇒ 165 | val element = getConstValue(load.index, stmt.code) 166 | load.arrayRef.asVar.definedBy.contains(index) && 167 | (element.isEmpty || taintedIndex == element.get) 168 | // Or the whole array 169 | case Variable(index) ⇒ load.arrayRef.asVar.definedBy.contains(index) 170 | case _ ⇒ false 171 | }) 172 | in + Variable(stmt.index) 173 | else 174 | in*/ 175 | case GetStatic.ASTID ⇒ 176 | val get = expr.asGetStatic 177 | if (in.contains(StaticField(get.declaringClass, get.name))) 178 | in + Variable(stmt.index) 179 | else in 180 | /*case GetField.ASTID ⇒ 181 | val get = expr.asGetField 182 | if (in.contains(StaticField(get.declaringClass, get.name))) 183 | in + Variable(stmt.index) 184 | else in*/ 185 | case GetField.ASTID ⇒ 186 | val get = expr.asGetField 187 | if (in.exists { 188 | // The specific field may be tainted 189 | case InstanceField(index, _, 
taintedField) ⇒ 190 | taintedField == get.name && get.objRef.asVar.definedBy.contains(index) 191 | // Or the whole object 192 | case Variable(index) ⇒ get.objRef.asVar.definedBy.contains(index) 193 | case _ ⇒ false 194 | }) 195 | in + Variable(stmt.index) 196 | else 197 | in 198 | case _ ⇒ in 199 | } 200 | 201 | override def callFlow( 202 | stmt: Statement, 203 | callee: DeclaredMethod, 204 | in: Set[Fact]): Set[Fact] = { 205 | val allParams = asCall(stmt.stmt).allParams 206 | if (callee.name == "sink") 207 | if (in.exists { 208 | case Variable(index) ⇒ 209 | allParams.exists(p ⇒ p.asVar.definedBy.contains(index)) 210 | case _ ⇒ false 211 | }) { 212 | println(s"Found flow: $stmt") 213 | } 214 | if (callee.name == "forName" && (callee.declaringClassType eq ObjectType.Class) && 215 | callee.descriptor.parameterTypes == RefArray(ObjectType.String)) 216 | if (in.exists { 217 | case Variable(index) ⇒ 218 | asCall(stmt.stmt).params.exists(p ⇒ p.asVar.definedBy.contains(index)) 219 | case _ ⇒ false 220 | }) { 221 | println(s"Found flow: $stmt") 222 | } 223 | if (true || (callee.descriptor.returnType eq ObjectType.Class) || 224 | (callee.descriptor.returnType eq ObjectType.Object) || 225 | (callee.descriptor.returnType eq ObjectType.String)) { 226 | var facts = Set.empty[Fact] 227 | in.foreach { 228 | case Variable(index) ⇒ // Taint formal parameter if actual parameter is tainted 229 | allParams.iterator.zipWithIndex.foreach { 230 | case (param, pIndex) if param.asVar.definedBy.contains(index) ⇒ 231 | facts += Variable(paramToIndex(pIndex, !callee.definedMethod.isStatic)) 232 | case _ ⇒ // Nothing to do 233 | } 234 | 235 | /*case ArrayElement(index, taintedIndex) ⇒ 236 | // Taint element of formal parameter if element of actual parameter is tainted 237 | allParams.zipWithIndex.collect { 238 | case (param, pIndex) if param.asVar.definedBy.contains(index) ⇒ 239 | ArrayElement(paramToIndex(pIndex, !callee.definedMethod.isStatic), taintedIndex) 240 | }*/ 241 | 242 | case 
InstanceField(index, declClass, taintedField) ⇒ 243 | // Taint field of formal parameter if field of actual parameter is tainted 244 | // Only if the formal parameter is of a type that may have that field! 245 | allParams.iterator.zipWithIndex.foreach { 246 | case (param, pIndex) if param.asVar.definedBy.contains(index) && 247 | (paramToIndex(pIndex, !callee.definedMethod.isStatic) != -1 || 248 | classHierarchy.isSubtypeOf(declClass, callee.declaringClassType)) ⇒ 249 | facts += InstanceField(paramToIndex(pIndex, !callee.definedMethod.isStatic), declClass, taintedField) 250 | case _ ⇒ // Nothing to do 251 | } 252 | case sf: StaticField ⇒ 253 | facts += sf 254 | } 255 | facts 256 | } else Set.empty 257 | } 258 | 259 | override def returnFlow( 260 | stmt: Statement, 261 | callee: DeclaredMethod, 262 | exit: Statement, 263 | succ: Statement, 264 | in: Set[Fact]): Set[Fact] = { 265 | if (callee.name == "source" && stmt.stmt.astID == Assignment.ASTID) 266 | Set(Variable(stmt.index)) 267 | else if (callee.name == "sanitize") 268 | Set.empty 269 | else { 270 | val call = asCall(stmt.stmt) 271 | val allParams = call.allParams 272 | var flows: Set[Fact] = Set.empty 273 | in.foreach { 274 | /*case ArrayElement(index, taintedIndex) if index < 0 && index > -100 ⇒ 275 | // Taint element of actual parameter if element of formal parameter is tainted 276 | val param = 277 | allParams(paramToIndex(index, !callee.definedMethod.isStatic)) 278 | flows ++= param.asVar.definedBy.iterator.map(ArrayElement(_, taintedIndex))*/ 279 | 280 | case InstanceField(index, declClass, taintedField) if index < 0 && index > -255 ⇒ 281 | // Taint field of actual parameter if field of formal parameter is tainted 282 | val param = allParams(paramToIndex(index, !callee.definedMethod.isStatic)) 283 | param.asVar.definedBy.foreach { defSite ⇒ 284 | flows += InstanceField(defSite, declClass, taintedField) 285 | } 286 | 287 | case sf: StaticField ⇒ 288 | flows += sf 289 | 290 | case FlowFact(flow) ⇒ 291 | val 
newFlow = flow + stmt.method 292 | if (entryPoints.contains(declaredMethods(exit.method))) { 293 | //println(s"flow: "+newFlow.map(_.toJava).mkString(", ")) 294 | } else { 295 | flows += FlowFact(newFlow) 296 | } 297 | 298 | case _ ⇒ 299 | } 300 | 301 | // Propagate taints of the return value 302 | if (exit.stmt.astID == ReturnValue.ASTID && stmt.stmt.astID == Assignment.ASTID) { 303 | val returnValue = exit.stmt.asReturnValue.expr.asVar 304 | in.foreach { 305 | case Variable(index) if returnValue.definedBy.contains(index) ⇒ 306 | flows += Variable(stmt.index) 307 | /*case ArrayElement(index, taintedIndex) if returnValue.definedBy.contains(index) ⇒ 308 | ArrayElement(stmt.index, taintedIndex)*/ 309 | case InstanceField(index, declClass, taintedField) if returnValue.definedBy.contains(index) ⇒ 310 | flows += InstanceField(stmt.index, declClass, taintedField) 311 | 312 | case _ ⇒ // nothing to do 313 | } 314 | } 315 | 316 | flows 317 | } 318 | } 319 | 320 | /** 321 | * Converts a parameter origin to the index in the parameter seq (and vice-versa). 
322 | */ 323 | def paramToIndex(param: Int, includeThis: Boolean): Int = 324 | (if (includeThis) -1 else -2) - param 325 | 326 | override def callToReturnFlow(stmt: Statement, succ: Statement, in: Set[Fact]): Set[Fact] = { 327 | val call = asCall(stmt.stmt) 328 | if (call.name == "sanitize") { 329 | in.filter { 330 | case Variable(index) ⇒ 331 | !(call.params ++ call.receiverOption).exists { p ⇒ 332 | val definedBy = p.asVar.definedBy 333 | definedBy.size == 1 && definedBy.contains(index) 334 | } 335 | case _ ⇒ true 336 | } 337 | } else if (call.name == "forName" && (call.declaringClass eq ObjectType.Class) && 338 | call.descriptor.parameterTypes == RefArray(ObjectType.String)) { 339 | if (in.exists { 340 | case Variable(index) ⇒ 341 | asCall(stmt.stmt).params.exists(p ⇒ p.asVar.definedBy.contains(index)) 342 | case _ ⇒ false 343 | }) { 344 | /*if (entryPoints.contains(declaredMethods(stmt.method))) { 345 | println(s"flow: "+stmt.method.toJava) 346 | in 347 | } else*/ 348 | in ++ Set(FlowFact(ListSet(stmt.method))) 349 | } else { 350 | in 351 | } 352 | } else { 353 | in 354 | } 355 | } 356 | 357 | /** 358 | * If forName is called, we add a FlowFact. 
359 | */ 360 | override def nativeCall(statement: Statement, callee: DeclaredMethod, successor: Statement, in: Set[Fact]): Set[Fact] = { 361 | /* val allParams = asCall(statement.stmt).allParams 362 | if (statement.stmt.astID == Assignment.ASTID && in.exists { 363 | case Variable(index) ⇒ 364 | allParams.zipWithIndex.exists { 365 | case (param, _) if param.asVar.definedBy.contains(index) ⇒ true 366 | case _ ⇒ false 367 | } 368 | /*case ArrayElement(index, _) ⇒ 369 | allParams.zipWithIndex.exists { 370 | case (param, _) if param.asVar.definedBy.contains(index) ⇒ true 371 | case _ ⇒ false 372 | }*/ 373 | case _ ⇒ false 374 | }) Set(Variable(statement.index)) 375 | else*/ Set.empty 376 | } 377 | 378 | val entryPoints: Map[DeclaredMethod, Fact] = (for { 379 | m ← project.allMethodsWithBody 380 | if (m.isPublic || m.isProtected) && (m.descriptor.returnType == ObjectType.Object || m.descriptor.returnType == ObjectType.Class) 381 | index ← m.descriptor.parameterTypes.zipWithIndex.collect { case (pType, index) if pType == ObjectType.String ⇒ index } 382 | } //yield (declaredMethods(m), null) 383 | yield declaredMethods(m) → Variable(-2 - index)).toMap 384 | } 385 | 386 | class Taint(val flows: Map[Statement, Set[Fact]]) extends IFDSProperty[Fact] { 387 | 388 | override type Self = Taint 389 | 390 | def key: PropertyKey[Taint] = Taint.key 391 | } 392 | 393 | object Taint extends IFDSPropertyMetaInformation[Fact] { 394 | override type Self = Taint 395 | 396 | val key: PropertyKey[Taint] = PropertyKey.create( 397 | "TestTaint", 398 | new Taint(Map.empty)) 399 | } 400 | 401 | object TestTaintAnalysisRunner extends FunSuite { 402 | 403 | def main(args: Array[String]): Unit = { 404 | 405 | val p0 = Project(new File(args(args.length - 1))) //bytecode.RTJar) 406 | 407 | com.phaller.rasync.pool.SchedulingStrategy 408 | 409 | // Using PropertySore here is fine, it is not use during analysis 410 | p0.getOrCreateProjectInformationKeyInitializationData( 411 | PropertyStoreKey, 412 | 
(context: List[PropertyStoreContext[AnyRef]]) ⇒ { 413 | implicit val lg: LogContext = p0.logContext 414 | val ps = PKESequentialPropertyStore(context: _*) 415 | PropertyStore.updateDebug(false) 416 | ps 417 | }) 418 | 419 | p0.getOrCreateProjectInformationKeyInitializationData( 420 | LazyDetachedTACAIKey, 421 | (m: Method) ⇒ new PrimitiveTACAIDomain(p0, m)) 422 | 423 | PerformanceEvaluation.time { 424 | val manager = p0.get(FPCFAnalysesManagerKey) 425 | manager.runAll(new CallGraphDeserializerScheduler(new File(args(args.length - 2)))) 426 | } { t ⇒ println(s"CG took ${t.toSeconds}") } 427 | 428 | for ( 429 | scheduling ← List( 430 | new DefaultScheduling[IFDSProperty[Fact], (DeclaredMethod, Fact)], 431 | new SourcesWithManyTargetsFirst[IFDSProperty[Fact], (DeclaredMethod, Fact)], 432 | new SourcesWithManyTargetsLast[IFDSProperty[Fact], (DeclaredMethod, Fact)], 433 | new TargetsWithManySourcesFirst[IFDSProperty[Fact], (DeclaredMethod, Fact)], 434 | new TargetsWithManySourcesLast[IFDSProperty[Fact], (DeclaredMethod, Fact)], 435 | new TargetsWithManyTargetsFirst[IFDSProperty[Fact], (DeclaredMethod, Fact)], 436 | new TargetsWithManyTargetsLast[IFDSProperty[Fact], (DeclaredMethod, Fact)], 437 | new SourcesWithManySourcesFirst[IFDSProperty[Fact], (DeclaredMethod, Fact)], 438 | new SourcesWithManySourcesLast[IFDSProperty[Fact], (DeclaredMethod, Fact)]); 439 | threads ← List(1, 2, 4, 8, 10, 16, 20, 32, 40) 440 | ) { 441 | var result = 0 442 | var analysis: TestTaintAnalysis = null 443 | var entryPoints: Map[DeclaredMethod, Fact] = null 444 | var ts: List[Long] = List.empty 445 | for (i ← (0 until 5)) { 446 | PerformanceEvaluation.time({ 447 | implicit val p: Project[URL] = p0 //.recreate(k ⇒ k == PropertyStoreKey.uniqueId || k == DeclaredMethodsKey.uniqueId) 448 | Counter.reset() 449 | 450 | // From now on, we may access ps for read operations only 451 | // We can now start TestTaintAnalysis using IFDS. 
452 | analysis = new TestTaintAnalysis(threads, scheduling) 453 | 454 | entryPoints = analysis.entryPoints 455 | entryPoints.foreach(analysis.forceComputation) 456 | analysis.waitForCompletion() 457 | }) { t ⇒ 458 | 459 | result = 0 460 | for { 461 | e ← entryPoints 462 | fact ← analysis.getResult(e).flows.values.flatten.toSet[Fact] 463 | } { 464 | fact match { 465 | case FlowFact(flow) ⇒ 466 | result += 1; println(s"flow: " + flow.map(_.toJava).mkString(", ")) 467 | case _ ⇒ 468 | } 469 | } 470 | println(Counter.toString) 471 | println(s"NUM RESULTS = $result") 472 | println(s"time = ${t.toSeconds}") 473 | 474 | ts ::= t.timeSpan 475 | } 476 | } 477 | val lastAvg = ts.sum / ts.size 478 | println(s"AVG,${scheduling.getClass.getSimpleName},$threads,$lastAvg") 479 | } 480 | } 481 | } 482 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/Distribution.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.npv 2 | 3 | import java.util.concurrent.ThreadLocalRandom 4 | 5 | trait Distribution { 6 | def sample(): Double 7 | def getMax(): Double 8 | def getMin(): Double 9 | } 10 | 11 | class SingleValueDistribution(value: Double) extends Distribution { 12 | override def sample(): Double = value 13 | override def getMax(): Double = value 14 | override def getMin(): Double = value 15 | } 16 | 17 | class TriangleDistribution(min: Double, likely: Double, max: Double) 18 | extends Distribution { 19 | 20 | assert(max >= likely) 21 | assert(likely >= min) 22 | 23 | val fc: Double = (likely - min) / (max - min) 24 | 25 | override def sample(): Double = { 26 | val u = ThreadLocalRandom.current().nextDouble() 27 | if (u < fc) { 28 | min + Math.sqrt(u * (max - min) * (likely - min)) 29 | } else { 30 | max - Math.sqrt((1 - u) * (max - min) * (max - likely)) 31 | } 32 | } 33 | 34 | override def getMin(): Double = min 35 | 36 | def 
getLikely(): Double = likely 37 | 38 | override def getMax(): Double = max 39 | 40 | } 41 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/MonteCarloNpv.scala: -------------------------------------------------------------------------------- 1 | //package com.phaller.rasync.npv 2 | // 3 | //import scala.concurrent.{ Promise, Await, ExecutionContext } 4 | //import scala.concurrent.duration._ 5 | // 6 | //import scala.util.{ Success, Failure } 7 | // 8 | //import java.util.concurrent.{ CountDownLatch, ForkJoinPool } 9 | //import java.util.concurrent.atomic.AtomicReference 10 | // 11 | //import com.phaller.rasync.{ HandlerPool, CellCompleter } 12 | //import com.phaller.rasync.lattice.{ Lattice, DefaultKey } 13 | // 14 | //class MonteCarloNpv { 15 | // import MonteCarloNpv._ 16 | // 17 | // private val initial: Distribution = new SingleValueDistribution(-20000) 18 | // private val year1: Distribution = new TriangleDistribution(0, 4000, 10000) 19 | // private val year2: Distribution = new TriangleDistribution(0, 4000, 10000) 20 | // private val year3: Distribution = new TriangleDistribution(1000, 8000, 20000) 21 | // private val year4: Distribution = new TriangleDistribution(1000, 8000, 20000) 22 | // private val year5: Distribution = new TriangleDistribution(5000, 12000, 40000) 23 | // private val rate: Distribution = new TriangleDistribution(2, 4, 8) 24 | // 25 | // def sequential(): StatsCollector = { 26 | // implicit val ctx = 27 | // ExecutionContext.fromExecutorService(new ForkJoinPool(numThreads)) 28 | // val p = Promise[StatsCollector]() 29 | // val task = 30 | // new NpvTask(p, 10, NUM_ITER, rate, initial, year1, year2, year3, year4, year5) 31 | // task.setMinChunkSize(NUM_ITER + 1) 32 | // task.run() 33 | // Await.result(p.future, 600.seconds) 34 | // } 35 | // 36 | // def parallel(minChunkSize: Int, numChunks: Int)(implicit ctx: ExecutionContext): StatsCollector = { 37 | 
// val p = Promise[StatsCollector]() 38 | // val task = 39 | // new NpvTask(p, 10, NUM_ITER, rate, initial, year1, year2, year3, year4, year5) 40 | // task.setMinChunkSize(minChunkSize) 41 | // task.setNumChunks(numChunks) 42 | // ctx.execute(task) 43 | // Await.result(p.future, 600.seconds) 44 | // } 45 | // 46 | // def cell(minChunkSize: Int, numChunks: Int)(implicit pool: HandlerPool): StatsCollector = { 47 | // implicit val lattice: Lattice[StatsCollector] = new StatsLattice 48 | // val p = CellCompleter[DefaultKey[StatsCollector], StatsCollector](new DefaultKey[StatsCollector]) 49 | // val task = 50 | // new NpvCellTask(p, 10, NUM_ITER, rate, initial, year1, year2, year3, year4, year5) 51 | // task.setMinChunkSize(minChunkSize) 52 | // task.setNumChunks(numChunks) 53 | // pool.execute(task) 54 | // val latch = new CountDownLatch(1) 55 | // val atomic = new AtomicReference[StatsCollector] 56 | // p.cell.onComplete { 57 | // case Success(x) => 58 | // atomic.lazySet(x) 59 | // latch.countDown() 60 | // case Failure(_) => 61 | // atomic.lazySet(null) 62 | // latch.countDown() 63 | // } 64 | // latch.await() 65 | // atomic.get() 66 | // } 67 | // 68 | //} 69 | // 70 | //object MonteCarloNpv { 71 | // private val NUM_ITER = 10000000 72 | // private var numThreads = 0 73 | // 74 | // private def oneSize(name: String, children: Int, chunkSize: Int, npv: MonteCarloNpv): Long = { 75 | // val swName: String = name + " (children=" + children + ", min fork size=" + chunkSize + ")" 76 | // println(swName) 77 | // implicit val ctx = 78 | // ExecutionContext.fromExecutorService(new ForkJoinPool(numThreads)) 79 | // val start = System.nanoTime() 80 | // val stats = npv.parallel(chunkSize, children) 81 | // val end = System.nanoTime() 82 | // // println(stats) 83 | // end - start 84 | // } 85 | // 86 | // private def oneSizeCell(name: String, children: Int, chunkSize: Int, npv: MonteCarloNpv): Long = { 87 | // val swName: String = name + " (children=" + children + ", min fork 
size=" + chunkSize + ")" 88 | // println(swName) 89 | // implicit val pool = new HandlerPool(numThreads) 90 | // val start = System.nanoTime() 91 | // val stats = npv.cell(chunkSize, children) 92 | // val end = System.nanoTime() 93 | // // println(stats) 94 | // pool.shutdown() 95 | // end - start 96 | // } 97 | // 98 | // def main(args: Array[String]): Unit = { 99 | // numThreads = args(0).toInt 100 | // println(s"Using $numThreads threads") 101 | // 102 | // val npv = new MonteCarloNpv() 103 | // 104 | // val startSeq = System.nanoTime() 105 | // val stats = npv.sequential() 106 | // val endSeq = System.nanoTime() 107 | // // println(stats) 108 | // 109 | // println(s"Time (sequential): ${(endSeq - startSeq) / 1000000} ms") 110 | // 111 | // val timeCell = oneSizeCell("Cells", 2, 500, npv) 112 | // println(s"Time (cells): ${timeCell / 1000000} ms") 113 | // 114 | // val timeFut = oneSize("Futures", 2, 500, npv) 115 | // println(s"Time (futures): ${timeFut / 1000000} ms") 116 | // } 117 | // 118 | //} 119 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/NetPresentValue.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.npv 2 | 3 | object NetPresentValue { 4 | 5 | def npv(flows: Array[Double], discountRate: Double): Double = { 6 | var result: Double = 0 7 | val rate: Double = 1 + (discountRate * 0.01) 8 | for (i <- 0 until flows.length) { 9 | result = result + (flows(i) / Math.pow(rate, i)) 10 | } 11 | result 12 | } 13 | 14 | } 15 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/NpvCellTask.scala: -------------------------------------------------------------------------------- 1 | //package com.phaller.rasync.npv 2 | // 3 | //import com.phaller.rasync.cell.{ Cell, CellCompleter } 4 | //import com.phaller.rasync.lattice.{ 
DefaultKey, Lattice, NotMonotonicException } 5 | //import com.phaller.rasync.pool.HandlerPool 6 | // 7 | //import scala.util.{ Failure, Success } 8 | // 9 | //abstract class AbstractNpvTask extends Runnable { 10 | // 11 | // protected var minChunkSize: Int = 100 12 | // protected var numChunks: Int = 2 13 | // 14 | // def setMinChunkSize(minChunkSize: Int): Unit = { 15 | // this.minChunkSize = minChunkSize 16 | // } 17 | // 18 | // def setNumChunks(numChunks: Int): Unit = { 19 | // this.numChunks = numChunks 20 | // } 21 | // 22 | // def calcNumChunks(n: Int): Int = { 23 | // val nc: Int = Math.ceil(Math.sqrt(n / minChunkSize)).asInstanceOf[Int] 24 | // nc 25 | // } 26 | //} 27 | // 28 | //// trivial lattice 29 | //class StatsLattice extends Lattice[StatsCollector] { 30 | // override val bottom: StatsCollector = null 31 | // override def join(current: StatsCollector, next: StatsCollector): StatsCollector = { 32 | // if (current == null) next 33 | // else throw NotMonotonicException(current, next) 34 | // } 35 | //} 36 | // 37 | //class NpvCellTask(p: CellCompleter[StatsCollector], min: Double, max: Double, numBuckets: Int, numIterations: Int, rate: Distribution, flows: Distribution*)(implicit pool: HandlerPool[List[StatsCollector]]) extends AbstractNpvTask { 38 | // 39 | // type K = DefaultKey[StatsCollector] 40 | // 41 | // implicit val lattice: Lattice[StatsCollector] = new StatsLattice 42 | // 43 | // def this(p: CellCompleter[StatsCollector], numBuckets: Int, numIterations: Int, rate: Distribution, flows: Distribution*)(implicit pool: HandlerPool[List[StatsCollector]]) { 44 | // this(p, NpvTask.calculateMin(flows, rate), NpvTask.calculateMax(flows, rate), numBuckets, numIterations, rate, flows: _*) 45 | // } 46 | // 47 | // private def sampleFlows(): Array[Double] = { 48 | // val sample = Array.ofDim[Double](flows.length) 49 | // for (i <- 0 until flows.length) { 50 | // sample(i) = flows(i).sample() 51 | // } 52 | // sample 53 | // } 54 | // 55 | // def run(): 
Unit = { 56 | // val children = 57 | // if (numChunks < 0) calcNumChunks(numIterations) else numChunks 58 | // 59 | // if (numIterations <= minChunkSize || children == 1) { 60 | // val collector = new StatsCollector(min, max, numBuckets) 61 | // for (i <- 0 until numIterations) { 62 | // collector.addObs(NetPresentValue.npv(sampleFlows(), rate.sample())) 63 | // } 64 | // p.putFinal(collector) 65 | // } else { 66 | // var completers: List[CellCompleter[StatsCollector]] = List() 67 | // for (i <- 0 until children) { 68 | // val statsCompleter = 69 | // CellCompleter[StatsCollector]() 70 | // val subTask = new NpvCellTask(statsCompleter, min, max, numBuckets, numIterations / children, rate, flows: _*) 71 | // subTask.setMinChunkSize(minChunkSize) 72 | // subTask.setNumChunks(numChunks) 73 | // completers = statsCompleter :: completers 74 | // pool.execute(subTask) 75 | // } 76 | // val subCells = completers.map(_.cell) 77 | // Cell.sequence(subCells)(pool).onComplete { 78 | // case Success(listOfCollectors) => 79 | // val collector = new StatsCollector(min, max, numBuckets) 80 | // for (subCollector <- listOfCollectors) { 81 | // collector.combine(subCollector) 82 | // } 83 | // p.putFinal(collector) 84 | // case f @ Failure(_) => 85 | // p.tryComplete(f.asInstanceOf[Failure[StatsCollector]], None) 86 | // } 87 | // } 88 | // } 89 | // 90 | //} 91 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/NpvTask.scala: -------------------------------------------------------------------------------- 1 | //package com.phaller.rasync.npv 2 | // 3 | //import scala.concurrent.{ Promise, Future, ExecutionContext } 4 | // 5 | //object NpvTask { 6 | // 7 | // def calculateMin(flows: Seq[Distribution], rate: Distribution): Double = { 8 | // val minFlows = Array.ofDim[Double](flows.length) 9 | // for (i <- 0 until flows.length) { 10 | // minFlows(i) = flows(i).getMin() 11 | // } 12 | // 
NetPresentValue.npv(minFlows, rate.getMax()) 13 | // } 14 | // 15 | // def calculateMax(flows: Seq[Distribution], rate: Distribution): Double = { 16 | // val maxFlows = Array.ofDim[Double](flows.length) 17 | // for (i <- 0 until flows.length) { 18 | // maxFlows(i) = flows(i).getMax() 19 | // } 20 | // NetPresentValue.npv(maxFlows, rate.getMin()) 21 | // } 22 | // 23 | //} 24 | // 25 | //class NpvTask(p: Promise[StatsCollector], min: Double, max: Double, numBuckets: Int, numIterations: Int, rate: Distribution, flows: Distribution*)(implicit ctx: ExecutionContext) extends AbstractNpvTask { 26 | // 27 | // def this(p: Promise[StatsCollector], numBuckets: Int, numIterations: Int, rate: Distribution, flows: Distribution*)(implicit ctx: ExecutionContext) { 28 | // this(p, NpvTask.calculateMin(flows, rate), NpvTask.calculateMax(flows, rate), numBuckets, numIterations, rate, flows: _*) 29 | // } 30 | // 31 | // private def sampleFlows(): Array[Double] = { 32 | // val sample = Array.ofDim[Double](flows.length) 33 | // for (i <- 0 until flows.length) { 34 | // sample(i) = flows(i).sample() 35 | // } 36 | // sample 37 | // } 38 | // 39 | // def run(): Unit = { 40 | // val children = 41 | // if (numChunks < 0) calcNumChunks(numIterations) else numChunks 42 | // 43 | // if (numIterations <= minChunkSize || children == 1) { 44 | // val collector = new StatsCollector(min, max, numBuckets) 45 | // for (i <- 0 until numIterations) { 46 | // collector.addObs(NetPresentValue.npv(sampleFlows(), rate.sample())) 47 | // } 48 | // p.success(collector) 49 | // } else { 50 | // var promises: List[Promise[StatsCollector]] = List() 51 | // for (i <- 0 until children) { 52 | // val statsPromise = Promise[StatsCollector]() 53 | // val subTask = new NpvTask(statsPromise, min, max, numBuckets, numIterations / children, rate, flows: _*) 54 | // subTask.setMinChunkSize(minChunkSize) 55 | // subTask.setNumChunks(numChunks) 56 | // promises = statsPromise :: promises 57 | // ctx.execute(subTask) 58 
| // } 59 | // val subFutures = promises.map(_.future) 60 | // Future.sequence(subFutures).map { listOfCollectors => 61 | // val collector = new StatsCollector(min, max, numBuckets) 62 | // for (subCollector <- listOfCollectors) { 63 | // collector.combine(subCollector) 64 | // } 65 | // p.success(collector) 66 | // } 67 | // } 68 | // } 69 | // 70 | //} 71 | -------------------------------------------------------------------------------- /monte-carlo-npv/src/main/scala/com/phaller/rasync/npv/StatsCollector.scala: -------------------------------------------------------------------------------- 1 | package com.phaller.rasync.npv 2 | 3 | class StatsCollector(min: Double, max: Double, numBuckets: Int) { 4 | 5 | var instances: Int = 1 6 | private val range: Double = max - min 7 | private var mean: Double = 0 8 | private var numObs: Int = 0 9 | val buckets: Array[Int] = Array.ofDim[Int](numBuckets) 10 | 11 | def addObs(obs: Double): Unit = { 12 | mean = (obs + (numObs * mean)) / (numObs + 1) 13 | numObs += 1 14 | val bucket: Int = Math.floor(numBuckets * (obs - min) / range).asInstanceOf[Int] 15 | buckets(bucket) = buckets(bucket) + 1 16 | } 17 | 18 | def combine(collector: StatsCollector): Unit = { 19 | instances += collector.instances 20 | mean = ((numObs * mean) + (collector.numObs * collector.mean)) / numObs + collector.numObs 21 | numObs += collector.numObs 22 | for (i <- 0 until numBuckets) { 23 | buckets(i) = buckets(i) + collector.buckets(i) 24 | } 25 | } 26 | 27 | override def toString(): String = { 28 | val sb = new StringBuilder() 29 | sb.append("Collected Statistics") 30 | sb.append(System.lineSeparator()) 31 | sb.append("--------------------") 32 | sb.append(System.lineSeparator()) 33 | sb.append(System.lineSeparator()) 34 | sb.append(f"Number of instances: $instances%d") 35 | sb.append(System.lineSeparator()) 36 | sb.append(f"Mean: $mean%2f") 37 | sb.append(System.lineSeparator()) 38 | sb.append(f"Number of observations: $numObs%d") 39 | 
sb.append(System.lineSeparator()) 40 | sb.append("Histogram") 41 | sb.append(System.lineSeparator()) 42 | for (i <- 0 until numBuckets) { 43 | sb.append(f" ${i + 1}%3d ${buckets(i)}%d") 44 | sb.append(System.lineSeparator()) 45 | } 46 | sb.append(System.lineSeparator()) 47 | sb.toString() 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | object Dependencies { 4 | lazy val scalaTest = "org.scalatest" %% "scalatest" % "3.0.5" % "test" 5 | lazy val opalCommon = "de.opal-project" %% "common" % "3.0.0-SNAPSHOT" 6 | lazy val opalTAC = "de.opal-project" %% "three-address-code" % "3.0.0-SNAPSHOT" % "test" 7 | lazy val scalaMeter = "com.storm-enroute" %% "scalameter" % "0.9" 8 | } 9 | -------------------------------------------------------------------------------- /project/Util.scala: -------------------------------------------------------------------------------- 1 | object Util { 2 | val buildScalaVersion = System.getProperty("scala.version", "2.12.5") 3 | val javaVersion = System.getProperty("java.version") 4 | } 5 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.16 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | resolvers += Resolver.url("scoverage-bintray", url("https://dl.bintray.com/sksamuel/sbt-plugins/"))(Resolver.ivyStylePatterns) 2 | addSbtPlugin("org.scoverage" %% "sbt-scoverage" % "1.5.1") 3 | 4 | addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.8.1") 5 | 6 | addSbtPlugin("org.foundweekends" % "sbt-bintray" % "0.5.4") 7 | 
-------------------------------------------------------------------------------- /sandbox/parallelsum.scala: -------------------------------------------------------------------------------- 1 | import lattice.{DefaultKey, Lattice, LatticeViolationException, NaturalNumberLattice, NaturalNumberKey} 2 | import cell.{Cell, CellCompleter, HandlerPool} 3 | 4 | import scala.util.{Try, Success, Failure} 5 | import scala.collection.immutable.{IndexedSeq, Vector} 6 | 7 | import scala.concurrent.{Future, Promise, ExecutionContext} 8 | 9 | import java.util.concurrent.{CountDownLatch, ForkJoinPool} 10 | import java.util.concurrent.atomic.AtomicLong 11 | 12 | 13 | object ParallelSum { 14 | 15 | type K = DefaultKey[Int] 16 | 17 | implicit val intLattice: Lattice[Int] = new NaturalNumberLattice 18 | 19 | def parallelSumFut(l: IndexedSeq[Int])(implicit ctx: ExecutionContext): Future[Int] = { 20 | val chunk = l.size / 2 21 | // chunk = half of sequential size 22 | if (chunk == 16384) { // sequential 23 | val sum = l.reduce(_ + _) 24 | val p = Promise[Int]() 25 | p.success(sum).future 26 | } else { 27 | val (left, right) = l.splitAt(chunk) 28 | // introduce parallelism 29 | val leftPromise = Promise[Int]() 30 | ctx.execute(new Runnable { 31 | def run(): Unit = { 32 | val res = parallelSumFut(left) 33 | leftPromise.completeWith(res) 34 | } 35 | }) 36 | val rightFut = parallelSumFut(right) 37 | val zipped = leftPromise.future.zip(rightFut) 38 | zipped.map(tup => tup._1 + tup._2) 39 | } 40 | } 41 | 42 | // final result is eventually put into returned cell 43 | def parallelSum(l: IndexedSeq[Int])(implicit pool: HandlerPool): Cell[K, Int] = { 44 | val chunk = l.size / 2 45 | // chunk = half of sequential size 46 | if (chunk == 16384) { // sequential 47 | val sum = l.reduce(_ + _) 48 | val completer = CellCompleter[K, Int](pool, new DefaultKey[Int]) 49 | completer.putFinal(sum) 50 | completer.cell 51 | } else { 52 | val (left, right) = l.splitAt(chunk) 53 | // introduce parallelism 54 | 
val leftCompleter = CellCompleter[K, Int](pool, new DefaultKey[Int]) 55 | pool.execute { () => 56 | val res = parallelSum(left) 57 | // TODO: replace with whenCompleteEx 58 | // res.whenComplete 59 | res.onComplete { 60 | case Success(x) => leftCompleter.putFinal(x) 61 | case f @ Failure(_) => leftCompleter.tryComplete(f) 62 | } 63 | } 64 | val rightCell = parallelSum(right) 65 | val zipped = leftCompleter.cell.zipFinal(rightCell) 66 | val completer = CellCompleter[K, Int](pool, new DefaultKey[Int]) 67 | // TODO: replace with `mapFinal` 68 | zipped.onComplete { 69 | case Success((x, y)) => completer.putFinal(x + y) 70 | case f @ Failure(_) => completer.tryComplete(f.asInstanceOf[Try[Int]]) 71 | } 72 | completer.cell 73 | } 74 | } 75 | 76 | // returns duration in ns 77 | def oneIteration(list: IndexedSeq[Int])(implicit pool: HandlerPool): Long = { 78 | val latch = new CountDownLatch(1) 79 | val time = new AtomicLong 80 | 81 | val startCell = System.nanoTime() 82 | val res = parallelSum(list) 83 | res.onComplete { 84 | case Success(x) => 85 | val endCell = System.nanoTime() 86 | time.lazySet(endCell - startCell) 87 | latch.countDown() 88 | case Failure(t) => 89 | assert(false) 90 | latch.countDown() 91 | } 92 | 93 | latch.await() 94 | time.get() 95 | } 96 | 97 | // returns duration in ns 98 | def oneIterationFut(list: IndexedSeq[Int])(implicit ctx: ExecutionContext): Long = { 99 | val latch = new CountDownLatch(1) 100 | val time = new AtomicLong 101 | 102 | val start = System.nanoTime() 103 | val res = parallelSumFut(list) 104 | res.onComplete { 105 | case Success(x) => 106 | val end = System.nanoTime() 107 | time.lazySet(end - start) 108 | latch.countDown() 109 | case Failure(t) => 110 | assert(false) 111 | latch.countDown() 112 | } 113 | 114 | latch.await() 115 | time.get() 116 | } 117 | 118 | def oneIterationSequential(seq: IndexedSeq[Int]): (Int, Long) = { 119 | val start = System.nanoTime() 120 | val sum = seq.reduce(_ + _) 121 | val end = System.nanoTime() 122 
| (sum, end - start) 123 | } 124 | 125 | def main(args: Array[String]): Unit = { 126 | val numThreads = args(1).toInt 127 | println(s"Using $numThreads threads") 128 | 129 | val seed: Int = 5 130 | val rnd = new scala.util.Random(seed) 131 | //val list = List.fill(1048576) { rnd.nextInt(1000) } 132 | //val list = List.fill(2048) { rnd.nextInt(1000) } 133 | // out of memory: 134 | //val list: IndexedSeq[Int] = Vector.fill(1073741824) { rnd.nextInt(1) } 135 | // 2 ^ 24 = 16777216 136 | //val list: IndexedSeq[Int] = Vector.fill(16777216) { rnd.nextInt(1) } 137 | val seqSize = 2 << (args(0).toInt - 1) 138 | println(s"size of sequence: $seqSize") 139 | if (seqSize < 16384) { 140 | println(s"ERROR: size of sequence must be at least 16384") 141 | return 142 | } 143 | 144 | val list: IndexedSeq[Int] = Vector.fill(seqSize) { rnd.nextInt(1) } 145 | 146 | // reach steady state 147 | for (_ <- 1 to 100) 148 | oneIterationSequential(list) 149 | 150 | // average of 9 iterations 151 | val durationsSeq = (1 to 9).map(i => oneIterationSequential(list)) 152 | val durationSumSeq = durationsSeq.map(tup => tup._2 / 1000000).reduce(_ + _) 153 | val resSeq = durationSumSeq / 9 154 | //println(s"sum of all integers: $expectedResult") 155 | println(s"time (sequential): $resSeq ms") 156 | 157 | implicit val pool = new HandlerPool(numThreads) 158 | 159 | // reach steady state 160 | for (_ <- 1 to 100) 161 | oneIteration(list) 162 | 163 | // average of 9 iterations 164 | val durations = (1 to 9).map(i => oneIteration(list)) 165 | val durationSum = durations.map(d => d / 1000000).reduce(_ + _) 166 | val res = durationSum / 9 167 | println(s"time (cells): $res ms") 168 | 169 | implicit val ctx = ExecutionContext.fromExecutorService(new ForkJoinPool(numThreads)) 170 | // reach steady state 171 | for (_ <- 1 to 100) 172 | oneIterationFut(list) 173 | 174 | // average of 9 iterations 175 | val durationsFut = (1 to 9).map(i => oneIterationFut(list)) 176 | val durationSumFut = durationsFut.map(d => d 
/ 1000000).reduce(_ + _) 177 | val resFut = durationSumFut / 9 178 | println(s"time (futures): $resFut ms") 179 | 180 | // clean up 181 | pool.shutdown() 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /shippable.yml: -------------------------------------------------------------------------------- 1 | language: scala 2 | 3 | scala: 4 | - 2.12.4 5 | --------------------------------------------------------------------------------