├── project ├── build.properties └── MollyBuild.scala ├── src ├── test │ ├── resources │ │ ├── examples_ft │ │ │ ├── TODO │ │ │ ├── regiser.ded │ │ │ ├── commit │ │ │ │ ├── 2pc_timeout.ded │ │ │ │ ├── 2pc_assert_optimist.ded │ │ │ │ ├── 2pc_edb.ded │ │ │ │ ├── 2pc_assert.ded │ │ │ │ ├── 2pc.ded │ │ │ │ ├── 2pc_ctp.ded │ │ │ │ └── 3pc.ded │ │ │ ├── delivery │ │ │ │ ├── bcast_edb.ded │ │ │ │ ├── deliv_assert.ded │ │ │ │ ├── rdlog.ded │ │ │ │ ├── simplog.ded │ │ │ │ ├── classic_rb.ded │ │ │ │ ├── ack_rb.ded │ │ │ │ └── replog.ded │ │ │ ├── negative_support_test.ded │ │ │ ├── ramp │ │ │ │ ├── sequence.ded │ │ │ │ ├── ramp_edb.ded │ │ │ │ ├── ramp_assert.ded │ │ │ │ ├── encoding.ded │ │ │ │ └── ramp.ded │ │ │ ├── timeout.ded │ │ │ ├── util │ │ │ │ ├── timeout_svc.ded │ │ │ │ ├── heartbeat.ded │ │ │ │ └── leader.ded │ │ │ ├── barrier_test.ded │ │ │ ├── raft │ │ │ │ ├── clock.ded │ │ │ │ ├── raft_assert.ded │ │ │ │ ├── raft_edb.ded │ │ │ │ ├── election.ded │ │ │ │ └── raft.ded │ │ │ ├── heartbeat_assert.ded │ │ │ ├── gstore │ │ │ │ ├── group_delete.ded │ │ │ │ └── gstore.ded │ │ │ ├── orc2.ded │ │ │ ├── test_deliv.ded │ │ │ ├── real_heartbeat.ded │ │ │ ├── orc.ded │ │ │ ├── fake_zk.ded │ │ │ ├── flux │ │ │ │ ├── flux_buffer.ded │ │ │ │ ├── flux.ded │ │ │ │ ├── flux_clusterpairs.ded │ │ │ │ └── flux_partitionpairs.ded │ │ │ ├── fake_zk2.ded │ │ │ ├── test2.ded │ │ │ ├── test_ack.ded │ │ │ ├── tokens.ded │ │ │ ├── kafka.ded.orig │ │ │ ├── kafka.ded │ │ │ ├── chain_replication.ded │ │ │ ├── real_kafka.ded │ │ │ ├── chord.ded │ │ │ ├── paxos_synod.ded │ │ │ ├── pipeline.ded │ │ │ └── real_chord.ded │ │ └── log4j.properties │ └── scala │ │ └── edu │ │ └── berkeley │ │ └── cs │ │ └── boom │ │ └── molly │ │ ├── DedalusParserSuite.scala │ │ ├── SetUtilsSuite.scala │ │ ├── C4CodeGeneratorSuite.scala │ │ ├── derivations │ │ └── SolverSuite.scala │ │ ├── ASTSuite.scala │ │ ├── FailureSpecSuite.scala │ │ ├── FormulaSuite.scala │ │ ├── DerivationTreesSuite.scala │ │ ├── DedalusRewritesSuite.scala │ │ ├── symmetry │ │ └── SymmetryCheckerSuite.scala │ │ └── CounterexampleSuite.scala └── main │ ├── resources │ ├── vis_template │ │ └── fonts │ │ │ ├── glyphicons-halflings-regular.eot │ │ │ ├── glyphicons-halflings-regular.ttf │ │ │ └── glyphicons-halflings-regular.woff │ └── log4j.properties │ └── scala │ └── edu │ └── berkeley │ └── cs │ └── boom │ └── molly │ ├── util │ ├── HashcodeCaching.scala │ └── SetUtils.scala │ ├── wrappers │ ├── C4.java │ └── C4Wrapper.scala │ ├── UltimateModel.scala │ ├── report │ ├── GraphvizPrettyPrinter.scala │ ├── MollyCodecJsons.scala │ ├── ProvenanceDiagramGenerator.scala │ ├── HTMLWriter.scala │ └── SpacetimeDiagramGenerator.scala │ ├── RandomBenchmarkSweeper.scala │ ├── RandomBenchmark.scala │ ├── codegen │ └── C4CodeGenerator.scala │ ├── FailureSpec.scala │ ├── symmetry │ ├── SymmetryAwareSet.scala │ └── SymmetryChecker.scala │ ├── paperexperiments │ ├── TableOfCorrectPrograms.scala │ └── TableOfCounterexamples.scala │ ├── DedalusParser.scala │ ├── derivations │ ├── Solver.scala │ ├── SAT4JSolver.scala │ └── BooleanFormula.scala │ ├── ast │ └── AST.scala │ └── DedalusRewrites.scala ├── demo_html ├── fonts │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.ttf │ └── glyphicons-halflings-regular.woff ├── vis_template │ └── fonts │ │ ├── glyphicons-halflings-regular.eot │ │ ├── glyphicons-halflings-regular.ttf │ │ └── glyphicons-halflings-regular.woff ├── run_0_spacetime.dot ├── runs.json ├── run_0_provenance.dot └── index.html ├── .gitmodules ├── 
.gitignore ├── demo_v1.ded ├── demo_v2.ded ├── Installation.md ├── demo_v3.ded ├── demo_v4.ded ├── Makefile ├── Vagrantfile └── README.md /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.7 2 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/TODO: -------------------------------------------------------------------------------- 1 | acks. 2 | 2pc. 3 | 4 | 5 | -------------------------------------------------------------------------------- /demo_html/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /demo_html/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /demo_html/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /demo_html/vis_template/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/vis_template/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /demo_html/vis_template/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/vis_template/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /demo_html/vis_template/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/demo_html/vis_template/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /src/test/resources/examples_ft/regiser.ded: -------------------------------------------------------------------------------- 1 | register(Name, Value)@next :- register(Name, Value), notin update(Name, _); 2 | register(Name, Value)@next :- update(Name, Value); 3 | 4 | 5 | -------------------------------------------------------------------------------- /src/main/resources/vis_template/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/src/main/resources/vis_template/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /src/main/resources/vis_template/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/src/main/resources/vis_template/fonts/glyphicons-halflings-regular.ttf 
-------------------------------------------------------------------------------- /src/main/resources/vis_template/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/palvaro/molly/HEAD/src/main/resources/vis_template/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lib/c4"] 2 | path = lib/c4 3 | url = https://github.com/bloom-lang/c4 4 | ignore = dirty 5 | [submodule "lib/z3"] 6 | path = lib/z3 7 | url = https://github.com/Z3Prover/z3 8 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc_timeout.ded: -------------------------------------------------------------------------------- 1 | include "../util/timeout_svc.ded"; 2 | include "2pc.ded"; 3 | 4 | timer_svc(C, X, 4) :- begin(C, X); 5 | abort(C, X)@next :- timeout(C, X), missing_vote(C, X); 6 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/bcast_edb.ded: -------------------------------------------------------------------------------- 1 | node("a", "b")@1; 2 | node("a", "c")@1; 3 | node("b", "a")@1; 4 | node("b", "c")@1; 5 | node("c", "a")@1; 6 | node("c", "b")@1; 7 | 8 | bcast("a", "hello")@1; 9 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/negative_support_test.ded: -------------------------------------------------------------------------------- 1 | snd(A, B)@async :- begin(B, A); 2 | snd(A,B)@next :- snd(A,B); 3 | bad(B, A) :- begin(B, A)@1, notin snd(A, B); 4 | good(B, A) :- begin(B, A)@1, notin bad(B, A); 5 | 6 | begin("b", "a")@1; 7 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/deliv_assert.ded: -------------------------------------------------------------------------------- 1 | // someone has a log, but not me. 
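// (pre/post below follow molly's assertion convention: a run is a counterexample when some tuple appears in pre but is absent from post in the final state.)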
2 | missing_log(A, Pl) :- log(X, Pl), node(X, A), notin log(A, Pl);//, notin crash(_, A, _); 3 | 4 | pre(X, Pl) :- log(X, Pl), notin bcast(X, Pl)@1, notin crash(X, X, _); 5 | post(X, Pl) :- log(X, Pl), notin missing_log(_, Pl); 6 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/ramp/sequence.ded: -------------------------------------------------------------------------------- 1 | // a simple sequence 2 | 3 | seq(Host, Key, 1) :- seq_begin(Host, Key); 4 | seq(Host, Key, Id + 1)@next :- seq(Host, Key, Id), seq_bump(Host, Key), notin seq_delete(Host, Key); 5 | seq(Host, Key, Id)@next :- seq(Host, Key, Id), notin seq_bump(Host, Key), notin seq_delete(Host, Key); -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc_assert_optimist.ded: -------------------------------------------------------------------------------- 1 | include "2pc_assert.ded"; 2 | 3 | // anything goes when the coordinator goes 4 | 5 | good(A, X) :- begin(A, X)@1, crash(A, A, _); 6 | good(B, X) :- begin(A, X)@1, crash(A, A, _), agent(A, B); 7 | 8 | post("termination", X) :- begin(A, X)@1, crash(A, A, _); 9 | 10 | 11 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/timeout.ded: -------------------------------------------------------------------------------- 1 | getit(Host, Sender, Msg)@async :- sendit(Sender, Host, Msg); 2 | good(H, M) :- getit(H, _, M); 3 | good(H, M)@next :- good(H, M); 4 | 5 | gotit(S, M) :- getit(Host, _, M), everyone(Host, S); 6 | 7 | 8 | everyone("b", "c")@1; 9 | everyone("c", "b")@1; 10 | 11 | sendit("a", "b", "Hi!")@1; 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/rdlog.ded: -------------------------------------------------------------------------------- 1 | // reliable delivery-backed broadcast. send messages to all neighbors 2 | // a (theoretically) infinite number of times 3 | 4 | include "./bcast_edb.ded"; 5 | include "./simplog.ded"; 6 | 7 | 8 | // persist bcast; sender retries. should accommodate all failures 9 | bcast(N, P)@next :- bcast(N, P); 10 | 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | 4 | # sbt specific 5 | .cache/ 6 | .history/ 7 | .lib/ 8 | dist/* 9 | target/ 10 | lib_managed/ 11 | src_managed/ 12 | project/boot/ 13 | project/plugins/project/ 14 | 15 | # Scala-IDE specific 16 | .scala_dependencies 17 | .worksheet 18 | 19 | # IntelliJ-specific 20 | .idea 21 | *.iml 22 | 23 | # Project-specific 24 | output/ 25 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/simplog.ded: -------------------------------------------------------------------------------- 1 | // simple broadcast.
make an attempt to send a message to all neighbors 2 | include "./bcast_edb.ded"; 3 | 4 | node(Node, Neighbor)@next :- node(Node, Neighbor); 5 | log(Node, Pload)@next :- log(Node, Pload); 6 | 7 | log(Node2, Pload)@async :- bcast(Node1, Pload), node(Node1, Node2); 8 | log(Node, Pload) :- bcast(Node, Pload); 9 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/util/timeout_svc.ded: -------------------------------------------------------------------------------- 1 | // a reusable logical timeout service. send a message to it with an identifier and a # of 2 | // transitions, and it sends you a timeout later. 3 | 4 | timer_state(H, I, T-1)@next :- timer_svc(H, I, T); 5 | timer_state(H, I, T-1)@next :- timer_state(H, I, T), notin timer_cancel(H, I), T > 1; 6 | timeout(H, I) :- timer_state(H, I, 1); 7 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=WARN, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1} - %m%n 9 | -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=WARN, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1} - %m%n 9 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/barrier_test.ded: -------------------------------------------------------------------------------- 1 | vote(M, V)@next :- vote(M, V); 2 | member(M, V, I)@next :- member(M, V, I); 3 | vote(M, V)@async :- begin(V, M); 4 | 5 | //good(M, V) :- vote(M, V); 6 | vote_cnt(M, count) :- vote(M, V), member(M, V, I); 7 | good(M, I) :- vote_cnt(M, I); 8 | 9 | 10 | member("M", "a", 1)@1; 11 | member("M", "b", 2)@1; 12 | begin("a", "M")@1; 13 | begin("b", "M")@1; 14 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc_edb.ded: -------------------------------------------------------------------------------- 1 | begin("C", "hello")@1; 2 | 3 | 4 | agent("C", "a")@1; 5 | agent("C", "b")@1; 6 | agent("C", "d")@1; 7 | agent("a", "b")@1; 8 | agent("a", "d")@1; 9 | agent("b", "a")@1; 10 | agent("b", "d")@1; 11 | agent("d", "a")@1; 12 | agent("d", "b")@1; 13 | 14 | // both agents can commit 15 | can("a", "hello")@1; 16 | can("b", "hello")@1; 17 | can("d", "hello")@1; 18 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/raft/clock.ded: -------------------------------------------------------------------------------- 1 | // a simple passive logical clock. records the # of timesteps since registration of a particular key. 
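// a usage sketch of this clock (election here is a hypothetical client relation):
// lclock_register(Host, "election", Id) :- election(Host, Id);
// expired(Host, Id) :- lclock(Host, "election", Id, T), T > 4;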
2 | // because the pattern of multiplexing is so common, a key is a combination of a type and an identifier. 3 | 4 | lclock(Host, Type, Id, 0) :- lclock_register(Host, Type, Id); 5 | lclock(Host, Type, Id, Time + 1)@next :- lclock(Host, Type, Id, Time), notin lclock_unreg(Host, Id); -------------------------------------------------------------------------------- /src/test/resources/examples_ft/ramp/ramp_edb.ded: -------------------------------------------------------------------------------- 1 | // inputs 2 | // ASSUME FOR NOW: one write per client per timestamp. 3 | write("C", "a|b", "foo")@1; 4 | write("C2", "b|c", "bar")@1; 5 | write("C", "a|c", "baz")@1; 6 | 7 | 8 | read("C", "b|c")@7; 9 | read("C", "a|b")@8; 10 | read("C2", "a|b")@7; 11 | read("C2", "b|c")@8; 12 | //read("C2", "b|c")@5; 13 | //read("C2", "c|d")@7; 14 | 15 | 16 | seq_begin("C", "Main")@1; 17 | seq_begin("C2", "Main")@1; -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc_assert.ded: -------------------------------------------------------------------------------- 1 | // raise if we never decide 2 | 3 | pre("termination", X) :- prepared(_, _, X, _); 4 | post("termination", X) :- decision(A1, X, _), decision(A2, X, _), A1 != A2; 5 | 6 | decision(C, X, "c") :- commit(C, X); 7 | decision(C, X, "a") :- abort(C, X); 8 | disagree(X) :- decision(_, X, V1), decision(_, X, V2), V1 != V2; 9 | pre("decide", X) :- decision(_, X, _); 10 | post("decide", X) :- decision(_, X, V), notin disagree(X); 11 | 12 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/util/HashcodeCaching.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.util 2 | 3 | 4 | /** 5 | * Mixin trait to cache an object's hashCode. This is only safe to apply to immutable objects. 
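 * For example, a hypothetical user would write {{{ case class Atom(name: String, args: List[String]) extends HashcodeCaching }}}
 * so that hashCode is computed once, on first use, and then reused across repeated map and set lookups.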
6 | * 7 | * Based on code from a scala-user discussion: 8 | * https://groups.google.com/d/msg/scala-user/drkTziXMUyE/-RWJV-fC1cYJ 9 | */ 10 | trait HashcodeCaching { self: Product => 11 | override lazy val hashCode: Int = scala.runtime.ScalaRunTime._hashCode(this) 12 | } 13 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/wrappers/C4.java: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.wrappers; 2 | 3 | 4 | import jnr.ffi.Pointer; 5 | 6 | public interface C4 { 7 | void c4_initialize(); 8 | 9 | Pointer c4_make(Pointer p, int port); 10 | 11 | int c4_install_file(Pointer c4, String file); 12 | 13 | int c4_install_str(Pointer c4, String str); 14 | 15 | String c4_dump_table(Pointer c4, String table); 16 | 17 | void c4_destroy(Pointer c4); 18 | 19 | void c4_terminate(); 20 | } 21 | -------------------------------------------------------------------------------- /demo_v1.ded: -------------------------------------------------------------------------------- 1 | 2 | member(Node, Other)@next :- member(Node, Other); 3 | log(Node, Message)@next :- log(Node, Message); 4 | 5 | log(Other, Message)@async :- bcast(Node, Message), member(Node, Other); 6 | 7 | bcast("a", "Hello world!")@1; 8 | 9 | pre(X) :- log(X, _); 10 | post(X) :- pre(X); 11 | 12 | member("a", "a")@1; 13 | member("a", "b")@1; 14 | member("a", "c")@1; 15 | member("b", "a")@1; 16 | member("b", "b")@1; 17 | member("b", "c")@1; 18 | member("c", "a")@1; 19 | member("c", "b")@1; 20 | member("c", "c")@1; 21 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/classic_rb.ded: -------------------------------------------------------------------------------- 1 | // classic reliable broadcast. send the contents of the log to all neighbors, once. 2 | // neighbors relay the log in the same fashion, once only 3 | include "./bcast_edb.ded"; 4 | 5 | node(Node, Neighbor)@next :- node(Node, Neighbor); 6 | log(Node, Pload)@next :- log(Node, Pload); 7 | 8 | //bcast(Node2, Pload)@async :- bcast(Node1, Pload), node(Node1, Node2), notin log(Node1, Pload); 9 | bcast(Node2, Pload)@async :- bcast(Node1, Pload), node(Node1, Node2); 10 | log(N, P)@next :- bcast(N, P); 11 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/UltimateModel.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | // TODO: this class is misleadingly named, since it holds the contents of ALL tables 4 | // at ALL timesteps.
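// For example, tableAtTime("log", 3) returns the rows of the "log" table whose last column (the timestep) equals 3.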
5 | case class UltimateModel(tables: Map[String, List[List[String]]]) { 6 | override def toString: String = { 7 | tables.map { case (name, values) => 8 | name + ":\n" + values.map(_.mkString(",")).mkString("\n") 9 | }.mkString("\n\n") 10 | } 11 | 12 | def tableAtTime(table: String, time: Int) = tables(table).filter(_.last.toInt == time) 13 | } 14 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/raft/raft_assert.ded: -------------------------------------------------------------------------------- 1 | // a run is good if we have nontrivial log entries 2 | good("YAY") :- log(N, _,_,_, E), notin bad(N, _, _), E != "NOOP"; 3 | // trivial case: 4 | haslog(X) :- log(X, _, _, _, E), E != "NOOP"; 5 | good("YAY") :- member(_, X, _), notin haslog(X); 6 | 7 | 8 | //good("YAY") :- member(_, X, _); 9 | 10 | 11 | bad(N1, N2, "disagree") :- log(N1, Idx, _, _, Entry), log(N2, Idx, _, _, Entry2), Entry != Entry2, notin crash(_, N2, _), notin crash(_, N1, _); 12 | bad(N1, N2, "two leaders") :- leader(_, T, N1), leader(_, T, N2), N1 != N2; -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/ack_rb.ded: -------------------------------------------------------------------------------- 1 | // classic reliable broadcast. send the contents of the log to all neighbors, once. 2 | // neighbors relay the log in the same fashion, once only 3 | include "./bcast_edb.ded"; 4 | 5 | node(Node, Neighbor)@next :- node(Node, Neighbor); 6 | log(Node, Pload)@next :- log(Node, Pload); 7 | ack(S, H, P)@next :- ack(S, H, P); 8 | rbcast(Node2, Node1, Pload)@async :- log(Node1, Pload), node(Node1, Node2), notin ack(Node1, Node2, Pload); 9 | ack(From, Host, Pl)@async :- rbcast(Host, From, Pl); 10 | rbcast(A, A, P) :- bcast(A, P); 11 | log(N, P) :- rbcast(N, _, P); 12 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/DedalusParserSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{ShouldMatchers, FunSuite} 4 | 5 | class DedalusParserSuite extends FunSuite with ShouldMatchers { 6 | test("comment not followed by newline") { 7 | val comment = """//good(C, X) :- begin(C, X)@1""" 8 | val lines = Seq("""omission("a", "b", 1);""", comment, """omission("a", "b", 2);""", comment) 9 | val program = DedalusParser.parseProgram(lines.mkString("\n")) 10 | program.facts.size should be (2) 11 | program.rules.size should be (0) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/heartbeat_assert.ded: -------------------------------------------------------------------------------- 1 | dead_after(A, O) :- crash(A, O, N), tic(A, O, M), M > N; 2 | // either it really died, and we really caught it 3 | good(A, O) :- watch_log(A, O), failed(A, O), dead_after(A, O); 4 | 5 | // or 6 | good(A, O) :- watch_log(A, O), notin failed(A, O); 7 | 8 | 9 | // we'd like to say: 10 | // (1) There is a time after which every process that crashes is always suspected by some correct process. 11 | // (2) There is a time after which some correct process is never suspected by any correct process.
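// (these are the completeness and eventual accuracy properties of an eventually perfect failure detector.
// a rough sketch of (1), treating suspected(Watcher, Other) as a hypothetical relation:
// somebody_suspects(O) :- suspected(_, O);
// missed(O) :- dead_after(_, O), notin somebody_suspects(O);
// a run would then satisfy (1) when missed is empty.)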
12 | 13 | 14 | 15 | watch("a", "b")@1; 16 | watch("a","c")@1; -------------------------------------------------------------------------------- /src/test/resources/examples_ft/gstore/group_delete.ded: -------------------------------------------------------------------------------- 1 | begin_delete(Leader, Server, Id) :- ja(Leader, _, Id, _, "R"), running_group_req(Leader, _, Id, Group), lencoding(Leader, Group, Server); 2 | begin_delete(L, S, I)@next :- begin_delete(L, S, I), notin end_delete(L, I); 3 | 4 | d(S, L, I)@async :- begin_delete(L, S, I), notin end_delete(L, I); 5 | 6 | da(Leader, Server, Id)@async :- d(Server, Leader, Id); 7 | 8 | da_log(L, S, I) :- da(L,S,I); 9 | da_log(L, S, I)@next :- da_log(L,S,I); 10 | 11 | missing(L, S, I) :- begin_delete(L, S, I), notin da_log(L, S, I); 12 | end_delete(L, I) :- begin_delete(L, _, I), notin missing(L, _, I); 13 | 14 | //da_missing() :- -------------------------------------------------------------------------------- /demo_v2.ded: -------------------------------------------------------------------------------- 1 | 2 | member(Node, Other)@next :- member(Node, Other); 3 | log(Node, Message)@next :- log(Node, Message); 4 | 5 | log(Other, Message)@async :- bcast(Node, Message), member(Node, Other); 6 | 7 | bcast("a", "Hello world!")@1; 8 | 9 | // someone (X) has a log, but not A. 10 | missing_log(A, Pl) :- log(X, Pl), member(_, A), notin log(A, Pl); 11 | 12 | pre(X, Pl) :- log(X, Pl), notin bcast(X, Pl)@1, notin crash(X, X, _); 13 | post(X, Pl) :- log(X, Pl), notin missing_log(_, Pl); 14 | 15 | member("a", "a")@1; 16 | member("a", "b")@1; 17 | member("a", "c")@1; 18 | member("b", "a")@1; 19 | member("b", "b")@1; 20 | member("b", "c")@1; 21 | member("c", "a")@1; 22 | member("c", "b")@1; 23 | member("c", "c")@1; 24 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/delivery/replog.ded: -------------------------------------------------------------------------------- 1 | // reliable broadcast. send the contents of the log to all neighbors, infinitely often. 2 | // neighbors relay the log in the same fashion. 3 | 4 | include "./bcast_edb.ded"; 5 | //include "./simplog.ded"; 6 | 7 | // why does the (tiny, semantically equivalent) program below produce different runs? 8 | //include "./rdlog.ded"; 9 | //bcast(N, P)@next :- log(N, P); 10 | 11 | node(Node, Neighbor)@next :- node(Node, Neighbor); 12 | log(Node, Pload)@next :- log(Node, Pload); 13 | log(Node2, Pload)@async :- log(Node1, Pload), node(Node1, Node2); 14 | log(N, P) :- bcast(N, P); 15 | bcast(N, P)@next :- log(N, P); 16 | 17 | //log(Node2, Pload)@async :- log(Node1, Pload), node(Node1, Node2); -------------------------------------------------------------------------------- /Installation.md: -------------------------------------------------------------------------------- 1 | Due to system changes in OS X El Capitan, setting up Molly involves slightly more steps than usual: 2 | 3 | 1. Install sbt and graphviz 4 | 5 | 2. If installed, uninstall apr and apr-util 6 | 7 | 3. Download apr and apr-util from source and install them per their instructions 8 | 9 | 4. Run make in the highest level directory of the cloned repository 10 | 11 | 5. The c4 and z3 libraries now build 12 | 13 | 6. Since DYLD_* environment variables are stripped for system security reasons (System Integrity Protection), create symlinks in the working directory, like so: 14 | 15 | ln -s ./lib/c4/build/src/libc4/libc4.dylib ./libc4.dylib 16 | 17 | ln -s ./lib/z3/build/z3-dist/lib/libz3.dylib ./libz3.dylib 18 | 19 | 7. We are now good to go!! 20 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/orc2.ded: -------------------------------------------------------------------------------- 1 | service1(H, S)@next :- service1(H, S); 2 | service2(H, S)@next :- service2(H, S); 3 | response(H, S, P, C)@next :- response(H, S, P, C); 4 | page(S, P, C)@next :- page(S, P, C); 5 | 6 | 7 | request(S, H, P)@async :- gt(H, P), service1(H, S); 8 | response(H, S, P, C)@async :- request(S, H, P), page(S, P, C); 9 | 10 | delay(H, P)@async :- gt(H, P); 11 | 12 | timeout(H, P)@async :- delay(H, P); 13 | 14 | request(S, H, P)@async :- timeout(H, P), service2(H, S), notin response(H, _, P, _); 15 | 16 | 17 | service1("a", "b")@1; 18 | service2("a", "c")@1; 19 | gt("a", "indx")@1; 20 | 21 | page("b", "indx", "hello")@1; 22 | page("c", "indx", "hello")@1; 23 | 24 | //bad(H, P) :- gt(H, P)@1, notin response(H, _, P, _); 25 | -------------------------------------------------------------------------------- /demo_v3.ded: -------------------------------------------------------------------------------- 1 | 2 | member(Node, Other)@next :- member(Node, Other); 3 | log(Node, Message)@next :- log(Node, Message); 4 | 5 | log(Other, Message)@async :- bcast(Node, Message), member(Node, Other); 6 | 7 | bcast("a", "Hello world!")@1; 8 | bcast(Node, Message)@next :- bcast(Node, Message); 9 | 10 | // someone (X) has a log, but not A. 11 | missing_log(A, Pl) :- log(X, Pl), member(_, A), notin log(A, Pl); 12 | 13 | pre(X, Pl) :- log(X, Pl), notin bcast(X, Pl)@1, notin crash(X, X, _); 14 | post(X, Pl) :- log(X, Pl), notin missing_log(_, Pl); 15 | 16 | member("a", "a")@1; 17 | member("a", "b")@1; 18 | member("a", "c")@1; 19 | member("b", "a")@1; 20 | member("b", "b")@1; 21 | member("b", "c")@1; 22 | member("c", "a")@1; 23 | member("c", "b")@1; 24 | member("c", "c")@1; 25 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/test_deliv.ded: -------------------------------------------------------------------------------- 1 | 2 | 3 | node(Node, Neighbor)@next :- node(Node, Neighbor); 4 | log(Node, Pload)@next :- log(Node, Pload); 5 | 6 | log(Node2, Pload)@async :- bcast(Node1, Pload), node(Node1, Node2); 7 | log(Node, Pload) :- bcast(Node, Pload); 8 | 9 | // persist bcast; sender retries. should accommodate all failures 10 | bcast(N, P)@next :- bcast(N, P); 11 | 12 | 13 | 14 | node("a","a")@1; 15 | node("b","a")@1; 16 | node("c","a")@1; 17 | node("a","b")@1; 18 | node("b","b")@1; 19 | node("c","b")@1; 20 | node("a","c")@1; 21 | node("b","c")@1; 22 | node("c","c")@1; 23 | bcast("a", "nonce1")@1; 24 | bcast("b", "nonce2")@1; 25 | bcast("c", "nonce3")@1; 26 | 27 | good(N, P) :- log(N, P); 28 | bad(N, P) :- log(N1, P), node(N1, N), notin log(N, P); 29 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/real_heartbeat.ded: -------------------------------------------------------------------------------- 1 | //include "timeout_svc.ded"; 2 | 3 | watch_state(F, H, S+1)@next :- watch_state(F, H, S); 4 | watch_state(F, H, 0) :- watch(F, H); 5 | 6 | hb_req(F, H)@async :- watch_state(H, F, _), notin hb_log(H, F, _); 7 | 8 | hb_state(F, H, 0)@next :- hb_req(F, H), notin hb_state(F, H, _); 9 | heartbeat(H, F, N)@async :- hb_state(F, H, N); 10 | hb_state(F, H, N+1)@next :- hb_state(F, H, N); 11 | 12 | 13 | hb_log(H, F, N) :- heartbeat(H, F, N); 14 | hb_log(H, F, N)@next :- hb_log(H, F, N); 15 | aggs(H, F, max) :- hb_log(H, F, N); 16 | 17 | failed(H, F) :- aggs(H, F, M), watch_state(H, F, S), S > M + 3; 18 | 19 | //good(H) :- hb_log(H, _, _); 20 | //good(H) :- watch(A, H)@1, crash(A, A, _); 21 | 22 | //watch("a", "b")@1; 23 | //watch("a", "c")@1; -------------------------------------------------------------------------------- /demo_v4.ded: -------------------------------------------------------------------------------- 1 | 2 | member(Node, Other)@next :- member(Node, Other); 3 | log(Node, Message)@next :- log(Node, Message); 4 | 5 | log(Other, Message)@async :- bcast(Node, Message), member(Node, Other); 6 | 7 | bcast("a", "Hello world!")@1; 8 | bcast(Node, Message)@next :- bcast(Node, Message); 9 | bcast(Node, Message)@next :- log(Node, Message); 10 | 11 | // someone (X) has a log, but not A.
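// (compared with demo_v3, v4 also re-broadcasts from every receiver's log, via bcast(Node, Message)@next :- log(Node, Message), so delivery no longer depends on the original sender surviving.)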
12 | missing_log(A, Pl) :- log(X, Pl), member(_, A), notin log(A, Pl); 13 | 14 | pre(X, Pl) :- log(X, Pl), notin bcast(X, Pl)@1, notin crash(X, X, _); 15 | post(X, Pl) :- log(X, Pl), notin missing_log(_, Pl); 16 | 17 | member("a", "a")@1; 18 | member("a", "b")@1; 19 | member("a", "c")@1; 20 | member("b", "a")@1; 21 | member("b", "b")@1; 22 | member("b", "c")@1; 23 | member("c", "a")@1; 24 | member("c", "b")@1; 25 | member("c", "c")@1; 26 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/orc.ded: -------------------------------------------------------------------------------- 1 | service1(H, S)@next :- service1(H, S); 2 | service2(H, S)@next :- service2(H, S); 3 | response(H, S, P, C)@next :- response(H, S, P, C); 4 | page(S, P, C)@next :- page(S, P, C); 5 | gt(S, P)@next :- gt(S, P); 6 | 7 | 8 | request(S, H, P)@async :- gt(H, P), service1(H, S); 9 | response(H, S, P, C)@async :- request(S, H, P), page(S, P, C); 10 | 11 | delay(H, P)@async :- gt(H, P); 12 | 13 | timeout(H, P)@async :- delay(H, P); 14 | delay(H, P)@async :- timeout(H, P); 15 | 16 | request(S, H, P)@async :- timeout(H, P), service2(H, S), notin response(H, _, P, _); 17 | 18 | 19 | service1("a", "b")@1; 20 | service2("a", "c")@1; 21 | gt("a", "indx")@1; 22 | 23 | page("b", "indx", "hello")@1; 24 | page("c", "indx", "hello")@1; 25 | 26 | good(H, P) :- gt(H, P)@1, response(H, _, P, _); 27 | good(H, P) :- gt(H, P)@1, crash(H, H, _); 28 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/fake_zk.ded: -------------------------------------------------------------------------------- 1 | include "util/timeout_svc.ded"; 2 | 3 | timer_svc(Z, Z, 2) :- zookeeper(Z, Z)@1; 4 | 5 | // "establish ZK membership" as a replica 6 | member(Z, M)@async :- begin(M), zookeeper(M, Z); 7 | clients(Z, C)@async :- client(C), zookeeper(M, Z); 8 | 9 | member(Z, M)@next :- member(Z, M); 10 | leader(Z, L)@next :- leader(Z, L); 11 | clients(Z, C)@next :- clients(Z, C); 12 | zookeeper(M, Z)@next :- zookeeper(M, Z); 13 | 14 | // periodically tell clients and replicas about membership and leadership 15 | member(C, M)@async :- member(Z, M), clients(Z, C), zookeeper(Z,Z), timeout(Z, Z); 16 | member(A, M)@async :- member(Z, A), member(Z, M), zookeeper(Z,Z), timeout(Z, Z); 17 | leader(M, L)@async :- leader(Z, L), member(Z, M), zookeeper(Z, Z), timeout(Z, Z); 18 | leader(M, L)@async :- leader(Z, L), clients(Z, M), zookeeper(Z,Z), timeout(Z, Z); 19 | 20 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/flux/flux_buffer.ded: -------------------------------------------------------------------------------- 1 | // I am a buffer. 
I support: put, peek, advance, ack, ack_all, and reset 2 | 3 | buf(Host, Data, Sn, Markings) :- put(Host, Data, Sn, Markings); 4 | 5 | //buf(H,D,S,M)@next :- bug(H, D, S, M), notin advance(H, D), notin reset(H, D); 6 | buf(H,D,S,M)@next :- buf(H, D, S, M), notin reset(H, D); 7 | 8 | acked(H, Dest, Sn, Markings) :- ack(H, Dest, Sn, Markings); 9 | acked(H, Dest, Sn, M)@next :- acked(H, Dest, Sn, M); 10 | 11 | 12 | bot(H, Dest, min) :- buf(H, _, Sn, _), dest(H, Dest), notin advanced(H, Dest, Sn); 13 | advanced(H, D, Sn)@next :- advance(H, D), bot(H, D, Sn); 14 | advanced(H, D, S)@next :- advanced(H, D, S); 15 | 16 | 17 | peek(H, Dest, Sn, Data) :- bot(H, Dest, Sn), buf(H, Data, Sn, _); 18 | 19 | 20 | dest(H, D)@next :- dest(H, D); 21 | 22 | //good(X, D, S) :- buf(X, D, S, _); 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/util/heartbeat.ded: -------------------------------------------------------------------------------- 1 | include "util/timeout_svc.ded"; 2 | 3 | watch_log(F, H)@next :- watch_log(F, H); 4 | watch_log(F, H) :- watch(F, H); 5 | 6 | timer_svc(F, 1, 3) :- watch(F, H); 7 | timer_svc(H, Id + 1, 3) :- timeout(H, Id); 8 | 9 | ping(H, F, 3)@async :- watch(F, H); 10 | ping(Host, From, Nonce + 1)@async :- watch_log(From, Host), timeout(From, Nonce); 11 | pong(From, Host, Nonce)@async :- ping(Host, From, Nonce); 12 | omission(Host, Other, Id) :- timeout(Host, Id), watch_log(Host, Other), notin pong(Host, Other, Id); 13 | omission_cnt(Host, Other, count) :- omission(Host, Other, Id); 14 | failed(Host, Other) :- omission_cnt(Host, Other, Cnt), max_tic(Host, Other, Tm), Cnt > Tm / 6; 15 | 16 | tic(F, H, 0) :- watch(F, H); 17 | tic(F, H, I+1)@next :- tic(F, H, I); 18 | 19 | max_tic(F, H, max) :- tic(F, H, I); 20 | // schema hint: 21 | 22 | watch("test", "test")@1; 23 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/fake_zk2.ded: -------------------------------------------------------------------------------- 1 | include "timeout_svc.ded"; 2 | 3 | timer_svc(Z, Z, 2) :- zookeeper(Z, Z)@1; 4 | 5 | // "establish ZK membership" as a replica 6 | member(Z, M)@async :- begin(M), zookeeper(M, Z); 7 | clients(Z, C)@async :- client(C), zookeeper(M, Z); 8 | 9 | member(Z, M)@next :- member(Z, M); 10 | leader(Z, L)@next :- leader(Z, L); 11 | clients(Z, C)@next :- clients(Z, C); 12 | zookeeper(M, Z)@next :- zookeeper(M, Z); 13 | 14 | // periodically tell clients and replicas about membership and leadership 15 | //member(C, M)@async :- member(Z, M), clients(Z, C), zookeeper(Z,Z), timeout(Z, Z); 16 | // just tell the leader who the members are. 17 | member(A, M)@async :- leader(Z, A), member(Z, M), zookeeper(Z,Z), timeout(Z, Z); 18 | leader(M, L)@async :- leader(Z, L), member(Z, M), zookeeper(Z, Z), timeout(Z, Z); 19 | leader(M, L)@async :- leader(Z, L), clients(Z, M), zookeeper(Z,Z), timeout(Z, Z); 20 | 21 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/test2.ded: -------------------------------------------------------------------------------- 1 | // classic reliable broadcast. send the contents of the log to all neighbors, once. 
2 | // neighbors relay the log in the same fashion, once only 3 | 4 | node(Node, Neighbor)@next :- node(Node, Neighbor); 5 | log(Node, Pload)@next :- log(Node, Pload); 6 | 7 | rbcast(Node2, Node1, Pload)@async :- log(Node1, Pload), node(Node1, Node2), notin ack(Node1, Node2, Pload); 8 | //log(N, P)@next :- rbcast(N, _, P); 9 | log(N, P) :- rbcast(N, _, P); 10 | 11 | ack(From, Host, Pl)@async :- rbcast(Host, From, Pl); 12 | ack(S, H, P)@next :- ack(S, H, P); 13 | 14 | rbcast(A, A, P) :- bcast(A, P); 15 | 16 | 17 | node("a","a")@1; 18 | node("b","a")@1; 19 | node("c","a")@1; 20 | node("a","b")@1; 21 | node("b","b")@1; 22 | node("c","b")@1; 23 | node("a","c")@1; 24 | node("b","c")@1; 25 | node("c","c")@1; 26 | bcast("a", "nonce1")@1; 27 | bcast("b", "nonce2")@1; 28 | bcast("c", "nonce3")@1; 29 | 30 | good(N, P) :- log(N, P); 31 | bad(N, P) :- log(N1, P), node(N1, N), notin log(N, P); 32 | 33 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/test_ack.ded: -------------------------------------------------------------------------------- 1 | // classic reliable broadcast. send the contents of the log to all neighbors, once. 2 | // neighbors relay the log in the same fashion, once only 3 | 4 | node(Node, Neighbor)@next :- node(Node, Neighbor); 5 | log(Node, Pload)@next :- log(Node, Pload); 6 | 7 | rbcast(Node2, Node1, Pload)@async :- log(Node1, Pload), node(Node1, Node2), notin ack(Node1, Node2, Pload); 8 | //log(N, P)@next :- rbcast(N, _, P); 9 | log(N, P) :- rbcast(N, _, P); 10 | 11 | ack(From, Host, Pl)@async :- rbcast(Host, From, Pl); 12 | ack(S, H, P)@next :- ack(S, H, P); 13 | 14 | rbcast(A, A, P) :- bcast(A, P); 15 | 16 | 17 | node("a","a")@1; 18 | node("b","a")@1; 19 | node("c","a")@1; 20 | node("a","b")@1; 21 | node("b","b")@1; 22 | node("c","b")@1; 23 | node("a","c")@1; 24 | node("b","c")@1; 25 | node("c","c")@1; 26 | bcast("a", "nonce1")@1; 27 | bcast("b", "nonce2")@1; 28 | bcast("c", "nonce3")@1; 29 | 30 | good(N, P) :- log(N, P); 31 | bad(N, P) :- log(N1, P), node(N1, N), notin log(N, P); 32 | 33 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/SetUtilsSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{Matchers, FunSuite} 4 | 5 | import edu.berkeley.cs.boom.molly.util.SetUtils._ 6 | 7 | class SetUtilsSuite extends FunSuite with Matchers { 8 | test("isSuperset") { 9 | isSuperset(Set.empty, Set.empty) should be (true) 10 | isSuperset(Set(1, 2, 3), Set(1)) should be (true) 11 | isSuperset(Set(1, 2), Set(1, 2, 3)) should be (false) 12 | isSuperset(Set(9, 10, 11), Set(0)) should be (false) 13 | } 14 | 15 | test("minimalSets") { 16 | minimalSets(Seq.empty) should be (empty) 17 | minimalSets(Seq(Set(1), Set(1))).toSet should be (Set(Set(1))) 18 | minimalSets(Seq(Set(1), Set(2, 3))).toSet should be (Set(Set(1), Set(2, 3))) 19 | minimalSets(Seq(Set(1), Set(1, 2), Set(1, 2, 3))).toSet should be (Set(Set(1))) 20 | minimalSets(Seq(Set(1, 2), Set(2, 3), Set(1, 3))).toSet should be ( 21 | Set(Set(1, 2), Set(2, 3), Set(1, 3)) 22 | ) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc.ded: -------------------------------------------------------------------------------- 1 | include "./2pc_edb.ded"; 2 | 3 | // coordinator logic 4 | //prepare(Agent, Coord,
Xact)@async :- begin(Coord, Xact), agent(Coord, Agent); 5 | prepare(Agent, Coord, Xact)@async :- running(Coord, Xact), agent(Coord, Agent); 6 | abort(C, X)@next :- vote(C, _, X, "N"); 7 | commit(C, X)@next :- vote(C, _, X, "Y"), notin missing_vote(C, X); 8 | missing_vote(C, X) :- agent(C, A), running(C, X), notin vote(C, A, X, "Y"); 9 | running(Coord, Xact) :- begin(Coord, Xact); 10 | running(C, X)@next :- running(C, X), notin commit(C, X), notin abort(C, X); 11 | commit(A, X)@async :- commit(C, X), agent(C, A); 12 | abort(A, X)@async :- abort(C, X), agent(C, A); 13 | 14 | // agent logic 15 | vote(Coord, Agent, Xact, "Y")@async :- prepare(Agent, Coord, Xact), can(Agent, Xact); 16 | prepared(A, C, X, "Y") :- prepare(A,C,X), can(A,X); 17 | 18 | // frame rules 19 | agent(C, A)@next :- agent(C, A); 20 | can(A, X)@next :- can(A, X); 21 | abort(C, X)@next :- abort(C, X); 22 | commit(C, X)@next :- commit(C, X); 23 | vote(C, A, X, S)@next :- vote(C, A, X, S); 24 | prepared(C,A,X,Y)@next :- prepared(C,A,X,Y); 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: deps 2 | 3 | clean-deps: 4 | rm -rf lib/c4/build 5 | rm -rf lib/z3/build 6 | 7 | deps: get-submodules c4 z3 8 | 9 | get-submodules: 10 | git submodule update --init --recursive 11 | 12 | c4: lib/c4/build/src/libc4/libc4.dylib 13 | 14 | z3: lib/z3/build/z3-dist 15 | 16 | lib/c4/build/src/libc4/libc4.dylib: 17 | @which cmake > /dev/null 18 | cd lib/c4 && mkdir -p build 19 | cd lib/c4/build && cmake .. 20 | cd lib/c4/build && make 21 | 22 | lib/z3/build/z3-dist: lib/z3/build/libz3.dylib 23 | # We need to make these parent directories so that Z3's Makefile 24 | # doesn't complain about them missing when it copies files during 25 | # the `install` step: 26 | mkdir -p lib/z3/build/z3-dist/lib/python2.7/dist-packages 27 | mkdir -p lib/z3/build/z3-dist/lib/python2.6/dist-packages 28 | cd lib/z3/build && make install 29 | 30 | lib/z3/build/libz3.dylib: 31 | cd lib/z3 && python scripts/mk_make.py --prefix=z3-dist 32 | cd lib/z3/build && make -j4 33 | 34 | # SBT command for running only the fast unit tests and excluding the slower 35 | # end-to-end tests (which have been tagged using ScalaTest's `Slow` tag): 36 | fast-test: 37 | sbt "testOnly *Suite -- -l org.scalatest.tags.Slow" 38 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/tokens.ded: -------------------------------------------------------------------------------- 1 | include "util/timeout_svc.ded"; 2 | 3 | node(H, N)@next :- node(H, N); 4 | token(H, T) :- begin(H, T); 5 | // fix later 6 | //wait(H, T)@next :- begin(H, T), notin win(H, T); 7 | wait(H, T)@next :- begin(H, T); 8 | wait(H, T)@next :- wait(H, T); 9 | 10 | send_token(To, From, Token)@async :- token(From, Token), node(From, To), notin win(From, Token); 11 | 12 | ack(F, T)@async :- send_token(H, F, T); 13 | 14 | token(H, T) :- send_token(H, _, T); 15 | token(H, T)@next :- token(H, T), notin ack(H, T); 16 | 17 | win(H, T) :- wait(H, T), send_token(H, _, T); 18 | win(H, T)@next :- win(H, T); 19 | 20 | node("a", "b")@1; 21 | node("b", "c")@1; 22 | node("c", "d")@1; 23 | node("d", "a")@1; 24 | 25 | skip("a", "c")@1; 26 | skip("b", "d")@1; 27 | skip("c", "a")@1; 28 | skip("d", "b")@1; 29 | skip(N, S)@next :- skip(N, S); 30 | 31 | timer_svc(N, T, 2) :- token(N, T); 32 | send_token(To, From, Token)@async :- token(From, Token), 
skip(From, To), timeout(From, Token), notin win(From, Token); 33 | 34 | 35 | begin("a", 1)@1; 36 | 37 | 38 | //bad(H, T) :- begin(H, T)@1, notin win(H, T), notin crash(H, _)@1; 39 | //bad(H, T) :- win(H, T); 40 | post(H, T) :- win(H, T); 41 | pre(H, T) :- begin(H, T); 42 | good(H, T) :- win(H, T); 43 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/raft/raft_edb.ded: -------------------------------------------------------------------------------- 1 | dispatch("z", 0, "fug", 0)@100; 2 | commit_indx("z", 0)@100; 3 | 4 | 5 | log_term("z", 0)@100; 6 | role("z", "foo")@100; 7 | term("z", 0)@100; 8 | 9 | 10 | // edb; move later 11 | 12 | role("a", "C")@1; 13 | role("b", "C")@1; 14 | role("c", "C")@1; 15 | 16 | stall("a", 0)@1; 17 | stall("b", 0)@1; 18 | stall("c", 0)@1; 19 | 20 | term("a", 0)@1; 21 | term("b", 0)@1; 22 | term("c", 0)@1; 23 | 24 | log_indx("a", 0)@1; 25 | log_indx("b", 0)@1; 26 | log_indx("c", 0)@1; 27 | 28 | member("a", "b", 2)@1; 29 | member("a", "c", 3)@1; 30 | member("a", "a", 1)@1; 31 | member("b", "c", 3)@1; 32 | member("b", "a", 1)@1; 33 | member("b", "b", 2)@1; 34 | member("c", "b", 2)@1; 35 | member("c", "a", 1)@1; 36 | member("c", "c", 3)@1; 37 | 38 | log("a", 0, 0, "z", "NOOP")@1; 39 | log("b", 0, 0, "z", "NOOP")@1; 40 | log("c", 0, 0, "z", "NOOP")@1; 41 | 42 | rank("a", "F", 1)@1; 43 | rank("a", "C", 2)@1; 44 | rank("a", "L", 3)@1; 45 | rank("b", "F", 1)@1; 46 | rank("b", "C", 2)@1; 47 | rank("b", "L", 3)@1; 48 | rank("c", "F", 1)@1; 49 | rank("c", "C", 2)@1; 50 | rank("c", "L", 3)@1; 51 | rank(N, O, I)@next :- rank(N, O, I); 52 | 53 | commit_indx("a", 0)@1; 54 | commit_indx("b", 0)@1; 55 | commit_indx("c", 0)@1; 56 | 57 | new_request("C", "b", 100, "foo")@1; 58 | new_request("C", "a", 200, "bar")@1; 59 | new_request("C", "c", 300, "baz")@1; -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/C4CodeGeneratorSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{ShouldMatchers, FunSuite} 4 | import edu.berkeley.cs.boom.molly.codegen.C4CodeGenerator 5 | import edu.berkeley.cs.boom.molly.DedalusParser._ 6 | import edu.berkeley.cs.boom.molly.DedalusTyper._ 7 | import scalaz.syntax.id._ 8 | 9 | class C4CodeGeneratorSuite extends FunSuite with ShouldMatchers { 10 | test("aggregation") { 11 | val prog = 12 | """ 13 | | omission_cnt(Host, Other, count) :- omission(Host, Other, Id); 14 | | omission("a", "b", 1); 15 | | omission("a", "b", 2); 16 | | omission("a", "b", 3); 17 | """.stripMargin 18 | val code = prog |> parseProgram |> inferTypes |> C4CodeGenerator.generate 19 | code.lines.toSeq should contain ("omission_cnt(Host, Other, count) :- omission(Host, Other, Id);") 20 | } 21 | 22 | test("negated predicates should appear at end of rule body") { 23 | // This requirement is a workaround for https://github.com/bloom-lang/c4/issues/1 24 | val prog = 25 | """ 26 | | foo("1"); 27 | | bar("1"); 28 | | baz(X) :- notin bar(X), foo(X); 29 | """.stripMargin 30 | val code = prog |> parseProgram |> inferTypes |> C4CodeGenerator.generate 31 | code.lines.toSeq should contain ("baz(X) :- foo(X), notin bar(X);") 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/2pc_ctp.ded: -------------------------------------------------------------------------------- 1 | 
include "2pc_timeout.ded"; 2 | include "../util/timeout_svc.ded"; 3 | 4 | timer_svc(Agent, Xact, 4) :- prepare(Agent, _, Xact), can(Agent, Xact); 5 | 6 | known(Agent, Xact, "C") :- commit(Agent, Xact); 7 | known(Agent, Xact, "A") :- abort(Agent, Xact); 8 | 9 | decision_req(Other, Agent, Xact)@async :- timeout(Agent, Xact), agent(Agent, Other), notin known(Agent, Xact, _); 10 | // 1. q has already decided Commit (or Abort): q simply sends a COMMIT (or ABORT) to p, and p decides accordingly, 11 | commit(Requestor, Xact)@async :- decision_req(Agent, Requestor, Xact), commit(Agent, Xact); 12 | abort(Requestor, Xact)@async :- decision_req(Agent, Requestor, Xact), abort(Agent, Xact); 13 | 14 | // 2. q has not voted yet: q can unilaterally decide Abort. It then sends an ABORT to p, and p therefore decides Abort. 15 | 16 | gotta_abort(Agent, Requestor, Xact) :- decision_req(Agent, Requestor, Xact), notin prepared(Agent, _, Xact, _); 17 | abort(Agent, Xact) :- gotta_abort(Agent, _, Xact); 18 | 19 | // 3. q has voted Yes but has not yet reached a decision: q is also uncertain and therefore cannot help p reach a decision. 20 | // (do nothing...) but remember the case where q voted no: 21 | abort(Requestor, Xact)@async :- decision_req(Agent, Requestor, Xact), prepared(Agent, _, Xact, "N"); 22 | 23 | agent("a", "C")@1; 24 | agent("b", "C")@1; 25 | agent("d", "C")@1; 26 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/report/GraphvizPrettyPrinter.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.report 2 | 3 | import org.kiama.output.PrettyPrinter 4 | import scala.collection.immutable 5 | 6 | /** 7 | * Mixin that provides helper functions for generating GraphViz .dot markup using 8 | * Kiama pretty-printing. 
9 | */ 10 | trait GraphvizPrettyPrinter extends PrettyPrinter { 11 | 12 | def subgraph(name: Doc, label: Doc, statements: Traversable[Doc]): Doc = { 13 | "subgraph" <+> name <+> braces(nest( 14 | linebreak <> 15 | "label" <> equal <> dquotes(label) <> semi <@@> 16 | statements.reduce(_ <@@> _) 17 | ) <> linebreak) 18 | } 19 | 20 | def node(id: Any, attributes: (String, String)*): Doc = { 21 | if (attributes.isEmpty) { 22 | id.toString <> semi 23 | } else { 24 | val attrs = attributes.map { case (k, v) => k <> equal <> dquotes(v) } 25 | id.toString <+> brackets(ssep(immutable.Seq(attrs: _*), comma)) <> semi 26 | } 27 | } 28 | 29 | def diEdge(from: Any, to: Any, attributes: (String, String)*): Doc = { 30 | if (attributes.isEmpty) { 31 | from.toString <+> "->" <+> to.toString 32 | } else { 33 | val attrs = attributes.map { case (k, v) => k <> equal <> dquotes(v) } 34 | from.toString <+> "->" <+> to.toString <+> brackets(ssep(immutable.Seq(attrs: _*), comma)) <> semi 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/derivations/SolverSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.derivations 2 | 3 | import org.scalatest.{FunSuite, Matchers} 4 | 5 | import edu.berkeley.cs.boom.molly.FailureSpec 6 | 7 | class SolverSuite extends FunSuite with Matchers { 8 | 9 | test("solutionToFailureSpec should remove redundant message losses") { 10 | val originalFailureSpec = FailureSpec(eot = 4, eff = 2, maxCrashes = 1, nodes = List("A", "B")) 11 | val solution: Set[SolverVariable] = Set( 12 | CrashFailure("A", 2), 13 | MessageLoss("A", "B", 1), // Before crash, so this message should still be included 14 | MessageLoss("A", "B", 2), // same time as crash, so this should be removed 15 | MessageLoss("A", "B", 3), // after crash, so this should also be removed 16 | MessageLoss("B", "A", 1) // arrives at receiver while receiver crashes, so should be removed 17 | ) 18 | val failureSpec = Solver.solutionToFailureSpec(originalFailureSpec, solution).get 19 | failureSpec.crashes should be (Set(CrashFailure("A", 2))) 20 | failureSpec.omissions should be (Set(MessageLoss("A", "B", 1))) 21 | } 22 | 23 | test("solutionToFailureSpec should not return failure-free specs") { 24 | val originalFailureSpec = FailureSpec(eot = 4, eff = 2, maxCrashes = 1, nodes = List("A", "B")) 25 | Solver.solutionToFailureSpec(originalFailureSpec, Set.empty) should be (None) 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/ramp/ramp_assert.ded: -------------------------------------------------------------------------------- 1 | // assertions 2 | 3 | //bad_read() :- responses(Cli, Server1, Keys, Val1, Cli2, Ts), responses(Cli, Server2, Keys, Val2, Cli2, Ts) 4 | 5 | read_disagree(Cli, Server) :- responses(Cli, Server, _, X, _, _), responses(Cli2, Server, _, Y, _, _), X != Y; 6 | good(Server) :- responses(Cli, Server, Keys, Val, C2, Ts), notin read_disagree(Cli, Server); 7 | 8 | //good(Server) :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), notin responses(Cli, Server, _, _, _, _); 9 | // if there is at least one client and at least one server up, there is a response. 
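// (the good(...) clauses below discharge the cases in which no response can be expected: every client crashed, the contacted server crashed, or that server never committed anything.)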
10 | all_cli_down("X") :- crash(_, "C", _), crash(_, "C2" ,_); 11 | //all_srv_down("X") :- crash(_, "a", _), crash(_, "b", _), crash(_, "c", _); 12 | srv_down(Server) :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), crash(_, Server, _); 13 | uncommitted(Server) :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), notin commit_log(Server, _, _); 14 | 15 | //never_requested(Server) :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server) 16 | good(Server) :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), all_cli_down(_); 17 | good(Server) :- srv_down(Server); 18 | good(Server) :- uncommitted(Server); 19 | 20 | 21 | //good("YAY") :- last_commit("a", _, _, "foo"), last_commit("b", _, _, "foo"), last_commit("c", _, _, "foo"); 22 | //good("YAY") :- last_commit("a", _, _, "foo"), last_commit("b", _, _, "foo"), last_commit("c", _, _, "foo"); 23 | 24 | // how should I write down the atomic visibility assertion? -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/RandomBenchmarkSweeper.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import scala.sys.process._ 4 | 5 | /** 6 | * Benchmarks the randomized search strategy against an entire corpus of programs. 7 | */ 8 | object RandomBenchmarkSweeper { 9 | 10 | val NUM_RUNS = 100 11 | 12 | val corpus = Seq( 13 | // ("Input programs", "eot", "eff", "nodes", "crashes") 14 | (Seq("simplog.ded", "deliv_assert.ded"), 4, 2, Seq("a", "b", "c"), 0), 15 | (Seq("rdlog.ded", "deliv_assert.ded"), 4, 2, Seq("a", "b", "c"), 1), 16 | (Seq("replog.ded", "deliv_assert.ded"), 8, 6, Seq("a", "b", "c"), 1), 17 | (Seq("classic_rb.ded", "deliv_assert.ded"), 5, 3, Seq("a", "b", "c"), 0), 18 | (Seq("2pc.ded", "2pc_assert.ded"), 5, 0, Seq("a", "b", "C", "d"), 1), 19 | (Seq("2pc_ctp.ded", "2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 1), 20 | (Seq("3pc.ded", "2pc_assert.ded"), 9, 7, Seq("a", "b", "C", "d"), 1), 21 | (Seq("kafka.ded"), 6, 4, Seq("a", "b", "c", "C", "Z"), 1) 22 | ) 23 | 24 | def main(args: Array[String]) { 25 | corpus.par.foreach { case (inputPrograms, eot, eff, nodes, crashes) => 26 | val inputFiles = inputPrograms.map(name => "../examples_ft/" + name) 27 | val command = s"-N ${nodes.mkString(",")} -t $eot -f $eff -c $crashes --max-runs $NUM_RUNS ${inputFiles.mkString(" ")}" 28 | println(s"Running command '$command'") 29 | Seq("sbt", s"run-main edu.berkeley.cs.boom.molly.RandomBenchmark $command").! 
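// (the trailing .! comes from scala.sys.process: it runs the command synchronously and returns its exit code)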
30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/kafka.ded.orig: -------------------------------------------------------------------------------- 1 | include "fake_zk.ded"; 2 | 3 | // replicas 4 | write(Next, Data, Origin)@async :- write(Origin, Data, Prev), member(Origin, Next), leader(Origin, Origin), notin ack(Origin, Data, Next); 5 | ack_int(Origin, Data, Acker)@async :- write(Acker, Data, Origin), notin leader(Acker, Acker); 6 | ack(O,D,A)@next :- ack(O,D,A); 7 | ack_int(O,D,A)@next :- ack_int(O,D,A); 8 | write(N,D,O)@next :- write(N, D, O); 9 | ack(Origin, Data, Acker)@async :- leader(Acker, Acker), write(Acker, Data, Origin), notin missing_ack(Acker, Data); 10 | missing_ack(Leader, Data) :- write(Leader, Data, _), member(Leader, Other), notin ack_int(Leader, Data, Other); 11 | 12 | // client 13 | write(M, D, O)@async :- write_req(O, D), leader(O, M); 14 | 15 | 16 | // replica init 17 | zookeeper("a", "Z")@1; 18 | zookeeper("b", "Z")@1; 19 | zookeeper("c", "Z")@1; 20 | zookeeper("C", "Z")@1; 21 | zookeeper("Z", "Z")@1; 22 | begin("a")@1; 23 | begin("b")@1; 24 | begin("c")@1; 25 | 26 | // client init 27 | client("C")@1; 28 | 29 | // bogus stuff 30 | leader("Z", "a")@1; 31 | member("Z", "a")@1; 32 | 33 | 34 | // write stream 35 | write_req("C", "Data1")@2; 36 | write_req("C", "Data1")@3; 37 | write_req("C", "Data1")@4; 38 | 39 | 40 | // assertions 41 | tried(C, D)@next :- tried(C, D); 42 | tried(C, D) :- write_req(C, D); 43 | 44 | good(D) :- ack("C", D, _), write(R, D, _), notin crash(R, R, _); 45 | good(D) :- tried(C, D), notin ack(C, D, _); 46 | 47 | // a pathological case that occurs due to our "preordination" of a as leader 48 | good(D) :- tried(C, D), leader(C, L), notin member(C, L); 49 | //good(D) :- member(D, _); 50 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/util/SetUtils.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.util 2 | 3 | import scala.annotation.tailrec 4 | 5 | object SetUtils { 6 | 7 | /** 8 | * @return true iff the first set is a (non-strict) superset of the second set. 9 | */ 10 | def isSuperset[T](superset: Set[T], set: Set[T]): Boolean = { 11 | set.forall(e => superset.contains(e)) 12 | } 13 | 14 | /** 15 | * Given a sequence of sets, returns a new sequence containing only minimal sets, sets 16 | * which are not supersets of other sets. 17 | */ 18 | def minimalSets[T](sets: Seq[Set[T]]): Seq[Set[T]] = { 19 | // The naive approach to this is O(N^2). 20 | // There are two simple optimizations that help: 21 | // - A set can be a superset of MANY smaller sets, so exclude it as soon as 22 | // we find the first subset. 
23 | // - A set can only be a superset of smaller sets, so group the sets by size. 24 | val setsBySize = sets.groupBy(_.size).toSeq.sortBy(- _._1) // minus sign -> descending sizes 25 | @tailrec 26 | def removeSupersets( 27 | setsBySize: Seq[(Int, Seq[Set[T]])], 28 | accum: Seq[Set[T]] = Seq.empty): Seq[Set[T]] = { 29 | if (setsBySize.isEmpty) { 30 | accum 31 | } else { 32 | val smallerModels: Seq[Set[T]] = setsBySize.tail.map(_._2).flatten 33 | val minimalSets = setsBySize.head._2.toSeq.filterNot { 34 | sup => smallerModels.exists(sub => isSuperset(sup, sub)) 35 | } 36 | removeSupersets(setsBySize.tail, minimalSets ++ accum) 37 | } 38 | } 39 | removeSupersets(setsBySize) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/report/MollyCodecJsons.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.report 2 | 3 | import argonaut._, Argonaut._ 4 | import edu.berkeley.cs.boom.molly.{Run, RunStatus, FailureSpec, UltimateModel} 5 | import edu.berkeley.cs.boom.molly.derivations.{MessageLoss, CrashFailure, Message} 6 | 7 | /** 8 | * Argonaut CodecJsons for converting our objects to JSON; 9 | * see http://argonaut.io/doc/codec/. 10 | */ 11 | object MollyCodecJsons { 12 | 13 | implicit def UltimateModelCodecJson: CodecJson[UltimateModel] = 14 | casecodec1(UltimateModel.apply, UltimateModel.unapply)("tables") 15 | 16 | implicit def RunStatusCodecJson: CodecJson[RunStatus] = 17 | CodecJson.derived(StringEncodeJson.contramap((x: RunStatus) => x.underlying), 18 | StringDecodeJson.map(RunStatus.apply)) 19 | 20 | implicit def RunCodecJson: EncodeJson[Run] = 21 | jencode4L((run: Run) => (run.iteration, run.status, run.failureSpec, run.model))("iteration", "status", "failureSpec", "model") 22 | 23 | implicit def FailureSpecCodecJson: CodecJson[FailureSpec] = 24 | casecodec6(FailureSpec.apply, FailureSpec.unapply)("eot", "eff", "maxCrashes", "nodes", 25 | "crashes", "omissions") 26 | 27 | implicit def CrashFailureCodecJson: CodecJson[CrashFailure] = 28 | casecodec2(CrashFailure.apply, CrashFailure.unapply)("node", "time") 29 | 30 | implicit def MessageLossCodecJson: CodecJson[MessageLoss] = 31 | casecodec3(MessageLoss.apply, MessageLoss.unapply)("from", "to", "time") 32 | 33 | implicit def MessageCodecJson: CodecJson[Message] = 34 | casecodec5(Message.apply, Message.unapply)("table", "from", "to", "sendTime", "receiveTime") 35 | 36 | } 37 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/util/leader.ded: -------------------------------------------------------------------------------- 1 | // need more than 5 ticks because of topology! 2 | 3 | ballot(Host, Round, Id, Id) :- election(Host, Round), slf(Host, Id); 4 | ballot(Target, Round, MyId, Id)@async :- ballot(Host, Round, _, Id), 5 | slf(Host, MyId), 6 | nodes(Host, Target, _), 7 | notin has_higher(Host, Round, Id); 8 | 9 | ballot_log(T, R, P, I) :- ballot(T, R, P, I); 10 | 11 | has_higher(H, R, I) :- ballot_log(H, R, _, I), ballot_log(H, R, _, I2), I2 > I; 12 | ballot_log(H,R,P, I)@next :- ballot_log(H,R,P,I); 13 | bcnt(H, R, I, count
) :- ballot_log(H, R, P, I); 14 | ncnt(H, count) :- nodes(H, _, I); 15 | 16 | leader(H, R, I) :- ballot(H, R, _, I), bcnt(H, R, I, C1), ncnt(H, C2), C1 > C2/2; 17 | 18 | nodes(H, N, I)@next :- nodes(H, N, I); 19 | slf(N, I)@next :- slf(N, I); 20 | 21 | 22 | slf("a", 1)@1; 23 | slf("b", 2)@1; 24 | slf("c", 3)@1; 25 | 26 | nodes(X, Y, Z) :- slf(X, _), slf(Y, Z); 27 | 28 | election("a", 1)@1; 29 | election("b", 1)@1; 30 | //election("c", 1)@1; 31 | 32 | 33 | //bad(Host, I, "noleader") :- slf(Host, I), notin leader(Host, 1, _); 34 | bad(Host, R, "noleader") :- ballot(Host, R, I, _), notin leader(Host, R, _); 35 | //bad(Host, I) :- slf(Host, I), notin leader(Host, 1, _), notin crash(Host, _)@1; 36 | good(Round, I) :- leader(Host, Round, I), notin bad(Host, Round, _); 37 | //good(Round, I) :- leader(Host, Round, I); //, notin bad(Host, Round, _); 38 | 39 | bad(H, L, "disagree") :- leader(H, 1, L), leader(H, 1, L2), L != L2; 40 | 41 | pre(R) :- ballot(_, R, _, _); 42 | disagree(R) :- leader(_, R, L1), leader(_, R, L2), L1 != L2; 43 | post(R) :- leader(_, R, _), notin disagree(R); 44 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/kafka.ded: -------------------------------------------------------------------------------- 1 | include "fake_zk.ded"; 2 | 3 | // replicas 4 | write(Next, Data, Origin)@async :- write(Origin, Data, Prev), member(Origin, Next), leader(Origin, Origin), notin ack_int(Origin, Data, Next); 5 | ack_int(Origin, Data, Acker)@async :- write(Acker, Data, Origin), notin leader(Acker, Acker); 6 | ack(O,D,A)@next :- ack(O,D,A); 7 | ack_int(O,D,A)@next :- ack_int(O,D,A); 8 | write(N,D,O)@next :- write(N, D, O); 9 | ack(Origin, Data, Acker)@async :- leader(Acker, Acker), write(Acker, Data, Origin), Origin != Acker, notin missing_ack(Acker, Data, _); 10 | missing_ack(Leader, Data, Other) :- write(Leader, Data, _), member(Leader, Other), Leader != Other, notin ack_int(Leader, Data, Other); 11 | 12 | // client 13 | write(M, D, O)@async :- write_req(O, D), leader(O, M); 14 | 15 | 16 | // replica init 17 | zookeeper("a", "Z")@1; 18 | zookeeper("b", "Z")@1; 19 | zookeeper("c", "Z")@1; 20 | zookeeper("C", "Z")@1; 21 | zookeeper("Z", "Z")@1; 22 | begin("a")@1; 23 | begin("b")@1; 24 | begin("c")@1; 25 | 26 | // client init 27 | client("C")@1; 28 | 29 | // bogus stuff 30 | leader("Z", "a")@1; 31 | member("Z", "a")@1; 32 | 33 | 34 | // write stream 35 | write_req("C", "Data1")@2; 36 | write_req("C", "Data1")@3; 37 | write_req("C", "Data1")@4; 38 | 39 | 40 | // assertions 41 | tried(C, D)@next :- tried(C, D); 42 | tried(C, D) :- write_req(C, D); 43 | 44 | good(D) :- ack("C", D, _), write(R, D, _), notin crash(R, R, _); 45 | good(D) :- tried(C, D), notin ack(C, D, _); 46 | 47 | // a pathological case that occurs due to our "preordination" of a as leader 48 | good(D) :- tried(C, D), leader(C, L), notin member(C, L); 49 | //good(D) :- member(D, _); 50 | 51 | pre(X) :- ack(_, X, _); 52 | post(X) :- ack(_, X, _), write(R, X, _), notin crash(R, R, _); 53 | -------------------------------------------------------------------------------- /demo_html/run_0_spacetime.dot: -------------------------------------------------------------------------------- 1 | digraph spacetime { 2 | rankdir=TD 3 | splines=line 4 | outputorder=nodesfirst 5 | subgraph cluster_proc_nodes { 6 | label=""; 7 | proc_a [label="Process a",group="a"]; 8 | proc_b [label="Process b",group="b"]; 9 | proc_c [label="Process c",group="c"]; 10 | } 11 | node_a_1 
[label="1",group="a"]; 12 | node_a_2 [shape="point",group="a",color="gray75"]; 13 | node_a_3 [shape="point",group="a",color="gray75"]; 14 | node_a_4 [shape="point",group="a",color="gray75"]; 15 | node_a_5 [shape="point",group="a",color="gray75"]; 16 | node_b_1 [shape="point",group="b",color="gray75"]; 17 | node_b_2 [label="2",group="b"]; 18 | node_b_3 [shape="point",group="b",color="gray75"]; 19 | node_b_4 [shape="point",group="b",color="gray75"]; 20 | node_b_5 [shape="point",group="b",color="gray75"]; 21 | node_c_1 [shape="point",group="c",color="gray75"]; 22 | node_c_2 [label="2",group="c"]; 23 | node_c_3 [shape="point",group="c",color="gray75"]; 24 | node_c_4 [shape="point",group="c",color="gray75"]; 25 | node_c_5 [shape="point",group="c",color="gray75"]; 26 | 27 | node_a_1 -> node_c_2 [label="log",constraint="false",weight="0",style="solid",color="black"]; 28 | node_a_1 -> node_b_2 [label="log",constraint="false",weight="0",style="solid",color="black"]; 29 | edge[weight=2, arrowhead=none, color=gray75, fillcolor=gray75]; 30 | proc_a -> node_a_1 -> node_a_2 -> node_a_3 -> node_a_4 -> node_a_5; 31 | edge[weight=2, arrowhead=none, color=gray75, fillcolor=gray75]; 32 | proc_b -> node_b_1 -> node_b_2 -> node_b_3 -> node_b_4 -> node_b_5; 33 | edge[weight=2, arrowhead=none, color=gray75, fillcolor=gray75]; 34 | proc_c -> node_c_1 -> node_c_2 -> node_c_3 -> node_c_4 -> node_c_5; 35 | } -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/ASTSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{FunSuite, Matchers} 4 | 5 | import edu.berkeley.cs.boom.molly.ast._ 6 | 7 | class ASTSuite extends FunSuite with Matchers { 8 | 9 | test("Expr.variables") { 10 | Expr(IntLiteral(42), "", IntLiteral(24)).variables should be (empty) 11 | Expr(Identifier("a"), "", IntLiteral(42)).variables.map(_.name) should be (Set("a")) 12 | Expr(IntLiteral(42), "", Identifier("a")).variables.map(_.name) should be (Set("a")) 13 | Expr(Identifier("a"), "", Identifier("b")).variables.map(_.name) should be (Set("a", "b")) 14 | val nestedExpression = Expr(Identifier("a"), "", Expr(Identifier("b"), "", Identifier("c"))) 15 | nestedExpression.variables.map(_.name) should be (Set("a", "b", "c")) 16 | } 17 | 18 | test("Predicate.*Variables") { 19 | val edbPredicate = 20 | Predicate("edb", List(IntLiteral(42), StringLiteral("foo")), notin = false, None) 21 | edbPredicate.topLevelVariables should be (empty) 22 | edbPredicate.aggregateVariables should be (empty) 23 | edbPredicate.expressionVariables should be (empty) 24 | edbPredicate.topLevelVariablesWithIndices should be (empty) 25 | 26 | val idbPredicate = { 27 | val cols = List( 28 | IntLiteral(42), 29 | StringLiteral("a"), 30 | Identifier("ident"), 31 | Aggregate("max", "aggColumn"), 32 | Expr(Identifier("inExprA"), "<=", Identifier("inExprB")) 33 | ) 34 | Predicate("tableName", cols, notin = false, None) 35 | } 36 | idbPredicate.topLevelVariables should be (Set("ident")) 37 | idbPredicate.aggregateVariables should be (Set("aggColumn")) 38 | idbPredicate.expressionVariables should be (Set("inExprA", "inExprB")) 39 | idbPredicate.topLevelVariablesWithIndices should be (List(("ident", ("tableName", 2)))) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/ramp/encoding.ded: 
-------------------------------------------------------------------------------- 1 | node("C", "a")@1; 2 | node("C", "b")@1; 3 | node("C", "c")@1; 4 | node("C", "d")@1; 5 | node("C", "C")@1; 6 | node("C", "C2")@1; 7 | 8 | 9 | cli_num(H, C, N)@next :- cli_num(H, C, N); 10 | cli_num("a", "C", 1)@1; 11 | cli_num("a", "C2", 2)@1; 12 | cli_num("b", "C", 1)@1; 13 | cli_num("b", "C2", 2)@1; 14 | cli_num("c", "C", 1)@1; 15 | cli_num("c", "C2", 2)@1; 16 | cli_num("d", "C", 1)@1; 17 | cli_num("d", "C2", 2)@1; 18 | cli_num("C", "C", 1)@1; 19 | cli_num("C", "C2", 2)@1; 20 | cli_num("C2", "C", 1)@1; 21 | cli_num("C2", "C2", 2)@1; 22 | 23 | 24 | // fake junk 25 | lencoding(Host, Group, Server)@next :- lencoding(Host, Group, Server); 26 | lencoding(Host, Group, Server) :- encoding(D, Group, Server), node(D, Host); 27 | 28 | 29 | encoding("C", "a|b|c|d", "a")@1; 30 | encoding("C", "a|b|c|d", "b")@1; 31 | encoding("C", "a|b|c|d", "c")@1; 32 | encoding("C", "a|b|c|d", "d")@1; 33 | 34 | encoding("C", "a|b|c", "a")@1; 35 | encoding("C", "a|b|c", "b")@1; 36 | encoding("C", "a|b|c", "c")@1; 37 | 38 | encoding("C", "a|b|d", "a")@1; 39 | encoding("C", "a|b|d", "b")@1; 40 | encoding("C", "a|b|d", "d")@1; 41 | 42 | encoding("C", "a|c|d", "a")@1; 43 | encoding("C", "a|c|d", "c")@1; 44 | encoding("C", "a|c|d", "d")@1; 45 | 46 | encoding("C", "b|c|d", "b")@1; 47 | encoding("C", "b|c|d", "c")@1; 48 | encoding("C", "b|c|d", "d")@1; 49 | 50 | encoding("C", "a|b", "a")@1; 51 | encoding("C", "a|b", "b")@1; 52 | 53 | encoding("C", "a|c", "a")@1; 54 | encoding("C", "a|c", "c")@1; 55 | 56 | encoding("C", "a|d", "a")@1; 57 | encoding("C", "a|d", "d")@1; 58 | 59 | encoding("C", "b|c", "b")@1; 60 | encoding("C", "b|c", "c")@1; 61 | 62 | encoding("C", "b|d", "b")@1; 63 | encoding("C", "b|d", "d")@1; 64 | 65 | encoding("C", "c|d", "c")@1; 66 | encoding("C", "c|d", "d")@1; 67 | 68 | encoding("C", "a", "a")@1; 69 | encoding("C", "b", "b")@1; 70 | encoding("C", "c", "c")@1; 71 | encoding("C", "d", "d")@1; -------------------------------------------------------------------------------- /src/test/resources/examples_ft/flux/flux.ded: -------------------------------------------------------------------------------- 1 | include "flux_buffer.ded"; 2 | include "../real_heartbeat.ded"; 3 | 4 | // producer specification 5 | 6 | dispatch(Host, Dest, Sn, Data) :- peek(Host, Dest, Sn, Data), status(Host, Dest, "ACTIVE"), conn(Host, Dest, "SEND"); 7 | send(Dest, Host, Sn, Data)@async :- dispatch(Host, Dest, Sn, Data); 8 | advance(Host, Dest) :- dispatch(Host, Dest, _, _); 9 | 10 | // note, this is a library call to the buffer, not a message! 11 | ack(Host, Dest, Sn, Markings) :- dispatch(Host, Dest, Sn, _), del(Host, Markings), notin conn(Host, Dest, "ACK"); 12 | ack(Host, Dest, Sn, Markings) :- dispatch(Host, Dest, Sn, _), del(Host, Markings), send(Host, Dest, Sn, _); 13 | 14 | //snd_ack(Host, Dest, Sn)@async :- send(Dest, Host, Sn, _); 15 | 16 | 17 | // takeover spec 18 | event_1(Host, Dest) :- fail(Host, Dest), notin status(Host, Dest, "DEAD"); 19 | event_2(Host, Other) :- event_1(Host, Dest), other(Host, Dest, Other), notin conn(Host, Other, "RECV"); 20 | status(Host, Dest, "DEAD")@next :- event_1(Host, Dest); 21 | conn(Host, Dest, "RECV")@next :- event_2(Host, Dest); 22 | //reverse. 
Other mimicks p(dest) 23 | 24 | reverse(Other, Host)@async :- event_2(Host, Other); 25 | status(H, D, S)@next :- status(H, D, S), notin event_1(H, D); 26 | conn(H, D, S)@next :- conn(H, D, S), notin del_conn(H, D, S); 27 | 28 | conn(Host, Dest, "SEND") :- reverse(Host, Dest); 29 | 30 | 31 | // fail(pair) -> t_fail = true, whatever that means, for various roles 32 | t_fail(Host) :- fail(Host, _); 33 | 34 | del(H, M)@next :- del(H, M); 35 | 36 | // here's that close-the-loop shit 37 | put(H, D, S, 0) :- send(H, _, S, D); 38 | 39 | 40 | 41 | other(H, A, B)@next :- other(H, A, B); 42 | member(Z, N)@next :- member(Z, N); 43 | 44 | fail_event(Z, N, H) :- failed(Z, H), member(Z, N); 45 | fail_log(Z, N, H)@next :- fail_event(Z, N, H); 46 | fail_log(Z, N, H)@next :- fail_log(Z, N, H); 47 | fail(N, H)@async :- fail_event(Z, N, H), notin fail_log(Z, N, H); 48 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/flux/flux_clusterpairs.ded: -------------------------------------------------------------------------------- 1 | include "flux.ded"; 2 | 3 | // ingress is dual 4 | conn("i", "pc", "SEND")@1; 5 | conn("i", "sc", "SEND")@1; 6 | conn("i", "pc", "ACK")@1; 7 | conn("i", "sc", "ACK")@1; 8 | status("i", "pc", "ACTIVE")@1; 9 | status("i", "sc", "ACTIVE")@1; 10 | del("i", "PRIM|SEC")@1; 11 | dest("i", "pc")@1; 12 | dest("i", "sc")@1; 13 | 14 | // the primary just forwards data and ACKs. 15 | conn("pc", "e", "SEND")@1; 16 | status("pc", "e", "ACTIVE")@1; 17 | status("pc", "i", "ACTIVE")@1; 18 | dest("pc", "e")@1; 19 | //dest("pc", "i")@1; 20 | conn("pc", "i", "SEND")@1; 21 | 22 | // the secondary just processes ACKs 23 | conn("sc", "e", "ACK")@1; 24 | status("sc", "e", "ACTIVE")@1; 25 | status("sc", "i", "ACTIVE")@1; 26 | dest("sc", "e")@1; 27 | 28 | // egress sends ACKs to the secondary 29 | conn("e", "sc", "ACK")@1; 30 | conn("e", "sc", "SEND")@1; 31 | status("e", "sc", "ACTIVE")@1; 32 | dest("e", "sc")@1; 33 | conn("e", "pc", "RECV")@1; 34 | 35 | //conn("sc", "i", "SEND")@1; 36 | //dest("sc", "i")@1; 37 | 38 | 39 | other("e", "pc", "sc")@1; 40 | other("e", "sc", "pc")@1; 41 | other("i", "sc", "pc")@1; 42 | other("i", "pc", "sc")@1; 43 | 44 | 45 | watch("Z", "pc")@1; 46 | watch("Z", "sc")@1; 47 | member("Z", "i")@1; 48 | member("Z", "e")@1; 49 | member("Z", "pc")@1; 50 | member("Z", "sc")@1; 51 | // just once tho 52 | 53 | put("i", "foo", 1, 0)@1; 54 | put("i", "bar", 2, 0)@1; 55 | put("i", "baz", 3, 0)@1; 56 | put("i", "qux", 4, 0)@2; 57 | 58 | //advance("i", "prim")@3; 59 | //advance("i", "prim")@4; 60 | 61 | //good(X, Y, "foo") :- dest(X, Y); 62 | 63 | //good("foo") :- put(A,B,C,D)@1; 64 | //good("foo")@1; 65 | //good(X)@next :- good(X); 66 | //good(X, Y) :- del(X, Y); 67 | 68 | put_log(E, X, I) :- put(E, X, I, _); 69 | put_log(E, X, I)@next :- put_log(E, X, I); 70 | good(X, I) :- put_log("e", X, I); 71 | 72 | // trivially, anything goes if e or i crash 73 | good(X, I) :- put_log(_, X, I), crash(_, "i", _); 74 | good(X, I) :- put_log(A, X, I), crash(A, "e", _); 75 | 76 | alive(M, X) :- member(_, M), put_log(M, X, _), notin crash(_, M, _); 77 | both_alive(X) :- alive("i", X), alive("e", X); 78 | pre(X) :- alive(_, X); 79 | 80 | post(X) :- put_log("e", X, _); 81 | 82 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/RandomBenchmark.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | 
import com.codahale.metrics.MetricRegistry 4 | import java.io.File 5 | import com.github.tototoshi.csv.CSVWriter 6 | 7 | /** 8 | * Tool for sampling "number of runs to first counterexample" from the random solver. 9 | */ 10 | object RandomBenchmark { 11 | val parser = new scopt.OptionParser[Config]("randomBenchmark") { 12 | head("randomBenchmark", "0.1") 13 | opt[Int]("max-runs") text "max runs (default unlimited)" action { (x, c) => c.copy(maxRuns = x)} 14 | opt[Int]('t', "EOT") text "end of time (default 3)" action { (x, c) => c.copy(eot = x)} 15 | opt[Int]('f', "EFF") text "end of finite failures (default 2)" action { (x, c) => c.copy(eff = x)} 16 | opt[Int]('c', "crashes") text "crash failures (default 0)" action { (x, c) => c.copy(crashes = x)} 17 | opt[String]('N', "nodes") text "a comma-separated list of nodes (required)" required() action { (x, c) => c.copy(nodes = x.split(','))} 18 | arg[File]("...") unbounded() minOccurs 1 text "Dedalus files" action { (x, c) => c.copy(inputPrograms = c.inputPrograms :+ x)} 19 | } 20 | 21 | def findFirstCounterexample(config: Config): (Int, FailureSpec) = { 22 | val metrics = new MetricRegistry 23 | var numRuns = 0 24 | for (run <- SyncFTChecker.check(config, metrics)) { 25 | numRuns += 1 26 | if (run.status == RunStatus("failure")) { 27 | return (numRuns, run.failureSpec) 28 | } 29 | } 30 | throw new IllegalStateException("Random checker exited without finding a counterexample") 31 | } 32 | 33 | def main(args: Array[String]) { 34 | parser.parse(args, Config(strategy = "random")) map { config => 35 | val csvFile = new File(s"random_${config.inputPrograms.head.getName}_t_${config.eot}_f_${config.eff}_c_${config.crashes}.csv") 36 | val csvWriter = CSVWriter.open(csvFile) 37 | csvWriter.writeRow(Seq("numRuns", "counterexample")) 38 | try { 39 | (1 to config.maxRuns).foreach { _ => 40 | csvWriter.writeRow(findFirstCounterexample(config).productIterator.toSeq) 41 | } 42 | } finally { 43 | csvWriter.close() 44 | } 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /demo_html/runs.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"iteration":0,"status":"success","failureSpec":{"maxCrashes":0,"eot":5,"crashes":[],"omissions":[],"eff":0,"nodes":["a","b","c"]},"model":{"tables":{"pre":[["c","2"],["c","4"],["c","5"],["b","3"],["c","3"],["b","2"],["b","4"],["b","5"]],"post":[["c","2"],["c","4"],["c","5"],["b","3"],["c","3"],["b","2"],["b","4"],["b","5"]],"bcast":[["a","Hello world!","1"]],"log_prov2":[["c","Hello world!","1","a","2"],["b","Hello world!","1","a","2"]],"crash":[],"log_prov1":[["c","Hello world!","2","3"],["c","Hello world!","3","4"],["b","Hello world!","4","5"],["c","Hello world!","5","6"],["c","Hello world!","4","5"],["b","Hello world!","2","3"],["b","Hello world!","3","4"],["b","Hello 
world!","5","6"]],"pre_prov3":[["c","2"],["c","4"],["c","5"],["b","3"],["c","3"],["b","2"],["b","4"],["b","5"]],"clock":[["a","c","5","6"],["c","a","5","6"],["c","c","4","5"],["b","b","4","5"],["a","a","4","5"],["b","c","1","2"],["c","b","1","2"],["c","b","2","3"],["b","c","2","3"],["a","a","5","6"],["b","b","5","6"],["c","c","5","6"],["a","b","2","3"],["b","a","2","3"],["a","c","4","5"],["a","b","1","2"],["b","a","1","2"],["c","a","4","5"],["b","c","5","6"],["c","b","5","6"],["a","b","4","5"],["a","c","1","2"],["c","a","1","2"],["b","a","4","5"],["c","a","2","3"],["a","c","2","3"],["a","b","5","6"],["b","a","5","6"],["a","a","2","3"],["b","b","2","3"],["c","c","2","3"],["a","a","1","2"],["b","b","1","2"],["c","c","1","2"],["b","c","4","5"],["c","b","4","5"],["c","b","3","4"],["b","c","3","4"],["b","a","3","4"],["a","b","3","4"],["c","a","3","4"],["a","c","3","4"],["c","c","3","4"],["b","b","3","4"],["a","a","3","4"]],"member":[["a","b","6"],["a","b","1"],["a","c","2"],["a","c","4"],["a","c","5"],["a","b","3"],["a","c","6"],["a","c","3"],["a","c","1"],["a","b","2"],["a","b","4"],["a","b","5"]],"member_prov0":[["a","c","5","6"],["a","c","1","2"],["a","b","4","5"],["a","c","2","3"],["a","c","3","4"],["a","b","5","6"],["a","b","2","3"],["a","b","3","4"],["a","b","1","2"],["a","c","4","5"]],"post_prov4":[["c","2"],["c","4"],["c","5"],["b","3"],["c","3"],["b","2"],["b","4"],["b","5"]],"log":[["b","Hello world!","3"],["c","Hello world!","2"],["c","Hello world!","4"],["c","Hello world!","5"],["b","Hello world!","6"],["b","Hello world!","2"],["b","Hello world!","4"],["b","Hello world!","5"],["c","Hello world!","3"],["c","Hello world!","6"]]}}} 3 | ] -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/wrappers/C4Wrapper.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.wrappers 2 | 3 | import edu.berkeley.cs.boom.molly.UltimateModel 4 | import edu.berkeley.cs.boom.molly.ast.{IntLiteral, Program} 5 | import jnr.ffi.LibraryLoader 6 | import com.typesafe.scalalogging.LazyLogging 7 | import edu.berkeley.cs.boom.molly.codegen.C4CodeGenerator 8 | import nl.grons.metrics.scala.InstrumentedBuilder 9 | import com.codahale.metrics.MetricRegistry 10 | 11 | 12 | class C4Wrapper(name: String, program: Program) 13 | (implicit val metricRegistry: MetricRegistry) extends LazyLogging with InstrumentedBuilder { 14 | 15 | private val time = metrics.timer("time") 16 | 17 | def run: UltimateModel = C4Wrapper.synchronized { 18 | time.time { 19 | C4Wrapper.libC4.c4_initialize() 20 | val c4 = C4Wrapper.libC4.c4_make(null, 0) 21 | try { 22 | // Install the clock facts one timestep at a time in order to stratify the 23 | // execution by time: 24 | val (clockFacts, nonClockFacts) = program.facts.partition(_.tableName == "clock") 25 | val rulesPlusNonClockFacts = C4CodeGenerator.generate(program.copy(facts=nonClockFacts)) 26 | logger.debug("C4 input minus clock facts is:\n" + rulesPlusNonClockFacts) 27 | assert(C4Wrapper.libC4.c4_install_str(c4, rulesPlusNonClockFacts) == 0) 28 | val clockFactsByTime = clockFacts.groupBy(_.cols(2).asInstanceOf[IntLiteral].int) 29 | for ((time, facts) <- clockFactsByTime.toSeq.sortBy(_._1)) { 30 | val clockFactsProgram = C4CodeGenerator.generate(new Program(Nil, facts, Nil)) 31 | logger.debug(s"Installing clock facts for time $time:\n$clockFactsProgram") 32 | assert(C4Wrapper.libC4.c4_install_str(c4, clockFactsProgram) == 0) 33 | } 34 | val tables 
= program.tables.map { 35 | t => (t.name, parseTableDump(C4Wrapper.libC4.c4_dump_table(c4, t.name))) 36 | }.toMap 37 | new UltimateModel(tables) 38 | } finally { 39 | C4Wrapper.libC4.c4_destroy(c4) 40 | C4Wrapper.libC4.c4_terminate() 41 | } 42 | } 43 | } 44 | 45 | def parseTableDump(string: String): List[List[String]] = { 46 | string.lines.map(_.split(",").toList).toList 47 | } 48 | } 49 | 50 | object C4Wrapper { 51 | val libC4: C4 = LibraryLoader.create(classOf[C4]).load("c4") 52 | } -------------------------------------------------------------------------------- /project/MollyBuild.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import sbt.Keys._ 3 | 4 | 5 | object BuildSettings { 6 | val buildSettings = Defaults.defaultSettings ++ Seq( 7 | organization := "edu.berkeley.cs.boom", 8 | version := "0.1-SNAPSHOT", 9 | scalaVersion := "2.11.6", 10 | //scalaVersion := "2.10.3", 11 | resolvers ++= Seq( 12 | Resolver.sonatypeRepo("snapshots"), 13 | Resolver.sonatypeRepo("releases"), 14 | Resolver.typesafeRepo("releases") 15 | ), 16 | parallelExecution in Test := false 17 | ) 18 | } 19 | 20 | 21 | object MollyBuild extends Build { 22 | 23 | import BuildSettings._ 24 | 25 | lazy val root = Project( 26 | "molly", 27 | file("."), 28 | settings = buildSettings ++ Seq( 29 | scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature"), 30 | scalaVersion in scalaZ3 := "2.11.2", 31 | scalaVersion in "bloom-compiler" := "2.10.3", 32 | libraryDependencies ++= Seq( 33 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0", 34 | "org.slf4j" % "slf4j-log4j12" % "1.7.5", 35 | "org.scalatest" %% "scalatest" % "2.2.4", 36 | "org.mockito" % "mockito-core" % "1.10.19" % "test", 37 | "com.googlecode.kiama" %% "kiama" % "1.6.0", 38 | "com.github.scopt" %% "scopt" % "3.2.0", 39 | "org.apache.commons" % "commons-math3" % "3.2", 40 | "com.github.jnr" % "jnr-ffi" % "2.0.1", 41 | "io.argonaut" %% "argonaut" % "6.0.4", 42 | "org.ow2.sat4j" % "org.ow2.sat4j.core" % "2.3.5", 43 | "commons-io" % "commons-io" % "2.4", 44 | "pl.project13.scala" %% "rainbow" % "0.2" exclude("org.scalatest", "scalatest_2.11"), 45 | // JGraphT is used for its UnionFind data structure, which we use in 46 | // the type inference algorithm: 47 | "org.jgrapht" % "jgrapht-core" % "0.9.0", 48 | "nl.grons" %% "metrics-scala" % "3.2.0_a2.3", 49 | "com.codahale.metrics" % "metrics-json" % "3.0.2", 50 | "org.codehaus.jackson" % "jackson-mapper-asl" % "1.9.13", 51 | "com.github.tototoshi" %% "scala-csv" % "1.0.0", 52 | "com.lihaoyi" %% "pprint" % "0.3.6", 53 | "com.github.nikita-volkov" % "sext" % "0.2.4" 54 | //"com.github.vagm" %% "optimus" % "1.2.2" 55 | ) 56 | ) 57 | ).dependsOn(scalaZ3) 58 | 59 | lazy val scalaZ3 = RootProject(uri("git://github.com/JoshRosen/ScalaZ3.git#7c3d7801c7b312433f06101414aeb3a7f9f30433")) 60 | } 61 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/codegen/C4CodeGenerator.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.codegen 2 | 3 | import org.kiama.output.PrettyPrinter 4 | import edu.berkeley.cs.boom.molly.ast._ 5 | import edu.berkeley.cs.boom.molly.ast.StringLiteral 6 | import edu.berkeley.cs.boom.molly.ast.IntLiteral 7 | import edu.berkeley.cs.boom.molly.ast.Program 8 | import edu.berkeley.cs.boom.molly.{DedalusType, DedalusTyper} 9 | 10 | object C4CodeGenerator extends PrettyPrinter { 11 | def 
generate(program: Program): String = { 12 | val tables = program.tables.map { table => 13 | "define" <> parens(table.name <> comma <+> braces(ssep(table.types.map(typeToC4Type _ andThen text), ", "))) <> semi 14 | } 15 | val facts = program.facts.map(genFact) 16 | val rules = program.rules.map(genRule) 17 | val wholeProgram = (tables.toSeq ++ facts.toSeq ++ rules.toSeq).reduce(_ <@@> _) 18 | super.pretty(wholeProgram) 19 | } 20 | 21 | private def typeToC4Type(t: DedalusType): String = { 22 | t match { 23 | case DedalusType.LOCATION => "string" 24 | case DedalusType.INT => "int" 25 | case DedalusType.STRING => "string" 26 | case DedalusType.UNKNOWN => 27 | throw new IllegalArgumentException("Cannot convert unknown Dedalus type to C4 type") 28 | } 29 | } 30 | 31 | private def genAtom(atom: Atom): Doc = atom match { 32 | case StringLiteral(s) => dquotes(text(s)) 33 | case IntLiteral(i) => text(i.toString) 34 | case Identifier(i) => text(i) 35 | case Expr(c, o, e) => genAtom(c) <+> o <+> genAtom(e) 36 | case Aggregate(aggName, aggCol) => aggName <> angles(aggCol) 37 | } 38 | 39 | private def genRule(rule: Rule): Doc = { 40 | // Place negated predicates at the end of the rule body. 41 | // This is a workaround for a C4 bug: https://github.com/bloom-lang/c4/issues/1 42 | val sortedBody = rule.body.sortBy(_.left.map(p => if (p.notin) 1 else 0).left.getOrElse(0)) 43 | genPredicate(rule.head) <+> ":-" <+> ssep(sortedBody.map { 44 | case Left(p: Predicate) => genPredicate(p) 45 | case Right(e: Expr) => genAtom(e) 46 | }, ", ") <> semi 47 | } 48 | 49 | private def genFact(fact: Predicate): Doc = { 50 | genPredicate(fact) <> semi 51 | } 52 | 53 | private def genPredicate(predicate: Predicate): Doc = { 54 | val notin = if (predicate.notin) "notin" <> space else empty 55 | notin <> predicate.tableName <> parens(ssep(predicate.cols.map(genAtom), ", ")) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/chain_replication.ded: -------------------------------------------------------------------------------- 1 | // tiny model of the hdfs data pipeline described in Gunawi et al'10. 
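// Sketch of the intended flow, as I read the rules below (annotation, not
// part of the original model): the namenode's xfer goes to the first
// datanode in the pipeline, each snd is forwarded hop by hop to the next
// datanode, every hop acks its predecessor, and xfer is retried at each
// following timestep until an ack arrives.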
2 | 3 | 4 | include "./heartbeat.ded"; 5 | 6 | 7 | //pipeline_length(N, count) :- pipeline(N, D, _); 8 | //pipeline(N, D, "NONE") :- xfer 9 | 10 | 11 | first_node(Namenode, Id, First) :- pipeline(Namenode, Id, First, Next), notin pipeline(Namenode, Id, _, First); 12 | 13 | // send the pipeline metadata to all nodes (eagerly) 14 | pipeline(H, I, F, N)@async :- pipeline(M, I, F, N), datanode(M, H, _); 15 | pipeline(H, I, F, N)@next :- pipeline(H, I, F, N); 16 | 17 | 18 | snd(Host, Id, Pl)@async :- xfer(N, Id, Pl), first_node(N, _, Host); 19 | snd(Host, Id, Pl)@async :- snd(Node, Id, Pl), pipeline(Node, _, Node, Host); 20 | ack(Sender, Id)@async :- snd(Node, Id, _), pipeline(Node, _, Sender, Node); 21 | 22 | xfer(N, I, P)@next :- xfer(N, I, P), notin ack(N, I); 23 | 24 | ack(N, I)@next :- ack(N, I); 25 | data(H, I, P) :- snd(H, I, P); 26 | 27 | data(H,I,P)@next :- data(H,I,P); 28 | 29 | watch("namenode", D) :- datanode("namenode", D, _); 30 | 31 | 32 | // per haryadi et al, now form a pipeline that simply excludes the failed node: 33 | 34 | dead_pipe(H, I) :- pipeline(H, I, _, N), failed(H, N); 35 | dead_pipe(H, I) :- pipeline(H, I, N, _), failed(H, N); 36 | 37 | //pipeline(H, I + 1, F, N) :- pipeline(H, I, F, N), notin failed(H, F), notin failed(H, N), dead_pipe(H, I); 38 | //pipeline(H, I + 1, F, Next) :- pipeline(H, I, F, N), failed(H, N), pipeline(H, I, N, Next); 39 | 40 | 41 | // busted 42 | //pipeline(H, I + 1, F, N)@next :- pipeline(H, I, F, N), dead_pipe(H, I); 43 | 44 | // busted 2 (looks like it could be right) 45 | //pipeline(H, I + 1, F, N) :- pipeline(H, I, F, N), notin failed(H, F), notin failed(H, N), dead_pipe(H, I); 46 | //pipeline(H, I + 1, F, Next) :- pipeline(H, I, F, N), failed(H, N), pipeline(H, I, N, Next); 47 | 48 | 49 | 50 | 51 | 52 | datanode("namenode", "d1", 1)@1; 53 | datanode("namenode", "d2", 2)@1; 54 | datanode("namenode", "d3", 3)@1; 55 | //datanode("namenode", "d4", 4)@1; 56 | 57 | xfer("namenode", 1, "data1")@1; 58 | xfer("namenode", 2, "data2")@2; 59 | xfer("namenode", 3, "data3")@3; 60 | 61 | pipeline("namenode", 1, "d1", "d2")@1; 62 | pipeline("namenode", 1, "d2", "d3")@1; 63 | 64 | 65 | //dead("namenode", 3)@4; 66 | 67 | 68 | good(H, I) :- data(H, I, _); 69 | ever_xfer(N, I, D) :- xfer(N, I, D); 70 | ever_xfer(N, I, D)@next :- ever_xfer(N, I, D); 71 | good(H, I) :- ever_xfer(N, I, _), datanode(N, H, _), crash(_, N, _); 72 | 73 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/real_kafka.ded: -------------------------------------------------------------------------------- 1 | //include "fake_zk2.ded"; 2 | include "util/timeout_svc.ded"; 3 | 4 | // replicas 5 | write(Next, Data, Origin)@async :- write(Origin, Data, Prev), member(Origin, Next), leader(Origin, Origin), notin ack(Origin, Data, Next); 6 | 7 | 8 | ack(Origin, Data, Acker)@async :- write(Acker, Data, Origin), notin leader(Acker, Acker); 9 | ack(O,D,A)@next :- ack(O,D,A); 10 | write(N,D,O)@next :- write(N, D, O); 11 | ack(Origin, Data, Acker)@async :- leader(Acker, Acker), write(Acker, Data, Origin), notin missing_acker(Acker, Data, _); 12 | missing_acker(Leader, Data, Other) :- leader(Leader, Leader), write(Leader, Data, _), follower(Leader, Other), notin ack(Leader, Data, Other); 13 | follower(L, F) :- member(L, F), leader(L, L), notin leader(L, F); 14 | 15 | 16 | // dummy 17 | leader(N, L)@next :- leader(N, L); 18 | 19 | timer_svc(L, D, 2) :- write(L, D, _); 20 | // the leader is now responsible for membership management. 
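// (Annotation, not in the original file: as the two member rules below read,
// membership carries over unchanged while no timeout is pending; once a
// write D times out, the leader keeps only members that have acked D, so
// silent followers drop out of the member set.)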
21 | member(N, Other)@next :- member(N, Other), notin timeout(N, _); 22 | //member(N, Other)@next :- member(N, Other), notin missing_acker(N, D, Other), timeout(L, D); 23 | member(N, Other)@next :- member(N, Other), leader(N, N), timeout(N, D), ack(N, D, Other); 24 | 25 | // bug fix: refuse to ack when we have a trivial quorum 26 | //missing_acker(L, D, "trivial") :- leader(L, L), write(L, D, _), notin follower(L, _); 27 | 28 | // client 29 | write(M, D, O)@async :- write_req(O, D), leader(O, M); 30 | 31 | 32 | // replica init 33 | /* 34 | zookeeper("a", "Z")@1; 35 | zookeeper("b", "Z")@1; 36 | zookeeper("c", "Z")@1; 37 | zookeeper("C", "Z")@1; 38 | zookeeper("Z", "Z")@1; 39 | begin("a")@1; 40 | begin("b")@1; 41 | begin("c")@1; 42 | */ 43 | 44 | // a is the leader: 45 | leader("a","a")@1; 46 | leader("b","a")@1; 47 | leader("c","a")@1; 48 | leader("C","a")@1; 49 | member("a", "b")@1; 50 | member("a", "c")@1; 51 | member("a", "d")@1; 52 | 53 | 54 | 55 | 56 | 57 | // client init 58 | client("C")@1; 59 | 60 | // bogus stuff 61 | //leader("Z", "a")@1; 62 | //member("Z", "a")@1; 63 | 64 | 65 | // write stream 66 | write_req("C", "Data1")@2; 67 | write_req("C", "Data2")@3; 68 | write_req("C", "Data3")@4; 69 | 70 | 71 | // assertions 72 | tried(C, D)@next :- tried(C, D); 73 | tried(C, D) :- write_req(C, D); 74 | 75 | good(D) :- ack("C", D, _), write(R, D, _), notin crash(R, R, _); 76 | good(D) :- tried(C, D), notin ack(C, D, _); 77 | 78 | // a pathological case that occurs due to our "preordination" of a as leader 79 | good(D) :- tried(C, D), leader(C, L), notin member(C, L); 80 | //good(D) :- member(D, _); 81 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/FailureSpecSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{Matchers, FunSpec} 4 | 5 | 6 | class FailureSpecSuite extends FunSpec with Matchers { 7 | 8 | describe("gross estimate of runs") { 9 | 10 | describe("for the failure-free scenario") { 11 | it("should estimate only one run") { 12 | FailureSpec(3, 0, 0, List("a", "b", "c")).grossEstimate should be (1) 13 | } 14 | } 15 | 16 | describe("for fail-stop scenarios") { 17 | it("should treat failures independently") { 18 | FailureSpec(3, 0, 2, List("a", "b")).grossEstimate should be (16) 19 | } 20 | it("should allow each node to crash once or never") { 21 | FailureSpec(3, 0, 1, List("a")).grossEstimate should be (4) 22 | } 23 | it("should respect maxCrashes") { 24 | // Condition on number of crashes: 25 | // There are two nodes that never crash, and there are 6 ways of picking those 26 | // two nodes. Those nodes only have one failure schedule each. 27 | // The nodes that _are_ crash prone can crash at one of four times. 28 | // So: 4 * 4 * 6 = 96 29 | FailureSpec(3, 0, 2, List("a", "b", "c", "d")).grossEstimate should be (96) 30 | } 31 | } 32 | 33 | describe("for omission-only scenarios") { 34 | it("should allow omissions until eff") { 35 | FailureSpec(3, 2, 0, List("a", "b")).grossEstimate should be (16) 36 | FailureSpec(3, 1, 0, List("a", "b")).grossEstimate should be (4) 37 | } 38 | } 39 | 40 | describe("for scenarios with both crashes and omissions") { 41 | it("should prevent crashed nodes from sending messages") { 42 | // With a naive estimate that treated omissions and crashes independently, each node has 43 | // 4 choices of times to crash and 4 possible combinations of message omissions. 
44 | // There are two nodes, so we have (4 * 4) ** 2 = 256 possible failure scenarios. 45 | // However, if we condition on when the node crashes: 46 | // Crash @t=1 -> 1 choices of omissions 47 | // crash @t=2 -> 2 choices 48 | // crash @t=3 -> 4 choices 49 | // Never crash -> 4 choices 50 | // So 11 ** 2 = 121 possible failure scenarios. 51 | FailureSpec(3, 2, 2, List("a", "b")).grossEstimate should be (121) 52 | } 53 | it("shouldn't overflow and estimate zero scenarios") { 54 | FailureSpec(6, 4, 1, List("a", "b", "c", "C", "Z")).grossEstimate should not be (0) 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/flux/flux_partitionpairs.ded: -------------------------------------------------------------------------------- 1 | include "flux.ded"; 2 | 3 | // ingress is dual 4 | conn("fpp", "fcp", "SEND")@1; 5 | conn("fpp", "fcs", "ACK")@1; 6 | status("fpp", "fcp", "ACTIVE")@1; 7 | status("fpp", "fcs", "ACTIVE")@1; 8 | del("fpp", "PRIM|SEC")@1; 9 | dest("fpp", "fcp")@1; 10 | 11 | conn("fps", "fcs", "SEND")@1; 12 | conn("fps", "fcp", "ACK")@1; 13 | status("fps", "fcs", "ACTIVE")@1; 14 | status("fps", "fcp", "ACTIVE")@1; 15 | del("fps", "PRIM|SEC")@1; 16 | dest("fps", "fcs")@1; 17 | 18 | 19 | conn("fcp", "fps", "SEND")@1; 20 | //conn("fcp", "fcp", "ACK")@1; 21 | status("fcp", "fps", "ACTIVE")@1; 22 | del("fcp", "PRIM|SEC")@1; 23 | dest("fcp", "fps")@1; 24 | 25 | 26 | conn("fcs", "fpp", "SEND")@1; 27 | //conn("fcp", "fcp", "ACK")@1; 28 | status("fcs", "fpp", "ACTIVE")@1; 29 | del("fcs", "PRIM|SEC")@1; 30 | dest("fcs", "fpp")@1; 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | //other("e", "pc", "sc")@1; 42 | //other("e", "sc", "pc")@1; 43 | //other("i", "sc", "pc")@1; 44 | //other("i", "pc", "sc")@1; 45 | 46 | other("fpp", "fpp", "fps")@1; 47 | other("fpp", "fps", "fpp")@1; 48 | other("fpp", "fcp", "fcs")@1; 49 | other("fpp", "fcs", "fcp")@1; 50 | 51 | 52 | other("fps", "fpp", "fps")@1; 53 | other("fps", "fps", "fpp")@1; 54 | other("fps", "fcp", "fcs")@1; 55 | other("fps", "fcs", "fcp")@1; 56 | 57 | 58 | other("fcp", "fpp", "fps")@1; 59 | other("fcp", "fps", "fpp")@1; 60 | other("fcp", "fcp", "fcs")@1; 61 | other("fcp", "fcs", "fcp")@1; 62 | 63 | other("fcs", "fpp", "fps")@1; 64 | other("fcs", "fps", "fpp")@1; 65 | other("fcs", "fcp", "fcs")@1; 66 | other("fcs", "fcs", "fcp")@1; 67 | 68 | 69 | watch("Z", "fpp")@1; 70 | watch("Z", "fcp")@1; 71 | watch("Z", "fps")@1; 72 | watch("Z", "fcs")@1; 73 | member("Z", "fpp")@1; 74 | member("Z", "fps")@1; 75 | member("Z", "fcp")@1; 76 | member("Z", "fcs")@1; 77 | // just once tho 78 | 79 | put("fpp", "foo", 1, 0)@1; 80 | put("fpp", "bar", 2, 0)@1; 81 | put("fpp", "baz", 3, 0)@1; 82 | put("fpp", "qux", 4, 0)@2; 83 | 84 | put("fps", "foo", 1, 0)@1; 85 | put("fps", "bar", 2, 0)@1; 86 | put("fps", "baz", 3, 0)@1; 87 | put("fps", "qux", 4, 0)@2; 88 | 89 | consumer("fcs")@1; 90 | consumer("fcp")@1; 91 | consumer(X)@next :- consumer(X); 92 | 93 | put_log(E, X, I) :- put(E, X, I, _), consumer(E); 94 | put_log(E, X, I)@next :- put_log(E, X, I); 95 | good(X, I) :- put_log(E, X, I); 96 | 97 | // trivially, anything goes if e or i crash 98 | //good(X, I) :- put_log(_, X, I), crash(_, "i", _); 99 | //good(X, I) :- put_log(A, X, I), crash(A, "e", _); 100 | 101 | pre(X) :- put(_, X, _, _)@1; 102 | post(X) :- good(X, I); 103 | 104 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/commit/3pc.ded: 
-------------------------------------------------------------------------------- 1 | include "../util/timeout_svc.ded"; 2 | 3 | cancommit(Agent, Coord, Xact)@async :- begin(Coord, Xact), agent(Coord, Agent); 4 | vote_msg(Coord, Agent, Xact, "Y")@async :- cancommit(Agent, Coord, Xact), can(Agent, Xact); 5 | vote(C,A,X,S) :- vote_msg(C,A,X,S); 6 | 7 | timer_svc(A, X, 4) :- cancommit(A, _, X); 8 | 9 | // the coordinator is the distinguished node that is not an agent... 10 | abort(A, X)@next :- timeout(A, X), notin coordinator(A, A), notin precommitted(A, _, X), notin commit(A, X); 11 | 12 | 13 | authorized(C, X) :- vote_msg(C, _, X, "Y"), notin missing_vote(C, X), notin abort(C, X); 14 | 15 | precommit(A, C, X)@async :- authorized(C, X), agent(C, A); 16 | ack(C, A, X)@async :- precommit(A, C, X), prepared(A, C, X, "Y"); 17 | timer_cancel(A, X) :- precommit(A, _, X), prepared(A, _, X, "Y"); 18 | timer_svc(A, X, 4) :- precommit(A, C, X), prepared(A, C, X, "Y"); 19 | 20 | //commit(A, X)@next :- timeout(A, X), precommitted(A, C, X), notin abort(A, X); 21 | commit(A, X) :- timeout(A, X), precommitted(A, C, X), notin abort(A, X); 22 | 23 | precommitted(A, C, X) :- precommit(A, C, X); 24 | precommitted(A, C, X)@next :- precommitted(A, C, X); 25 | 26 | abort(C, X)@next :- vote(C, _, X, "N"); 27 | commit(C, X)@next :- ack(C, _, X), notin missing_ack(C, X), notin abort(C, X); 28 | 29 | missing_ack(C, X) :- agent(C, A), running(C, X), notin ack(C, A, X); 30 | missing_vote(C, X) :- agent(C, A), running(C, X), notin vote(C, A, X, "Y"); 31 | 32 | prepared(A, C, X, "Y") :- cancommit(A,C,X), can(A,X); 33 | prepared(A, C, X, Y)@next :- prepared(A,C,X,Y); 34 | 35 | 36 | timer_svc(C, X, 5) :- begin(C, X); 37 | abort(C, X)@next :- timeout(C, X), coordinator(C, C), missing_ack(C, X), notin commit(C, X); 38 | 39 | 40 | commit(A, X)@async :- commit(C, X), agent(C, A), notin abort(C, A); 41 | abort(A, X)@async :- abort(C, X), agent(C, A); 42 | 43 | 44 | gotcommit(C, A, X)@async :- commit(A, X), precommitted(A, C, X); 45 | 46 | 47 | 48 | 49 | // persistence 50 | agent(C, A)@next :- agent(C, A); 51 | can(A, X)@next :- can(A, X); 52 | abort(A, X)@next :- abort(A, X); 53 | commit(A, X)@next :- commit(A, X), notin abort(A,X); 54 | vote(C, A, X, S)@next :- vote(C, A, X, S); 55 | running(Coord, Xact) :- begin(Coord, Xact); 56 | running(C, X)@next :- running(C, X), notin commit(C, X), notin abort(C, X); 57 | 58 | 59 | agent("C", "a")@1; 60 | agent("C", "b")@1; 61 | agent("C", "d")@1; 62 | agent("a", "b")@1; 63 | agent("a", "d")@1; 64 | agent("b", "a")@1; 65 | agent("b", "d")@1; 66 | agent("d", "a")@1; 67 | agent("d", "b")@1; 68 | 69 | coordinator(A, C)@next :- coordinator(A, C); 70 | 71 | coordinator("a", "C")@1; 72 | coordinator("b", "C")@1; 73 | coordinator("d", "C")@1; 74 | coordinator("C", "C")@1; 75 | 76 | 77 | 78 | // both agents can commit 79 | can("a", "hello")@1; 80 | can("b", "hello")@1; 81 | can("d", "hello")@1; 82 | 83 | begin("C", "hello")@1; 84 | 85 | 86 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/chord.ded: -------------------------------------------------------------------------------- 1 | include "real_heartbeat.ded"; 2 | 3 | 4 | f_node_mapping("a", 1)@1; 5 | //f_node_mapping("b", 3)@1; 6 | f_node_mapping("c", 4)@1; 7 | 8 | 9 | f_key_mapping("foo", 2)@1; 10 | f_key_mapping("bar", 1)@1; 11 | n_succ(1, 2)@1; 12 | n_succ(2, 3)@1; 13 | n_succ(3, 4)@1; 14 | n_succ(4,5)@1; 15 | n_succ(5, 1)@1; 16 | /* 17 | n_succ(5,6)@1; 18 | n_succ(6,7)@1; 19 
| n_succ(7,8)@1; 20 | n_succ(8,9)@1; 21 | n_succ(9,10)@1; 22 | n_succ(10,11)@1; 23 | n_succ(11,12)@1; 24 | n_succ(12,1)@1; 25 | n_succ(0,0)@1; 26 | */ 27 | 28 | node("a")@1; 29 | node("b")@1; 30 | node("c")@1; 31 | node("d")@1; 32 | 33 | // JOIN! 34 | f_node_mapping("d", 5)@2; 35 | f_node_mapping("b", 3)@2; 36 | 37 | 38 | node(X)@next :- node(X); 39 | f_node_mapping(H,I)@next :- f_node_mapping(H,I); 40 | f_key_mapping(H,I)@next :- f_key_mapping(H,I); 41 | 42 | node_mapping(N, H, I) :- node(N), f_node_mapping(H, I); 43 | key_mapping(N, H, I) :- node(N), f_key_mapping(H, I); 44 | succ(H, X, Y) :- node(H), n_succ(X, Y); 45 | succ(H,X,Y)@next :- succ(H,X,Y); 46 | 47 | 48 | // maybe the actual successor exists. 49 | successor(Host, Id, Node) :- node_mapping(Host, Node, Id2), succ(Host, Id, Id2); 50 | // or else we want the successor of the successor 51 | successor(Host, Id, Node) :- succ(Host, Id, Id2), notin node_mapping(Host, _, Id2), successor(Host, Id2, Node); 52 | my_successor(Host, Node) :- successor(Host, Id, Node), node_mapping(Host, Host, Id); 53 | next_successor(Host, Next) :- successor(Host, Id, Next), node_mapping(Host, Node, Id), my_successor(Host, Node); 54 | 55 | watching(A,B)@next :- watching(A,B); 56 | watching(A,B)@next :- my_successor(A,B); 57 | watch(A, B) :- my_successor(A, B), notin watching(A, B); 58 | 59 | //good(H, O) :- successor(H, O, _); 60 | 61 | comet("a", "noman")@4; 62 | //comet("b", "b")@3; 63 | //comet("c", "c")@3; 64 | //comet("d", "d")@3; 65 | ////comet(X,Y)@next :- comet(X,Y); 66 | //comet(Y, P)@async :- comet(X, P), my_successor(X, Y); 67 | comet(Y, X)@async :- comet(X, P), reach(X, Y); 68 | 69 | 70 | win("a",X) :- comet("a", X), node(X); 71 | good(A,X) :- win(A,X); 72 | win(A,X)@next :- win(A,X); 73 | 74 | //good(A,X) :- comet("a", X), node(A), notin comet(A,X); 75 | 76 | //bad(H, O, I) :- failed(H, O, I); 77 | //good(A, X) :- win(A, X); 78 | //good(A, X) :- comet(O, X)@1, node(A), d(_, X); 79 | good(A, X) :- comet(A, X)@2, crash(A, A, _); 80 | 81 | 82 | reach(Host, Other) :- my_successor(Host, Other), notin failed(Host, Other); 83 | reach(Host, Other) :- next_successor(Host, Other), my_successor(Host, O1), failed(Host, O1); 84 | 85 | //reach_base(Host, Other) :- my_successor(Host, Other), notin failed(Host, Other); 86 | // cheating 87 | //reach_base(Host, Other) :- next_successor(Host, Other), my_successor(Host, O1), failed(Host, O1); 88 | //reach(H, O) :- reach_base(H, O); 89 | //reach(H, O) :- reach_base(H, M), reach(M, O); 90 | //bad(H, "notaring") :- node(H), notin reach(H, H); 91 | 92 | //bad("notaring") :- node(A), notin win(A, _); 93 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/FailureSpec.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.apache.commons.math3.util.ArithmeticUtils 4 | import edu.berkeley.cs.boom.molly.ast.{Program, IntLiteral, StringLiteral, Predicate} 5 | import edu.berkeley.cs.boom.molly.derivations.{MessageLoss, CrashFailure} 6 | 7 | case class FailureSpec( 8 | eot: Int, 9 | eff: Int, 10 | maxCrashes: Int, 11 | nodes: List[String], 12 | crashes: Set[CrashFailure] = Set.empty, 13 | omissions: Set[MessageLoss] = Set.empty) { 14 | import FailureSpec._ 15 | 16 | require(maxCrashes <= nodes.size, "Can't have more crashes than nodes") 17 | require(crashes.size <= maxCrashes, "Can't specify more than maxCrashes crashes") 18 | require(omissions.forall(_.time < 
eff), "Can't have omissions at or after the EFF") 19 | 20 | import ArithmeticUtils._ 21 | 22 | def grossEstimate: BigInt = { 23 | import BigInt._ 24 | // We'll count the failure scenarios of each node, then multiply them to get the total count. 25 | // At each time step before the crash, any of the `nodes.size - 1` channels could fail. 26 | val singleTimeLosses = BigInt(2).pow(nodes.size - 1) 27 | val crashFree = singleTimeLosses.pow(eff) 28 | // Crashed nodes can't send messages: 29 | val crashProne = (1 to eot + 1).map { crashTime => singleTimeLosses.pow(Math.min(eff, crashTime - 1)) }.sum 30 | // (ways to pick which nodes don't crash) * (executions of crash-free nodes) * (executions of crash prone nodes) 31 | binomialCoefficient(nodes.size, maxCrashes) * crashFree.pow(nodes.size - maxCrashes) * crashProne.pow(maxCrashes) 32 | } 33 | 34 | def generateClockFacts: Seq[Predicate] = { 35 | val temporalFacts = for ( 36 | from <- nodes; 37 | to <- nodes; 38 | t <- 1 to eot 39 | if !crashes.exists(c => c.node == from && c.time <= t) // if the sender didn't crash 40 | ) yield { 41 | val messageLost = omissions.exists(o => o.from == from && o.to == to && o.time == t) 42 | val deliveryTime = if (from != to && messageLost) NEVER else t + 1 43 | Predicate("clock", List(StringLiteral(from), StringLiteral(to), IntLiteral(t), IntLiteral(deliveryTime)), notin = false, None) 44 | } 45 | val localDeductiveFacts = for (node <- nodes; t <- 1 to eot) yield { 46 | Predicate("clock", List(StringLiteral(node), StringLiteral(node), IntLiteral(t), IntLiteral(t + 1)), notin = false, None) 47 | } 48 | val crashFacts = for (crash <- crashes; node <- nodes; t <- 1 to eot) yield { 49 | Predicate("crash", List(StringLiteral(node), StringLiteral(crash.node), IntLiteral(crash.time), IntLiteral(t)), notin = false, None) 50 | } 51 | localDeductiveFacts ++ temporalFacts ++ crashFacts 52 | } 53 | 54 | def addClockFacts(program: Program): Program = { 55 | program.copy(facts = program.facts ++ generateClockFacts) 56 | } 57 | } 58 | 59 | 60 | object FailureSpec { 61 | /** 62 | * Time at which lost messages are delivered. 
63 | */ 64 | val NEVER = 99999 65 | } 66 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/raft/election.ded: -------------------------------------------------------------------------------- 1 | //include "../util/timeout_svc.ded"; 2 | include "clock.ded"; 3 | 4 | 5 | role(N, R)@next :- role(N, R), notin role_change(N, _); 6 | 7 | role_x(N, max) :- role_change(N, R), rank(N, R, I); 8 | role(N, R)@next :- role_x(N, I), rank(N, R, I); 9 | 10 | term(N, T)@next :- term(N, T), notin stall(N, T); 11 | term(N, T)@next :- new_term(N, T); 12 | new_term(N, T+1) :- term(N, T), stall(N, T); 13 | lclock_register(N, "Localtime", T) :- new_term(N, T); 14 | 15 | 16 | current_term(N, T) :- term(N, T); 17 | 18 | leader(N, T, L) :- current_term(N, T), append_log(N, T, L, _,_, _, _, _); 19 | 20 | 21 | last_append(Node, Term, max) :- append_log(Node, Term, _, _, _,_,_,Rcv); 22 | 23 | //stall(Node, Term) :- role(Node, "F"), term_time(Node, Term, Time), last_append(Node, Term, Last), Time - Last > 2; 24 | //stall(Node, Term) :- term_time(Node, Term, Time), last_append(Node, Term, Last), Time - Last > 2, notin role(Node, "L"); 25 | stall(Node, Term)@next :- lclock(Node, "Localtime", Term, Time), last_append(Node, Term, Last), 26 | current_term(Node, Term), 27 | notin role(Node, "L"), Time - Last > 1; 28 | stall(Node, 0)@next :- role(Node, _), notin append_log(Node, _, _, _, _,_,_,_); 29 | //stall(Node, 0)@next :- role(Node, "C"), notin last_append(Node, _, _); 30 | 31 | 32 | role_change(N, "C") :- stall(N, _); 33 | role_change(N, "F") :- append_entries(N, T, L, _, _, _, _), L != N;//, current_term(N, Ct), T > Ct; 34 | 35 | request_vote(Node, Lastlogterm + 1, Candidate, Lastlogindx, Lastlogterm)@async :- stall(Candidate, Lastlogterm), 36 | // 37 | //log_term(Candidate, Lastlogterm), 38 | member(Candidate, Node, _), log_indx(Candidate, Lastlogindx); 39 | 40 | 41 | accept_vote(Node, Candidate, Term) :- winner(Node, Term, Id), 42 | member(Node, Candidate, Id), log_term(Node, Lterm), Lterm < Term;//), notin accept_vote_log(Node, _, Term); 43 | winner(Node, Term, min) :- request_vote(Node, Term, Candidate, _, _), member(Node, Candidate, Id); 44 | 45 | 46 | vote(Candidate, Node, Term, "F")@async :- request_vote(Node, Term, Candidate, _, _), log_term(Node, Lterm), Lterm > Term; 47 | vote(Candidate, Node, Term, "T")@async :- accept_vote(Node, Candidate, Term); 48 | 49 | //accept_vote_log(N,C,T)@next :- accept_vote(N,C,T); 50 | //accept_vote_log(N,C,T)@next :- accept_vote_log(N,C,T); 51 | 52 | 53 | vote_log(C, N, T, V) :- vote(C, N, T, V); 54 | vote_log(C, N, T, V)@next :- vote_log(C, N, T, V); 55 | 56 | member_cnt(N, count) :- member(N, _, M); 57 | yes_vote_cnt(Node, Term, count) :- vote_log(Node, Member, Term, "T"), member(Node, Member, Id); 58 | 59 | role_change(N, "L") :- yes_vote_cnt(N, _, Cnt1), member_cnt(N, Cnt2), Cnt1 > Cnt2 / 2; 60 | 61 | 62 | 63 | 64 | 65 | 66 | //append_entries("z", 0, "z", 0, 0, "FOO", 0)@100; 67 | 68 | 69 | // a stub till I figure out commit indexes 70 | commit_indx(Node, Idx) :- log_term(Node, Idx); 71 | 72 | 73 | 74 | member(N, M, I)@next :- member(N, M, I); 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/paxos_synod.ded: -------------------------------------------------------------------------------- 1 | // proposer 2 | 3 | 4 | include "util/timeout_svc.ded"; 5 | timer_svc(A, M, 3) :- 
proposal(A, M); 6 | 7 | 8 | nodes(A, N, I)@next :- nodes(A, N, I); 9 | seed(A, S)@next :- seed(A, S), notin update_seed(A); 10 | seed(A, S+C)@next :- seed(A, S), update_seed(A), agent_cnt(A, C); 11 | 12 | prepare(B, A, S, M)@async :- proposal(A, M), seed(A, S), nodes(A, B, _); 13 | update_seed(A) :- proposal(A, _); 14 | 15 | redo(A, M) :- timeout(A, M), notin accepted(A, _, M); 16 | prepare(B, A, S, M)@async :- redo(A, M), seed(A, S), nodes(A, B, _); 17 | timer_svc(A,M,3) :- redo(A, M); 18 | update_seed(A) :- redo(A, M); 19 | 20 | response_log(C, A, S, O, Os) :- prepare_response(C, A, S, O, Os); 21 | response_log(C, A, S, O, M)@next :- response_log(C, A, S, O, M); 22 | 23 | // workaround for the fact that c4 can't count strings! 24 | //response_cnt(C, S, count) :- response_log(C, A, S, O, Os); 25 | response_cnt(C, S, count) :- response_log(C, A, S, O, Os), nodes(C, A, I); 26 | 27 | best(C, S, max) :- response_log(C, A, S, O, Os); 28 | what(C, I) :- nodes(C, _, I); 29 | //agent_cnt(C, count) :- nodes(C, _, I); 30 | agent_cnt(C, count) :- what(C, I); 31 | 32 | accept(A, S, O)@async :- agent_cnt(C, Cnt1), response_cnt(C, S, Cnt2), 33 | response_log(C, _, S, O, Os), best(C, S, Os), nodes(C, A, _), Os != 1, Cnt2 > Cnt1 / 2; 34 | accept(A, S, P)@async :- agent_cnt(C, Cnt1), response_cnt(C, S, Cnt2), response_log(C, _, S, O, Os), 35 | best(C, S, Os), my_proposal(C, P), nodes(C, A, _), Os == 1, Cnt2 > Cnt1 / 2; 36 | 37 | 38 | // acceptor 39 | dominated(A, S) :- prepare(A, _, S, _), prepare_log(A, S2, _), S2 > S; 40 | can_respond(A, C, S, M) :- prepare(A, C, S, M), notin dominated(A, S); 41 | prepare_response(C, A, S, O, Os)@async :- can_respond(A, C, S, M), accepted(A, Os, O), highest_accepted(A, Os); 42 | prepare_response(C, A, S, "anything", 1)@async :- can_respond(A, C, S, M), notin accepted(A, _, _); 43 | 44 | 45 | highest_accepted(A, max) :- accepted(A, S, _); 46 | accepted(A, S, M) :- accept(A, S, M); 47 | accepted(A, S, M)@next :- accepted(A, S, M); 48 | 49 | 50 | prepare_log(A, S, M) :- prepare(A, _, S, M); 51 | prepare_log(A, S, M)@next :- prepare_log(A, S, M); 52 | 53 | 54 | my_proposal(A, P) :- proposal(A, P); 55 | my_proposal(A, P)@next :- my_proposal(A, P); 56 | 57 | //good(C, S, M) :- accepted(C, S, M); 58 | 59 | important(A, M) :- accepted(A, _, M), notin crash(A, A, _); 60 | 61 | pre(M) :- important(_, M); 62 | disagree(M) :- important(_, M), important(_, N), M != N; 63 | post(M) :- important(_, M), notin disagree(M); 64 | 65 | bad(A) :- important(A, "peter"), important(_, "foobar"); 66 | good("yay") :- important(A,M), notin bad(A); 67 | 68 | /// EDB 69 | 70 | proposal("a", "peter")@1; 71 | proposal("b", "foobar")@1; 72 | 73 | seed("a", 4)@1; 74 | seed("b", 5)@1; 75 | seed("c", 6)@1; 76 | nodes("a", "b", 2)@1; 77 | nodes("a", "c", 3)@1; 78 | nodes("b", "a", 1)@1; 79 | nodes("b", "c", 3)@1; 80 | nodes("c", "a", 1)@1; 81 | nodes("c", "b", 2)@1; 82 | nodes("a", "a", 1)@1; 83 | nodes("b", "b", 2)@1; 84 | nodes("c", "c", 3)@1; 85 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/report/ProvenanceDiagramGenerator.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.report 2 | 3 | import edu.berkeley.cs.boom.molly.derivations.{DerivationTreeNode, RuleNode, GoalNode} 4 | 5 | 6 | object ProvenanceDiagramGenerator extends GraphvizPrettyPrinter { 7 | 8 | private val GRAY = "gray" 9 | private val BLACK = "black" 10 | private val WHITE = "white" 
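// Palette summary (annotation, not in the original source), matching the
// fontColor/fillColor/color helpers below: tombstoned goals are drawn gray
// and dashed, negative goals white-on-black, ordinary goals black-on-white;
// rule nodes turn gray whenever any of their subgoals is tombstoned.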
11 | 12 | def generate(goals: List[GoalNode]): String = { 13 | val dot = "digraph" <+> "dataflow" <+> braces(nest( 14 | linebreak <> 15 | braces("rank=\"same\";" <+> ssep(goals.map(g => text("goal" + g.id)), comma <> space)) <@@> 16 | // Converting to a set of strings is an ugly trick to avoid adding duplicate edges: 17 | goals.flatMap(dotStatements).map(d => super.pretty(d)).toSet.map(text).foldLeft(empty)(_ <@@> _) 18 | ) <> linebreak) 19 | super.pretty(dot) 20 | } 21 | 22 | private def fontColor(node: DerivationTreeNode): String = { 23 | node match { 24 | case goal: GoalNode => 25 | if (goal.tuple.tombstone) GRAY 26 | else if (goal.tuple.negative) WHITE 27 | else BLACK 28 | case rule: RuleNode => 29 | if (rule.subgoals.exists(_.tuple.tombstone)) GRAY 30 | else BLACK 31 | } 32 | } 33 | 34 | private def fillColor(node: DerivationTreeNode): String = { 35 | node match { 36 | case goal: GoalNode => 37 | if (goal.tuple.tombstone) GRAY 38 | else if (goal.tuple.negative) BLACK 39 | else WHITE 40 | case rule: RuleNode => 41 | if (rule.subgoals.exists(_.tuple.tombstone)) GRAY 42 | else WHITE 43 | } 44 | } 45 | 46 | private def color(node: DerivationTreeNode): String = { 47 | node match { 48 | case goal: GoalNode => 49 | if (goal.tuple.tombstone) GRAY 50 | else BLACK 51 | case rule: RuleNode => 52 | if (rule.subgoals.exists(_.tuple.tombstone)) GRAY 53 | else BLACK 54 | } 55 | } 56 | 57 | 58 | private def dotStatements(goal: GoalNode): List[Doc] = { 59 | val id = "goal" + goal.id 60 | val goalNode = node(id, 61 | "label" -> goal.tuple.toString, 62 | "style" -> (if (goal.tuple.tombstone) "dashed" else "filled"), 63 | "fontcolor" -> fontColor(goal), 64 | "color" -> color(goal), 65 | "fillcolor" -> fillColor(goal)) 66 | val edges = goal.rules.map { 67 | rule => diEdge(id, "rule" + rule.id, 68 | "color" -> (if (rule.subgoals.exists(_.tuple.tombstone)) GRAY else BLACK)) 69 | } 70 | List(goalNode) ++ edges ++ goal.rules.flatMap(dotStatements) 71 | } 72 | 73 | private def dotStatements(rule: RuleNode): List[Doc] = { 74 | val id = "rule" + rule.id 75 | val nodes = List(node(id, 76 | "label" -> rule.rule.head.tableName, 77 | "shape" -> "rect", 78 | "fontcolor" -> fontColor(rule), 79 | "color" -> color(rule), 80 | "fillcolor" -> fillColor(rule))) 81 | val edges = rule.subgoals.map { 82 | goal => diEdge(id, "goal" + goal.id, 83 | "color" -> (if (goal.tuple.tombstone) GRAY else BLACK)) 84 | } 85 | nodes ++ edges ++ rule.subgoals.flatMap(dotStatements) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/report/HTMLWriter.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.report 2 | 3 | import java.io.{PrintWriter, File} 4 | import edu.berkeley.cs.boom.molly.Run 5 | import edu.berkeley.cs.boom.molly.report.MollyCodecJsons._ 6 | import argonaut._, Argonaut._ 7 | import org.apache.commons.io.FileUtils 8 | import scala.collection.JavaConversions._ 9 | import scala.sys.process._ 10 | import org.apache.commons.io.filefilter.{FalseFileFilter, TrueFileFilter} 11 | import scalaz.EphemeralStream 12 | import scalaz.syntax.id._ 13 | import java.util.concurrent.Executors 14 | 15 | 16 | object HTMLWriter { 17 | 18 | private val templateDir = 19 | new File(HTMLWriter.getClass.getClassLoader.getResource("vis_template").getPath) 20 | 21 | private def copyTemplateFiles(outputDirectory: File) { 22 | FileUtils.iterateFilesAndDirs(templateDir, 
FalseFileFilter.INSTANCE, TrueFileFilter.INSTANCE).foreach { 23 | dir => FileUtils.copyDirectoryToDirectory(dir, outputDirectory) 24 | } 25 | FileUtils.iterateFiles(templateDir, null, false).foreach { 26 | file => FileUtils.copyFileToDirectory(file, outputDirectory) 27 | } 28 | } 29 | 30 | private def writeGraphviz(dot: String, outputDirectory: File, filename: String) = { 31 | val dotFile = new File(outputDirectory, s"$filename.dot") 32 | val svgFile = new File(outputDirectory, s"$filename.svg") 33 | FileUtils.write(dotFile, dot) 34 | new Runnable() { 35 | def run() { 36 | val dotExitCode = s"dot -Tsvg -o ${svgFile.getAbsolutePath} ${dotFile.getAbsolutePath}".! 37 | assert(dotExitCode == 0) 38 | } 39 | } 40 | } 41 | 42 | def write(outputDirectory: File, originalPrograms: List[File], runs: EphemeralStream[Run], 43 | generateProvenanceDiagrams: Boolean, disableDotRendering: Boolean = false) = { 44 | outputDirectory.mkdirs() 45 | require(outputDirectory.isDirectory) 46 | outputDirectory.listFiles().map(_.delete()) 47 | copyTemplateFiles(outputDirectory) 48 | val runsFile = 49 | new PrintWriter(FileUtils.openOutputStream(new File(outputDirectory, "runs.json"))) 50 | // Unfortunately, Argonaut doesn't seem to support streaming JSON writing, hence this code: 51 | var first: Boolean = true 52 | runsFile.print("[\n") 53 | val executor = Executors.newFixedThreadPool(4) // Some parallelism when writing out DOT files 54 | for (run <- runs) { 55 | if (!first) runsFile.print(",\n") 56 | runsFile.print(run.asJson.toString()) 57 | first = false 58 | val renderSpacetime = writeGraphviz(SpacetimeDiagramGenerator.generate(run.failureSpec, run.messages), 59 | outputDirectory, s"run_${run.iteration}_spacetime") 60 | if (!disableDotRendering) executor.submit(renderSpacetime) 61 | if (generateProvenanceDiagrams) { 62 | val renderProv = writeGraphviz(ProvenanceDiagramGenerator.generate(run.provenance), 63 | outputDirectory, s"run_${run.iteration}_provenance") 64 | if (!disableDotRendering) executor.submit(renderProv) 65 | 66 | } 67 | } 68 | runsFile.print("\n]") 69 | runsFile.close() 70 | executor.shutdown() // Begin an orderly shutdown; already-queued DOT rendering tasks still run to completion 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/symmetry/SymmetryAwareSet.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.symmetry 2 | 3 | import edu.berkeley.cs.boom.molly.FailureSpec 4 | import com.codahale.metrics.MetricRegistry 5 | import scala.collection.mutable 6 | import com.typesafe.scalalogging.LazyLogging 7 | import nl.grons.metrics.scala.InstrumentedBuilder 8 | 9 | 10 | class SymmetryAwareSet(symmetryChecker: SymmetryChecker)(implicit val metricRegistry: MetricRegistry) 11 | extends mutable.Set[FailureSpec] with LazyLogging with InstrumentedBuilder { 12 | 13 | private type SpecHash = (Set[(Int, Int)], Set[(Int, Int)], Set[Int]) 14 | private val backingMap = mutable.HashMap[SpecHash, mutable.Set[FailureSpec]]() 15 | private val symmetricScenariosSkipped = metrics.counter("symmetric-scenarios-skipped") 16 | 17 | /** 18 | * To avoid all-pairs comparisons, we partition the cache based on the number of 19 | * crash failures and message omissions at different timesteps. As in hash tables, 20 | * we want few entries per bucket.
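*
* Worked example (illustrative values, not from a real run): a spec with crashes
* {("a", 2)} and omissions {("a", "b", 1), ("a", "c", 1)} hashes to
* (Set((2, 1)), Set((1, 2)), Set(1)): one crash at time 2, two omissions at time 1,
* and each (from, to) pair losing exactly one message.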
21 | */ 22 | private def hashSpec(f: FailureSpec): SpecHash = { 23 | val crashesByTime = f.crashes.seq.map(c => c.time).groupBy(identity).mapValues(_.size).toSet 24 | val omissionsByTime = f.omissions.seq.map(_.time).groupBy(identity).mapValues(_.size).toSet 25 | val omissionPairCounts = f.omissions.groupBy(o => (o.from, o.to)).values.map(_.size).toSet 26 | (crashesByTime, omissionsByTime, omissionPairCounts) 27 | } 28 | 29 | def contains(f: FailureSpec): Boolean = { 30 | val hash = hashSpec(f) 31 | val possibleMatches = backingMap.get(hash) 32 | if (possibleMatches.isEmpty) { 33 | logger.debug(s"No candidates for symmetry for $f") 34 | false 35 | } else { 36 | logger.info(s"Found ${possibleMatches.get.size} possible symmetric scenarios for $f.") 37 | logger.debug(s"Possible symmetries are: \n${possibleMatches.get.map(_.toString).mkString("\n")}") 38 | for (cachedSpec <- possibleMatches.get) { 39 | if (cachedSpec == f) { 40 | return true 41 | } else if (symmetryChecker.areEquivalentForEDB(f, cachedSpec)) { 42 | logger.info(s"Found scenario $cachedSpec that is symmetric to $f") 43 | symmetricScenariosSkipped.inc() 44 | return true 45 | } 46 | } 47 | false 48 | } 49 | } 50 | 51 | override def +=(elem: FailureSpec) = { 52 | val hash = hashSpec(elem) 53 | val bucket = backingMap.get(hash) 54 | if (bucket.isEmpty) { 55 | backingMap.put(hash, mutable.Set(elem)) 56 | } else { 57 | bucket.get += elem 58 | } 59 | this 60 | } 61 | 62 | override def clear(): Unit = { 63 | backingMap.clear() 64 | } 65 | 66 | override def seq = { 67 | throw new NotImplementedError("SymmetryAwareSet doesn't support seq") 68 | } 69 | 70 | override def iterator = { 71 | throw new NotImplementedError("SymmetryAwareSet doesn't support iterator") 72 | } 73 | 74 | override def -=(elem: FailureSpec) = { 75 | throw new NotImplementedError("SymmetryAwareSet doesn't support deletions") 76 | } 77 | 78 | override def empty = { 79 | throw new NotImplementedError("SymmetryAwareSet doesn't support empty") 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/report/SpacetimeDiagramGenerator.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.report 2 | 3 | import edu.berkeley.cs.boom.molly.FailureSpec 4 | import edu.berkeley.cs.boom.molly.derivations.Message 5 | import scalaz._ 6 | import Scalaz._ 7 | 8 | object SpacetimeDiagramGenerator extends GraphvizPrettyPrinter { 9 | 10 | private val TIMELINE_COLOR = "gray75" 11 | 12 | def generate(failureSpec: FailureSpec, messages: Seq[Message]): String = { 13 | val processNodes = failureSpec.nodes.map { n => 14 | node(s"proc_$n", "label" -> s"Process $n", "group" -> n) 15 | } 16 | 17 | /** 18 | * The last time that this node appears on the timeline. Usually this is EOT, 19 | * but it could be smaller if the node crashes at this time. 
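*
* For example (illustrative): with eot = 5 and a single crash of node "b" at time 3,
* lastTimeOfNode("b") == 3 while every other node maps to 5.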
20 | */ 21 | val lastTimeOfNode = 22 | failureSpec.crashes.map(x => x.node -> x.time).toMap.withDefaultValue(failureSpec.eot) 23 | 24 | val nodeTimes = for ( 25 | n <- failureSpec.nodes; 26 | t <- 1 to lastTimeOfNode(n)) yield { 27 | val crashed = !failureSpec.crashes.filter(c => c.node == n && c.time <= t).isEmpty 28 | val sentOrReceivedMessage = messages.exists { m => 29 | (m.from == n && m.to != n && m.sendTime == t) || (m.to == n && m.from != n && m.receiveTime == t) 30 | } 31 | val styles = 32 | if (crashed) Seq("color" -> "red", "fontsize" -> "10", "label" -> "CRASHED", "group" -> n, "shape" -> "box") 33 | else if (sentOrReceivedMessage) Seq("label" -> s"$t", "group" -> n) 34 | else Seq("shape" -> "point", "group" -> n, "color" -> TIMELINE_COLOR) 35 | node(s"node_${n}_$t", styles: _*) 36 | } 37 | 38 | val timelineLines = failureSpec.nodes.map { n => 39 | val nodes = (1 to lastTimeOfNode(n)).map { t => s"node_${n}_$t" }.toSeq 40 | s"edge[weight=2, arrowhead=none, color=gray75, fillcolor=$TIMELINE_COLOR];" <@@> s"proc_$n -> " <> nodes.mkString(" -> ") <> semi 41 | } 42 | 43 | val messageEdges = messages.flatMap { case Message(table, from, to, sendTime, receiveTime) => 44 | val wasLost = receiveTime == FailureSpec.NEVER 45 | val receiverEOT = lastTimeOfNode(to) 46 | // Note that this doesn't draw messages that were sent to nodes that have been 47 | // crashed for more than one timestep. 48 | val receiverCrashed = receiveTime > receiverEOT && receiverEOT < failureSpec.eot 49 | if (receiverCrashed) None 50 | else diEdge(s"node_${from}_$sendTime", s"node_${to}_${sendTime + 1}", 51 | "label" -> (table + (if (wasLost) " (LOST)" else "")), 52 | "constraint" -> "false", 53 | "weight" -> "0", 54 | "style" -> (if (wasLost) "dashed" else "solid"), 55 | "color" -> (if (wasLost) "red" else "black") 56 | ).some 57 | } 58 | 59 | val dot = "digraph" <+> "spacetime" <+> braces(nest( 60 | linebreak <> 61 | "rankdir=TD" <@@> 62 | "splines=line" <@@> // Keep the message lines straight 63 | "outputorder=nodesfirst" <@@> 64 | subgraph("cluster_proc_nodes", "", processNodes) <@@> 65 | nodeTimes.reduce(_ <@@> _) <@@> 66 | messageEdges.foldLeft(empty)(_ <@@> _) <@@> 67 | timelineLines.reduce(_ <@@> _) 68 | ) <> linebreak) 69 | super.pretty(dot) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/pipeline.ded: -------------------------------------------------------------------------------- 1 | // Tiny model of the HDFS data pipeline described in Gunawi et al. '10.
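//
// Rough shape of the protocol modeled below (summary, not new rules):
//   1. the namenode eagerly tells each datanode who its pipeline successor is
//      (snd_pipeline / ack_pipe);
//   2. a write (xfer) enters at the first node of the current pipeline
//      (first_node) and is forwarded hop by hop via snd;
//   3. acks flow back along the chain (ack), and when a datanode fails the
//      pipeline is patched around it by advancing to version I + 1.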
2 | 3 | 4 | datanode(N, D, I)@next :- datanode(N, D, I); 5 | 6 | include "./heartbeat.ded"; 7 | 8 | 9 | //pipeline_length(N, count<D>) :- pipeline(N, D, _); 10 | //pipeline(N, D, "NONE") :- xfer 11 | 12 | 13 | first_node(Namenode, Id, First) :- pipeline(Namenode, Id, First, Next), notin pipeline(Namenode, Id, _, First); 14 | 15 | // send the pipeline metadata to all nodes (eagerly) 16 | //pipeline(H, I, F, N)@async :- pipeline(M, I, F, N), datanode(M, H, _); 17 | pipeline(H, I, F, N)@next :- pipeline(H, I, F, N); 18 | // optimization: just tell each node who the next node is 19 | snd_pipeline(F, I, F, N, H)@async :- pipeline(H, I, F, N), notin ack_pipe(H, F, I, N); 20 | snd_pipeline(F, I, N, F, H)@async :- pipeline(H, I, N, F), notin ack_pipe(H, F, I, N); 21 | 22 | pipeline(H, I, F, N) :- snd_pipeline(H, I, F, N, _); 23 | ack_pipe(H, F, I, N) :- snd_pipeline(F, I, F, N, H); 24 | ack_pipe(H, F, I, N)@next :- ack_pipe(H, F, I, N); 25 | 26 | snd(Host, Id, Pl)@async :- xfer(N, Id, Pl), first_node(N, P, Host), notin ack(N, Id), current_pipeline(Node, P); 27 | snd(Host, Id, Pl)@async :- snd(Node, Id, Pl), pipeline(Node, P, Node, Host), notin ack(N, Id), current_pipeline(Node, P); 28 | ack(Sender, Id)@async :- snd(Node, Id, _), pipeline(Node, P, Sender, Node); 29 | ack(Pred, Id)@async :- ack(Me, Id), pipeline(Me, Id, Pred, Me); 30 | 31 | // test 32 | //snd(Host, Id, Pl)@async :- xfer(N, Id, Pl), first_node(N, P, Host); 33 | 34 | 35 | current_pipeline(Host, max<Id>) :- pipeline(Host, Id, _, _); 36 | 37 | xfer(N, I, P)@next :- xfer(N, I, P), notin ack(N, I); 38 | 39 | ack(N, I)@next :- ack(N, I); 40 | data(H, I, P) :- snd(H, I, P); 41 | 42 | data(H,I,P)@next :- data(H,I,P); 43 | 44 | watch("namenode", D) :- datanode("namenode", D, _); 45 | 46 | 47 | // per Haryadi et al., now form a pipeline that simply excludes the failed node: 48 | 49 | dead_pipe(H, I) :- pipeline(H, I, _, N), failed(H, N); 50 | dead_pipe(H, I) :- pipeline(H, I, N, _), failed(H, N); 51 | 52 | pipeline(H, I + 1, F, N) :- pipeline(H, I, F, N), notin failed(H, F), notin failed(H, N), dead_pipe(H, I); 53 | pipeline(H, I + 1, F, Next) :- pipeline(H, I, F, N), failed(H, N), pipeline(H, I, N, Next); 54 | 55 | 56 | // busted 57 | //pipeline(H, I + 1, F, N)@next :- pipeline(H, I, F, N), dead_pipe(H, I); 58 | 59 | // busted 2 (looks like it could be right) 60 | //pipeline(H, I + 1, F, N) :- pipeline(H, I, F, N), notin failed(H, F), notin failed(H, N), dead_pipe(H, I); 61 | //pipeline(H, I + 1, F, Next) :- pipeline(H, I, F, N), failed(H, N), pipeline(H, I, N, Next); 62 | 63 | 64 | 65 | 66 | 67 | datanode("namenode", "d1", 1)@1; 68 | datanode("namenode", "d2", 2)@1; 69 | datanode("namenode", "d3", 3)@1; 70 | //datanode("namenode", "d4", 4)@1; 71 | 72 | xfer("namenode", 1, "data1")@1; 73 | xfer("namenode", 2, "data2")@2; 74 | xfer("namenode", 3, "data3")@3; 75 | 76 | pipeline("namenode", 1, "d1", "d2")@1; 77 | pipeline("namenode", 1, "d2", "d3")@1; 78 | 79 | 80 | //dead("namenode", 3)@4; 81 | 82 | 83 | good(H, I) :- data(H, I, _); 84 | ever_xfer(N, I, D) :- xfer(N, I, D); 85 | ever_xfer(N, I, D)@next :- ever_xfer(N, I, D); 86 | good(H, I) :- ever_xfer(N, I, _), datanode(N, H, _), crash(_, N, _); 87 | 88 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/real_chord.ded: -------------------------------------------------------------------------------- 1 | include "real_heartbeat.ded"; 2 | 3 | 4 | f_node_mapping("a", 1)@1; 5 | //f_node_mapping("b", 3)@1; 6 | f_node_mapping("c", 4)@1; 7 | 8 |
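// (Illustrative reading of this EDB) n_succ encodes the logical ring
// 1 -> 2 -> 3 -> 4 -> 5 -> 1, and f_node_mapping hashes the physical nodes
// a, b, c, d onto ring positions 1, 3, 4, 5 respectively.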
9 | f_key_mapping("foo", 2)@1; 10 | f_key_mapping("bar", 1)@1; 11 | n_succ(1, 2)@1; 12 | n_succ(2, 3)@1; 13 | n_succ(3, 4)@1; 14 | n_succ(4,5)@1; 15 | n_succ(5, 1)@1; 16 | 17 | 18 | node("a")@1; 19 | node("b")@1; 20 | node("c")@1; 21 | node("d")@1; 22 | 23 | f_node_mapping("d", 5)@1; 24 | f_node_mapping("b", 3)@1; 25 | 26 | 27 | //node(X)@next :- node(X); 28 | f_node_mapping(H,I)@next :- f_node_mapping(H,I); 29 | f_key_mapping(H,I)@next :- f_key_mapping(H,I); 30 | 31 | // the hashing function is global 32 | node_mapping(N, H, I) :- node(N), f_node_mapping(H, I); 33 | key_mapping(N, H, I) :- node(N), f_key_mapping(H, I); 34 | logical_succ(H, X, Y) :- node(H), n_succ(X, Y); 35 | logical_succ(H,X,Y)@next :- logical_succ(H,X,Y); 36 | 37 | 38 | circumference(H, count<I>) :- logical_succ(H, I, _); 39 | distance(H, X, Y, 1) :- logical_succ(H, X, Y); 40 | distance(H, X, Y, C + 1) :- logical_succ(H, X, Z), distance(H, Z, Y, C), circumference(H, Cir), C < Cir-1; 41 | 42 | 43 | //logical_order(N, X, Y) :- logical_succ(N, X, Y); 44 | //logical_order(N, X, Y) :- logical_succ(N, X, Z), logical_order(N, Z, Y); 45 | 46 | /* 47 | nodes(N, count<I>) :- node_mapping(N, _, I); 48 | lsn(N, A, B) :- logical_succ(N, X, Y), node_mapping(N, A, X), node_mapping(N, B, Y); 49 | distance(N, X, Y, 1) :- lsn(N, X, Y); 50 | distance(N, X, Y, C + 1) :- lsn(N, X, Z), distance(N, Z, Y, C), nodes(N, Cnt), C < Cnt; 51 | */ 52 | 53 | 54 | 55 | 56 | //join("a", "a")@1; 57 | join("b", "a")@2; 58 | join("c", "a")@2; 59 | join("d", "a")@2; 60 | 61 | 62 | //successor(Host, Id, Node) :- join(Host, Id), node_mapping(Host, Node, Id); 63 | //successor(H, I, N)@next :- successor(H, I, N), notin succ_change(H, _, _); 64 | //successor(H, I, N)@next :- succ_change(H, _, _); 65 | 66 | 67 | succ(H, S)@next :- join(H, S), notin succ(H, _); 68 | succ(H, S) :- join(H, S), succ(H, S2), 69 | //node_mapping(H, S, I1), node_mapping(H, S2, I2), node_mapping(H, H, MyId), 70 | //distance(H, MyId, I1, D1), distance(H, MyId, I2, D2), 71 | closeness(H, S, D1), closeness(H, S2, D2), 72 | D1 < D2; 73 | succ(H, S)@next :- succ(H, S), notin succ_change(H, _); 74 | 75 | closeness(H, O, D) :- distance(H, F, T, D), node_mapping(H, H, F), node_mapping(H, O, T); 76 | 77 | stabilize(S, H)@async :- succ(H, S); 78 | stabilized(H, S, P)@async :- stabilize(S, H), pred(S, P); 79 | 80 | 81 | can_stab(H, S) :- stabilize(H, S), notin pred(H, _); 82 | can_stab(H, S) :- stabilize(H, S), pred(H, S2), 83 | node_mapping(H, S, I1), node_mapping(H, S2, I2), node_mapping(H, H, MyId), 84 | distance(H, MyId, I1, D1), distance(H, MyId, I2, D2), 85 | D1 > D2; 86 | 87 | //pred_change(S, H 88 | 89 | pred(S, H)@next :- pred(S, H), notin pred_change(S, _); 90 | 91 | 92 | 93 | //good(S, H) :- pred(S, H); 94 | good(S, H) :- logical_succ(S, H, _); 95 | 96 | //succ("a","b")@100; 97 | 98 | //distance("a",1,1, 1)@100; 99 | 100 | succ_change("a", "z")@100; 101 | pred_change("a", "z")@100; -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/FormulaSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import edu.berkeley.cs.boom.molly.derivations._ 4 | import org.scalatest.{FunSpec, Matchers} 5 | import sext._ 6 | 7 | 8 | class FormulaSuite extends FunSpec with Matchers { 9 | 10 | val nuttin = BFLiteral[String](None) 11 | val foo = BFLiteral[String](Some("foo")) 12 | val bar = BFLiteral[String](Some("bar")) 13 | val baz =
BFLiteral[String](Some("baz")) 14 | val qux = BFLiteral[String](Some("qux")) 15 | 16 | def trivialBooleanO(): BFNode[String] = { 17 | BFOrNode(foo, bar) 18 | } 19 | 20 | def trivialBooleanA(): BFNode[String] = { 21 | BFAndNode(baz, qux) 22 | } 23 | 24 | def nestedA1() : BFNode[String] = { 25 | BFAndNode(trivialBooleanO(), trivialBooleanO()) 26 | } 27 | 28 | def nestedO1() : BooleanFormula[String] = { 29 | BooleanFormula(BFOrNode(trivialBooleanA(), trivialBooleanA())) 30 | } 31 | 32 | 33 | 34 | 35 | def simpleBooleanFormula(): BFNode[String] = { 36 | val oneWay = BFOrNode(BFAndNode(foo, bar), BFAndNode(baz, qux)) 37 | val otherWay = BFOrNode(BFAndNode(qux, foo), BFAndNode(bar, baz)) 38 | BFAndNode(BFOrNode(otherWay, BFAndNode(oneWay, otherWay)), oneWay) 39 | } 40 | 41 | def paddedBooleanFormula(n: BFNode[String]): BFNode[String] = { 42 | BFAndNode(nuttin, BFOrNode(nuttin, BFOrNode(BFAndNode(nuttin, BFAndNode(nuttin, n)), nuttin))) 43 | } 44 | 45 | describe("simplifying a simplified boolean formula") { 46 | it("should preserve the formula") { 47 | simpleBooleanFormula.simplify should be (simpleBooleanFormula) 48 | } 49 | } 50 | 51 | describe("simplifying a padded boolean formula") { 52 | val pad = paddedBooleanFormula(simpleBooleanFormula) 53 | it("should yield the pure formula") { 54 | pad.simplify should be (simpleBooleanFormula) 55 | } 56 | 57 | it(s"should reduce its size (${pad.clauses} vs. ${pad.simplify.clauses})") { 58 | pad.simplify.clauses should be < (pad.clauses) 59 | } 60 | 61 | it("should preserve its variables") { 62 | pad.simplify.vars should be(pad.vars) 63 | } 64 | 65 | it("should preserve its CNF form") { 66 | pad.simplify.convertToCNF should be(simpleBooleanFormula.convertToCNF) 67 | } 68 | 69 | it("should be idempotent") { 70 | pad.simplify.simplify should be (pad.simplify) 71 | } 72 | } 73 | 74 | describe("conversion to CNF") { 75 | val pad = paddedBooleanFormula(simpleBooleanFormula) 76 | val trivial = trivialBooleanA() 77 | it(s"should increase the number of clauses (${nestedO1().convertToCNFAll.root.clauses} vs. 
${nestedO1.clauses})") { 78 | nestedO1().convertToCNFAll.root.clauses should be > (nestedO1.clauses) 79 | pad.simplify.convertToCNF.clauses should be > (pad.clauses) 80 | } 81 | 82 | it(s"should be idempotent") { 83 | //pad.simplify.convertToCNF.convertToCNF should be (pad.simplify.convertToCNF) 84 | //nestedA1.convertToCNF.convertToCNF.convertToCNF should be (nestedA1.convertToCNF) 85 | 86 | println(s"formula $nestedO1") 87 | println(s"cnf ${nestedO1.convertToCNFAll.simplifyAll.treeString}") 88 | val cnf = nestedO1.convertToCNFAll 89 | cnf.convertToCNFAll should be (cnf) 90 | //pad.simplify.convertToCNFAll.root.convertToCNFAll should be (pad.simplify.convertToCNFAll) 91 | } 92 | } 93 | 94 | } 95 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/DerivationTreesSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.mock.MockitoSugar 4 | import org.scalatest.{FunSuite, Matchers} 5 | 6 | import edu.berkeley.cs.boom.molly.ast.Rule 7 | import edu.berkeley.cs.boom.molly.derivations._ 8 | 9 | class DerivationTreesSuite extends FunSuite with Matchers with MockitoSugar { 10 | 11 | private def clockTuple(from: String, to: String, sendTime: Int): GoalTuple = { 12 | GoalTuple("clock", List(from, to, sendTime.toString, (sendTime + 1).toString), 13 | negative = false, tombstone = false) 14 | } 15 | 16 | private def edb(tuple: GoalTuple): GoalNode = { 17 | new RealGoalNode(tuple, Set.empty, negative = false) 18 | } 19 | 20 | private def goal(tuple: GoalTuple, rules: RuleNode*): GoalNode = 21 | RealGoalNode(tuple, rules.toSet, negative = false) 22 | 23 | private def rule(goals: GoalNode*): RuleNode = RuleNode(mock[Rule], goals.toSet) 24 | 25 | test("edb fact") { 26 | val tuple = GoalTuple("fact", List("a"), negative = false, tombstone = false) 27 | val goal = edb(tuple) 28 | goal.ownImportantClock should be (None) 29 | goal.importantClocks should be (empty) 30 | goal.enumerateDistinctDerivations should be (Set(goal)) 31 | goal.allTups should be (Set(tuple)) 32 | } 33 | 34 | test("clock fact") { 35 | val tuple = clockTuple("from", "to", 1) 36 | val goal = edb(tuple) 37 | goal.ownImportantClock should be (Some("from", "to", 1)) 38 | goal.importantClocks should be (Set(("from", "to", 1))) 39 | goal.enumerateDistinctDerivations should be (Set(goal)) 40 | goal.allTups should be (Set(goal.tuple)) 41 | } 42 | 43 | test("goal with two rule firings") { 44 | val logA1 = GoalTuple("log", List("A", "data", "1"), negative = false, tombstone = false) 45 | val logA2 = GoalTuple("log", List("A", "data", "2"), negative = false, tombstone = false) 46 | val logB1 = GoalTuple("log", List("B", "data", "1"), negative = false, tombstone = false) 47 | val persistenceRule = RuleNode(mock[Rule], Set(edb(logA1), edb(clockTuple("A", "A", 1)))) 48 | val sendRule = RuleNode(mock[Rule], Set(edb(logB1), edb(clockTuple("B", "A", 1)))) 49 | val goal = RealGoalNode(logA2, Set(persistenceRule, sendRule), negative = false) 50 | goal.importantClocks should be (Set(("B", "A", 1))) 51 | goal.enumerateDistinctDerivations should be (Set( 52 | goal.copy(pRules = Set(persistenceRule)), 53 | goal.copy(pRules = Set(sendRule)) 54 | )) 55 | goal.allTups should be (Set( 56 | logA1, logA2, logB1, clockTuple("A", "A", 1), clockTuple("B", "A", 1) 57 | )) 58 | } 59 | 60 | test("cross-product of trees") { 61 | val root = GoalTuple("root", Nil, negative = false, tombstone = 
false) 62 | val a = GoalTuple("a", Nil, negative = false, tombstone = false) 63 | val b = GoalTuple("b", Nil, negative = false, tombstone = false) 64 | val c = GoalTuple("c", Nil, negative = false, tombstone = false) 65 | val d = GoalTuple("d", Nil, negative = false, tombstone = false) 66 | 67 | val tree = goal(root, 68 | rule( 69 | goal(a, 70 | rule(edb(c)), 71 | rule(edb(d)) 72 | ) 73 | ), 74 | rule( 75 | goal(b, 76 | rule(edb(c)), 77 | rule(edb(d)) 78 | ) 79 | ) 80 | ) 81 | tree.enumerateDistinctDerivations.size should be (4) 82 | tree.enumerateDistinctDerivations.map(_.allTups) should be (Set( 83 | Set(root, a, c), Set(root, a, d), Set(root, b, c), Set(root, b, d) 84 | )) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/DedalusRewritesSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalatest.{FunSuite, Matchers} 4 | 5 | import edu.berkeley.cs.boom.molly.ast._ 6 | import edu.berkeley.cs.boom.molly.DedalusRewrites.{dc, nreserved, mreserved} 7 | 8 | class DedalusRewritesSuite extends FunSuite with Matchers { 9 | 10 | test("clock rewrite with deductive rule") { 11 | val node = Identifier("Node") 12 | val pload = Identifier("Pload") 13 | val rules = List( 14 | Rule( 15 | Predicate("log", List(node, pload), notin = false, time = None), 16 | List(Left(Predicate("bcast", List(node, pload), notin = false, time = None)))) 17 | ) 18 | val program = Program(rules, facts = Nil, includes = Nil, tables = Set.empty) 19 | val rewrittenProgram = DedalusRewrites.referenceClockRules(program) 20 | val expectedRules = List( 21 | Rule( 22 | Predicate("log", List(node, pload, nreserved), notin = false, time = None), 23 | List( 24 | Left(Predicate("bcast", List(node, pload, nreserved), notin = false, time = None)), 25 | Left(Predicate("clock", List(node, node, nreserved, dc), notin = false, time = None)) 26 | )) 27 | ) 28 | rewrittenProgram.rules should be (expectedRules) 29 | } 30 | 31 | test("clock rewrite with inductive rule") { 32 | val node = Identifier("Node") 33 | val neighbor = Identifier("Neighbor") 34 | val rules = List( 35 | Rule( 36 | Predicate("node", List(node, neighbor), notin = false, time = Some(Next())), 37 | List(Left(Predicate("node", List(node, neighbor), notin = false, time = None)))) 38 | ) 39 | val program = Program(rules, facts = Nil, includes = Nil, tables = Set.empty) 40 | val rewrittenProgram = DedalusRewrites.referenceClockRules(program) 41 | val sendTimePlus1 = Expr(nreserved, "+", IntLiteral(1)) 42 | val expectedRules = List( 43 | Rule( 44 | Predicate("node", List(node, neighbor, sendTimePlus1), notin = false, time = Some(Next())), 45 | List( 46 | Left(Predicate("node", List(node, neighbor, nreserved), notin = false, time = None)), 47 | Left(Predicate("clock", List(node, dc, nreserved, dc), notin = false, time = None)) 48 | )) 49 | ) 50 | rewrittenProgram.rules should be (expectedRules) 51 | } 52 | 53 | test("clock rewrite with async rule") { 54 | val node1 = Identifier("Node1") 55 | val node2 = Identifier("Node2") 56 | val pload = Identifier("Pload") 57 | val rules = List( 58 | Rule( 59 | Predicate("log", List(node2, pload), notin = false, time = Some(Async())), 60 | List( 61 | Left(Predicate("bcast", List(node1, pload), notin = false, time = None)), 62 | Left(Predicate("node", List(node1, node2), notin = false, time = None)) 63 | )) 64 | ) 65 | val program = Program(rules, facts = 
Nil, includes = Nil, tables = Set.empty) 66 | val rewrittenProgram = DedalusRewrites.referenceClockRules(program) 67 | val expectedRules = List( 68 | Rule( 69 | Predicate("log", List(node2, pload, mreserved), notin = false, time = Some(Async())), 70 | List( 71 | Left(Predicate("bcast", List(node1, pload, nreserved), notin = false, time = None)), 72 | Left(Predicate("node", List(node1, node2, nreserved), notin = false, time = None)), 73 | Left(Predicate("clock", List(node1, node2, nreserved, mreserved), notin = false, time = None)) 74 | )) 75 | ) 76 | rewrittenProgram.rules should be (expectedRules) 77 | } 78 | 79 | } 80 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/gstore/gstore.ded: -------------------------------------------------------------------------------- 1 | include "../ramp/encoding.ded"; 2 | include "group_delete.ded"; 3 | 4 | leader(C, G, L)@next :- leader(C, G, L); 5 | leader("C", "a|b|c", "a")@1; 6 | leader("C", "b|c|d", "b")@1; 7 | leader("C2", "a|b|c", "a")@1; 8 | leader("C2", "b|c|d", "b")@1; 9 | 10 | 11 | create_group(C,I,G)@next :- create_group(C, I, G); //, notin cg_ack(C, I); 12 | create_group_request(Leader, Cli, Id, Group)@async :- create_group(Cli, Id, Group), leader(Cli, Group, Leader); 13 | 14 | running_group_req(L,C,I,G) :- create_group_request(L,C,I,G); 15 | running_group_req(L,C,I,G)@next :- running_group_req(L,C,I,G), notin end_delete(L, Id); 16 | 17 | // leader logic 18 | j(Server, Leader, Id, Group)@async :- create_group_request(Leader, _, Id, Group), lencoding(Leader, Group, Server); 19 | 20 | 21 | // follower logic 22 | j_buf(S,L,I,G) :- j(S,L,I,G); 23 | j_buf(S,L,I,G)@next :- j_buf(S,L,I,G), notin next(S, I); 24 | next(S,min<I>) :- j_buf(S,_,I,_), notin j_log(S, _, _, _, _); 25 | 26 | // if we are joining a legitimate group, advance the state. 27 | process_event(Server, Leader, Id, Group, Yid)@next :- next(Server, Id), j_buf(Server, Leader, Id, Group), 28 | yield_id(Server, Yid), notin follower(Server, _, _), notin j_log(Server, _, _, _, _); 29 | // or if the key is owned, reject. 30 | ja(Leader, Server, Id, Yid, "R") :- next(Server, Id), j_buf(Server, Leader, Id, Group), 31 | yield_id(Server, Yid), follower(Server, Leader2, _), Leader2 != Leader; 32 | 33 | j_log(Server, Leader, Id, Group, Yid) :- process_event(Server, Leader, Id, Group, Yid); 34 | // the follower needs to retry its acks until it knows that the leader knows. 35 | j_log(S,L,I,G,Y)@next :- j_log(S,L,I,G,Y), notin jaa(S, L, I, Y); 36 | new_yid(Server) :- process_event(Server, _, _, _,_); 37 | 38 | // There is a lot of unwritten logic here. At what point do I consider myself a follower? If only after 39 | // receiving a jaa, then I could promise myself to multiple leaders. 40 | // So we consider ourselves a follower even while a create_group attempt is still ongoing. 41 | follower(Server, Leader, Id)@next :- j_log(Server, Leader, Id, _, _); 42 | follower(Server, Leader, Id)@next :- follower(Server, Leader, Id), notin d(Server, Leader, Id); 43 | 44 | // keep sending acks;
suppress the last spurious ack (due to the deferred update of j_log). 45 | ja(Leader, Server, Id, Yid, "A")@async :- j_log(Server, Leader, Id, Group, Yid), notin jaa(Server, Leader, Id, Yid); 46 | 47 | 48 | 49 | // 50 | jaa(Server, Leader, Id, Yid)@async :- ja(Leader, Server, Id, Yid, _); 51 | 52 | ja_log(L,S,I,Y,O) :- ja(L, S, I, Y, O); 53 | ja_log(L,S,I,Y,O)@next :- ja_log(L, S, I, Y, O); 54 | 55 | missing_ja(Leader, Server, Id) :- running_group_req(Leader, _, Id, Group), 56 | lencoding(Leader, Group, Server), notin ja_log(Leader, Server, Id, _, "A"), Leader != Server; 57 | can_accept_writes(Leader, Id, Group) :- running_group_req(Leader, _, Id, Group), notin missing_ja(Leader, _, Id); 58 | //jaa(S,L,I,D)@next :- jaa(S,L,I,D); 59 | 60 | //good(C) :- create_group(C, _, _)@1; 61 | 62 | //good(Group) :- can_accept_writes(_, _, Group); 63 | problem(Leader, Server) :- can_accept_writes(Leader, Id, Group), 64 | lencoding(Leader, Group, Server), notin follower(Server, Leader, Id), Server != Leader; 65 | 66 | good("yay") :- can_accept_writes(Leader, Id, Group), notin problem(Leader, _); 67 | wata(Leader) :- can_accept_writes(Leader, Id, Group), problem(Leader, _); 68 | //good("yay") :- create_group(_, _, _)@1; 69 | 70 | create_group("C", 1, "a|b|c")@1; 71 | create_group("C2", 2, "b|c|d")@1; 72 | 73 | 74 | yield_id(C, I)@next :- yield_id(C, I), notin new_yid(C); 75 | yield_id(C, I + 1)@next :- yield_id(C, I), new_yid(C); 76 | 77 | 78 | yield_id("a", 0)@1; 79 | yield_id("b", 0)@1; 80 | yield_id("c", 0)@1; 81 | yield_id("d", 0)@1; -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/symmetry/SymmetryCheckerSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.symmetry 2 | 3 | import org.scalatest.FunSuite 4 | 5 | import edu.berkeley.cs.boom.molly.FailureSpec 6 | import edu.berkeley.cs.boom.molly.ast._ 7 | import edu.berkeley.cs.boom.molly.derivations.{MessageLoss, CrashFailure} 8 | 9 | class SymmetryCheckerSuite extends FunSuite { 10 | test("simple symmetry with an empty EDB and no rules") { 11 | val program = Program( 12 | rules = Nil, 13 | facts = Nil, 14 | includes = Nil, 15 | tables = Set.empty 16 | ) 17 | val nodes = List("A", "B", "C") 18 | val symmetryChecker = new SymmetryChecker(program, nodes) 19 | val a = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("A", 2))) 20 | val b = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("B", 2))) 21 | val c = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("C", 2))) 22 | assert(symmetryChecker.areEquivalentForEDB(a, b)) 23 | assert(symmetryChecker.areEquivalentForEDB(b, c)) 24 | assert(symmetryChecker.areEquivalentForEDB(c, a)) 25 | 26 | val d = FailureSpec(4, 3, 2, nodes, omissions = Set(MessageLoss("A", "B", 2))) 27 | val e = FailureSpec(4, 3, 2, nodes, omissions = Set(MessageLoss("B", "A", 2))) 28 | val f = FailureSpec(4, 3, 2, nodes, omissions = Set(MessageLoss("B", "A", 1))) 29 | assert(symmetryChecker.areEquivalentForEDB(d, e)) 30 | assert(!symmetryChecker.areEquivalentForEDB(a, e)) 31 | assert(!symmetryChecker.areEquivalentForEDB(e, f)) 32 | } 33 | 34 | test("nodes that appear as literals in rules are excluded from potential symmetries") { 35 | // This is a conservative approach that may miss out on some potential symmetries if the 36 | // rules themselves exhibit certain types of symmetry 37 | val program = Program( 38 | // Dummy rule so that "A" is marked as a location literal that
appears in a rule body 39 | rules = List( 40 | Rule( 41 | Predicate("table", List(Identifier("Location")), notin = false, None), 42 | List(Left(Predicate("table", List(StringLiteral("A")), notin = false, None))) 43 | ) 44 | ), 45 | facts = Nil, 46 | includes = Nil 47 | ) 48 | val nodes = List("A", "B", "C") 49 | val symmetryChecker = new SymmetryChecker(program, nodes) 50 | val a = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("A", 2))) 51 | val b = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("B", 2))) 52 | val c = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("C", 2))) 53 | assert(!symmetryChecker.areEquivalentForEDB(a, b)) 54 | assert(symmetryChecker.areEquivalentForEDB(b, c)) 55 | assert(!symmetryChecker.areEquivalentForEDB(c, a)) 56 | } 57 | 58 | test("EDB symmetry is required for symmetry") { 59 | val program = Program( 60 | rules = Nil, 61 | facts = List( 62 | // A and B are symmetric with respect to this EDB, while A and C are not 63 | Predicate("foo", List(StringLiteral("A"), IntLiteral(1)), notin = false, None), 64 | Predicate("foo", List(StringLiteral("B"), IntLiteral(1)), notin = false, None), 65 | Predicate("foo", List(StringLiteral("C"), IntLiteral(2)), notin = false, None) 66 | ), 67 | includes = Nil 68 | ) 69 | val nodes = List("A", "B", "C") 70 | val symmetryChecker = new SymmetryChecker(program, nodes) 71 | val a = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("A", 2))) 72 | val b = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("B", 2))) 73 | val c = FailureSpec(4, 3, 2, nodes, crashes = Set(CrashFailure("C", 2))) 74 | assert(symmetryChecker.areEquivalentForEDB(a, b)) 75 | assert(!symmetryChecker.areEquivalentForEDB(b, c)) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/paperexperiments/TableOfCorrectPrograms.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.paperexperiments 2 | 3 | import java.io.File 4 | import com.github.tototoshi.csv.CSVWriter 5 | import edu.berkeley.cs.boom.molly.{RunStatus, FailureSpec, SyncFTChecker, Config} 6 | import com.codahale.metrics.MetricRegistry 7 | 8 | /** 9 | * Generates the data for the table of correct programs in the paper. 10 | */ 11 | object TableOfCorrectPrograms { 12 | val programs = Seq( 13 | // ("Input programs", "eot", "eff", "crashes", "nodes") 14 | (Seq("delivery/rdlog.ded", "delivery/deliv_assert.ded"), 25, 23, 0, Seq("a", "b", "c")), // AKA retry-deliv 15 | (Seq("delivery/replog.ded", "delivery/deliv_assert.ded"), 8, 6, 1, Seq("a", "b", "c")), // AKA redun-deliv 16 | (Seq("delivery/ack_rb.ded", "delivery/deliv_assert.ded"), 8, 6, 1, Seq("a", "b", "c")), // AKA ack-deliv 17 | (Seq("paxos_synod.ded"), 8, 3, 1, Seq("a", "b", "c")) 18 | ) 19 | 20 | /** 21 | * Run the analyzer until it exhaustively covers the failure space. 22 | * 23 | * @param config the analyzer and program configuration 24 | * @return (runtime (seconds), num runs) 25 | */ 26 | def runUntilExhaustion(config: Config): (Double, Int) = { 27 | System.gc() // Run a full GC so we don't count time spent cleaning up earlier runs. 
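// The loop below forces the EphemeralStream one element at a time, so runs are
// counted and checked without the whole stream being retained in memory.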
28 | val metrics: MetricRegistry = new MetricRegistry() 29 | var runsCount = 0 30 | val startTime = System.currentTimeMillis() 31 | var runs = SyncFTChecker.check(config, metrics) // An ephemeral stream 32 | while (!runs.isEmpty) { 33 | runsCount += 1 34 | assert (runs.head().status == RunStatus("success")) 35 | runs = runs.tail() 36 | } 37 | val duration = (System.currentTimeMillis() - startTime) / 1000.0 38 | (duration, runsCount) 39 | } 40 | 41 | /** 42 | * Warm up the JVM so we get more accurate timing for the first experiment. 43 | */ 44 | private def warmup() { 45 | val (inputPrograms, eot, eff, crashes, nodes) = programs.head 46 | val inputFiles = inputPrograms.map(name => new File("../examples_ft/" + name)) 47 | val config = Config(eot, eff, crashes, nodes, inputFiles, strategy = "random", 48 | useSymmetry = false, disableDotRendering = true) 49 | val metrics: MetricRegistry = new MetricRegistry() 50 | val runs = SyncFTChecker.check(config, metrics) // An ephemeral stream 51 | runs.take(100).toArray // Force evaluation 52 | } 53 | 54 | def main(args: Array[String]) { 55 | warmup() 56 | val csvFile = new File("table_of_correct_programs.csv") 57 | val csvWriter = CSVWriter.open(csvFile) 58 | val header = Seq("program", "eot", "eff", "crashes", "bound", "backward_exe", "backward_time", 59 | "symm_exe", "symm_time", "causal_exe", "causal_time") 60 | csvWriter.writeRow(header) 61 | try { 62 | for ((inputPrograms, eot, eff, crashes, nodes) <- programs) { 63 | val inputFiles = inputPrograms.map(name => new File("../examples_ft/" + name)) 64 | val backwardConfig = Config(eot, eff, crashes, nodes, inputFiles, strategy = "sat", 65 | useSymmetry = false, disableDotRendering = true) 66 | val symmetryConfig = backwardConfig.copy(useSymmetry = true) 67 | val causalConfig = backwardConfig.copy(strategy = "pcausal") 68 | val grossEstimate = FailureSpec(eot, eff, crashes, nodes.toList).grossEstimate 69 | val (backwardTime, backwardExe) = runUntilExhaustion(backwardConfig) 70 | val (symmetryTime, symmetryExe) = runUntilExhaustion(symmetryConfig) 71 | val (causalTime, causalExe) = runUntilExhaustion(causalConfig) 72 | csvWriter.writeRow(Seq(inputPrograms, eot, eff, crashes, grossEstimate, 73 | backwardExe, backwardTime, symmetryExe, symmetryTime, causalExe, causalTime)) 74 | } 75 | } finally { 76 | csvWriter.close() 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/DedalusParser.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import java.io.File 4 | 5 | import scala.io.Source 6 | 7 | import org.kiama.util.PositionedParserUtilities 8 | 9 | import edu.berkeley.cs.boom.molly.ast._ 10 | 11 | /** 12 | * Parser for the Dedalus language, which is syntactically similar to Datalog / Prolog. 13 | * Refer to the `molly.ast` package for the definitions of the types returned by this parser. 
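*
* A short example of the concrete syntax this parser accepts (illustrative):
* {{{
*   val prog = DedalusParser.parseProgram(
*     """log(Node, Pload) :- bcast(Node, Pload);
*       |log(Node, Pload)@next :- log(Node, Pload);""".stripMargin)
*   // prog.rules has two entries; prog.facts is empty
* }}}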
14 | * 15 | * For more information on Dedalus, see the "Dedalus: Datalog in Space and Time" tech report: 16 | * http://www.eecs.berkeley.edu/Pubs/TechRpts/2009/EECS-2009-173.html 17 | * 18 | * This parser is implemented using Kiama parser combinators; for more details, see 19 | * https://code.google.com/p/kiama/wiki/Parsing 20 | */ 21 | object DedalusParser extends PositionedParserUtilities { 22 | 23 | def parseProgram(str: CharSequence): Program = { 24 | parseAll(program, str).get 25 | } 26 | 27 | def parseProgramAndIncludes(includeSearchPath: File)(str: CharSequence): Program = { 28 | processIncludes(parseProgram(str), includeSearchPath) 29 | } 30 | 31 | // Define a bunch of constants / building blocks that will be used by later rules 32 | lazy val ident = "[a-zA-Z0-9._?@]+".r 33 | lazy val semi = ";" 34 | lazy val number = "[0-9]+".r ^^ { s => s.toInt} 35 | lazy val string = "\"[^\"]*\"".r ^^ { s => s.stripPrefix("\"").stripSuffix("\"")} 36 | lazy val followsfrom = ":-" 37 | lazy val timesuffix: Parser[Time] = 38 | "@next" ^^ { _ => Next() } | 39 | "@async" ^^ { _ => Async() } | 40 | '@' ~> number ^^ Tick 41 | lazy val op = "==" | "!=" | "+" | "-" | "/" | "*" | "<" | ">" | "<=" | ">=" 42 | 43 | // Define the language of expressions that can appear in rule bodies 44 | lazy val constant: Parser[Constant] = 45 | string ^^ StringLiteral | 46 | number ^^ IntLiteral | 47 | ident ^^ Identifier 48 | lazy val expr: Parser[Expr] = constant ~ op ~ exprOrConstant ^^ { case c ~ o ~ e => Expr(c, o, e)} 49 | lazy val exprOrConstant: Parser[Expression] = expr | constant 50 | lazy val aggregate = ident ~ "<" ~ ident ~ ">" ^^ { 51 | case aggName ~ "<" ~ aggCol ~ ">" => Aggregate(aggName, aggCol) 52 | } 53 | 54 | // A program is a collection of rules and facts 55 | lazy val program: Parser[Program] = rep(clause) ^^ { clauses => 56 | Program(clauses.collect { case r: Rule => r }, 57 | clauses.collect { case p: Predicate => p }, 58 | clauses.collect { case i: Include => i }) 59 | } 60 | lazy val clause: Parser[Clause] = include | rule | fact 61 | lazy val include = "include" ~> string <~ semi ^^ Include 62 | lazy val fact = head <~ semi 63 | lazy val rule = head ~ followsfrom ~ body <~ semi ^^ { 64 | case head ~ followsfrom ~ body => Rule(head, body) 65 | } 66 | 67 | lazy val head = predicate 68 | lazy val body = repsep(bodyTerm, ",") 69 | lazy val bodyTerm: Parser[Either[Predicate, Expr]] = 70 | predicate ^^ { Left(_) } | expr ^^ { Right(_) } 71 | 72 | lazy val predicate = opt("notin") ~ ident ~ "(" ~ repsep(atom, ",") ~ ")" ~ opt(timesuffix) ^^ { 73 | case notin ~ tableName ~ "(" ~ cols ~ ")" ~ time => 74 | Predicate(tableName, cols, notin.isDefined, time) 75 | } 76 | lazy val atom = aggregate | exprOrConstant | constant 77 | 78 | // See https://stackoverflow.com/questions/5952720 79 | override val whiteSpace = """(\s|//.*|(?m)/\*(\*(?!/)|[^*])*\*/)+""".r 80 | 81 | private def processIncludes(program: Program, includeSearchPath: File): Program = { 82 | val includes = program.includes.map { include => 83 | val includeFile = new File(includeSearchPath, include.file) 84 | val newProg = DedalusParser.parseProgram(Source.fromFile(includeFile).getLines().mkString("\n")) 85 | processIncludes(newProg, includeSearchPath) 86 | } 87 | Program( 88 | program.rules ++ includes.flatMap(_.rules), 89 | program.facts ++ includes.flatMap(_.facts), 90 | program.includes, 91 | program.tables 92 | ) 93 | } 94 | } -------------------------------------------------------------------------------- 
/src/main/scala/edu/berkeley/cs/boom/molly/symmetry/SymmetryChecker.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.symmetry 2 | 3 | import edu.berkeley.cs.boom.molly.ast.{Predicate, Program, StringLiteral} 4 | import edu.berkeley.cs.boom.molly.{DedalusType, DedalusTyper, FailureSpec} 5 | 6 | import com.typesafe.scalalogging.LazyLogging 7 | 8 | /** 9 | * Decides whether two failure scenarios are equivalent. 10 | * 11 | * This solves the following decision problem: 12 | * 13 | * Given a Datalog program P and an EDB, is it the case that for all EDB' that are isomorphic 14 | * to the original EDB w.r.t. some function f, the models P(EDB) and P(EDB') are isomorphic 15 | * w.r.t. that same f? 16 | * 17 | * In our case, we only consider isomorphisms that change the values of location-typed attributes 18 | * in the EDB. 19 | */ 20 | class SymmetryChecker(program: Program, nodes: List[String]) extends LazyLogging { 21 | 22 | private type EDB = Set[Predicate] 23 | private type TableTypes = Map[String, List[DedalusType]] 24 | 25 | private val typesForTable: TableTypes = { 26 | val fs = FailureSpec(1, 0, 0, nodes) // TODO: shouldn't have to add dummy clocks to typecheck 27 | val tables = DedalusTyper.inferTypes(fs.addClockFacts(program)).tables 28 | tables.map { t => (t.name, t.types)}.toMap 29 | } 30 | 31 | private val locationLiteralsThatAppearInRules: Set[String] = { 32 | val predicates = program.rules.flatMap(_.bodyPredicates).filter(_.tableName != "clock") 33 | predicates.collect { case pred@Predicate(table, cols, _, _) => 34 | val colTypes = typesForTable(table) 35 | pred.cols.zip(colTypes).collect { 36 | case (StringLiteral(l), DedalusType.LOCATION) => { 37 | logger.debug(s"Location literal '$l' appears in rule defining '$table'") 38 | l 39 | } 40 | } 41 | }.flatten.toSet 42 | } 43 | 44 | private val possiblySymmetricBasedOnRules: List[String] = { 45 | (nodes.toSet -- locationLiteralsThatAppearInRules).toList 46 | } 47 | 48 | if (possiblySymmetricBasedOnRules.isEmpty) { 49 | logger.warn("No candidates for symmetry due to location literals in rules") 50 | } else { 51 | logger.debug(s"Candidates for symmetry are {${possiblySymmetricBasedOnRules.mkString(", ")}}") 52 | } 53 | 54 | // It's necessary, but not sufficient, that the symmetries are unifiers of the EDBs minus the 55 | // clock and crash facts. So, we pre-compute a set of symmetries for the fixed portion of the 56 | // EDB, and then we only need to check symmetry of the clock and crash relations once we're 57 | // comparing two failure specs. 
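// (Illustrative, assuming nodes {"a", "b", "c"} and a fully symmetric EDB:) the
// candidate remappings computed below are the non-identity permutations, e.g.
// Map("a" -> "b", "b" -> "a", "c" -> "c").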
58 | private val possiblySymmetricForStableEDB: Seq[Map[String, String]] = { 59 | // The `drop` here is so that we skip the identity mapping: 60 | val remappings = possiblySymmetricBasedOnRules.permutations.drop(1).map { p => 61 | possiblySymmetricBasedOnRules.zip(p).toMap } 62 | val edb = program.facts.toSet 63 | remappings.filter { m => mapLocations(edb, typesForTable, m) == edb }.toSeq 64 | } 65 | 66 | if (possiblySymmetricForStableEDB.isEmpty) { 67 | logger.debug(s"None of the candidates provide stable EDB symmetry") 68 | } else { 69 | logger.debug(s"Candidates that provide stable EDB symmetry are {${possiblySymmetricForStableEDB.mkString(", ")}}") 70 | } 71 | 72 | /** 73 | * @return true if the two failure specifications are equivalent (according to the definition 74 | * listed above) 75 | */ 76 | def areEquivalentForEDB(a: FailureSpec, b: FailureSpec): Boolean = { 77 | if (possiblySymmetricForStableEDB.isEmpty) return false 78 | require (a.nodes == nodes && b.nodes == nodes) 79 | if (a == b) return true 80 | val aEDB: EDB = a.generateClockFacts.toSet 81 | val bEDB: EDB = b.generateClockFacts.toSet 82 | possiblySymmetricForStableEDB.exists { m => mapLocations(aEDB, typesForTable, m) == bEDB } 83 | } 84 | 85 | /** 86 | * Apply a function to location-valued EDB columns 87 | */ 88 | private def mapLocations( 89 | edb: EDB, 90 | typesForTable: TableTypes, 91 | f: PartialFunction[String, String]): EDB = { 92 | edb.map { case fact @ Predicate(table, cols, _, _) => 93 | val colTypes = typesForTable(table) 94 | val newCols = cols.zip(colTypes).map { 95 | case (StringLiteral(loc), DedalusType.LOCATION) if f.isDefinedAt(loc) => StringLiteral(f(loc)) 96 | case (c, _) => c 97 | } 98 | fact.copy(cols = newCols) 99 | } 100 | } 101 | } -------------------------------------------------------------------------------- /src/test/resources/examples_ft/ramp/ramp.ded: -------------------------------------------------------------------------------- 1 | // RAMPFast 2 | // we'll ignore the complexity of key-node mappings by making keys and nodes identical; 3 | // each node hosts a single key 4 | 5 | include "encoding.ded"; 6 | include "sequence.ded"; 7 | include "ramp_assert.ded"; 8 | include "ramp_edb.ded"; 9 | 10 | 11 | // client logic 12 | 13 | do_prepare(Cli, Keys, Val, Server, Ts) :- write(Cli, Keys, Val), lencoding(Cli, Keys, Server), seq(Cli, "Main", Ts); 14 | prepare(Server, Keys, Val, Timestamp, Cli)@async :- do_prepare(Cli, Keys, Val, Server, Timestamp); 15 | seq_bump(Cli, "Main") :- do_prepare(Cli, _, _, _, _); 16 | 17 | begin(Cli, Val, Keys, Server, Ts) :- do_prepare(Cli, Keys, Val, Server, Ts); 18 | begin(Cli, Val, Keys, Server, Ts)@next :- begin(Cli, Val, Keys, Server, Ts), notin commit_ack(Cli, Server, Ts); 19 | //outstanding(Cli, Val, Server, Ts)@next :- do_prepare(Cli, _, Val, Server, Ts); 20 | outstanding(Cli, Val, Server, Ts) :- begin(Cli, Val, _, Server, Ts), notin prepare_ok(Cli, Server, Ts); 21 | 22 | ready(Cli, Ts)@next :- begin(Cli, Val, _, Server, Ts), notin outstanding(Cli, _, _, Ts); 23 | commit(Server, Cli, Timestamp)@async :- ready(Cli, Ts), begin(Cli, _, _, Server, Timestamp); 24 | 25 | begin_read(Cli, Keys) :- read(Cli, Keys); 26 | begin_read(C, K)@next :- begin_read(C, K); 27 | 28 | need(Cli, Server, "none", 0) :- read(Cli, Keys), lencoding(Cli, Keys, Server); 29 | get(Server, Cli, C2, Ts)@async :- need(Cli, Server, C2, Ts); 30 | has_need(C, S, C2, Ts) :- need(C,S,C2,Ts); 31 | has_need(C, S, C2, Ts)@next :- has_need(C,S,C2,Ts), notin responses(C, S, _, _, _,_); 32 | 33 | 34 |
responses(Cli, Server, Keys, Val, Cli2, Ts) :- get_resp(Cli, Server, Keys, Val, Cli2, Ts); 35 | responses(C, S, K, V, C2, Ts)@next :- responses(C, S, K, V, C2, Ts); 36 | 37 | associated_keys(Cli, Server, S2) :- responses(Cli, Server, Keys, _, _, _), lencoding(Cli, Keys, S2); 38 | 39 | 40 | // interpret: the value you read at server Server was superceded by ts 41 | superceded(Cli, S2, Cli2, Ts) :- responses(Cli, Server, _, _, Cli2, Ts), responses(Cli, S2, _, _, C3, Ts2), 42 | associated_keys(Cli, S2, Server), 43 | notin has_need(Cli, _, _, _), 44 | Ts > Ts2; 45 | 46 | superceded(Cli, S2, Cli2, Ts) :- responses(Cli, Server, _, _, Cli2, Ts), responses(Cli, S2, _, _, C3, Ts), 47 | notin has_need(Cli, _, _, _), 48 | associated_keys(Cli, S2, Server), 49 | cli_num(Cli, Cli2, Id2), cli_num(Cli, C3, Id3), 50 | Id2 > Id3; 51 | 52 | 53 | 54 | //get(Server, Cli, C2, Ts)@async :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), superceded(Cli, Server, C2, Ts); 55 | need(Cli, Server, C2, Ts)@next :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), superceded(Cli, Server, C2, Ts); 56 | //need(Cli, Server, C2, Ts)@async :- begin_read(Cli, Keys), lencoding(Cli, Keys, Server), superceded(Cli, Server, C2, Ts); 57 | 58 | 59 | //unsat(Cli, 60 | /// blocking read for all keys 61 | read_response(Cli, Server, Keys, Val, C2, Ts) :- responses(Cli, Server, Keys, Val, C2, Ts), notin has_need(Cli, _, _, _); 62 | 63 | //begin_read(Cli, Keys), lencoding(Cli , Keys, Server), get_resp(Cli, Server, Val, Cli2, Ts) 64 | 65 | // server logic 66 | 67 | prepare_ok(Cli, Server, Timestamp)@async :- prepare(Server, _, _, Timestamp, Cli); 68 | 69 | versions(Server, Cli, Keys, Timestamp, Val) :- prepare(Server, Keys, Val, Timestamp, Cli); 70 | versions(S, C, K, T, V)@next :- versions(S, C, K, T, V); 71 | 72 | 73 | //do_commit(Server, Cli, Timestamp) :- commit(Server, Cli, Timestamp), notin commit(Server, Cli2, Ts2), Cli != Cli2; 74 | commit_ack(Cli, Server, Ts)@async :- commit(Server, Cli, Ts); 75 | commit_log(Server, Cli, Timestamp) :- commit(Server, Cli, Timestamp); 76 | commit_log(Server, Cli, Timestamp)@next :- commit_log(Server, Cli, Timestamp); 77 | 78 | highest_timestamp(Server, max<Timestamp>) :- commit_log(Server, _, Timestamp); 79 | winner(Server, max<Id>, Ts) :- highest_timestamp(Server, Ts), commit_log(Server, Cli, Ts), cli_num(Server, Cli, Id); 80 | last_commit(Server, Cli, Keys, Ts, Val) :- winner(Server, Id, Ts), cli_num(Server, Cli, Id), versions(Server, Cli, Keys, Ts, Val); 81 | 82 | get_resp(Cli, Server, Keys, Val, Cli2, Ts)@async :- get(Server, Cli, "none", 0), last_commit(Server, Cli2, Keys, Ts, Val); 83 | get_resp(Cli, Server, Keys, Val, C2, Ts)@async :- get(Server, Cli, C2, Ts), //commit_log(Server, C2, Ts), 84 | versions(Server, C2, Keys, Ts, Val); 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/derivations/Solver.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.derivations 2 | 3 | import edu.berkeley.cs.boom.molly.FailureSpec 4 | import com.codahale.metrics.MetricRegistry 5 | import edu.berkeley.cs.boom.molly.util.SetUtils 6 | import nl.grons.metrics.scala.{MetricName, MetricBuilder} 7 | import scala.language.implicitConversions 8 | import com.typesafe.scalalogging.LazyLogging 9 | 10 | /** 11 | * SolverVariables are used to map message losses and failures into SAT / SMT formula variables.
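* For example (illustrative): losing the message from "a" to "b" at time 2 is modeled
* by the variable MessageLoss("a", "b", 2), and a crash of node "c" at time 3 by
* CrashFailure("c", 3).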
12 | */ 13 | sealed trait SolverVariable 14 | case class CrashFailure(node: String, time: Int) extends SolverVariable 15 | // Note that NeverCrashed variables are placeholders used in SAT constraints; they can safely 16 | // be dropped from resulting solutions prior to consumption by downstream code. 17 | case class NeverCrashed(node: String) extends SolverVariable 18 | case class MessageLoss(from: String, to: String, time: Int) extends SolverVariable { 19 | require (from != to, "Can't lose messages sent to self") 20 | } 21 | case class Not(v: SolverVariable) extends SolverVariable 22 | 23 | /** 24 | * Interface for pluggable SAT / SMT solver backends. 25 | */ 26 | trait Solver extends LazyLogging { 27 | 28 | /** 29 | * Given the derivation of a good outcome, computes a set of potential falsifiers of that outcome. 30 | * 31 | * @param failureSpec a description of failures. 32 | * @param goals a list of goals whose derivations we'll attempt to falsify 33 | * @param messages a list of messages sent during the program's execution 34 | * @param seed a set of message failures and crashes that we already know have occurred, 35 | * e.g. from previous runs. 36 | * @return all solutions to the SAT problem, formulated as failure specifications 37 | */ 38 | def solve( 39 | failureSpec: FailureSpec, 40 | goals: List[GoalNode], 41 | messages: Seq[Message], 42 | seed: Set[SolverVariable] = Set.empty) 43 | //(implicit metricRegistry: MetricRegistry): Set[FailureSpec] = { 44 | (implicit metricRegistry: MetricRegistry): Seq[FailureSpec] = { 45 | 46 | 47 | implicit val metrics = new MetricBuilder(MetricName(getClass), metricRegistry) 48 | 49 | val firstMessageSendTimes = messages.groupBy(_.from).mapValues(_.minBy(_.sendTime).sendTime) 50 | val models = goals.flatMap{ goal => solve(failureSpec, goal, firstMessageSendTimes, seed)}.toSet 51 | logger.info(s"Problem has ${models.size} solutions") 52 | logger.debug(s"Solutions are:\n${models.map(_.toString()).mkString("\n")}") 53 | val minimalModels: Seq[Set[SolverVariable]] = SetUtils.minimalSets(models.toSeq) 54 | logger.info(s"SAT problem has ${minimalModels.size} minimal solutions") 55 | logger.debug(s"Minimal SAT solutions are:\n${minimalModels.map(_.toString()).mkString("\n")}") 56 | //minimalModels.flatMap(vars => Solver.solutionToFailureSpec(failureSpec, vars)).toSet 57 | minimalModels.flatMap(vars => Solver.solutionToFailureSpec(failureSpec, vars)) 58 | } 59 | 60 | /** 61 | * Solver method implemented by subclasses. 62 | */ 63 | protected def solve( 64 | failureSpec: FailureSpec, 65 | goal: GoalNode, 66 | firstMessageSendTimes: Map[String, Int], 67 | seed: Set[SolverVariable]) 68 | (implicit metrics: MetricBuilder): Traversable[Set[SolverVariable]] 69 | } 70 | 71 | object Solver { 72 | 73 | /** 74 | * Convert a solver solution into a failure spec. 75 | * 76 | * @param originalFailureSpec the original failure specification that defines admissible failures 77 | * @param solution the variables in the SAT solution 78 | * @return a FailureSpec corresponding to this solution 79 | */ 80 | def solutionToFailureSpec( 81 | originalFailureSpec: FailureSpec, 82 | solution: Set[SolverVariable]): Option[FailureSpec] = { 83 | val crashes = solution.collect { case cf: CrashFailure => cf } 84 | // If the seed contained a message loss, then it's possible that the SAT solver found 85 | // a solution where that message's sender crashes before that message loss. 
86 | // Such message losses are redundant, so we'll remove them: 87 | def subsumedByCrash(ml: MessageLoss) = 88 | crashes.collectFirst { 89 | case cf @ CrashFailure(ml.from, t) if t <= ml.time => cf 90 | case cf @ CrashFailure(ml.to, t) if t + 1 >= ml.time => cf 91 | }.isDefined 92 | val omissions = solution.collect { case ml: MessageLoss => ml }.filterNot(subsumedByCrash) 93 | if (crashes.isEmpty && omissions.isEmpty) { 94 | None 95 | } else { 96 | Some(originalFailureSpec.copy (crashes = crashes, omissions = omissions)) 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | #config.vm.box = "base" 16 | #config.vm.box = "hashicorp/precise64" 17 | config.vm.box = "ubuntu/xenial64" 18 | 19 | # Disable automatic box update checking. If you disable this, then 20 | # boxes will only be checked for updates when the user runs 21 | # `vagrant box outdated`. This is not recommended. 22 | # config.vm.box_check_update = false 23 | 24 | # Create a forwarded port mapping which allows access to a specific port 25 | # within the machine from a port on the host machine. In the example below, 26 | # accessing "localhost:8080" will access port 80 on the guest machine. 27 | # config.vm.network "forwarded_port", guest: 80, host: 8080 28 | 29 | # Create a private network, which allows host-only access to the machine 30 | # using a specific IP. 31 | # config.vm.network "private_network", ip: "192.168.33.10" 32 | 33 | # Create a public network, which generally matched to bridged network. 34 | # Bridged networks make the machine appear as another physical device on 35 | # your network. 36 | # config.vm.network "public_network" 37 | 38 | # Share an additional folder to the guest VM. The first argument is 39 | # the path on the host to the actual folder. The second argument is 40 | # the path on the guest to mount the folder. And the optional third 41 | # argument is a set of non-required options. 42 | # config.vm.synced_folder "../data", "/vagrant_data" 43 | 44 | # Provider-specific configuration so you can fine-tune various 45 | # backing providers for Vagrant. These expose provider-specific options. 46 | # Example for VirtualBox: 47 | # 48 | config.vm.provider "virtualbox" do |vb| 49 | # # Display the VirtualBox GUI when booting the machine 50 | # vb.gui = true 51 | # 52 | # # Customize the amount of memory on the VM: 53 | vb.memory = "2048" 54 | end 55 | # 56 | # View the documentation for the provider you are using for more 57 | # information on available options. 58 | 59 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 60 | # such as FTP and Heroku are also available. See the documentation at 61 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 
62 | # config.push.define "atlas" do |push| 63 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 64 | # end 65 | 66 | # Enable provisioning with a shell script. Additional provisioners such as 67 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 68 | # documentation for more information about their specific syntax and use. 69 | config.vm.provision "shell", inline: <<-SHELL 70 | #apt-get install software-properties-common python-software-properties 71 | #add-apt-repository ppa:jonathonf/gcc-7.1 72 | apt-get update 73 | apt-get install -y git 74 | apt-get install -y build-essential 75 | apt-get install -y cmake 76 | sudo apt-get install -y libapr1 libapr1-dev libaprutil1-dev sqlite libsqlite3-dev 77 | sudo apt-get install -y flex bison python 78 | wget http://apt.typesafe.com/repo-deb-build-0002.deb 79 | sudo dpkg -i repo-deb-build-0002.deb 80 | sudo apt-get update 81 | sudo apt-get install -y sbt openjdk-8-jdk 82 | SHELL 83 | #config.vm.provision :file, source: "../box-of-pain", destination: "box-of-pain" 84 | #config.vm.provision :shell, path: "box.sh", privileged: false 85 | 86 | config.vm.provision "shell", privileged: false, inline: <<-SHELL 87 | echo "deb https://dl.bintray.com/sbt/debian /" | sudo tee -a /etc/apt/sources.list.d/sbt.list 88 | sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2EE0EA64E40A89B84B2DF73499E82A75642AC823 89 | sudo apt-get update 90 | sudo apt-get install -y sbt 91 | git clone https://github.com/palvaro/molly.git 92 | cd molly/lib 93 | git clone https://github.com/Z3Prover/z3.git 94 | cd .. 95 | export PYTHON=python3 96 | make 97 | echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/molly/lib/z3/build/z3-dist/lib/:~/molly/lib/c4/build/src/libc4/" > env.sh 98 | SHELL 99 | end 100 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/ast/AST.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.ast 2 | 3 | import org.kiama.util.TreeNode 4 | 5 | import edu.berkeley.cs.boom.molly.DedalusType 6 | 7 | /** 8 | * An atom is an element that can appear in a predicate, such as a variable or aggregate. 9 | */ 10 | sealed trait Atom extends TreeNode 11 | sealed trait Expression extends Atom 12 | sealed trait Constant extends Expression 13 | 14 | case class Expr(left: Constant, op: String, right: Expression) extends Expression { 15 | /** 16 | * Returns the complete set of variable identifiers that appear anywhere in this expression.
17 | */ 18 | def variables: Set[Identifier] = { 19 | val rightVariables: Set[Identifier] = right match { 20 | case i: Identifier => Set(i) 21 | case e: Expr => e.variables 22 | case _ => Set.empty 23 | } 24 | val leftVariables: Set[Identifier] = left match { 25 | case i: Identifier => Set(i) 26 | case _ => Set.empty 27 | } 28 | leftVariables ++ rightVariables 29 | } 30 | } 31 | 32 | case class StringLiteral(str: String) extends Constant 33 | case class IntLiteral(int: Int) extends Constant 34 | case class Identifier(name: String) extends Constant 35 | 36 | case class Aggregate(aggName: String, aggColumn: String) extends Atom 37 | 38 | case class Program( 39 | rules: List[Rule], 40 | facts: List[Predicate], 41 | includes: List[Include], 42 | tables: Set[Table] = Set() 43 | ) extends TreeNode 44 | 45 | case class Table(name: String, types: List[DedalusType]) { 46 | types.headOption.foreach { t => 47 | assert(t == DedalusType.LOCATION, 48 | s"First column of a table must have type LOCATION, but found $t") 49 | } 50 | } 51 | 52 | sealed trait Clause extends TreeNode 53 | case class Include(file: String) extends Clause 54 | case class Rule(head: Predicate, body: List[Either[Predicate, Expr]]) extends Clause { 55 | def bodyPredicates: List[Predicate] = body.collect { case Left(pred) => pred } 56 | def bodyQuals: List[Expr] = body.collect { case Right(expr) => expr } 57 | def variablesWithIndexes: List[(String, (String, Int))] = { 58 | (List(head) ++ bodyPredicates).flatMap(_.topLevelVariablesWithIndices) 59 | } 60 | def variables: Set[String] = { 61 | variablesWithIndexes.map(_._1).toSet 62 | } 63 | /** Variables that are bound in the body (i.e. appear more than once) */ 64 | def boundVariables: Set[String] = { 65 | val allVars = 66 | bodyPredicates.flatMap(_.topLevelVariables.toSeq) ++ bodyQuals.flatMap(_.variables).map(_.name).toSeq 67 | allVars.groupBy(identity).mapValues(_.size).filter(_._2 >= 2).keys.toSet 68 | } 69 | // Match the Ruby solver's convention that a predicate's location column always appears 70 | // as the first column of its first body predicate. 71 | val locationSpecifier = bodyPredicates(0).cols(0) 72 | 73 | def isAsync: Boolean = head.time == Some(Async()) 74 | } 75 | 76 | /** 77 | * Represents a Dedalus predicate. 78 | * 79 | * @param tableName the name of this predicate (i.e. the table that it helps to define) 80 | * @param cols the columns of the predicate 81 | * @param notin true if this predicate is negated, false otherwise 82 | * @param time an optional temporal annotation 83 | */ 84 | case class Predicate( 85 | tableName: String, 86 | cols: List[Atom], 87 | notin: Boolean, 88 | time: Option[Time] 89 | ) extends Clause { 90 | 91 | /** 92 | * The set of variable names that appear at the top-level of this predicate (i.e. not in 93 | * aggregates or expressions). 94 | */ 95 | def topLevelVariables: Set[String] = { 96 | topLevelVariablesWithIndices.map(_._1).toSet 97 | } 98 | 99 | /** 100 | * For each variable that appears at the top-level of this predicate (i.e. not in 101 | * aggregates or expressions), returns a `(variableName, (tableName, colNumber))` tuple, 102 | * where `tableName` is the name of this predicate. 103 | */ 104 | def topLevelVariablesWithIndices: List[(String, (String, Int))] = { 105 | cols.zipWithIndex.collect { 106 | case (Identifier(i), index) if i != "_" => (i, (tableName, index)) 107 | } 108 | } 109 | 110 | /** 111 | * The set of variable names that appear in aggregates in this predicate.
112 | */ 113 | def aggregateVariables: Set[String] = { 114 | cols.collect { case Aggregate(_, aggCol) => aggCol}.toSet 115 | } 116 | 117 | /** 118 | * The set of variable names that appear in expressions in this predicate. 119 | */ 120 | def expressionVariables: Set[String] = { 121 | cols.collect { case e: Expr => e }.flatMap(_.variables).map(_.name).toSet 122 | } 123 | } 124 | 125 | sealed trait Time extends TreeNode 126 | case class Next() extends Time 127 | case class Async() extends Time 128 | case class Tick(number: Int) extends Time -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/paperexperiments/TableOfCounterexamples.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.paperexperiments 2 | 3 | import java.io.File 4 | import com.github.tototoshi.csv.CSVWriter 5 | import edu.berkeley.cs.boom.molly.{FailureSpec, RunStatus, SyncFTChecker, Config} 6 | import com.codahale.metrics.MetricRegistry 7 | 8 | /** 9 | * Generates the data for the table of counterexamples in the paper. 10 | */ 11 | object TableOfCounterexamples { 12 | 13 | val NUM_RANDOM_RUNS = 25 14 | 15 | val programs = Seq( 16 | // ("Input programs", "eot", "eff", "crashes", "nodes") 17 | (Seq("delivery/simplog.ded", "delivery/deliv_assert.ded"), 4, 2, 0, Seq("a", "b", "c")), // AKA simple-deliv 18 | (Seq("delivery/rdlog.ded", "delivery/deliv_assert.ded"), 4, 2, 1, Seq("a", "b", "c")), // AKA retry-deliv 19 | (Seq("delivery/classic_rb.ded", "delivery/deliv_assert.ded"), 5, 3, 0, Seq("a", "b", "c")), // AKA classic-deliv 20 | (Seq("commit/2pc.ded", "commit/2pc_assert.ded"), 5, 0, 1, Seq("a", "b", "C", "d")), 21 | (Seq("commit/2pc_ctp.ded", "commit/2pc_assert.ded"), 8, 0, 1, Seq("a", "b", "C", "d")), 22 | (Seq("commit/3pc.ded", "commit/2pc_assert.ded"), 9, 7, 1, Seq("a", "b", "C", "d")), 23 | (Seq("kafka.ded"), 6, 4, 1, Seq("a", "b", "c", "C", "Z")) 24 | ) 25 | 26 | /** 27 | * Run the analyzer until it finds the first counterexample. 28 | * 29 | * @param config the analyzer and program configuration 30 | * @return (runtime (seconds), num runs) 31 | */ 32 | def runUntilFirstCounterexample(config: Config): (Double, Int) = { 33 | System.gc() // Run a full GC so we don't count time spent cleaning up earlier runs. 34 | val metrics: MetricRegistry = new MetricRegistry() 35 | var runsCount = 0 36 | val startTime = System.currentTimeMillis() 37 | var runs = SyncFTChecker.check(config, metrics) // An ephemeral stream 38 | while (!runs.isEmpty) { 39 | runsCount += 1 40 | val result = runs.head.apply() 41 | if (result.status == RunStatus("failure")) { 42 | val duration = (System.currentTimeMillis() - startTime) / 1000.0 43 | return (duration, runsCount) 44 | } else { 45 | runs = runs.tail() 46 | } 47 | } 48 | throw new IllegalStateException("Should have found a counterexample for config " + config) 49 | } 50 | 51 | /** 52 | * Warm up the JVM so we get more accurate timing for the first experiment. 
53 | */ 54 | private def warmup() { 55 | val (inputPrograms, eot, eff, crashes, nodes) = programs.head 56 | val inputFiles = inputPrograms.map(name => new File("src/test/resources/examples_ft/" + name)) 57 | val config = Config(eot, eff, crashes, nodes, inputFiles, strategy = "random", 58 | useSymmetry = false, disableDotRendering = true) 59 | val metrics: MetricRegistry = new MetricRegistry() 60 | val runs = SyncFTChecker.check(config, metrics) // An ephemeral stream 61 | runs.take(100).toArray // Force evaluation 62 | } 63 | 64 | def main(args: Array[String]) { 65 | warmup() 66 | val csvFile = new File("table_of_counterexamples.csv") 67 | val csvWriter = CSVWriter.open(csvFile) 68 | val header = Seq("program", "eot", "eff", "crashes", "bound", "mean_random_exe", "mean_random_time", 69 | "all_random_data", "backward_exe", "backward_time", "symm_exe", "symm_time", "causal_exe", "causal_time") 70 | csvWriter.writeRow(header) 71 | try { 72 | for ((inputPrograms, eot, eff, crashes, nodes) <- programs) { 73 | val inputFiles = inputPrograms.map(name => new File("src/test/resources/examples_ft/" + name)) 74 | val randomConfig = Config(eot, eff, crashes, nodes, inputFiles, strategy = "random", 75 | useSymmetry = false, disableDotRendering = true) 76 | val backwardConfig = randomConfig.copy(strategy = "sat") 77 | val symmetryConfig = backwardConfig.copy(useSymmetry = true) 78 | val causalConfig = backwardConfig.copy(strategy = "pcausal") 79 | val grossEstimate = FailureSpec(eot, eff, crashes, nodes.toList).grossEstimate 80 | val randomResults = (1 to NUM_RANDOM_RUNS).map { _ => runUntilFirstCounterexample(randomConfig)} 81 | val meanRandomTime = randomResults.map(_._1).sum / (1.0 * NUM_RANDOM_RUNS) 82 | val meanRandomExe = randomResults.map(_._2).sum / (1.0 * NUM_RANDOM_RUNS) 83 | val (backwardTime, backwardExe) = runUntilFirstCounterexample(backwardConfig) 84 | val (symmetryTime, symmetryExe) = runUntilFirstCounterexample(symmetryConfig) 85 | //val (causalTime, causalExe) = runUntilFirstCounterexample(causalConfig) 86 | csvWriter.writeRow(Seq(inputPrograms, eot, eff, crashes, grossEstimate, meanRandomExe, 87 | meanRandomTime, randomResults, backwardExe, backwardTime, symmetryExe, symmetryTime, 0, 0.0)) 88 | } 89 | } finally { 90 | csvWriter.close() 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/test/resources/examples_ft/raft/raft.ded: -------------------------------------------------------------------------------- 1 | include "raft_edb.ded"; 2 | include "raft_assert.ded"; 3 | include "election.ded"; 4 | //include "../util/queue.ded"; 5 | 6 | // need to do client retries, obvs. 
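// Request path (summarizing the rules below): a client sends client_request to the
// node it believes is the leader; followers buffer the request, issue an ephemeral
// redirect toward their current leader, and assume the client retries; a buffered
// request stays in cli_req_buf until it is redirected or committed (finish).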
7 | client_request(Server, Cli, Cid, Req)@async :- new_request(Cli, Server, Cid, Req); 8 | client_request(Leader, Cli, Cid, Req)@async :- redirect(Cli, Server, Leader, Cid, Req); 9 | 10 | // ephemeral redirects; assume clients retry 11 | do_redirect(Server, Cli, Leader, Cid, Req) :- cli_req_buf(Server, Cli, Cid, Req), role(Server, "F"), leader(Server, _, Leader); 12 | redirect(C,S,L,I,R)@async :- do_redirect(S,C,L,I,R); 13 | finish(S, I) :- do_redirect(S, _, _, I, _); 14 | finish(S, I) :- commit(S, _, Idx), running(S, _, _, I); 15 | 16 | cli_req_buf(S,C,I,R) :- client_request(S,C,I,R); 17 | cli_req_buf(S,C,I,R)@next :- cli_req_buf(S,C,I,R), notin finish(S, I); 18 | buf_bot(S, min) :- cli_req_buf(S, _, I, _); 19 | 20 | dispatch(S, T, R, I)@next :- cli_req_buf(S, C, I, R), current_term(S, T), role(S, "L"), buf_bot(S, I), notin running(S, _, _, _), notin log(S, _, _, _, R); 21 | running(S, T, R, I)@next :- dispatch(S, T, R, I); 22 | running(S, T, R, I)@next :- running(S, T, R, I), notin finish(S, I); 23 | 24 | 25 | real_append_entries(N,T,L,I,P,R,C) :- append_entries(N,T,L,I,P,R,C), R != "NOOP"; 26 | 27 | problem(Node, Term, Idx) :- real_append_entries(Node, Term, Leader, Idx, _, _, _), 28 | current_term(Node, MyTerm), MyTerm > Term; 29 | 30 | problem(Node, Term, Idx) :- real_append_entries(Node, Term, Leader, Idx, PrevTerm, _, _), 31 | notin log(Node, Idx, PrevTerm, _, _); 32 | 33 | append_reply(Leader, Node, Term, Idx, "false")@async :- real_append_entries(Node, Term, Leader, Idx, PrevTerm, _, _), 34 | problem(Node, Term, Idx); 35 | 36 | append_reply(Leader, Node, Term, Idx, "true")@async :- real_append_entries(Node, Term, Leader, Idx, PrevTerm, _, _), 37 | notin problem(Node, Term, Idx); 38 | 39 | 40 | 41 | 42 | ack_log(L,N,T,I) :- append_reply(L,N,T,I,"true"); 43 | ack_log(L,N,T,I)@next :- ack_log(L,N,T,I); 44 | 45 | ack_cnt(Leader, Term, Idx, count) :- ack_log(Leader, Node, Term, Idx), member(Leader, Node, Nid); 46 | //member_cnt(Leader, count) :- member(Leader, Member, Id); 47 | 48 | safe(Leader, Term, Idx + 1) :- ack_cnt(Leader, Term, Idx, Cnt1), member_cnt(Leader, Cnt2), Cnt1 > Cnt2 / 2; 49 | 50 | // the leader heartbeat 51 | 52 | ae_info(Leader, Term, PrevLogIdx, PrevLogTerm, LeaderCommit) :- role(Leader, "L"), term(Leader, Term), 53 | log_indx(Leader, PrevLogIdx), log_term(Leader, PrevLogTerm), commit_indx(Leader, LeaderCommit); 54 | 55 | do_ae(Leader, Term, PrevLogIdx, Prevlogterm, LeaderCommit, "NOOP") :- 56 | ae_info(Leader, Term, PrevLogIdx, Prevlogterm, LeaderCommit), notin running(Leader, Term, _, _); 57 | 58 | do_ae(Leader, Term, PrevLogIdx, Prevlogterm, LeaderCommit, Req) :- 59 | ae_info(Leader, Term, PrevLogIdx, Prevlogterm, LeaderCommit), running(Leader, Term, Req, _); 60 | 61 | append_entries(Node, Term, Leader, PrevLogIdx, PrevLogTerm, Req, LeaderCommit)@async :- 62 | do_ae(Leader, Term, PrevLogIdx, PrevLogTerm, LeaderCommit, Req), member(Leader, Node, _); 63 | 64 | 65 | append_log(Node, Term, LeaderId, Prevlogindex, PrevLogterm, Entries, Leadercommit, Rcv)@next :- 66 | append_log(Node, Term, LeaderId, Prevlogindex, PrevLogterm, Entries, Leadercommit, Rcv); 67 | 68 | append_log(Node, Term, LeaderId, Prevlogindex, Prevlogterm, Entries, Leadercommit, Time) :- 69 | append_entries(Node, Term, LeaderId, Prevlogindex, Prevlogterm, Entries, Leadercommit),//, term_time(Node, Term, Time), 70 | lclock(Node, "Localtime", Term, Time); 71 | 72 | commit(Node, Term, Idx) :- safe(Node, Term, Idx), notin log(Node, Idx, Term, _, _); 73 | commit(Node, Term, Idx + 1) :- 
real_append_entries(Node, Term, Leader, Idx, _, _, _), notin problem(Node, Term, Idx), Node != Leader; 74 | 75 | 76 | 77 | log_indx(Node, max) :- log(Node, Idx, _, _, _); 78 | log_term(Node, max) :- log(Node, _, Term, _, _); 79 | log(Node, Idx, Term, Leader, Entry)@next :- log(Node, Idx, Term, Leader, Entry), notin log_del(Node, Idx, Term); 80 | log(Node, Idx, Term, Leader, Entry)@next :- commit(Node, Term, Idx), 81 | append_log(Node, Term, Leader, Prev, _, Entry, _, _), 82 | Idx == Prev + 1, 83 | Entry != "NOOP"; 84 | 85 | 86 | // a stub till I figure out commit indexes 87 | //commit_indx(N, I) :- log_term(N, I); 88 | commit_indx(Node, Idx)@next :- commit_indx(Node, Idx), notin real_append_entries(Node, _, _, _, _, _, _), notin commit(Node, Term, Idx); 89 | //last_new_entry(Node, min) :- append_entries(Node, _, _, 90 | keep(Node, Idx)@next :- commit_indx(Node, Idx), real_append_entries(Node, _, _, OldIdx, _, _, Lc), Idx > OldIdx; 91 | //commit_indx(Node, Idx+1)@next :- commit_indx(Node, Idx), notin keep(Node, Idx), 92 | // real_append_entries(Node, _, _, _, _, _, _), notin role(Node, "L"); 93 | 94 | //commit_indx(Node, Idx+1)@next :- role(Node, "L"), commit(Node, _, Idx); 95 | commit_indx(Node, max) :- commit(Node, _, Idx); -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/derivations/SAT4JSolver.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.derivations 2 | 3 | import org.sat4j.minisat.SolverFactory 4 | import edu.berkeley.cs.boom.molly.FailureSpec 5 | import scala.collection.mutable 6 | import org.sat4j.specs.IVecInt 7 | import org.sat4j.core.VecInt 8 | import org.sat4j.tools.ModelIterator 9 | import scala.collection.mutable.ArrayBuffer 10 | import scala.language.implicitConversions 11 | import nl.grons.metrics.scala.MetricBuilder 12 | 13 | //import optimus.optimization._ 14 | 15 | import pprint.Config.Defaults._ 16 | 17 | import sext._ 18 | 19 | object SAT4JSolver extends Solver { 20 | 21 | protected def solve(failureSpec: FailureSpec, goal: GoalNode, 22 | firstMessageSendTimes: Map[String, Int], seed: Set[SolverVariable]) 23 | (implicit metrics: MetricBuilder): 24 | Traversable[Set[SolverVariable]] = { 25 | val solver = SolverFactory.newLight() 26 | val idToSatVariable = mutable.HashMap[Int, SolverVariable]() 27 | val satVariableToId = mutable.HashMap[SolverVariable, Int]() 28 | 29 | implicit def satVarToInt(satVar: SolverVariable): Int = { 30 | val id = satVariableToId.getOrElseUpdate(satVar, { 31 | satVar match { 32 | case Not(v) => -1 * satVarToInt(v) 33 | case _ => solver.nextFreeVarId(true) 34 | } 35 | }) 36 | idToSatVariable(id) = satVar 37 | id 38 | } 39 | implicit def satVarsToVecInt(clause: Iterable[SolverVariable]): IVecInt = 40 | new VecInt(clause.map(satVarToInt).toArray) 41 | 42 | var timer = System.currentTimeMillis(); 43 | val bf = BooleanFormula(goal.booleanFormula).simplifyAll.flipPolarity 44 | logger.debug(s"initial formula \n${bf.treeString}") 45 | logger.debug(s"${System.currentTimeMillis()-timer} millis -- simplification") 46 | timer = System.currentTimeMillis(); 47 | val formula = bf.convertToCNFAll 48 | logger.debug(s"${System.currentTimeMillis()-timer} millis -- CNF") 49 | 50 | val importantNodes: Set[String] = 51 | formula.root.vars.filter(_._3 < failureSpec.eot).map(_._1).toSet ++ 52 | seed.collect { case cf: CrashFailure => cf.node } 53 | if (importantNodes.isEmpty) { 54 | logger.debug(s"Goal ${goal.tuple} 
has no important nodes; skipping SAT solver") 55 | return Set.empty 56 | } else { 57 | logger.debug(s"Goal ${goal.tuple} has important nodes $importantNodes") 58 | } 59 | 60 | // Add constraints to ensure that each node crashes at a single time, or never crashes: 61 | for (node <- importantNodes) { 62 | // There's no point in considering crashes before the first time that a node sends a message, 63 | // since all such scenarios will be equivalent to crashing when sending the first message: 64 | val firstSendTime = firstMessageSendTimes.getOrElse(node, 1) 65 | // Create one variable for every time at which the node could crash 66 | val crashVars = (firstSendTime to failureSpec.eot - 1).map(t => CrashFailure(node, t)) 67 | // (an alternative would be to bound crash times by the EFF rather than by EOT - 1) 68 | // Include any crashes specified in the seed, since they might be excluded by the 69 | // "no crashes before the first message was sent" constraint: 70 | val seedCrashes = seed.collect { case c: CrashFailure => c } 71 | // An extra variable for scenarios where the node didn't crash: 72 | val neverCrashed = NeverCrashed(node) 73 | // Each node crashes at a single time, or never crashes: 74 | solver.addExactly((crashVars ++ seedCrashes).toSet ++ Seq(neverCrashed), 1) 75 | } 76 | // If there are at most C crashes, then at least (N - C) nodes never crash: 77 | solver.addAtLeast(failureSpec.nodes.map(NeverCrashed), failureSpec.nodes.size - failureSpec.maxCrashes) 78 | 79 | for (disjunct <- formula.conjuncts.conjunctz; 80 | if disjunct.disjuncts.nonEmpty 81 | ) { 82 | val messageLosses = disjunct.disjuncts.map(MessageLoss.tupled) 83 | val crashes = messageLosses.flatMap { loss => 84 | val firstSendTime = firstMessageSendTimes.getOrElse(loss.from, 1) 85 | val crashTimes = firstSendTime to loss.time 86 | crashTimes.map(t => CrashFailure(loss.from, t)) 87 | } 88 | // Require that at least one of the clause's message losses occurs, or that the sender 89 | // of one of those messages crashes at or before the send time (which subsumes the loss): 90 | 91 | solver.addClause(messageLosses ++ crashes) 92 | } 93 | 94 | // Assume any failures that have already occurred (the seed), and disallow message losses at or after the EFF: 95 | val noLateLosses = formula.root.vars.filter(_._3 >= failureSpec.eff).map(MessageLoss.tupled).map(Not) 96 | val assumptions = seed ++ noLateLosses 97 | 98 | val models = ArrayBuffer[Set[SolverVariable]]() 99 | metrics.timer("sat4j-time").time { 100 | val modelIterator = new ModelIterator(solver) 101 | while (modelIterator.isSatisfiable(assumptions)) { 102 | val newModel = modelIterator.model().filter(_ > 0).map(idToSatVariable).toSet 103 | // Exclude models where no failures or crashes occurred: 104 | if (newModel.exists(!_.isInstanceOf[NeverCrashed])) { 105 | models += newModel 106 | } 107 | } 108 | } 109 | solver.reset() // Required to allow the solver to be GC'ed.
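// Note: the models returned here need not be minimal; the Solver.solve driver
// minimizes them (via SetUtils.minimalSets) before building FailureSpecs.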
110 | //logger.de("RETURNING with " + models.size) 111 | models 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /demo_html/run_0_provenance.dot: -------------------------------------------------------------------------------- 1 | digraph dataflow { 2 | {rank="same"; goal12, goal24} 3 | 4 | rule0 -> goal2 [color="black"]; 5 | goal7 -> rule2 [color="black"]; 6 | goal11 [label="pre(c, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 7 | rule5 -> goal10 [color="black"]; 8 | rule7 -> goal16 [color="black"]; 9 | goal13 [label="member(a, b, 1)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 10 | rule8 -> goal17 [color="black"]; 11 | rule9 -> goal19 [color="black"]; 12 | goal9 -> rule3 [color="black"]; 13 | goal11 -> rule4 [color="black"]; 14 | rule10 -> goal21 [color="black"]; 15 | rule8 -> goal18 [color="black"]; 16 | goal9 [label="log(c, __WILDCARD__, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 17 | rule9 -> goal20 [color="black"]; 18 | goal14 [label="clock(a, b, 1, 2)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 19 | goal21 -> rule9 [color="black"]; 20 | rule6 -> goal14 [color="black"]; 21 | goal5 -> rule1 [color="black"]; 22 | goal24 [label="post(b, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 23 | rule8 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 24 | rule4 [label="pre_prov3",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 25 | rule4 -> goal10 [color="black"]; 26 | rule1 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 27 | goal5 [label="log(c, Hello world!, 3)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 28 | rule6 -> goal13 [color="black"]; 29 | rule11 -> goal22 [color="black"]; 30 | rule5 -> goal11 [color="black"]; 31 | goal17 -> rule7 [color="black"]; 32 | goal16 [label="clock(b, __WILDCARD__, 2, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 33 | rule6 [label="log_prov2",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 34 | rule10 [label="pre_prov3",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 35 | goal19 -> rule8 [color="black"]; 36 | rule1 -> goal3 [color="black"]; 37 | rule6 -> goal0 [color="black"]; 38 | goal19 [label="log(b, Hello world!, 4)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 39 | rule1 -> goal4 [color="black"]; 40 | rule2 -> goal5 [color="black"]; 41 | goal7 [label="log(c, Hello world!, 4)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 42 | rule3 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 43 | goal3 [label="log(c, Hello world!, 2)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 44 | rule0 -> goal0 [color="black"]; 45 | rule5 [label="post_prov4",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 46 | goal12 -> rule5 [color="black"]; 47 | rule0 -> goal1 [color="black"]; 48 | rule0 [label="log_prov2",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 49 | rule2 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 50 | goal1 [label="member(a, c, 1)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 51 | goal6 [label="clock(c, __WILDCARD__, 3, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 52 | goal12 
[label="post(c, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 53 | goal17 [label="log(b, Hello world!, 3)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 54 | rule2 -> goal6 [color="black"]; 55 | goal20 [label="clock(b, __WILDCARD__, 4, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 56 | goal0 [label="bcast(a, Hello world!, 1)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 57 | goal24 -> rule11 [color="black"]; 58 | goal3 -> rule0 [color="black"]; 59 | rule3 -> goal8 [color="black"]; 60 | goal10 [label="clock(c, c, 5, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 61 | rule11 [label="post_prov4",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 62 | goal18 [label="clock(b, __WILDCARD__, 3, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 63 | goal23 [label="pre(b, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 64 | rule7 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 65 | rule9 [label="log_prov1",shape="rect",fontcolor="black",color="black",fillcolor="white"]; 66 | rule4 -> goal9 [color="black"]; 67 | goal15 -> rule6 [color="black"]; 68 | goal2 [label="clock(a, c, 1, 2)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 69 | rule3 -> goal7 [color="black"]; 70 | rule11 -> goal23 [color="black"]; 71 | rule7 -> goal15 [color="black"]; 72 | rule10 -> goal22 [color="black"]; 73 | goal21 [label="log(b, __WILDCARD__, 5)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 74 | goal4 [label="clock(c, __WILDCARD__, 2, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 75 | goal23 -> rule10 [color="black"]; 76 | goal22 [label="clock(b, b, 5, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 77 | goal15 [label="log(b, Hello world!, 2)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 78 | goal8 [label="clock(c, __WILDCARD__, 4, __WILDCARD__)",style="filled",fontcolor="black",color="black",fillcolor="white"]; 79 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Molly: Lineage-driven Fault Injection 2 | 3 | ## Installation 4 | 5 | Molly is written in Scala and compiled using SBT. Molly depends on the [C4 Overlog runtime](https://github.com/bloom-lang/c4), [Z3 theorem prover](https://z3.codeplex.com/), and the [Optimus LP solver](https://github.com/vagm/Optimus). Molly will install local copies of C4 and Z3 automatically. Note that C4 requires CMake version 2.8 or later. 6 | 7 | The top-level `Makefile` should be handle a one-click build on OS X. 8 | 9 | ### Linux 10 | 11 | Linux users may need to install the [Optimus LP solver](https://github.com/vagm/Optimus) separately. Note Molly requires either the lp\_solve or the Gurobi (or both) installation options. Be sure to publish Optimus locally ("$ sbt publishLocal") before attempting to run Molly. 12 | 13 | ## Running 14 | 15 | Add the native library dependencies to your loader path. On OS X: 16 | 17 | ``` 18 | export LD_LIBRARY_PATH=lib/c4/build/src/libc4/:lib/z3/build/z3-dist/lib/ 19 | ``` 20 | 21 | Run 22 | 23 | ``` 24 | sbt "run-main edu.berkeley.cs.boom.molly.SyncFTChecker" 25 | ``` 26 | 27 | and see the usage message for more details. 
28 | 29 | ### Example 30 | 31 | In this directory, run 32 | 33 | ``` 34 | sbt "run-main edu.berkeley.cs.boom.molly.SyncFTChecker \ 35 | src/test/resources/examples_ft/delivery/simplog.ded \ 36 | src/test/resources/examples_ft/delivery/deliv_assert.ded \ 37 | --EOT 4 \ 38 | --EFF 2 \ 39 | --nodes a,b,c \ 40 | --crashes 0 \ 41 | --prov-diagrams" 42 | ``` 43 | 44 | Molly will find a counterexample. The `./output` directory will contain an HTML report which shows visualizations that explain the counterexample and the program lineage that was used to find it. To view this report: 45 | 46 | - Safari users: just open `index.html` 47 | - Chrome / Firefox users: due to these browsers' same-origin policies, you need to start up a local web server to host the resources loaded by this page. Just run `python -m SimpleHTTPServer` in the output directory, then browse to the local address that it prints. 48 | 49 | 50 | #### Programs 51 | 52 | Programs are submitted in the Dedalus language. Dedalus is a distributed variant of Datalog: program statements are if-then rules of the form: 53 | 54 | conclusion(bindings1)[@annotation] :- premise1(bindings2), premise2(bindings2) [...], notin premisen(bindings3), [...]; 55 | 56 | The conclusions and premises are relations; any variables in the conclusion (bindings1) must be bound in the body. Premises may be positive or negative; negative premises are preceded by "notin", and all of their variables (bindings3) must also be bound 57 | in positive premises. 58 | 59 | Conclusions can have temporal annotations of the following forms: 60 | 61 | * @next -- the conclusions hold at the *successor* time. 62 | * @async -- the conclusions hold at an undefined time. 63 | * (no annotation) -- the conclusions hold whenever the premises hold. 64 | 65 | The first attribute of every relation is a *location specifier* indicating the identity of a network endpoint. 66 | 67 | The first two rules in simplog.ded are *persistence rules*. They ensure that the contents of the node and log relations persist over time: 68 | 69 | node(Node, Neighbor)@next :- node(Node, Neighbor); 70 | log(Node, Pload)@next :- log(Node, Pload); 71 | 72 | The next rule says that for every pair of records in bcast and node that agree in their first column, there should (at some 73 | unknown time) be a record in log that takes its first column from the second column of the node record, and its second column 74 | from the second column of the bcast record. Intuitively, this captures multicast communication: when some Node1 has a bcast record, for 75 | every Node2 about which it knows, it forwards the payload of that record to Node2. 76 | 77 | log(Node2, Pload)@async :- bcast(Node1, Pload), node(Node1, Node2); 78 | 79 | 80 | Finally, the last line says that any node that receives a broadcast should put it in its log: 81 | 82 | log(Node, Pload) :- bcast(Node, Pload); 83 | 84 | #### Specifications 85 | 86 | Molly needs a way to check whether injected failures actually violated program correctness properties. A natural way to express such properties is as an implication of the form "*If* some precondition holds, *then* some postcondition must hold." For example, the broadcast protocol described above can succinctly be expressed in the following way: 87 | 88 | * _Precondition_: *Any* correct process delivers a message *m* 89 | * _Postcondition_: *All* correct processes deliver *m* 90 | 91 | Any execution in which the precondition holds but the postcondition does not is a counterexample to the correctness property.
Executions in which the precondition does not hold (we can always find one by dropping all messages) are vacuously correct. 92 | 93 | 94 | You may specify correctness properties by providing rules that define two special relations: 95 | 96 | * pre() 97 | * post() 98 | 99 | For example: 100 | 101 | pre(X, Pl) :- log(X, Pl), notin bcast(X, Pl)@1, notin crash(X, X, _); 102 | 103 | For every node X that has a payload Pl in its log, there is a record (X, Pl) in pre, provided that X was not the original broadcaster and X did not crash. 104 | 105 | post(X, Pl) :- log(X, Pl), notin missing_log(_, Pl); 106 | missing_log(A, Pl) :- log(X, Pl), node(X, A), notin log(A, Pl); 107 | 108 | There is a record (X, Pl) in post if some node X has a payload Pl in its log and *no* node is missing Pl from its log. 109 | 110 | 111 | ## More information 112 | 113 | Molly is described in a [SIGMOD paper](http://people.ucsc.edu/~palvaro/molly.pdf). 114 | 115 | Dedalus is described [here](http://www.eecs.berkeley.edu/Pubs/TechRpts/2009/EECS-2009-173.html). 116 | 117 | -------------------------------------------------------------------------------- /src/test/scala/edu/berkeley/cs/boom/molly/CounterexampleSuite.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import org.scalactic.Explicitly 4 | import org.scalatest.prop.TableDrivenPropertyChecks 5 | import org.scalatest.tags.Slow 6 | import org.scalatest.{FlatSpec, Matchers, PropSpec} 7 | import java.io.File 8 | import com.codahale.metrics.MetricRegistry 9 | import Explicitly._ 10 | 11 | 12 | @Slow 13 | class CounterexampleSuite extends PropSpec with TableDrivenPropertyChecks with Matchers { 14 | // Each scenario row: (input programs, EOT, EFF, nodes, max crashes, expect a counterexample?) 15 | val examplesFTPath = SyncFTChecker.getClass.getClassLoader.getResource("examples_ft").getPath 16 | 17 | val scenarios = Table( 18 | ("Input programs", "eot", "eff", "nodes", "crashes", "should find counterexample"), 19 | (Seq("delivery/simplog.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 0, true), 20 | (Seq("delivery/rdlog.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 0, false), 21 | (Seq("delivery/rdlog.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 1, true), 22 | // classic reliable broadcast fails in the omission model 23 | (Seq("delivery/classic_rb.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 0, true), 24 | // but is robust in the fail-stop model. 25 | 26 | (Seq("delivery/classic_rb.ded", "delivery/deliv_assert.ded"), 6, 0, Seq("a", "b", "c"), 2, false), 27 | (Seq("delivery/replog.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 0, false), 28 | (Seq("delivery/replog.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 1, false), 29 | (Seq("delivery/ack_rb.ded", "delivery/deliv_assert.ded"), 6, 3, Seq("a", "b", "c"), 1, false), 30 | (Seq("commit/2pc.ded", "commit/2pc_assert.ded"), 7, 3, Seq("a", "b", "C", "d"), 0, false), 31 | (Seq("commit/2pc.ded", "commit/2pc_assert.ded"), 6, 3, Seq("a", "b", "C", "d"), 1, true), 32 | // naive commit/2pc has executions that don't decide even if the model is fail-stop.
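// (For example: if the coordinator crashes after collecting votes but before
// broadcasting its decision, participants that voted yes are blocked forever.)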
33 | (Seq("commit/2pc.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 1, true), 34 | (Seq("commit/2pc.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 2, true), 35 | 36 | // indeed, even if we ignore executions where the coordinator fails: 37 | (Seq("commit/2pc.ded", "commit/2pc_assert_optimist.ded"), 6, 0, Seq("a", "b", "C", "d"), 1, true), 38 | (Seq("commit/2pc.ded", "commit/2pc_assert_optimist.ded"), 6, 0, Seq("a", "b", "C", "d"), 2, true), 39 | // with timeout+abort at the coordinator, we get termination when the coordinator doesn't fail 40 | (Seq("commit/2pc_timeout.ded", "commit/2pc_assert_optimist.ded"), 6, 0, Seq("a", "b", "C", "d"), 1, false), 41 | (Seq("commit/2pc_timeout.ded", "commit/2pc_assert_optimist.ded"), 6, 0, Seq("a", "b", "C", "d"), 2, false), 42 | // but honestly... 43 | (Seq("commit/2pc_timeout.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 1, true), 44 | (Seq("commit/2pc_timeout.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 2, true), 45 | 46 | // even the collaborative termination protocol has executions that don't decide. 47 | (Seq("commit/2pc_ctp.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 1, true), 48 | (Seq("commit/2pc_ctp.ded", "commit/2pc_assert.ded"), 6, 0, Seq("a", "b", "C", "d"), 2, true), 49 | 50 | // commit/3pc (yay?) is "nonblocking" in the synchronous, fail-stop model 51 | (Seq("commit/3pc.ded", "commit/2pc_assert.ded"), 8, 0, Seq("a", "b", "C", "d"), 1, false), 52 | (Seq("commit/3pc.ded", "commit/2pc_assert.ded"), 8, 0, Seq("a", "b", "C", "d"), 2, false), 53 | 54 | // somewhat surprised though that we can't break it's synchronicity assumptions by dropping messages... 55 | (Seq("commit/3pc.ded", "commit/2pc_assert.ded"), 9, 7, Seq("a", "b", "C", "d"), 1, true), 56 | 57 | 58 | //(Seq("tokens.ded"), 6, 3, Seq("a", "b", "c", "d"), 1, true), 59 | //(Seq("tokens.ded"), 6, 3, Seq("a", "b", "c", "d"), 0, false), 60 | 61 | // simulating the kafka bug 62 | (Seq("kafka.ded"), 7, 4, Seq("a", "b", "c", "C", "Z"), 1, true), 63 | (Seq("kafka.ded"), 7, 4, Seq("a", "b", "c", "C", "Z"), 0, false) 64 | ) 65 | 66 | 67 | property("SAT guided search should correctly find counterexamples") { 68 | 69 | forAll(scenarios) { (inputPrograms: Seq[String], eot: Int, eff: Int, nodes: Seq[String], crashes: Int, shouldFindCounterexample: Boolean) => 70 | val inputFiles = inputPrograms.map(name => new File(examplesFTPath, name)) 71 | val config = Config(eot, eff, crashes, nodes, inputFiles) 72 | val metrics = new MetricRegistry 73 | val results = SyncFTChecker.check(config, metrics) 74 | val counterexamples = results.filter(_.status == RunStatus("failure")).map(_.failureSpec) 75 | 76 | if (shouldFindCounterexample) { 77 | counterexamples should not be empty 78 | } else { 79 | counterexamples should be (empty) 80 | } 81 | 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/DedalusRewrites.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly 2 | 3 | import edu.berkeley.cs.boom.molly.ast._ 4 | 5 | object DedalusRewrites { 6 | 7 | val dc = Identifier("_") 8 | val nreserved = Identifier("NRESERVED") 9 | val mreserved = Identifier("MRESERVED") 10 | 11 | /** 12 | * Modify a program's rules and facts to reference a clock relation. 
13 | */ 14 | def referenceClockRules(program: Program): Program = { 15 | def nextClock(loc: Atom) = 16 | Predicate("clock", List(loc, dc, nreserved, dc), notin = false, None) 17 | def localClock(loc: Atom) = 18 | Predicate("clock", List(loc, loc, nreserved, dc), notin = false, None) 19 | def asyncClock(from: Atom, to: Atom) = 20 | Predicate("clock", List(from, to, nreserved, mreserved), notin = false, None) 21 | 22 | def appendCol(col: Atom)(pred: Predicate): Predicate = 23 | pred.copy(cols = pred.cols :+ col) 24 | 25 | def rewriteHead(pred: Predicate, time: Time): Predicate = time match { 26 | case Next() => appendCol(Expr(nreserved, "+", IntLiteral(1)))(pred) 27 | case Async() => appendCol(mreserved)(pred) 28 | case Tick(t) => appendCol(IntLiteral(t))(pred) 29 | } 30 | 31 | def rewriteBodyElem(elem: Either[Predicate, Expr]): Either[Predicate, Expr] = 32 | elem.left.map { pred => 33 | pred.time match { 34 | case Some(Tick(t)) => appendCol(IntLiteral(t))(pred) 35 | case _ => appendCol(nreserved)(pred) 36 | } 37 | } 38 | 39 | def rewriteRule(rule: Rule): Rule = rule match { 40 | case Rule(head, body) => 41 | val loc = rule.locationSpecifier 42 | rule.head.time match { 43 | case None => 44 | // For local rules, we still need to reference the clock in order to guarantee that the 45 | // clock variable appears in a non-negated body predicate. We use localClock in order 46 | // to reduce the number of possible derivations. 47 | Rule(appendCol(nreserved)(head), body.map(rewriteBodyElem) ++ List(Left(localClock(loc)))) 48 | case Some(Next()) => 49 | Rule(rewriteHead(head, Next()), body.map(rewriteBodyElem) ++ List(Left(nextClock(loc)))) 50 | case Some(Async()) => 51 | val to = head.cols(0) 52 | Rule(rewriteHead(head, Async()), body.map(rewriteBodyElem) ++ List(Left(asyncClock(loc, to)))) 53 | case Some(Tick(_)) => 54 | throw new IllegalStateException("Rule heads cannot be annotated to hold at only a specific timestep") 55 | } 56 | } 57 | 58 | program.copy(rules = program.rules.map(rewriteRule), facts = program.facts.map(f => rewriteHead(f, f.time.get))) 59 | } 60 | 61 | /** 62 | * Splits a rule with an aggregation in its head into two separate rules, 63 | * one that binds variables and another that performs the aggregation. 64 | * 65 | * For example, the rule 66 | * agg(X, count) :- a(X, Z), b(Z, Y) 67 | * is rewritten as two rules: 68 | * agg_vars(X, Y, Z) :- a(X, Z), b(Z, Y) 69 | * agg(X, count) :- agg_vars(X, Y, _) 70 | */ 71 | def splitAggregateRules(program: Program): Program = { 72 | val (rules, aggRules) = program.rules.partition(_.head.aggregateVariables.isEmpty) 73 | program.copy(rules = rules ++ aggRules.flatMap(splitAggregateRule)) 74 | } 75 | 76 | private def splitAggregateRule(rule: Rule): Seq[Rule] = { 77 | assert(rule.head.aggregateVariables.nonEmpty, "Expected rule with aggregation") 78 | val ruleSansAgg = 79 | rule.copy(head = rule.head.copy(cols = rule.head.cols.filterNot(_.isInstanceOf[Aggregate]))) 80 | val varsRule = recordAllVariableBindings(ruleSansAgg, ruleSansAgg.head.tableName + "_vars") 81 | val aggRuleBody = varsRule.head.cols.collect { 82 | case i @ Identifier(x) => 83 | if (rule.head.topLevelVariables.contains(x) || rule.head.aggregateVariables.contains(x)) i 84 | else dc 85 | } 86 | val aggRule = rule.copy(body = List(Left(varsRule.head.copy(time = None, cols = aggRuleBody)))) 87 | Seq(aggRule, varsRule) 88 | } 89 | 90 | /** 91 | * Modify a rule head to record ALL variables, even ones that are only bound in aggregates.
92 | */ 93 | private def recordAllVariableBindings(rule: Rule, newTableName: String): Rule = { 94 | val newVariables = 95 | (rule.head.expressionVariables ++ rule.variables) -- rule.head.topLevelVariables 96 | // Produce a new head, preserving the last time column: 97 | val newHead = rule.head.copy(tableName = newTableName, 98 | cols = rule.head.cols.take(rule.head.cols.size - 1) ++ 99 | newVariables.map(Identifier).filter(_ != dc) ++ List(rule.head.cols.last)) 100 | rule.copy(head = newHead) 101 | } 102 | 103 | /** 104 | * Modify a rule head to record bound variables. 105 | */ 106 | private def recordBoundVariables(rule: Rule, newTableName: String): Rule = { 107 | val newVariables = 108 | (rule.head.expressionVariables ++ rule.boundVariables) -- rule.head.topLevelVariables 109 | // Produce a new head, preserving the last time column: 110 | val newHead = rule.head.copy(tableName = newTableName, 111 | cols = rule.head.cols.take(rule.head.cols.size - 1) ++ 112 | newVariables.map(Identifier).filter(_ != dc) ++ List(rule.head.cols.last)) 113 | rule.copy(head = newHead) 114 | } 115 | 116 | /** 117 | * Add rules and rewrite rule bodies to record provenance. 118 | */ 119 | def addProvenanceRules(program: Program): Program = { 120 | val provenanceRules = program.rules.zipWithIndex.map { case (rule, number) => 121 | recordBoundVariables(rule, rule.head.tableName + "_prov" + number) 122 | } 123 | program.copy(rules = program.rules ++ provenanceRules) 124 | } 125 | 126 | } 127 | -------------------------------------------------------------------------------- /src/main/scala/edu/berkeley/cs/boom/molly/derivations/BooleanFormula.scala: -------------------------------------------------------------------------------- 1 | package edu.berkeley.cs.boom.molly.derivations 2 | 3 | 4 | trait BFNode[T] { 5 | def simplify: BFNode[T] 6 | def convertToCNF: BFNode[T] 7 | def vars: Set[T] 8 | def flipPolarity: BFNode[T] 9 | def clauses: Int 10 | 11 | } 12 | 13 | trait BinaryBFNode[T] extends BFNode[T] { 14 | def left: BFNode[T] 15 | def right: BFNode[T] 16 | def construct(l: BFNode[T], r: BFNode[T]): BFNode[T] 17 | def flip(l: BFNode[T], r: BFNode[T]): BFNode[T] 18 | 19 | override def clauses: Int = { 20 | 1 + left.clauses + right.clauses 21 | } 22 | 23 | override def vars = { 24 | left.vars ++ right.vars 25 | } 26 | 27 | override def flipPolarity: BFNode[T] = { 28 | flip(left.flipPolarity, right.flipPolarity) 29 | } 30 | 31 | override def simplify: BFNode[T] = { 32 | (left, right) match { 33 | case (BFLiteral(None), BFLiteral(None)) => BFLiteral(None) 34 | case (BFLiteral(None), c) => c.simplify 35 | case (c, BFLiteral(None)) => c.simplify 36 | case (c: BFLiteral[T], d: BFLiteral[T]) => { 37 | if (c == d) { 38 | c 39 | } else { 40 | construct(c, d) 41 | } 42 | } 43 | 44 | case _ => construct(left.simplify, right.simplify) 45 | } 46 | } 47 | } 48 | 49 | // Conjunction node: the formula is true iff both children are true. 50 | case class BFAndNode[T](left:BFNode[T], right:BFNode[T]) extends BinaryBFNode[T] { 51 | 52 | override def construct(l: BFNode[T], r: BFNode[T]): BFNode[T] = { 53 | BFAndNode(l, r) 54 | } 55 | 56 | def flip(l: BFNode[T], r: BFNode[T]): BFNode[T] = { 57 | BFOrNode(l, r) 58 | } 59 | 60 | 61 | override def convertToCNF(): BFNode[T] = { 62 | // stay put, but convert your children to CNF.
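// An AND of CNF subformulas is itself in CNF; the distribution step that can blow up
// the formula happens in BFOrNode.convertToCNF below.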
63 | construct(left.convertToCNF, right.convertToCNF) 64 | } 65 | 66 | def findConjuncts: CNFFlat[T] = { 67 | val newL: Set[Disjuncts[T]] = left match { 68 | case a: BFAndNode[T] => a.findConjuncts.conjunctz 69 | case o: BFOrNode[T] => Set(o.findDisjuncts) 70 | case BFLiteral(Some(l)) => Set(Disjuncts(Set(l))) 71 | } 72 | 73 | val newR: Set[Disjuncts[T]] = right match { 74 | case a: BFAndNode[T] => a.findConjuncts.conjunctz 75 | case o: BFOrNode[T] => Set(o.findDisjuncts) 76 | case BFLiteral(Some(l)) => Set(Disjuncts(Set(l))) 77 | //case l: BFLiteral[T] => Set(l) 78 | } 79 | 80 | CNFFlat(newL ++ newR) 81 | } 82 | } 83 | case class BFOrNode[T](left:BFNode[T], right:BFNode[T]) extends BinaryBFNode[T] { 84 | 85 | override def construct(l: BFNode[T], r: BFNode[T]): BFNode[T] = { 86 | BFOrNode(l, r) 87 | } 88 | 89 | //override 90 | def flip(l: BFNode[T], r: BFNode[T]): BFNode[T] = { 91 | BFAndNode(l, r) 92 | } 93 | 94 | override def convertToCNF(): BFNode[T] = { 95 | val ret = (left, right) match { 96 | case (BFAndNode(l, r), _) => { 97 | val newRight = right.convertToCNF 98 | BFAndNode(BFOrNode(newRight, l.convertToCNF), BFOrNode(newRight, r.convertToCNF)) 99 | } 100 | case (_, BFAndNode(l2, r2)) => { 101 | val newLeft = left.convertToCNF 102 | BFAndNode(BFOrNode(newLeft, l2.convertToCNF), BFOrNode(newLeft, r2.convertToCNF)) 103 | } 104 | case (l, r) => BFOrNode(l.convertToCNF, r.convertToCNF) 105 | } 106 | ret 107 | } 108 | 109 | def findDisjuncts: Disjuncts[T] = { 110 | Disjuncts(vars) 111 | } 112 | } 113 | 114 | case class BFLiteral[T](v:Option[T]) extends BFNode[T] { 115 | override def simplify: BFNode[T] = { 116 | BFLiteral(v) 117 | } 118 | override def convertToCNF: BFNode[T] = { 119 | BFLiteral(v) 120 | } 121 | override def vars = { 122 | v match { 123 | case Some(c) => Set(c) 124 | case None => Set() 125 | } 126 | } 127 | 128 | override def flipPolarity = { 129 | BFLiteral(v) 130 | } 131 | 132 | override def clauses: Int = { 133 | 0 134 | } 135 | } 136 | 137 | 138 | 139 | /* Formula wrapper classes */ 140 | 141 | trait AbstractBooleanFormula[T] { 142 | def root: BFNode[T] 143 | def construct(node: BFNode[T]): AbstractBooleanFormula[T] 144 | 145 | def simplifyAll: AbstractBooleanFormula[T] = { 146 | // this can't be idiomatic 147 | var last: BFNode[T] = null 148 | var current = root 149 | while (last != current) { 150 | last = current 151 | current = current.simplify 152 | } 153 | construct(current) 154 | } 155 | 156 | def convertToCNFAll: CNFFormula[T] = { 157 | var last: BFNode[T] = null 158 | var current = root 159 | var iterations = 0 160 | while (last != current) { 161 | last = current 162 | println(s"starting iteration $iterations. 
${current.clauses}") 163 | current = current.convertToCNF 164 | println(s"Finished iteration $iterations ${current.vars}") 165 | iterations = iterations + 1 166 | } 167 | CNFFormula(current) 168 | } 169 | 170 | def flipPolarity(): AbstractBooleanFormula[T] = { 171 | construct(root.flipPolarity) 172 | } 173 | 174 | def clauses(): Int = { 175 | root.clauses 176 | } 177 | 178 | def vars(): Set[T] = { 179 | root.vars 180 | } 181 | } 182 | 183 | case class BooleanFormula[T](root: BFNode[T]) extends AbstractBooleanFormula[T] { 184 | def construct(node: BFNode[T]): AbstractBooleanFormula[T] = { 185 | BooleanFormula(node) 186 | } 187 | } 188 | 189 | case class Disjuncts[T](disjuncts: Set[T]) 190 | case class CNFFlat[T](conjunctz: Set[Disjuncts[T]]) 191 | 192 | case class CNFFormula[T](root: BFNode[T]) extends AbstractBooleanFormula[T] { 193 | def construct(node: BFNode[T]) = { 194 | CNFFormula(node) 195 | } 196 | 197 | def conjuncts: CNFFlat[T] = { 198 | root match { 199 | case a: BFAndNode[T] => a.findConjuncts 200 | case o: BFOrNode[T] => CNFFlat(Set(o.findDisjuncts)) 201 | case BFLiteral(Some(l)) => CNFFlat(Set(Disjuncts(Set(l)))) 202 | case _ => println(s"Unexpected CNF root: $root"); CNFFlat(Set(Disjuncts(Set()))) 203 | } 204 | } 205 | } -------------------------------------------------------------------------------- /demo_html/index.html: -------------------------------------------------------------------------------- [HTML report template ("SyncFT Results" page); the markup and script content were not preserved in this dump] --------------------------------------------------------------------------------