├── .github
└── workflows
│ └── main.yml
├── .gitignore
├── .gitmodules
├── CONTRIBUTING.md
├── LICENSE.TXT
├── README.md
├── build.sbt
├── docs
├── 0_quick_start.md
├── 1_1_knowledge_base.md
├── 1_2_evidence.md
├── 1_syntax.md
├── 2_1_inference_examples.md
├── 2_2_temporal_inference_examples.md
├── 2_inference.md
├── 3_1_weight_learning_examples.md
├── 3_2_temporal_weight_learning_examples.md
├── 3_weight_learning.md
├── 4_1_structure_learning_examples.md
├── 4_structure_learning.md
├── 5_supervision_completion.md
├── 6_tools.md
├── 7_1_build_and_link_lomrf.md
├── 7_2_download_example_data.md
├── 7_build_test_lomrf.md
├── 8_configuration.md
├── 9_references.md
└── index.md
├── mkdocs.yml
├── project
├── Dependencies.scala
├── Formatting.scala
├── LoMRFBuild.scala
├── build.properties
└── plugins.sbt
├── scripts
├── inc.env.bat
├── inc.env.sh
├── lomrf
├── lomrf.bat
├── mkDynamic
└── poor.mans.logger.sh
├── src
├── main
│ ├── resources
│ │ ├── logback-debug.xml
│ │ ├── logback.xml
│ │ └── reference.conf
│ └── scala
│ │ └── lomrf
│ │ ├── app
│ │ ├── CLIApp.scala
│ │ ├── InferenceCLI.scala
│ │ ├── KBCompilerCLI.scala
│ │ ├── KBDifferenceCLI.scala
│ │ ├── LoMRF.scala
│ │ ├── MRFWriterCLI.scala
│ │ ├── SemiSupervisionCLI.scala
│ │ ├── StructureLearningCLI.scala
│ │ ├── WeightLearningCLI.scala
│ │ └── package.scala
│ │ ├── logic
│ │ ├── AtomSignature.scala
│ │ ├── Clause.scala
│ │ ├── Formula.scala
│ │ ├── LogicOps.scala
│ │ ├── MLNExpression.scala
│ │ ├── MLNTypeDefinition.scala
│ │ ├── Substitutable.scala
│ │ ├── Term.scala
│ │ ├── TriState.scala
│ │ ├── Unify.scala
│ │ ├── compile
│ │ │ ├── LogicFormatter.scala
│ │ │ ├── NormalForm.scala
│ │ │ ├── PredicateCompletion.scala
│ │ │ └── package.scala
│ │ ├── dynamic
│ │ │ ├── DynamicAtom.scala
│ │ │ ├── DynamicAtomBuilder.scala
│ │ │ └── DynamicFunctionBuilder.scala
│ │ ├── package.scala
│ │ └── parser
│ │ │ ├── CommonsMLNParser.scala
│ │ │ ├── DomainParser.scala
│ │ │ ├── EvidenceParser.scala
│ │ │ └── KBParser.scala
│ │ ├── mln
│ │ ├── grounding
│ │ │ ├── AtomRegisterWorker.scala
│ │ │ ├── ClauseGrounder.scala
│ │ │ ├── ClauseGrounderImpl.scala
│ │ │ ├── ClauseLiteralsOrdering.scala
│ │ │ ├── CliqueRegisterWorker.scala
│ │ │ ├── GroundingMaster.scala
│ │ │ ├── GroundingWorker.scala
│ │ │ ├── MRFBuilder.scala
│ │ │ └── package.scala
│ │ ├── inference
│ │ │ ├── ILP.scala
│ │ │ ├── MCSAT.scala
│ │ │ ├── MaxWalkSAT.scala
│ │ │ ├── RoundingScheme.scala
│ │ │ └── Solver.scala
│ │ ├── learning
│ │ │ ├── TrainingEvidence.scala
│ │ │ ├── structure
│ │ │ │ ├── ClauseConstructor.scala
│ │ │ │ ├── CommonModeParser.scala
│ │ │ │ ├── Evaluator.scala
│ │ │ │ ├── ModeParser.scala
│ │ │ │ ├── OSL.scala
│ │ │ │ ├── OSLa.scala
│ │ │ │ ├── StructureLearner.scala
│ │ │ │ └── hypergraph
│ │ │ │ │ ├── HyperGraph.scala
│ │ │ │ │ ├── PathTemplate.scala
│ │ │ │ │ └── TemplateExtractor.scala
│ │ │ ├── supervision
│ │ │ │ ├── graph
│ │ │ │ │ ├── ExtNNGraph.scala
│ │ │ │ │ ├── GraphConnector.scala
│ │ │ │ │ ├── GraphOps.scala
│ │ │ │ │ ├── NNGraph.scala
│ │ │ │ │ ├── Node.scala
│ │ │ │ │ ├── SPLICE.scala
│ │ │ │ │ ├── StreamingGraph.scala
│ │ │ │ │ ├── SupervisionGraph.scala
│ │ │ │ │ ├── caching
│ │ │ │ │ │ ├── FastNodeCache.scala
│ │ │ │ │ │ ├── NodeCache.scala
│ │ │ │ │ │ ├── NodeHashSet.scala
│ │ │ │ │ │ ├── NodeHashStrategy.scala
│ │ │ │ │ │ └── SimpleNodeCache.scala
│ │ │ │ │ ├── package.scala
│ │ │ │ │ └── selection
│ │ │ │ │ │ ├── Clustering.scala
│ │ │ │ │ │ ├── LargeMarginNN.scala
│ │ │ │ │ │ └── NodeCluster.scala
│ │ │ │ └── metric
│ │ │ │ │ ├── AtomMetric.scala
│ │ │ │ │ ├── BinaryMetric.scala
│ │ │ │ │ ├── EvidenceMetric.scala
│ │ │ │ │ ├── Feature.scala
│ │ │ │ │ ├── IsolationForest.scala
│ │ │ │ │ ├── IsolationTree.scala
│ │ │ │ │ ├── MassMetric.scala
│ │ │ │ │ ├── Matcher.scala
│ │ │ │ │ ├── Metric.scala
│ │ │ │ │ └── package.scala
│ │ │ └── weight
│ │ │ │ ├── MaxMarginLearner.scala
│ │ │ │ └── OnlineLearner.scala
│ │ └── model
│ │ │ ├── AtomEvidenceDB.scala
│ │ │ ├── AtomIdentityFunction.scala
│ │ │ ├── ConstantsSet.scala
│ │ │ ├── Evidence.scala
│ │ │ ├── FunctionMapper.scala
│ │ │ ├── KB.scala
│ │ │ ├── MLN.scala
│ │ │ ├── MLNSchema.scala
│ │ │ ├── PredicateSpace.scala
│ │ │ ├── builders
│ │ │ ├── AtomEvidenceDBBuilder.scala
│ │ │ ├── ConstantsDomainBuilder.scala
│ │ │ ├── ConstantsSetBuilder.scala
│ │ │ ├── EvidenceBuilder.scala
│ │ │ ├── FunctionMapperBuilder.scala
│ │ │ └── KBBuilder.scala
│ │ │ ├── mrf
│ │ │ ├── Constraint.scala
│ │ │ ├── GroundAtom.scala
│ │ │ ├── MRF.scala
│ │ │ └── MRFState.scala
│ │ │ └── package.scala
│ │ ├── package.scala
│ │ └── util
│ │ ├── ArrayUtils.scala
│ │ ├── Cartesian.scala
│ │ ├── ImplFinder.scala
│ │ ├── LongDoubleConversions.scala
│ │ ├── NaturalComparator.scala
│ │ ├── collection
│ │ ├── GlobalIndexPartitioned.scala
│ │ ├── IndexPartitioned.scala
│ │ ├── KeyPartitioned.scala
│ │ ├── PartitionFetcher.scala
│ │ ├── Partitioner.scala
│ │ ├── mutable
│ │ │ ├── IndexPartitioned.scala
│ │ │ └── TuplesDB.scala
│ │ └── trove
│ │ │ ├── Implicits.scala
│ │ │ └── TroveConversions.scala
│ │ ├── evaluation
│ │ ├── Evaluate.scala
│ │ ├── Metrics.scala
│ │ └── package.scala
│ │ ├── logging
│ │ ├── Implicits.scala
│ │ └── LogbackColouredHighlighter.scala
│ │ ├── opt
│ │ ├── MasterOptionParser.scala
│ │ ├── OptionParser.scala
│ │ └── package.scala
│ │ └── package.scala
└── test
│ └── scala
│ └── lomrf
│ ├── logic
│ ├── AtomSignatureSpecTest.scala
│ ├── AtomSpecTest.scala
│ ├── ClauseSpecTest.scala
│ ├── SimilaritySpecTest.scala
│ ├── SubsumptionSpecTest.scala
│ ├── TermSpecTest.scala
│ ├── TriStateSpecTest.scala
│ ├── UnificationSpecTest.scala
│ ├── compile
│ │ ├── LogicFormatterSpecTest.scala
│ │ ├── NormalFormSpecTest.scala
│ │ └── PredicateCompletionSpecTest.scala
│ └── parser
│ │ ├── DomainParserSpecTest.scala
│ │ ├── EvidenceParserSpecTest.scala
│ │ └── KBParserSpecTest.scala
│ ├── mln
│ ├── grounding
│ │ ├── DependencyMapSpecTest.scala
│ │ └── GroundingSpecTest.scala
│ ├── inference
│ │ ├── ILPSpecTest.scala
│ │ ├── MCSATSpecTest.scala
│ │ └── MaxWalkSATSpecTest.scala
│ ├── learning
│ │ ├── structure
│ │ │ ├── ModeParserSpecTest.scala
│ │ │ └── hypergraph
│ │ │ │ ├── HPathSpecTest.scala
│ │ │ │ ├── HypergraphSpecTest.scala
│ │ │ │ └── PathTemplateSpecTest.scala
│ │ ├── supervision
│ │ │ ├── graph
│ │ │ │ ├── GraphConnectorSpecTest.scala
│ │ │ │ └── caching
│ │ │ │ │ └── NodeCacheSpecTest.scala
│ │ │ └── metric
│ │ │ │ ├── AtomMetricSpecTest.scala
│ │ │ │ └── EvidenceMetricSpecTest.scala
│ │ └── weight
│ │ │ └── MaxMarginSpecTest.scala
│ └── model
│ │ ├── AtomIdentityFunctionSpecTest.scala
│ │ └── builders
│ │ ├── ConstantsDomainBuilderSpecTest.scala
│ │ ├── ConstantsSetBuilderSpecTest.scala
│ │ ├── EvidenceBuilderSpecTest.scala
│ │ └── KBBuilderSpecTest.scala
│ ├── tests
│ ├── ECExampleDomain1.scala
│ └── TestData.scala
│ └── util
│ ├── CartesianSpecTest.scala
│ ├── LongDoubleSpecTest.scala
│ ├── collection
│ ├── GlobalIndexPartitionedSpecTest.scala
│ └── mutable
│ │ └── TuplesDBSpecTest.scala
│ └── evaluation
│ └── MetricsSpecTest.scala
└── version.sbt
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
name: build

on: [push, pull_request]

env:
  _JAVA_OPTIONS: "-Xms512m -Xmx4g"
  # NOTE(review): github.repository expands to "owner/repo"; if only the owner
  # is intended here, this should be github.repository_owner — TODO confirm.
  REPOSITORY_OWNER: ${{ github.repository }}
  # Native lp_solve JNI library location (see the "Install lp_solve java" step).
  LD_LIBRARY_PATH: "/usr/lib/lp_solve"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 1
      - name: Fetch example data
        shell: bash
        run: git submodule update --init
      - name: Set up JDK 1.8
        uses: actions/setup-java@v1
        with:
          java-version: 1.8
      - name: Cache SBT ivy cache
        uses: actions/cache@v1
        with:
          path: ~/.ivy2/cache
          key: ${{ runner.os }}-sbt-ivy-cache-${{ hashFiles('**/build.sbt') }}
      - name: Cache SBT
        uses: actions/cache@v1
        with:
          path: ~/.sbt
          key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }}
      - name: Install lp_solve
        run: sudo apt update && sudo apt install -y bash lp-solve wget zip unzip && sudo rm -rf /var/lib/apt/lists/*
      - name: Install lp_solve java
        run: (cd /tmp && wget https://sourceforge.net/projects/lpsolve/files/lpsolve/5.5.2.5/lp_solve_5.5.2.5_java.zip && unzip lp_solve_5.5.2.5_java.zip && sudo cp lp_solve_5.5_java/lib/ux64/liblpsolve55j.so /usr/lib/lp_solve && sudo ldconfig && sudo rm -rf /tmp/lp_solve_5.5_java && rm /tmp/lp_solve_5.5.2.5_java.zip)
      - name: Build for Scala 2.12
        run: sbt ++2.12.10 clean compile
      - name: Run Tests
        run: sbt ++2.12.10 test

  docker:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 1
      - name: Set up JDK 1.8
        uses: actions/setup-java@v1
        with:
          java-version: 1.8
      - name: Cache SBT ivy cache
        uses: actions/cache@v1
        with:
          path: ~/.ivy2/cache
          key: ${{ runner.os }}-sbt-ivy-cache-${{ hashFiles('**/build.sbt') }}
      - name: Cache SBT
        uses: actions/cache@v1
        with:
          path: ~/.sbt
          key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }}
      - name: Docker login
        uses: azure/docker-login@v1
        with:
          login-server: 'docker.pkg.github.com'
          # Fix: ${GITHUB_ACTOR} is shell-expansion syntax and is NOT expanded
          # inside a `with:` value; use the workflow expression form instead.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create Docker Image
        run: sbt ++2.12.10 docker
      - name: Push docker images
        # Only push images for direct pushes to the develop or master branches.
        if: github.event_name == 'push' && (contains(github.ref, 'develop') || contains(github.ref, 'master'))
        run: sbt ++2.12.10 dockerPush
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # use glob syntax.
2 | syntax: glob
3 | *.ser
4 | *.class
5 | *~
6 | *.bak
7 | #*.off
8 | *.old
9 | .DS_Store
10 | .bsp/
11 |
12 | # eclipse conf file
13 | .settings
14 | .classpath
15 | .project
16 | .manager
17 | .scala_dependencies
18 |
19 | # idea
20 | .idea
21 | *.iml
22 | *.ipr
23 | *.iws
24 |
25 | # building
26 | target
27 | build
28 | null
29 | tmp*
30 | temp*
31 | test-output
32 | build.log
33 |
34 | # other scm
35 | .svn
36 | .CVS
37 | .hg*
38 |
39 | # switch to regexp syntax.
40 | # syntax: regexp
41 | # ^\.pc/
42 |
43 | # build output that is not placed in the target directory
44 | build.log
45 |
46 | # other
47 | tmp
48 | *.swp
49 | ! .gitkeep
50 | *.log
51 | .directory
52 | *.result
53 | *.counts
54 | *.fg
55 | *.description
56 |
57 | Release
58 |
59 | .ensime
60 | .ensime_*
61 |
62 |
63 | # sbt specific
64 | .cache/
65 | .history/
66 | .lib/
67 | dist/*
68 | target/
69 | lib_managed/
70 | src_managed/
71 | project/boot/
72 | project/plugins/project/
73 | project/project/
74 | project/target/
75 |
76 | # Scala-IDE specific
77 | .scala_dependencies
78 | .worksheet
79 | .metals
80 |
81 | # ignore locally managed libraries
82 | lib/
83 |
84 | # ignore scala worksheet files
85 | *.sc
86 |
87 | # ignore Mac OS files
88 | .DS_Store
89 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "Data"]
2 | path = Data
3 | url = https://github.com/anskarl/LoMRF-data.git
4 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to LoMRF
2 |
3 | Do you like LoMRF and want to get involved? Cool! That is wonderful!
4 |
5 | Please take a moment to review this document, in order to make the contribution process easy and effective for everyone
6 | involved.
7 |
8 | ## Core Ideas
9 |
10 | The purpose of LoMRF is to provide both a usable tool and a library for creating models using the Statistical Relational
11 | Learning concepts of Markov Logic Networks (MLNs). As a tool we aim to have a great end-to-end experience of MLNs
12 | modelling, inference and Machine Learning. This means that we prefer to have features that are mature enough to be part
13 | of the LoMRF. We prefer to postpone the release of a feature, in order to have implementations that are clean in terms
14 | of user experience and development friendliness, well-documented (documentation and examples) and well-tested (unit
15 | tests, example code). LoMRF is composed of a collection of complex algorithms, and we always aim for efficient
16 | implementations. As a result, another important aspect of LoMRF is to have fast and memory-friendly implementations of
17 | the core algorithms (e.g., inference).
18 |
19 | There are two main branches, master and develop. The former contains the stable versions of LoMRF, and thus is not used
20 | for active development, pull requests or hot-fixes. The code in the master branch is considered frozen. Even in
21 | situations of hot-fixes or minor improvements we prefer to fix them in the development version first. The latter,
22 | develop branch, contains the latest development snapshot of LoMRF. We strongly suggest to work your contributions over
23 | the develop branch.
24 |
25 | ## Submitting a Pull Request
26 |
27 | Good pull requests, such as patches, improvements, and new features, are a fantastic help. They should remain focused
28 | in scope and avoid containing unrelated commits.
29 |
30 | Please **ask first** if somebody else is already working on this or the core developers think your feature is in-scope
31 | for LoMRF. Generally always have a related issue with discussions for whatever you are including.
32 |
33 | Please also provide a test plan, i.e., specify how you verified that your addition works, add unit tests or provide
34 | examples.
35 |
36 | Finally, since the master branch is only for stable releases tagged with a version, **a pull request should always target
37 | the develop branch.**
38 |
39 |
40 | Thank you again for considering to contribute to LoMRF and happy hacking :)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/anskarl/LoMRF/tree/develop)
2 |
3 |
4 | o o o o o
5 | | o | |\ /| | /
6 | | o-o o--o o-o oo | | O | oo o-o OO o-o o o
7 | | | | | | | | | | | | | | | | | \ | | \ /
8 | O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
9 | |
10 | o--o
11 | o--o o o--o o o
12 | | | | | o | |
13 | O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
14 | | \ | | | | | | | | | | | | | |-' | | | \
15 | o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
16 |
17 | Logical Markov Random Fields.
18 |
19 |
20 | # LoMRF: Logical Markov Random Fields
21 |
22 | LoMRF is an open-source implementation of [Markov Logic Networks](https://en.wikipedia.org/wiki/Markov_logic_network) (MLNs) written in [Scala programming language](http://scala-lang.org).
23 |
24 | ## Features overview:
25 |
26 | 1. Parallel grounding algorithm based on [Akka Actors library](http://akka.io).
27 | 2. Marginal (MC-SAT) and MAP (MaxWalkSAT and LP-relaxed Integer Linear Programming) inference (**lomrf infer**).
28 | 3. Batch and on-line Weight Learning (Max-Margin, AdaGrad and CDA) (**lomrf wlearn**).
29 | 4. On-line Structure Learning (OSL and OSLa) (**lomrf slearn**).
30 | 5. MLN knowledge base compilation (**lomrf compile**):
31 | * Predicate completion.
32 | * Clausal form transformation.
33 | * Replacement of functions with utility predicates and vice versa.
34 | * Reads and produces [Alchemy](http://alchemy.cs.washington.edu/alchemy1.html) compatible MLN files.
35 | 6. Can export ground MRF in various formats (**lomrf export**).
36 | 7. Can compare MLN theories (**lomrf diff**).
37 | 8. Online supervision completion on semi-supervised training sets [*currently experimental*] (**lomrf supervision**)
38 |
39 |
40 | ## Documentation
41 |
42 | Latest [documentation](docs/index.md).
43 |
44 | ## Contributions
45 |
46 | Contributions are welcome, for details see [CONTRIBUTING.md](CONTRIBUTING.md).
47 |
48 | ## License
49 |
50 | Copyright (c) 2014 - 2019 Anastasios Skarlatidis and Evangelos Michelioudakis
51 |
52 | LoMRF is licensed under the Apache License, Version 2.0: [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
53 |
54 | ## Reference in Scientific Publications
55 |
56 | Please use the following BibTex entry when you cite LoMRF in your papers:
57 | ```
58 | @misc{LoMRF,
59 | author = {Anastasios Skarlatidis and Evangelos Michelioudakis},
60 | title = {{Logical Markov Random Fields (LoMRF): an open-source implementation of Markov Logic Networks}},
61 | url = {https://github.com/anskarl/LoMRF},
62 | year = {2014}
63 | }
64 | ```
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | addCommandAlias("build", ";headerCreate;compile;test;package")
2 | addCommandAlias("rebuild", ";clean;build")
3 |
4 | lazy val lomrf = Project("LoMRF", file("."))
5 | .enablePlugins(JavaAppPackaging, sbtdocker.DockerPlugin, AutomateHeaderPlugin)
6 | .settings(Test / logLevel := Level.Info)
7 | .settings(Compile / logLevel := Level.Error)
8 | .settings(libraryDependencies ++= Dependencies.Akka)
9 | .settings(libraryDependencies ++= Dependencies.Logging)
10 | .settings(libraryDependencies ++= Dependencies.Utils)
11 | .settings(libraryDependencies ++= Dependencies.Optimus)
12 | .settings(libraryDependencies ++= Dependencies.Testing)
13 |
--------------------------------------------------------------------------------
/docs/1_2_evidence.md:
--------------------------------------------------------------------------------
1 | # Evidence
2 |
3 | Evidence files in LoMRF are text files having the suffix `.db` (e.g., `file_name.db`). The contents of an evidence file
4 | are ground predicates (facts) and optionally ground function Mappings.
5 |
6 | ## Function Mappings
7 |
8 | A function mapping defines a possible **true** grounding of a function (see [Function Definitions](1_1_knowledge_base.md#function-definitions)).
9 | Syntactically, it follows the corresponding function definition in the knowledge base file, but the domain types are
10 | replaced with some of their corresponding constant symbols.
11 |
12 | For example, the *true* possible groundings of the function `time = next(time)` are the following:
13 |
14 | ```lang-none
15 | 2 = next(1)
16 | 3 = next(2)
17 | 4 = next(3)
18 | 5 = next(4)
19 | 6 = next(5)
20 | 7 = next(6)
21 | 8 = next(7)
22 | 9 = next(8)
23 | 10 = next(9)
24 | ```
25 |
26 | According to the given true groundings in the example above, `next(1)` results in the constant `2`. Furthermore,
27 | LoMRF takes a [Closed-world assumption](https://en.wikipedia.org/wiki/Closed-world_assumption) approach for function
28 | mappings and therefore mappings that are not defined in the evidence are implicitly assumed as **false**. For example,
29 | the mapping of `next(0)` is missing from the above evidence, thus it is implicitly assumed as **false**.
30 |
31 | ## Facts (ground predicates)
32 |
33 | Ground predicates in the evidence represent known facts for the LoMRF. Each fact is expressed with predicates that contain
34 | only constants from their corresponding domains. Each fact represents a true grounding of a specific predicate, optionally
35 | facts can be negated and thus represent a false grounding of a predicate.
36 |
37 | For example, the following ground predicates are facts that express that *Agamemnon* and *Menelaus* are brothers,
38 | but *Achilles* is not brother of *Agamemnon*.
39 |
40 | ```lang-none
41 | Brother(Agamemnon, Menelaus)
42 | Brother(Menelaus, Agamemnon)
43 | !Brother(Achilles, Agamemnon)
44 | ```
45 |
46 | By default LoMRF takes [Closed-world assumption](https://en.wikipedia.org/wiki/Closed-world_assumption) for all
47 | instantiations of predicates that have at least one fact in the specified evidence file, unless it is explicitly
48 | specified to take [Open-world assumption](https://en.wikipedia.org/wiki/Open-world_assumption) in the inference
49 | options or it is a query predicate (see [Inference](2_inference.md)). Therefore, we can define only the true
50 | groundings in the evidence and assume a *False* state for all other possible combinations (e.g., `Brother(Achilles, Menelaus)` is implicitly assumed as **False**).
51 |
--------------------------------------------------------------------------------
/docs/1_syntax.md:
--------------------------------------------------------------------------------
1 | # Syntax
2 |
3 | LoMRF employs a logic-based language for expressing knowledge files (`*.mln`) and input evidence files (`*.db`):
4 | * [Knowledge base](1_1_knowledge_base.md) is composed of domain type definitions, predicate and function schema definitions, first-order logic formulas, definite clauses, as well as some build-in functions and predicates with additional functionality.
5 | * [Evidence](1_2_evidence.md) is composed of ground predicate facts and function mappings.
6 |
--------------------------------------------------------------------------------
/docs/6_tools.md:
--------------------------------------------------------------------------------
1 |
2 | ## LoMRF command line tools:
3 |
4 | * lomrf infer: Performs probabilistic inference (see `lomrf infer -h` for help).
5 | * lomrf wlearn: Performs weight learning (see `lomrf wlearn -h` for help).
6 | * lomrf slearn: Performs structure learning (see `lomrf slearn -h` for help).
7 | * lomrf compile: Compiles MLN files (predicate completion, CNF, etc.; write `lomrf compile -h` for help).
8 | * lomrf export: Exports ground MRF into various formats (MLN, DIMACS and libDAI Factor graph; write `lomrf export -h` for help).
9 | * lomrf diff: Displays differences between MLN files. The theories are compared only in [CNF](http://en.wikipedia.org/wiki/Conjunctive_normal_form) form.
10 | * lomrf supervision: Completes the missing supervision on a set of given training files (see `lomrf supervision -h` for help).
--------------------------------------------------------------------------------
/docs/7_2_download_example_data.md:
--------------------------------------------------------------------------------
1 | # Download Example and Unit Testing Data
2 |
3 | Optionally, if you are interested in running the examples or the unit tests, you need to pull the sources of the LoMRF-data git sub-module. In order to download the contents of LoMRF-data as a git sub-module, you need to give the following commands in the root directory of the project (e.g., `/path/to/LoMRF`)
4 |
5 | **Only for the first time after cloning LoMRF**
6 | ```bash
7 | $ git submodule update --init
8 | ```
9 |
10 | **Every time you want to get the latest version of LoMRF-data**
11 | ```bash
12 | $ git submodule foreach --recursive git pull
13 | ```
14 |
15 | LoMRF examples, as well as files that are used by unit testing, are located in the sub-directory `Data`, which has the
16 | following directory structure:
17 |
18 | ```
19 | Data
20 | ├── Examples
21 | │ ├── Inference
22 | │ │ ├── Activity_Recognition
23 | │ │ ├── Marcus_Caesar
24 | │ │ ├── distributions
25 | │ │ └── yale_shooting
26 | │ ├── Structure_Learning
27 | │ │ ├── OSL_NLP
28 | │ │ └── OSLa_CAVIAR
29 | │ └── Weight_Learning
30 | │ ├── Activity_Recognition
31 | │ ├── Car_Traffic
32 | │ └── Friends_Smokers
33 | └── Unit_Tests
34 | ├── DependencyMap
35 | ├── inference
36 | └── learning
37 | ```
38 |
39 | **Example Files**
40 |
41 | All inference-related examples are located in `Data/Examples/Inference` (see [Inference](2_inference.md)). All weight learning related examples are located in `Data/Examples/Weight_Learning` (see [Weight Learning](3_weight_learning.md)). Similarly, all structure learning related examples are located in `Data/Examples/Structure_Learning` (see [Structure Learning](4_structure_learning.md)).
42 |
43 | Please note that, for space efficiency, the following input data files for Weight Learning and Structure Learning are compressed:
44 | ```
45 | Data/Examples/Structure_Learning/OSL_NLP/training.tar.bz2
46 | Data/Examples/Structure_Learning/OSLa_CAVIAR/training.tar.bz2
47 | Data/Examples/Weight_Learning/Activity_Recognition/training.tar.bz2
48 | ```
49 |
50 | **Unit Testing Files**
51 | All files that are used in Unit testing are located in `Data/Unit_Tests`.
--------------------------------------------------------------------------------
/docs/7_build_test_lomrf.md:
--------------------------------------------------------------------------------
1 | # Build and Test LoMRF
2 |
3 | In order to build from sources and use LoMRF, either as a program or as a programming library, you need to follow the [build and link instructions](7_1_build_and_link_lomrf.md).
4 |
5 | If you are interested in learning LoMRF by using examples or contribute, we suggest to get the [LoMRF-data sub-module](7_2_download_example_data.md). The LoMRF-data contains examples (see, for instance, [Inference](2_inference.md)) as well as sample files that are used by Unit testing.
6 |
--------------------------------------------------------------------------------
/docs/8_configuration.md:
--------------------------------------------------------------------------------
1 | # Configuration Parameters
2 |
3 | Assuming that you have successfully created a LoMRF distribution from sources (see [Build and Link LoMRF](7_1_build_and_link_lomrf.md))
4 | and that you have extracted the distribution into some path (e.g., `/path/to/your/compiled/LoMRF/`). You may want to
5 | adjust the following runtime parameters.
6 |
7 |
8 | ## Memory and JVM parameters
9 |
10 | Depending on your requirements, you may want to adjust the heap memory parameters or other options of the Java VM.
11 |
12 | ### Linux, Unix and MacOS X
13 |
14 | In order to adjust the JVM options, you can change the `VM_ARGS` variable in the `inc.env.sh` file (located inside the `bin`
15 | sub-directory) and add or adjust any JVM parameter (for the available parameters write `java -X` in the command line).
16 |
17 | For example, you may adjust the following default options
18 |
19 | ```bash
20 | #
21 | # JVM options:
22 | #
23 | VM_ARGS=" -XX:+DoEscapeAnalysis -XX:+UseFastAccessorMethods -XX:+OptimizeStringConcat -Xss32m "
24 | ```
25 |
26 | and set the heap memory parameters of the JVM, in order to define 1 GB as initial heap memory size (i.e., `-Xms1g`) and
27 | 4 GB as the maximum heap memory size (i.e., `-Xmx4g`), as it is presented below:
28 |
29 | ```bash
30 | #
31 | # JVM options:
32 | #
33 | VM_ARGS=" -XX:+DoEscapeAnalysis -XX:+UseFastAccessorMethods -XX:+OptimizeStringConcat -Xss32m -Xms1g -Xmx4g "
34 | ```
35 |
36 | **Please do not forget to leave spaces at the beginning and at the end of the `VM_ARGS` variable.**
37 |
38 | ### Microsoft Windows
39 |
40 | In order to adjust the JVM options, you can change the `VM_ARGS` variable in the `inc.env.bat` file (located inside the `bin`
41 | sub-folder) and add or adjust any JVM parameter (for the available parameters write `java -X` in the command line).
42 |
43 | For example, you may adjust the following default options
44 |
45 | ```lang-none
46 | :: JVM Parameters
47 | set "VM_ARGS= "
48 | ```
49 | and set the heap memory parameters of the JVM, in order to define 1 GB as initial heap memory size (i.e., `-Xms1g`) and
50 | 4 GB as the maximum heap memory size (i.e., `-Xmx4g`), as it is presented below:
51 |
52 | ```lang-none
53 | :: JVM Parameters
54 | set "VM_ARGS= -XX:+DoEscapeAnalysis -XX:+UseFastAccessorMethods -XX:+OptimizeStringConcat -Xss32m -Xms1g -Xmx4g "
55 | ```
56 |
57 |
58 | ## Quickly enable debug logging
59 |
60 | To enable debug information, export the environment variable `LOMRF_DEBUG=1` for storing further debug information in
61 | `debug.log` file (the file will be created into the current working path), otherwise use default (INFO level) for
62 | console-only output logging.
63 |
64 | ### Linux, Unix and MacOS X
65 |
66 | For example, calling the `lomrf` CLI tool with extra debug logging from the command line in Linux/Unix/MacOS X:
67 |
68 | ```bash
69 | $ export LOMRF_DEBUG=1
70 | $ lomrf
71 | ```
72 |
73 | ### Microsoft Windows
74 |
75 | For example, calling the `lomrf` CLI tool with extra debug logging from the command line in MS Windows:
76 |
77 | ```
78 | C:\> SET "LOMRF_DEBUG=1"
79 |
80 | C:\> lomrf
81 | ```
82 |
83 | ## Change the logging configuration
84 |
85 | You can override the default logging configuration by placing your logback configuration file (i.e., `logback.xml`)
86 | inside the `/path/to/your/compiled/LoMRF/etc` directory.
87 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 |
2 | o o o o o
3 | | o | |\ /| | /
4 | | o-o o--o o-o oo | | O | oo o-o OO o-o o o
5 | | | | | | | | | | | | | | | | | \ | | \ /
6 | O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
7 | |
8 | o--o
9 | o--o o o--o o o
10 | | | | | o | |
11 | O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
12 | | \ | | | | | | | | | | | | | |-' | | | \
13 | o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
14 |
15 | Logical Markov Random Fields.
16 |
17 |
18 | # LoMRF: Logical Markov Random Fields
19 |
20 | LoMRF is an open-source implementation of [Markov Logic Networks](https://en.wikipedia.org/wiki/Markov_logic_network) (MLNs) written in [Scala programming language](http://scala-lang.org).
21 |
22 | ## Features overview:
23 |
24 | 1. Parallel grounding algorithm based on [Akka Actors library](http://akka.io).
25 | 2. Marginal (MC-SAT) and MAP (MaxWalkSAT and LP-relaxed Integer Linear Programming) inference (**lomrf infer**).
26 | 3. Batch and on-line Weight Learning (Max-Margin, AdaGrad and CDA) (**lomrf wlearn**).
27 | 4. On-line Structure Learning (OSL and OSLa) (**lomrf slearn**).
28 | 5. MLN knowledge base compilation (**lomrf compile**):
29 | * Predicate completion.
30 | * Clausal form transformation.
31 | * Replacement of functions with utility predicates and vice versa.
32 | * Reads and produces [Alchemy](http://alchemy.cs.washington.edu/alchemy1.html) compatible MLN files.
33 | 6. Can export ground MRF in various formats (**lomrf export**).
34 | 7. Can compare MLN theories (**lomrf diff**).
35 | 8. Online supervision completion on semi-supervised training sets [*currently experimental*] (**lomrf supervision**)
36 |
37 |
38 | ## Building
39 |
40 | See [Build and Link LoMRF](7_1_build_and_link_lomrf.md).
41 |
42 | ## Documentation contents
43 |
44 | - [Quick-start guide](0_quick_start.md)
45 | - [Syntax](1_syntax.md)
46 | - [Knowledge base](1_1_knowledge_base.md)
47 | - [Evidence](1_2_evidence.md)
48 | - [Inference](2_inference.md)
49 | - [Probabilistic Inference Examples](2_1_inference_examples.md)
50 | - [Temporal Probabilistic Inference Examples](2_2_temporal_inference_examples.md)
51 | - [Weight Learning](3_weight_learning.md)
52 | - [Weight Learning Examples](3_1_weight_learning_examples.md)
53 | - [Temporal Weight Learning Examples](3_2_temporal_weight_learning_examples.md)
54 | - [Structure Learning](4_structure_learning.md)
55 | - [Structure Learning Examples](4_1_structure_learning_examples.md)
56 | - [Supervision Completion](5_supervision_completion.md)
57 | - [CLI Tools](6_tools.md)
58 | - [Build from source](7_build_test_lomrf.md)
59 | - [Build and Link](7_1_build_and_link_lomrf.md)
60 | - [Download example data](7_2_download_example_data.md)
61 | - [Configuration](8_configuration.md)
62 | - [References](9_references.md)
63 |
64 |
65 | ## License
66 |
67 | Copyright (c) 2014 - 2019 Anastasios Skarlatidis and Evangelos Michelioudakis
68 |
69 | LoMRF is licensed under the Apache License, Version 2.0: [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
70 |
71 | ## Reference in Scientific Publications
72 |
73 | Please use the following BibTex entry when you cite LoMRF in your papers:
74 | ```
75 | @misc{LoMRF,
76 | author = {Anastasios Skarlatidis and Evangelos Michelioudakis},
77 | title = {{Logical Markov Random Fields (LoMRF): an open-source implementation of Markov Logic Networks}},
78 | url = {https://github.com/anskarl/LoMRF},
79 | year = {2014}
80 | }
81 | ```
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: LoMRF
2 | nav:
3 | - Home: index.md
4 | - Quickstart guide: 0_quick_start.md
5 | - Syntax:
6 | - 1_syntax.md
7 | - Knowledge Base: 1_1_knowledge_base.md
8 | - Evidence: 1_2_evidence.md
9 | - Inference:
10 | - 2_inference.md
11 | - Probabilistic Inference Examples: 2_1_inference_examples.md
12 | - Temporal Probabilistic Inference Examples: 2_2_temporal_inference_examples.md
13 | - Weight Learning:
14 | - 3_weight_learning.md
15 | - Weight Learning Examples: 3_1_weight_learning_examples.md
16 | - Temporal Weight Learning Examples: 3_2_temporal_weight_learning_examples.md
17 | - Structure Learning:
18 | - 4_structure_learning.md
19 | - Structure Learning Examples: 4_1_structure_learning_examples.md
20 | - Supervision Completion: 5_supervision_completion.md
21 | - CLI Tools: 6_tools.md
22 | - Build from source:
23 | - 7_build_test_lomrf.md
24 | - Build and link: 7_1_build_and_link_lomrf.md
25 | - Download example data: 7_2_download_example_data.md
26 | - Configuration: 8_configuration.md
27 | - References: 9_references.md
28 |
29 | theme: readthedocs
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/project/Dependencies.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | import sbt._
22 | import sbt.Keys._
23 |
object Dependencies {

  // Centralised dependency versions, grouped for easy maintenance.
  object v {
    final val Akka = "2.5.21"

    final val ScalaLogging = "3.9.2"
    final val Logback = "1.2.3"
    final val SLF4J = "1.7.25"
    final val JANSI = "1.11"

    final val ScalaTest = "3.0.5"

    final val Trove4j = "3.0.3" // todo upgrade to 3.1
    final val JTS = "1.14.0"

    final val Enums = "1.5.13"

    final val Optimus = "3.0.0"
    final val Breeze = "0.13.2"

    final val Spire = "0.13.0"
  }

  // Akka.io actor framework (actors, remoting and SLF4J integration)
  lazy val Akka = Seq(
    "com.typesafe.akka" %% "akka-actor" % v.Akka,
    "com.typesafe.akka" %% "akka-remote" % v.Akka,
    "com.typesafe.akka" %% "akka-slf4j" % v.Akka
  )

  // Logging with slf4j and logback
  lazy val Logging = Seq(
    "com.typesafe.scala-logging" %% "scala-logging" % v.ScalaLogging,
    "ch.qos.logback" % "logback-classic" % v.Logback,
    "ch.qos.logback" % "logback-core" % v.Logback,
    "org.slf4j" % "slf4j-api" % v.SLF4J
  )

  // ScalaTest for unit testing (test scope only)
  lazy val Testing = Seq(
    "org.scalatest" %% "scalatest" % v.ScalaTest % "test"
  )

  lazy val Utils = Seq(
    // GNU Trove4j for high performance and memory efficient data-structures
    "net.sf.trove4j" % "trove4j" % v.Trove4j, // todo: v3.1 and change artifact from 'trove4j' to 'core'

    // JTS Topology API for modelling and manipulating 2-dimensional linear geometry
    "com.vividsolutions" % "jts-core" % v.JTS,

    // Optimized Range foreach loops
    //"com.nativelibs4java" %% "scalaxy-streams" % v.Scalaxy % "provided",

    // ANSI colour support for console output
    "org.fusesource.jansi" % "jansi" % v.JANSI,

    // Breeze library for efficient numerical processing
    "org.scalanlp" %% "breeze" % v.Breeze,
    "org.scalanlp" %% "breeze-natives" % v.Breeze,

    // Enumeratum for type-safe enumerations
    "com.beachape" %% "enumeratum" % v.Enums,

    // Spire for generic numeric abstractions
    "org.spire-math" %% "spire" % v.Spire,
    "org.spire-math" %% "spire-macros" % v.Spire
  )

  // Optimus library for linear and quadratic optimization
  lazy val Optimus = Seq(
    "com.github.vagmcs" %% "optimus" % v.Optimus,
    "com.github.vagmcs" %% "optimus-solver-oj" % v.Optimus,
    "com.github.vagmcs" %% "optimus-solver-lp" % v.Optimus,
    "com.github.vagmcs" %% "optimus-solver-gurobi" % v.Optimus,
    "com.github.vagmcs" %% "optimus-solver-mosek" % v.Optimus
  )

}
--------------------------------------------------------------------------------
/project/Formatting.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | import sbt._
22 | import com.typesafe.sbt.SbtScalariform.ScalariformKeys
23 | import scalariform.formatter.preferences.AlignSingleLineCaseStatements.MaxArrowIndent
24 | import scalariform.formatter.preferences._
25 |
object Formatting {

  // Scalariform formatting settings, applied to the default, Compile and Test configurations.
  lazy val formatSettings = Seq(
    ScalariformKeys.preferences := setPreferences(ScalariformKeys.preferences.value),
    ScalariformKeys.preferences in Compile := setPreferences(ScalariformKeys.preferences.value),
    ScalariformKeys.preferences in Test := setPreferences(ScalariformKeys.preferences.value)
  )

  /**
   * Applies the project-wide Scalariform formatting preferences
   * on top of the given preferences.
   *
   * @param preferences the preferences to extend
   * @return the resulting formatting preferences
   */
  def setPreferences(preferences: IFormattingPreferences): IFormattingPreferences = preferences
    .setPreference(AlignArguments, true)
    .setPreference(AlignParameters, false)
    .setPreference(AlignSingleLineCaseStatements, true)
    .setPreference(MaxArrowIndent, 40)
    .setPreference(AllowParamGroupsOnNewlines, true)
    .setPreference(CompactControlReadability, false)
    .setPreference(CompactStringConcatenation, false)
    .setPreference(DanglingCloseParenthesis, Preserve)
    .setPreference(DoubleIndentConstructorArguments, true)
    .setPreference(DoubleIndentMethodDeclaration, true)
    .setPreference(FirstArgumentOnNewline, Preserve)
    .setPreference(FirstParameterOnNewline, Force)
    .setPreference(IndentLocalDefs, true)
    .setPreference(IndentPackageBlocks, true)
    .setPreference(IndentSpaces, 2)
    .setPreference(IndentWithTabs, false)
    .setPreference(MultilineScaladocCommentsStartOnFirstLine, false)
    .setPreference(NewlineAtEndOfFile, true)
    .setPreference(PlaceScaladocAsterisksBeneathSecondAsterisk, true)
    .setPreference(PreserveSpaceBeforeArguments, true)
    .setPreference(RewriteArrowSymbols, false)
    .setPreference(SingleCasePatternOnNewline, true)
    .setPreference(SpaceBeforeColon, false)
    .setPreference(SpaceBeforeContextColon, false)
    .setPreference(SpaceInsideBrackets, false)
    .setPreference(SpaceInsideParentheses, false)
    .setPreference(SpacesWithinPatternBinders, true)
}
63 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.10.1
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
// Additional resolvers for sbt plugin artifacts.
resolvers += Resolver.typesafeRepo("releases")
// NOTE(review): dl.bintray.com was sunset in 2021, so this resolver is most
// likely dead — verify whether any plugin below still requires it.
resolvers += Resolver.
  url("hmrc-sbt-plugin-releases", url("https://dl.bintray.com/hmrc/sbt-plugin-releases"))(Resolver.ivyStylePatterns)


addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.4.0")

addSbtPlugin("com.scalapenos" % "sbt-prompt" % "1.0.2")

addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.11")

addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.2.0")

addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0")

addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.8.3")

addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.0")
--------------------------------------------------------------------------------
/scripts/inc.env.bat:
--------------------------------------------------------------------------------
:: JVM Parameters
set "VM_ARGS= "

::
:: Logging configuration
::
:: To enable debug information, export LOMRF_DEBUG=1 for storing further
:: debug information in 'debug.log' file, otherwise use default (INFO level)
:: console output logging.
::
:: BUGFIX: the non-debug branch previously exited without any message when
:: '%etc_dir%\logback.xml' was missing; it now reports the missing file, in the
:: same way as the debug branch.

if not defined LOMRF_DEBUG (
    set "LOMRF_DEBUG=0"
)


if %LOMRF_DEBUG% equ 1 (

    if not exist %etc_dir%\logback-debug.xml (
        echo Cannot find logging configuration file '%etc_dir%\logback-debug.xml'
        exit /b 1
    )

    echo Debug output is enabled 'LOMRF_DEBUG=1'
    echo Debug output will be stored into the 'debug.log' file

    SET "VM_ARGS=%VM_ARGS% -Dlogback.configurationFile=%etc_dir%\logback-debug.xml "

) else (

    if not exist %etc_dir%\logback.xml (
        echo Cannot find logging configuration file '%etc_dir%\logback.xml'
        exit /b 1
    ) else (
        SET "VM_ARGS=%VM_ARGS% -Dlogback.configurationFile=%etc_dir%\logback.xml "
    )

)
--------------------------------------------------------------------------------
/scripts/inc.env.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Builds the project classpath, loads the logging helpers and assembles the
# JVM arguments (LOMRF_JVM_ARGS). Expects 'base_dir' and 'lib_dir' to be set
# by the calling script.

PRJ_COMMONS_CLASSPATH="."

for curr_lib in `find ${lib_dir} -name "*.jar"`
do
  PRJ_COMMONS_CLASSPATH=$PRJ_COMMONS_CLASSPATH:$curr_lib
done

SCALA_LIBS=""

ETC_DIR="$base_dir/etc"


#
# Load poor mans logger ;)
#
. `dirname $0`/poor.mans.logger.sh


#
# JVM options:
#
if [[ -z ${LOMRF_JVM_ARGS+x} ]]; then
    LOMRF_JVM_ARGS=" -XX:+DoEscapeAnalysis -XX:+OptimizeStringConcat "
else
    log_info "User-defined JVM args: $LOMRF_JVM_ARGS"
fi


#
# Logging configuration
#
# To enable debug information, export LOMRF_DEBUG=1 for storing further
# debug information in 'debug.log' file, otherwise use default (INFO level)
# console output logging.
#
if [[ -n $LOMRF_DEBUG && $LOMRF_DEBUG -eq 1 ]]; then
    if [[ ! -f $ETC_DIR/logback-debug.xml ]]; then
        log_error "Cannot find logging configuration file '$ETC_DIR/logback-debug.xml'"
        exit 1
    fi

    log_warn "Debug output is enabled (LOMRF_DEBUG=1)."
    log_warn "Debug output will be stored into the 'debug.log' file."

    # BUGFIX: append to any user-defined JVM args instead of discarding them
    # (this branch previously overwrote LOMRF_JVM_ARGS, unlike the branch below).
    LOMRF_JVM_ARGS=" $LOMRF_JVM_ARGS -Dlogback.configurationFile=$ETC_DIR/logback-debug.xml "
else
    if [[ ! -f $ETC_DIR/logback.xml ]]; then
        # BUGFIX: 'exit_error' only exits when the previous command failed, but a
        # successful [[ ! -f ]] test leaves $?=0, so the script used to continue
        # with a missing configuration file. Log and exit explicitly instead.
        log_error "Cannot find logging configuration file '$ETC_DIR/logback.xml'"
        exit 1
    fi

    LOMRF_JVM_ARGS=" $LOMRF_JVM_ARGS -Dlogback.configurationFile=$ETC_DIR/logback.xml "
fi
55 |
56 |
--------------------------------------------------------------------------------
/scripts/lomrf:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Launcher for the LoMRF command line application (lomrf.app.LoMRF).

# Get the location for this script; handles symlinks
function get_script_path {
  local source="${BASH_SOURCE[0]}"
  while [[ -h "$source" ]] ; do
    local linked="$(readlink "$source")"
    local dir="$(cd -P $(dirname "$source") && cd -P $(dirname "$linked") && pwd)"
    source="$dir/$(basename "$linked")"
  done
  echo ${source}
}

# script details
declare -r script_path=$(get_script_path)
declare -r script_name=$(basename "$script_path")
declare -r script_dir="$(cd -P "$(dirname "$script_path")" && pwd)"

# directories
declare -r base_dir="$(cd "$script_dir/.." && pwd)"
declare -r lib_dir="$base_dir/lib"

# load the environment (classpath and JVM arguments)
. "$base_dir/bin/inc.env.sh"

# BUGFIX: forward the arguments as "$@" so arguments containing spaces are
# passed intact, and quote the classpath in case the installation path
# contains spaces. LOMRF_JVM_ARGS is intentionally left unquoted because it
# holds several whitespace-separated JVM options.
nice java ${LOMRF_JVM_ARGS} -cp "${PRJ_COMMONS_CLASSPATH}" lomrf.app.LoMRF "$@"
28 |
--------------------------------------------------------------------------------
/scripts/lomrf.bat:
--------------------------------------------------------------------------------
@echo off
setlocal enabledelayedexpansion
:: Windows launcher for the LoMRF command line application (lomrf.app.LoMRF).

:: Get the bin path
SET bin_dir=%~dp0

:: Get the LoMRF paths
pushd "%bin_dir%"
cd ..
SET base_dir=%cd%
SET lib_dir=%base_dir%\lib
SET etc_dir=%base_dir%\etc
popd


call %bin_dir%\inc.env.bat


:: BUGFIX: '-Djava.ext.dirs' is rejected by Java 9 and later (the JVM refuses
:: to start). A classpath wildcard loads every jar in 'lib' and works on
:: Java 6 and later.
java %VM_ARGS% -cp "%lib_dir%\*" lomrf.app.LoMRF %*
--------------------------------------------------------------------------------
/scripts/mkDynamic:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Compiles user-provided Scala sources (e.g. dynamic predicates/functions)
# against the LoMRF classpath.

# Get the location for this script; handles symlinks
function get_script_path {
  local source="${BASH_SOURCE[0]}"
  while [[ -h "$source" ]] ; do
    local linked="$(readlink "$source")"
    local dir="$(cd -P $(dirname "$source") && cd -P $(dirname "$linked") && pwd)"
    source="$dir/$(basename "$linked")"
  done
  echo ${source}
}

# script details
declare -r script_path=$(get_script_path)
declare -r script_name=$(basename "$script_path")
declare -r script_dir="$(cd -P "$(dirname "$script_path")" && pwd)"

# directories
declare -r base_dir="$(cd "$script_dir/.." && pwd)"
declare -r lib_dir="$base_dir/lib"

# load the environment (classpath)
. "$base_dir/bin/inc.env.sh"

# BUGFIX: forward the arguments as "$@" so source file paths containing
# spaces survive word splitting, and quote the classpath as well.
scalac -classpath "${PRJ_COMMONS_CLASSPATH}" -encoding UTF-8 -optimise -nowarn "$@"
28 |
29 |
--------------------------------------------------------------------------------
/scripts/poor.mans.logger.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Minimal colourised logging helpers. When the variable $LOG points to an
# existing file, every message is also appended to that file.

# Colours for logging messages
red='\033[0;31m'
green='\033[0;32m'
orange='\033[0;33m'
cyan='\033[0;36m'
noColour='\033[0m'

# Shared implementation: emit one timestamped, coloured log line.
# param $1: colour escape for the level tag
# param $2: level tag (e.g. ' INFO ')
# param $3: message
__log_line(){
  if [[ -f $LOG ]]; then
    echo -e "${cyan}"`date`" ${1}[${2}] ${3} ${noColour}" | tee -a $LOG
  else
    echo -e "${cyan}"`date`" ${1}[${2}] ${3} ${noColour}"
  fi
}

# param $1: info message
log_info(){
  __log_line "${green}" " INFO " "$1"
}

# param $1: warning message
log_warn(){
  __log_line "${orange}" " WARN " "$1"
}

# param $1: error message
log_error(){
  __log_line "${red}" " ERROR " "$1"
}

# Log an error message and terminate the script.
# param $1: error message (default is "An error occurred, exiting...")
# param $2: exit code (default is 1)
exit_error(){
  # BUGFIX: the previous implementation exited only when $? was non-zero, i.e.
  # when the command executed right before the call had failed. Call sites of
  # the form 'if [[ ! -f file ]]; then exit_error ...' reach this function with
  # $?=0 (the test itself succeeded), so the script kept running after a fatal
  # condition. Always log and exit. The message is also quoted to prevent word
  # splitting, and the 'occured' typo is fixed.
  log_error "${1:-"An error occurred, exiting..."}"
  exit ${2:-1}
}
45 |
46 |
--------------------------------------------------------------------------------
/src/main/resources/logback-debug.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | true
7 |
8 |
9 | %highlightex([%-5p]) %m%n
10 |
11 |
12 |
13 | INFO
14 |
15 |
16 |
17 |
18 | debug.log
19 | false
20 |
21 |
22 | [%-5p] %d %c: %m%n
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | true
6 |
7 | %highlightex([%-5p]) %m%n
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/app/CLIApp.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.app
22 |
23 | import lomrf.util.opt.OptionParser
24 | import com.typesafe.scalalogging.LazyLogging
25 |
/**
 * Command line basic functionality shared across all command line interfaces. Should
 * be extended by any command line interface.
 *
 * Mixes in lazy logging and option parsing support, and prints the LoMRF ASCII
 * logo together with the build version as soon as the application starts.
 */
trait CLIApp extends App with LazyLogging with OptionParser {
  println(lomrf.ASCIILogo)
  println(lomrf.BuildVersion)
}
34 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/app/KBDifferenceCLI.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.app
22 |
23 | import lomrf.mln.model.MLN
24 | import lomrf.logic.AtomSignature
25 | import java.io.PrintStream
26 | import lomrf.util.logging.Implicits._
27 |
/**
 * Command line tool for knowledge base difference checking. For every pair of
 * given (knowledge base, evidence) inputs it reports the clauses missing from
 * either side into a '.diff' output file.
 */
object KBDifferenceCLI extends CLIApp {

  // Input file(s) (path)
  var inputFileName: Option[IndexedSeq[String]] = None

  // Evidence file(s) (path)
  var evidenceFileName: Option[IndexedSeq[String]] = None

  // Prefix for the output difference files
  var prefixOpt: Option[String] = None

  opt("i", "input", "", "At least two comma separated input files", {
    v: String =>
      if (v.nonEmpty) {
        val fileNames = v.split(',').map(_.trim)

        if (fileNames.length < 2)
          logger.fatal("At least two input files are required in order to compute difference.")

        inputFileName = Some(fileNames)
      }
  })

  opt("e", "evidence", "", "At least two comma separated evidence database files", {
    v: String =>
      if (v.nonEmpty) {
        val fileNames = v.split(',').map(_.trim)

        if (fileNames.length < 2)
          logger.fatal("At least two evidence files are required in order to compute difference.")

        evidenceFileName = Some(fileNames)
      }
  })

  opt("p", "prefix", "", "Prefix for the output difference files (-input_filename_1-input_filename_2.diff)", {
    v: String => prefixOpt = Some(v.trim)
  })

  flagOpt("h", "help", "Print usage options.", {
    println(usage)
    sys.exit(0)
  })

  // Main:
  if (args.length == 0) println(usage)
  else if (parse(args)) compare(
    inputFileName.getOrElse(logger.fatal("Please define the input files.")),
    evidenceFileName.getOrElse(logger.fatal("Please define the evidence files.")),
    prefixOpt
  )

  /**
   * Removes the file extension from the given file name, if one exists.
   * BUGFIX: the previous inline 'substring(0, lastIndexOf('.'))' threw a
   * StringIndexOutOfBoundsException for file names without an extension.
   *
   * @param fileName a file name
   * @return the file name without its extension
   */
  private def stripExtension(fileName: String): String = {
    val dot = fileName.lastIndexOf('.')
    if (dot >= 0) fileName.substring(0, dot) else fileName
  }

  /**
   * Computes the pairwise clause difference of the given knowledge bases and
   * writes each difference into a '.diff' output file.
   *
   * @param source the knowledge base files to compare
   * @param evidence the evidence database files (one per knowledge base)
   * @param prefixOpt an optional prefix for the output difference files
   */
  private def compare(source: IndexedSeq[String], evidence: IndexedSeq[String], prefixOpt: Option[String]): Unit = {

    if (source.size != evidence.size)
      logger.fatal("The number of input files and evidence files must be the same.")

    val combinations = source.view.zip(evidence).combinations(2).zipWithIndex

    val prefix = prefixOpt.map(_.trim) match {
      case Some(p) if p.nonEmpty => p + "-"
      case _ => ""
    }

    for {
      (combination, _) <- combinations
      (sourceAlpha, evidenceAlpha) = combination.head
      (sourceBeta, evidenceBeta) = combination.last
      mlnAlpha = MLN.fromFile(sourceAlpha, Set.empty[AtomSignature], evidenceAlpha)
      mlnBeta = MLN.fromFile(sourceBeta, Set.empty[AtomSignature], evidenceBeta)
    } {

      logger.info(s"\nSource KB 1: $sourceAlpha$mlnAlpha\nSource KB 2: $sourceBeta$mlnBeta")

      // BUGFIX: append the '.diff' extension documented in the '-p' option help
      // text (it was previously omitted).
      val targetFileName = prefix +
        stripExtension(sourceAlpha) + "-" +
        stripExtension(sourceBeta) + ".diff"

      val output = new PrintStream(targetFileName)

      // Clauses present in one KB but missing from the other.
      val diff1 = mlnAlpha.clauses.par.filter(!mlnBeta.clauses.contains(_))
      val diff2 = mlnBeta.clauses.par.filter(!mlnAlpha.clauses.contains(_))

      if (diff1.nonEmpty) {
        output.println(s"\nKB 1 ($sourceAlpha) does not contain the following clauses:\n")
        diff1.seq.foreach(clause => output.println(clause.toText()))
      }

      if (diff2.nonEmpty) {
        output.println(s"\nKB 2 ($sourceBeta) does not contain the following clauses:\n")
        diff2.seq.foreach(clause => output.println(clause.toText()))
      }

      if (diff1.isEmpty && diff2.isEmpty) logger.info("KBs are identical!")

      output.flush()
      output.close()
    }
  }
}
131 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/app/LoMRF.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.app
22 |
23 | import lomrf.util.opt.MasterOptionParser
24 |
/**
 * Main command line entry point of LoMRF. Dispatches each sub-command
 * (e.g. `lomrf infer`, `lomrf wlearn`) to the corresponding CLI application.
 */
object LoMRF extends App with MasterOptionParser {

  // When no sub-command is given, print the ASCII logo and the build version.
  if (args.isEmpty) {
    println(lomrf.ASCIILogo)
    println(lomrf.BuildVersion)
  }

  // Register each sub-command with a short description and the entry point
  // that receives the remaining command line arguments.
  addOpt("infer", "Perform probabilistic inference", InferenceCLI.main)
  addOpt("wlearn", "Perform weight learning", WeightLearningCLI.main)
  addOpt("slearn", "Perform structure learning", StructureLearningCLI.main)
  addOpt("supervision", "Perform supervision completion", SemiSupervisionCLI.main)
  addOpt("compile", "Perform knowledge base compilation", KBCompilerCLI.main)
  addOpt("diff", "Perform knowledge base diff", KBDifferenceCLI.main)
  addOpt("export", "Export a knowledge base into other supported formats", MRFWriterCLI.main)
  addOpt("help", "Show basic usage", _ => {
    println(lomrf.ASCIILogo)
    println(lomrf.BuildVersion)

    println(usage)
  })

  parse(args)

}
49 |
50 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/app/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf
22 |
23 | import enumeratum._
24 | import scala.collection.immutable
25 | import java.text.DecimalFormat
26 |
package object app {

  // Number format used when printing numeric results (up to 12 decimal digits).
  final val numFormat = new DecimalFormat("0.############")

  /** Mode controlling how clause weights are treated (keep, remove soft or remove all). */
  sealed class WeightsMode(override val entryName: String) extends EnumEntry {
    override def toString: String = entryName
  }

  object WeightsMode extends Enum[WeightsMode] {

    val values: immutable.IndexedSeq[WeightsMode] = findValues

    case object Keep extends WeightsMode("keep")
    case object Remove_Soft extends WeightsMode("remove soft")
    case object Remove_All extends WeightsMode("remove all")
  }

  /** Enumeration of the supported graph solver types. */
  sealed class GraphSolverType(override val entryName: String) extends EnumEntry {
    override def toString: String = entryName
  }

  object GraphSolverType extends Enum[GraphSolverType] {

    val values: immutable.IndexedSeq[GraphSolverType] = findValues

    case object NN extends GraphSolverType("nn")
    case object EXT_NN extends GraphSolverType("ext.nn")
    case object LP_SPLICE extends GraphSolverType("lp.splice")
    case object HFC_SPLICE extends GraphSolverType("hfc.splice")
    case object LGC_SPLICE extends GraphSolverType("lgc.splice")
    case object LP_TLP extends GraphSolverType("lp.tlp")
    case object HFC_TLP extends GraphSolverType("hfc.tlp")
    case object LGC_TLP extends GraphSolverType("lgc.tlp")
  }

  /**
   * Enumeration of the supported graph connection strategies
   * (full connectivity or aNN/eNN/kNN variants).
   */
  sealed class ConnectorStrategy(override val entryName: String) extends EnumEntry {
    override def toString: String = entryName
  }

  object ConnectorStrategy extends Enum[ConnectorStrategy] {

    val values: immutable.IndexedSeq[ConnectorStrategy] = findValues

    case object Full extends ConnectorStrategy("full")
    case object aNN extends ConnectorStrategy("aNN")
    case object aNNLabeled extends ConnectorStrategy("aNN.labeled")
    case object aNNTemporal extends ConnectorStrategy("aNN.temporal")
    case object eNN extends ConnectorStrategy("eNN")
    case object eNNLabeled extends ConnectorStrategy("eNN.labeled")
    case object eNNTemporal extends ConnectorStrategy("eNN.temporal")
    case object kNN extends ConnectorStrategy("kNN")
    case object kNNLabeled extends ConnectorStrategy("kNN.labeled")
    case object kNNTemporal extends ConnectorStrategy("kNN.temporal")
  }

  /** Enumeration of the supported distance types. */
  sealed class DistanceType(override val entryName: String) extends EnumEntry {
    override def toString: String = entryName
  }

  object DistanceType extends Enum[DistanceType] {

    val values: immutable.IndexedSeq[DistanceType] = findValues

    case object Binary extends DistanceType("binary")
    case object Atomic extends DistanceType("atomic")
    case object Evidence extends DistanceType("evidence")
    case object Mass extends DistanceType("mass")
    case object Hybrid extends DistanceType("hybrid")
  }

  /** Enumeration of the supported cache filters. */
  sealed class CacheFilter(override val entryName: String) extends EnumEntry {
    override def toString: String = entryName
  }

  object CacheFilter extends Enum[CacheFilter] {

    val values: immutable.IndexedSeq[CacheFilter] = findValues

    case object Simple extends CacheFilter("simple")
    case object Hoeffding extends CacheFilter("hoeffding")
  }
}
109 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/MLNTypeDefinition.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
/**
 * Types and constants can be declared in a .kb file having the following syntax:
 * {{{
 *   <type> = { <constant1>, <constant2>, ... },
 * }}}
 * @example {{{
 *   person = { Alice, Bob }
 *   time = {1, ..., 100} // for quickly defining a range of integers
 * }}}
 * According to the above example definitions, the domain '''person''' is composed of
 * 2 constant symbols (i.e. Alice and Bob). Similarly the domain '''time''' is composed
 * of the symbols that belong into the range [1, 100].
 *
 * @param name a name for the MLN definition
 */
sealed abstract class MLNTypeDefinition(name: String) extends MLNDomainExpression {

  /**
   * @return the name of the MLN definition
   */
  def getName: String = name
}
45 |
/**
 * Integer type definition is defined by a name and an interval of integer literals
 * (declared in a .kb file as `name = {from, ..., to}`).
 *
 * @param name a name for the MLN definition
 * @param from the beginning of the interval
 * @param to the end of the interval
 */
case class IntegerTypeDefinition(name: String, from: Int, to: Int) extends MLNTypeDefinition(name)
54 |
/**
 * Constant type definition is defined by a name and a sequence of constants
 * (declared in a .kb file as `name = { Constant1, Constant2, ... }`).
 *
 * @param name a name for the MLN definition
 * @param constants a sequence of constants
 */
case class ConstantTypeDefinition(name: String, constants: Seq[String]) extends MLNTypeDefinition(name)
62 |
63 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/Substitutable.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
/**
 * Enables substitution operation on an expression type. Substitution is a
 * fundamental logical operation, a syntactic transformation on formal expressions
 * that replaces variables, placeholders and symbols by other expressions.
 *
 * @tparam T an expression type
 */
trait Substitutable[T] {

  /**
   * Apply a theta substitution to the expression. Theta is a mapping of
   * logical terms to be substituted (each key is replaced by the term it maps
   * to). The resulting expression is a substitution instance of the original
   * expression type.
   *
   * @param theta a given mapping of logical terms
   * @return a substitution instance of the original expression type
   */
  def substitute(theta: Theta): T
}
42 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/Unify.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
23 | import annotation.tailrec
24 | import LogicOps._
25 |
26 | /**
27 | * An object for applying the logical Unification operator to logical formulas. The Unification
28 | * operator searches for a mapping of terms (theta-substitution) in order to transform the
 * former expression into the latter one.
30 | *
31 | * @example
32 | * {{{
33 | * Unify Happens(x,t) with Happens(Event,t) should give Map((x->Event))
34 | * Unify Happens(x,10) with Happens(Event,t) should give Map((x->Event), (t->10))
35 | * }}}
36 | *
37 | * @see Wikipedia article:
38 | * [[http://en.wikipedia.org/wiki/Unification_(computing)#Definition_of_unification_for_first-order_logic]]
39 | * @see Russell, S.J., Norvig, P., Canny, J.F., Malik, J. and Edwards, D.D.
40 | * Artificial Intelligence: A Modern Approach, Chapter 9.2.2 Unification
41 | * [[http://aima.cs.berkeley.edu/]]
42 | */
object Unify {

  /** The (optional) result of unification: a theta-substitution, or `None` on failure. */
  type ThetaOpt = Option[Theta]

  /**
   * Unifies the given pair of expressions, starting from an empty substitution.
   *
   * @param x the first expression (a term, an atomic formula, or a vector of terms)
   * @param y the second expression, of the same kind as `x`
   * @return the resulting theta-substitution, or `None` if unification fails
   */
  def apply[T](x: T, y: T): ThetaOpt = apply(x, y, Some(Map[Term, Term]()))

  /**
   * Unifies the given pair of expressions under an initial substitution.
   *
   * @param x the first expression
   * @param y the second expression
   * @param theta the substitution computed so far (`None` denotes failure)
   * @return the resulting theta-substitution, or `None` if unification fails
   */
  def apply[T](x: T, y: T, theta: ThetaOpt): ThetaOpt = (x, y) match {
    case (a: Term, b: Term) => unifyTerm(a, b, theta)
    case (a: AtomicFormula, b: AtomicFormula) => unifyAtomicFormula(a, b, theta)
    case (va: Vector[Term @unchecked], vb: Vector[Term @unchecked]) => unifyTerms(va, vb, theta)
    case _ => None
  }

  /**
   * Unifies the given atom against the given formula construct: either directly
   * (when the construct is itself an atom) or against the first enclosed atom
   * having the same signature.
   *
   * @param x the atomic formula to unify
   * @param f the formula construct to unify against
   * @return the resulting theta-substitution, or `None` if unification fails
   */
  def apply(x: AtomicFormula, f: FormulaConstruct): ThetaOpt = unifyFormula(x, f, Some(Map[Term, Term]()))

  // Unifies a pair of terms: identical terms unify trivially, variables are bound
  // via unifyVar, and functions unify when their symbols match and their argument
  // vectors unify element-wise.
  private def unifyTerm(x: Term, y: Term, theta: ThetaOpt): ThetaOpt = theta match {
    case None => None // failure
    case _ =>
      if (x == y) theta
      else (x, y) match {
        case (v: Variable, _) => unifyVar(v, y, theta)
        case (_, v: Variable) => unifyVar(v, x, theta)
        case (a: TermFunction, b: TermFunction) =>
          if (a.symbol == b.symbol) unifyTerms(a.terms, b.terms, theta)
          else None
        case _ => None
      }
  }

  // Unifies two term vectors element-wise; both vectors must have the same length,
  // otherwise the final pattern yields None.
  @tailrec
  private def unifyTerms(x: Vector[Term], y: Vector[Term], theta: ThetaOpt): ThetaOpt = theta match {
    case None => None // failure
    case Some(_) =>
      (x, y) match {
        case (aX +: restX, aY +: restY) => unifyTerms(restX, restY, unifyTerm(aX, aY, theta))
        case (IndexedSeq(), IndexedSeq()) => theta
        case _ => None
      }
  }

  // Atoms unify only when they share the same signature and their terms unify.
  @inline
  private def unifyAtomicFormula(x: AtomicFormula, y: AtomicFormula, theta: ThetaOpt): ThetaOpt = {
    if (x.signature == y.signature) unifyTerms(x.terms, y.terms, theta)
    else None
  }

  // Binds variable `v` to term `x`, following any binding already present in theta.
  // When binding to a function term, the function is first ground by the current
  // substitution and an occurs-check rejects bindings where `v` appears inside it.
  @inline
  private def unifyVar(v: Variable, x: Term, theta: ThetaOpt): ThetaOpt = theta match {
    case None => None // failure
    case Some(m) if m.contains(v) => apply(m(v), x, theta)
    case Some(m) => x match {
      case a: Variable if m.contains(a) => apply(v, m(a), theta)
      case f: TermFunction =>
        val groundFunction = f.substitute(m)
        if (groundFunction.variables.contains(v)) None // failure
        else Some(m + (v -> groundFunction))
      case _ => Some(m + (v -> x))
    }
  }

  // Unifies the atom against the target formula: directly when the target is an
  // atom, otherwise against the first enclosed atom with a matching signature.
  @inline
  private def unifyFormula(srcAtom: AtomicFormula, src: FormulaConstruct, theta: ThetaOpt): ThetaOpt = src match {
    case atom: AtomicFormula => unifyAtomicFormula(srcAtom, atom, theta)
    case _ => src.first(srcAtom.signature) match {
      case Some(targetAtom) => apply(srcAtom, targetAtom, theta)
      case _ => None
    }
  }
}
112 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/compile/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
23 | import enumeratum._
24 | import scala.collection.immutable
25 |
package object compile {

  /** The mode of predicate completion to perform (see the companion enum below). */
  sealed trait PredicateCompletionMode extends EnumEntry

  /**
   * Choose the type of predicate completion:
   *
   * - Standard: standard predicate completion.
   * - Decomposed: computes predicate completion and decomposes the created equivalences into two implications.
   * - Simplification: computes predicate completion and simplifies the formulas based on the created equivalences.
   *
   * @see [[lomrf.logic.compile.PredicateCompletion]]
   */
  object PredicateCompletionMode extends Enum[PredicateCompletionMode] {

    // All declared modes, discovered by enumeratum's findValues macro.
    val values: immutable.IndexedSeq[PredicateCompletionMode] = findValues

    case object Standard extends PredicateCompletionMode
    case object Decomposed extends PredicateCompletionMode
    case object Simplification extends PredicateCompletionMode
  }
}
49 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/dynamic/DynamicAtom.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic.dynamic
22 |
23 | import lomrf.logic.{ Term, AtomicFormula }
24 |
25 | /**
26 | * Represents a dynamic atom. Dynamic atoms are atomic formulas whose
27 | * state is evaluated dynamically according to a state function.
28 | *
29 | * @see [[lomrf.logic.Term]]
30 | *
31 | * @param symbol dynamic atom symbol
32 | * @param terms dynamic atom arguments, i.e., constants, variables or functions
33 | */
class DynamicAtom(override val symbol: String, override val terms: Vector[Term]) extends AtomicFormula(symbol, terms) {
  // Always true: the truth state of a dynamic atom is evaluated by a state
  // function (see [[lomrf.logic.dynamic.DynamicAtomBuilder]]).
  override val isDynamic = true
}
37 |
object DynamicAtom {

  /** Creates a dynamic atom having the given symbol and terms. */
  def apply(symbol: String, terms: Vector[Term]) = new DynamicAtom(symbol, terms)

  /**
   * Extractor for dynamic atoms, yielding the atom symbol and its terms.
   *
   * BUG FIX: the previous implementation pattern-matched on `DynamicAtom(...)`
   * inside `unapply` itself, which recursively invoked this very extractor and
   * could never terminate; it also declared `List[Term]` while `terms` is a
   * `Vector[Term]`. The fields are now extracted directly.
   *
   * @param obj the dynamic atom to deconstruct
   * @return the symbol and terms of the given atom
   */
  def unapply(obj: DynamicAtom): Option[(String, Vector[Term])] =
    if (obj ne null) Some((obj.symbol, obj.terms)) else None
}
46 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/dynamic/DynamicAtomBuilder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic.dynamic
22 |
23 | import lomrf.logic._
24 |
25 | /**
26 | * Dynamic atom builder should be extended by any dynamic atom
27 | * type in order to define its functionality.
28 | */
trait DynamicAtomBuilder {

  /** @return the signature (symbol and arity) of the dynamic atom being built */
  def signature: AtomSignature

  /**
   * @return a function that evaluates the truth state of the dynamic atom, given
   *         the constants (as strings) replacing its arguments
   */
  def stateFunction: Vector[String] => Boolean

  /**
   * Creates a dynamic atom over the given terms.
   *
   * @param terms dynamic atom arguments, i.e., constants, variables or functions
   * @return a dynamic atom instance
   */
  def apply(terms: Vector[Term]): DynamicAtom

  /** Varargs convenience overload of [[apply]]. */
  def apply(terms: Term*): DynamicAtom = apply(terms.toVector)
}
38 |
39 | /**
40 | * An infix dynamic atom.
41 | *
42 | * @param prefixSymbol atom symbol
43 | * @param infixSymbol the infix symbol used for syntactic sugar
44 | * @param terms dynamic atom arguments, i.e., constants, variables or functions
45 | */
private[dynamic] final class DynInfix(
    prefixSymbol: String,
    infixSymbol: String,
    terms: Vector[Term]) extends DynamicAtom(prefixSymbol, terms) {

  require(terms.size == 2, s"Dynamic atom '$prefixSymbol($infixSymbol)' should only have 2 terms.")

  /** Renders the atom in infix notation, e.g. `(a = b)`. */
  override def toText: String = {
    val left = terms.head
    val right = terms.last
    s"(${left.toText} $infixSymbol ${right.toText})"
  }

  /** Applies the substitution to both arguments, yielding a new infix atom. */
  override def substitute(theta: Theta): AtomicFormula = {
    val substituted = terms.map(_.substitute(theta))
    new DynInfix(prefixSymbol, infixSymbol, substituted)
  }
}
58 |
59 | // -- Various dynamic atom builders:
60 |
final class DynEqualsBuilder extends DynamicAtomBuilder {

  /** Signature: Equals/2 */
  override def signature = AtomSignature("Equals", 2)

  /** True when the two constant arguments are identical strings. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.head == constants.last

  override def apply(terms: Vector[Term]) = new DynInfix("Equals", "=", terms)
}
70 |
final class DynNotEqualsBuilder extends DynamicAtomBuilder {

  /** Signature: NotEquals/2 */
  override def signature = AtomSignature("NotEquals", 2)

  /**
   * True when the two constant arguments differ.
   *
   * BUG FIX: the previous state function tested for equality (`==`), i.e., it
   * evaluated identically to `Equals` instead of its negation.
   */
  override def stateFunction: Vector[String] => Boolean =
    (constants: Vector[String]) => constants.head != constants.last

  override def apply(terms: Vector[Term]) = new DynInfix("NotEquals", "!=", terms)
}
80 |
final class DynLessThanBuilder extends DynamicAtomBuilder {

  /** Signature: LessThan/2 */
  override def signature = AtomSignature("LessThan", 2)

  /** True when the first constant, parsed as an integer, is strictly smaller than the second. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.head.toInt < constants.last.toInt

  override def apply(terms: Vector[Term]) = new DynInfix("LessThan", "<", terms)
}
90 |
final class DynLessThanEqBuilder extends DynamicAtomBuilder {

  /** Signature: LessThanEq/2 */
  override def signature = AtomSignature("LessThanEq", 2)

  /** True when the first constant, parsed as an integer, is smaller than or equal to the second. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.head.toInt <= constants.last.toInt

  override def apply(terms: Vector[Term]) = new DynInfix("LessThanEq", "<=", terms)
}
100 |
final class DynGreaterThanBuilder extends DynamicAtomBuilder {

  /** Signature: GreaterThan/2 */
  override def signature = AtomSignature("GreaterThan", 2)

  /** True when the first constant, parsed as an integer, is strictly greater than the second. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.head.toInt > constants.last.toInt

  override def apply(terms: Vector[Term]) = new DynInfix("GreaterThan", ">", terms)
}
110 |
final class DynGreaterThanEqBuilder extends DynamicAtomBuilder {

  /** Signature: GreaterThanEq/2 */
  override def signature = AtomSignature("GreaterThanEq", 2)

  /** True when the first constant, parsed as an integer, is greater than or equal to the second. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.head.toInt >= constants.last.toInt

  override def apply(terms: Vector[Term]) = new DynInfix("GreaterThanEq", ">=", terms)
}
120 |
final class DynSubstringBuilder extends DynamicAtomBuilder {

  /** Signature: Substr/2 */
  override def signature = AtomSignature("Substr", 2)

  /** True when the first constant occurs as a substring of the second. */
  override def stateFunction: Vector[String] => Boolean =
    constants => constants.last.contains(constants.head)

  /** Creates a (prefix-notation) `Substr` dynamic atom over exactly two terms. */
  override def apply(terms: Vector[Term]): DynamicAtom = {
    require(terms.size == 2)
    new DynamicAtom("Substr", terms)
  }
}
132 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/logic/parser/CommonsMLNParser.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic.parser
22 |
23 | import scala.util.matching.Regex
24 | import scala.util.parsing.combinator.{ JavaTokenParsers, RegexParsers }
25 |
26 | /**
27 | * Contains common regular expressions for parsing.
28 | */
trait CommonsMLNParser extends JavaTokenParsers with RegexParsers {

  /**
   * Function expressions, e.g. `foo(A,b)`: a lower-case identifier followed by a
   * parenthesized, comma-separated argument list.
   *
   * BUG FIX: triple-quoted strings perform no escape processing, so the previous
   * `\\(` and `\\)` denoted a literal backslash character followed by a
   * (grouping) parenthesis in the regex; the intended escapes are `\(` and `\)`.
   */
  protected val functionID: Regex = """[a-z]([a-zA-Z0-9]|_[a-zA-Z0-9])*\([\w_\-,]+\)""".r

  // Identifiers starting with a lower-case letter.
  protected val lowerCaseID: Regex = """([a-z][_a-zA-Z0-9]*)""".r
  // Identifiers starting with an upper-case letter, underscore or digit, optionally prefixed by '-'.
  protected val upperCaseID: Regex = """-?([_A-Z0-9][_a-zA-Z0-9]*)""".r

  // A lower-case variable argument, optionally prefixed by '+'.
  protected val variableArg: Regex = """(\+?([a-z]([a-zA-Z0-9]|_[a-zA-Z0-9])*))""".r

  // Either a constant or a variable identifier.
  protected val constOrVar: Regex = """[a-zA-Z0-9]([a-zA-Z0-9]|_[a-zA-Z0-9])*""".r

  // Numeric literals: (possibly negative) decimals and positive integers.
  protected val numDouble: Regex = "-?\\d+(\\.\\d+)?".r
  protected val numPosInt: Regex = "\\d+".r
  // Quantifier keywords.
  protected val exist: Regex = "Exist|EXIST".r
  protected val forall: Regex = "Forall|FORALL".r
  protected val quote: Regex = "\"".r
  // Included knowledge-base file names (must end in '.mln').
  protected val includeID: Regex = """.+\.mln""".r

  // Whitespace also swallows line ('//') and block ('/* ... */') comments.
  protected override val whiteSpace: Regex = """(\s|//.*\n|(/\*(?:.|[\n\r])*?\*/))+""".r
}
49 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/grounding/AtomRegisterWorker.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import akka.actor.{ Actor, ActorRef }
24 | import gnu.trove.map.hash.TIntObjectHashMap
25 | import gnu.trove.set.hash.TIntHashSet
26 | import com.typesafe.scalalogging.LazyLogging
27 |
28 | /**
29 | * AtomRegisterWorker collects a partition of ground atoms, represented by integer values, as well as in which ground
 * clauses they appear in.
31 | *
32 | * @param index the worker index (since we have multiple AtomRegisterWorker instances), it also represents the
33 | * partition index.
34 | * @param master reference to master actor, it is required in order to send the results back to master actor.
35 | */
final class AtomRegisterWorker private (val index: Int, master: ActorRef) extends Actor with LazyLogging {

  import messages._

  /**
   * A collection that keeps the relation between ground atoms and ground clauses.
   */
  private lazy val atomID2CliqueID = new TIntObjectHashMap[TIntHashSet]()

  /**
   * A collection of ground atom ids that have been produced by query predicates.
   */
  private val queryAtomIDs = new TIntHashSet()

  /**
   * Collection of ground atom ids, discovered in previous iterations. Initially, the set is empty.
   */
  private val atomIDs = new TIntHashSet()

  /**
   * Collection of ground atom ids, discovered in the current iteration. Initially, the set is empty.
   */
  private var buffer = new TIntHashSet()

  /**
   * AtomRegisterWorker actor behaviour:
   *
   * - Collects ground query atoms from GroundingWorkers
   * - Collects relations between ground atoms from CliqueRegisters
   *
   * @return a partial function with the actor logic for collecting ground atoms and their relations to ground clauses.
   */
  def receive = {

    // Collect a grounding of a query atom directly from a GroundingWorker.
    case QueryVariable(atomID) => queryAtomIDs.add(atomID)

    // When a grounding iteration is completed (that is determined by the grounding Master), Master sends this
    // message and this worker responds by sending back all collected atom ids:
    //   [Master] -> GRND_Iteration_Completed
    //   CollectedAtomIDs -> [Master]
    case ITERATION_COMPLETED =>
      atomIDs.addAll(buffer)
      master ! CollectedAtomIDs(index, atomIDs)
      buffer = new TIntHashSet() // start a fresh buffer for the next iteration

    // (1) Collect an integer that represents a possible grounding of an atom, from a CliqueRegister. All integer
    //     values are accepted except zero, since it is the reserved value for representing the absence of a
    //     ground atom in the MRF.
    // (2) Collect the relation between a ground atom and a ground clause.
    case RegisterAtom(atomID, cliqueID) =>
      assert(atomID != 0, "atomID cannot be equal to zero.")

      buffer.add(atomID)

      // Associate the atom with the clique (ground clause), creating the set on first sight.
      val cliqueSet = atomID2CliqueID.get(atomID)
      if (cliqueSet == null) {
        val set = new TIntHashSet()
        set.add(cliqueID)
        atomID2CliqueID.put(atomID, set)
      } else cliqueSet.add(cliqueID)

    case msg =>
      // Consistent with GroundingWorker: use string interpolation instead of concatenation.
      logger.error(s"AtomRegister[$index] --- Received an unknown message: $msg")
  }

  /**
   * Before AtomRegisterWorker is stopped, it will first send the results to the master actor.
   *
   * Note: rewritten from deprecated procedure syntax (`def postStop() { ... }`)
   * to an explicit `: Unit =` result type.
   */
  override def postStop(): Unit = {
    master ! AtomsBatch(index, atomID2CliqueID, queryAtomIDs)
  }
}
119 |
private object AtomRegisterWorker {

  /**
   * Creates a new AtomRegisterWorker instance.
   *
   * @param index the worker (partition) index
   * @param master implicit reference to the master actor that receives the results
   * @return a new AtomRegisterWorker instance
   */
  def apply(index: Int)(implicit master: ActorRef): AtomRegisterWorker = new AtomRegisterWorker(index, master)

}
125 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/grounding/ClauseGrounder.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import lomrf.logic.AtomSignature
24 |
25 | /**
26 | *
27 | */
trait ClauseGrounder {

  /** @return the set of atom signatures collected during grounding */
  def collectedSignatures: Set[AtomSignature]

  /**
   * Computes all groundings of the associated clause. Returns Unit — results are
   * communicated as a side effect (see the usage in GroundingWorker).
   */
  def computeGroundings(): Unit

}
35 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/grounding/ClauseLiteralsOrdering.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import lomrf.logic.{ AtomSignature, Literal }
24 | import lomrf.mln.model.{ AtomEvidenceDB, MLN }
25 |
26 | /**
27 | *
28 | *
29 | * To improve the grounding speed, we change the order of clause literals according to their type
30 | * (i.e. dynamic or regular predicates) and a score function.
31 | *
32 | *
33 | *
34 | * - When both literals contain dynamic sentences (e.q. equals, lessThan, etc.), then
35 | * the literal with the lowest number of Variables is placed first
36 | * - When only one literal contains a dynamic sentence, then there are two sub-cases:
37 | * (1) if the other literal contains a sentence with unknown groundings, then the dynamic one
38 | * is placed first. (2) Otherwise, the literal with the lowest number of Variables is placed first.
39 | * - Finally, when both literals are regular (i.e. not dynamic), then the literal with the
40 | * lowest score is placed first:
41 | *
42 | * '''score = (number of unsatisfied - number of unknown)/(number of all groundings)'''
43 | *
44 | * In other words, this score value represents the fraction of tuples (i.e. constants replacing
45 | * variables in the clause) that will remain after the literal is grounded. This heuristic score function
 * is based on the following paper:
47 | *
48 | *
49 | * ''Shavlik, J. and Natarajan, S. Speeding Up Inference in Markov Logic Networks by pre-processing to
50 | * Reduce the Size of the Resulting Grounded Network. In Proceedings of the 21th International
51 | * Joint Conference on Artificial Intelligence (IJCAI), 2009.''
52 | *
53 | *
54 | *
55 | *
56 | *
57 | */
class ClauseLiteralsOrdering(atomStateDB: Map[AtomSignature, AtomEvidenceDB]) extends Ordering[Literal] {

  /**
   * Compares two clause literals according to the heuristic documented on this
   * class: dynamic literals (which have no evidence database) are marked with a
   * NaN score; regular literals are scored by the fraction of unsatisfied or
   * unknown groundings in their evidence database.
   *
   * @param x the first literal
   * @param y the second literal
   * @return a negative value when `x` should precede `y`, a positive value for
   *         the opposite, and zero when their order is indifferent
   */
  def compare(x: Literal, y: Literal): Int = {
    // Evidence databases are only accessed for non-dynamic literals below.
    val xDB = atomStateDB.getOrElse(x.sentence.signature, null)
    val yDB = atomStateDB.getOrElse(y.sentence.signature, null)

    val scoreX =
      if (x.sentence.isDynamic) Double.NaN
      else {
        val satX = if (x.isNegative) xDB.numberOfFalse else xDB.numberOfTrue
        val unsatX = xDB.length - satX
        (unsatX + xDB.numberOfUnknown) / xDB.length.toDouble
      }

    val scoreY =
      if (y.sentence.isDynamic) Double.NaN
      else {
        val satY = if (y.isNegative) yDB.numberOfFalse else yDB.numberOfTrue
        val unsatY = yDB.length - satY
        (unsatY + yDB.numberOfUnknown) / yDB.length.toDouble
      }

    // BUG FIX: the previous implementation pattern-matched on `Double.NaN`
    // (e.g. `case (Double.NaN, Double.NaN)`), but `NaN == NaN` is always false,
    // so those cases could never match. Every comparison involving a dynamic
    // literal fell through to the plain score comparison, which returns 0 for
    // NaN operands. Test with `isNaN` instead.
    if (scoreX.isNaN && scoreY.isNaN) {
      // Both dynamic: the literal with the fewest variables goes first.
      x.sentence.variables.size.compare(y.sentence.variables.size)
    } else if (scoreX.isNaN) {
      // Only x is dynamic: place it first when y has unknown groundings.
      if (yDB.numberOfUnknown > 0) -1
      else x.sentence.variables.size.compare(y.sentence.variables.size)
    } else if (scoreY.isNaN) {
      // Only y is dynamic: place it first when x has unknown groundings.
      if (xDB.numberOfUnknown > 0) 1
      else x.sentence.variables.size.compare(y.sentence.variables.size)
    } else {
      // Both regular: the literal with the lowest score goes first.
      if (scoreX < scoreY) -1
      else if (scoreX > scoreY) 1
      else 0
    }
  }
}
110 |
object ClauseLiteralsOrdering {

  /** Creates a literal ordering from a map of atom evidence databases. */
  def apply(atomStateDB: Map[AtomSignature, AtomEvidenceDB]): ClauseLiteralsOrdering =
    new ClauseLiteralsOrdering(atomStateDB)

  /** Creates a literal ordering from the evidence databases of the given MLN. */
  def apply(mln: MLN): ClauseLiteralsOrdering =
    new ClauseLiteralsOrdering(mln.evidence.db)
}
117 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/grounding/GroundingWorker.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import akka.actor.{ Actor, ActorRef }
24 | import com.typesafe.scalalogging.LazyLogging
25 | import lomrf.mln.model.MLN
26 | import lomrf.util.collection.IndexPartitioned
27 |
28 | /**
29 | * Actor responsible for running grounding for each FOL clause in the MLN theory.
30 | *
31 | * @param mln MLN instance, containing the collection of clauses to ground.
32 | * @param cliqueRegisters a partitioned collection of all clique register actors.
33 | * @param noNegWeights flag to eliminate negative weights. If it is true the sign of negative weights in clauses is
34 | * inverted, as well as all disjunctions become conjunctions (de Morgan's law).
35 | * @param eliminateNegatedUnit When it is true, unit clauses with negative literal become unit clauses with positive
36 | * literal and inverted sign in their corresponding weight.
37 | */
final class GroundingWorker private (
    mln: MLN,
    cliqueRegisters: IndexPartitioned[ActorRef],
    noNegWeights: Boolean,
    eliminateNegatedUnit: Boolean) extends Actor with LazyLogging {

  import messages._

  /**
   * @return a partial function with the actor logic for clause grounding.
   */
  def receive = {

    // Ground the received clause and report the collected atom signatures back to the sender (the Master).
    case Ground(clause, clauseIndex, atomSignatures, atomsDB) =>
      val grounder = new ClauseGrounderImpl(clause, clauseIndex, mln, cliqueRegisters, atomSignatures, atomsDB, noNegWeights, eliminateNegatedUnit)
      grounder.computeGroundings()
      logger.debug("Grounding completed for clause " + clause)
      // Consistency fix: use the method form `sender()` everywhere (the
      // parameterless form is deprecated in Akka classic; the error case below
      // already used `sender()`).
      sender() ! Signatures(grounder.collectedSignatures)

    case msg =>
      logger.error(s"GroundingWorker --- Received an unknown message '$msg' from ${sender().toString()}")
  }

}
62 |
private object GroundingWorker {

  /**
   * Creates a new GroundingWorker instance.
   *
   * @param mln MLN instance, containing the collection of clauses to ground.
   * @param cliqueRegisters a partitioned collection of all clique register actors.
   * @param noNegWeights flag to eliminate negative weights. If it is true the sign of negative weights in clauses is
   *                     inverted, as well as all disjunctions become conjunctions (de Morgan's law).
   * @param eliminateNegatedUnit When it is true, unit clauses with negative literal become unit clauses with positive
   *                             literal and inverted sign in their corresponding weight.
   * @return a new GroundingWorker instance.
   */
  def apply(mln: MLN, cliqueRegisters: IndexPartitioned[ActorRef], noNegWeights: Boolean = false, eliminateNegatedUnit: Boolean = false): GroundingWorker =
    new GroundingWorker(mln, cliqueRegisters, noNegWeights, eliminateNegatedUnit)
}
79 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/grounding/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln
22 |
23 | import java.{ util => jutil }
24 |
25 | import gnu.trove.map.{ TIntFloatMap, TIntObjectMap }
26 | import gnu.trove.map.hash.TIntObjectHashMap
27 | import gnu.trove.set.TIntSet
28 | import gnu.trove.set.hash.TIntHashSet
29 | import lomrf.logic.{ AtomSignature, Clause }
30 | import lomrf.util.collection.IndexPartitioned
31 |
package object grounding {

  // ----------------------------------------
  // Types
  // ----------------------------------------

  // NOTE(review): presumably maps a ground-clause (clique) id to the weight
  // contributions of the clauses it originates from — confirm against the
  // CliqueRegister implementation before relying on this.
  type DependencyMap = TIntObjectMap[TIntFloatMap]

  // ----------------------------------------
  // Messages
  // ----------------------------------------

  /**
   * Messages exchanged between the grounding actors (Master, GroundingWorker,
   * CliqueRegister and AtomRegister workers).
   */
  private[grounding] object messages {

    /**
     * Message for requesting the final ground MRF from the Master actor.
     */
    case object REQUEST_RESULTS

    /**
     * Notification for the completion of a grounding iteration
     */
    case object ITERATION_COMPLETED

    /**
     * Notification for the completion of the grounding procedure
     */
    case object GROUNDING_COMPLETED

    /**
     * Notification that a Clique Register has sent all Atom IDs to AtomRegister workers
     */
    case object REGISTRATION_COMPLETED

    // The aggregated result of the grounding procedure, partitioned by worker index.
    case class Result(
        cliques: IndexPartitioned[TIntObjectMap[CliqueEntry]],
        atom2Cliques: IndexPartitioned[TIntObjectMap[TIntHashSet]],
        queryAtomIDs: IndexPartitioned[TIntSet],
        dependencyMap: Option[IndexPartitioned[DependencyMap]] = None)

    // GroundingWorker -> Master
    case class Signatures(collectedSignatures: Set[AtomSignature])

    // Master -> GroundingWorker
    case class Ground(clause: Clause, clauseIndex: Int, atomSignatures: Set[AtomSignature], atomsDB: IndexPartitioned[TIntSet])

    // GroundingWorker -> CliqueRegister
    case class CliqueEntry(hashKey: Int, var weight: Double, variables: Array[Int], clauseID: Int, freq: Int) {

      // Hash code is the precomputed hashKey.
      override def hashCode() = hashKey

      // NOTE: equality compares hashKey, the (mutable!) weight and the contents
      // of the variables array — clauseID and freq do not participate. Since
      // `weight` is a var, an entry's equality may change after mutation, while
      // its hash code stays stable (it depends only on hashKey).
      override def equals(obj: Any): Boolean = obj match {
        case other: CliqueEntry =>
          other.hashKey == hashKey && other.weight == weight && jutil.Arrays.equals(other.variables, variables)
        case _ => false
      }

      override def toString: String =
        s"CliqueEntry(hashKey=$hashKey, weight=$weight, variables=[${variables.mkString(",")}], clauseID=$clauseID, freq=$freq)"
    }

    // Master -> AtomRegister
    case class CollectedAtomIDs(atomRegisterIdx: Int, atomIDs: TIntSet)

    // AtomRegister -> Master
    case class AtomsBatch(index: Int, registry: TIntObjectHashMap[TIntHashSet], queryAtomIDs: TIntSet)

    // CliqueRegister -> AtomRegister
    case class RegisterAtom(atomID: Int, cliqueID: Int)

    // CliqueRegister -> Master
    case class CollectedCliques(index: Int, cliques: TIntObjectMap[CliqueEntry], dependencyMap: Option[DependencyMap] = None)

    // The starting clique id for a CliqueRegister.
    case class StartID(id: Int)

    // Reports how many cliques a register holds.
    case class NumberOfCliques(index: Int, size: Int)

    // A ground atom id produced by a query predicate.
    case class QueryVariable(atomID: Int)

  }

}
113 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/inference/RoundingScheme.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.inference
22 |
23 | import enumeratum._
24 | import scala.collection.immutable
25 |
/**
 * Enumeration of the available rounding schemes for inference.
 *
 * NOTE(review): the precise semantics of `RoundUp` and `SAT` are defined by the
 * solvers in this package — confirm against their usage.
 */
sealed trait RoundingScheme extends EnumEntry

object RoundingScheme extends Enum[RoundingScheme] {

  // All declared schemes, discovered by enumeratum's findValues macro.
  val values: immutable.IndexedSeq[RoundingScheme] = findValues

  case object RoundUp extends RoundingScheme
  case object SAT extends RoundingScheme
}
35 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/inference/Solver.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.inference
22 |
23 | import java.io.PrintStream
24 | import java.text.DecimalFormat
25 | import com.typesafe.scalalogging.LazyLogging
26 | import lomrf.mln.model.mrf.{ MRF, MRFState }
27 | import lomrf.mln.model.AtomIdentityFunctionOps._
28 | import scala.util.{ Failure, Success }
29 |
sealed trait Solver extends LazyLogging {

  // The ground Markov Random Field used for performing inference.
  protected val mrf: MRF

  /**
   * Performs inference.
   *
   * @see [[lomrf.mln.model.mrf.MRFState]]
   *
   * @return an MRF state holding the inferred truth values
   */
  def infer: MRFState
}
44 |
trait MAPSolver extends Solver {

  /**
    * Write the results of inference into a selected output stream. Each query
    * ground atom is printed along its inferred truth value (0 or 1).
    *
    * @param out an output stream (default is console)
    * @param outputAll show 0/1 results for all query atoms (default is true)
    */
  def writeResults(out: PrintStream = System.out, outputAll: Boolean = true): Unit = {

    val queryStartID = mrf.mln.space.queryStartID
    val queryEndID = mrf.mln.space.queryEndID

    val iterator = mrf.atoms.iterator

    while (iterator.hasNext) {
      iterator.advance()
      val atomID = iterator.key

      // only atoms in the query atom id range are reported
      if (atomID >= queryStartID && atomID <= queryEndID) {
        val groundAtom = iterator.value
        val state = if (groundAtom.getState) 1 else 0

        atomID.decodeAtom(mrf.mln) match {
          case Success(txtAtom) if outputAll || state == 1 => out.println(s"$txtAtom $state")
          // BUGFIX: without this case, a successfully decoded false atom while
          // outputAll is disabled fell through to a MatchError at runtime.
          case Success(_) => // skip: atom is false and outputAll is disabled
          case Failure(f) => logger.error(s"Failed to decode id: $atomID", f)
        }
      }
    }
  }

}
77 |
trait MarginalSolver extends Solver {

  // the number of samples used for calculating the probabilities
  protected val samples: Int

  /**
    * Write the results of inference into a selected output stream. For each
    * query ground atom the marginal probability is estimated as the fraction
    * of samples in which the atom was true.
    *
    * @param result an output stream (default is console)
    */
  def writeResults(result: PrintStream = System.out): Unit = {
    val numFormat = new DecimalFormat("0.0######")

    val queryStartID = mrf.mln.space.queryStartID
    val queryEndID = mrf.mln.space.queryEndID

    val iterator = mrf.atoms.iterator
    while (iterator.hasNext) {
      iterator.advance()
      val atomID = iterator.key

      // only atoms in the query atom id range are reported
      if (atomID >= queryStartID && atomID <= queryEndID) {
        val groundAtom = iterator.value
        val probability = groundAtom.getTruesCount.toDouble / samples

        atomID.decodeAtom(mrf.mln) match {
          case Success(txtAtom) => result.println(s"$txtAtom ${numFormat.format(probability)}")
          // Log the underlying cause instead of silently dropping it
          // (consistent with the error handling in MAPSolver).
          case Failure(f) => logger.error(s"Failed to decode id: $atomID", f)
        }
      }
    }
  }

}
112 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/structure/CommonModeParser.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.structure
22 |
23 | import scala.util.matching.Regex
24 | import scala.util.parsing.combinator.{ JavaTokenParsers, RegexParsers }
25 |
26 | /**
27 | * Regular expressions for mode declaration parser
28 | */
29 | trait CommonModeParser extends JavaTokenParsers with RegexParsers {
30 |
31 | protected val arity: Regex = """([1-9][0-9]*)""".r
32 | protected val recall: Regex = """([0]|[1-9][0-9]*|\*)""".r
33 | protected val predicate: Regex = """([A-Z0-9]([a-zA-Z0-9]|_[a-zA-Z0-9])*)""".r
34 | protected val function: Regex = """([a-z]([a-zA-Z0-9]|_[a-zA-Z0-9])*)""".r
35 | protected val placeMarker: Regex = """(#)?(n)?(o)?(p)?(\+|-|\.)""".r
36 |
37 | protected override val whiteSpace: Regex = """(\s|//.*\n|(/\*(?:.|[\n\r])*?\*/))+""".r
38 | }
39 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/graph/caching/NodeCache.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.graph.caching
22 |
23 | import com.typesafe.scalalogging.LazyLogging
24 | import lomrf.logic.AtomSignature
25 | import lomrf.mln.learning.supervision.graph.Node
26 |
/**
  * A cache of unique nodes that maintains a count for each stored node.
  */
trait NodeCache extends LazyLogging {

  // Monitor flag: signals that the cache contents have been modified.
  private[graph] var hasChanged: Boolean = false

  // NOTE(review): presumably enables a Hoeffding-bound test over the cached
  // counts — confirm against the concrete cache implementations.
  val useHoeffdingBound: Boolean

  // NOTE(review): the query atom signature this cache is associated with — confirm.
  val querySignature: AtomSignature

  /**
    * @return the number of unique nodes in the cache
    */
  def size: Int

  /**
    * @return the number of positive unique nodes in the cache
    */
  def numberOfPositive: Int

  /**
    * @return the number of negative unique nodes in the cache
    */
  def numberOfNegative: Int

  /**
    * @param node a node
    * @return an Option value containing the counts of the given node, or None
    *         if the node does not exist in the cache.
    */
  def get(node: Node): Option[Long]

  /**
    * @param node a node
    * @return the counts of the given node or the result of the default computation
    *         if the node does not exist in the cache.
    */
  def getOrElse(node: Node, default: => Long): Long

  /**
    * @param node a node
    * @return true if the node exists in the cache
    */
  def contains(node: Node): Boolean

  /**
    * Add a node to the cache.
    *
    * @param node a node to be added
    * @return a node cache that contains all nodes of the current cache
    *         along with the given node.
    */
  def +(node: Node): NodeCache

  /**
    * Remove a node from the cache.
    *
    * @param node a node to be removed
    * @return a node cache that contains all nodes of the current cache
    *         except the given node.
    */
  def -(node: Node): NodeCache

  /**
    * Add a sequence of nodes to the cache.
    *
    * @param nodes a sequence of nodes
    * @return a node cache containing all nodes of the current cache
    *         along with the given sequence of nodes.
    */
  def ++(nodes: Seq[Node]): NodeCache

  /**
    * Remove a sequence of nodes from the cache.
    *
    * @param nodes a sequence of nodes
    * @return a node cache containing all nodes of the current cache
    *         except the given sequence of nodes.
    */
  def --(nodes: Seq[Node]): NodeCache

  /**
    * Collects all unique nodes.
    *
    * @return all unique nodes in the cache
    */
  def collectNodes: IndexedSeq[Node]
}
113 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/graph/caching/NodeHashSet.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.graph.caching
22 |
23 | import gnu.trove.map.hash.TCustomHashMap
24 | import lomrf.mln.learning.supervision.graph.Node
25 | import scala.collection.convert.ImplicitConversionsToScala._
26 |
27 | /**
28 | * Node set used for grouping similar nodes together
29 | * according to a given hashing function.
30 | *
31 | * @param strategy a hashing strategy
32 | */
33 | private[graph] class NodeHashSet(strategy: NodeHashStrategy = new BodyStrategy)
34 | extends TCustomHashMap[Node, Node](strategy) {
35 |
36 | def +=(n: Node): Unit = {
37 | if (this.contains(n)) this.get(n).similarNodeQueryAtoms ++= (n.similarNodeQueryAtoms + n.query)
38 | else super.put(n, n)
39 | }
40 |
41 | def collectNodes: IndexedSeq[Node] = keySet.toIndexedSeq
42 | }
43 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/graph/caching/NodeHashStrategy.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.graph.caching
22 |
23 | import gnu.trove.strategy.HashingStrategy
24 | import lomrf.mln.learning.supervision.graph.Node
25 |
26 | sealed trait NodeHashStrategy extends HashingStrategy[Node]
27 |
28 | /**
29 | * A hashing strategy that compares nodes according to their clausal from.
30 | */
31 | final class ClauseStrategy extends NodeHashStrategy {
32 |
33 | override def computeHashCode(n: Node): Int = n.clause.get.literals
34 | .map(l => l.sentence.symbol.## ^ l.sentence.constants.## ^ l.sentence.variables.size)
35 | .foldLeft(1)(_ ^ _)
36 |
37 | override def equals(n1: Node, n2: Node): Boolean = n1.clause.get =~= n2.clause.get
38 | }
39 |
40 | /**
41 | * A hashing strategy that compares nodes according to the body of their clausal form.
42 | */
43 | final class BodyStrategy extends NodeHashStrategy {
44 |
45 | override def computeHashCode(n: Node): Int = n.literals
46 | .map(l => l.sentence.symbol.## ^ l.sentence.constants.## ^ l.sentence.variables.size)
47 | .foldLeft(1)(_ ^ _)
48 |
49 | override def equals(n1: Node, n2: Node): Boolean = n1.body.get =~= n2.body.get
50 | }
51 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/graph/selection/Clustering.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.graph.selection
22 |
23 | import com.typesafe.scalalogging.LazyLogging
24 | import lomrf.mln.learning.supervision.graph.Node
25 | import lomrf.mln.learning.supervision.graph.caching.NodeCache
26 |
27 | /**
28 | * @param maxDensity clusters maximum density
29 | */
30 | case class Clustering(maxDensity: Double, retainNoise: Boolean = true) extends LazyLogging {
31 |
32 | def cluster(nodes: Seq[Node], cache: NodeCache): Set[NodeCluster] = {
33 |
34 | val (positiveNodes, negativeNodes) = nodes.sortWith(_.size > _.size).partition(_.isPositive)
35 |
36 | if (positiveNodes.nonEmpty && negativeNodes.nonEmpty) {
37 |
38 | var pClusters = Set(NodeCluster.fromNodes(Set(positiveNodes.maxBy(_.size)), Some(cache)))
39 | var nClusters = Set(NodeCluster.fromNodes(Set(negativeNodes.maxBy(_.size)), Some(cache)))
40 |
41 | pClusters = positiveNodes.filterNot(pClusters.head.contains).foldLeft(pClusters) {
42 | case (clusters, node) =>
43 | clusters.filter(_.nodes.exists(node.subsumes)) match {
44 | case set if set.isEmpty => clusters + NodeCluster.fromNodes(Seq(node), Some(cache))
45 | case set if set.nonEmpty =>
46 | val c = set.maxBy(_.density)
47 | (clusters - c) + (c + (node, Some(cache)))
48 | }
49 | }
50 |
51 | if (!retainNoise) pClusters = pClusters.filter(_.nodes.exists(n => cache.getOrElse(n, 0) > 1))
52 |
53 | logger.info {
54 | s"""
55 | |Positive clusters:
56 | |${pClusters.map(c => c.majorityPrototype(cache).toText(cache, nodes.flatMap(cache.get).sum)).mkString("\n")}
57 | |""".stripMargin
58 | }
59 |
60 | nClusters = negativeNodes.filterNot(nClusters.head.contains).foldLeft(nClusters) {
61 | case (clusters, node) =>
62 | clusters.filter(_.nodes.exists(node.subsumes)) match {
63 | case set if set.isEmpty => clusters + NodeCluster.fromNodes(Seq(node), Some(cache))
64 | case set if set.nonEmpty =>
65 | val c = set.maxBy(_.density)
66 | (clusters - c) + (c + (node, Some(cache)))
67 | }
68 | }
69 |
70 | if (!retainNoise) nClusters = nClusters.filter(_.nodes.exists(n => cache.getOrElse(n, 0) > 1))
71 |
72 | logger.info {
73 | s"""
74 | |Negative clusters:
75 | |${nClusters.map(c => c.majorityPrototype(cache).toText(cache, nodes.flatMap(cache.get).sum)).mkString("\n")}
76 | |""".stripMargin
77 | }
78 |
79 | if (maxDensity >= 1) pClusters ++ nClusters
80 | else {
81 | logger.info(s"Keeping only $maxDensity of the total density:")
82 | val totalMass = nodes.flatMap(cache.get).sum.toDouble
83 | val maxP = pClusters.maxBy(_.density)
84 | val maxN = nClusters.maxBy(_.density)
85 | var rest = (pClusters - maxP) ++ (nClusters - maxN)
86 | var clusters = Set(maxP, maxN)
87 | while (clusters.map(_.density).sum / totalMass < maxDensity && rest.nonEmpty) {
88 | val next = rest.maxBy(_.density)
89 | clusters += next
90 | rest -= next
91 | }
92 | logger.info(s"${
93 | clusters.map(c => c.majorityPrototype(cache).toText(cache, nodes.flatMap(cache.get).sum))
94 | .mkString("\n")
95 | }")
96 |
97 | clusters
98 | }
99 |
100 | } else Set(
101 | NodeCluster.fromNodes(positiveNodes, Some(cache)),
102 | NodeCluster.fromNodes(negativeNodes, Some(cache))
103 | )
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/graph/selection/NodeCluster.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.graph.selection
22 |
23 | import lomrf.mln.learning.supervision.graph.Node
24 | import lomrf.mln.learning.supervision.graph.caching.NodeCache
25 | import lomrf.mln.learning.supervision.metric.Feature
26 |
case class NodeCluster(prototype: Set[Feature], nodes: Set[Node], density: Long) {

  /** @return true if every node in the cluster is positive */
  def isPositive: Boolean = nodes.forall(_.isPositive)

  /** @return true if at least one node in the cluster is positive */
  def hasPositive: Boolean = nodes.exists(_.isPositive)

  /** @return true if every node in the cluster is negative */
  def isNegative: Boolean = nodes.forall(_.isNegative)

  /** @return true if at least one node in the cluster is negative */
  def hasNegative: Boolean = nodes.exists(_.isNegative)

  /** @return true if the cluster holds no nodes */
  def isEmpty: Boolean = nodes.isEmpty

  /** @return true if the cluster holds at least one node */
  def nonEmpty: Boolean = nodes.nonEmpty

  /** @return true if the given node belongs to the cluster */
  def contains(node: Node): Boolean = nodes.contains(node)

  /**
    * Reduces the prototype by keeping only the features that appear in the
    * majority of the cluster nodes, weighted by their cached counts.
    *
    * @param nodeCache a node cache holding node counts
    * @return a cluster having the reduced prototype
    */
  def majorityPrototype(nodeCache: NodeCache): NodeCluster = {
      // total cached mass of a set of nodes (each node counts at least once)
      def mass(ns: Set[Node]): Long = ns.foldLeft(0L)((total, n) => total + nodeCache.getOrElse(n, 1L))

    val majorityFeatures = prototype.filter { feature =>
      val (having, notHaving) = nodes.partition(_.features.contains(feature))
      mass(having) > mass(notHaving)
    }
    new NodeCluster(majorityFeatures, nodes, density)
  }

  /**
    * Adds a node to the cluster.
    *
    * @param node a node to be added
    * @param nodeCache a node cache holding node counts, if any
    * @return an extended cluster containing the given node
    */
  def +(node: Node, nodeCache: Option[NodeCache]): NodeCluster = {
    val nodeMass = nodeCache.fold(1L)(_.getOrElse(node, 1L))
    new NodeCluster(prototype ++ node.features, nodes + node, density + nodeMass)
  }

  /**
    * Adds a sequence of nodes to the cluster.
    *
    * @param otherNodes a sequence of nodes to be added
    * @param nodeCache a node cache holding node counts, if any
    * @return an extended cluster containing the given nodes
    */
  def ++(otherNodes: Seq[Node], nodeCache: Option[NodeCache]): NodeCluster = {
    val addedMass = otherNodes.foldLeft(0L) { (total, node) =>
      total + nodeCache.fold(1L)(_.getOrElse(node, 1L))
    }
    new NodeCluster(prototype ++ otherNodes.flatMap(_.features), nodes ++ otherNodes, density + addedMass)
  }

  /**
    * Renders the cluster along the cached counts of its nodes.
    *
    * @param nodeCache a node cache holding node counts
    * @param totalMass the total density mass (when non-zero, the relative density is shown)
    * @return a textual description of the cluster
    */
  def toText(nodeCache: NodeCache, totalMass: Double = 0): String = {
    val densityText = if (totalMass != 0) "(" + f"${density / totalMass}%1.4f" + "%)" else ""
    val nodesText = nodes.map(n => "* " + n.toText + " : " + nodeCache.getOrElse(n, 1L)).mkString("\n")
    s"""
       |Prototype: ${prototype.mkString(", ")}
       |Density: $density $densityText
       |Nodes:
       |$nodesText
       |""".stripMargin
  }

  override def toString: String = {
    val nodesText = nodes.map(n => s"* ${n.toText}").mkString("\n")
    s"""
       |Prototype: ${prototype.mkString(", ")}
       |Density: $density
       |Nodes:
       |$nodesText
       |""".stripMargin
  }
}
81 |
object NodeCluster {

  /** @return a cluster having no nodes and an empty prototype */
  def emptyCluster: NodeCluster = new NodeCluster(Set.empty, Set.empty, 0)

  /**
    * Constructs a cluster from the given nodes.
    *
    * @param nodes a collection of nodes
    * @param nodeCache a node cache holding node counts, if any
    * @return a cluster holding the given nodes
    */
  def fromNodes(nodes: Iterable[Node], nodeCache: Option[NodeCache]): NodeCluster = {
    val mass = nodes.foldLeft(0L) { (total, node) =>
      total + nodeCache.fold(1L)(_.getOrElse(node, 1L))
    }
    new NodeCluster(nodes.flatMap(_.features).toSet, nodes.toSet, mass)
  }
}
94 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/metric/AtomMetric.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.metric
22 |
23 | import lomrf.logic._
24 |
25 | /**
26 | * A atomic metric is a distance for atomic formulas that measures the
27 | * structural distance of atoms by ignoring the variables.
28 | *
29 | * @param matcher a matcher function
30 | * @param selectedFeatures a map from features to binary indicator values
31 | */
32 | case class AtomMetric(
33 | matcher: Matcher,
34 | selectedFeatures: Option[Map[Feature, Int]] = None) extends StructureMetric[AtomicFormula] {
35 |
36 | /**
37 | * A reduced metric using only selected features for computing
38 | * the distance. Everything else is ignored.
39 | *
40 | * @note All weights should be either 0 or 1.
41 | *
42 | * @param weights a map from features to binary values
43 | * @return a weighted metric
44 | */
45 | override def havingWeights(weights: Map[Feature, Double]): StructureMetric[AtomicFormula] = {
46 | require(weights.forall { case (_, w) => w == 0 || w == 1 }, "All weights should be 0 or 1.")
47 | copy(selectedFeatures = Some(weights.mapValues(_.toInt)))
48 | }
49 |
50 | /**
51 | * Distance for atoms. The function must obey to the following properties:
52 | *
53 | * {{{
54 | * 1. d(x, y) >= 0 for all x, y and d(x, y) = 0 if and only if x = y
55 | * 2. d(x, y) = d(y, x) for all x, y
56 | * 3. d(x, y) + d(y, z) >= d(x, z) for all x, y, z (triangle inequality)
57 | * }}}
58 | *
59 | * @param xAtom an atom
60 | * @param yAtom another atom
61 | * @return a distance for the given atoms
62 | */
63 | override def distance(xAtom: AtomicFormula, yAtom: AtomicFormula): Double =
64 | if (xAtom.signature != yAtom.signature) 1
65 | else if (xAtom.constants.isEmpty) 0 // in case no constants exist, distance should be zero
66 | else termSeqDistance(xAtom.terms, yAtom.terms)
67 |
68 | /**
69 | * Distance for term sequences.
70 | *
71 | * @param termSeqA a term sequence
72 | * @param termSeqB another term sequence
73 | * @return a distance in the interval [0, 1] for the given term sequences
74 | */
75 | @inline private def termSeqDistance(termSeqA: IndexedSeq[Term], termSeqB: IndexedSeq[Term]): Double =
76 | (termSeqA zip termSeqB).map { case (a, b) => termDistance(a, b) }.sum / (2d * termSeqA.count(!_.isVariable))
77 |
78 | /**
79 | * Distance for individual terms.
80 | *
81 | * @note If the given term is a term function, then the distance for their
82 | * corresponding term functions are measured.
83 | *
84 | * @param xTerm a term
85 | * @param yTerm another term
86 | * @return a distance in the interval [0, 1] for the given terms.
87 | */
88 | @inline private def termDistance(xTerm: Term, yTerm: Term): Double = (xTerm, yTerm) match {
89 | case (x: Constant, y: Constant) if x.symbol == y.symbol => 0
90 | case (_: Variable, _: Variable) => 0
91 | case (x: TermFunction, y: TermFunction) if x.signature == y.signature => termSeqDistance(x.terms, y.terms)
92 | case _ => 1
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/metric/BinaryMetric.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.metric
22 |
23 | import lomrf.logic.AtomicFormula
24 |
25 | /**
26 | * A binary metric is a very simple distance for atomic formulas where an atom
27 | * has zero distance to another atom only if they are identical. Otherwise, their
28 | * distance is always one.
29 | *
30 | * @param matcher a matcher function
31 | * @param selectedFeatures a map from features to binary indicator values
32 | */
33 | case class BinaryMetric(
34 | matcher: Matcher,
35 | selectedFeatures: Option[Map[Feature, Int]] = None) extends StructureMetric[AtomicFormula] {
36 |
37 | /**
38 | * A reduced metric using only selected features for computing
39 | * the distance. Everything else is ignored.
40 | *
41 | * @note All weights should be either 0 or 1.
42 | *
43 | * @param weights a map from features to binary values
44 | * @return a weighted metric
45 | */
46 | override def havingWeights(weights: Map[Feature, Double]): StructureMetric[AtomicFormula] = {
47 | require(weights.forall { case (_, w) => w == 0 || w == 1 }, "All weights should be 0 or 1.")
48 | copy(selectedFeatures = Some(weights.mapValues(_.toInt)))
49 | }
50 |
51 | /**
52 | * Distance for atoms. The function must obey to the following properties:
53 | *
54 | * {{{
55 | * 1. d(x, y) >= 0 for all x, y and d(x, y) = 0 if and only if x = y
56 | * 2. d(x, y) = d(y, x) for all x, y
57 | * 3. d(x, y) + d(y, z) >= d(x, z) for all x, y, z (triangle inequality)
58 | * }}}
59 | *
60 | * @param xAtom an atom
61 | * @param yAtom another atom
62 | * @return a distance for the given atoms
63 | */
64 | override def distance(xAtom: AtomicFormula, yAtom: AtomicFormula): Double =
65 | if (xAtom.signature != yAtom.signature || xAtom.constants != yAtom.constants) 1 else 0
66 | }
67 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/metric/Feature.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.metric
22 |
23 | import lomrf.logic.{ AtomSignature, AtomicFormula }
24 | import scala.language.implicitConversions
25 |
26 | /**
27 | * @param signature an atom signature
28 | * @param constantArgs a sequence of constants or function signatures
29 | */
30 | case class Feature(signature: AtomSignature, constantArgs: Set[String]) {
31 |
32 | override def toString: String =
33 | if (constantArgs.isEmpty) signature.toString
34 | else s"$signature[${constantArgs.mkString(",")}]"
35 | }
36 |
object Feature {

  /** Implicitly converts an atom signature into a feature having no constant arguments. */
  implicit def fromAtomSignature(signature: AtomSignature): Feature =
    Feature(signature, Set.empty)

  /** Implicitly converts an atomic formula into a feature holding its function signatures and constants. */
  implicit def fromAtomicFormula(atom: AtomicFormula): Feature = {
    val functionSignatures = atom.functions.map(_.signature.toString)
    val constantSymbols = atom.constants.map(_.symbol)
    Feature(atom.signature, functionSignatures ++ constantSymbols)
  }
}
45 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/metric/Matcher.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision.metric
22 |
23 | import breeze.optimize.linear.KuhnMunkres
24 |
25 | /**
26 | * Matcher is any object that solves an assignment problem. The problem consists of finding
27 | * a maximum cost matching (or a minimum cost perfect matching) in a bipartite graph. The input
28 | * graph is usually represented as a cost matrix. Zero values define the absence of edges.
29 | *
30 | * === General Formulation ===
31 | *
32 | * Each problem instance has a number of agents and a number of tasks. Any agent can be assigned to
33 | * any task, incurring a cost that may vary depending on the agent-task assignment. It is required
34 | * that all tasks are assigned to exactly one agent in such a way that the total cost is minimized.
35 | * In case the numbers of agents and tasks are equal and the total cost of the assignment for all tasks
36 | * is equal to the sum of the costs for each agent then the problem is called the linear assignment problem.
37 | *
38 | * @see https://en.wikipedia.org/wiki/Assignment_problem
39 | */
40 | trait Matcher extends (CostMatrix[Double] => (Array[Int], Double))
41 |
42 | /**
43 | * The Hungarian matcher is a combinatorial optimization algorithm that solves the assignment problem in
44 | * polynomial time O(n^3).
45 | *
46 | * @see https://en.wikipedia.org/wiki/Hungarian_algorithm
47 | */
48 | object HungarianMatcher extends Matcher {
49 |
50 | /**
51 | * It solves the assignment problem for the given cost matrix. The cost
52 | * matrix represents the costs for each edge in the graph.
53 | *
54 | * @param costMatrix the bipartite graph cost matrix
55 | * @return the cost of the optimal assignment
56 | */
57 | override def apply(costMatrix: CostMatrix[Double]): (Array[Int], Double) = {
58 | val unmatched = math.abs(costMatrix.length - costMatrix.head.length)
59 | val maxDimension = math.max(costMatrix.length, costMatrix.head.length)
60 |
61 | KuhnMunkres.extractMatching(costMatrix) match {
62 | case (matches, cost) => matches.toArray -> (cost + unmatched) / maxDimension
63 | }
64 | }
65 | }
66 |
67 | /**
68 | * The Hausdorff matcher is based on the Hausdorff distance. The Hausdorff distance is the longest distance
69 | * you can be forced to travel by an adversary that chooses a point in one set, from where you then must travel
70 | * to the other set. In other words, it is the greatest of all the distances from a point in one set to the
71 | * closest point in another set.
72 | *
73 | * @note The Hausdorff matcher can be used for solving the assignment problem, but the solution is not
74 | * guaranteed to be the optimal one. Moreover, the matching is not guaranteed to be one to one.
75 | * @see https://en.wikipedia.org/wiki/Hausdorff_distance
76 | * Distance Between Herbrand Interpretations: A Measure for Approximations
77 | * to a Target Concept (1997)
78 | */
79 | object HausdorffMatcher extends Matcher {
80 |
81 | /**
82 | * It solves the assignment problem for a given cost matrix. The cost
83 | * matrix represents the costs for each edge in the graph.
84 | *
85 | * @param costMatrix the bipartite graph cost matrix
86 | * @return the cost of the assignment
87 | */
88 | override def apply(costMatrix: CostMatrix[Double]): (Array[Int], Double) =
89 | Array.empty[Int] -> math.max(costMatrix.map(_.min).max, costMatrix.transpose.map(_.min).max)
90 | }
91 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/learning/supervision/metric/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.supervision
22 |
23 | import lomrf.logic._
24 | import lomrf.{ AUX_PRED_PREFIX => PREFIX }
25 | import lomrf.mln.model.EvidenceDB
26 |
package object metric {

  // A bipartite graph cost matrix (rows x columns of edge costs).
  type CostMatrix[T] = IndexedSeq[IndexedSeq[T]]

  /**
    * An AuxConstruct represents an auxiliary ground predicate structure.
    *
    * @note The constant sequence must not include the corresponding function
    *       return constant.
    *
    * @param signature the signature of the predicate
    * @param constants a sequence of constants
    */
  private[metric] case class AuxConstruct(signature: AtomSignature, constants: IndexedSeq[Constant])

  /**
    * Collect all auxiliary predicates given an evidence database and construct a
    * mapping from function return constants to auxiliary construct objects.
    *
    * @see [[lomrf.mln.learning.supervision.metric.AuxConstruct]]
    *
    * @param evidenceDB an evidence database
    * @return a map from function return constants to auxiliary constructs
    */
  private[metric] def collectAuxConstructs(evidenceDB: EvidenceDB): Map[Constant, AuxConstruct] =
    for {
      // keep only databases of auxiliary predicates (their symbol carries the AUX prefix)
      (signature, db) <- evidenceDB.filter { case (signature, _) => signature.symbol.contains(PREFIX) }
      // visit only atom ids that are TRUE in the database
      id <- db.identity.indices.filter(db(_) == TRUE)
      constants <- db.identity.decode(id).toOption
      // the first decoded constant is the function's return constant; the rest are its arguments
    } yield Constant(constants.head) -> AuxConstruct(signature, constants.tail.map(Constant))
}
58 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/model/FunctionMapper.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.model
22 |
23 | import gnu.trove.map.TIntObjectMap
24 | import lomrf.mln.model.builders.FunctionMapperBuilder
25 |
/**
 * A function mapper associates a vector of string arguments to a
 * resulting string value.
 */
trait FunctionMapper extends Serializable {

  /**
   * Gives the associated string value for a given vector
   * of string arguments or '''null''' in case no value exists.
   *
   * @param args a vector of string arguments
   * @return the associated value for the given arguments
   *         if one exists, null otherwise
   */
  def apply(args: Vector[String]): String

  /**
   * Gets the associated string value for a given vector of
   * string arguments, if one exists.
   *
   * @param args a vector of string arguments
   * @return the associated value for the given arguments
   *         if one exists, none otherwise
   */
  def get(args: Vector[String]): Option[String]
}
48 |
/**
 * A default function mapper that associates input string arguments to string
 * values by encoding the arguments into an integer id (via an atom identity
 * function) and looking the id up in a map.
 *
 * @param identityFunction an atom identity function
 * @param args2Value a map from encoded string argument ids (integers) to string values
 */
final class FunctionMapperDefaultImpl(
    identityFunction: AtomIdentityFunction,
    args2Value: TIntObjectMap[String]) extends FunctionMapper {

  /** Encodes the arguments and returns the mapped value, or null when absent. */
  def apply(args: Vector[String]): String =
    args2Value.get(identityFunction.encode(args))

  /** Encodes the arguments and returns the mapped value; `Option` turns a null result into None. */
  def get(args: Vector[String]): Option[String] =
    Option(args2Value.get(identityFunction.encode(args)))

  override def toString =
    s"FunctionMapperDefaultImpl{signature:= ${identityFunction.signature}, size:=${args2Value.size}}"
}
75 |
/**
 * A special function mapper where the association of input string arguments
 * to string values is computed by a user-supplied function, which encodes
 * the whole logic of the mapper.
 *
 * @example {{{
 *   val concat = (x: Vector[String]) => x.reduce(_ ++ _)
 *   val fm = new FunctionMapperSpecialImpl(concat)
 * }}}
 *
 * @param f a function from vector of string arguments to string values
 */
final class FunctionMapperSpecialImpl(f: Vector[String] => String) extends FunctionMapper {

  /** Applies the underlying function to the given arguments. */
  def apply(args: Vector[String]): String = f(args)

  /** Always defined: wraps the computed value in Some. */
  def get(args: Vector[String]): Option[String] = Some(apply(args))

  override def toString = s"FunctionMapperSpecialImpl{f:= $f}"
}
96 |
object FunctionMapper {

  /**
   * Creates a function mapper from a given builder.
   *
   * @param builder a function mapper builder
   * @return a FunctionMapper instance
   */
  def apply(builder: FunctionMapperBuilder): FunctionMapper = builder.result()

  /**
   * Creates a function mapper from a given atom identity function
   * and a map of encoded string arguments to string values.
   *
   * @see [[lomrf.mln.model.FunctionMapperDefaultImpl]]
   *
   * @param identityFunction an atom identity function
   * @param args2Value a map from encoded string argument ids (integers) to string values
   * @return a FunctionMapper instance
   */
  def apply(identityFunction: AtomIdentityFunction, args2Value: TIntObjectMap[String]): FunctionMapper =
    new FunctionMapperDefaultImpl(identityFunction, args2Value)

  /**
   * Creates a function mapper from a given function.
   *
   * @see [[lomrf.mln.model.FunctionMapperSpecialImpl]]
   *
   * @param f a function from vector of string arguments to string values
   * @return a FunctionMapper instance
   */
  def apply(f: Vector[String] => String): FunctionMapper = new FunctionMapperSpecialImpl(f)
}
128 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/model/MLNSchema.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.model
22 |
/**
 * MLNSchema holds the predicate and function schemas, both static and dynamic.
 *
 * @note Function signatures are also represented using [[lomrf.logic.AtomSignature]].
 *
 * @param predicates a map from atom signatures to their argument domain names
 * @param functions a map from function signatures to a pair of their return value domain name and argument domain names
 * @param dynamicPredicates a map from atom signatures to functions of the form '''Vector[String] => Boolean'''
 * @param dynamicFunctions a map from function signatures to functions of the form '''Vector[String] => String'''
 */
case class MLNSchema(
    predicates: PredicateSchema,
    functions: FunctionSchema,
    dynamicPredicates: DynamicPredicates,
    dynamicFunctions: DynamicFunctions)
38 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/mln/model/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln
22 |
23 | import lomrf.logic.AtomSignature
24 | import lomrf.mln.learning.structure.ModeDeclaration
25 |
package object model {

  // A mapping from domain names to their sets of constants.
  type ConstantsDomain = Map[String, ConstantsSet]

  // A mapping from atom signatures to the domain names of their arguments.
  type PredicateSchema = Map[AtomSignature, Vector[String]]

  // A mapping from function signatures to (return value domain name, argument domain names).
  type FunctionSchema = Map[AtomSignature, (String, Vector[String])]

  // A mapping from atom signatures to Boolean-valued functions over ground arguments.
  type DynamicPredicates = Map[AtomSignature, Vector[String] => Boolean]

  // A mapping from function signatures to String-valued functions over ground arguments.
  type DynamicFunctions = Map[AtomSignature, Vector[String] => String]

  // A mapping from atom signatures to their evidence databases.
  type EvidenceDB = Map[AtomSignature, AtomEvidenceDB]

  // A mapping from function signatures to their function mappers.
  type FunctionMappers = Map[AtomSignature, FunctionMapper]

  // A mapping from atom signatures to their atom identity functions.
  type Identities = Map[AtomSignature, AtomIdentityFunction]

  // A mapping from atom signatures to their mode declarations.
  type ModeDeclarations = Map[AtomSignature, ModeDeclaration]

  implicit class FunctionSchemaWrapped(val fs: FunctionSchema) extends AnyVal {

    /**
     * Converts a function schema into a predicate schema: each function
     * becomes an auxiliary predicate whose symbol is prefixed with
     * `lomrf.AUX_PRED_PREFIX`, whose first argument domain is the function's
     * return value domain, and whose arity is the function arity plus one.
     */
    def toPredicateSchema: PredicateSchema = fs.map {
      case (signature, (retType, argTypes)) =>
        val symbol = lomrf.AUX_PRED_PREFIX + signature.symbol
        val termTypes = argTypes.+:(retType)
        (AtomSignature(symbol, signature.arity + 1), termTypes)
    }
  }

}
57 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
/**
 * LoMRF utilities and global constants.
 */
package object lomrf {

  // Sentinel key denoting a missing entry in trove-like collections.
  final val NO_ENTRY_KEY = -1

  // Default initial capacity for hash-based collections.
  final val DEFAULT_CAPACITY = 43

  // Default load factor for hash-based collections.
  final val DEFAULT_LOAD_FACTOR = 0.75f

  // predicate prefix when functions are converted into auxiliary predicates
  final val AUX_PRED_PREFIX = "AUX"

  // function return value prefix
  final val FUNC_RET_VAR_PREFIX = "funcRetVar"

  // ASCII-art logo banner.
  final val ASCIILogo =
    """
      |o o o o o
      || o | |\ /| | /
      || o-o o--o o-o oo | | O | oo o-o OO o-o o o
      || | | | | | | | | | | | | | | | \ | | \ /
      |O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
      | |
      | o--o
      |o--o o o--o o o
      || | | | o | |
      |O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
      || \ | | | | | | | | | | | | | |-' | | | \
      |o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
      |
      |Logical Markov Random Fields (LoMRF).
    """.stripMargin

  // Number of processors available to the JVM.
  final val processors = sys.runtime.availableProcessors

  /**
   * Resolves the LoMRF build version from the enclosing JAR's manifest
   * ('Specification-Version' attribute). When not running from a JAR
   * (e.g., from an IDE) the version is reported as undefined.
   */
  object BuildVersion {

    import java.net.URL

    final val version: String = "Version: " + {
      val clazz = lomrf.BuildVersion.getClass
      try {
        // Locate this class file on the classpath to find the enclosing JAR.
        val classPath = clazz.getResource("package$" + clazz.getSimpleName + ".class").toString

        if (classPath.startsWith("jar")) {
          // e.g. jar:file:/path/lomrf.jar!/META-INF/MANIFEST.MF
          val manifestPath = classPath.substring(0, classPath.lastIndexOf("!") + 1) + "/META-INF/MANIFEST.MF"
          val manifest0 = new java.util.jar.Manifest(new URL(manifestPath).openStream)
          val attr = manifest0.getMainAttributes

          //val build = attr.getValue("Implementation-Build")
          val version = attr.getValue("Specification-Version")

          version
        } else "(undefined version)"
      } catch {
        // Opening the manifest stream may throw IOException (and getResource
        // may yield null). Previously only NullPointerException was caught,
        // so an I/O failure during initialization would crash the whole
        // application. Treat any non-fatal error as an undefined version.
        case scala.util.control.NonFatal(_) => "(undefined version)"
      }
    }

    def apply(): String = version

    override def toString: String = version
  }

}
86 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/ArrayUtils.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
23 | import java.util
24 |
/**
 * Array utilities.
 */
object ArrayUtils {

  /**
   * Specialized hash code calculation for an arbitrary array type, delegating
   * to the appropriate `java.util.Arrays.hashCode` overload.
   *
   * @param array an array of any element type
   * @return the hash code for the given array
   */
  def hashCodeOf(array: Array[_]): Int = array match {
    case x: Array[Char] => util.Arrays.hashCode(x)
    case x: Array[Byte] => util.Arrays.hashCode(x)
    case x: Array[Short] => util.Arrays.hashCode(x)
    case x: Array[Int] => util.Arrays.hashCode(x)
    case x: Array[Boolean] => util.Arrays.hashCode(x)
    case x: Array[Float] => util.Arrays.hashCode(x)
    case x: Array[Long] => util.Arrays.hashCode(x)
    case x: Array[Double] => util.Arrays.hashCode(x)
    case x: Array[_] => util.Arrays.hashCode(x.asInstanceOf[Array[AnyRef]])
    case _ => throw new RuntimeException("possible bug?") // defensive: the argument is always an array
  }

  /**
   * Specialized equality between arrays for arbitrary types. Two arrays are
   * equal when they have the same length, the same runtime class (element
   * type) and equal elements, as defined by the appropriate
   * `java.util.Arrays.equals` overload.
   *
   * @param array1 one array
   * @param array2 another array
   *
   * @return true if the arrays are equal, false otherwise
   */
  def equals(array1: Array[_], array2: Array[_]): Boolean = {

    // arrays of different length can never be equal
    if (array1.length != array2.length) return false

    // arrays of different runtime class (element type) are never equal
    if (array1.getClass != array2.getClass) return false

    // same runtime class: reuse the typed pattern-match binding for the first
    // array instead of re-casting it (the original bound `x` but never used it)
    array1 match {
      case x: Array[Char] => util.Arrays.equals(x, array2.asInstanceOf[Array[Char]])
      case x: Array[Byte] => util.Arrays.equals(x, array2.asInstanceOf[Array[Byte]])
      case x: Array[Short] => util.Arrays.equals(x, array2.asInstanceOf[Array[Short]])
      case x: Array[Int] => util.Arrays.equals(x, array2.asInstanceOf[Array[Int]])
      case x: Array[Boolean] => util.Arrays.equals(x, array2.asInstanceOf[Array[Boolean]])
      case x: Array[Float] => util.Arrays.equals(x, array2.asInstanceOf[Array[Float]])
      case x: Array[Long] => util.Arrays.equals(x, array2.asInstanceOf[Array[Long]])
      case x: Array[Double] => util.Arrays.equals(x, array2.asInstanceOf[Array[Double]])
      case x: Array[_] => util.Arrays.equals(x.asInstanceOf[Array[AnyRef]], array2.asInstanceOf[Array[AnyRef]])
      case _ => throw new RuntimeException("possible bug?") // defensive: the argument is always an array
    }
  }

}
83 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/LongDoubleConversions.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
23 | import com.vividsolutions.jts.math.DD
24 |
/**
 * Conversions for DD number operations of the JTS library,
 * in order to be more Scala friendly.
 */
object LongDoubleConversions {

  // Alias for the extended-precision ("double-double") number type of JTS.
  type LongDouble = DD

  final val ZERO = DD.valueOf(0.0)
  final val ONE = DD.valueOf(1.0)
  final val MAXVALUE = DD.valueOf(9.9999999999999999E300) // Do not use DD.valueOf(Double.MaxValue)

  /**
   * Enriches LongDouble (DD) with Scala operator syntax, delegating to the
   * corresponding DD methods.
   *
   * According to the Scala specification, value classes are not able to
   * define an equals or hashCode method.
   *
   * @see http://docs.scala-lang.org/overviews/core/value-classes.html
   *
   * @param number LongDouble value
   */
  implicit class LongDoubleConversions(val number: LongDouble) extends AnyVal {

    // arithmetic operators, delegating to the DD instance methods
    def +(other: LongDouble) = number.add(other)
    def -(other: LongDouble) = number.subtract(other)
    def *(other: LongDouble) = number.multiply(other)
    def /(other: LongDouble) = number.divide(other)
    def unary_- = number.negate()

    // comparison operators, delegating to the DD instance methods
    def ===(other: LongDouble) = number.equals(other)
    def >(other: LongDouble) = number.gt(other)
    def >=(other: LongDouble) = number.ge(other)
    def <(other: LongDouble) = number.lt(other)
    def <=(other: LongDouble) = number.le(other)
  }
}
60 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/NaturalComparator.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
/**
 * Comparator implementing natural (alphanumeric) ordering of strings:
 * maximal runs of digits are compared as numbers, while all other runs
 * are compared lexicographically by character code (e.g. "a2" precedes
 * "a10").
 */
object NaturalComparator {

  // True when the character code lies in ['0' (48), '9' (57)].
  private def isDigit(ch: Char) = ch >= 48 && ch <= 57

  /**
   * Computes the end offset (exclusive) of the chunk starting at position
   * `mark`, where a chunk is a maximal run of digit characters or a maximal
   * run of non-digit characters.
   *
   * @param s the input string
   * @param length the length of the input string
   * @param mark the start position of the chunk
   * @return the offset just after the last character of the chunk
   */
  private def getChunkOffset(s: String, length: Int, mark: Int): Int = {
    var marker = mark
    var c = s.charAt(marker)
    marker += 1

    // advance while characters remain of the same kind (digit / non-digit)
    // as the chunk's first character
    if (isDigit(c)) while (marker < length) {
      c = s.charAt(marker)
      if (!isDigit(c)) return marker
      marker += 1
    }
    else while (marker < length) {
      c = s.charAt(marker)
      if (isDigit(c)) return marker
      marker += 1
    }
    marker
  }

  /**
   * Lexicographically compares two string regions by character code; when one
   * region is a prefix of the other, the shorter region is smaller.
   *
   * @return a negative, zero or positive number when the first region is
   *         respectively smaller, equal or greater than the second
   */
  private def compareRegion(s1: String, off1: Int, len1: Int, s2: String, off2: Int, len2: Int): Int = {
    val lim = Math.min(len1, len2)
    var i = 0

    while (i < lim) {
      val c1 = s1.charAt(off1 + i)
      val c2 = s2.charAt(off2 + i)
      if (c1 != c2) return c1 - c2
      i += 1
    }
    len1 - len2
  }

  /**
   * Compares two strings using natural (alphanumeric) ordering.
   *
   * @return a negative, zero or positive number when s1 respectively
   *         precedes, equals or follows s2 in the ordering
   */
  def compare(s1: String, s2: String): Int = {
    var thisMarker = 0
    var thatMarker = 0
    val s1Length = s1.length
    val s2Length = s2.length
    while (thisMarker < s1Length && thatMarker < s2Length) {
      // identical characters: advance both cursors without chunking
      if (s1.charAt(thisMarker) == s2.charAt(thatMarker)) {
        thisMarker += 1
        thatMarker += 1
      } else {
        var result = 0
        val thisChunk = getChunkOffset(s1, s1Length, thisMarker)
        val thatChunk = getChunkOffset(s2, s2Length, thatMarker)
        val thisChunkLength = thisChunk - thisMarker
        val thatChunkLength = thatChunk - thatMarker

        if (isDigit(s1.charAt(thisMarker)) && isDigit(s2.charAt(thatMarker))) {
          // both chunks are numeric: the chunk with more digits is the larger
          // number; equal-length chunks are compared digit by digit.
          // NOTE(review): leading zeros inflate the chunk length (e.g. "002"
          // compares greater than "2") — confirm inputs are not zero-padded.
          result = thisChunkLength - thatChunkLength
          if (result == 0) {
            var i = thisMarker
            var j = thatMarker
            while (i < thisChunk) {
              result = s1.charAt(i) - s2.charAt(j)
              if (result != 0) return result
              i += 1
              j += 1
            }
          }
        } else result =
          compareRegion(s1, thisMarker, thisChunkLength, s2, thatMarker, thatChunkLength)

        if (result != 0) return result

        thisMarker = thisChunk
        thatMarker = thatChunk
      }
    }
    // one string is a prefix of the other: the shorter one precedes
    s1Length - s2Length
  }

  /** Convenience predicate: true when s1 strictly precedes s2 in natural order. */
  def compareBool(s1: String, s2: String): Boolean = compare(s1, s2) < 0
}
100 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/GlobalIndexPartitioned.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection
22 |
23 | import scala.{ specialized => sp }
24 |
/**
 * A collection partitioned into a number of sub-collections, where elements
 * are addressed by a single global (zero-based) index that spans all
 * partitions.
 *
 * @tparam C the type of each partition (sub-collection)
 * @tparam V the type of the stored values
 */
trait GlobalIndexPartitioned[C, @sp(Byte, Short, Int, Long, Float, Double, Boolean) V] {

  /** Gets the value at the given global index (bounds-checked by implementations in this file). */
  def apply(key: Int): V

  /** Gets the value at the given global index; implementations may skip bounds checking. */
  def fetch(key: Int): V

  /** Optionally gets the value at the given global index; None when out of range. */
  def get(key: Int): Option[V]

  /** The total number of elements across all partitions. */
  def size: Int

  /** The number of partitions. */
  def numberOfPartitions: Int

  /** An iterable collection over all partitions. */
  def partitions: Iterable[C]

  /** Direct access to the partition at the given partition index. */
  def partition(partitionIndex: Int): C

  /** Optionally the partition at the given index; None when out of range. */
  def getPartition(partitionIndex: Int): Option[C]

  /** The smallest valid global index. */
  val firstKey: Int

  /** The exclusive upper bound of valid global indices (equals `size`). */
  val lastKey: Int

}
48 |
/**
 * Base implementation of [[GlobalIndexPartitioned]] that precomputes
 * cumulative partition offsets, so that a global index can be mapped to a
 * (partition, local offset) pair; the actual element retrieval is left to
 * `fetch` implementations.
 *
 * @param data the array of partitions
 * @param partitionSizes the number of elements held by each partition
 */
abstract class AbstractGlobalIndexPartitioned[C, @sp(Byte, Short, Int, Long, Float, Double, Boolean) V](data: Array[C], partitionSizes: Array[Int]) extends GlobalIndexPartitioned[C, V] {

  // Prefix sums of partition sizes: cumulativeIndices(i) is the global index
  // where partition i begins; the last entry equals the total element count.
  protected val cumulativeIndices: Array[Int] = partitionSizes.scanLeft(0)(_ + _)

  // Maps a global index to the index of the partition that contains it.
  protected val partitioner: Partitioner[Int] = Partitioner.indexed(cumulativeIndices)

  protected val numberOfElements = partitionSizes.sum

  // A global index is valid when it lies in [0, total number of elements).
  private def isValidKey(key: Int): Boolean = key >= 0 && key < cumulativeIndices.last

  override def apply(key: Int): V =
    if (isValidKey(key)) fetch(key)
    else throw new IndexOutOfBoundsException(s"Invalid index value.")

  override def get(key: Int): Option[V] =
    if (isValidKey(key)) Some(fetch(key)) else None

  override def size: Int = numberOfElements

  override def numberOfPartitions: Int = data.length

  override def partitions: Iterable[C] = data.toIterable

  override def partition(partitionIndex: Int): C = data(partitionIndex)

  override def getPartition(partitionIndex: Int): Option[C] =
    if (partitionIndex >= 0 && partitionIndex < data.length) Some(data(partitionIndex)) else None

  override val firstKey: Int = cumulativeIndices.head

  override val lastKey: Int = cumulativeIndices.last

  override def toString: String = s"GlobalIndexPartitioned{cumulativeIndices = (${cumulativeIndices.mkString(",")})}"

}
87 |
object GlobalIndexPartitioned {

  /**
   * Creates a partitioned view over indexed sequences; element lookup
   * directly indexes into the owning partition.
   *
   * @param data the array of partitions (indexed sequences)
   */
  def apply[C <: IndexedSeq[V], @sp(Byte, Short, Int, Long, Float, Double, Boolean) V](data: Array[C]): GlobalIndexPartitioned[C, V] =
    new AbstractGlobalIndexPartitioned[C, V](data, data.map(_.size)) {

      override def fetch(key: Int): V = {
        // translate the global key into (partition, local offset)
        val targetPartition = partitioner(key)
        data(targetPartition)(key - cumulativeIndices(targetPartition))
      }
    }

  /**
   * Creates a partitioned view over arbitrary partition types; element lookup
   * is delegated to the given partition fetcher.
   *
   * @param data the array of partitions
   * @param partitionSizes the number of elements held by each partition
   * @param partitionFetcher retrieves a value from a partition given a local offset
   */
  def apply[C, @sp(Byte, Short, Int, Long, Float, Double, Boolean) V](data: Array[C], partitionSizes: Array[Int], partitionFetcher: PartitionFetcher[Int, C, V]): GlobalIndexPartitioned[C, V] =
    new AbstractGlobalIndexPartitioned[C, V](data, partitionSizes) {

      override def fetch(key: Int): V = {
        // translate the global key into (partition, local offset)
        val targetPartition = partitioner(key)
        partitionFetcher(key - cumulativeIndices(targetPartition), data(targetPartition))
      }
    }

}
116 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/IndexPartitioned.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection
22 |
23 | import scala.reflect.ClassTag
24 | import spire.syntax.cfor._
25 |
/**
 * A collection of partitions addressed by integer index values, typically
 * via a hash partitioning function.
 *
 * @tparam T the type of each partition
 */
trait IndexPartitioned[T] extends (Int => T) {

  /**
   * Gives the corresponding object that is associated to the specified index value,
   * possibly by using a hash partitioning function.
   *
   * @param idx the index value
   *
   * @return the corresponding object
   */
  def apply(idx: Int): T

  /**
   * @return the number of partitions
   */
  def size: Int

  /**
   * @return an iterable collection that contains all partitions
   */
  def partitions: Iterable[T]

  /**
   * Direct access to the corresponding object at the specified partition index.
   *
   * @param partitionIndex partition index
   * @return corresponding object
   */
  def partition(partitionIndex: Int): T

}
57 |
object IndexPartitioned {

  object hash {

    /**
     * Creates an index partitioning backed by the given array, where index
     * values are mapped to partitions through a hash partitioner.
     *
     * @param data the array of partitions
     */
    def apply[T](data: Array[T]): IndexPartitioned[T] = new IndexPartitioned[T] {

      // maps any index value to a valid position inside `data`
      private val partitioner = Partitioner.hash[Int](data.length)

      override def apply(idx: Int) = data(partitioner(idx))

      override def size: Int = data.length

      override def partition(partitionIndex: Int) = data(partitionIndex)

      override def partitions: Iterable[T] = data.toIterable
    }

    /**
     * Creates an index partitioning of the given size, where each partition
     * is produced by applying the initializer to its position.
     */
    def apply[T: ClassTag](size: Int, initializer: (Int => T)): IndexPartitioned[T] =
      apply(Array.tabulate(size)(initializer))

    /** Creates an index partitioning of the given size with uninitialized partitions. */
    def apply[T: ClassTag](size: Int): IndexPartitioned[T] = apply(new Array[T](size))
  }

}
86 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/KeyPartitioned.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection
22 |
23 | import scala.{ specialized => sp }
24 |
/**
 * A collection partitioned into sub-collections, where values are addressed
 * by keys of an arbitrary type.
 *
 * @tparam C the type of each partition (sub-collection)
 * @tparam K the type of the keys
 * @tparam V the type of the stored values
 */
trait KeyPartitioned[C, @sp(Byte, Short, Int, Long) K, @sp(Byte, Short, Int, Long) V] extends (K => V) {

  /** Gets the value associated to the given key. */
  def apply(key: K): V

  /** Checks whether a value is associated to the given key. */
  def contains(key: K): Boolean

  /** @return the number of partitions */
  def size: Int

  /** @return an iterable collection over all partitions */
  def partitions: Iterable[C]

  /** Direct access to the partition at the given partition index. */
  def partition(partitionIndex: Int): C

}
38 |
object KeyPartitioned {

  /**
   * Creates a key-partitioned collection from an array of partitions, a
   * partitioner that maps keys to partitions, and a fetcher that retrieves
   * values inside a partition.
   */
  def apply[C, @sp(Byte, Short, Int, Long) K, @sp(Byte, Short, Int, Long) V](data: Array[C], partitioner: Partitioner[K], partitionFetcher: PartitionFetcher[K, C, V]): KeyPartitioned[C, K, V] =
    new KeyPartitionedImpl[C, K, V](data, partitioner, partitionFetcher)

  private class KeyPartitionedImpl[C, @sp(Byte, Short, Int, Long) K, @sp(Byte, Short, Int, Long) V](
      data: Array[C],
      partitioner: Partitioner[K],
      partitionFetcher: PartitionFetcher[K, C, V]) extends KeyPartitioned[C, K, V] {

    // the partition responsible for the given key
    private def partitionOf(key: K): C = data(partitioner(key))

    override def apply(key: K): V = partitionFetcher(key, partitionOf(key))

    override def contains(key: K): Boolean = partitionFetcher.contains(key, partitionOf(key))

    override def size: Int = data.length

    override def partitions: Iterable[C] = data.toIterable

    override def partition(partitionIndex: Int): C = data(partitionIndex)
  }

}
59 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/PartitionFetcher.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection
22 |
/**
 * A strategy for retrieving values from a single partition (sub-collection)
 * given a key.
 *
 * @tparam Key the type of the keys
 * @tparam Collection the type of the partition (sub-collection)
 * @tparam Value the type of the stored values
 */
trait PartitionFetcher[Key, Collection, Value] {

  /** Gets the value associated to the given key inside the given partition. */
  def apply(key: Key, collection: Collection): Value

  /** Checks whether the given partition holds a value for the given key. */
  def contains(key: Key, collection: Collection): Boolean

  /** An iterator over the values of the given partition. */
  def valuesIterator(key: Key, collection: Collection): Iterator[Value]

}
32 |
object PartitionFetcher {

  /**
   * Creates a partition fetcher for indexed sequences, where each key is a
   * zero-based position inside the sequence.
   *
   * @tparam C the type of the partition (an indexed sequence)
   * @tparam V the type of the stored values
   */
  def create[C <: IndexedSeq[V], V]: PartitionFetcher[Int, C, V] = new PartitionFetcher[Int, C, V]() {

    override def apply(key: Int, collection: C): V = collection(key)

    // Check that the key is a valid position in the sequence. The previous
    // implementation used `collection.contains(key)`, which tests element
    // membership (is the *value* `key` an element?) — inconsistent with
    // `apply`, which treats the key as an index.
    override def contains(key: Int, collection: C): Boolean = key >= 0 && key < collection.length

    override def valuesIterator(key: Int, collection: C): Iterator[V] = collection.iterator
  }
}
43 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/Partitioner.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection
22 |
23 | import scala.{ specialized => sp }
24 |
25 | trait Partitioner[@sp(Byte, Short, Int, Long) K] {
26 |
27 | def apply(key: K): Int
28 |
29 | def numPartitions: Int
30 |
31 | }
32 |
33 | object Partitioner {
34 |
35 | object hash {
36 |
37 | import scala.reflect.runtime.universe._
38 |
39 | def apply[K: TypeTag](size: Int): Partitioner[K] = {
40 |
41 | (typeOf[K] match {
42 |
43 | case TypeTag.Int => new FixedSizePartitionerInt(size)
44 |
45 | case TypeTag.Long => new FixedSizePartitionerLong(size)
46 |
47 | case TypeTag.Short => new FixedSizePartitionerShort(size)
48 |
49 | case TypeTag.Byte => new FixedSizePartitionerByte(size)
50 |
51 | case t => new FixedSizePartitioner[K](size) {
52 | override def apply(key: K): Int = {
53 | if (key == null) 0
54 | else (key.## & Int.MaxValue) % size // always return non-negative integer
55 | }
56 | }
57 | }).asInstanceOf[Partitioner[K]]
58 | }
59 |
60 | }
61 |
62 | object indexed {
63 |
64 | def apply(indices: Array[Int]): Partitioner[Int] = new FixedSizePartitioner[Int](indices.length) {
65 |
66 | override def apply(key: Int): Int = {
67 |
68 | val searchResult = java.util.Arrays.binarySearch(indices, math.abs(key))
69 | val partitionIndex = if (searchResult < 0) (-searchResult) - 2 else searchResult
70 |
71 | //assert(partitionIndex >= 0)
72 |
73 | partitionIndex
74 | }
75 |
76 | override def numPartitions: Int = indices.length
77 | }
78 |
79 | def apply[@sp(Byte, Short, Int, Long) K](indices: Array[Int], f: K => Int): Partitioner[K] = new FixedSizePartitioner[K](indices.length) {
80 | override def apply(key: K): Int = {
81 |
82 | val searchResult = java.util.Arrays.binarySearch(indices, math.abs(f(key)))
83 | val partitionIndex = if (searchResult < 0) (-searchResult) - 2 else searchResult
84 |
85 | //assert(partitionIndex >= 0)
86 |
87 | partitionIndex
88 | }
89 | }
90 |
91 | def fromRanges(ranges: Iterable[Range]): Partitioner[Int] = {
92 | require(
93 | ranges.forall(_.step == 1),
94 | "Only ranges with successive values supported, i.e., ranges having step size equal to one.")
95 |
96 | apply(ranges.map(_.size).scanLeft(0)(_ + _).toArray)
97 | }
98 |
99 | def fromRanges[@sp(Byte, Short, Int, Long) K](ranges: Iterable[Range], f: K => Int): Partitioner[K] = {
100 | require(
101 | ranges.forall(_.step == 1),
102 | "Only ranges with successive values supported, i.e., ranges having step size equal to one.")
103 |
104 | apply(ranges.map(_.size).scanLeft(0)(_ + _).toArray, f)
105 | }
106 |
107 | def fromSizes(sizes: Iterable[Int]): Partitioner[Int] = apply(sizes.scanLeft(0)(_ + _).toArray)
108 |
109 | def fromSizes[@sp(Byte, Short, Int, Long) K](sizes: Iterable[Int], f: K => Int): Partitioner[K] = apply(sizes.scanLeft(0)(_ + _).toArray, f)
110 |
111 | }
112 |
  /**
    * A partitioner having a fixed, predefined number of partitions.
    *
    * @param size the number of partitions
    * @tparam K the type of the keys being partitioned
    */
  private abstract class FixedSizePartitioner[@sp(Byte, Short, Int, Long) K](size: Int) extends Partitioner[K] {
    override def numPartitions: Int = size
  }
116 |
  /** Partitions Byte keys by clearing the sign bit (non-negative result) and taking modulo `size`. */
  private class FixedSizePartitionerByte(size: Int) extends FixedSizePartitioner[Byte](size) {
    override def apply(key: Byte): Int = (key & Int.MaxValue) % size
  }
120 |
  /** Partitions Short keys by clearing the sign bit (non-negative result) and taking modulo `size`. */
  private class FixedSizePartitionerShort(size: Int) extends FixedSizePartitioner[Short](size) {
    override def apply(key: Short): Int = (key & Int.MaxValue) % size
  }
124 |
  /** Partitions Int keys by clearing the sign bit (non-negative result) and taking modulo `size`. */
  private class FixedSizePartitionerInt(size: Int) extends FixedSizePartitioner[Int](size) {
    override def apply(key: Int): Int = (key & Int.MaxValue) % size
  }
128 |
  /** Partitions Long keys by truncating to Int, clearing the sign bit and taking modulo `size`. */
  private class FixedSizePartitionerLong(size: Int) extends FixedSizePartitioner[Long](size) {
    override def apply(key: Long): Int = (key.toInt & Int.MaxValue) % size
  }
132 |
133 | }
134 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/collection/mutable/IndexPartitioned.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.collection.mutable
22 |
23 | import scala.reflect.ClassTag
24 | import spire.syntax.cfor._
25 |
/**
  * A mutable variant of [[lomrf.util.collection.IndexPartitioned]] that additionally
  * allows replacing the element stored at a given index.
  *
  * @tparam T the type of the stored elements
  */
trait IndexPartitioned[T] extends lomrf.util.collection.IndexPartitioned[T] {

  /**
    * Replaces the element associated with the given index.
    *
    * @param idx an index value
    * @param elem the element to store
    */
  def update(idx: Int, elem: T)
}
30 |
/**
  * Factory methods for mutable `IndexPartitioned` instances backed by a plain
  * array of partitions.
  */
object IndexPartitioned {

  /**
    * Creates a partitioned collection backed by the given array.
    *
    * @param data the array of partitions
    * @tparam T the type of the stored elements
    * @return a mutable IndexPartitioned instance over the given array
    */
  def apply[T](data: Array[T]): IndexPartitioned[T] = new IndexPartitioned[T] {

    // Maps an arbitrary (possibly negative) index to a valid partition position.
    private val positionOf = (idx: Int) => math.abs(idx % data.length)

    override def apply(idx: Int): T = data(positionOf(idx))

    override def partitions = data.toIterable

    override def size = data.length

    // NOTE(review): unlike `apply`, `update` indexes the backing array directly,
    // so `idx` must already be a valid partition index (0 until size) -- confirm
    // this asymmetry with `apply` is intended.
    override def update(idx: Int, elem: T): Unit = data(idx) = elem

    override def partition(partitionIndex: Int) = data(partitionIndex)
  }

  /**
    * Creates a partitioned collection of the given size, initializing each
    * partition with the given function.
    *
    * @param size the number of partitions
    * @param initializer function producing the initial element of each partition
    * @tparam T the type of the stored elements
    */
  def apply[T: ClassTag](size: Int, initializer: (Int => T)): IndexPartitioned[T] = {
    // Standard-library array construction instead of the spire `cfor` macro loop.
    apply(Array.tabulate(size)(initializer))
  }

  /** Creates a partitioned collection of the given size with default-initialized partitions. */
  def apply[T: ClassTag](size: Int): IndexPartitioned[T] = apply(new Array[T](size))
}
57 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/evaluation/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
23 | import lomrf.logic.{ FALSE, TRUE, TriState }
24 |
package object evaluation {

  /**
   * A tuple composed of the counts for true positives, true negatives,
   * false positives and false negatives respectively.
   */
  type EvaluationStats = (Long, Long, Long, Long)

  /**
   * Adds two evaluation-statistics tuples element-wise.
   *
   * @param a the first tuple of (TP, TN, FP, FN) counts
   * @param b the second tuple of (TP, TN, FP, FN) counts
   * @return the element-wise sum of the two tuples
   */
  private[evaluation] def combine(a: EvaluationStats, b: EvaluationStats): EvaluationStats =
    (a._1 + b._1, a._2 + b._2, a._3 + b._3, a._4 + b._4)

  /**
   * Evaluates a single inferred atom state against its annotated truth value.
   *
   * @param inferredState the inferred truth value of the atom
   * @param annotationState the annotated (ground-truth) tri-state value
   * @return a unit tuple of (TP, TN, FP, FN) counts for this single atom
   */
  private[evaluation] def evaluateSingle(inferredState: Boolean, annotationState: TriState): EvaluationStats = {
    (annotationState, inferredState) match {
      case (TRUE, true) => (1, 0, 0, 0)
      case (FALSE, false) => (0, 1, 0, 0)
      case (FALSE, true) => (0, 0, 1, 0)
      // NOTE(review): this default branch covers (TRUE, false) but also counts
      // UNKNOWN annotations as false negatives regardless of the inferred state
      // -- confirm that is the intended treatment of UNKNOWN.
      case _ => (0, 0, 0, 1)
    }
  }
}
45 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/logging/Implicits.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.logging
22 |
23 | import com.typesafe.scalalogging.Logger
24 | import org.slf4j.MarkerFactory
25 |
object Implicits {

  /** Marker attached to fatal log entries so appenders can distinguish them from plain errors. */
  final val FATAL_ERROR_MARKER = MarkerFactory.getMarker("FATAL")

  /** Enriches a `Logger` with `fatal` methods that log at ERROR level and terminate the JVM. */
  implicit class RichLogger(val instance: Logger) extends AnyVal {

    /**
      * Logs the given message at ERROR level (marked as FATAL) and terminates
      * the JVM with exit code 1.
      *
      * @param message the lazily evaluated message to log
      * @return nothing; this method never returns normally
      */
    final def fatal(message: => String): Nothing = {
      instance.whenErrorEnabled {
        instance.error(Implicits.FATAL_ERROR_MARKER, message)
      }
      sys.exit(1)
    }

    /**
      * Logs the given message and throwable at ERROR level (marked as FATAL)
      * and terminates the JVM with the given exit code.
      *
      * @param message the lazily evaluated message to log
      * @param ex the throwable whose stack trace is logged along with the message
      * @param exitCode the process exit code (defaults to 1)
      * @return nothing; this method never returns normally
      */
    final def fatal(message: => String, ex: => Throwable, exitCode: Int = 1): Nothing = {
      instance.whenErrorEnabled {
        // BUG FIX: the throwable was previously ignored; log it so the stack trace appears.
        instance.error(Implicits.FATAL_ERROR_MARKER, message, ex)
      }
      sys.exit(exitCode)
    }
  }
}
47 |
48 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/logging/LogbackColouredHighlighter.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.logging
22 |
23 | import ch.qos.logback.classic.Level
24 | import ch.qos.logback.classic.spi.ILoggingEvent
25 | import ch.qos.logback.core.pattern.color.ForegroundCompositeConverterBase
26 |
27 | import scala.annotation.switch
28 |
/**
 * A logback pattern converter that selects an ANSI foreground colour code
 * for each logging event according to its level.
 *
 * @see [[ch.qos.logback.core.pattern.color.ForegroundCompositeConverterBase]]
 */
class LogbackColouredHighlighter extends ForegroundCompositeConverterBase[ILoggingEvent] {

  import LogbackColouredHighlighter._

  // The @switch annotation asks the compiler to emit a tableswitch (and warn otherwise).
  override def getForegroundColorCode(event: ILoggingEvent): String =
    (event.getLevel.levelInt: @switch) match {
      case Level.ERROR_INT => STYLE_ERROR
      case Level.WARN_INT  => STYLE_WARN
      case Level.INFO_INT  => STYLE_INFO
      case Level.DEBUG_INT => STYLE_DEBUG
      case Level.TRACE_INT => STYLE_TRACE
      case _               => DEFAULT_FG
    }
}
43 |
/** ANSI SGR foreground colour codes used by [[LogbackColouredHighlighter]], one per log level. */
object LogbackColouredHighlighter {
  final val DEFAULT_FG: String = "39" // Default foreground colour
  final val STYLE_ERROR = "31" // Red
  final val STYLE_WARN = "33" // Yellow (ANSI code 33 is yellow, not orange)
  final val STYLE_INFO = "32" // Green
  final val STYLE_DEBUG = "34" // Blue
  final val STYLE_TRACE = "35" // Magenta
}
52 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/opt/MasterOptionParser.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.opt
22 |
/**
  * A master command-line option parser: the first argument selects a registered
  * sub-command and the remaining arguments are forwarded to its handler.
  */
trait MasterOptionParser {

  type Description = String
  type OptionName = String

  // Maps each registered option name to its description and its argument handler.
  private var optToParserExecutable: Map[OptionName, (Description, Array[String] => Unit)] = Map.empty

  /**
    * Registers a sub-command option.
    *
    * @param opt the option name that selects the sub-command
    * @param description a short description shown in the usage message
    * @param executable the handler receiving the remaining arguments
    */
  protected def addOpt(
      opt: OptionName,
      description: Description,
      executable: Array[String] => Unit): Unit = {
    optToParserExecutable += opt -> (description, executable)
  }

  /**
    * Parses the given arguments. Prints the usage message and exits when no
    * arguments are given; exits with an error message for an unknown option;
    * otherwise forwards the remaining arguments to the selected handler.
    *
    * @param argz the command-line arguments
    */
  def parse(argz: Array[String]): Unit = {

    if (argz.isEmpty) {
      println(usage)
      sys.exit(1)
    }

    val firstArgument = argz.head.trim

    optToParserExecutable.get(firstArgument) match {
      case Some((_, executable)) =>
        // Forward everything after the sub-command name (possibly empty).
        executable(argz.drop(1))

      case None =>
        // BUG FIX: use println so the error message is newline-terminated.
        Console.err.println(s"Unknown parameter '$firstArgument'")
        sys.exit(1)
    }
  }

  /**
    * Builds a formatted usage message listing every registered option together
    * with its (line-wrapped) description.
    *
    * @return the usage message
    */
  def usage: String = {
    // BUG FIX: `.max` on an empty collection throws; return the bare header instead.
    if (optToParserExecutable.isEmpty) "\n\nUsage:\n"
    else {
      // Width of the option-name column (longest name plus padding).
      val maxSizeOptName = optToParserExecutable.keys.map(_.length).max + 4

      "\n\nUsage:\n" + optToParserExecutable
        .map {
          case (optionName, (description, _)) =>
            val maxLength = 76 - maxSizeOptName

            if (maxLength < 0)
              s"  $optionName$NLTB${wrapText(description, 72)}"
            else {
              val gap = " " * (maxSizeOptName - optionName.length)
              s"  $optionName$gap${wrapText(description, maxLength)}"
            }
        }
        .mkString(NL)
    }
  }

}
77 |
--------------------------------------------------------------------------------
/src/main/scala/lomrf/util/opt/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
package object opt {

  final val NL = System.getProperty("line.separator")
  final val TB = "        " // 8 whitespace chars
  final val NLTB = NL + TB
  final val NLNL = NL + NL

  final val defaultKeyName = ""
  final val defaultValueName = ""

  /**
    * Wraps the given text so that lines do not exceed `maxLength` characters,
    * breaking at spaces, tabs or dashes and indenting continuation lines.
    *
    * @param description the text to wrap
    * @param maxLength the maximum line length
    * @return the wrapped text
    */
  def wrapText(description: String, maxLength: Int): String = {
    if (description.length < maxLength) description
    else if (description.substring(0, maxLength).contains(NL)) {
      // Break at the existing newline and continue wrapping the remainder.
      val idxNL = description.indexOf(NL)
      description.substring(0, idxNL).trim() + NLTB + wrapText(description.substring(idxNL + 1).trim(), maxLength)
    } else {
      // Break at the last space, tab or dash within the length limit.
      val idx = math.max(math.max(description.lastIndexOf(" ", maxLength), description.lastIndexOf(TB, maxLength)), description.lastIndexOf("-", maxLength))

      // BUG FIX: when no break character exists within the limit (idx == -1) the
      // original code threw StringIndexOutOfBoundsException, and when the only
      // break character was at position 0 (e.g. a leading dash) it could recurse
      // forever. In both cases fall back to a hard break at maxLength.
      if (idx <= 0)
        description.substring(0, maxLength) + NLTB + wrapText(description.substring(maxLength).trim(), maxLength)
      else
        description.substring(0, idx).trim() + NLTB + wrapText(description.substring(idx).trim(), maxLength)
    }
  }
}
44 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/logic/AtomSignatureSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
23 | import org.scalatest.{ FunSpec, Matchers }
24 | import lomrf.logic.AtomSignatureOps._
25 | import scala.util.Success
26 |
27 | /**
28 | * A series of specification test for atom signatures.
29 | *
30 | * @see [[lomrf.logic.AtomSignature]]
31 | */
/**
 * A series of specification test for atom signatures.
 *
 * @see [[lomrf.logic.AtomSignature]]
 */
final class AtomSignatureSpecTest extends FunSpec with Matchers {

  describe("Atom Signature of predicates and functions") {

    // Registers the signature checks for a predicate with the given name and arity:
    // construction from name/arity, from an atomic formula, and from parsed text
    // must all yield the same signature.
    def checkPredicate(name: String, arity: Int): Unit =
      it(s"$name/$arity predicate should have a valid atom signature") {
        val signature = AtomSignature(name, arity)

        signature shouldEqual AtomSignature(AtomicFormula(
          name, (1 to arity).map(i => Variable(s"x$i")).toVector))

        Success(signature) shouldEqual AtomSignature.parseString(s"$name/$arity")

        s"$name/$arity".signature shouldEqual AtomSignature.parseString(s"$name/$arity")
      }

    // Registers the same checks for a function, constructed from a term function.
    def checkFunction(name: String, arity: Int): Unit =
      it(s"$name/$arity function should have a valid atom signature") {
        val signature = AtomSignature(name, arity)

        signature shouldEqual AtomSignature(TermFunction(
          name, (1 to arity).map(i => Variable(s"x$i")).toVector))

        Success(signature) shouldEqual AtomSignature.parseString(s"$name/$arity")

        s"$name/$arity".signature shouldEqual AtomSignature.parseString(s"$name/$arity")
      }

    checkPredicate("P", 1)
    checkPredicate("Q", 4)
    checkPredicate("R", 100)

    checkFunction("foo", 2)
    checkFunction("bar", 65)
  }
}
107 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/logic/TriStateSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic
22 |
23 | import org.scalatest.{ FunSpec, Matchers }
24 |
25 | /**
26 | * A series of specification tests for [[lomrf.logic.TriState]]
27 | */
/**
 * A series of specification tests for [[lomrf.logic.TriState]]
 */
final class TriStateSpecTest extends FunSpec with Matchers {

  describe("Conjunction of Tri-States") {

    it("TRUE ^ TRUE should be TRUE") {
      TRUE ^ TRUE shouldEqual TRUE
    }

    it("TRUE ^ FALSE should be FALSE") {
      TRUE ^ FALSE shouldEqual FALSE
    }

    it("TRUE ^ UNKNOWN should be UNKNOWN") {
      TRUE ^ UNKNOWN shouldEqual UNKNOWN
    }

    it("FALSE ^ FALSE should be FALSE") {
      FALSE ^ FALSE shouldEqual FALSE
    }

    it("FALSE ^ UNKNOWN should be FALSE") {
      FALSE ^ UNKNOWN shouldEqual FALSE
    }

    it("UNKNOWN ^ UNKNOWN should be UNKNOWN") {
      UNKNOWN ^ UNKNOWN shouldEqual UNKNOWN
    }
  }

  describe("Disjunction of Tri-States") {

    it("TRUE v TRUE should be TRUE") {
      TRUE v TRUE shouldEqual TRUE
    }

    it("TRUE v FALSE should be TRUE") {
      TRUE v FALSE shouldEqual TRUE
    }

    it("TRUE v UNKNOWN should be TRUE") {
      TRUE v UNKNOWN shouldEqual TRUE
    }

    it("FALSE v FALSE should be FALSE") {
      // BUG FIX: this case previously tested conjunction (^) instead of disjunction (v).
      FALSE v FALSE shouldEqual FALSE
    }

    it("FALSE v UNKNOWN should be UNKNOWN") {
      FALSE v UNKNOWN shouldEqual UNKNOWN
    }

    it("UNKNOWN v UNKNOWN should be UNKNOWN") {
      UNKNOWN v UNKNOWN shouldEqual UNKNOWN
    }
  }

  describe("Flip Tri-States") {

    it("!TRUE should be FALSE") {
      TRUE.flip shouldEqual FALSE
    }

    it("!FALSE should be TRUE") {
      FALSE.flip shouldEqual TRUE
    }

    it("!UNKNOWN should be UNKNOWN") {
      UNKNOWN.flip shouldEqual UNKNOWN
    }
  }

}
100 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/logic/parser/DomainParserSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic.parser
22 |
23 | import lomrf.logic.{ AtomicType, ConstantTypeDefinition, FunctionType, IntegerTypeDefinition }
24 | import org.scalatest.{ FunSpec, Matchers }
25 |
/** Specification tests for the domain (knowledge base schema) parser. */
final class DomainParserSpecTest extends FunSpec with Matchers {

  val domainParser = new DomainParser

  describe("Constant type domain definitions") {

    // An explicitly enumerated integer-valued constant set.
    val timeDefinition = "time = {1, 2, 3, 4, 5}"

    it(s"$timeDefinition should be a valid constant domain definition") {
      domainParser.parseConstantType(timeDefinition) shouldEqual
        ConstantTypeDefinition("time", (1 to 5).map(_.toString))
    }

    // A constant set of named entities.
    val personDefinition = "person = { Anna, Bob, George }"

    it(s"$personDefinition should be a valid constant domain definition") {
      domainParser.parseConstantType(personDefinition) shouldEqual
        ConstantTypeDefinition("person", Vector("Anna", "Bob", "George"))
    }
  }

  describe("Integer type domain definitions") {

    // A range-style integer domain from 1 up to 100.
    val timeDefinition = "time = {1, ..., 100}"

    it(s"$timeDefinition should be a valid integer domain definition") {
      domainParser.parseIntegerType(timeDefinition) shouldEqual
        IntegerTypeDefinition("time", 1, 100)
    }

    // A range-style integer domain with a non-unit lower bound.
    val numberDefinition = "number = {5, ..., 25}"

    it(s"$numberDefinition should be a valid integer domain definition") {
      domainParser.parseIntegerType(numberDefinition) shouldEqual
        IntegerTypeDefinition("number", 5, 25)
    }
  }

  describe("Atomic type domain definitions") {

    // A binary predicate over two person-typed arguments.
    val parentDefinition = "Parent(person, person)"

    it(s"$parentDefinition should be a valid atomic domain definition") {
      domainParser.parseAtomicType(parentDefinition) shouldEqual
        AtomicType("Parent", Vector("person", "person"))
    }

    // A predicate without any arguments.
    val unaryDefinition = "UnaryPredicate"

    it(s"$unaryDefinition should be a valid atomic domain definition") {
      domainParser.parseAtomicType(unaryDefinition) shouldEqual
        AtomicType("UnaryPredicate", Vector.empty[String])
    }
  }

  describe("Function type domain definitions") {

    // A function mapping an id to an event.
    val activeDefinition = "event active(id)"

    it(s"$activeDefinition should be a valid function domain definition") {
      domainParser.parseFunctionType(activeDefinition) shouldEqual
        FunctionType("event", "active", Vector("id"))
    }

    // A generic binary function definition.
    val fDefinition = "y f(x, z)"

    it(s"$fDefinition should be a valid function domain definition") {
      domainParser.parseFunctionType(fDefinition) shouldEqual
        FunctionType("y", "f", Vector("x", "z"))
    }
  }
}
98 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/logic/parser/EvidenceParserSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.logic.parser
22 |
23 | import lomrf.logic.{ Constant, EvidenceAtom, FunctionMapping }
24 | import org.scalatest.{ FunSpec, Matchers }
25 |
/** Specification tests for the evidence (database) parser. */
final class EvidenceParserSpecTest extends FunSpec with Matchers {

  val evidenceParser = new EvidenceParser

  describe("Function mappings") {

    // A ground function mapping a return constant to a function application.
    val meetMapping = "Meet_ID1_ID2 = meet(ID1, ID2)"

    it(s"$meetMapping should be a valid function mapping definition") {
      evidenceParser.parseFunctionMapping(meetMapping) shouldEqual
        new FunctionMapping("Meet_ID1_ID2", "meet", Vector("ID1", "ID2"))
    }
  }

  describe("Evidence atoms") {

    // A plain positive (true) evidence atom.
    val positiveHappensAtom = "Happens(Active_ID1, 5)"

    it(s"$positiveHappensAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(positiveHappensAtom) shouldEqual
        EvidenceAtom.asTrue("Happens", Vector(Constant("Active_ID1"), Constant("5")))
    }

    // Probability 1.0 is interpreted as a definitely-true atom.
    val certainlyTrueHappensAtom = "Happens(Active_ID1, 5) 1.0"

    it(s"$certainlyTrueHappensAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(certainlyTrueHappensAtom) shouldEqual
        EvidenceAtom.asTrue("Happens", Vector(Constant("Active_ID1"), Constant("5")))
    }

    // A positive atom without arguments.
    val positiveUnaryAtom = "UnaryPredicate"

    it(s"$positiveUnaryAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(positiveUnaryAtom) shouldEqual
        EvidenceAtom.asTrue("UnaryPredicate", Vector.empty[Constant])
    }

    // A negated (false) evidence atom.
    val negatedHappensAtom = "!Happens(Inactive_ID0, 10)"

    it(s"$negatedHappensAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(negatedHappensAtom) shouldEqual
        EvidenceAtom.asFalse("Happens", Vector(Constant("Inactive_ID0"), Constant("10")))
    }

    // Probability 0.0 is interpreted as a definitely-false atom.
    val certainlyFalseHappensAtom = "Happens(Inactive_ID0, 10) 0.0"

    it(s"$certainlyFalseHappensAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(certainlyFalseHappensAtom) shouldEqual
        EvidenceAtom.asFalse("Happens", Vector(Constant("Inactive_ID0"), Constant("10")))
    }

    // A negated atom without arguments.
    val negatedUnaryAtom = "!UnaryPredicate"

    it(s"$negatedUnaryAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(negatedUnaryAtom) shouldEqual
        EvidenceAtom.asFalse("UnaryPredicate", Vector.empty[Constant])
    }

    // An atom with explicitly unknown truth value.
    val unknownHappensAtom = "?Happens(Exit_ID2, 25)"

    it(s"$unknownHappensAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(unknownHappensAtom) shouldEqual
        EvidenceAtom.asUnknown("Happens", Vector(Constant("Exit_ID2"), Constant("25")))
    }

    // An unknown atom without arguments.
    val unknownUnaryAtom = "?UnaryPredicate"

    it(s"$unknownUnaryAtom should be a valid evidence atom definition") {
      evidenceParser.parseEvidenceAtom(unknownUnaryAtom) shouldEqual
        EvidenceAtom.asUnknown("UnaryPredicate", Vector.empty[Constant])
    }

    // Fractional probabilities (soft evidence) are not supported by the parser.
    val softEvidenceAtom = "Parent(George, Peter) 0.8"

    it(s"$softEvidenceAtom is not supported yet and should throw an exception") {
      intercept[UnsupportedOperationException] {
        evidenceParser.parseEvidenceAtom(softEvidenceAtom)
      }
    }
  }
}
107 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/mln/grounding/DependencyMapSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import lomrf.logic._
24 | import lomrf.mln.model.{ AtomIdentityFunctionOps, MLN }
25 | import lomrf.mln.model.mrf.MRF
26 | import AtomIdentityFunctionOps._
27 | import org.scalatest.{ FunSpec, Matchers }
28 | import lomrf.tests.TestData
29 | import lomrf.util.io._
30 |
31 | import scala.language.implicitConversions
32 |
33 | /**
34 | * Specification test for dependency map produced by grounding procedure. It is used by learning algorithms in order to
35 | * reconstruct the ground network without rerunning the grounding procedure in each iteration.
36 | */
class DependencyMapSpecTest extends FunSpec with Matchers {

  // Implicitly converts a "Name/arity" string into an atom signature.
  private implicit def str2AtomSignature(txt: String): AtomSignature = {
    val elements = txt.split("/")
    assert(elements.length == 2)
    AtomSignature(elements(0), elements(1).toInt)
  }

  // BUG FIX: removed the unused `sep` val (leftover of a previously hand-built path).
  private val prefix = TestData.TestFilesPath / "DependencyMap"

  private val mlnFile = prefix / "DependencyMap.mln"
  private val evidenceFile = prefix / "Empty.db"

  implicit val mln = MLN.fromFile(mlnFile, queryAtoms = Set("S/1", "C/1", "K/1", "M/1", "F/2"), evidenceFile)

  describe(s"The MLN theory in '$mlnFile'") {

    /*it("should contain 7 formulas") {
      mln.formulas.size shouldBe 7
    }*/

    it("should contain 1 constant set (domain)") {
      mln.evidence.constants.size shouldBe 1
    }

    it("should contain 5 predicate schemas") {
      mln.schema.predicates.size shouldBe 5
    }

    it("should not contain any function schemas") {
      mln.schema.functions.size shouldBe 0
    }
  }

  describe(s"The produced MRF when negative weights are allowed") {
    checkScenario(noNegWeights = true, expectedNumberOfAtoms = 12, expectedNumberOfConstraints = 19)
  }

  describe(s"The produced MRF when negative weights are not allowed") {
    checkScenario(noNegWeights = false, expectedNumberOfAtoms = 12, expectedNumberOfConstraints = 17)
  }

  /**
    * Grounds the MLN and verifies that every ground constraint's weight can be
    * reconstructed from the dependency map (sum of clause weight times frequency).
    *
    * @param noNegWeights whether negative weights are eliminated during grounding
    * @param expectedNumberOfAtoms expected number of ground atoms in the MRF
    * @param expectedNumberOfConstraints expected number of ground clauses in the MRF
    */
  private def checkScenario(noNegWeights: Boolean, expectedNumberOfAtoms: Int, expectedNumberOfConstraints: Int): Unit = {

    val mrf = MRF.build(mln, noNegWeights, createDependencyMap = true)

    it(s"should contain $expectedNumberOfAtoms ground atoms") {
      mrf.numberOfAtoms shouldBe expectedNumberOfAtoms
    }

    it(s"should contain $expectedNumberOfConstraints ground clauses") {
      mrf.numberOfConstraints shouldBe expectedNumberOfConstraints
    }

    val dependencyMap = mrf.dependencyMap.getOrElse(sys.error("Dependency map does not exists."))

    describe("The produced dependency map") {
      val dmIterator = dependencyMap.iterator()

      // For each ground constraint, reconstruct its weight from the clauses it
      // originates from: sum over (clause weight x occurrence frequency).
      while (dmIterator.hasNext) {
        dmIterator.advance()
        val constraintID = dmIterator.key()
        val statsIterator = dmIterator.value.iterator()

        val constraintWeight = mrf.constraints.get(constraintID).getWeight
        var clauseWeight = 0.0
        var total = 0.0

        while (statsIterator.hasNext) {
          statsIterator.advance()
          val cid = statsIterator.key()
          val freq = statsIterator.value()
          // Hard clauses contribute the MRF's hard-weight value instead of their own weight.
          clauseWeight = if (mln.clauses(cid).isHard) mrf.weightHard else mln.clauses(cid).weight
          total += clauseWeight * freq
        }

        val constraint = mrf.constraints.get(constraintID)
        it(s"has the constraint $constraintID -> ${constraint.decodeFeature()(mln).getOrElse("Failed to decode constraint")}, which can be reconstructed") {
          constraintWeight shouldBe total
        }
      }
    }

  }
}
123 |
124 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/mln/grounding/GroundingSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.grounding
22 |
23 | import java.io.File
24 |
25 | import lomrf.logic.AtomSignature
26 | import lomrf.mln.model.MLN
27 | import lomrf.util.time.{ measureTime, msecTimeToText }
28 | import org.scalatest.{ FunSpec, Matchers }
29 | import lomrf.tests.TestData
30 | import lomrf.util.io._
31 |
32 | import scala.io.Source
33 |
34 | /**
35 | * Specification test regarding the grounding process (i.e., the creation of MRF from an MLN)
36 | */
final class GroundingSpecTest extends FunSpec with Matchers {

  private val mainPath = TestData.TestFilesPath / "inference" / "caviar" / "DN"

  val queryAtoms = Set(AtomSignature("HoldsAt", 2))

  // Predicates with closed-world assumption (fully observed evidence).
  val cwa = Set(
    AtomSignature("Happens", 2), AtomSignature("Close", 4), AtomSignature("Next", 2),
    AtomSignature("OrientationMove", 3), AtomSignature("StartTime", 1))

  // Accumulated grounding time across all folds, for the summary printed at the end.
  var totalTime = 0L
  var iterations = 0

  for {
    inertiaConfiguration <- List("HI", "SI", "SI_h")

    fold <- 0 to 9

    currentPath = new File(mainPath / inertiaConfiguration / "meet" / "fold_" + fold)
    if currentPath.exists

    mlnFile = findFirstFile(currentPath, _.getName.endsWith(".mln"))
      .getOrElse(sys.error("Cannot find MLN in '" + currentPath + "'"))

    expectedResultFiles = findFiles(currentPath, _.getName.endsWith(".mws.golden"))

    dbFile <- findFiles(currentPath, _.getName.endsWith(".db"))
  } describe("Loading MLN theory from file '" + mlnFile + "', with evidence from file '" + dbFile) {

    val mln = MLN.fromFile(mlnFile.getAbsolutePath, queryAtoms, dbFile.getAbsolutePath, cwa)

    // Expected statistics are stored in a companion `.statistics` file of `key=value` lines.
    val stats = Source
      .fromFile(dbFile.getAbsolutePath.replace(".db", ".statistics"))
      .getLines()
      .map(line => line.split('='))
      .map(entries => entries(0) -> entries(1))
      .toMap

    // BUG FIX: corrected the garbled test description ("should constants ... constants sets").
    it(s"should contain ${stats("mln.constants.size")} constant sets (domains)") {
      mln.evidence.constants.size should be(stats("mln.constants.size").toInt)
    }

    it(s"should contain ${stats("mln.predicateSchema.size")} predicate schemas") {
      mln.schema.predicates.size should be(stats("mln.predicateSchema.size").toInt)
    }

    it(s"should contain ${stats("mln.functionSchema.size")} function schemas") {
      mln.schema.functions.size should be(stats("mln.functionSchema.size").toInt)
    }

    info("Creating MRF...")
    val mrfBuilder = new MRFBuilder(mln, createDependencyMap = false)

    val (time, mrf) = measureTime(mrfBuilder.buildNetwork)
    totalTime += time
    iterations += 1

    describe("The constructed MRF") {
      it(s"should contain ${stats("mrf.atoms.size")} ground atoms") {
        mrf.atoms.size should be(stats("mrf.atoms.size").toInt)
      }

      it(s"should contain ${stats("mrf.constraints.size")} ground clauses") {
        mrf.constraints.size should be(stats("mrf.constraints.size").toInt)
      }

      it(s"should has ${stats("mrf.weightHard")} as hard weight value") {
        mrf.weightHard should be(stats("mrf.weightHard").toDouble)
      }
    }

    it("has continuous index of ground clause ids") {
      val keys = mrf.constraints.keys()
      var fail = false
      java.util.Arrays.sort(keys)

      // After sorting, each constraint id must equal its position (no gaps).
      for ((key, idx) <- keys.zipWithIndex) {
        if (key != idx) {
          info(key + " != " + idx)
          fail = true
        }
      }

      fail shouldEqual false
    }

  }

  info(msecTimeToText("Total time spend for grounding : ", totalTime))
  info(msecTimeToText("Average time spend for grounding : ", totalTime / iterations))

}
129 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/mln/learning/structure/ModeParserSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.structure
22 |
23 | import lomrf.logic.AtomSignature
24 | import org.scalatest.{ Matchers, FunSpec }
25 | import lomrf.mln.learning.structure.{ PlaceMarker => PM }
26 |
27 | /**
28 | * Specification test for the mode declaration parser.
29 | */
/**
  * Specification test for the mode declaration parser.
  *
  * Each fixture pairs a mode declaration string with the atom signature,
  * recall, place markers and incompatible signatures that parsing it is
  * expected to produce.
  */
final class ModeParserSpecTest extends FunSpec with Matchers {

  val noIncompatible = Set.empty[AtomSignature]

  // List of mode declarations to be parsed along with an annotation of the results
  val modeList = List(
    ("modeP(2, A(#-, +, .))", AtomSignature("A", 3), 2, Vector(PM.outputConstant, PM.input, PM.ignore), noIncompatible),
    ("modeP(4, B(., +, +))", AtomSignature("B", 3), 4, Vector(PM.ignore, PM.input, PM.input), noIncompatible),
    ("modeP(1, C(-, -))", AtomSignature("C", 2), 1, Vector(PM.output, PM.output), noIncompatible),
    ("modeP(91, D(-, +))", AtomSignature("D", 2), 91, Vector(PM.output, PM.input), noIncompatible),
    ("modeP(7, E(#+) body~/> A/3, D/2)", AtomSignature("E", 1), 7, Vector(PM.inputConstant), Set(AtomSignature("A", 3), AtomSignature("D", 2))),
    ("modeP(0, F(+) body~/> foo/2)", AtomSignature("F", 1), 0, Vector(PM.input), Set(AtomSignature(lomrf.AUX_PRED_PREFIX + "foo", 3))),
    ("modeP(21, G(-))", AtomSignature("G", 1), 21, Vector(PM.output), noIncompatible),
    ("modeP(67, H(., +))", AtomSignature("H", 2), 67, Vector(PM.ignore, PM.input), noIncompatible),
    ("modeP(9, I(., -))", AtomSignature("I", 2), 9, Vector(PM.ignore, PM.output), noIncompatible),
    ("modeP(*, J(#., +))", AtomSignature("J", 2), Int.MaxValue, Vector(PM.ignoreConstant, PM.input), noIncompatible),
    ("modeP(5, K(., +))", AtomSignature("K", 2), 5, Vector(PM.ignore, PM.input), noIncompatible),
    ("modeP(1, L(., .))", AtomSignature("L", 2), 1, Vector(PM.ignore, PM.ignore), noIncompatible),
    ("modeP(*, M(+, +))", AtomSignature("M", 2), Int.MaxValue, Vector(PM.input, PM.input), noIncompatible),
    ("modeP(2, N(#-, #., #+))", AtomSignature("N", 3), 2, Vector(PM.outputConstant, PM.ignoreConstant, PM.inputConstant), noIncompatible),
    ("modeP(102, O(+, #-, #+))", AtomSignature("O", 3), 102, Vector(PM.input, PM.outputConstant, PM.inputConstant), noIncompatible),
    ("modeP(8, P(n-))", AtomSignature("P", 1), 8, Vector(PM.outputNumeric), noIncompatible),
    ("modeP(17, Q(#n+))", AtomSignature("Q", 1), 17, Vector(PM.inputNumericConstant), noIncompatible),
    ("modeF(7, foo(-, .))", AtomSignature(lomrf.AUX_PRED_PREFIX + "foo", 3), 7, Vector(PM.input, PM.output, PM.ignore), noIncompatible),
    ("modeF(*, bar(-))", AtomSignature(lomrf.AUX_PRED_PREFIX + "bar", 2), Int.MaxValue, Vector(PM.input, PM.output), noIncompatible))

  // Parse each declaration and verify every component of the parsed result.
  modeList foreach {
    case (declaration, expectedSignature, expectedRecall, expectedMarkers, expectedIncompatible) =>
      describe(s"Mode declaration '$declaration'") {

        val (parsedSignature, mode) = ModeParser.parseFrom(declaration)

        it(s"Signature should be equal to $expectedSignature") {
          parsedSignature shouldEqual expectedSignature
        }

        it(s"Recall should be equal to $expectedRecall") {
          mode.recall shouldEqual expectedRecall
        }

        it("All place markers should be valid") {
          mode.placeMarkers zip expectedMarkers foreach {
            case (parsedMarker, expectedMarker) => parsedMarker shouldEqual expectedMarker
          }
        }

        it(s"Incompatible atom signatures should be [${expectedIncompatible.mkString(" ")}]") {
          expectedIncompatible shouldEqual mode.incompatibleSignatures
        }
      }
  }

}
81 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/mln/learning/structure/hypergraph/PathTemplateSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.learning.structure.hypergraph
22 |
23 | import lomrf.logic.AtomSignature
24 | import lomrf.logic.parser.KBParser
25 | import lomrf.mln.model.ConstantsSet
26 | import org.scalatest.{ FunSpec, Matchers }
27 |
28 | /**
29 | * Specification test for path templates.
30 | */
/**
  * Specification test for path templates.
  *
  * Sets up an Event-Calculus-like knowledge base (template, evidence and
  * non-evidence atoms) and converts its axioms to CNF. The actual path
  * template assertions are still pending (see the TODO at the bottom).
  */
final class PathTemplateSpecTest extends FunSpec with Matchers {

  // Predicate schema having template atoms, evidence atoms and non-evidence atoms
  private val predicateSchema = Map(
    AtomSignature("TemplateAtom_1", 2) -> Vector("X", "T"),
    AtomSignature("TemplateAtom_2", 2) -> Vector("X", "T"),
    AtomSignature("EvidenceAtom_1", 2) -> Vector("Y", "T"),
    AtomSignature("EvidenceAtom_2", 2) -> Vector("T", "T"),
    AtomSignature("NonEvidenceAtom_1", 2) -> Vector("X", "T"))

  // Empty function schema
  private val functionsSchema = Map.empty[AtomSignature, (String, Vector[String])]

  // Constants domain
  private val constantsDomain = Map(
    "T" -> ConstantsSet((1 to 10).map(_.toString)),
    "X" -> ConstantsSet("X1", "X2", "X3", "X4"),
    "Y" -> ConstantsSet("Y1", "Y2", "Y3", "Y4"))

  private val parser = new KBParser(predicateSchema, functionsSchema)

  // ------------------------------------------------------------------------------------------------------------------
  // --- TEST: The Event Calculus case
  // ------------------------------------------------------------------------------------------------------------------

  // Template atoms, aligned by index with the axioms below (one template atom per axiom)
  private val templateAtomsPerAxiom = Seq(
    AtomSignature("TemplateAtom_1", 2),
    AtomSignature("TemplateAtom_2", 2),
    AtomSignature("TemplateAtom_2", 2),
    AtomSignature("TemplateAtom_1", 2))

  // Parse each axiom into a logical sentence and convert to CNF; note that a
  // single axiom may yield multiple clauses (hence the flatMap).
  val axioms = Seq(
    "EvidenceAtom_2(t1, t0) ^ TemplateAtom_1(x, t0) => NonEvidenceAtom_1(x, t1).",
    "EvidenceAtom_2(t1, t0) ^ TemplateAtom_2(x, t0) => !NonEvidenceAtom_1(x, t1).",
    "EvidenceAtom_2(t1, t0) ^ NonEvidenceAtom_1(x, t0) ^ !TemplateAtom_2(x, t0) => NonEvidenceAtom_1(x, t1).",
    "EvidenceAtom_2(t1, t0) ^ !NonEvidenceAtom_1(x, t0) ^ !TemplateAtom_1(x, t0) => !NonEvidenceAtom_1(x, t1).").map(parser.parseLogicalSentence).flatMap(_.toCNF(constantsDomain))

  // Log the resulting clauses, one per line, in human-readable form
  info(axioms.map(_.literals.map(_.toText).mkString(" v ")).mkString("\n"))

  //val pathTemplate = PathTemplate(Set("X", "T"))

  //axioms zip templateAtomsPerAxiom foreach { case (axiom, template) => pathTemplate + (axiom, template) }

  //info(s"$pathTemplate")

  // TODO
}
79 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/mln/model/builders/ConstantsSetBuilderSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.mln.model.builders
22 |
23 | import org.scalatest.{ FunSpec, Matchers }
24 |
25 | import scala.util.Random
26 |
27 | /**
28 | * A series of specification test for constants set builder.
29 | *
30 | * @see [[lomrf.mln.model.builders.ConstantsSetBuilder]]
31 | */
/**
  * A series of specification test for constants set builder.
  *
  * Covers the empty builder, a single-symbol builder and a multi-symbol
  * builder, checking emptiness, size, iteration, copying, the produced
  * constants set and clearing.
  *
  * @see [[lomrf.mln.model.builders.ConstantsSetBuilder]]
  */
final class ConstantsSetBuilderSpecTest extends FunSpec with Matchers {

  describe("An empty builder") {
    val builder = ConstantsSetBuilder()

    it ("should be empty") {
      builder.isEmpty shouldEqual true
    }

    it ("should have zero size") {
      builder.size shouldEqual 0
    }

    it("should have an empty iterator") {
      builder.iterator.isEmpty shouldEqual true
    }

    it("should create copies of empty builders") {
      builder.copy().isEmpty shouldEqual true
    }

    it ("should result in an empty constants set") {
      builder.result().isEmpty shouldBe true
    }
  }

  describe("A builder holding a single constant symbol") {
    val builder = ConstantsSetBuilder("Symbol")

    it ("should NOT be empty") {
      builder.nonEmpty shouldEqual true
    }

    it ("should have size 1") {
      builder.size shouldEqual 1
    }

    it("should have a NON empty iterator") {
      builder.iterator.nonEmpty shouldEqual true
    }

    it("should create copies of NON empty builders") {
      builder.copy().nonEmpty shouldEqual true
    }

    it ("should result in a constants set holding a single constant") {
      val constantsSet = builder.result()
      constantsSet.nonEmpty shouldEqual true
      constantsSet.contains("Symbol") shouldEqual true
    }
  }

  // Fixed description typo: 'a more constant symbols' -> 'more constant symbols'.
  describe("A builder holding more constant symbols") {

    // Ten distinct symbols: a unique prefix followed by 5 random alphanumeric
    // characters. NOTE: '.mkString' is required here; the former '.toString()'
    // rendered the lazy stream as the literal text 'Stream(c, ?)' instead of
    // materialising the five characters.
    val constants =
      for (idx <- 1 to 10) yield s"C$idx" + Random.alphanumeric.take(5).mkString

    val builder = ConstantsSetBuilder(constants)

    it ("should NOT be empty") {
      builder.nonEmpty shouldEqual true
    }

    it (s"should have size ${constants.size}") {
      builder.size shouldEqual constants.size
    }

    it("should have a NON empty iterator") {
      builder.iterator.nonEmpty shouldEqual true
    }

    it("should create copies of NON empty builders") {
      builder.copy().nonEmpty shouldEqual true
    }

    it ("should result in a constants set holding all given constants") {
      val constantsSet = builder.result()
      constantsSet.nonEmpty shouldEqual true
      // This check was previously computed but its result was discarded,
      // so the test could never fail on a missing constant.
      constants.forall(constantsSet.contains) shouldEqual true
    }

    it ("should be empty after clear function is called") {
      builder.clear()
      builder.isEmpty shouldEqual true
    }
  }
}
119 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/tests/ECExampleDomain1.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.tests
22 |
23 | import lomrf.logic.AtomSignature
24 | import lomrf.logic.predef._
25 | import lomrf.mln.model.{ ConstantsSet, FunctionMapper }
26 |
27 | /**
28 | * Simple function-free EC domain for testing
29 | */
/**
  * A simple, function-free Event Calculus domain shared by the unit tests.
  */
object ECExampleDomain1 {

  // The last (maximum) time-point of the 'time' domain.
  val LAST_TIME_POINT = 10

  // Constant sets for each domain type.
  lazy val constants: Map[String, ConstantsSet] = Map(
    "time" -> ConstantsSet((1 to LAST_TIME_POINT).map(_.toString): _*),
    "event" -> ConstantsSet("Walking", "Running", "Active", "Inactive", "Exit", "Enter"),
    "fluent" -> ConstantsSet("Move", "Meet"))

  // Schema of all first-order predicates in the domain.
  lazy val predicateSchema: Map[AtomSignature, Vector[String]] = Map(
    AtomSignature("InitiatedAt", 2) -> Vector("fluent", "time"),
    AtomSignature("TerminatedAt", 2) -> Vector("fluent", "time"),
    AtomSignature("Happens", 2) -> Vector("event", "time"),
    AtomSignature("HoldsAt", 2) -> Vector("fluent", "time"),
    AtomSignature("Next", 2) -> Vector("time", "time"))

  // The domain is function-free.
  lazy val functionsSchema = Map.empty[AtomSignature, (String, Vector[String])] //Map(AtomSignature("next", 1) -> ("time", Vector("time")))

  lazy val dynamicAtoms = dynAtoms

  lazy val dynamicFunctions = dynFunctions

  lazy val functionMappers = Map.empty[AtomSignature, FunctionMapper]

  // Query atoms, whose truth values are to be inferred.
  lazy val queryAtoms = Set(AtomSignature("HoldsAt", 2))

  // Hidden (non-evidence, non-query) atoms.
  lazy val hiddenAtoms = Set(AtomSignature("InitiatedAt", 2), AtomSignature("TerminatedAt", 2))

  // Closed-world assumption atoms (fully observed in the evidence).
  lazy val cwa = Set(AtomSignature("Next", 2), AtomSignature("Happens", 2))

  // Open-world assumption atoms: the query atoms together with the hidden ones.
  lazy val owa = queryAtoms ++ hiddenAtoms

  lazy val probabilisticAtoms = Set.empty[AtomSignature]

  lazy val tristateAtoms = Set.empty[AtomSignature]

}
67 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/tests/TestData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.tests
22 |
23 | import lomrf.util.io._
24 |
25 | import scala.language.postfixOps
26 |
object TestData {

  // Root directory of the data files used by the unit tests, relative to the
  // working directory. NOTE(review): the trailing postfix '/' (enabled via
  // scala.language.postfixOps) presumably appends a final path separator —
  // confirm against the '/' operator in lomrf.util.io.
  final val TestFilesPath = USER_DIR / "Data" / "Unit_Tests" /

}
32 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/util/CartesianSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
23 | import lomrf.util.Cartesian.CartesianIteratorArithmeticImpl
24 | import org.scalatest.{ Matchers, FunSpec }
25 |
26 | /**
27 | *
28 | */
/**
  * Specification test for the arithmetic Cartesian product iterator: for every
  * permutation of each domain-cardinality vector, the iterator must produce
  * exactly `product(cardinalities)` distinct tuples.
  */
class CartesianSpecTest extends FunSpec with Matchers {

  // Note: the given domains should always be above zero
  private val domainList = List(
    Array(10, 5, 2),
    Array(10, 1, 2),
    Array(1, 1, 10),
    Array(1, 10),
    Array(5, 10),
    Array(10),
    Array(1))

  require(domainList.forall(_.forall(_ > 0)))

  domainList foreach { domain =>
    domain.permutations.zipWithIndex foreach {
      case (cardinalities, iteration) =>
        // The iterator takes the maximum index of each domain (cardinality - 1).
        val maxIndices = cardinalities.map(_ - 1)
        val expectedIterations = cardinalities.product // this is the correct number of products

        describe(s"Cartesian product of domains [${maxIndices.mkString(", ")}]") {

          val iterator = new CartesianIteratorArithmeticImpl(maxIndices)
          val result = iterator.map(_.toString).toSet

          info(s"iteration: $iteration\n" +
            s"\telements = [${maxIndices.mkString(", ")}]\n" +
            s"\texpected = $expectedIterations\n" +
            s"\tproduced = ${result.size}")

          it(s"produces $expectedIterations distinct Cartesian products") {
            assert(result.size == expectedIterations)
          }
        }
    }
  }

}
64 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/util/LongDoubleSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util
22 |
23 | import org.scalatest.{ Matchers, FunSpec }
24 |
25 | /**
26 | * Specification test for LongDouble numbers used for very high
27 | * precision operations.
28 | */
/**
  * Specification test for LongDouble numbers used for very high
  * precision operations.
  *
  * Checks the algebraic behaviour of unary minus, addition, subtraction,
  * multiplication, division and the comparison operators.
  */
final class LongDoubleSpecTest extends FunSpec with Matchers {

  import lomrf.util.LongDoubleConversions._

  // Fixture values covering positive, sub-unit and negative operands.
  val fivePointSix = new LongDouble(5.6)
  val onePointSeven = new LongDouble(1.7)
  val zeroPointSeven = new LongDouble(0.7)
  val minusOnePointNine = new LongDouble(-1.9)

  describe("Operators") {

    it("-5.6 should have identical results to 0 - 5.6") {
      assert(-fivePointSix === ZERO - fivePointSix)
    }

    it("-(-1.9) should have identical results to 0 - (-1.9)") {
      assert(-minusOnePointNine === ZERO - minusOnePointNine)
    }

    // Fixed description typo: 'equals to' -> 'equal to'.
    it("1.7 - 1.7 should be equal to 0") {
      assert(onePointSeven - onePointSeven === ZERO)
    }

    it("5.6 + 1.7 should be equal to 1.7 + 5.6") {
      assert(fivePointSix + onePointSeven === onePointSeven + fivePointSix)
    }

    it("1.7 - 0.7 should be equal to 1") {
      assert(onePointSeven - zeroPointSeven === ONE)
    }

    // Fixed description typo: 'shoud' -> 'should'.
    it("1.7 should be equal to (1.7 * 1.7) / 1.7") {
      assert(onePointSeven === (onePointSeven * onePointSeven) / onePointSeven)
    }

    it("5.6 should be greater than 1.7 and obviously 1.7 less than 5.6") {
      assert(fivePointSix > onePointSeven)
      assert(onePointSeven < fivePointSix)
    }

    it("5.6 should be greater or equal to itself and 1.7 should be less or equal to itself") {
      assert(fivePointSix >= fivePointSix && onePointSeven <= onePointSeven)
    }
  }
}
74 |
--------------------------------------------------------------------------------
/src/test/scala/lomrf/util/evaluation/MetricsSpecTest.scala:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * o o o o o
4 | * | o | |\ /| | /
5 | * | o-o o--o o-o oo | | O | oo o-o OO o-o o o
6 | * | | | | | | | | | | | | | | | | \ | | \ /
7 | * O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
8 | * |
9 | * o--o
10 | * o--o o o--o o o
11 | * | | | | o | |
12 | * O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
13 | * | \ | | | | | | | | | | | | | |-' | | | \
14 | * o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
15 | *
16 | * Logical Markov Random Fields (LoMRF).
17 | *
18 | *
19 | */
20 |
21 | package lomrf.util.evaluation
22 |
23 | import org.scalatest.{ Matchers, FunSpec }
24 |
25 | /**
26 | * Specification test for various metrics used for automated evaluation over
27 | * learning results.
28 | */
/**
  * Specification test for various metrics used for automated evaluation over
  * learning results.
  *
  * Metrics take counts of true positives (tp), false positives (fp), false
  * negatives (fn) and true negatives (tn), and are checked at their boundary
  * and balanced cases.
  */
final class MetricsSpecTest extends FunSpec with Matchers {

  // Precision is the fraction of retrieved instances that are relevant
  describe("Precision") {

    it("when we only have true positives precision should be 1.0") {
      Metrics.precision(100, 0, 0) shouldBe 1.0
    }

    it("when true positives are equal to false positives precision should be 0.5") {
      Metrics.precision(55, 55, 0) shouldBe 0.5
      Metrics.precision(100, 100, 5) shouldBe 0.5
    }

    it("when both recognised positives and annotation positives are zero precision should be 1.0") {
      Metrics.precision(0, 0, 0) shouldBe 1.0
    }

    it("when recognised positives are zero but annotation positives are greater than zero precision should be 0.0") {
      Metrics.precision(0, 0, 1) shouldBe 0.0
    }
  }

  // Recall is the fraction of relevant instances that are retrieved
  describe("Recall") {

    it("when we only have true positives recall should be 1.0") {
      Metrics.recall(100, 0, 0) shouldBe 1.0
    }

    it("when true positives are equal to false negatives recall should be 0.5") {
      Metrics.recall(55, 0, 55) shouldBe 0.5
      Metrics.recall(100, 5, 100) shouldBe 0.5
    }

    it("when both recognised positives and annotation positives are zero recall should be 1.0") {
      Metrics.recall(0, 0, 0) shouldBe 1.0
    }

    it("when recognised positives are greater than zero but annotation positives are zero recall should be 0.0") {
      Metrics.recall(0, 1, 0) shouldBe 0.0
    }
  }

  // Accuracy is the proportion of true results (true positives and true negatives) among the total number of cases examined
  describe("Accuracy") {

    it("when true positives and true negatives are zero accuracy should be 0.0") {
      Metrics.accuracy(0, 10, 0, 10) shouldBe 0.0
    }

    it("when false positives and false negatives are zero accuracy should be 1.0") {
      Metrics.accuracy(5, 0, 5, 0) shouldBe 1.0
    }

    it("when true positives and negatives are equal to false positives and negatives accuracy should be 0.5") {
      Metrics.accuracy(5, 5, 5, 5) shouldBe 0.5
    }
  }

  // False positive ratio refers to the probability of falsely rejecting the null hypothesis
  // NOTE: test descriptions previously abbreviated this metric as 'FPT'; fixed to 'FPR'.
  describe("False Positive Rate") {

    it("when false positives are zero FPR should be 0.0") {
      Metrics.fpr(0, 1) shouldBe 0.0
    }

    it("when false positives are equal to true negatives FPR should be 0.5") {
      Metrics.fpr(5, 5) shouldBe 0.5
    }

    it("when true negatives are zero FPR should be 1.0") {
      Metrics.fpr(10, 0) shouldBe 1.0
    }
  }

  // F measure is a measure of a test accuracy
  describe("Fmeasure") {

    it("when both recognised positives and annotation positives are zero Fmeasure should be 1.0") {
      Metrics.f1(0, 0, 0) shouldBe 1.0
    }

    it("when recognised positives are zero but annotation positives are greater than zero Fmeasure should be 0.0") {
      Metrics.f1(0, 0, 1) shouldBe 0.0
    }

    it("when recognised positives are greater than zero but annotation positives are zero Fmeasure should be 0.0") {
      Metrics.f1(0, 1, 0) shouldBe 0.0
    }

    it("when recognised and annotation positives are greater than zero but true positives are zero Fmeasure should be 0.0") {
      Metrics.f1(0, 5, 5) shouldBe 0.0
    }

  }

}
127 |
--------------------------------------------------------------------------------
/version.sbt:
--------------------------------------------------------------------------------
// Project version shared by every sub-project (ThisBuild scope).
version in ThisBuild := "1.0.1-SNAPSHOT"
2 |
--------------------------------------------------------------------------------