├── .github
└── FUNDING.yml
├── .gitignore
├── Asserts.py
├── Config.json
├── Config.py
├── Documentation
├── Algorithm to find semantically related concepts.docx
├── Concept-Linking-Diagram.drawio
├── Concept-Linking-Diagram.png
├── Concept-Linking-Example.drawio
├── Concept.drawio
├── Concept.png
├── Diagram.drawio
├── Diagram.png
├── InferenceRules.xlsx
├── NARS in Python - Technical Documentation.docx
├── Todo.txt
├── percepts.PNG
├── wiki-interface.png
└── wiki-internal-data.png
├── Global.py
├── InputChannel.py
├── LICENSE
├── NALGrammar
├── Sentences.py
├── Terms.py
└── Values.py
├── NALInferenceRules
├── Composition.py
├── Conditional.py
├── ExtendedBooleanOperators.py
├── HelperFunctions.py
├── Immediate.py
├── Local.py
├── Syllogistic.py
├── Temporal.py
└── TruthValueFunctions.py
├── NALSyntax.py
├── NARS.py
├── NARSDataStructures
├── Bag.py
├── Buffers.py
├── ItemContainers.py
└── Other.py
├── NARSGUI.py
├── NARSInferenceEngine.py
├── NARSMemory.py
├── Narsese.txt
├── README.md
├── TestCases
├── AllTests.py
├── DataStructureTests.py
├── GrammarTests.py
├── InferenceEngineTests.py
├── InferenceRuleTests.py
└── VisionTests
│ ├── CIFAR10VisionTest.py
│ ├── GenericVisionTest.py
│ └── MNISTVisionTest.py
├── build.bat
├── main.py
├── requirements.txt
└── terminal.bat
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: ccrock4t # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12 | polar: # Replace with a single Polar username
13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
14 | thanks_dev: # Replace with a single thanks.dev username
15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
16 |
--------------------------------------------------------------------------------
/Asserts.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: October 9, 2020
4 | Purpose: Enforces Narsese grammar that is used throughout the project
5 | """
6 | import NALSyntax
7 | import NALGrammar
8 | import NARSDataStructures
9 |
def assert_sentence_forward_implication(j):
    """
    Assert that j is a forward implication sentence: ==>, =/>, =\>
    (higher-order, asymmetric copula).
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    copula = j.statement.get_copula()
    is_forward_implication = (not NALSyntax.Copula.is_symmetric(copula)
                              and not NALSyntax.Copula.is_first_order(copula))
    assert is_forward_implication, f"{j} must be a forward implication statement"
18 |
def assert_sentence_asymmetric(j):
    """
    Assert that j's copula is asymmetric: -->, ==>, =/>, =\>
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    copula = j.statement.get_copula()
    assert not NALSyntax.Copula.is_symmetric(copula), f"{j} must be asymmetric"
27 |
def assert_sentence_symmetric(j):
    """
    Assert that j's copula is symmetric (similarity / equivalence variants;
    original doc listed "<->,<=>,>").
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    copula = j.statement.get_copula()
    assert NALSyntax.Copula.is_symmetric(copula), f"{j} must be symmetric"
36 |
def assert_sentence_equivalence(j):
    """
    Assert that j is an equivalence statement (symmetric AND higher-order),
    e.g. <=> (original doc listed "<=> >").
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    statement = j.statement
    is_equivalence = (NALSyntax.Copula.is_symmetric(statement.get_copula())
                      and not statement.is_first_order())
    assert is_equivalence, f"{j} must be an equivalence statement"
45 |
def assert_sentence_similarity(j):
    """
    <->

    Assert that j is a similarity statement. (NOTE: the original docstring
    said "-->", but the code checks for the Similarity copula, i.e. "<->".)
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    assert j.statement.get_copula() == NALSyntax.Copula.Similarity, str(j) + " must be a similarity statement"
54 |
def assert_sentence_inheritance(j):
    """
    Assert that j is an inheritance statement: -->
    :param j: object expected to be a Sentence
    :return:
    """
    assert_sentence(j)
    assert j.statement.get_copula() == NALSyntax.Copula.Inheritance, \
        f"{j} must be an inheritance statement"
63 |
def assert_term(t):
    """Assert that t is an NALGrammar Term."""
    assert isinstance(t, NALGrammar.Terms.Term), f"{t} must be a Term"
66 |
def assert_compound_term(t):
    """Assert that t is an NALGrammar CompoundTerm."""
    assert isinstance(t, NALGrammar.Terms.CompoundTerm), f"{t} must be a Compound Term"
69 |
def assert_valid_statement(t):
    """
    A valid statement is either a statementTerm or a higher order compound term (a compound of statements)
    :param t:
    :return:
    """
    # guard-clause form; isinstance checks are side-effect free, and the
    # connector access is only reached for CompoundTerms (as in the original
    # short-circuit expression)
    if isinstance(t, NALGrammar.Terms.StatementTerm):
        return
    if isinstance(t, NALGrammar.Terms.SpatialTerm):
        return
    if isinstance(t, NALGrammar.Terms.CompoundTerm) \
            and not NALSyntax.TermConnector.is_first_order(t.connector):
        return
    assert False, f"{t} term must be a valid Statement"
79 |
def assert_statement_term(t):
    """Assert that t is an NALGrammar StatementTerm."""
    assert isinstance(t, NALGrammar.Terms.StatementTerm), f"{t} must be a Statement Term"
82 |
def assert_sentence(j):
    """Assert that j is an NALGrammar Sentence."""
    assert isinstance(j, NALGrammar.Sentences.Sentence), f"{j} must be a Sentence"
85 |
86 |
def assert_truth_value(j):
    """Assert that j is an NALGrammar TruthValue."""
    assert isinstance(j, NALGrammar.Values.TruthValue), f"{j} must be a TruthValue"
89 |
90 |
def assert_punctuation(j):
    """Assert that j is an NALSyntax Punctuation."""
    assert isinstance(j, NALSyntax.Punctuation), f"{j} must be a Punctuation"
93 |
94 |
def assert_copula(j):
    """Assert that j is an NALSyntax Copula."""
    assert isinstance(j, NALSyntax.Copula), f"{j} must be a Copula"
97 |
def assert_task(j):
    """Assert that j is an NARSDataStructures Task."""
    assert isinstance(j, NARSDataStructures.Other.Task), f"{j} must be a Task"
100 |
101 |
--------------------------------------------------------------------------------
/Config.json:
--------------------------------------------------------------------------------
1 | {
2 | "k": 50,
3 | "T": 0.55,
4 | "FOCUSX": 1.2305365040669583,
5 | "FOCUSY": 1.0437127611721296,
6 | "PROJECTION_DECAY_DESIRE": 0.95,
7 | "PROJECTION_DECAY_EVENT": 0.95,
8 |
9 |
10 | "MINDFULNESS": 1.0,
11 | "BAG_GRANULARITY": 100,
12 |
13 |
14 | "TAU_WORKING_CYCLE_DURATION": 25,
15 |
16 | "POSITIVE_THRESHOLD": 0.51,
17 | "NEGATIVE_THRESHOLD": 0.5,
18 |
19 | "MEMORY_CONCEPT_CAPACITY": 300000,
20 | "EVENT_BUFFER_CAPACITY": 15,
21 | "GLOBAL_BUFFER_CAPACITY": 1000,
22 | "CONCEPT_LINK_CAPACITY": 100,
23 | "NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_CONCEPT": 3,
24 | "NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_BELIEF": 5,
25 | "PRIORITY_DECAY_VALUE": 0.29063576107673333,
26 | "PRIORITY_STRENGTHEN_VALUE": 0.99,
27 |
28 |
29 | "GUI_USE_INTERFACE": true,
30 | "SILENT_MODE": false,
31 | "DEBUG": false,
32 | "ARRAY_SENTENCES_DRAW_INDIVIDUAL_ELEMENTS": true,
33 | "USE_PROFILER": false,
34 |
35 |
36 |
37 | "BAG_DEFAULT_CAPACITY": 10000000,
38 |
39 | "TABLE_DEFAULT_CAPACITY": 5,
40 |
41 |
42 | "MAX_EVIDENTIAL_BASE_LENGTH": 30,
43 |
44 |
45 | "DEFAULT_JUDGMENT_FREQUENCY": 1.0,
46 | "DEFAULT_GOAL_FREQUENCY": 1.0,
47 | "DEFAULT_DISAPPOINT_CONFIDENCE": 0.5,
48 |
49 |
50 | "DEFAULT_JUDGMENT_PRIORITY": 0.9,
51 | "DEFAULT_QUESTION_PRIORITY": 0.9,
52 | "DEFAULT_GOAL_PRIORITY": 0.9,
53 | "DEFAULT_QUEST_PRIORITY": 0.9
54 |
55 | }
56 |
--------------------------------------------------------------------------------
/Config.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: October 9, 2020
4 | Purpose: Specific configuration settings for NARS
5 | """
6 | import json
7 |
8 | import os
9 | import sys
10 |
11 |
12 | try:
13 | try:
14 | user_config = json.load(open("Config.json"))
15 | except:
16 | try:
17 | user_config = json.load(open("../Config.json"))
18 | except:
19 | user_config = json.load(open("../../Config.json"))
20 |
21 | """
22 | System Parameters
23 | """
24 | k = user_config["k"] # evidential horizon
25 | T = user_config["T"] # decision rule (goal decision-making) threshold
26 | MINDFULNESS = user_config["MINDFULNESS"]
27 | BAG_GRANULARITY = user_config["BAG_GRANULARITY"]
28 | FOCUSX = user_config["FOCUSX"]
29 | FOCUSY = user_config["FOCUSY"]
30 |
31 | TAU_WORKING_CYCLE_DURATION = user_config["TAU_WORKING_CYCLE_DURATION"] # time in milliseconds per working cycle
32 |
33 | POSITIVE_THRESHOLD = user_config["POSITIVE_THRESHOLD"]
34 | NEGATIVE_THRESHOLD = user_config["NEGATIVE_THRESHOLD"]
35 |
36 | MEMORY_CONCEPT_CAPACITY = user_config["MEMORY_CONCEPT_CAPACITY"] # how many concepts can this NARS have?
37 | EVENT_BUFFER_CAPACITY = user_config["EVENT_BUFFER_CAPACITY"]
38 | GLOBAL_BUFFER_CAPACITY = user_config["GLOBAL_BUFFER_CAPACITY"]
39 | CONCEPT_LINK_CAPACITY = user_config["CONCEPT_LINK_CAPACITY"] # how many of each concept link can this NARS have?
40 |
41 | """
42 | Sensors
43 | """
44 | VISION_DIMENSIONS = (28,28)
45 |
46 | """
47 | GUI
48 | """
49 | SILENT_MODE = user_config["SILENT_MODE"] # the system will only output executed operations
50 | GUI_USE_INTERFACE = user_config["GUI_USE_INTERFACE"]
51 | DEBUG = user_config["DEBUG"] # set to true for useful debug statements
52 | ARRAY_SENTENCES_DRAW_INDIVIDUAL_ELEMENTS = user_config[
53 | "ARRAY_SENTENCES_DRAW_INDIVIDUAL_ELEMENTS"] # whether or not to draw each individual element / pixel of an array sentence. Turning this to False results in GUI speedup when viewing array sentences
54 | USE_PROFILER = user_config["USE_PROFILER"]
55 |
56 |
57 | """
58 | Inference
59 | """
60 | PROJECTION_DECAY_DESIRE = user_config["PROJECTION_DECAY_DESIRE"]
61 | PROJECTION_DECAY_EVENT = user_config["PROJECTION_DECAY_EVENT"]
62 |
63 | NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_CONCEPT = user_config[
64 | "NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_CONCEPT"] # The number of times to look for a semantically related concept to interact with
65 | NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_BELIEF = user_config[
66 | "NUMBER_OF_ATTEMPTS_TO_SEARCH_FOR_SEMANTICALLY_RELATED_BELIEF"] # The number of times to look for a semantically related belief to interact with
67 | PRIORITY_DECAY_VALUE = user_config[
68 | "PRIORITY_DECAY_VALUE"] # value in [0,1] weaken band w/ priority during priority decay
69 | PRIORITY_STRENGTHEN_VALUE = user_config[
70 | "PRIORITY_STRENGTHEN_VALUE"] # priority strengthen bor multiplier when concept is activated
71 |
72 | """
73 | Bags
74 | """
75 | BAG_DEFAULT_CAPACITY = user_config["BAG_DEFAULT_CAPACITY"] # default for how many items can fit in a bag
76 |
77 | """
78 | Tables
79 | """
80 | TABLE_DEFAULT_CAPACITY = user_config["TABLE_DEFAULT_CAPACITY"]
81 |
82 | """
83 | Other Structures
84 | """
85 | MAX_EVIDENTIAL_BASE_LENGTH = user_config[
86 | "MAX_EVIDENTIAL_BASE_LENGTH"] # maximum IDs to store documenting evidential base
87 |
88 | """
89 | Default Input Task Values
90 | """
91 | DEFAULT_JUDGMENT_FREQUENCY = user_config["DEFAULT_JUDGMENT_FREQUENCY"]
92 | DEFAULT_GOAL_FREQUENCY = user_config["DEFAULT_GOAL_FREQUENCY"]
93 |
94 | DEFAULT_DISAPPOINT_CONFIDENCE = user_config["DEFAULT_DISAPPOINT_CONFIDENCE"]
95 |
96 | DEFAULT_JUDGMENT_PRIORITY = user_config["DEFAULT_JUDGMENT_PRIORITY"]
97 | DEFAULT_QUESTION_PRIORITY = user_config["DEFAULT_QUESTION_PRIORITY"]
98 | DEFAULT_GOAL_PRIORITY = user_config["DEFAULT_GOAL_PRIORITY"]
99 | DEFAULT_QUEST_PRIORITY = user_config["DEFAULT_QUEST_PRIORITY"]
100 |
101 |
102 |
103 | except:
104 | assert False, "Config could not be loaded."
105 |
--------------------------------------------------------------------------------
/Documentation/Algorithm to find semantically related concepts.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/Algorithm to find semantically related concepts.docx
--------------------------------------------------------------------------------
/Documentation/Concept-Linking-Diagram.drawio:
--------------------------------------------------------------------------------
1 | 7Zxbc5s4FMc/jWfSh2SExMU82k7cTbeXzCQz3TxSUGwmGFys1PZ++hVG3CQoGINFs247iXWQBf6fn46OLu4IzVa7j6G1Xn4JHOyNIHB2I3Q7glBRIRxF/4Czjy2GqcSGReg6rFJmeHT/xcwImPXNdfCmUJEEgUfcddFoB76PbVKwWWEYbIvVXgKveNe1tcCC4dG2PNH63XXIMraONZDZ/8LuYpncWQHsyspKKjPDZmk5wTZnQncjNAuDgMSvVrsZ9iLxEl3i980rrqYPFmKfNHnD/fdvX59evzz93ISzNzhd399P0DVr5ZflvbEPfHU1grq1Wo/QNP4pFGdXk+vr1LqgN59OP0TmGW++/UDtIGo3ajp/hZqv7vjq86j6QSqyT/QPgzffwdFHUGiN7dIl+HFt2dHVLSWO2pZk5bHLoiTJ58MhwbuciUn0EQcrTMI9rcKuwjFzF+NVZ8Vt5nyoMtsy5/iknsV4W6QtZy6hL5hXjvAQFD3UsYNkKw6KiiuGKLmCzik5EiRPVfN/bKJfZexKF1JT6oUcn1NHVdDxTrpIql4UCZqiSPo5RdIFkebSRdLMgYlkCCJNpIvEaYRkazQWB4rSYUCybIpaz9ZZo5Qp6lYyTkrXDSkD0y15npxwU+kqKUPrlYqYYc+kq8QnufJVErPcW+kqocGpVJaYciJh35lE015asj1rs3FtKsaGWCERzTm5qErh/h9aADdQS8rP+Yu3kSogLe1ZaUPC4BXPAi8IDw+AdH0K5/PfeYA+zAKT+pEMO4WpueinnB+0Ej8kthB7FnF/FSf0Zc5hd3gIXPrEWUhRKjBImtgEb6GN2bvyE3C+IS71RCrXUCyM0NCBlfRjn4CPmI93hs/OJTE9Gis9J7TQ1xk6UWGf5yh+k66jY5g7lB5w6FJFcNgexNhxDYa4P4xYlZtRq6glsarGETs+M7HaUIntO0bWo9k0mJoXNHtBU5y39zAW68cNxl0Fxj8Nqq5GaOlQiescl3hXGGFryYTDIvPdhDtxdWkgZOo6HGTuaF6AlQqsuKw3EGClh1K1IZnoQmYfZCbnAQZCZh+E6e+THE1XCw1BvqG+yRHXkgcS03pfM6xFDjYMasr7RFN6UINDRdPg2LwBCNXwebYcEV2glQptyX5K9BM84XBFf312/VeBYoJ3pAhokQw/8GnN6YvreZzJ8tyFH1FOOYmwmkZbUa5teRN2YeU6TnSb0r2tbPcLdLO9xe1GlJ26AiUQQVDNy2kH3RpsTxy1BZh3QaP+esQBmVrt4FmlK1knP4D8EGLHtYkb+O8eZ6PokpRdeTw3WCE+heeXsY1tu2xk+jHWotDaB+llsp4X9ZIl0gPqd7u1Z/nW/4J1LnSnp2PrTsz255UGy4OnsI4VR8NGGeumbiBL74f1ElnPy3rbNaxKwvPBIcmrkxz7kFWzmVyDxDqfVOcngBU5dVXwqnRbZzO+yzJWL8lz8tydwpn25qr+X4nLQNzLn8GHYNzOvQjVNNSdeyfB57n/9Vuw3K5/fvqbPINPYFv6xaCBfL9HCOglQFTGeLXBAdH0EGnXp/pKle5v5aRuyYLrM52tYVQDJa1b6kpxaDfaRl1uCVhByo05BtkfeNZe2t+B0Gp2qlKAugygOXPDZgnxEYTP/xpvJ5g0b4JG9reYGIDexvNSTdseDj0q1wQ3JmyzilskzWibbEat8+u6HYbCplv+SCq+3PdvFFNriS+saahnYNueDT0p/0yRVEbHzX+yHgCQUZhxmQb8fS/oF9qmexCm1PGbT6vbzpp4+qHSVVpNi9l3+ePq2f+IgO7+Aw==
--------------------------------------------------------------------------------
/Documentation/Concept-Linking-Diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/Concept-Linking-Diagram.png
--------------------------------------------------------------------------------
/Documentation/Concept-Linking-Example.drawio:
--------------------------------------------------------------------------------
1 | 7Ztdj+I2FIZ/DdLsxawSOwlwCQy0I3XbkWak7V56Ew9YE2LqmAX662sT59NJkxlCkm6REOATx0ne83B8/MEILrbHXxjabb5QD/sjYHjHEXwYAWBaAIzky/BOkWU8NSPDmhFPVUoNz+RvrIyGsu6Jh8NcRU6pz8kub3RpEGCX52yIMXrIV3ulfv6qO7TGmuHZRb5u/Uo8vomsE9tI7b9ist7EVzYNdWSL4srKEG6QRw8ZE1yO4IJRyqNv2+MC+1K8WJfovFXF0eTGGA54kxMev/7x+8vbl5e/QrbYg/nu8XEG71UrP5C/Vw98dzcCDtruRnAevWvFxd3s/j6xrsXF5/NP0rwomh8+Cbsh25VNZ48I892yWH0lq5+l4qdYf0b3gYflI5iixmFDOH7eIVcePQjihG3Dt746rB4GM46PlSqZifYCWky3mLOTqKJOABPlLsWro4qH1PnAUrZNxvFxPaR4Wyctpy4RX5RX3uEhoHuoZQf1rbiRV9wc65KbsEvJoSZ5olrwPZQfZez2LqRt1gs56VJHS9Nx2btIlpMXCUx1kZwuRXI0kVa9i2RPBybSWBNp1rtIBY1g3xpN9I6itBvoWTbTqmer0yg11XUr6Sd71w2aA9Mtvp+McPPeVTKH9qs09Qx70btKxSS3f5X0LPehd5Xg4FQqS0wLIuHAm8lhryi5PgpD4goxQo4Y180ZuYQk7PSnKBifgR2Xv2UPPkhVjKR0UqWQM/qGF9Sn7HwD0HHmYLVKPIA9bXxd0F8M6BFbY17Xtel+yvjBLvFDbGPYR5z8yN9GmXPUFZ4oETeYhhSzAoO4iZDumYvVWdkBeLGhQuoJrUJDkQ5aQ2dWkse+AB89H28NnyPhET22Kn2LaRHfU3Rk4ZTlKDrJceB7mDuXnjAjQhHMWgQx8mSDPm/YxFqFEbUFP0isZReInXRMrD1UYjuPkfVoNg2m0xuabaCpj9uv0Bc77+uMrxYYBw5VWz1071Dp8xy3ePfv0x21ZIJbuGuDTH12aSBkOg4YRIisBXZ6A7ZLYPVpvYEAO7xQajUkE97IbIHMeD/AQMjshDDnpyDHdqxcQ6DY0LXJ0eeSBxLTup8zrEUONAxqFXPA/zE0ew9qYKhojgtsfjYgrOGzvxwR3qDtEtqS9RT5brxgthUfv5HgTaOY4yPPA5onI6CBqDl/Jb5fMCGfrANJucBCYjWXq1PERf5MHdgSz5OXKV3bSle/jHaWtxyYVz8WP7vtyiihCBjVwFy2063B+sS71gCzPtB/sJeJB2rFA51qVzJTfkb5iWGPuJzQ4OcHurCzJFm464/oBpPElxD9OnGx65Z1Tt8ntoyuV2G9TNduYS+ZJj3DvjzufBSg/wfthfCd7Kuq2zZ7Pbc0mCO8hHZsejYel9E+dcYQOVeivUTXbmn/6ExWJePZ+BBn13Gmfc6t1XiuQXqdTa2zw8CKzLoqfo26H/fdJrPaSKHj+24VzuTnXBUAmuLSl3uLO/GBMfmYeyGsaejD7hXF9K9HUfX0D1xw+Q8=
--------------------------------------------------------------------------------
/Documentation/Concept.drawio:
--------------------------------------------------------------------------------
1 | 7Vpdc6M2FP0tffBM8hAP37YfYyfdnW12JtOk092+ySCMGoEoyLW9v75XIMAY4Tgx1M6OHc8EHQkh7j2693DNwJyF608JioOvzMN0YGjeemDeDQxDtwxjIL6at8mRka3nwCIhnhxUAU/kB5agJtEl8XBaG8gZo5zEddBlUYRdXsNQkrBVfZjPaP2qMVrgBvDkItpE/yQeD3J0bGsV/hmTRVBcWddkT4iKwRJIA+Sx1RZk3g/MWcIYz4/C9QxTYbzCLvl5v7b0lgtLcMQPOWH9+Ef45fnL/Dn1ZsvftM/3P/T1jZzlX0SX8oblYvmmsEDClpGHxST6wJyuAsLxU4xc0bsCnwMW8JDKbp9QOmOUJdm5pm+LP8BTnrAXvNXjZB9xBov4Fp5/AG/eXrFWnHC83oLk7X7CLMQ82cCQonckTb8pOCXbq8qTtiWxYMuL1kSCSLJnUc5dGRgOpI3fYG9DYW+HwmWncc3qzj9LwYspJRG+KZZ2C0PEyuyqH44W8n82y3wXEOZVziw6btJsv4l5jUm8bk47Y5GLY15MBrecz1e/BsCN6wIWV9gOpcB/vM6bOj8iFuEdMkkIUbKIoOkCKzDgU8EGAtv1VnaExPPEZZREraisdcMw09phmNlkmKEpGGb0RbCRimCwr9a3uXMUh1NMCfbhpGc0B9NVHVcBRMsUOv5eeosQrJRevzpZw9fpioQU5f4E7jzJHuF0NyDUe0AbthRWSTlyX4rWNGAJ+QHjUcUQlHCZH0ytGWo8G489q3bmk5hROjvBguuPhbP1HegrWtcGPqCUS8BllKI4JfNy3SFKFiSaMs5ZKAc14tvYmJtZfOuAZYa5wzJn1GCZropjumb1RLNxg2YyVAwHgtq2Psx3mKYNJ1nAUkeAdrO1ebcIABT7XLH9ORP7PIVtT6LFQzbmzqqQ36VtBMTgXJ9mGTmAoIGjLEBwxFHuaXEDMSPAerEEewpfuLOZNrQHNixpBm29asNXDE8gl0VwV4hk3sZAoxVO+Q75tSYp9m7m15kimaEIP0piFGm+c15MXuGFNhwVvLAvvODK+NANFWzjQCo4PVGhUPBbXBgOhxeP9+bx0fjUHlc9TZTK4A4Sa4L3yYwFQ3RHYpxITPi+b7ju2YgJz5k7dkdiwjbeLSb60qx686lIZo1fjlMTpd3a3PuzxxD9cHqci4LQzVfIoFVksC5k6JUMp9cQ1lEa4uLyN7v8YBHR2/7fW8F4TLBHXE5YBEMeSPSSqoSEmweMM9ESYxefkZaYj23L7qj8pU/qWsLUmlpCWf7qrzCht1YmRPaAdlYDlWVJ7dvBoaQ0W5t3P2woeU9hQu+iMqEujPYWV1pLEypiXH3PxIaDwjgDygPtr+sLZw5OP11ULdQ06UtxGMdVLS4u76Js8T+7XPmjXKEb7tcxqAT0oSQH1j0bj85GckyckYk6Kl9Y5yc5jOYT6zdVTinTzYGxpDRbm3c/bCx5j+Qod+nHkRxG8+l1r66YGYNb7cKcDrLQ28lyeuFhHyU8Li5/s8tPLzycfcLjGSfhWSoOQ/WrHMJj/3yKHI47xnO/G8Wx+46PZR6qOHp7ycdo1siubm5gT8lEcX1craO0XpuTP2wseZfwcA5mzKYeME4nPJpFsO/t6uJqqw5yZMXj52VOb2SxTp6F9hXGRFQpiXK7lWmGmThpTzwXhnTGkJF1GEOstzMEmtUL6Fnf1mv85v1/
--------------------------------------------------------------------------------
/Documentation/Concept.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/Concept.png
--------------------------------------------------------------------------------
/Documentation/Diagram.drawio:
--------------------------------------------------------------------------------
1 | 5VrZbts4FP0aA+lDC61eHr22RtA2SAaY5mlAS7RMRBJVivIyXz+kRMoSRS9x5CSTBkEiHlJXFO/hvefS7tjjaPuVgGT1Hfsw7FiGv+3Yk45lmY5ldfiv4e8E0h+YBRIQ5AtsDzygf6EADYFmyIdpbSDFOKQoqYMejmPo0RoGCMGb+rAlDutPTUAAG8CDB8Im+jfy6apA+66xx79BFKzkk01D9ERADhZAugI+3lQge9qxxwRjWlxF2zEM+erJdSnumx3oLSdGYEzPueH+H2dKIYls9369+DGe30+D4LOwsgZhJl64Y3VDZm/kozW7DPilhBYS+A4jTHYSZo9cNIYSFWGjNCYr0M0CBJ8Ojc4XkO6kVwjOYh/yFzNY92aFKHxIgMd7N4yIDFvRKGQtk10uURiOcYhJfq/dmw67U6O0Wl1AuRqQULitQGJBv0IcQcpf3BC9PelcQW9HNDd7qpTYqkITR7IECHoGpem9B9mFcOIzHGpd5NDpNoEEwZit4DEHNtx80qmNJ/0F0ic2v1G2XELyLNMg4n6NFyn/d3NHECaIcl/8zmAGP7XKEUaRUbfbDkcc5yRHTEvDkXJg+xy5bNfPY+ayKkfaYcQ0DlD8Qt4prmehNuGXCcEeTNPT7l8A7ynICfMzoyGfjpYWs9lkcC1a2N0zedG/Gi36GlooKwtjf8izKmt5IUhT5B3dR8v8h+FsncjuF9+LXwyjL4HHHOh2bQlMtmK7Fq1dtXXHIhR7UUgEeNAHFJAA0tMxEvq11N/0VMUTrsYREiMwBBSt64JB5x3xhDuM2IxLIphKDjEHg7qJFGfEg+KuaoJXDJUMEoZK5SENFQvTMJSTpXztF/BncE5YmcdJRs+OFuMVYLouPF9WKHxlm5HWGZpSgp+g5GiM+V6v0VZAIERBzGnO6MU5N+JbGzFhOBQdEfJ9/hhtbKknnzaUhkqSZrTQCQ2VApcEi8ktHt8+hJOMjoC7HXpJiGYXCsfpGuaMlenfaFlu3Mzms5+vISKXBrAN0KAT6xlNeobBDS5xTCu4kf+0Qwazf5mkaCN1aNmgU53PzByVDOFW80PZdyA3nJUGjlH4naQBS9EDlloinJsGygnJUKHqyANpgHkH7CrDEj4gfWGi0C680wJbtoj+qlw/7onDWnuq8MZJphQr+/9hiqEoR9XB5zJFlaB9ZSrt6QXtqroNGuTpIf24SbzM2nJrum+cxbtNF7B1igG/8TbGmzCn/If1h+06X9y6RzRVmP4A50oe6b1NbKzVZ069Pmu3OqsG22PV2ckq7n0FZUcR6NbgwqDc7Q/0jHylqNws4mRUVpX6zZAp7XETH+Mo4TuVWTl8a6H9izO94i9/E1YZ6G3OIzY5jzkIxwfN8cnOSqNC8+cGP2z8Mupc+WxqCgHbbJL+aoWAnMDxuvAbmwlMOekrx7jFyfCfVdW7allvaxz4unW9rrC/tJQrG5UU9NJS7uiJ9jtJBs2PhS5V6O6JrPLmtZypK/0bG34EQwSXbALGEvNjH6T5KOHP2PBq7dZrbvgyOr/OhrfbU5x7lflYFZmnFGc75z2nhaU+SJz7gcGbKcvWyn3l1LChAq6sLM3muc+EVQ1r6Ivs/4Erf1tZe1un1FpK9Ky5/1ZJ4bz9l3Ps6X8=
--------------------------------------------------------------------------------
/Documentation/Diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/Diagram.png
--------------------------------------------------------------------------------
/Documentation/InferenceRules.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/InferenceRules.xlsx
--------------------------------------------------------------------------------
/Documentation/NARS in Python - Technical Documentation.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/NARS in Python - Technical Documentation.docx
--------------------------------------------------------------------------------
/Documentation/Todo.txt:
--------------------------------------------------------------------------------
1 | - Handling Variables (NAL-6)
2 | - Events (NAL-7) (implemented, TODO: need to handle events and occurrence times for semantic inference)
3 | - Goals (NAL-8) (initial processing implemented)
4 | - Operations (NAL-8)
5 | - Self-monitoring and self-control (NAL-9)
6 | - Sensory terms
7 |
8 | - Inference
9 | - Time projection
10 | - Structural Inference
11 | - Immediate Inference (implemented)
12 | - Composition Rules (implemented)
13 | - Decomposition Rules
14 | - Conditional Syllogism
15 | - Temporal Inference
16 | - Durations
17 |
--------------------------------------------------------------------------------
/Documentation/percepts.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/percepts.PNG
--------------------------------------------------------------------------------
/Documentation/wiki-interface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/wiki-interface.png
--------------------------------------------------------------------------------
/Documentation/wiki-internal-data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccrock4t/NARS-Python/5ff613eac2171ca8a2c78c8606ac8c19c458c6a4/Documentation/wiki-internal-data.png
--------------------------------------------------------------------------------
/Global.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: December 24, 2020
4 | """
5 | import Config
6 | import NALGrammar.Terms
7 |
class Global:
    """
    Process-wide globals for NARS: the running instance, inherent terms,
    ID markers, GUI pipes, and console/GUI output helpers.
    """

    # --- NARS vars ---
    NARS = None  # variable to hold NARS instance (set at startup)
    paused = False

    # --- Terms ---
    TERM_SELF = None  # the {SELF} term, built by create_inherent_terms()
    TERM_IMAGE_PLACEHOLDER = None  # the "_" image placeholder term
    ARRAY_NEGATIVE_ELEMENT = None
    ARRAY_NEGATIVE_SENTENCE = None

    # --- ID markers ---
    MARKER_ITEM_ID = "ItemID:"  # there are Sentence IDs and Bag Item IDs
    MARKER_SENTENCE_ID = "SentenceID:"
    MARKER_ID_END = ":ID "

    # --- GUI ---
    NARS_object_pipe = None
    NARS_string_pipe = None

    @classmethod
    def get_current_cycle_number(cls):
        """:return: the running NARS instance's current working-cycle number"""
        return cls.NARS.current_cycle_number

    @classmethod
    def print_to_output(cls, msg, data_structure=None):
        """
        Print a message to the console (when no data structure is given) and,
        when the GUI interface is enabled, forward it over the string pipe.

        :param msg: message to display
        :param data_structure: optional structure the message concerns; must be
            the concepts bag, temporal module, or global buffer to be forwarded
        """
        try:
            data_structure_name = None
            data_structure_len = 0
            if data_structure is None: print(msg)
            if not (data_structure is cls.NARS.memory.concepts_bag or
                    data_structure is cls.NARS.temporal_module or
                    data_structure is cls.NARS.global_buffer or
                    data_structure is None): return  # must be a valid data structure
            if data_structure is not None:
                data_structure_name = (str(data_structure), type(data_structure).__name__)
                data_structure_len = len(data_structure)
            if Config.GUI_USE_INTERFACE: cls.NARS_string_pipe.send(("print", msg, data_structure_name, data_structure_len))
        except Exception:
            # BUGFIX: was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; fall back to plain console output on failure.
            print(msg)

    @classmethod
    def clear_output_gui(cls, data_structure=None):
        """Clear the GUI output box associated with data_structure."""
        cls.NARS_string_pipe.send(("clear", "", type(data_structure).__name__, 0))

    @classmethod
    def remove_from_output(cls, msg, data_structure=None):
        """
        Remove a message from an output GUI box
        """
        if cls.NARS_string_pipe is None: return
        if not (data_structure is cls.NARS.memory.concepts_bag or
                data_structure is cls.NARS.temporal_module or
                data_structure is cls.NARS.global_buffer): return
        cls.NARS_string_pipe.send(("remove", msg, (str(data_structure), type(data_structure).__name__), len(data_structure)))

    @classmethod
    def set_paused(cls, paused):
        """
        Set global paused variable and GUI
        """
        cls.paused = paused
        if Config.GUI_USE_INTERFACE: cls.NARS_string_pipe.send(("paused", paused, "guibox", 0))

    @classmethod
    def debug_print(cls, msg):
        """Print a debug line (cycle number, buffer and memory sizes) when Config.DEBUG is set."""
        if msg is None: return
        if not Config.DEBUG: return
        print(str(cls.get_current_cycle_number())
              + ": gb(" + str(len(cls.NARS.global_buffer)) + "): "
              + ": mem(" + str(len(cls.NARS.memory.concepts_bag)) + "/" + str(cls.NARS.memory.concepts_bag.capacity) + "): "
              + msg)

    @classmethod
    def create_inherent_terms(cls):
        """Create the terms every NARS instance starts with: {SELF} and the image placeholder."""
        cls.TERM_SELF = NALGrammar.Terms.from_string("{SELF}")
        cls.TERM_IMAGE_PLACEHOLDER = NALGrammar.Terms.from_string("_")

Global.create_inherent_terms()
--------------------------------------------------------------------------------
/InputChannel.py:
--------------------------------------------------------------------------------
1 | import timeit as time
2 |
3 | import numpy as np
4 |
5 | import Config
6 | import NALGrammar.Sentences
7 | import Global
8 | import NALSyntax
9 | import NARSDataStructures
10 | import NALInferenceRules.TruthValueFunctions
11 |
12 | """
13 | Author: Christian Hahm
14 | Created: October 9, 2020
15 | Purpose: Parses an input string and converts it into a Narsese Task which is fed into NARS' task buffer
16 | """
17 |
pended_input_data_queue = []  # pending input items; entries are (KEYWORD, payload) tuples consumed FIFO-style by process_input_channel()
VISION_KEYWORD = "vision:"  # prefix marking sensory-array (image) input strings
NARSESE_KEYWORD = "narsese:"  # queue tag for plain Narsese string input
21 |
22 |
def get_user_input():
    """
    Blocking stdin reader: repeatedly reads lines from the user and queues
    them for processing, until the user enters "exit".
    """
    userinputstr = ""

    global pended_input_data_queue
    while userinputstr != "exit":
        userinputstr = input("")
        # BUGFIX: queue entries are consumed as (KEYWORD, payload) tuples by
        # process_input_channel(); appending the raw string meant console
        # input never matched a keyword and was silently discarded.
        parse_and_queue_input_string(userinputstr)
30 |
31 |
def parse_and_queue_input_string(input_string: str):
    """
    Parses any input string and queues the resultant Narsese sentences to the input buffer.

    If the input string is a command, executes the command instead.
    :param input_string:
    :return:
    """
    if not is_sensory_array_input_string(input_string):
        # regular text: queued as a Narsese payload; each line is treated as
        # a separate input when the queue is drained
        pended_input_data_queue.append((NARSESE_KEYWORD, input_string))
        return

    # todo broken
    # don't split by lines, this is array input
    pended_input_data_queue.append(parse_input_line(input_string))
48 |
49 |
50 |
51 |
def parse_input_line(input_string: str):
    """
    Parses one line of an input string and returns the resultant Narsese sentence.

    If the input string is a command, executes the command instead.
    :param input_string: one line of raw input
    :return: a Narsese sentence, or None if the line was a command or was rejected
    """
    input_string = input_string.replace(" ", "")  # remove all spaces
    try:
        NARS = Global.Global.NARS
        if input_string == "count":
            Global.Global.print_to_output(
                "Memory count (concepts in memory): " + str(len(NARS.memory)))
        elif input_string == "cycle":
            Global.Global.print_to_output("Current cycle: " + str(Global.Global.get_current_cycle_number()))
        elif input_string == "save":
            NARS.save_memory_to_disk()
        elif input_string == "load":
            NARS.load_memory_from_disk()
        elif input_string == "load_input":
            load_input()
        else:
            while Global.Global.NARS is None:
                Global.Global.print_to_output("Waiting for NARS to start up...")
                # BUGFIX: the module imports "timeit as time" and the timeit
                # module has no sleep(); use the real time module here.
                import time as _time
                _time.sleep(1.0)

            # BUGFIX: sentence parsing now only happens for non-command input;
            # previously commands like "count" also fell through to the parser
            # and produced a spurious "INPUT REJECTED" warning.
            if is_sensory_array_input_string(input_string):
                # sensory array input (a matrix of RGB or brightness values)
                sentence = parse_visual_sensory_string(input_string[len(VISION_KEYWORD):])
            else:
                # regular Narsese input
                sentence = NALGrammar.Sentences.new_sentence_from_string(input_string)

            return sentence
    except AssertionError as msg:
        Global.Global.print_to_output("WARNING: INPUT REJECTED: " + str(msg))
        return None
90 |
91 |
def process_input_channel():
    """
    Drains the pending input queue: Narsese strings become Tasks in the
    global buffer; images are forwarded to the vision buffer.

    (The original docstring claimed a boolean return; the function has
    always returned None.)
    """
    while len(pended_input_data_queue) > 0:
        # BUGFIX: pop(0) consumes the queue first-in-first-out; pop() took the
        # newest entry first, reversing the order of queued inputs.
        data = pended_input_data_queue.pop(0)
        if data[0] == NARSESE_KEYWORD:
            input_string = data[1]
            # turn strings into sentences
            lines = input_string.splitlines(False)
            for line in lines:
                sentence = parse_input_line(line)
                # turn sentences into tasks; rejected input yields None
                if sentence is not None:
                    process_sentence_into_task(sentence)
        elif data[0] == VISION_KEYWORD:
            img = data[1]
            img_array = np.array(img)

            # tuple holds spatial truth values
            Global.Global.NARS.vision_buffer.set_image(img_array)
114 |
115 |
def process_sentence_into_task(sentence: NALGrammar.Sentences.Sentence):
    """
    Put a sentence into a NARS task, then queue the Task into the global buffer.

    :param sentence: sentence to wrap; ignored when None (parse_input_line
        returns None for commands and rejected input)
    """
    # BUGFIX: guard against None, which previously crashed with an
    # AttributeError on get_formatted_string()
    if sentence is None: return
    if not Config.SILENT_MODE: Global.Global.print_to_output("IN: " + sentence.get_formatted_string())
    # create new task
    task = NARSDataStructures.Other.Task(sentence, is_input_task=True)

    Global.Global.NARS.global_buffer.PUT_NEW(task)
126 |
127 | def load_input(filename="input.nal"):
128 | """
129 | Load NAL input from a file
130 | """
131 | try:
132 | with open(filename, "r") as f:
133 | Global.Global.print_to_output("LOADING INPUT FILE: " + filename)
134 | for line in f.readlines():
135 | parse_and_queue_input_string(line)
136 | np.array()
137 | Global.Global.print_to_output("LOAD INPUT SUCCESS")
138 | except:
139 | Global.Global.print_to_output("LOAD INPUT FAIL")
140 |
141 |
def is_sensory_array_input_string(input_string):
    """True when the input line begins with the vision prefix ("vision:")."""
    return input_string.startswith(VISION_KEYWORD)
144 |
145 |
def parse_visual_sensory_string(input_string):
    """
    Convert a 3d array of RGB or a 2d array of brightness values to Narsese.

    Also generates and assigns this visual sensation its own unique term.

    Returns a sensory percept of form: {@S} --> [t])

    input_string:

        2D (matrix of intensities):
            [f;c,...,f;c],
            [...,...,...],
            [f;c,...,f;c]

        3D (tensor of intensities):
            [
            [f;c,...,f;c],
            [...,...,...],
            [f;c,...,f;c]
            ],
            ...,
            [
            [f;c,...,f;c],
            [...,...,...],
            [f;c,...,f;c]
            ]

    NOTE(review): despite the docstring, the visible code returns None and
    instead queues the parsed array via queue_visual_sensory_image_array() —
    the caller parse_input_line() assigns this None result as a "sentence";
    confirm intended (the array-input path is marked "#todo broken" upstream).
    """
    # remove line endings
    input_string = input_string.replace("\n", "")
    input_string = input_string.replace("\r", "")
    input_string = input_string[1:-1]  # strip the outermost bracket pair

    # bracket characters that delimit nested array levels
    array_idx_start_marker = NALSyntax.StatementSyntax.ArrayElementIndexStart.value
    array_idx_end_marker = NALSyntax.StatementSyntax.ArrayElementIndexEnd.value

    def parse_1D_array(input_string):
        # 1D array: comma-separated values become a 1-D numpy array of strings
        pixel_value_array = input_string.split(",")
        # NOTE(review): x_length and dim_lengths are computed but never used
        x_length = len(pixel_value_array)
        dim_lengths = (x_length,) # how many elements in a row
        return np.array(pixel_value_array)

    def parse_2D_array(input_string):
        # 2D array: split on top-level commas (depth 0), tracking bracket depth
        pixel_value_array = []
        depth = 0
        piece = ""
        for i in range(len(input_string)):
            c = input_string[i]
            if depth == 0 and c == ",":
                # NOTE(review): [0] keeps only the FIRST element of the parsed
                # row — looks suspicious; confirm intended (area marked broken)
                pixel_value_array.append(parse_1D_array(piece)[0])
                piece = ""
            else:
                if c == array_idx_start_marker:
                    depth += 1
                elif c == array_idx_end_marker:
                    depth -= 1
                else:
                    piece += c

        # flush the final row (no trailing top-level comma)
        pixel_value_array.append(parse_1D_array(piece)[0])

        # NOTE(review): x_length and y_length are computed but never used
        x_length = len(pixel_value_array[0]) # how many elements in a row
        y_length = len(pixel_value_array) # how many rows

        return np.array(pixel_value_array)

    def parse_3D_array(input_string):
        # 3D array: split on top-level commas; inner bracket pairs (depth 1-2)
        # are preserved in `piece` so parse_2D_array can re-parse them
        pixel_value_array = []
        depth = 0
        piece = ""
        for i in range(len(input_string)):
            c = input_string[i]
            if depth == 0 and c == ",":
                # NOTE(review): [0] keeps only the first parsed plane — confirm
                pixel_value_array.append(parse_2D_array(piece)[0])
                piece = ""
            else:
                if c == array_idx_start_marker:
                    if depth == 1: piece += c
                    depth += 1
                elif c == array_idx_end_marker:
                    if depth == 2: piece += c
                    depth -= 1
                else:
                    piece += c

        # flush the final plane
        pixel_value_array.append(parse_2D_array(piece)[0])

        return np.array(pixel_value_array)

    # dimensionality is inferred from the leading bracket characters
    if input_string[0] != array_idx_start_marker:
        pixel_value_array = parse_1D_array(input_string)
    else:
        if input_string[1] != array_idx_start_marker:
            pixel_value_array = parse_2D_array(input_string)
        else:
            pixel_value_array = parse_3D_array(input_string)

    queue_visual_sensory_image_array(pixel_value_array)
249 |
def queue_visual_sensory_image_array(img_array):
    """
    Queue a parsed image array as pending visual sensory input.

    :param img_array: numpy array of pixel value strings (1D, 2D, or 3D).
    """
    pended_input_data_queue.append((VISION_KEYWORD, img_array))
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Christian Hahm
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/NALGrammar/Sentences.py:
--------------------------------------------------------------------------------
1 | import Config
2 | import Global
3 | import NALSyntax
4 | import Asserts
5 |
6 | import NALGrammar.Terms
7 |
8 | from NALGrammar.Values import TruthValue, DesireValue, EvidentialValue
9 | import NALInferenceRules
10 | import numpy as np
11 |
12 | import NARSGUI
13 | from NALInferenceRules import TruthValueFunctions
14 |
15 | """
16 | Author: Christian Hahm
17 | Created: October 9, 2020
18 | Purpose: Enforces Narsese grammar that is used throughout the project
19 | """
class Sentence:
    """
    sentence ::= <statement><punctuation> %<value>%

    Base class for Narsese sentences (Judgment, Question, Goal):
    bundles a statement term with its punctuation, evidential value,
    and a Stamp of metadata (ID, timing, evidential base).
    """
    def __init__(self, statement, value, punctuation, occurrence_time=None):
        """
        :param statement: StatementTerm or CompoundTerm this sentence is about
        :param value: Pass as a tuple for array sentences (overall_truth, list_of_element_truth_values)
        :param punctuation: NALSyntax.Punctuation identifying the sentence type
        :param occurrence_time: inference cycle when the value holds; None means eternal
        """
        Asserts.assert_punctuation(punctuation)
        assert isinstance(statement, NALGrammar.Terms.StatementTerm) or isinstance(statement, NALGrammar.Terms.CompoundTerm), "ERROR: Judgment needs a statement"

        self.statement = statement
        self.punctuation: NALSyntax.Punctuation = punctuation
        self.stamp = Stamp(self_sentence=self, occurrence_time=occurrence_time)
        self.value: EvidentialValue = value  # truth-value (for Judgment) or desire-value (for Goal) or None (for Question)

        # Questions carry no value, so only Judgments/Goals get an expectation
        if self.punctuation != NALSyntax.Punctuation.Question:
            self.eternal_expectation = NALInferenceRules.TruthValueFunctions.Expectation(self.value.frequency,
                                                                                         self.value.confidence)

    def __str__(self):
        return self.get_formatted_string()

    def __hash__(self):
        """
        A Sentence is identified by its ID

        :return: Sentence ID
        """
        return self.stamp.id

    def is_event(self):
        """A sentence is an event iff its tense is not eternal."""
        return self.get_tense() != NALSyntax.Tense.Eternal

    def get_tense(self):
        return self.stamp.get_tense()

    def get_expectation(self):
        """
        Expectation of this sentence's value; an event's value is first
        projected to the current cycle.
        """
        if self.is_event():
            time_projected_truth_value = self.get_present_value()
            expectation = NALInferenceRules.TruthValueFunctions.Expectation(time_projected_truth_value.frequency,
                                                                            time_projected_truth_value.confidence)
            return expectation
        else:
            return self.eternal_expectation

    def get_eternal_expectation(self):
        return self.eternal_expectation

    def is_positive(self):
        """
        :returns: Is this statement True? (does it have more positive evidence than negative evidence?)
        """
        assert not isinstance(self, Question), "ERROR: Question cannot be positive."

        is_positive = self.get_expectation() >= Config.POSITIVE_THRESHOLD

        if Config.DEBUG: Global.Global.debug_print("Is " + str(self.statement) + " positive? " + str(is_positive))

        return is_positive

    def is_negative(self):
        """
        :returns: Is this statement False? (does it have more negative evidence than positive evidence?)
        """
        assert not isinstance(self, Question), "ERROR: Question cannot be negative."

        is_negative = self.get_expectation() < Config.NEGATIVE_THRESHOLD

        return is_negative

    def get_present_value(self):
        """
        If this is an event, project its value to the current time;
        otherwise return the stored (eternal) value.
        """
        if self.is_event():
            decay = Config.PROJECTION_DECAY_EVENT
            if isinstance(self, Goal):
                decay = Config.PROJECTION_DECAY_DESIRE
            present_value = NALInferenceRules.TruthValueFunctions.F_Projection(self.value.frequency,
                                                                              self.value.confidence,
                                                                              self.stamp.occurrence_time,
                                                                              Global.Global.get_current_cycle_number(),
                                                                              decay=decay)

            return present_value
        else:
            return self.value

    def get_term_string_no_id(self):
        """Format statement + punctuation (+ tense and value) without the sentence ID prefix."""
        string = self.statement.get_term_string()
        string += str(self.punctuation.value)
        if self.is_event(): string = string + " " + self.get_tense().value
        if self.value is not None:
            string = string + " " + self.get_present_value().get_formatted_string() + " " + str(NALSyntax.StatementSyntax.ExpectationMarker.value) + str(self.get_expectation())
        return string

    def get_formatted_string(self):
        """Full display string: ID markers followed by the term string."""
        string = self.get_term_string_no_id()
        string = Global.Global.MARKER_SENTENCE_ID + str(self.stamp.id) + Global.Global.MARKER_ID_END + string
        return string

    def get_gui_info(self):
        """Collect this sentence's display fields into a dictionary for the GUI."""
        gui_info = {}  # renamed from `dict` so the builtin is not shadowed
        gui_info[NARSGUI.NARSGUI.KEY_STRING] = self.get_formatted_string()
        gui_info[NARSGUI.NARSGUI.KEY_TRUTH_VALUE] = str(self.value)
        gui_info[NARSGUI.NARSGUI.KEY_TIME_PROJECTED_TRUTH_VALUE] = None if self.stamp.occurrence_time is None else str(self.get_present_value())
        gui_info[NARSGUI.NARSGUI.KEY_EXPECTATION] = str(self.get_expectation())
        gui_info[NARSGUI.NARSGUI.KEY_IS_POSITIVE] = "True" if self.is_positive() else "False"
        if isinstance(self, Goal):
            gui_info[NARSGUI.NARSGUI.KEY_PASSES_DECISION] = "True" if NALInferenceRules.Local.Decision(self) else "False"
        else:
            gui_info[NARSGUI.NARSGUI.KEY_PASSES_DECISION] = None
        gui_info[NARSGUI.NARSGUI.KEY_STRING_NOID] = self.get_term_string_no_id()
        gui_info[NARSGUI.NARSGUI.KEY_ID] = str(self.stamp.id)
        gui_info[NARSGUI.NARSGUI.KEY_OCCURRENCE_TIME] = self.stamp.occurrence_time
        gui_info[NARSGUI.NARSGUI.KEY_SENTENCE_TYPE] = type(self).__name__
        evidential_base_iterator = iter(self.stamp.evidential_base)
        # skip the first element, which is just the sentence's ID, so it's already displayed
        next(evidential_base_iterator)
        gui_info[NARSGUI.NARSGUI.KEY_LIST_EVIDENTIAL_BASE] = [str(evidence) for evidence in evidential_base_iterator]
        gui_info[NARSGUI.NARSGUI.KEY_LIST_INTERACTED_SENTENCES] = []  # todo remove

        is_array = isinstance(self.statement, NALGrammar.Terms.SpatialTerm)

        gui_info[NARSGUI.NARSGUI.KEY_IS_ARRAY] = is_array
        gui_info[NARSGUI.NARSGUI.KEY_ARRAY_IMAGE] = self.statement if is_array and not isinstance(self, Question) else None
        gui_info[NARSGUI.NARSGUI.KEY_ARRAY_ELEMENT_STRINGS] = self.statement.subterms if is_array and not isinstance(self, Question) else None
        # END TODO

        gui_info[NARSGUI.NARSGUI.KEY_DERIVED_BY] = self.stamp.derived_by
        gui_info[NARSGUI.NARSGUI.KEY_PARENT_PREMISES] = str(self.stamp.parent_premises)
        return gui_info
157 |
158 |
class Judgment(Sentence):
    """
    A Judgment: a statement asserted to hold, with a truth value.
    """

    def __init__(self, statement, value, occurrence_time=None):
        Asserts.assert_valid_statement(statement)
        super().__init__(statement,
                         value,
                         NALSyntax.Punctuation.Judgment,
                         occurrence_time=occurrence_time)
171 |
172 |
class Question(Sentence):
    """
    A Question: a statement whose truth value is being asked for.
    Questions carry no evidential value.
    """

    def __init__(self, statement):
        Asserts.assert_valid_statement(statement)
        super().__init__(statement, None, NALSyntax.Punctuation.Question)
184 |
185 |
class Goal(Sentence):
    """
    A Goal: a statement the system desires to realize, with a desire value.
    """

    def __init__(self, statement, value, occurrence_time=None):
        # tracks whether this goal's operation has already been executed
        self.executed = False
        Asserts.assert_valid_statement(statement)
        super().__init__(statement,
                         value,
                         NALSyntax.Punctuation.Goal,
                         occurrence_time=occurrence_time)

    def get_desirability(self):
        """A goal's desirability is its (time-projected) expectation."""
        return self.get_expectation()
202 |
203 |
class Stamp:
    """
    Metadata of a sentence: creation time, occurrence time (when its
    truth value is valid), evidential base, and derivation provenance.
    """
    def __init__(self, self_sentence, occurrence_time=None):
        self.id = Global.Global.NARS.memory.get_next_stamp_id()
        # creation time is measured in inference cycles
        self.creation_time = Global.Global.get_current_cycle_number()
        self.occurrence_time = occurrence_time
        self.sentence = self_sentence
        self.evidential_base = EvidentialBase(self_sentence=self_sentence)
        self.derived_by = None  # None if this was an input task
        self.parent_premises = []
        self.from_one_premise_inference = False  # derived via one-premise inference?

    def get_tense(self):
        """Map this stamp's occurrence time to a NALSyntax.Tense."""
        if self.occurrence_time is None:
            return NALSyntax.Tense.Eternal

        now = Global.Global.get_current_cycle_number()
        if self.occurrence_time < now:
            return NALSyntax.Tense.Past
        if self.occurrence_time == now:
            return NALSyntax.Tense.Present
        return NALSyntax.Tense.Future
231 |
232 |
233 |
class EvidentialBase:
    """
    Records the history of sentences from which this sentence was derived.
    """
    def __init__(self, self_sentence):
        """
        :param self_sentence: the owning Sentence; it is its own first evidence.
        """
        self.sentence = self_sentence
        self.base = [self_sentence]  # evidence sentences, oldest first

    def __iter__(self):
        return iter(self.base)

    def __contains__(self, object):
        return object in self.base

    def merge_sentence_evidential_base_into_self(self, sentence):
        """
        Merge a Sentence's evidential base into self.
        This function assumes the base to merge does not have evidential overlap with this base
        #todo figure out good way to store evidential bases such that older evidence is purged on overflow
        """
        self.base.extend(sentence.stamp.evidential_base)

        # evict oldest evidence while over capacity
        while len(self.base) > Config.MAX_EVIDENTIAL_BASE_LENGTH:
            self.base.pop(0)

    def has_evidential_overlap(self, other_base):
        """
        Does the other base share any evidence with this one?
        O(M + N) set-disjointness test.
        https://stackoverflow.com/questions/3170055/test-if-lists-share-any-items-in-python
        """
        if self.sentence.is_event():
            return False
        return not set(self.base).isdisjoint(other_base.base)
271 |
272 |
273 |
def may_interact(j1, j2):
    """
    Two sentences may interact for inference iff:
        # Neither is "None"
        # They are not the same Sentence (by stamp ID)
        # Neither appears in the other's evidential base
        # Their evidential bases do not overlap
    :param j1:
    :param j2:
    :return: Are the sentences allowed to interact for inference
    """
    if j1 is None or j2 is None:
        return False
    if j1.stamp.id == j2.stamp.id:
        return False
    if (j1 in j2.stamp.evidential_base) or (j2 in j1.stamp.evidential_base):
        return False
    return not j1.stamp.evidential_base.has_evidential_overlap(j2.stamp.evidential_base)
297 |
298 |
299 |
def new_sentence_from_string(sentence_string: str):
    """
    Parse one line of Narsese into a Sentence object.

    :param sentence_string: String of NAL syntax: statement, punctuation,
        optional tense, optional truth value %frequency;confidence%

    :returns: Sentence (Judgment, Question, or Goal) parsed from sentence_string
    :raises AssertionError: if statement delimiters or punctuation are missing
    """
    # Find statement start and statement end
    start_idx = sentence_string.find(NALSyntax.StatementSyntax.Start.value)
    assert (start_idx != -1), "Statement start character " + NALSyntax.StatementSyntax.Start.value + " not found."

    end_idx = sentence_string.rfind(NALSyntax.StatementSyntax.End.value)
    assert (end_idx != -1), "Statement end character " + NALSyntax.StatementSyntax.End.value + " not found."

    # Find sentence punctuation (expected immediately after the statement)
    punctuation_idx = end_idx + 1
    assert (punctuation_idx < len(sentence_string)), "No punctuation found."
    punctuation_str = sentence_string[punctuation_idx]
    punctuation = NALSyntax.Punctuation.get_punctuation_from_string(punctuation_str)
    assert (punctuation is not None), punctuation_str + " is not punctuation."

    # Find Truth Value, if it exists (searched only after the punctuation)
    start_truth_val_idx = sentence_string.find(NALSyntax.StatementSyntax.TruthValMarker.value, punctuation_idx)
    middle_truth_val_idx = sentence_string.find(NALSyntax.StatementSyntax.ValueSeparator.value, punctuation_idx)
    end_truth_val_idx = sentence_string.rfind(NALSyntax.StatementSyntax.TruthValMarker.value, punctuation_idx)

    # both markers present and distinct => some truth value was written
    truth_value_found = not (start_truth_val_idx == -1 or end_truth_val_idx == -1 or start_truth_val_idx == end_truth_val_idx)
    # a value separator means both frequency and confidence were written
    has_full_truth_value = middle_truth_val_idx != -1
    freq = None
    conf = None
    if has_full_truth_value:
        # Parse truth value from string with specified frequency and confidence
        freq = float(sentence_string[start_truth_val_idx + 1:middle_truth_val_idx])
        conf = float(sentence_string[middle_truth_val_idx + 1:end_truth_val_idx])
    elif truth_value_found:
        # Parse single truth value (f) from string, with reference to OpenNARS
        freq = float(sentence_string[start_truth_val_idx + 1:end_truth_val_idx])

    # create the statement
    statement_string = sentence_string[start_idx:end_idx + 1]
    statement = NALGrammar.Terms.simplify(NALGrammar.Terms.from_string(statement_string))


    # Find Tense, if it exists
    # otherwise mark it as eternal
    tense = NALSyntax.Tense.Eternal
    for t in NALSyntax.Tense:
        if t != NALSyntax.Tense.Eternal:
            tense_idx = sentence_string.find(t.value)
            if tense_idx != -1:  # found a tense
                tense = NALSyntax.Tense.get_tense_from_string(sentence_string[tense_idx: tense_idx + len(t.value)])
                break


    # make sentence
    # freq/conf may be None here; the Value constructors apply defaults
    if punctuation == NALSyntax.Punctuation.Judgment:
        sentence = Judgment(statement, TruthValue(freq, conf))
    elif punctuation == NALSyntax.Punctuation.Question:
        sentence = Question(statement)
    elif punctuation == NALSyntax.Punctuation.Goal:
        sentence = Goal(statement, DesireValue(freq, conf))
    else:
        assert False,"Error: No Punctuation!"



    if tense == NALSyntax.Tense.Present:
        # Mark present tense event as happening right now!
        sentence.stamp.occurrence_time = Global.Global.get_current_cycle_number()

    return sentence
--------------------------------------------------------------------------------
/NALGrammar/Values.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: October 9, 2020
4 | Purpose: Enforces Narsese grammar that is used throughout the project
5 | """
6 | import Config
7 | import NALSyntax
8 | import NALInferenceRules
9 |
class EvidentialValue:
    """
    Base class for evidential values: a (frequency, confidence) pair.
    Confidence is clamped into the open interval (0, 1) before validation.
    """

    def __init__(self, frequency, confidence):
        # clamp confidence away from the closed endpoints 0 and 1
        if confidence >= 1.0: confidence = 0.9999
        if confidence <= 0.0: confidence = 0.0001
        assert (0.0 <= frequency <= 1.0), "ERROR: Frequency " + str(frequency) + " must be in [0,1]"
        assert (0.0 <= confidence < 1.0), "ERROR: Confidence must be in (0,1)"
        self.frequency = float(frequency)
        self.confidence = float(confidence)

    def get_formatted_string(self):
        # subclasses must provide their own display form
        assert False, "Formatted string not defined for Evidential Value base class"

    def __str__(self):
        return self.get_formatted_string()
28 |
29 |
class DesireValue(EvidentialValue):
    """
    For a virtual judgement S |=> D,
    how much the associated statement S implies the overall desired state of NARS, D
    """

    def __init__(self, frequency=Config.DEFAULT_GOAL_FREQUENCY, confidence=None):
        if frequency is None: frequency = Config.DEFAULT_GOAL_FREQUENCY
        if confidence is None: confidence = NALInferenceRules.HelperFunctions.get_unit_evidence()
        if confidence > 0.99: confidence = 0.99999
        super().__init__(frequency=frequency, confidence=confidence)
        marker = str(NALSyntax.StatementSyntax.TruthValMarker.value)
        separator = str(NALSyntax.StatementSyntax.ValueSeparator.value)
        # cache the display form once; the value is fixed after construction
        self.formatted_string = (marker
                                 + "{:.2f}".format(self.frequency)
                                 + separator
                                 + "{:.2f}".format(self.confidence)
                                 + marker)

    def get_formatted_string(self):
        return self.formatted_string
50 |
51 |
class TruthValue(EvidentialValue):
    """
    Describes the evidential basis for the associated statement to be true.
    """

    def __init__(self, frequency=Config.DEFAULT_JUDGMENT_FREQUENCY, confidence=None):
        if frequency is None: frequency = Config.DEFAULT_JUDGMENT_FREQUENCY
        if confidence is None: confidence = NALInferenceRules.HelperFunctions.get_unit_evidence()
        super().__init__(frequency=frequency, confidence=confidence)
        marker = str(NALSyntax.StatementSyntax.TruthValMarker.value)
        separator = str(NALSyntax.StatementSyntax.ValueSeparator.value)
        # frequency is shown to 2 decimal places, confidence to 10
        self.formatted_string = (marker
                                 + '{0:.2f}'.format(self.frequency)
                                 + separator
                                 + '{0:.10f}'.format(self.confidence)
                                 + marker)

    def Clone(self):
        """Return a copy of this TruthValue."""
        return TruthValue(self.frequency, self.confidence)

    def get_formatted_string(self):
        return self.formatted_string
--------------------------------------------------------------------------------
/NALInferenceRules/Composition.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Composition Inference Rules ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import Asserts
13 | import NALGrammar
14 | import NALSyntax
15 | from NALInferenceRules import TruthValueFunctions, HelperFunctions
16 |
17 |
def DisjunctionOrIntensionalIntersection(j1, j2):
    """
    First Order: Intensional Intersection (Strong Inference)
    Higher Order: Disjunction

    Assumes: j1 and j2 do not have evidential overlap
    -----------------

    Input:
        j1: Sentence (T1 --> M ) (Sentence (T1 ==> M ))
        and
        j2: Sentence (T2 --> M ) (Sentence (T2 ==> M ))

        OR

        j1: Sentence (M --> T1 ) (Sentence (M ==> T1 ))
        and
        j2: Sentence (M --> T2 ) (Sentence (M ==> T2 ))
    Evidence:
        F_int

        OR

        F_uni
    Returns:
        :- Sentence ((T1 | T2) --> M) (Sentence ((T1 || T2) --> M))
        OR
        :- Sentence (M --> (T1 | T2)) (Sentence (M --> (T1 || T2)))
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # choose connector/copula based on the order of the premises
    if j1.statement.is_first_order() and j2.statement.is_first_order():
        connector = NALSyntax.TermConnector.IntensionalIntersection
        copula = NALSyntax.Copula.Inheritance
    else:
        # higher-order, could be temporal
        # todo temporal disjunction
        connector = NALSyntax.TermConnector.Disjunction
        copula = NALSyntax.Copula.Implication

    # build the result statement depending on which term the premises share
    result_truth_function = None
    if j1.statement.get_predicate_term() == j2.statement.get_predicate_term():
        # j1: Sentence(T1 --> M < f1, c1 >)
        # j2: Sentence(T2 --> M < f2, c2 >)
        if isinstance(j1.statement.get_subject_term(), NALGrammar.Terms.CompoundTerm) \
                or isinstance(j2.statement.get_subject_term(), NALGrammar.Terms.CompoundTerm):
            # don't compound terms which are already compound
            # this reduces complexity.
            # todo: better simplifying of syntactically complex results
            return None

        compound_term = NALGrammar.Terms.CompoundTerm([j1.statement.get_subject_term(),
                                                       j2.statement.get_subject_term()],
                                                      term_connector=connector)  # (T1 | T2)
        result_statement = NALGrammar.Terms.StatementTerm(compound_term,
                                                          j1.statement.get_predicate_term(),
                                                          copula)  # ((T1 | T2) --> M)

        # Questions have no value, so no truth function is applied
        if not isinstance(j1, NALGrammar.Sentences.Question):
            result_truth_function = TruthValueFunctions.F_Intersection

    elif j1.statement.get_subject_term() == j2.statement.get_subject_term():
        # j1: Sentence(M --> T1 < f1, c1 >)
        # j2: Sentence(M --> T2 < f2, c2 >)
        if isinstance(j1.statement.get_predicate_term(), NALGrammar.Terms.CompoundTerm) \
                or isinstance(j2.statement.get_predicate_term(), NALGrammar.Terms.CompoundTerm):
            # don't compound terms which are already compound
            # this reduces complexity.
            # todo: better simplifying of syntactically complex results
            return None

        compound_term = NALGrammar.Terms.CompoundTerm([j1.statement.get_predicate_term(),
                                                       j2.statement.get_predicate_term()],
                                                      term_connector=connector)  # (T1 | T2)

        result_statement = NALGrammar.Terms.StatementTerm(j1.statement.get_subject_term(),
                                                          compound_term,
                                                          copula)  # (M --> (T1 | T2))

        if not isinstance(j1, NALGrammar.Sentences.Question):
            result_truth_function = TruthValueFunctions.F_Union
    else:
        assert False,"ERROR: Invalid inputs to Intensional Intersection"

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 result_truth_function)
109 |
110 |
def ConjunctionOrExtensionalIntersection(j1, j2):
    """
    First-Order: Extensional Intersection (Strong Inference)
    Higher-Order: Conjunction

    Assumes: j1 and j2 do not have evidential overlap
    -----------------

    Input:
        j1: Sentence (T1 --> M ) (Sentence (T1 ==> M ))
        and
        j2: Sentence (T2 --> M ) (Sentence (T2 ==> M ))

        OR

        j1: Sentence (M --> T1 ) (Sentence (M ==> T1 ))
        and
        j2: Sentence (M --> T2 ) (Sentence (M ==> T2 ))
    Evidence:
        F_uni

        OR

        F_int
    Returns:
        Sentence ((T1 & T2) --> M) or Sentence ((T1 && T2) ==> M)

        or

        Sentence (M --> (T1 & T2)) or Sentence (M ==> (T1 && T2))
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # choose connector/copula based on the order of the premises
    connector = None
    copula = None
    if j1.statement.is_first_order() and j2.statement.is_first_order():
        connector = NALSyntax.TermConnector.ExtensionalIntersection  # &
        copula = NALSyntax.Copula.Inheritance
    else:
        # higher-order, could be temporal
        connector = NALSyntax.TermConnector.Conjunction  # &&
        copula = NALSyntax.Copula.Implication

    # build the result statement depending on which term the premises share
    result_truth_function = None
    if j1.statement.get_predicate_term() == j2.statement.get_predicate_term():
        # j1: Sentence(T1 --> M < f1, c1 >)
        # j2: Sentence(T2 --> M < f2, c2 >)
        if isinstance(j1.statement.get_subject_term(), NALGrammar.Terms.CompoundTerm) \
                or isinstance(j2.statement.get_subject_term(), NALGrammar.Terms.CompoundTerm):
            # don't compound terms which are already compound
            # this reduces complexity.
            # todo: better simplifying of syntactically complex results
            return None

        compound_term = NALGrammar.Terms.CompoundTerm([j1.statement.get_subject_term(),
                                                       j2.statement.get_subject_term()],
                                                      term_connector=connector)  # (T1 & T2)
        result_statement = NALGrammar.Terms.StatementTerm(compound_term,
                                                          j1.statement.get_predicate_term(),
                                                          copula)  # ((T1 & T2) --> M)

        # Questions have no value, so no truth function is applied
        if not isinstance(j1, NALGrammar.Sentences.Question):
            result_truth_function = TruthValueFunctions.F_Union

    elif j1.statement.get_subject_term() == j2.statement.get_subject_term():
        # j1: Sentence(M --> T1 < f1, c1 >)
        # j2: Sentence(M --> T2 < f2, c2 >)
        if isinstance(j1.statement.get_predicate_term(), NALGrammar.Terms.CompoundTerm) \
                or isinstance(j2.statement.get_predicate_term(), NALGrammar.Terms.CompoundTerm):
            # don't compound terms which are already compound
            # this reduces complexity.
            # todo: better simplifying of syntactically complex results
            return None
        compound_term = NALGrammar.Terms.CompoundTerm([j1.statement.get_predicate_term(),
                                                       j2.statement.get_predicate_term()],
                                                      term_connector=connector)  # (T1 & T2)
        result_statement = NALGrammar.Terms.StatementTerm(j1.statement.get_subject_term(),
                                                          compound_term,
                                                          copula)  # (M --> (T1 & T2))

        if not isinstance(j1, NALGrammar.Sentences.Question):
            result_truth_function = TruthValueFunctions.F_Intersection
    else:
        assert False, "ERROR: Invalid inputs to Extensional Intersection"

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 result_truth_function)
202 |
203 |
def IntensionalDifference(j1, j2):
    """
    Intensional Difference (Strong Inference)

    Assumes: j1 and j2 do not have evidential overlap
    -----------------

    Input:
        j1: Sentence (T1 --> M)

        j2: Sentence (T2 --> M)
    Evidence:
        F_difference
    Returns:
        :- Sentence ((T1 ~ T2) --> M)
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)
    assert j1.statement.get_predicate_term() == j2.statement.get_predicate_term()

    subject_1 = j1.statement.get_subject_term()
    subject_2 = j2.statement.get_subject_term()

    # refuse to nest an already-compound term; keeps result complexity down
    # todo: better simplifying of syntactically complex results
    if isinstance(subject_1, NALGrammar.Terms.CompoundTerm) \
            or isinstance(subject_2, NALGrammar.Terms.CompoundTerm):
        return None

    difference_term = NALGrammar.Terms.CompoundTerm([subject_1, subject_2],
                                                    NALSyntax.TermConnector.IntensionalDifference)  # (T1 ~ T2)
    result_statement = NALGrammar.Terms.StatementTerm(difference_term,
                                                      j1.statement.get_predicate_term(),
                                                      NALSyntax.Copula.Inheritance)  # ((T1 ~ T2) --> M)

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Difference)
241 |
242 |
def ExtensionalDifference(j1, j2):
    """
    Extensional Difference (Strong Inference)

    Assumes: j1 and j2 do not have evidential overlap
    -----------------
    Input:
        j1: Sentence (M --> T1)

        j2: Sentence (M --> T2)
    Evidence:
        F_difference
    Returns:
        :- Sentence (M --> (T1 - T2))
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)
    assert j1.statement.get_subject_term() == j2.statement.get_subject_term()

    predicate_1 = j1.statement.get_predicate_term()
    predicate_2 = j2.statement.get_predicate_term()

    # refuse to nest an already-compound term; keeps result complexity down
    # todo: better simplifying of syntactically complex results
    if isinstance(predicate_1, NALGrammar.Terms.CompoundTerm) \
            or isinstance(predicate_2, NALGrammar.Terms.CompoundTerm):
        return None

    difference_term = NALGrammar.Terms.CompoundTerm([predicate_1, predicate_2],
                                                    NALSyntax.TermConnector.ExtensionalDifference)  # (T1 - T2)
    result_statement = NALGrammar.Terms.StatementTerm(j1.statement.get_subject_term(),
                                                      difference_term,
                                                      NALSyntax.Copula.Inheritance)  # (M --> (T1 - T2))

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Difference)
--------------------------------------------------------------------------------
/NALInferenceRules/Conditional.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Conditional Syllogistic Inference Rules ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import numpy as np
13 |
14 | import Asserts
15 | import NALGrammar
16 | import NALSyntax
17 | from NALInferenceRules import TruthValueFunctions, HelperFunctions
18 |
19 |
def ConditionalAnalogy(j1, j2):
    """
    Conditional Analogy

    Input:
        j1: Statement (S) {tense}

        j2: Equivalence Statement (S <=> P)
    Evidence:
        F_analogy
    Returns:
        :- Sentence (P)
    """
    Asserts.assert_sentence_equivalence(j2)
    # NOTE(review): this validates j2 twice; the second assert was possibly
    # intended for j1 instead — confirm against Asserts.py and the callers
    Asserts.assert_sentence(j2)

    # the result is whichever side of the equivalence j1 does NOT match
    if j1.statement == j2.statement.get_subject_term():
        result_statement: NALGrammar.Terms.StatementTerm = j2.statement.get_predicate_term()
    elif j1.statement == j2.statement.get_predicate_term():
        result_statement: NALGrammar.Terms.StatementTerm = j2.statement.get_subject_term()
    else:
        assert False, "Error: Invalid inputs to Conditional Analogy: " + j1.get_formatted_string() + " and " + j2.get_formatted_string()

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Analogy)
48 |
49 |
def ConditionalJudgmentDeduction(j1, j2):
    """
    Conditional Judgment Deduction

    Input:
        j1: Implication Statement (S ==> P)

        j2: Statement (S) {tense} (E ==> S)
    Evidence:
        F_deduction
    Returns:
        :- P. :|: (E ==> P)
    """
    Asserts.assert_sentence_forward_implication(j1)

    antecedent = j1.statement.get_subject_term()  # S
    assert j2.statement == antecedent, "Error: Invalid inputs to Conditional Judgment Deduction: " \
        + j1.get_formatted_string() \
        + " and " \
        + j2.get_formatted_string()

    # The conclusion is the consequent of the implication.
    consequent: NALGrammar.Terms.StatementTerm = j1.statement.get_predicate_term()  # P

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, consequent,
                                                                 TruthValueFunctions.F_Deduction)
74 |
75 |
def ConditionalJudgmentAbduction(j1, j2):
    """
    Conditional Judgment Abduction

    Input:
        j1: Implication Statement (S ==> P)

        j2: Judgment Event (P) {tense}, i.e. (E ==> P)
    Evidence:
        F_abduction
    Returns:
        :- S. :|: (E ==> S)
    """
    Asserts.assert_sentence_forward_implication(j1)

    consequent = j1.statement.get_predicate_term()  # P
    assert j2.statement == consequent, "Error: Invalid inputs to Conditional Judgment Abduction: " \
        + j1.get_formatted_string() \
        + " and " \
        + j2.get_formatted_string()

    # Abduction concludes the antecedent from an observed consequent.
    antecedent: NALGrammar.Terms.StatementTerm = j1.statement.get_subject_term()  # S

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, antecedent,
                                                                 TruthValueFunctions.F_Abduction)
101 |
102 |
def ConditionalGoalDeduction(j1, j2):
    """
    Conditional Goal Deduction

    Input:
        j1: Goal Event (P) {tense}, i.e. (P ==> D)

        j2: Implication Statement (S ==> P)
    Evidence:
        F_deduction
    Returns:
        :- S! (S ==> D)
    """
    Asserts.assert_sentence_forward_implication(j2)

    consequent = j2.statement.get_predicate_term()  # P
    assert j1.statement == consequent, "Error: Invalid inputs to Conditional Goal Deduction: " \
        + j1.get_formatted_string() \
        + " and " \
        + j2.get_formatted_string()

    # To achieve goal P, derive the subgoal S that implies it.
    subgoal: NALGrammar.Terms.StatementTerm = j2.statement.get_subject_term()  # S

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, subgoal,
                                                                 TruthValueFunctions.F_Deduction)
128 |
129 |
def ConditionalGoalInduction(j1, j2):
    """
    Conditional Goal Induction

    Input:
        j1: Goal Event (S!) {tense}, i.e. (S ==> D)

        j2: Implication Statement (S ==> P)
    Evidence:
        F_induction
    Returns:
        :- P! (P ==> D)
    """
    Asserts.assert_sentence_forward_implication(j2)

    antecedent = j2.statement.get_subject_term()  # S
    assert j1.statement == antecedent, "Error: Invalid inputs to Conditional Goal Induction: " \
        + j1.get_formatted_string() \
        + " and " \
        + j2.get_formatted_string()

    # Induce the consequent as a new goal (P, not S — original comment was wrong).
    induced_goal: NALGrammar.Terms.StatementTerm = j2.statement.get_predicate_term()  # P

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, induced_goal,
                                                                 TruthValueFunctions.F_Induction)
155 |
156 |
def SimplifyConjunctiveGoal(j1, j2):
    """
    Conditional Goal Deduction (conjunction simplification)

    Removes j2's statement from the conjunctive goal j1, leaving the rest of
    the conjunction as the derived goal.

    Input:
        j1: Goal Event (C &/ S)! , i.e. ((C && S) ==> D)

        j2: Belief (C) {tense}
    Evidence:
        F_abduction
    Returns:
        :- S! (S ==> D)
    """
    remaining_subterms = j1.statement.subterms.copy()
    # np.where returns a tuple of index arrays; take the first axis' matches.
    matching_indices = np.where(remaining_subterms == j2.statement)[0]

    # Fix: the original compared np.where's tuple to -1 (`found_idx != -1`),
    # which is always truthy, so invalid inputs were never caught.
    assert len(matching_indices) > 0, "Error: Invalid inputs to Simplify conjuctive goal (deduction): " \
                         + j1.get_formatted_string() \
                         + " and " \
                         + j2.get_formatted_string()

    found_idx = int(matching_indices[0])  # remove only the first occurrence
    remaining_subterms = np.delete(remaining_subterms, found_idx)

    if len(remaining_subterms) == 1:
        result_statement = remaining_subterms[0]
    else:
        new_intervals = []
        if len(j1.statement.intervals) > 0:
            # Fix: list.pop() returns the *removed element*, so the original
            # `intervals.copy().pop(found_idx)` discarded the remaining list.
            # NOTE(review): assumes found_idx is a valid interval index — confirm
            # interval/subterm alignment for the last subterm.
            new_intervals = list(j1.statement.intervals)
            new_intervals.pop(found_idx)
        result_statement = NALGrammar.Terms.CompoundTerm(subterms=remaining_subterms,
                                                         term_connector=j1.statement.connector,
                                                         intervals=new_intervals)

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Deduction)
194 |
195 |
def SimplifyNegatedConjunctiveGoal(j1, j2):
    """
    Conditional Goal Deduction (negated conjunction simplification)

    Removes j2's statement from the negated conjunctive goal j1.

    Input:
        j1: Goal Event (--,(A &/ B))! , i.e. ((A &/ B) ==> D)

        j2: Belief (A) {tense}
    Evidence:
        F_abduction
    Returns:
        :- B! (B ==> D)
    """
    # j1.statement is the negation; its single subterm is the conjunction.
    remaining_subterms = j1.statement.subterms[0].subterms.copy()
    # np.where returns a tuple of index arrays; take the first axis' matches.
    matching_indices = np.where(remaining_subterms == j2.statement)[0]

    # Fix: the original compared np.where's tuple to -1 (`found_idx != -1`),
    # which is always truthy, so invalid inputs were never caught.
    assert len(matching_indices) > 0, "Error: Invalid inputs to Simplify negated conjuctive goal (induction): " \
                         + j1.get_formatted_string() \
                         + " and " \
                         + j2.get_formatted_string()

    found_idx = int(matching_indices[0])  # remove only the first occurrence
    # Fix: the original called list.pop() with np.where's tuple, a TypeError;
    # np.delete matches the sibling SimplifyConjunctiveGoal implementation.
    remaining_subterms = np.delete(remaining_subterms, found_idx)

    if len(remaining_subterms) == 1:
        # NOTE(review): wraps the lone remaining subterm with j1.statement.connector
        # (the negation) — presumably intentional, yielding a negated goal; confirm.
        result_statement = NALGrammar.Terms.CompoundTerm(subterms=remaining_subterms,
                                                         term_connector=j1.statement.connector)
    else:
        new_intervals = []
        if len(j1.statement.intervals) > 0:
            # Fix: list.pop() returns the *removed element*, so the original
            # `intervals.copy().pop(found_idx)` discarded the remaining list.
            new_intervals = list(j1.statement.intervals)
            new_intervals.pop(found_idx)
        result_statement = NALGrammar.Terms.CompoundTerm(subterms=remaining_subterms,
                                                         term_connector=j1.statement.connector,
                                                         intervals=new_intervals)

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Induction)
235 |
236 |
237 | """
238 | Conditional Conjunctional Rules
239 | --------------------------------
240 | Conditional Rules w/ Conjunctions
241 | """
242 |
243 |
def ConditionalConjunctionalDeduction(j1, j2):
    """
    Conditional Conjunctional Deduction

    Removes j2's statement from j1's conjunctive antecedent (or from the
    conjunctive goal itself), producing a simplified implication or goal.

    Input:
        j1: Conjunctive Implication Judgment ((C1 && C2 && ... CN && S) ==> P)
        or
        Conjunctive Implication Judgment ((C1 &/ C2 &/ ... CN) ==> P

        j2: Statement (S) {tense}
    Evidence:
        F_deduction
    Returns:
        :- ((C1 && C2 && ... CN) ==> P)
    """
    # A Judgment premise carries the implication; take its conjunctive subject.
    # A Goal premise is itself the conjunction to simplify.
    if isinstance(j1, NALGrammar.Sentences.Judgment):
        Asserts.assert_sentence_forward_implication(j1)
        subject_term: NALGrammar.Terms.CompoundTerm = j1.statement.get_subject_term()
    elif isinstance(j1, NALGrammar.Sentences.Goal):
        subject_term: NALGrammar.Terms.CompoundTerm = j1.statement
    else:
        assert False, "ERROR"

    new_subterms = list(set(subject_term.subterms) - {j2.statement})  # subtract j2 from j1 subject subterms

    if len(new_subterms) > 1:
        # recreate the conjunctional compound with the new subterms
        new_compound_subject_term = NALGrammar.Terms.CompoundTerm(new_subterms, subject_term.connector)
    elif len(new_subterms) == 1:
        # only 1 subterm, no need to make it a compound
        new_compound_subject_term = new_subterms[0]
    else:
        # 0 new subterms: j2 matched every (deduplicated) subterm.
        if len(subject_term.subterms) > 1:
            # NOTE(review): fallback drops only the *last* subterm and rebuilds —
            # confirm this is the intended behavior for duplicate subterms.
            new_subterms = subject_term.subterms.copy()
            new_subterms.pop()
            new_compound_subject_term = NALGrammar.Terms.CompoundTerm(new_subterms, subject_term.connector)
        else:
            assert False, "ERROR: Invalid inputs to Conditional Conjunctional Deduction " + j1.get_formatted_string() + " and " + j2.get_formatted_string()

    # Rebuild the implication (Judgment) or return the reduced conjunction (Goal).
    if isinstance(j1, NALGrammar.Sentences.Judgment):
        result_statement = NALGrammar.Terms.StatementTerm(new_compound_subject_term, j1.statement.get_predicate_term(),
                                                          j1.statement.get_copula())
    elif isinstance(j1, NALGrammar.Sentences.Goal):
        result_statement = new_compound_subject_term
    else:
        assert False, "ERROR"

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Deduction)
296 |
297 |
def ConditionalConjunctionalAbduction(j1, j2):
    """
    Conditional Conjunctional Abduction

    Input:
        j1: Implication Statement ((C1 && C2 && ... CN && S) ==> P)

        j2: Implication Statement ((C1 && C2 && ... CN) ==> P) {tense}
    Evidence:
        F_abduction
    Returns:
        :- S

    #todo temporal
    """
    Asserts.assert_sentence_forward_implication(j1)
    Asserts.assert_sentence_forward_implication(j2)

    def components_of(subject):
        # A conjunctive subject contributes its subterms; anything else is a singleton.
        if NALSyntax.TermConnector.is_conjunction(subject.connector):
            return subject.subterms
        return [subject]

    j1_components = components_of(j1.statement.get_subject_term())
    j2_components = components_of(j2.statement.get_subject_term())

    # The single term present in j1's antecedent but not j2's is the conclusion.
    set_difference_of_terms = list(set(j1_components) - set(j2_components))

    assert len(set_difference_of_terms) == 1, "Error, should only have one term in set difference: " + str([term.get_formatted_string() for term in set_difference_of_terms])

    result_statement: NALGrammar.Terms.StatementTerm = set_difference_of_terms[0]

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Abduction)
336 |
--------------------------------------------------------------------------------
/NALInferenceRules/ExtendedBooleanOperators.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Extended Boolean Operators ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 |
def band(*argv):
    """
    Boolean AND

    -----------------

    Input:
        argv: NAL Boolean Values

    Returns:
        argv1*argv2*...*argvn
    """
    product = 1
    for value in argv:
        product *= value
    return product
29 |
def band_average(*argv):
    """
    Boolean AND, with an exponent inversely proportional to the number of terms
    being ANDed (i.e. the geometric mean of the values).

    -----------------

    Input:
        argv: NAL Boolean Values

    Returns:
        (argv1*argv2*...*argvn)^(1/n)
    """
    # Guard the empty case: matches band()'s empty-product convention and
    # avoids the ZeroDivisionError the original raised for zero arguments.
    if len(argv) == 0:
        return 1

    product = 1
    for value in argv:
        product *= value
    return product ** (1 / len(argv))
47 |
48 |
49 |
def bor(*argv):
    """
    Boolean OR

    -----------------

    Input:
        argv: NAL Boolean Values

    Returns:
        1-((1-argv1)*(1-argv2)*...*(1-argvn))
    """
    # Multiply the complements, then complement the product.
    complement_product = 1
    for value in argv:
        complement_product *= (1 - value)
    return 1 - complement_product
66 |
67 |
def bnot(arg):
    """
    Boolean Not

    -----------------

    Input:
        arg: NAL Boolean Value

    Returns:
        1 minus arg
    """
    complement = 1 - arg
    return complement
81 |
--------------------------------------------------------------------------------
/NALInferenceRules/HelperFunctions.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import Config
4 | import Global
5 | import NALGrammar
6 | import NALSyntax
7 | from NALInferenceRules.TruthValueFunctions import TruthFunctionOnArrayAndRevise, TruthFunctionOnArray, F_Deduction, \
8 | F_Revision, F_Abduction
9 |
10 | import NALInferenceRules.Local
11 |
12 |
def get_truthvalue_from_evidence(wp, w):
    """
    Compute a (frequency, confidence) pair from evidence counts.

    Input:
        wp: positive evidence w+

        w: total evidence w
    Returns:
        frequency, confidence
    """
    if wp == 0 and w == 0:
        # special case, 0/0: no evidence at all, frequency defaults to 0.
        # (Fix: the original's separate `if wp == w` then overwrote this with
        # f = 1.0, contradicting the special case; the branches must chain.)
        f = 0
    elif wp == w:
        # all evidence is positive
        f = 1.0
    else:
        f = wp / w
    c = get_confidence_from_evidence(w)
    return f, c
31 |
32 |
def get_evidence_fromfreqconf(f, c):
    """
    Input:
        f: frequency

        c: confidence
    Returns:
        w+, w, w-
    """
    # Invert the evidence mapping: w = k*c/(1-c), w+ = k*f*c/(1-c), w- = w - w+.
    positive_evidence = Config.k * f * c / (1 - c)
    total_evidence = Config.k * c / (1 - c)
    negative_evidence = total_evidence - positive_evidence
    return positive_evidence, total_evidence, negative_evidence
45 |
46 |
def get_confidence_from_evidence(w):
    """
    Convert total evidence into confidence via c = w / (w + k),
    where k is the evidential horizon constant (Config.k).

    Input:
        w: Total evidence
    Returns:
        confidence
    """
    return w / (w + Config.k)
55 |
56 |
57 |
58 |
def create_resultant_sentence_two_premise(j1, j2, result_statement, truth_value_function):
    """
    Creates the resultant sentence between 2 premises, the resultant statement, and the truth function

    :param j1: premise sentence
    :param j2: premise sentence
    :param truth_value_function: binary truth-value function f(f1, c1, f2, c2)
    :return: the derived sentence (Judgment, Goal, or Question, per premise_result_type)
    """
    result_statement = NALGrammar.Terms.simplify(result_statement)

    result_type = premise_result_type(j1,j2)

    if result_type == NALGrammar.Sentences.Judgment or result_type == NALGrammar.Sentences.Goal:
        # Judgment or Goal
        # Get Truth Value
        higher_order_statement = isinstance(result_statement,
                                            NALGrammar.Terms.StatementTerm) and not result_statement.is_first_order()

        if higher_order_statement:
            # higher-order result: use the premises' stored truth values directly
            (f1, c1) = (j1.value.frequency, j1.value.confidence)
            (f2, c2) = (j2.value.frequency, j2.value.confidence)
        else:
            # first-order result: project both premises to the present moment first
            (f1, c1) = (j1.get_present_value().frequency, j1.get_present_value().confidence)
            (f2, c2) = (j2.get_present_value().frequency, j2.get_present_value().confidence)

        result_truth = truth_value_function(f1, c1, f2, c2)
        occurrence_time = None

        # if the result is a first-order statement, or a higher-order compound statement, it may need an occurrence time

        if (j1.is_event() or j2.is_event()) and not higher_order_statement:
            occurrence_time = Global.Global.get_current_cycle_number()

    # Build the conclusion; Questions carry no truth value.
    if result_type == NALGrammar.Sentences.Judgment:
        result = NALGrammar.Sentences.Judgment(result_statement, result_truth,
                                               occurrence_time=occurrence_time)
    elif result_type == NALGrammar.Sentences.Goal:
        result = NALGrammar.Sentences.Goal(result_statement, result_truth, occurrence_time=occurrence_time)
    elif result_type == NALGrammar.Sentences.Question:
        result = NALGrammar.Sentences.Question(result_statement)

    if not result.is_event():
        # merge in the parent sentences' evidential bases
        result.stamp.evidential_base.merge_sentence_evidential_base_into_self(j1)
        result.stamp.evidential_base.merge_sentence_evidential_base_into_self(j2)
    else:
        # event evidential bases expire too quickly to track
        pass


    stamp_and_print_inference_rule(result, truth_value_function, [j1,j2])

    return result
113 |
def create_resultant_sentence_one_premise(j, result_statement, truth_value_function, result_truth=None):
    """
    Creates the resultant sentence for 1 premise, the resultant statement, and the truth function
    if truth function is None, uses j's truth-value

    :param j: the single premise sentence
    :param truth_value_function: unary truth-value function f(f, c), or None to reuse j's value
    :param result_truth: Optional truth result
    :return: the derived sentence (same type as j)
    """
    result_statement = NALGrammar.Terms.simplify(result_statement)
    result_type = type(j)
    if result_type == NALGrammar.Sentences.Judgment or result_type == NALGrammar.Sentences.Goal:
        # Get Truth Value
        result_truth_array = None  # NOTE(review): appears unused below — confirm before removing
        if result_truth is None:
            if truth_value_function is None:
                result_truth = j.value #NALGrammar.Values.TruthValue(j.value.frequency,j.value.confidence)
            else:
                result_truth = truth_value_function(j.value.frequency, j.value.confidence)


    # Build the conclusion; the occurrence time is inherited from the premise.
    if result_type == NALGrammar.Sentences.Judgment:
        result = NALGrammar.Sentences.Judgment(result_statement, result_truth,
                                               occurrence_time=j.stamp.occurrence_time)
    elif result_type == NALGrammar.Sentences.Goal:
        result = NALGrammar.Sentences.Goal(result_statement, result_truth,
                                           occurrence_time=j.stamp.occurrence_time)
    elif result_type == NALGrammar.Sentences.Question:
        result = NALGrammar.Sentences.Question(result_statement)


    # A None truth function marks a structural transformation: credit j's own parents.
    if truth_value_function is None:
        stamp_and_print_inference_rule(result, truth_value_function, j.stamp.parent_premises)
    else:
        stamp_and_print_inference_rule(result, truth_value_function, [j])

    return result
152 |
def stamp_and_print_inference_rule(sentence, inference_rule, parent_sentences):
    """
    Record a derived sentence's provenance: name the inference rule on its stamp,
    attach the parent premises, and conditionally debug-print the derivation.

    :param sentence: the derived sentence to stamp
    :param inference_rule: the truth-value function used, or None for a structural transformation
    :param parent_sentences: the sentences this derivation came from
    """
    sentence.stamp.derived_by = "Structural Transformation" if inference_rule is None else inference_rule.__name__

    sentence.stamp.parent_premises = []


    # NOTE(review): parent_strings is only populated by the commented-out code
    # below, so the debug print always shows an empty list.
    parent_strings = []
    for parent in parent_sentences:
        sentence.stamp.parent_premises.append(parent)

        # if isinstance(parent.statement, NALGrammar.Terms.SpatialTerm):
        #     parent_strings.append("CENTER: " + str(parent.statement.center) + " | DIM:"
        #                       + str(parent.statement.dimensions) + " " + str(parent.value)
        #                           + " -- pooled? " + str(parent.statement.of_spatial_terms))
        # elif isinstance(parent.statement, NALGrammar.Terms.CompoundTerm) and not isinstance(parent, NALGrammar.Sentences.Goal):
        #     parent_strings.append("CENTER1: " + str(parent.statement.subterms[0].center) + " | DIM:"
        #                           + str(parent.statement.subterms[0].dimensions)
        #                           + " -- pooled? " + str(parent.statement.subterms[0].of_spatial_terms)
        #                           + " " + str(parent.value))
        #
        #     parent_strings.append("CENTER2: " + str(parent.statement.subterms[1].center) + " | DIM:"
        #                           + str(parent.statement.subterms[1].dimensions) + " " + str(parent.value)
        #                           + " -- pooled? " + str(parent.statement.subterms[1].of_spatial_terms))
        #
        # else:
        #     parent_strings.append("other " + str(parent.value))


    # Only first-order deduction Judgments are traced, to limit debug noise.
    if inference_rule is F_Deduction and isinstance(sentence, NALGrammar.Sentences.Judgment) and sentence.statement.is_first_order():
        Global.Global.debug_print(sentence.stamp.derived_by
                                  + " derived " + sentence.get_formatted_string()
                                  + " by parents " + str(parent_strings))
185 |
def premise_result_type(j1,j2):
    """
    Given 2 sentence premises, determines the type of the resultant sentence
    """
    # A non-Judgment premise (Question or Goal) dictates the conclusion's type;
    # two Judgments yield a Judgment. j1 takes precedence, as in the original chain.
    for premise in (j1, j2):
        if not isinstance(premise, NALGrammar.Sentences.Judgment):
            return type(premise)
    return NALGrammar.Sentences.Judgment
196 |
def convert_to_interval(working_cycles):
    """
    return interval from working cycles
    """
    # Currently the identity mapping; earlier experiments used sqrt/log scalings.
    return working_cycles
203 |
def convert_from_interval(interval):
    """
    return working cycles from interval
    """
    # Identity mapping — the inverse of convert_to_interval.
    return interval
210 |
def interval_weighted_average(interval1, interval2, weight1, weight2):
    """Return the weight-averaged interval of two intervals, rounded to an integer."""
    weighted_sum = interval1 * weight1 + interval2 * weight2
    total_weight = weight1 + weight2
    return round(weighted_sum / total_weight)
213 |
def get_unit_evidence():
    """Return the confidence contribution of a single piece of evidence: 1 / (1 + k)."""
    return 1 / (1 + Config.k)
--------------------------------------------------------------------------------
/NALInferenceRules/Immediate.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Immediate Inference Rules ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import Asserts
13 | import Global
14 | import NALGrammar
15 | import NALSyntax
16 | import NALInferenceRules
17 |
18 |
def Negation(j):
    """
    Negation

    -----------------

    Input:
        j: Sentence (Statement )

    Returns:
        the negation of j's statement, with F_Negation applied to its truth value
    """
    Asserts.assert_sentence(j)

    # Wrap the statement in a negation compound: (--, S)
    negated_statement = NALGrammar.Terms.CompoundTerm(subterms=[j.statement],
                                                      term_connector=NALSyntax.TermConnector.Negation)

    return NALInferenceRules.HelperFunctions.create_resultant_sentence_one_premise(j,
                                                                                   negated_statement,
                                                                                   NALInferenceRules.TruthValueFunctions.F_Negation)
33 |
34 |
def Conversion(j):
    """
    Conversion Rule

    Reverses the subject and predicate.
    -----------------

    Input:
        j: Sentence (S --> P )

        must have a frequency above zero, or else the confidence of the conclusion will be zero

    Truth Val:
        w+: and(f1,c1)
        w-: 0
    Returns:
        :- Sentence (P --> S )
    """
    Asserts.assert_sentence_asymmetric(j)

    # Swap the statement's sides while keeping its copula.
    subject = j.statement.get_subject_term()
    predicate = j.statement.get_predicate_term()
    converted_statement = NALGrammar.Terms.StatementTerm(predicate,
                                                         subject,
                                                         j.statement.get_copula())

    return NALInferenceRules.HelperFunctions.create_resultant_sentence_one_premise(j,
                                                                                   converted_statement,
                                                                                   NALInferenceRules.TruthValueFunctions.F_Conversion)
61 |
62 |
def Contraposition(j):
    """
    Contraposition
    Inputs:
        j: (S ==> P)

        Frequency must be below one or confidence of conclusion will be zero

    :param j:
    :return: ((--,P) ==> (--,S))
    """
    Asserts.assert_sentence_forward_implication(j)

    def _negate(term):
        # Wrap a term in a negation compound: (--, term)
        return NALGrammar.Terms.CompoundTerm([term], NALSyntax.TermConnector.Negation)

    # Negate each side, then swap them: (S ==> P) becomes ((--,P) ==> (--,S)).
    result_statement = NALGrammar.Terms.StatementTerm(_negate(j.statement.get_predicate_term()),
                                                      _negate(j.statement.get_subject_term()),
                                                      j.statement.get_copula())

    return NALInferenceRules.HelperFunctions.create_resultant_sentence_one_premise(j,
                                                                                   result_statement,
                                                                                   NALInferenceRules.TruthValueFunctions.F_Contraposition)
86 |
87 |
def ExtensionalImage(j):
    """
    Extensional Image
    Inputs:
        j: ((*,S,...,P) --> R)

    :param j:
    :returns: array of
        (S --> (/,R,_,...,P))
        (P --> (/,R,S,...,_))
        ...
    """
    Asserts.assert_sentence_inheritance(j)

    product_subterms = j.statement.get_subject_term().subterms
    relation_term = j.statement.get_predicate_term()

    results = []
    # Produce one image statement per product component, replacing that
    # component's slot with the image placeholder "_".
    for placeholder_position, component in enumerate(product_subterms):
        image_subterms = [relation_term]
        for position, other_subterm in enumerate(product_subterms):
            if position == placeholder_position:
                image_subterms.append(Global.Global.TERM_IMAGE_PLACEHOLDER)
            else:
                image_subterms.append(other_subterm)

        image_term = NALGrammar.Terms.CompoundTerm(image_subterms,
                                                   NALSyntax.TermConnector.ExtensionalImage)

        result_statement = NALGrammar.Terms.StatementTerm(component,
                                                          image_term,
                                                          NALSyntax.Copula.Inheritance)

        results.append(
            NALInferenceRules.HelperFunctions.create_resultant_sentence_one_premise(j, result_statement, None))

    return results
128 |
129 |
def IntensionalImage(j):
    r"""
    Intensional Image
    Inputs:
        j: (R --> (*,S,P))

    :param j:
    :returns: array of
        ((\,R,_,P) --> S)
        and
        ((\,R,S,_) --> P)
    """
    Asserts.assert_sentence_inheritance(j)

    results = []
    # Statement
    statement_subterms = j.statement.get_predicate_term().subterms
    R = j.statement.get_subject_term()

    for i1 in range(0, len(statement_subterms)):
        subterm = statement_subterms[i1]

        # Replace position i1 with the image placeholder "_".
        image_subterms = [R]
        for i2 in range(0, len(statement_subterms)):
            if i1 != i2:
                image_subterms.append(statement_subterms[i2])
            elif i1 == i2:
                image_subterms.append(Global.Global.TERM_IMAGE_PLACEHOLDER)

        # Fix: use the *intensional* image connector (\) here; the original
        # used TermConnector.ExtensionalImage, copy-pasted from ExtensionalImage above.
        image_term = NALGrammar.Terms.CompoundTerm(image_subterms,
                                                   NALSyntax.TermConnector.IntensionalImage)

        result_statement = NALGrammar.Terms.StatementTerm(image_term,
                                                          subterm,
                                                          NALSyntax.Copula.Inheritance)

        result = NALInferenceRules.HelperFunctions.create_resultant_sentence_one_premise(j, result_statement, None)
        results.append(result)

    return results
--------------------------------------------------------------------------------
/NALInferenceRules/Local.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Local Inference Rules ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import Asserts
13 | import Config
14 | import Global
15 | import NALGrammar
16 | import NALSyntax
17 | from NALInferenceRules import HelperFunctions, TruthValueFunctions
18 |
19 |
def Revision(j1, j2):
    """
    Revision Rule

    Assumes: j1 and j2 do not have evidential overlap
    -----------------

    Revises two instances of the same statement / sentence with different truth values.

    Input:
        j1: Sentence (Statement )

        j2: Sentence (Statement )
    Returns:
        :- Sentence (Statement )
    """
    Asserts.assert_sentence(j1)
    Asserts.assert_sentence(j2)
    assert (
        j1.statement.get_term_string() == j2.statement.get_term_string()), "Cannot revise sentences for 2 different statements"

    is_sequential_conjunction = isinstance(j1.statement, NALGrammar.Terms.CompoundTerm) \
                                and j1.statement.connector == NALSyntax.TermConnector.SequentialConjunction

    if is_sequential_conjunction:
        # Sequential conjunctions carry time intervals; revise each interval as a
        # confidence-weighted average of the two premises' intervals.
        merged_intervals = [
            HelperFunctions.interval_weighted_average(interval1=j1.statement.intervals[i],
                                                      interval2=j2.statement.intervals[i],
                                                      weight1=j1.value.confidence,
                                                      weight2=j2.value.confidence)
            for i in range(len(j1.statement.intervals))
        ]
        result_statement = NALGrammar.Terms.CompoundTerm(subterms=j1.statement.subterms,
                                                         term_connector=j1.statement.connector,
                                                         intervals=merged_intervals)
    else:
        result_statement = j1.statement

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Revision)
60 |
61 |
62 |
63 |
def Choice(j1, j2, only_confidence=False):
    """
    Choice Rule

    -----------------

    Choose the better answer (according to the choice rule) between 2 different sentences.
    If the statements are the same, the statement with the highest confidence is chosen.
    If they are different, the statement with the highest expectation is chosen.

    Input:
        j1: Sentence (Statement )

        j2: Sentence (Statement )

    Returns:
        j1 or j2, depending on which is better according to the choice rule
    """
    Asserts.assert_sentence(j1)
    Asserts.assert_sentence(j2)

    # Truth values projected to the present moment.
    value1 = j1.get_present_value()
    value2 = j2.get_present_value()

    if only_confidence or j1.statement == j2.statement:
        # Same statement (or forced): compare confidence directly.
        score1, score2 = value1.confidence, value2.confidence
    else:
        # Different statements: compare expectation.
        score1 = TruthValueFunctions.Expectation(value1.frequency, value1.confidence)
        score2 = TruthValueFunctions.Expectation(value2.frequency, value2.confidence)

    # Ties favor j1, as in the original.
    return j1 if score1 >= score2 else j2
105 |
106 |
def Decision(j):
    """
    Decision Rule

    -----------------

    Make the decision to pursue a desire based on its expected desirability

    Input:
        j: the Goal sentence whose desire-value is evaluated

    Returns:
        True or false, whether to pursue the goal
    """
    present_value = j.get_present_value()
    expected_desirability = TruthValueFunctions.Expectation(present_value.frequency,
                                                            present_value.confidence)
    # Pursue only when the expected desirability exceeds the decision threshold.
    return expected_desirability > Config.T
125 |
def Eternalization(j):
    """
    Eternalization

    Converts an event Judgment into an eternal (tenseless) Judgment by
    discounting its confidence with F_Eternalization.

    :param j: the sentence to eternalize (only Judgments are supported)
    :return: Eternalized form of j
    """
    Asserts.assert_sentence(j)

    if isinstance(j, NALGrammar.Sentences.Judgment):
        result_truth = TruthValueFunctions.F_Eternalization(j.value.frequency, j.value.confidence)
        result = NALGrammar.Sentences.Judgment(j.statement, result_truth, occurrence_time=None)
    else:
        # Fix: the original `assert "error"` was a no-op (a non-empty string is
        # truthy), so unsupported types fell through to a NameError on `result`.
        assert False, "error: cannot eternalize sentence of type " + str(type(j))

    result.stamp.evidential_base.merge_sentence_evidential_base_into_self(j)

    return result
143 |
def Projection(j, occurrence_time):
    """
    Projection.

    Returns a sentence j projected to the given occurrence time.

    :param j: the Judgment or Goal to project
    :param occurrence_time: occurrence time to project j to
    :return: Projected form of j
    """
    Asserts.assert_sentence(j)

    # Goals (desires) decay at a different rate than event judgments.
    decay = Config.PROJECTION_DECAY_EVENT
    if isinstance(j, NALGrammar.Sentences.Goal):
        decay = Config.PROJECTION_DECAY_DESIRE
    result_truth = TruthValueFunctions.F_Projection(j.value.frequency,
                                                    j.value.confidence,
                                                    j.stamp.occurrence_time,
                                                    occurrence_time,
                                                    decay=decay)


    if isinstance(j, NALGrammar.Sentences.Judgment):
        result = NALGrammar.Sentences.Judgment(j.statement, result_truth, occurrence_time=occurrence_time)
    elif isinstance(j, NALGrammar.Sentences.Goal):
        result = NALGrammar.Sentences.Goal(j.statement, result_truth, occurrence_time=occurrence_time)
    else:
        # Fix: the original `assert "error"` was a no-op (a non-empty string is
        # truthy), so unsupported types fell through to a NameError on `result`.
        assert False, "error: cannot project sentence of type " + str(type(j))

    result.stamp.evidential_base.merge_sentence_evidential_base_into_self(j)

    return result
176 |
def Value_Projection(j,occurrence_time):
    """
    Projection; only returns a value

    Returns j's truth value projected to the given occurrence time.

    :param j:
    :param occurrence_time: occurrence time to project j to
    :return: project value of j
    """
    Asserts.assert_sentence(j)

    # Goals use the desire decay rate; everything else uses the event rate.
    if isinstance(j, NALGrammar.Sentences.Goal):
        decay = Config.PROJECTION_DECAY_DESIRE
    else:
        decay = Config.PROJECTION_DECAY_EVENT

    return TruthValueFunctions.F_Projection(j.value.frequency,
                                            j.value.confidence,
                                            j.stamp.occurrence_time,
                                            occurrence_time,
                                            decay=decay)
--------------------------------------------------------------------------------
/NALInferenceRules/Syllogistic.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Syllogistic Inference Rules ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
8 | Purpose: Defines the NAL inference rules
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import Asserts
13 | import NALGrammar
14 | import NALSyntax
15 | from NALInferenceRules import TruthValueFunctions, HelperFunctions
16 |
17 |
def Deduction(j1, j2):
    """
    Deduction (Strong syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (M --> P )

        j2: Sentence (S --> M )
    Truth Val:
        F_ded
    Returns:
        :- Sentence (S --> P )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # conclusion (S --> P): subject comes from j2, predicate from j1
    subject = j2.statement.get_subject_term()
    predicate = j1.statement.get_predicate_term()
    conclusion = NALGrammar.Terms.StatementTerm(subject, predicate, j1.statement.get_copula())

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, conclusion,
                                                                 TruthValueFunctions.F_Deduction)
46 |
47 |
48 |
def Analogy(j1, j2):
    """
    Analogy (Strong syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (M --> P ) or (P --> M )

        j2: Sentence (S <-> M )
    Truth Val:
        F_ana
    Returns: (depending on j1)
        :- Sentence (S --> P ) or (P --> S )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_symmetric(j2)

    j1_subject = j1.statement.get_subject_term()
    j1_predicate = j1.statement.get_predicate_term()
    j2_subject = j2.statement.get_subject_term()
    j2_predicate = j2.statement.get_predicate_term()

    # find the shared middle term M, then relate the remaining two terms
    if j1_subject == j2_predicate:
        # j1=M-->P, j2=S<->M
        terms = (j2_subject, j1_predicate)     # S-->P
    elif j1_subject == j2_subject:
        # j1=M-->P, j2=M<->S
        terms = (j2_predicate, j1_predicate)   # S-->P
    elif j1_predicate == j2_predicate:
        # j1=P-->M, j2=S<->M
        terms = (j1_subject, j2_subject)       # P-->S
    elif j1_predicate == j2_subject:
        # j1=P-->M, j2=M<->S
        terms = (j1_subject, j2_predicate)     # P-->S
    else:
        assert (
            False), "Error: Invalid inputs to nal_analogy: " + j1.get_formatted_string() + " and " + j2.get_formatted_string()

    result_statement = NALGrammar.Terms.StatementTerm(terms[0], terms[1], j1.statement.get_copula())

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, result_statement,
                                                                 TruthValueFunctions.F_Analogy)
102 |
103 |
104 |
def Resemblance(j1, j2):
    """
    Resemblance (Strong syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (M <-> P )
        or
        j1: Sentence (P <-> M )

        j2: Sentence (S <-> M )
        or
        j2: Sentence (M <-> S )
    Truth Val:
        F_res
    Returns:
        :- Sentence (S <-> P )
    """
    Asserts.assert_sentence_symmetric(j1)
    Asserts.assert_sentence_symmetric(j2)

    # Statement: locate the shared middle term M, then relate the two
    # remaining terms S (from j2) and P (from j1).
    if j1.statement.get_subject_term() == j2.statement.get_predicate_term():
        # j1=M<->P, j2=S<->M
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_subject_term(),
                                                          j1.statement.get_predicate_term(),
                                                          j1.statement.get_copula())  # S<->P
    elif j1.statement.get_subject_term() == j2.statement.get_subject_term():
        # j1=M<->P, j2=M<->S
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_predicate_term(),
                                                          j1.statement.get_predicate_term(),
                                                          j1.statement.get_copula())  # S<->P
    elif j1.statement.get_predicate_term() == j2.statement.get_predicate_term():
        # j1=P<->M, j2=S<->M
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_subject_term(),
                                                          j1.statement.get_subject_term(),
                                                          j1.statement.get_copula())  # S<->P
    elif j1.statement.get_predicate_term() == j2.statement.get_subject_term():
        # j1=P<->M, j2=M<->S
        # BUG FIX: previously paired j2's predicate (S) with j2's subject (M),
        # yielding S<->M instead of S<->P; P must come from j1's subject.
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_predicate_term(),
                                                          j1.statement.get_subject_term(),
                                                          j1.statement.get_copula())  # S<->P
    else:
        assert (
            False), "Error: Invalid inputs to nal_resemblance: " + j1.get_formatted_string() + " and " + j2.get_formatted_string()

    return HelperFunctions.create_resultant_sentence_two_premise(j1,
                                                                 j2,
                                                                 result_statement,
                                                                 TruthValueFunctions.F_Resemblance)
157 |
def Abduction(j1, j2):
    """
    Abduction (Weak syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (P --> M )

        j2: Sentence (S --> M )
    Evidence:
        F_abd
    Returns:
        :- Sentence (S --> P )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # conclusion (S --> P): both terms are premise subjects (shared M is the predicate)
    conclusion = NALGrammar.Terms.StatementTerm(j2.statement.get_subject_term(),
                                                j1.statement.get_subject_term(),
                                                j1.statement.get_copula())

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, conclusion,
                                                                 TruthValueFunctions.F_Abduction)
185 |
186 |
def Induction(j1, j2):
    """
    Induction (Weak syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (M --> P )

        j2: Sentence (M --> S )
    Evidence:
        F_ind
    Returns:
        :- Sentence (S --> P )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # conclusion (S --> P): both terms are premise predicates (shared M is the subject)
    conclusion = NALGrammar.Terms.StatementTerm(j2.statement.get_predicate_term(),
                                                j1.statement.get_predicate_term(),
                                                j1.statement.get_copula())

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, conclusion,
                                                                 TruthValueFunctions.F_Induction)
214 |
215 |
def Exemplification(j1, j2):
    """
    Exemplification (Weak syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (P --> M )

        j2: Sentence (M --> S )
    Evidence:
        F_exe
    Returns:
        :- Sentence (S --> P )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # conclusion (S --> P): S is j2's predicate, P is j1's subject
    conclusion = NALGrammar.Terms.StatementTerm(j2.statement.get_predicate_term(),
                                                j1.statement.get_subject_term(),
                                                j1.statement.get_copula())

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, conclusion,
                                                                 TruthValueFunctions.F_Exemplification)
242 |
def Comparison(j1, j2):
    """
    Comparison (Weak syllogism)

    -----------------
    Assumes: j1 and j2 do not have evidential overlap

    Input:
        j1: Sentence (M --> P )
        j2: Sentence (M --> S )

        or

        j1: Sentence (P --> M )
        j2: Sentence (S --> M )
    Evidence:
        F_com
    Returns:
        :- Sentence (S <-> P )
    """
    Asserts.assert_sentence_asymmetric(j1)
    Asserts.assert_sentence_asymmetric(j2)

    # symmetric copula: <-> for first-order premises, <=> for higher-order
    if j1.statement.is_first_order():
        symmetric_copula = NALSyntax.Copula.Similarity
    else:
        symmetric_copula = NALSyntax.Copula.Equivalence

    if j1.statement.get_subject_term() == j2.statement.get_subject_term():
        # shared subject M: (M --> P) and (M --> S)
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_predicate_term(),
                                                          j1.statement.get_predicate_term(),
                                                          symmetric_copula)
    elif j1.statement.get_predicate_term() == j2.statement.get_predicate_term():
        # shared predicate M: (P --> M) and (S --> M)
        result_statement = NALGrammar.Terms.StatementTerm(j2.statement.get_subject_term(),
                                                          j1.statement.get_subject_term(),
                                                          symmetric_copula)
    else:
        assert False, "Error: Invalid inputs to nal_comparison: " + j1.get_formatted_string() + " and " + j2.get_formatted_string()

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, result_statement,
                                                                 TruthValueFunctions.F_Comparison)
288 |
--------------------------------------------------------------------------------
/NALInferenceRules/Temporal.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 |
5 | import NALGrammar
6 | import NALSyntax
7 | from NALInferenceRules import TruthValueFunctions, HelperFunctions
8 | """
9 | ==== ==== ==== ==== ==== ====
    ==== NAL Inference Rules - Temporal Inference Rules ====
11 | ==== ==== ==== ==== ==== ====
12 |
13 | Author: Christian Hahm
14 | Created: August 11, 2021
15 | Purpose: Defines the NAL temporal inference rules
16 | Assumes the given sentences do not have evidential overlap.
17 | Does combine evidential bases in the Resultant Sentence.
18 | """
19 |
def TemporalIntersection(j1, j2):
    """
    Temporal Intersection

    Input:
        j1: Event S {tense}

        j2: Event P {tense}
    Evidence:
        F_Intersection
    Returns:
        :- Event (S &/ P )
        :- or Event (P &/ S )
        :- or Event (S &| P )
    """
    assert j1.get_tense() != NALSyntax.Tense.Eternal and j2.get_tense() != NALSyntax.Tense.Eternal,"ERROR: Temporal Intersection needs events"

    term_1 = j1.statement
    term_2 = j2.statement

    # S && S simplifies to S, so there is no inference to perform
    if term_1 == term_2: return None

    # TODO restore temporal component: pick &| (same occurrence time) or &/ with
    # an interval (different occurrence times, earlier premise first) instead of
    # collapsing everything into a plain conjunction as done below.
    conjunction = NALGrammar.Terms.CompoundTerm([term_1, term_2],
                                                NALSyntax.TermConnector.Conjunction)

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, conjunction,
                                                                 TruthValueFunctions.F_Intersection)
64 |
def TemporalInduction(j1, j2):
    """
    Temporal Induction

    Input:
        j1: Event S {tense}

        j2: Event P {tense}
    Evidence:
        F_induction
    Returns:
        :- Sentence (S =|> P )
        :- or Sentence (S =/> P )
        :- or Sentence (P =/> S )
    """
    assert j1.get_tense() != NALSyntax.Tense.Eternal and j2.get_tense() != NALSyntax.Tense.Eternal,"ERROR: Temporal Induction needs events"

    antecedent = j1.statement
    consequent = j2.statement

    # S =/> S simplifies to S, so there is no inference to perform
    if antecedent == consequent: return None
    # operations are excluded as consequents
    if consequent.is_op(): return None

    # TODO restore temporal component: pick =|> (same occurrence time) or =/>
    # with an interval (ordered by occurrence time) instead of the plain
    # implication used below.
    implication = NALGrammar.Terms.StatementTerm(antecedent, consequent,
                                                 NALSyntax.Copula.Implication)

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, implication,
                                                                 TruthValueFunctions.F_Induction)
112 |
113 |
def TemporalComparison(j1, j2):
    """
    Temporal Comparison

    Input:
        A: Event S {tense}

        B: Event P {tense}
    Evidence:
        F_comparison
    Returns:
        :- Sentence (S <|> P )
        :- or Sentence (S > P )
        :- or Sentence (P > S )
    """
    assert j1.get_tense() != NALSyntax.Tense.Eternal and j2.get_tense() != NALSyntax.Tense.Eternal, "ERROR: Temporal Comparison needs events"

    term_1 = j1.statement
    term_2 = j2.statement

    # S > S simplifies to S, so there is no inference to perform
    if term_1 == term_2: return None

    time_1 = j1.stamp.occurrence_time
    time_2 = j2.stamp.occurrence_time
    if time_1 == time_2:
        # concurrent: S <|> P
        result_statement = NALGrammar.Terms.StatementTerm(term_1, term_2,
                                                          NALSyntax.Copula.ConcurrentEquivalence)
    elif time_1 < time_2:
        # j1 occurred first: S > P
        result_statement = NALGrammar.Terms.StatementTerm(term_1, term_2,
                                                          NALSyntax.Copula.PredictiveEquivalence)
    else:
        # j2 occurred first: P > S
        result_statement = NALGrammar.Terms.StatementTerm(term_2, term_1,
                                                          NALSyntax.Copula.PredictiveEquivalence)

    return HelperFunctions.create_resultant_sentence_two_premise(j1, j2, result_statement,
                                                                 TruthValueFunctions.F_Comparison)
153 |
154 |
155 |
--------------------------------------------------------------------------------
/NALInferenceRules/TruthValueFunctions.py:
--------------------------------------------------------------------------------
1 | """
2 | ==== ==== ==== ==== ==== ====
3 | ==== NAL Inference Rules - Truth Value Functions ====
4 | ==== ==== ==== ==== ==== ====
5 |
6 | Author: Christian Hahm
7 | Created: October 8, 2020
    Purpose: Defines the NAL truth value functions
9 | Assumes the given sentences do not have evidential overlap.
10 | Does combine evidential bases in the Resultant Sentence.
11 | """
12 | import Config
13 | import Global
14 | import NALGrammar
15 | import NALInferenceRules
16 | from NALInferenceRules import ExtendedBooleanOperators
17 | import numpy as np
18 |
def F_Revision(f1,c1,f2,c2):
    """
    Revision: pool the evidence of two truth values about the same statement.

    :return: F_rev: Truth-Value (f,c)
    """
    positive_1, total_1, _ = NALInferenceRules.HelperFunctions.get_evidence_fromfreqconf(f1, c1)
    positive_2, total_2, _ = NALInferenceRules.HelperFunctions.get_evidence_fromfreqconf(f2, c2)
    # combine the evidence of both premises, then convert back to (f,c)
    f_rev, c_rev = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(positive_1 + positive_2,
                                                                                  total_1 + total_2)
    return NALGrammar.Values.TruthValue(f_rev, c_rev)
30 |
31 |
def F_Negation(f, c):
    """
    f_neg = 1 - f
    c_neg = c
    :return: F_neg: Truth-Value (f,c)
    """
    # frequency is inverted; confidence is unchanged
    negated_frequency = 1 - f
    return NALGrammar.Values.TruthValue(negated_frequency, c)
39 |
40 |
def F_Conversion(f, c):
    """
    f_cnv = 1
    c_cnv = (f*c)/(f*c+k)
    :return: F_cnv: Truth-Value (f,c)
    """
    # the conclusion only has positive evidence w = f*c
    positive_evidence = f * c
    c_cnv = positive_evidence / (positive_evidence + Config.k)
    return NALGrammar.Values.TruthValue(1.0, c_cnv)
51 |
52 |
def F_Contraposition(f, c):
    """
    Contraposition.

    wp = 0
    wn = AND(NOT(f), c)
    :return: F_cnt: Truth-Value (f,c)
    """
    # BUG FIX: the previous placeholder (marked "#todo") returned
    # (f, AND(f, c)), which did not match the documented contract above.
    # Contraposition produces only negative evidence: wp = 0, wn = AND(NOT(f), c).
    wn = ExtendedBooleanOperators.band(ExtendedBooleanOperators.bnot(f), c)
    f_cnt, c_cnt = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(0, wn)
    return NALGrammar.Values.TruthValue(f_cnt, c_cnt)
61 |
62 |
def F_Deduction(f1, c1, f2, c2):
    """
    f_ded: and(f1,f2)
    c_ded: and(f1,f2,c1,c2)

    :return: F_ded: Truth-Value (f,c)
    """
    band_avg = ExtendedBooleanOperators.band_average
    return NALGrammar.Values.TruthValue(band_avg(f1, f2),
                                        band_avg(f1, f2, c1, c2))
73 |
74 |
75 |
76 |
def F_Analogy(f1, c1, f2, c2):
    """
    f_ana: AND(f1,f2)
    c_ana: AND(f2,c1,c2)

    :return: F_ana: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    return NALGrammar.Values.TruthValue(band(f1, f2), band(f2, c1, c2))
87 |
88 |
def F_Resemblance(f1, c1, f2, c2):
    """
    f_res = AND(f1,f2)
    c_res = AND(OR(f1,f2),c1,c2)

    :return: F_res: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    frequency = band(f1, f2)
    confidence = band(ExtendedBooleanOperators.bor(f1, f2), c1, c2)
    return NALGrammar.Values.TruthValue(frequency, confidence)
100 |
101 |
def F_Abduction(f1, c1, f2, c2):
    """
    wp = AND(f1,f2,c1,c2)
    w = AND(f1,c1,c2)

    :return: F_abd: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    positive_evidence = band(f1, f2, c1, c2)
    total_evidence = band(f1, c1, c2)
    f_abd, c_abd = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(positive_evidence,
                                                                                  total_evidence)
    return NALGrammar.Values.TruthValue(f_abd, c_abd)
113 |
114 |
def F_Induction(f1, c1, f2, c2):
    """
    wp = AND(f1,f2,c1,c2); w = AND(f2,c1,c2)
    :return: F_ind: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    positive_evidence = band(f1, f2, c1, c2)
    total_evidence = band(f2, c1, c2)
    f_ind, c_ind = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(positive_evidence,
                                                                                  total_evidence)
    return NALGrammar.Values.TruthValue(f_ind, c_ind)
123 |
124 |
def F_Exemplification(f1, c1, f2, c2):
    """
    wp = w = AND(f1,f2,c1,c2)
    :return: F_exe: Truth-Value (f,c)
    """
    # all of the evidence is positive
    evidence = ExtendedBooleanOperators.band(f1, f2, c1, c2)
    f_exe, c_exe = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(evidence, evidence)
    return NALGrammar.Values.TruthValue(f_exe, c_exe)
133 |
134 |
def F_Comparison(f1, c1, f2, c2):
    """
    wp = AND(f1,f2,c1,c2); w = AND(OR(f1,f2),c1,c2)
    :return: F_com: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    positive_evidence = band(f1, f2, c1, c2)
    total_evidence = band(ExtendedBooleanOperators.bor(f1, f2), c1, c2)
    f_com, c_com = NALInferenceRules.HelperFunctions.get_truthvalue_from_evidence(positive_evidence,
                                                                                  total_evidence)
    return NALGrammar.Values.TruthValue(f_com, c_com)
143 |
144 |
def F_Intersection(f1, c1, f2, c2):
    """
    :return: F_int: Truth-Value (f,c)
    """
    # NOTE(review): confidence uses OR here rather than the textbook AND --
    # this looks deliberate in this codebase; confirm against callers.
    frequency = ExtendedBooleanOperators.band_average(f1, f2)
    confidence = ExtendedBooleanOperators.bor(c1, c2)
    return NALGrammar.Values.TruthValue(frequency, confidence)
152 |
153 |
def F_Union(f1, c1, f2, c2):
    """
    :return: F_uni: Truth-Value (f,c)
    """
    frequency = ExtendedBooleanOperators.bor(f1, f2)
    confidence = ExtendedBooleanOperators.band_average(c1, c2)
    return NALGrammar.Values.TruthValue(frequency, confidence)
161 |
162 |
def F_Difference(f1, c1, f2, c2):
    """
    f = AND(f1, NOT(f2)); c = AND(c1, c2)
    :return: F_dif: Truth-Value (f,c)
    """
    band = ExtendedBooleanOperators.band
    frequency = band(f1, ExtendedBooleanOperators.bnot(f2))
    return NALGrammar.Values.TruthValue(frequency, band(c1, c2))
170 |
171 |
def F_Projection(frequency, confidence, t_B, t_T, decay):
    """
    Time Projection

    Project the occurrence time of a belief (t_B)
    to another occurrence time (t_T).

    Same frequency, but lower confidence depending on how far apart the times are.
    """
    if t_B == t_T:
        # no time difference, value is unchanged
        return NALGrammar.Values.TruthValue(frequency, confidence)
    time_gap = abs(t_B - t_T)
    # confidence decays exponentially with the size of the gap
    return NALGrammar.Values.TruthValue(frequency, confidence * (decay ** time_gap))
185 |
186 |
def F_Eternalization(temporal_frequency, temporal_confidence):
    """
    Eternalize an event's truth value: frequency is kept,
    confidence is reduced (c_eternal = c / (k + c)).
    """
    eternal_confidence = temporal_confidence / (Config.k + temporal_confidence)
    return NALGrammar.Values.TruthValue(temporal_frequency, eternal_confidence)
190 |
def Expectation(f, c):
    """
    Expectation

    -----------------

    Input:
        f: frequency

        c: confidence

    Returns:
        expectation value
    """
    # e = c*(f - 1/2) + 1/2: interpolates between total ignorance (0.5) and f
    return 0.5 + c * (f - 0.5)
206 |
207 |
def TruthFunctionOnArray(truth_value_array_1, truth_value_array_2, truth_value_function):
    """
    Performs a truth value function element-wise on the array(s).

    :param truth_value_array_1: numpy array of truth-value objects
    :param truth_value_array_2: numpy array of the same shape, or None to apply
        a single-premise truth function to array 1 alone
    :param truth_value_function: function taking (f, c) or (f1, c1, f2, c2)
    :return: numpy array of element-wise results, or None if both inputs are None
    """
    if truth_value_array_1 is None and truth_value_array_2 is None: return None
    if truth_value_array_1 is not None and truth_value_array_2 is not None: assert truth_value_array_1.shape == truth_value_array_2.shape,"ERROR: Truth value arrays must be the same shape"

    # NOTE(review): if truth_value_array_1 is None while array 2 is not, the
    # indexing below fails -- presumably callers never pass that combination; verify.
    def function(*indices):
        # np.fromfunction supplies float coordinates; coerce to an int tuple for indexing
        coords = tuple([int(var) for var in indices])
        truth_value_1 = truth_value_array_1[coords]
        if truth_value_array_2 is None:
            # single truth value
            truth_value = truth_value_function(truth_value_1.frequency,
                                               truth_value_1.confidence)
        else:
            truth_value_2 = truth_value_array_2[coords]
            truth_value = truth_value_function(truth_value_1.frequency,
                                               truth_value_1.confidence,
                                               truth_value_2.frequency,
                                               truth_value_2.confidence)
        return truth_value

    # vectorize so function() is applied once per coordinate of the output shape
    func_vectorized = np.vectorize(function)
    return np.fromfunction(function=func_vectorized,shape=truth_value_array_1.shape)
236 |
237 |
238 |
def ReviseArray(truth_value_array):
    """
    Revises a truth value array into a single truth-value.
    """
    combined = None
    for _, element in np.ndenumerate(truth_value_array):
        if combined is None:
            # first element seeds the accumulator
            combined = element
        else:
            # fold each subsequent element in via revision
            combined = F_Revision(combined.frequency, combined.confidence,
                                  element.frequency, element.confidence)
    return combined
250 |
def TruthFunctionOnArrayAndRevise(truth_value_array_1, truth_value_array_2, truth_value_function):
    """
    Performs a truth value function element-wise on 1 or 2 arrays
    and simultaneously revises it into a single truth-value.

    :param truth_value_array_1: numpy array of truth-value objects (must not be None)
    :param truth_value_array_2: numpy array of the same shape, or None to apply
        a single-premise truth function to array 1 alone
    :param truth_value_function: function taking (f, c) or (f1, c1, f2, c2)
    :return: tuple (revised overall truth-value, array of element-wise truth-values)
    """
    final_truth_value = None  # running revision of every element's result
    final_truth_value_array = np.empty(shape=truth_value_array_1.shape,dtype=NALGrammar.Values.TruthValue)
    for (coords), element in np.ndenumerate(truth_value_array_1):
        truth_value_1 = truth_value_array_1[coords]
        if truth_value_array_2 is None:
            # single truth value
            truth_value = truth_value_function(truth_value_1.frequency,
                                               truth_value_1.confidence)
        else:
            truth_value_2 = truth_value_array_2[coords]
            truth_value = truth_value_function(truth_value_1.frequency,
                                               truth_value_1.confidence,
                                               truth_value_2.frequency,
                                               truth_value_2.confidence)
        final_truth_value_array[coords] = truth_value
        # fold this element's result into the overall revised value
        if final_truth_value is None:
            final_truth_value = truth_value
        else:
            final_truth_value = F_Revision(final_truth_value.frequency,
                                           final_truth_value.confidence,
                                           truth_value.frequency,
                                           truth_value.confidence)

    return final_truth_value,final_truth_value_array
--------------------------------------------------------------------------------
/NALSyntax.py:
--------------------------------------------------------------------------------
1 | import enum
2 |
3 | """
4 | Author: Christian Hahm
5 | Created: October 9, 2020
6 | Purpose: Defines the syntax to be used for Narsese
7 | """
8 |
9 |
class StatementSyntax(enum.Enum):
    """
    Marker characters used when writing/parsing Narsese statements.
    The enum value is the literal character.
    """
    Start = "("
    End = ")"
    TruthValMarker = "%"
    ExpectationMarker = "#"
    ValueSeparator = ";"
    TermDivider = ","
    BudgetMarker = "$"

    # brackets used to index an element of an array term
    ArrayElementIndexStart = "["
    ArrayElementIndexEnd = "]"
21 |
22 |
class Tense(enum.Enum):
    """
    Temporal tense markers for Narsese sentences.
    Eternal sentences carry no marker (value None).
    """
    Future = ":/:"
    Past = ":\:"
    Present = ":|:"
    Eternal = None

    @classmethod
    def get_tense_from_string(cls, value):
        """Return the Tense whose marker equals value, or None if nothing matches."""
        return next((tense for tense in cls if tense.value == value), None)
36 |
37 |
class TermConnector(enum.Enum):
    """
    Connectors used to build compound terms. The enum value is the
    connector's textual symbol, grouped below by the NAL layer that
    introduces it.
    """
    # NAL-2
    ExtensionalSetStart = "{"
    ExtensionalSetEnd = "}"
    IntensionalSetStart = "["
    IntensionalSetEnd = "]"

    # NAL-3
    ExtensionalIntersection = "&"
    IntensionalIntersection = "|"
    ExtensionalDifference = "-"
    IntensionalDifference = "~"

    # NAL-4
    Product = "*"
    ExtensionalImage = "/"
    IntensionalImage = "\\"
    ImagePlaceHolder = "_"

    # NAL-5
    Negation = "--"
    Conjunction = "&&"
    Disjunction = "||"
    SequentialConjunction = "&/"
    ParallelConjunction = "&|"

    # Array
    ArrayConjunction = "@&"
    ArrayDisjunction = "@|"

    @classmethod
    def is_string_a_term_connector(cls, value):
        """True if the string is exactly one of the connector symbols."""
        return value in cls._value2member_map_

    @classmethod
    def get_term_connector_from_string(cls, value):
        """Return the TermConnector for a symbol string, or None if invalid."""
        if not TermConnector.is_string_a_term_connector(value):
            return None

        for connector in cls:
            if value == connector.value:
                return connector

        return None

    @classmethod
    def is_first_order(cls, connector):
        """
        First order connectors are Term Connectors
        Higher order connectors are Statement Connectors

        NOTE(review): ArrayDisjunction ("@|") is not listed among the
        higher-order connectors here even though ArrayConjunction is --
        confirm whether that omission is intentional.
        """
        assert connector is not None,"ERROR: None is not a term connector"
        return not (connector is cls.Negation or
                    connector is cls.Conjunction or
                    connector is cls.Disjunction or
                    connector is cls.SequentialConjunction or
                    connector is cls.ParallelConjunction or
                    connector is cls.ArrayConjunction)

    @classmethod
    def is_order_invariant(cls, connector):
        """True if the connector's subterms may be reordered without changing meaning."""
        return (connector is cls.ExtensionalIntersection or
                connector is cls.IntensionalIntersection or
                connector is cls.ExtensionalSetStart or
                connector is cls.IntensionalSetStart or
                connector is cls.Negation or
                connector is cls.Conjunction or
                connector is cls.Disjunction)

    @classmethod
    def is_conjunction(cls, connector):
        """True for any conjunction variant (&&, &/, &|)."""
        #assert connector is not None, "ERROR: None is not a term connector"
        return (connector is cls.Conjunction or
                connector is cls.SequentialConjunction or
                connector is cls.ParallelConjunction)

    @classmethod
    def contains_conjunction(cls,string):
        """True if any conjunction symbol occurs anywhere in the string."""
        return (cls.Conjunction.value in string or
                cls.SequentialConjunction.value in string or
                cls.ParallelConjunction.value in string)

    @classmethod
    def contains_higher_level_connector(cls,string):
        """True if any higher-order (statement) connector symbol occurs in the string."""
        for connector in cls:
            if not cls.is_first_order(connector):
                # higher order connector
                if connector.value in string: return True
        return False

    @classmethod
    def get_set_end_connector_from_set_start_connector(cls, start_connector):
        """Return the matching closing bracket connector for a set-start connector."""
        if start_connector == TermConnector.ExtensionalSetStart: return TermConnector.ExtensionalSetEnd
        if start_connector == TermConnector.IntensionalSetStart: return TermConnector.IntensionalSetEnd
        assert False,"ERROR: Invalid start connector"

    @classmethod
    def is_set_bracket_start(cls, bracket):
        """
        Returns true if character is a starting bracket for a set
        :param bracket: single-character string
        :return: bool
        """
        assert bracket is not None, "ERROR: None is not a term connector"
        return (bracket == TermConnector.IntensionalSetStart.value) or (
                bracket == TermConnector.ExtensionalSetStart.value)

    @classmethod
    def is_set_bracket_end(cls, bracket):
        """
        Returns true if character is an ending bracket for a set
        :param bracket: single-character string
        :return: bool
        """
        assert bracket is not None, "ERROR: None is not a term connector"
        return (bracket == TermConnector.IntensionalSetEnd.value) or (
                bracket == TermConnector.ExtensionalSetEnd.value)
155 |
156 |
class Copula(enum.Enum):
    """
    Narsese copulas relating a statement's subject and predicate.
    The enum value is the copula's textual symbol.
    """
    # Primary copula
    Inheritance = "-->"
    Similarity = "<->"
    Implication = "==>"
    Equivalence = "<=>"

    # Secondary copula
    Instance = "{--"
    Property = "--]"
    InstanceProperty = "{-]"
    PredictiveImplication = "=/>"
    RetrospectiveImplication = r"=\>"
    ConcurrentImplication = "=|>"
    PredictiveEquivalence = ">"
    ConcurrentEquivalence = "<|>"


    @classmethod
    def is_implication(cls, copula):
        """True for any implication variant (==>, =/>, =\\>, =|>)."""
        # BUG FIX: ConcurrentImplication (=|>) was omitted here even though
        # is_temporal() treats it as an implication-family copula; also removed
        # a dangling line-continuation backslash.
        return copula is cls.Implication \
               or copula is cls.PredictiveImplication \
               or copula is cls.RetrospectiveImplication \
               or copula is cls.ConcurrentImplication

    @classmethod
    def is_first_order(cls, copula):
        """True for NAL-1/NAL-2 copulas that relate terms rather than statements."""
        return copula is cls.Inheritance \
               or copula is cls.Similarity \
               or copula is cls.Instance \
               or copula is cls.Property \
               or copula is cls.InstanceProperty

    @classmethod
    def is_temporal(cls, copula):
        """True if the copula carries temporal order information."""
        return copula == cls.PredictiveImplication \
               or copula == cls.RetrospectiveImplication \
               or copula == cls.ConcurrentImplication \
               or copula == cls.PredictiveEquivalence \
               or copula == cls.ConcurrentEquivalence

    @classmethod
    def is_symmetric(cls, copula):
        """True if subject and predicate are interchangeable under this copula."""
        return copula == cls.Similarity \
               or copula == cls.Equivalence \
               or copula == cls.PredictiveEquivalence \
               or copula == cls.ConcurrentEquivalence

    @classmethod
    def is_string_a_copula(cls, value):
        """True if the string is exactly one of the copula symbols."""
        return value in cls._value2member_map_

    @classmethod
    def get_copula_from_string(cls, value):
        """Return the Copula whose symbol equals value, or None if invalid."""
        if not Copula.is_string_a_copula(value):
            return None

        for copula in cls:
            if value == copula.value:
                return copula

        return None

    @classmethod
    def contains_copula(cls, string):
        """True if any copula symbol occurs anywhere in the string."""
        for copula in cls:
            if copula.value in string:
                return True
        return False

    @classmethod
    def contains_top_level_copula(cls,string):
        """True if the string has a copula at statement nesting depth 1."""
        copula, _ = cls.get_top_level_copula(string)
        return copula is not None

    @classmethod
    def get_top_level_copula(cls,string):
        """
        Searches for top-level copula in the string.

        :returns copula and index if it exists,
        :returns none and -1 otherwise

        NOTE(review): only fixed 3-character windows are tested, so the
        1-character copula ">" (PredictiveEquivalence) can never be found
        here -- confirm whether that is intentional before relying on this
        for temporal parsing.
        """
        copula = None
        copula_idx = -1

        depth = 0
        for i, v in enumerate(string):
            if v == StatementSyntax.Start.value:
                depth += 1
            elif v == StatementSyntax.End.value:
                depth -= 1
            elif depth == 1 and i + 3 <= len(string) and Copula.is_string_a_copula(string[i:i + 3]):
                copula, copula_idx = Copula.get_copula_from_string(string[i:i + 3]), i

        return copula, copula_idx
252 |
253 |
class Punctuation(enum.Enum):
    """
    Sentence punctuation; determines the sentence type.
    The enum value is the literal punctuation character.
    """
    Judgment = "."
    Question = "?"  # on truth-value
    Goal = "!"
    Quest = "`"  # on desire-value #todo, decide value for Quest since @ is used for array now

    @classmethod
    def is_punctuation(cls, value):
        """True if the string is one of the punctuation characters."""
        return value in cls._value2member_map_

    @classmethod
    def get_punctuation_from_string(cls, value):
        """Return the Punctuation for a character, or None if it is not punctuation."""
        if not cls.is_punctuation(value):
            return None
        return next((punctuation for punctuation in cls if punctuation.value == value), None)
274 |
275 |
# Characters permitted inside a term name:
# ASCII letters, ASCII digits, underscore, and caret.
valid_term_chars = set(
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "0123456789"
    "_^"
)
286 |
--------------------------------------------------------------------------------
/NARSDataStructures/Bag.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: December 24, 2020
4 | Purpose: Holds data structure implementations that are specific / custom to NARS
5 | """
6 | import math
7 | import random
8 |
9 | import sortedcontainers
10 |
11 | import Config
12 | import NARSDataStructures.ItemContainers
13 |
14 | import NARSDataStructures.Other
15 | import NALInferenceRules
16 |
17 |
class Bag(NARSDataStructures.ItemContainers.ItemContainer):
    """
    Probabilistic priority-queue

    --------------------------------------------

    An array of buckets, where each bucket holds items of a certain priority
    (e.g. 100 buckets, bucket 1 - hold items with 0.01 priority, bucket 50 - hold items with 0.50 priority)
    """

    def __init__(self, item_type, capacity, granularity=Config.BAG_GRANULARITY):
        """
        :param item_type: class of the objects this Bag stores (wrapped in Items)
        :param capacity: maximum number of items the Bag may hold
        :param granularity: number of buckets; also the priority resolution
        """
        self.level = 0  # bucket cursor used by _peek_probabilistically
        self.priority_buckets = {}  # bucket_num -> SortedList of (id(item), item), sampled for selection
        self.quality_buckets = {}  # store by inverted quality for deletion
        self.granularity = granularity
        for i in range(granularity):
            # buckets are created lazily (None until first insert)
            self.priority_buckets[i] = None
            self.quality_buckets[i] = None
        NARSDataStructures.ItemContainers.ItemContainer.__init__(self, item_type=item_type, capacity=capacity)

    def __len__(self):
        return len(self.item_lookup_dict)

    def __iter__(self):
        # iterate items in reverse insertion order
        return iter(list(self.item_lookup_dict.values()).__reversed__())

    def clear(self):
        """Remove every item and reset all buckets."""
        self.level = 0
        for i in range(self.granularity):
            self.priority_buckets[i] = None
            self.quality_buckets[i] = None
        NARSDataStructures.ItemContainers.ItemContainer._clear(self)

    def PUT_NEW(self, object):
        """
        Place a NEW Item into the bag.

        :param object: object to wrap in an Item and place into the Bag
        :returns the new item
        """
        assert (isinstance(object, self.item_type)), "item object must be of type " + str(self.item_type)

        # make room if at capacity: probabilistically purge a low-quality item.
        # (The original bound the purged item to an unused local; it is discarded.)
        if len(self) == self.capacity:
            self._TAKE_MIN()

        # add new item
        item = NARSDataStructures.ItemContainers.ItemContainer.PUT_NEW(self, object)
        self.add_item_to_bucket(item)
        self.add_item_to_quality_bucket(item)

        return item

    def peek(self, key=None):
        """
        Peek an object from the bag using its key.
        If key is None, peeks probabilistically

        :returns An item peeked from the Bag; None if item could not be peeked from the Bag
        """
        if len(self) == 0: return None  # no items

        if key is None:
            item = self._peek_probabilistically(buckets=self.priority_buckets)
        else:
            item = NARSDataStructures.ItemContainers.ItemContainer.peek_using_key(self, key=key)

        return item

    def change_priority(self, key, new_priority):
        """
        Changes an item's priority in the bag, moving it to the matching bucket.

        :param key: key of the item to update
        :param new_priority: new priority value
        """
        item = self.peek_using_key(key)

        self.remove_item_from_its_bucket(item=item)

        # change item priority attribute
        item.budget.set_priority(new_priority)

        # add to new bucket
        self.add_item_to_bucket(item=item)

    def change_quality(self, key, new_quality):
        """
        Changes an item's quality in the bag, moving it to the matching quality bucket.

        :param key: key of the item to update
        :param new_quality: new quality value
        """
        item = self.peek_using_key(key)

        # remove from sorted
        self.remove_item_from_its_quality_bucket(item)

        # change item quality
        item.budget.set_quality(new_quality)

        # add back to sorted
        self.add_item_to_quality_bucket(item=item)

    def add_item_to_bucket(self, item):
        """Insert the item into the priority bucket matching its current priority."""
        bucket_num = self.calc_bucket_num_from_value(item.budget.get_priority())
        if self.priority_buckets[bucket_num] is None:
            self.priority_buckets[bucket_num] = sortedcontainers.SortedList()
        bucket = self.priority_buckets[bucket_num]
        # store (id(item), item) so entries are totally orderable in the SortedList
        bucket.add((id(item), item))
        item.bucket_num = bucket_num

    def remove_item_from_its_bucket(self, item):
        """Remove the item from the priority bucket it currently occupies."""
        bucket = self.priority_buckets[item.bucket_num]
        bucket.remove((id(item), item))
        if len(bucket) == 0:
            self.priority_buckets[item.bucket_num] = None  # drop empty buckets
        item.bucket_num = None

    def add_item_to_quality_bucket(self, item):
        """Insert the item into a quality bucket, indexed by INVERTED quality."""
        # higher quality should have lower probability of being selected for deletion
        bucket_num = self.calc_bucket_num_from_value(1 - item.budget.get_quality())
        if self.quality_buckets[bucket_num] is None:
            self.quality_buckets[bucket_num] = sortedcontainers.SortedList()
        bucket = self.quality_buckets[bucket_num]
        bucket.add((id(item), item))
        item.quality_bucket_num = bucket_num

    def remove_item_from_its_quality_bucket(self, item):
        """Remove the item from the quality bucket it currently occupies."""
        bucket = self.quality_buckets[item.quality_bucket_num]
        bucket.remove((id(item), item))
        if len(bucket) == 0:
            self.quality_buckets[item.quality_bucket_num] = None
        item.quality_bucket_num = None

    def strengthen_item_priority(self, key, multiplier=Config.PRIORITY_STRENGTHEN_VALUE):
        """
        Strengthens an item's priority (probabilistic OR with the multiplier).

        :param key: key of the item to strengthen
        :param multiplier: strengthening factor
        """
        item = self.peek_using_key(key)
        new_priority = NALInferenceRules.ExtendedBooleanOperators.bor(item.budget.get_priority(), multiplier)
        self.change_priority(key, new_priority=new_priority)

    def strengthen_item_quality(self, key):
        """
        Strengthens an item's quality (probabilistic OR with 0.1).

        :param key: key of the item to strengthen
        """
        item = self.peek_using_key(key)
        new_quality = NALInferenceRules.ExtendedBooleanOperators.bor(item.budget.get_quality(), 0.1)
        self.change_quality(key, new_quality=new_quality)

    def decay_item(self, key, multiplier=Config.PRIORITY_DECAY_VALUE):
        """
        Decays an item's priority (probabilistic AND with the multiplier).

        :param key: key of the item to decay
        :param multiplier: decay factor
        """
        item = self.peek_using_key(key)
        new_priority = NALInferenceRules.ExtendedBooleanOperators.band(item.budget.get_priority(), multiplier)
        self.change_priority(key, new_priority=new_priority)

    def TAKE_USING_KEY(self, key):
        """
        Take an item from the bag using the key

        :param key: key of the item to remove from the Bag
        :return: the item which was removed from the bucket
        """
        assert (key in self.item_lookup_dict), "Given key does not exist in this bag"
        item = NARSDataStructures.ItemContainers.ItemContainer._take_from_lookup_dict(self, key)
        self.remove_item_from_its_bucket(item=item)
        self.remove_item_from_its_quality_bucket(item)
        return item

    def _TAKE_MIN(self):
        """
        :returns the lowest quality item taken from the Bag; None if nothing could be taken
        """
        try:
            item = self._peek_probabilistically(buckets=self.quality_buckets)
            assert (item.key in self.item_lookup_dict), "Given key does not exist in this bag"
            item = NARSDataStructures.ItemContainers.ItemContainer._take_from_lookup_dict(self, item.key)
            self.remove_item_from_its_bucket(item=item)
            self.remove_item_from_its_quality_bucket(item)
        except Exception:
            # peek may return None (item.key then raises AttributeError) or the assert
            # may trip; both mean there is nothing valid to purge. Narrowed from a
            # bare `except:` which also swallowed KeyboardInterrupt/SystemExit.
            item = None
        return item

    def _peek_probabilistically(self, buckets):
        """
        Probabilistically selects a priority value / bucket, then peeks an item from that bucket.

        Higher-numbered buckets are accepted with higher probability; empty buckets
        are skipped without counting toward the attempt limit.

        :returns item, or None if no bucket was accepted within MAX_ATTEMPTS
        """
        if len(self) == 0: return None
        self.level = random.randint(0, self.granularity - 1)

        MAX_ATTEMPTS = 10
        num_attempts: int = 0
        while num_attempts < MAX_ATTEMPTS:  # (redundant `True and` removed)
            level_bucket = buckets[self.level]
            if level_bucket is None or len(level_bucket) == 0:
                self.level = (self.level + 1) % self.granularity
                continue

            # try to go into bucket
            rnd = random.randint(0, self.granularity - 1)

            threshold = self.level
            if rnd <= threshold:
                # use this bucket
                break
            else:
                self.level = (self.level + 1) % self.granularity

            num_attempts += 1

        if num_attempts >= MAX_ATTEMPTS: return None

        # uniformly pick one entry from the accepted bucket
        rnd_idx = random.randint(0, len(level_bucket) - 1)
        _, item = level_bucket[rnd_idx]

        return item

    def calc_bucket_num_from_value(self, val):
        """Map a value in [0, 1] to a bucket index in [0, granularity - 1]."""
        return min(math.floor(val * self.granularity), self.granularity - 1)
--------------------------------------------------------------------------------
/NARSDataStructures/ItemContainers.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Christian Hahm
3 | Created: December 24, 2020
4 | Purpose: Holds data structure implementations that are specific / custom to NARS
5 | """
6 | import Global
7 | import NALSyntax
8 | import NARSGUI
9 | import NALGrammar
10 | import NARSMemory
11 | import Config
12 | import NARSDataStructures.Other
13 | import NARSDataStructures.Bag
14 | import NALInferenceRules
15 |
16 |
class ItemContainer:
    """
    Base Class for data structures which contain "Items", as defined in this class.

    Examples of Item Containers include Bag and Buffer.
    """

    def __init__(self, item_type, capacity):
        self.item_type = item_type  # the class of the objects this Container stores (each gets wrapped in an Item)
        self.item_lookup_dict = dict()  # for accessing Item by key
        self.next_item_id = 0  # monotonically increasing id handed to new Items
        self.capacity = capacity  # maximum number of Items (enforced by subclasses)
        #self.item_archive = {}

    def __contains__(self, object):
        """
        Purpose:
            Check if the object is contained in the container by checking whether its key is in the item lookup table

        :param object: object to look for in the container
        :return: True if the item is in the container;
                False otherwise
        """
        key = Item.get_key_from_object(object)
        return self.item_lookup_dict.get(key, None) is not None

    def __iter__(self):
        return iter(self.item_lookup_dict.values())

    def __getitem__(self, key):
        return self.item_lookup_dict[key]

    def _clear(self):
        # reset lookup table and id counter; subclasses reset their own structures
        self.item_lookup_dict = dict()
        self.next_item_id = 0

    def PUT_NEW(self, object):
        """
        Place a NEW Item into the container.

        :param object: object to wrap in a new Item
        :return: the newly created Item
        """
        item = NARSDataStructures.ItemContainers.Item(object, self.get_next_item_id())
        self._put_into_lookup_dict(item) # Item Container
        return item

    def _put_into_lookup_dict(self, item):
        """
        Puts item into lookup table and GUI
        :param item: put an Item into the lookup dictionary.
        """
        # put item into lookup table
        self.item_lookup_dict[item.key] = item

        if Config.GUI_USE_INTERFACE:
            Global.Global.print_to_output(str(item), data_structure=self) # draw to GUI
        #self.item_archive[item.key] = item

    def _take_from_lookup_dict(self, key):
        """
        Removes an Item from the lookup dictionary using its key,
        and returns the Item.

        :param key: Key of the Item to remove.
        :return: The Item that was removed.
        """
        item = self.item_lookup_dict.pop(key) # remove item reference from lookup table

        if Config.GUI_USE_INTERFACE:
            Global.Global.remove_from_output(str(item), data_structure=self)

        return item


    def _take_min(self):
        # abstract; concrete containers (e.g. Bag) override this
        assert False, "Take smallest priority item not defined for generic Item Container!"

    def get_next_item_id(self) -> int:
        # post-increment: return the current id, then advance the counter
        self.next_item_id += 1
        return self.next_item_id - 1

    def peek_from_item_archive(self, key):
        # NOTE(review): self.item_archive is commented out in __init__, so calling
        # this raises AttributeError — confirm whether the archive should be restored
        return self.item_archive.get(key, None)

    def peek_using_key(self, key=None):
        """
        Peek an Item using its key

        :param key: Key of the item to peek
        :return: Item peeked from the data structure
        """
        assert key is not None, "Key cannot be none when peeking with a key!"
        return self.item_lookup_dict.get(key, None)
108 |
109 |
class Item:
    """
    Item in an Item Container. Wraps an object.

    Consists of:
        object (e.g. Concept, Task, etc.)

        budget ($priority;quality$)
    """

    def __init__(self, object, id):
        """
        :param object: object to wrap in the item
        :param id: unique integer id assigned by the Item Container
        """
        self.bucket = None  # bucket bookkeeping, managed by Bag
        self.object = object
        self.id = id
        # initial budget values; None means "use the Budget defaults"
        priority = None

        quality = None
        if isinstance(object, NARSDataStructures.Other.Task):
            if isinstance(object.sentence, NALGrammar.Sentences.Judgment):
                # judgment tasks start with priority equal to their present confidence
                priority = object.sentence.get_present_value().confidence


        elif isinstance(object, NARSMemory.Concept):
            # currently no special budgeting for concepts; branches kept as placeholders
            if isinstance(object.term, NALGrammar.Terms.SpatialTerm):
                pass
            elif isinstance(object.term, NALGrammar.Terms.StatementTerm) and not object.term.is_first_order():
                pass
            # if isinstance(object.term,NALGrammar.Terms.CompoundTerm) and object.term.connector == NALSyntax.TermConnector.Negation:
            #     priority = 0.0
            #     quality = 0.0
            # if isinstance(object.term,NALGrammar.Terms.StatementTerm) and object.term.is_first_order():
            #     priority = 0.0
            #     quality = 0.8

        # assign ID: Tasks are keyed by their numeric id; all other objects
        # by a content-derived string key
        if isinstance(object, NARSDataStructures.Other.Task):
            self.key = id
        else:
            self.key = Item.get_key_from_object(object)


        self.budget = Item.Budget(priority=priority, quality=quality)

    @classmethod
    def get_key_from_object(cls, object):
        """
        Returns a key that uniquely identifies the given object.

        This is essentially a universal hash function for NARS objects

        :param object: Concept, Sentence, or any other object
        :return: string key for object
        """
        key = None
        if isinstance(object, NARSMemory.Concept):
            key = str(object.term)  # concepts are identified by their term
        elif isinstance(object, NALGrammar.Sentences.Sentence):
            key = str(object.stamp.id)  # sentences are identified by their stamp id
        else:
            key = str(object)
        return key

    def __str__(self):
        # rendered as: $priority;quality$ <item-id markers> object
        return NALSyntax.StatementSyntax.BudgetMarker.value \
               + "{:.5f}".format(self.budget.get_priority()) \
               + NALSyntax.StatementSyntax.ValueSeparator.value \
               + "{:.5f}".format(self.budget.get_quality()) \
               + NALSyntax.StatementSyntax.BudgetMarker.value \
               + " " \
               + Global.Global.MARKER_ITEM_ID \
               + str(self.id) + Global.Global.MARKER_ID_END \
               + str(self.object)



    def get_gui_info(self):
        """
        Build a dictionary of displayable attributes for the GUI,
        keyed by NARSGUI constants. Contents depend on the wrapped object's type.
        """
        dict = {}
        dict[NARSGUI.NARSGUI.KEY_KEY] = self.key
        dict[NARSGUI.NARSGUI.KEY_CLASS_NAME] = type(self.object).__name__
        dict[NARSGUI.NARSGUI.KEY_OBJECT_STRING] = str(self.object)
        dict[NARSGUI.NARSGUI.KEY_TERM_TYPE] = type(self.object.get_term()).__name__
        if isinstance(self.object, NARSMemory.Concept):
            dict[NARSGUI.NARSGUI.KEY_IS_POSITIVE] = "True" if self.object.is_positive() else "False"
            if len(self.object.desire_table) > 0:
                dict[NARSGUI.NARSGUI.KEY_PASSES_DECISION] = "True" if NALInferenceRules.Local.Decision(
                    self.object.desire_table.peek()) else "False"
            else:
                dict[NARSGUI.NARSGUI.KEY_PASSES_DECISION] = None
            dict[NARSGUI.NARSGUI.KEY_EXPECTATION] = self.object.get_expectation()
            dict[NARSGUI.NARSGUI.KEY_LIST_BELIEFS] = [str(belief[0]) for belief in self.object.belief_table]
            dict[NARSGUI.NARSGUI.KEY_LIST_DESIRES] = [str(desire[0]) for desire in self.object.desire_table]
            dict[NARSGUI.NARSGUI.KEY_LIST_TERM_LINKS] = [str(termlink.object) for termlink in self.object.term_links]
            dict[NARSGUI.NARSGUI.KEY_LIST_PREDICTION_LINKS] = [str(predictionlink.object) for predictionlink in
                                                               self.object.prediction_links]
            dict[NARSGUI.NARSGUI.KEY_LIST_EXPLANATION_LINKS] = [str(explanationlink.object) for explanationlink in
                                                                self.object.explanation_links]
            dict[NARSGUI.NARSGUI.KEY_CAPACITY_BELIEFS] = str(self.object.belief_table.capacity)
            dict[NARSGUI.NARSGUI.KEY_CAPACITY_DESIRES] = str(self.object.desire_table.capacity)
            dict[NARSGUI.NARSGUI.KEY_CAPACITY_TERM_LINKS] = str(self.object.term_links.capacity)
            dict[NARSGUI.NARSGUI.KEY_CAPACITY_PREDICTION_LINKS] = str(self.object.prediction_links.capacity)
            dict[NARSGUI.NARSGUI.KEY_CAPACITY_EXPLANATION_LINKS] = str(self.object.explanation_links.capacity)
        elif isinstance(self.object, NARSDataStructures.Other.Task):
            dict[NARSGUI.NARSGUI.KEY_SENTENCE_STRING] = str(self.object.sentence)
            dict[NARSGUI.NARSGUI.KEY_LIST_EVIDENTIAL_BASE] = [str(evidence) for evidence in
                                                              self.object.sentence.stamp.evidential_base]
            dict[NARSGUI.NARSGUI.KEY_LIST_INTERACTED_SENTENCES] = []

        return dict

    class Budget:
        """
        Budget deciding the proportion of the system's time-space resources to allocate to a Bag Item.
        Priority determines how likely an item is to be selected,
        Quality defines the Item's base priority (its lowest possible priority)
        """

        def __init__(self, priority=None, quality=None):
            # quality defaults to 0; priority defaults to the quality value
            if quality is None: quality = 0
            self.set_quality(quality)

            if priority is None: priority = quality
            self.set_priority(priority)

        def __str__(self):
            return NALSyntax.StatementSyntax.BudgetMarker.value \
                   + str(self.get_priority()) \
                   + NALSyntax.StatementSyntax.ValueSeparator.value \
                   + str(self.get_quality()) \
                   + NALSyntax.StatementSyntax.BudgetMarker.value

        def set_priority(self, value):
            # if value < self.get_quality(): value = self.get_quality() # priority can't go below quality
            if value > 0.99999999: value = 0.99999999 # priority can't get too close to 1
            if value < 0.01: value = 0.01 # priority is floored at 0.01 (never 0, unlike quality)
            self._priority = value

        def set_quality(self, value):
            if value > 0.99999999: value = 0.99999999 # quality can't get too close to 1
            if value < 0: value = 0 # quality can't go below 0
            self._quality = value

        def get_priority(self):
            return self._priority

        def get_quality(self):
            return self._quality
260 |
--------------------------------------------------------------------------------
/NARSDataStructures/Other.py:
--------------------------------------------------------------------------------
1 | import random
2 | import timeit as time
3 | from typing import List
4 |
5 | import Asserts
6 | import NALGrammar.Sentences
7 | import Config
8 | import Global
9 | import depq
10 | import NALInferenceRules
11 |
12 | """
13 | Author: Christian Hahm
14 | Created: December 24, 2020
15 | Purpose: Holds data structure implementations that are specific / custom to NARS
16 | """
17 |
18 |
class Depq:
    """
    Wrapper around depq.DEPQ: a double-ended priority queue kept in
    highest-priority-first order. Entries are (object, priority) pairs.
    """

    def __init__(self):
        self.depq = depq.DEPQ(iterable=None, maxlen=None)  # maxheap depq

    def __iter__(self):
        return iter(self.depq)

    def __len__(self):
        return len(self.depq)

    def __getitem__(self, i):
        # expose only the stored object, not its priority
        return self.depq[i][0]

    def remove(self, item):
        return self.depq.remove(item)

    def insert_object(self, object, priority):
        self.depq.insert(object, priority)

    def extract_max(self):
        """
        Extract Item with highest priority from the depq
        O(1)

        Returns None if depq is empty
        """
        if len(self.depq) == 0:
            return None
        return self.depq.popfirst()[0]

    def extract_min(self):
        """
        Extract Item with lowest priority from the depq
        O(1)

        Returns None if depq is empty
        """
        if len(self.depq) == 0:
            return None
        return self.depq.poplast()[0]

    def peek_max(self):
        """
        Peek Item with highest priority from the depq
        O(1)

        Returns None if depq is empty
        """
        if len(self.depq) == 0:
            return None
        return self.depq.first()

    def peek_min(self):
        """
        Peek Item with lowest priority from the depq
        O(1)

        Returns None if depq is empty
        """
        if len(self.depq) == 0:
            return None
        return self.depq.last()
80 |
81 |
class Table(Depq):
    """
    NARS Table, stored within Concepts.
    Tables store Narsese sentences using a double ended priority queue, where priority = sentence confidence
    Sorted by highest-confidence.
    It purges lowest-confidence items when it overflows.
    """

    def __init__(self, item_type, capacity=Config.TABLE_DEFAULT_CAPACITY):
        self.item_type = item_type  # the Sentence subclass this table accepts (e.g. Judgment)
        self.capacity = capacity
        Depq.__init__(self)

    def clear(self):
        # drain the queue one element at a time (no bulk clear on the depq)
        while len(self) > 0:
            self.take()

    def put(self, sentence):
        """
        Insert a Sentence into the depq, sorted by confidence (time-projected confidence if it's an event).

        Events: the current best entry is taken and revised into the incoming
        sentence before insertion. Non-events: if an existing belief can interact
        with the sentence, their revision is inserted alongside the original.
        """
        assert (isinstance(sentence, self.item_type)), "Cannot insert sentence into a Table of different punctuation"

        if len(self) > 0:
            if sentence.is_event():
                current_event = self.take()
                sentence = NALInferenceRules.Local.Revision(sentence, current_event)
            else:
                existing_interactable = self.peek_highest_confidence_interactable(sentence)
                if existing_interactable is not None:
                    revised = NALInferenceRules.Local.Revision(sentence, existing_interactable)
                    priority = revised.get_present_value().confidence
                    Depq.insert_object(self, revised, priority)


        priority = sentence.get_present_value().confidence
        Depq.insert_object(self, sentence, priority)

        # purge the lowest-confidence entry on overflow
        if len(self) > self.capacity:
            Depq.extract_min(self)

    def take(self):
        """
        Take item with highest confidence from the depq
        O(1)
        """
        return Depq.extract_max(self)

    def peek(self):
        """
        Peek item with highest confidence from the depq
        O(1)

        Returns None if depq is empty

        NOTE(review): implemented as take-then-put; since put() revises events,
        peeking an event table may not be side-effect-free — confirm intent.
        """
        max = self.take()
        if max is not None:
            self.put(max)
        return max

    def peek_random(self):
        """
        Peek random item from the depq
        O(1)

        Returns None if depq is empty
        """
        if len(self) == 0: return None
        return random.choice(self)

    def peek_highest_confidence_interactable(self, j):
        """
        Returns the best sentence in this table that j may interact with
        None if there are none.
        O(N)

        :param j: sentence to find an interaction partner for
        :return: the highest-confidence interactable belief, or None
        """
        for (belief, confidence) in self:  # loop starting with max confidence
            if NALGrammar.Sentences.may_interact(j, belief):
                return belief
        return None
165 |
166 |
class Task:
    """
    NARS Task: a unit of work wrapping a sentence to be processed.
    """

    def __init__(self, sentence, is_input_task=False):
        Asserts.assert_sentence(sentence)
        self.sentence = sentence
        # record the working cycle at which this task was created
        self.creation_timestamp: int = Global.Global.get_current_cycle_number()
        self.is_from_input: bool = is_input_task
        # only used for question tasks
        self.needs_to_be_answered_in_output: bool = is_input_task

    def get_term(self):
        return self.sentence.statement

    def __str__(self):
        return f"TASK: {self.sentence.get_term_string_no_id()}"
185 |
class QuadTree:
    """
    Quadtree whose leaves receive sequential (y, x) grid ids.
    All nodes in one tree share a single mutable leaf counter.
    """

    def __init__(self, ROOTLEAFID=None):
        self.children: List[QuadTree] = []
        self.values: List[NALGrammar.Sentences.Judgment] = []
        self.id = None
        # shared single-element list: every node increments the same counter
        if ROOTLEAFID is None:
            self.LEAFID = [0]
        else:
            self.LEAFID = ROOTLEAFID
        self.MAXLEAFID = 32  # todo - make this vary

    def Create4ChildrenRecursive(self, depth):
        """Grow 4 children per node down to depth 0, assigning leaf ids as (y, x)."""
        if depth == 0:
            counter = self.LEAFID[0]
            self.id = divmod(counter, self.MAXLEAFID)  # (y, x)
            self.LEAFID[0] = counter + 1
            return

        self.children.extend(self._grow_subtree(depth - 1) for _ in range(4))

    def _grow_subtree(self, depth):
        # build one fully-expanded child sharing this node's leaf counter
        child = QuadTree(ROOTLEAFID=self.LEAFID)
        child.Create4ChildrenRecursive(depth)
        return child
207 |
--------------------------------------------------------------------------------
/Narsese.txt:
--------------------------------------------------------------------------------
1 | task ::= [budget] sentence (* task to be processed *)
2 |
3 | sentence ::= statement"." [tense] [truth] (* judgement to be absorbed into beliefs *)
4 | | statement"?" [tense] (* question on truth-value to be answered *)
5 | | statement"!" [desire] (* goal to be realized by operations *)
6 | | statement"@" (* question on desire-value to be answered *)
7 |
8 | statement ::= <"<">term copula term<">"> (* two terms related to each other *)
9 | | <"(">term copula term<")"> (* two terms related to each other, new notation *)
10 | | term (* a term can name a statement *)
11 | | "(^"word {","term} ")" (* an operation to be executed *)
12 | | word"("term {","term} ")" (* an operation to be executed, new notation *)
13 |
14 | copula ::= "-->" (* inheritance *)
15 | | "<->" (* similarity *)
16 | | "{--" (* instance *)
17 | | "--]" (* property *)
18 | | "{-]" (* instance-property *)
19 | | "==>" (* implication *)
20 | | "=/>" (* predictive implication *)
21 | | "=|>" (* concurrent implication *)
22 | | "=\\>" (* =\> retrospective implication *)
23 | | "<=>" (* equivalence *)
                 | "</>"                               (* predictive equivalence *)
25 | | "<|>" (* concurrent equivalence *)
26 |
27 | term ::= word (* an atomic constant term *)
28 | | variable (* an atomic variable term *)
29 | | compound-term (* a term with internal structure *)
30 | | statement (* a statement can serve as a term *)
31 |
32 | compound-term ::= op-ext-set term {"," term} "}" (* extensional set *)
33 | | op-int-set term {"," term} "]" (* intensional set *)
34 | | "("op-multi"," term {"," term} ")" (* with prefix operator *)
35 | | "("op-single"," term "," term ")" (* with prefix operator *)
36 | | "(" term {op-multi term} ")" (* with infix operator *)
37 | | "(" term op-single term ")" (* with infix operator *)
38 | | "(" term {","term} ")" (* product, new notation *)
39 | | "(" op-ext-image "," term {"," term} ")"(* special case, extensional image *)
40 | | "(" op-int-image "," term {"," term} ")"(* special case, \ intensional image *)
41 | | "(" op-negation "," term ")" (* negation *)
42 | | op-negation term (* negation, new notation *)
43 |
44 | op-int-set::= "[" (* intensional set *)
45 | op-ext-set::= "{" (* extensional set *)
46 | op-negation::= "--" (* negation *)
47 | op-int-image::= "\\" (* \ intensional image *)
48 | op-ext-image::= "/" (* extensional image *)
49 | op-multi ::= "&&" (* conjunction *)
50 | | "*" (* product *)
51 | | "||" (* disjunction *)
52 | | "&|" (* parallel events *)
53 | | "&/" (* sequential events *)
54 | | "|" (* intensional intersection *)
55 | | "&" (* extensional intersection *)
56 | op-single ::= "-" (* extensional difference *)
57 | | "~" (* intensional difference *)
58 |
59 | variable ::= "$"word (* independent variable *)
60 | | "#"word (* dependent variable *)
61 | | "?"word (* query variable in question *)
62 |
63 | tense ::= ":/:" (* future event *)
64 | | ":|:" (* present event *)
65 | | ":\\:" (* :\: past event *)
66 |
67 | desire ::= truth (* same format, different interpretations *)
68 | truth ::= <"%">frequency[<";">confidence]<"%"> (* two numbers in [0,1]x(0,1) *)
69 | budget ::= <"$">priority[<";">durability][<";">quality]<"$"> (* three numbers in [0,1]x(0,1)x[0,1] *)
70 |
71 | word : #"[^\ ]+" (* unicode string *)
72 | priority : #"([0]?\.[0-9]+|1\.[0]*|1|0)" (* 0 <= x <= 1 *)
73 | durability : #"[0]?\.[0]*[1-9]{1}[0-9]*" (* 0 < x < 1 *)
74 | quality : #"([0]?\.[0-9]+|1\.[0]*|1|0)" (* 0 <= x <= 1 *)
75 | frequency : #"([0]?\.[0-9]+|1\.[0]*|1|0)" (* 0 <= x <= 1 *)
76 | confidence : #"[0]?\.[0]*[1-9]{1}[0-9]*" (* 0 < x < 1 *)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NARS-Python
2 |
3 | NARS theory implemented in Python
4 |
5 | Build Instructions: `pyinstaller --onefile main.py`
6 |
7 | Architecture:
8 |
9 | 
10 |
11 | GUI screenshots:
12 |
13 | 
14 |
15 | 
16 |
17 | 
18 |
19 |
20 |
--------------------------------------------------------------------------------
/TestCases/AllTests.py:
--------------------------------------------------------------------------------
1 | import DataStructureTests
2 | import GrammarTests
3 | import InferenceEngineTests
4 | import InferenceRuleTests
5 | import Config
6 |
7 |
def main():
    """Run every test suite in sequence."""
    for suite in (DataStructureTests, GrammarTests, InferenceEngineTests, InferenceRuleTests):
        suite.main()

if __name__ == "__main__":
    Config.DEBUG = False
    Config.GUI_USE_INTERFACE = False
    main()
--------------------------------------------------------------------------------
/TestCases/DataStructureTests.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import Global
4 | import NARSDataStructures
5 | import NALGrammar
6 | import NALSyntax
7 | import NARS
8 | import NARSMemory
9 |
10 | """
11 | Author: Christian Hahm
12 | Created: January 29, 2021
13 | Purpose: Unit Testing for NARS data structures
14 | """
15 |
16 |
def test_table_removemax():
    """
    Test if the Table can successfully remove its maximum value
    """
    table = NARSDataStructures.Other.Table(item_type=NALGrammar.Sentences.Judgment)
    confidences = [0.6, 0.2, 0.99, 0.5, 0.9]
    for confidence in confidences:
        # build judgment (a-->b). %0.9;confidence%
        statement = NALGrammar.Terms.StatementTerm(NALGrammar.Terms.from_string("a"),
                                                   NALGrammar.Terms.from_string("b"),
                                                   NALSyntax.Copula.Inheritance)
        table.put(NALGrammar.Sentences.Judgment(statement,
                                                NALGrammar.Values.TruthValue(0.9, confidence)))
    tablemax = table.extract_max().value.confidence
    assert (tablemax == max(confidences)), "TEST FAILURE: Table did not properly retrieve maximum value"
32 |
33 |
def test_table_removemin():
    """
    Test if the Table can successfully remove its minimum value
    """
    table = NARSDataStructures.Other.Table(item_type=NALGrammar.Sentences.Judgment)
    confidences = [0.6, 0.2, 0.99, 0.5, 0.9]
    for confidence in confidences:
        # build judgment (a-->b). %0.9;confidence%
        statement = NALGrammar.Terms.StatementTerm(NALGrammar.Terms.from_string("a"),
                                                   NALGrammar.Terms.from_string("b"),
                                                   NALSyntax.Copula.Inheritance)
        table.put(NALGrammar.Sentences.Judgment(statement,
                                                NALGrammar.Values.TruthValue(0.9, confidence)))
    tablemin = table.extract_min().value.confidence
    assert (tablemin == min(confidences)), "TEST FAILURE: Table did not properly retrieve minimum value"
51 |
52 |
def test_table_overflow_purge():
    """
    Test if table stays within capacity when it overflows.
    """
    test_data_structure = NARSDataStructures.Other.Table(item_type=NALGrammar.Sentences.Judgment)
    max_capacity = NARS.Config.TABLE_DEFAULT_CAPACITY
    items_added = 0
    for _ in range(max_capacity + 5):
        test_data_structure.put(NALGrammar.Sentences.new_sentence_from_string("(a-->b)."))
        items_added += 1
        # until capacity is hit, every insert must grow the table by one
        if items_added <= max_capacity:
            assert len(test_data_structure) == items_added, "TEST FAILURE: Length of bag does not equal # of items added"

    assert (items_added > max_capacity), "TEST FAILURE: For this test, add more items than the capacity"
    assert (len(test_data_structure) == max_capacity), "TEST FAILURE: " + type(
        test_data_structure).__name__ + " did not maintain capacity on overflow"
71 |
72 |
def test_buffer_removemax():
    """
    Test if the Buffer can successfully remove its maximum value
    """
    buffer = NARSDataStructures.Buffers.Buffer(NARSDataStructures.Other.Task, capacity=10)
    priority_levels = [0.6, 0.2, 0.99, 0.5, 0.9]
    expected_maximum = max(priority_levels)
    for priority in priority_levels:
        # wrap a judgment task in an Item and tag it with the desired priority
        statement_term = NALGrammar.Terms.StatementTerm(NALGrammar.Terms.from_string("a"),
                                                        NALGrammar.Terms.from_string("b"),
                                                        NALSyntax.Copula.Inheritance)
        judgment = NALGrammar.Sentences.Judgment(statement_term, NALGrammar.Values.TruthValue(0.9, 0.9))
        item = NARSDataStructures.ItemContainers.Item(NARSDataStructures.Other.Task(judgment), -1)
        item.budget.set_priority(priority)
        buffer.put(item)
    extracted_maximum = buffer.extract_max().budget.get_priority()
    assert (extracted_maximum == expected_maximum), "TEST FAILURE: Buffer did not properly retrieve maximum value"
90 |
91 |
def test_buffer_removemin():
    """
    Test if the Buffer can successfully remove its minimum value
    """
    buffer = NARSDataStructures.Buffers.Buffer(NARSDataStructures.Other.Task, capacity=10)
    priority_levels = [0.6, 0.2, 0.99, 0.5, 0.9]
    expected_minimum = min(priority_levels)
    for priority in priority_levels:
        # wrap a judgment task in an Item and tag it with the desired priority
        statement_term = NALGrammar.Terms.StatementTerm(NALGrammar.Terms.from_string("a"),
                                                        NALGrammar.Terms.from_string("b"),
                                                        NALSyntax.Copula.Inheritance)
        judgment = NALGrammar.Sentences.Judgment(statement_term, NALGrammar.Values.TruthValue(0.9, 0.9))
        item = NARSDataStructures.ItemContainers.Item(NARSDataStructures.Other.Task(judgment), -1)
        item.budget.set_priority(priority)
        buffer.put(item)

    extracted_minimum = buffer.extract_min().budget.get_priority()
    assert (extracted_minimum == expected_minimum), "TEST FAILURE: Buffer did not properly retrieve minimum value"
111 |
112 |
def test_concept_termlinking():
    """
    Test if term links can be added and removed properly from a concept

    Peeking the statement concept (A-->B) is expected to term-link it with the
    concepts for its subject A and predicate B (asserted by the link counts below).
    """
    memory = NARSMemory.Memory()
    statement_concept = memory.peek_concept(NALGrammar.Terms.from_string("(A-->B)"))
    conceptA = memory.peek_concept(NALGrammar.Terms.from_string("A"))
    conceptB = memory.peek_concept(NALGrammar.Terms.from_string("B"))

    # the statement concept links to both atomic subterm concepts,
    # and each atomic concept links back to the statement concept
    assert (len(statement_concept.term_links) == 2), "TEST FAILURE: Concept " + str(
        statement_concept) + " does not have 2 termlinks"
    assert (len(conceptA.term_links) == 1), "TEST FAILURE: Concept " + str(
        conceptA) + " does not have 1 termlink. Has: " + str(conceptA.term_links.count)
    assert (len(conceptB.term_links) == 1), "TEST FAILURE: Concept " + str(
        conceptB) + " does not have 1 termlink. Has: " + str(conceptB.term_links.count)

    statement_concept.remove_term_link(conceptA)  # remove concept A's termlink

    # removal is symmetric: both ends of the A link are gone, B is untouched
    assert (len(statement_concept.term_links) == 1), "Concept " + str(statement_concept) + " does not have 1 termlink"
    assert (len(conceptA.term_links) == 0), "TEST FAILURE: Concept " + str(conceptA) + " does not have 0 termlinks"
    assert (len(conceptB.term_links) == 1), "TEST FAILURE: Concept " + str(conceptB) + " does not have 1 termlink"

    take = statement_concept.term_links.TAKE_USING_KEY(NARSDataStructures.ItemContainers.Item.get_key_from_object(
        conceptB)).object  # take out the only remaining concept (concept B)

    # taking the item out of the statement's bag does not affect B's own links
    assert (take == conceptB), "TEST FAILURE: Removed concept was not Concept 'B'"
    assert (len(conceptB.term_links) == 1), "TEST FAILURE: Concept does not have 1 termlink"
140 |
141 |
def test_bag_overflow_purge():
    """
    Test if bag stays within capacity when it overflows.
    """
    max_capacity = 10
    bag = NARSDataStructures.Bag.Bag(item_type=NALGrammar.Sentences.Sentence, capacity=max_capacity)
    items_added = 0

    for _ in range(max_capacity + 5):
        bag.PUT_NEW(NALGrammar.Sentences.new_sentence_from_string("(a-->b)."))
        items_added += 1
        # while under capacity, the bag must grow by exactly one per insertion
        if items_added <= max_capacity:
            assert len(bag) == items_added, "TEST FAILURE: Length of bag does not equal # of items added"

    assert (items_added > max_capacity), "TEST FAILURE: For this test, add more items than the capacity"
    assert (len(bag) == max_capacity), "TEST FAILURE: " + type(
        bag).__name__ + " did not maintain capacity on overflow"
160 |
161 |
def test_bag_clear():
    """
    Test if clearing the bag removes all of its items.

    (Docstring was previously copied from the overflow test; this function
    tests Bag.clear(), not overflow behavior.)
    """
    max_capacity = 5
    test_data_structure = NARSDataStructures.Bag.Bag(item_type=NALGrammar.Sentences.Sentence, capacity=max_capacity)
    items_added = 0

    for i in range(0, max_capacity):
        test_data_structure.PUT_NEW(NALGrammar.Sentences.new_sentence_from_string("(a-->b)."))
        items_added += 1
        if items_added <= max_capacity:
            assert len(
                test_data_structure) == items_added, "TEST FAILURE: Length of bag does not equal # of items added"

    test_data_structure.clear()

    # BUGFIX: corrected message typo "completelyy"
    assert len(test_data_structure) == 0, "TEST FAILURE: This test should empty the bag completely"
180 |
181 |
def test_bag_priority_changing_counts():
    """
    Test that changing an item's priority adjusts how many times its key
    appears in the bag's key list (its selection frequency).

    BUGFIX: this function was originally also named test_bag_priority_changing,
    so it was silently shadowed by the later definition of the same name and
    could never be called; renamed so both tests are reachable.
    """
    max_capacity = 10
    test_data_structure = NARSDataStructures.Bag.Bag(item_type=NALGrammar.Sentences.Sentence, capacity=max_capacity)
    items_added = 0

    expected_values = {}
    for i in range(0, max_capacity):
        item = test_data_structure.PUT_NEW(NALGrammar.Sentences.new_sentence_from_string("(a-->b)."))
        new_priority = random.random()
        test_data_structure.change_priority(item.key, new_priority)
        # after the change, the item's bag number is the expected occurrence count
        expected_values[item.key] = item.get_bag_number()
        items_added += 1

    # count how many times each key occurs in the bag's key list
    counts = {}
    for key in test_data_structure.item_keys:
        counts[key] = counts.get(key, 0) + 1

    for key in expected_values.keys():
        assert (counts[key] == expected_values[
            key]), "TEST FAILURE: Priority change did not remove/add proper number of values in Bag " + str(
            (counts[key], expected_values[key]))
209 |
210 |
def test_bag_priority_changing():
    """
    Test if changing priority works in bag
    """
    max_capacity = 10
    bag = NARSDataStructures.Bag.Bag(item_type=NALGrammar.Sentences.Sentence, capacity=max_capacity)
    items_added = 0

    expected_values = {}
    for _ in range(max_capacity):
        item = bag.PUT_NEW(NALGrammar.Sentences.new_sentence_from_string("(a-->b)."))
        target_priority = random.random()
        bag.change_priority(item.key, target_priority)
        expected_values[item.key] = target_priority
        items_added += 1

    # every stored item must now report exactly the priority it was assigned
    for index in range(len(bag)):
        lookup_key = bag.item_keys[index]
        stored_priority = bag.item_lookup_dict[lookup_key].budget.get_priority()
        assert (stored_priority == expected_values[
            lookup_key]), "TEST FAILURE: Priority value was not changed as expected " + str(
            (stored_priority, expected_values[lookup_key]))
233 |
234 |
def test_4_event_temporal_chaining():
    """
    Test that 4-event temporal chaining over a module holding N events
    yields N*(N+1)/2 - 1 results, for several module capacities.
    """
    def expected_num_of_results(num_events):
        # triangular number of event pairs, minus one
        return int(num_events * (num_events + 1) / 2 - 1)

    for capacity in (2, 3, 6, 10):
        event_buffer = NARSDataStructures.Buffers.TemporalModule(NARS=None, item_type=NARSDataStructures.Other.Task,
                                                                 capacity=capacity)

        # fill the module to capacity with distinct events (a_i --> b_i). :|:
        for i in range(capacity):
            event_buffer.PUT_NEW(NARSDataStructures.Other.Task(
                NALGrammar.Sentences.new_sentence_from_string("(a" + str(i) + "-->b" + str(i) + "). :|:")))

        actual = len(event_buffer.temporal_chaining_4())
        expected = expected_num_of_results(capacity)
        assert actual == expected, "ERROR: Event buffer of size " + str(capacity) + " produced " + str(
            actual) + " results, instead of expected " + str(expected)
252 |
253 |
def main():
    """
    Run every data structure test in sequence.

    NOTE(review): test_4_event_temporal_chaining is defined in this module
    but never invoked here — confirm whether that is intentional.
    """
    """
    Concept Tests
    """
    test_concept_termlinking()

    """
    Table Tests
    """
    # sentence creation in the table tests requires a live NARS instance
    Global.Global.NARS = NARS.NARS()  # need it for Stamp IDs
    test_table_removemax()
    test_table_removemin()
    test_table_overflow_purge()

    """
    Buffer Tests
    """
    test_buffer_removemax()
    test_buffer_removemin()
    # test_event_buffer_processing()

    """
    Bag Tests
    """
    test_bag_overflow_purge()
    test_bag_clear()
    test_bag_priority_changing()

    print("All Data Structure Tests successfully passed.")
283 |
284 |
if __name__ == "__main__":
    # allow running this module's tests directly from the command line
    main()
287 |
--------------------------------------------------------------------------------
/TestCases/GrammarTests.py:
--------------------------------------------------------------------------------
1 | import NARSDataStructures
2 | import NALGrammar
3 | import NALSyntax
4 | import NARS
5 | import NARSMemory
6 |
7 | """
8 | Author: Christian Hahm
9 | Created: March 11, 2021
10 | Purpose: Unit Testing for NARS grammar
11 | """
12 |
def calculate_syntactic_complexity_test():
    """
    Check _calculate_syntactic_complexity() against hand-computed values
    for several representative term strings.
    """
    cases = [
        ("A", 1),           # atomic term
        ("[A]", 2),         # singleton set compound term
        ("{A,B}", 5),       # becomes an intensional intersection of sets, (|,{A},{B})
        ("[(*,A,B)]", 4),   # singleton set containing a compound term
        ("(A-->B)", 3),     # statement term
    ]
    # construct all terms first, then verify each complexity in order
    terms = [NALGrammar.Terms.from_string(term_string) for term_string, _ in cases]
    for term, (_, expected_complexity) in zip(terms, cases):
        assert term._calculate_syntactic_complexity() == expected_complexity
35 |
def array_term_indexing_test():
    """
    Test indexing into a 5x5 SpatialTerm and the formatted string of the
    resulting array-element terms.
    """
    array_term_name = "M"
    array_term = NALGrammar.Terms.SpatialTerm(name=array_term_name, dimensions=(5, 5))  # create a 5x5 array term

    def element_string(index_string):
        # expected element format: connector + name + index-start + indices + index-end
        return (NALSyntax.TermConnector.ArrayConjunction.value +
                array_term_name +
                NALSyntax.StatementSyntax.ArrayElementIndexStart.value +
                index_string +
                NALSyntax.StatementSyntax.ArrayElementIndexEnd.value)

    # index [0.1, 0.1] is expected to format as "0.0, 0.0" — presumably the
    # indices snap to valid grid coordinates; confirm in SpatialTerm.__getitem__
    expected_string = element_string("0.0, 0.0")
    assert array_term[0.1, 0.1].get_formatted_string() == expected_string, "ERROR: " + expected_string

    assert array_term[-1.0, 0.0].get_formatted_string() == element_string("-1.0, 0.0")

    assert array_term[1.0, 0.5].get_formatted_string() == element_string("1.0, 0.5")
60 |
61 |
def main():
    """
    Run all grammar unit tests.
    """
    """
    Term Tests
    """
    calculate_syntactic_complexity_test()
    array_term_indexing_test()

    print("All Grammar Tests successfully passed.")
70 |
if __name__ == "__main__":
    # allow running this module's tests directly from the command line
    main()
--------------------------------------------------------------------------------
/TestCases/InferenceEngineTests.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import InputChannel
3 |
4 | import NALGrammar
5 | import NALInferenceRules.Local
6 | import NALInferenceRules.Conditional
7 |
8 | import NARSInferenceEngine
9 |
10 | """
11 | Author: Christian Hahm
12 | Created: March 23, 2021
13 | Purpose: Unit Testing for the Narsese Inference Engine.
14 | Tests if the inference engine returns all expected inference results for the given premises.
15 | """
16 |
17 |
def run_test(j1, j2=None):
    """
    Runs j1 and j2 through the inference engine.
    Returns an array of the outputs.
    :param j1: first premise sentence
    :param j2: optional second premise sentence
    :return: inference engine output tasks
    """
    if j2 is None:
        # single premise: one-premise (immediate/structural) inference
        return NARSInferenceEngine.do_inference_one_premise(j1)
    # two premises: semantic inference
    return NARSInferenceEngine.do_semantic_inference_two_premise(j1, j2)
33 |
34 |
def check_success(output_q: list, success_criteria: list):
    """
    Check whether the inference engine output satisfies every success criterion.

    BUGFIX: the previous annotations (`: []` and `: [str]`) were list *instances*,
    not types; replaced with valid annotations.

    :param output_q: queue holding inference engine output tasks;
        consumed (emptied) by this call
    :param success_criteria: array of strings that must be present in the output
        in order to be considered success
    :return: (True, "") if output passed all success_criteria
             (False, failed_criterion) if output failed a criterion
    """
    output = []
    while len(output_q) > 0:  # read and store result in log file
        output.append(output_q.pop().get_term_string_no_id())

    success = True
    failed_criterion = ""
    for criterion in success_criteria:
        # a criterion passes if any output line contains it as a substring
        if not any(criterion in line for line in output):
            success = False
            failed_criterion = criterion
            break

    return success, failed_criterion
60 |
61 |
def revision():
    """
    Test revision:
    j1: (S-->P). %1.0;0.9%
    j2: (S-->P). %1.0;0.9%

    :- (S-->P). %1.0;0.81%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(S-->P). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(S-->P). %1.0;0.9%")

    output_q = run_test(j1, j2)

    # the output must contain the term produced by the Revision rule
    criteria = [NALInferenceRules.Local.Revision(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Revision test failed: " + failed_criterion
81 |
82 |
def first_order_deduction():
    """
    Test first-order deduction:
    j1: (M-->P). %1.0;0.9%
    j2: (S-->M). %1.0;0.9%

    :- (S-->P). %1.0;0.81%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(M-->P). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(S-->M). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Syllogistic.Deduction(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: First-order Deduction test failed: " + failed_criterion
102 |
def first_order_induction():
    """
    Test first-order induction:
    j1: (M-->S). %1.0;0.9%
    j2: (M-->P). %1.0;0.9%

    :- (S-->P). %1.0;0.45%
    :- (P-->S). %1.0;0.45%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(M-->S). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(M-->P). %1.0;0.9%")

    output_q = run_test(j1, j2)

    # induction applies in both premise orders, yielding both conclusions
    criteria = [NALInferenceRules.Syllogistic.Induction(j1, j2).get_term_string_no_id(),
                NALInferenceRules.Syllogistic.Induction(j2, j1).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: First-order Induction test failed: " + failed_criterion
124 |
def first_order_abduction():
    """
    Test first-order abduction:
    j1: (S-->M). %1.0;0.9%
    j2: (P-->M). %1.0;0.9%

    :- (S-->P). %1.0;0.45%
    :- (P-->S). %1.0;0.45%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(S-->M). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(P-->M). %1.0;0.9%")

    output_q = run_test(j1, j2)

    # abduction applies in both premise orders, yielding both conclusions
    criteria = [NALInferenceRules.Syllogistic.Abduction(j1, j2).get_term_string_no_id(),
                NALInferenceRules.Syllogistic.Abduction(j2, j1).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: First-order Abduction test failed: " + failed_criterion
146 |
def first_order_analogy():
    """
    Test first-order analogy:
    j1: (chimp-->monkey). %1.0;0.9%
    j2: (human<->monkey). %1.0;0.9%

    :- (chimp-->human). %1.0;0.81%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(chimp-->monkey). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(human<->monkey). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Syllogistic.Analogy(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: First-order Analogy test failed: " + failed_criterion
166 |
def first_order_intensional_composition():
    """
    Test Intensional Composition rules:
    j1: (M --> S). %1.0;0.9%
    j2: (M --> P). %1.0;0.9%

    :- (M --> (S | P)).
    :- (M --> (S & P)).
    :- (M --> (S - P)).
    :- (M --> (P - S)).
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(M-->S). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(M-->P). %1.0;0.9%")

    output_q = run_test(j1, j2)

    # both intersections plus both orders of the difference must appear
    criteria = [
        NALInferenceRules.Composition.ConjunctionOrExtensionalIntersection(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.DisjunctionOrIntensionalIntersection(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.ExtensionalDifference(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.ExtensionalDifference(j2, j1).get_term_string_no_id(),
    ]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Intensional Composition test failed: " + failed_criterion
192 |
def first_order_extensional_composition():
    """
    Test Extensional Composition rules:
    j1: (S-->M). %1.0;0.9%
    j2: (P-->M). %1.0;0.9%

    :- ((S | P) --> M).
    :- ((S & P) --> M).
    :- ((S ~ P) --> M).
    :- ((P ~ S) --> M).
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(S-->M). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(P-->M). %1.0;0.9%")

    output_q = run_test(j1, j2)

    # both intersections plus both orders of the difference must appear
    criteria = [
        NALInferenceRules.Composition.ConjunctionOrExtensionalIntersection(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.DisjunctionOrIntensionalIntersection(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.IntensionalDifference(j1, j2).get_term_string_no_id(),
        NALInferenceRules.Composition.IntensionalDifference(j2, j1).get_term_string_no_id(),
    ]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Extensional Composition test failed: " + failed_criterion
217 |
def first_order_exemplification():
    """
    Test first-order exemplification:
    j1: (P-->M). %1.0;0.9%
    j2: (M-->S). %1.0;0.9%

    :- (S-->P).
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(P-->M). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(M-->S). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Syllogistic.Exemplification(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Exemplification test failed: " + failed_criterion
236 |
237 |
def extensional_image():
    """
    Test Extensional Image rule:
    j: ((*,S,P)-->R). %1.0;0.9%

    :- j: (S-->(/,R,_,P)). %1.0;0.9%
    :- j: (P-->(/,R,S,_)). %1.0;0.9%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("((*,S,P)-->R). %1.0;0.9%")

    # single-premise inference
    output_q = run_test(j1)

    criteria = [NALInferenceRules.Immediate.ExtensionalImage(j1).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Extensional Image test failed: " + failed_criterion
255 |
def conditional_analogy():
    """
    Test Conditional Analogy rule:
    j1: (a-->b). %1.0;0.9%
    j2: ((a-->b)<=>(c-->d)). %1.0;0.9%

    :- (c-->d). %1.0;0.81%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("(a-->b). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("((a-->b)<=>(c-->d)). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Conditional.ConditionalAnalogy(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Conditional Analogy test failed: " + failed_criterion
274 |
def conditional_deduction():
    """
    Test Conditional Deduction rule:
    j1: ((a-->b)==>(c-->d)). %1.0;0.9%
    j2: (a-->b). %1.0;0.9%

    :- (c-->d). %1.0;0.81%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("((a-->b)==>(c-->d)). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(a-->b). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Conditional.ConditionalJudgmentDeduction(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Conditional Deduction test failed: " + failed_criterion
293 |
def conditional_abduction():
    """
    Test Conditional Abduction rule:
    j1: ((a-->b)==>(c-->d)). %1.0;0.9%
    j2: (c-->d). %1.0;0.9%

    :- (a-->b). %1.0;0.45%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("((a-->b)==>(c-->d)). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(c-->d). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Conditional.ConditionalJudgmentAbduction(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Conditional Abduction test failed: " + failed_criterion
312 |
def conditional_conjunctional_deduction():
    """
    Test Conditional Conjunctional Deduction rule:
    j1: (C1 && C2 && ... CN && S) ==> P %1.0;0.9%
    j2: S %1.0;0.9%

    :- (C1 && C2 && ... CN) ==> P %1.0;0.81%
    """
    # test removal of second element of the conjunction
    j1 = NALGrammar.Sentences.new_sentence_from_string("((&&,(a-->b),(c-->d))==>(e-->f)). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("(c-->d). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Conditional.ConditionalConjunctionalDeduction(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Conditional Conjunctional Deduction test failed: " + failed_criterion
333 |
def conditional_conjunctional_abduction():
    """
    Test Conditional Conjunctional Abduction rule:
    j1: (C1 && C2 && ... CN && S) ==> P %1.0;0.9%
    j2: (C1 && C2 && ... CN) ==> P %1.0;0.9%

    :- S %1.0;0.45%
    """
    j1 = NALGrammar.Sentences.new_sentence_from_string("((&&,(a-->b),(c-->d))==>(e-->f)). %1.0;0.9%")
    j2 = NALGrammar.Sentences.new_sentence_from_string("((c-->d)==>(e-->f)). %1.0;0.9%")

    output_q = run_test(j1, j2)

    criteria = [NALInferenceRules.Conditional.ConditionalConjunctionalAbduction(j1, j2).get_term_string_no_id()]
    success, failed_criterion = check_success(output_q, criteria)

    assert success, "TEST FAILURE: Conditional Conjunctional Abduction test failed: " + failed_criterion
352 |
def main():
    """
    Run all inference engine unit tests.

    NOTE(review): the Conditional Syllogism tests below are commented out —
    confirm whether they are known-failing or merely disabled.
    """
    revision()

    """
    First-Order syllogism tests
    """
    first_order_abduction()
    first_order_analogy()
    first_order_deduction()
    first_order_induction()
    first_order_exemplification()

    """
    Composition
    """
    first_order_extensional_composition()
    first_order_intensional_composition()

    """
    Conditional Syllogism
    """
    # conditional_abduction()
    # conditional_analogy()
    # conditional_deduction()
    # conditional_conjunctional_deduction()
    # conditional_conjunctional_abduction()

    print("All Inference Engine Tests successfully passed.")
381 |
if __name__ == "__main__":
    # allow running this module's tests directly from the command line
    main()
--------------------------------------------------------------------------------
/TestCases/VisionTests/CIFAR10VisionTest.py:
--------------------------------------------------------------------------------
1 | from keras.datasets import cifar10
2 |
3 | from TestCases.VisionTests.GenericVisionTest import GenericVisionTest
4 |
if __name__ == "__main__":
    # Run the generic vision test harness on the CIFAR-10 dataset.
    # Small train/test sizes keep the run short; the meaning of
    # training_cycles is defined by GenericVisionTest — confirm there.
    vision_test = GenericVisionTest(dataset_loader=cifar10,
                                    gui_enabled=True,
                                    train_size=10,
                                    test_size=10,
                                    training_cycles=1000)
    vision_test.run_main_test()
--------------------------------------------------------------------------------
/TestCases/VisionTests/MNISTVisionTest.py:
--------------------------------------------------------------------------------
1 | from keras.datasets import mnist
2 |
3 | from TestCases.VisionTests.GenericVisionTest import GenericVisionTest
4 |
if __name__ == "__main__":
    # Run the generic vision test harness on the MNIST dataset.
    # Small train/test sizes keep the run short; the meaning of
    # training_cycles is defined by GenericVisionTest — confirm there.
    vision_test = GenericVisionTest(dataset_loader=mnist,
                                    gui_enabled=True,
                                    train_size=100,
                                    test_size=10,
                                    training_cycles=5)
    vision_test.run_main_test()
--------------------------------------------------------------------------------
/build.bat:
--------------------------------------------------------------------------------
rem Build a single-file Windows executable of NARS-Python with PyInstaller.
rem Assumes Anaconda is installed under the current user's profile and an
rem environment named "NARS-Python" exists.
set condapath="C:\Users\%USERNAME%\Anaconda3"
call %condapath%\Scripts\activate.bat %condapath%
call activate NARS-Python
rem remember the repository root and build from it
set NARS_ROOT=%cd%
cd %NARS_ROOT%
pyinstaller --onefile main.py
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 | Author: Christian Hahm
4 | Created: April 10, 2020
5 | Purpose: Program entry point
6 | """
7 |
8 |
9 |
10 | import threading
11 | import multiprocessing
12 | import Config
13 | import Global
14 | import InputChannel
15 | import NARSGUI
16 | import NARS
17 |
18 |
19 |
class GUIProcess(multiprocessing.Process):
    """
    Launches the GUI in a separate OS process.

    Gathers string identifiers and capacities of the NARS data structures the
    GUI displays, opens two pipes between the NARS and GUI processes, starts
    the GUI process, and blocks until the GUI signals that it is ready.
    """
    def __init__(self):
        # BUGFIX: annotation previously referenced the module `NARS`, not the class
        NARS_object: NARS.NARS = Global.Global.NARS
        narsese_buffer_ID = (str(NARS_object.global_buffer), type(NARS_object.global_buffer).__name__)
        narsese_buffer_capacity = NARS_object.global_buffer.capacity
        # BUGFIX: the vision buffer's type name was copy-pasted from the global
        # buffer (type(NARS_object.global_buffer).__name__), mislabeling it in the GUI
        vision_buffer_ID = (str(NARS_object.vision_buffer), type(NARS_object.vision_buffer).__name__)
        vision_buffer_dims = str(NARS_object.vision_buffer.dimensions)
        temporal_module_ID = (str(NARS_object.temporal_module), type(NARS_object.temporal_module).__name__)
        temporal_module_capacity = NARS_object.temporal_module.capacity
        memory_bag_ID = (str(NARS_object.memory.concepts_bag), type(NARS_object.memory.concepts_bag).__name__)
        memory_bag_capacity = NARS_object.memory.concepts_bag.capacity

        data_structure_IDs = (narsese_buffer_ID, vision_buffer_ID, temporal_module_ID, memory_bag_ID)
        data_structure_capacities = (narsese_buffer_capacity, vision_buffer_dims, temporal_module_capacity, memory_bag_capacity)

        # multiprocess pipes to pass objects between NARS and GUI Processes
        pipe_gui_objects, pipe_NARS_objects = multiprocessing.Pipe()  # 2-way object request pipe
        pipe_gui_strings, pipe_NARS_strings = multiprocessing.Pipe()  # 1-way string pipe
        Global.Global.NARS_object_pipe = pipe_NARS_objects
        Global.Global.NARS_string_pipe = pipe_NARS_strings

        multiprocessing.Process.__init__(self, target=NARSGUI.start_gui,
                                         args=(Config.GUI_USE_INTERFACE,
                                               data_structure_IDs,
                                               data_structure_capacities,
                                               pipe_gui_objects,
                                               pipe_gui_strings),
                                         name="GUI thread",
                                         daemon=True)
        self.start()

        # block until the GUI process sends its ready signal over the object pipe
        while not pipe_NARS_objects.poll(1.0):
            print('Waiting for GUI Process to start')
        pipe_NARS_objects.recv()  # received GUI ready signal
54 |
55 |
56 |
def main(start=True):
    """
    This is where the program starts
    Creates threads, populates globals, and runs the NARS.

    :param start: if True, unpause and run the NARS reasoning loop in the
        shell (blocking); if False, only perform setup
    """

    # First, create the NARS
    NARS_object = NARS.NARS()
    Global.Global.NARS = NARS_object

    # setup internal/interface GUI (blocks until the GUI process reports ready)
    if Config.GUI_USE_INTERFACE:
        GUIProcess()

    # launch shell input thread (daemon: dies with the main process)
    shell_input_thread = threading.Thread(target=InputChannel.get_user_input,
                                          name="Shell input thread",
                                          daemon=True)
    shell_input_thread.start()

    if start:
        # Finally, run NARS in the shell
        Global.Global.set_paused(False)
        print('Starting NARS in the shell.')
        Global.Global.NARS.startup_and_run()
83 |
84 |
85 |
86 |
87 |
if __name__ == "__main__":
    # On Windows calling this function is necessary for frozen (PyInstaller)
    # executables that spawn child processes.
    # On Linux/OSX it does nothing.
    multiprocessing.freeze_support()
    main()
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | depq==1.5.5
2 | dill==0.3.8
3 | keras==2.10.0
4 | numpy==2.1.3
5 | Pillow==11.0.0
6 | sortedcontainers==2.4.0
7 | tensorflow==2.10.0
8 | tk==8.6.14
--------------------------------------------------------------------------------
/terminal.bat:
--------------------------------------------------------------------------------
rem Open a command prompt with the Anaconda "NARS-Python" environment
rem activated, rooted at the repository directory.
set condapath="C:\Users\%USERNAME%\Anaconda3"
call %condapath%\Scripts\activate.bat %condapath%
call activate NARS-Python
set NARS_ROOT=%cd%
cd %NARS_ROOT%
rem keep the shell open for interactive use
cmd /K
--------------------------------------------------------------------------------