Stack vs. stream benchmarks
148 |
149 |
150 | ```python
151 | -------------------------------------------------------------------------------- benchmark 'reify_chain size=10': 2 tests -------------------------------------------------------------------------------
152 | Name (time in us) Min Max Mean StdDev Median IQR Outliers OPS (Kops/s) Rounds Iterations
153 | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
154 | test_reify_chain_stack[10] 41.0790 (1.0) 545.1940 (3.20) 52.9087 (1.07) 9.7964 (1.04) 50.8650 (1.08) 6.4301 (8.37) 11815;10849 18.9005 (0.93) 260164 1
155 | test_reify_chain_stream[10] 42.4410 (1.03) 170.5540 (1.0) 49.3080 (1.0) 9.3993 (1.0) 47.2400 (1.0) 0.7680 (1.0) 14962;102731 20.2807 (1.0) 278113 1
156 | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
157 |
158 | ------------------------------------------ benchmark 'reify_chain size=1000': 1 tests -----------------------------------------
159 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
160 | -------------------------------------------------------------------------------------------------------------------------------
161 | test_reify_chain_stream_large[1000] 7.7722 28.2579 10.0723 2.5087 9.4899 0.3106 70;155 99.2820 1528 1
162 | -------------------------------------------------------------------------------------------------------------------------------
163 |
164 | ------------------------------------------------------------------------- benchmark 'reify_chain size=300': 2 tests --------------------------------------------------------------------------
165 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
166 | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
167 | test_reify_chain_stack[300] 1.5183 (1.0) 22.1821 (1.19) 1.9826 (1.0) 1.5511 (1.16) 1.7410 (1.0) 0.0801 (1.0) 144;684 504.3878 (1.0) 7201 1
168 | test_reify_chain_stream[300] 1.7059 (1.12) 18.6020 (1.0) 2.1237 (1.07) 1.3389 (1.0) 1.9260 (1.11) 0.1020 (1.27) 118;585 470.8745 (0.93) 6416 1
169 | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
170 |
171 | --------------------------------------------------------------------------------- benchmark 'reify_chain size=35': 2 tests --------------------------------------------------------------------------------
172 | Name (time in us) Min Max Mean StdDev Median IQR Outliers OPS (Kops/s) Rounds Iterations
173 | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
174 | test_reify_chain_stream[35] 129.2780 (1.0) 868.1510 (1.02) 190.0433 (1.11) 36.2784 (1.41) 179.5690 (1.08) 21.5360 (2.30) 1535;1455 5.2620 (0.90) 26072 1
175 | test_reify_chain_stack[35] 150.7850 (1.17) 853.7920 (1.0) 170.5166 (1.0) 25.7944 (1.0) 165.8500 (1.0) 9.3530 (1.0) 3724;5480 5.8645 (1.0) 81286 1
176 | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
177 |
178 | ------------------------------------------- benchmark 'reify_chain size=5000': 1 tests ------------------------------------------
179 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
180 | ---------------------------------------------------------------------------------------------------------------------------------
181 | test_reify_chain_stream_large[5000] 46.9073 86.9737 52.9724 6.6919 49.6787 3.9609 68;68 18.8778 292 1
182 | ---------------------------------------------------------------------------------------------------------------------------------
183 |
184 | ------------------------------------------------------------------------------- benchmark 'unify_chain size=10': 2 tests -------------------------------------------------------------------------------
185 | Name (time in us) Min Max Mean StdDev Median IQR Outliers OPS (Kops/s) Rounds Iterations
186 | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
187 | test_unify_chain_stream[10] 77.6280 (1.0) 307.9130 (1.0) 86.7625 (1.0) 17.5355 (1.20) 82.7525 (1.0) 1.7290 (1.0) 809;1736 11.5257 (1.0) 15524 1
188 | test_unify_chain_stack[10] 92.9890 (1.20) 309.8770 (1.01) 104.2017 (1.20) 14.6694 (1.0) 101.0160 (1.22) 4.2368 (2.45) 3657;6651 9.5968 (0.83) 73379 1
189 | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
190 |
191 | ------------------------------------------- benchmark 'unify_chain size=1000': 1 tests ------------------------------------------
192 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
193 | ---------------------------------------------------------------------------------------------------------------------------------
194 | test_unify_chain_stream_large[1000] 27.3518 65.5924 31.1374 4.2563 29.5148 3.5286 38;35 32.1158 496 1
195 | ---------------------------------------------------------------------------------------------------------------------------------
196 |
197 | ------------------------------------------------------------------------- benchmark 'unify_chain size=300': 2 tests --------------------------------------------------------------------------
198 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
199 | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
200 | test_unify_chain_stream[300] 3.6957 (1.0) 13.1876 (1.0) 4.4439 (1.0) 1.0719 (1.42) 4.2080 (1.0) 0.2410 (1.67) 51;95 225.0298 (1.0) 1114 1
201 | test_unify_chain_stack[300] 4.2952 (1.16) 13.4294 (1.02) 4.7732 (1.07) 0.7555 (1.0) 4.6623 (1.11) 0.1446 (1.0) 36;136 209.5024 (0.93) 2911 1
202 | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
203 |
204 | --------------------------------------------------------------------------------- benchmark 'unify_chain size=35': 2 tests ---------------------------------------------------------------------------------
205 | Name (time in us) Min Max Mean StdDev Median IQR Outliers OPS (Kops/s) Rounds Iterations
206 | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
207 | test_unify_chain_stream[35] 285.6880 (1.0) 934.9690 (1.0) 324.5402 (1.0) 40.8338 (1.0) 319.8520 (1.0) 20.4375 (1.0) 962;1159 3.0813 (1.0) 24331 1
208 | test_unify_chain_stack[35] 345.2770 (1.21) 1,088.3650 (1.16) 407.9067 (1.26) 52.2263 (1.28) 396.6640 (1.24) 20.6560 (1.01) 2054;3027 2.4515 (0.80) 37594 1
209 | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
210 |
211 | --------------------------------------------- benchmark 'unify_chain size=5000': 1 tests ---------------------------------------------
212 | Name (time in ms) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations
213 | --------------------------------------------------------------------------------------------------------------------------------------
214 | test_unify_chain_stream_large[5000] 555.2733 754.9897 605.4949 50.6124 591.1251 61.4030 2;2 1.6515 26 1
215 | --------------------------------------------------------------------------------------------------------------------------------------
216 |
217 | Legend:
218 | Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
219 | OPS: Operations Per Second, computed as 1 / Mean
220 | ```
221 |
222 |
223 |
224 |
225 | ## About
226 |
227 | This project is a fork of [`unification`](https://github.com/mrocklin/unification/).
228 |
229 | ## Development
230 |
231 | Install the development dependencies:
232 |
233 | ```bash
234 | $ pip install -r requirements.txt
235 | ```
236 |
237 | Set up `pre-commit` hooks:
238 |
239 | ```bash
240 | $ pre-commit install --install-hooks
241 | ```
242 |
--------------------------------------------------------------------------------
/examples/account.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | from functools import partial
3 |
4 | from unification import var
5 | from unification.match import VarDispatcher, match
6 |
# Use a dispatcher that passes matched logic variables to the decorated
# function's parameters *by name* (see `VarDispatcher`).
match = partial(match, Dispatcher=VarDispatcher)

# Running balance per account name; unknown names start at zero.
balance = defaultdict(lambda: 0)

name, amount = var("name"), var("amount")


@match({"status": 200, "data": {"name": name, "credit": amount}})
def respond(name, amount):
    # Credit: add the matched amount to the matched account.
    balance[name] += amount


@match({"status": 200, "data": {"name": name, "debit": amount}})
def respond(name, amount):
    # Debit: re-registering `respond` extends the dispatcher rather than
    # shadowing the previous pattern (flake8 F811 is ignored for this file).
    balance[name] -= amount


@match({"status": 404})
def respond():
    # No variables in this pattern, so the handler takes no arguments.
    print("Bad Request")


if __name__ == "__main__":
    respond({"status": 200, "data": {"name": "Alice", "credit": 100}})
    respond({"status": 200, "data": {"name": "Bob", "debit": 100}})
    respond({"status": 404})
    print(dict(balance))
34 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e ./
2 | coveralls
3 | pydocstyle>=3.0.0
4 | pytest>=5.0.0
5 | pytest-cov>=2.6.1
6 | isort
7 | pytest-html>=1.20.0
8 | pytest-benchmark
9 | pylint>=2.3.1
black>=19.3b0; platform_python_implementation!='PyPy'
11 | diff-cover
12 | versioneer
13 | coverage>=5.1
14 | pre-commit
15 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [versioneer]
2 | VCS = git
3 | style = pep440
4 | versionfile_source = unification/_version.py
5 | versionfile_build = unification/_version.py
6 | tag_prefix = v
7 | parentdir_prefix = unification-
8 |
9 | [pydocstyle]
10 | # Ignore errors for missing docstrings.
11 | # Ignore D202 (No blank lines allowed after function docstring)
12 | # due to bug in black: https://github.com/ambv/black/issues/355
13 | add-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D202
14 | convention = numpy
15 |
16 | [tool:pytest]
17 | python_functions=test_*
18 | python_files=test*.py
19 | testpaths=tests
20 |
21 | [coverage:run]
22 | relative_files = True
23 | omit =
24 | unification/_version.py
25 | tests/*
26 |
27 | [coverage:report]
28 | exclude_lines =
29 | pragma: no cover
30 |
31 | raise NotImplementedError
32 |
33 | [isort]
34 | multi_line_output = 3
35 | include_trailing_comma = True
36 | force_grid_wrap = 0
37 | use_parentheses = True
38 | ensure_newline_before_comments = True
39 | line_length = 88
40 |
41 | [flake8]
42 | max-line-length = 88
43 | extend-ignore = E203, W503
44 | per-file-ignores =
45 | **/__init__.py:F401,E402,F403
46 | examples/account.py:F811
47 | tests/test_match.py:F811
48 |
49 | [pylint]
50 | max-line-length = 88
51 |
52 | [pylint.messages_control]
53 | disable = C0330, C0326
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from os.path import exists
3 |
4 | from setuptools import setup
5 |
6 | import versioneer
7 |
# Read the long description from the README when present (it may be absent
# in minimal source checkouts).  Use a context manager so the file handle is
# closed deterministically instead of leaking until garbage collection.
if exists("README.md"):
    with open("README.md") as readme_fh:
        LONG_DESCRIPTION = readme_fh.read()
else:
    LONG_DESCRIPTION = ""

setup(
    name="logical-unification",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Logical unification in Python",
    url="http://github.com/pythological/unification/",
    maintainer="Brandon T. Willard",
    maintainer_email="brandonwillard+unification@gmail.com",
    license="BSD",
    keywords="unification logic-programming dispatch",
    packages=["unification"],
    install_requires=[
        "toolz",
        "multipledispatch",
    ],
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    zip_safe=False,
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries",
    ],
)
44 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pythological/unification/5e3e4aba1a4f63c6a4abe7d8a5c6fbbce36fa488/tests/__init__.py
--------------------------------------------------------------------------------
/tests/test_benchmarks.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import sys
3 |
4 | import pytest
5 |
6 | from tests.utils import gen_long_chain
7 | from unification import assoc, isvar, reify, unify, var
8 | from unification.utils import transitive_get as walk
9 |
# Chain depths exercised by the small stack-vs-stream micro-benchmarks.
nesting_sizes = [10, 35, 300]
11 |
12 |
def unify_stack(u, v, s):
    """Unify two terms using plain call-stack recursion (benchmark baseline)."""
    u, v = walk(u, s), walk(v, s)

    if u == v:
        return s
    if isvar(u):
        return assoc(s, u, v)
    if isvar(v):
        return assoc(s, v, u)

    if type(u) == type(v) and isinstance(u, (tuple, list)):
        # Unify element-wise, threading the substitution through.
        for u_elem, v_elem in zip(u, v):
            s = unify_stack(u_elem, v_elem, s)
            if s is False:
                break
        return s

    return False
34 |
35 |
def reify_stack(u, s):
    """Reify a term using plain call-stack recursion (benchmark baseline)."""
    walked = walk(u, s)

    # A successful walk means `u` was a bound variable; keep resolving.
    if walked is not u:
        return reify_stack(walked, s)

    if isinstance(walked, (tuple, list)):
        return type(walked)(reify_stack(elem, s) for elem in walked)

    return walked
47 |
48 |
@pytest.mark.benchmark(group="unify_chain")
@pytest.mark.parametrize("size", nesting_sizes)
def test_unify_chain_stream(size, benchmark):
    """Benchmark the stream-based `unify` on a deeply nested chain."""
    chain_var = var()
    form, _ = gen_long_chain(chain_var, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    result = benchmark(unify, form, term, {})
    assert result[chain_var] == "a"
58 |
59 |
@pytest.mark.benchmark(group="unify_chain")
@pytest.mark.parametrize("size", nesting_sizes)
def test_unify_chain_stack(size, benchmark):
    """Benchmark the recursive `unify_stack` baseline on a nested chain."""
    chain_var = var()
    form, _ = gen_long_chain(chain_var, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    result = benchmark(unify_stack, form, term, {})
    assert result[chain_var] == "a"
69 |
70 |
@pytest.mark.benchmark(group="reify_chain")
@pytest.mark.parametrize("size", nesting_sizes)
def test_reify_chain_stream(size, benchmark):
    """Benchmark the stream-based `reify` on a deeply nested chain.

    Bug fix: this "stream" benchmark previously called `reify_stack`,
    making it an exact duplicate of `test_reify_chain_stack` and leaving
    the stream implementation unmeasured at these sizes.
    """
    a_lv = var()
    form, lvars = gen_long_chain(a_lv, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    lvars.update({a_lv: "a"})
    res = benchmark(reify, form, lvars)
    assert res == term
81 |
82 |
@pytest.mark.benchmark(group="reify_chain")
@pytest.mark.parametrize("size", nesting_sizes)
def test_reify_chain_stack(size, benchmark):
    """Benchmark the recursive `reify_stack` baseline on a nested chain."""
    chain_var = var()
    form, bindings = gen_long_chain(chain_var, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    bindings[chain_var] = "a"
    result = benchmark(reify_stack, form, bindings)
    assert result == term
93 |
94 |
@pytest.mark.benchmark(group="unify_chain")
@pytest.mark.parametrize("size", [1000, 5000])
def test_unify_chain_stream_large(size, benchmark):
    """Benchmark the stream-based `unify` on very deep chains."""
    chain_var = var()
    form, _ = gen_long_chain(chain_var, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    result = benchmark(unify, form, term, {})
    assert result[chain_var] == "a"
104 |
105 |
@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="PyPy's sys.getrecursionlimit changes",
)
@pytest.mark.benchmark(group="reify_chain")
@pytest.mark.parametrize("size", [sys.getrecursionlimit(), sys.getrecursionlimit() * 5])
def test_reify_chain_stream_large(size, benchmark):
    """Benchmark stream-based `reify` on chains around the recursion limit.

    `reify` itself completes for both sizes; for the larger size only the
    *comparison* of the deeply nested result is expected to overflow the
    interpreter stack.
    """
    a_lv = var()
    form, lvars = gen_long_chain(a_lv, size, use_lvars=True)
    term, _ = gen_long_chain("a", size)

    lvars.update({a_lv: "a"})

    res = benchmark(reify, form, lvars)

    if size < sys.getrecursionlimit():
        assert res == term
    else:
        # Comparing a chain deeper than the recursion limit recurses past it.
        with pytest.raises(RecursionError):
            assert res == term
126 |
--------------------------------------------------------------------------------
/tests/test_core.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from collections import OrderedDict
3 | from types import MappingProxyType
4 |
5 | import pytest
6 |
7 | from tests.utils import gen_long_chain
8 | from unification import var
9 | from unification.core import assoc, isground, reify, unground_lvars, unify
10 | from unification.utils import freeze
11 |
12 |
def test_assoc():
    """`assoc` copies plain dicts but can be taught to mutate other types."""
    d = {"a": 1, 2: 2}
    assert assoc(d, "c", 3) is not d
    assert assoc(d, "c", 3) == {"a": 1, 2: 2, "c": 3}
    assert assoc(d, 2, 3) == {"a": 1, 2: 3}
    assert assoc(d, "a", 0) == {"a": 0, 2: 2}
    # The original dict is never modified.
    assert d == {"a": 1, 2: 2}

    def assoc_OrderedDict(s, u, v):
        s[u] = v
        return s

    # Register an in-place implementation for OrderedDict.
    assoc.add((OrderedDict, object, object), assoc_OrderedDict)

    x = var()
    d2 = OrderedDict(d)
    # The registered implementation mutates and returns the same object...
    assert assoc(d2, x, 3) is d2
    assert assoc(d2, x, 3) == {"a": 1, 2: 2, x: 3}
    # ...while plain dicts still get the copying behavior.
    assert assoc(d, x, 3) is not d
32 |
33 |
def test_reify():
    """Reify variables, nested tuples, and chained substitutions."""
    x, y, z = var(), var(), var()
    s = {x: 1, y: 2, z: (x, y)}
    assert reify(x, s) == 1
    assert reify(10, s) == 10
    assert reify((1, y), s) == (1, 2)
    assert reify((1, (x, (y, 2))), s) == (1, (1, (2, 2)))
    assert reify(z, s) == (1, 2)
    # Read-only mappings are accepted as substitutions, too.
    assert reify(z, MappingProxyType(s)) == (1, 2)


def test_reify_Mapping():
    """Reification recurses into dict and OrderedDict values."""
    x, y = var(), var()
    s = {x: 2, y: 4}
    e = [(1, x), (3, {5: y})]
    expected_res = [(1, 2), (3, {5: 4})]
    assert reify(dict(e), s) == dict(expected_res)
    assert reify(OrderedDict(e), s) == OrderedDict(expected_res)


def test_reify_Set():
    """Reification recurses into set and frozenset elements."""
    x, y = var(), var()
    assert reify({1, 2, x, y}, {x: 3}) == {1, 2, 3, y}
    assert reify(frozenset({1, 2, x, y}), {x: 3}) == frozenset({1, 2, 3, y})


def test_reify_list():
    """Reification recurses into nested lists."""
    x, y = var(), var()
    s = {x: 2, y: 4}
    e = [1, [x, 3], y]
    assert reify(e, s) == [1, [2, 3], 4]


def test_reify_complex():
    """Reification handles mixed nested containers and slices."""
    x, y = var(), var()
    s = {x: 2, y: 4}
    e = {1: [x], 3: (y, 5)}

    assert reify(e, s) == {1: [2], 3: (4, 5)}
    assert reify((1, {2: x}), {x: slice(0, y), y: 3}) == (1, {2: slice(0, 3)})


def test_reify_slice():
    """Reification substitutes into the components of a `slice`."""
    x = var()
    assert reify(slice(1, x, 3), {x: 10}) == slice(1, 10, 3)
79 |
80 |
def test_unify():
    """Unify variables with values and with each other, walking chains."""
    x, y, z = var(), var(), var()
    assert unify(x, x, {}) == {}
    assert unify(1, 1, {}) == {}
    assert unify(1, 2, {}) is False
    assert unify(x, 2, {}) == {x: 2}
    assert unify(2, x, {}) == {x: 2}
    # Read-only mappings are accepted as substitutions.
    assert unify(2, x, MappingProxyType({})) == {x: 2}
    assert unify(x, y, {}) == {x: y}
    assert unify(y, x, {}) == {y: x}
    assert unify(y, x, {y: x}) == {y: x}
    assert unify(x, y, {y: x}) == {y: x}
    assert unify(y, x, {x: y}) == {x: y}
    assert unify(x, y, {x: y}) == {x: y}
    # Existing bindings are walked before new ones are added.
    assert unify(y, x, {y: z}) == {y: z, z: x}
    assert unify(x, y, {y: z}) == {y: z, x: z}


def test_unify_slice():
    """Slices unify component-wise (start, stop, step)."""
    x, y = var(), var()
    assert unify(slice(1), slice(1), {}) == {}
    assert unify(slice(1, 2, 1), slice(2, 2, 1), {}) is False
    assert unify(slice(1, 2, 1), slice(x, 2, 1), {x: 2}) is False
    assert unify(slice(1, 2, 1), slice(1, 3, 1), {}) is False
    assert unify(slice(1, 4, 2), slice(1, 4, 1), {}) is False
    assert unify(slice(x), slice(x), {}) == {}
    assert unify(slice(1, 2, 3), x, {}) == {x: slice(1, 2, 3)}
    assert unify(slice(1, 2, None), slice(x, y), {}) == {x: 1, y: 2}


def test_unify_iter():
    """Iterators unify element-wise, but not across container types."""
    x = var()
    assert unify([1], (1,)) is False
    assert unify((i for i in [1, 2]), [1, 2]) is False
    assert unify(iter([1, x]), iter([1, 2])) == {x: 2}


def test_unify_seq():
    """Tuples/lists unify element-wise when types and lengths match."""
    x = var()
    assert unify([], [], {}) == {}
    assert unify([x], [x], {}) == {}
    assert unify((1, 2), (1, 2), {}) == {}
    assert unify([1, 2], [1, 2], {}) == {}
    assert unify((1, 2), (1, 2, 3), {}) is False
    assert unify((1, x), (1, 2), {}) == {x: 2}
    assert unify((1, x), (1, 2), {x: 3}) is False

    a, b, z = var(), var(), var()
    # A variable already bound to a sequence unifies element-wise with one.
    assert unify([a, b], x, {x: [z, 1]}) == {x: [z, 1], a: z, b: 1}


def test_unify_set():
    """Sets unify when variables can account for the unmatched elements."""
    x, y = var(), var()
    assert unify(set(), set(), {}) == {}
    assert unify({x}, {x}, {}) == {}
    assert unify({1, 2}, {1, 2}, {}) == {}
    assert unify({1, x}, {1, 2}, {}) == {x: 2}
    assert unify({x, 2}, {1, 2}, {}) == {x: 1}
    assert unify({1, y, x}, {2, 1}, {x: 2}) is False


def test_unify_dict():
    """Dicts unify value-wise only when their key sets are identical."""
    x = var()
    assert unify({1: 2}, {1: 2}, {}) == {}
    assert unify({1: x}, {1: x}, {}) == {}
    assert unify({1: 2}, {1: 3}, {}) is False
    assert unify({2: 2}, {1: 2}, {}) is False
    assert unify({2: 2, 3: 3}, {1: 2}, {}) is False
    assert unify({1: x}, {1: 2}, {}) == {x: 2}


def test_unify_complex():
    """Unification recurses through arbitrarily mixed containers."""
    x, y = var(), var()
    assert unify((1, {2: 3}), (1, {2: 3}), {}) == {}
    assert unify((1, {2: 3}), (1, {2: 4}), {}) is False
    assert unify((1, {2: x}), (1, {2: 4}), {}) == {x: 4}
    assert unify((1, {2: x}), (1, {2: slice(1, y)}), {y: 2}) == {x: slice(1, y), y: 2}
    assert unify({1: (2, 3)}, {1: (2, x)}, {}) == {x: 3}
    assert unify({1: [2, 3]}, {1: [2, x]}, {}) == {x: 3}
160 |
161 |
def test_unground_lvars():
    """`unground_lvars` collects unbound variables; `isground` tests for none.

    Both are exercised across tuple/list/iter/set/frozenset containers.
    """
    a_lv, b_lv = var(), var()

    for ctor in (tuple, list, iter, set, frozenset):

        # Sets cannot hold unhashable (list) elements, so nest tuples there.
        if ctor not in (set, frozenset):
            sub_ctor = list
        else:
            sub_ctor = tuple

        assert unground_lvars(ctor((1, 2)), {}) == set()
        assert unground_lvars(
            ctor((1, sub_ctor((a_lv, sub_ctor((b_lv, 2)), 3)))), {}
        ) == {a_lv, b_lv}
        assert unground_lvars(
            ctor((1, sub_ctor((a_lv, sub_ctor((b_lv, 2)), 3)))), {a_lv: 4}
        ) == {b_lv}
        assert (
            unground_lvars(
                ctor((1, sub_ctor((a_lv, sub_ctor((b_lv, 2)), 3)))), {a_lv: 4, b_lv: 5}
            )
            == set()
        )

        assert isground(ctor((1, 2)), {})
        assert isground(ctor((1, a_lv)), {a_lv: 2})
        # Chains of bindings are walked down to ground terms.
        assert isground(ctor((a_lv, sub_ctor((b_lv, 2)), 3)), {a_lv: b_lv, b_lv: 1})

        assert not isground(ctor((1, a_lv)), {a_lv: b_lv})
        assert not isground(ctor((1, var())), {})
        assert not isground(ctor((1, sub_ctor((a_lv, sub_ctor((b_lv, 2)), 3)))), {})
        assert not isground(
            ctor((a_lv, sub_ctor((b_lv, 2)), 3)), {a_lv: b_lv, b_lv: var("c")}
        )

    # Make sure that no composite elements are constructed within the
    # groundedness checks.
    class CounterList(list):
        # Counts how many CounterList instances have ever been created.
        constructions = 0

        def __new__(cls, *args, **kwargs):
            cls.constructions += 1
            return super().__new__(cls, *args, **kwargs)

    test_l = CounterList([1, 2, CounterList([a_lv, CounterList([4])])])

    assert CounterList.constructions == 3

    assert not isground(test_l, {})
    assert CounterList.constructions == 3

    assert unground_lvars(test_l, {}) == {a_lv}
214 |
215 |
def test_reify_recursion_limit():
    """Stream-based `reify` handles chains deeper than the recursion limit."""
    import platform

    a_lv = var()

    b, _ = gen_long_chain(a_lv, 10)
    res = reify(b, {a_lv: "a"})
    assert res == gen_long_chain("a", 10)[0]

    r_limit = sys.getrecursionlimit()

    try:
        sys.setrecursionlimit(100)

        # Reification itself succeeds on a chain deeper than the limit.
        b, _ = gen_long_chain(a_lv, 200)
        res = reify(b, {a_lv: "a"})
        exp_res, _ = gen_long_chain("a", 200)

        if platform.python_implementation().lower() != "pypy":
            # CPython has stack limit issues when comparing nested lists, but
            # PyPy doesn't.
            with pytest.raises(RecursionError):
                assert res == exp_res

        sys.setrecursionlimit(300)

        assert res == exp_res

    finally:
        # Always restore the interpreter's original recursion limit.
        sys.setrecursionlimit(r_limit)
246 |
247 |
def test_unify_recursion_limit():
    """Stream-based `unify` handles deep chains at `gen_long_chain`'s default size."""
    a_lv = var()

    b, _ = gen_long_chain("a")
    b_var, _ = gen_long_chain(a_lv)

    s = unify(b, b_var, {})

    assert s[a_lv] == "a"
257 |
258 |
def test_unify_freeze():
    """Frozen dicts unify, binding the variables in their values."""

    # These will sometimes be in different orders after conversion to
    # `iter`/`list`/`tuple`!
    # u = frozenset({("name", a), ("debit", b)})
    # v = frozenset({("name", "Bob"), ("debit", 100)})

    a, b = var("name"), var("amount")
    u = freeze({"name": a, "debit": b})
    v = freeze({"name": "Bob", "debit": 100})

    assert unify(u, v, {}) == {a: "Bob", b: 100}
271 |
--------------------------------------------------------------------------------
/tests/test_match.py:
--------------------------------------------------------------------------------
1 | from pytest import mark, raises
2 |
3 | from unification.match import Dispatcher, VarDispatcher, match, ordering, supercedes
4 | from unification.variable import var
5 |
6 |
def identity(x):
    """Return the argument unchanged (dispatch fixture)."""
    return x


def inc(x):
    """Return ``x + 1`` (dispatch fixture)."""
    return x + 1


def dec(x):
    """Return ``x - 1`` (dispatch fixture)."""
    return x - 1


def add(x, y):
    """Return the sum of two values (dispatch fixture)."""
    return x + y


def mul(x, y):
    """Return the product of two values (dispatch fixture)."""
    return x * y


def foo(*args):
    """Return all positional arguments as a tuple (dispatch fixture)."""
    return args
29 |
30 |
def test_simple():
    """Literal signatures dispatch to their registered functions."""
    d = Dispatcher("d")

    d.add((1,), inc)
    d.add((10,), dec)

    assert d(1) == 2
    assert d(10) == 9
39 |
40 |
def test_complex():
    """Variable patterns dispatch by specificity; a repeated variable only
    matches equal arguments."""
    d = Dispatcher("d")
    x = var("x")
    y = var("y")

    d.add((1,), inc)
    d.add((x,), inc)
    d.add((x, 1), add)
    d.add((y, y), mul)
    d.add((x, (x, x)), foo)

    assert d(1) == 2
    assert d(2) == 3
    assert d(2, 1) == 3
    assert d(10, 10) == 100
    assert d(10, (10, 10)) == (10, (10, 10))
    # None of the registered two-argument signatures match (1, 2).
    with raises(NotImplementedError):
        d(1, 2)
59 |
60 |
def test_dict():
    """Dispatch on a dict pattern containing a logic variable.

    Bug fix: the final comparison was a bare expression whose result was
    discarded, so this test could never fail; it now asserts the result.
    """
    d = Dispatcher("d")
    x = var("x")

    d.add(({"x": x, "key": 1},), identity)

    assert d({"x": 1, "key": 1}) == {"x": 1, "key": 1}
68 |
69 |
def test_ordering():
    """`ordering` sorts signatures from most to least specific."""
    x = var("x")
    y = var("y")
    o = ordering([(1,), (x,), (2,), (y,), (x, x), (1, x), (x, 1), (1, 2)])

    # Adjacent pairs must never be strictly out of order: either the earlier
    # signature supercedes the later one, or the later does not supercede it.
    for a, b in zip(o, o[1:]):
        assert supercedes(a, b) or not supercedes(b, a)


def test_raises_error():
    """Calling a dispatcher with no matching signature raises."""
    d = Dispatcher("d")

    with raises(NotImplementedError):
        d(1, 2, 3)


def test_register():
    """`Dispatcher.register` is usable as a decorator."""
    d = Dispatcher("d")

    @d.register(1)
    def f(x):
        return 10

    @d.register(2)
    def f(x):
        return 20

    assert d(1) == 10
    assert d(2) == 20
99 |
100 |
def test_dispatcher():
    """`match` dispatches recursively; here it implements Fibonacci."""
    x = var("x")

    @match(1)
    def fib(x):
        return 1

    @match(0)
    def fib(x):
        return 0

    # The variable pattern is least specific and acts as the recursive case.
    @match(x)
    def fib(n):
        return fib(n - 1) + fib(n - 2)

    assert [fib(i) for i in range(10)] == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]


def test_supercedes():
    """`supercedes(a, b)` holds when `a` is at least as specific as `b`."""
    x, y, z = var("x"), var("y"), var("z")
    assert not supercedes(1, 2)
    assert supercedes(1, x)
    assert not supercedes(x, 1)
    assert supercedes((1, 2), (1, x))
    assert not supercedes((1, x), (1, 2))
    assert supercedes((1, x), (y, z))
    assert supercedes(x, y)
    assert supercedes((1, (x, 3)), (1, y))
    assert not supercedes((1, y), (1, (x, 3)))


# Known limitation: signatures repeating one variable are not recognized as
# superceded by ones mixing literals and distinct variables.
@mark.xfail()
def test_supercedes_more():
    x, y = var("x"), var("y")
    assert supercedes((1, x), (y, y))
    assert supercedes((1, x), (x, x))
137 |
138 |
def test_VarDispatcher():
    """`VarDispatcher` passes matched variables to parameters *by name*."""
    d = VarDispatcher("d")
    x, y, z = var("x"), var("y"), var("z")

    # Parameters are bound by variable name, not position, so this swaps.
    @d.register(x, y)
    def swap(y, x):
        return y, x

    assert d(1, 2) == (2, 1)

    # Only the variables that occur in the pattern become parameters.
    @d.register((1, z), 2)
    def foo(z):
        return z

    assert d((1, 3), 2) == 3
154 |
--------------------------------------------------------------------------------
/tests/test_more.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from collections.abc import Mapping
3 |
4 | from unification import var
5 | from unification.core import _reify, _unify, reify, stream_eval, unify
6 | from unification.more import _reify_object, _unify_object, unifiable
7 |
8 |
class Foo(object):
    """Two-field fixture compared structurally by `__eq__`."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __eq__(self, other):
        # Equal only to another Foo with identical field values.
        if type(self) is not type(other):
            return False
        return self.a == other.a and self.b == other.b
16 |
17 |
class Bar(object):
    """One-field fixture compared structurally by `__eq__`."""

    def __init__(self, c):
        self.c = c

    def __eq__(self, other):
        # Equal only to another Bar with an identical field value.
        if type(self) is not type(other):
            return False
        return self.c == other.c
24 |
25 |
def test_unify_object():
    """`_unify_object` unifies plain objects attribute-by-attribute."""
    x = var()
    assert stream_eval(_unify_object(Foo(1, 2), Foo(1, 2), {})) == {}
    assert stream_eval(_unify_object(Foo(1, 2), Foo(1, 3), {})) is False
    assert stream_eval(_unify_object(Foo(1, 2), Foo(1, x), {})) == {x: 2}


def test_unify_nonstandard_object():
    """Third-party types (here `ast.AST`) can be registered for unification."""
    _unify.add((ast.AST, ast.AST, Mapping), _unify_object)
    x = var()
    assert unify(ast.Num(n=1), ast.Num(n=1), {}) == {}
    assert unify(ast.Num(n=1), ast.Num(n=2), {}) is False
    assert unify(ast.Num(n=1), ast.Num(n=x), {}) == {x: 1}


def test_reify_object():
    """`_reify_object` substitutes into attributes and returns the input
    object itself (same identity) when no substitution applies."""
    x = var()
    obj = stream_eval(_reify_object(Foo(1, x), {x: 4}))
    assert obj.a == 1
    assert obj.b == 4

    f = Foo(1, 2)
    assert stream_eval(_reify_object(f, {})) is f


def test_reify_nonstandard_object():
    """Registered third-party types reify through their attributes."""
    _reify.add((ast.AST, Mapping), _reify_object)
    x = var()
    assert reify(ast.Num(n=1), {}).n == 1
    assert reify(ast.Num(n=x), {}).n == x
    assert reify(ast.Num(n=x), {x: 2}).n == 2
57 |
58 |
def test_reify_slots():
    """`_reify_object` also works for classes that use `__slots__`."""

    class SlotsObject(object):
        __slots__ = ["myattr"]

        def __init__(self, myattr):
            self.myattr = myattr

        def __eq__(self, other):
            return type(self) == type(other) and self.myattr == other.myattr

    x = var()
    s = {x: 1}
    e = SlotsObject(x)
    assert stream_eval(_reify_object(e, s)) == SlotsObject(1)
    assert stream_eval(_reify_object(SlotsObject(1), s)) == SlotsObject(1)
74 |
75 |
def test_objects_full():
    # Register both fixture classes for unification and reification.
    for cls in (Foo, Bar):
        _unify.add((cls, cls, Mapping), _unify_object)
        _reify.add((cls, Mapping), _reify_object)

    u, w = var(), var()
    # Different types never unify.
    assert unify(Foo(1, 2), Bar(1), {}) is False
    # Unification and reification recurse through nested objects.
    assert unify(Foo(1, Bar(2)), Foo(1, Bar(u)), {}) == {u: 2}
    assert reify(Foo(u, Bar(Foo(w, 3))), {u: 1, w: 2}) == Foo(1, Bar(Foo(2, 3)))

    class SubFoo(Foo):
        pass

    # Exact type match is required; subclasses do not unify with the base.
    assert unify(Foo(1, 2), SubFoo(1, 2), {}) is False
91 |
92 |
@unifiable
class A(object):
    """Dict-based fixture made unifiable via the class decorator."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.__dict__ == other.__dict__
101 |
102 |
def test_unifiable_dict():
    lvar = var()
    ground = A(1, 2)
    open_term = A(1, lvar)
    # `@unifiable` wires up both directions: unify and reify.
    assert unify(ground, open_term, {}) == {lvar: 2}
    assert reify(open_term, {lvar: 2}) == ground
109 |
110 |
@unifiable
class Aslot(object):
    """Slots-based fixture made unifiable via the class decorator."""

    __slots__ = ("a", "b")

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __eq__(self, other):
        # BUG FIX: the previous implementation zipped the two `__slots__`
        # tuples and compared the slot *names* ("a" == "a", "b" == "b"),
        # so any two `Aslot` instances compared equal and the equality
        # assertions in `test_unifiable_slots` passed vacuously.  Compare
        # the slot *values* instead.
        return type(self) == type(other) and all(
            getattr(self, name) == getattr(other, name) for name in self.__slots__
        )
123 |
124 |
def test_unifiable_slots():
    lvar = var()
    ground = Aslot(1, 2)
    open_term = Aslot(1, lvar)
    # Slot attributes participate in unification/reification like
    # `__dict__` attributes do.
    assert unify(ground, open_term, {}) == {lvar: 2}
    assert reify(open_term, {lvar: 2}) == ground
131 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | from unification.utils import freeze, transitive_get
2 | from unification.variable import var
3 |
4 |
def test_transitive_get():
    u, w = var(), var()
    chain = {u: w, w: 1}
    # Lookups follow chains of variables transitively to a ground value.
    assert transitive_get(u, chain) == 1
    # Non-variable (unhashable here) keys are returned unchanged.
    assert transitive_get({1: 2}, chain) == {1: 2}
    # Cycles are not handled
    # assert transitive_get(x, {x: x}) == x
    # assert transitive_get(x, {x: y, y: x}) == x
12 |
13 |
def test_freeze():
    # Mutable containers are recursively converted to tuples.
    assert freeze({1: [2, 3]}) == ((1, (2, 3)),)
    assert freeze({1}) == (1,)
    assert freeze(([1],)) == ((1,),)
18 |
--------------------------------------------------------------------------------
/tests/test_variable.py:
--------------------------------------------------------------------------------
1 | from unification.variable import Var, isvar, var, variables, vars
2 |
3 |
def test_isvar():
    # Ordinary values are not logic variables; `var(...)` results are.
    assert not isvar(3)
    assert isvar(var(3))

    class CustomVar(Var):
        pass

    # Subclasses of `Var` are recognized as logic variables too.
    assert isvar(CustomVar())
12 |
13 |
def test_var():
    # Tokenized variables are interned: equal tokens yield the same object.
    one_lv = var(1)
    assert var(1) == one_lv
    assert var(1) is one_lv
    # Anonymous variables are always fresh, even with a shared prefix.
    assert var() != var()
    assert var(prefix="a") != var(prefix="a")
20 |
21 |
def test_var_inputs():
    # Same token -> equal variables; no token -> distinct fresh variables.
    token_a, token_b = var(1), var(1)
    assert token_a == token_b
    assert var() != var()
25 |
26 |
def test_vars():
    fresh = vars(3)
    # `vars(n)` produces exactly n logic variables.
    assert len(fresh) == 3
    for lv in fresh:
        assert isvar(lv)
31 |
32 |
def test_context_manager():
    # Inside the context, 1 is treated as a logic variable; once the
    # context exits, it no longer is.
    with variables(1):
        assert isvar(1)
    assert not isvar(1)
37 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from unification.variable import var
4 |
5 |
def gen_long_chain(last_elem=None, N=None, use_lvars=False):
    """Generate a nested list of length `N` with the last element set to `last_elem`.

    Parameters
    ----------
    last_elem: object
        The element to be placed in the inner-most nested list.
    N: int
        The number of nested lists.  Defaults to the current recursion
        limit, so that naive recursive traversals of the result would
        overflow the stack.
    use_lvars: bool
        Whether or not to add `var`s to the first elements of each nested list
        or simply integers.  If ``True``, each `var` is passed the nesting
        level integer (i.e. ``var(i)``).

    Returns
    -------
    list, dict
        The generated nested list, and a ``dict`` mapping each generated
        `var` to its nesting level integer (empty when ``use_lvars`` is
        ``False``).

    """
    # Docstring fix: the summary previously referenced a non-existent
    # parameter name ("last_elm").
    if N is None:
        N = sys.getrecursionlimit()

    b_struct = None
    lvars = {}
    # Build from the inside out: level N-1 holds `last_elem`; every other
    # level wraps the structure built so far.
    for i in range(N - 1, 0, -1):
        i_el = var(i) if use_lvars else i
        if use_lvars:
            lvars[i_el] = i
        b_struct = [i_el, last_elem if i == N - 1 else b_struct]
    return b_struct, lvars
37 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
# Install each environment's dependencies with plain pip.
install_command = pip install {opts} {packages}
# NOTE(review): py36/py37 are end-of-life interpreters — consider refreshing.
envlist = py36,py37,pypy

[testenv]
# Install the project in development (editable) mode.
usedevelop = True
commands =
    pytest {posargs:--with-doctest --with-coverage --cover-package=unification} -v
deps =
    pytest-coverage
    pytest
12 |
--------------------------------------------------------------------------------
/unification/__init__.py:
--------------------------------------------------------------------------------
# Re-export the package's public API at the top level.
from ._version import get_versions
from .core import assoc, reify, unify
from .more import unifiable
from .variable import Var, isvar, var, variables, vars

# Compute the version once via versioneer, then drop the helper so it
# does not leak into the public namespace.
__version__ = get_versions()["version"]
del get_versions
8 |
--------------------------------------------------------------------------------
/unification/_version.py:
--------------------------------------------------------------------------------
1 |
2 | # This file helps to compute a version number in source trees obtained from
3 | # git-archive tarball (such as those provided by githubs download-from-tag
4 | # feature). Distribution tarballs (built by setup.py sdist) and build
5 | # directories (produced by setup.py build) will contain a much shorter file
6 | # that just contains the computed version number.
7 |
8 | # This file is released into the public domain. Generated by
9 | # versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
10 |
11 | """Git implementation of _version.py."""
12 |
13 | import errno
14 | import os
15 | import re
16 | import subprocess
17 | import sys
18 | from typing import Callable, Dict
19 |
20 |
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = " (HEAD -> main)"
    git_full = "5e3e4aba1a4f63c6a4abe7d8a5c6fbbce36fa488"
    git_date = "2024-09-03 17:30:56 -0500"
    # The assignments above must stay on their own lines (they are grepped
    # for); only the dict construction is collapsed into the return.
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
32 |
33 |
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "unification-",
        "versionfile_source": "unification/_version.py",
        "verbose": False,
    }
    for name, value in settings.items():
        setattr(cfg, name, value)
    return cfg
50 |
51 |
# Internal control-flow exception: each version-discovery strategy raises
# it so `get_versions` can fall through to the next strategy.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
54 |
55 |
56 | LONG_VERSION_PY: Dict[str, str] = {}
57 | HANDLERS: Dict[str, Dict[str, Callable]] = {}
58 |
59 |
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # Create the per-VCS table on first use, then register `f`.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
69 |
70 |
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Each name in `commands` is tried in order until one can be spawned.
    Returns ``(stdout, returncode)`` on completion, ``(None, returncode)``
    on a non-zero exit, or ``(None, None)`` when no command could start.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate is not installed; try the next name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # The for-loop fell through without `break`: nothing could be run.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
104 |
105 |
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []

    # Check `root` itself and up to two of its ancestors.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
128 |
129 |
130 | @register_vcs_handler("git", "get_keywords")
131 | def git_get_keywords(versionfile_abs):
132 | """Extract version information from the given file."""
133 | # the code embedded in _version.py can just fetch the value of these
134 | # keywords. When used from setup.py, we don't want to import _version.py,
135 | # so we do it with a regexp instead. This function is not used from
136 | # _version.py.
137 | keywords = {}
138 | try:
139 | with open(versionfile_abs, "r") as fobj:
140 | for line in fobj:
141 | if line.strip().startswith("git_refnames ="):
142 | mo = re.search(r'=\s*"(.*)"', line)
143 | if mo:
144 | keywords["refnames"] = mo.group(1)
145 | if line.strip().startswith("git_full ="):
146 | mo = re.search(r'=\s*"(.*)"', line)
147 | if mo:
148 | keywords["full"] = mo.group(1)
149 | if line.strip().startswith("git_date ="):
150 | mo = re.search(r'=\s*"(.*)"', line)
151 | if mo:
152 | keywords["date"] = mo.group(1)
153 | except OSError:
154 | pass
155 | return keywords
156 |
157 |
158 | @register_vcs_handler("git", "keywords")
159 | def git_versions_from_keywords(keywords, tag_prefix, verbose):
160 | """Get version information from git keywords."""
161 | if "refnames" not in keywords:
162 | raise NotThisMethod("Short version file found")
163 | date = keywords.get("date")
164 | if date is not None:
165 | # Use only the last line. Previous lines may contain GPG signature
166 | # information.
167 | date = date.splitlines()[-1]
168 |
169 | # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
170 | # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
171 | # -like" string, which we must then edit to make compliant), because
172 | # it's been around since git-1.5.3, and it's too difficult to
173 | # discover which version we're using, or to work around using an
174 | # older one.
175 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
176 | refnames = keywords["refnames"].strip()
177 | if refnames.startswith("$Format"):
178 | if verbose:
179 | print("keywords are unexpanded, not using")
180 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
181 | refs = {r.strip() for r in refnames.strip("()").split(",")}
182 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
183 | # just "foo-1.0". If we see a "tag: " prefix, prefer those.
184 | TAG = "tag: "
185 | tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
186 | if not tags:
187 | # Either we're using git < 1.8.3, or there really are no tags. We use
188 | # a heuristic: assume all version tags have a digit. The old git %d
189 | # expansion behaves like git log --decorate=short and strips out the
190 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish
191 | # between branches and tags. By ignoring refnames without digits, we
192 | # filter out many common branch names like "release" and
193 | # "stabilization", as well as "HEAD" and "master".
194 | tags = {r for r in refs if re.search(r'\d', r)}
195 | if verbose:
196 | print("discarding '%s', no digits" % ",".join(refs - tags))
197 | if verbose:
198 | print("likely tags: %s" % ",".join(sorted(tags)))
199 | for ref in sorted(tags):
200 | # sorting will prefer e.g. "2.0" over "2.0rc1"
201 | if ref.startswith(tag_prefix):
202 | r = ref[len(tag_prefix):]
203 | # Filter out refs that exactly match prefix or that don't start
204 | # with a number once the prefix is stripped (mostly a concern
205 | # when prefix is '')
206 | if not re.match(r'\d', r):
207 | continue
208 | if verbose:
209 | print("picking %s" % r)
210 | return {"version": r,
211 | "full-revisionid": keywords["full"].strip(),
212 | "dirty": False, "error": None,
213 | "date": date}
214 | # no suitable tags, so version is "0+unknown", but full hex is still there
215 | if verbose:
216 | print("no suitable tags, using unknown + full revision id")
217 | return {"version": "0+unknown",
218 | "full-revisionid": keywords["full"].strip(),
219 | "dirty": False, "error": "no suitable tags", "date": None}
220 |
221 |
222 | @register_vcs_handler("git", "pieces_from_vcs")
223 | def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
224 | """Get version from 'git describe' in the root of the source tree.
225 |
226 | This only gets called if the git-archive 'subst' keywords were *not*
227 | expanded, and _version.py hasn't already been rewritten with a short
228 | version string, meaning we're inside a checked out source tree.
229 | """
230 | GITS = ["git"]
231 | TAG_PREFIX_REGEX = "*"
232 | if sys.platform == "win32":
233 | GITS = ["git.cmd", "git.exe"]
234 | TAG_PREFIX_REGEX = r"\*"
235 |
236 | _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
237 | hide_stderr=True)
238 | if rc != 0:
239 | if verbose:
240 | print("Directory %s not under git control" % root)
241 | raise NotThisMethod("'git rev-parse --git-dir' returned error")
242 |
243 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
244 | # if there isn't one, this yields HEX[-dirty] (no NUM)
245 | describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
246 | "--always", "--long",
247 | "--match",
248 | "%s%s" % (tag_prefix, TAG_PREFIX_REGEX)],
249 | cwd=root)
250 | # --long was added in git-1.5.5
251 | if describe_out is None:
252 | raise NotThisMethod("'git describe' failed")
253 | describe_out = describe_out.strip()
254 | full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
255 | if full_out is None:
256 | raise NotThisMethod("'git rev-parse' failed")
257 | full_out = full_out.strip()
258 |
259 | pieces = {}
260 | pieces["long"] = full_out
261 | pieces["short"] = full_out[:7] # maybe improved later
262 | pieces["error"] = None
263 |
264 | branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
265 | cwd=root)
266 | # --abbrev-ref was added in git-1.6.3
267 | if rc != 0 or branch_name is None:
268 | raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
269 | branch_name = branch_name.strip()
270 |
271 | if branch_name == "HEAD":
272 | # If we aren't exactly on a branch, pick a branch which represents
273 | # the current commit. If all else fails, we are on a branchless
274 | # commit.
275 | branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
276 | # --contains was added in git-1.5.4
277 | if rc != 0 or branches is None:
278 | raise NotThisMethod("'git branch --contains' returned error")
279 | branches = branches.split("\n")
280 |
281 | # Remove the first line if we're running detached
282 | if "(" in branches[0]:
283 | branches.pop(0)
284 |
285 | # Strip off the leading "* " from the list of branches.
286 | branches = [branch[2:] for branch in branches]
287 | if "master" in branches:
288 | branch_name = "master"
289 | elif not branches:
290 | branch_name = None
291 | else:
292 | # Pick the first branch that is returned. Good or bad.
293 | branch_name = branches[0]
294 |
295 | pieces["branch"] = branch_name
296 |
297 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
298 | # TAG might have hyphens.
299 | git_describe = describe_out
300 |
301 | # look for -dirty suffix
302 | dirty = git_describe.endswith("-dirty")
303 | pieces["dirty"] = dirty
304 | if dirty:
305 | git_describe = git_describe[:git_describe.rindex("-dirty")]
306 |
307 | # now we have TAG-NUM-gHEX or HEX
308 |
309 | if "-" in git_describe:
310 | # TAG-NUM-gHEX
311 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
312 | if not mo:
313 | # unparsable. Maybe git-describe is misbehaving?
314 | pieces["error"] = ("unable to parse git-describe output: '%s'"
315 | % describe_out)
316 | return pieces
317 |
318 | # tag
319 | full_tag = mo.group(1)
320 | if not full_tag.startswith(tag_prefix):
321 | if verbose:
322 | fmt = "tag '%s' doesn't start with prefix '%s'"
323 | print(fmt % (full_tag, tag_prefix))
324 | pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
325 | % (full_tag, tag_prefix))
326 | return pieces
327 | pieces["closest-tag"] = full_tag[len(tag_prefix):]
328 |
329 | # distance: number of commits since tag
330 | pieces["distance"] = int(mo.group(2))
331 |
332 | # commit: short hex revision ID
333 | pieces["short"] = mo.group(3)
334 |
335 | else:
336 | # HEX: no tags
337 | pieces["closest-tag"] = None
338 | count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
339 | pieces["distance"] = int(count_out) # total number of commits
340 |
341 | # commit date: see ISO-8601 comment in git_versions_from_keywords()
342 | date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
343 | # Use only the last line. Previous lines may contain GPG signature
344 | # information.
345 | date = date.splitlines()[-1]
346 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
347 |
348 | return pieces
349 |
350 |
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # A local version label may contain only one '+' separator; subsequent
    # segments are joined with '.'.
    tag = pieces.get("closest-tag", "")
    return "." if "+" in tag else "+"
356 |
357 |
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]

    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
381 |
382 |
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]

    if not tag:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
411 |
412 |
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number, or ``None`` if no post-release segment
    is present (a bare ``.post`` suffix yields 0).
    """
    # Defect fixed: the docstring claimed -1 was returned when no
    # post-release segment is present, but the implementation returns
    # None — and callers (render_pep440_pre) test for None.  Document
    # the actual contract; behavior is unchanged.
    vc = str.split(ver, ".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
421 |
422 |
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # no commits since the tag; the tag itself is the version
        return tag
    # Bump (or start) the post-release segment and record the distance as
    # a dev-release number.
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % pieces["distance"]
    return base + ".post%d.dev%d" % (post + 1, pieces["distance"])
445 |
446 |
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]

    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]

    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
472 |
473 |
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]

    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
501 |
502 |
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]

    if tag:
        # With a tag the ".post" suffix is added only when needed.
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered

    # exception #1: the ".post" suffix is always present.
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
523 |
524 |
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        # The distance/hash suffix only appears past the tag itself.
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
543 |
544 |
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
563 |
564 |
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Propagate the error from the VCS-inspection stage verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
599 |
600 |
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: git-archive keywords expanded in this file.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ available (frozen/embedded interpreter).
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git itself via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: infer the version from the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
645 |
--------------------------------------------------------------------------------
/unification/core.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict, deque
2 | from collections.abc import Generator, Iterator, Mapping, Set
3 | from copy import copy
4 | from functools import partial
5 | from operator import length_hint
6 |
7 | from .dispatch import dispatch
8 | from .utils import transitive_get as walk
9 | from .variable import Var, isvar
10 |
11 | # An object used to tell the reifier that the next yield constructs the reified
12 | # object from its constituent refications (if any).
13 | construction_sentinel = object()
14 |
15 |
@dispatch(Mapping, object, object)
def assoc(s, u, v):
    """Add an entry to a `Mapping` and return it."""
    # Work on a copy so the caller's mapping is left untouched.
    if hasattr(s, "copy"):
        new_s = s.copy()
    else:
        new_s = copy(s)  # pragma: no cover
    new_s[u] = v
    return new_s
25 |
26 |
def stream_eval(z, res_filter=None):
    r"""Evaluate a stream of `_reify`/`_unify` results.

    This implementation consists of a deque that simulates an evaluation stack
    of `_reify`/`_unify`-produced generators. We're able to overcome
    `RecursionError`\s this way.
    """

    # Non-generators are already fully evaluated.
    if not isinstance(z, Generator):
        return z

    # `stack` holds the generators currently being evaluated; `to_send` is
    # the value produced by the previous step, fed into the generator at
    # the top of the stack; `last_result` is the most recent value any
    # generator produced (the overall result once the stack drains).
    stack = deque([z])
    to_send = None
    last_result = None

    while stack:
        top = stack[-1]
        try:
            result = top.send(to_send)
        except StopIteration:
            # `top` is exhausted; its parent resumes with the last result.
            stack.pop()
            continue

        if res_filter:
            _ = res_filter(top, result)

        last_result = result
        if isinstance(result, Generator):
            # Nested stream: evaluate it before resuming `top`.
            stack.append(result)
            to_send = None
        else:
            to_send = result

    return last_result
60 |
61 |
# NOTE(review): nothing in this chunk raises this exception; it appears to
# be public API for callers/result filters of `stream_eval` — confirm
# against the rest of the package.
class UngroundLVarException(Exception):
    """An exception signaling that an unground variable was found."""
64 |
65 |
@dispatch(object, Mapping)
def _reify(o, s):
    # Base case: objects with no specialized `_reify` handler are already
    # ground and reify to themselves.
    return o
69 |
70 |
@_reify.register(Var, Mapping)
def _reify_Var(o, s):
    """Reify a logic variable by walking it through the substitution `s`."""
    o_w = walk(o, s)

    if o_w is o:
        # Unbound variable: it reifies to itself.
        yield o_w
    else:
        # Bound: reify the walked-to value (this may be another generator
        # that the stream evaluator will expand).
        yield _reify(o_w, s)
79 |
80 |
def _reify_Iterable_ctor(ctor, t, s):
    """Create a generator that yields `_reify` generators.

    The yielded generators need to be evaluated by the caller and the fully
    reified results "sent" back to this generator so that it can finish
    constructing reified iterable.

    This approach allows us "collapse" nested `_reify` calls by pushing nested
    calls up the stack.

    Parameters
    ----------
    ctor: callable
        Constructor applied to the list of reified elements (e.g. ``tuple``).
    t: Iterable or Mapping
        The term whose elements are to be reified.
    s: Mapping
        The substitution.
    """
    res = []

    # Mappings are reified item-wise as (key, value) pairs.
    if isinstance(t, Mapping):
        t = t.items()

    for y in t:
        r = _reify(y, s)
        if isinstance(r, Generator):
            # Hand the nested reification to the stream evaluator, which
            # sends the fully reified element back in.
            r = yield r
        res.append(r)

    # Tell the evaluator the next yield constructs the final object.
    yield construction_sentinel

    yield ctor(res)
105 |
106 |
# Register element-wise reification for the common sequence/set types;
# each is rebuilt with the paired constructor.
for seq, ctor in (
    (tuple, tuple),
    (list, list),
    (Iterator, iter),
    (set, set),
    (frozenset, frozenset),
):
    _reify.add((seq, Mapping), partial(_reify_Iterable_ctor, ctor))


# Mappings are rebuilt from their reified (key, value) items.
for seq in (dict, OrderedDict):
    _reify.add((seq, Mapping), partial(_reify_Iterable_ctor, seq))
119 |
120 |
@_reify.register(slice, Mapping)
def _reify_slice(o, s):
    """Reify a `slice` by reifying its start, stop, and step components."""
    start = yield _reify(o.start, s)
    stop = yield _reify(o.stop, s)
    step = yield _reify(o.step, s)

    # The next yield constructs the reified object.
    yield construction_sentinel

    yield slice(start, stop, step)
130 |
131 |
@dispatch(object, Mapping)
def reify(e, s):
    """Replace logic variables in a term, `e`, with their substitutions in `s`.

    >>> x, y = var(), var()
    >>> e = (1, x, (3, y))
    >>> s = {x: 2, y: 4}
    >>> reify(e, s)
    (1, 2, (3, 4))

    >>> e = {1: x, 3: (y, 5)}
    >>> reify(e, s)
    {1: 2, 3: (4, 5)}
    """

    # With an empty substitution nothing can change; skip the traversal.
    if len(s) == 0:
        return e

    # `_reify` builds a (possibly nested) generator stream; evaluate it
    # iteratively to avoid RecursionError on deep terms.
    return stream_eval(_reify(e, s))
151 |
152 |
@dispatch(object, object, Mapping)
def _unify(u, v, s):
    """Unify two arbitrary terms under the substitution `s`.

    Base case: non-composite terms unify only when they compare equal.
    """
    if u == v:
        return s
    return False
156 |
157 |
@_unify.register(Var, (Var, object), Mapping)
def _unify_Var_object(u, v, s):
    """Unify a logic variable with another term (or variable).

    Both sides are walked through `s`; an unbound variable on either side
    is associated with the other side's value.
    """
    uw = walk(u, s)
    vw = walk(v, s) if isvar(v) else v

    if uw == vw:
        yield s
    elif isvar(uw):
        yield assoc(s, uw, vw)
    elif isvar(vw):
        yield assoc(s, vw, uw)
    else:
        # Both sides walked to non-variable terms; unify those.
        yield _unify(uw, vw, s)
175 |
176 |
177 | _unify.add((object, Var, Mapping), _unify_Var_object)
178 |
179 |
180 | def _unify_Iterable(u, v, s):
181 | len_u = length_hint(u, -1)
182 | len_v = length_hint(v, -1)
183 |
184 | if len_u != len_v:
185 | yield False
186 | return
187 |
188 | for uu, vv in zip(u, v):
189 | s = yield _unify(uu, vv, s)
190 | if s is False:
191 | return
192 | else:
193 | yield s
194 |
195 |
# Ordered sequence types (and iterators) unify element-wise.
for _seq_type in (tuple, list, Iterator):
    _unify.add((_seq_type, _seq_type, Mapping), _unify_Iterable)
198 |
199 |
@_unify.register(Set, Set, Mapping)
def _unify_Set(u, v, s):
    """Unify two sets by discarding the shared elements and unifying the rest."""
    common = u & v
    # The leftovers are compared as (arbitrarily ordered) iterators.
    yield _unify(iter(u - common), iter(v - common), s)
206 |
207 |
@_unify.register(Mapping, Mapping, Mapping)
def _unify_Mapping(u, v, s):
    """Unify two mappings key-by-key.

    Mappings must have identical key sets; their values are unified
    pairwise under the accumulating substitution.
    """
    if len(u) != len(v):
        yield False
        return

    for key in u:
        if key not in v:
            yield False
            return

        s = yield _unify(u[key], v[key], s)

        if s is False:
            return

    yield s
225 |
226 |
@_unify.register(slice, slice, Mapping)
def _unify_slice(u, v, s):
    """Unify two slices field-by-field (start, stop, step).

    Each yielded `_unify` call is evaluated by the `stream_eval` driver,
    which sends the resulting substitution (or `False`) back in.
    """
    s = yield _unify(u.start, v.start, s)
    if s is False:
        return
    s = yield _unify(u.stop, v.stop, s)
    if s is False:
        return
    s = yield _unify(u.step, v.step, s)
    # NOTE(review): there is no trailing `yield s` here, unlike the other
    # `_unify_*` handlers.  This presumably works because the driver keeps
    # the last evaluated value as the overall result when this generator
    # exhausts — confirm against `stream_eval` before relying on it.
236 |
237 |
@dispatch(object, object, Mapping)
def unify(u, v, s):
    """Find substitution so that ``u == v`` while satisfying `s`.

    >>> x = var('x')
    >>> unify((1, x), (1, 2), {})
    {~x: 2}
    """
    # Identical objects unify trivially; otherwise drive the
    # generator-based unification to completion.
    return s if u is v else stream_eval(_unify(u, v, s))
250 |
251 |
@unify.register(object, object)
def unify_NoMap(u, v):
    """Unify `u` and `v` starting from an empty substitution map."""
    return unify(u, v, {})
255 |
256 |
def unground_lvars(u, s):
    """Return the unground logic variables from a term and state."""

    found = set()

    def _collect(z, r):
        # Record every logic variable that survives reification.
        if isvar(r):
            found.add(r)

        if r is construction_sentinel:
            # All constituents have been inspected; skip constructing the
            # reified result and pop this generator off the driver's stack.
            z.close()
            raise StopIteration()

    stream_eval(_reify(u, s), _collect)

    return found
278 |
279 |
def isground(u, s):
    """Determine whether or not `u` contains an unground logic variable under mappings `s`."""  # noqa: E501

    def _check(z, r):
        # Abort the whole evaluation as soon as an unground variable shows up.
        if isvar(r):
            raise UngroundLVarException()
        elif r is construction_sentinel:
            # Everything so far is ground; skip constructing the result and
            # pop this generator off the driver's stack.
            z.close()
            raise StopIteration()

    try:
        stream_eval(_reify(u, s), _check)
    except UngroundLVarException:
        return False

    return True
300 |
301 |
302 | def debug_unify(u, v, s): # pragma: no cover
303 | """Stop in the debugger when unify fails.
304 |
305 | You can inspect the generator-based stack by looking through the
306 | generator frames in the `stack` variable in `stream_eval`:
307 |
308 | (Pdb) up
309 | > .../unification/unification/core.py(39)stream_eval()
310 | -> _ = res_filter(z, z_out)
311 | (Pdb) stack[-2].gi_frame.f_locals
312 | {'u':