├── tests
├── __init__.py
├── tensorflow
│ ├── __init__.py
│ ├── conftest.py
│ ├── test_graph.py
│ ├── utils.py
│ ├── test_kanren.py
│ ├── test_dispatch.py
│ └── test_printing.py
├── test_relations.py
├── conftest.py
├── theano
│ ├── conftest.py
│ ├── test_utils.py
│ ├── __init__.py
│ ├── test_kanren.py
│ ├── utils.py
│ ├── test_dispatch.py
│ ├── test_printing.py
│ ├── test_meta.py
│ ├── test_rv.py
│ └── test_relations.py
├── test_dispatch.py
├── test_meta.py
└── test_utils.py
├── .gitattributes
├── MANIFEST.in
├── pyproject.toml
├── docs
├── logos
│ ├── PyMC3.ai
│ ├── PyMC3.eps
│ ├── PyMC3.ico
│ ├── PyMC3.jpg
│ ├── PyMC3.pdf
│ ├── PyMC3.png
│ └── svg
│ │ ├── PyMC3_square.svg
│ │ ├── PyMC3_circle.svg
│ │ └── PyMC3_banner.svg
├── source
│ ├── modules.rst
│ ├── _static
│ │ ├── centered_trace.png
│ │ ├── recentered_trace.png
│ │ ├── pymc4-radon-plot-energy.png
│ │ ├── pymc4-radon-plot-trace.png
│ │ ├── transformed-model-plot-trace.png
│ │ ├── transformed-model-plot-energy.png
│ │ └── custom.css
│ ├── symbolic_pymc.relations.tensorflow.rst
│ ├── symbolic_pymc.relations.rst
│ ├── index.rst
│ ├── symbolic_pymc.rst
│ ├── symbolic_pymc.relations.theano.rst
│ ├── symbolic_pymc.tensorflow.rst
│ ├── semantic_sphinx
│ │ ├── theme.conf
│ │ ├── search.html
│ │ ├── static
│ │ │ └── gallery.js
│ │ └── layout.html
│ ├── symbolic_pymc.theano.rst
│ ├── theano-posteriors-example.rst
│ ├── theano-posteriors-example.org
│ ├── conf.py
│ ├── theano-radon-example.rst
│ ├── theano-radon-example.org
│ └── org-babel-extensions.org
├── Makefile
└── make.bat
├── symbolic_pymc
├── theano
│ ├── __init__.py
│ ├── dispatch.py
│ └── utils.py
├── tensorflow
│ ├── __init__.py
│ ├── dispatch.py
│ ├── graph.py
│ └── printing.py
├── __init__.py
├── relations
│ ├── tensorflow
│ │ └── __init__.py
│ ├── __init__.py
│ └── theano
│ │ ├── __init__.py
│ │ ├── conjugates.py
│ │ ├── linalg.py
│ │ └── distributions.py
├── dispatch.py
└── utils.py
├── images
├── centered_trace.png
└── recentered_trace.png
├── requirements.txt
├── .travis.yml
├── setup.cfg
├── .gitignore
├── setup.py
├── Makefile
├── README.md
└── LICENSE
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | symbolic_pymc/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include versioneer.py
2 | include symbolic_pymc/_version.py
3 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 100
3 | target-version = ['py36']
--------------------------------------------------------------------------------
/docs/logos/PyMC3.ai:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.ai
--------------------------------------------------------------------------------
/docs/logos/PyMC3.eps:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.eps
--------------------------------------------------------------------------------
/docs/logos/PyMC3.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.ico
--------------------------------------------------------------------------------
/docs/logos/PyMC3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.jpg
--------------------------------------------------------------------------------
/docs/logos/PyMC3.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.pdf
--------------------------------------------------------------------------------
/docs/logos/PyMC3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/logos/PyMC3.png
--------------------------------------------------------------------------------
/symbolic_pymc/theano/__init__.py:
--------------------------------------------------------------------------------
1 | # Needed to register generic functions
2 | from .dispatch import *
3 |
--------------------------------------------------------------------------------
/symbolic_pymc/tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 | # Needed to register generic functions
2 | from .dispatch import *
3 |
--------------------------------------------------------------------------------
/images/centered_trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/images/centered_trace.png
--------------------------------------------------------------------------------
/images/recentered_trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/images/recentered_trace.png
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | symbolic_pymc
2 | =============
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | symbolic_pymc
8 |
--------------------------------------------------------------------------------
/docs/source/_static/centered_trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/centered_trace.png
--------------------------------------------------------------------------------
/docs/source/_static/recentered_trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/recentered_trace.png
--------------------------------------------------------------------------------
/docs/source/_static/pymc4-radon-plot-energy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/pymc4-radon-plot-energy.png
--------------------------------------------------------------------------------
/docs/source/_static/pymc4-radon-plot-trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/pymc4-radon-plot-trace.png
--------------------------------------------------------------------------------
/docs/source/_static/transformed-model-plot-trace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/transformed-model-plot-trace.png
--------------------------------------------------------------------------------
/docs/source/_static/transformed-model-plot-energy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pymc-devs/symbolic-pymc/HEAD/docs/source/_static/transformed-model-plot-energy.png
--------------------------------------------------------------------------------
/symbolic_pymc/__init__.py:
--------------------------------------------------------------------------------
1 | # We need this so that `multipledispatch` initialization occurs
2 | from .dispatch import *
3 |
4 | from ._version import get_versions
5 |
6 | __version__ = get_versions()["version"]
7 | del get_versions
8 |
--------------------------------------------------------------------------------
/docs/source/_static/custom.css:
--------------------------------------------------------------------------------
1 | div.code-block-caption {
2 | justify-content: center;
3 | text-align: center;
4 | }
5 |
6 | div.code-block-caption > span.caption-text {
7 | visibility: hidden;
8 | display: none;
9 | }
10 |
--------------------------------------------------------------------------------
/tests/tensorflow/__init__.py:
--------------------------------------------------------------------------------
from functools import wraps

from tensorflow.python.eager.context import graph_mode


def run_in_graph_mode(f):
    """Decorate `f` so it runs inside a TensorFlow graph-mode context.

    Bug fix: the wrapper previously called ``f()`` with no arguments,
    silently dropping any positional/keyword arguments (e.g. pytest
    fixtures) passed to the decorated test.
    """

    @wraps(f)
    def _f(*args, **kwargs):
        with graph_mode():
            # Forward the caller's arguments instead of discarding them.
            return f(*args, **kwargs)

    return _f
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.relations.tensorflow.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc.relations.tensorflow package
2 | ===========================================
3 |
4 | Module contents
5 | ---------------
6 |
7 | .. automodule:: symbolic_pymc.relations.tensorflow
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
--------------------------------------------------------------------------------
/tests/test_relations.py:
--------------------------------------------------------------------------------
from unification import var

from kanren import run

from symbolic_pymc.relations import concat


def test_concat():
    """Check the `concat` goal on ground, contradictory, and fresh terms."""
    q = var()

    # Both inputs ground: the output unifies with their concatenation.
    assert run(0, q, concat("a", "b", q)) == ("ab",)

    # Ground but inconsistent output: the goal fails.
    assert not run(0, q, concat("a", "b", "bc"))

    # A non-string argument also makes the goal fail.
    assert not run(0, q, concat(1, "b", "bc"))

    # An unground input leaves the query variable unconstrained.
    assert run(0, q, concat(q, "b", "bc")) == (q,)
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.relations.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc.relations package
2 | ================================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |
9 | symbolic_pymc.relations.tensorflow
10 | symbolic_pymc.relations.theano
11 |
12 | Module contents
13 | ---------------
14 |
15 | .. automodule:: symbolic_pymc.relations
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
import pytest

from copy import deepcopy


@pytest.fixture(autouse=True)
def setup_module():
    """Give every test a pristine copy of the base `_metatize` dispatcher.

    Tests may register new dispatch rules; restoring the saved dispatcher
    afterward keeps those registrations from leaking between tests.
    """
    import symbolic_pymc.meta

    from symbolic_pymc.meta import base_metatize

    saved_dispatcher = symbolic_pymc.meta._metatize
    symbolic_pymc.meta._metatize = deepcopy(base_metatize)

    yield

    symbolic_pymc.meta._metatize = saved_dispatcher
--------------------------------------------------------------------------------
/tests/theano/conftest.py:
--------------------------------------------------------------------------------
import pytest


@pytest.fixture(autouse=True)
def setup_module():
    """Install the Theano-specific `_metatize` dispatcher for each test."""
    import symbolic_pymc.meta
    from symbolic_pymc.meta import base_metatize

    import symbolic_pymc.theano.meta as theano_meta

    # Swap in a dispatcher that knows about Theano meta objects.
    symbolic_pymc.meta._metatize = theano_meta.load_dispatcher()

    yield

    # Restore the backend-agnostic dispatcher.
    symbolic_pymc.meta._metatize = base_metatize
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | setuptools>=45.2.0
2 | six>=1.14.0
3 | -e ./
4 | sympy>=1.3
5 | pypolyagamma @ git+https://github.com/slinderman/pypolyagamma.git@b5883e661123862ca07d29ab14369fae85bdbc27#egg=pypolyagamma-1.2.2
6 | coveralls
7 | pydocstyle>=3.0.0
8 | pytest>=5.0.0
9 | pytest-cov>=2.6.1
10 | pytest-html>=1.20.0
11 | pylint>=2.3.1
12 | black==20.8b1; platform_python_implementation != 'PyPy'
13 | diff-cover
14 | ipython
15 | versioneer
16 | sphinx-autobuild==0.7.1
17 | sphinx>=1.5.5
18 | nbsphinx>=0.2.13
19 | recommonmark>=0.4.0
20 | ghp-import
21 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: xenial
2 | language: python
3 |
4 | python:
5 | - "3.6"
6 | - "3.7"
7 | # No tf-nightly for this version
8 | # - "3.8"
9 | # Cython and/or Numpy fails with permission errors
10 | # - "pypy3"
11 |
12 | install:
13 | - pip install Cython
14 | - pip install -r requirements.txt
15 |
16 | script:
17 | - pylint symbolic_pymc/ tests/
18 | - if [[ `command -v black` ]]; then
19 | black --check symbolic_pymc tests;
20 | fi
21 | - pytest -v tests/ --cov=symbolic_pymc/
22 |
23 | after_success:
24 | - coveralls
25 |
--------------------------------------------------------------------------------
/tests/tensorflow/conftest.py:
--------------------------------------------------------------------------------
import pytest


@pytest.fixture(autouse=True)
def setup_module():
    """Install the TensorFlow `_metatize` dispatcher and reset the TF graph."""
    import symbolic_pymc.meta
    from symbolic_pymc.meta import base_metatize

    import symbolic_pymc.tensorflow.meta as tf_meta

    # Swap in a dispatcher that knows about TF meta objects.
    symbolic_pymc.meta._metatize = tf_meta.load_dispatcher()

    # Let's make sure we have a clean graph slate
    from tensorflow.compat.v1 import reset_default_graph

    reset_default_graph()

    yield

    # Restore the backend-agnostic dispatcher.
    symbolic_pymc.meta._metatize = base_metatize
--------------------------------------------------------------------------------
/symbolic_pymc/relations/tensorflow/__init__.py:
--------------------------------------------------------------------------------
from unification import var

from kanren.facts import fact
from kanren.assoccomm import commutative, associative

from ...tensorflow.meta import mt, TFlowMetaOperator


# Register TF operators whose argument order may be rearranged during
# miniKanren-based term rewriting.
# TODO: We could use `mt.*.op_def.obj.is_commutative` to capture
# more/all cases.
fact(commutative, TFlowMetaOperator(mt.AddV2.op_def, var()))
fact(commutative, TFlowMetaOperator(mt.AddN.op_def, var()))
fact(commutative, TFlowMetaOperator(mt.Mul.op_def, var()))

# NOTE(review): `Mul` is registered as commutative but not associative —
# confirm this asymmetry is intentional.
fact(associative, TFlowMetaOperator(mt.AddN.op_def, var()))
fact(associative, TFlowMetaOperator(mt.AddV2.op_def, var()))
--------------------------------------------------------------------------------
/tests/theano/test_utils.py:
--------------------------------------------------------------------------------
import theano

from symbolic_pymc.theano.utils import is_random_variable
from symbolic_pymc.theano.random_variables import NormalRV


@theano.change_flags(compute_test_value="ignore", cxx="")
def test_is_random_variable():
    """Exercise `is_random_variable` on a plain RV and on a `Scan` output."""
    X_rv = NormalRV(0, 1)
    assert is_random_variable(X_rv) == (X_rv, X_rv)

    def scan_fn():
        return NormalRV(0, 1, name="Y_t")

    # A random variable produced inside a `Scan` should be traced back to
    # the RV node in the scan op's inner graph.
    Y_rv, _ = theano.scan(fn=scan_fn, outputs_info=[{}], n_steps=10)

    assert is_random_variable(Y_rv) == (Y_rv, Y_rv.owner.op.outputs[0])
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/tests/theano/__init__.py:
--------------------------------------------------------------------------------
import warnings

with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    import theano

import pymc3 as pm

from functools import wraps


# Test-friendly Theano settings: skip test values, surface optimizer
# errors, and avoid the C compiler entirely.
theano.config.compute_test_value = "ignore"
theano.config.on_opt_error = "raise"
theano.config.mode = "FAST_COMPILE"
theano.config.cxx = ""


def requires_test_values(f):
    """Run `f` with `compute_test_value="raise"`, restoring the prior setting afterward."""

    @wraps(f)
    def _f(*args, **kwargs):

        import theano

        previous_setting = theano.config.compute_test_value
        theano.config.compute_test_value = "raise"

        try:
            return f(*args, **kwargs)
        finally:
            theano.config.compute_test_value = previous_setting

    return _f
--------------------------------------------------------------------------------
/symbolic_pymc/relations/__init__.py:
--------------------------------------------------------------------------------
from kanren.facts import Relation

from unification import unify, reify, Var


# Hierarchical models that we recognize.
hierarchical_model = Relation("hierarchical")

# Conjugate relationships
conjugate = Relation("conjugate")


def concat(a, b, out):
    """Construct a non-relational string concatenation goal."""

    def concat_goal(S):
        nonlocal a, b, out

        a_r, b_r, out_r = reify((a, b, out), S)

        if isinstance(a_r, str) and isinstance(b_r, str):
            # Both inputs are ground strings: the output must unify with
            # their concatenation (and the goal fails when it cannot).
            s_next = unify(out_r, a_r + b_r, S)

            if s_next is not False:
                yield s_next
            return

        if isinstance(a_r, (Var, str)) and isinstance(b_r, (Var, str)):
            # At least one input is still unground; succeed without
            # further constraining the state.
            yield S

    return concat_goal
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [versioneer]
2 | VCS = git
3 | style = pep440
4 | versionfile_source = symbolic_pymc/_version.py
5 | versionfile_build = symbolic_pymc/_version.py
6 | tag_prefix = v
7 | parentdir_prefix = symbolic-pymc-
8 |
9 | [pydocstyle]
10 | # Ignore errors for missing docstrings.
11 | # Ignore D202 (No blank lines allowed after function docstring)
12 | # due to bug in black: https://github.com/ambv/black/issues/355
13 | add-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D202
14 | convention = numpy
15 |
16 | [tool:pytest]
17 | python_functions=test_*
18 | filterwarnings =
19 | ignore:the imp module is deprecated:DeprecationWarning:
20 | ignore:Using a non-tuple sequence:FutureWarning:theano\.tensor
21 |
22 | [coverage:run]
23 | omit =
24 | symbolic_pymc/_version.py
25 | tests/*
26 |
27 | [coverage:report]
28 | exclude_lines =
29 | pragma: no cover
30 |
31 | raise NotImplementedError
32 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. Symbolic PyMC documentation master file, created by
2 | sphinx-quickstart on Fri Aug 23 13:44:28 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to Symbolic PyMC's documentation!
7 | =========================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | symbolic-pymc-tour
14 | modules
15 |
16 | Examples
17 | ========
18 |
19 | .. toctree::
20 | :maxdepth: 1
21 | :caption: Theano:
22 |
23 | theano-posteriors-example
24 | theano-radon-example
25 |
26 | .. toctree::
27 | :maxdepth: 1
28 | :caption: TensorFlow:
29 |
30 | tensorflow-radon-example
31 | tensorflow-simplification-example
32 |
33 | Indices and tables
34 | ==================
35 |
36 | * :ref:`genindex`
37 | * :ref:`modindex`
38 | * :ref:`search`
39 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc package
2 | ======================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |
9 | symbolic_pymc.relations
10 | symbolic_pymc.tensorflow
11 | symbolic_pymc.theano
12 |
13 | Submodules
14 | ----------
15 |
16 | symbolic\_pymc.dispatch module
17 | ------------------------------
18 |
19 | .. automodule:: symbolic_pymc.dispatch
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
24 | symbolic\_pymc.meta module
25 | --------------------------
26 |
27 | .. automodule:: symbolic_pymc.meta
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
32 | symbolic\_pymc.utils module
33 | ---------------------------
34 |
35 | .. automodule:: symbolic_pymc.utils
36 | :members:
37 | :undoc-members:
38 | :show-inheritance:
39 |
40 |
41 | Module contents
42 | ---------------
43 |
44 | .. automodule:: symbolic_pymc
45 | :members:
46 | :undoc-members:
47 | :show-inheritance:
48 |
--------------------------------------------------------------------------------
/tests/tensorflow/test_graph.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf

from symbolic_pymc.tensorflow.graph import normalize_tf_graph

from tests.tensorflow import run_in_graph_mode


@run_in_graph_mode
def test_normalize():
    """Grappler should canonicalize the graph (e.g. input ordering of ops)."""
    # NOTE: Two option keys were previously misspelled ("shape_optimizations",
    # "arithmetic_optimzation") and were silently ignored by TF; the valid
    # grappler option names are "shape_optimization" and
    # "arithmetic_optimization".
    tf.config.optimizer.set_experimental_options(
        {
            "shape_optimization": True,
            "arithmetic_optimization": True,
            "function_optimization": True,
            "min_graph_nodes": 0,
        }
    )
    with tf.Graph().as_default() as norm_graph:
        a_tf = tf.compat.v1.placeholder("float")
        const_log_tf = 0.5 * np.log(2.0 * np.pi) + tf.math.log(a_tf)
        normal_const_log_tf = normalize_tf_graph(const_log_tf)

        # Grappler appears to put log ops before const
        assert normal_const_log_tf.op.inputs[0].op.type == "Log"
        assert normal_const_log_tf.op.inputs[1].op.type == "Const"
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.relations.theano.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc.relations.theano package
2 | =======================================
3 |
4 | Submodules
5 | ----------
6 |
7 | symbolic\_pymc.relations.theano.conjugates module
8 | -------------------------------------------------
9 |
10 | .. automodule:: symbolic_pymc.relations.theano.conjugates
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | symbolic\_pymc.relations.theano.distributions module
16 | ----------------------------------------------------
17 |
18 | .. automodule:: symbolic_pymc.relations.theano.distributions
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | symbolic\_pymc.relations.theano.linalg module
24 | ---------------------------------------------
25 |
26 | .. automodule:: symbolic_pymc.relations.theano.linalg
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 |
32 | Module contents
33 | ---------------
34 |
35 | .. automodule:: symbolic_pymc.relations.theano
36 | :members:
37 | :undoc-members:
38 | :show-inheritance:
39 |
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.tensorflow.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc.tensorflow package
2 | =================================
3 |
4 | Submodules
5 | ----------
6 |
7 | symbolic\_pymc.tensorflow.dispatch module
8 | -----------------------------------------
9 |
10 | .. automodule:: symbolic_pymc.tensorflow.dispatch
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | symbolic\_pymc.tensorflow.graph module
16 | --------------------------------------
17 |
18 | .. automodule:: symbolic_pymc.tensorflow.graph
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | symbolic\_pymc.tensorflow.meta module
24 | -------------------------------------
25 |
26 | .. automodule:: symbolic_pymc.tensorflow.meta
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | symbolic\_pymc.tensorflow.printing module
32 | -----------------------------------------
33 |
34 | .. automodule:: symbolic_pymc.tensorflow.printing
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 |
40 | Module contents
41 | ---------------
42 |
43 | .. automodule:: symbolic_pymc.tensorflow
44 | :members:
45 | :undoc-members:
46 | :show-inheritance:
47 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.sw[op]
3 | examples/*.png
4 | nb_examples/
5 | nb_tutorials/
6 | build/*
7 | dist/*
8 | *$py.class
9 | /testing-report.html
10 | /pip-wheel-metadata/
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | lib64/
25 | parts/
26 | sdist/
27 | var/
28 | wheels/
29 | *.egg-info/
30 | .ipynb_checkpoints
31 | tmtags
32 | tags
33 | .DS_Store
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | # IntelliJ IDE
51 | .idea
52 | *.iml
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Sphinx documentation
57 | docs/_build/
58 |
59 | # PyBuilder
60 | target/
61 |
62 | # Sphinx
63 | _build
64 |
65 | # Merge tool
66 | *.orig
67 | .venv
68 | env/
69 | venv/
70 | ENV/
71 | env.bak/
72 | venv.bak/
73 |
74 | # Test artifacts
75 | mcmc.sqlite
76 | .spyproject
77 |
78 | # Docker development
79 | # notebooks/
80 |
81 | # air speed velocity (asv)
82 | benchmarks/env/
83 | benchmarks/html/
84 | benchmarks/results/
85 | .pytest_cache/
86 |
87 | # VSCode
88 | .vscode/
89 | .dmypy.json
90 | dmypy.json
91 |
--------------------------------------------------------------------------------
/symbolic_pymc/tensorflow/dispatch.py:
--------------------------------------------------------------------------------
import tensorflow as tf

from collections.abc import Mapping

from kanren.term import operator, arguments

from unification.core import _reify, _unify, reify

from cons.core import _car, _cdr

from etuples import etuplize

from .meta import TFlowMetaSymbol
from ..meta import metatize
from ..dispatch import unify_MetaSymbol

# The concrete TF base classes that have meta counterparts; used to
# dispatch on raw TF objects in the registrations below.
tf_class_abstractions = tuple(c.base for c in TFlowMetaSymbol.base_subclasses())

# Unification between meta objects and raw TF objects: the raw side is
# metatized first so unification always happens between meta objects.
_unify.add(
    (TFlowMetaSymbol, tf_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(u, metatize(v), s),
)
_unify.add(
    (tf_class_abstractions, TFlowMetaSymbol, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), v, s),
)
_unify.add(
    (tf_class_abstractions, tf_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), metatize(v), s),
)


def _reify_TFlowClasses(o, s):
    """Reify a raw TF object by metatizing it and reifying the meta result."""
    meta_obj = metatize(o)
    return reify(meta_obj, s)


_reify.add((tf_class_abstractions, Mapping), _reify_TFlowClasses)


# Treat a `tf.Tensor` as a term/cons pair: its operator ("car") is the meta
# operator and its arguments ("cdr") are the meta operator's inputs.
_car.add((tf.Tensor,), lambda x: operator(metatize(x)))
operator.add((tf.Tensor,), lambda x: operator(metatize(x)))

_cdr.add((tf.Tensor,), lambda x: arguments(metatize(x)))
arguments.add((tf.Tensor,), lambda x: arguments(metatize(x)))

etuplize.add(tf_class_abstractions, lambda x, shallow=False: etuplize(metatize(x), shallow))


# This module exists for its registration side effects only.
__all__ = []
--------------------------------------------------------------------------------
/symbolic_pymc/theano/dispatch.py:
--------------------------------------------------------------------------------
import theano.tensor as tt

from collections.abc import Mapping

from kanren.term import term, operator, arguments

from unification.core import _reify, _unify, reify

from cons.core import _car, _cdr

from etuples import etuplize
from etuples.core import ExpressionTuple

from .meta import TheanoMetaSymbol
from ..meta import metatize
from ..dispatch import unify_MetaSymbol


# The concrete Theano base classes that have meta counterparts; used to
# dispatch on raw Theano objects in the registrations below.
tt_class_abstractions = tuple(c.base for c in TheanoMetaSymbol.base_subclasses())

# Unification between meta objects and raw Theano objects: the raw side is
# metatized first so unification always happens between meta objects.
_unify.add(
    (TheanoMetaSymbol, tt_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(u, metatize(v), s),
)
_unify.add(
    (tt_class_abstractions, TheanoMetaSymbol, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), v, s),
)
_unify.add(
    (tt_class_abstractions, tt_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), metatize(v), s),
)


def _reify_TheanoClasses(o, s):
    """Reify a raw Theano object by metatizing it and reifying the meta result."""
    meta_obj = metatize(o)
    return reify(meta_obj, s)


_reify.add((tt_class_abstractions, Mapping), _reify_TheanoClasses)

# Treat a Theano variable as a term/cons pair: its operator ("car") is the
# meta operator and its arguments ("cdr") are the meta operator's inputs.
operator.add((tt.Variable,), lambda x: operator(metatize(x)))
_car.add((tt.Variable,), lambda x: operator(metatize(x)))

arguments.add((tt.Variable,), lambda x: arguments(metatize(x)))
_cdr.add((tt.Variable,), lambda x: arguments(metatize(x)))

# Build a term from a raw Theano `Op` and an expression tuple of arguments.
term.add((tt.Op, ExpressionTuple), lambda op, args: term(metatize(op), args))

etuplize.add(tt_class_abstractions, lambda x, shallow=False: etuplize(metatize(x), shallow))

# This module exists for its registration side effects only.
__all__ = []
--------------------------------------------------------------------------------
/symbolic_pymc/tensorflow/graph.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | from tensorflow.core.protobuf import config_pb2
4 |
5 | from tensorflow.python.framework import ops
6 | from tensorflow.python.framework import importer
7 | from tensorflow.python.framework import meta_graph
8 |
9 | from tensorflow.python.grappler import cluster
10 | from tensorflow.python.grappler import tf_optimizer
11 |
12 |
# Build a (single, module-level) Grappler cluster for graph optimization.
try:  # pragma: no cover
    gcluster = cluster.Cluster()
except tf.errors.UnavailableError:  # pragma: no cover
    # Previously this branch left `gcluster` unbound, so any later call to
    # `normalize_tf_graph` failed with a confusing `NameError`.  Bind `None`
    # instead: `OptimizeGraph` accepts `cluster=None` and will fall back to
    # its own default cluster.
    gcluster = None

config = config_pb2.ConfigProto()
19 |
20 |
def normalize_tf_graph(graph_output, new_graph=True, verbose=False):
    """Use grappler to normalize a graph.

    Arguments
    ---------
    graph_output: Tensor
        A tensor we want to consider as "output" of a `FuncGraph`.
    new_graph: bool
        When `True` (default), import the optimized graph into a fresh
        `Graph`; otherwise import it into the current default graph.
    verbose: bool
        Passed through to Grappler's `OptimizeGraph`.

    Returns
    -------
    The simplified graph.
    """
    # Grappler determines the graph's fetch/output nodes from the TRAIN_OP
    # collection, so point that collection at our output tensor (clearing
    # anything a previous call left behind).
    train_op = graph_output.graph.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.clear()
    train_op.extend([graph_output])

    metagraph = meta_graph.create_meta_graph_def(graph=graph_output.graph)

    optimized_graphdef = tf_optimizer.OptimizeGraph(
        config, metagraph, verbose=verbose, cluster=gcluster
    )

    # Capture the name before (possibly) deleting our reference below.
    output_name = graph_output.name

    if new_graph:
        optimized_graph = ops.Graph()
    else:  # pragma: no cover
        optimized_graph = ops.get_default_graph()
        del graph_output

    with optimized_graph.as_default():
        importer.import_graph_def(optimized_graphdef, name="")

    # Recover the optimized counterpart of the original output tensor by name.
    opt_graph_output = optimized_graph.get_tensor_by_name(output_name)

    return opt_graph_output
57 |
--------------------------------------------------------------------------------
/docs/source/semantic_sphinx/theme.conf:
--------------------------------------------------------------------------------
1 | # Semantic theme
2 | [theme]
3 | inherit = basic
4 | stylesheet = semantic-sphinx.css
5 | pygments_style = tango
6 | # The sphinx bootstrap theme includes information typically presented in a
7 | # theme's sidebar in the navbar, so these are disabled by default.
8 | # Users can override this with `html_sidebars` in their conf.py.
9 | sidebars =
10 |
11 | # Configurable options.
12 | [options]
13 | # Navigation bar title. (Default: ``project`` value)
14 | navbar_title =
15 |
16 | # Tab name for entire site. (Default: "Site")
17 | navbar_site_name = Site
18 |
19 | # A list of tuples containing pages to link to. The value should be
20 | # in the form [(name, page), ..]
21 | navbar_links =
22 |
23 | # Render the next and previous page links in navbar. (Default: true)
24 | navbar_sidebarrel = true
25 |
26 | # Render the current pages TOC in the navbar. (Default: true)
27 | navbar_pagenav = true
28 |
29 | # Tab name for the current pages TOC. (Default: "Page")
30 | navbar_pagenav_name = Page
31 |
32 | # Global TOC depth for "site" navbar tab. (Default: 1)
33 | # Switching to -1 shows all levels.
34 | globaltoc_depth = 1
35 |
36 | # Include hidden TOCs in Site navbar?
37 | #
38 | # Note: If this is "false", you cannot have mixed ``:hidden:`` and
39 | # non-hidden ``toctree`` directives in the same page, or else the build
40 | # will break.
41 | #
42 | # Values: "true" (default) or "false"
43 | globaltoc_includehidden = true
44 |
45 | # HTML navbar class (Default: "navbar") to attach to <div> element.
46 | # For black navbar, do "navbar navbar-inverse"
47 | navbar_class = navbar
48 |
49 | # Fix navigation bar to top of page?
50 | # Values: "true" (default) or "false"
51 | navbar_fixed_top = true
52 |
53 | # Location of link to source.
54 | # Options are "nav" (default), "footer" or anything else to exclude.
55 | source_link_position = nav
56 |
--------------------------------------------------------------------------------
/docs/source/symbolic_pymc.theano.rst:
--------------------------------------------------------------------------------
1 | symbolic\_pymc.theano package
2 | =============================
3 |
4 | Submodules
5 | ----------
6 |
7 | symbolic\_pymc.theano.dispatch module
8 | -------------------------------------
9 |
10 | .. automodule:: symbolic_pymc.theano.dispatch
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | symbolic\_pymc.theano.meta module
16 | ---------------------------------
17 |
18 | .. automodule:: symbolic_pymc.theano.meta
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | symbolic\_pymc.theano.ops module
24 | --------------------------------
25 |
26 | .. automodule:: symbolic_pymc.theano.ops
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | symbolic\_pymc.theano.opt module
32 | --------------------------------
33 |
34 | .. automodule:: symbolic_pymc.theano.opt
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | symbolic\_pymc.theano.printing module
40 | -------------------------------------
41 |
42 | .. automodule:: symbolic_pymc.theano.printing
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | symbolic\_pymc.theano.pymc3 module
48 | ----------------------------------
49 |
50 | .. automodule:: symbolic_pymc.theano.pymc3
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | symbolic\_pymc.theano.random\_variables module
56 | ----------------------------------------------
57 |
58 | .. automodule:: symbolic_pymc.theano.random_variables
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | symbolic\_pymc.theano.utils module
64 | ----------------------------------
65 |
66 | .. automodule:: symbolic_pymc.theano.utils
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 |
72 | Module contents
73 | ---------------
74 |
75 | .. automodule:: symbolic_pymc.theano
76 | :members:
77 | :undoc-members:
78 | :show-inheritance:
79 |
--------------------------------------------------------------------------------
/tests/theano/test_kanren.py:
--------------------------------------------------------------------------------
1 | import theano.tensor as tt
2 |
3 | from unification import var
4 |
5 | from etuples import etuple
6 |
7 | from kanren import eq, run
8 | from kanren.graph import applyo
9 | from kanren.term import term, operator, arguments
10 | from kanren.assoccomm import eq_assoc, eq_comm
11 |
12 | from symbolic_pymc.theano.meta import mt
13 | from symbolic_pymc.theano.utils import graph_equal
14 |
15 |
def test_terms():
    """Check that `operator`/`arguments`/`term` dispatch on raw Theano expressions via metatization."""
    x, a, b = tt.dvectors("xab")
    test_expr = x + a * b

    # `operator`/`arguments` applied to a raw expression should equal the
    # metatized op/inputs.
    assert mt(test_expr.owner.op) == operator(test_expr)
    assert mt(tuple(test_expr.owner.inputs)) == tuple(arguments(test_expr))

    assert tuple(arguments(test_expr)) == mt(tuple(test_expr.owner.inputs))

    # Implicit `etuple` conversion should retain the original object
    # (within the implicitly introduced meta object, of course).
    assert test_expr == arguments(test_expr)._parent._eval_obj.obj

    # Round-tripping through `term(operator(...), arguments(...))` should
    # reproduce an equivalent graph/meta object.
    assert graph_equal(test_expr, term(operator(test_expr), arguments(test_expr)))
    assert mt(test_expr) == term(operator(test_expr), arguments(test_expr))

    # Same here: should retain the original object.
    assert test_expr == term(operator(test_expr), arguments(test_expr)).reify()
34 |
35 |
def test_kanren_algebra():
    """Unify `add`/`mul` meta graphs against versions holding a logic variable."""
    a, b = mt.dvectors("ab")
    for meta_op in (mt.add, mt.mul):
        (solution,) = run(1, var("x"), eq(meta_op(a, b), meta_op(a, var("x"))))
        assert solution == b
40 |
41 |
def test_assoccomm():
    """Check `applyo` and the associative/commutative relations on Theano meta graphs."""
    x, a, b, c = tt.dvectors("xabc")
    test_expr = x + 1
    q = var()

    # `q` does not occur in this goal, so it simply reifies to itself.
    res = run(1, q, applyo(tt.add, etuple(*test_expr.owner.inputs), test_expr))
    assert q == res[0]

    # Solve for the operator of the expression.
    res = run(1, q, applyo(q, etuple(*test_expr.owner.inputs), test_expr))
    assert tt.add == res[0].reify()

    # Solve for the arguments of the expression.
    res = run(1, q, applyo(tt.add, q, test_expr))
    assert mt(tuple(test_expr.owner.inputs)) == res[0]

    x = var()
    # Commutativity: the "missing" operand on the right must be `a`.
    res = run(0, x, eq_comm(mt.mul(a, b), mt.mul(b, x)))
    assert (mt(a),) == res

    res = run(0, x, eq_comm(mt.add(a, b), mt.add(b, x)))
    assert (mt(a),) == res

    # Associativity: the remaining operands group into a single sub-expression.
    (res,) = run(0, x, eq_assoc(mt.add(a, b, c), mt.add(a, x)))
    assert res == mt(b + c)

    (res,) = run(0, x, eq_assoc(mt.mul(a, b, c), mt.mul(a, x)))
    assert res == mt(b * c)
68 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import versioneer

from pathlib import Path

from setuptools import setup, find_packages


# Paths resolved relative to this file so the build works from any CWD.
PROJECT_ROOT = Path(__file__).resolve().parent
REQUIREMENTS_FILE = PROJECT_ROOT / "requirements.txt"
README_FILE = PROJECT_ROOT / "README.md"
VERSION_FILE = PROJECT_ROOT / "symbolic_pymc" / "__init__.py"

NAME = "symbolic-pymc"
DESCRIPTION = "Tools for symbolic math in PyMC"
AUTHOR = "PyMC Developers"
AUTHOR_EMAIL = "pymc.devs@gmail.com"
# Fixed: a stray trailing comma previously made this a one-element tuple,
# which was then passed as the (string-valued) `url` metadata to `setup()`.
URL = "https://github.com/pymc-devs/symbolic-pymc"
19 |
20 |
def get_long_description():
    """Return the contents of the project README for use as the package's long description."""
    # An explicit encoding avoids a UnicodeDecodeError on platforms whose
    # default locale encoding is not UTF-8.
    return README_FILE.read_text(encoding="utf-8")
24 |
25 |
setup(
    name=NAME,
    # The version string and build/`sdist` commands are derived from VCS
    # tags via versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    install_requires=[
        "numpy>=1.18.1",
        "scipy>=1.4.0",
        "Theano>=1.0.4",
        # TensorFlow nightlies are pinned to exact dev builds; bump all
        # three together to keep them mutually compatible.
        "tf-estimator-nightly==2.4.0.dev2020070401",
        "tf-nightly==2.4.0.dev20200704",
        "tfp-nightly==0.11.0.dev20200704",
        "multipledispatch>=0.6.0",
        "logical-unification>=0.4.3",
        "miniKanren>=1.0.1",
        "etuples>=0.3.2",
        "cons>=0.4.0",
        "toolz>=0.9.0",
        "cachetools",
        "pymc3>=3.6",
        # Installed directly from the GitHub master branch.
        "pymc4 @ git+https://github.com/pymc-devs/pymc4.git@master#egg=pymc4-0.0.1",
    ],
    packages=find_packages(exclude=["tests"]),
    tests_require=["pytest"],
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    include_package_data=True,
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries",
    ],
)
71 |
--------------------------------------------------------------------------------
/tests/theano/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import theano
3 | import theano.tensor as tt
4 |
5 | from symbolic_pymc.theano.opt import ScanArgs
6 | from symbolic_pymc.theano.random_variables import CategoricalRV, DirichletRV, NormalRV
7 |
8 |
def create_test_hmm():
    """Build a Theano `Scan`-based HMM graph for use as a test fixture.

    Returns
    -------
    dict
        This function's `locals()`: every intermediate term (RNG, symbolic
        shapes, random variables, `ScanArgs`, and the named inner-/outer-graph
        variables) keyed by name, so tests can pick out whichever pieces
        they need.
    """
    # A shared, self-updating RNG (`default_update = rng_tt`) so the graph
    # carries its own random state.
    rng_state = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234)))
    rng_init_state = rng_state.get_state()
    rng_tt = theano.shared(rng_state, name="rng", borrow=True)
    rng_tt.tag.is_rng = True
    rng_tt.default_update = rng_tt

    # Symbolic sizes; test values let Theano's test-value machinery run.
    N_tt = tt.iscalar("N")
    N_tt.tag.test_value = 10
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 2

    # Emission means: one column per hidden state, one row per time step.
    mus_tt = tt.matrix("mus")
    mus_tt.tag.test_value = np.stack([np.arange(0.0, 10), np.arange(0.0, -10, -1)], axis=-1).astype(
        theano.config.floatX
    )

    sigmas_tt = tt.ones((N_tt,))
    sigmas_tt.name = "sigmas"

    # Initial state distribution and (row-wise) transition probabilities.
    pi_0_rv = DirichletRV(tt.ones((M_tt,)), rng=rng_tt, name="pi_0")
    Gamma_rv = DirichletRV(tt.ones((M_tt, M_tt)), rng=rng_tt, name="Gamma")

    S_0_rv = CategoricalRV(pi_0_rv, rng=rng_tt, name="S_0")

    def scan_fn(mus_t, sigma_t, S_tm1, Gamma_t, rng):
        # One HMM step: sample the next hidden state, then its emission.
        S_t = CategoricalRV(Gamma_t[S_tm1], rng=rng, name="S_t")
        Y_t = NormalRV(mus_t[S_t], sigma_t, rng=rng, name="Y_t")
        return S_t, Y_t

    (S_rv, Y_rv), scan_updates = theano.scan(
        fn=scan_fn,
        sequences=[mus_tt, sigmas_tt],
        non_sequences=[Gamma_rv, rng_tt],
        outputs_info=[{"initial": S_0_rv, "taps": [-1]}, {}],
        strict=True,
        name="scan_rv",
    )
    Y_rv.name = "Y_rv"

    # Extract and name the inner- and outer-graph terms so tests can refer
    # to them directly out of the returned `locals()`.
    scan_op = Y_rv.owner.op
    scan_args = ScanArgs.from_node(Y_rv.owner)

    Gamma_in = scan_args.inner_in_non_seqs[0]
    Y_t = scan_args.inner_out_nit_sot[0]
    mus_t = scan_args.inner_in_seqs[0]
    sigmas_t = scan_args.inner_in_seqs[1]
    S_t = scan_args.inner_out_sit_sot[0]
    rng_in = scan_args.inner_out_shared[0]

    rng_updates = scan_updates[rng_tt]
    rng_updates.name = "rng_updates"
    mus_in = Y_rv.owner.inputs[1]
    mus_in.name = "mus_in"
    sigmas_in = Y_rv.owner.inputs[2]
    sigmas_in.name = "sigmas_in"

    # The output `S_rv` is really `S_rv[1:]`, so we have to extract the actual
    # `Scan` output: `S_rv`.
    S_in = S_rv.owner.inputs[0]
    S_in.name = "S_in"

    return locals()
72 |
--------------------------------------------------------------------------------
/tests/tensorflow/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import tensorflow as tf
4 | from tensorflow.python.framework import ops
5 |
6 | from collections.abc import Mapping
7 |
8 | from symbolic_pymc.tensorflow.meta import mt
9 |
10 |
def assert_ops_equal(a, b, compare_fn=lambda a, b: a.op.type == b.op.type):
    """Recursively assert that `a` and `b` have structurally matching op graphs.

    Parameters
    ----------
    a, b : objects exposing an `op` attribute (e.g. TF tensors or meta tensors)
        The graphs to compare.
    compare_fn : callable
        Binary predicate applied to each `(a, b)` pair of nodes; defaults to
        comparing op types.

    Raises
    ------
    AssertionError
        When either object lacks `op` while the other has one, when
        `compare_fn` fails, or when input arities differ.
    """

    def _op_inputs(x):
        # Normalize the inputs of `x.op` to a flat sequence, regardless of
        # whether `x.op` is a raw `tf.Operation`, a Mapping (meta form), or
        # something exposing `.inputs` directly.
        if isinstance(x.op, tf.Operation):
            return ops._reconstruct_sequence_inputs(x.op.op_def, x.op.inputs, x.op.node_def.attr)
        elif isinstance(x.op, Mapping):
            return list(x.op.inputs.values())
        else:
            return list(x.op.inputs)

    if hasattr(a, "op") or hasattr(b, "op"):
        assert hasattr(a, "op") and hasattr(b, "op")

        assert compare_fn(a, b)

        # The original duplicated this normalization once per argument; the
        # shared helper keeps the two code paths from drifting apart.
        a_inputs = _op_inputs(a)
        b_inputs = _op_inputs(b)

        assert len(a_inputs) == len(b_inputs)

        for i_a, i_b in zip(a_inputs, b_inputs):
            assert_ops_equal(i_a, i_b)
39 |
40 |
def tfp_normal_log_prob(x, loc, scale):
    """Create a graph of the Grappler-canonicalized form of a TFP normal log-likelihood."""
    # NOTE: operand order is significant here — these graphs are compared
    # structurally in tests, so each term is built exactly as before, only
    # under different local names.
    unnormalized_term = -0.5 * tf.math.squared_difference(x / scale, loc / scale)
    normalization_term = 0.5 * np.log(2.0 * np.pi) + tf.math.log(scale)
    return unnormalized_term - normalization_term
46 |
47 |
def mt_normal_log_prob(x, loc, scale):
    """Create a meta graph for Grappler-canonicalized standard or non-standard TFP normal log-likelihoods.

    The branches below mirror the graph shapes Grappler produces: when
    `loc == 0` or `scale == 1` the corresponding sub-graph collapses
    differently (NOTE(review): `x / 1` appears to be rewritten to `1 * x` —
    confirm against `tfp_normal_log_prob` output after normalization).
    """
    if loc == 0:
        # Standard-location case: the squared difference is taken against 0.
        log_unnormalized_mt = mt(np.array(-0.5, "float32"))
        log_unnormalized_mt *= mt.squareddifference(
            mt(np.array(0.0, "float32")),
            mt.realdiv(x, scale) if scale != 1 else mt.mul(np.array(1.0, "float32"), x),
        )
    else:
        log_unnormalized_mt = mt(np.array(-0.5, "float32"))
        log_unnormalized_mt *= mt.squareddifference(
            mt.realdiv(x, scale) if scale != 1 else mt.mul(np.array(1.0, "float32"), x),
            mt.realdiv(loc, scale) if scale != 1 else mt.mul(np.array(1.0, "float32"), loc),
        )

    log_normalization_mt = mt((0.5 * np.log(2.0 * np.pi)).astype("float32"))

    # With unit scale the `log(scale)` term is folded away entirely.
    if scale != 1:
        log_normalization_mt = log_normalization_mt + mt.log(scale)

    return log_unnormalized_mt - log_normalization_mt
69 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: help venv conda docker docstyle format style black test lint check coverage docs
2 | .DEFAULT_GOAL = help
3 |
4 | PYTHON = python
5 | PIP = pip
6 | CONDA = conda
7 | SHELL = bash
8 |
9 | help:
10 | @printf "Usage:\n"
11 | @grep -E '^[a-zA-Z_-]+:.*?# .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?# "}; {printf "\033[1;34mmake %-10s\033[0m%s\n", $$1, $$2}'
12 |
13 | conda: # Set up a conda environment for development.
14 | @printf "Creating conda environment...\n"
15 | ${CONDA} create --yes --name symbolic-pymc-env python=3.6
16 | ( \
17 | ${CONDA} activate symbolic-pymc-env; \
18 | ${PIP} install -U pip; \
19 | ${PIP} install -r requirements.txt; \
20 | ${PIP} install -r requirements-dev.txt; \
21 | ${CONDA} deactivate; \
22 | )
23 | @printf "\n\nConda environment created! \033[1;34mRun \`conda activate symbolic-pymc-env\` to activate it.\033[0m\n\n\n"
24 |
venv: # Set up a Python virtual environment for development.
	@printf "Creating Python virtual environment...\n"
	rm -rf symbolic-pymc-venv
	${PYTHON} -m venv symbolic-pymc-venv
	( \
	source symbolic-pymc-venv/bin/activate; \
	${PIP} install -U pip; \
	${PIP} install -r requirements.txt; \
	${PIP} install -r requirements-dev.txt; \
	deactivate; \
	)
	@printf "\n\nVirtual environment created! \033[1;34mRun \`source symbolic-pymc-venv/bin/activate\` to activate it.\033[0m\n\n\n"
37 |
38 | docker: # Set up a Docker image for development.
39 | @printf "Creating Docker image...\n"
40 | ${SHELL} ./scripts/container.sh --build
41 |
42 | docstyle:
43 | @printf "Checking documentation with pydocstyle...\n"
44 | pydocstyle symbolic_pymc/
45 | @printf "\033[1;34mPydocstyle passes!\033[0m\n\n"
46 |
47 | format:
48 | @printf "Checking code style with black...\n"
49 | black --check symbolic_pymc/ tests/
50 | @printf "\033[1;34mBlack passes!\033[0m\n\n"
51 |
52 | style:
53 | @printf "Checking code style with pylint...\n"
54 | pylint symbolic_pymc/ tests/
55 | @printf "\033[1;34mPylint passes!\033[0m\n\n"
56 |
57 | black: # Format code in-place using black.
58 | black symbolic_pymc/ tests/
59 |
60 | test: # Test code using pytest.
61 | pytest -v tests/ --cov=symbolic_pymc/ --cov-report=xml --html=testing-report.html --self-contained-html
62 |
63 | coverage: test
64 | diff-cover coverage.xml --compare-branch=master --fail-under=100
65 |
66 | docs:
67 | pushd docs && \
68 | sphinx-apidoc ../symbolic_pymc/ -o source --force && \
69 | make html && \
70 | ghp-import -n -p -r upstream -b gh-pages build/html && \
71 | popd
72 |
73 | lint: docstyle format style # Lint code using pydocstyle, black and pylint.
74 |
75 | check: lint test coverage # Both lint and test code. Runs `make lint` followed by `make test`.
76 |
--------------------------------------------------------------------------------
/docs/source/semantic_sphinx/search.html:
--------------------------------------------------------------------------------
1 | {#
2 | basic/search.html
3 | ~~~~~~~~~~~~~~~~~
4 |
5 | Template for the search page.
6 |
7 | :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
8 | :license: BSD, see LICENSE for details.
9 | #}
10 | {%- extends "layout.html" %}
11 | {% set title = _('Search') %}
12 | {% set script_files = script_files + ['_static/searchtools.js'] %}
13 | {% block extrahead %}
14 |
17 | {# this is used when loading the search index using $.ajax fails,
18 | such as on Chrome for documents on localhost #}
19 |
20 | {{ super() }}
21 | {% endblock %}
22 | {% block body %}
23 |
24 |
25 |
26 |
27 |
28 | {% trans %}Please activate JavaScript to enable the search
29 | functionality.{% endtrans %}
30 |
31 |
32 |
33 | {% trans %}From here you can search these documents. Enter your search
34 | words into the box below and click "search". Note that the search
35 | function will automatically search for all of the words. Pages
36 | containing fewer words won't appear in the result list.{% endtrans %}
37 |
38 |
39 |
45 |
46 |
47 | {% if search_performed %}
48 |
{{ _('Search Results') }}
49 | {% if not search_results %}
50 |
{{ _('Your search did not match any documents. Please make sure that all words are spelled correctly and that
51 | you\'ve selected enough categories.') }}
52 | {% endif %}
53 | {% endif %}
54 |
55 | {% if search_results %}
56 |
57 | {% for href, caption, context in search_results %}
58 |
59 |
60 |
63 | {{ context|e }}
64 |
65 |
>
66 | {% endfor %}
67 |
68 | {% endif %}
69 |
70 |
71 | {% endblock %}
72 |
--------------------------------------------------------------------------------
/tests/test_dispatch.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from cons import car, cdr
4 | from cons.core import ConsError
5 |
6 | from etuples import rator, rands
7 | from etuples.core import ExpressionTuple
8 |
9 | from unification import var, unify, reify
10 |
11 | from symbolic_pymc.meta import MetaVariable, MetaOp
12 |
13 |
class SomeOp(object):
    """A minimal stand-in operator type used as the `base` of `SomeMetaOp`."""

    def __repr__(self):
        # NOTE(review): the original string literal was lost to markup
        # stripping in this copy of the file; "<SomeOp>" is the reconstruction.
        return "<SomeOp>"
17 |
18 |
class SomeType(object):
    """A simple two-field value type used as the `base` of `SomeMetaVariable`."""

    def __init__(self, field1, field2):
        self.field1 = field1
        self.field2 = field2

    def __repr__(self):
        return "SomeType({}, {})".format(self.field1, self.field2)

    def __str__(self):
        return "SomeType<{}, {}>".format(self.field1, self.field2)
29 |
30 |
class SomeMetaOp(MetaOp):
    """A minimal concrete `MetaOp` whose base type is `SomeOp`."""

    __slots__ = ()
    # The underlying ("base") type this meta op abstracts.
    base = SomeOp

    def output_meta_types(self):
        """Return the meta variable types this op produces."""
        return [SomeMetaVariable]

    def __call__(self, *args, **kwargs):
        """Construct a `SomeMetaVariable` from the given arguments."""
        return SomeMetaVariable(*args, **kwargs)
40 |
41 |
class SomeOtherMetaOp(SomeMetaOp):
    """A distinct op subclass.

    `SomeMetaVariable.base_operator` accepts exactly `SomeMetaOp` (by exact
    type check), so this subclass exercises the failing/`ConsError` paths.
    """

    pass
44 |
45 |
class SomeMetaVariable(MetaVariable):
    """A meta variable holding an op and its arguments, backed by `SomeType`."""

    __slots__ = ("op", "args")
    base = SomeType

    def __init__(self, op, args, obj=None):
        super().__init__(obj)
        self.op = op
        self.args = args

    @property
    def base_operator(self):
        # Deliberately an *exact* type check (not `isinstance`): subclasses
        # such as `SomeOtherMetaOp` must not expose a base operator, so that
        # `car`/`rator` raise for them (see `test_unify`).
        if type(self.op) == SomeMetaOp:
            return self.op
        else:
            raise NotImplementedError()

    @property
    def base_arguments(self):
        # An empty argument tuple is treated as "no arguments available" so
        # that `cdr`/`rands` raise instead of returning an empty sequence.
        if len(self.args) > 0:
            return self.args
        else:
            raise NotImplementedError()
68 |
69 |
def test_unify():
    """Check unification/reification and the `cons`/`etuples` protocols on a custom meta type."""

    q_lv = var()

    op = SomeMetaOp()
    a_args = (1, 2)
    a = SomeMetaVariable(op, a_args, obj=SomeType(1, 2))
    b = SomeMetaVariable(op, q_lv)

    # A logic variable in the `args` slot unifies with the ground arguments.
    s = unify(a, b)
    assert s is not False
    assert s[q_lv] is a_args

    obj = reify(b, s)

    assert obj == a

    # A logic variable in the `obj` slot unifies with the base object.
    r_lv = var()
    b = SomeMetaVariable(op, q_lv, obj=r_lv)

    s = unify(a, b)
    assert s is not False
    assert s[r_lv] is a.obj

    # `car`/`rator` give the op; `cdr`/`rands` give the arguments.
    assert car(a) == rator(a) == op
    assert isinstance(cdr(a), ExpressionTuple)
    assert isinstance(rands(a), ExpressionTuple)
    assert cdr(a) == rands(a) == a_args

    # Empty arguments: `base_arguments` raises, surfacing as `ConsError`.
    a = SomeMetaVariable(op, ())

    with pytest.raises(ConsError):
        cdr(a)

    with pytest.raises(ConsError):
        rands(a)

    # `base_operator` rejects subclasses of `SomeMetaOp` (exact type check),
    # so `car`/`rator` raise as well.
    op = SomeOtherMetaOp()
    a = SomeMetaVariable(op, ())

    with pytest.raises(ConsError):
        car(a)

    with pytest.raises(ConsError):
        rator(a)
115 |
--------------------------------------------------------------------------------
/docs/source/semantic_sphinx/static/gallery.js:
--------------------------------------------------------------------------------
// Renders the documentation example gallery: builds linked "cards" for each
// example, grouped by category, and appends them to the `#gallery` element.
//
// NOTE(review): the jQuery element-creation strings in this file were lost
// to markup stripping; '<div>', '<img>', '<a>', and '<h2>' below are
// reconstructions consistent with the class names used.
var Gallery = {
  examples: null,   // map: example key -> {title, thumb, url}
  contents: null,   // map: example key -> category name
  categories: null,

  // Build one linked card (thumbnail + title) for the example `key`.
  drawExample: function (key) {
    var example = this.examples[key]

    var image_div = $('<div>', {
      class: 'image'
    }).append($('<img>', {
      src: "../_static/" + example.thumb
    }))

    var contents_div = $('<div>', {
      class: 'content'
    }).append($('<div>', {
      class: 'header'
    }).text(example.title))

    var div = $('<a>', {
      class: 'card',
      href: example.url
    }).append(image_div).append(contents_div)
    return div
  },

  // Wrap a list of example keys in a Semantic UI "cards" container.
  makeExamples: function (examples) {
    var cards = $('<div>', {
      class: "ui link six stackable cards"
    })
    for (var j = 0; j < examples.length; j++) {
      cards.append(this.drawExample(examples[j]))
    }
    return cards
  },

  // Render one titled segment per category into `#gallery`.
  drawExamples: function () {
    var main_div = $("#gallery")
    var gallery = this;
    var categories = this.getCategories()
    var cats = Object.keys(categories)
    cats.sort()

    cats.map(function (category) {
      var div = $('<div>', {
        class: "ui vertical segment"
      })
      div.append($('<h2>', {
        class: "ui header"
      }).text(category))
      div.append(gallery.makeExamples(categories[category]))
      main_div.append(div)
    })
  },

  // Group example keys by their category in `contents`; uncategorized
  // examples fall into "Other", which is dropped when empty.
  getCategories: function () {
    var categories = {};
    var gallery = this;
    var uniqueCategories = Array.from(new Set(Object.values(this.contents)))
    for (var i in uniqueCategories) {
      categories[uniqueCategories[i]] = []
    }
    categories["Other"] = []
    Object.keys(this.examples).forEach(function (key) {
      if (key in gallery.contents) {
        categories[gallery.contents[key]].push(key)
      }
      else {
        categories["Other"].push(key)
      }
    })
    if (categories["Other"].length === 0) {
      delete categories["Other"]
    }
    return categories
  },

  // Load `url` as a script; on failure fall back to setting the element's
  // `src` directly, then draw the gallery either way.
  loadScript: function (url, eltId) {
    var self = this;
    $.ajax({
      type: "GET", url: url, data: null,
      dataType: "script", cache: true,
      complete: function (jqxhr, textstatus) {
        if (textstatus != "success") {
          document.getElementById(eltId).src = url;
        }
        self.drawExamples();
      }
    });
  }

}
94 |
--------------------------------------------------------------------------------
/docs/source/theano-posteriors-example.rst:
--------------------------------------------------------------------------------
1 | =======================================
2 | Compute Symbolic Closed-form Posteriors
3 | =======================================
4 |
5 | :Author: Brandon T. Willard
6 | :Date: 2019-11-24
7 |
8 | .. code-block:: python
9 | :name: compute-symbolic-posterior
10 |
11 | import numpy as np
12 |
13 | import theano
14 | import theano.tensor as tt
15 |
16 | import pymc3 as pm
17 |
18 | from functools import partial
19 |
20 | from unification import var
21 |
22 | from kanren import run
23 | from kanren.graph import reduceo, walko
24 |
25 | from symbolic_pymc.theano.printing import tt_pprint
26 | from symbolic_pymc.theano.pymc3 import model_graph
27 |
28 | from symbolic_pymc.relations.theano.conjugates import conjugate
29 |
30 | theano.config.cxx = ''
31 | theano.config.compute_test_value = 'ignore'
32 |
33 | a_tt = tt.vector('a')
34 | R_tt = tt.matrix('R')
35 | F_t_tt = tt.matrix('F')
36 | V_tt = tt.matrix('V')
37 |
38 | a_tt.tag.test_value = np.r_[1., 0.]
39 | R_tt.tag.test_value = np.diag([10., 10.])
40 | F_t_tt.tag.test_value = np.c_[-2., 1.]
41 | V_tt.tag.test_value = np.diag([0.5])
42 |
43 | y_tt = tt.as_tensor_variable(np.r_[-3.])
44 | y_tt.name = 'y'
45 |
46 | with pm.Model() as model:
47 |
48 | # A normal prior
49 | beta_rv = pm.MvNormal('beta', a_tt, R_tt, shape=(2,))
50 |
51 | # An observed random variable using the prior as a regression parameter
52 | E_y_rv = F_t_tt.dot(beta_rv)
53 | Y_rv = pm.MvNormal('Y', E_y_rv, V_tt, observed=y_tt)
54 |
55 | # Create a graph for the model
56 | fgraph = model_graph(model, output_vars=[Y_rv])
57 |
58 |
59 | def conjugate_graph(graph):
60 | """Apply conjugate relations throughout a graph."""
61 |
62 | def fixedp_conjugate_walko(x, y):
63 | return reduceo(partial(walko, conjugate), x, y)
64 |
65 | expr_graph, = run(1, var('q'),
66 | fixedp_conjugate_walko(graph, var('q')))
67 |
68 | fgraph_opt = expr_graph.eval_obj
69 | fgraph_opt_tt = fgraph_opt.reify()
70 | return fgraph_opt_tt
71 |
72 |
73 | fgraph_conj = conjugate_graph(fgraph.outputs[0])
74 |
75 | Before
76 | ------
77 |
78 | .. code-block:: python
79 | :name: posterior-before-print
80 |
81 | >>> print(tt_pprint(fgraph))
82 | F in R**(N^F_0 x N^F_1), a in R**(N^a_0), R in R**(N^R_0 x N^R_1)
83 | V in R**(N^V_0 x N^V_1)
84 | beta ~ N(a, R) in R**(N^beta_0), Y ~ N((F * beta), V) in R**(N^Y_0)
85 | Y = [-3.]
86 |
87 | After
88 | -----
89 |
90 | .. code-block:: python
91 | :name: posterior-after-print
92 |
93 | >>> print(tt_pprint(fgraph_conj))
94 | a in R**(N^a_0), R in R**(N^R_0 x N^R_1), F in R**(N^F_0 x N^F_1)
95 | c in R**(N^c_0 x N^c_1), d in R**(N^d_0 x N^d_1)
96 | V in R**(N^V_0 x N^V_1), e in R**(N^e_0 x N^e_1)
97 | b ~ N((a + (((R * F.T) * c) * ([-3.] - (F * a)))), (R - ((((R * F.T) * d) * (V + (F * (R * F.T)))) * ((R * F.T) * e).T))) in R**(N^b_0)
98 | b
99 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Symbolic PyMC
2 |
3 | [](https://travis-ci.org/pymc-devs/symbolic-pymc) [](https://coveralls.io/github/pymc-devs/symbolic-pymc?branch=master)
4 |
5 |
6 | [Symbolic PyMC](https://pymc-devs.github.io/symbolic-pymc) provides tools for the symbolic manipulation of [PyMC](https://github.com/pymc-devs) models and their underlying computational graphs in [Theano](https://github.com/Theano/Theano) and [TensorFlow](https://github.com/tensorflow/tensorflow). It enables graph manipulations in the relational DSL [miniKanren](http://minikanren.org/)—via the [`miniKanren`](https://github.com/pythological/kanren) package—by way of meta classes and S-expression forms of a graph.
7 |
8 | This work stems from a series of articles starting [here](https://brandonwillard.github.io/a-role-for-symbolic-computation-in-the-general-estimation-of-statistical-models.html). Documentation and examples for Symbolic PyMC are available [here](https://pymc-devs.github.io/symbolic-pymc).
9 |
10 | *This package is currently in alpha, so expect large-scale changes at any time!*
11 |
12 | ## Installation
13 |
14 | The package name is `symbolic_pymc` and it can be installed with `pip` directly from GitHub
15 | ```shell
16 | $ pip install git+https://github.com/pymc-devs/symbolic-pymc
17 | ```
18 | or after cloning the repo (and then installing with `pip`).
19 |
20 | ## Features
21 |
22 | ### General
23 |
24 | * Support for [Theano](https://github.com/Theano/Theano) and [TensorFlow](https://github.com/tensorflow/tensorflow) graphs
25 | - [Unification and reification](https://github.com/pythological/unification) for all components of a graph
26 | - A more robust Theano `Op` for representing random variables
27 | - Conversion of PyMC3 models into sample-able Theano graphs representing all random variable inter-dependencies
28 | - A Theano LaTeX pretty printer that displays shape information and distributions in mathematical notation
29 | - Simple text-based TensorFlow graph print-outs
30 | * Full [miniKanren](http://minikanren.org/) integration for relational graph/model manipulation.
31 | - Perform simple and robust "search and replace" over arbitrary graphs (e.g. Python builtin collections, AST, tensor algebra graphs, etc.)
32 | - Create and compose relations with explicit high-level statistical/mathematical meaning and functionality, such as "`X` is a normal scale mixture with mixing distribution `Y`", and automatically "solve" for components (i.e. `X` and `Y`) that satisfy a relation
33 | - Apply non-trivial conditions—as relations—to produce sophisticated graph manipulations (e.g. search for normal scale mixtures and scale a term in the mixing distribution)
34 | - Integrate standard Python operations into relations (e.g. use a symbolic math library to compute an inverse-Laplace transform to determine if a distribution is a scale mixture—and find its mixing distribution)
35 | * Convert graphs to an S-expression-like tuple-based form and perform manipulations at the syntax level
36 | * Pre-built example relations for graph traversal, fixed-points, symbolic closed-form posteriors, and standard statistical model reformulations
37 |
--------------------------------------------------------------------------------
/symbolic_pymc/relations/theano/__init__.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import theano.tensor as tt
3 |
4 | from unification import var
5 | from unification.utils import transitive_get as walk
6 |
7 | from kanren import eq
8 | from kanren.core import lall, Zzz
9 | from kanren.facts import fact
10 | from kanren.graph import applyo, walko
11 | from kanren.assoccomm import commutative, associative
12 | from kanren.constraints import neq
13 |
14 | from etuples import etuple
15 |
16 | from ...utils import HashableNDArray
17 | from ...theano.meta import TheanoMetaConstant, mt
18 |
# Establish some Theano `Op`s as commutative and/or associative so that
# `kanren.assoccomm` goals (e.g. `eq_comm`, `eq_assoc`) can reason about them.
fact(commutative, mt.add)
fact(commutative, mt.mul)
fact(associative, mt.add)
fact(associative, mt.mul)
24 |
25 |
def constant_neq(lvar, val):
    """Assert that a constant graph variable is not equal to a specific value.

    Scalar values are broadcast across arrays.

    XXX: This goal is non-relational.
    TODO: Rewrite this as an actual constraint.
    """

    if isinstance(val, np.ndarray):
        val = val.view(HashableNDArray)

    def constant_neq_goal(S):
        term = walk(lvar, S)

        if not isinstance(term, (tt.Constant, TheanoMetaConstant)):
            # When the value isn't a Theano constant, consider it satisfied
            yield S
        else:
            # Although `neq` is an actual constraint, the preceding type check
            # and alternative success--when not a constant type--make the
            # entire goal non-relational/not a true constraint, since the
            # aforementioned check will only occur once per goal-stream/state
            # and never again.
            yield from neq(term.data, val)(S)

    return constant_neq_goal
52 |
53 |
def non_obs_walko(relation, a, b):
    """Construct a goal that applies a relation to all nodes above an observed random variable.

    This is useful if you don't want to apply relations to an observed random
    variable, but you do want to apply them to every term above one and
    ultimately reproduce the entire graph (observed RV included).

    Parameters
    ----------
    relation: function
        A binary relation/goal constructor function
    a: lvar or meta graph
        The left-hand side of the relation.
    b: lvar or meta graph
        The right-hand side of the relation

    """
    # NOTE: A duplicate `obs_rv_lv = var()` binding was removed here; the
    # original assignment was immediately shadowed and never used.
    obs_lv = var()
    rv_op_lv, rv_args_lv, obs_rv_lv = var(), var(), var()
    new_rv_args_lv, new_obs_rv_lv = var(), var()

    return lall(
        # Indicate the observed term (i.e. observation and RV)
        eq(a, mt.observed(obs_lv, obs_rv_lv)),
        # Deconstruct the observed random variable
        applyo(rv_op_lv, rv_args_lv, obs_rv_lv),
        # Apply the relation to the RV's inputs
        Zzz(walko, relation, rv_args_lv, new_rv_args_lv),
        # Reconstruct the random variable
        applyo(rv_op_lv, new_rv_args_lv, new_obs_rv_lv),
        # Reconstruct the observation
        applyo(mt.observed, etuple(obs_lv, new_obs_rv_lv), b),
    )
88 |
--------------------------------------------------------------------------------
/docs/source/theano-posteriors-example.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Compute Symbolic Closed-form Posteriors
2 | #+AUTHOR: Brandon T. Willard
3 | #+DATE: 2019-11-24
4 | #+EMAIL: brandonwillard@gmail.com
5 |
6 | #+STARTUP: hideblocks indent hidestars
7 | #+OPTIONS: num:nil author:t date:t title:t toc:nil ^:nil d:(not "logbook" "todo" "notes") tex:t |:t broken-links:f
8 | #+SELECT_TAGS: export
9 | #+EXCLUDE_TAGS: noexport
10 |
11 | #+PROPERTY: header-args :session spymc-examples :exports both :eval never-export :results output drawer replace
12 | #+PROPERTY: header-args:text :eval never
13 |
14 | #+NAME: compute-symbolic-posterior
15 | #+BEGIN_SRC python :eval never
16 | import numpy as np
17 |
18 | import theano
19 | import theano.tensor as tt
20 |
21 | import pymc3 as pm
22 |
23 | from functools import partial
24 |
25 | from unification import var
26 |
27 | from kanren import run
28 | from kanren.graph import reduceo, walko
29 |
30 | from symbolic_pymc.theano.printing import tt_pprint
31 | from symbolic_pymc.theano.pymc3 import model_graph
32 |
33 | from symbolic_pymc.relations.theano.conjugates import conjugate
34 |
35 | theano.config.cxx = ''
36 | theano.config.compute_test_value = 'ignore'
37 |
38 | a_tt = tt.vector('a')
39 | R_tt = tt.matrix('R')
40 | F_t_tt = tt.matrix('F')
41 | V_tt = tt.matrix('V')
42 |
43 | a_tt.tag.test_value = np.r_[1., 0.]
44 | R_tt.tag.test_value = np.diag([10., 10.])
45 | F_t_tt.tag.test_value = np.c_[-2., 1.]
46 | V_tt.tag.test_value = np.diag([0.5])
47 |
48 | y_tt = tt.as_tensor_variable(np.r_[-3.])
49 | y_tt.name = 'y'
50 |
51 | with pm.Model() as model:
52 |
53 | # A normal prior
54 | beta_rv = pm.MvNormal('beta', a_tt, R_tt, shape=(2,))
55 |
56 | # An observed random variable using the prior as a regression parameter
57 | E_y_rv = F_t_tt.dot(beta_rv)
58 | Y_rv = pm.MvNormal('Y', E_y_rv, V_tt, observed=y_tt)
59 |
60 | # Create a graph for the model
61 | fgraph = model_graph(model, output_vars=[Y_rv])
62 |
63 |
64 | def conjugate_graph(graph):
65 | """Apply conjugate relations throughout a graph."""
66 |
67 | def fixedp_conjugate_walko(x, y):
68 | return reduceo(partial(walko, conjugate), x, y)
69 |
70 | expr_graph, = run(1, var('q'),
71 | fixedp_conjugate_walko(graph, var('q')))
72 |
73 | fgraph_opt = expr_graph.eval_obj
74 | fgraph_opt_tt = fgraph_opt.reify()
75 | return fgraph_opt_tt
76 |
77 |
78 | fgraph_conj = conjugate_graph(fgraph.outputs[0])
79 |
80 | #+END_SRC
81 |
82 | *** Before
83 | #+NAME: posterior-before-print
84 | #+BEGIN_SRC python :eval never
85 | >>> print(tt_pprint(fgraph))
86 | F in R**(N^F_0 x N^F_1), a in R**(N^a_0), R in R**(N^R_0 x N^R_1)
87 | V in R**(N^V_0 x N^V_1)
88 | beta ~ N(a, R) in R**(N^beta_0), Y ~ N((F * beta), V) in R**(N^Y_0)
89 | Y = [-3.]
90 |
91 | #+END_SRC
92 |
93 | *** After
94 | #+NAME: posterior-after-print
95 | #+BEGIN_SRC python :eval never
96 | >>> print(tt_pprint(fgraph_conj))
97 | a in R**(N^a_0), R in R**(N^R_0 x N^R_1), F in R**(N^F_0 x N^F_1)
98 | c in R**(N^c_0 x N^c_1), d in R**(N^d_0 x N^d_1)
99 | V in R**(N^V_0 x N^V_1), e in R**(N^e_0 x N^e_1)
100 | b ~ N((a + (((R * F.T) * c) * ([-3.] - (F * a)))), (R - ((((R * F.T) * d) * (V + (F * (R * F.T)))) * ((R * F.T) * e).T))) in R**(N^b_0)
101 | b
102 |
103 | #+END_SRC
104 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | import symbolic_pymc
17 |
18 |
19 | sys.path.insert(0, os.path.abspath('.'))
20 |
21 |
22 | # -- Project information -----------------------------------------------------
23 |
24 | project = 'Symbolic PyMC'
25 | copyright = '2019, PyMC developers'
26 | author = 'PyMC developers'
27 |
28 |
29 | # -- General configuration ---------------------------------------------------
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = [
35 | "recommonmark",
36 | "sphinx.ext.autodoc",
37 | "sphinx.ext.mathjax",
38 | "sphinx.ext.autosummary",
39 | "sphinx.ext.autosectionlabel",
40 | "IPython.sphinxext.ipython_console_highlighting",
41 | ]
42 |
43 | html_css_files = [
44 | 'custom.css',
45 | ]
46 |
47 | mathjax_config = {
48 | # 'extensions': [''],
49 | # 'jax': ['input/TeX']
50 | }
51 |
52 | modindex_common_prefix = ['symbolic_pymc.']
53 |
54 | numfig = True
55 | numfig_secnum_depth = 1
56 |
57 | # Don't auto-generate summary for class members.
58 | numpydoc_show_class_members = False
59 |
60 | # Show the documentation of __init__ and the class docstring
61 | autoclass_content = "both"
62 |
63 | # Do not show the return type as a separate section
64 | napoleon_use_rtype = False
65 |
66 | # Add any paths that contain templates here, relative to this directory.
67 | templates_path = ['_templates']
68 |
69 | source_suffix = [".rst", ".md"]
70 |
71 | # List of patterns, relative to source directory, that match files and
72 | # directories to ignore when looking for source files.
73 | # This pattern also affects html_static_path and html_extra_path.
74 | exclude_patterns = ["_build", "**.ipynb_checkpoints"]
75 |
76 |
77 | # -- Options for HTML output -------------------------------------------------
78 |
79 | # The theme to use for HTML and HTML Help pages. See the documentation for
80 | # a list of builtin themes.
81 | #
82 | html_theme_path = ["."]
83 | html_theme = "semantic_sphinx"
84 |
85 | html_theme_options = {
86 | "navbar_links": [
87 | ("Index", "index"),
88 | ("API", "modules"),
89 | ],
90 | }
91 |
92 | # Add any paths that contain custom static files (such as style sheets) here,
93 | # relative to this directory. They are copied after the builtin static files,
94 | # so a file named "default.css" will overwrite the builtin "default.css".
95 | html_static_path = ['_static']
96 |
97 | pygments_style = "friendly"
98 |
99 | html_sidebars = {"**": ["about.html", "navigation.html", "searchbox.html"]}
100 |
101 |
102 | def setup(app):
103 | app.add_stylesheet(
104 | "https://cdn.jsdelivr.net/npm/semantic-ui@2.4.2/dist/semantic.min.css"
105 | )
106 | app.add_stylesheet("default.css")
107 |
--------------------------------------------------------------------------------
/tests/tensorflow/test_kanren.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | from tensorflow.python.framework.ops import disable_tensor_equality
4 |
5 | from tensorflow_probability import distributions as tfd
6 |
7 | from unification import var, unify
8 |
9 | from kanren import run, eq, lall
10 | from kanren.graph import walko
11 | from kanren.assoccomm import eq_comm, commutative
12 |
13 | from symbolic_pymc.meta import enable_lvar_defaults
14 | from symbolic_pymc.tensorflow.meta import mt
15 | from symbolic_pymc.tensorflow.graph import normalize_tf_graph
16 |
17 | from tests.tensorflow import run_in_graph_mode
18 | from tests.tensorflow.utils import mt_normal_log_prob
19 |
20 | disable_tensor_equality()
21 |
22 |
@run_in_graph_mode
def test_walko():
    """Check that `walko` rewrites a sub-term of a TF meta graph."""
    with enable_lvar_defaults("names"):
        expr_mt = mt(1) + mt(2)

    def one_to_three(x, y):
        # Rewrite the constant `1` into the constant `3`.
        return lall(eq(x, mt(1)), eq(y, mt(3)))

    q_lv = var()
    (res,) = run(1, q_lv, walko(one_to_three, expr_mt, q_lv))

    # The easiest way to check whether or not two arbitrary TF meta graphs are
    # (structurally) equivalent is to confirm that they unify. This avoids
    # uninteresting differences in node names, uninferred type information,
    # etc.
    with enable_lvar_defaults("names", "node_attrs"):
        assert unify(res.eval_obj, mt(3) + mt(2)) is not False
40 |
41 |
@run_in_graph_mode
def test_commutativity():
    """Check that addition graphs unify under commutativity of `add`."""
    with enable_lvar_defaults("names"):
        lhs_mt = mt(1) + mt(2)
        rhs_mt = mt(2) + mt(1)

    q_lv = var()

    # The addition operator should be registered as commutative
    assert run(0, q_lv, commutative(lhs_mt.base_operator)) is not False

    # Both argument orderings should be equal under `eq_comm`
    assert run(0, q_lv, eq_comm(lhs_mt, rhs_mt)) is not False

    with enable_lvar_defaults("names"):
        pattern_mt = mt(2) + q_lv

    # The logic variable should pick out the remaining addend
    results = run(0, q_lv, eq_comm(lhs_mt, pattern_mt))
    assert results[0] == lhs_mt.base_arguments[0]
60 |
61 |
@run_in_graph_mode
def test_commutativity_tfp():
    """Check that a TFP normal log-likelihood graph matches its pattern only up to commutativity.

    Builds a `tfd.Normal(...).log_prob(...)` graph, normalizes it with
    Grappler, and compares both graphs against a meta-graph pattern using
    plain `eq` and commutative `eq_comm` unification.
    """

    with tf.Graph().as_default():
        mu_tf = tf.compat.v1.placeholder(tf.float32, name="mu", shape=tf.TensorShape([None]))
        tau_tf = tf.compat.v1.placeholder(tf.float32, name="tau", shape=tf.TensorShape([None]))

        normal_tfp = tfd.normal.Normal(mu_tf, tau_tf)

        value_tf = tf.compat.v1.placeholder(tf.float32, name="value", shape=tf.TensorShape([None]))

        normal_log_lik = normal_tfp.log_prob(value_tf)

        # Grappler-normalized version of the same graph (may reorder terms)
        normal_log_lik_opt = normalize_tf_graph(normal_log_lik)

        with enable_lvar_defaults("names", "node_attrs"):
            tfp_normal_pattern_mt = mt_normal_log_prob(var(), var(), var())

            normal_log_lik_mt = mt(normal_log_lik)
            normal_log_lik_opt_mt = mt(normal_log_lik_opt)

        # Our pattern is the form of an unnormalized TFP normal PDF.
        assert run(0, True, eq(normal_log_lik_mt, tfp_normal_pattern_mt)) == (True,)
        # Our pattern should *not* match the Grappler-optimized graph, because
        # Grappler will reorder terms (e.g. the log + constant
        # variance/normalization term)
        assert run(0, True, eq(normal_log_lik_opt_mt, tfp_normal_pattern_mt)) == ()

        # XXX: `eq_comm` is, unfortunately, order sensitive! LHS should be ground.
        assert run(0, True, eq_comm(normal_log_lik_mt, tfp_normal_pattern_mt)) == (True,)
        assert run(0, True, eq_comm(normal_log_lik_opt_mt, tfp_normal_pattern_mt)) == (True,)
93 |
--------------------------------------------------------------------------------
/symbolic_pymc/dispatch.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Mapping
2 |
3 | from cons.core import _car, _cdr, ConsError
4 |
5 | from kanren.term import arguments, operator
6 |
7 | from unification.variable import Var
8 | from unification.core import _reify, _unify, reify, unify
9 |
10 | from etuples import etuple
11 |
12 | from .meta import MetaSymbol, MetaVariable
13 |
14 |
def unify_MetaSymbol(u, v, s):
    """Unify two `MetaSymbol`s of the same type under the substitution `s`.

    Unification proceeds property-by-property (via `__all_props__`) when
    available; otherwise the two objects must compare equal directly.
    Returns the (possibly extended) substitution, or `False` on failure.
    """
    # Meta symbols only unify with instances of the exact same type.
    if type(u) != type(v):
        return False
    if getattr(u, "__all_props__", False):
        # Unify the two objects property-wise, as lists of property values.
        s = unify(
            [getattr(u, slot) for slot in u.__all_props__],
            [getattr(v, slot) for slot in v.__all_props__],
            s,
        )
    elif u != v:
        return False
    # NOTE(review): this truthiness check treats an *empty* substitution map
    # the same as a failed (`False`) unification, so the base-object linking
    # below is skipped for empty maps — confirm this is intended.
    if s:
        # If these two meta objects unified, and one has a logic
        # variable as its base object, consider the unknown base
        # object unified by the other's base object (if any).
        # This way, the original base objects can be recovered during
        # reification (preserving base object equality and such).
        # NOTE(review): `s` is mutated in place here.
        if isinstance(u.obj, Var) and v.obj:
            s[u.obj] = v.obj
        elif isinstance(v.obj, Var) and u.obj:
            s[v.obj] = u.obj
    return s
37 |
38 |
39 | _unify.add((MetaSymbol, MetaSymbol, Mapping), unify_MetaSymbol)
40 |
41 |
def _reify_MetaSymbol(o, s):
    """Reify a `MetaSymbol` by reifying its rands under the substitution `s`.

    Returns `o` unchanged when its rands are unavailable or unaffected by
    `s`; otherwise returns a newly constructed instance of the same type.
    """
    if isinstance(o.obj, Var):
        # We allow reification of the base object field for
        # a meta object.
        # TODO: This is a weird thing that we should probably reconsider.
        # It's part of the functionality that allows base objects to fill-in
        # as logic variables, though.
        obj = s.get(o.obj, o.obj)
    else:
        # Otherwise, if there's a base object, it should indicate that there
        # are no logic variables or meta terms.
        # TODO: Seems like we should be able to skip the reify and comparison
        # below.
        obj = None

    try:
        rands = o.rands
    except NotImplementedError:
        # Some meta symbols don't expose rands; nothing to reify.
        return o

    new_rands = reify(rands, s)

    if rands == new_rands:
        # Nothing in `s` affected this object; keep the original instance.
        return o
    else:
        # Rebuild the meta object from the reified rands.
        newobj = type(o)(*new_rands, obj=obj)
        return newobj
69 |
70 |
71 | _reify.add((MetaSymbol, Mapping), _reify_MetaSymbol)
72 |
73 |
74 | # def car_MetaSymbol(x):
75 | # """Return the operator/head/CAR of a meta symbol."""
76 | # return type(x)
77 |
78 |
def car_MetaVariable(x):
    """Return the operator/head/CAR of a meta variable."""
    try:
        base_op = x.base_operator
    except NotImplementedError:
        raise ConsError("Not a cons pair.")
    return base_op
85 |
86 |
87 | # _car.add((MetaSymbol,), car_MetaSymbol)
88 | _car.add((MetaVariable,), car_MetaVariable)
89 |
90 | # operator.add((MetaSymbol,), car_MetaSymbol)
91 | operator.add((MetaVariable,), car_MetaVariable)
92 |
93 |
94 | # def cdr_MetaSymbol(x):
95 | # """Return the arguments/tail/CDR of a meta symbol.
96 | #
97 | # We build the full `etuple` for the argument, then return the
98 | # `cdr`/tail, so that the original object is retained when/if the
99 | # original object is later reconstructed and evaluated (e.g. using
100 | # `term`).
101 | #
102 | # """
103 | # try:
104 | # x_e = etuple(_car(x), *x.rands, eval_obj=x)
105 | # except NotImplementedError:
106 | # raise ConsError("Not a cons pair.")
107 | #
108 | # return x_e[1:]
109 |
110 |
def cdr_MetaVariable(x):
    """Return the arguments/tail/CDR of a variable object.

    The full `etuple` is constructed first--with the original object attached
    as its `eval_obj`--so that the original object is retained when/if the
    expression is later reconstructed and evaluated (e.g. using `term`).
    """
    try:
        full_expr = etuple(_car(x), *x.base_arguments, eval_obj=x)
    except NotImplementedError:
        raise ConsError("Not a cons pair.")

    return full_expr[1:]
122 |
123 |
124 | # _cdr.add((MetaSymbol,), cdr_MetaSymbol)
125 | _cdr.add((MetaVariable,), cdr_MetaVariable)
126 |
127 | # arguments.add((MetaSymbol,), cdr_MetaSymbol)
128 | arguments.add((MetaVariable,), cdr_MetaVariable)
129 |
--------------------------------------------------------------------------------
/symbolic_pymc/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from operator import ne, attrgetter, itemgetter
4 | from collections import namedtuple
5 | from collections.abc import Hashable, Sequence, Mapping
6 |
7 | from unification import isvar, Var
8 |
9 | from toolz import compose
10 |
11 | import symbolic_pymc as sp
12 |
13 |
class HashableNDArray(np.ndarray, Hashable):
    """A subclass of NumPy's ndarray that uses `tobytes` hashing and `array_equal` equality testing.

    Usage
    -----
    >>> import numpy as np
    >>> from symbolic_pymc.utils import HashableNDArray
    >>> x = np.r_[1, 2, 3]
    >>> x_new = x.view(HashableNDArray)
    >>> assert hash(x_new) == hash(x.tobytes())
    >>> assert x_new == np.r_[1, 2, 3]
    """

    def __hash__(self):
        # `ndarray.tostring` is deprecated (and removed in NumPy 2.0);
        # `tobytes` returns the identical raw-data bytes, so hashes are
        # unchanged relative to the previous implementation.
        return hash(self.tobytes())

    def __eq__(self, other):
        # Reduce the element-wise comparison to a single boolean (unlike the
        # default `ndarray.__eq__`, which broadcasts).
        return np.array_equal(self, other)

    def __ne__(self, other):
        if self.__eq__(other):
            return False

        # Defer to the other operand's reflected comparison otherwise.
        return NotImplemented
38 |
39 |
40 | UnequalMetaParts = namedtuple("UnequalMetaParts", ["path", "reason", "objects"])
41 |
42 |
def meta_diff_seq(x, y, loc, path, is_map=False, **kwargs):
    """Element-wise diff of two sequences (or sorted mapping item lists).

    Returns the first difference found--as a `(path, reason, objects)`
    triple--or `None` when no difference is detected.
    """
    if len(x) != len(y):
        return (path, f"{loc} len", (x, y))

    for idx, (lhs, rhs) in enumerate(zip(x, y)):
        if is_map:
            # Entries are (key, value) pairs; the keys must match exactly.
            if lhs[0] != rhs[0]:
                return (path, "map keys", (x, y))
            sub_path = compose(itemgetter(lhs[0]), path)
            lhs, rhs = lhs[1], rhs[1]
        else:
            sub_path = compose(itemgetter(idx), path)

        diff = meta_diff(lhs, rhs, path=sub_path, **kwargs)
        if diff is not None:
            return diff
59 |
60 |
def meta_diff(x, y, pdb=False, ne_fn=ne, cmp_types=True, path=compose()):
    """Traverse meta objects and return information about the first pair of elements that are not equal.

    Parameters
    ----------
    x, y: object
        The (meta) objects to compare.
    pdb: bool
        If true, drop into the debugger when a difference is found.
    ne_fn: callable
        Binary inequality predicate; a result of exactly `True` marks a
        difference (results are compared with `is True` so that
        array-valued or `NotImplemented` results aren't misread).
    cmp_types: bool
        If true, differing types are immediately reported as a difference.
    path: callable
        Composition of accessors leading to the currently compared
        sub-objects.

    Returns a `UnequalMetaParts` object containing the object path, reason for
    being unequal, and the unequal object pair; otherwise, `None`.
    """
    res = None
    if cmp_types and ne_fn(type(x), type(y)) is True:
        res = (path, "types", (x, y))
    elif isinstance(x, sp.meta.MetaSymbol):
        # Compare meta symbols by base type, then recursively by rands.
        if ne_fn(x.base, y.base) is True:
            res = (path, "bases", (x.base, y.base))
        else:
            try:
                x_rands = x.rands
                y_rands = y.rands
            except NotImplementedError:
                # Some meta symbols don't expose rands; nothing more to compare.
                pass
            else:

                path = compose(attrgetter("rands"), path)

                res = meta_diff_seq(
                    x_rands, y_rands, "rands", path, pdb=pdb, ne_fn=ne_fn, cmp_types=cmp_types
                )

    elif isinstance(x, Mapping) and isinstance(y, Mapping):

        # Sort items by key so that key order doesn't produce false diffs.
        x_ = sorted(x.items(), key=itemgetter(0))
        y_ = sorted(y.items(), key=itemgetter(0))

        res = meta_diff_seq(
            x_, y_, "map", path, is_map=True, pdb=pdb, ne_fn=ne_fn, cmp_types=cmp_types
        )

    elif (
        isinstance(x, Sequence)
        and isinstance(y, Sequence)
        and not isinstance(x, str)
        and not isinstance(y, str)
    ):

        res = meta_diff_seq(x, y, "seq", path, pdb=pdb, ne_fn=ne_fn, cmp_types=cmp_types)

    elif ne_fn(x, y) is True:
        # Leaf comparison via the supplied inequality function.
        res = (path, "ne_fn", (x, y))

    if res is not None:
        if pdb:  # pragma: no cover
            import pdb

            pdb.set_trace()
        return UnequalMetaParts(*res)
114 |
115 |
def lvar_ignore_ne(x, y):
    """Like `operator.ne`, except any two logic variables (or `Var` types) compare equal."""
    both_lvars = isvar(x) and isvar(y)
    both_var_types = (
        isinstance(x, type) and isinstance(y, type) and issubclass(x, Var) and issubclass(y, Var)
    )

    if both_lvars or both_var_types:
        return False

    return ne(x, y)
123 |
124 |
def eq_lvar(x, y):
    """Perform an equality check that considers all logic variables equal."""
    diff = meta_diff(x, y, ne_fn=lvar_ignore_ne)
    return diff is None
128 |
--------------------------------------------------------------------------------
/symbolic_pymc/relations/theano/conjugates.py:
--------------------------------------------------------------------------------
1 | import theano
2 |
3 | from unification import var
4 |
5 | from kanren.facts import fact
6 |
7 | from etuples import etuple
8 |
9 | from .. import conjugate
10 | from ...theano.meta import mt
11 |
12 |
13 | mt.namespaces += [theano.tensor.nlinalg]
14 |
15 |
def _create_normal_normal_goals():
    """Produce a relation representing Bayes theorem for a multivariate normal prior mean with a normal observation model.

    NOTE: This unifies with meta graph objects directly and not their etuple
    forms, so use it on meta graphs if you want it to work.

    TODO: This implementation is a little too restrictive in that it limits the
    conjugate update to only random variables attached to explicitly defined
    observations (i.e. via the `observed` `Op`).

    TODO: Lift univariate normals to multivariates so that this update can be
    applied to them, as well?  Seems lame to remake this just for the
    univariate cases, especially when they can be easily and completely
    embedded in multivariate spaces.

    Returns
    -------
    tuple
        A pair `(Y_obs_mt, norm_posterior_exprs)`: the observed-RV pattern to
        match and the etuple expression that constructs its posterior.
    """
    #
    # Create the pattern/form of the prior normal distribution
    #
    beta_name_lv = var()
    beta_size_lv = var()
    beta_rng_lv = var()
    a_lv = var()
    R_lv = var()
    beta_prior_mt = mt.MvNormalRV(a_lv, R_lv, size=beta_size_lv, rng=beta_rng_lv, name=beta_name_lv)

    y_name_lv = var()
    y_size_lv = var()
    y_rng_lv = var()
    F_t_lv = var()
    V_lv = var()
    # The observation model: Y ~ N(F * beta, V)
    E_y_mt = mt.dot(F_t_lv, beta_prior_mt)
    Y_mt = mt.MvNormalRV(E_y_mt, V_lv, size=y_size_lv, rng=y_rng_lv, name=y_name_lv)

    # The variable specifying the fixed sample value of the random variable
    # given by `Y_mt`
    obs_sample_mt = var()

    Y_obs_mt = mt.observed(obs_sample_mt, Y_mt)

    #
    # Create tuple-form expressions that construct the posterior
    #
    # Prediction error: y - F * a
    e_expr = mt.sub(obs_sample_mt, mt.dot(F_t_lv, a_lv))
    F_expr = etuple(mt.transpose, F_t_lv)
    R_F_expr = etuple(mt.dot, R_lv, F_expr)
    Q_expr = etuple(mt.add, V_lv, etuple(mt.dot, F_t_lv, R_F_expr))
    A_expr = etuple(mt.dot, R_F_expr, etuple(mt.matrix_inverse, Q_expr))
    # m = C \left(F V^{-1} y + R^{-1} a\right)
    m_expr = etuple(mt.add, a_lv, etuple(mt.dot, A_expr, e_expr))
    # C = \left(R^{-1} + F V^{-1} F^{\top}\right)^{-1}
    # TODO: We could use the naive posterior forms and apply identities, like
    # Woodbury's, in another set of "simplification" relations.
    # In some cases, this might make the patterns simpler and more broadly
    # applicable.
    C_expr = etuple(
        mt.sub, R_lv, etuple(mt.dot, etuple(mt.dot, A_expr, Q_expr), etuple(mt.transpose, A_expr))
    )

    # The posterior RV reuses the observation's size and RNG arguments.
    norm_posterior_exprs = etuple(mt.MvNormalRV, m_expr, C_expr, y_size_lv, y_rng_lv)

    return (Y_obs_mt, norm_posterior_exprs)
78 |
79 |
def _create_normal_wishart_goals():  # pragma: no cover
    """TODO.

    NOTE(review): This is an incomplete stub--it builds the prior/likelihood
    patterns and a partial posterior expression (`n_post_mt`) but returns
    nothing (the return statement is still commented out).
    """
    # Create the pattern/form of the prior normal distribution
    Sigma_name_lv = var()
    Sigma_size_lv = var()
    Sigma_rng_lv = var()
    V_lv = var()
    n_lv = var()
    Sigma_prior_mt = mt.WishartRV(V_lv, n_lv, Sigma_size_lv, Sigma_rng_lv, name=Sigma_name_lv)

    y_name_lv = var()
    y_size_lv = var()
    y_rng_lv = var()
    # NOTE(review): `V_lv` is re-bound here, discarding the Wishart prior's
    # `V_lv` above--confirm whether these were meant to be distinct variables.
    V_lv = var()
    f_mt = var()
    Y_mt = mt.MvNormalRV(f_mt, V_lv, y_size_lv, y_rng_lv, name=y_name_lv)

    y_mt = var()
    Y_obs_mt = mt.observed(y_mt, Y_mt)

    # Partial posterior expression for the degrees-of-freedom parameter.
    n_post_mt = etuple(mt.add, n_lv, etuple(mt.Shape, Y_obs_mt))

    # wishart_posterior_exprs = etuple(mt.MvStudentTRV,
    #                                  m_expr, C_expr,
    #                                  y_size_lv, y_rng_lv)

    # return (Sigma_prior_mt, wishart_posterior_exprs)
107 |
108 |
# Register the normal-normal pattern/posterior pair as a `conjugate` fact so
# the `conjugate` relation can rewrite matching observed graphs.
norm_norm_prior_post = _create_normal_normal_goals()
fact(
    conjugate,
    # An unconjugated observation backed by an MvNormal likelihood with MvNormal prior mean
    norm_norm_prior_post[0],
    # The corresponding conjugated distribution
    norm_norm_prior_post[1],
)
117 |
--------------------------------------------------------------------------------
/symbolic_pymc/relations/theano/linalg.py:
--------------------------------------------------------------------------------
1 | import toolz
2 |
3 | from operator import itemgetter, attrgetter
4 |
5 | from theano.tensor.nlinalg import QRFull
6 |
7 | from unification import var
8 |
9 | from kanren import eq
10 | from kanren.core import lall
11 | from kanren.graph import applyo
12 | from kanren.constraints import neq
13 |
14 | from etuples import etuple, etuplize
15 |
16 | from ...theano.meta import mt
17 |
18 |
# Expose a "reduced"-mode QR decomposition `Op` through the meta namespace.
mt.nlinalg.qr_full = mt(QRFull("reduced"))
owner_inputs = attrgetter("owner.inputs")
# Accessors for the 3rd and 4th owner inputs of a normal RV (its `size` and
# `rng` arguments, per the `NormalRV(mean, sd, size, rng)` argument order).
normal_get_size = toolz.compose(itemgetter(2), owner_inputs)
normal_get_rng = toolz.compose(itemgetter(3), owner_inputs)
23 |
24 |
def update_name_suffix(x, old_x, suffix):  # pragma: no cover
    """Set `x`'s name to `old_x`'s name with `suffix` appended, then return `x`."""
    x.name = old_x.name + suffix
    return x
29 |
30 |
def normal_normal_regression(Y, X, beta, Y_args_tail=None, beta_args=None):
    """Create a goal for a normal-normal regression of the form `Y ~ N(X * beta, sd**2)`.

    Parameters
    ----------
    Y: lvar or meta graph
        The observation distribution; must be a `NormalRV`.
    X: lvar or meta graph
        The covariates/design-matrix term.
    beta: lvar or meta graph
        The regression coefficients; must also be a `NormalRV`.
    Y_args_tail: lvar, optional
        Unifies with `Y`'s arguments following the mean parameter.
    beta_args: lvar, optional
        Unifies with `beta`'s full argument list.
    """
    # Create fresh logic variables for any outputs the caller didn't supply.
    Y_args_tail = Y_args_tail or var()
    beta_args = beta_args or var()

    Y_args, Y_mean_lv = var(), var()

    res = lall(
        # `Y` is a `NormalRV`
        applyo(mt.NormalRV, Y_args, Y),
        # `beta` is also a `NormalRV`
        applyo(mt.NormalRV, beta_args, beta),
        # Obtain its mean parameter and remaining args
        applyo(Y_mean_lv, Y_args_tail, Y_args),
        # Relate it to a dot product of `X` and `beta`
        applyo(mt.dot, etuple(X, beta), Y_mean_lv),
    )

    return res
50 |
51 |
def normal_qr_transform(in_expr, out_expr):
    """Produce a relation for normal-normal regression and its QR-reduced form.

    TODO XXX: This isn't entirely correct (e.g. it needs to also
    transform the variance terms), but it demonstrates all the requisite
    functionality for this kind of model reformulation.

    Parameters
    ----------
    in_expr: etuple or meta graph
        An observed normal-normal regression expression.
    out_expr: lvar or list
        Unifies with a list of `(original, replacement)` pairs describing
        the QR-reduced substitutions.

    """
    y_lv, Y_lv, X_lv, beta_lv = var(), var(), var(), var()
    Y_args_lv, beta_args_lv = var(), var()
    QR_lv, Q_lv, R_lv = var(), var(), var()
    beta_til_lv, beta_new_lv = var(), var()
    beta_mean_lv, beta_sd_lv = var(), var()
    beta_size_lv, beta_rng_lv = var(), var()
    Y_new_lv = var()
    X_op_lv = var()

    # Work at the s-expression level so sub-terms can be deconstructed.
    in_expr = etuplize(in_expr)

    res = lall(
        # Only applies to regression models on observed RVs
        eq(in_expr, etuple(mt.observed, y_lv, Y_lv)),
        # Relate the model components
        normal_normal_regression(Y_lv, X_lv, beta_lv, Y_args_lv, beta_args_lv),
        # Let's not do all this to an already QR-reduce graph;
        # otherwise, we'll loop forever!
        applyo(X_op_lv, var(), X_lv),
        # XXX: This type of dis-equality goal isn't the best,
        # but it will definitely work for now.
        neq(mt.nlinalg.qr_full, X_op_lv),
        # Relate terms for the QR decomposition
        eq(QR_lv, etuple(mt.nlinalg.qr_full, X_lv)),
        eq(Q_lv, etuple(itemgetter(0), QR_lv)),
        eq(R_lv, etuple(itemgetter(1), QR_lv)),
        # The new `beta_tilde`
        eq(beta_args_lv, (beta_mean_lv, beta_sd_lv, beta_size_lv, beta_rng_lv)),
        eq(
            beta_til_lv,
            etuple(
                mt.NormalRV,
                # Use these `tt.[ones|zeros]_like` functions to preserve the
                # correct shape (and a valid `tt.dot`).
                etuple(mt.zeros_like, beta_mean_lv),
                etuple(mt.ones_like, beta_sd_lv),
                beta_size_lv,
                beta_rng_lv,
            ),
        ),
        # Relate the new and old coeffs
        eq(beta_new_lv, etuple(mt.dot, etuple(mt.nlinalg.matrix_inverse, R_lv), beta_til_lv)),
        # Use the relation the other way to produce the new/transformed
        # observation distribution
        normal_normal_regression(Y_new_lv, Q_lv, beta_til_lv, Y_args_lv),
        eq(
            out_expr,
            [
                (
                    in_expr,
                    etuple(mt.observed, y_lv, etuple(update_name_suffix, Y_new_lv, Y_lv, "")),
                ),
                (beta_lv, beta_new_lv),
            ],
        ),
    )
    return res
117 |
--------------------------------------------------------------------------------
/docs/source/theano-radon-example.rst:
--------------------------------------------------------------------------------
1 | =====================================
2 | Automatic Re-centering and Re-scaling
3 | =====================================
4 |
5 | :Author: Brandon T. Willard
6 | :Date: 2019-11-24
7 |
8 | Using \ ``symbolic_pymc``\ we can automate the PyMC3 model
9 | transformation in `"Why hierarchical models are awesome, tricky, and Bayesian" `_
10 | and improve sample chain quality.
11 |
12 | .. code-block:: python
13 | :name: recenter-radon-model
14 |
15 | import numpy as np
16 | import pandas as pd
17 |
18 | import pymc3 as pm
19 |
20 | import theano
21 | import theano.tensor as tt
22 |
23 | from functools import partial
24 |
25 | from unification import var
26 |
27 | from kanren import run
28 | from kanren.graph import reduceo
29 |
30 | from symbolic_pymc.theano.meta import mt
31 | from symbolic_pymc.theano.pymc3 import model_graph, graph_model
32 | from symbolic_pymc.theano.utils import canonicalize
33 |
34 | from symbolic_pymc.relations.theano import non_obs_walko
35 | from symbolic_pymc.relations.theano.distributions import scale_loc_transform
36 |
37 |
38 | tt.config.compute_test_value = 'ignore'
39 |
40 | data = pd.read_csv('https://github.com/pymc-devs/pymc3/raw/master/pymc3/examples/data/radon.csv')
41 | data['log_radon'] = data['log_radon'].astype(theano.config.floatX)
42 | county_names = data.county.unique()
43 | county_idx = data.county_code.values
44 |
45 | n_counties = len(data.county.unique())
46 |
47 | with pm.Model() as model_centered:
48 | mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
49 | sigma_a = pm.HalfCauchy('sigma_a', 5)
50 | mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
51 | sigma_b = pm.HalfCauchy('sigma_b', 5)
52 | a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_counties)
53 | b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_counties)
54 | eps = pm.HalfCauchy('eps', 5)
55 | radon_est = a[county_idx] + b[county_idx] * data.floor.values
56 | radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps,
57 | observed=data.log_radon)
58 |
59 | # Convert the PyMC3 graph into a symbolic-pymc graph
60 | fgraph = model_graph(model_centered)
61 | # Perform a set of standard algebraic simplifications
62 | fgraph = canonicalize(fgraph, in_place=False)
63 |
64 |
65 | def reparam_graph(graph):
66 | """Apply re-parameterization relations throughout a graph."""
67 |
68 | graph_mt = mt(graph)
69 |
70 | def scale_loc_fixedp_applyo(x, y):
71 | return reduceo(partial(non_obs_walko, scale_loc_transform), x, y)
72 |
73 | q = var()
74 | expr_graph = run(0, q,
75 | # Apply our transforms to unobserved RVs only
76 | scale_loc_fixedp_applyo(graph_mt, q))
77 |
78 | expr_graph = expr_graph[0]
79 | opt_graph_tt = expr_graph.reify()
80 |
81 | # PyMC3 needs names for each RV
82 | opt_graph_tt.owner.inputs[1].name = 'Y_new'
83 |
84 | return opt_graph_tt
85 |
86 |
87 | fgraph_reparam = reparam_graph(fgraph.outputs[0])
88 |
89 | # Convert the symbolic-pymc graph into a PyMC3 graph so that we can sample it
90 | model_recentered = graph_model(fgraph_reparam)
91 |
92 | np.random.seed(123)
93 |
94 | with model_centered:
95 | centered_trace = pm.sample(draws=5000, tune=1000, cores=4)[1000:]
96 |
97 | with model_recentered:
98 | recentered_trace = pm.sample(draws=5000, tune=1000, cores=4)[1000:]
99 |
100 | Before
101 | ------
102 |
103 | .. code-block:: python
104 | :name: before-recenter-plot
105 |
106 | >>> pm.traceplot(centered_trace, varnames=['sigma_b'])
107 |
108 | .. _fig:original_model_trace:
109 |
110 | .. figure:: _static/centered_trace.png
111 | :width: 800px
112 | :align: center
113 | :figclass: align-center
114 |
115 |
116 | Original model trace results.
117 |
118 | After
119 | -----
120 |
121 | .. code-block:: python
122 | :name: after-recenter-plot
123 |
124 | >>> pm.traceplot(recentered_trace, varnames=['sigma_b'])
125 |
126 | .. _fig:transformed_model_trace:
127 |
128 | .. figure:: _static/recentered_trace.png
129 | :width: 800px
130 | :align: center
131 | :figclass: align-center
132 |
133 |
134 | Transformed model trace results.
135 |
--------------------------------------------------------------------------------
/docs/source/theano-radon-example.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Automatic Re-centering and Re-scaling
2 | #+AUTHOR: Brandon T. Willard
3 | #+DATE: 2019-11-24
4 | #+EMAIL: brandonwillard@gmail.com
5 |
6 | #+STARTUP: hideblocks indent hidestars
7 | #+OPTIONS: num:nil author:t date:t title:t toc:nil ^:nil d:(not "logbook" "todo" "notes") tex:t |:t broken-links:f
8 | #+SELECT_TAGS: export
9 | #+EXCLUDE_TAGS: noexport
10 |
11 | #+PROPERTY: header-args :session spymc-examples :exports both :eval never-export :results output drawer replace
12 | #+PROPERTY: header-args:text :eval never
13 |
14 | Using src_python[:eval never]{symbolic_pymc} we can automate the PyMC3 model
15 | transformation in [[https://twiecki.io/blog/2017/02/08/bayesian-hierchical-non-centered/]["Why hierarchical models are awesome, tricky, and Bayesian"]]
16 | and improve sample chain quality.
17 |
18 | #+NAME: recenter-radon-model
19 | #+BEGIN_SRC python :eval never
20 | import numpy as np
21 | import pandas as pd
22 |
23 | import pymc3 as pm
24 |
25 | import theano
26 | import theano.tensor as tt
27 |
28 | from functools import partial
29 |
30 | from unification import var
31 |
32 | from kanren import run
33 | from kanren.graph import reduceo
34 |
35 | from symbolic_pymc.theano.meta import mt
36 | from symbolic_pymc.theano.pymc3 import model_graph, graph_model
37 | from symbolic_pymc.theano.utils import canonicalize
38 |
39 | from symbolic_pymc.relations.theano import non_obs_walko
40 | from symbolic_pymc.relations.theano.distributions import scale_loc_transform
41 |
42 |
43 | tt.config.compute_test_value = 'ignore'
44 |
45 | data = pd.read_csv('https://github.com/pymc-devs/pymc3/raw/master/pymc3/examples/data/radon.csv')
46 | data['log_radon'] = data['log_radon'].astype(theano.config.floatX)
47 | county_names = data.county.unique()
48 | county_idx = data.county_code.values
49 |
50 | n_counties = len(data.county.unique())
51 |
52 | with pm.Model() as model_centered:
53 | mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
54 | sigma_a = pm.HalfCauchy('sigma_a', 5)
55 | mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
56 | sigma_b = pm.HalfCauchy('sigma_b', 5)
57 | a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_counties)
58 | b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_counties)
59 | eps = pm.HalfCauchy('eps', 5)
60 | radon_est = a[county_idx] + b[county_idx] * data.floor.values
61 | radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps,
62 | observed=data.log_radon)
63 |
64 | # Convert the PyMC3 graph into a symbolic-pymc graph
65 | fgraph = model_graph(model_centered)
66 | # Perform a set of standard algebraic simplifications
67 | fgraph = canonicalize(fgraph, in_place=False)
68 |
69 |
70 | def reparam_graph(graph):
71 | """Apply re-parameterization relations throughout a graph."""
72 |
73 | graph_mt = mt(graph)
74 |
75 | def scale_loc_fixedp_applyo(x, y):
76 | return reduceo(partial(non_obs_walko, scale_loc_transform), x, y)
77 |
78 | q = var()
79 | expr_graph = run(0, q,
80 | # Apply our transforms to unobserved RVs only
81 | scale_loc_fixedp_applyo(graph_mt, q))
82 |
83 | expr_graph = expr_graph[0]
84 | opt_graph_tt = expr_graph.reify()
85 |
86 | # PyMC3 needs names for each RV
87 | opt_graph_tt.owner.inputs[1].name = 'Y_new'
88 |
89 | return opt_graph_tt
90 |
91 |
92 | fgraph_reparam = reparam_graph(fgraph.outputs[0])
93 |
94 | # Convert the symbolic-pymc graph into a PyMC3 graph so that we can sample it
95 | model_recentered = graph_model(fgraph_reparam)
96 |
97 | np.random.seed(123)
98 |
99 | with model_centered:
100 | centered_trace = pm.sample(draws=5000, tune=1000, cores=4)[1000:]
101 |
102 | with model_recentered:
103 | recentered_trace = pm.sample(draws=5000, tune=1000, cores=4)[1000:]
104 | #+END_SRC
105 |
106 | *** Before
107 | #+NAME: before-recenter-plot
108 | #+BEGIN_SRC python :eval never
109 | >>> pm.traceplot(centered_trace, varnames=['sigma_b'])
110 | #+END_SRC
111 |
112 | #+ATTR_ORG: :width 600
113 | #+ATTR_RST: :width 800px :align center :figclass align-center
114 | #+CAPTION: Original model trace results.
115 | #+NAME: fig:original_model_trace
116 | #+RESULTS: before-recenter-plot
117 | [[file:_static/centered_trace.png]]
118 |
119 | *** After
120 | #+NAME: after-recenter-plot
121 | #+BEGIN_SRC python :eval never
122 | >>> pm.traceplot(recentered_trace, varnames=['sigma_b'])
123 | #+END_SRC
124 |
125 | #+ATTR_ORG: :width 600
126 | #+ATTR_RST: :width 800px :align center :figclass align-center
127 | #+CAPTION: Transformed model trace results.
128 | #+NAME: fig:transformed_model_trace
129 | #+RESULTS: after-recenter-plot
130 | [[file:_static/recentered_trace.png]]
131 |
--------------------------------------------------------------------------------
/docs/logos/svg/PyMC3_square.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
29 |
30 |
31 |
34 |
37 |
41 |
46 |
47 |
48 |
52 |
55 |
58 |
59 |
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/docs/logos/svg/PyMC3_circle.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
31 |
32 |
33 |
36 |
39 |
43 |
48 |
49 |
50 |
54 |
57 |
60 |
61 |
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/symbolic_pymc/relations/theano/distributions.py:
--------------------------------------------------------------------------------
1 | """Relations pertaining to probability distributions."""
2 | from unification import var
3 |
4 | from etuples import etuple
5 |
6 | from kanren import conde, eq
7 | from kanren.facts import fact, Relation
8 |
9 | from . import constant_neq
10 | from .. import concat
11 | from ...theano.meta import mt
12 |
13 |
# Relations into which derived/equivalent distribution expressions are
# asserted as facts (and later queried via `kanren.run`).
derived_dist = Relation("derived_dist")
stable_dist = Relation("stable_dist")
generalized_gamma_dist = Relation("generalized_gamma_dist")

# Meta-graph "patterns" for each random-variable type; every parameter is a
# fresh logic variable, so each pattern unifies with any concrete instance.
uniform_mt = mt.UniformRV(var(), var(), size=var(), rng=var(), name=var())
normal_mt = mt.NormalRV(var(), var(), size=var(), rng=var(), name=var())
cauchy_mt = mt.CauchyRV(var(), var(), size=var(), rng=var(), name=var())
halfcauchy_mt = mt.HalfCauchyRV(var(), var(), size=var(), rng=var(), name=var())
gamma_mt = mt.GammaRV(var(), var(), size=var(), rng=var(), name=var())
exponential_mt = mt.ExponentialRV(var(), size=var(), rng=var(), name=var())

# The ratio of two standard normals is standard Cauchy.  The shared
# `_ratio_norm_size` variable forces both normals to have the same size.
# NOTE(review): the denominator's rng is an unconstrained fresh variable, so
# independence of the two normals is only implied, not enforced — confirm.
# TODO: Add constraints for different variations of this. Also, consider a
# check for exact equality of the two dists, or simply normalize/canonicalize
# the graph first.
fact(
    derived_dist,
    mt.true_div(
        mt.NormalRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var("_ratio_norm_rng"), name=var()),
        mt.NormalRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var(), name=var()),
    ),
    mt.CauchyRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var("_ratio_norm_rng")),
)

# TODO:
# fact(stable_dist,
#      normal_mt, ('StableRV',
#                  2., 0.,
#                  normal_mt.owner.inputs[1],
#                  normal_mt.owner.inputs[1]))
# fact(stable_dist,
#      cauchy_mt, ('StableRV',
#                  1., 0.,
#                  cauchy_mt.owner.inputs[1],
#                  cauchy_mt.owner.inputs[1]))

# TODO: Weibull, Gamma, Exponential, Half-normal
# fact(generalized_gamma_dist,
#      None,
#      None)
54 |
def scale_loc_transform(in_expr, out_expr):
    """Create relations for lifting and sinking scale and location parameters of distributions.

    Relates a distribution with non-trivial location/scale parameters to an
    affine transform of its standardized counterpart,
    e.g. `in_expr`: N(mu, sd) == `out_expr`: mu + sd * N(0, 1).

    Parameters
    ----------
    in_expr
        Meta-graph term (or logic variable) matched against the
        location/scale distribution patterns below.
    out_expr
        Meta-graph term (or logic variable) unified with the corresponding
        re-parameterized expression.

    Returns
    -------
    A `kanren` `conde` goal encoding the re-parameterization relations.

    TODO: Match larger distribution families and perform transforms from there.

    XXX: PyMC3 rescaling issue (?) doesn't allow us to take the more general
    approach, which involves separate scale and location rewrites.

    """
    # Scale and location transform expression "pattern" for a Normal term.
    normal_mt = mt.NormalRV(var(), var(), size=var(), rng=var(), name=var())
    n_name_lv = normal_mt.name
    n_mean_lv, n_sd_lv, n_size_lv, n_rng_lv = normal_mt.owner.inputs
    # Name for the standardized RV; derived from the matched RV's name via
    # `concat` in the goals below.
    offset_name_mt = var()
    # `mean + sd * N(0, 1)`; the standardized normal reuses the matched
    # term's size and rng, so only the parameterization changes.
    rct_norm_offset_mt = etuple(
        mt.add,
        n_mean_lv,
        etuple(
            mt.mul,
            n_sd_lv,
            mt.NormalRV(0.0, 1.0, size=n_size_lv, rng=n_rng_lv, name=offset_name_mt),
        ),
    )

    # Scale and location transform expression "pattern" for a Cauchy term.
    cauchy_mt = mt.CauchyRV(var(), var(), size=var(), rng=var(), name=var())
    c_name_lv = cauchy_mt.name
    c_mean_lv, c_beta_lv, c_size_lv, c_rng_lv = cauchy_mt.owner.inputs
    # `loc + beta * Cauchy(0, 1)`; `offset_name_mt` is shared with the normal
    # pattern, but only one `conde` branch applies per match.
    rct_cauchy_offset_mt = etuple(
        mt.add,
        c_mean_lv,
        etuple(
            mt.mul,
            c_beta_lv,
            mt.CauchyRV(0.0, 1.0, size=c_size_lv, rng=c_rng_lv, name=offset_name_mt),
        ),
    )

    # TODO:
    # uniform_mt = mt.UniformRV(var(), var(), size=var(), rng=var(), name=var())
    # u_name_lv = uniform_mt.name
    # u_a_lv, u_b_lv, u_size_lv, u_rng_lv = uniform_mt.owner.inputs
    # rct_uniform_scale_mt = etuple(
    #     mt.mul,
    #     u_b_lv,
    #     mt.UniformRV(0.0, 1.0, size=u_size_lv, rng=u_rng_lv, name=offset_name_mt),
    # )
    # rct_uniform_loc_mt = etuple(mt.add, u_c_lv,
    #                             mt.UniformRV(u_a_lv, u_b_lv,
    #                                          size=u_size_lv,
    #                                          rng=u_rng_lv,
    #                                          name=offset_name_mt))

    # Each branch: match the pattern, require non-standard parameters, unify
    # the re-parameterized output, and derive the standardized RV's name.
    rels = conde(
        [
            eq(in_expr, normal_mt),
            # Skip already-standardized RVs (sd == 1, mean == 0).
            constant_neq(n_sd_lv, 1),
            constant_neq(n_mean_lv, 0),
            eq(out_expr, rct_norm_offset_mt),
            concat(n_name_lv, "_offset", offset_name_mt),
        ],
        [
            eq(in_expr, cauchy_mt),
            constant_neq(c_beta_lv, 1),
            # TODO: Add a positivity constraint for the scale.
            constant_neq(c_mean_lv, 0),
            eq(out_expr, rct_cauchy_offset_mt),
            concat(c_name_lv, "_offset", offset_name_mt),
        ],
        # TODO:
        # [eq(in_expr, uniform_mt),
        #  lall(
        #      constant_eq(u_a_lv, 0),
        #      eq(out_expr, rct_uniform_scale_mt),
        #      concat(u_name_lv, "_scale", offset_name_mt),
        #  )],
    )

    return rels
138 |
--------------------------------------------------------------------------------
/docs/source/org-babel-extensions.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Custom =org-babel= Extensions
2 | #+AUTHOR: Brandon T. Willard
3 | #+DATE: 2018-07-29
4 | #+EMAIL: brandonwillard@gmail.com
5 | #+STARTUP: hideblocks
6 |
7 | * Introduction
8 |
9 | To get started, add a block like the following to an Org file that
10 | needs to reference the functions herein:
11 | #+BEGIN_SRC elisp :eval t :exports none :results none
12 | (org-babel-lob-ingest "org-babel-extensions.org")
13 | #+END_SRC
14 |
15 | Alternatively, run the same code within Emacs; both should add the named
16 | blocks below to your LOB (library-of-babel, i.e. =org-babel-library-of-babel=).
17 |
18 | * General Babel Functions
19 |
20 | #+NAME: babel_helper_functions
21 | #+BEGIN_SRC elisp :eval t :exports none :results none
22 | (defun org-babel-get-call-var-value (var-name)
23 | "Extract the value of a named variable from a CALL statement."
24 | ;; What about `org-element-context' and `org-babel-parse-header-arguments'?
25 | (when-let ((el-info (org-babel-lob-get-info)))
26 | (car-safe
27 | (seq-filter #'identity
28 | (map-values-apply
29 | (lambda (x) (if (string-match (format "^%s=\"\\(.*\\)\"$" var-name) x)
30 | (match-string 1 x)))
31 | (seq-filter (lambda (x) (eq (car x) :var))
32 | (nth 2 el-info)))))))
33 |
34 | (defmacro org-babel-get-caller-var-value (var)
35 | `(or (org-with-point-at org-babel-current-src-block-location
36 | (org-babel-get-call-var-value ,(symbol-name var)))
37 | ,var))
38 | #+END_SRC
39 |
40 | * Figure Generation
41 |
42 | Below, we create a babel function that nicely wraps the output of a filename
43 | to be displayed as a figure in org-mode and LaTeX.
44 |
45 | The code takes extra effort to extract variable information from the calling block.
46 | This is especially useful when blocks are called indirectly (e.g. from =:post=) and
47 | variables (i.e. =:var= assignments) for the callee need to be set.
48 |
49 | #+NAME: org_fig_wrap
50 | #+HEADER: :var org_attrs=":width 400"
51 | #+HEADER: :var latex_attrs=":width 1.0\\textwidth :height 1.0\\textwidth :float t :options [keepaspectratio] :placement [p!]"
52 | #+HEADER: :var data="" :var label="" :var caption="" :var label_var=""
53 | #+BEGIN_SRC elisp :exports none :results raw value :noweb yes
54 |
55 | <<babel_helper_functions>>
56 |
57 | (let* ((label (if (string-blank-p label)
58 | ;; There's no specified label.
59 | (org-with-point-at org-babel-current-src-block-location
60 | (let ((src-block-info (org-babel-get-src-block-info)))
61 | ;; First, use the calling block's name as the label.
62 | (if src-block-info
63 | (nth 4 (org-babel-get-src-block-info))
64 | ;; The caller is not a SRC block; let's assume it's a
65 | ;; CALL.
66 | (or (org-babel-get-call-var-value "label")
67 | ;; If the CALL specifies no label value, try the
68 | ;; value assigned to the variable given by
69 | ;; label_var.
70 | (org-babel-get-call-var-value label_var))
71 | ;; (error "No figure name!")
72 | )))
73 | label))
74 | (latex_attrs (org-babel-get-caller-var-value latex_attrs))
75 | (org_attrs (org-babel-get-caller-var-value org_attrs))
76 | (caption (org-babel-get-caller-var-value caption))
77 | ;; TODO: Further customize filename output?
78 | ;; (filename (if (org-export-derived-backend-p org-export-current-backend 'latex)
79 | ;; (let ((pdf-name (concat (file-name-sans-extension data) ".pdf")))
80 | ;; (or (and (file-exists-p pdf-name) pdf-name)
81 | ;; data))
82 | ;; data))
83 | ;; TODO: Could number figures using `org-export-get-ordinal'.
84 | ;; See https://github.com/kawabata/ox-pandoc/blob/master/ox-pandoc.el
85 | )
86 | (mapconcat 'identity
87 | `(,(format "#+ATTR_ORG: %s" org_attrs)
88 | ,(format "#+ATTR_LATEX: %s" latex_attrs)
89 | ,(format "#+CAPTION: %s" caption)
90 | ,(format "#+NAME: fig:%s" label)
91 | ,(format "[[file:%s]]" data))
92 | "\n"))
93 | #+END_SRC
94 |
95 | ** Example Usage
96 |
97 | In this instance, we call =org_fig_wrap= as a block =:post= processing function.
98 | This is where the indirect variable gathering functionality is useful, since,
99 | without it, we would not be able to set =label= or =caption= for
100 | =org_fig_wrap= in the originating =CALL=.
101 |
102 | #+BEGIN_SRC org :eval never :exports code
103 | ,#+NAME: insert_pydot_figure
104 | ,#+HEADER: :var graph_obj_name=""
105 | ,#+HEADER: :post org_fig_wrap(data=*this*, label_var="graph_obj_name")
106 | ,#+BEGIN_SRC python :results raw value
107 | ...
108 | ,#+END_SRC
109 | #+END_SRC
110 |
111 |
112 | Then emit the results in a =CALL= statement.
113 | #+BEGIN_SRC org :eval never :exports code
114 | ,#+CALL: insert_pydot_figure[:results value](graph_obj_name="blah", label="a-label", caption="A caption")
115 | #+END_SRC
116 |
--------------------------------------------------------------------------------
/tests/test_meta.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import numpy as np
4 |
5 | from symbolic_pymc.utils import HashableNDArray
6 | from symbolic_pymc.meta import MetaSymbol, MetaOp, metatize
7 |
8 |
class SomeOp(object):
    """Stand-in operation type used as the ``base`` of `SomeMetaOp`."""

    def __repr__(self):
        # NOTE(review): the empty repr may be an artifact of markup stripping
        # (e.g. an original "<SomeOp>") — confirm against upstream.
        return ""
12 |
13 |
class SomeType(object):
    """Plain two-field container serving as a meta-symbol ``base`` type."""

    def __init__(self, field1, field2):
        self.field1, self.field2 = field1, field2

    def __repr__(self):
        return f"SomeType({self.field1}, {self.field2})"

    def __str__(self):
        # `str` uses angle brackets so the two forms are distinguishable.
        return f"SomeType<{self.field1}, {self.field2}>"
24 |
25 |
class SomeMetaSymbol(MetaSymbol):
    # Two "property" slots plus one volatile slot; the leading underscore
    # marks `_blah` as excluded from `__props__`/hashing (see `test_meta`).
    __slots__ = ("field1", "field2", "_blah")
    # The concrete type this meta symbol wraps.
    base = SomeType

    def __init__(self, obj=None):
        super().__init__(obj)
        # Fixed field values keep fresh instances trivially comparable.
        self.field1 = 1
        self.field2 = 2
        self._blah = "a"
35 |
36 |
class SomeMetaOp(MetaOp):
    # No slots of its own; everything comes from `MetaOp`.
    __slots__ = ()
    base = SomeOp

    def output_meta_types(self):
        # Applying this op always produces `SomeMetaSymbol`s.
        return [SomeMetaSymbol]

    def __call__(self, *args, **kwargs):
        return SomeMetaSymbol(*args, **kwargs)
46 |
47 |
def test_meta():
    """Make sure hash caching and slot manipulation works."""

    some_mt = SomeMetaSymbol()

    # Slot layout: `MetaSymbol`'s own slots come first, then this subclass's
    # slots in declaration order.
    assert some_mt.__all_slots__ == ("_obj", "_hash", "_rands", "field1", "field2", "_blah")
    assert some_mt.__all_props__ == ("field1", "field2")
    assert some_mt.__props__ == ("field1", "field2")
    # Underscore-prefixed slots are "volatile": excluded from props/hashing.
    assert some_mt.__volatile_slots__ == ("_obj", "_hash", "_rands", "_blah")

    assert some_mt.obj is None
    # The hash is computed lazily, so the cache slot starts out unset.
    assert not hasattr(some_mt, "_hash")

    some_hash = hash(some_mt)

    assert some_mt._hash == some_hash

    assert some_mt.field1 == 1
    assert some_mt.field2 == 2

    # This assignment shouldn't change the cached values
    some_mt._blah = "b"

    assert some_mt._hash == some_hash

    # This should
    some_mt.field1 = 10

    # Assigning a property slot clears the cached hash and volatile slots.
    assert some_mt._hash is None
    assert some_mt._blah is None

    some_new_hash = hash(some_mt)

    assert some_mt._hash == some_new_hash
    assert some_new_hash != some_hash

    some_op_mt = SomeMetaOp(SomeOp())

    # `obj` was set in the constructor; re-assignment raises.
    with pytest.raises(AttributeError):
        some_op_mt.obj = SomeOp()
88 |
89 |
def test_meta_inheritance():
    # A concrete subclass of `SomeType` adding a third field...
    class SomeOtherType(SomeType):
        def __init__(self, field1, field2, field3):
            super().__init__(field1, field2)
            self.field3 = field3

    # ...and a matching meta symbol extending `SomeMetaSymbol`'s slots.
    class SomeOtherMetaSymbol(SomeMetaSymbol):
        __slots__ = ("field3", "_bloh")
        base = SomeOtherType

        def __init__(self, obj=None):
            super().__init__(obj)
            self.field3 = 3

        def __hash__(self):
            # Extend the parent's hash with the new property slot.
            return hash((super().__hash__(), self.field3))

    some_mt = SomeMetaSymbol()
    other_mt = SomeOtherMetaSymbol()

    assert some_mt != other_mt

    # Inherited slots precede the subclass's own, preserving declaration order.
    assert other_mt.__all_slots__ == (
        "_obj",
        "_hash",
        "_rands",
        "field1",
        "field2",
        "_blah",
        "field3",
        "_bloh",
    )
    assert other_mt.__all_props__ == ("field1", "field2", "field3")
    # `__props__` reports only the slots declared directly on the subclass.
    assert other_mt.__props__ == ("field3",)
    assert other_mt.__volatile_slots__ == ("_obj", "_hash", "_rands", "_blah", "_bloh")
125 |
126 |
def test_meta_str():

    some_mt = SomeMetaSymbol()

    # Without a base object, `repr` shows just the property values.
    assert repr(some_mt) == "SomeMetaSymbol(1, 2)"
    assert str(some_mt) == repr(some_mt)

    some_mt = SomeMetaSymbol(SomeType(1, 2))

    # With a base object, `repr` appends it, while `str` omits it.
    assert repr(some_mt) == "SomeMetaSymbol(1, 2, obj=SomeType(1, 2))"
    assert str(some_mt) == "SomeMetaSymbol(1, 2)"

    some_op_mt = SomeMetaOp()
    assert repr(some_op_mt) == "SomeMetaOp(obj=None)"

    some_op_mt = SomeMetaOp(SomeOp())
    # NOTE(review): `SomeOp.__repr__` returns "" above, so "obj=" is empty;
    # this may be markup-stripping of an original "<SomeOp>" — confirm.
    assert repr(some_op_mt) == "SomeMetaOp(obj=)"
144 |
145 |
def test_meta_pretty():
    # Skip the whole test when IPython isn't installed.
    pretty_mod = pytest.importorskip("IPython.lib.pretty")
    from symbolic_pymc.meta import meta_repr

    some_mt = SomeMetaSymbol()
    assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2)"

    # `print_obj` is state on the shared pretty-printer; toggling it should
    # not affect symbols that have no base object.
    meta_repr.print_obj = True

    assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2)"

    some_mt = SomeMetaSymbol(SomeType(1, 2))

    assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2, obj=SomeType(1, 2))"

    # Restore the default so later pretty output omits `obj`.
    meta_repr.print_obj = False

    some_mt = SomeMetaSymbol(SomeType(1, 2))
    some_mt.field1 = SomeMetaSymbol(SomeType(3, 4))
    some_mt.field1.field2 = SomeMetaSymbol(SomeType(5, 6))

    # Nested meta symbols are pretty-printed recursively with line-wrapping.
    assert (
        pretty_mod.pretty(some_mt)
        == "SomeMetaSymbol(\n field1=SomeMetaSymbol(field1=1, field2=SomeMetaSymbol(field1=1, field2=2)),\n field2=2)"
    )

    some_op_mt = SomeMetaOp()
    assert pretty_mod.pretty(some_op_mt) == "SomeMetaOp()"
174 |
175 |
def test_metatize():
    """`metatize` should wrap ndarrays as hashable, value-comparable objects."""
    a_mt = metatize(np.r_[1, 2, 3])
    b_mt = metatize(np.r_[1, 2, 3, 4])

    assert isinstance(a_mt, HashableNDArray)
    assert isinstance(b_mt, HashableNDArray)

    # Different array values compare unequal, as do non-array objects.
    assert a_mt != b_mt
    assert a_mt != 1
186 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from unification import var
4 |
5 | from symbolic_pymc.meta import MetaSymbol, MetaOp
6 | from symbolic_pymc.utils import meta_diff, eq_lvar, HashableNDArray
7 |
8 |
class SomeOp(object):
    """Stand-in operation type used as the ``base`` of `SomeMetaOp`."""

    def __repr__(self):
        # NOTE(review): the empty repr may be an artifact of markup stripping
        # (e.g. an original "<SomeOp>") — confirm against upstream.
        return ""
12 |
13 |
class SomeType(object):
    """Plain two-field container serving as a meta-symbol ``base`` type."""

    def __init__(self, field1, field2):
        self.field1, self.field2 = field1, field2

    def __repr__(self):
        return f"SomeType({self.field1}, {self.field2})"

    def __str__(self):
        # `str` uses angle brackets so the two forms are distinguishable.
        return f"SomeType<{self.field1}, {self.field2}>"
24 |
25 |
class SomeMetaSymbol(MetaSymbol):
    # Two "property" slots plus one volatile (underscore-prefixed) slot.
    __slots__ = ("field1", "field2", "_blah")
    # The concrete type this meta symbol wraps.
    base = SomeType

    def __init__(self, obj=None):
        super().__init__(obj)
        # Fixed field values keep fresh instances trivially comparable.
        self.field1 = 1
        self.field2 = 2
        self._blah = "a"
35 |
36 |
class SomeMetaOp(MetaOp):
    # No slots of its own; everything comes from `MetaOp`.
    __slots__ = ()
    base = SomeOp

    def output_meta_types(self):
        # Applying this op always produces `SomeMetaSymbol`s.
        return [SomeMetaSymbol]

    def __call__(self, *args, **kwargs):
        return SomeMetaSymbol(*args, **kwargs)
46 |
47 |
class SomeOtherMetaSymbol(MetaSymbol):
    # Same base as `SomeMetaSymbol`, but field values are caller-provided
    # and there is no volatile slot.
    __slots__ = ("field1", "field2")
    base = SomeType

    def __init__(self, field1, field2, obj=None):
        super().__init__(obj)
        self.field1 = field1
        self.field2 = field2
56 |
57 |
class SomeOtherOp(object):
    """A second stand-in operation type, distinct from `SomeOp`."""

    def __repr__(self):
        # NOTE(review): empty repr likely markup-stripped — confirm upstream.
        return ""
61 |
62 |
class SomeOtherMetaOp(SomeMetaOp):
    # Identical behavior to `SomeMetaOp`, but wrapping a different base type;
    # used to exercise base-type comparisons in `meta_diff`.
    base = SomeOtherOp
65 |
66 |
def test_parts_unequal():
    s0 = SomeMetaSymbol()
    s1 = SomeOtherMetaSymbol(1, 2)

    # Differing meta-symbol types are reported first...
    res = meta_diff(s0, s1)
    assert res.reason == "types"
    assert res.path(s0) is s0
    assert res.objects == (s0, s1)

    # ...unless type comparison is disabled, in which case equal parts match.
    res = meta_diff(s0, s1, cmp_types=False)
    assert res is None

    # A differing field is located and reported via its access path.
    s2 = SomeOtherMetaSymbol(1, 3)
    res = meta_diff(s0, s2, cmp_types=False)
    assert res.path(s2) == 3
    assert res.path(s1) == 2
    assert res.reason == "ne_fn"
    assert res.objects == (2, 3)

    res = meta_diff(SomeMetaOp(), SomeMetaOp())
    assert res is None

    # Ops with different `base` types differ even when type checks are off.
    op1 = SomeMetaOp()
    op2 = SomeOtherMetaOp()
    res = meta_diff(op1, op2, cmp_types=False)
    assert res.path(op1) is op1
    assert res.reason == "bases"
    assert res.objects == (op1.base, op2.base)

    # Differences nested inside sequences are found recursively.
    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 5)])
    res = meta_diff(a, b)

    assert res.path(a) == 4
    assert res.path(b) == 5
    assert res.reason == "ne_fn"
    assert res.objects == (4, 5)

    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    res = meta_diff(a, b)
    assert res is None

    # Sequences of differing lengths are reported as such.
    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4), 5])
    b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    res = meta_diff(a, b)
    assert res is not None
    assert res.reason == "seq len"

    a = SomeOtherMetaSymbol(1, ["a", "b"])
    b = SomeOtherMetaSymbol(1, 2)
    res = meta_diff(a, b, cmp_types=False)
    assert res is not None
    assert res.reason == "ne_fn"

    # A list of strings vs. a string with the same characters still differs.
    a = SomeOtherMetaSymbol(1, ["a", "b"])
    b = SomeOtherMetaSymbol(1, "ab")
    res = meta_diff(a, b, cmp_types=False)
    assert res is not None

    # Mappings compare by key/value content, not insertion order.
    a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
    b = SomeOtherMetaSymbol(1, {"b": 2, "a": 1})
    res = meta_diff(a, b)
    assert res is None

    a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
    b = SomeOtherMetaSymbol(1, {"b": 3, "a": 1})
    res = meta_diff(a, b)
    assert res.reason == "ne_fn"
    assert res.objects == (2, 3)
    assert res.path(a) == 2
    assert res.path(b) == 3

    # Mismatched key sets are reported with the item lists.
    a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
    b = SomeOtherMetaSymbol(1, {"a": 1, "c": 2})
    res = meta_diff(a, b)
    assert res.reason == "map keys"
    assert res.path(a) == {"a": 1, "b": 2}
    assert res.objects == ([("a", 1), ("b", 2)], [("a", 1), ("c", 2)])
146 |
147 |
def test_eq_lvar():
    # Identical structures with no logic variables are equal.
    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    assert eq_lvar(a, b) is True

    # A logic variable on one side only breaks the equivalence.
    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is False

    # Distinct logic variables in the same position count as equivalent.
    a = SomeOtherMetaSymbol(1, [2, var()])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is True

    # ...including within mappings.
    a = SomeOtherMetaSymbol(1, [2, {"a": var()}])
    b = SomeOtherMetaSymbol(1, [2, {"a": var()}])
    assert eq_lvar(a, b) is True

    # Non-variable mismatches still make the terms unequal.
    a = SomeOtherMetaSymbol(1, [3, var()])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is False
168 |
169 |
def test_HashableNDArray():
    """Equal-valued arrays viewed as `HashableNDArray` hash and compare equal."""
    x = np.r_[[1, 2], 3].view(HashableNDArray)
    y = np.r_[[1, 2], 3].view(HashableNDArray)

    assert hash(x) == hash(y)
    assert x == y
    # `__ne__` must agree with `__eq__`.
    assert not x != y

    z = np.r_[[1, 2], 4].view(HashableNDArray)

    assert hash(x) != hash(z)
    assert x != z
184 |
--------------------------------------------------------------------------------
/docs/source/semantic_sphinx/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "basic/layout.html" %}
2 |
3 | {% set script_files = script_files + [
4 | '_static/highlight.min.js',
5 | '_static/semantic.min.js',
6 | ]
7 | %}
8 | {% if pagename == 'nb_examples/index' or pagename == 'nb_tutorials/index' %}
9 | {% set script_files = script_files + [
10 | '_static/gallery.js',
11 | ]
12 | %}
13 | {% endif %}
14 |
15 | {%- block extrahead %}
16 |
17 |
18 |
19 |
20 |
21 | {% if pagename == 'nb_tutorials/index' %}
22 |
25 | {# this is used when loading the gallery index using $.ajax fails,
26 | such as on Chrome for documents on localhost #}
27 |
28 | {% endif %}
29 | {% if pagename == 'nb_examples/index' %}
30 |
33 | {# this is used when loading the gallery index using $.ajax fails,
34 | such as on Chrome for documents on localhost #}
35 |
36 | {% endif %}
37 | {% endblock %}
38 |
39 | {# Silence the sidebar's, relbar's #}
40 | {% block header %}{% endblock %}
41 | {% block relbar1 %}{% endblock %}
42 | {% block relbar2 %}{% endblock %}
43 | {% block sidebarsourcelink %}{% endblock %}
44 |
45 | {%- block content %}
46 |
47 |
48 |
49 |
69 |
70 | {% if pagename == 'index' %}
71 |
72 |
Symbolic PyMC
73 |
74 |
75 |
76 |
77 |
78 | {% endif %}
79 |
80 |
81 |
82 | {% if pagename == 'index' %}
83 |
97 | {% endif %}
98 |
99 |
100 | {% block body %}{% endblock %}
101 |
102 |
103 | {%- endblock %}
104 |
105 | {%- block footer %}
106 |
137 | {%- endblock %}
138 |
--------------------------------------------------------------------------------
/symbolic_pymc/tensorflow/printing.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | from functools import singledispatch
7 | from contextlib import contextmanager
8 |
9 | from unification import isvar
10 |
11 | # from tensorflow.python.framework import tensor_util
12 |
13 | from symbolic_pymc.tensorflow.meta import TFlowMetaTensor
14 | from symbolic_pymc.tensorflow.meta import TFlowMetaOp
15 |
16 |
class DepthExceededException(Exception):
    """Raised by `TFlowPrinter.print` once the upper depth bound is reached, to stop further traversal."""

    pass
19 |
20 |
class TFlowPrinter(object):
    """A printer that indents and keeps track of already printed subgraphs.

    Output is emitted only while the current depth lies in
    ``[depth_lower_idx, depth_upper_idx)``; at exactly ``depth_upper_idx`` a
    one-time ellipsis is printed and `DepthExceededException` is raised so the
    caller stops descending.
    """

    def __init__(self, formatter, buffer, depth_lower_idx=0, depth_upper_idx=sys.maxsize):
        # Pre-processing function applied to every printed object.
        self.formatter = formatter
        # Destination to which formatted results are written.
        self.buffer = buffer

        self.depth_count = 0
        self.depth_lower_idx = depth_lower_idx
        self.depth_upper_idx = depth_upper_idx

        # Starting below the lower bound: hint that output above was elided.
        self.indentation = "... " if self.depth_lower_idx > 0 else ""

        # Subgraphs that have already been printed.
        self.printed_subgraphs = set()

    @contextmanager
    def indented(self, indent):
        """Enter one depth level, widening the indent while inside the bounds."""
        saved_indentation = self.indentation
        self.depth_count += 1

        if self.depth_lower_idx < self.depth_count <= self.depth_upper_idx:
            self.indentation = saved_indentation + indent

        try:
            yield
        except DepthExceededException:
            # The cut-off was hit somewhere below; absorb it at this level.
            pass
        finally:
            self.indentation = saved_indentation
            self.depth_count -= 1

    def format(self, obj):
        """Return `obj` formatted and prefixed with the current indentation."""
        return self.indentation + self.formatter(obj)

    def print(self, obj, suffix=""):
        """Write `obj` when in range; emit a one-time ellipsis at the cut-off."""
        within_bounds = self.depth_lower_idx <= self.depth_count < self.depth_upper_idx

        if within_bounds:
            self.buffer.write(self.format(obj) + suffix)
            self.buffer.flush()
        elif self.depth_count == self.depth_upper_idx:
            # Only print the cut-off indicator at the first occurrence.
            self.buffer.write(self.format(f"...{suffix}"))
            self.buffer.flush()

            # Prevent the caller from traversing at this level or higher.
            raise DepthExceededException()

    def println(self, obj):
        """Print `obj` followed by a newline."""
        self.print(obj, suffix="\n")

    def subgraph_add(self, obj):
        """Record `obj` as printed, but only when it was actually printed."""
        if self.depth_lower_idx <= self.depth_count < self.depth_upper_idx:
            self.printed_subgraphs.add(obj)

    def __repr__(self):  # pragma: no cover
        return (
            "TFlowPrinter\n"
            f"\tdepth_lower_idx={self.depth_lower_idx},\tdepth_upper_idx={self.depth_upper_idx}\n"
            f"\tindentation='{self.indentation}',\tdepth_count={self.depth_count}"
        )
88 |
89 |
def tf_dprint(obj, depth_lower=0, depth_upper=10, printer=None):
    """Print a textual representation of a TF graph. The output roughly follows the format of `theano.printing.debugprint`.

    Parameters
    ----------
    obj : Tensorflow object
        Tensorflow graph object to be represented.
    depth_lower : int
        Depth (inclusive) at which printing begins; shallower levels are
        skipped and marked with a "... " prefix.
    depth_upper : int
        Depth (exclusive) at which printing stops; deeper levels are
        replaced by a "..." cut-off marker.
    printer : optional
        Backend used to display the output.  When `None`, a `TFlowPrinter`
        writing to `sys.stdout` is created.

    Raises
    ------
    ValueError
        If `obj` is a `tf.Tensor` with no associated `Operation`
        (i.e. an eager-mode tensor).

    """

    if isinstance(obj, tf.Tensor):
        try:
            obj.op
        except AttributeError as exc:
            # Eager tensors carry no graph `Operation`, so there is nothing
            # to traverse; chain the original error for easier debugging.
            raise ValueError(
                "TensorFlow Operation not available; "
                "try recreating the object with eager-mode disabled"
                " (e.g. within `tensorflow.python.eager.context.graph_mode`)"
            ) from exc

    if printer is None:
        printer = TFlowPrinter(str, sys.stdout, depth_lower, depth_upper)

    _tf_dprint(obj, printer)
120 |
121 |
@singledispatch
def _tf_dprint(obj, printer):
    """Fallback printer for objects without a specialized handler: print `obj` as one line."""
    printer.println(obj)
125 |
126 |
@_tf_dprint.register(tf.Tensor)
@_tf_dprint.register(TFlowMetaTensor)
def _tf_dprint_TFlowMetaTensor(obj, printer):
    """Print a (meta) tensor: a header line, then its constant value or inputs indented.

    Handles logic-variable ops/inputs/node-defs, `Const` ops (whose value is
    printed via `np.array2string`), and previously printed subgraphs (elided
    with "...").
    """

    try:
        shape_str = str(obj.shape.as_list())
    except (ValueError, AttributeError):
        # Shape may be fully unknown or a logic-variable placeholder.
        shape_str = "Unknown"

    prefix = f'Tensor({getattr(obj.op, "type", obj.op)}):{obj.value_index},\tdtype={getattr(obj.dtype, "name", obj.dtype)},\tshape={shape_str},\t"{obj.name}"'

    _tf_dprint(prefix, printer)

    if isvar(obj.op):
        # A logic-variable op has no inspectable structure.
        return
    elif isvar(obj.op.inputs):
        with printer.indented("| "):
            _tf_dprint(f"{obj.op.inputs}", printer)
    elif obj.op.type == "Const":
        with printer.indented("| "):
            if isinstance(obj, tf.Tensor):
                # Use a context manager so the temporary session used for
                # evaluation is closed instead of leaked.
                with tf.compat.v1.Session(graph=obj.graph) as sess:
                    numpy_val = obj.eval(session=sess)
            elif isvar(obj.op.node_def):
                _tf_dprint(f"{obj.op.node_def}", printer)
                return
            else:
                numpy_val = obj.op.node_def.attr["value"]

            _tf_dprint(
                np.array2string(numpy_val, threshold=20, prefix=printer.indentation), printer
            )
    elif len(obj.op.inputs) > 0:
        with printer.indented("| "):
            if obj in printer.printed_subgraphs:
                # This subgraph was already printed elsewhere; elide it.
                _tf_dprint("...", printer)
            else:
                printer.subgraph_add(obj)
                _tf_dprint(obj.op, printer)
165 |
166 |
@_tf_dprint.register(tf.Operation)
@_tf_dprint.register(TFlowMetaOp)
def _tf_dprint_TFlowMetaOp(obj, printer):
    """Print each input tensor of a (meta) operation, in order."""
    for inp in obj.inputs:
        _tf_dprint(inp, printer)
172 |
--------------------------------------------------------------------------------
/docs/logos/svg/PyMC3_banner.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
11 |
12 |
13 |
16 |
18 |
21 |
25 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
53 |
56 |
60 |
64 |
65 |
66 |
70 |
73 |
76 |
77 |
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/tests/theano/test_dispatch.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import numpy as np
4 |
5 | import theano
6 | import theano.tensor as tt
7 |
8 | from unification import reify, unify, var
9 |
10 | from cons.core import ConsError
11 |
12 | from etuples import etuple, etuplize, rator, rands
13 | from etuples.core import ExpressionTuple
14 |
15 | from symbolic_pymc.theano.meta import mt
16 | from symbolic_pymc.theano.utils import graph_equal
17 | from symbolic_pymc.theano.random_variables import MvNormalRV
18 |
19 |
def test_unification():
    """Unify/reify Theano meta graphs against base graphs for vector, scalar, and constant args."""
    x, y, a, b = tt.dvectors("xyab")
    x_s = tt.scalar("x_s")
    y_s = tt.scalar("y_s")
    c_tt = tt.constant(1, "c")
    d_tt = tt.constant(2, "d")

    x_l = var("x_l")
    y_l = var("y_l")

    assert a == reify(x_l, {x_l: a}).reify()
    test_expr = mt.add(1, mt.mul(2, x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    assert graph_equal(test_reify_res.reify(), 1 + 2 * a)

    z = tt.add(b, a)
    assert {x_l: z} == unify(x_l, z)
    assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()

    # Logic variables on both sides should cross-unify
    res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
    assert res[x_l].reify() == b
    assert res[y_l].reify() == a

    mt_expr_add = mt.add(x_l, y_l)

    # The parameters are vectors
    tt_expr_add_1 = tt.add(x, y)
    assert graph_equal(tt_expr_add_1, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_1)).reify())

    # The parameters are scalars
    tt_expr_add_2 = tt.add(x_s, y_s)
    assert graph_equal(tt_expr_add_2, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_2)).reify())

    # The parameters are constants
    tt_expr_add_3 = tt.add(c_tt, d_tt)
    assert graph_equal(tt_expr_add_3, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_3)).reify())
56 |
57 |
def test_etuple_term():
    """Test `etuplize` and `etuple` interaction with `term`."""
    # Take apart an already constructed/evaluated meta
    # object.
    e2 = mt.add(mt.vector(), mt.vector())

    e2_et = etuplize(e2)

    assert isinstance(e2_et, ExpressionTuple)

    # e2_et_expect = etuple(
    #     mt.add,
    #     etuple(mt.TensorVariable,
    #            etuple(mt.TensorType,
    #                   'float64', (False,), None),
    #            None, None, None),
    #     etuple(mt.TensorVariable,
    #            etuple(mt.TensorType,
    #                   'float64', (False,), None),
    #            None, None, None),
    # )
    e2_et_expect = etuple(mt.add, e2.base_arguments[0], e2.base_arguments[1])
    assert e2_et == e2_et_expect
    assert e2_et.eval_obj is e2

    # Make sure expression expansion works from Theano objects, too.
    # First, do it manually.
    tt_expr = tt.vector() + tt.vector()

    mt_expr = mt(tt_expr)
    assert mt_expr.obj is tt_expr
    assert mt_expr.reify() is tt_expr
    e3 = etuplize(mt_expr)
    assert e3 == e2_et
    assert e3.eval_obj is mt_expr
    assert e3.eval_obj.reify() is tt_expr

    # Now, through `etuplize`
    e2_et_2 = etuplize(tt_expr)
    assert e2_et_2 == e3 == e2_et
    assert isinstance(e2_et_2, ExpressionTuple)
    assert e2_et_2.eval_obj == tt_expr

    # `rator`/`rands` should decompose a meta graph into operator and operands
    test_expr = mt(tt.vector("z") * 7)
    assert rator(test_expr) == mt.mul
    assert rands(test_expr)[0] == mt(tt.vector("z"))

    dim_shuffle_op = rator(rands(test_expr)[1])

    assert isinstance(dim_shuffle_op, mt.DimShuffle)
    assert rands(rands(test_expr)[1]) == etuple(mt(7))

    # Operators and constants themselves are not decomposable
    with pytest.raises(ConsError):
        rator(dim_shuffle_op)
    # assert rator(dim_shuffle_op) == mt.DimShuffle
    # assert rands(dim_shuffle_op) == etuple((), ("x",), True)

    const_tensor = rands(rands(test_expr)[1])[0]
    with pytest.raises(ConsError):
        rator(const_tensor)
    with pytest.raises(ConsError):
        rands(const_tensor)

    et_expr = etuplize(test_expr)
    exp_res = etuple(
        mt.mul,
        mt(tt.vector("z")),
        etuple(mt.DimShuffle((), ("x",), True), mt(7))
        # etuple(etuple(mt.DimShuffle, (), ("x",), True), mt(7))
    )

    assert et_expr == exp_res
    assert exp_res.eval_obj == test_expr
131 |
132 |
def test_unify_rvs():
    """Unify a concrete `MvNormalRV` graph against a meta pattern with logic variables."""

    a_tt = tt.vector("a")
    R_tt = tt.matrix("R")
    F_t_tt = tt.matrix("F")
    V_tt = tt.matrix("V")
    beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name="y")

    E_y_lv, V_lv, Y_name_lv = var(), var(), var()
    Y_lv = mt.MvNormalRV(E_y_lv, V_lv, size=var(), rng=var(), name=Y_name_lv)

    s = unify(Y_lv, Y_rv)

    # The substitution should map each logic variable to the matching term
    assert s[E_y_lv].reify() == E_y_rv
    assert s[V_lv].reify() == V_tt
    assert s[Y_name_lv] == "y"
151 |
152 |
def test_unify_ops():
    """Unify a `Scan` op, swap one of its outputs, and reify a new working graph."""
    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = theano.tensor.scalar("state")
    n_steps = theano.tensor.iscalar("nsteps")
    output, updates = theano.scan(
        f_pow2, [], state, [], n_steps=n_steps, truncate_gradient=-1, go_backwards=False
    )

    assert np.array_equal(output.eval({state: 1.0, n_steps: 4}), np.r_[2.0, 4.0, 8.0, 16.0])

    scan_tt = output.owner.inputs[0].owner.op

    inputs_lv, outputs_lv, info_lv = var(), var(), var()
    scan_lv = mt.Scan(inputs_lv, outputs_lv, info_lv)

    s = unify(scan_lv, scan_tt, {})

    assert s is not False
    assert s[inputs_lv] is scan_tt.inputs

    # Replace the scan's output expression (2*x -> 5*x) via the substitution
    s_new = s.copy()
    s_new[outputs_lv] = [5 * s_new[inputs_lv][0]]

    new_scan_mt = reify(scan_lv, s_new)

    # Splice the new op into the meta graph and invalidate cached base objects
    output_mt = mt(output)
    output_mt.owner.inputs[0].owner.op = new_scan_mt
    output_mt.owner.inputs[0].reset()
    output_mt.owner.outputs[0].reset()
    output_mt.owner.reset()
    output_mt.reset()
    assert output_mt.obj is not output

    output_new = output_mt.reify()

    assert output_new != output

    assert np.array_equal(output_new.eval({state: 1.0, n_steps: 4}), np.r_[5.0, 25.0, 125.0, 625.0])
193 |
--------------------------------------------------------------------------------
/tests/tensorflow/test_dispatch.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import tensorflow as tf
4 |
5 | from unification import unify, reify, var
6 |
7 | from kanren.term import term, operator, arguments
8 |
9 | from etuples import etuple, etuplize
10 | from etuples.core import ExpressionTuple
11 |
12 | from cons.core import ConsError
13 |
14 | from symbolic_pymc.tensorflow.meta import TFlowMetaOperator, TFlowMetaTensor, mt
15 |
16 | from tests.tensorflow import run_in_graph_mode
17 | from tests.tensorflow.utils import assert_ops_equal
18 |
19 |
@run_in_graph_mode
def test_operator():
    """Unify/reify `TFlowMetaOperator` against known meta ops, and check mismatches fail."""
    s = unify(TFlowMetaOperator(var("a"), var("b")), mt.add)

    assert s[var("a")] == mt.add.op_def
    assert s[var("b")] == mt.add.node_def

    add_mt = reify(TFlowMetaOperator(var("a"), var("b")), s)

    assert add_mt == mt.add

    # Distinct ops must not unify
    assert unify(mt.mul, mt.matmul) is False
    assert unify(mt.mul.op_def, mt.matmul.op_def) is False
33 |
34 |
@run_in_graph_mode
def test_etuple_term():
    """Exercise `etuplize`/`term`/`operator`/`arguments` on TF meta graphs."""

    assert etuplize("blah", return_bad_args=True) == "blah"

    a = tf.compat.v1.placeholder(tf.float64, name="a")
    b = tf.compat.v1.placeholder(tf.float64, name="b")

    a_mt = mt(a)
    a_mt._obj = None
    a_reified = a_mt.reify()
    assert isinstance(a_reified, tf.Tensor)
    assert a_reified.shape.dims is None

    # Op defs, node defs, and bare tensors are not etuplizable
    with pytest.raises(TypeError):
        etuplize(a_mt.op.op_def)

    with pytest.raises(TypeError):
        etuplize(a_mt.op.node_def, shallow=False)

    with pytest.raises(TypeError):
        etuplize(a_mt, shallow=False)

    # Now, consider a meta graph with operator arguments
    add_mt = mt.AddV2(a, b)
    add_et = etuplize(add_mt, shallow=True)
    assert isinstance(add_et, ExpressionTuple)
    assert add_et[0].op_def == mt.AddV2.op_def

    # Check `kanren`'s term framework
    assert isinstance(operator(add_mt), TFlowMetaOperator)
    assert arguments(add_mt) == add_mt.op.inputs

    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert isinstance(add_et[0], TFlowMetaOperator)
    assert add_et[1:] == add_mt.op.inputs
    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert term(operator(add_mt), arguments(add_mt)) == add_mt

    add_mt = mt.AddV2(a, add_mt)
    add_et = etuplize(add_mt, shallow=False)

    assert isinstance(add_et, ExpressionTuple)
    assert len(add_et) == 3
    assert add_et[0].op_def == mt.AddV2.op_def
    assert len(add_et[2]) == 3
    assert add_et[2][0].op_def == mt.AddV2.op_def
    assert add_et.eval_obj is add_mt

    # Re-evaluation in a fresh graph should still reproduce the meta object
    add_et._eval_obj = ExpressionTuple.null
    with tf.Graph().as_default():
        assert add_et.eval_obj == add_mt

    # Make sure things work with logic variables
    add_lvar_mt = TFlowMetaTensor(var(), var(), [1, 2])

    with pytest.raises(ConsError):
        assert operator(add_lvar_mt) is None

    with pytest.raises(ConsError):
        assert arguments(add_lvar_mt) is None
98 |
99 |
@run_in_graph_mode
def test_basic_unify_reify():
    """Unify and reify simple TF meta graphs, checking base-object identity is kept."""
    # Test reification with manually constructed replacements
    a = tf.compat.v1.placeholder(tf.float64, name="a")
    x_l = var("x_l")
    a_reif = reify(x_l, {x_l: mt(a)})
    assert a_reif.obj is not None
    # Confirm that identity is preserved (i.e. that the underlying object
    # was properly tracked and not unnecessarily reconstructed)
    assert a == a_reif.reify()

    test_expr = mt.add(
        tf.constant(1, dtype=tf.float64), mt.mul(tf.constant(2, dtype=tf.float64), x_l)
    )
    test_reify_res = reify(test_expr, {x_l: a})
    test_base_res = test_reify_res.reify()
    assert isinstance(test_base_res, tf.Tensor)

    with tf.Graph().as_default():
        a = tf.compat.v1.placeholder(tf.float64, name="a")
        expected_res = tf.add(
            tf.constant(1, dtype=tf.float64), tf.multiply(tf.constant(2, dtype=tf.float64), a)
        )
        assert_ops_equal(test_base_res, expected_res)

        # Simply make sure that unification succeeds
        meta_expected_res = mt(expected_res)
        s_test = unify(test_expr, meta_expected_res, {})
        assert len(s_test) == 3

        assert reify(test_expr, s_test) == meta_expected_res
131 |
132 |
@run_in_graph_mode
def test_sexp_unify_reify():
    """Make sure we can unify and reify etuples/S-exps."""
    # Unify `A . (x + y)`, for `x`, `y` logic variables
    A = tf.compat.v1.placeholder(tf.float64, name="A", shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder(tf.float64, name="x", shape=tf.TensorShape([None, 1]))
    y = tf.compat.v1.placeholder(tf.float64, name="y", shape=tf.TensorShape([None, 1]))

    z = tf.matmul(A, tf.add(x, y))

    z_sexp = etuplize(z, shallow=False)

    # Let's just be sure that the original TF objects are preserved
    assert z_sexp[1].reify() == A
    assert z_sexp[2][1].reify() == x
    assert z_sexp[2][2].reify() == y

    A_lv, x_lv, y_lv = var(), var(), var()
    dis_pat = etuple(
        TFlowMetaOperator(mt.matmul.op_def, var()),
        A_lv,
        etuple(TFlowMetaOperator(mt.add.op_def, var()), x_lv, y_lv),
    )

    s = unify(dis_pat, z_sexp, {})

    assert s[A_lv] == mt(A)
    assert s[x_lv] == mt(x)
    assert s[y_lv] == mt(y)

    # Now, we construct a graph that reflects the distributive property and
    # reify with the substitutions from the un-distributed form
    out_pat = etuple(mt.add, etuple(mt.matmul, A_lv, x_lv), etuple(mt.matmul, A_lv, y_lv))
    z_dist = reify(out_pat, s)

    # Evaluate the tuple-expression and get a meta object/graph
    z_dist_mt = z_dist.eval_obj

    # If all the logic variables were reified, we should be able to
    # further reify the meta graph and get a concrete TF graph
    z_dist_tf = z_dist_mt.reify()

    assert isinstance(z_dist_tf, tf.Tensor)

    # Check the first part of `A . x + A . y` (i.e. `A . x`)
    assert z_dist_tf.op.inputs[0].op.inputs[0] == A
    assert z_dist_tf.op.inputs[0].op.inputs[1] == x
    # Now, the second, `A . y`
    assert z_dist_tf.op.inputs[1].op.inputs[0] == A
    assert z_dist_tf.op.inputs[1].op.inputs[1] == y
183 |
--------------------------------------------------------------------------------
/tests/theano/test_printing.py:
--------------------------------------------------------------------------------
1 | import textwrap
2 |
3 | import theano.tensor as tt
4 |
5 | from symbolic_pymc.theano.random_variables import NormalRV, observed
6 | from symbolic_pymc.theano.printing import tt_pprint, tt_tprint
7 |
8 |
def test_notex_print():
    """Check the plain-text pretty-printer output for RV graphs, indexing, and observations."""

    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(
        r"""
        b in R, \mu in R, \sigma in R
        a ~ N(\mu, \sigma**2) in R
        (b * a)
        """
    )
    assert tt_pprint(tt_normalrv_noname_expr) == expected.strip()

    # Make sure the constant shape is show in values and not symbols.
    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X"
    )
    expected = textwrap.dedent(
        r"""
        b in R, \mu in R, \sigma in R
        X ~ N(\mu, \sigma**2) in R**(2 x 1)
        (b * X)
        """
    )
    assert tt_pprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2")
    )
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")
    ) + tt.scalar("c")
    expected = textwrap.dedent(
        r"""
        M in R**(N^M_0 x N^M_1), \mu_2 in R, \sigma_2 in R
        b in R, \sigma in R, c in R
        a ~ N(\mu_2, \sigma_2**2) in R, d ~ N((M * a), \sigma**2) in R**(N^d_0 x N^d_1)
        ((M * a) * ((b * d) + c))
        """
    )
    assert tt_pprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(
        r"""
        b in Z, c in Z, M in R**(N^M_0 x N^M_1)
        M[b, c]
        """
    )
    # TODO: "c" should be "1".
    assert (
        tt_pprint(tt.matrix("M")[tt.iscalar("a"), tt.constant(1, dtype="int")]) == expected.strip()
    )

    expected = textwrap.dedent(
        r"""
        M in R**(N^M_0 x N^M_1)
        M[1]
        """
    )
    assert tt_pprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(
        r"""
        M in N**(N^M_0)
        M[2:4:0]
        """
    )
    assert tt_pprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    # An observed RV should print as an equality with its observed value
    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)

    expected = textwrap.dedent(
        r"""
        \mu in R, \sigma in R
        a ~ N(\mu, \sigma**2) in R
        a = 1.0
        """
    )
    assert tt_pprint(rv_obs) == expected.strip()
88 |
89 |
def test_tex_print():
    """Check the LaTeX pretty-printer output for the same graphs as `test_notex_print`."""

    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
          \\
          a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}
          \end{gathered}
          \\
          (b \odot a)
        \end{equation}
        """
    )
    assert tt_tprint(tt_normalrv_noname_expr) == expected.strip()

    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X"
    )
    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
          \\
          X \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}^{2 \times 1}
          \end{gathered}
          \\
          (b \odot X)
        \end{equation}
        """
    )
    assert tt_tprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2")
    )
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")
    ) + tt.scalar("c")
    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
          \\
          \mu_2 \in \mathbb{R}, \,\sigma_2 \in \mathbb{R}
          \\
          b \in \mathbb{R}, \,\sigma \in \mathbb{R}, \,c \in \mathbb{R}
          \\
          a \sim \operatorname{N}\left(\mu_2, {\sigma_2}^{2}\right)\,  \in \mathbb{R}
          \\
          d \sim \operatorname{N}\left((M \odot a), {\sigma}^{2}\right)\,  \in \mathbb{R}^{N^{d}_{0} \times N^{d}_{1}}
          \end{gathered}
          \\
          ((M \odot a) \odot ((b \odot d) + c))
        \end{equation}
        """
    )
    assert tt_tprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          b \in \mathbb{Z}, \,c \in \mathbb{Z}, \,M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
          \end{gathered}
          \\
          M\left[b, \,c\right]
        \end{equation}
        """
    )
    # TODO: "c" should be "1".
    assert (
        tt_tprint(tt.matrix("M")[tt.iscalar("a"), tt.constant(1, dtype="int")]) == expected.strip()
    )

    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
          \end{gathered}
          \\
          M\left[1\right]
        \end{equation}
        """
    )
    assert tt_tprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          M \in \mathbb{N}^{N^{M}_{0}}
          \end{gathered}
          \\
          M\left[2:4:0\right]
        \end{equation}
        """
    )
    assert tt_tprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    # An observed RV should print as an equality with its observed value
    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)

    expected = textwrap.dedent(
        r"""
        \begin{equation}
          \begin{gathered}
          \mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
          \\
          a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}
          \end{gathered}
          \\
          a = 1.0
        \end{equation}
        """
    )
    assert tt_tprint(rv_obs) == expected.strip()
212 |
--------------------------------------------------------------------------------
/tests/tensorflow/test_printing.py:
--------------------------------------------------------------------------------
1 | import io
2 | import textwrap
3 |
4 | import pytest
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from contextlib import redirect_stdout
9 |
10 | from unification import var, Var
11 |
12 | from symbolic_pymc.tensorflow.meta import mt
13 | from symbolic_pymc.tensorflow.printing import tf_dprint
14 |
15 | from tests.tensorflow import run_in_graph_mode
16 |
17 |
def test_eager_mode():
    """`tf_dprint` must raise `ValueError` for eager tensors (no graph `Operation`)."""

    assert tf.executing_eagerly()

    N = 100
    X = np.vstack([np.random.randn(N), np.ones(N)]).T
    X_tf = tf.convert_to_tensor(X)

    with pytest.raises(ValueError):
        _ = tf_dprint(X_tf)
28 |
29 |
@run_in_graph_mode
def test_ascii_printing():
    """Make sure we can ascii/text print a TF graph."""

    A = tf.compat.v1.placeholder("float", name="A", shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder("float", name="x", shape=tf.TensorShape([None, 1]))
    y = tf.multiply(1.0, x, name="y")

    z = tf.matmul(A, tf.add(y, y, name="x_p_y"), name="A_dot")

    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(z)

    # The repeated `y` subgraph should be elided with "..." the second time
    expected_out = textwrap.dedent(
        """
        Tensor(MatMul):0,\tdtype=float32,\tshape=[None, 1],\t"A_dot:0"
        |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, None],\t"A:0"
        |  Tensor(Add):0,\tdtype=float32,\tshape=[None, 1],\t"x_p_y:0"
        |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        |  |  |  Tensor(Const):0,\tdtype=float32,\tshape=[],\t"y/x:0"
        |  |  |  |  1.
        |  |  |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, 1],\t"x:0"
        |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        |  |  |  ...
        """
    )

    assert std_out.getvalue() == expected_out.lstrip()

    # Meta graphs containing logic variables should print `~_N` placeholders
    std_out = io.StringIO()
    with tf.Graph().as_default(), redirect_stdout(std_out):
        Var._id = 0
        tt_lv_inputs_mt = mt.Tensor(mt.Operation(var(), var(), var()), 0, var())
        tt_const_lv_nodedef_mt = mt.Tensor(mt.Operation(mt.Const.op_def, var(), ()), 0, var())
        tt_lv_op_mt = mt.Tensor(var(), 0, var())
        test_mt = mt(1) + tt_lv_inputs_mt + tt_const_lv_nodedef_mt + tt_lv_op_mt
        tf_dprint(test_mt)

    expected_out = textwrap.dedent(
        """
        Tensor(AddV2):0,\tdtype=int32,\tshape=~_11,\t"add:0"
        |  Tensor(AddV2):0,\tdtype=int32,\tshape=~_12,\t"add:0"
        |  |  Tensor(AddV2):0,\tdtype=int32,\tshape=~_13,\t"add:0"
        |  |  |  Tensor(Const):0,\tdtype=int32,\tshape=[],\t"Const:0"
        |  |  |  |  1
        |  |  |  Tensor(~_15):0,\tdtype=~_3,\tshape=~_14,\t"~_17"
        |  |  |  |  ~_2
        |  |  Tensor(Const):0,\tdtype=~_5,\tshape=~_18,\t"~_20"
        |  |  |  ~_4
        |  Tensor(~_6):0,\tdtype=~_7,\tshape=~_21,\t"~_22"
        """
    )

    assert std_out.getvalue() == expected_out.lstrip()
85 |
86 |
@run_in_graph_mode
def test_unknown_shape():
    """Make sure we can ascii/text print a TF graph with unknown shapes."""

    # No shape is given, so the printer should fall back to "Unknown"
    A = tf.compat.v1.placeholder(tf.float64, name="A")

    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(A)

    expected_out = 'Tensor(Placeholder):0,\tdtype=float64,\tshape=Unknown,\t"A:0"\n'

    assert std_out.getvalue() == expected_out.lstrip()
100 |
101 |
@run_in_graph_mode
def test_numpy():
    """Make sure we can ascii/text print constant tensors with large Numpy arrays."""

    # Large 1-D constants should be elided with "..." (array2string threshold)
    with tf.Graph().as_default():
        A = tf.convert_to_tensor(np.arange(100))

        std_out = io.StringIO()
        with redirect_stdout(std_out):
            tf_dprint(A)

        expected_out = textwrap.dedent(
            """
            Tensor(Const):0,\tdtype=int64,\tshape=[100],\t"Const:0"
            |  [ 0  1  2 ... 97 98 99]
            """
        )

        assert std_out.getvalue() == expected_out.lstrip()

    # Same elision for a large 2-D constant
    N = 100
    np.random.seed(12345)
    X = np.vstack([np.random.randn(N), np.ones(N)]).T

    with tf.Graph().as_default():
        X_tf = tf.convert_to_tensor(X)

        std_out = io.StringIO()
        with redirect_stdout(std_out):
            tf_dprint(X_tf)

        expected_out = textwrap.dedent(
            """
            Tensor(Const):0,\tdtype=float64,\tshape=[100, 2],\t"Const:0"
            |  [[-0.20470766  1.        ]
                [ 0.47894334  1.        ]
                [-0.51943872  1.        ]
                ...
                [-0.74853155  1.        ]
                [ 0.58496974  1.        ]
                [ 0.15267657  1.        ]]
            """
        )

        assert std_out.getvalue() == expected_out.lstrip()
147 |
148 |
@run_in_graph_mode
def test_depth_indexing():
    """Make sure graph indexing functions as expected."""

    A = tf.compat.v1.placeholder("float", name="A", shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder("float", name="x", shape=tf.TensorShape([None, 1]))
    y = tf.multiply(1.0, x, name="y")

    z = tf.matmul(A, tf.add(y, y, name="x_p_y"), name="A_dot")

    # Upper bound only: deeper levels become a "..." cut-off marker
    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(z, depth_upper=3)

    expected_out = textwrap.dedent(
        """
        Tensor(MatMul):0,\tdtype=float32,\tshape=[None, 1],\t"A_dot:0"
        |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, None],\t"A:0"
        |  Tensor(Add):0,\tdtype=float32,\tshape=[None, 1],\t"x_p_y:0"
        |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        |  |  |  ...
        |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        |  |  |  ...
        """
    )

    assert std_out.getvalue() == expected_out.lstrip()

    # Lower bound only: skipped shallow levels get a "... " prefix
    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(z, depth_lower=1)

    expected_out = textwrap.dedent(
        """
        ... Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, None],\t"A:0"
        ... Tensor(Add):0,\tdtype=float32,\tshape=[None, 1],\t"x_p_y:0"
        ... |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        ... |  |  Tensor(Const):0,\tdtype=float32,\tshape=[],\t"y/x:0"
        ... |  |  |  1.
        ... |  |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, 1],\t"x:0"
        ... |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        ... |  |  ...
        """
    )

    assert std_out.getvalue() == expected_out.lstrip()

    # Both bounds together
    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(z, depth_lower=1, depth_upper=4)

    expected_out = textwrap.dedent(
        """
        ... Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, None],\t"A:0"
        ... Tensor(Add):0,\tdtype=float32,\tshape=[None, 1],\t"x_p_y:0"
        ... |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        ... |  |  Tensor(Const):0,\tdtype=float32,\tshape=[],\t"y/x:0"
        ... |  |  |  ...
        ... |  |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, 1],\t"x:0"
        ... |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
        ... |  |  ...
        """
    )

    assert std_out.getvalue() == expected_out.lstrip()
214 |
--------------------------------------------------------------------------------
/tests/theano/test_meta.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import theano
4 | import theano.tensor as tt
5 |
6 | from collections.abc import Iterator
7 |
8 | from unification import var, isvar, variables
9 |
10 | from symbolic_pymc.meta import MetaSymbol, MetaOp
11 | from symbolic_pymc.theano.meta import (
12 | metatize,
13 | TheanoMetaOp,
14 | TheanoMetaApply,
15 | TheanoMetaVariable,
16 | TheanoMetaTensorConstant,
17 | TheanoMetaTensorVariable,
18 | TheanoMetaType,
19 | TheanoMetaTensorType,
20 | mt,
21 | )
22 | from symbolic_pymc.theano.utils import graph_equal
23 |
24 |
def test_metatize():
    """`metatize` should wrap Theano objects, containers, arrays, and dynamically created `Op` types."""
    vec_tt = tt.vector("vec")
    vec_m = metatize(vec_tt)
    assert vec_m.base == type(vec_tt)

    # Containers are metatized element-wise, preserving the container kind
    test_list = [1, 2, 3]
    metatize_test_list = metatize(test_list)
    assert isinstance(metatize_test_list, list)
    assert all(isinstance(m, MetaSymbol) for m in metatize_test_list)

    test_iter = iter([1, 2, 3])
    metatize_test_iter = metatize(test_iter)
    assert isinstance(metatize_test_iter, Iterator)
    assert all(isinstance(m, MetaSymbol) for m in metatize_test_iter)

    # Logic variables pass through unchanged
    test_out = metatize(var())
    assert isvar(test_out)

    with variables(vec_tt):
        test_out = metatize(vec_tt)
        assert test_out == vec_tt
        assert isvar(test_out)

    test_out = metatize(np.r_[1, 2, 3])
    assert isinstance(test_out, MetaSymbol)

    class TestClass(object):
        pass

    # Unsupported types are rejected
    with pytest.raises(ValueError):
        metatize(TestClass())

    class TestOp(tt.gof.Op):
        pass

    # A new base `Op` subclass should yield a new meta `Op` type
    test_out = metatize(TestOp)
    assert issubclass(test_out, MetaOp)

    test_op_tt = TestOp()
    test_obj = test_out(obj=test_op_tt)
    assert isinstance(test_obj, MetaSymbol)
    assert test_obj.obj == test_op_tt
    assert test_obj.base == TestOp
68 |
69 |
def test_meta_classes():
    """Exercise the Theano meta-object wrappers: variables, types, `Op`s, and applies."""
    vec_tt = tt.vector("vec")
    vec_m = metatize(vec_tt)
    assert vec_m.obj == vec_tt
    assert type(vec_m) == TheanoMetaTensorVariable

    # This should invalidate the underlying base object.
    vec_m.index = 0
    assert vec_m.obj is None
    # Reification should rebuild an equivalent base variable from the meta data.
    assert vec_m.reify().type == vec_tt.type
    assert vec_m.reify().name == vec_tt.name

    # The meta variable's type should itself be a meta object mirroring the
    # base `TensorType`'s attributes.
    vec_type_m = vec_m.type
    assert type(vec_type_m) == TheanoMetaTensorType
    assert vec_type_m.dtype == vec_tt.dtype
    assert vec_type_m.broadcastable == vec_tt.type.broadcastable
    assert vec_type_m.name == vec_tt.type.name
    assert vec_m.ndim == 1

    # Hashing and equality are determined by the (base, obj) pair.
    var_type = TheanoMetaType(obj=tt.type.scal.Scalar(tt.config.floatX))
    assert hash(var_type) == hash((var_type.base, var_type.obj))

    var_type_2 = TheanoMetaType(obj=tt.type.scal.Scalar(tt.config.floatX))
    assert var_type == var_type_2

    # With logic variables for the broadcast pattern, `ndim` is unknown, too.
    var_mt = TheanoMetaTensorVariable(
        TheanoMetaTensorType(tt.config.floatX, var(), var()), None, None, None
    )
    assert isvar(var_mt.ndim)

    assert graph_equal(tt.add(1, 2), mt.add(1, 2).reify())

    # A graph containing a logic variable reifies to a meta object (not a base
    # Theano object), while fully-ground sub-terms keep their base objects.
    meta_var = mt.add(1, var()).reify()
    assert isinstance(meta_var, TheanoMetaTensorVariable)
    assert isinstance(meta_var.owner.op.obj, theano.Op)
    assert isinstance(meta_var.owner.inputs[0].obj, tt.TensorConstant)

    # `metatize` should map over lists element-wise.
    test_vals = [1, 2.4]
    meta_vars = metatize(test_vals)
    assert meta_vars == [metatize(x) for x in test_vals]
    # TODO: Do we really want meta variables to be equal to their
    # reified base objects?
    # assert meta_vars == [tt.as_tensor_variable(x) for x in test_vals]

    # A logic variable can stand in for a keyword argument like `name`.
    name_mt = var()
    add_tt = tt.add(0, 1)
    add_mt = mt.add(0, 1, name=name_mt)

    assert add_mt.name is name_mt
    assert add_tt.type == add_mt.type.reify()
    assert mt(add_tt.owner) == add_mt.owner
    # assert isvar(add_mt._obj)

    # Let's confirm that we can dynamically create a new meta `Op` type
    test_mat = np.c_[[2, 3], [4, 5]]

    svd_tt = tt.nlinalg.SVD()(test_mat)
    # First, can we create one from a new base `Op` instance?
    svd_op_mt = mt(tt.nlinalg.SVD())
    svd_mt = svd_op_mt(test_mat)

    assert svd_mt[0].owner.nin == 1
    assert svd_mt[0].owner.nout == 3

    svd_outputs = svd_mt[0].owner.outputs
    assert svd_outputs[0] == svd_mt[0]
    assert svd_outputs[1] == svd_mt[1]
    assert svd_outputs[2] == svd_mt[2]

    assert mt(svd_tt) == svd_mt

    # Next, can we create one from a base `Op` type/class?
    svd_op_type_mt = mt.nlinalg.SVD
    assert isinstance(svd_op_type_mt, type)
    assert issubclass(svd_op_type_mt, TheanoMetaOp)

    # svd_op_inst_mt = svd_op_type_mt(tt.nlinalg.SVD())
    # svd_op_inst_mt(test_mat) == svd_mt

    # Apply node with logic variable as outputs
    svd_apply_mt = TheanoMetaApply(svd_op_mt, [test_mat], outputs=var("out"))
    assert isinstance(svd_apply_mt.inputs, tuple)
    assert isinstance(svd_apply_mt.inputs[0], MetaSymbol)
    assert isvar(svd_apply_mt.outputs)
    assert svd_apply_mt.nin == 1
    # `nout` can't be known when the outputs are a logic variable.
    assert svd_apply_mt.nout is None

    # Apply node with logic variable as inputs
    svd_apply_mt = TheanoMetaApply(svd_op_mt, var("in"), outputs=var("out"))
    assert svd_apply_mt.nin is None

    # A meta variable with None index
    var_mt = TheanoMetaVariable(svd_mt[0].type, svd_mt[0].owner, None, None)
    assert var_mt.index is None
    reified_var_mt = var_mt.reify()

    assert isinstance(reified_var_mt, TheanoMetaTensorVariable)
    # Reification fills in the index (and caches it on the meta variable).
    assert reified_var_mt.index == 0
    assert var_mt.index == 0
    assert reified_var_mt == svd_mt[0]

    # A meta variable with logic variable index
    var_mt = TheanoMetaVariable(svd_mt[0].type, svd_mt[0].owner, var("index"), None)
    assert isvar(var_mt.index)
    reified_var_mt = var_mt.reify()
    # A logic-variable index is left untouched by reification...
    assert isvar(var_mt.index)
    # ...even though the reified result knows its concrete index.
    assert reified_var_mt.index == 0

    const_mt = mt(1)
    assert isinstance(const_mt, TheanoMetaTensorConstant)
    assert const_mt != mt(2)
181 |
182 |
def test_meta_str():
    """A meta `Op` should render as its meta class wrapping the base `Op`."""
    expected = "TheanoMetaElemwise(Elemwise{add,no_inplace})"
    assert str(mt.add) == expected
185 |
186 |
def test_meta_pretty():
    """IPython pretty-printing of a meta `Op` should match its `str` form."""
    pretty_mod = pytest.importorskip("IPython.lib.pretty")
    expected = "TheanoMetaElemwise(Elemwise{add,no_inplace})"
    assert pretty_mod.pretty(mt.add) == expected
190 |
191 |
def test_meta_helpers():
    """Exercise the convenience constructors `mt.zeros` and `mt.diag`."""

    def reified_val(meta_obj):
        # Evaluate the base Theano graph behind a meta object.
        return meta_obj.reify().eval()

    assert np.array_equal(reified_val(mt.zeros(2)), np.array([0.0, 0.0]))
    assert np.array_equal(reified_val(mt.zeros(2, dtype=int)), np.array([0, 0]))

    eye_mt = mt(np.eye(2))
    assert np.array_equal(reified_val(mt.diag(eye_mt)), np.array([1.0, 1.0]))

    vec = np.r_[1, 2, 3]
    assert np.array_equal(reified_val(mt.diag(mt(vec))), np.diag(vec))
205 |
206 |
def test_scan_op():
    """`metatize` should handle graphs containing `Scan` `Op`s."""

    def double(prev):
        # Inner step function: double the previous state.
        return 2 * prev

    init_state = theano.tensor.scalar("state")
    n_steps = theano.tensor.iscalar("nsteps")
    output, updates = theano.scan(
        double, [], init_state, [], n_steps=n_steps, truncate_gradient=-1, go_backwards=False
    )

    output_mt = mt(output)

    # The `Scan` op inside the graph should have been metatized.
    assert isinstance(output_mt.owner.inputs[0].owner.op, mt.Scan)

    # Reifying the meta graph should yield the exact base variable back.
    assert output is output_mt.reify()
222 |
--------------------------------------------------------------------------------
/symbolic_pymc/theano/utils.py:
--------------------------------------------------------------------------------
1 | import theano.tensor as tt
2 |
3 | from theano.gof import FunctionGraph as tt_FunctionGraph, Query
4 | from theano.gof.graph import inputs as tt_inputs, clone_get_equiv, io_toposort, ancestors
5 | from theano.compile import optdb
6 | from theano.scan_module.scan_op import Scan
7 |
8 | from .meta import mt
9 | from .opt import FunctionGraph
10 | from .ops import RandomVariable
11 | from .random_variables import Observed
12 |
13 | from unification.utils import transitive_get as walk
14 |
15 |
# Pre-built optimizer applying Theano's "canonicalize" rewrites; used by
# `canonicalize` below.
canonicalize_opt = optdb.query(Query(include=["canonicalize"]))
17 |
18 |
def replace_input_nodes(inputs, outputs, replacements=None, memo=None, clone_inputs=True):
    """Recreate a graph, replacing input variables according to a given map.

    This is helpful if you want to replace the variable dependencies of
    an existing variable according to a `clone_get_equiv` map and/or
    replacement variables that already exist within a `FunctionGraph`.

    The latter is especially annoying, because you can't simply make a
    `FunctionGraph` for the variable to be adjusted and then use that to
    perform the replacement; if the variables to be replaced are already in a
    `FunctionGraph` any such replacement will err-out saying "...these
    variables are already owned by another graph..."

    Parameters
    ----------
    inputs: list
        List of input nodes.
    outputs: list
        List of output nodes. Everything between `inputs` and these `outputs`
        is the graph under consideration.
    replacements: dict (optional)
        A dictionary mapping existing nodes to their new ones.
        These values in this map will be used instead of newly generated
        clones. This dict is not altered.
    memo: dict (optional)
        A dictionary to update with the initial `replacements` and maps from
        any old-to-new nodes arising from an actual replacement.
        It serves the same role as `replacements`, but it is updated
        as elements are cloned.
    clone_inputs: bool (optional)
        If enabled, clone all the input nodes that aren't mapped in
        `replacements`. These cloned nodes are mapped in `memo`, as well.

    Results
    -------
    out: memo

    """
    memo = {} if memo is None else memo

    if replacements is not None:
        memo.update(replacements)

    # Walk the graph in dependency order, cloning any node whose inputs were
    # remapped, and recording every old-to-new pairing in `memo`.
    for node in io_toposort(inputs, outputs):

        if clone_inputs:
            # TODO: What if all the inputs are in the memo?
            new_inputs = [memo.setdefault(inp, inp.clone()) for inp in node.inputs]
        else:
            new_inputs = [walk(inp, memo) for inp in node.inputs]

        if any(old != new for old, new in zip(node.inputs, new_inputs)):
            new_node = node.clone_with_new_inputs(new_inputs)

            memo.setdefault(node, new_node)
            for old_out, new_out in zip(node.outputs, new_node.outputs):
                memo.setdefault(old_out, new_out)

    return memo
78 |
79 |
def graph_equal(x, y):
    """Compare Theano graph elements structurally (via meta objects) rather than by identity."""
    try:
        if isinstance(x, (list, tuple)) and isinstance(y, (list, tuple)):
            if len(x) != len(y):
                return False
            return all(mt(a) == mt(b) for a, b in zip(x, y))

        return mt(x) == mt(y)
    except ValueError:
        # `metatize` raises for objects it can't wrap; treat those as unequal.
        return False
88 |
89 |
def optimize_graph(x, optimization, return_graph=None, in_place=False):
    """Easily optimize Theano graphs.

    Apply an optimization to either the graph formed by a Theano variable or an
    existing graph and return the resulting optimized graph.

    When given an existing `FunctionGraph`, the optimization is
    performed without side-effects (i.e. won't change the given graph).

    Parameters
    ----------
    x: TensorVariable or FunctionGraph
        The variable (or graph) to optimize.
    optimization
        The Theano optimizer to apply (e.g. an `optdb.query` result).
    return_graph: bool (optional)
        Whether to return the optimized `FunctionGraph` or its output
        variable(s).  Defaults to `False` when `x` is a variable and to
        `True` when `x` is already a `FunctionGraph`.
    in_place: bool (optional)
        If enabled, optimize the graph itself instead of a clone.

    """
    if not isinstance(x, tt_FunctionGraph):
        inputs = tt_inputs([x])
        outputs = [x]
        # Clone the graph so the caller's variables aren't captured by the new
        # `FunctionGraph` (which takes ownership of its contents).
        model_memo = clone_get_equiv(inputs, outputs, copy_orphans=False)
        cloned_inputs = [model_memo[i] for i in inputs if not isinstance(i, tt.Constant)]
        cloned_outputs = [model_memo[i] for i in outputs]

        x_graph = FunctionGraph(cloned_inputs, cloned_outputs, clone=False)
        # Keep the old-to-new map around so callers can locate their original
        # variables within the new graph.
        x_graph.memo = model_memo

        # For a bare variable input, default to returning output variables.
        if return_graph is None:
            return_graph = False
    else:
        x_graph = x

    # For a `FunctionGraph` input, default to returning a graph (the branch
    # above has already resolved `None` to `False` for bare variables).
    if return_graph is None:
        return_graph = True

    x_graph_opt = x_graph if in_place else x_graph.clone()
    _ = optimization.optimize(x_graph_opt)

    if return_graph:
        res = x_graph_opt
    else:
        res = x_graph_opt.outputs
        # Detach the outputs from the (temporary) optimized graph.
        x_graph_opt.disown()
    if len(res) == 1:
        (res,) = res
    return res
129 |
130 |
def canonicalize(x, **kwargs):
    """Apply Theano's "canonicalize" optimizations to a variable and/or graph.

    Extra keyword arguments are forwarded to `optimize_graph`.
    """
    res = optimize_graph(x, canonicalize_opt, **kwargs)
    return res
134 |
135 |
def get_rv_observation(node):
    """Find the `Observed` node attached to a `RandomVariable` node, or return `None`.

    Raises
    ------
    ValueError
        If `node` does not belong to a `FunctionGraph`.
    """
    fgraph = getattr(node, "fgraph", None)
    if not fgraph:
        raise ValueError("Node does not belong to a `FunctionGraph`")

    if not isinstance(node.op, RandomVariable):
        return None

    for client, idx in node.default_output().clients:
        # Clients recorded as "output" stand in for the graph's output list.
        if client == "output":
            client = fgraph.outputs[idx].owner

        if isinstance(client.op, Observed):
            return client

    return None
150 |
151 |
def is_random_variable(var):
    """Check whether a Theano variable is (the output of) a random variable.

    Output
    ------
    Tuple[TensorVariable, TensorVariable]
        Returns a tuple with the `RandomVariable` or `Scan` `Op` containing a
        `RandomVariable` variable--along with the corresponding output variable
        that is a client of said `Op`; otherwise, `None`.

    """
    node = var.owner

    if not var.owner:
        return None

    # Consider `Subtensor` `Op`s that slice a `Scan`. This is the type of
    # output sometimes returned by `theano.scan` when taps/lags are used.
    if isinstance(node.op, tt.Subtensor) and node.inputs[0].owner:
        var = node.inputs[0]
        node = var.owner

    if isinstance(node.op, RandomVariable):
        return (var, var)

    if isinstance(node.op, Scan):
        op = node.op
        # Map this outer output of the `Scan` back to the corresponding output
        # of the inner graph via the op's variable mappings.
        inner_out_var_idx = op.var_mappings["outer_out_from_inner_out"][node.outputs.index(var)]
        inner_out_var = op.outputs[inner_out_var_idx]

        # NOTE(review): this assumes every inner output has an owner;
        # confirm for `Scan`s whose inner outputs are bare inputs/constants.
        if isinstance(inner_out_var.owner.op, RandomVariable):
            return (var, inner_out_var)

    return None
186 |
187 |
def vars_to_rvs(var):
    """Compute paths from `TensorVariable`s to their underlying `RandomVariable` outputs."""
    res = {}
    for anc in ancestors([var]):
        rv_info = is_random_variable(anc)
        if rv_info is None:
            continue
        # When the ancestor *is* the random variable's own output, keep just
        # the inner output; otherwise keep the full (outer, inner) pair.
        res[anc] = rv_info if rv_info[0] is not anc else rv_info[1]
    return res
195 |
196 |
def get_random_outer_outputs(scan_args):
    """Get the `RandomVariable` outputs of a `Scan` (well, it's `ScanArgs`).

    Returns a list of `(position, outer_output, inner_output)` tuples.
    """
    res = []
    for pos, outer_out in enumerate(scan_args.outer_outputs):
        info = scan_args.find_among_fields(outer_out)
        # Strip the "outer_out_" prefix to recover the variant suffix
        # (e.g. "sit_sot"), then look up the matching inner-output field.
        variant = info.name[(info.name.index("_", 6) + 1) :]
        inner_out = getattr(scan_args, "inner_out_{}".format(variant))[info.index]
        if inner_out.owner and isinstance(inner_out.owner.op, RandomVariable):
            res.append((pos, outer_out, inner_out))
    return res
208 |
209 |
def construct_scan(scan_args):
    """Build a `Scan` `Op` from a `ScanArgs` object and apply it to the outer inputs."""
    op = Scan(scan_args.inner_inputs, scan_args.inner_outputs, scan_args.info)
    outputs = op(*scan_args.outer_inputs)

    # `Op.__call__` returns a bare variable when there's a single output;
    # always hand back a list.
    return outputs if isinstance(outputs, list) else [outputs]
218 |
--------------------------------------------------------------------------------
/tests/theano/test_rv.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import theano.tensor as tt
4 |
5 | from theano.gof.op import get_test_value
6 | from theano.gof.graph import inputs as tt_inputs
7 |
8 | from pytest import importorskip
9 |
10 | from symbolic_pymc.theano.opt import FunctionGraph
11 | from symbolic_pymc.theano.random_variables import (
12 | NormalRV,
13 | MvNormalRV,
14 | PolyaGammaRV,
15 | DirichletRV,
16 | sample_dirichlet,
17 | CategoricalRV,
18 | sample_categorical,
19 | )
20 |
21 | from tests.theano import requires_test_values
22 |
23 |
def rv_numpy_tester(rv, *params, size=None):
    """Test for correspondence between `RandomVariable` and NumPy shape and
    broadcast dimensions.
    """
    # Allow purely symbolic inputs (no test values attached).
    tt.config.compute_test_value = "ignore"

    test_rv = rv(*params, size=size)
    param_vals = [get_test_value(p) for p in params]
    size_val = None if size is None else tt.gof.op.get_test_value(size)
    # Draw a reference sample from the NumPy sampler of the same name
    # (e.g. `np.random.normal` when `rv.name == "normal"`).
    test_val = getattr(np.random, rv.name)(*param_vals, size=size_val)
    test_shp = np.shape(test_val)

    # This might be a little too harsh, since purely symbolic `tensor.vector`
    # inputs have no broadcastable information, yet, they can take
    # broadcastable values.
    # E.g.
    #     x_tt = tt.vector('x')
    #     # non-symbolic value is broadcastable!
    #     x_tt.tag.test_value = np.array([5])
    #     # non-symbolic value is not broadcastable.
    #     x_tt.tag.test_value = np.array([5, 4])
    #
    # In the end, there's really no clear way to determine this without full
    # evaluation of a symbolic node, and that mostly defeats the purpose.
    # Unfortunately, this is what PyMC3 resorts to when constructing its
    # `TensorType`s (and shapes).

    # A dimension is marked broadcastable exactly when the sampled length is 1.
    test_bcast = [s == 1 for s in test_shp]
    np.testing.assert_array_equal(test_rv.type.broadcastable, test_bcast)

    # Feed concrete values for the non-constant symbolic parameters.
    eval_args = {
        p: v
        for p, v in zip(params, param_vals)
        if isinstance(p, tt.Variable) and not isinstance(p, tt.Constant)
    }
    # Both the symbolic shape and an actual sample must match NumPy's shape.
    np.testing.assert_array_equal(test_rv.shape.eval(eval_args), test_shp)
    np.testing.assert_array_equal(np.shape(test_rv.eval(eval_args)), test_shp)
61 |
62 |
@requires_test_values
def test_Normal_infer_shape():
    """`NormalRV._infer_shape` should agree with actual test-value shapes."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3
    sd_tt = tt.scalar("sd")
    sd_tt.tag.test_value = 1.0

    cases = [
        ([tt.as_tensor_variable(1.0), sd_tt], None),
        ([tt.as_tensor_variable(1.0), sd_tt], (M_tt,)),
        ([tt.as_tensor_variable(1.0), sd_tt], (2, M_tt)),
        ([tt.zeros((M_tt,)), sd_tt], None),
        ([tt.zeros((M_tt,)), sd_tt], (M_tt,)),
        ([tt.zeros((M_tt,)), sd_tt], (2, M_tt)),
        ([tt.zeros((M_tt,)), tt.ones((M_tt,))], None),
        ([tt.zeros((M_tt,)), tt.ones((M_tt,))], (2, M_tt)),
    ]
    for dist_params, size in cases:
        rv = NormalRV(*dist_params, size=size)
        inferred_shape = tuple(NormalRV._infer_shape(size or (), dist_params, None))
        assert tuple(get_test_value(inferred_shape)) == tuple(get_test_value(rv).shape)
84 |
85 |
@requires_test_values
def test_Normal_ShapeFeature():
    """`ShapeFeature` should correctly infer a `NormalRV`'s shape."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3
    sd_tt = tt.scalar("sd")
    sd_tt.tag.test_value = 1.0

    d_rv = NormalRV(tt.ones((M_tt,)), sd_tt, size=(2, M_tt))
    # Force computation of the test value.
    d_rv.tag.test_value

    fgraph_inputs = [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)]
    fg = FunctionGraph(fgraph_inputs, [d_rv], clone=True, features=[tt.opt.ShapeFeature()])

    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    expected_shape = get_test_value(d_rv).shape
    assert get_test_value(s1) == expected_shape[0]
    assert get_test_value(s2) == expected_shape[1]
106 |
107 |
@requires_test_values
def test_normalrv_vs_numpy():
    """`NormalRV` shapes/broadcasting should match `np.random.normal`'s."""
    cases = [
        ((0.0, 1.0), None),
        ((0.0, 1.0), [3]),
        # Broadcast sd over independent means...
        (([0.0, 1.0, 2.0], 1.0), None),
        (([0.0, 1.0, 2.0], 1.0), [3, 3]),
        (([0], [1]), [1]),
        ((tt.as_tensor_variable([0]), [1]), [1]),
        ((tt.as_tensor_variable([0]), [1]), tt.as_tensor_variable([1])),
    ]
    for dist_params, size in cases:
        rv_numpy_tester(NormalRV, *dist_params, size=size)
118 |
119 |
@requires_test_values
def test_mvnormalrv_vs_numpy():
    """`MvNormalRV` shapes/broadcasting should match NumPy's."""
    # A single-dimension mean/cov under a variety of `size`s.
    for size in (None, [1], [4], [4, 1], [4, 1, 1], [1, 5, 8]):
        rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=size)

    rv_numpy_tester(MvNormalRV, [0, 1, 2], np.diag([1, 1, 1]))
    # Broadcast cov matrix across independent means?
    # Looks like NumPy doesn't support that (and it's probably better off for
    # it).
    # rv_numpy_tester(MvNormalRV, [[0, 1, 2], [4, 5, 6]], np.diag([1, 1, 1]))
133 |
134 |
@requires_test_values
def test_mvnormalrv_ShapeFeature():
    """`ShapeFeature` should correctly infer an `MvNormalRV`'s shape."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 2

    d_rv = MvNormalRV(tt.ones((M_tt,)), tt.eye(M_tt), size=2)

    fgraph_inputs = [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)]
    fg = FunctionGraph(fgraph_inputs, [d_rv], clone=True, features=[tt.opt.ShapeFeature()])

    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    # The leading dimension comes from the fixed `size` argument...
    assert s1.eval() == 2
    # ...while the event dimension should still depend on the symbolic `M`.
    assert fg.memo[M_tt] in tt_inputs([s2])
153 |
154 |
def test_polyagammarv_vs_PolyaGammaRV():
    """Check `PolyaGammaRV` sample shapes (requires `pypolyagamma`)."""

    _ = importorskip("pypolyagamma")

    def assert_all_distinct(samples):
        # Consecutive draws should never be exactly equal.
        assert np.all(np.abs(np.diff(samples.flat)) > 0.0)

    # Sampled values should be scalars
    assert PolyaGammaRV(1.1, -10.5).eval().shape == ()

    assert PolyaGammaRV(1.1, -10.5, size=[1]).eval().shape == (1,)

    smpl = PolyaGammaRV(1.1, -10.5, size=[2, 3]).eval()
    assert smpl.shape == (2, 3)
    assert_all_distinct(smpl)

    # A vector first parameter broadcasts into the sample dimensions.
    smpl = PolyaGammaRV(np.r_[1.1, 3], -10.5).eval()
    assert smpl.shape == (2,)
    assert_all_distinct(smpl)

    smpl = PolyaGammaRV(np.r_[1.1, 3], -10.5, size=(2, 3)).eval()
    assert smpl.shape == (2, 2, 3)
    assert_all_distinct(smpl)
181 |
182 |
def test_dirichletrv_samples():
    """Dirichlet samples should concentrate where the `alpha`s are largest."""

    import theano

    # Skip C compilation for these sampling-only checks.
    theano.config.cxx = ""
    theano.config.mode = "FAST_COMPILE"

    alphas = np.c_[[100, 1, 1], [1, 100, 1], [1, 1, 100]]

    smpl = DirichletRV(alphas).eval()
    assert np.all(np.diag(smpl) >= smpl)

    smpl = DirichletRV(alphas, size=2).eval()
    assert smpl.shape == (2, 3, 3)
    assert all(np.all(np.diag(s) >= s) for s in smpl)

    for idx in range(alphas.shape[0]):
        smpl = DirichletRV(alphas[idx]).eval()
        assert np.all(smpl[idx] > np.delete(smpl, [idx]))

        smpl = DirichletRV(alphas[idx], size=2).eval()
        assert smpl.shape == (2, 3)
        assert all(np.all(s[idx] > np.delete(s, [idx])) for s in smpl)

    rng = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234)))

    alphas = np.array([[1000, 1, 1], [1, 1000, 1], [1, 1, 1000]])

    # `size` prepends sample dimensions to the parameters' shape.
    assert sample_dirichlet(rng, alphas).shape == alphas.shape
    assert sample_dirichlet(rng, alphas, size=10).shape == (10,) + alphas.shape
    assert sample_dirichlet(rng, alphas, size=(10, 2)).shape == (10, 2) + alphas.shape
214 |
215 |
@requires_test_values
def test_dirichlet_infer_shape():
    """`DirichletRV._infer_shape` should agree with actual test-value shapes."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3

    cases = [
        ([tt.ones((M_tt,))], None),
        ([tt.ones((M_tt,))], (M_tt + 1,)),
        ([tt.ones((M_tt,))], (2, M_tt)),
        ([tt.ones((M_tt, M_tt + 1))], None),
        ([tt.ones((M_tt, M_tt + 1))], (M_tt + 2,)),
        ([tt.ones((M_tt, M_tt + 1))], (2, M_tt + 2, M_tt + 3)),
    ]
    for dist_params, size in cases:
        rv = DirichletRV(*dist_params, size=size)
        inferred_shape = tuple(DirichletRV._infer_shape(size or (), dist_params, None))
        assert tuple(get_test_value(inferred_shape)) == tuple(get_test_value(rv).shape)
233 |
234 |
def test_dirichlet_ShapeFeature():
    """Make sure `RandomVariable.infer_shape` works with `ShapeFeature`."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 2
    N_tt = tt.iscalar("N")
    N_tt.tag.test_value = 3

    d_rv = DirichletRV(tt.ones((M_tt, N_tt)), name="Gamma")

    fgraph_inputs = [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)]
    fg = FunctionGraph(fgraph_inputs, [d_rv], clone=True, features=[tt.opt.ShapeFeature()])

    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    # Each inferred dimension should still depend on its symbolic size input.
    assert fg.memo[M_tt] in tt_inputs([s1])
    assert fg.memo[N_tt] in tt_inputs([s2])
255 |
256 |
def test_categoricalrv_samples():
    """Categorical samples should select the index with dominant probability."""
    import theano

    # Skip C compilation for these sampling-only checks.
    theano.config.cxx = ""
    theano.config.mode = "FAST_COMPILE"

    rng_state = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234)))

    p = np.array([[100000, 1, 1], [1, 100000, 1], [1, 1, 100000]])
    # Normalize each row into a probability vector.  `keepdims=True` is
    # required so the sums broadcast along rows; without it, `p.sum(axis=-1)`
    # has shape (3,) and broadcasting divides by *column* sums instead --
    # which was only correct here by coincidence, because this matrix's row
    # and column sums are equal.
    p = p / p.sum(axis=-1, keepdims=True)

    # `size` prepends sample dimensions; the last axis of `p` is consumed.
    assert sample_categorical(rng_state, p).shape == p.shape[:-1]
    assert sample_categorical(rng_state, p, size=10).shape == (10,) + p.shape[:-1]
    assert sample_categorical(rng_state, p, size=(10, 2)).shape == (10, 2) + p.shape[:-1]

    # With such extreme probabilities, each row deterministically yields its
    # dominant index.
    res = CategoricalRV(p)
    assert np.array_equal(res.eval(), np.arange(3))

    res = CategoricalRV(p, size=10)
    exp_res = np.tile(np.arange(3), (10, 1))
    assert np.array_equal(res.eval(), exp_res)

    res = CategoricalRV(p, size=(10, 2))
    exp_res = np.tile(np.arange(3), (10, 2, 1))
    assert np.array_equal(res.eval(), exp_res)
282 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | =======
2 | License
3 | =======
4 |
5 | symbolic-pymc is distributed under the Apache License, Version 2.0
6 |
7 | Copyright (c) 2019 Brandon T. Willard (Academic Free License)
8 | All rights reserved.
9 |
10 | Apache License
11 | Version 2.0, January 2004
12 | http://www.apache.org/licenses/
13 |
14 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
15 |
16 | 1. Definitions.
17 |
18 | "License" shall mean the terms and conditions for use, reproduction,
19 | and distribution as defined by Sections 1 through 9 of this document.
20 |
21 | "Licensor" shall mean the copyright owner or entity authorized by
22 | the copyright owner that is granting the License.
23 |
24 | "Legal Entity" shall mean the union of the acting entity and all
25 | other entities that control, are controlled by, or are under common
26 | control with that entity. For the purposes of this definition,
27 | "control" means (i) the power, direct or indirect, to cause the
28 | direction or management of such entity, whether by contract or
29 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
30 | outstanding shares, or (iii) beneficial ownership of such entity.
31 |
32 | "You" (or "Your") shall mean an individual or Legal Entity
33 | exercising permissions granted by this License.
34 |
35 | "Source" form shall mean the preferred form for making modifications,
36 | including but not limited to software source code, documentation
37 | source, and configuration files.
38 |
39 | "Object" form shall mean any form resulting from mechanical
40 | transformation or translation of a Source form, including but
41 | not limited to compiled object code, generated documentation,
42 | and conversions to other media types.
43 |
44 | "Work" shall mean the work of authorship, whether in Source or
45 | Object form, made available under the License, as indicated by a
46 | copyright notice that is included in or attached to the work
47 | (an example is provided in the Appendix below).
48 |
49 | "Derivative Works" shall mean any work, whether in Source or Object
50 | form, that is based on (or derived from) the Work and for which the
51 | editorial revisions, annotations, elaborations, or other modifications
52 | represent, as a whole, an original work of authorship. For the purposes
53 | of this License, Derivative Works shall not include works that remain
54 | separable from, or merely link (or bind by name) to the interfaces of,
55 | the Work and Derivative Works thereof.
56 |
57 | "Contribution" shall mean any work of authorship, including
58 | the original version of the Work and any modifications or additions
59 | to that Work or Derivative Works thereof, that is intentionally
60 | submitted to Licensor for inclusion in the Work by the copyright owner
61 | or by an individual or Legal Entity authorized to submit on behalf of
62 | the copyright owner. For the purposes of this definition, "submitted"
63 | means any form of electronic, verbal, or written communication sent
64 | to the Licensor or its representatives, including but not limited to
65 | communication on electronic mailing lists, source code control systems,
66 | and issue tracking systems that are managed by, or on behalf of, the
67 | Licensor for the purpose of discussing and improving the Work, but
68 | excluding communication that is conspicuously marked or otherwise
69 | designated in writing by the copyright owner as "Not a Contribution."
70 |
71 | "Contributor" shall mean Licensor and any individual or Legal Entity
72 | on behalf of whom a Contribution has been received by Licensor and
73 | subsequently incorporated within the Work.
74 |
75 | 2. Grant of Copyright License. Subject to the terms and conditions of
76 | this License, each Contributor hereby grants to You a perpetual,
77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78 | copyright license to reproduce, prepare Derivative Works of,
79 | publicly display, publicly perform, sublicense, and distribute the
80 | Work and such Derivative Works in Source or Object form.
81 |
82 | 3. Grant of Patent License. Subject to the terms and conditions of
83 | this License, each Contributor hereby grants to You a perpetual,
84 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
85 | (except as stated in this section) patent license to make, have made,
86 | use, offer to sell, sell, import, and otherwise transfer the Work,
87 | where such license applies only to those patent claims licensable
88 | by such Contributor that are necessarily infringed by their
89 | Contribution(s) alone or by combination of their Contribution(s)
90 | with the Work to which such Contribution(s) was submitted. If You
91 | institute patent litigation against any entity (including a
92 | cross-claim or counterclaim in a lawsuit) alleging that the Work
93 | or a Contribution incorporated within the Work constitutes direct
94 | or contributory patent infringement, then any patent licenses
95 | granted to You under this License for that Work shall terminate
96 | as of the date such litigation is filed.
97 |
98 | 4. Redistribution. You may reproduce and distribute copies of the
99 | Work or Derivative Works thereof in any medium, with or without
100 | modifications, and in Source or Object form, provided that You
101 | meet the following conditions:
102 |
103 | (a) You must give any other recipients of the Work or
104 | Derivative Works a copy of this License; and
105 |
106 | (b) You must cause any modified files to carry prominent notices
107 | stating that You changed the files; and
108 |
109 | (c) You must retain, in the Source form of any Derivative Works
110 | that You distribute, all copyright, patent, trademark, and
111 | attribution notices from the Source form of the Work,
112 | excluding those notices that do not pertain to any part of
113 | the Derivative Works; and
114 |
115 | (d) If the Work includes a "NOTICE" text file as part of its
116 | distribution, then any Derivative Works that You distribute must
117 | include a readable copy of the attribution notices contained
118 | within such NOTICE file, excluding those notices that do not
119 | pertain to any part of the Derivative Works, in at least one
120 | of the following places: within a NOTICE text file distributed
121 | as part of the Derivative Works; within the Source form or
122 | documentation, if provided along with the Derivative Works; or,
123 | within a display generated by the Derivative Works, if and
124 | wherever such third-party notices normally appear. The contents
125 | of the NOTICE file are for informational purposes only and
126 | do not modify the License. You may add Your own attribution
127 | notices within Derivative Works that You distribute, alongside
128 | or as an addendum to the NOTICE text from the Work, provided
129 | that such additional attribution notices cannot be construed
130 | as modifying the License.
131 |
132 | You may add Your own copyright statement to Your modifications and
133 | may provide additional or different license terms and conditions
134 | for use, reproduction, or distribution of Your modifications, or
135 | for any such Derivative Works as a whole, provided Your use,
136 | reproduction, and distribution of the Work otherwise complies with
137 | the conditions stated in this License.
138 |
139 | 5. Submission of Contributions. Unless You explicitly state otherwise,
140 | any Contribution intentionally submitted for inclusion in the Work
141 | by You to the Licensor shall be under the terms and conditions of
142 | this License, without any additional terms or conditions.
143 | Notwithstanding the above, nothing herein shall supersede or modify
144 | the terms of any separate license agreement you may have executed
145 | with Licensor regarding such Contributions.
146 |
147 | 6. Trademarks. This License does not grant permission to use the trade
148 | names, trademarks, service marks, or product names of the Licensor,
149 | except as required for reasonable and customary use in describing the
150 | origin of the Work and reproducing the content of the NOTICE file.
151 |
152 | 7. Disclaimer of Warranty. Unless required by applicable law or
153 | agreed to in writing, Licensor provides the Work (and each
154 | Contributor provides its Contributions) on an "AS IS" BASIS,
155 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
156 | implied, including, without limitation, any warranties or conditions
157 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
158 | PARTICULAR PURPOSE. You are solely responsible for determining the
159 | appropriateness of using or redistributing the Work and assume any
160 | risks associated with Your exercise of permissions under this License.
161 |
162 | 8. Limitation of Liability. In no event and under no legal theory,
163 | whether in tort (including negligence), contract, or otherwise,
164 | unless required by applicable law (such as deliberate and grossly
165 | negligent acts) or agreed to in writing, shall any Contributor be
166 | liable to You for damages, including any direct, indirect, special,
167 | incidental, or consequential damages of any character arising as a
168 | result of this License or out of the use or inability to use the
169 | Work (including but not limited to damages for loss of goodwill,
170 | work stoppage, computer failure or malfunction, or any and all
171 | other commercial damages or losses), even if such Contributor
172 | has been advised of the possibility of such damages.
173 |
174 | 9. Accepting Warranty or Additional Liability. While redistributing
175 | the Work or Derivative Works thereof, You may choose to offer,
176 | and charge a fee for, acceptance of support, warranty, indemnity,
177 | or other liability obligations and/or rights consistent with this
178 | License. However, in accepting such obligations, You may act only
179 | on Your own behalf and on Your sole responsibility, not on behalf
180 | of any other Contributor, and only if You agree to indemnify,
181 | defend, and hold each Contributor harmless for any liability
182 | incurred by, or claims asserted against, such Contributor by reason
183 | of your accepting any such warranty or additional liability.
184 |
185 | END OF TERMS AND CONDITIONS
186 |
187 | APPENDIX: How to apply the Apache License to your work.
188 |
189 | To apply the Apache License to your work, attach the following
190 | boilerplate notice, with the fields enclosed by brackets "[]"
191 | replaced with your own identifying information. (Don't include
192 | the brackets!) The text should be enclosed in the appropriate
193 | comment syntax for the file format. We also recommend that a
194 | file or class name and description of purpose be included on the
195 | same "printed page" as the copyright notice for easier
196 | identification within third-party archives.
197 |
198 | Copyright [yyyy] [name of copyright owner]
199 |
200 | Licensed under the Apache License, Version 2.0 (the "License");
201 | you may not use this file except in compliance with the License.
202 | You may obtain a copy of the License at
203 |
204 | http://www.apache.org/licenses/LICENSE-2.0
205 |
206 | Unless required by applicable law or agreed to in writing, software
207 | distributed under the License is distributed on an "AS IS" BASIS,
208 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
209 | See the License for the specific language governing permissions and
210 | limitations under the License.
--------------------------------------------------------------------------------
/tests/theano/test_relations.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import numpy as np
4 |
5 | import theano
6 | import theano.tensor as tt
7 |
8 | from functools import partial
9 |
10 | from unification import var
11 |
12 | from etuples import etuple, etuplize
13 |
14 | from kanren import run, eq
15 | from kanren.core import lall
16 | from kanren.graph import reduceo, walko, applyo
17 |
18 | from symbolic_pymc.theano.meta import mt
19 | from symbolic_pymc.theano.opt import eval_and_reify_meta
20 | from symbolic_pymc.theano.random_variables import observed, NormalRV, HalfCauchyRV, MvNormalRV
21 |
22 | from symbolic_pymc.relations.theano import non_obs_walko
23 | from symbolic_pymc.relations.theano.conjugates import conjugate
24 | from symbolic_pymc.relations.theano.distributions import scale_loc_transform, constant_neq
25 | from symbolic_pymc.relations.theano.linalg import normal_normal_regression, normal_qr_transform
26 |
27 |
def test_constant_neq():
    """Check `constant_neq`: fail on a matching constant, succeed otherwise."""
    q = var()

    # Grounding `q` to the constant 1 first should make the inequality goal fail.
    assert not run(0, q, eq(q, mt(1)), constant_neq(q, np.array(1.0)))

    # TODO: Were `constant_neq` a true constraint, placing it *before* the
    # unification would work as well:
    # assert not run(0, q, constant_neq(q, np.array(1.0)), eq(q, mt(1)))
    # assert run(0, q, constant_neq(q, np.array(1.0)), eq(q, mt(2))) == (mt(2),)

    # A non-matching constant should pass straight through.
    assert run(0, q, eq(q, mt(2)), constant_neq(q, np.array(1.0))) == (mt(2),)
44 |
45 |
def test_scale_loc_transform():
    """Check that `scale_loc_transform` non-centers every `NormalRV` in a hierarchical model.

    Builds a radon-style hierarchical regression and verifies that each
    ``NormalRV(mu, sigma)`` node (except the observed one) is rewritten into the
    reparameterized form ``mu + sigma * NormalRV(0, 1)``.
    """
    tt.config.compute_test_value = "ignore"

    # Shared RNG state so all random variables draw from the same stream.
    rand_state = theano.shared(np.random.RandomState())
    mu_a = NormalRV(0.0, 100 ** 2, name="mu_a", rng=rand_state)
    sigma_a = HalfCauchyRV(5, name="sigma_a", rng=rand_state)
    mu_b = NormalRV(0.0, 100 ** 2, name="mu_b", rng=rand_state)
    sigma_b = HalfCauchyRV(5, name="sigma_b", rng=rand_state)
    county_idx = np.r_[1, 1, 2, 3]
    # We want the following for a, b:
    # N(m, S) -> m + N(0, 1) * S
    a = NormalRV(mu_a, sigma_a, size=(len(county_idx),), name="a", rng=rand_state)
    b = NormalRV(mu_b, sigma_b, size=(len(county_idx),), name="b", rng=rand_state)
    radon_est = a[county_idx] + b[county_idx] * 7
    eps = HalfCauchyRV(5, name="eps", rng=rand_state)
    radon_like = NormalRV(radon_est, eps, name="radon_like", rng=rand_state)
    radon_like_rv = observed(tt.as_tensor_variable(np.r_[1.0, 2.0, 3.0, 4.0]), radon_like)

    q_lv = var()

    # Apply the transform exhaustively everywhere except the observed node.
    (expr_graph,) = run(
        1, q_lv, non_obs_walko(partial(reduceo, scale_loc_transform), radon_like_rv, q_lv)
    )

    radon_like_rv_opt = expr_graph.reify()

    # The observed wrapper must survive the rewrite untouched.
    assert radon_like_rv_opt.owner.op == observed

    radon_like_opt = radon_like_rv_opt.owner.inputs[1]
    radon_est_opt = radon_like_opt.owner.inputs[0]

    # These should now be `tt.add(mu_*, ...)` outputs.
    a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
    b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    # Make sure NormalRV gets replaced with an addition
    assert a_opt.owner.op == tt.add
    assert b_opt.owner.op == tt.add

    # Make sure the first term in the addition is the old NormalRV mean
    mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_a" == mu_a_opt.name == mu_a.name
    mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_b" == mu_b_opt.name == mu_b.name

    # Make sure the second term in the addition is the standard NormalRV times
    # the old std. dev.
    assert a_opt.owner.inputs[1].owner.op == tt.mul
    assert b_opt.owner.inputs[1].owner.op == tt.mul

    sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_a_opt.owner.op == sigma_a.owner.op
    sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_b_opt.owner.op == sigma_b.owner.op

    # The multiplier must be a standard normal: NormalRV(0, 1).
    a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
    assert a_std_norm_opt.owner.op == NormalRV
    assert a_std_norm_opt.owner.inputs[0].data == 0.0
    assert a_std_norm_opt.owner.inputs[1].data == 1.0
    b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
    assert b_std_norm_opt.owner.op == NormalRV
    assert b_std_norm_opt.owner.inputs[0].data == 0.0
    assert b_std_norm_opt.owner.inputs[1].data == 1.0
108 |
109 |
def test_mvnormal_conjugate():
    """Produce the closed-form posterior for a conjugate MvNormal regression.

    A multivariate normal likelihood with a multivariate normal prior on the
    coefficients should be rewritten by `conjugate` into its closed-form
    posterior; the posterior mean must fit the observation better than the
    prior mean.
    """
    tt.config.cxx = ""
    tt.config.compute_test_value = "ignore"

    prior_mean = tt.vector("a")
    prior_cov = tt.matrix("R")
    design = tt.matrix("F")
    obs_cov = tt.matrix("V")

    prior_mean.tag.test_value = np.r_[1.0, 0.0]
    prior_cov.tag.test_value = np.diag([10.0, 10.0])
    design.tag.test_value = np.c_[-2.0, 1.0]
    obs_cov.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(prior_mean, prior_cov, name="\\beta")

    Y_rv = MvNormalRV(design.dot(beta_rv), obs_cov, name="Y")

    y_obs = tt.as_tensor_variable(np.r_[-3.0])
    y_obs.name = "y"
    obs_node = observed(y_obs, Y_rv)

    q = var()

    (res_graph,) = run(1, q, walko(conjugate, obs_node, q))

    opt_graph_tt = res_graph.eval_obj.reify()

    # Compare sum-of-squared-errors of prior vs. posterior means against the
    # observed point.
    # TODO: Use a better test.
    prior_mean_val = prior_mean.tag.test_value
    design_val = design.tag.test_value
    post_mean_val = opt_graph_tt.owner.inputs[0].tag.test_value
    priorp_err = np.square(y_obs.data - design_val.dot(prior_mean_val)).sum()
    postp_err = np.square(y_obs.data - design_val.dot(post_mean_val)).sum()

    # The two errors must genuinely differ...
    with pytest.raises(AssertionError):
        np.testing.assert_array_equal(priorp_err, postp_err)

    # ...and the posterior error must be strictly smaller.
    np.testing.assert_array_less(postp_err, priorp_err)
161 |
162 |
@pytest.mark.xfail(strict=True)
def test_normal_normal_regression():
    """Match and construct a normal-normal regression via `normal_normal_regression`.

    First runs the relation "backward" to extract `beta` and the argument
    tails from an existing observed regression; then runs it "forward" to
    build a new `Y` from a given design matrix and coefficient RV.

    Marked strict-xfail: currently expected to fail (see TODO below).
    """
    tt.config.compute_test_value = "ignore"
    theano.config.cxx = ""
    np.random.seed(9283)

    # N observations, M regression coefficients.
    N = 10
    M = 3
    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    X_tt = tt.matrix("X")
    V_tt = tt.vector("V")

    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(10), X, X * X]
    X_tt.tag.test_value = X
    V_tt.tag.test_value = np.ones(N)

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    #
    # Use the relation with identify/match `Y`, `X` and `beta`.
    #
    y_args_tail_lv, b_args_tail_lv = var(), var()
    beta_lv = var()

    y_args_lv, y_lv, Y_lv, X_lv = var(), var(), var(), var()
    # Destructure the observed node into its arguments, then match the
    # regression pattern inside it.
    (res,) = run(
        1,
        (beta_lv, y_args_tail_lv, b_args_tail_lv),
        applyo(mt.observed, y_args_lv, y_obs_rv),
        eq(y_args_lv, (y_lv, Y_lv)),
        normal_normal_regression(Y_lv, X_lv, beta_lv, y_args_tail_lv, b_args_tail_lv),
    )

    # TODO FIXME: This would work if non-op parameters (e.g. names) were covered by
    # `operator`/`car`. See `TheanoMetaOperator`.
    assert res[0].eval_obj.obj == beta_rv
    assert res[0] == etuplize(beta_rv)
    assert res[1] == etuplize(Y_rv)[2:]
    assert res[2] == etuplize(beta_rv)[1:]

    #
    # Use the relation with to produce `Y` from given `X` and `beta`.
    #
    X_new_mt = mt(tt.eye(N, M))
    beta_new_mt = mt(NormalRV(0, 1, size=M))
    Y_args_cdr_mt = etuplize(Y_rv)[2:]
    Y_lv = var()
    (res,) = run(1, Y_lv, normal_normal_regression(Y_lv, X_new_mt, beta_new_mt, Y_args_cdr_mt))
    Y_out_mt = res.eval_obj

    # Build the expected result by hand and compare.
    Y_new_mt = etuple(mt.NormalRV, mt.dot(X_new_mt, beta_new_mt)) + Y_args_cdr_mt
    Y_new_mt = Y_new_mt.eval_obj

    assert Y_out_mt == Y_new_mt
230 |
231 |
@pytest.mark.xfail(strict=True)
def test_normal_qr_transform():
    """Check the QR reparameterization produced by `normal_qr_transform`.

    The relation should yield old-to-new node pairs in which the design
    matrix is replaced by its Q factor and `beta` by `inv(R) @ beta_tilde`,
    with `beta_tilde` a standard normal.

    Marked strict-xfail: currently expected to fail.
    """
    np.random.seed(9283)

    # N observations, M regression coefficients.
    N = 10
    M = 3
    X_tt = tt.matrix("X")
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(10), X, X * X]
    X_tt.tag.test_value = X

    V_tt = tt.vector("V")
    V_tt.tag.test_value = np.ones(N)

    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    (res,) = run(1, var("q"), normal_qr_transform(y_obs_rv, var("q")))

    # `res` is a collection of (old meta graph, new meta graph) pairs.
    new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v) for k, v in res}

    # Make sure the old-to-new `beta` conversion is correct.
    t_Q, t_R = np.linalg.qr(X)
    Coef_new_value = np.linalg.inv(t_R)
    np.testing.assert_array_almost_equal(
        Coef_new_value, new_node[beta_rv].owner.inputs[0].tag.test_value
    )

    # Make sure the new `beta_tilde` has the right standard normal distribution
    # parameters.
    beta_tilde_node = new_node[beta_rv].owner.inputs[1]
    np.testing.assert_array_almost_equal(
        np.r_[0.0, 0.0, 0.0], beta_tilde_node.owner.inputs[0].tag.test_value
    )
    np.testing.assert_array_almost_equal(
        np.r_[1.0, 1.0, 1.0], beta_tilde_node.owner.inputs[1].tag.test_value
    )

    # The transformed likelihood must reference the same `beta_tilde` node
    # and use the Q factor as its new design matrix.
    Y_new = new_node[y_obs_rv].owner.inputs[1]
    assert Y_new.owner.inputs[0].owner.inputs[1] == beta_tilde_node

    np.testing.assert_array_almost_equal(t_Q, Y_new.owner.inputs[0].owner.inputs[0].tag.test_value)
287 |
288 |
def test_basic_scan_transform():
    """Rewrite the inner step of a Theano `scan` from `2 * x` to `5 * x`."""

    def step_double(prev):
        return 2 * prev

    init_state = tt.scalar("state")
    num_steps = tt.iscalar("nsteps")
    scan_out, _ = theano.scan(
        step_double, [], init_state, [], n_steps=num_steps, truncate_gradient=-1, go_backwards=False
    )

    # Sanity check: the original scan yields successive powers of two.
    assert np.array_equal(
        scan_out.eval({init_state: 1.0, num_steps: 4}), np.r_[2.0, 4.0, 8.0, 16.0]
    )

    def swap_coeff(in_expr, out_expr):
        """Equate `2 * x` with `5 * x` in a Theano `scan`.

        I.e. from left-to-right, replace `2 * x[t-1]` with `5 * x[t-1]`.
        """
        x_lv = var()
        ins_lv, info_lv = var(), var()
        return lall(
            eq(in_expr, mt.Scan(ins_lv, [mt.mul(2, x_lv)], info_lv)),
            eq(out_expr, mt.Scan(ins_lv, [mt.mul(5, x_lv)], info_lv)),
        )

    res_lv = var()
    (scan_mt,) = run(1, res_lv, walko(partial(reduceo, swap_coeff), scan_out, res_lv))

    new_scan = scan_mt.eval_obj.reify()

    # The rewritten graph is a distinct object...
    assert new_scan != scan_out

    # ...and now yields successive powers of five.
    assert np.array_equal(
        new_scan.eval({init_state: 1.0, num_steps: 4}), np.r_[5.0, 25.0, 125.0, 625.0]
    )
321 |
--------------------------------------------------------------------------------