├── .github
└── workflows
│ └── ci-build.yml
├── .gitignore
├── .readthedocs.yml
├── .style.yapf
├── .travis.yml
├── AUTHORS
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── colabs
└── Tensor_Networks_in_Neural_Networks.ipynb
├── conftest.py
├── dev_tools
└── Dockerfile
├── docs
├── Makefile
├── _static
│ ├── basic_mps
│ │ ├── mps_basic_1.png
│ │ ├── mps_basic_2.png
│ │ ├── mps_basic_4.png
│ │ ├── tensor_1.png
│ │ ├── tensor_2.png
│ │ ├── tensor_3.png
│ │ └── tensor_4.png
│ ├── blocktensor.png
│ ├── contractors.png
│ ├── dangling_contract.png
│ ├── replicate_nodes_subgraph.png
│ ├── subgraph_contract.png
│ ├── svd.png
│ └── tensornetwork_logo.jpg
├── basic_mps.rst
├── block_sparse.rst
├── block_sparse_tutorial.rst
├── conf.py
├── contractors.rst
├── contributing.rst
├── copy_contract.rst
├── edges.rst
├── index.rst
├── make.bat
├── mps.rst
├── ncon.rst
├── network.rst
├── node_spliting.rst
├── nodes.rst
├── quantum_circuit.rst
├── requirements_docs.txt
├── stubs
│ ├── tensornetwork.BaseCharge.rst
│ ├── tensornetwork.BaseNode.rst
│ ├── tensornetwork.BlockSparseTensor.rst
│ ├── tensornetwork.ChargeArray.rst
│ ├── tensornetwork.CopyNode.rst
│ ├── tensornetwork.Edge.rst
│ ├── tensornetwork.FiniteMPS.rst
│ ├── tensornetwork.Index.rst
│ ├── tensornetwork.InfiniteMPS.rst
│ ├── tensornetwork.Node.rst
│ ├── tensornetwork.NodeCollection.rst
│ ├── tensornetwork.U1Charge.rst
│ ├── tensornetwork.Z2Charge.rst
│ ├── tensornetwork.ZNCharge.rst
│ ├── tensornetwork.contractors.auto.rst
│ ├── tensornetwork.contractors.branch.rst
│ ├── tensornetwork.contractors.bucket.rst
│ ├── tensornetwork.contractors.custom.rst
│ ├── tensornetwork.contractors.greedy.rst
│ ├── tensornetwork.contractors.optimal.rst
│ ├── tensornetwork.tn_keras.layers.Conv2DMPO.rst
│ ├── tensornetwork.tn_keras.layers.DenseCondenser.rst
│ ├── tensornetwork.tn_keras.layers.DenseDecomp.rst
│ ├── tensornetwork.tn_keras.layers.DenseEntangler.rst
│ ├── tensornetwork.tn_keras.layers.DenseExpander.rst
│ └── tensornetwork.tn_keras.layers.DenseMPO.rst
├── tn_keras.rst
└── tutorial.rst
├── examples
├── __init__.py
├── custom_path_solvers
│ └── example.py
├── dmrg
│ └── symmetric_dmrg.py
├── fft
│ ├── __init__.py
│ ├── fft.py
│ └── fft_test.py
├── sat
│ ├── SATTutorial.ipynb
│ ├── __init__.py
│ ├── sat_tensornetwork.py
│ └── sat_tensornetwork_test.py
├── simple_mera
│ ├── simple_mera.py
│ └── simple_mera_test.py
└── wavefunctions
│ ├── evolution_example.py
│ ├── trotter.py
│ ├── wavefunctions.py
│ └── wavefunctions_test.py
├── pylintrc
├── requirements.txt
├── requirements_travis.txt
├── setup.py
└── tensornetwork
├── __init__.py
├── backend_contextmanager.py
├── backends
├── __init__.py
├── abstract_backend.py
├── backend_factory.py
├── backend_test.py
├── decorators.py
├── decorators_test.py
├── jax
│ ├── __init__.py
│ ├── jax_backend.py
│ ├── jax_backend_test.py
│ ├── jitted_functions.py
│ └── jitted_functions_test.py
├── numpy
│ ├── __init__.py
│ ├── decompositions.py
│ ├── decompositions_test.py
│ ├── numpy_backend.py
│ └── numpy_backend_test.py
├── pytorch
│ ├── __init__.py
│ ├── decompositions.py
│ ├── decompositions_test.py
│ ├── pytorch_backend.py
│ ├── pytorch_backend_test.py
│ └── pytorch_tensornetwork_test.py
├── symmetric
│ ├── __init__.py
│ ├── decompositions.py
│ ├── decompositions_test.py
│ ├── symmetric_backend.py
│ └── symmetric_backend_test.py
└── tensorflow
│ ├── __init__.py
│ ├── decompositions.py
│ ├── decompositions_test.py
│ ├── tensordot2.py
│ ├── tensordot2_test.py
│ ├── tensorflow_backend.py
│ ├── tensorflow_backend_test.py
│ └── tensorflow_tensornetwork_test.py
├── block_sparse
├── __init__.py
├── blocksparse_utils.py
├── blocksparse_utils_test.py
├── blocksparsetensor.py
├── blocksparsetensor_test.py
├── caching.py
├── caching_test.py
├── charge.py
├── charge_test.py
├── index.py
├── index_test.py
├── initialization.py
├── initialization_test.py
├── linalg.py
├── linalg_test.py
├── sizetypes.py
├── tensordot_test.py
├── utils.py
└── utils_test.py
├── component_factory.py
├── config.py
├── contractors
├── __init__.py
├── bucket_contractor.py
├── bucket_contractor_test.py
├── custom_path_solvers
│ ├── __init__.py
│ ├── nconinterface.py
│ ├── nconinterface_test.py
│ ├── pathsolvers.py
│ └── pathsolvers_test.py
└── opt_einsum_paths
│ ├── __init__.py
│ ├── path_calculation_test.py
│ ├── path_contractors.py
│ ├── path_contractors_node_test.py
│ └── utils.py
├── linalg
├── __init__.py
├── initialization.py
├── krylov.py
├── linalg.py
├── node_linalg.py
├── operations.py
└── tests
│ ├── initialization_test.py
│ ├── node_linalg_test.py
│ ├── test_krylov.py
│ ├── test_linalg.py
│ └── test_operations.py
├── matrixproductstates
├── __init__.py
├── base_mps.py
├── base_mps_test.py
├── dmrg.py
├── dmrg_test.py
├── finite_mps.py
├── finite_mps_test.py
├── infinite_mps.py
├── infinite_mps_test.py
├── mpo.py
└── mpo_test.py
├── ncon_interface.py
├── network_components.py
├── network_operations.py
├── ops.py
├── quantum
├── quantum.py
└── quantum_test.py
├── tensor.py
├── tests
├── __init__.py
├── backend_contextmanager_test.py
├── ncon_interface_test.py
├── network_components_free_symmetric_test.py
├── network_components_free_test.py
├── network_operations_symmetric_test.py
├── network_operations_test.py
├── network_test.py
├── serialize_test.py
├── split_node_symmetric_test.py
├── split_node_test.py
├── tensor_test.py
├── tensornetwork_symmetric_test.py
├── tensornetwork_test.py
└── testing_utils.py
├── tn_keras
├── README.md
├── __init__.py
├── colabs
│ └── TN_Keras.ipynb
├── condenser.py
├── conv2d_mpo.py
├── dense.py
├── entangler.py
├── expander.py
├── images
│ ├── condenser.png
│ ├── decomp.png
│ ├── entangler.png
│ ├── expander.png
│ ├── mpo.png
│ └── staircase_entangler.png
├── layers.py
├── mpo.py
├── test_conv_layer.py
├── test_layer.py
└── test_mpo.py
├── utils.py
├── version.py
└── visualization
├── __init__.py
├── graphviz.py
└── graphviz_test.py
/.github/workflows/ci-build.yml:
--------------------------------------------------------------------------------
1 | name: Continuous Integration
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 |
9 | jobs:
10 | build:
11 |
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | python-version: ['3.7', '3.8']
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | - name: Set up Python ${{ matrix.python-version }}
20 | uses: actions/setup-python@v2
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install -r requirements.txt
27 | pip install -r requirements_travis.txt
28 | pip install pylint pytest-cov codecov
29 | - name: Linting with pylint
30 | run: |
31 | pylint tensornetwork
32 | - name: Test with pytest
33 | run: |
34 | pytest --cov=./
35 | - name: Uploading coverage report
36 | run: |
37 | codecov
38 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Distribution / packaging
9 | bin/
10 | build/
11 | develop-eggs/
12 | dist/
13 | eggs/
14 | lib/
15 | lib64/
16 | parts/
17 | sdist/
18 | var/
19 | .pytype/
20 | .pytest_cache/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
24 | docs/clean
25 |
26 | # Installer logs
27 | pip-log.txt
28 | pip-delete-this-directory.txt
29 |
30 | # Unit test / coverage reports
31 | .tox/
32 | .coverage
33 | .cache
34 | nosetests.xml
35 | coverage.xml
36 |
37 | # Mac OS
38 | .DS_Store
39 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Build documentation in the docs/ directory with Sphinx
8 | sphinx:
9 | configuration: docs/conf.py
10 |
11 | # Optionally set the version of Python and requirements required to build your docs
12 | python:
13 | version: 3.7
14 | install:
15 | - requirements: requirements.txt
16 | - requirements: docs/requirements_docs.txt
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | # TensorNetwork uses the yapf style
3 | based_on_style = yapf
4 | indent_width = 2
5 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: xenial
2 | language: python
3 | cache:
4 | directories:
5 | - $HOME/build/google/TensorNetwork/.pytype
6 | - $HOME/.cache/pip
7 | python:
8 | - "3.6"
9 | - "3.7"
10 | - "3.8"
11 | # command to install dependencies
12 | install:
13 | - pip install -r requirements.txt
14 | - pip install -r requirements_travis.txt
15 | - pip install pylint pytest-cov codecov
16 | # command to run tests
17 | script:
18 | - pylint ./tensornetwork/
19 | - pytest --cov=./
20 | - python3 setup.py build
21 |
22 | after_success:
23 | - codecov
24 |
--------------------------------------------------------------------------------
/AUTHORS:
--------------------------------------------------------------------------------
1 | # This is the list of TensorNetwork authors for copyright purposes.
2 | #
3 | # This does not necessarily list everyone who has contributed code, since in
4 | # some cases, their employer may be the copyright holder. To see the full list
5 | # of contributors, see the revision history in source control.
6 |
7 | Google LLC
8 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 |
6 | ## Contributor License Agreement
7 |
8 | Contributions to this project must be accompanied by a Contributor License
9 | Agreement. You (or your employer) retain the copyright to your contribution;
10 | this simply gives us permission to use and redistribute your contributions as
11 | part of the project. Head over to <https://cla.developers.google.com/> to see
12 | your current agreements on file or to sign a new one.
13 |
14 | You generally only need to submit a CLA once, so if you've already submitted one
15 | (even if it was for a different project), you probably don't need to do it
16 | again.
17 |
18 | ## Code reviews
19 |
20 | All submissions, including submissions by project members, require review. We
21 | use GitHub pull requests for this purpose. Consult
22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
23 | information on using pull requests.
24 |
25 | ## Community Guidelines
26 |
27 | This project follows [Google's Open Source Community
28 | Guidelines](https://opensource.google.com/conduct/).
29 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | recursive-exclude examples *
3 | recursive-exclude experiments *
4 | global-exclude */*_test.py
5 | global-exclude tensornetwork/tests/*
6 | prune tests
7 |
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import pytest
16 | import jax
17 | import tensornetwork
18 | import tensorflow as tf
19 |
20 |
21 | @pytest.fixture(
22 | name="backend", params=["numpy", "tensorflow", "jax", "pytorch"])
23 | def backend_fixture(request):
24 | return request.param
25 |
26 |
27 | @pytest.fixture(autouse=True)
28 | def reset_default_backend():
29 | tensornetwork.set_default_backend("numpy")
30 | yield
31 | tensornetwork.set_default_backend("numpy")
32 |
33 |
34 | @pytest.fixture(autouse=True)
35 | def enable_jax_64():
36 | jax.config.update("jax_enable_x64", True)
37 | yield
38 | jax.config.update("jax_enable_x64", True)
39 |
40 |
41 | @pytest.fixture(autouse=True)
42 | def tf_enable_v2_behaviour():
43 | tf.compat.v1.enable_v2_behavior()
44 | yield
45 | tf.compat.v1.enable_v2_behavior()
46 |
--------------------------------------------------------------------------------
/dev_tools/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu
2 |
3 | # Install basic tools.
4 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
5 | python3-pip python3-tk git emacs vim locales
6 |
7 | # Configure UTF-8 encoding.
8 | RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
9 | ENV LANG en_US.UTF-8
10 | ENV LANGUAGE en_US:en
11 | ENV LC_ALL en_US.UTF-8
12 |
13 | # Make python3 default
14 | RUN rm -f /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python
15 |
16 | # Install Tensor Network with the needed Python libraries.
17 | RUN pip3 install tensornetwork
18 |
19 | # Clone the repository so development work can be done in the container.
20 | RUN git clone https://github.com/google/TensorNetwork
21 |
22 | WORKDIR /TensorNetwork/examples
23 |
24 | EXPOSE 8888
25 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/_static/basic_mps/mps_basic_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/mps_basic_1.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/mps_basic_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/mps_basic_2.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/mps_basic_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/mps_basic_4.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/tensor_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/tensor_1.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/tensor_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/tensor_2.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/tensor_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/tensor_3.png
--------------------------------------------------------------------------------
/docs/_static/basic_mps/tensor_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/basic_mps/tensor_4.png
--------------------------------------------------------------------------------
/docs/_static/blocktensor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/blocktensor.png
--------------------------------------------------------------------------------
/docs/_static/contractors.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/contractors.png
--------------------------------------------------------------------------------
/docs/_static/dangling_contract.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/dangling_contract.png
--------------------------------------------------------------------------------
/docs/_static/replicate_nodes_subgraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/replicate_nodes_subgraph.png
--------------------------------------------------------------------------------
/docs/_static/subgraph_contract.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/subgraph_contract.png
--------------------------------------------------------------------------------
/docs/_static/svd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/svd.png
--------------------------------------------------------------------------------
/docs/_static/tensornetwork_logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/docs/_static/tensornetwork_logo.jpg
--------------------------------------------------------------------------------
/docs/block_sparse.rst:
--------------------------------------------------------------------------------
1 | Block Sparse
2 | ---------------------
3 |
4 | .. autosummary::
5 | :toctree: stubs
6 |
7 | tensornetwork.BlockSparseTensor
8 | tensornetwork.ChargeArray
9 | tensornetwork.Index
10 | tensornetwork.U1Charge
11 | tensornetwork.BaseCharge
12 | tensornetwork.Z2Charge
13 | tensornetwork.ZNCharge
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # http://www.sphinx-doc.org/en/master/config
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath('../'))
16 |
17 | # -- Project information -----------------------------------------------------
18 |
19 | project = 'TensorNetwork'
20 | copyright = '2019, The TensorNetwork Authors'
21 | author = 'The TensorNetwork Authors'
22 |
23 | # -- General configuration ---------------------------------------------------
24 |
25 | # Add any Sphinx extension module names here, as strings. They can be
26 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
27 | # ones.
28 | extensions = [
29 | 'sphinx.ext.autodoc',
30 | 'sphinx.ext.napoleon',
31 | 'sphinx.ext.autosummary',
32 | ]
33 |
34 | # Add any paths that contain templates here, relative to this directory.
35 | templates_path = ['_templates']
36 |
37 | # The master toctree document.
38 | master_doc = 'index'
39 |
40 | # List of patterns, relative to source directory, that match files and
41 | # directories to ignore when looking for source files.
42 | # This pattern also affects html_static_path and html_extra_path.
43 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
44 |
45 | autosummary_generate = True
46 | autosummary_generate_overwrite = False
47 | napoleon_use_rtype = False
48 | # -- Options for HTML output -------------------------------------------------
49 |
50 | # The theme to use for HTML and HTML Help pages. See the documentation for
51 | # a list of builtin themes.
52 | #
53 | html_theme = 'sphinx_rtd_theme'
54 | html_theme_options = {
55 | 'logo_only': True,
56 | }
57 | html_logo = '_static/tensornetwork_logo.jpg'
58 | master_doc = 'index'
59 | default_role = 'py:obj'
60 | autodoc_default_flags = ['members']
61 | autosummary_generate = True
62 |
--------------------------------------------------------------------------------
/docs/contractors.rst:
--------------------------------------------------------------------------------
1 | Contractors
2 | ------------------
3 |
4 | .. autosummary::
5 | :toctree: stubs
6 |
7 | tensornetwork.contractors.greedy
8 | tensornetwork.contractors.bucket
9 | tensornetwork.contractors.branch
10 | tensornetwork.contractors.optimal
11 | tensornetwork.contractors.auto
12 | tensornetwork.contractors.custom
13 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | How to Contribute
2 | =================
3 |
4 | We'd love to accept your patches and contributions to this project.
5 | There are just a few small guidelines you need to follow.
6 |
7 | Contributor License Agreement
8 | -----------------------------
9 |
10 | Contributions to this project must be accompanied by a Contributor
11 | License Agreement. You (or your employer) retain the copyright to your
12 | contribution; this simply gives us permission to use and redistribute
13 | your contributions as part of the project. Head over to
14 | https://cla.developers.google.com/ to see your current agreements on
15 | file or to sign a new one.
16 |
17 | You generally only need to submit a CLA once, so if you've already
18 | submitted one (even if it was for a different project), you probably
19 | don't need to do it again.
20 |
21 | Code reviews
22 | ------------
23 |
24 | All submissions, including submissions by project members, require
25 | review. We use GitHub pull requests for this purpose. Consult `GitHub
26 | Help <https://help.github.com/articles/about-pull-requests/>`__ for more
27 | information on using pull requests.
28 |
29 | Community Guidelines
30 | --------------------
31 |
32 | This project follows `Google's Open Source Community
33 | Guidelines <https://opensource.google.com/conduct/>`__.
34 |
--------------------------------------------------------------------------------
/docs/copy_contract.rst:
--------------------------------------------------------------------------------
1 | Copying and Contracting
2 | ========================
3 |
4 | Automated contraction
5 | ---------------------
6 | Finding good contraction paths for arbitrary tensor networks can be challenging. In fact, finding the optimal contraction path is proven to be NP-hard. While we can't crack NP-hard problems, we can get decent results for small-sized systems.
7 |
8 | Using a contraction algorithm is very easy.
9 |
10 | .. code-block:: python3
11 |
12 | result = tn.contractors.auto([a, b, c, d, e, f])
13 |
14 | .. figure:: _static/contractors.png
15 |
16 | We have several contraction algorithms available as of April 2020.
17 |
18 | - `optimal`: Find the true optimal path via brute force. It can be extremely slow for more than ~10 nodes.
19 | - `greedy`: Continuously do the cheapest contraction possible. Works well as a default for networks with many nodes.
20 | - `branch`: Brute-force search, but only check the top `n` possibilities per step.
21 | - `auto`: Automatically decide which of the above 3 algorithms to use.
22 |
23 | When contracting a network with more than one dangling leg, you must specify the output order of the dangling legs.
24 |
25 | .. code-block:: python3
26 |
27 | result = tn.contractors.auto(
28 | [a, b, c, d, e, f],
29 | output_edge_order=[x, y, z]
30 | )
31 |
32 | .. figure:: _static/dangling_contract.png
33 |
34 | If you do not care about the final output order (for instance, if you are only doing a partial network contraction and the intermediate order doesn't matter), then you can set `ignore_edge_order=True` and you won't need to supply an `output_edge_order`.
35 |
36 | Contracting subgraph
37 | ---------------------
38 | There are many instances when you want to contract only a subset of your network. Perhaps you know good intermediate states, but not how to get there efficiently. You can still very easily get a good contraction order by using the subnetwork contraction feature of the `contractors`.
39 |
40 | .. code-block:: python3
41 |
42 | # Say all of these nodes are connected somehow.
43 | a, b, c, d, e, f = build_your_network(...)
44 | tmp1 = tn.contractors.optimal([a, b, c], ignore_edge_order=True)
45 | tmp2 = tn.contractors.optimal([d, e, f], ignore_edge_order=True)
46 | result = tmp1 @ tmp2
47 |
48 |
49 | .. figure:: _static/subgraph_contract.png
50 |
51 | Reusing networks
52 | ------------------
53 | When building tensor networks, it's very common to want to use a single tensor network for many purposes. For example, a user may want to use an MPS to calculate an inner product with some product state as well as its partition function. `Nodes` within `tensornetwork` are mutable, so to preserve the original MPS connectivity, you'd need to copy the nodes before contraction.
54 |
55 | .. code-block:: python3
56 |
57 | # Calculate the inner product of two MPS/Product state networks.
58 | def inner_product(x: List[tn.Node], y: List[tn.Node]) -> tn.Node:
59 | for a, b in zip(x, y):
60 | # Assume all of the dangling edges are mapped to the name "dangling"
61 | tn.connect(a["dangling"], b["dangling"])
62 | return tn.contractors.greedy(x + y)
63 |
64 | # Build your original MPS
65 | mps_nodes = build_your_mps(...)
66 |
67 | # Calculate the inner product with a product_state
68 | product_state_nodes = build_your_product_state(...)
69 | mps_copy = tn.replicate_nodes(mps_nodes)
70 | result1 = inner_product(mps_copy, product_state_nodes)
71 |
72 | # Calculate the partition function.
73 | mps_copy = tn.replicate_nodes(mps_nodes)
74 | mps_conj_copy = tn.replicate_nodes(mps_nodes, conjugate=True)
75 | result2 = inner_product(mps_copy, mps_conj_copy)
76 |
77 | If you need more advanced access to your copied network, `tn.copy` will return two dictionaries mapping your original nodes and edges to the newly copied versions.
78 |
79 |
80 |
81 | Copying subnetwork
82 | ------------------
83 |
84 | When copying a subgraph of a network, all of the connections between nodes are preserved in the copied network. For any standard edge between two nodes where one node is copied and the other isn't, that edge becomes a dangling edge on the same axis of the copied node.
85 |
86 | .. code-block:: python3
87 |
88 | # Say all of these nodes are connected somehow.
89 | a, b, c, d, e, f = build_your_network(...)
90 | new_a, new_b, new_c = tn.replicate_nodes([a, b, c])
91 |
92 | .. figure:: _static/replicate_nodes_subgraph.png
--------------------------------------------------------------------------------
/docs/edges.rst:
--------------------------------------------------------------------------------
1 | Edges
2 | ------------------
3 |
4 | .. autosummary::
5 | :toctree: stubs
6 |
7 | tensornetwork.Edge
8 |
9 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | TensorNetwork reference documentation
2 | =========================================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: Tutorials
7 |
8 | tutorial
9 | copy_contract
10 | quantum_circuit
11 | block_sparse_tutorial
12 | node_spliting
13 | basic_mps
14 |
15 | .. toctree::
16 | :maxdepth: 1
17 | :caption: API Reference
18 |
19 | contractors
20 | nodes
21 | edges
22 | ncon
23 | mps
24 | network
25 | block_sparse
26 | tn_keras
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 | :caption: Developer Documentation
31 |
32 | contributing
33 |
34 | Indices and tables
35 | ==================
36 |
37 | * :ref:`genindex`
38 | * :ref:`search`
39 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/mps.rst:
--------------------------------------------------------------------------------
1 | Matrix Product States
2 | ---------------------
3 |
4 | .. autosummary::
5 | :toctree: stubs
6 |
7 | tensornetwork.FiniteMPS
8 | tensornetwork.InfiniteMPS
9 |
10 |
--------------------------------------------------------------------------------
/docs/ncon.rst:
--------------------------------------------------------------------------------
1 | Ncon
2 | ------------------
3 |
4 | .. autofunction:: tensornetwork.ncon
5 |
--------------------------------------------------------------------------------
/docs/network.rst:
--------------------------------------------------------------------------------
1 | Common Functions
2 | ------------------
3 |
4 | .. autofunction:: tensornetwork.norm
5 | .. autofunction:: tensornetwork.conj
6 | .. autofunction:: tensornetwork.copy
7 | .. autofunction:: tensornetwork.transpose
8 | .. autofunction:: tensornetwork.remove_node
9 | .. autofunction:: tensornetwork.split_node
10 | .. autofunction:: tensornetwork.split_node_qr
11 | .. autofunction:: tensornetwork.split_node_rq
12 | .. autofunction:: tensornetwork.split_node_full_svd
13 | .. autofunction:: tensornetwork.split_edge
14 | .. autofunction:: tensornetwork.slice_edge
15 | .. autofunction:: tensornetwork.reachable
16 | .. autofunction:: tensornetwork.check_connected
17 | .. autofunction:: tensornetwork.check_correct
18 | .. autofunction:: tensornetwork.get_all_nodes
19 | .. autofunction:: tensornetwork.get_all_edges
20 | .. autofunction:: tensornetwork.get_subgraph_dangling
21 | .. autofunction:: tensornetwork.reduced_density
22 | .. autofunction:: tensornetwork.switch_backend
23 | .. autofunction:: tensornetwork.contract_trace_edges
24 | .. autofunction:: tensornetwork.contract
25 | .. autofunction:: tensornetwork.contract_copy_node
26 | .. autofunction:: tensornetwork.contract_between
27 | .. autofunction:: tensornetwork.outer_product
28 | .. autofunction:: tensornetwork.outer_product_final_nodes
29 | .. autofunction:: tensornetwork.contract_parallel
30 | .. autofunction:: tensornetwork.flatten_edges
31 | .. autofunction:: tensornetwork.get_all_nondangling
32 | .. autofunction:: tensornetwork.get_all_dangling
33 | .. autofunction:: tensornetwork.flatten_all_edges
34 | .. autofunction:: tensornetwork.flatten_edges_between
35 | .. autofunction:: tensornetwork.get_parallel_edges
36 | .. autofunction:: tensornetwork.get_shared_edges
37 | .. autofunction:: tensornetwork.get_neighbors
38 | .. autofunction:: tensornetwork.kron
--------------------------------------------------------------------------------
/docs/node_spliting.rst:
--------------------------------------------------------------------------------
1 | Tensor Decompositions and Node Splitting
2 | ========================================
3 |
4 |
5 | Singular Value Decomposition (SVD)
6 | ----------------------------------
7 | While SVD is really cool, describing SVD is out of scope of this tutorial. Instead, we will describe how SVD is used in TensorNetwork.
8 |
9 | SVD and other decompositions like QR are used to "split a node". That is, it takes a single node (of arbitrary order!) and splits it into several new nodes. The main method for this is `tn.split_node`
10 |
11 | .. code-block:: python3
12 |
13 | u_prime, vh_prime, _ = tn.split_node(m, left_edges=[m[0]], right_edges=[m[1]])
14 |
15 | Split node works by taking the SVD of the matrix :math:`M` and then splitting the :math:`S` node
16 | via a single square root operation. The two :math:`\sqrt{S}` nodes are then merged into :math:`U` and :math:`V*` respectively. The final two nodes are returned. The last axis of `u` and the first axis of `v` are connected together.
17 |
18 | .. figure:: _static/svd.png
19 |
20 | Compression
21 | -----------
22 | One of the main advantages of using SVD is it allows you to have a
23 | very nice mathematical way of compressing tensors. That is, you can preserve the maximum L2 norm
24 | by dropping the lowest singular values during compression. You can achieve this with either the `max_singular_values` or `max_truncation_error` arguments.
25 |
26 | .. code-block:: python3
27 |
28 | u_prime, vh_prime, truncation_error = tn.split_node(..., max_singular_values=10)
29 | # Truncation error is a tensor (not a Node!) of the dropped singular values.
30 | print(truncation_error) # Example: array([2.7, 1.31, 0.13, ...])
31 |
32 |
33 | Similar methods include:
34 |
35 | - `split_node_full_svd`
36 | - `split_node_qr`
37 | - `split_node_rq`
--------------------------------------------------------------------------------
/docs/nodes.rst:
--------------------------------------------------------------------------------
1 | Nodes
2 | ------------------
3 |
4 | .. autosummary::
5 | :toctree: stubs
6 |
7 | tensornetwork.Node
8 | tensornetwork.CopyNode
9 | tensornetwork.AbstractNode
10 | tensornetwork.NodeCollection
11 |
12 |
--------------------------------------------------------------------------------
/docs/quantum_circuit.rst:
--------------------------------------------------------------------------------
1 | Simulating a Quantum Circuit
2 | ============================
3 | Quantum circuit simulation is one of the main use cases for tensor networks. This is because all quantum circuit diagrams exactly map to tensor networks! After the tensor network is fully contracted, the resulting tensor will be equal to the wavefunction of the quantum computer just before measurement.
4 |
5 | Gates and States
6 | ----------------
7 |
8 | To get started, let's create a `|0>` state using a single node.
9 |
10 | .. code-block:: python3
11 |
12 | import tensornetwork as tn
13 | import numpy as np
14 |
15 | state = tn.Node(np.array([1.0 + 0.0j, 0.0 + 0.0j]))
16 |
17 | One of the important things to realize is that qubits aren't actually represented by :code:`Nodes`,
18 | they are represented by :code:`Edges`!
19 |
20 | .. code-block:: python3
21 |
22 | qubit = state[0]
23 |
24 | Applying a gate to this qubit is then the same as connecting the dangling edge representing
25 | the qubit to a node representing the gate.
26 |
27 | .. code-block:: python3
28 |
29 | # This node represents the Hadamard gate we wish to perform
30 | # on this qubit.
31 | hadamard = tn.Node(np.array([[1, 1], [1, -1]])) / np.sqrt(2)
32 | tn.connect(qubit, hadamard[0]) # Equal to qubit ^ hadamard[0]
33 | # The "output edge" of the operation represents the qubit after
34 | # applying the operation.
35 | qubit = hadamard[1]
36 | # Contraction is how you actually "apply" the gate.
37 | state = state @ hadamard
38 | print(state.tensor) # array([0.707+0.j, 0.707+0.j])
39 |
40 |
41 |
42 | Multiple Qubits
43 | ----------------
44 | Multiple qubits are the same story: instead of starting with a single node for the state, we start with a product state.
45 |
46 | Here, we create an initial `|00>` state and evolve to a `|00> + |11>` bell state.
47 |
48 | .. code-block:: python3
49 |
50 | def apply_gate(qubit_edges, gate, operating_qubits):
51 | op = tn.Node(gate)
52 | for i, bit in enumerate(operating_qubits):
53 | tn.connect(qubit_edges[bit], op[i])
54 | qubit_edges[bit] = op[i + len(operating_qubits)]
55 |
56 | # These are just numpy arrays of the operators.
57 | H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
58 | CNOT = np.zeros((2, 2, 2, 2), dtype=complex)
59 | CNOT[0][0][0][0] = 1
60 | CNOT[0][1][0][1] = 1
61 | CNOT[1][0][1][1] = 1
62 | CNOT[1][1][1][0] = 1
63 | all_nodes = []
64 | # NodeCollection allows us to store all of the nodes created under this context.
65 | with tn.NodeCollection(all_nodes):
66 | state_nodes = [
67 | tn.Node(np.array([1.0 + 0.0j, 0.0 + 0.0j],)) for _ in range(2)
68 | ]
69 | qubits = [node[0] for node in state_nodes]
70 | apply_gate(qubits, H, [0])
71 | apply_gate(qubits, CNOT, [0, 1])
72 | # We can contract the entire tensornetwork easily with a contractor algorithm
73 | result = tn.contractors.optimal(
74 | all_nodes, output_edge_order=qubits)
75 | print(result.tensor) # array([[0.707+0.j, 0.0+0.j], [0.0+0.j, 0.707+0.j]])
76 |
77 | --
78 |
--------------------------------------------------------------------------------
/docs/requirements_docs.txt:
--------------------------------------------------------------------------------
1 | tensorflow>=2.0
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.BaseCharge.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.BaseCharge
2 | ========================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: BaseCharge
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~BaseCharge.__init__
17 | ~BaseCharge.copy
18 | ~BaseCharge.dual
19 | ~BaseCharge.dual_charges
20 | ~BaseCharge.fuse
21 | ~BaseCharge.identity_charge
22 | ~BaseCharge.intersect
23 | ~BaseCharge.isin
24 | ~BaseCharge.random
25 | ~BaseCharge.reduce
26 | ~BaseCharge.sort_unique_charges
27 | ~BaseCharge.unique
28 |
29 |
30 |
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~BaseCharge.charges
38 | ~BaseCharge.degeneracies
39 | ~BaseCharge.dim
40 | ~BaseCharge.dtype
41 | ~BaseCharge.identity_charges
42 | ~BaseCharge.label_dtype
43 | ~BaseCharge.num_symmetries
44 | ~BaseCharge.num_unique
45 |
46 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.BaseNode.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.AbstractNode
2 | ==========================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: AbstractNode
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~AbstractNode.__init__
17 | ~AbstractNode.add_axis_names
18 | ~AbstractNode.add_edge
19 | ~AbstractNode.copy
20 | ~AbstractNode.disable
21 | ~AbstractNode.fresh_edges
22 | ~AbstractNode.get_all_dangling
23 | ~AbstractNode.get_all_edges
24 | ~AbstractNode.get_all_nondangling
25 | ~AbstractNode.get_axis_number
26 | ~AbstractNode.get_dimension
27 | ~AbstractNode.get_edge
28 | ~AbstractNode.get_rank
29 | ~AbstractNode.get_tensor
30 | ~AbstractNode.has_dangling_edge
31 | ~AbstractNode.has_nondangling_edge
32 | ~AbstractNode.reorder_axes
33 | ~AbstractNode.reorder_edges
34 | ~AbstractNode.set_name
35 | ~AbstractNode.set_tensor
36 | ~AbstractNode.tensor_from_edge_order
37 |
38 |
39 |
40 |
41 |
42 | .. rubric:: Attributes
43 |
44 | .. autosummary::
45 |
46 | ~AbstractNode.axis_names
47 | ~AbstractNode.dtype
48 | ~AbstractNode.edges
49 | ~AbstractNode.name
50 | ~AbstractNode.shape
51 | ~AbstractNode.sparse_shape
52 | ~AbstractNode.tensor
53 |
54 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.BlockSparseTensor.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.BlockSparseTensor
2 | ===============================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: BlockSparseTensor
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~BlockSparseTensor.__init__
17 | ~BlockSparseTensor.conj
18 | ~BlockSparseTensor.copy
19 | ~BlockSparseTensor.fromdense
20 | ~BlockSparseTensor.ones
21 | ~BlockSparseTensor.randn
22 | ~BlockSparseTensor.random
23 | ~BlockSparseTensor.reshape
24 | ~BlockSparseTensor.todense
25 | ~BlockSparseTensor.transpose
26 | ~BlockSparseTensor.transpose_data
27 | ~BlockSparseTensor.zeros
28 |
29 |
30 |
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~BlockSparseTensor.T
38 | ~BlockSparseTensor.charges
39 | ~BlockSparseTensor.dtype
40 | ~BlockSparseTensor.flat_charges
41 | ~BlockSparseTensor.flat_flows
42 | ~BlockSparseTensor.flat_order
43 | ~BlockSparseTensor.flows
44 | ~BlockSparseTensor.ndim
45 | ~BlockSparseTensor.shape
46 | ~BlockSparseTensor.sparse_shape
47 |
48 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.ChargeArray.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.ChargeArray
2 | =========================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: ChargeArray
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~ChargeArray.__init__
17 | ~ChargeArray.conj
18 | ~ChargeArray.random
19 | ~ChargeArray.reshape
20 | ~ChargeArray.todense
21 | ~ChargeArray.transpose
22 | ~ChargeArray.transpose_data
23 |
24 |
25 |
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~ChargeArray.T
33 | ~ChargeArray.charges
34 | ~ChargeArray.dtype
35 | ~ChargeArray.flat_charges
36 | ~ChargeArray.flat_flows
37 | ~ChargeArray.flat_order
38 | ~ChargeArray.flows
39 | ~ChargeArray.ndim
40 | ~ChargeArray.shape
41 | ~ChargeArray.sparse_shape
42 |
43 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.CopyNode.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.CopyNode
2 | ======================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: CopyNode
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~CopyNode.__init__
17 | ~CopyNode.add_axis_names
18 | ~CopyNode.add_edge
19 | ~CopyNode.compute_contracted_tensor
20 | ~CopyNode.copy
21 | ~CopyNode.disable
22 | ~CopyNode.fresh_edges
23 | ~CopyNode.get_all_dangling
24 | ~CopyNode.get_all_edges
25 | ~CopyNode.get_all_nondangling
26 | ~CopyNode.get_axis_number
27 | ~CopyNode.get_dimension
28 | ~CopyNode.get_edge
29 | ~CopyNode.get_partners
30 | ~CopyNode.get_rank
31 | ~CopyNode.get_tensor
32 | ~CopyNode.has_dangling_edge
33 | ~CopyNode.has_nondangling_edge
34 | ~CopyNode.make_copy_tensor
35 | ~CopyNode.reorder_axes
36 | ~CopyNode.reorder_edges
37 | ~CopyNode.set_name
38 | ~CopyNode.set_tensor
39 | ~CopyNode.tensor_from_edge_order
40 |
41 |
42 |
43 |
44 |
45 | .. rubric:: Attributes
46 |
47 | .. autosummary::
48 |
49 | ~CopyNode.axis_names
50 | ~CopyNode.dtype
51 | ~CopyNode.edges
52 | ~CopyNode.name
53 | ~CopyNode.shape
54 | ~CopyNode.sparse_shape
55 | ~CopyNode.tensor
56 |
57 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.Edge.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.Edge
2 | ==================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: Edge
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Edge.__init__
17 | ~Edge.disable
18 | ~Edge.disconnect
19 | ~Edge.get_nodes
20 | ~Edge.is_being_used
21 | ~Edge.is_dangling
22 | ~Edge.is_trace
23 | ~Edge.set_name
24 | ~Edge.update_axis
25 |
26 |
27 |
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~Edge.axis1
35 | ~Edge.axis2
36 | ~Edge.dimension
37 | ~Edge.name
38 | ~Edge.node1
39 | ~Edge.node2
40 |
41 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.FiniteMPS.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.FiniteMPS
2 | =======================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: FiniteMPS
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~FiniteMPS.__init__
17 | ~FiniteMPS.apply_one_site_gate
18 | ~FiniteMPS.apply_transfer_operator
19 | ~FiniteMPS.apply_two_site_gate
20 | ~FiniteMPS.canonicalize
21 | ~FiniteMPS.check_canonical
22 | ~FiniteMPS.check_orthonormality
23 | ~FiniteMPS.get_tensor
24 | ~FiniteMPS.left_envs
25 | ~FiniteMPS.measure_local_operator
26 | ~FiniteMPS.measure_two_body_correlator
27 | ~FiniteMPS.position
28 | ~FiniteMPS.random
29 | ~FiniteMPS.right_envs
30 | ~FiniteMPS.save
31 |
32 |
33 |
34 |
35 |
36 | .. rubric:: Attributes
37 |
38 | .. autosummary::
39 |
40 | ~FiniteMPS.bond_dimensions
41 | ~FiniteMPS.dtype
42 | ~FiniteMPS.physical_dimensions
43 |
44 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.Index.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.Index
2 | ===================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: Index
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Index.__init__
17 | ~Index.copy
18 | ~Index.flip_flow
19 |
20 |
21 |
22 |
23 |
24 | .. rubric:: Attributes
25 |
26 | .. autosummary::
27 |
28 | ~Index.charges
29 | ~Index.dim
30 | ~Index.flat_charges
31 | ~Index.flat_flows
32 |
33 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.InfiniteMPS.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.InfiniteMPS
2 | =========================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: InfiniteMPS
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~InfiniteMPS.__init__
17 | ~InfiniteMPS.apply_one_site_gate
18 | ~InfiniteMPS.apply_transfer_operator
19 | ~InfiniteMPS.apply_two_site_gate
20 | ~InfiniteMPS.canonicalize
21 | ~InfiniteMPS.check_canonical
22 | ~InfiniteMPS.check_orthonormality
23 | ~InfiniteMPS.get_tensor
24 | ~InfiniteMPS.left_envs
25 | ~InfiniteMPS.measure_local_operator
26 | ~InfiniteMPS.measure_two_body_correlator
27 | ~InfiniteMPS.position
28 | ~InfiniteMPS.random
29 | ~InfiniteMPS.right_envs
30 | ~InfiniteMPS.save
31 | ~InfiniteMPS.transfer_matrix_eigs
32 | ~InfiniteMPS.unit_cell_transfer_operator
33 |
34 |
35 |
36 |
37 |
38 | .. rubric:: Attributes
39 |
40 | .. autosummary::
41 |
42 | ~InfiniteMPS.bond_dimensions
43 | ~InfiniteMPS.dtype
44 | ~InfiniteMPS.physical_dimensions
45 |
46 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.Node.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.Node
2 | ==================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: Node
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Node.__init__
17 | ~Node.add_axis_names
18 | ~Node.add_edge
19 | ~Node.copy
20 | ~Node.disable
21 | ~Node.fresh_edges
22 | ~Node.get_all_dangling
23 | ~Node.get_all_edges
24 | ~Node.get_all_nondangling
25 | ~Node.get_axis_number
26 | ~Node.get_dimension
27 | ~Node.get_edge
28 | ~Node.get_rank
29 | ~Node.get_tensor
30 | ~Node.has_dangling_edge
31 | ~Node.has_nondangling_edge
32 | ~Node.op_protection
33 | ~Node.reorder_axes
34 | ~Node.reorder_edges
35 | ~Node.set_name
36 | ~Node.set_tensor
37 | ~Node.tensor_from_edge_order
38 |
39 |
40 |
41 |
42 |
43 | .. rubric:: Attributes
44 |
45 | .. autosummary::
46 |
47 | ~Node.axis_names
48 | ~Node.dtype
49 | ~Node.edges
50 | ~Node.name
51 | ~Node.shape
52 | ~Node.sparse_shape
53 | ~Node.tensor
54 |
55 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.NodeCollection.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.NodeCollection
2 | ============================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: NodeCollection
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~NodeCollection.__init__
17 | ~NodeCollection.add
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.U1Charge.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.U1Charge
2 | ======================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: U1Charge
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~U1Charge.__init__
17 | ~U1Charge.copy
18 | ~U1Charge.dual
19 | ~U1Charge.dual_charges
20 | ~U1Charge.fuse
21 | ~U1Charge.identity_charge
22 | ~U1Charge.intersect
23 | ~U1Charge.isin
24 | ~U1Charge.random
25 | ~U1Charge.reduce
26 | ~U1Charge.sort_unique_charges
27 | ~U1Charge.unique
28 |
29 |
30 |
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~U1Charge.charges
38 | ~U1Charge.degeneracies
39 | ~U1Charge.dim
40 | ~U1Charge.dtype
41 | ~U1Charge.identity_charges
42 | ~U1Charge.label_dtype
43 | ~U1Charge.num_symmetries
44 | ~U1Charge.num_unique
45 |
46 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.Z2Charge.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.Z2Charge
2 | ======================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autoclass:: Z2Charge
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Z2Charge.__init__
17 | ~Z2Charge.copy
18 | ~Z2Charge.dual
19 | ~Z2Charge.dual_charges
20 | ~Z2Charge.fuse
21 | ~Z2Charge.identity_charge
22 | ~Z2Charge.intersect
23 | ~Z2Charge.isin
24 | ~Z2Charge.random
25 | ~Z2Charge.reduce
26 | ~Z2Charge.sort_unique_charges
27 | ~Z2Charge.unique
28 |
29 |
30 |
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~Z2Charge.charges
38 | ~Z2Charge.degeneracies
39 | ~Z2Charge.dim
40 | ~Z2Charge.dtype
41 | ~Z2Charge.identity_charges
42 | ~Z2Charge.label_dtype
43 | ~Z2Charge.num_symmetries
44 | ~Z2Charge.num_unique
45 |
46 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.ZNCharge.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.ZNCharge
2 | ======================
3 |
4 | .. currentmodule:: tensornetwork
5 |
6 | .. autofunction:: ZNCharge
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.auto.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.auto
2 | ==============================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: auto
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.branch.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.branch
2 | ================================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: branch
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.bucket.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.bucket
2 | ================================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: bucket
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.custom.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.custom
2 | ================================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: custom
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.greedy.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.greedy
2 | ================================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: greedy
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.contractors.optimal.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.contractors.optimal
2 | =================================
3 |
4 | .. currentmodule:: tensornetwork.contractors
5 |
6 | .. autofunction:: optimal
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.Conv2DMPO.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.Conv2DMPO
2 | ========================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: Conv2DMPO
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Conv2DMPO.__init__
17 | ~Conv2DMPO.add_loss
18 | ~Conv2DMPO.add_metric
19 | ~Conv2DMPO.add_update
20 | ~Conv2DMPO.add_variable
21 | ~Conv2DMPO.add_weight
22 | ~Conv2DMPO.apply
23 | ~Conv2DMPO.build
24 | ~Conv2DMPO.call
25 | ~Conv2DMPO.compute_mask
26 | ~Conv2DMPO.compute_output_shape
27 | ~Conv2DMPO.compute_output_signature
28 | ~Conv2DMPO.count_params
29 | ~Conv2DMPO.from_config
30 | ~Conv2DMPO.get_config
31 | ~Conv2DMPO.get_input_at
32 | ~Conv2DMPO.get_input_mask_at
33 | ~Conv2DMPO.get_input_shape_at
34 | ~Conv2DMPO.get_losses_for
35 | ~Conv2DMPO.get_output_at
36 | ~Conv2DMPO.get_output_mask_at
37 | ~Conv2DMPO.get_output_shape_at
38 | ~Conv2DMPO.get_updates_for
39 | ~Conv2DMPO.get_weights
40 | ~Conv2DMPO.set_weights
41 | ~Conv2DMPO.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~Conv2DMPO.activity_regularizer
52 | ~Conv2DMPO.dtype
53 | ~Conv2DMPO.dynamic
54 | ~Conv2DMPO.inbound_nodes
55 | ~Conv2DMPO.input
56 | ~Conv2DMPO.input_mask
57 | ~Conv2DMPO.input_shape
58 | ~Conv2DMPO.input_spec
59 | ~Conv2DMPO.losses
60 | ~Conv2DMPO.metrics
61 | ~Conv2DMPO.name
62 | ~Conv2DMPO.name_scope
63 | ~Conv2DMPO.non_trainable_variables
64 | ~Conv2DMPO.non_trainable_weights
65 | ~Conv2DMPO.outbound_nodes
66 | ~Conv2DMPO.output
67 | ~Conv2DMPO.output_mask
68 | ~Conv2DMPO.output_shape
69 | ~Conv2DMPO.stateful
70 | ~Conv2DMPO.submodules
71 | ~Conv2DMPO.trainable
72 | ~Conv2DMPO.trainable_variables
73 | ~Conv2DMPO.trainable_weights
74 | ~Conv2DMPO.updates
75 | ~Conv2DMPO.variables
76 | ~Conv2DMPO.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.DenseCondenser.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.DenseCondenser
2 | =============================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: DenseCondenser
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~DenseCondenser.__init__
17 | ~DenseCondenser.add_loss
18 | ~DenseCondenser.add_metric
19 | ~DenseCondenser.add_update
20 | ~DenseCondenser.add_variable
21 | ~DenseCondenser.add_weight
22 | ~DenseCondenser.apply
23 | ~DenseCondenser.build
24 | ~DenseCondenser.call
25 | ~DenseCondenser.compute_mask
26 | ~DenseCondenser.compute_output_shape
27 | ~DenseCondenser.compute_output_signature
28 | ~DenseCondenser.count_params
29 | ~DenseCondenser.from_config
30 | ~DenseCondenser.get_config
31 | ~DenseCondenser.get_input_at
32 | ~DenseCondenser.get_input_mask_at
33 | ~DenseCondenser.get_input_shape_at
34 | ~DenseCondenser.get_losses_for
35 | ~DenseCondenser.get_output_at
36 | ~DenseCondenser.get_output_mask_at
37 | ~DenseCondenser.get_output_shape_at
38 | ~DenseCondenser.get_updates_for
39 | ~DenseCondenser.get_weights
40 | ~DenseCondenser.set_weights
41 | ~DenseCondenser.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~DenseCondenser.activity_regularizer
52 | ~DenseCondenser.dtype
53 | ~DenseCondenser.dynamic
54 | ~DenseCondenser.inbound_nodes
55 | ~DenseCondenser.input
56 | ~DenseCondenser.input_mask
57 | ~DenseCondenser.input_shape
58 | ~DenseCondenser.input_spec
59 | ~DenseCondenser.losses
60 | ~DenseCondenser.metrics
61 | ~DenseCondenser.name
62 | ~DenseCondenser.name_scope
63 | ~DenseCondenser.non_trainable_variables
64 | ~DenseCondenser.non_trainable_weights
65 | ~DenseCondenser.outbound_nodes
66 | ~DenseCondenser.output
67 | ~DenseCondenser.output_mask
68 | ~DenseCondenser.output_shape
69 | ~DenseCondenser.stateful
70 | ~DenseCondenser.submodules
71 | ~DenseCondenser.trainable
72 | ~DenseCondenser.trainable_variables
73 | ~DenseCondenser.trainable_weights
74 | ~DenseCondenser.updates
75 | ~DenseCondenser.variables
76 | ~DenseCondenser.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.DenseDecomp.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.DenseDecomp
2 | ==========================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: DenseDecomp
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~DenseDecomp.__init__
17 | ~DenseDecomp.add_loss
18 | ~DenseDecomp.add_metric
19 | ~DenseDecomp.add_update
20 | ~DenseDecomp.add_variable
21 | ~DenseDecomp.add_weight
22 | ~DenseDecomp.apply
23 | ~DenseDecomp.build
24 | ~DenseDecomp.call
25 | ~DenseDecomp.compute_mask
26 | ~DenseDecomp.compute_output_shape
27 | ~DenseDecomp.compute_output_signature
28 | ~DenseDecomp.count_params
29 | ~DenseDecomp.from_config
30 | ~DenseDecomp.get_config
31 | ~DenseDecomp.get_input_at
32 | ~DenseDecomp.get_input_mask_at
33 | ~DenseDecomp.get_input_shape_at
34 | ~DenseDecomp.get_losses_for
35 | ~DenseDecomp.get_output_at
36 | ~DenseDecomp.get_output_mask_at
37 | ~DenseDecomp.get_output_shape_at
38 | ~DenseDecomp.get_updates_for
39 | ~DenseDecomp.get_weights
40 | ~DenseDecomp.set_weights
41 | ~DenseDecomp.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~DenseDecomp.activity_regularizer
52 | ~DenseDecomp.dtype
53 | ~DenseDecomp.dynamic
54 | ~DenseDecomp.inbound_nodes
55 | ~DenseDecomp.input
56 | ~DenseDecomp.input_mask
57 | ~DenseDecomp.input_shape
58 | ~DenseDecomp.input_spec
59 | ~DenseDecomp.losses
60 | ~DenseDecomp.metrics
61 | ~DenseDecomp.name
62 | ~DenseDecomp.name_scope
63 | ~DenseDecomp.non_trainable_variables
64 | ~DenseDecomp.non_trainable_weights
65 | ~DenseDecomp.outbound_nodes
66 | ~DenseDecomp.output
67 | ~DenseDecomp.output_mask
68 | ~DenseDecomp.output_shape
69 | ~DenseDecomp.stateful
70 | ~DenseDecomp.submodules
71 | ~DenseDecomp.trainable
72 | ~DenseDecomp.trainable_variables
73 | ~DenseDecomp.trainable_weights
74 | ~DenseDecomp.updates
75 | ~DenseDecomp.variables
76 | ~DenseDecomp.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.DenseEntangler.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.DenseEntangler
2 | =============================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: DenseEntangler
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~DenseEntangler.__init__
17 | ~DenseEntangler.add_loss
18 | ~DenseEntangler.add_metric
19 | ~DenseEntangler.add_update
20 | ~DenseEntangler.add_variable
21 | ~DenseEntangler.add_weight
22 | ~DenseEntangler.apply
23 | ~DenseEntangler.build
24 | ~DenseEntangler.call
25 | ~DenseEntangler.compute_mask
26 | ~DenseEntangler.compute_output_shape
27 | ~DenseEntangler.compute_output_signature
28 | ~DenseEntangler.count_params
29 | ~DenseEntangler.from_config
30 | ~DenseEntangler.get_config
31 | ~DenseEntangler.get_input_at
32 | ~DenseEntangler.get_input_mask_at
33 | ~DenseEntangler.get_input_shape_at
34 | ~DenseEntangler.get_losses_for
35 | ~DenseEntangler.get_output_at
36 | ~DenseEntangler.get_output_mask_at
37 | ~DenseEntangler.get_output_shape_at
38 | ~DenseEntangler.get_updates_for
39 | ~DenseEntangler.get_weights
40 | ~DenseEntangler.set_weights
41 | ~DenseEntangler.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~DenseEntangler.activity_regularizer
52 | ~DenseEntangler.dtype
53 | ~DenseEntangler.dynamic
54 | ~DenseEntangler.inbound_nodes
55 | ~DenseEntangler.input
56 | ~DenseEntangler.input_mask
57 | ~DenseEntangler.input_shape
58 | ~DenseEntangler.input_spec
59 | ~DenseEntangler.losses
60 | ~DenseEntangler.metrics
61 | ~DenseEntangler.name
62 | ~DenseEntangler.name_scope
63 | ~DenseEntangler.non_trainable_variables
64 | ~DenseEntangler.non_trainable_weights
65 | ~DenseEntangler.outbound_nodes
66 | ~DenseEntangler.output
67 | ~DenseEntangler.output_mask
68 | ~DenseEntangler.output_shape
69 | ~DenseEntangler.stateful
70 | ~DenseEntangler.submodules
71 | ~DenseEntangler.trainable
72 | ~DenseEntangler.trainable_variables
73 | ~DenseEntangler.trainable_weights
74 | ~DenseEntangler.updates
75 | ~DenseEntangler.variables
76 | ~DenseEntangler.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.DenseExpander.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.DenseExpander
2 | ============================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: DenseExpander
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~DenseExpander.__init__
17 | ~DenseExpander.add_loss
18 | ~DenseExpander.add_metric
19 | ~DenseExpander.add_update
20 | ~DenseExpander.add_variable
21 | ~DenseExpander.add_weight
22 | ~DenseExpander.apply
23 | ~DenseExpander.build
24 | ~DenseExpander.call
25 | ~DenseExpander.compute_mask
26 | ~DenseExpander.compute_output_shape
27 | ~DenseExpander.compute_output_signature
28 | ~DenseExpander.count_params
29 | ~DenseExpander.from_config
30 | ~DenseExpander.get_config
31 | ~DenseExpander.get_input_at
32 | ~DenseExpander.get_input_mask_at
33 | ~DenseExpander.get_input_shape_at
34 | ~DenseExpander.get_losses_for
35 | ~DenseExpander.get_output_at
36 | ~DenseExpander.get_output_mask_at
37 | ~DenseExpander.get_output_shape_at
38 | ~DenseExpander.get_updates_for
39 | ~DenseExpander.get_weights
40 | ~DenseExpander.set_weights
41 | ~DenseExpander.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~DenseExpander.activity_regularizer
52 | ~DenseExpander.dtype
53 | ~DenseExpander.dynamic
54 | ~DenseExpander.inbound_nodes
55 | ~DenseExpander.input
56 | ~DenseExpander.input_mask
57 | ~DenseExpander.input_shape
58 | ~DenseExpander.input_spec
59 | ~DenseExpander.losses
60 | ~DenseExpander.metrics
61 | ~DenseExpander.name
62 | ~DenseExpander.name_scope
63 | ~DenseExpander.non_trainable_variables
64 | ~DenseExpander.non_trainable_weights
65 | ~DenseExpander.outbound_nodes
66 | ~DenseExpander.output
67 | ~DenseExpander.output_mask
68 | ~DenseExpander.output_shape
69 | ~DenseExpander.stateful
70 | ~DenseExpander.submodules
71 | ~DenseExpander.trainable
72 | ~DenseExpander.trainable_variables
73 | ~DenseExpander.trainable_weights
74 | ~DenseExpander.updates
75 | ~DenseExpander.variables
76 | ~DenseExpander.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/stubs/tensornetwork.tn_keras.layers.DenseMPO.rst:
--------------------------------------------------------------------------------
1 | tensornetwork.tn\_keras.layers.DenseMPO
2 | =======================================
3 |
4 | .. currentmodule:: tensornetwork.tn_keras.layers
5 |
6 | .. autoclass:: DenseMPO
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~DenseMPO.__init__
17 | ~DenseMPO.add_loss
18 | ~DenseMPO.add_metric
19 | ~DenseMPO.add_update
20 | ~DenseMPO.add_variable
21 | ~DenseMPO.add_weight
22 | ~DenseMPO.apply
23 | ~DenseMPO.build
24 | ~DenseMPO.call
25 | ~DenseMPO.compute_mask
26 | ~DenseMPO.compute_output_shape
27 | ~DenseMPO.compute_output_signature
28 | ~DenseMPO.count_params
29 | ~DenseMPO.from_config
30 | ~DenseMPO.get_config
31 | ~DenseMPO.get_input_at
32 | ~DenseMPO.get_input_mask_at
33 | ~DenseMPO.get_input_shape_at
34 | ~DenseMPO.get_losses_for
35 | ~DenseMPO.get_output_at
36 | ~DenseMPO.get_output_mask_at
37 | ~DenseMPO.get_output_shape_at
38 | ~DenseMPO.get_updates_for
39 | ~DenseMPO.get_weights
40 | ~DenseMPO.set_weights
41 | ~DenseMPO.with_name_scope
42 |
43 |
44 |
45 |
46 |
47 | .. rubric:: Attributes
48 |
49 | .. autosummary::
50 |
51 | ~DenseMPO.activity_regularizer
52 | ~DenseMPO.dtype
53 | ~DenseMPO.dynamic
54 | ~DenseMPO.inbound_nodes
55 | ~DenseMPO.input
56 | ~DenseMPO.input_mask
57 | ~DenseMPO.input_shape
58 | ~DenseMPO.input_spec
59 | ~DenseMPO.losses
60 | ~DenseMPO.metrics
61 | ~DenseMPO.name
62 | ~DenseMPO.name_scope
63 | ~DenseMPO.non_trainable_variables
64 | ~DenseMPO.non_trainable_weights
65 | ~DenseMPO.outbound_nodes
66 | ~DenseMPO.output
67 | ~DenseMPO.output_mask
68 | ~DenseMPO.output_shape
69 | ~DenseMPO.stateful
70 | ~DenseMPO.submodules
71 | ~DenseMPO.trainable
72 | ~DenseMPO.trainable_variables
73 | ~DenseMPO.trainable_weights
74 | ~DenseMPO.updates
75 | ~DenseMPO.variables
76 | ~DenseMPO.weights
77 |
78 |
--------------------------------------------------------------------------------
/docs/tn_keras.rst:
--------------------------------------------------------------------------------
1 | TN Keras Layers
2 | ------------------
3 |
4 | TN Keras exists to simplify tensorization of existing TensorFlow models.
5 | These layers try to match the APIs for existing Keras layers closely.
6 | Please note these layers are currently intended for experimentation only,
7 | not production. These layers are in alpha and upcoming releases might include
8 | breaking changes.
9 |
10 | APIs are listed here. An overview of these layers is available
11 | `in this blog post <https://blog.tensorflow.org/2020/02/speeding-up-neural-networks-using-tensornetwork-in-keras.html>`_.
12 |
13 | .. autosummary::
14 | :toctree: stubs
15 |
16 | tensornetwork.tn_keras.layers.DenseDecomp
17 | tensornetwork.tn_keras.layers.DenseMPO
18 | tensornetwork.tn_keras.layers.Conv2DMPO
19 | tensornetwork.tn_keras.layers.DenseCondenser
20 | tensornetwork.tn_keras.layers.DenseExpander
21 | tensornetwork.tn_keras.layers.DenseEntangler
22 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/examples/__init__.py
--------------------------------------------------------------------------------
/examples/custom_path_solvers/example.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import time
16 | import numpy as np
17 | import tensornetwork as tn
18 | from tensornetwork import ncon, contractors
19 | import opt_einsum as oe
20 | # pylint: disable=line-too-long
21 | from tensornetwork.contractors.custom_path_solvers.nconinterface import ncon_solver
22 | """
23 | An example for using`ncon_solver` to find an optimal contraction path for a
24 | networks defined in the `ncon` syntax. Note that there are essentially three
25 | ways to use the solver:
26 |
27 | (i) Unrestricted search: set 'max_branch=None' to search over all possible
28 | contraction paths in order to obtain the guaranteed optimal path. The total
29 | search time required scales with number of tensors `N ` as: t ~ exp(N)
30 |
31 | (ii) Restricted search: set 'max_branch' as an integer to restrict the search
32 | to that number of the most likely paths. The total search time required scales
33 | with number of tensors `N ` as: t ~ N*max_branch
34 |
35 | (iii) Greedy search: set 'max_branch=1' to build a contraction path from the
36 | sequence of locally optimal contractions (aka the greedy algorithm). The total
37 | search time is essentially negligible: t < 0.005s
38 | """
39 |
40 | # define a network (here from a 1D binary MERA algorithm)
41 | chi = 3
42 | chi_p = 3
43 | u = np.random.rand(chi, chi, chi_p, chi_p)
44 | w = np.random.rand(chi_p, chi_p, chi)
45 | ham = np.random.rand(chi, chi, chi, chi, chi, chi)
46 | tensors = [u, u, w, w, w, ham, u, u, w, w, w]
47 | connects = [[1, 3, 10, 11], [4, 7, 12, 13], [8, 10, -4], [11, 12, -5],
48 | [13, 14, -6], [2, 5, 6, 3, 4, 7], [1, 2, 9, 17], [5, 6, 16, 15],
49 | [8, 9, -1], [17, 16, -2], [15, 14, -3]]
50 |
51 | t0 = time.time()
52 | # check all contraction paths to find the optimal order
53 | con_order, costs, is_optimal = ncon_solver(tensors, connects, max_branch=None)
54 |
55 | # contract network using ncon
56 | T0 = ncon(tensors, connects, con_order)
57 | print("ncon_solver: time to contract = ", time.time() - t0)
58 | """
59 | For comparison, the also show how the same network can be contracted using the
60 | `opt_einsum` package.
61 | """
62 |
63 | # combine tensors and connects lists
64 | N = len(tensors)
65 | comb_list = [0] * (2 * len(tensors))
66 | for k in range(N):
67 | comb_list[2 * k] = tensors[k]
68 | comb_list[2 * k + 1] = connects[k]
69 |
70 | # solve order and contract network using opt_einsum
71 | t0 = time.time()
72 | T1 = oe.contract(*comb_list, [-1, -2, -3, -4, -5, -6], optimize='branch-all')
73 | print("opt_einsum: time to contract = ", time.time() - t0)
74 | """
75 | For a final comparison, we demonstrate how the example network can be solved
76 | for the optimal order and contracted using the node/edge API with opt_einsum
77 | """
78 |
79 | # define network nodes
80 | backend = "numpy"
81 | iso_l = tn.Node(w, backend=backend)
82 | iso_c = tn.Node(w, backend=backend)
83 | iso_r = tn.Node(w, backend=backend)
84 | iso_l_con = tn.conj(iso_l)
85 | iso_c_con = tn.conj(iso_c)
86 | iso_r_con = tn.conj(iso_r)
87 | op = tn.Node(ham, backend=backend)
88 | un_l = tn.Node(u, backend=backend)
89 | un_l_con = tn.conj(un_l)
90 | un_r = tn.Node(u, backend=backend)
91 | un_r_con = tn.conj(un_r)
92 |
93 | # define network edges
94 | tn.connect(iso_l[0], iso_l_con[0])
95 | tn.connect(iso_l[1], un_l[2])
96 | tn.connect(iso_c[0], un_l[3])
97 | tn.connect(iso_c[1], un_r[2])
98 | tn.connect(iso_r[0], un_r[3])
99 | tn.connect(iso_r[1], iso_r_con[1])
100 | tn.connect(un_l[0], un_l_con[0])
101 | tn.connect(un_l[1], op[3])
102 | tn.connect(un_r[0], op[4])
103 | tn.connect(un_r[1], op[5])
104 | tn.connect(op[0], un_l_con[1])
105 | tn.connect(op[1], un_r_con[0])
106 | tn.connect(op[2], un_r_con[1])
107 | tn.connect(un_l_con[2], iso_l_con[1])
108 | tn.connect(un_l_con[3], iso_c_con[0])
109 | tn.connect(un_r_con[2], iso_c_con[1])
110 | tn.connect(un_r_con[3], iso_r_con[0])
111 |
112 | # define output edges
113 | output_edge_order = [
114 | iso_l_con[2], iso_c_con[2], iso_r_con[2], iso_l[2], iso_c[2], iso_r[2]
115 | ]
116 |
117 | # solve for optimal order and contract the network
118 | t0 = time.time()
119 | T2 = contractors.branch(
120 | tn.reachable(op), output_edge_order=output_edge_order).get_tensor()
121 | print("tn.contractors: time to contract = ", time.time() - t0)
122 |
--------------------------------------------------------------------------------
/examples/fft/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/examples/fft/__init__.py
--------------------------------------------------------------------------------
/examples/fft/fft.py:
--------------------------------------------------------------------------------
1 | """Utility methods for producing interesting tensor networks."""
2 |
3 | from typing import List
4 | import numpy as np
5 | import tensornetwork as tn
6 |
7 |
def add_fft(inputs: List[tn.Edge],) -> List[tn.Edge]:
  """Creates output node axes corresponding to the Fourier transform of inputs.

  Implements the Cooley-Tukey FFT as a circuit of Hadamard ("mix") and
  controlled-phase ("twiddle") gates. All axes are expected to have length 2,
  and the input axes must be (and the output axes will be) binary.

  Args:
    inputs: The dangling node axes to act upon.

  Returns:
    A list of `Edges` containing the result, in bit-reversed order.

  Raises:
    ValueError: If any input edge is not dangling.
  """
  if any(not e.is_dangling() for e in inputs):
    raise ValueError("Inputs must be dangling edges.")

  # 2x2 Hadamard gate used for the "mix" step of each butterfly.
  mix_gate = np.array([[1, 1], [1, -1]], dtype=np.complex128) / np.sqrt(2)

  def controlled_phase(power: int) -> np.ndarray:
    # Two-qubit diagonal gate: phase exp(-i*pi/2**power) on the |11> entry.
    gate = np.eye(4, dtype=np.complex128)
    gate[3, 3] = np.exp(-1j * np.pi / 2**power)
    return gate.reshape((2,) * 4)

  # The "frontier" tracks the current dangling edge for each wire.
  frontier = list(inputs)

  def apply_gate(wires: List[int], tensor: np.ndarray, name: str):
    """Wires a gate node onto the targeted wires and advances the frontier."""
    gate_node = tn.Node(tensor, name)
    for port, wire in enumerate(wires):
      frontier[wire] ^ gate_node[port]
      frontier[wire] = gate_node[port + len(wires)]

  num_bits = len(frontier)
  for i in range(num_bits):
    for j in range(1, i + 1):
      apply_gate([i - j, i], controlled_phase(j), "TWIDDLE_{}_{}".format(j, i))
    apply_gate([i], mix_gate, "MIX_{}".format(i))

  # FFT reverses bit order.
  return frontier[::-1]
51 |
--------------------------------------------------------------------------------
/examples/fft/fft_test.py:
--------------------------------------------------------------------------------
1 | # python3
2 | """Tests for fft."""
3 |
4 | import numpy as np
5 | from examples.fft import fft
6 | import tensornetwork as tn
7 |
8 |
def test_fft():
  """Checks add_fft against numpy's FFT on a small 3-qubit state."""
  num_bits = 3
  amplitudes = [complex(0)] * (1 << num_bits)
  amplitudes[1] = 1j
  amplitudes[5] = -1
  source_node = tn.Node(np.array(amplitudes).reshape((2,) * num_bits))

  out_edges = fft.add_fft([source_node[k] for k in range(num_bits)])
  result = tn.contractors.greedy(tn.reachable(out_edges[0].node1), out_edges)
  # Merge the three output axes into one length-8 axis before comparing.
  tn.flatten_edges(out_edges)
  np.testing.assert_allclose(np.fft.fft(amplitudes, norm="ortho"),
                             result.tensor)
22 |
--------------------------------------------------------------------------------
/examples/sat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/examples/sat/__init__.py
--------------------------------------------------------------------------------
/examples/sat/sat_tensornetwork.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """A TensorNetwork for counting 3SAT solutions.
15 |
16 | This is an implementation of https://arxiv.org/abs/1105.3201.
17 |
18 | 3SAT problems are boolean satisfiability problem of the form
19 | (a OR NOT b OR c) AND (NOT a OR NOT c OR d) AND ...
20 | where a, b, c, d are variables that can take the values True or False.
21 | See https://en.wikipedia.org/wiki/Boolean_satisfiability_problem for a more
22 | in-depth description.
23 |
24 | 3SAT TensorNetworks are networks that can find/count all solutions to a
25 | given 3SAT problem. At a high level, these networks are constructed by
26 | connecting "clause nodes" to "variable nodes" through "copy tensors".
27 |
28 | Clause nodes are tensors of shape (2, 2, 2) with 1 for every variable
29 | assignment that satisfies the clause, and 0 for the one assignment that doesn't.
30 | For example, for the clause (a OR b OR NOT c), then
31 | clause.get_tensor()[0][0][1] == 0, and is 1 everywhere else.
32 |
33 | Variable Nodes are shape (2,) tensors that have [1, 1] as their concrete value.
34 | You can think of this node as an unnormalized superposition of the variable
35 | being True and False.
36 |
37 | Copy tensors are tensors of shape (2, 2, 2). These tensors have value 1 at
38 | positions [1][1][1] and [0][0][0] and have value 0 everywhere else.
39 | """
40 |
41 | import numpy as np
42 | from typing import List, Tuple, Set
43 | import tensornetwork as tn
44 |
45 |
def sat_tn(clauses: List[Tuple[int, int, int]]) -> List[tn.Edge]:
  """Create a 3SAT TensorNetwork of the given 3SAT clauses.

  After full contraction, this network will be a tensor of size (2, 2, ..., 2)
  with rank equal to the number of variables. Each entry of the final tensor
  says whether that assignment satisfies all clauses; e.g. if
  final_node.get_tensor()[0][1][1] == 1, the assignment (False, True, True)
  satisfies every clause.

  Args:
    clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
      variable in the clause. If that int is negative, that variable is negated
      in the clause.

  Returns:
    var_edges: One dangling edge per variable, in variable order.

  Raises:
    ValueError: If any of the clauses have a 0 in them.
  """
  if any(0 in clause for clause in clauses):
    raise ValueError("0's are not allowed in the clauses.")

  # Variables are 1-indexed; the largest mentioned index sets the count.
  num_vars = max(abs(literal) for clause in clauses for literal in clause)

  # One [1, 1] node per variable: an unnormalized True/False superposition.
  var_edges = []
  for _ in range(num_vars):
    var_edges.append(tn.Node(np.ones(2, dtype=np.int32))[0])

  for clause in clauses:
    # Clause tensor: 1 for every satisfying assignment, 0 for the single
    # forbidden one. Index 0 corresponds to a positive literal being True.
    a, b, c = clause
    clause_tensor = np.ones((2, 2, 2), dtype=np.int32)
    forbidden = tuple((1 - np.sign(literal)) // 2 for literal in (a, b, c))
    clause_tensor[forbidden] = 0
    clause_node = tn.Node(clause_tensor)

    # Splice a rank-3 copy tensor between each variable's edge and the clause,
    # leaving a fresh dangling edge for the variable.
    for axis, literal in enumerate(clause):
      copy_node = tn.CopyNode(3, 2)
      clause_node[axis] ^ copy_node[0]
      var_edges[abs(literal) - 1] ^ copy_node[1]
      var_edges[abs(literal) - 1] = copy_node[2]

  return var_edges
99 |
100 |
def sat_count_tn(clauses: List[Tuple[int, int, int]]) -> Set[tn.AbstractNode]:
  """Create a 3SAT Count TensorNetwork.

  Two copies of the solution network are glued along their variable edges, so
  fully contracting the result sums the 0/1 solution tensor over all
  assignments — i.e. it counts the satisfying assignments.

  Args:
    clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
      variable in the clause. If that int is negative, that variable is negated
      in the clause.

  Returns:
    nodes: The set of nodes making up the counting network.
  """
  first_copy = sat_tn(clauses)
  second_copy = sat_tn(clauses)
  for left_edge, right_edge in zip(first_copy, second_copy):
    left_edge ^ right_edge
  # TODO(chaseriley): Support disconnected SAT graphs.
  return tn.reachable(first_copy[0].node1)
121 |
--------------------------------------------------------------------------------
/examples/sat/sat_tensornetwork_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import tensornetwork
16 | from examples.sat import sat_tensornetwork
17 |
18 |
def test_sanity_check():
  """A single clause over three variables leaves 7 of 8 assignments valid."""
  network = sat_tensornetwork.sat_count_tn([(1, 2, 3)])
  num_solutions = tensornetwork.contractors.greedy(network).tensor
  assert num_solutions == 7
25 |
26 |
def test_dual_clauses():
  """Two clauses sharing variables further restrict the solution count."""
  network = sat_tensornetwork.sat_count_tn([(1, 2, 3), (1, -2, 3)])
  num_solutions = tensornetwork.contractors.greedy(network).tensor
  assert num_solutions == 6
34 |
35 |
def test_many_clauses():
  """Listing every possible clause over three variables leaves no solution."""
  network = sat_tensornetwork.sat_count_tn([
      (1, 2, 3),
      (1, 2, -3),
      (1, -2, 3),
      (1, -2, -3),
      (-1, 2, 3),
      (-1, 2, -3),
      (-1, -2, 3),
      (-1, -2, -3),
  ])
  assert tensornetwork.contractors.greedy(network).tensor == 0
49 |
50 |
def test_four_variables():
  """Two clauses over four variables admit 13 satisfying assignments."""
  network = sat_tensornetwork.sat_count_tn([(1, 2, 3), (1, 2, 4)])
  num_solutions = tensornetwork.contractors.greedy(network).tensor
  assert num_solutions == 13
58 |
59 |
def test_four_variables_four_clauses():
  """Four mixed-sign clauses over four variables admit 9 solutions."""
  network = sat_tensornetwork.sat_count_tn([
      (1, 2, 3),
      (1, 2, 4),
      (-3, -4, 2),
      (-1, 3, -2),
  ])
  num_solutions = tensornetwork.contractors.greedy(network).tensor
  assert num_solutions == 9
69 |
70 |
def test_single_variable():
  """A clause repeating one literal forces that variable: one solution."""
  network = sat_tensornetwork.sat_count_tn([(1, 1, 1)])
  num_solutions = tensornetwork.contractors.greedy(network).tensor
  assert num_solutions == 1
77 |
78 |
def test_solutions():
  """The contracted solution tensor is 0 only on the forbidden assignment."""
  edge_order = sat_tensornetwork.sat_tn([(1, 2, -3)])
  solutions = tensornetwork.contractors.greedy(
      tensornetwork.reachable(edge_order[0].node1), edge_order).tensor
  for a in range(2):
    for b in range(2):
      for c in range(2):
        # (False, False, True) is the only assignment falsifying
        # (1 OR 2 OR NOT 3).
        expected = 0 if (a, b, c) == (0, 0, 1) else 1
        assert solutions[a][b][c] == expected
94 |
--------------------------------------------------------------------------------
/examples/simple_mera/simple_mera_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import time
16 | import pytest
17 | import jax
18 | import jax.config
19 | jax.config.update("jax_enable_x64", True)
20 | import jax.numpy as np
21 | import jax.random
22 | import tensornetwork as tn
23 | import simple_mera
24 |
25 |
def test_descend(random_tensors):
  """Descending a density matrix keeps it hermitian, unit-trace, and PSD."""
  h, s, iso, dis = random_tensors
  s = simple_mera.descend(h, s, iso, dis)
  assert len(s.shape) == 6
  D = s.shape[0]
  smat = np.reshape(s, [D**3] * 2)
  assert np.isclose(np.trace(smat), 1.0)
  assert np.isclose(np.linalg.norm(smat - np.conj(np.transpose(smat))), 0.0)
  spec, _ = np.linalg.eigh(smat)
  # np.alltrue was deprecated and removed (NumPy 2.0); np.all is the
  # supported spelling and behaves identically here.
  assert np.all(spec >= 0.0)
36 |
37 |
def test_ascend(random_tensors):
  """Ascending a Hamiltonian term yields a hermitian rank-6 tensor."""
  h, s, iso, dis = random_tensors
  h = simple_mera.ascend(h, s, iso, dis)
  assert len(h.shape) == 6
  dim = h.shape[0]
  hmat = np.reshape(h, [dim**3] * 2)
  hermiticity_defect = np.linalg.norm(hmat - np.conj(np.transpose(hmat)))
  assert np.isclose(hermiticity_defect, 0.0)
46 |
47 |
def test_energy(wavelet_tensors):
  """The wavelet MERA reproduces the reference energy two ways."""
  h, iso, dis = wavelet_tensors
  # Start from the maximally mixed three-site state and descend repeatedly.
  s = np.reshape(np.eye(2**3) / 2**3, [2] * 6)
  for _ in range(20):
    s = simple_mera.descend(h, s, iso, dis)
  expected_energy = -1.242
  en = np.trace(np.reshape(s, [2**3, -1]) @ np.reshape(h, [2**3, -1]))
  assert np.isclose(en, expected_energy, rtol=1e-3, atol=1e-3)
  en = simple_mera.binary_mera_energy(h, s, iso, dis)
  assert np.isclose(en, expected_energy, rtol=1e-3, atol=1e-3)
57 |
58 |
def test_opt(wavelet_tensors):
  """Optimization sweeps lower the energy below the wavelet value."""
  h, iso, dis = wavelet_tensors
  # Start from the maximally mixed three-site state and descend repeatedly.
  s = np.reshape(np.eye(2**3) / 2**3, [2] * 6)
  for _ in range(20):
    s = simple_mera.descend(h, s, iso, dis)
  s, iso, dis = simple_mera.optimize_linear(h, s, iso, dis, 100)
  energy = np.trace(np.reshape(s, [2**3, -1]) @ np.reshape(h, [2**3, -1]))
  assert energy < -1.25
67 |
68 |
@pytest.fixture(params=[2, 3])
def random_tensors(request):
  """Random (h, s, iso, dis) tensors for bond dimension D in {2, 3}.

  h is hermitian, s is a positive unit-trace density matrix, and (iso, dis)
  come from the SVD factors of a random matrix; all are cast to complex128.
  """
  D = request.param
  # Fixed seed; note the same key is reused for h, s and a, so the draws
  # are deterministic but correlated (fine for a test fixture).
  key = jax.random.PRNGKey(0)

  # Hermitian "Hamiltonian" term, reshaped to rank 6.
  h = jax.random.normal(key, shape=[D**3] * 2)
  h = 0.5 * (h + np.conj(np.transpose(h)))
  h = np.reshape(h, [D] * 6)

  # Positive (A @ A^dagger) unit-trace density matrix, reshaped to rank 6.
  s = jax.random.normal(key, shape=[D**3] * 2)
  s = s @ np.conj(np.transpose(s))
  s /= np.trace(s)
  s = np.reshape(s, [D] * 6)

  # SVD factors of a random matrix give a disentangler and an isometry.
  a = jax.random.normal(key, shape=[D**2] * 2)
  u, _, vh = np.linalg.svd(a)
  dis = np.reshape(u, [D] * 4)
  iso = np.reshape(vh, [D] * 4)[:, :, :, 0]

  return tuple(x.astype(np.complex128) for x in (h, s, iso, dis))
89 |
90 |
@pytest.fixture
def wavelet_tensors(request):
  """Returns the Hamiltonian and MERA tensors for the D=2 wavelet MERA.

  From Evenbly & White, Phys. Rev. Lett. 116, 140403 (2016).

  Returns:
    A tuple (h, w, u) of complex128 tensors: the Ising Hamiltonian term,
    the rank-3 isometry, and the rank-4 disentangler.
  """
  D = 2
  h = simple_mera.ham_ising()

  # Identity and Pauli matrices.
  E = np.array([[1, 0], [0, 1]])
  X = np.array([[0, 1], [1, 0]])
  Y = np.array([[0, -1j], [1j, 0]])
  Z = np.array([[1, 0], [0, -1]])

  # Wavelet-construction matrices from the paper. The 1.j * kron(X, Y) and
  # 1.j * kron(Y, X) terms are real-valued (Y is purely imaginary), so
  # np.real only strips a zero imaginary part.
  wmat_un = np.real((np.sqrt(3) + np.sqrt(2)) / 4 * np.kron(E, E) +
                    (np.sqrt(3) - np.sqrt(2)) / 4 * np.kron(Z, Z) + 1.j *
                    (1 + np.sqrt(2)) / 4 * np.kron(X, Y) + 1.j *
                    (1 - np.sqrt(2)) / 4 * np.kron(Y, X))

  umat = np.real((np.sqrt(3) + 2) / 4 * np.kron(E, E) +
                 (np.sqrt(3) - 2) / 4 * np.kron(Z, Z) +
                 1.j / 4 * np.kron(X, Y) + 1.j / 4 * np.kron(Y, X))

  # Fixing one index of the reshaped matrix turns it into a rank-3 isometry.
  w = np.reshape(wmat_un, (D, D, D, D))[:, 0, :, :]
  u = np.reshape(umat, (D, D, D, D))

  # Axis reordering — presumably to match simple_mera's index conventions;
  # TODO(review): confirm against simple_mera's documentation.
  w = np.transpose(w, [1, 2, 0])
  u = np.transpose(u, [2, 3, 0, 1])

  return tuple(x.astype(np.complex128) for x in (h, w, u))
121 |
--------------------------------------------------------------------------------
/examples/wavefunctions/evolution_example.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Trotter evolution of exact wavefunctions: Example script."""
15 |
16 | import tensorflow as tf
17 |
18 | from examples.wavefunctions import wavefunctions
19 |
20 |
def ising_hamiltonian(N, dtype):
  """Build the open-chain Ising Hamiltonian as a list of two-site terms.

  Each bulk term is -X.X - Z.I (the single-site Z field absorbed into the
  left site); the last term also carries -I.Z so the final site's field is
  counted exactly once. Returns N-1 terms for an N-site chain.
  """
  X = tf.convert_to_tensor([[0.0, 1.0], [1.0, 0.0]], dtype=dtype)
  Z = tf.convert_to_tensor([[1.0, 0.0], [0.0, -1.0]], dtype=dtype)
  I = tf.eye(2, dtype=dtype)
  bulk_term = -tf.tensordot(X, X, axes=0) - tf.tensordot(Z, I, axes=0)
  last_term = bulk_term - tf.tensordot(I, Z, axes=0)
  # tensordot(axes=0) produces axes (row1, col1, row2, col2); regroup to
  # (row1, row2, col1, col2) — presumably the layout wavefunctions expects.
  bulk_term = tf.transpose(bulk_term, (0, 2, 1, 3))
  last_term = tf.transpose(last_term, (0, 2, 1, 3))
  return [bulk_term] * (N - 2) + [last_term]
31 |
32 |
def random_state(N, d, dtype):
  """Return a normalized random N-site wavefunction.

  Args:
    N: Number of sites.
    d: Local (per-site) dimension.
    dtype: TensorFlow dtype of the returned tensor.

  Returns:
    A rank-N tensor of shape [d] * N with unit norm.
  """
  # [d] * N replaces the original comprehension over an unused loop variable.
  psi = tf.cast(tf.random.uniform([d] * N), dtype)
  psi = tf.divide(psi, tf.norm(psi))
  return psi
37 |
38 |
def callback(psi, t, i):
  # Progress hook passed to the evolution loop: prints the step index, the
  # wavefunction norm, and the expectation value of X on site 0.
  # NOTE(review): reads the global `X` defined in the __main__ block below,
  # so this is only valid when the file is run as a script.
  print(i,
        tf.norm(psi).numpy().real,
        wavefunctions.expval(psi, X, 0).numpy().real)
43 |
44 |
45 | if __name__ == "__main__":
46 | N = 16
47 | dtype = tf.complex128
48 | build_graph = True
49 |
50 | dt = 0.1
51 | num_steps = 100
52 | euclidean_evolution = False
53 |
54 | print("----------------------------------------------------")
55 | print("Evolving a random state by the Ising Hamiltonian.")
56 | print("----------------------------------------------------")
57 | print("System size:", N)
58 | print("Trotter step size:", dt)
59 | print("Euclidean?:", euclidean_evolution)
60 |
61 | X = tf.convert_to_tensor([[0.0, 1.0], [1.0, 0.0]], dtype=dtype)
62 | H = ising_hamiltonian(N, dtype)
63 | psi = random_state(N, 2, dtype)
64 |
65 | if build_graph:
66 | f = wavefunctions.evolve_trotter_defun
67 | else:
68 | f = wavefunctions.evolve_trotter
69 |
70 | print("----------------------------------------------------")
71 | print("step\tnorm\t")
72 | print("----------------------------------------------------")
73 | psi_t, t = f(
74 | psi, H, dt, num_steps, euclidean=euclidean_evolution, callback=callback)
75 |
76 | print("Final norm:", tf.norm(psi_t).numpy().real)
77 | print(":", wavefunctions.inner(psi, psi_t).numpy())
78 |
--------------------------------------------------------------------------------
/examples/wavefunctions/trotter.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Trotter decomposition of a Hamiltonian evolution."""
15 |
16 | import tensorflow as tf
17 |
18 |
def trotter_prepare_gates(H, step_size, num_sites, euclidean):
  """Prepare gates using 1st-order trotter decomposition.

  Currently only implemented for nearest-neighbor Hamiltonians.

  Args:
    H: List of Hamiltonian terms. Should be length num_sites-1.
    step_size: The trotter step size (a scalar).
    num_sites: The total number of sites in the system (an integer).
    euclidean: Whether the evolution is euclidean, or not (boolean).

  Returns:
    layers: A list of layers, with each layer a list of gates, one for each
      site, or `None` if no gate is applied to that site in the layer.

  Raises:
    ValueError: If the number of terms does not match num_sites-1, or if any
      term is not a rank-4 (nearest-neighbor) tensor.
  """
  # Direct inequality replaces the original `if not len(H) == ...` idiom.
  if len(H) != num_sites - 1:
    raise ValueError("Number of H terms must match number of sites - 1.")

  step_size = tf.cast(step_size, tf.float64)  # must be real
  step_size = tf.cast(step_size, H[0].dtype)

  # Sign convention: euclidean evolution exponentiates -dt*H, real-time
  # evolution exponentiates i*dt*H.
  if euclidean:
    step_size = -1.0 * step_size
  else:
    step_size = 1.j * step_size

  eH = []
  for h in H:
    if len(h.shape) != 4:
      raise ValueError("H must be nearest-neighbor.")

    # Matricize the rank-4 term, exponentiate, and restore its shape.
    h_shp = tf.shape(h)
    h_r = tf.reshape(h, (h_shp[0] * h_shp[1], h_shp[2] * h_shp[3]))
    eH.append(tf.reshape(tf.linalg.expm(step_size * h_r), h_shp))

  # Split the exponentiated bond terms into even and odd layers, keyed by
  # the index of the left site each gate acts on.
  eh_even = [None] * num_sites
  eh_odd = [None] * num_sites
  for n, eh in enumerate(eH):
    if n % 2 == 0:
      eh_even[n] = eh
    else:
      eh_odd[n] = eh

  return [eh_even, eh_odd]
64 |
--------------------------------------------------------------------------------
/examples/wavefunctions/wavefunctions_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import pytest
16 | import numpy as np
17 | import tensorflow as tf
18 | import tensornetwork as tn
19 | from examples.wavefunctions import wavefunctions
20 |
21 |
@pytest.mark.parametrize("num_sites", [2, 3, 4])
def test_expval(num_sites):
  """Sz x I on a basis state: -1 on the flipped site, +1 elsewhere."""
  sz_op = np.kron(np.array([[1.0, 0.0], [0.0, -1.0]]), np.eye(2)).reshape(
      [2] * 4)
  sz_op = tf.convert_to_tensor(sz_op)
  for flipped in range(num_sites):
    # Basis state with a single spin flipped at position `flipped`.
    state = np.zeros([2] * num_sites)
    state_vec = state.reshape((2**num_sites,))
    state_vec[2**flipped] = 1.0
    state = tf.convert_to_tensor(state)
    for site in range(num_sites):
      val = wavefunctions.expval(state, sz_op, site, pbc=True)
      expected = -1.0 if site == num_sites - 1 - flipped else 1.0
      np.testing.assert_allclose(val, expected)
37 |
38 |
@pytest.mark.parametrize("num_sites", [2, 3, 4])
def test_apply_op(num_sites):
  """Applying X on the flipped site maps each basis state to |0...0>."""
  reference = np.zeros([2] * num_sites)
  reference.reshape((2**num_sites,))[0] = 1.0
  reference = tf.convert_to_tensor(reference)

  pauli_x = tf.convert_to_tensor(np.array([[0.0, 1.0], [1.0, 0.0]]))

  for flipped in range(num_sites):
    state = np.zeros([2] * num_sites)
    state_vec = state.reshape((2**num_sites,))
    state_vec[2**flipped] = 1.0
    state = tf.convert_to_tensor(state)

    # Flip the spin back; the result should overlap perfectly with |0...0>.
    state = wavefunctions.apply_op(state, pauli_x, num_sites - 1 - flipped)

    overlap = wavefunctions.inner(reference, state)
    np.testing.assert_allclose(overlap, 1.0)
57 |
58 |
@pytest.mark.parametrize("num_sites,phys_dim,graph",
                         [(2, 3, False), (2, 3, True), (5, 2, False)])
def test_evolve_trotter(num_sites, phys_dim, graph):
  """Real-time Trotter evolution approximately conserves norm and energy.

  `graph` selects between the graph-compiled (`evolve_trotter_defun`) and
  eager (`evolve_trotter`) implementations.
  """
  tf.random.set_seed(10)
  # Random complex state and a random Hermitian two-site term; the same
  # term is reused on every bond.
  psi = tf.complex(
      tf.random.normal([phys_dim] * num_sites, dtype=tf.float64),
      tf.random.normal([phys_dim] * num_sites, dtype=tf.float64))
  h = tf.complex(
      tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64),
      tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64))
  h = 0.5 * (h + tf.linalg.adjoint(h))
  h = tf.reshape(h, (phys_dim, phys_dim, phys_dim, phys_dim))
  H = [h] * (num_sites - 1)

  # Norm and energy before evolution.
  norm1 = wavefunctions.inner(psi, psi)
  en1 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))

  if graph:
    psi, t = wavefunctions.evolve_trotter_defun(psi, H, 0.001, 10)
  else:
    psi, t = wavefunctions.evolve_trotter(psi, H, 0.001, 10)

  # Norm and energy after 10 steps of size 0.001 (total time 0.01).
  norm2 = wavefunctions.inner(psi, psi)
  en2 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))

  np.testing.assert_allclose(t, 0.01)
  # Unitary evolution: norm conserved tightly, energy to Trotter accuracy.
  np.testing.assert_almost_equal(norm1 / norm2, 1.0)
  np.testing.assert_almost_equal(en1 / en2, 1.0, decimal=2)
87 |
88 |
@pytest.mark.parametrize("num_sites,phys_dim,graph",
                         [(2, 3, False), (2, 3, True), (5, 2, False)])
def test_evolve_trotter_euclidean(num_sites, phys_dim, graph):
  """Imaginary-time Trotter evolution normalizes and lowers the energy.

  `graph` selects between the graph-compiled (`evolve_trotter_defun`) and
  eager (`evolve_trotter`) implementations.
  """
  tf.random.set_seed(10)
  # Random complex state and a random Hermitian two-site term; the same
  # term is reused on every bond.
  psi = tf.complex(
      tf.random.normal([phys_dim] * num_sites, dtype=tf.float64),
      tf.random.normal([phys_dim] * num_sites, dtype=tf.float64))
  h = tf.complex(
      tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64),
      tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64))
  h = 0.5 * (h + tf.linalg.adjoint(h))
  h = tf.reshape(h, (phys_dim, phys_dim, phys_dim, phys_dim))
  H = [h] * (num_sites - 1)

  # Norm and energy before evolution.
  norm1 = wavefunctions.inner(psi, psi)
  en1 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))

  if graph:
    psi, t = wavefunctions.evolve_trotter_defun(psi, H, 0.1, 10, euclidean=True)
  else:
    psi, t = wavefunctions.evolve_trotter(psi, H, 0.1, 10, euclidean=True)

  # After 10 steps of size 0.1 (total time 1.0) the state should come out
  # normalized, and the energy must not increase.
  norm2 = wavefunctions.inner(psi, psi)
  en2 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))

  np.testing.assert_allclose(t, 1.0)
  np.testing.assert_almost_equal(norm2, 1.0)
  assert en2.numpy() / norm2.numpy() < en1.numpy() / norm1.numpy()
117 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.17
2 | graphviz>=0.11.1
3 | opt_einsum>=2.3.0
4 | h5py>=2.9.0
5 | scipy>=1.1
6 |
--------------------------------------------------------------------------------
/requirements_travis.txt:
--------------------------------------------------------------------------------
1 | tensorflow>=2.0.0
2 | pytest
3 | torch==1.8.1 # TODO (mganahl): remove restriction once torch.tensordot bug is fixed (https://github.com/pytorch/pytorch/issues/65524)
4 | jax>=0.1.68
5 | jaxlib>=0.1.59
6 | pylint==2.5.3
7 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright 2019 The TensorNetwork Developers
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # https://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
from setuptools import find_packages, setup

# This reads the __version__ variable from tensornetwork/version.py
with open('tensornetwork/version.py') as f:
  exec(f.read(), globals())

description = ('A high level tensor network API for '
               'accelerated tensor network calculations.')

# Reading long Description from README.md file.
with open("README.md", "r") as fh:
  long_description = fh.read()

# Read in requirements. Use a context manager so the file handle is closed
# deterministically (the previous bare open() leaked it to the GC).
with open('requirements.txt') as reqs:
  requirements = [line.strip() for line in reqs]

setup(
    name='tensornetwork',
    version=__version__,
    url='http://github.com/google/TensorNetwork',
    author='The TensorNetwork Developers',
    author_email='martin.ganahl@gmail.com',
    python_requires=('>=3.7.0'),
    install_requires=requirements,
    license='Apache 2.0',
    description=description,
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
)
48 |
--------------------------------------------------------------------------------
/tensornetwork/__init__.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.network_components import (AbstractNode, CopyNode, Edge,
2 | Node, NodeCollection)
3 | from tensornetwork.network_operations import (
4 | check_connected, check_correct, contract_trace_edges, copy, get_all_edges,
5 | get_all_nodes, get_neighbors, get_subgraph_dangling, reachable,
6 | reduced_density, remove_node, replicate_nodes, split_node,
7 | split_node_full_svd, split_node_qr, split_node_rq, switch_backend,
8 | nodes_to_json, nodes_from_json, redirect_edge)
9 |
10 | from tensornetwork.tensor import Tensor, NconBuilder
11 | from tensornetwork.linalg.initialization import (eye, ones, randn,
12 | random_uniform, zeros)
13 |
14 | from tensornetwork.linalg.linalg import norm, qr, svd, rq, eigh, expm, inv
15 |
16 | #pylint: disable=redefined-builtin
17 | from tensornetwork.linalg.operations import (tensordot, reshape, transpose,
18 | take_slice, shape, sqrt, outer,
19 | einsum, conj, hconj, sin, cos, exp,
20 | log, diagonal, diagflat, trace,
21 | sign, abs, kron, pivot)
22 |
23 | from tensornetwork.backends.decorators import jit
24 |
25 | from tensornetwork.network_components import (
26 | contract, contract_between, contract_copy_node, contract_parallel,
27 | flatten_all_edges, flatten_edges, flatten_edges_between,
28 | get_all_nondangling, get_all_dangling, get_parallel_edges, get_shared_edges,
29 | outer_product, outer_product_final_nodes, slice_edge, split_edge)
30 | from tensornetwork.backends.abstract_backend import AbstractBackend
31 | from tensornetwork.network_components import connect, disconnect
32 | from tensornetwork.ncon_interface import ncon, finalize
33 | from tensornetwork.version import __version__
34 | from tensornetwork.visualization.graphviz import to_graphviz
35 | from tensornetwork import contractors
36 | from tensornetwork.utils import load_nodes, save_nodes, from_topology
37 | from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS
38 | from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
39 | from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
40 | from tensornetwork.matrixproductstates.mpo import (FiniteMPO, FiniteTFI,
41 | FiniteXXZ,
42 | FiniteFreeFermion2D)
43 | from tensornetwork.backend_contextmanager import DefaultBackend
44 | from tensornetwork.backend_contextmanager import set_default_backend
45 | from tensornetwork import block_sparse
46 | from tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor
47 | from tensornetwork.block_sparse.blocksparsetensor import ChargeArray
48 | from tensornetwork.block_sparse.index import Index
49 | from tensornetwork.block_sparse.charge import U1Charge, BaseCharge, Z2Charge
50 | from tensornetwork.block_sparse.charge import ZNCharge
51 |
--------------------------------------------------------------------------------
/tensornetwork/backend_contextmanager.py:
--------------------------------------------------------------------------------
1 | from typing import Text, Union
2 | from tensornetwork.backends.abstract_backend import AbstractBackend
3 | from tensornetwork.backends import backend_factory
4 |
5 |
class DefaultBackend():
  """Context manager setting the default backend for nodes created inside it.

  Example:
    with DefaultBackend("tensorflow"):
      node = tn.Node(tensor)  # uses the tensorflow backend
  """

  def __init__(self, backend: Union[Text, AbstractBackend]) -> None:
    if not isinstance(backend, (Text, AbstractBackend)):
      # Message fixed: the accepted base class is AbstractBackend (the old
      # name "BaseBackend" was stale).
      raise ValueError("Item passed to DefaultBackend "
                       "must be Text or AbstractBackend")
    self.backend = backend

  def __enter__(self):
    _default_backend_stack.stack.append(self)
    # Returning self lets callers write `with DefaultBackend(...) as ctx:`.
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    _default_backend_stack.stack.pop()
20 |
21 |
22 | class _DefaultBackendStack():
23 | """A stack to keep track default backends context manager"""
24 |
25 | def __init__(self):
26 | self.stack = []
27 | self.default_backend = "numpy"
28 |
29 | def get_current_backend(self):
30 | return self.stack[-1].backend if self.stack else self.default_backend
31 |
32 |
# Module-level singleton holding the active backend-context stack.
_default_backend_stack = _DefaultBackendStack()
34 |
35 |
def get_default_backend():
  """Return the backend currently in effect (innermost context, else the
  global default)."""
  return _default_backend_stack.get_current_backend()
38 |
39 |
def set_default_backend(backend: Union[Text, AbstractBackend]) -> None:
  """Globally set the fallback backend used when no context is active.

  Args:
    backend: The name of a registered backend, or a backend instance.

  Raises:
    AssertionError: If called inside a `DefaultBackend` context manager.
    ValueError: If `backend` is neither Text nor AbstractBackend, or names
      an unknown backend.
  """
  if _default_backend_stack.stack:
    raise AssertionError("The default backend should not be changed "
                         "inside the backend context manager")
  if not isinstance(backend, (Text, AbstractBackend)):
    # Message fixed: the accepted base class is AbstractBackend (the old
    # name "BaseBackend" was stale).
    raise ValueError("Item passed to set_default_backend "
                     "must be Text or AbstractBackend")
  if isinstance(backend, Text) and backend not in backend_factory._BACKENDS:
    raise ValueError(f"Backend '{backend}' was not found.")
  _default_backend_stack.default_backend = backend
50 |
--------------------------------------------------------------------------------
/tensornetwork/backends/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/backends/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/backends/backend_factory.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from typing import Union, Text
16 | from tensornetwork.backends.tensorflow import tensorflow_backend
17 | from tensornetwork.backends.numpy import numpy_backend
18 | from tensornetwork.backends.jax import jax_backend
19 | from tensornetwork.backends.pytorch import pytorch_backend
20 | from tensornetwork.backends.symmetric import symmetric_backend
21 | from tensornetwork.backends import abstract_backend
# Registry mapping backend names to their (uninstantiated) backend classes.
_BACKENDS = {
    "tensorflow": tensorflow_backend.TensorFlowBackend,
    "numpy": numpy_backend.NumPyBackend,
    "jax": jax_backend.JaxBackend,
    "pytorch": pytorch_backend.PyTorchBackend,
    "symmetric": symmetric_backend.SymmetricBackend
}

# Backends are instantiated lazily, at most once each, and cached here.
_INSTANTIATED_BACKENDS = dict()
32 |
33 |
def get_backend(
    backend: Union[Text, abstract_backend.AbstractBackend]
) -> abstract_backend.AbstractBackend:
  """Resolve `backend` to a backend instance.

  Backend objects are passed through unchanged; names are looked up in
  `_BACKENDS`, instantiated at most once, and cached in
  `_INSTANTIATED_BACKENDS`.
  """
  if isinstance(backend, abstract_backend.AbstractBackend):
    return backend
  if backend not in _BACKENDS:
    raise ValueError("Backend '{}' does not exist".format(backend))
  # Instantiate on first use; afterwards serve the cached instance.
  if backend not in _INSTANTIATED_BACKENDS:
    _INSTANTIATED_BACKENDS[backend] = _BACKENDS[backend]()
  return _INSTANTIATED_BACKENDS[backend]
47 |
--------------------------------------------------------------------------------
/tensornetwork/backends/decorators.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Decorator functions that depend on the backend.
17 | """
18 | from typing import Union, Iterable, Optional, Text, Callable
19 | import functools
20 | import tensornetwork.backends.abstract_backend as abstract_backend
21 | import tensornetwork.backends as backends
22 | import tensornetwork.backend_contextmanager as backend_contextmanager
23 |
24 | AbstractBackend = abstract_backend.AbstractBackend
25 |
def jit(fun: Callable,
        backend: Union[Text, AbstractBackend] = None,
        backend_argnum: Optional[int] = None,
        static_argnums: Union[int, Iterable[int]] = (), device=None,
        xla_backend: Optional[str] = None) -> Callable:
  """
  Return a jitted or graph-compiled version of `fun`
  for JAX backend. For all other backends returns `fun`.
  Args:
    fun: Callable
    backend: The backend, or `None` to use the current default backend.
      Must be `None` if `backend_argnum` is specified.
    backend_argnum: Labels the argument of the decorated function which
      specifies the backend.
      This argument will be treated
      as static in the sense of static_argnums.
      If backend_argnum is specified, backend must be None.
    static_argnums: Label the arguments which will be statically compiled
      against.
    device: An optional device the jitted function is to run on (passed
      through to the backend's `jit`).
    xla_backend: Specifies the backend ('gpu', 'cpu'...) against which
      XLA is to run.

  Raises:
    ValueError: If backend_argnum is specified but backend is not None.

                If backend_argnum is specified but the corresponding
                argument neither is nor labels a backend.
  Returns:
    Callable: jitted/graph-compiled version of `fun`, or just `fun`.
  """
  argnum_mode = False
  if backend_argnum is not None:
    if backend is not None:
      raise ValueError("backend must be None if backend_argnum is specified.")
    argnum_mode = True
    # The backend argument must itself be static so that switching backends
    # triggers recompilation.
    static_argnums = tuple(list(static_argnums) + [backend_argnum,])

  if not argnum_mode:
    # Backend is fixed at decoration time.
    if backend is None:
      backend = backend_contextmanager.get_default_backend()
    backend_obj = backends.backend_factory.get_backend(backend)

    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
      jitted = backend_obj.jit(fun, static_argnums=static_argnums,
                               device=device, backend=xla_backend)
      return jitted(*args, **kwargs)
  else:
    # Backend is read from the call arguments at each invocation.
    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
      backend = args[backend_argnum]
      try:
        backend_obj = backends.backend_factory.get_backend(backend)
      except ValueError as error:
        errstr = (f"backend_argnum={backend_argnum} was specified"
                  f"but the corresponding argument {args[backend_argnum]}"
                  f"did not specify a backend.")
        raise ValueError(errstr) from error
      jitted = backend_obj.jit(fun, static_argnums=static_argnums,
                               device=device, backend=xla_backend)
      return jitted(*args, **kwargs)
  return wrapper
90 |
--------------------------------------------------------------------------------
/tensornetwork/backends/decorators_test.py:
--------------------------------------------------------------------------------
1 | """Tests for decorators."""
2 | import pytest
3 | import numpy as np
4 | import functools
5 | from tensornetwork.backends.abstract_backend import AbstractBackend
6 | from tensornetwork.backends import backend_factory
7 | from tensornetwork import backends
8 | import tensornetwork
9 |
def jittest_init(backend):
  """
  Helper to initialize data for the other Jit tests.

  Builds two vectors, a matrix, and a function combining them, all using
  the requested backend. Returns `(x, y, A, fun)`.
  """
  backend_obj = backends.backend_factory.get_backend(backend)

  def fun(x, A, y):
    return backend_obj.multiply(x, backend_obj.multiply(A, y))

  vec_x = backend_obj.randn((4,), seed=11)
  vec_y = backend_obj.randn((4,), seed=11)
  mat_a = backend_obj.randn((4, 4), seed=11)
  return (vec_x, vec_y, mat_a, fun)
21 |
22 |
def test_jit(backend):
  """
  Tests that tn.jit gives the right answer.
  """
  x, y, A, fun = jittest_init(backend)
  jitted = tensornetwork.jit(fun, backend=backend)
  expected = fun(x, A, y)
  actual = jitted(x, A, y)
  np.testing.assert_allclose(expected, actual)
32 |
33 |
def test_jit_ampersand(backend):
  """
  Tests that tn.jit gives the right answer when used as a decorator.
  """
  x, y, A, fun = jittest_init(backend)

  @functools.partial(tensornetwork.jit, static_argnums=(3,), backend=backend)
  def fun_jit(x, A, y, dummy):
    del dummy  # present only to exercise static_argnums
    return fun(x, A, y)

  expected = fun(x, A, y)
  actual = fun_jit(x, A, y, 2)
  np.testing.assert_allclose(expected, actual)
46 |
47 |
def test_jit_args(backend):
  """
  Tests that tn.jit gives the right answer when given extra arguments.
  """
  x, y, A, fun = jittest_init(backend)
  jitted = tensornetwork.jit(fun, backend=backend)
  expected = fun(x, A, y)
  np.testing.assert_allclose(expected, jitted(x, A, y))
  # Keyword arguments must also pass through the jitted wrapper.
  np.testing.assert_allclose(expected, jitted(x, y=y, A=A))
59 |
60 |
def test_jit_backend_argnum_is_string(backend):
  """
  Tests that tn.jit gives the right answer when the backend is supplied
  via backend_argnum as a string.
  """
  x, y, A, fun = jittest_init(backend)

  @functools.partial(tensornetwork.jit, backend_argnum=3)
  def fun_jit(x, A, y, the_backend):
    del the_backend  # consumed by the jit wrapper, not the function
    return fun(x, A, y)

  expected = fun(x, A, y)
  actual = fun_jit(x, A, y, backend)
  np.testing.assert_allclose(expected, actual)
75 |
76 |
def test_jit_backend_argnum_is_obj(backend):
  """
  Tests that tn.jit gives the right answer when the backend is supplied
  via backend_argnum as a backend object.
  """
  x, y, A, fun = jittest_init(backend)

  @functools.partial(tensornetwork.jit, backend_argnum=3)
  def fun_jit(x, A, y, the_backend):
    del the_backend  # consumed by the jit wrapper, not the function
    return fun(x, A, y)

  expected = fun(x, A, y)
  backend_obj = backends.backend_factory.get_backend(backend)
  actual = fun_jit(x, A, y, backend_obj)
  np.testing.assert_allclose(expected, actual)
92 |
93 |
def test_jit_backend_argnum_invalid(backend):
  """
  Tests that tn.jit raises ValueError when backend_argnum points to something
  other than a backend.
  """
  x, y, A, fun = jittest_init(backend)

  with pytest.raises(ValueError):

    @functools.partial(tensornetwork.jit, backend_argnum=3)
    def fun_jit(x, A, y, the_backend):
      del the_backend
      return fun(x, A, y)

    _ = fun_jit(x, A, y, 99)  # 99 neither is nor names a backend
107 |
108 |
def test_jit_backend_and_backend_obj_raises_error(backend):
  """
  Tests that tn.jit raises ValueError when backend_argnum and backend
  are both specified.
  """
  x, y, A, fun = jittest_init(backend)

  with pytest.raises(ValueError):

    @functools.partial(tensornetwork.jit, backend_argnum=3, backend=backend)
    def fun_jit(x, A, y, the_backend):
      del the_backend
      return fun(x, A, y)

    _ = fun_jit(x, A, y, backend)
122 |
--------------------------------------------------------------------------------
/tensornetwork/backends/jax/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tensornetwork/backends/numpy/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tensornetwork/backends/numpy/decompositions.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tensor Decomposition Numpy Implementation."""
15 |
16 | from typing import Optional, Any, Tuple
17 | import numpy
18 | Tensor = Any
19 |
20 |
def svd(
    np,  # TODO: Typing
    tensor: Tensor,
    pivot_axis: int,
    max_singular_values: Optional[int] = None,
    max_truncation_error: Optional[float] = None,
    relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
  """Computes the singular value decomposition (SVD) of a tensor.

  See tensornetwork.backends.tensorflow.decompositions for details.
  """
  left_dims = tensor.shape[:pivot_axis]
  right_dims = tensor.shape[pivot_axis:]

  # Flatten to a matrix, splitting rows from columns at the pivot axis.
  matrix = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)])
  u, s, vh = np.linalg.svd(matrix, full_matrices=False)

  if max_singular_values is None:
    max_singular_values = np.size(s)

  if max_truncation_error is None:
    num_sing_vals_err = max_singular_values
  else:
    # Cumulative norms of singular values in ascending order.
    trunc_errs = np.sqrt(np.cumsum(np.square(s[::-1])))
    # A relative tolerance is rescaled by the largest singular value to
    # obtain the absolute maximal truncation error.
    if relative:
      abs_max_truncation_error = max_truncation_error * s[0]
    else:
      abs_max_truncation_error = max_truncation_error
    # Minimum number of singular values to keep so that the truncation
    # error stays <= abs_max_truncation_error.
    num_sing_vals_err = np.count_nonzero(
        (trunc_errs > abs_max_truncation_error).astype(np.int32))

  num_sing_vals_keep = min(max_singular_values, num_sing_vals_err)

  # The returned singular values may be real even for a complex input;
  # recast to the input dtype so later contractions don't hit mismatches.
  s = s.astype(tensor.dtype)

  s_rest = s[num_sing_vals_keep:]
  s = s[:num_sing_vals_keep]
  u = u[:, :num_sing_vals_keep]
  vh = vh[:num_sing_vals_keep, :]

  center_dim = s.shape[0]
  u = np.reshape(u, list(left_dims) + [center_dim])
  vh = np.reshape(vh, [center_dim] + list(right_dims))

  return u, s, vh, s_rest
75 |
76 |
def qr(
    np,  # TODO: Typing
    tensor: Tensor,
    pivot_axis: int,
    non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
  """Computes the QR decomposition of a tensor.

  See tensornetwork.backends.tensorflow.decompositions for details.
  """
  left_dims = list(tensor.shape[:pivot_axis])
  right_dims = list(tensor.shape[pivot_axis:])
  matrix = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)])
  q, r = np.linalg.qr(matrix)
  if non_negative_diagonal:
    # Absorb the diagonal phases of r into q, leaving r's diagonal
    # non-negative.
    phases = np.sign(np.diagonal(r))
    q = q * phases
    r = phases.conj()[:, None] * r
  center_dim = q.shape[1]
  q = np.reshape(q, left_dims + [center_dim])
  r = np.reshape(r, [center_dim] + right_dims)
  return q, r
99 |
100 |
def rq(
    np,  # TODO: Typing
    tensor: Tensor,
    pivot_axis: int,
    non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
  """Computes the RQ (reversed QR) decomposition of a tensor.

  See tensornetwork.backends.tensorflow.decompositions for details.
  """
  left_dims = list(tensor.shape[:pivot_axis])
  right_dims = list(tensor.shape[pivot_axis:])
  matrix = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)])
  # QR-decompose the conjugate transpose, then transpose back, giving
  # M = r @ q with r lower-triangular-like on the left.
  q, r = np.linalg.qr(np.conj(np.transpose(matrix)))
  if non_negative_diagonal:
    # Absorb the diagonal phases of r into q, leaving r's diagonal
    # non-negative.
    phases = np.sign(np.diagonal(r))
    q = q * phases
    r = phases.conj()[:, None] * r
  r, q = np.conj(np.transpose(r)), np.conj(np.transpose(q))
  center_dim = r.shape[1]
  r = np.reshape(r, left_dims + [center_dim])
  q = np.reshape(q, [center_dim] + right_dims)
  return r, q
--------------------------------------------------------------------------------
/tensornetwork/backends/numpy/decompositions_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import math
16 | import numpy as np
17 | import tensorflow as tf
18 | from tensornetwork.backends.numpy import decompositions
19 |
20 |
class DecompositionsTest(tf.test.TestCase):
  """Shape, reconstruction and truncation tests for numpy-backend svd/qr/rq."""

  def test_expected_shapes(self):
    # Splitting a (2, 3, 4, 5) tensor after axis 2 gives a bond of size 6.
    tensor = np.zeros((2, 3, 4, 5))
    u, s, vh, _ = decompositions.svd(np, tensor, 2)
    self.assertEqual(u.shape, (2, 3, 6))
    self.assertEqual(s.shape, (6,))
    self.assertAllClose(s, np.zeros(6))
    self.assertEqual(vh.shape, (6, 4, 5))

  def test_expected_shapes_qr(self):
    tensor = np.zeros((2, 3, 4, 5))
    q, r = decompositions.qr(np, tensor, 2, False)
    self.assertEqual(q.shape, (2, 3, 6))
    self.assertEqual(r.shape, (6, 4, 5))

  def test_expected_shapes_rq(self):
    tensor = np.zeros((2, 3, 4, 5))
    r, q = decompositions.rq(np, tensor, 2, False)
    self.assertEqual(r.shape, (2, 3, 6))
    self.assertEqual(q.shape, (6, 4, 5))

  def test_rq(self):
    # r @ q must reproduce the input for both diagonal conventions.
    matrix = np.random.rand(10, 10)
    for fix_diagonal in (True, False):
      r, q = decompositions.rq(np, matrix, 1, fix_diagonal)
      self.assertAllClose(r.dot(q), matrix)

  def test_qr(self):
    # q @ r must reproduce the input for both diagonal conventions.
    matrix = np.random.rand(10, 10)
    for fix_diagonal in (True, False):
      q, r = decompositions.qr(np, matrix, 1, fix_diagonal)
      self.assertAllClose(q.dot(r), matrix)

  def test_max_singular_values(self):
    # Build a matrix with known singular values 0..9, then keep the top 7.
    left, _, right = np.linalg.svd(np.random.rand(10, 10))
    val = left.dot(np.diag(np.arange(10)).dot(right.T))
    u, s, vh, trun = decompositions.svd(
        np, val, 1, max_singular_values=7)
    self.assertEqual(u.shape, (10, 7))
    self.assertEqual(s.shape, (7,))
    self.assertAllClose(s, np.arange(9, 2, -1))
    self.assertEqual(vh.shape, (7, 10))
    self.assertAllClose(trun, np.arange(2, -1, -1))

  def test_max_singular_values_larger_than_bond_dimension(self):
    # Requesting more singular values than exist must be a no-op.
    left, _, right = np.linalg.svd(np.random.rand(10, 6), full_matrices=False)
    val = left.dot(np.diag(np.arange(6)).dot(right.T))
    u, s, vh, _ = decompositions.svd(
        np, val, 1, max_singular_values=30)
    self.assertEqual(u.shape, (10, 6))
    self.assertEqual(s.shape, (6,))
    self.assertEqual(vh.shape, (6, 6))

  def test_max_truncation_error(self):
    # Singular values 0..9; a tolerance of sqrt(5.1) discards 0, 1 and 2
    # (0^2 + 1^2 + 2^2 = 5 <= 5.1) and keeps the remaining seven.
    left, _, right = np.linalg.svd(np.random.rand(10, 10))
    val = left.dot(np.diag(np.arange(10)).dot(right.T))
    u, s, vh, trun = decompositions.svd(
        np, val, 1, max_truncation_error=math.sqrt(5.1))
    self.assertEqual(u.shape, (10, 7))
    self.assertEqual(s.shape, (7,))
    self.assertAllClose(s, np.arange(9, 2, -1))
    self.assertEqual(vh.shape, (7, 10))
    self.assertAllClose(trun, np.arange(2, -1, -1))

  def test_max_truncation_error_relative(self):
    # relative=True scales the tolerance by the largest singular value,
    # so one additional value (0.2) falls below the threshold.
    absolute = np.diag([2.0, 1.0, 0.2, 0.1])
    relative = np.diag([2.0, 1.0, 0.2, 0.1])
    max_truncation_err = 0.2
    _, _, _, trunc_sv_absolute = decompositions.svd(
        np,
        absolute,
        1,
        max_truncation_error=max_truncation_err,
        relative=False)
    _, _, _, trunc_sv_relative = decompositions.svd(
        np, relative, 1, max_truncation_error=max_truncation_err, relative=True)
    np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
    np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])
106 |
107 |
# Run the tests with TensorFlow's test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
110 |
--------------------------------------------------------------------------------
/tensornetwork/backends/pytorch/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/backends/pytorch/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/backends/pytorch/decompositions_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import math
16 | import numpy as np
17 | import torch
18 | from tensornetwork.backends.pytorch import decompositions
19 |
20 |
def test_expected_shapes():
  """svd of a (2, 3, 4, 5) tensor split after axis 2 yields a bond of 6."""
  tensor = torch.zeros((2, 3, 4, 5))
  u, s, vh, _ = decompositions.svd(torch, tensor, 2)
  assert u.shape == (2, 3, 6)
  assert s.shape == (6,)
  np.testing.assert_allclose(s, np.zeros(6))
  assert vh.shape == (6, 4, 5)
28 |
29 |
def test_expected_shapes_qr():
  """qr splits (2, 3 | 4, 5) into q: (2, 3, 6) and r: (6, 4, 5)."""
  tensor = torch.zeros((2, 3, 4, 5))
  for fix_diagonal in (True, False):
    q, r = decompositions.qr(torch, tensor, 2, fix_diagonal)
    assert q.shape == (2, 3, 6)
    assert r.shape == (6, 4, 5)
36 |
37 |
def test_expected_shapes_rq():
  """rq splits (2, 3 | 4, 5) into r: (2, 3, 6) and q: (6, 4, 5)."""
  tensor = torch.zeros((2, 3, 4, 5))
  for fix_diagonal in (True, False):
    r, q = decompositions.rq(torch, tensor, 2, fix_diagonal)
    assert r.shape == (2, 3, 6)
    assert q.shape == (6, 4, 5)
44 |
45 |
def test_rq():
  """r @ q reproduces the input for both diagonal conventions."""
  matrix = torch.rand([10, 10], dtype=torch.float64)
  for fix_diagonal in (True, False):
    r, q = decompositions.rq(torch, matrix, 1, fix_diagonal)
    np.testing.assert_allclose(r.mm(q), matrix)
51 |
52 |
def test_qr():
  """q @ r reproduces the input for both diagonal conventions.

  Bug fix: this test previously called `decompositions.rq` (a copy-paste
  from `test_rq`), so the QR reconstruction path was never exercised.
  """
  random_matrix = torch.rand([10, 10], dtype=torch.float64)
  for non_negative_diagonal in [True, False]:
    q, r = decompositions.qr(torch, random_matrix, 1, non_negative_diagonal)
    np.testing.assert_allclose(q.mm(r), random_matrix)
58 |
59 |
def test_max_singular_values():
  """Keeping the top 7 of singular values 0..9 truncates 2, 1 and 0."""
  np.random.seed(2018)
  left, _, right = np.linalg.svd(np.random.rand(10, 10))
  val = left.dot(np.diag(np.arange(10)).dot(right.T))
  u, s, vh, trun = decompositions.svd(
      torch, torch.tensor(val), 1, max_singular_values=7)
  assert u.shape == (10, 7)
  assert s.shape == (7,)
  np.testing.assert_array_almost_equal(s, np.arange(9, 2, -1))
  assert vh.shape == (7, 10)
  np.testing.assert_array_almost_equal(trun, np.arange(2, -1, -1))
73 |
74 |
def test_max_truncation_error():
  """sqrt(5.1) tolerance drops singular values 0, 1, 2 (0+1+4 = 5 <= 5.1)."""
  np.random.seed(2019)
  left, _, right = np.linalg.svd(np.random.rand(10, 10))
  val = left.dot(np.diag(np.arange(10)).dot(right.T))
  u, s, vh, trun = decompositions.svd(
      torch, torch.Tensor(val), 1, max_truncation_error=math.sqrt(5.1))
  assert u.shape == (10, 7)
  assert s.shape == (7,)
  np.testing.assert_array_almost_equal(s, np.arange(9, 2, -1), decimal=5)
  assert vh.shape == (7, 10)
  np.testing.assert_array_almost_equal(trun, np.arange(2, -1, -1))
88 |
89 |
def test_max_truncation_error_relative():
  """relative=True scales the truncation tolerance by the largest value."""
  absolute = np.diag([2.0, 1.0, 0.2, 0.1])
  relative = np.diag([2.0, 1.0, 0.2, 0.1])
  threshold = 0.2
  _, _, _, trunc_abs = decompositions.svd(
      torch,
      torch.Tensor(absolute),
      1,
      max_truncation_error=threshold,
      relative=False)
  _, _, _, trunc_rel = decompositions.svd(
      torch,
      torch.Tensor(relative),
      1,
      max_truncation_error=threshold,
      relative=True)
  np.testing.assert_almost_equal(trunc_abs, [0.1])
  np.testing.assert_almost_equal(trunc_rel, [0.2, 0.1])
108 |
--------------------------------------------------------------------------------
/tensornetwork/backends/pytorch/pytorch_tensornetwork_test.py:
--------------------------------------------------------------------------------
1 | """Tests for graphmode_tensornetwork."""
2 | import numpy as np
3 | from tensornetwork import (connect, contract, contract_between,
4 | flatten_edges_between, Node)
5 | import torch
6 |
7 |
def test_basic_graphmode():
  """Contracting two all-ones length-10 vectors gives 10."""
  left = Node(torch.ones(10), backend="pytorch")
  right = Node(torch.ones(10), backend="pytorch")
  edge = connect(left[0], right[0])
  assert contract(edge).get_tensor() == 10.0
14 |
15 |
def test_gradient_decent():
  """One SGD step on the contraction's norm moves the variable leaf."""
  var = Node(
      torch.autograd.Variable(torch.ones(10), requires_grad=True),
      backend="pytorch")
  const = Node(torch.ones(10), backend="pytorch")
  edge = connect(var[0], const[0])
  final_tensor = contract(edge).get_tensor()
  optimizer = torch.optim.SGD([var.tensor], lr=0.001)
  optimizer.zero_grad()
  final_tensor.norm().backward()
  optimizer.step()
  # The already-computed contraction value is unchanged by the step.
  np.testing.assert_allclose(final_tensor.data, 10)
  # Each entry of the variable moved by lr * d|10|/da_i = 0.001.
  np.testing.assert_allclose(var.tensor.data, 0.999 * np.ones((10,)))
  assert final_tensor == 10
30 |
31 |
def test_dynamic_network_sizes():
  """The same network function works for different slice lengths."""

  def contract_prefix(x, n):
    # Contract the first n entries of x with themselves.
    sliced = x[:n]
    left = Node(sliced, backend="pytorch")
    right = Node(sliced, backend="pytorch")
    return contract(connect(left[0], right[0])).get_tensor()

  x = torch.ones(10)
  assert contract_prefix(x, 2) == 2.
  assert contract_prefix(x, 3) == 3.
44 |
45 |
def test_dynamic_network_sizes_contract_between():
  """contract_between handles tensors sliced to different last-axis sizes."""

  def full_overlap(x, n):
    sliced = x[..., :n]
    n1 = Node(sliced, backend="pytorch")
    n2 = Node(sliced, backend="pytorch")
    for axis in range(3):
      connect(n1[axis], n2[axis])
    return contract_between(n1, n2).get_tensor()

  x = torch.ones((3, 4, 5))
  assert full_overlap(x, 2) == 24.
  assert full_overlap(x, 3) == 36.
60 |
61 |
def test_dynamic_network_sizes_flatten_standard():
  """Flattening parallel edges then contracting works for varying slices."""

  def full_overlap(x, n):
    sliced = x[..., :n]
    n1 = Node(sliced, backend="pytorch")
    n2 = Node(sliced, backend="pytorch")
    for axis in range(3):
      connect(n1[axis], n2[axis])
    return contract(flatten_edges_between(n1, n2)).get_tensor()

  x = torch.ones((3, 4, 5))
  assert full_overlap(x, 2) == 24.
  assert full_overlap(x, 3) == 36.
76 |
77 |
def test_dynamic_network_sizes_flatten_trace():
  """Flattening trace edges of a single node works for varying slices."""

  def partial_trace(x, n):
    sliced = x[..., :n]
    node = Node(sliced, backend="pytorch")
    connect(node[0], node[2])
    connect(node[1], node[3])
    return contract(flatten_edges_between(node, node)).get_tensor()

  x = torch.ones((3, 4, 3, 4, 5))
  np.testing.assert_allclose(partial_trace(x, 2), np.ones((2,)) * 12)
  np.testing.assert_allclose(partial_trace(x, 3), np.ones((3,)) * 12)
90 |
--------------------------------------------------------------------------------
/tensornetwork/backends/symmetric/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/backends/symmetric/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/backends/tensorflow/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/backends/tensorflow/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/backends/tensorflow/tensorflow_tensornetwork_test.py:
--------------------------------------------------------------------------------
1 | """Tests for graphmode_tensornetwork."""
2 | import numpy as np
3 | import tensorflow as tf
4 | from tensornetwork import (contract, connect, flatten_edges_between,
5 | contract_between, Node)
6 | import pytest
7 |
8 |
9 | class GraphmodeTensorNetworkTest(tf.test.TestCase):
10 |
11 | def test_basic_graphmode(self):
12 | # pylint: disable=not-context-manager
13 | with tf.compat.v1.Graph().as_default():
14 | a = Node(tf.ones(10), backend="tensorflow")
15 | b = Node(tf.ones(10), backend="tensorflow")
16 | e = connect(a[0], b[0])
17 | final_tensor = contract(e).get_tensor()
18 |
19 | sess = tf.compat.v1.Session()
20 | final_val = sess.run(final_tensor)
21 | self.assertAllClose(final_val, 10.0)
22 |
23 | def test_gradient_decent(self):
24 | # pylint: disable=not-context-manager
25 | with tf.compat.v1.Graph().as_default():
26 | a = Node(tf.Variable(tf.ones(10)), backend="tensorflow")
27 | b = Node(tf.ones(10), backend="tensorflow")
28 | e = connect(a[0], b[0])
29 | final_tensor = contract(e).get_tensor()
30 | opt = tf.compat.v1.train.GradientDescentOptimizer(0.001)
31 | train_op = opt.minimize(final_tensor)
32 | sess = tf.compat.v1.Session()
33 | sess.run(tf.compat.v1.global_variables_initializer())
34 | self.assertAllClose(sess.run(final_tensor), 10.0)
35 | sess.run(train_op)
36 | self.assertLess(sess.run(final_tensor), 10.0)
37 |
38 | def test_dynamic_network_sizes(self):
39 |
40 | @tf.function
41 | def f(x, n):
42 | x_slice = x[:n]
43 | n1 = Node(x_slice, backend="tensorflow")
44 | n2 = Node(x_slice, backend="tensorflow")
45 | e = connect(n1[0], n2[0])
46 | return contract(e).get_tensor()
47 |
48 | x = np.ones(10)
49 | self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)
50 | self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)
51 |
52 | @pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")
53 | def test_dynamic_network_sizes_contract_between(self):
54 |
55 | @tf.function
56 | def f(x, n):
57 | x_slice = x[..., :n]
58 | n1 = Node(x_slice, backend="tensorflow")
59 | n2 = Node(x_slice, backend="tensorflow")
60 | connect(n1[0], n2[0])
61 | connect(n1[1], n2[1])
62 | connect(n1[2], n2[2])
63 | return contract_between(n1, n2).get_tensor()
64 |
65 | x = tf.ones((3, 4, 5))
66 | self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
67 | self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
68 |
69 | def test_dynamic_network_sizes_flatten_standard(self):
70 |
71 | @tf.function
72 | def f(x, n):
73 | x_slice = x[..., :n]
74 | n1 = Node(x_slice, backend="tensorflow")
75 | n2 = Node(x_slice, backend="tensorflow")
76 | connect(n1[0], n2[0])
77 | connect(n1[1], n2[1])
78 | connect(n1[2], n2[2])
79 | return contract(flatten_edges_between(n1, n2)).get_tensor()
80 |
81 | x = np.ones((3, 4, 5))
82 | self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
83 | self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
84 |
85 | def test_dynamic_network_sizes_flatten_trace(self):
86 |
87 | @tf.function
88 | def f(x, n):
89 | x_slice = x[..., :n]
90 | n1 = Node(x_slice, backend="tensorflow")
91 | connect(n1[0], n1[2])
92 | connect(n1[1], n1[3])
93 | return contract(flatten_edges_between(n1, n1)).get_tensor()
94 |
95 | x = np.ones((3, 4, 3, 4, 5))
96 | self.assertAllClose(f(x, tf.convert_to_tensor(2)), np.ones((2,)) * 12)
97 | self.assertAllClose(f(x, tf.convert_to_tensor(3)), np.ones((3,)) * 12)
98 |
99 | def test_batch_usage(self,):
100 |
101 | def build_tensornetwork(tensors):
102 | a = Node(tensors[0], backend="tensorflow")
103 | b = Node(tensors[1], backend="tensorflow")
104 | e = connect(a[0], b[0])
105 | return contract(e).get_tensor()
106 |
107 | tensors = [np.ones((5, 10)), np.ones((5, 10))]
108 | result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float64)
109 | np.testing.assert_allclose(result, np.ones(5) * 10)
110 |
111 |
112 | if __name__ == '__main__':
113 | tf.test.main()
114 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/__init__.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.block_sparse import index
2 | from tensornetwork.block_sparse import charge
3 | from tensornetwork.block_sparse import blocksparsetensor
4 | from tensornetwork.block_sparse import linalg
5 | from tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,
6 | ChargeArray,
7 | tensordot,
8 | outerproduct,
9 | compare_shapes)
10 |
11 | from tensornetwork.block_sparse.linalg import (svd, qr, diag, sqrt, trace, inv,#pylint: disable=redefined-builtin
12 | pinv, eye, eigh, eig, conj,
13 | reshape, transpose, norm, abs,
14 | sign)
15 |
16 | from tensornetwork.block_sparse.initialization import (zeros, ones, randn,
17 | random, empty_like,
18 | ones_like, zeros_like,
19 | randn_like, random_like)
20 |
21 | from tensornetwork.block_sparse.index import Index
22 | from tensornetwork.block_sparse.caching import (get_cacher, enable_caching,
23 | disable_caching, clear_cache,
24 | get_caching_status,
25 | set_caching_status)
26 | from tensornetwork.block_sparse.charge import (U1Charge, BaseCharge, Z2Charge,
27 | ZNCharge)
28 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/caching.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import numpy as np
15 | from typing import List, Union, Any, Tuple, Optional, Sequence
# currently there is only one global cacher that does caching.
# this could be changed later on to having stacks of cachers,
# i.e. different cache levels
19 | _INSTANTIATED_CACHERS = []
20 |
21 |
class Cacher:
  """Holds the cache of block-data used by block-sparse contractions.

  Attributes:
    cache: Maps keys describing a contraction to cached block-data.
    do_caching: Whether caching is currently active.
  """

  def __init__(self) -> None:
    # A cacher starts out disabled with an empty cache.
    self.cache = {}
    self.do_caching = False

  def set_status(self, value) -> None:
    """Switch caching on (`value=True`) or off (`value=False`)."""
    self.do_caching = value

  def clear_cache(self) -> None:
    """Drop every cached entry."""
    self.cache = {}

  @property
  def is_empty(self) -> bool:
    """True if the cache currently holds no entries."""
    return not self.cache
37 |
38 |
def get_cacher() -> Cacher:
  """
  Return the global `Cacher` object used to cache block-data for
  block-sparse tensor contractions, creating it on first use.
  """
  if not _INSTANTIATED_CACHERS:
    _INSTANTIATED_CACHERS.append(Cacher())
  return _INSTANTIATED_CACHERS[0]
47 |
48 |
def enable_caching() -> None:
  """
  Turn on caching of block-data for block-sparse contractions.

  While enabled, all data needed to perform binary tensor contractions
  is stored in a dictionary and reused on subsequent contractions.
  This can speed up contractions significantly, but can also lead to a
  substantially larger memory footprint. Code that performs tensor
  decompositions (QR, SVD, eig, eigh or similar) can accumulate cache
  entries very quickly and cause catastrophic memory clutter, so use
  caching with great care.

  The user can at any point clear the cache by calling
  `tn.block_sparse.clear_cache()`.
  """
  get_cacher().set_status(True)
64 |
65 |
def disable_caching() -> None:
  """
  Turn off caching of block-data for block-sparse tensor contractions.

  Note that the cache WILL NOT BE CLEARED; clearing is a separate
  operation done via `tn.block_sparse.clear_cache()`.
  """
  get_cacher().set_status(False)
74 |
75 |
def clear_cache() -> None:
  """
  Empty the global cache holding block-data of block-sparse
  tensor contractions.
  """
  get_cacher().clear_cache()
81 |
82 |
def get_caching_status() -> bool:
  """Return True if caching of block-data is currently enabled."""
  return get_cacher().do_caching
85 |
86 |
def set_caching_status(status: bool) -> None:
  """Enable (`status=True`) or disable (`status=False`) caching."""
  get_cacher().set_status(status)
89 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/caching_test.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.block_sparse.caching import (get_cacher, set_caching_status,
2 | get_caching_status, clear_cache,
3 | enable_caching, disable_caching,
4 | _INSTANTIATED_CACHERS)
5 | from tensornetwork.block_sparse.index import Index
6 | from tensornetwork.block_sparse.charge import U1Charge, charge_equal
7 | from tensornetwork.block_sparse.blocksparse_utils import (
8 | _to_string, _find_transposed_diagonal_sparse_blocks)
9 |
10 | from tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor
11 | from tensornetwork.ncon_interface import ncon
12 | import numpy as np
13 |
14 |
def test_get_cacher():
  """get_cacher returns the single, global Cacher instance."""
  global_cacher = get_cacher()
  assert len(_INSTANTIATED_CACHERS) == 1
  assert _INSTANTIATED_CACHERS[0] is global_cacher
19 |
20 |
def test_set_caching_status():
  """set_caching_status toggles do_caching on the one global cacher."""
  for status in (True, False):
    set_caching_status(status)
    global_cacher = get_cacher()
    assert len(_INSTANTIATED_CACHERS) == 1
    assert _INSTANTIATED_CACHERS[0] is global_cacher
    assert global_cacher.do_caching == status
33 |
34 |
def test_get_caching_status():
  """get_caching_status mirrors the value last set via set_caching_status."""
  for status in (True, False):
    set_caching_status(status)
    assert get_caching_status() == status
40 |
41 |
def test_enable_caching():
  """enable_caching switches the global cacher on."""
  enable_caching()
  global_cacher = get_cacher()
  assert len(_INSTANTIATED_CACHERS) == 1
  assert _INSTANTIATED_CACHERS[0] is global_cacher
  assert global_cacher.do_caching
  # Restore global state for the other tests.
  disable_caching()
49 |
def test_disable_caching():
  """disable_caching switches the global cacher off."""
  disable_caching()
  global_cacher = get_cacher()
  assert len(_INSTANTIATED_CACHERS) == 1
  assert _INSTANTIATED_CACHERS[0] is global_cacher
  assert not global_cacher.do_caching
56 |
57 |
def test_cache():
  # Contract a tensor A with its conjugate B and verify that, with caching
  # enabled, the transposed-diagonal block data of A, B and the result all
  # end up in the global cache under their expected string keys.
  D = 10
  mpsinds = [
      Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
      Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
      Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
      Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), True)
  ]
  A = BlockSparseTensor.random(mpsinds)
  B = A.conj()
  # Charges/flows of the contraction result: the free legs 2 and 3 of
  # both A and B.
  res_charges = [
      A.flat_charges[2], A.flat_charges[3], B.flat_charges[2], B.flat_charges[3]
  ]
  res_flows = [
      A.flat_flows[2], A.flat_flows[3], B.flat_flows[2], B.flat_flows[3]
  ]

  enable_caching()
  # Legs 1 and 2 of A and B are contracted; the rest stay free.
  ncon([A, B], [[1, 2, -1, -2], [1, 2, -3, -4]], backend='symmetric')
  cacher = get_cacher()
  # String keys under which the contraction should have cached block data.
  # A is transposed to bring its contracted legs last, hence [2, 3, 0, 1].
  sA = _to_string(A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
  sB = _to_string(B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
  sC = _to_string(res_charges, res_flows, 2, [0, 1, 2, 3])
  # Recompute the expected block data independently of the cache.
  blocksA, chargesA, dimsA = _find_transposed_diagonal_sparse_blocks(
      A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
  blocksB, chargesB, dimsB = _find_transposed_diagonal_sparse_blocks(
      B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
  blocksC, chargesC, dimsC = _find_transposed_diagonal_sparse_blocks(
      res_charges, res_flows, 2, [0, 1, 2, 3])

  assert sA in cacher.cache
  assert sB in cacher.cache
  assert sC in cacher.cache

  # Cached entries are (blocks, charges, dims) triples; compare each part.
  for b1, b2 in zip(cacher.cache[sA][0], blocksA):
    np.testing.assert_allclose(b1, b2)
  for b1, b2 in zip(cacher.cache[sB][0], blocksB):
    np.testing.assert_allclose(b1, b2)
  for b1, b2 in zip(cacher.cache[sC][0], blocksC):
    np.testing.assert_allclose(b1, b2)
  assert charge_equal(cacher.cache[sA][1], chargesA)
  assert charge_equal(cacher.cache[sB][1], chargesB)
  assert charge_equal(cacher.cache[sC][1], chargesC)

  np.testing.assert_allclose(cacher.cache[sA][2], dimsA)
  np.testing.assert_allclose(cacher.cache[sB][2], dimsB)
  np.testing.assert_allclose(cacher.cache[sC][2], dimsC)
  # Restore global state so other tests start without caching.
  disable_caching()
  clear_cache()
107 |
def test_clear_cache():
  # Run a cached MPS/MPO transfer-operator contraction, then verify that
  # clear_cache() empties the global cache again.
  D = 100
  M = 5
  mpsinds = [
      Index(U1Charge(np.random.randint(5, 15, D, dtype=np.int16)), False),
      Index(U1Charge(np.array([0, 1, 2, 3], dtype=np.int16)), False),
      Index(U1Charge(np.random.randint(5, 18, D, dtype=np.int16)), True)
  ]
  mpoinds = [
      Index(U1Charge(np.random.randint(0, 5, M)), False),
      Index(U1Charge(np.random.randint(0, 10, M)), True), mpsinds[1],
      mpsinds[1].flip_flow()
  ]
  # Left/right environment tensors compatible with the mps/mpo legs.
  Linds = [mpoinds[0].flip_flow(), mpsinds[0].flip_flow(), mpsinds[0]]
  Rinds = [mpoinds[1].flip_flow(), mpsinds[2].flip_flow(), mpsinds[2]]
  mps = BlockSparseTensor.random(mpsinds)
  mpo = BlockSparseTensor.random(mpoinds)
  L = BlockSparseTensor.random(Linds)
  R = BlockSparseTensor.random(Rinds)

  enable_caching()
  ncon([L, mps, mpo, R], [[3, 1, -1], [1, 2, 4], [3, 5, -2, 2], [5, 4, -3]],
       backend='symmetric')
  cacher = get_cacher()
  # The contraction above must have populated the cache...
  assert len(cacher.cache) > 0
  disable_caching()
  # ...and clearing it must leave it empty.
  clear_cache()
  assert len(cacher.cache) == 0
136 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/index.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import numpy as np
15 | from tensornetwork.block_sparse.charge import BaseCharge, fuse_charges
16 | import copy
17 | from typing import List, Union
18 |
19 |
class Index:
  """
  An index class to store indices of a symmetric tensor.

  An `Index` bundles a list of elementary charges with one flow
  direction (a boolean) per charge.
  """

  def __init__(self, charges: Union[List[BaseCharge], BaseCharge],
               flow: Union[List[bool], bool]) -> None:
    """
    Initialize an `Index` object.

    Args:
      charges: A `BaseCharge` or a list of `BaseCharge` objects.
      flow: A boolean or a list of booleans, one per charge.
    Raises:
      TypeError: If any element of `flow` is not a boolean.
    """
    if isinstance(charges, BaseCharge):
      charges = [charges]
    self._charges = charges
    if np.isscalar(flow):
      flow = [flow]
    # Bug fix: `np.bool` (an alias of builtin `bool`) was deprecated in
    # NumPy 1.20 and removed in 1.24; referencing it raises AttributeError
    # on modern NumPy. `np.bool_` plus `bool` covers both NumPy boolean
    # scalars and Python booleans, so behavior is otherwise unchanged.
    if not all(isinstance(f, (np.bool_, bool)) for f in flow):
      raise TypeError("flows have to be boolean. Found flow = {}".format(flow))
    self.flow = flow

  def __len__(self) -> int:
    return self.dim

  def __repr__(self) -> str:
    dense_shape = f"Dimension: {str(self.dim)} \n"
    charge_str = str(self._charges).replace('\n,', ',\n')
    charge_str = charge_str.replace('\n', '\n ')
    charges = f"Charges: {charge_str} \n"
    flow_info = f"Flows: {str(self.flow)} \n"
    return f"Index:\n {dense_shape} {charges} {flow_info} "

  @property
  def dim(self) -> int:
    """The total dense dimension: product of all charge dimensions."""
    return np.prod([i.dim for i in self._charges])

  def __eq__(self, other) -> bool:
    # Two indices are equal iff their unique charges, charge labels and
    # flows all match elementwise.
    if len(other._charges) != len(self._charges):
      return False
    for n in range(len(self._charges)):
      if not np.array_equal(self._charges[n].unique_charges,
                            other._charges[n].unique_charges):
        return False
      if not np.array_equal(self._charges[n].charge_labels,
                            other._charges[n].charge_labels):
        return False
    if not np.all(np.asarray(self.flow) == np.asarray(other.flow)):
      return False
    return True

  def copy(self) -> "Index":
    """
    Returns:
      Index: A deep copy of `Index`. Note that all children of
        `Index` are copied as well.
    """
    index_copy = Index(
        charges=[c.copy() for c in self._charges],
        flow=copy.deepcopy(self.flow))

    return index_copy

  @property
  def flat_charges(self) -> List:
    """
    Returns:
      List: A list containing the elementary charges of `Index`.
    """
    return self._charges

  @property
  def flat_flows(self) -> List:
    """
    Returns:
      List: A list containing the elementary flows of `Index`.
    """
    return list(self.flow)

  def flip_flow(self) -> "Index":
    """
    Return a copy of `Index` with all flows inverted.
    Returns:
      Index
    """
    return Index(
        charges=[c.copy() for c in self._charges],
        flow=list(np.logical_not(self.flow)))

  def __mul__(self, index: "Index") -> "Index":
    """
    Merge `index` and self into a single larger index.
    Flows of `self` and `index` are multiplied into
    the charges upon fusing.
    """
    return fuse_index_pair(self, index)

  @property
  def charges(self) -> BaseCharge:
    """
    Return the fused charges of the index. Note that
    flows are merged into the charges.
    """
    return fuse_charges(self.flat_charges, self.flat_flows)
124 |
125 |
def fuse_index_pair(left_index: Index, right_index: Index) -> Index:
  """
  Fuse two consecutive indices (legs) of a symmetric tensor.

  The charge and flow lists of the two inputs are concatenated; the
  actual fusion of charge values is deferred to `Index.charges`.

  Args:
    left_index: A tensor Index.
    right_index: A tensor Index.
  Returns:
    Index: The result of fusing `left_index` and `right_index`.
  """

  return Index(
      charges=left_index.flat_charges + right_index.flat_charges,
      flow=left_index.flat_flows + right_index.flat_flows)
140 |
141 |
def fuse_indices(indices: List[Index]) -> Index:
  """
  Fuse a list of indices (legs) of a symmetric tensor.

  Args:
    indices: A list of tensor Index objects.
  Returns:
    Index: The result of fusing all elements of `indices`
      pairwise from left to right.
  """

  fused = indices[0]
  for next_index in indices[1:]:
    fused = fuse_index_pair(fused, next_index)
  return fused
156 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/initialization_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from tensornetwork.block_sparse.charge import (U1Charge, charge_equal,
4 | BaseCharge)
5 | from tensornetwork.block_sparse.index import Index
6 | from tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor
7 | from tensornetwork.block_sparse.initialization import (zeros, ones, randn,
8 | random, ones_like,
9 | zeros_like, empty_like,
10 | randn_like, random_like)
11 |
12 | np_dtypes = [np.float64, np.complex128]
13 |
14 |
@pytest.mark.parametrize('dtype', np_dtypes)
@pytest.mark.parametrize('num_charges', [1, 2, 3])
def test_tn_zeros(dtype, num_charges):
  """zeros() yields an all-zero tensor with the requested indices."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-5, 6, (dims[n], num_charges)),
              charge_types=[U1Charge] * num_charges), flows[n])
      for n in range(rank)
  ]
  arr = zeros(indices, dtype=dtype)
  np.testing.assert_allclose(arr.data, 0)
  np.testing.assert_allclose(dims, arr.shape)
  np.testing.assert_allclose(arr.flat_flows, flows)
  for n in range(rank):
    assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])
35 |
36 |
@pytest.mark.parametrize('dtype', np_dtypes)
@pytest.mark.parametrize('num_charges', [1, 2, 3])
def test_tn_ones(dtype, num_charges):
  """ones() yields an all-ones tensor with the requested indices."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-5, 6, (dims[n], num_charges)),
              charge_types=[U1Charge] * num_charges), flows[n])
      for n in range(rank)
  ]

  arr = ones(indices, dtype=dtype)
  np.testing.assert_allclose(arr.data, 1)
  np.testing.assert_allclose(dims, arr.shape)
  np.testing.assert_allclose(arr.flat_flows, flows)
  for n in range(rank):
    assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])
58 |
59 |
@pytest.mark.parametrize('dtype', np_dtypes)
@pytest.mark.parametrize('num_charges', [1, 2, 3])
def test_tn_random(dtype, num_charges):
  """random() yields a tensor with the requested shape, flows and charges."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-5, 6, (dims[n], num_charges)),
              charge_types=[U1Charge] * num_charges), flows[n])
      for n in range(rank)
  ]
  arr = random(indices, dtype=dtype)

  np.testing.assert_allclose(dims, arr.shape)
  np.testing.assert_allclose(arr.flat_flows, flows)
  for n in range(rank):
    assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])
80 |
81 |
@pytest.mark.parametrize('dtype', np_dtypes)
@pytest.mark.parametrize('num_charges', [1, 2, 3])
def test_tn_randn(dtype, num_charges):
  """`randn` yields a tensor whose shape, flows and charges match its indices."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  flows = np.random.choice([True, False], size=len(dims), replace=True)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-5, 6, (dim, num_charges)),
              charge_types=[U1Charge] * num_charges), flow)
      for dim, flow in zip(dims, flows)
  ]
  arr = randn(indices, dtype=dtype)

  np.testing.assert_allclose(dims, arr.shape)
  np.testing.assert_allclose(arr.flat_flows, flows)
  for index, charge_block in zip(indices, arr.charges):
    assert charge_equal(charge_block[0], index.flat_charges[0])
102 |
103 |
@pytest.mark.parametrize('dtype', np_dtypes)
@pytest.mark.parametrize('num_charges', [1, 2, 3])
@pytest.mark.parametrize('fun, val', [(ones_like, 1), (zeros_like, 0),
                                      (empty_like, None), (randn_like, None),
                                      (random_like, None)])
def test_like_init(fun, val, dtype, num_charges):
  """`*_like` initializers copy dtype, shape, flows and charges of the source."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  flows = np.random.choice([True, False], size=len(dims), replace=True)
  indices = []
  for dim, flow in zip(dims, flows):
    charge = BaseCharge(
        np.random.randint(-5, 6, (dim, num_charges)),
        charge_types=[U1Charge] * num_charges)
    indices.append(Index(charge, flow))
  reference = randn(indices, dtype=dtype)
  result = fun(reference)
  assert reference.dtype == result.dtype
  np.testing.assert_allclose(reference.shape, result.shape)
  np.testing.assert_allclose(reference.flat_flows, result.flat_flows)
  for ref_block, res_block in zip(reference.charges, result.charges):
    assert charge_equal(ref_block[0], res_block[0])
  # Only some initializers guarantee a fill value (ones -> 1, zeros -> 0).
  if val is not None:
    np.testing.assert_allclose(result.data, val)
130 |
--------------------------------------------------------------------------------
/tensornetwork/block_sparse/sizetypes.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
SIZE_T = np.int64  # the integer dtype ("size type") used for index-arrays
17 |
--------------------------------------------------------------------------------
/tensornetwork/component_factory.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.network_components import Node, CopyNode, Edge
2 |
# Registry of serializable network component classes, keyed by class name.
_COMPONENTS = {
    "Node": Node,
    "CopyNode": CopyNode,
    "Edge": Edge,
}


def get_component(name):
  """Return the component class registered under `name`.

  Args:
    name: One of the keys in `_COMPONENTS` ("Node", "CopyNode", "Edge").

  Returns:
    The corresponding component class.

  Raises:
    ValueError: If `name` is not a registered component.
  """
  if name not in _COMPONENTS:
    raise ValueError("Component {} does not exist".format(name))
  return _COMPONENTS[name]
14 |
--------------------------------------------------------------------------------
/tensornetwork/config.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/__init__.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=line-too-long
2 | from tensornetwork.contractors.custom_path_solvers.nconinterface import ncon_solver
3 | from tensornetwork.contractors.bucket_contractor import bucket
4 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import optimal
5 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import branch
6 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import greedy
7 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import auto
8 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import custom
9 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import path_solver
10 | from tensornetwork.contractors.opt_einsum_paths.path_contractors import contract_path
11 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/bucket_contractor.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Network contractor which exploits copy tensors."""
15 |
16 | from typing import Sequence, Iterable
17 | from tensornetwork.network_components import AbstractNode, contract_copy_node
18 | from tensornetwork import network_components
19 |
20 |
def bucket(
    nodes: Iterable[AbstractNode],
    contraction_order: Sequence[network_components.CopyNode]
) -> Iterable[AbstractNode]:
  """Contract given nodes exploiting copy tensors.

  This is based on the Bucket-Elimination-based algorithm described in
  `arXiv:quant-ph/1712.05384`_, but avoids explicit construction of the
  graphical model. Instead, it achieves the efficient contraction of sparse
  tensors by representing them as subnetworks consisting of lower rank tensors
  and copy tensors. This function assumes that sparse tensors have already been
  decomposed this way by the caller.

  This contractor is efficient on networks with many copy tensors. Time and
  memory requirements are highly sensitive to the requested contraction order.

  Note that the returned tensor network may not be fully contracted if the
  input network doesn't have enough copy nodes. In this case, the client
  should use a different contractor to complete the contraction.

  .. _arXiv:quant-ph/1712.05384:
    https://arxiv.org/abs/1712.05384

  Args:
    nodes: A collection of connected nodes.
    contraction_order: Order in which copy tensors are contracted.

  Returns:
    A new iterable of nodes after contracting copy tensors.
  """
  remaining = set(nodes)
  for copy_node in contraction_order:
    # Snapshot the neighbors before they are absorbed by the contraction.
    absorbed = set(copy_node.get_partners())
    absorbed.add(copy_node)
    merged = contract_copy_node(copy_node)
    remaining -= absorbed
    remaining.add(merged)
  return remaining
58 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/bucket_contractor_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for tensornetwork.contractors.bucket."""
15 |
16 | from typing import Tuple
17 |
18 | import numpy as np
19 |
20 | from tensornetwork import network_components, CopyNode, Node
21 | from tensornetwork.contractors import bucket_contractor
22 | from tensornetwork.contractors import greedy
23 |
24 | bucket = bucket_contractor.bucket
25 |
26 |
def add_cnot(
    q0: network_components.Edge,
    q1: network_components.Edge,
    backend: str = "numpy"
) -> Tuple[network_components.CopyNode, network_components.Edge,
           network_components.Edge]:
  """Adds the CNOT quantum gate to tensor network.

  The CNOT is decomposed into two rank-3 tensors: a COPY tensor acting on
  the control qubit and a XOR tensor acting on the target qubit.

  Args:
    q0: Input edge for the control qubit.
    q1: Input edge for the target qubit.
    backend: backend to use

  Returns:
    Tuple with three elements:
    - copy tensor corresponding to the control qubit
    - output edge for the control qubit and
    - output edge for the target qubit.
  """
  copy_tensor = CopyNode(rank=3, dimension=2, backend=backend)
  # XOR truth table as a rank-3 tensor: output = target XOR control.
  xor_array = np.array([[[1, 0], [0, 1]], [[0, 1], [1, 0]]], dtype=np.float64)
  xor_tensor = Node(xor_array, backend=backend)
  network_components.connect(q0, copy_tensor[0])
  network_components.connect(q1, xor_tensor[0])
  network_components.connect(copy_tensor[1], xor_tensor[1])
  return (copy_tensor, copy_tensor[2], xor_tensor[2])
56 |
57 |
def test_cnot_gate():
  """CNOT maps |11> onto |10>."""
  # Input state |11>, one node per qubit.
  q0_in = Node(np.array([0, 1], dtype=np.float64))
  q1_in = Node(np.array([0, 1], dtype=np.float64))
  # Expected output state |10>.
  q0_out = Node(np.array([0, 1], dtype=np.float64))
  q1_out = Node(np.array([1, 0], dtype=np.float64))
  # Wire up the circuit and project onto the expected output.
  copy_node, q0_wire, q1_wire = add_cnot(q0_in[0], q1_in[0])
  network_components.connect(q0_wire, q0_out[0])
  network_components.connect(q1_wire, q1_out[0])
  # Bucket elimination handles the copy tensor; greedy finishes the rest.
  partial = bucket([q0_in, q1_in, q0_out, q1_out, copy_node], (copy_node,))
  result = greedy(partial)
  # Overlap of 1.0 means CNOT indeed turned |11> into |10>.
  np.testing.assert_allclose(result.get_tensor(), 1.0)
77 |
78 |
def test_swap_gate():
  """Three chained CNOTs implement a SWAP of two qubits."""
  # Input state 0.6|00> + 0.8|10>.
  q0_in = Node(np.array([0.6, 0.8], dtype=np.float64), backend="jax")
  q1_in = Node(np.array([1, 0], dtype=np.float64), backend="jax")
  # Expected output state 0.6|00> + 0.8|01>.
  q0_out = Node(np.array([1, 0], dtype=np.float64), backend="jax")
  q1_out = Node(np.array([0.6, 0.8], dtype=np.float64), backend="jax")
  # SWAP built from three CNOTs with alternating control/target.
  copy_1, q0_t1, q1_t1 = add_cnot(q0_in[0], q1_in[0], backend="jax")
  copy_2, q1_t2, q0_t2 = add_cnot(q1_t1, q0_t1, backend="jax")
  copy_3, q0_t3, q1_t3 = add_cnot(q0_t2, q1_t2, backend="jax")
  network_components.connect(q0_t3, q0_out[0])
  network_components.connect(q1_t3, q1_out[0])
  # Bucket elimination over the copy nodes, then greedy to finish.
  all_nodes = [q0_in, q0_out, q1_in, q1_out, copy_1, copy_2, copy_3]
  partial = bucket(all_nodes, (copy_1, copy_2, copy_3))
  result = greedy(partial)
  # Overlap of 1.0 means SWAP moved |10> to |01> and kept |00> unchanged.
  np.testing.assert_allclose(result.get_tensor(), 1.0)
99 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/custom_path_solvers/__init__.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.contractors.custom_path_solvers import pathsolvers
2 | from tensornetwork.contractors.custom_path_solvers import nconinterface
3 | #pylint: disable=line-too-long
4 | from tensornetwork.contractors.custom_path_solvers.pathsolvers import greedy_cost_solve, greedy_size_solve, full_solve_complete
5 | #pylint: disable=line-too-long
6 | from tensornetwork.contractors.custom_path_solvers.nconinterface import ncon_solver, ncon_to_adj, ord_to_ncon, ncon_cost_check
7 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/custom_path_solvers/pathsolvers_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 | import pytest
17 | # pylint: disable=line-too-long
18 | from tensornetwork.contractors.custom_path_solvers.pathsolvers import greedy_size_solve, greedy_cost_solve, full_solve_complete
19 |
20 |
@pytest.mark.parametrize('N', range(2, 20))
def test_greedy_size_solve(N):
  """greedy_size_solve returns a (2, N-1) order array and a float cost."""
  adj = (1 + np.sin(range(N**2))).reshape(N, N)
  sym_adj = adj + adj.T  # symmetrize: solver expects an undirected graph
  order, cost = greedy_size_solve(sym_adj)
  assert isinstance(cost, float)
  assert order.shape == (2, N - 1)
28 |
29 |
@pytest.mark.parametrize('d1', np.linspace(1, 6, 10))
@pytest.mark.parametrize('d2', np.linspace(1, 6, 10))
def test_greedy_size_solve2(d1, d2):
  """On a 3-node chain the edge with the larger log-dimension goes first."""
  num_nodes = 3
  log_adj = np.zeros([num_nodes, num_nodes])
  log_adj[0, 1] = d1
  log_adj[1, 2] = d2
  log_adj += log_adj.T
  order, cost = greedy_size_solve(log_adj)
  if d1 >= d2:
    expected_order = np.array([[0, 0], [1, 1]])
    expected_cost = d2 + np.log10(10**d1 + 1)
  else:
    expected_order = np.array([[1, 0], [2, 1]])
    expected_cost = d1 + np.log10(10**d2 + 1)
  assert np.array_equal(order, expected_order)
  assert np.allclose(expected_cost, cost)
47 |
48 |
@pytest.mark.parametrize('N', range(2, 20))
def test_greedy_cost_solve(N):
  """greedy_cost_solve returns a (2, N-1) order array and a float cost."""
  adj = (1 + np.sin(range(N**2))).reshape(N, N)
  sym_adj = adj + adj.T  # symmetrize: solver expects an undirected graph
  order, cost = greedy_cost_solve(sym_adj)
  assert isinstance(cost, float)
  assert order.shape == (2, N - 1)
56 |
57 |
@pytest.mark.parametrize('d1', np.linspace(1, 6, 5))
@pytest.mark.parametrize('d2', np.linspace(1, 6, 5))
@pytest.mark.parametrize('d3', np.linspace(1, 6, 5))
def test_greedy_cost_solve2(d1, d2, d3):
  """A 3-node chain with an extra diagonal term has a known greedy cost."""
  num_nodes = 3
  log_adj = np.zeros([num_nodes, num_nodes])
  log_adj[0, 1] = d1
  log_adj[1, 2] = d2
  log_adj += log_adj.T
  log_adj[2, 2] = d3  # diagonal term on the last node
  order, cost = greedy_cost_solve(log_adj)
  expected_order = np.array([[0, 0], [1, 1]])
  expected_cost = d1 + d2 + np.log10(1 + 10**(d3 - d1))
  assert np.array_equal(order, expected_order)
  assert np.allclose(expected_cost, cost)
73 |
74 |
@pytest.mark.parametrize('N', range(2, 8))
def test_full_solve_complete(N):
  """full_solve_complete returns a (2, N-1) order array and a float cost."""
  adj = (1 + np.sin(range(N**2))).reshape(N, N)
  sym_adj = adj + adj.T  # symmetrize: solver expects an undirected graph
  order, cost, _ = full_solve_complete(sym_adj)
  assert isinstance(cost, float)
  assert order.shape == (2, N - 1)
82 |
83 |
@pytest.mark.parametrize('d1', np.linspace(1, 6, 5))
def test_full_solve_complete2(d1):
  """A uniform chain with diagonal end terms has a known optimal cost."""
  num_nodes = 7
  log_adj = np.zeros([num_nodes, num_nodes])
  # Chain of identical log-dimension d1 edges.
  log_adj[:(num_nodes - 1), 1:] = np.diag(d1 * np.ones(num_nodes - 1))
  log_adj += log_adj.T
  # Diagonal terms on both chain ends.
  log_adj[0, 0] = d1
  log_adj[-1, -1] = d1
  _, cost, is_optimal = full_solve_complete(log_adj)
  expected_cost = np.log10((num_nodes - 1) * 10**(3 * d1))
  assert np.allclose(expected_cost, cost)
  assert is_optimal
96 |
97 |
@pytest.mark.parametrize('cost_bound', range(1, 50, 5))
@pytest.mark.parametrize('max_branch', range(1, 1000, 100))
def test_full_solve_complete3(cost_bound, max_branch):
  """Bounding cost/branching still yields a well-formed order and cost."""
  num_nodes = 7
  adj = (1 + np.sin(range(num_nodes**2))).reshape(num_nodes, num_nodes)
  sym_adj = adj + adj.T
  order, cost, _ = full_solve_complete(
      sym_adj, cost_bound=cost_bound, max_branch=max_branch)
  assert isinstance(cost, float)
  assert order.shape == (2, num_nodes - 1)
108 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/opt_einsum_paths/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/contractors/opt_einsum_paths/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/contractors/opt_einsum_paths/path_calculation_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests contraction paths calculated by `utils.gate_path`.
15 |
16 | These tests are based on `opt_einsum`s tests from
17 | github.com/dgasmith/opt_einsum/blob/master/opt_einsum/tests/test_paths.py
18 | """
19 | import numpy as np
20 | import opt_einsum
21 | import pytest
22 | from tensornetwork import Node
23 | from tensornetwork.contractors.opt_einsum_paths import utils
24 |
25 |
def check_path(calculated_path, correct_path):
  """Check a calculated contraction path against the expected one.

  Args:
    calculated_path: Path returned by the contraction-path algorithm.
    correct_path: Expected sequence of pairwise contraction steps.

  Returns:
    True if `calculated_path` is a list of the same length as `correct_path`
    whose entries are tuples equal to the corresponding expected entries;
    False otherwise.
  """
  if not isinstance(calculated_path, list):
    return False
  if len(calculated_path) != len(correct_path):
    return False
  # Each step must be a tuple and match the expected pair exactly.
  return all(
      isinstance(calc, tuple) and calc == correct
      for calc, correct in zip(calculated_path, correct_path))
38 |
39 |
40 | # We do not use the backend fixture as this file tests only contraction paths
41 | # that `opt_einsum` returns and not the actual contractions performed by
42 | # `TensorNetwork`.
def gemm_network():
  """Creates 'GEMM1' contraction from `opt_einsum` tests."""
  x = Node(np.ones([1, 2, 4]))
  y = Node(np.ones([1, 3]))
  z = Node(np.ones([2, 4, 3]))
  # Connect the shared axes pairwise.
  for left, right in [(x[0], y[0]), (x[1], z[0]), (x[2], z[1]), (y[1], z[2])]:
    # pylint: disable=pointless-statement
    left ^ right
  return [x, y, z]
54 |
55 |
def inner_network():
  """Creates a (modified) `Inner1` contraction from `opt_einsum` tests."""
  x = Node(np.ones([5, 2, 3, 4]))
  y = Node(np.ones([5, 3]))
  z = Node(np.ones([2, 4]))
  # Every axis of x is contracted with either y or z.
  for left, right in [(x[0], y[0]), (x[1], z[0]), (x[2], y[1]), (x[3], z[1])]:
    # pylint: disable=pointless-statement
    left ^ right
  return [x, y, z]
67 |
68 |
def matrix_chain():
  """Creates a contraction of chain of matrices.

  The `greedy` algorithm does not find the optimal path in this case!
  """
  dims = [10, 8, 6, 4, 2]
  nodes = [Node(np.ones([rows, cols])) for rows, cols in zip(dims, dims[1:])]
  for left, right in zip(nodes, nodes[1:]):
    # pylint: disable=pointless-statement
    left[1] ^ right[0]
  return nodes
80 |
81 |
# Parametrize tests by giving:
# (contraction algorithm, network, correct path that is expected)
# NOTE: `greedy` finds a suboptimal path for the matrix chain (see
# `matrix_chain` docstring), hence its differing expected path below.
test_list = [
    ("optimal", "gemm_network", [(0, 2), (0, 1)]),
    ("branch", "gemm_network", [(0, 2), (0, 1)]),
    ("greedy", "gemm_network", [(0, 2), (0, 1)]),
    ("optimal", "inner_network", [(0, 1), (0, 1)]),
    ("branch", "inner_network", [(0, 1), (0, 1)]),
    ("greedy", "inner_network", [(0, 1), (0, 1)]),
    ("optimal", "matrix_chain", [(2, 3), (1, 2), (0, 1)]),
    ("branch", "matrix_chain", [(2, 3), (1, 2), (0, 1)]),
    ("greedy", "matrix_chain", [(0, 1), (0, 2), (0, 1)]),
]
95 |
96 |
@pytest.mark.parametrize("params", test_list)
def test_path_optimal(params):
  """Each algorithm reproduces the expected contraction path."""
  algorithm_name, network_name, correct_path = params

  # Look up the network builder defined in this module and the
  # corresponding `opt_einsum` path algorithm by name.
  build_network = globals()[network_name]
  path_algorithm = getattr(opt_einsum.paths, algorithm_name)

  calculated_path, _ = utils.get_path(build_network(), path_algorithm)
  assert check_path(calculated_path, correct_path)
106 |
--------------------------------------------------------------------------------
/tensornetwork/contractors/opt_einsum_paths/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Helper methods for `path_contractors`."""
15 | # pylint: disable=line-too-long
16 | from tensornetwork.network_operations import get_all_edges, get_subgraph_dangling
17 | from tensornetwork.network_components import AbstractNode, Edge
18 | from typing import (Any, Callable, Dict, List, Set, Tuple, Iterable, Text)
19 | # `opt_einsum` algorithm method typing
20 | Algorithm = Callable[[List[Set[Edge]], Set[Edge], Dict[Edge, Any]],
21 | List[Tuple[int, int]]]
22 |
23 |
def multi_remove(elems: List[Any], indices: List[int]) -> List[Any]:
  """Remove the elements at multiple indices in a list at once.

  Args:
    elems: The list to filter.
    indices: Positions of the elements to drop.

  Returns:
    A new list with the elements at `indices` removed; `elems` is unmodified.
  """
  # A set gives O(1) membership tests instead of O(len(indices)) per element.
  drop = set(indices)
  return [elem for pos, elem in enumerate(elems) if pos not in drop]
27 |
28 |
def get_path(
    nodes: Iterable[AbstractNode],
    algorithm: Algorithm) -> Tuple[List[Tuple[int, int]], List[AbstractNode]]:
  """Calculates the contraction paths using `opt_einsum` methods.

  Args:
    nodes: An iterable of nodes.
    algorithm: `opt_einsum` method to use for calculating the contraction path.

  Returns:
    The optimal contraction path as returned by `opt_einsum`, together with
    the nodes as a list (fixing the iteration order the path refers to).
  """
  node_list = list(nodes)
  # One set of edges per node; dangling edges form the output subscript.
  input_sets = [set(node.edges) for node in node_list]
  output_set = get_subgraph_dangling(node_list)
  size_dict = {edge: edge.dimension for edge in get_all_edges(node_list)}
  path = algorithm(input_sets, output_set, size_dict)
  return path, node_list
47 |
--------------------------------------------------------------------------------
/tensornetwork/linalg/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/linalg/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/matrixproductstates/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/matrixproductstates/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/matrixproductstates/infinite_mps_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 | import pytest
19 | import numpy as np
20 | import tensornetwork as tn
21 | from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS
22 | import tensorflow as tf
23 |
24 | from jax.config import config
25 |
26 |
@pytest.fixture(
    name="backend_dtype_values",
    params=[('numpy', np.float64), ('numpy', np.complex128),
            ('tensorflow', np.float64), ('tensorflow', np.complex128),
            ('pytorch', np.float64), ('jax', np.float64)])
def backend_dtype(request):
  # Supplies (backend name, dtype) pairs; tests request the fixture under
  # the name `backend_dtype_values`.
  return request.param
34 |
35 |
def get_random_np(shape, dtype, seed=0):
  """Return a reproducible random array of the given shape and dtype."""
  np.random.seed(seed)  # identical tensors on every call with the same seed
  real_part = np.random.randn(*shape)
  if dtype is np.complex64:
    imag_part = np.random.randn(*shape).astype(np.float32)
    return real_part.astype(np.float32) + 1j * imag_part
  if dtype is np.complex128:
    imag_part = np.random.randn(*shape).astype(np.float64)
    return real_part.astype(np.float64) + 1j * imag_part
  return real_part.astype(dtype)
45 |
46 |
@pytest.mark.parametrize("N, pos", [(10, -1), (10, 10)])
def test_infinite_mps_init(backend, N, pos):
  """An out-of-range center position raises ValueError."""
  bond_dim, phys_dim = 10, 2
  tensors = [np.random.randn(2, phys_dim, bond_dim)]
  tensors += [
      np.random.randn(bond_dim, phys_dim, bond_dim) for _ in range(N - 2)
  ]
  tensors += [np.random.randn(bond_dim, phys_dim, 1)]
  with pytest.raises(ValueError):
    InfiniteMPS(tensors, center_position=pos, backend=backend)
55 |
56 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_TMeigs(dtype):
  """The transfer-matrix eigenvector is a fixed point up to the factor eta."""
  bond_dim, phys_dim, num_sites = 10, 2, 10
  imps = InfiniteMPS.random(
      d=[phys_dim] * num_sites,
      D=[bond_dim] * (num_sites + 1),
      dtype=dtype,
      backend='numpy')
  eta, vec = imps.transfer_matrix_eigs('r')
  mapped = imps.unit_cell_transfer_operator('r', vec)
  np.testing.assert_allclose(eta * vec, mapped)
65 |
66 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("direction", ['left', 'right'])
def test_unitcell_transfer_operator(dtype, direction):
  """Unit-cell transfer operator equals site-by-site application."""
  bond_dim, phys_dim, num_sites = 10, 2, 10
  imps = InfiniteMPS.random(
      d=[phys_dim] * num_sites,
      D=[bond_dim] * (num_sites + 1),
      dtype=dtype,
      backend='numpy')
  vec = imps.backend.randn((bond_dim, bond_dim), dtype=dtype, seed=10)
  expected = imps.unit_cell_transfer_operator(direction, vec)
  # Right-directed application sweeps the sites in reverse order.
  site_order = range(len(imps))
  if direction == 'right':
    site_order = reversed(site_order)
  stepwise = vec
  for site in site_order:
    stepwise = imps.apply_transfer_operator(site, direction, stepwise)
  np.testing.assert_allclose(stepwise, expected)
82 |
83 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_InfiniteMPS_canonicalize(dtype):
  """After canonicalize(), the canonical-form residual is numerically zero."""
  bond_dim, phys_dim, num_sites = 10, 2, 4
  imps = InfiniteMPS.random(
      d=[phys_dim] * num_sites,
      D=[bond_dim] * (num_sites + 1),
      dtype=dtype,
      backend='numpy')
  imps.canonicalize()
  assert imps.check_canonical() < 1E-12
92 |
--------------------------------------------------------------------------------
/tensornetwork/matrixproductstates/mpo_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import tensorflow as tf
4 | import jax
5 | import torch
6 | from tensornetwork.backends import backend_factory
7 | #pylint: disable=line-too-long
8 | from tensornetwork.matrixproductstates.mpo import (FiniteMPO,
9 | BaseMPO,
10 | InfiniteMPO,
11 | FiniteFreeFermion2D)
12 | from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
13 | from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
14 |
15 |
16 |
17 |
@pytest.fixture(
    name="backend_dtype_values",
    params=[('numpy', np.float64), ('numpy', np.complex128),
            ('tensorflow', tf.float64), ('tensorflow', tf.complex128),
            ('pytorch', torch.float64), ('jax', np.float64),
            ('jax', np.complex128)])
def backend_dtype(request):
  # Supplies (backend name, dtype) pairs; tests request the fixture under
  # the name `backend_dtype_values`.
  return request.param
26 |
27 |
def test_base_mpo_init(backend_dtype_values):
  """BaseMPO records backend, dtype and the bond dimensions of its tensors."""
  backend_name, dtype = backend_dtype_values
  backend = backend_factory.get_backend(backend_name)
  shapes = [(1, 5, 2, 2), (5, 5, 2, 2), (5, 1, 2, 2)]
  tensors = [backend.randn(shape, dtype=dtype) for shape in shapes]
  mpo = BaseMPO(tensors=tensors, backend=backend, name='test')
  assert mpo.backend is backend
  assert mpo.dtype == dtype
  np.testing.assert_allclose(mpo.bond_dimensions, [1, 5, 5, 1])
40 |
41 |
def test_base_mpo_raises():
  """Mixed-dtype MPO tensors raise TypeError, at init and on dtype access."""
  backend = backend_factory.get_backend('numpy')
  # Last tensor deliberately has a different dtype than the others.
  tensors = [
      backend.randn((1, 5, 2, 2), dtype=np.float64),
      backend.randn((5, 5, 2, 2), dtype=np.float64),
      backend.randn((5, 1, 2, 2), dtype=np.float32)
  ]
  with pytest.raises(TypeError):
    BaseMPO(tensors=tensors, backend=backend)
  # Assigning mismatched tensors after construction is caught on dtype access.
  mpo = BaseMPO(tensors=[], backend=backend)
  mpo.tensors = tensors
  with pytest.raises(TypeError):
    mpo.dtype  # pylint: disable=pointless-statement
55 |
56 |
def test_finite_mpo_raises(backend):
  """Boundary MPO tensors with bond dimension != 1 are rejected."""
  bad_left = [np.random.rand(2, 5, 2, 2), np.random.rand(5, 1, 2, 2)]
  with pytest.raises(ValueError):
    FiniteMPO(tensors=bad_left, backend=backend)
  bad_right = [np.random.rand(1, 5, 2, 2), np.random.rand(5, 2, 2, 2)]
  with pytest.raises(ValueError):
    FiniteMPO(tensors=bad_right, backend=backend)
64 |
65 |
def test_infinite_mpo_raises(backend):
  """Mismatched unit-cell bond dimensions are rejected."""
  bad_tensors = [np.random.rand(2, 5, 2, 2), np.random.rand(5, 3, 2, 2)]
  with pytest.raises(ValueError):
    InfiniteMPO(tensors=bad_tensors, backend=backend)
70 |
71 |
def test_infinite_mpo_roll(backend):
  """Rolling by one site cyclically permutes the unit-cell tensors."""
  tensors = [np.random.rand(5, 5, 2, 2), np.random.rand(5, 5, 2, 2)]
  mpo = InfiniteMPO(tensors=tensors, backend=backend)
  # One roll swaps the two tensors of the unit cell.
  mpo.roll(1)
  np.testing.assert_allclose(mpo.tensors[0], tensors[1])
  np.testing.assert_allclose(mpo.tensors[1], tensors[0])
  # A second roll restores the original order.
  mpo.roll(1)
  np.testing.assert_allclose(mpo.tensors[0], tensors[0])
  np.testing.assert_allclose(mpo.tensors[1], tensors[1])
81 |
82 |
def test_len(backend):
  """`len(mpo)` reports the number of MPO sites."""
  shapes = [(1, 5, 2, 2), (5, 5, 2, 2), (5, 1, 2, 2)]
  mpo = BaseMPO(
      tensors=[np.random.rand(*shape) for shape in shapes], backend=backend)
  assert len(mpo) == 3
91 |
92 |
@pytest.mark.parametrize("N1, N2, D", [(2, 2, 4), (2, 4, 16), (4, 4, 128)])
def test_finiteFreeFermions2d(N1, N2, D):
  """One-site DMRG reproduces the exact 2d free-fermion ground-state energy."""

  def adjacency(N1, N2):
    # Map each site n of the N1 x N2 grid to its neighbors at (x, y + 1)
    # and (x - 1, y), so every grid edge appears exactly once.
    neighbors = {}
    mat = np.arange(N1 * N2).reshape(N1, N2)
    for n in range(N1 * N2):
      x, y = np.divmod(n, N2)
      if n not in neighbors:
        neighbors[n] = []
      if y < N2 - 1:
        neighbors[n].append(mat[x, y + 1])
      if x > 0:
        neighbors[n].append(mat[x - 1, y])
    return neighbors

  adj = adjacency(N1, N2)
  # Single-particle matrix: hopping amplitude t on every grid edge
  # (symmetrized) plus on-site term v on the diagonal.
  tij = np.zeros((N1 * N2, N1 * N2))
  t = -1
  v = -1
  for n, d in adj.items():
    for ind in d:
      tij[n, ind] += t
      tij[ind, n] += t
  tij += np.diag(np.ones(N1 * N2) * v)

  # `eigh` returns eigenvalues in ascending order, so the minimum over
  # cumulative sums equals the sum of all negative single-particle energies,
  # i.e. the exact ground-state energy.
  eta, _ = np.linalg.eigh(tij)
  expected = min(np.cumsum(eta))

  # Same hopping amplitude in both lattice directions.
  t1 = t
  t2 = t
  dtype = np.float64
  mpo = FiniteFreeFermion2D(t1, t2, v, N1, N2, dtype)
  mps = FiniteMPS.random([2] * N1 * N2, [D] * (N1 * N2 - 1), dtype=np.float64)
  dmrg = FiniteDMRG(mps, mpo)
  actual = dmrg.run_one_site()
  np.testing.assert_allclose(actual, expected)
129 |
--------------------------------------------------------------------------------
/tensornetwork/ops.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | class _DefaultNodeCollectionStack:
17 | """A stack to keep track of contexts that were entered."""
18 |
19 | def __init__(self):
20 | self.stack = []
21 |
22 | def get_current_item(self):
23 | return self.stack[-1] if self.stack else None
24 |
25 |
# Module-level singleton holding the stack of active NodeCollection
# contexts; `get_current_collection` reads from it.
_default_collection_stack = _DefaultNodeCollectionStack()
27 |
28 |
def get_current_collection():
  """Return the innermost active NodeCollection context, or None."""
  current = _default_collection_stack.get_current_item()
  return current
31 |
--------------------------------------------------------------------------------
/tensornetwork/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tests/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/tests/backend_contextmanager_test.py:
--------------------------------------------------------------------------------
1 | import tensornetwork as tn
2 | from tensornetwork.backend_contextmanager import _default_backend_stack
3 | import pytest
4 | import numpy as np
5 |
6 |
def test_contextmanager_simple():
  """Nodes created inside the same DefaultBackend context share a backend."""
  with tn.DefaultBackend("tensorflow"):
    node_a = tn.Node(np.ones((10,)))
    node_b = tn.Node(np.ones((10,)))
    assert node_a.backend.name == node_b.backend.name
12 |
13 |
def test_contextmanager_default_backend():
  """Entering a context must not clobber the globally configured backend."""
  tn.set_default_backend("pytorch")
  with tn.DefaultBackend("numpy"):
    assert _default_backend_stack.default_backend == "pytorch"
18 |
19 |
def test_contextmanager_interruption():
  """Changing the global default while a context is active is an error."""
  tn.set_default_backend("pytorch")
  with pytest.raises(AssertionError):
    with tn.DefaultBackend("numpy"):
      tn.set_default_backend("tensorflow")
25 |
26 |
def test_contextmanager_nested():
  """Node backends follow the innermost context and restore on exit."""
  with tn.DefaultBackend("tensorflow"):
    outer_node = tn.Node(np.ones((10,)))
    assert outer_node.backend.name == "tensorflow"
    with tn.DefaultBackend("numpy"):
      inner_node = tn.Node(np.ones((10,)))
      assert inner_node.backend.name == "numpy"
    reentered_node = tn.Node(np.ones((10,)))
    assert reentered_node.backend.name == "tensorflow"
  outside_node = tn.Node(np.ones((10,)))
  assert outside_node.backend.name == "numpy"
38 |
39 |
def test_contextmanager_wrong_item():
  """DefaultBackend rejects arguments that are neither Text nor BaseBackend."""
  node = tn.Node(np.ones((10,)))
  with pytest.raises(ValueError):
    tn.DefaultBackend(node)  # pytype: disable=wrong-arg-types
44 |
45 |
46 |
def test_contextmanager_BaseBackend():
  """A BaseBackend instance may be passed directly to DefaultBackend."""
  tn.set_default_backend("pytorch")
  seed_node = tn.Node(np.ones((10,)))
  with tn.DefaultBackend(seed_node.backend):
    new_node = tn.Node(np.ones((10,)))
    assert new_node.backend.name == "pytorch"
53 |
54 |
def test_set_default_backend_value_error():
  """set_default_backend raises ValueError on a non-Text, non-BaseBackend."""
  tn.set_default_backend("pytorch")
  expected_msg = ("Item passed to set_default_backend "
                  "must be Text or BaseBackend")
  with pytest.raises(ValueError, match=expected_msg):
    tn.set_default_backend(-1)  # pytype: disable=wrong-arg-types
62 |
--------------------------------------------------------------------------------
/tensornetwork/tests/network_components_free_symmetric_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import tensornetwork as tn
4 | from tensornetwork.block_sparse import (BlockSparseTensor, Index, BaseCharge,
5 | U1Charge)
6 |
7 |
@pytest.mark.parametrize("num_charges", [1, 2])
def test_sparse_shape(num_charges):
  """sparse_shape of a symmetric-backend node matches its Index objects."""
  np.random.seed(10)
  dtype = np.float64
  shape = [10, 11, 12, 13]
  R = len(shape)
  charges = [
      BaseCharge(
          np.random.randint(-5, 5, (shape[n], num_charges)),
          charge_types=[U1Charge] * num_charges) for n in range(R)
  ]
  # `np.bool` is a deprecated alias of the builtin `bool` (removed in
  # NumPy 1.24); use the builtin directly.
  flows = list(np.full(R, fill_value=False, dtype=bool))
  indices = [Index(charges[n], flows[n]) for n in range(R)]
  a = BlockSparseTensor.random(indices=indices, dtype=dtype)
  node = tn.Node(a, backend='symmetric')
  for s1, s2 in zip(node.sparse_shape, indices):
    assert s1 == s2
25 |
--------------------------------------------------------------------------------
/tensornetwork/tests/serialize_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import tensornetwork as tn
16 | import pytest
17 | import numpy as np
18 |
19 |
def assert_nodes_eq(a, b):
  """Assert that two nodes agree on type, metadata and tensor contents."""
  assert type(a) == type(b)  #pylint: disable=unidiomatic-typecheck
  for attr in ('name', 'axis_names', 'backend', 'shape', 'rank', 'dtype',
               'dimension'):
    assert getattr(a, attr, None) == getattr(b, attr, None)
  tensor_a = getattr(a, 'tensor', None)
  if isinstance(tensor_a, np.ndarray):
    assert (tensor_a == getattr(b, 'tensor', None)).all()
32 |
33 |
def assert_edges_eq(a, b):
  """Assert that two edges have matching names and axis assignments."""
  assert isinstance(a, tn.Edge)
  assert isinstance(b, tn.Edge)
  assert a.name == b.name
  assert a._axes == b._axes
38 |
39 |
def assert_graphs_eq(a_nodes, b_nodes):
  """Assert two node lists form graphs with identical connectivity."""
  assert len(a_nodes) == len(b_nodes)
  index_of_a = {node: i for i, node in enumerate(a_nodes)}
  index_of_b = {node: i for i, node in enumerate(b_nodes)}
  for node_a, node_b in zip(a_nodes, b_nodes):
    for edge_a, edge_b in zip(node_a.edges, node_b.edges):
      assert_edges_eq(edge_a, edge_b)
      # Matching edges must point at nodes occupying the same position.
      assert index_of_a.get(edge_a.node2, None) == index_of_b.get(
          edge_b.node2, None)
52 |
53 |
def create_basic_network():
  """Build four connected nodes forming a small deterministic test network."""
  np.random.seed(10)
  names = ['an', 'bn', 'cn', 'dn']
  shapes = [[8], [8, 8, 8], [8, 8, 8], [8, 8, 8]]
  axes = [['a1'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3']]
  a, b, c, d = [
      tn.Node(np.random.normal(size=shape), name=name, axis_names=ax)
      for shape, name, ax in zip(shapes, names, axes)
  ]
  # Wire the network: a-b, a b-c-d chain, plus a second b-c bond.
  a[0] ^ b[0]
  b[1] ^ c[0]
  c[1] ^ d[0]
  c[2] ^ b[2]
  return [a, b, c, d]
73 |
74 |
def test_basic_serial():
  """A round-tripped network matches the original and contracts equally."""
  nodes = create_basic_network()
  serialized = tn.nodes_to_json(nodes)
  restored, _ = tn.nodes_from_json(serialized)
  for original, copy in zip(nodes, restored):
    assert_nodes_eq(original, copy)
  assert_graphs_eq(nodes, restored)
  contracted = tn.contractors.greedy(nodes, ignore_edge_order=True)
  restored_contracted = tn.contractors.greedy(restored, ignore_edge_order=True)
  np.testing.assert_allclose(contracted.tensor, restored_contracted.tensor)
86 |
87 |
def test_exlcuded_node_serial():
  """Serializing a subset of nodes drops edges into the excluded node."""
  nodes = create_basic_network()
  serialized = tn.nodes_to_json(nodes[:-1])
  restored, _ = tn.nodes_from_json(serialized)
  for original, copy in zip(nodes, restored):
    assert_nodes_eq(original, copy)
  # The full graph no longer matches since the last node was excluded.
  with pytest.raises(AssertionError):
    assert_graphs_eq(nodes, restored)
  sub_graph = nodes[:-1]
  sub_graph[-1][1].disconnect(sub_graph[-1][1].name)
  assert_graphs_eq(sub_graph, restored)
100 |
101 |
def test_serial_with_bindings():
  """Edge bindings survive a JSON round trip with their names intact."""
  a, b, c, d = create_basic_network()
  bindings = {}
  a[0].name = 'ea0'
  bindings['ea'] = a[0]
  for prefix, node in zip(['eb', 'ec', 'ed'], [b, c, d]):
    for axis, edge in enumerate(node.edges):
      edge.name = prefix + str(axis)
      bindings[prefix] = bindings.get(prefix, ()) + (edge,)
  serialized = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
  _, new_bindings = tn.nodes_from_json(serialized)
  assert len(new_bindings) == len(bindings)
  assert bindings['ea'].name == new_bindings['ea'][0].name
  for key in ['eb', 'ec', 'ed']:
    restored_names = {e.name for e in new_bindings[key]}
    original_names = {e.name for e in bindings[key]}
    assert original_names == restored_names
119 |
120 |
def test_serial_non_str_keys():
  """Non-string edge-binding keys are rejected with a TypeError."""
  a, b, c, d = create_basic_network()
  bindings = {1: a[0]}
  with pytest.raises(TypeError):
    _ = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
127 |
128 |
def test_serial_non_edge_values():
  """Edge-binding values that are not Edges are rejected with a TypeError."""
  a, b, c, d = create_basic_network()
  bindings = {'non_edge': a}
  with pytest.raises(TypeError):
    _ = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
135 |
136 |
def test_serial_exclude_non_network_edges():
  """Bindings to edges fully outside the serialized nodes are dropped."""
  a, b, c, d = create_basic_network()  # pylint: disable=unused-variable
  bindings = {'include': a[0], 'boundary': b[1], 'exclude': d[0]}
  serialized = tn.nodes_to_json([a, b], edge_binding=bindings)
  restored, new_bindings = tn.nodes_from_json(serialized)
  assert len(restored) == 2
  assert 'include' in new_bindings
  assert 'boundary' in new_bindings
  assert 'exclude' not in new_bindings
145 |
--------------------------------------------------------------------------------
/tensornetwork/tests/split_node_symmetric_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import tensornetwork as tn
16 | import pytest
17 | import numpy as np
18 | from tensornetwork.block_sparse import BlockSparseTensor, Index
19 | from tensornetwork.block_sparse.charge import charge_equal, BaseCharge, U1Charge
20 | from tensornetwork.block_sparse.blocksparse_utils import _find_diagonal_sparse_blocks #pylint: disable=line-too-long
21 |
22 |
def get_random(shape, num_charges, dtype=np.float64):
  """Return a random BlockSparseTensor with random U1 charges.

  Args:
    shape: Dense shape of the tensor.
    num_charges: Number of U1 charge labels per index.
    dtype: Element dtype of the returned tensor.
  """
  R = len(shape)
  charges = [
      BaseCharge(
          np.random.randint(-5, 6, (shape[n], num_charges)),
          charge_types=[U1Charge] * num_charges) for n in range(R)
  ]
  # `np.bool` is a deprecated alias of builtin `bool` (removed in NumPy 1.24).
  flows = list(np.full(R, fill_value=False, dtype=bool))
  indices = [Index(charges[n], flows[n]) for n in range(R)]
  return BlockSparseTensor.randn(indices=indices, dtype=dtype)
33 |
34 |
def get_zeros(shape, num_charges, dtype=np.float64):
  """Return an all-zero BlockSparseTensor with random U1 charges.

  Args:
    shape: Dense shape of the tensor.
    num_charges: Number of U1 charge labels per index.
    dtype: Element dtype of the returned tensor.
  """
  R = len(shape)
  charges = [
      BaseCharge(
          np.random.randint(-5, 6, (shape[n], num_charges)),
          charge_types=[U1Charge] * num_charges) for n in range(R)
  ]
  # `np.bool` is a deprecated alias of builtin `bool` (removed in NumPy 1.24).
  flows = list(np.full(R, fill_value=False, dtype=bool))
  indices = [Index(charges[n], flows[n]) for n in range(R)]
  return BlockSparseTensor.zeros(indices=indices, dtype=dtype)
45 |
46 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node(dtype, num_charges):
  """Splitting and recontracting a zero tensor preserves shape and charges."""
  np.random.seed(111)
  a = tn.Node(
      get_zeros((5, 7, 4, 5, 6), num_charges, dtype), backend='symmetric')
  left_edges = [a[i] for i in range(3)]
  right_edges = [a[i] for i in range(3, 5)]
  left, right, _ = tn.split_node(a, left_edges, right_edges)
  tn.check_correct({left, right})
  actual = left @ right
  np.testing.assert_allclose(actual.tensor.shape, (5, 7, 4, 5, 6))
  np.testing.assert_allclose(a.tensor.shape, (5, 7, 4, 5, 6))
  # A zero tensor must stay zero through the decomposition.
  np.testing.assert_allclose(left.tensor.data, 0)
  np.testing.assert_allclose(right.tensor.data, 0)
  assert np.all([
      charge_equal(a.tensor._charges[n], actual.tensor._charges[n])
      for n in range(len(a.tensor._charges))
  ])
71 |
72 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_mixed_order(dtype, num_charges):
  """split_node with interleaved left/right edges permutes axes correctly."""
  np.random.seed(131)
  a = tn.Node(
      get_zeros((5, 3, 4, 5, 6), num_charges, dtype), backend='symmetric')
  left_edges = [a[i] for i in [0, 2, 4]]
  right_edges = [a[i] for i in [1, 3]]
  left, right, _ = tn.split_node(a, left_edges, right_edges)
  tn.check_correct({left, right})
  actual = left @ right
  # The contracted result carries axes in left-then-right order.
  np.testing.assert_allclose(actual.tensor.shape, (5, 4, 6, 3, 5))
  np.testing.assert_allclose(a.tensor.shape, (5, 3, 4, 5, 6))
  np.testing.assert_allclose(left.tensor.data, 0)
  np.testing.assert_allclose(right.tensor.data, 0)
  np.testing.assert_allclose(left.tensor.shape[0:3], (5, 4, 6))
  np.testing.assert_allclose(right.tensor.shape[1:], (3, 5))
  new_order = [0, 2, 4, 1, 3]
  assert np.all([
      charge_equal(a.tensor.charges[new_order[n]][0],
                   actual.tensor.charges[n][0])
      for n in range(len(a.tensor._charges))
  ])
103 |
104 |
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_svd_consistency(dtype, num_charges):
  """Contracting the split factors reproduces the original tensor."""
  np.random.seed(111)
  original_tensor = get_random((20, 20), num_charges, dtype)
  node = tn.Node(original_tensor, backend='symmetric')
  u, vh, _ = tn.split_node(node, [node[0]], [node[1]])
  recombined = tn.contract_between(u, vh)
  np.testing.assert_allclose(
      recombined.tensor.data, original_tensor.data, rtol=1e-6)
  assert np.all([
      charge_equal(recombined.tensor._charges[n], original_tensor._charges[n])
      for n in range(len(original_tensor._charges))
  ])
119 |
--------------------------------------------------------------------------------
/tensornetwork/tests/testing_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import jax.numpy as jnp
3 | from jax import config
4 | import tensorflow as tf
5 | import torch
6 | import pytest
7 | import tensornetwork
8 | from tensornetwork.block_sparse.charge import charge_equal
9 | from tensornetwork import backends
10 | config.update("jax_enable_x64", True)
11 |
# Groups of numpy dtypes used to parametrize the backend tests.
np_real = [np.float32, np.float64]
np_complex = [np.complex64, np.complex128]
np_float_dtypes = np_real + np_complex
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_not_bool = np_float_dtypes + np_int + np_uint + [None, ]
np_not_half = [np.float32, np.float64] + np_complex
# `np.bool` is a deprecated alias of the builtin `bool` (removed in
# NumPy 1.24); use the builtin directly.
np_all_dtypes = np_not_bool + [bool, ]

torch_supported_dtypes = np_real + np_int + [np.uint8, bool, None]
23 |
24 |
def safe_randn(shape, backend, dtype):
  """
  Creates a random tensor, skipping the test when the dtype is not
  supported by the backend. Returns the Tensor and the backend
  array holding the same initial values.

  Args:
    shape: Shape of the random tensor.
    backend: Name of the backend the Tensor is built with.
    dtype: Numpy dtype of the tensor entries.
  """
  np.random.seed(seed=10)
  init = np.random.randn(*shape)
  # `np.bool` is a deprecated alias of builtin `bool` (removed in NumPy 1.24).
  if dtype == bool:
    init = np.round(init)
  init = init.astype(dtype)

  if dtype in np_complex:
    # Give complex dtypes a random imaginary part as well.
    init_i = np.random.randn(*shape)
    init = init + 1.0j * init_i.astype(dtype)

  if backend == "pytorch" and dtype not in torch_supported_dtypes:
    pytest.skip("dtype unsupported by PyTorch")
  else:
    A = tensornetwork.Tensor(init, backend=backend)
  return (A, init)
46 |
47 |
def safe_zeros(shape, backend, dtype):
  """
  Creates a tensor of zeros, skipping the test when the dtype is not
  supported by the backend. Returns both the Tensor and the backend
  array holding the same zeros.
  """
  init = np.zeros(shape, dtype=dtype)
  if backend == "pytorch" and dtype not in torch_supported_dtypes:
    pytest.skip("dtype unsupported by PyTorch")
  # pytest.skip raises, so this line only runs for supported dtypes.
  A = tensornetwork.Tensor(init, backend=backend)
  return (A, init)
60 |
61 |
def np_dtype_to_backend(backend, dtype):
  """
  Converts a given np dtype to the equivalent in the given backend. Skips
  the present test if the dtype is not supported in the backend.
  """
  backend_obj = backends.backend_factory.get_backend(backend)
  name = backend_obj.name
  # Numpy-style backends use numpy dtypes directly.
  if name in ("numpy", "symmetric"):
    return dtype
  probe = np.ones([1], dtype=dtype)
  if name == "jax":
    converted = jnp.array(probe)
  elif name == "tensorflow":
    converted = tf.convert_to_tensor(probe, dtype=dtype)
  elif name == "pytorch":
    if dtype not in torch_supported_dtypes:
      pytest.skip("dtype unsupported by PyTorch")
    converted = torch.tensor(probe)
  else:
    raise ValueError("Invalid backend ", backend)
  return converted.dtype
83 |
84 |
def check_contraction_dtype(backend, dtype):
  """
  Skips the test if the backend cannot perform multiply-add with the given
  dtype.
  """
  unsupported = False
  if backend == "tensorflow":
    tf_unsupported = [np.uint8, tf.uint8, np.uint16, tf.uint16, np.int8,
                      tf.int8, np.int16, tf.int16, np.uint32, tf.uint32,
                      np.uint64, tf.uint64]
    unsupported = dtype in tf_unsupported
  elif backend == "pytorch":
    unsupported = dtype in [np.float16, torch.float16]
  if unsupported:
    pytest.skip("backend does not support multiply-add with this dtype.")
102 |
103 |
def assert_allclose(expected, actual, backend, **kwargs):
  """Compare two tensors, dispatching on the backend's representation."""
  if backend.name != 'symmetric':
    np.testing.assert_allclose(actual, expected, **kwargs)
    return
  # Symmetric tensors: compare shapes, charges, flows, then raw data.
  exp = expected.contiguous()
  act = actual.contiguous()
  if exp.shape != act.shape:
    raise ValueError(f"expected shape = {exp.shape}, "
                     f"actual shape = {act.shape}")
  if len(exp.flat_charges) != len(act.flat_charges):
    raise ValueError("expected charges differ from actual charges")
  if len(exp.flat_flows) != len(act.flat_flows):
    raise ValueError(f"expected flat flows = {exp.flat_flows}"
                     f" differ from actual flat flows = {act.flat_flows}")
  for c1, c2 in zip(exp.flat_charges, act.flat_charges):
    if not charge_equal(c1, c2):
      raise ValueError("expected charges differ from actual charges")
  if not np.all(np.array(exp.flat_flows) == np.array(act.flat_flows)):
    raise ValueError(f"expected flat flows = {exp.flat_flows}"
                     f" differ from actual flat flows = {act.flat_flows}")
  # Only run the tolerance-configurable comparison when the cheap
  # element-wise check fails, mirroring the original behavior.
  if not np.all(np.abs(exp.data - act.data) < 1E-10):
    np.testing.assert_allclose(act.data, exp.data, **kwargs)
129 |
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/README.md:
--------------------------------------------------------------------------------
1 | # TensorNetwork Keras Layers
2 |
3 | TN Keras exists to simplify tensorization of existing TensorFlow models. These layers try to match the APIs for existing Keras layers closely. Please note these layers are currently intended for experimentation only, not production. These layers are in alpha and upcoming releases might include breaking changes.
4 |
5 | ## Table of Contents
6 |
7 | - [Usage](#usage)
8 | - [Networks](#networks)
9 | - [Support](#support)
10 |
11 | ## Usage
12 |
13 | `pip install tensornetwork` and then:
14 |
15 | ```python
16 | import tensornetwork as tn
17 | import tensorflow as tf
18 | from tensorflow.keras.models import Sequential
19 | from tensorflow.keras.layers import Dense
20 |
21 | # Import tensornetwork keras layer
22 | from tensornetwork.tn_keras.layers import DenseMPO
23 |
24 | # Build a fully connected network using TN layer DenseMPO
25 | mpo_model = Sequential()
26 | mpo_model.add(DenseMPO(256, num_nodes=4, bond_dim=8, use_bias=True, activation='relu', input_shape=(1296,)))
27 | mpo_model.add(DenseMPO(81, num_nodes=4, bond_dim=4, use_bias=True, activation='relu'))
28 | mpo_model.add(Dense(1, use_bias=True, activation='sigmoid'))
29 |
30 | ...
31 | ```
32 | ## Networks
33 |
34 | - **DenseDecomp**. A TN layer comparable to Dense that carries out matrix multiplication with 2 significantly smaller weight matrices instead of 1 large one. This layer is similar to performing a SVD on the weight matrix and dropping the lowest singular values. The TN looks like:
35 |
36 | 
37 |
38 | - **DenseMPO**. A TN layer that implements an MPO (Matrix Product Operator), a common tensor network found in condensed matter physics. MPOs are one of the most successful TNs we've seen in practice. Note for this layer the input dimension, output dimension, and number of nodes must all relate in order for the network structure to work. Specifically, `input_shape[-1]**(1. / num_nodes)` and `output_dim**(1. / num_nodes)` must both be round. The TN looks like:
39 |
40 | 
41 |
- **Conv2DMPO**. A TN layer that recreates the functionality of a traditional 2d convolutional layer, but stores the 'kernel' as a network of nodes forming an MPO. The bond dimension of the MPO can be adjusted to increase or decrease the number of parameters independently of the input and output dimensions. When the layer is called, the MPO is contracted into a traditional kernel and convolved with the layer input to produce a tensor of outputs. As with the DenseMPO, the `input_shape[-1]**(1. / num_nodes)` and `output_dim**(1. / num_nodes)` must both be round.
43 |
44 | - **Entangler**. A TN layer inspired by quantum circuits that allows one to dramatically increase the dimensionality of hidden layers, far beyond what is currently feasible with normal dense layers e.g. hidden layers of >1M in size. Note for this layer the input dimensions and output dimensions will be equal. Additionally, `input_shape[-1]**(1. / num_legs)` must be round. `num_levels` is the only parameter that does not change input/output shape; it can be increased to increase the power of the layer, but inference time will also scale approximately linearly. The TN looks like:
45 |
46 | 
47 |
48 | - **Expander**. A TN layer to expand the dimensionality of an input tensor, commonly used in conjunction with Entangler. Since Entangler does not modify the input dimension, an Expander layer is often placed before an Entangler layer to increase the size of the input. Note the output dim will be `input_shape[-1] * (exp_base**num_nodes)` so increasing `num_nodes` will increase the output dim exponentially. The TN looks like:
49 |
50 | 
51 |
52 | - **Condenser**. A TN layer to reduce the dimensionality of an input tensor, commonly used in conjunction with Entangler. Since Entangler does not modify the output dimension, a Condenser layer is often placed after an Entangler layer to decrease the size of the output. Note the output dim will be `input_shape[-1] // (exp_base**num_nodes)` so increasing `num_nodes` will decrease the output dim exponentially. The TN looks like:
53 |
54 | 
55 |
56 | ## Support
57 |
58 | Please [open an issue](https://github.com/google/TensorNetwork/issues/new) for support.
59 |
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/__init__.py
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/condenser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/condenser.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/decomp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/decomp.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/entangler.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/entangler.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/expander.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/expander.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/mpo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/mpo.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/images/staircase_entangler.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/TensorNetwork/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/tn_keras/images/staircase_entangler.png
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/layers.py:
--------------------------------------------------------------------------------
1 | from tensornetwork.tn_keras.dense import DenseDecomp
2 | from tensornetwork.tn_keras.mpo import DenseMPO
3 | from tensornetwork.tn_keras.conv2d_mpo import Conv2DMPO
4 | from tensornetwork.tn_keras.condenser import DenseCondenser
5 | from tensornetwork.tn_keras.expander import DenseExpander
6 | from tensornetwork.tn_keras.entangler import DenseEntangler
7 |
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/test_conv_layer.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=no-name-in-module
2 | import pytest
3 | import math
4 | import os
5 | import shutil
6 | import numpy as np
7 | import tensorflow as tf
8 | import tensorflow.keras
9 | from tensorflow.keras import backend as K
10 | from tensorflow.keras.models import Sequential, load_model
11 | from tensorflow.keras.layers import Conv2D, Dense, Flatten
12 | from tensornetwork.tn_keras.layers import Conv2DMPO
13 |
14 | LAYER_NAME = 'conv_layer'
15 |
16 |
@pytest.fixture(params=[(100, 8, 8, 16)])
def dummy_data(request):
  """Fixture yielding (data, labels) with half-ones / half-zeros labels."""
  np.random.seed(42)
  features = np.random.rand(*request.param)
  labels = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
  return features, labels
23 |
24 |
@pytest.fixture()
def make_model(dummy_data):
  """Fixture building a small Sequential model with a Conv2DMPO front end."""
  # pylint: disable=redefined-outer-name
  data, _ = dummy_data
  conv_layer = Conv2DMPO(filters=4,
                         kernel_size=3,
                         num_nodes=2,
                         bond_dim=10,
                         padding='same',
                         input_shape=data.shape[1:],
                         name=LAYER_NAME)
  model = Sequential()
  model.add(conv_layer)
  model.add(Flatten())
  model.add(Dense(1, activation='sigmoid'))
  return model
42 |
43 |
def test_train(dummy_data, make_model):
  """Training for a few epochs should reduce loss and raise accuracy."""
  # pylint: disable=redefined-outer-name
  model = make_model
  model.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])
  data, labels = dummy_data
  # Train the model for 10 epochs.
  history = model.fit(data, labels, epochs=10, batch_size=32)
  # Loss should go down while accuracy goes up.
  losses = history.history['loss']
  accuracies = history.history['accuracy']
  assert losses[0] > losses[-1]
  assert accuracies[0] < accuracies[-1]
57 |
58 |
def test_weights_change(dummy_data, make_model):
  """Every weight tensor must be updated by a few epochs of training."""
  # pylint: disable=redefined-outer-name
  data, labels = dummy_data
  model = make_model
  model.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])
  weights_before = model.get_weights()
  model.fit(data, labels, epochs=5, batch_size=32)
  weights_after = model.get_weights()
  # Every layer's weights must have changed somewhere.
  for old, new in zip(weights_before, weights_after):
    assert (old != new).any()
74 |
75 |
def test_output_shape(dummy_data, make_model):
  """compute_output_shape must agree with the layer's actual output shape."""
  # pylint: disable=redefined-outer-name
  data, _ = dummy_data
  inputs = K.constant(data)
  layer = make_model.get_layer(LAYER_NAME)
  actual_output_shape = layer(inputs).shape
  expected_output_shape = layer.compute_output_shape(inputs.shape)
  np.testing.assert_equal(expected_output_shape, actual_output_shape)
86 |
87 |
def test_num_parameters(dummy_data, make_model):
  """count_params must match the analytic MPO parameter count."""
  # pylint: disable=redefined-outer-name
  data, _ = dummy_data
  layer = make_model.get_layer(LAYER_NAME)
  in_dim = math.ceil(data.shape[-1]**(1. / layer.num_nodes))
  out_dim = math.ceil(layer.filters**(1. / layer.num_nodes))
  expected = ((layer.num_nodes - 2) *
              (layer.bond_dim * 2 * in_dim * out_dim) +
              (layer.kernel_size[0] * out_dim * in_dim * layer.bond_dim) +
              (layer.kernel_size[1] * out_dim * in_dim * layer.bond_dim) +
              layer.filters)
  np.testing.assert_equal(expected, layer.count_params())
102 |
103 |
def test_config(make_model):
  """A layer rebuilt from its config must match the original exactly."""
  # pylint: disable=redefined-outer-name
  expected_num_parameters = make_model.layers[0].count_params()
  # Serialize the layer and rebuild a fresh one from the config.
  layer_config = make_model.get_layer(LAYER_NAME).get_config()
  rebuilt = Conv2DMPO.from_config(layer_config)
  # Build the rebuilt layer so its weights exist and can be counted.
  rebuilt.build(layer_config['batch_input_shape'])
  np.testing.assert_equal(expected_num_parameters, rebuilt.count_params())
  assert layer_config == rebuilt.get_config()
120 |
def test_model_save(dummy_data, make_model, tmp_path):
  """Save/load round trip in both SavedModel and HDF5 formats."""
  # pylint: disable=redefined-outer-name
  data, labels = dummy_data
  model = make_model
  model.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])

  # Train briefly so the saved weights are non-trivial.
  model.fit(data, labels, epochs=5)

  for save_path in (tmp_path / 'test_model', tmp_path / 'test_model.h5'):
    # Save model to a SavedModel folder or h5 file, then load model
    print('save_path: ', save_path)
    model.save(save_path)
    loaded_model = load_model(save_path)

    # Clean up: a directory for SavedModel, a single file for h5.
    if os.path.isdir(save_path):
      shutil.rmtree(save_path)
    elif os.path.exists(save_path):
      os.remove(save_path)

    # Predictions must survive the round trip.
    np.testing.assert_almost_equal(model.predict(data),
                                   loaded_model.predict(data))
149 |
--------------------------------------------------------------------------------
/tensornetwork/tn_keras/test_mpo.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=no-name-in-module
2 | import itertools
3 | from tensornetwork.tn_keras.layers import DenseMPO
4 | import tensorflow as tf
5 | from tensorflow.keras.models import Sequential
6 | from tensorflow.keras import Input
7 | import numpy as np
8 | import pytest
9 |
10 |
@pytest.mark.parametrize('in_dim_base,dim1,dim2,num_nodes,bond_dim',
                         itertools.product([3, 4], [3, 4], [2, 5], [3, 4],
                                           [2, 3]))
def test_shape_sanity_check(in_dim_base, dim1, dim2, num_nodes, bond_dim):
  """Stacked DenseMPO layers map (batch, base**n) -> (batch, dim2**n)."""
  # Hard code batch size.
  batch_size = 32
  model = Sequential([
      Input(in_dim_base**num_nodes),
      DenseMPO(dim1**num_nodes, num_nodes=num_nodes, bond_dim=bond_dim),
      DenseMPO(dim2**num_nodes, num_nodes=num_nodes, bond_dim=bond_dim),
  ])
  output = model.predict(np.ones((batch_size, in_dim_base**num_nodes)))
  assert output.shape == (batch_size, dim2**num_nodes)
23 |
--------------------------------------------------------------------------------
/tensornetwork/version.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Developers
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | __version__ = '0.4.6'
16 |
--------------------------------------------------------------------------------
/tensornetwork/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tensornetwork/visualization/graphviz.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Implementation of TensorNetwork Graphviz visualization."""
15 |
16 | import graphviz
17 | from typing import Optional, Text, Iterable
18 | from tensornetwork.network_components import AbstractNode
19 |
20 |
#pylint: disable=no-member
def to_graphviz(nodes: Iterable[AbstractNode],
                graph: Optional[graphviz.Graph] = None,
                include_all_names: bool = False,
                engine: Text = "neato") -> graphviz.Graph:
  """Create a graphviz Graph that is isomorphic to the given TensorNetwork.

  Args:
    nodes: a collection of nodes
    graph: An optional `graphviz.Graph` object to write to. Use this only
      if you wish to set custom attributes for the graph.
    include_all_names: Whether to include all of the names in the graph.
      If False, all names starting with '__' (which are almost always just
      the default generated names) will be dropped to reduce clutter.
    engine: The graphviz engine to use. Only applicable if `graph` is None.

  Returns:
    The `graphviz.Graph` object.
  """
  if graph is None:
    #pylint: disable=no-member
    graph = graphviz.Graph('G', engine=engine)

  def display_label(name: Text) -> Text:
    # Auto-generated names start with '__'; hide them unless requested.
    if include_all_names or not name.startswith("__"):
      return name
    return ""

  for node in nodes:
    graph.node(str(id(node)), label=display_label(node.name))

  drawn_edges = set()
  for node in nodes:
    for axis, edge in enumerate(node.edges):
      # Shared edges appear on two nodes; draw each only once.
      if edge in drawn_edges:
        continue
      drawn_edges.add(edge)
      edge_label = display_label(edge.name)
      if edge.is_dangling():
        # A dangling edge has only one endpoint, so attach it to an
        # invisible anchor node.
        anchor = "{}_{}".format(id(node), axis)
        graph.node(anchor, label="", _attributes={"style": "invis"})
        graph.edge(anchor, str(id(node)), label=edge_label)
      else:
        graph.edge(str(id(edge.node1)), str(id(edge.node2)), label=edge_label)
  return graph
70 |
--------------------------------------------------------------------------------
/tensornetwork/visualization/graphviz_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorNetwork Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Test of TensorNetwork Graphviz visualization."""
15 |
16 | import graphviz
17 | from tensornetwork.visualization.graphviz import to_graphviz
18 | from tensornetwork import connect, Node
19 | import numpy as np
20 |
21 |
def test_sanity_check():
  """to_graphviz on a two-node connected network returns a graphviz.Graph."""
  left = Node(np.eye(2), backend="tensorflow")
  right = Node(np.eye(2), backend="tensorflow")
  connect(left[0], right[0])
  result = to_graphviz([left, right])
  #pylint: disable=no-member
  assert isinstance(result, graphviz.Graph)
29 |
--------------------------------------------------------------------------------