├── acqdp
├── __init__.py
├── utility
│ ├── __init__.py
│ ├── opt_einsum_ext.py
│ └── opt_einsum_helpers.pyx
├── tensor_network
│ ├── default_params.json
│ ├── khp_params.json
│ ├── __init__.py
│ ├── kahypar_profiles
│ │ ├── cut_rKaHyPar_sea20.ini
│ │ ├── km1_rKaHyPar_sea20.ini
│ │ ├── cut_rKaHyPar_dissertation.ini
│ │ ├── km1_rKaHyPar_dissertation.ini
│ │ ├── cut_kKaHyPar_sea20.ini
│ │ ├── km1_kKaHyPar_sea20.ini
│ │ ├── cut_kKaHyPar_dissertation.ini
│ │ └── km1_kKaHyPar_dissertation.ini
│ ├── tensor.py
│ ├── tensor_view.py
│ ├── order_finder.py
│ ├── contractor.py
│ ├── tensor_sum.py
│ ├── undirected_contraction_tree.py
│ ├── tensor_valued.py
│ ├── local_optimizer.py
│ └── slicer.py
└── circuit
│ ├── __init__.py
│ ├── noise.py
│ └── converter.py
├── demo
├── __init__.py
├── QAOA
│ ├── __init__.py
│ ├── qaoa_demo.py
│ └── qaoa.py
└── QEC
│ ├── __init__.py
│ ├── decoder.py
│ ├── surface_code.py
│ └── noise_model.py
├── tests
├── __init__.py
├── test_tensor_sum.py
├── test_tensor.py
└── test_tensor_network.py
├── docsource
├── source
│ ├── MPS.pdf
│ ├── circuit.pdf
│ ├── benchmark.pdf
│ ├── flowchart.pdf
│ ├── flowchart.png
│ ├── _templates
│ │ └── side_info.html
│ ├── index.rst
│ ├── acqdp.circuit.rst
│ ├── installation.rst
│ ├── sycamore.rst
│ ├── conf.py
│ ├── tensor_network.rst
│ └── tutorial.rst
├── Makefile
└── make.bat
├── MANIFEST.in
├── .flake8
├── examples
├── GHZ.py
├── khp_params.json
└── circuit_simulation.py
├── LICENSE
├── setup.py
├── .github
└── workflows
│ └── python-package.yml
├── .gitignore
└── README.md
/acqdp/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demo/QAOA/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demo/QEC/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/acqdp/utility/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docsource/source/MPS.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alibaba/acqdp/HEAD/docsource/source/MPS.pdf
--------------------------------------------------------------------------------
/docsource/source/circuit.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alibaba/acqdp/HEAD/docsource/source/circuit.pdf
--------------------------------------------------------------------------------
/docsource/source/benchmark.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alibaba/acqdp/HEAD/docsource/source/benchmark.pdf
--------------------------------------------------------------------------------
/docsource/source/flowchart.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alibaba/acqdp/HEAD/docsource/source/flowchart.pdf
--------------------------------------------------------------------------------
/docsource/source/flowchart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alibaba/acqdp/HEAD/docsource/source/flowchart.png
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include acqdp *.ini *.json *.pyx
2 | prune demo
3 | prune examples
4 | prune benchmark
5 | global-exclude .DS_Store
6 |
--------------------------------------------------------------------------------
/demo/QAOA/qaoa_demo.py:
--------------------------------------------------------------------------------
1 | from demo.QAOA.qaoa import QAOAOptimizer
2 |
3 | a = {(i, (i + 1) % 30): [[1, -1], [-1, 1]] for i in range(30)}  # 30-node ring graph; each edge (i, i+1 mod 30) carries a 2x2 coupling matrix -- presumably MaxCut-style Ising couplings, confirm in demo.QAOA.qaoa
4 | q = QAOAOptimizer(a, num_layers=2)  # 2-layer (p=2) QAOA ansatz over the graph above
5 | q.preprocess()  # NOTE(review): presumably builds the tensor network / contraction scheme before optimizing
6 | q.optimize()
7 |
--------------------------------------------------------------------------------
/docsource/source/_templates/side_info.html:
--------------------------------------------------------------------------------
1 |
See Also
2 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/default_params.json:
--------------------------------------------------------------------------------
1 | {
2 | "order_finder_name": "default",
3 | "order_finder_params": {
4 | "order_method": "default"
5 | },
6 | "slicer_name": "default",
7 | "compiler_params" : {"do_patch": false}
8 | }
9 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = C901, #is too complex
3 | E741, # ambiguous variable name
4 | E402, # module level import not at top of file
5 | W503 # line break before binary operator
6 |
7 | exclude =
8 | .git,
9 | __pycache__,
10 | build,
11 | dist
12 |
13 | max-complexity = 15
14 | max-line-length = 127
--------------------------------------------------------------------------------
/examples/GHZ.py:
--------------------------------------------------------------------------------
1 | from acqdp.circuit import add_noise, Depolarization
2 | from acqdp.circuit import Circuit, HGate, CNOTGate, ZeroState
3 |
4 |
5 | def GHZState(n):  # build an n-qubit GHZ-state preparation circuit
6 |     a = Circuit().append(ZeroState, [0]).append(HGate, [0])  # qubit 0: prepare |0>, then Hadamard
7 |     for i in range(n - 1):
8 |         a.append(ZeroState, [i + 1])  # fresh |0> on qubit i+1
9 |         a.append(CNOTGate, [0, i + 1])  # CNOT from qubit 0 fans the superposition out -> GHZ entanglement
10 |     return a
11 |
12 |
13 | a = GHZState(10)
14 | b = add_noise(a, Depolarization(0.01))  # noisy copy; presumably applies p=0.01 depolarization per operation -- confirm in acqdp.circuit.noise
15 | print((b | a.adjoint()).tensor_density.contract())  # compose noisy circuit with ideal inverse and contract the density tensor network (a fidelity-like figure of merit)
16 |
--------------------------------------------------------------------------------
/docsource/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = source
8 | BUILDDIR = build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | github:
17 | @make html
18 | @cp -a build/html/. ../docs
19 |
20 | # Catch-all target: route all unknown targets to Sphinx using the new
21 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
22 | %: Makefile
23 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docsource/source/index.rst:
--------------------------------------------------------------------------------
1 | .. ACQDP documentation master file, created by
2 | sphinx-quickstart on Fri Feb 15 16:01:27 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Alibaba Cloud Quantum Development Platform (ACQDP) v0.1.1 documentation
7 | ==============================================================================
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Contents:
12 |
13 | installation.rst
14 | tutorial.rst
15 | tensor_network.rst
16 | acqdp.circuit.rst
17 | qaoa.rst
18 | sycamore.rst
19 |
20 | Indices and tables
21 | ==================
22 |
23 | * :ref:`genindex`
24 | * :ref:`modindex`
25 | * :ref:`search`
26 |
--------------------------------------------------------------------------------
/examples/khp_params.json:
--------------------------------------------------------------------------------
1 | {
2 | "order_finder_name" : "sliced",
3 | "order_finder_params": {
4 | "base_order_finder": {
5 | "order_finder_name" : "khp",
6 | "order_finder_params": {
7 | "num_iters": 10,
8 | "num_threads": 1,
9 | "num_cmas": 1
10 | }
11 | },
12 | "slicer":{
13 | "slicer_name": "default",
14 | "slicer_params": {
15 | "num_iter_before": 30,
16 | "num_iter_middle": 20,
17 | "num_iter_after": 50,
18 | "max_tw": 28,
19 | "max_num_slice": 25,
20 | "num_threads": 1
21 | }
22 | }
23 | },
24 | "compiler_params":{
25 | "do_patch": true
26 | },
27 | "contractor_params":{
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/khp_params.json:
--------------------------------------------------------------------------------
1 | {
2 | "order_finder_name" : "sliced",
3 | "order_finder_params": {
4 | "base_order_finder": {
5 | "order_finder_name" : "khp",
6 | "order_finder_params": {
7 | "num_iters": 10,
8 | "num_threads": 1,
9 | "num_cmas": 1
10 | }
11 | },
12 | "slicer":{
13 | "slicer_name": "default",
14 | "slicer_params": {
15 | "num_iter_before": 30,
16 | "num_iter_middle": 20,
17 | "num_iter_after": 50,
18 | "max_tw": 28,
19 | "max_num_slice": 25,
20 | "num_threads": 1
21 | }
22 | }
23 | },
24 | "compiler_params":{
25 | "do_patch": true
26 | },
27 | "contractor_params":{
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/acqdp/utility/opt_einsum_ext.py:
--------------------------------------------------------------------------------
1 | from opt_einsum import helpers, paths
2 | import sys
3 | import os
4 |
5 | try:
6 | from . import opt_einsum_paths, opt_einsum_helpers
7 |
8 | for name in opt_einsum_paths.__all__:
9 | setattr(paths, name, getattr(opt_einsum_paths, name))
10 |
11 | for name in opt_einsum_helpers.__all__:
12 | setattr(helpers, name, getattr(opt_einsum_helpers, name))
13 | except ImportError:
14 | print('Cython modules for opt_einsum are not built. ACQDP will function normally, but contraction scheme finding may be'
15 | 'slower. To build those modules, run:')
16 | print(f' {os.path.basename(sys.executable)} -m pip install Cython')
17 | print(f' {os.path.basename(sys.executable)} -m pip install --force-reinstall --no-deps acqdp')
18 | print('(or reinstall acqdp from whatever source you prefer)')
19 | print()
20 |
--------------------------------------------------------------------------------
/docsource/source/acqdp.circuit.rst:
--------------------------------------------------------------------------------
1 | acqdp.circuit package
2 | =====================
3 |
4 | Submodules
5 | ----------
6 |
7 | acqdp.circuit.circuit module
8 | ----------------------------
9 |
10 | .. automodule:: acqdp.circuit.circuit
11 | :members:
12 | :special-members: __mul__, __or__
13 | :undoc-members:
14 | :show-inheritance:
15 |
16 | acqdp.circuit.converter module
17 | ------------------------------
18 |
19 | .. automodule:: acqdp.circuit.converter
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
24 | acqdp.circuit.noise module
25 | --------------------------
26 |
27 | .. automodule:: acqdp.circuit.noise
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
32 |
33 | Module contents
34 | ---------------
35 |
36 | .. automodule:: acqdp.circuit
37 | :members:
38 | :undoc-members:
39 | :show-inheritance:
40 |
--------------------------------------------------------------------------------
/docsource/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Alibaba
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/acqdp/circuit/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from . import (
4 | circuit,
5 | noise,
6 | converter
7 | )
8 |
9 | from .circuit import (
10 | Operation,
11 | ImmutableOperation,
12 | State,
13 | Measurement,
14 | Channel,
15 |
16 | PureOperation,
17 | PureState,
18 | Unitary,
19 | PureMeas,
20 | ControlledOperation,
21 | Controlled,
22 | Diagonal,
23 | CompState,
24 | CompMeas,
25 | XGate,
26 | YGate,
27 | ZGate,
28 | TGate,
29 | HGate,
30 | SGate,
31 | IGate,
32 | SWAPGate,
33 | XHalfGate,
34 | YHalfGate,
35 | CNOTGate,
36 | Trace,
37 | XRotation,
38 | ZRotation,
39 | Circuit,
40 | ControlledCircuit,
41 | XXRotation,
42 | ZZRotation,
43 | SuperPosition,
44 | FourierMeas,
45 | FourierState,
46 | ZeroState,
47 | OneState,
48 | PlusState,
49 | MinusState,
50 | ZeroMeas,
51 | OneMeas,
52 | PlusMeas,
53 | MinusMeas,
54 | CZGate
55 | )
56 |
57 | from .noise import (
58 | Depolarization,
59 | Dephasing,
60 | AmplitudeDampling,
61 | add_noise
62 | )
63 |
64 | from .converter import (
65 | Converter
66 | )
67 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/__init__.py:
--------------------------------------------------------------------------------
1 | from .tensor_network import TensorNetwork
2 | from .tensor_sum import TensorSum
3 | from .tensor import Tensor
4 | from .tensor_view import TensorView
5 | from .tensor_valued import TensorValued, normalize, conjugate, transpose
6 | from .order_finder import OrderFinder, SlicedOrderFinder, OptEinsumOrderFinder
7 | from .kahypar_order_finder import KHPOrderFinder
8 | from .slicer import Slicer, MPSlicer
9 |
10 | from .contraction import (ContractionCost, ContractionScheme,
11 | ContractionTask)
12 |
13 | from .local_optimizer import (LocalOptimizer, OrderResolver,
14 | defaultOrderResolver)
15 |
16 | from .compiler import Compiler
17 |
18 | from .contractor import Contractor
19 |
20 | from .order_finder import get_order_finder
21 |
22 | from .slicer import get_slicer
23 |
24 | __all__ = [
25 | 'TensorNetwork', 'Tensor', 'TensorView', 'TensorSum', 'TensorValued',
26 | 'ContractionCost', 'ContractionScheme', 'ContractionTask', 'LocalOptimizer',
27 | 'OrderResolver', 'defaultOrderResolver', 'Compiler', 'Contractor',
28 | 'get_order_finder', 'get_slicer', 'normalize', 'conjugate', 'transpose',
29 | 'OrderFinder', 'OptEinsumOrderFinder', 'SlicedOrderFinder', 'KHPOrderFinder', 'Slicer', 'MPSlicer'
30 | ]
31 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | import os.path
3 |
4 | this_dir = os.path.abspath(os.path.dirname(__file__))
5 |
6 | with open(os.path.join(this_dir, "README.md"), "r") as fh:  # README doubles as the PyPI long description
7 |     long_description = fh.read()
8 |
9 | try:
10 |     from Cython.Build import cythonize
11 | except ImportError:
12 |     print('Installing QDP without Cython...')
13 |
14 |     def cythonize(*args, **kwargs):  # stub so the ext_modules line below is a no-op without Cython
15 |         return []
16 |
17 | setup(
18 |     name='acqdp',
19 |     version='0.1.1',
20 |     description='Alibaba Cloud Quantum Development Platform',
21 |     long_description=long_description,
22 |     long_description_content_type='text/markdown',
23 |     author='Alibaba Quantum Lab',
24 |     author_email='aql_software@alibabacloud.com',
25 |     license='MIT',
26 |     url='https://github.com/alibaba/acqdp',
27 |     packages=find_packages(include=['acqdp*']),  # ship only the library, not demo/tests/examples
28 |     package_data={'acqdp': ['*.ini', '*.json']},  # KaHyPar profiles and contraction parameter files
29 |     include_package_data=True,  # picks up files matched by MANIFEST.in as well
30 |     ext_modules=cythonize('acqdp/utility/*.pyx'),  # empty list (stub above) when Cython is absent
31 |     zip_safe=False,
32 |     install_requires=[
33 |         'numpy',
34 |         'scipy',
35 |         'networkx',
36 |         'numexpr',
37 |         'matplotlib',
38 |         'opt_einsum',
39 |         'kahypar == 1.1.2',
40 |         'cma',
41 |         'jax',
42 |         'jaxlib',
43 |         'tqdm'
44 |     ],
45 |     python_requires='>=3.7'
46 | )
47 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [ main ]
9 | pull_request:
10 | branches: [ main ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | python-version: [3.7, 3.8]
19 |
20 | steps:
21 | - uses: actions/checkout@v2
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 | - name: Install dependencies
27 | run: |
28 | python -m pip install --upgrade pip
29 | pip install flake8 pytest
30 | if [ -f requirements.txt ]
31 | then
32 | pip install -r requirements.txt
33 | else
34 | pip install .
35 | fi
36 | - name: Lint with flake8
37 | run: |
38 | # stop the build if there are Python syntax errors or undefined names
39 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
40 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
41 | flake8 . --count --statistics
42 | - name: Test with pytest
43 | run: |
44 | pytest
45 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/cut_rKaHyPar_sea20.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=recursive
3 | objective=cut
4 | seed=-1
5 | cmaxnet=-1
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=false
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=heavy_lazy
24 | c-s=3.25
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=multiplicative
30 | c-rating-acceptance-criterion=best
31 | c-fixed-vertex-acceptance-criterion=free_vertex_only
32 | # main -> initial partitioning
33 | i-mode=direct
34 | i-technique=flat
35 | # initial partitioning -> initial partitioning
36 | i-algo=pool
37 | i-runs=20
38 | # initial partitioning -> local search
39 | i-r-type=twoway_fm
40 | i-r-runs=-1
41 | i-r-fm-stop=simple
42 | i-r-fm-stop-i=50
43 | # main -> local search
44 | r-type=twoway_fm_hyperflow_cutter
45 | r-runs=-1
46 | r-fm-stop=adaptive_opt
47 | r-fm-stop-alpha=1
48 | r-fm-stop-i=350
49 | # local_search -> flow scheduling and heuristics
50 | r-flow-execution-policy=exponential
51 | # local_search -> hyperflowcutter configuration
52 | r-hfc-size-constraint=mf-style
53 | r-hfc-scaling=16
54 | r-hfc-distance-based-piercing=true
55 | r-hfc-mbc=true
56 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/km1_rKaHyPar_sea20.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=recursive
3 | objective=km1
4 | seed=-1
5 | cmaxnet=-1
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=false
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=heavy_lazy
24 | c-s=3.25
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=multiplicative
30 | c-rating-acceptance-criterion=best
31 | c-fixed-vertex-acceptance-criterion=free_vertex_only
32 | # main -> initial partitioning
33 | i-mode=direct
34 | i-technique=flat
35 | # initial partitioning -> initial partitioning
36 | i-algo=pool
37 | i-runs=20
38 | # initial partitioning -> local search
39 | i-r-type=twoway_fm
40 | i-r-runs=-1
41 | i-r-fm-stop=simple
42 | i-r-fm-stop-i=50
43 | # main -> local search
44 | r-type=twoway_fm_hyperflow_cutter
45 | r-runs=-1
46 | r-fm-stop=adaptive_opt
47 | r-fm-stop-alpha=1
48 | r-fm-stop-i=350
49 | # local_search -> flow scheduling and heuristics
50 | r-flow-execution-policy=exponential
51 | # local_search -> hyperflowcutter configuration
52 | r-hfc-size-constraint=mf-style
53 | r-hfc-scaling=16
54 | r-hfc-distance-based-piercing=true
55 | r-hfc-mbc=true
56 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/cut_rKaHyPar_dissertation.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=recursive
3 | objective=cut
4 | seed=-1
5 | cmaxnet=-1
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=false
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=heavy_lazy
24 | c-s=3.25
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=multiplicative
30 | c-rating-acceptance-criterion=best
31 | c-fixed-vertex-acceptance-criterion=free_vertex_only
32 | # main -> initial partitioning
33 | i-mode=direct
34 | i-technique=flat
35 | # initial partitioning -> initial partitioning
36 | i-algo=pool
37 | i-runs=20
38 | # initial partitioning -> local search
39 | i-r-type=twoway_fm
40 | i-r-runs=-1
41 | i-r-fm-stop=simple
42 | i-r-fm-stop-i=50
43 | # main -> local search
44 | r-type=twoway_fm_flow
45 | r-runs=-1
46 | r-fm-stop=simple
47 | r-fm-stop-alpha=1
48 | r-fm-stop-i=350
49 | # local_search -> flow
50 | r-flow-algorithm=ibfs
51 | r-flow-alpha=16
52 | r-flow-beta=128
53 | r-flow-network=hybrid
54 | r-flow-execution-policy=exponential
55 | r-flow-use-most-balanced-minimum-cut=true
56 | r-flow-use-adaptive-alpha-stopping-rule=true
57 | r-flow-ignore-small-hyperedge-cut=true
58 | r-flow-use-improvement-history=true
59 |
60 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/km1_rKaHyPar_dissertation.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=recursive
3 | objective=km1
4 | seed=-1
5 | cmaxnet=-1
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=false
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=heavy_lazy
24 | c-s=3.25
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=multiplicative
30 | c-rating-acceptance-criterion=best
31 | c-fixed-vertex-acceptance-criterion=free_vertex_only
32 | # main -> initial partitioning
33 | i-mode=direct
34 | i-technique=flat
35 | # initial partitioning -> initial partitioning
36 | i-algo=pool
37 | i-runs=20
38 | # initial partitioning -> local search
39 | i-r-type=twoway_fm
40 | i-r-runs=-1
41 | i-r-fm-stop=simple
42 | i-r-fm-stop-i=50
43 | # main -> local search
44 | r-type=twoway_fm_flow
45 | r-runs=-1
46 | r-fm-stop=simple
47 | r-fm-stop-alpha=1
48 | r-fm-stop-i=350
49 | # local_search -> flow
50 | r-flow-algorithm=ibfs
51 | r-flow-alpha=16
52 | r-flow-beta=128
53 | r-flow-network=hybrid
54 | r-flow-execution-policy=exponential
55 | r-flow-use-most-balanced-minimum-cut=true
56 | r-flow-use-adaptive-alpha-stopping-rule=true
57 | r-flow-ignore-small-hyperedge-cut=true
58 | r-flow-use-improvement-history=true
59 |
60 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/cut_kKaHyPar_sea20.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=direct
3 | objective=cut
4 | seed=-1
5 | cmaxnet=1000
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=true
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=ml_style
24 | c-s=1
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=no_penalty
30 | c-rating-acceptance-criterion=best_prefer_unmatched
31 | c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
32 | # main -> initial partitioning
33 | i-mode=recursive
34 | i-technique=multi
35 | # initial partitioning -> coarsening
36 | i-c-type=ml_style
37 | i-c-s=1
38 | i-c-t=150
39 | # initial partitioning -> coarsening -> rating
40 | i-c-rating-score=heavy_edge
41 | i-c-rating-use-communities=true
42 | i-c-rating-heavy_node_penalty=no_penalty
43 | i-c-rating-acceptance-criterion=best_prefer_unmatched
44 | i-c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
45 | # initial partitioning -> initial partitioning
46 | i-algo=pool
47 | i-runs=20
48 | # initial partitioning -> local search
49 | i-r-type=twoway_fm
50 | i-r-runs=-1
51 | i-r-fm-stop=simple
52 | i-r-fm-stop-i=50
53 | # main -> local search
54 | r-type=kway_fm_hyperflow_cutter
55 | r-runs=-1
56 | r-fm-stop=adaptive_opt
57 | r-fm-stop-alpha=1
58 | r-fm-stop-i=350
59 | # local_search -> flow scheduling and heuristics
60 | r-flow-execution-policy=exponential
61 | # local_search -> hyperflowcutter configuration
62 | r-hfc-size-constraint=mf-style
63 | r-hfc-scaling=16
64 | r-hfc-distance-based-piercing=true
65 | r-hfc-mbc=true
66 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/km1_kKaHyPar_sea20.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=direct
3 | objective=km1
4 | seed=-1
5 | cmaxnet=1000
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=true
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=ml_style
24 | c-s=1
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=no_penalty
30 | c-rating-acceptance-criterion=best_prefer_unmatched
31 | c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
32 | # main -> initial partitioning
33 | i-mode=recursive
34 | i-technique=multi
35 | # initial partitioning -> coarsening
36 | i-c-type=ml_style
37 | i-c-s=1
38 | i-c-t=150
39 | # initial partitioning -> coarsening -> rating
40 | i-c-rating-score=heavy_edge
41 | i-c-rating-use-communities=true
42 | i-c-rating-heavy_node_penalty=no_penalty
43 | i-c-rating-acceptance-criterion=best_prefer_unmatched
44 | i-c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
45 | # initial partitioning -> initial partitioning
46 | i-algo=pool
47 | i-runs=20
48 | # initial partitioning -> local search
49 | i-r-type=twoway_fm
50 | i-r-runs=-1
51 | i-r-fm-stop=simple
52 | i-r-fm-stop-i=50
53 | # main -> local search
54 | r-type=kway_fm_hyperflow_cutter_km1
55 | r-runs=-1
56 | r-fm-stop=adaptive_opt
57 | r-fm-stop-alpha=1
58 | r-fm-stop-i=350
59 | # local_search -> flow scheduling and heuristics
60 | r-flow-execution-policy=exponential
61 | # local_search -> hyperflowcutter configuration
62 | r-hfc-size-constraint=mf-style
63 | r-hfc-scaling=16
64 | r-hfc-distance-based-piercing=true
65 | r-hfc-mbc=true
66 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/cut_kKaHyPar_dissertation.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=direct
3 | objective=cut
4 | seed=-1
5 | cmaxnet=1000
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=true
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=ml_style
24 | c-s=1
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=no_penalty
30 | c-rating-acceptance-criterion=best_prefer_unmatched
31 | c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
32 | # main -> initial partitioning
33 | i-mode=recursive
34 | i-technique=multi
35 | # initial partitioning -> coarsening
36 | i-c-type=ml_style
37 | i-c-s=1
38 | i-c-t=150
39 | # initial partitioning -> coarsening -> rating
40 | i-c-rating-score=heavy_edge
41 | i-c-rating-use-communities=true
42 | i-c-rating-heavy_node_penalty=no_penalty
43 | i-c-rating-acceptance-criterion=best_prefer_unmatched
44 | i-c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
45 | # initial partitioning -> initial partitioning
46 | i-algo=pool
47 | i-runs=20
48 | # initial partitioning -> local search
49 | i-r-type=twoway_fm
50 | i-r-runs=-1
51 | i-r-fm-stop=simple
52 | i-r-fm-stop-i=50
53 | # main -> local search
54 | r-type=kway_fm_flow
55 | r-runs=-1
56 | r-fm-stop=adaptive_opt
57 | r-fm-stop-alpha=1
58 | r-fm-stop-i=350
59 | # local_search -> flow
60 | r-flow-algorithm=ibfs
61 | r-flow-alpha=16
62 | r-flow-beta=128
63 | r-flow-network=hybrid
64 | r-flow-execution-policy=exponential
65 | r-flow-use-most-balanced-minimum-cut=true
66 | r-flow-use-adaptive-alpha-stopping-rule=true
67 | r-flow-ignore-small-hyperedge-cut=true
68 | r-flow-use-improvement-history=true
69 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/kahypar_profiles/km1_kKaHyPar_dissertation.ini:
--------------------------------------------------------------------------------
1 | # general
2 | mode=direct
3 | objective=km1
4 | seed=-1
5 | cmaxnet=1000
6 | vcycles=0
7 | # main -> preprocessing -> min hash sparsifier
8 | p-use-sparsifier=true
9 | p-sparsifier-min-median-he-size=28
10 | p-sparsifier-max-hyperedge-size=1200
11 | p-sparsifier-max-cluster-size=10
12 | p-sparsifier-min-cluster-size=2
13 | p-sparsifier-num-hash-func=5
14 | p-sparsifier-combined-num-hash-func=100
15 | # main -> preprocessing -> community detection
16 | p-detect-communities=true
17 | p-detect-communities-in-ip=true
18 | p-reuse-communities=false
19 | p-max-louvain-pass-iterations=100
20 | p-min-eps-improvement=0.0001
21 | p-louvain-edge-weight=hybrid
22 | # main -> coarsening
23 | c-type=ml_style
24 | c-s=1
25 | c-t=160
26 | # main -> coarsening -> rating
27 | c-rating-score=heavy_edge
28 | c-rating-use-communities=true
29 | c-rating-heavy_node_penalty=no_penalty
30 | c-rating-acceptance-criterion=best_prefer_unmatched
31 | c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
32 | # main -> initial partitioning
33 | i-mode=recursive
34 | i-technique=multi
35 | # initial partitioning -> coarsening
36 | i-c-type=ml_style
37 | i-c-s=1
38 | i-c-t=150
39 | # initial partitioning -> coarsening -> rating
40 | i-c-rating-score=heavy_edge
41 | i-c-rating-use-communities=true
42 | i-c-rating-heavy_node_penalty=no_penalty
43 | i-c-rating-acceptance-criterion=best_prefer_unmatched
44 | i-c-fixed-vertex-acceptance-criterion=fixed_vertex_allowed
45 | # initial partitioning -> initial partitioning
46 | i-algo=pool
47 | i-runs=20
48 | # initial partitioning -> local search
49 | i-r-type=twoway_fm
50 | i-r-runs=-1
51 | i-r-fm-stop=simple
52 | i-r-fm-stop-i=50
53 | # main -> local search
54 | r-type=kway_fm_flow_km1
55 | r-runs=-1
56 | r-fm-stop=adaptive_opt
57 | r-fm-stop-alpha=1
58 | r-fm-stop-i=350
59 | # local_search -> flow
60 | r-flow-algorithm=ibfs
61 | r-flow-alpha=16
62 | r-flow-beta=128
63 | r-flow-network=hybrid
64 | r-flow-execution-policy=exponential
65 | r-flow-use-most-balanced-minimum-cut=true
66 | r-flow-use-adaptive-alpha-stopping-rule=true
67 | r-flow-ignore-small-hyperedge-cut=true
68 | r-flow-use-improvement-history=true
69 |
--------------------------------------------------------------------------------
/acqdp/circuit/noise.py:
--------------------------------------------------------------------------------
1 | from acqdp.circuit.circuit import Channel, Circuit
2 | import numpy as np
3 |
4 |
def Depolarization(px=0.25, py=None, pz=None):
    """Single-qubit depolarizing channel.

    Builds the channel as a :class:`Channel` on one qubit with Pauli error
    probabilities `px`, `py` and `pz`. Parameters left as `None` default to
    the value of `px`.

    :param px: Probability of an X error; also the default for `py` and `pz`.
    :type px: :class:`float`.
    :param py: Probability of a Y error. Defaults to `px` when `None`.
    :type py: :class:`float`.
    :param pz: Probability of a Z error. Defaults to `px` when `None`.
    :type pz: :class:`float`.
    :returns: :class:`Channel` -- the single-qubit depolarizing channel.
    """
    if py is None:
        py = px
    if pz is None:
        pz = px
    # Channel representation of the depolarizing map.
    # NOTE(review): assumes the (2, 2, 2, 2) index convention used by
    # Channel elsewhere in this package -- confirm against circuit.Channel.
    res = Channel(
        1,
        np.array([[[[1 - px - py, 0], [0, 1 - 2 * pz - px - py]],
                   [[0, px + py], [px - py, 0]]],
                  [[[0, px - py], [px + py, 0]],
                   [[1 - px - py - 2 * pz, 0], [0, 1 - px - py]]]]))
    return res
27 |
28 |
def Dephasing(pz=0.5):
    """Single-qubit dephasing channel, also called the phase-damping channel.

    Implemented as a depolarizing channel with px = py = 0.

    :param pz: The parameter to describe the single-qubit dephasing channel.
    :type pz: :class:`float`.
    :returns: :class:`Channel` -- the single-qubit dephasing channel with given parameter.
    """
    return Depolarization(0, 0, pz)
37 |
38 |
def AmplitudeDampling(p=0.1):
    """Single-qubit amplitude-damping channel.

    NOTE(review): the name contains a typo ("Dampling" instead of "Damping");
    it is kept unchanged for backward compatibility with existing callers.

    :param p: Probability of decay from the excited state to the ground state.
    :type p: :class:`float`.
    :returns: :class:`Channel` -- the single-qubit amplitude-damping channel.
    """
    res = Channel(
        1,
        np.array([[[[1, 0], [0, np.sqrt(1 - p)]],
                   [[0, p], [0, 0]]],
                  [[[0, 0], [0, 0]],
                   [[np.sqrt(1 - p), 0], [0, 1 - p]]]]))
    return res
47 |
48 |
def add_noise(circuit, noise_channel):
    """Return a copy of `circuit` with `noise_channel` appended after every operation.

    Each operation of the input circuit is wrapped in its own sub-circuit, and
    the noise channel is attached to each of the operation's output indices.

    :param circuit: The noiseless circuit to decorate; it is not modified.
    :param noise_channel: The channel appended to every output index of every
        operation (typically single-qubit, e.g. :func:`Depolarization`).
    :returns: :class:`Circuit` -- a new, noisy circuit.
    """
    c = Circuit()
    # Iterate over items() so each operation record is looked up only once.
    for name, op_info in circuit.operations_by_name.items():
        new_op = Circuit() | op_info['operation']
        # Attach the noise channel to every output index of the operation.
        for out_index in new_op._output_indices[0]:
            new_op.append(noise_channel, [out_index])
        c.append(new_op, qubits=op_info['qubits'],
                 time_step=op_info['time_step'], name=name)
    return c
60 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.c
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | pip-wheel-metadata/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | *.py,cover
52 | .hypothesis/
53 | .pytest_cache/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | .python-version
87 |
88 | # pipenv
89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
92 | # install all needed dependencies.
93 | #Pipfile.lock
94 |
95 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
96 | __pypackages__/
97 |
98 | # Celery stuff
99 | celerybeat-schedule
100 | celerybeat.pid
101 |
102 | # SageMath parsed files
103 | *.sage.py
104 |
105 | # Environments
106 | .env
107 | .venv
108 | env/
109 | venv/
110 | ENV/
111 | env.bak/
112 | venv.bak/
113 |
114 | # Spyder project settings
115 | .spyderproject
116 | .spyproject
117 |
118 | # Rope project settings
119 | .ropeproject
120 |
121 | # mkdocs documentation
122 | /site
123 |
124 | # mypy
125 | .mypy_cache/
126 | .dmypy.json
127 | dmypy.json
128 |
129 | # Pyre type checker
130 | .pyre/
131 |
132 | # vscode
133 | .vscode/
134 |
135 | # docs for github pages
136 | /docs/
--------------------------------------------------------------------------------
/demo/QEC/decoder.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tqdm
3 | import json
4 | from demo.QEC.surface_code import surface_code_tensor_network
5 |
6 |
def computeErrorRate(params):
    """Contract the surface-code tensor network and return decoded logical
    likelihoods.

    :param params: dict of physical noise parameters. Only 'e_ro' (readout
        error probability, default 0.01) is read directly here; the rest is
        forwarded to :func:`surface_code_tensor_network`.
    :returns: :class:`numpy.ndarray` -- a 4x4 array of summed, scaled
        likelihoods (one entry per logical Pauli-transfer component).
    """
    # Two-layer (two rounds of stabilizer measurement) surface-code network.
    tn = surface_code_tensor_network(num_layers=2, params=params)
    e_ro = params.get('e_ro', 0.01)
    # Symmetric readout bit-flip (confusion) matrix.
    butterfly = np.array([[1 - e_ro, e_ro], [e_ro, 1 - e_ro]])
    # Contraction hyper-parameters for the order finder / contractor.
    # NOTE(review): path is relative to the repository root -- the script
    # must be launched from there.
    with open('acqdp/tensor_network/khp_params.json', 'r') as f:
        kwargs = json.load(f)
    order = tn.find_order(input='task.json', output='task.json', **kwargs)
    task = tn.compile(order, **kwargs)
    # task.set_data({node: tn.network.nodes[(0, node)]['tensor'].contract() for node in tn.nodes_by_name})
    res = task.execute(**kwargs)

    # Index labels for einsum: alphabet[:4] == 'abAB' are the logical Choi
    # indices; the remaining letters label the measured-bit indices.
    alphabet = 'abABCDEFGHIJKLMNOPQRSTUVWXYZ'
    alphabet_u = 'cdefghijklmnopqrstuvwxyz'
    # convert from Choi matrices to PTM (Pauli transfer matrix)
    Pauli = np.array([[[[1, 0], [0, 1]], [[0, 1], [1j, 0]]],
                      [[[0, 1], [-1j, 0]], [[1, 0], [0, -1]]]])
    res = np.einsum(alphabet + ',' + 'aAcd->cbdB' + alphabet[4:], res, Pauli)
    res = np.einsum(alphabet + ',' + 'bBcd->cdaA' + alphabet[4:], res,
                    np.conj(Pauli))
    # Keep only the real part (drops numerical imaginary residue).
    res = np.real(res)

    # Merge with readout error: apply the bit-flip matrix to each of the 16
    # measured-bit indices (alphabet positions 4..19) in turn.
    for k in tqdm.tqdm(range(16)):
        script = alphabet + ',' + alphabet[k + 4] + alphabet_u[k] + '->'
        output = alphabet[:k + 4] + alphabet_u[k] + alphabet[k + 5:]
        res = np.einsum(script + output, res, butterfly)

    # Fold the 24 binary measurement indices into one axis of size 2**24.
    res = np.reshape(res, (4, 4, 2**24))

    # Maximum likelihood decoding: rows of sk are the +/-1 signatures of the
    # four candidate Pauli frames; pick the best frame per outcome.
    sk = np.array([[1, 1, 1, 1], [1, 1, -1, -1], [1, -1, 1, -1], [1, -1, -1,
                                                                  1]])
    aa = np.argmax(np.einsum('aab,ac->cb', res, sk), axis=0)
    bb = np.zeros((2**24, 4))
    bb[np.arange(2**24)] = sk[aa]
    res = np.einsum('abc,ca->abc', res, bb)
    # NOTE(review): the factor 512 (= 2**9) looks like a normalization
    # constant tied to the network construction -- confirm in surface_code.py.
    lk = np.sum(res, axis=2) * 512
    return lk
45 |
46 |
# Physical noise parameters for the surface-code error-rate comparison.
# NOTE(review): time-like quantities (1/T_1_inv, T_g_*, tau_*) appear to be
# in nanoseconds -- confirm against demo/QEC/noise_model.py.
params = {
    'T_1_inv': 1 / 30000.0,    # relaxation rate, 1 / T_1
    'T_phi_inv': 1 / 60000.0,  # pure-dephasing rate, 1 / T_phi
    'p_axis': 1e-4,
    'p_plane': 5e-4,
    'delta_phi': 0.0,
    'T_g_1Q': 20.0,            # single-qubit gate duration
    'T_g_2Q': 40.0,            # two-qubit gate duration
    'tau_m': 300.0,            # measurement duration -- TODO confirm units
    'tau_d': 300.0,            # depletion/delay duration -- TODO confirm
    'gamma': 0.12,
    'alpha0': 4,
    'kappa': 1 / 250,
    'chi': 1.3 * 1e-3,
    'xsplit': 2,
    'butterfly': np.array([[1 - 0.0015, 0.0015], [0.0015, 1 - 0.0015]])
}

# Compare the decoded likelihoods with (gamma = 0.12) and without (gamma = 0)
# the gamma noise contribution.
# NOTE(review): this assignment is redundant -- 'gamma' is already 0.12 above.
params['gamma'] = 0.12
b = computeErrorRate(params)
params['gamma'] = 0
a = computeErrorRate(params)
print(a, b, a - b)
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Alibaba Cloud Quantum Development Platform (ACQDP)
2 |
3 | ## Introduction
4 | ACQDP is an open-source simulator-driven development tool for quantum algorithms and quantum computers. The initial release of ACQDP in October 2020 features Alibaba Quantum Laboratory’s general-purpose, tensor-contraction based quantum circuit simulator, together with some applications on quantum algorithm and error correction simulations. Some future directions of ACQDP with higher priorities are
5 |
6 | 1. Strengthening the capabilities of the simulator, in terms of the scale of the target circuits, and allowing approximations.
7 | 2. Improving the capabilities for and expanding the scope of applications.
8 | 3. Developing friendly user interfaces for both the educational and research communities.
9 | 4. Adding utilities facilitating the deployment in various computing environments.
10 |
11 | ## Computing Engine
12 | Partially inspired by the recent quantum supremacy experiment, classical simulation of quantum circuits attracts quite a bit of attention and impressive progress has been made along this line of research to significantly improve the performance of classical simulation of quantum circuits. Key ingredients include
13 | 1. Quantum circuit simulation as tensor network contraction [[1]](#1);
14 | 2. Undirected graph model formalism[[2]](#2);
15 | 3. Dynamic slicing [[3]](#3)[[4]](#4);
16 | 4. Contraction tree [[5]](#5);
17 | 5. Contraction subtree reconfiguration [[6]](#6).
18 |
19 | We are happy to be part of this effort.
20 |
21 | ## Use Cases
22 |
23 | * Efficient exact contraction of intermediate-sized tensor networks
24 | * Deployment on large-scale clusters for contracting complex tensor networks
25 | * Efficient exact simulation of intermediate sized quantum circuit
26 | * Classical simulation under different quantum noise models
27 |
28 | ## Documentation
29 | [See full documentation here.](https://alibabaquantumlab.github.io/acqdp)
30 |
31 | ## Installation
32 | [See instructions here.](https://alibabaquantumlab.github.io/acqdp/installation.html)
33 |
34 | ## Contributing
35 |
36 | If you are interested in contributing to ACQDP feel free to contact me or create an issue on the issue tracking system.
37 |
38 | ## References
39 |
40 | [1]
41 | Markov, I. and Shi, Y.(2008)
42 | Simulating quantum computation by contracting tensor networks
43 | SIAM Journal on Computing, 38(3):963-981, 2008
44 |
45 | [2]
46 | Boixo, S., Isakov, S., Smelyanskiy, V. and Neven, H. (2017)
47 | Simulation of low-depth quantum circuits as complex undirected graphical models
48 | arXiv preprint arXiv:1712.05384
49 |
50 | [3]
51 | Chen, J., Zhang, F., Huang, C., Newman, M. and Shi, Y.(2018)
52 | Classical simulation of intermediate-size quantum circuits
53 | arXiv preprint arXiv:1805.01450
54 |
55 | [4]
56 | Zhang, F., Huang, C., Newman M., Cai, J., Yu, H., Tian, Z., Yuan, B., Xu, H.,Wu, J., Gao, X., Chen, J., Szegedy, M. and Shi, Y.(2019)
57 | Alibaba Cloud Quantum Development Platform: Large-Scale Classical Simulation of Quantum Circuits
58 | arXiv preprint arXiv:1907.11217
59 |
60 | [5]
61 | Gray, J. and Kourtis, S.(2020)
62 | Hyper-optimized tensor network contraction
63 | arXiv preprint arXiv:2002.01935
64 |
65 | [6]
66 | Huang, C., Zhang, F.,Newman M., Cai, J., Gao, X., Tian, Z., Wu, J., Xu, H., Yu, H., Yuan, B.,\
67 | Szegedy, M., Shi, Y. and Chen, J. (2020)
68 | Classical Simulation of Quantum Supremacy Circuits
69 | arXiv preprint arXiv:2005.06787
70 |
--------------------------------------------------------------------------------
/docsource/source/installation.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | =================
4 | Overview
5 | =================
6 |
7 | ACQDP is an open-source simulator-driven development tool for quantum algorithms and quantum computers. The initial release of ACQDP in October 2020 features Alibaba Quantum Laboratory’s general-purpose, tensor-contraction based quantum circuit simulator, together with some applications on quantum algorithm and error correction simulations. Some future directions of ACQDP with higher priorities are
8 |
9 | 1. Strengthening the capabilities of the simulator, in terms of the scale of the target circuits, and allowing approximations.
10 | 2. Improving the capabilities for and expanding the scope of applications.
11 | 3. Developing friendly user interfaces for both the educational and research communities.
12 | 4. Adding utilities facilitating the deployment in various computing environments.
13 |
14 | =================
15 | Installation
16 | =================
17 |
18 | .. contents::
19 | :depth: 1
20 | :local:
21 | :backlinks: none
22 |
23 | .. highlight:: console
24 |
25 | Prerequisites
26 | ***************************
27 |
28 | `Python <https://www.python.org/>`__ - version 3.7 or later is required.
29 |
30 | `Cython <https://cython.org/>`__ (optional) - used in acqdp to accelerate the software package `KaHyPar <https://github.com/kahypar/kahypar>`__ used in contraction order finding.
31 | In a command window, run
32 |
33 | .. code-block:: bash
34 |
35 | pip install cython
36 |
37 | `boost `__ - C++ boost library, particularly the `program_options `__ library
38 |
39 | `KaHyPar python package `__ (Windows) - On non-Windows systems, it installs automatically
40 | when installing acqdp but it'll error out on Windows. To make it work on Windows, you'll need manually install Kahypar
41 | first following its instructions `here `__. Alternatively, you
42 | can use `Windows Subsystem for Linux `__
43 |
44 | Installation from PyPI
45 | **************************
46 |
47 | ACQDP packages are published on the `Python Package Index `__ and can be installed using `pip`.
48 | This is the recommended way for most users. However, if you'd like to see or modify the source code, proceed to the next section
49 | `Installation from source code`_
50 |
51 | In a command window, run
52 |
53 | .. code-block:: bash
54 |
55 | pip install -U acqdp
56 |
57 | Installation from source code
58 | *****************************
59 |
60 | Alternatively, you can install from source code, say, cloned from `Github `__ so you can see and modify the code.
61 | First clone the repo
62 |
63 | .. code-block:: bash
64 |
65 | git clone https://github.com/alibaba/acqdp
66 |
67 |
68 | Then install the ACQDP packages. Enter the `acqdp` folder and run
69 |
70 | .. code-block:: bash
71 |
72 | pip install -e .
73 |
74 | Example and test codes
75 | ***********************
76 |
77 | You are ready to go! Find some examples at https://github.com/alibaba/acqdp/tree/master/examples and
78 | https://github.com/alibaba/acqdp/tree/master/demo
79 |
80 | To run the example in the examples, for example `GHZ.py`, simply run the following command in the examples folder:
81 |
82 | .. code-block:: bash
83 |
84 | python examples/GHZ.py
85 |
86 |
87 | To see if the package passes all the tests, simply run :command:`python -m pytest`.
88 |
--------------------------------------------------------------------------------
/tests/test_tensor_sum.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import numpy
3 | from numpy import testing
4 | import unittest
5 | from acqdp.tensor_network import TensorSum, Tensor, TensorNetwork
6 |
7 |
class TensorSumTestCase(unittest.TestCase):
    """Unit tests for :class:`TensorSum` arithmetic, transposition, term
    management, shape caching and copying."""

    def setUp(self):
        """Build two 5x5-node tensor networks (one real, one imaginary) with
        five open edges each, plus a plain rank-5 :class:`Tensor`."""
        self.a = TensorNetwork()
        for i in range(5):
            self.a.open_edge(i)
        for i in range(5):
            for j in range(5, 10):
                self.a.add_node((i, j), [i, j], numpy.random.rand(2, 2))

        self.b = TensorNetwork()
        for i in range(5):
            self.b.open_edge(i)
        for i in range(5):
            for j in range(5, 10):
                # Internal edges j have bond dimension 3; open edges stay 2,
                # so b still contracts to the same shape as a.
                self.b.add_node((i, j), [i, j], 1j * numpy.random.rand(2, 3))

        self.c = Tensor(numpy.random.rand(2, 2, 2, 2, 2))

    def test_addition(self):
        """Addition of networks/tensors yields a TensorSum whose contraction
        equals the elementwise sum (~ is conjugation, * is scaling)."""
        c = self.a + ~(self.b) + 8 * self.c
        self.assertEqual(type(c), TensorSum)
        self.assertTrue(numpy.allclose(c.contract(),
                                       self.a.contract()
                                       + numpy.conj(self.b.contract())
                                       + 8 * self.c.contract()))

    def test_associativity(self):
        """Different groupings of +/-/scalar operations contract identically."""
        c = (self.a + self.b) + self.c
        d = self.a + (self.b + self.c)
        e = (self.a + (self.c - self.b)) + (self.a + (2 * self.b - self.a))
        self.assertTrue(numpy.allclose(c.contract(), d.contract()))
        self.assertTrue(numpy.allclose(c.contract(), e.contract()))

    def test_transpose(self):
        """The % operator transposes terms and sums; result matches numpy."""
        axesA = (2, 4, 0, 1, 3)
        axesB = (3, 4, 1, 0, 2)
        axesC = (3, 0, 1, 2, 4)
        axes_ = (1, 3, 0, 4, 2)
        c = (self.a % axesA
             + self.b % axesB
             + self.c % axesC) % axes_
        data = numpy.transpose(numpy.transpose(self.a.contract(), axesA)
                               + numpy.transpose(self.b.contract(), axesB)
                               + numpy.transpose(self.c.contract(), axesC), axes_)
        self.assertTrue(numpy.allclose(c.contract(), data))

    def test_add_and_remove_term(self):
        """Terms are keyed; removing a missing key or re-adding an existing
        one raises KeyError."""
        c = TensorSum()
        for i in range(100):
            c.add_term(i, self.a % numpy.random.permutation(5))
        choice = numpy.random.choice(100, 30, replace=False)
        for j in choice:
            c.remove_term(j)
        with self.assertRaises(KeyError):
            c.remove_term(choice[0])
        # NOTE(review): the add_term(0, None) pair below relies on TensorSum's
        # key bookkeeping (duplicate key raises, then the slot is usable) --
        # verify against TensorSum.add_term semantics.
        with self.assertRaises(KeyError):
            c.add_term(0, None)
        c.add_term(0, None)

    def test_shape_cache(self):
        """A TensorSum pins its shape to the first term; mismatched shapes
        raise ValueError until the sum is emptied again."""
        c = TensorSum()
        misshaped = numpy.random.rand(2, 2, 3, 2, 2)
        c.add_term(0, self.a)
        c.add_term(1, self.a)
        c.remove_term(0)
        with self.assertRaises(ValueError):
            c.add_term(2, misshaped)
        c.add_term(2, self.a)
        c.remove_term(1)
        c.remove_term(2)
        # Now that C is empty we can add any shape to it
        c.add_term(0, misshaped)
        c.add_term(1, misshaped)
        with self.assertRaises(ValueError):
            c.add_term(2, self.a)
        # The function update_term does not fail early, so the user can update all tensors one-by-one
        c.update_term(0, self.a)
        c.update_term(1, self.a)
        c.add_term(2, self.a)

    def test_copy(self):
        """copy() and deepcopy() contract to the same values; mutating the
        copy does not affect the original."""
        c = TensorSum()
        a = Tensor(numpy.random.rand(2, 2, 2, 2, 2))
        for i in range(2):
            c.add_term(i, (a % numpy.random.permutation(5)).expand())
        d = c.copy()
        testing.assert_allclose(c.contract(), d.contract())
        e = deepcopy(c)
        testing.assert_allclose(c.contract(), e.contract())
        d.update_term(0, numpy.random.rand(2, 2, 2, 2, 2))
        self.assertFalse(numpy.allclose(c.contract(), d.contract()))
100 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/tensor.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy
3 | from .tensor_valued import TensorValued, DTYPE
4 |
5 |
class Tensor(TensorValued):
    """A :class:`Tensor` is an array of numbers with multiple dimensions. The most basic examples of a :class:`Tensor`
    are a vector (1-dimensional arrays of numbers) and a matrix (2-dimensional arrays).

    In our implementation, :class:`Tensor` is a subclass of :class:`TensorValued`, where the value is stored in an
    `numpy.ndarray`. All other `TensorValued` represent operations over the :class:`Tensor` objects.

    :ivar _data: `numpy.ndarray` object representing the data corresponding to the tensor.
    """

    def __init__(self,
                 data: numpy.ndarray = None,
                 dtype: type = DTYPE) -> None:
        """Constructor of a :class:`Tensor` object.

        :param data: Initial value. May be `None` (empty tensor), another
            :class:`TensorValued` (contracted immediately), a
            :class:`numpy.ndarray` (stored as-is), or anything
            :func:`numpy.array` accepts.
        :param dtype: dtype for the tensor entries; when `None`, it is
            inferred from the data.
        """
        if data is None:
            self._data = None
        elif isinstance(data, TensorValued):
            self._data = data.contract()
            # Consistency fix: infer the dtype from the contracted data when
            # none is given, matching the two branches below.
            if dtype is None and self._data is not None:
                dtype = self._data.dtype
        elif isinstance(data, numpy.ndarray):
            self._data = data
            if dtype is None:
                dtype = self._data.dtype
        else:
            self._data = numpy.array(data)
            if dtype is None:
                dtype = self._data.dtype
        super().__init__(dtype)

    @property
    def shape(self):
        """
        The common property of all :class:`TensorValued` classes.
        The shape of a `TensorValued` object is the bond dimension for each of its indices.
        :class:`TensorValued` objects must have compatible shapes in order to be connected together in
        a :class:`TensorNetwork`, or summed over in a :class:`TensorSum`.

        For :class:`Tensor` objects, it refers to the shape of the underlying :class:`numpy.ndarray` object.
        """
        if self._data is None:
            return None
        elif isinstance(self._data, numpy.ndarray):
            return self._data.shape
        else:
            # _data should only ever be None or an ndarray.
            raise ValueError

    def __str__(self) -> str:
        s = None
        if isinstance(self._data, numpy.ndarray):
            s = numpy.around(self._data, decimals=3)
        data_str = "Data: \n" + str(s)
        return super().__str__() + "\n" + data_str

    def __repr__(self) -> str:
        return "Id: " + str(self.identifier) + self.__str__()

    def __iadd__(self, t):
        # NOTE(review): assumes `t` is a TensorValued (it must provide
        # contract()) and that self._data is already set -- confirm callers.
        self._data += t.contract()
        return self

    @property
    def is_valid(self) -> bool:
        """For :class:`Tensor` objects, it is to indicate whether the underlying :class:`numpy.ndarray` object where the
        unary operation is performed onto, is valid or not."""
        return True

    @property
    def is_ready(self) -> bool:
        """The common property of all :class:`TensorValued` classes, indicating whether the current
        :class:`TensorValued` object is ready for contraction, i.e. whether it semantically represents a tensor with a
        definite value. In the process of a program, not all :class:`TensorValued` objects need to be ready; however
        once the `data` property of a certain object is queried, such object must be ready in order to successfully
        yield an :class:`numpy.ndarray` object.

        For :class:`Tensor` objects, it is to indicate whether the underlying :class:`numpy.ndarray` object where the
        unary operation is performed onto, is ready for contraction.
        """
        return self._data is not None

    @property
    def norm_squared(self):
        """Square of Frobenius norm of the underlying :class:`numpy.ndarray` object."""
        return numpy.linalg.norm(self._data.flatten()) ** 2

    def fix_index(self, index, fix_to=0):
        """Fix the given index to the given value. The object would have the same dtype as the original one, with rank 1
        smaller than the original.

        :param index: The index to fix.
        :type index: :class:`int`.
        :param fix_to: the value to assign to the given index.
        :type fix_to: :class:`int`
        :returns: :class:`Tensor` -- self, after fixing the given index.
        """
        if self._data is not None:
            self._data = numpy.moveaxis(self._data, index, 0)[fix_to]
        return self

    def contract(self, **kwargs):
        """
        :returns: :class:`numpy.ndarray` -- the value of the tensor whose value is stored in an :class:`numpy.ndarray` object.
        """
        return self._data

    def cast(self, dtype):
        """Return a copy of the `Tensor` object with updated underlying dtype."""
        return Tensor(numpy.array(self._data, dtype), dtype)

    def copy(self):
        """Return a shallow copy of the `Tensor` object (the underlying
        ndarray is shared; use deepcopy for an independent buffer)."""
        return Tensor(self._data, self.dtype)

    def __deepcopy__(self, memo):
        tn = Tensor(copy.deepcopy(self._data), dtype=self.dtype)
        return tn
119 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/tensor_view.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy
3 | from .tensor_valued import TensorValued, DTYPE
4 |
5 |
6 | class TensorView(TensorValued):
7 | """
8 | :class:`TensorView` is a subclass of :class:`TensorValued` representing unary operations over another :class:`TensorValued`
9 | object that preserves the shape of the tensor. Common examples include element-wise conjugation and normalization with
10 | respect to the frobenius norm.
11 |
12 | :ivar tn: the underlying `TensorValued` object where the unary operation is performed onto.
13 | :ivar func: the unary function to be applied.
14 | :ivar homomorphism: indicator whether the unary function is homomorphic to the addition and multiplication of tensors. If
15 | so, the unary function can be broadcast to lower-level tensors, enabling potential simplification of the tensor
16 | network structure.
17 | :ivar dtype: dtype for the tensor entries.
18 | """
19 |
20 | def __init__(self, tn, func=numpy.conj, homomorphism=False, dtype=DTYPE):
21 | """The constructor of a `TensorView` object."""
22 | super().__init__(dtype)
23 | self.ref = tn
24 | self.func = func
25 | self.homomorphism = (func == numpy.conj) | homomorphism
26 |
27 | def __str__(self) -> str:
28 | data_str = "Data: \n" + str(self.ref)
29 | func_str = "Func: " + str(self.func)
30 | return super().__str__() + "\n" + data_str + func_str
31 |
32 | @property
33 | def shape(self):
34 | """
35 | The common property of all :class:`TensorValued` classes, yielding the shape of the object.
36 | :class:`TensorValued` objects must have compatible shapes in order to be connected together in
37 | a :class:`TensorNetwork`, or summed over in a :class:`TensorSum`.
38 |
39 | For :class:`TensorView` objects, it refers to the shape of the underlying :class:`TensorValued` object where the unary
40 | operation is performed onto.
41 | """
42 | return self.ref.shape
43 |
44 | @property
45 | def is_ready(self):
46 | """The common property of all :class:`TensorValued` classes, indicating whether the current
47 | :class:`TensorValued` object is ready for contraction, i.e. whether it semantically represents a tensor with a
48 | definite value. In the process of a program, not all :class:`TensorValued` objects need to be ready; however
49 | once the `data` property of a certain object is queried, such object must be ready in order to successfully
50 | yield an :class:`numpy.ndarray` object.
51 |
52 | For :class:`TensorView` objects, it is to indicate whether the underlying :class:`TensorValued` object where the
53 | unary operation is performed onto, is ready for contraction.
54 | """
55 | return self.ref.is_ready
56 |
57 | @property
58 | def is_valid(self):
59 | """The common property of all :class:`TensorValued` classes, indicating whether the :class:`TensorValued` object
60 | is valid or not. In every step of a program, all existing :class:`TensorValued` object must be valid, otherwise
61 | an exception should be thrown out; this property is for double checking that the current :class:`TensorValued`
62 | object is indeed valid.
63 |
64 | For :class:`TensorView` objects, it is to indicate whether the underlying :class:`TensorValued` object where the
65 | unary operation is performed onto, is valid or not.
66 | """
67 | return self.ref.is_valid
68 |
69 | @property
70 | def raw_data(self):
71 | """The data of the underlying :class:`TensorValued` object where the unary operation is performed onto."""
72 | return self.ref.contract()
73 |
74 | def fix_index(self, index, fix_to=0):
75 | """Fix the given index to the given value. The object after the method would have the same type as the original
76 | one, with rank 1 smaller than the original.
77 |
78 | :param index: The index to fix.
79 | :type index: :class:`int`.
80 | :param fix_to: the value to assign to the given index.
81 | :type fix_to: :class:`int`.
82 |
83 | :returns: :class:`TensorView` -- The :class:`TensorView` object after fixing the given index.
84 | """
85 | self.ref = self.ref.fix_index(index, fix_to)
86 |
    def expand(self, recursive=False):
        """Commute the unary operation with the underlying tensor network, when the unary operation is a homomorphism
        for tensor network contractions.

        :param recursive: If True, also expand nested tensor networks before
            pushing the view down onto each node.
        :type recursive: :class:`bool`
        :returns: ``self`` unchanged when the operation is not a homomorphism
            or the wrapped object is not a :class:`TensorNetwork`; otherwise a
            copy of the wrapped network with every node wrapped in a
            :class:`TensorView` carrying the same operation.
        """
        # Imported locally to avoid a circular import at module load time.
        from acqdp.tensor_network import TensorNetwork
        if not self.homomorphism or not isinstance(self.ref, TensorNetwork):
            return self
        else:
            k = self.ref.copy()
            if recursive:
                # NOTE(review): the return value of k.expand is discarded —
                # presumably TensorNetwork.expand mutates in place; confirm.
                k.expand(recursive=True)
            for node_name in k.nodes_by_name:
                # Push the unary operation onto each node's tensor; node keys
                # in the underlying graph are (0, node_name) pairs.
                k.update_node(node_name, TensorView(k.network.nodes[(0, node_name)]['tensor'], self.func, self.homomorphism))
            return k
100 |
101 | def cast(self, dtype):
102 | self.dtype = dtype
103 | self.ref = self.ref.cast(dtype)
104 | return self
105 |
106 | def contract(self, **kwargs):
107 | return self.func(self.ref.contract(**kwargs))
108 |
109 | def copy(self):
110 | return TensorView(self.ref.copy(), self.func)
111 |
112 | def __deepcopy__(self, memo):
113 | return TensorView(copy.deepcopy(self.ref), self.func)
114 |
--------------------------------------------------------------------------------
/tests/test_tensor.py:
--------------------------------------------------------------------------------
1 | from copy import copy, deepcopy
2 | import numpy
3 | from numpy import testing
4 | import unittest
5 | from acqdp.tensor_network import TensorValued, Tensor, TensorSum, normalize
6 |
7 |
class TensorTestCase(unittest.TestCase):
    """Unit tests for elementary :class:`TensorValued` arithmetic:
    construction, scalar multiplication, conjugation, transposition,
    summation, norms, and copy semantics."""

    def setUp(self):
        # Common shape used throughout: rank-10 tensor, dimension 2 per axis.
        self.a = (2,) * 10

    def test_identifier(self):
        # Identifiers are drawn from the global TensorValued.id_count counter,
        # so after 100 constructions the last tensor's id is cnt + 100.
        cnt = TensorValued.id_count
        for _ in range(100):
            a = Tensor()
        self.assertEqual(a.identifier, cnt + 100)

    def test_equal(self):
        # The same values reached via literal data, scalar multiplication and
        # term-wise addition must all contract to the same array.
        a = Tensor(numpy.array([2, 4]))
        b = 2 * Tensor(numpy.array([1, 2]))
        c = Tensor(numpy.array([1, 1])) + Tensor(numpy.array([1, 3]))
        testing.assert_allclose(a.contract(), c.contract())
        testing.assert_allclose(a.contract(), b.contract())

    def test_data(self):
        # A Tensor wraps an ndarray verbatim: same data, same shape.
        a = numpy.random.rand(*(self.a))
        b = Tensor(a)
        testing.assert_allclose(b.contract(), a)
        self.assertEqual(self.a, b.shape)

    def test_multiplication(self):
        # Scalar * Tensor commutes with contraction.
        a = numpy.random.rand(*(self.a))
        b = Tensor(a)
        c = numpy.random.rand()
        testing.assert_allclose(c * a, (c * b).contract())

    def test_conjugation(self):
        # `~` is complex conjugation; applying it twice is the identity.
        a_R = numpy.random.rand(*(self.a))
        a_I = numpy.random.rand(*(self.a))
        a = a_R + a_I * 1j
        b = Tensor(a)
        testing.assert_allclose((~b).contract(), numpy.conj(a))
        testing.assert_allclose((~(~b)).contract(), a)

    def test_multiplication_addition(self):
        # NOTE(review): 10 scaled terms are prepared but only the first 2 are
        # summed (the reference accumulator matches) — presumably intentional.
        a = numpy.random.rand(10, *(self.a))
        c = numpy.random.rand(10)
        b = [c[i] * Tensor(a[i]) for i in range(10)]
        res = TensorSum()
        res_c = numpy.zeros(self.a)
        for i in range(2):
            res.add_term(i, b[i])
            res_c += c[i] * a[i]
        testing.assert_allclose(res.contract(), res_c)

    def test_transpose(self):
        # `%` applies an axis permutation, matching numpy.transpose.
        a = numpy.random.rand(*(self.a))
        axes = numpy.random.permutation(10)
        b = Tensor(a) % axes
        testing.assert_allclose(b.contract(), numpy.transpose(a, axes))

    def test_mul_add_tran_conj(self):
        # Combined scalar-multiply / conjugate / transpose / add, accumulated
        # via TensorValued `+=` against a plain-numpy reference.
        res_c = numpy.zeros(self.a[:2], dtype=complex)
        res = Tensor(copy(res_c))
        for _ in range(100):
            c1 = numpy.random.rand() + 1j * numpy.random.rand()
            a = numpy.random.rand(*(self.a[:2])) + 1j * numpy.random.rand(*(self.a[:2]))
            c2 = numpy.random.rand() + 1j * numpy.random.rand()
            axes = numpy.random.permutation(2)
            res_c += c1 * numpy.transpose(numpy.conj(c2 * a), axes)
            res += c1 * (~(Tensor(a) * c2) % axes)
        testing.assert_allclose(res.contract(), res_c)

    def test_mul_add_tran_conj_2(self):
        # Same combination, but accumulated as lazy terms in a TensorSum.
        res_c = numpy.zeros(self.a[:2], dtype=complex)
        res = TensorSum()
        for i in range(300):
            c1 = numpy.random.rand() + 1j * numpy.random.rand()
            a = numpy.random.rand(*(self.a[:2])) + 1j * numpy.random.rand(*(self.a[:2]))
            c2 = numpy.random.rand() + 1j * numpy.random.rand()
            axes = numpy.random.permutation(2)
            res_c += c1 * numpy.transpose(numpy.conj(c2 * a), axes)
            res.add_term(i, c1 * (~(Tensor(a) * c2) % axes))
        testing.assert_allclose(res.contract(), res_c)

    def test_mul_add_tran_conj_3(self):
        # Same as _2, but each term is eagerly contracted before being added.
        res_c = numpy.zeros(self.a[:2], dtype=complex)
        res = TensorSum()
        for i in range(300):
            c1 = numpy.random.rand() + 1j * numpy.random.rand()
            a = numpy.random.rand(*(self.a[:2])) + 1j * numpy.random.rand(*(self.a[:2]))
            c2 = numpy.random.rand() + 1j * numpy.random.rand()
            axes = numpy.random.permutation(2)
            res_c += c1 * numpy.transpose(numpy.conj(c2 * a), axes)
            res.add_term(i, (c1 * (~(Tensor(a) * c2) % axes)).contract())
        testing.assert_allclose(res.contract(), res_c)

    def test_mul_add_tran_conj_4(self):
        # Same as above, but the running sum is re-wrapped into a fresh Tensor
        # on every iteration.
        res_c = numpy.zeros(self.a[:2], dtype=complex)
        res = Tensor(copy(res_c))
        for _ in range(300):
            c1 = numpy.random.rand() + 1j * numpy.random.rand()
            a = numpy.random.rand(*(self.a[:2])) + 1j * numpy.random.rand(*(self.a[:2]))
            c2 = numpy.random.rand() + 1j * numpy.random.rand()
            axes = numpy.random.permutation(2)
            res_c += c1 * numpy.transpose(numpy.conj(c2 * a), axes)
            res = Tensor(res.contract() + (c1 * (~(Tensor(a) * c2) % axes)).contract())
        testing.assert_allclose(res.contract(), res_c)

    def test_incompatible_dimensions(self):
        # Adding tensors of mismatched shapes must raise ValueError.
        a = Tensor(numpy.array([1, 2]))
        b = Tensor(numpy.array([1, 2, 3]))
        with self.assertRaises(ValueError):
            a + b

    def test_norm(self):
        # norm_squared is the squared Frobenius norm; normalize() rescales it to 1.
        a = Tensor(numpy.array([1, 2, 3]))
        e = Tensor(numpy.array([1j, 2j, 3j]))
        self.assertAlmostEqual(a.norm_squared, 14)
        self.assertAlmostEqual(e.norm_squared, 14)
        self.assertAlmostEqual((a + 2 * e).norm_squared, 70)
        c = normalize(Tensor(numpy.random.rand(*self.a)))
        self.assertAlmostEqual(c.norm_squared, 1)

    def test_copy(self):
        # copy() is shallow (shares the data buffer); deepcopy duplicates it.
        a = Tensor(numpy.array([1, 2]))
        b = a.copy()
        c = deepcopy(a)
        self.assertTrue(b._data is a._data)
        self.assertFalse(c._data is a._data)
        testing.assert_allclose(a.contract(), b.contract())
        testing.assert_allclose(a.contract(), c.contract())
138 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/order_finder.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import opt_einsum
3 | from acqdp.tensor_network.local_optimizer import defaultOrderResolver
4 |
# Guard against accidental execution under Python 2. Vestigial on modern
# installs (packaging metadata should enforce the interpreter version), but
# kept for direct invocations with an old interpreter.
if sys.version_info < (3, 0):
    sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
    sys.exit(1)
8 |
9 |
class OrderFinder:
    """Search for a contraction scheme for a given tensor network structure.

    The main entry point is :meth:`find_order`, which takes a
    :class:`TensorNetwork` as input and yields a generator of
    :class:`ContractionScheme` objects. This base class implements
    elementary order-finding strategies; for the more advanced
    hypergraph-based approach use :class:`KHPOrderFinder`, and for
    contraction schemes with sliced edges use :class:`SlicedOrderFinder`.

    :ivar order_method: ``'default'`` contracts the tensors one by one in
        sorted name order; ``'vertical'`` contracts them according to a qubit
        ordering, so that for circuit-derived networks the tensors attached
        to one qubit are merged first and then absorbed into a main tensor
        following the given order of the qubits.
    """

    def __init__(self,
                 order_method='default',
                 **kwargs):
        self.order_method = order_method

    def find_order(self, tn, **kwargs):
        """Find a contraction scheme for the input tensor network subject to
        the constraints given in the :class:`OrderFinder`.

        :param tn: A tensor network for which the contraction scheme is to be determined.
        :type tn: :class:`TensorNetwork`
        :yields: A :class:`ContractionScheme` containing the pairwise
            contraction order, a list of edges to be sliced, and optionally
            the total contraction cost.
        """
        tn = tn._expand_and_delta()
        try:
            if self.order_method == 'default':
                node_sequence = sorted(tn.nodes_by_name)
            elif self.order_method == 'vertical':
                qubit_order = kwargs.get('qubit_order', sorted(set(b[1] for b in tn.nodes_by_name)))
                node_sequence = sorted(tn.nodes_by_name, key=lambda b: (qubit_order.index(b[1]), b[0], b[2:]))
            else:
                raise ValueError("order method not implemented")
        except TypeError:
            # Mixed-type node names are not mutually comparable; fall back to
            # sorting on their string representations.
            node_sequence = sorted(tn.nodes_by_name, key=lambda x: str(x))
        pair_order = []
        if len(node_sequence) > 1:
            running = node_sequence[0]
            for step, node in enumerate(node_sequence[1:]):
                merged = ('#', step)
                pair_order.append([[node, running], merged])
                running = merged
            # The final intermediate is the overall result node '#'.
            pair_order[-1][-1] = '#'
        res = defaultOrderResolver.order_to_contraction_scheme(tn, pair_order)
        while True:
            yield res
61 |
62 |
class OptEinsumOrderFinder(OrderFinder):
    """Find an unsliced contraction scheme using `opt_einsum`'s built-in
    ``opt_einsum.contract_path``.

    :ivar optimize: Passed through as the ``optimize`` argument of
        ``opt_einsum.contract_path``.
    """

    def __init__(self,
                 optimize='greedy',
                 **kwargs):
        self.optimize = optimize

    def find_order(self, tn, **kwargs):
        """Yield contraction schemes derived from `opt_einsum` paths."""
        tn = tn._expand_and_delta()
        while True:
            lhs, rhs, shapes = tn.subscripts()
            subscripts = ','.join(lhs) + '->' + rhs
            path, _ = opt_einsum.contract_path(subscripts,
                                               *shapes,
                                               shapes=True,
                                               optimize=self.optimize)
            if len(tn.nodes_by_name) > 1:
                order = defaultOrderResolver.path_to_paired_order([list(tn.nodes_by_name), '#'], path)
            else:
                order = []
            yield defaultOrderResolver.order_to_contraction_scheme(tn, order)
88 |
89 |
class SlicedOrderFinder(OrderFinder):
    """Find a sliced contraction scheme based on unsliced contraction schemes
    found by a base order finder.

    :ivar base_order_finder: The base order finder of the sliced order finder,
        from which the :class:`SlicedOrderFinder` fetches contraction schemes
        and does slicing on them.
    :ivar slicer: The slicing algorithm acting upon the contraction schemes.
    :ivar num_candidates: Number of unsliced contraction schemes to feed to
        the slicer at a time. Set to 20 by default.
    """

    def __init__(self,
                 base_order_finder=None,
                 slicer=None,
                 num_candidates=20,
                 **kwargs):
        # Bug fix: the configuration dicts were previously mutable default
        # arguments shared across all calls; use None sentinels instead.
        if base_order_finder is None:
            base_order_finder = {'order_finder_name': 'khp'}
        if slicer is None:
            slicer = {'slicer_name': 'default'}
        self.num_candidates = num_candidates
        # Imported lazily to avoid circular imports at module load time.
        from acqdp.tensor_network.order_finder import get_order_finder
        from acqdp.tensor_network.slicer import get_slicer
        # (The redundant pre-assignment of the raw config dict to
        # self.base_order_finder was removed; only the constructed finder
        # instance is stored, as before.)
        self.base_order_finder = get_order_finder(**base_order_finder)
        self.slicer = get_slicer(**slicer)

    def find_order(self, tn, **kwargs):
        """Yield sliced contraction schemes for ``tn``."""
        tn = tn._expand_and_delta()
        order_gen = self.base_order_finder.find_order(tn=tn)
        # NOTE(review): the first scheme from the base finder is discarded —
        # presumably to prime the generator; confirm with the slicer protocol.
        next(order_gen)
        while True:
            res = self.slicer.slice(tn, order_gen)
            yield res
119 |
120 |
def get_order_finder(**kwargs):
    """Instantiate the order finder selected by ``order_finder_name``.

    :param kwargs: ``order_finder_name`` chooses the class ('default' if
        absent); ``order_finder_params`` is forwarded to its constructor.
    :returns: :class:`OrderFinder` -- the constructed order finder.
    """
    from acqdp.tensor_network.kahypar_order_finder import KHPOrderFinder
    registry = {
        'khp': KHPOrderFinder,
        'default': OrderFinder,
        'oe': OptEinsumOrderFinder,
        'sliced': SlicedOrderFinder
    }
    finder_cls = registry[kwargs.get('order_finder_name', 'default')]
    params = kwargs.get('order_finder_params', {})
    return finder_cls(**params)
132 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/contractor.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import numpy
3 | from multiprocessing import Pool
4 | from functools import lru_cache
5 |
6 | DEFAULT_ENGINE = 'numpy'
7 |
8 |
@lru_cache(maxsize=None)
def getDeployPlugin(pluginName):
    """Import and cache a deployment plugin.

    A plugin is just an installed python module that has a "tensor_network"
    submodule; repeated lookups return the same cached module object.
    """
    import importlib
    return importlib.import_module(pluginName)
15 |
16 |
class Contractor:
    """Contractor class for tensor network contraction: takes a :class:`ContractionTask` object and executes it
    sequentially. For :class:`NetworkContractionTask`, multi-processing is available to further accelerate the
    computation.

    :ivar backend: When set to `jax`, large tensor contractions will make use of the `jax` backend. `numpy.einsum` is used
        otherwise.
    :ivar exeEngine: Extension interface for other contraction backends. Set to `None` by default. When set to `parallel`,
        subtasks will be computed simultaneously.
    """

    def __init__(self, exeEngine=None, backend='default', dtype=complex, **kwargs):
        self.exeEngine = exeEngine
        self.backend = backend
        self.dtype = numpy.dtype(dtype)

    def execute(self, tasks, lst=None, **kwargs):
        """Execute a contraction task.

        :param tasks: The task to be executed.
        :type tasks: :class:`acqdp.ContractionScheme`
        :param lst: The list of subtasks to be executed. If set to `None`, all subtasks are executed and merged together.
        :type lst: :class:`List`

        :returns: :class:`numpy.ndarray` -- Final result expressed as a multi-dimensional array.
        """
        tasks._load_data()
        if lst is None:
            lst = range(tasks.length)
        engine = self.exeEngine
        if engine is None:
            engine = DEFAULT_ENGINE

        if engine == 'numpy':
            # Sequential path: execute each subtask and merge keyed results.
            return tasks._merge(
                {i: self._execute(tasks[i], **kwargs) for i in lst})
        elif inspect.ismodule(engine):
            # A module passed directly as the engine acts as a plugin.
            res = engine.tensor_networkService.contractorExecute(tasks)
            return res
        elif engine == "parallel":
            # NOTE(review): starmap unpacks each tasks[i] as an argument
            # tuple, and _merge receives a list here but a dict in the
            # 'numpy' branch — verify both against ContractionTask.
            with Pool() as p:
                return tasks._merge(p.starmap(self._execute, [tasks[i] for i in lst]))
        elif engine.startswith("plugin:"):
            # "plugin:<module>" strings are resolved via getDeployPlugin.
            pluginName = engine[7:]
            plugin = getDeployPlugin(pluginName)
            res = plugin.tensor_networkService.contractorExecute(tasks)
            return res
        # NOTE(review): any other engine string silently returns None.

    def _execute(self,
                 task,
                 track=False,
                 normalize=False,
                 cnt=None,
                 **kwargs):
        """Run the command list of a single subtask.

        Commands are tuples (operation, lhs, rhs, kwargs) where operands are
        stored as (log-norm, ndarray) pairs. Operation codes: 'f' fixes
        indices, 'c' contracts via a cached expression or einsum, 'n' applies
        a custom function. Returns the final array only when ``cnt`` is None
        (i.e. the full command list was executed).
        """
        if cnt is None:
            commands = task.commands
        else:
            # Partial execution: only the first `cnt` commands.
            commands = task.commands[:cnt]
        output = task.output
        for command in commands:
            if track:
                print("Current Memory usage = {}".format(
                    self._track_memory(commands) + 4))
            operation = command[0]
            lhs = command[1]
            rhs = command[2]
            # NOTE: rebinds the function-level `kwargs` to this command's
            # per-command options for the remainder of the iteration.
            kwargs = command[3]
            try:
                if operation == 'f':
                    # Fix indices: move the fixed axes to the front, then
                    # select the requested entries.
                    res = numpy.moveaxis(lhs[0][1], kwargs['fix_idx'],
                                         range(len(kwargs['fix_idx'])))[tuple(
                                             [a[0] for a in kwargs['fix_to']])]
                    rhs[0] = (lhs[0][0], numpy.array(res))
                else:
                    if operation == 'c':
                        # Log-norms of operands add under contraction.
                        init_norm = sum([l[0][0] for l in lhs])
                        if 'expr' in kwargs:
                            if self.backend == 'jax':
                                res = kwargs['expr'](*[l[0][1] for l in lhs], backend='jax')
                            else:
                                res = kwargs['expr'](*[l[0][1] for l in lhs])
                        else:
                            res = numpy.array(numpy.einsum(kwargs['subscripts'],
                                                           *[l[0][1] for l in lhs]))
                    elif operation == 'n':
                        init_norm = 0
                        # Apply a custom function to the de-normalized operand.
                        res = kwargs['func'](numpy.exp(lhs[0][0]) * lhs[0][1],
                                             **kwargs)
                    if normalize:
                        # Rescale by the max-modulus entry and track the
                        # factor in log space to avoid overflow.
                        import numexpr as ne
                        norm = ne.evaluate(
                            'max(res.real ** 2 + res.imag ** 2)')**0.5
                        if norm == 0:
                            rhs[0] = (0, numpy.zeros(res.shape))
                        else:
                            res /= norm
                            rhs[0] = (numpy.log(norm) + init_norm, res)
                    else:
                        rhs[0] = (init_norm, res)
            except Exception as e:
                # Print the failing command for diagnosis, then re-raise.
                print(e)
                print(command)
                raise e
        if cnt is None:
            # Undo the log-space normalization on the final output.
            res = numpy.exp(output[0][0]) * output[0][1]
            return numpy.array(res)

    def _track_memory(self, commands):
        """Return log2 of the largest operand size reachable from ``commands``
        (a rough live-memory estimate; identity-deduplicates operands)."""
        lst = []
        for command in commands:
            if command[0] == 'c':
                for k in command[1]:
                    for j in lst:
                        if k[0] is None or j is k[0][1]:
                            break
                    else:
                        # for-else: operand not seen yet, record it.
                        lst.append(k[0][1])
            elif command[0] == 'f':
                k = command[1]
                for j in lst:
                    if k[0] is None or j is k[0][1]:
                        break
                else:
                    # NOTE(review): if lst is empty and k[0] is None this
                    # append raises TypeError — presumably unreachable; confirm.
                    lst.append(k[0][1])
        return numpy.log2(max([a.size for a in lst]))
142 |
143 |
# Module-level default contractor instance, used when callers do not
# configure their own.
defaultContractor = Contractor()


# Lazily-initialized process-wide contractor returned by getDefault();
# replaceable via setDefault().
_defaultContractor = None
148 |
149 |
def getDefault():
    """Return the process-wide default :class:`Contractor`, creating it lazily.

    Prefers the module-level ``defaultContractor``; falls back to a fresh
    :class:`Contractor` if the module cannot be imported.
    """
    global _defaultContractor
    if _defaultContractor is not None:
        return _defaultContractor
    try:
        from acqdp.tensor_network import contractor
        _defaultContractor = contractor.defaultContractor
    except ImportError:
        _defaultContractor = Contractor()
    return _defaultContractor
159 |
160 |
def setDefault(aContractor: Contractor):
    """Install ``aContractor`` as the process-wide default contractor."""
    global _defaultContractor
    _defaultContractor = aContractor
164 |
165 |
def contract(tn, **kwargs):
    """Contract ``tn`` with the process-wide default contractor."""
    default = getDefault()
    return default.contract(tn, **kwargs)
168 |
--------------------------------------------------------------------------------
/examples/circuit_simulation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import time
3 | import math
4 | from acqdp import circuit
5 | from acqdp.tensor_network import ContractionScheme, TensorNetwork
6 | from datetime import timedelta
7 | import json
8 | import argparse
9 | import os
10 |
# Rank-4 tensor network holding a single 2x2 kernel; the repeated edge labels
# in open_edges tie pairs of legs together.
t = TensorNetwork(open_edges=[0, 1, 1, 0])
t.add_node(0, [0, 1], np.array([[1, 1j], [1j, np.exp(np.pi * 1j / 6)]]))
# Two-qubit "FSim" unitary built from the network above. NOTE(review):
# presumably the fused iSWAP/CZ replacement applied by GRCS(simplify=True) —
# confirm against circuit.Unitary's constructor semantics.
ISWAP_CZ = circuit.Unitary(2, t, "FSim", True)
14 |
15 |
def GRCS(f, in_state=0, simplify=False):
    """Parse a ``.qsim`` circuit file into an :class:`acqdp.circuit.Circuit`.

    :param f: Path to the ``.qsim`` file; its first line is the qubit count,
        each following line is ``layer gate qubit(s) [params...]``.
    :param in_state: Computational-basis input state encoded as an integer
        bitmask (qubit 0 is the most significant bit); ``None`` leaves the
        input open.
    :param simplify: If True, replace the two-qubit gates in time step 2 and
        in time step ``max_time - 2`` with the precomputed ``ISWAP_CZ`` gate.
    :returns: :class:`circuit.Circuit` -- the parsed circuit.
    """
    with open(f) as fin:
        n = int(fin.readline())
        c = circuit.Circuit()
        if in_state is not None:
            # Prepare each qubit in |0> or |1> according to the bitmask,
            # at time step -1 (before all gates).
            for qubit in range(n):
                c.append(circuit.CompState[(in_state >> (n - qubit - 1)) & 1],
                         [qubit], -1)
        # Fixed gates keyed by the lowercase gate mnemonic used in .qsim files.
        gate_table = {
            'h':
            circuit.HGate,
            'x_1_2':
            circuit.Unitary(1,
                            np.array([[1 / np.sqrt(2), -1j / np.sqrt(2)],
                                      [-1j / np.sqrt(2), 1 / np.sqrt(2)]]),
                            name='X_1_2'),
            'y_1_2':
            circuit.Unitary(1,
                            np.array([[1 / np.sqrt(2), -1 / np.sqrt(2)],
                                      [1 / np.sqrt(2), 1 / np.sqrt(2)]]),
                            name='Y_1_2'),
            'hz_1_2':
            circuit.Unitary(
                1,
                np.array([[1 / np.sqrt(2), -np.sqrt(1j) / np.sqrt(2)],
                          [np.sqrt(-1j) / np.sqrt(2), 1 / np.sqrt(2)]]),
                name='W_1_2'),
            'cz':
            circuit.CZGate,
            't':
            circuit.Diagonal(1, np.array([1, np.exp(1j * np.pi / 4)])),
            'is':
            circuit.Diagonal(2, np.array([1, 1j, 1j, 1])) | circuit.SWAPGate
        }

        # Number of qubit operands each mnemonic consumes; used to split the
        # qubit indices from the trailing numeric parameters on each line.
        size_table = {
            'h': 1,
            'cz': 2,
            't': 1,
            'x_1_2': 1,
            'y_1_2': 1,
            'hz_1_2': 1,
            'is': 2,
            'rz': 1,
            'fs': 2
        }

        for line in fin:
            words = line.split()
            layer = int(words[0])
            target = list(
                int(x) for x in words[2:2 + size_table[words[1].lower()]])
            params = words[2 + size_table[words[1].lower()]:]
            if not params:
                # Parameterless gate: direct table lookup.
                c.append(gate_table[words[1].lower()], target, layer)
            elif len(params) == 1:
                # One parameter: a Z rotation by the given angle.
                c.append(
                    circuit.Diagonal(
                        1,
                        np.array([1, np.exp(1j * float(params[0]))]),
                        name='R_Z({})'.format(params[0])), target, layer)
            elif len(params) == 2:
                # Two parameters: an fSim(theta, phi) two-qubit gate.
                c.append(
                    circuit.Unitary(
                        2,
                        np.array([[1, 0, 0, 0],
                                  [
                                      0,
                                      math.cos(float(params[0])),
                                      -math.sin(float(params[0])) * 1j, 0
                                  ],
                                  [
                                      0, -math.sin(float(params[0])) * 1j,
                                      math.cos(float(params[0])), 0
                                  ],
                                  [
                                      0, 0, 0,
                                      math.cos(-float(params[1])) + math.sin(-float(params[1])) * 1j
                                  ]]),
                        name='FSim'), target, layer)
        if simplify:
            # Swap out the two-qubit gates in the second and
            # second-to-last layers for the fused ISWAP_CZ gate.
            for k in c.operations_by_name:
                if c.operations_by_name[k][
                        'time_step'] == 2 or c.operations_by_name[k][
                            'time_step'] == c.max_time - 2:
                    c.operations_by_name[k]['operation'] = ISWAP_CZ
        return c
103 |
104 |
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Simulate circuits with tensor network contraction.')
    parser.add_argument('circuit_file', help='the ciruit file (in .qsim format) to be simulated')
    parser.add_argument('-o', '--load-order', metavar='order_file', help='load a contraction order from a file')
    parser.add_argument('-s', '--save-order', metavar='order_file', help='save the contraction order to a file')
    parser.add_argument(
        '-a',
        '--num-amplitudes',
        metavar='N_a',
        default=1,
        type=int,
        help='number of amplitudes that would need to be sampled (used only to calculate the projected running time)')

    args = parser.parse_args()

    start_time_TZ = time.time()

    # Parse the .qsim file into a circuit and lower it to a tensor network.
    c = GRCS(args.circuit_file, simplify=False)
    n = len(c.all_qubits)
    tn = c.tensor_pure
    # Single precision halves memory; expand flattens nested networks.
    tn.cast(np.complex64)
    tn.expand(recursive=True)

    # Keep only the first 6 output qubits open; fix all others to |0>.
    open_indices = [0, 1, 2, 3, 4, 5]
    for i in range(n):
        if i not in open_indices:
            tn.fix_edge(tn.open_edges[i], 0)
    tn.open_edges = [tn.open_edges[i] for i in open_indices]

    # KaHyPar order-finding parameters shipped next to this script.
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(this_dir, 'khp_params.json'), 'r') as f:
        kwargs = json.load(f)
    if args.load_order is not None:
        print(f'Loading order file {args.load_order}\n')
        with open(args.load_order, 'r') as f:
            order = ContractionScheme.load(f)
    else:
        order = tn.find_order(**kwargs)
        print(order.cost)
        # Only freshly-found orders are saved.
        if args.save_order is not None:
            print(f'Saving order file {args.save_order}\n')
            with open(args.save_order, 'w') as f:
                ContractionScheme.dump(order, f)

    # Compile the scheme into executable (sliced) subtasks.
    tsk = tn.compile(order, **kwargs)
    print("Number of subtasks per batch --- %d ---" % (tsk.length))
    pp_time_TZ = time.time()
    # NOTE(review): compile_time is assigned but never used.
    compile_time = time.time()
    print("TaiZhang Preprocessing Time --- %s seconds ---" % (pp_time_TZ - start_time_TZ))
    start_time = time.time()
    results = 0
    # Time a small sample of subtasks and extrapolate the full cost.
    num_samps = 5
    tsk.cast('complex64')
    for i in range(num_samps):
        res = tsk[i].execute(**kwargs)
        results += res
    compute_time = time.time()
    print(results)
    # NOTE(review): 27648 is presumably the assumed degree of hardware
    # parallelism in the projection — confirm against the benchmark setup.
    tm = timedelta(seconds=args.num_amplitudes * (compute_time - start_time) * tsk.length / num_samps / 27648)
    print("Compute Time --- %s seconds ---" % (compute_time - start_time))
    print(f'Projected Running Time --- {tm} ---')
166 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/tensor_sum.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import copy
3 | from collections import OrderedDict
4 | from .tensor_valued import TensorValued, DTYPE
5 |
6 |
class TensorSum(TensorValued):
    """A :class:`TensorSum` object represents the summation of multiple tensors.

    :ivar terms_by_name: a dictionary with key-value pairs, where the key is the name of a summand and the value is the
        corresponding summand :class:`TensorValued` object.
    """

    def __init__(self, terms=None, dtype: type = DTYPE) -> None:
        """The constructor of a `TensorSum` object.

        :param terms: Optional mapping from term names to summand
            :class:`TensorValued` objects; copied into ``terms_by_name``.
        :param dtype: Data type of the summation.
        """
        super().__init__(dtype)
        # Bug fix: the original iterated over `self.terms_by_name` before the
        # attribute was ever assigned, so any non-None `terms` argument raised
        # AttributeError. Copy the given mapping (preserving its order) instead.
        if terms is None:
            self.terms_by_name = OrderedDict()
        else:
            self.terms_by_name = OrderedDict(terms)

    def __str__(self):
        term_str = "\nTerms:"
        for term_name in self.terms_by_name:
            term_str += "\n" + str(term_name) + "\n" + str(self.terms_by_name[term_name])
        return super().__str__() + term_str

    def _update_shape(self, curr, tmp):
        """Merge shape ``tmp`` into accumulated shape ``curr``.

        ``None`` (as a whole shape or a single entry) acts as a wildcard.

        :raises ValueError: if the shapes have different lengths or
            conflicting known entries.
        """
        if tmp is None:
            return curr
        if curr is None:
            return list(tmp)
        if len(curr) != len(tmp):
            raise ValueError('Component shapes do not match')
        for i in range(len(curr)):
            if curr[i] is None:
                curr[i] = tmp[i]
            elif (tmp[i] is not None) and (tmp[i] != curr[i]):
                raise ValueError('Component shapes do not match')
        return curr

    def _invalidate_shape_cache(self):
        # Drop the memoized shape; it is recomputed lazily by `shape`.
        if hasattr(self, '_cached_shape'):
            del self._cached_shape

    @property
    def shape(self):
        """The common property of all :class:`TensorValued` classes, yielding the shape of the object.

        :class:`TensorValued` objects must have compatible shapes in order to be connected together in a
        :class:`TensorNetwork`, or summed over in a :class:`TensorSum`.
        """
        if not hasattr(self, '_cached_shape'):
            curr = None
            for tsr_name in self.terms_by_name:
                tsr = self.terms_by_name[tsr_name]
                curr = self._update_shape(curr, tsr.shape)
            self._cached_shape = curr
        return tuple(self._cached_shape) if self._cached_shape is not None else None

    @property
    def is_valid(self):
        """The common property of all :class:`TensorValued` classes, indicating whether the :class:`TensorValued` object
        is valid or not.

        In every step of a program, all existing :class:`TensorValued` object must be valid, otherwise an exception
        should be thrown out; this property is for double checking that the current :class:`TensorValued` object is
        indeed valid.
        """
        # Valid iff the component shapes are mutually compatible.
        try:
            self.shape
        except ValueError:
            return False
        else:
            return True

    @property
    def is_ready(self):
        """The common property of all :class:`TensorValued` classes, indicating whether the current
        :class:`TensorValued` object is ready for contraction, i.e. whether it semantically represents a tensor with a
        definite value.

        In the process of a program, not all :class:`TensorValued` objects need to be ready; however once the `data`
        property of a certain object is queried, such object must be ready in order to successfully yield an
        :class:`numpy.ndarray` object.
        """
        for t in self.terms_by_name.values():
            if not t.is_ready:
                return False
        return self.is_valid

    def add_term(self, term=None, tensor=None):
        """Add a term to the summation.

        :param term: Name of the term to be added. If not given, an auto-assigned one will be given as the output.
        :type term: hashable

        :param tensor: Value of the term to be added.
        :type tensor: :class:`TensorValued` or None

        :returns: The name of the newly added term.
        :raises KeyError: if ``term`` is already present.
        """
        from .tensor import Tensor
        if not isinstance(tensor, TensorValued):
            tensor = Tensor(tensor)
        if term is None:
            term = tensor.identifier
        # Validate before mutating any state, so a failed add leaves the
        # object unchanged (the duplicate check previously ran after the
        # dtype update).
        if term in self.terms_by_name:
            raise KeyError("term {} to be added into the tensor network already in the tensor network!".format(term))
        if tensor.dtype == complex:
            self.dtype = complex
        if tensor.shape is not None:
            self.shape  # Make sure the shape cache is initialized
            self._cached_shape = self._update_shape(self._cached_shape, tensor.shape)
        self.terms_by_name[term] = tensor
        return term

    def __iadd__(self, t):
        self.add_term(tensor=t)
        return self

    def update_term(self, term, tensor=None):
        """Update the value of a term in the summation.

        :param term: Name of the term to be updated.
        :type term: hashable
        :param tensor: New value of the term
        :type tensor: :class:`TensorValued`

        :returns: Name of the term to be updated.
        :raises KeyError: if ``term`` is not present.
        """
        from .tensor import Tensor
        # isinstance instead of type(...) == ...: ndarray subclasses must be
        # wrapped as well, since raw arrays lack the TensorValued interface.
        if isinstance(tensor, numpy.ndarray) or (tensor is None):
            tensor = Tensor(tensor)
        if term not in self.terms_by_name:
            raise KeyError("term {} not in the TensorSum object".format(term))
        self.terms_by_name[term] = tensor
        self._invalidate_shape_cache()
        return term

    def remove_term(self, term):
        """Remove a term from the summation.

        :param term: Name of the term to be removed.
        :type term: hashable
        :returns: :class:`TensorValued` Value of the removed term
        """
        pop = self.terms_by_name.pop(term)
        self._invalidate_shape_cache()
        return pop

    def fix_index(self, index, fix_to=0):
        """Fix the given index to the given value. The result :class:`TensorValued` object would have the same type as
        the original one, with rank 1 smaller than the original.

        :param index: The index to fix.
        :type index: :class:`int`.
        :param fix_to: The value to assign to the given index.
        :type fix_to: :class:`int`.
        :returns: :class:`TensorValued` -- The :class:`TensorValued` object after fixing the given index.
        """
        # (Stale ":raises: NotImplementedError" removed from the docstring —
        # this method is implemented and raises nothing of the sort.)
        ts = self.copy()
        for term in ts.terms_by_name:
            ts.terms_by_name[term] = ts.terms_by_name[term].fix_index(index, fix_to)
        ts._invalidate_shape_cache()
        return ts

    def cast(self, dtype):
        """Cast all summands (and this object) to ``dtype``; returns self."""
        self.dtype = dtype
        for term in self.terms_by_name:
            self.update_term(term, self.terms_by_name[term].cast(dtype))
        return self

    def contract(self, **kwargs):
        """Evaluate the object by summing over all the terms.

        :returns: :class:`numpy.ndarray`
        """
        res = [
            self.terms_by_name[term].contract(**kwargs)
            for term in self.terms_by_name
        ]
        return sum(res)

    def copy(self):
        """Shallow copy: a new TensorSum sharing the summand objects."""
        ts = TensorSum(dtype=self.dtype)
        for t in self.terms_by_name:
            ts.add_term(t, self.terms_by_name[t])
        return ts

    def __deepcopy__(self, memo):
        """Deep copy: every summand is deep-copied as well."""
        ts = TensorSum(dtype=self.dtype)
        for t in self.terms_by_name:
            ts.add_term(t, copy.deepcopy(self.terms_by_name[t]))
        return ts
198 |
--------------------------------------------------------------------------------
/docsource/source/sycamore.rst:
--------------------------------------------------------------------------------
1 | Demo: Simulating the Sycamore random quantum circuits
2 | ===========================================================
3 |
The concept of quantum supremacy was first introduced by John Preskill in [P12]_, marking a milestone at which a quantum computer achieves a certain task that is infeasible for even the most powerful classical computational resources. Quantum supremacy does not immediately indicate the usefulness of quantum computers in solving classically intractable computational problems; rather, it serves as an early demonstration that quantum computers can potentially outperform classical computers on specific tasks.
5 |
After years of rapid development of quantum hardware, Google claimed to have achieved quantum supremacy by sampling from a family of random circuits using their latest-generation superconducting quantum computer of 53 qubits, called the `Sycamore` quantum computer [AAB+19]_. It is estimated that the quantum computer takes about 200 seconds to sample 1 million bitstrings with a certain quality, while a comparable task would take 10 millennia on the Summit supercomputer. Using the powerful order-finding scheme in the ACQDP, the recent work from Alibaba managed to pin this estimate down to less than 20 days [HZN+20]_. Although this is still far from the 200 seconds on the quantum hardware, it serves as a great example of the ACQDP being used for intermediate-sized tensor network contraction, where up to a few thousand tensors are involved. The source code can be found at `examples.GRCS` and `examples.circuit_simulation`, and the contraction schemes previously found are available in the folder `benchmarks`.
7 |
8 |
9 | An easy example :math:`m=10`
10 | ----------------------------
11 |
12 | `examples/GRCS.py` provides a preliminary parser translating `.qsim` files for the Sycamore circuit instances to `acqdp.circuit.Circuit` and then to `acqdp.tensor_network.TensorNetwork`.
13 |
14 | To run the Sycamore circuit with 10 layers, run:
15 |
16 | .. code-block:: zsh
17 |
18 | python -m examples.circuit_simulation benchmark/circuit_n53_m10_s0_e0_pABCDCDAB.qsim
19 |
20 | Without specifying an order, the program finds a contraction order by invoking the Kahypar hypergraph decomposition scheme. In one run of the script, the program outputs:
21 |
22 | .. code-block:: python
23 |
24 | Process 0 succeeded with cost = 10.62287901105588, cw = 27.0, num_split = 0
25 |
This indicates that a contraction order (without slicing) has been found, where the number of floating point operations is `10**10.62` and the biggest intermediate tensor has `2**27` entries. No slicing was needed since the tensor sizes are well within the hardware limit of 16 gigabytes. The program then proceeds with contraction. The total run time for 5 repetitions of the contraction on a single laptop reads:
27 |
28 | ::
29 |
30 | Compute Time --- 52.8843309879303 seconds ---
31 |
32 | A harder example: :math:`m=20`
33 | ------------------------------
34 |
35 | The previous example demonstrates a full run of the ACQDP on a relatively small instance. It is to be noted that the :math:`m=10` circuits are not the one used in [AAB+19]_ for the quantum supremacy experiment; instead, the full experiment ran the much deeper, :math:`m=20` circuit. Not only simulating the :math:`m=20` quantum circuit is difficult, it takes longer time to find a good contraction order together with index slicing to carry out the simulation efficiently.
36 |
37 | Here we choose to include the contraction schemes we found earlier. Note that all Sycamore quantum circuits with the same :math:`m` have identical tensor network structure, and those contraction schemes can be reused for different instances of random circuits. The contraction schemes are stored in the `benchmark` folder.
38 |
39 | .. code-block:: zsh
40 |
41 | python -m examples.circuit_simulation benchmark/circuit_n53_m20_s0_e0_pABCDCDAB.qsim -o benchmark/m20_1.json
42 |
One gets the estimated cost and number of subtasks:
44 |
45 | ::
46 |
47 | cost = 19.124598309858378, cw = 29.0, num_slice = 25
48 | Number of subtasks per batch --- 33554432 ---
49 |
50 | Performance
51 | ------------
52 |
53 | In [HZN+20]_ , we compared our theoretical number of floating point operations and projected experimental running time to the existing results:
54 |
55 | .. image:: benchmark.pdf
56 | :width: 700
57 | :alt: Comparison of FLOPs / projected running time of the simulation tasks
58 |
59 |
Classical simulation cost and extrapolated running time of sampling from :math:`m`-cycle random circuits with low XEB fidelities. The dashed lines represent the theoretical number of floating point operations (FLOPs) and the solid lines represent extrapolated running times from the experiments. The two axes are aligned by the theoretical GPU efficiency of an Nvidia V100. Consequently, the dashed lines represent runtime lower bounds provided that GPU efficiency is fully saturated. Numerical data for ACQDP is reported in Table 1. The velvet line is reported in [AAB+19]_ using the hybrid Schrödinger-Feynman algorithm, where the projected running time is estimated from a different architecture than Summit, and so the theoretical FLOPs are not shown.
61 |
62 | Appendix
63 | ************
64 |
65 | The Sycamore quantum circuit
66 | -----------------------------
67 |
The quantum circuits run on the Google Sycamore quantum device are drawn from a particular distribution of quantum circuits.
69 |
70 | .. image:: circuit.pdf
71 | :width: 700
72 | :alt: Sycamore circuit
73 |
The structure of the 53-qubit random quantum circuits is shown above. There are 53 qubits on the Sycamore quantum chip, with a pairwise connection graph as shown in (a). The random circuits consist of repetitions of alternations between fixed two-qubit gates followed by random one-qubit gates. Lines of different colors in (a) represent two-qubit gates that appear in different layers. (b) shows a schematic diagram of an 8-cycle circuit. Each cycle includes a layer of random single-qubit gates (empty squares in the diagram) and a layer of two-qubit gates (labeled A, B, C, or D, and colored according to the two-qubit gates in (a)). For longer circuits, the layers repeat in the sequence A, B, C, D, C, D, A, B. Note that there is an extra layer of single-qubit gates preceding measurement.
75 |
76 | The classical simulation algorithm
77 | ----------------------------------
78 |
79 | We adopt the tensor network contraction framework proposed in [BIS+18]_, [AAB+19]_ as the basis for our simulation of random circuit sampling. This framework assumes that the outcome distribution of a random quantum circuit is a randomly permuted Porter-Thomas distribution. Under this assumption, we can perform *frugal rejection sampling* on bitstrings by computing the corresponding amplitudes [MFI+18]_. When the batch size of bitstrings is sufficiently large (chosen in our case to be 64), then with high probability, at least one outcome among the batch will be accepted. It can be estimated from [MFI+18]_ that this framework achieves almost perfect sampling when the batch size is chosen to be 64. We can choose the batch to be a state vector on 6 qubits while randomly post-selecting the remaining 47 qubits. In this case, the aggregated result of the amplitudes can be expressed as an open tensor network. This translates the task of sampling from random quantum circuits to the task of contracting a tensor network. For random circuits with :math:`m=12,14,20` cycles, we choose the qubits (0,1,2,3,4,5) in the upper-right corner, and for :math:`m=16, 18` cycles, we choose the qubits (10,17,26,36,27,18) in the lower-right corner. These choices minimize the overhead introduced by simultaneously evaluating each batch of 64 amplitudes.
80 |
81 | References
82 | *************************
83 |
84 |
85 | .. [P12] John Preskill, *Quantum computing and the entanglement frontier*, arXiv preprint arXiv:1203.5813, 2012.
86 | .. [AAB+19] Frank Arute et al, *Quantum supremacy using a programmable superconducting processor*, Nature, 574(7779):505– 510, 2019.
87 | .. [HZN+20] Cupjin Huang, Fang Zhang, Michael Newman, Junjie Cai, Xun Gao, Zhengxiong Tian, Junyin Wu, Haihong Xu, Huanjun Yu, Bo Yuan, Mario Szegedy, Yaoyun Shi, and Jianxin Chen, *Classical Simulation of Quantum Supremacy Circuits*, arXiv preprint arXiv:2005.06787, 2020.
88 | .. [MFI+18] Igor L Markov, Aneeqa Fatima, Sergei V Isakov, and Sergio Boixo. *Quantum supremacy is both closer and farther than it appears*, arXiv preprint arXiv:1807.10749, 2018.
89 | .. [BIS+18] Sergio Boixo, Sergei V Isakov, Vadim N Smelyanskiy, Ryan Babbush, Nan Ding, Zhang Jiang, Michael J Bremner, John M Martinis, and Hartmut Neven. *Characterizing quantum supremacy in near-term devices*. Nature Physics, 14(6):595–600, 2018.
90 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/undirected_contraction_tree.py:
--------------------------------------------------------------------------------
1 | import networkx as nx
2 |
3 |
class UndirectedContractionTree:
    """Contraction tree corresponding to a pairwise sequential contraction order, with interfaces for branch flipping
    and merging.

    The tree is stored as an *unrooted* :class:`networkx.Graph`:

    * nodes ``0 .. n-1`` are leaves (the original operands, in the order they
      appear on the left-hand side of ``eq``);
    * nodes ``n .. 2n-3`` are intermediate pairwise contractions;
    * instead of a root node, ``self.root`` is the pair of nodes whose
      connecting edge represents the final contraction.

    Each edge ``(u, v)`` carries directional metadata: ``graph[u][v][v]``
    holds the union of subscripts appearing in the subtree on ``v``'s side of
    the edge (see :meth:`preprocess_edge`).

    TODO: Merge this class with :class:`ContractionTree` for a unified interface on contraction trees.
    """

    def __init__(self, eq, path):
        """Build the tree from an einsum-style equation and a pairwise path.

        :param eq: equation such as ``'ab,bc->ac'`` (spaces are ignored).
        :param path: sequence of index pairs into the running operand list,
            in the format used by ``opt_einsum`` contraction paths.
        """
        lhs, rhs = eq.replace(' ', '').split('->')
        lhs = lhs.split(',')
        self.n = len(lhs)
        self.open_subscripts = set(rhs)
        if self.n == 1:
            raise ValueError('Cannot construct undirected contraction tree with only one operand')
        self.graph = nx.Graph()
        # `operands` mirrors the running operand list that `path` indexes into.
        operands = list(range(self.n))
        self.graph.add_nodes_from((i, {'subscripts': set(x)}) for i, x in enumerate(lhs))
        t = self.n  # next fresh id for an intermediate node
        # The last contraction in `path` becomes the root *edge*, not a node.
        for i, j in path[:-1]:
            if i > j:
                i, j = j, i
            # Pop the larger index first so the smaller one stays valid.
            v = operands.pop(j)
            u = operands.pop(i)
            self.graph.add_edge(u, t)
            self.graph.add_edge(v, t)
            self.graph.nodes[u]['parent'] = self.graph.nodes[v]['parent'] = t
            operands.append(t)
            t += 1
        u, v = self.root = tuple(operands)  # There should be two operands left
        self.graph.add_edge(u, v)
        # Across the root edge, each endpoint is the other's parent.
        self.graph.nodes[u]['parent'] = v
        self.graph.nodes[v]['parent'] = u
        # Populate the directional subscript caches on every edge.
        for u, v in self.graph.edges:
            self.preprocess_edge(u, v)
            self.preprocess_edge(v, u)
        self.cost = self.root_cost = 0
        self.compute_root_cost()
        for v in range(self.n, self.n * 2 - 2):
            self.compute_node_cost(v)
        self.detect_stem()

    def is_leaf(self, v):
        # Leaves are exactly the original operands, numbered 0 .. n-1.
        return v < self.n

    def preprocess_edge(self, u, v):
        """Compute (and cache on the edge) the union of subscripts of the
        subtree on ``v``'s side of edge ``(u, v)``; return the cached dict."""
        if v not in self.graph[u][v]:
            if self.is_leaf(v):
                self.graph[u][v][v] = {'subscripts': self.graph.nodes[v]['subscripts']}
            else:
                # Recurse into v's two children away from u.
                children = [c for c in self.graph[v] if c != u]
                assert len(children) == 2
                d0, d1 = (self.preprocess_edge(v, c) for c in children)
                self.graph[u][v][v] = {'subscripts': d0['subscripts'] | d1['subscripts']}
        return self.graph[u][v][v]

    def open_subscripts_at_edge(self, u, v):
        """Subscripts of the intermediate tensor on ``v``'s side of edge
        ``(u, v)``: those shared with the other side or globally open."""
        edge = self.graph[u][v]
        if self.is_leaf(v):
            return edge[v]['subscripts']
        return edge[v]['subscripts'] & (edge[u]['subscripts'] | self.open_subscripts)

    def compute_cost(self, s0, s1, s):
        """Cost of contracting tensors with subscript sets ``s0`` and ``s1``
        into a tensor with subscripts ``s``: 2 per involved subscript, doubled
        when indices are actually summed out (``(s0 | s1) - s`` non-empty)."""
        res = 2 ** len(s0 | s1 | s)
        if (s0 | s1) - s:
            res *= 2
        return res

    def compute_root_cost(self):
        """Recompute the cost of the final (root-edge) contraction and update
        the cached total ``self.cost`` incrementally."""
        u, v = self.root
        s0 = self.open_subscripts_at_edge(u, v)
        s1 = self.open_subscripts_at_edge(v, u)
        self.cost -= self.root_cost
        self.root_cost = self.compute_cost(s0, s1, self.open_subscripts)
        self.cost += self.root_cost

    def compute_node_cost(self, v):
        """Recompute the cost of the contraction at internal node ``v`` and
        update the cached total ``self.cost`` incrementally."""
        parent = self.graph.nodes[v]['parent']
        children = [c for c in self.graph[v] if c != parent]
        assert len(children) == 2
        s0, s1 = (self.open_subscripts_at_edge(v, c) for c in children)
        if 'cost' in self.graph.nodes[v]:
            self.cost -= self.graph.nodes[v]['cost']
        self.graph.nodes[v]['cost'] = self.compute_cost(s0, s1, self.open_subscripts_at_edge(parent, v))
        self.cost += self.graph.nodes[v]['cost']

    def get_internal_path(self, v):
        """Post-order list of ``(child0, child1, node)`` triples for the
        subtree hanging below ``v`` (away from its parent)."""
        if self.is_leaf(v):
            return []
        parent = self.graph.nodes[v]['parent']
        c0, c1 = [c for c in self.graph[v] if c != parent]
        return self.get_internal_path(c0) + self.get_internal_path(c1) + [(c0, c1, v)]

    def get_path(self):
        """Convert the tree back into a pairwise path of positional index
        pairs (the same format accepted by :meth:`__init__`)."""
        operands = list(range(self.n))
        res = []
        for r in self.root:  # Two halves of the contraction tree
            for c0, c1, v in self.get_internal_path(r):
                res.append((operands.index(c0), operands.index(c1)))
                operands.remove(c0)
                operands.remove(c1)
                operands.append(v)
        # Final step contracts the two remaining halves.
        return res + [(0, 1)]

    def switch_edges(self, e0, e1):
        """
        Switch the edges e0 = (u, c0) and e1 = (v, c1) by connecting
        c0 to v and c1 to u instead. The path c0 -- u -- v -- c1 must
        be a path in the tree.
        This might break the stem, so if one or both of e0 and e1 may
        be on the stem, please use `switch_branches`, `merge_branches`,
        `unmerge_branches`, or fix the stem manually.
        """
        for (u, c0), (v, c1) in (e0, e1), (e1, e0):
            # Re-hang c0 under v, carrying over the cached directional data.
            self.graph.add_edge(v, c0)
            self.graph[v][c0][c0] = self.graph[u][c0][c0]
            self.graph[v][c0][v] = self.graph[u][c0][u]
            self.graph.remove_edge(u, c0)
            if self.graph.nodes[c0]['parent'] == u:
                self.graph.nodes[c0]['parent'] = v
            if self.graph.nodes[u]['parent'] == c0:
                # u was rooted through c0; reorient the parent pointers.
                self.graph.nodes[v]['parent'] = c0
                self.graph.nodes[u]['parent'] = v
            if set(self.root) == {u, c0}:
                self.root = (v, c0)
        # After the loop, (u, v) is the middle edge of the switched path; its
        # two subtrees changed, so the caches and costs must be rebuilt.
        self.graph[u][v].clear()
        self.preprocess_edge(u, v)
        self.preprocess_edge(v, u)
        self.compute_node_cost(u)
        self.compute_node_cost(v)
        if set(self.root) == {u, v}:
            self.compute_root_cost()

    def step_root(self, u, v):
        """Move the root edge to the adjacent edge ``(u, v)``, where ``u`` is
        an endpoint of the current root edge."""
        if u not in self.root:
            raise ValueError('The new root must be adjacent to the old root at vertex u')
        self.root = u, v
        self.graph.nodes[u]['parent'] = v
        self.compute_node_cost(u)
        self.compute_root_cost()

    def detect_stem_in_subtree(self, u, v):
        """Detect the heaviest path in the subtree descending from the edge (u, v), *and* the heaviest such path that
        ends at v.

        Return format is (cost, endpoint, endpoint), (cost, endpoint).
        """
        if self.is_leaf(v):
            return (0, v, v), (0, v)
        c0, c1 = [c for c in self.graph[v] if c != u]
        cost_v = self.graph.nodes[v]['cost']
        res0, (cost0, end0) = self.detect_stem_in_subtree(v, c0)
        res1, (cost1, end1) = self.detect_stem_in_subtree(v, c1)
        # Heaviest downward path ending at v extends the heavier child path.
        cost, end = max((cost0, end0), (cost1, end1))
        # Best overall: entirely within one child subtree, or bending at v.
        res = max(res0, res1, (cost0 + cost1 + cost_v, end0, end1), key=lambda x: x[0])
        return res, (cost + cost_v, end)

    def detect_stem(self):
        """Find the heaviest leaf-to-leaf path and cache it as ``self.stem``
        (a list of consecutive node ids)."""
        # Just choose any leaf as the root to make things easier.
        (cost, end0, end1), _ = self.detect_stem_in_subtree(0, self.graph.nodes[0]['parent'])
        # Maybe not the most efficient, but convenient enough
        self.stem = nx.shortest_path(self.graph, end0, end1)
        return self.stem

    def switch_branches(self, i):
        """Switch the branches at nodes i and i+1 of the stem."""
        t0, u, v, t1 = self.stem[i - 1:i + 3]
        # c0/c1 are the off-stem branches hanging from u and v respectively.
        c0, = set(self.graph[u]) - {t0, v}
        c1, = set(self.graph[v]) - {u, t1}
        self.switch_edges((u, c0), (v, c1))

    def merge_branches(self, i):
        """Merge the branches at nodes i and i+1 of the stem."""
        t0, u, v, t1 = self.stem[i - 1:i + 3]
        c0, = set(self.graph[u]) - {t0, v}
        # Moving u's branch past v removes v from the stem.
        self.switch_edges((u, c0), (v, t1))
        self.stem.pop(i + 1)
        return c0

    def unmerge_branches(self, i, c0=None):
        """Split the branch at node i of the stem into two branches, with the sub-branch c0 now directly connecting to
        node i of the stem and the other sub-branch connecting to node i+1 of the stem.

        By default, use the sub-branch with smaller id as c0.
        """
        t0, u, t1 = self.stem[i - 1:i + 2]
        v, = set(self.graph[u]) - {t0, t1}
        if c0 is None:
            c0 = min(c for c in self.graph[v] if c != u)
        self.switch_edges((u, t1), (v, c0))
        self.stem.insert(i + 1, v)
194 |
--------------------------------------------------------------------------------
/docsource/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import os
16 | import sys
17 | from typing import Any
18 | sys.path.insert(0, os.path.abspath('../..'))
19 |
20 |
# -- Project information -----------------------------------------------------

project = 'Alibaba Cloud Quantum Development Platform'
copyright = '2020, Alibaba Quantum Laboratory'
author = 'Cupjin Huang, Mario Szegedy, Fang Zhang and Jianxin Chen'

# The short X.Y version (intentionally left empty; only `release` is used).
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
31 |
32 |
33 | # -- General configuration ---------------------------------------------------
34 |
35 | # If your documentation needs a minimal Sphinx version, state it here.
36 | #
37 | # needs_sphinx = '1.0'
38 |
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    # Enables the matplotlib `.. plot::` directive.
    # (This entry was accidentally listed twice; duplicates are redundant.)
    'matplotlib.sphinxext.plot_directive',
]
55 |
56 |
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx versions (>= 5) expect a string such as 'en'
# here and warn on None.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'bizstyle'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {'**': ['localtoc.html', 'relations.html', 'searchbox.html', 'side_info.html']}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'ACQDPdoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ACQDP.tex', 'ACQDP Documentation',
     'Cupjin Huang, Mario Szegedy, Fang Zhang and Jianxin Chen', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ACQDP', 'ACQDP Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ACQDP', 'ACQDP(授渔) Documentation',
     author, 'ACQDP', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy mapping format; newer Sphinx prefers
# named entries like {'python': ('https://docs.python.org/', None)}.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# Order autodoc members by their position in the source file.
# (The previous comment said "by type", which did not match the value.)
autodoc_member_order = 'bysource'
205 |
206 |
# Private members (names starting with an underscore) are normally hidden
# from the generated docs; the names listed here are the exceptions.
accepted_private_functions = ['__init__']


def member_function_test(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook.

    Private members keep autodoc's default decision unless whitelisted in
    ``accepted_private_functions``; public members are hidden when they carry
    no docstring. Returning ``False`` forces inclusion.
    """
    if name.startswith('_') and len(name) > 1:
        # Private: include only the whitelisted names.
        return False if name in accepted_private_functions else skip
    # Public: hide undocumented members.
    if not getattr(obj, '__doc__', None):
        return skip
    return False
224 |
225 |
def setup(app):
    """Sphinx entry point: register the member-filtering hook defined above."""
    app.connect('autodoc-skip-member', member_function_test)
228 |
229 |
# Monkey-patch Sphinx so that long default values shown in autodoc
# signatures are abbreviated instead of dumped verbatim.
import sphinx.util.inspect

# Keep a reference to the original implementation; the patched version
# falls back to it for short descriptions.
object_description = sphinx.util.inspect.object_description


def patched_object_description(object: Any) -> str:
    """Describe *object* like Sphinx does, abbreviating long descriptions.

    Lists are described element-wise (recursively). Any description longer
    than 50 characters is replaced by the object's type name, plus its
    ``.name`` attribute when present.
    """
    if isinstance(object, list):
        return '[' + ', '.join(patched_object_description(x) for x in object) + ']'
    res = object_description(object)
    if len(res) <= 50:
        return res
    # Too long: fall back to the type name (and .name when available).
    res = type(object).__name__
    if hasattr(object, 'name'):
        return f'{res} "{object.name}"'
    return f'<{res}>'


sphinx.util.inspect.object_description = patched_object_description
248 |
--------------------------------------------------------------------------------
/demo/QAOA/qaoa.py:
--------------------------------------------------------------------------------
1 | from acqdp.tensor_network import TensorNetwork
2 | from scipy import optimize
3 | import numpy
4 | import tqdm
5 | import time
6 | import itertools
7 |
8 |
def XRot(angle):
    """Return the 2x2 matrix cos(angle)*I + i*sin(angle)*X (an X-axis rotation)."""
    c = numpy.cos(angle)
    s = 1j * numpy.sin(angle)
    return numpy.array([[c, s],
                        [s, c]])
12 |
13 |
def checkinstance(csp):
    """Check if a given instance is valid.

    A valid instance is a dict mapping tuples of variable indices to arrays
    of shape ``(2,) * len(key)`` (one energy entry per 0/1 assignment).
    Prints a diagnostic and returns the error code 1 when invalid, 0 when
    valid (callers rely on these integer codes, so they are kept).
    """
    # isinstance (rather than an exact type comparison) also accepts
    # dict subclasses such as OrderedDict.
    if not isinstance(csp, dict):
        print("Instance has to be a dictionary and it has type ",
              type(csp))
        return 1
    try:
        for item in csp:
            shape = csp[item].shape
            expected = "(" + "2," * (len(item) - 1) + "2)"
            # The array must have one axis per variable in the clause...
            if len(item) != len(shape):
                print("Label of ", item, " has the shape", shape,
                      " Must have shape:", expected)
                return 1
            # ...and every axis must have dimension 2 (binary variables).
            for num in shape:
                if num != 2:
                    print("Label of ", item, " has the shape",
                          shape, " Must have shape:", expected)
                    return 1
        return 0
    except Exception:
        # Keys without len() or values without .shape land here.
        print(" instance must be a dictionary of tuples such as:")
        print(" instance = { (0,) : numpy.array([3,0]),")
        print(" (0, 1) : numpy.array([[3,0],[4,5]])}")
        return 1
39 |
40 |
class QAOAOptimizer:
    """Quantum variational optimization algorithm for CSP optimization, using the Quantum Approximate Optimization
    Algorithm ansatz.

    The CSP instance is a dict mapping tuples of variable indices (clauses)
    to numpy arrays of shape ``(2,) * len(clause)`` holding clause energies.
    Energy queries are evaluated clause by clause by contracting light-cone
    reduced tensor networks rather than simulating the full circuit.
    """

    def __init__(self, csp, params=None, num_layers=2):
        """Constructor of :class:`QAOAOptimizer` class.

        :param csp: CSP instance; see :func:`checkinstance` for the format.
        :param params: initial angles of length ``2 * num_layers`` (betas
            followed by gammas); drawn uniformly at random when omitted.
        :param num_layers: number of QAOA layers.
        """
        if params is None:
            params = 2 * numpy.pi * numpy.random.rand(2 * num_layers)
        self.set_task(csp, num_layers, params)
        assert checkinstance(self.csp) == 0

    def set_task(self, csp, num_layers, params=None, **kwargs):
        """Initialize bookkeeping for a (csp, num_layers) task.

        Extra keyword arguments are stored as attributes on the instance.
        """
        self.csp = {key: numpy.asarray(value) for key, value in csp.items()}
        self.clauses = sorted(list(self.csp))
        self.num_clause = len(csp)
        # Collect the variables that actually appear in some clause.
        s = set()
        for clause in self.csp:
            s |= set(clause)
        self.lst_var = sorted(list(s))
        self.params = params
        self.id = hash(tuple(csp)) + hash((num_layers))
        self.num_layers = num_layers
        # `data` maps keys to *single-element lists* so tensors can later be
        # swapped in place (see `decorate`) while tasks keep references:
        #   i / -i -> layer-i mixer tensor (and its conjugate),
        #   (i, clause) / (-i, clause) -> layer-i phase tensor for `clause`,
        #   (0, clause) -> the raw clause tensor itself.
        self.data = {i: [None] for i in range(1, num_layers + 1)}
        self.data.update({-i: [None] for i in range(1, num_layers + 1)})
        for clause in self.clauses:
            self.data.update({(0, clause): [self.csp[clause]]})
            self.data.update({(i, clause): [None] for i in range(1, num_layers + 1)})
            self.data.update({(-i, clause): [None] for i in range(1, num_layers + 1)})
        for key, value in kwargs.items():
            setattr(self, key, value)

    def lightcone(self, qubits, csp, num_layers):
        """Construct simplified tensor network corresponding to all QAOA circuit element acting non-trivially on a set
        of qubits.

        Positive node/edge labels belong to the circuit, negative ones to its
        conjugate. `turns` records, for each qubit that enters the light cone
        later, the layer at which it first participates. Returns the network
        and the full set of light-cone qubits.
        """
        qubits_set = set(qubits)
        turns = {}
        tn = TensorNetwork(dtype=complex)
        for i in range(num_layers):
            # Mixer (single-qubit) tensors for every qubit currently in the cone.
            for qubit in qubits_set:
                tn.add_node((2 * i + 1, qubit), [(i, qubit), (i + 1, qubit)], None)
                tn.add_node((-2 * i - 1, qubit), [(-i, qubit), (-i - 1, qubit)], None)
            clauses = []
            new_set = set()
            for clause in csp:
                if set(clause).intersection(qubits_set):
                    clauses.append(clause)
                    new_set |= set(clause)
                    # Phase tensors for the clause and its conjugate.
                    tn.add_node((2 * i + 2, clause), [(i + 1, q) for q in clause], None)
                    tn.add_node((-2 * i - 2, clause), [(-i - 1, q) for q in clause], None)
            turns.update({qubit: i + 1 for qubit in new_set.difference(qubits_set)})
            qubits_set |= new_set
        # Glue circuit and conjugate together at the layer where each late
        # qubit first participates (NOTE(review): presumably because earlier
        # gates on that qubit cancel against their conjugates -- confirm).
        for qubit in turns:
            tn.merge_edges([(turns[qubit], qubit), (-turns[qubit], qubit)], merge_to=(0, qubit))
        tn.update_dimension({e: 2 for e in tn.edges_by_name})
        return tn, qubits_set

    def decorate(self, params=None):
        """Assign specific values to the relevant tensor networks (specified by `tn.dict`) according to the input
        parameter values."""
        if params is None:
            params = self.params
        # First half of `params` are the mixer angles, second half the phases.
        betas = params[:self.num_layers]
        gammas = params[self.num_layers:]
        for i in range(1, self.num_layers + 1):
            # Layers index the parameter arrays from the end (betas[-i]).
            self.data[i][0] = XRot(betas[-i])
            self.data[-i][0] = XRot(-betas[-i])
            for clause in self.clauses:
                self.data[(i, clause)][0] = numpy.exp(1j * gammas[-i] * self.csp[clause])
                self.data[(-i, clause)][0] = numpy.exp(-1j * gammas[-i] * self.csp[clause])
        # Push the refreshed tensors into each compiled clause task.
        for tsk in self.query_dict:
            self.query_dict[tsk].set_data(
                {i: self.data_dict[tsk][i][0] for i in self.data_dict[tsk]})

    def preprocess_clause(self, clause, **kwargs):
        """Build and compile the light-cone network that evaluates the energy
        contribution of a single clause.

        :return: ``({clause: compiled_task}, {clause: node -> tensor mapping})``
        """
        tn, set_qubits = self.lightcone(clause, self.csp, self.num_layers)
        multiplier = kwargs.get('multiplier', 1)
        # Scale by 2 ** -|light-cone qubits| (NOTE(review): presumably the
        # normalization of the initial |+> states -- confirm).
        multiplier *= 2 ** (-len(set_qubits))
        tn.add_node((0, clause), [(0, i) for i in clause], None)
        task = tn.compile(tn.find_order(**kwargs), **kwargs)
        dic = {}
        for k in tn.nodes_by_name:
            if k[0] == 0:
                # The clause tensor itself, folded with the multiplier.
                dic[k] = [multiplier * numpy.array(self.data[(0, clause)][0])]
            elif k[0] % 2 == 0:
                # Even labels +-(2i+2) -> phase tensor of layer i+1.
                dic[k] = self.data[(k[0] // 2, k[1])]
            else:
                # Odd labels +-(2i+1) -> mixer tensor of layer +-(i+1).
                dic[k] = self.data[(k[0] + (1 if k[0] > 0 else -1)) // 2]
        return {clause: task}, {clause: dic}

    def preprocess(self, **kwargs):
        """Preprocessing for calculating the energy value of the QAOA circuit.

        Compiles one contraction task per clause. Clauses may also be passed
        via the ``clauses`` kwarg as dicts ``{'clause': ..., 'weight': ...}``.
        """
        time_start = time.time()
        print("Preprocessing for energy query...")

        self.query_dict = {}
        self.data_dict = {}
        self.clauses = kwargs.get('clauses', self.clauses)
        for clause in tqdm.tqdm(self.clauses):
            m = 1
            if isinstance(clause, dict):
                m = clause['weight']
                clause = clause['clause']
            a, b = self.preprocess_clause(clause, multiplier=m, **kwargs)
            self.query_dict.update(a)
            self.data_dict.update(b)
        print("Preprocessing time for queries: {}".format(time.time() - time_start))

    def optimize(self, method=None, init_value=None, **kwargs):
        """Optimizing over the parameters with respect to the total energy function.

        :param method: method name forwarded to :func:`scipy.optimize.minimize`.
        :param init_value: starting point; ``None`` uses the stored parameters
            and the string ``'zeros'`` starts from all-zero angles.
        :return: ``(optimal energy, optimal parameters)``.
        """
        if init_value is None:
            init_value = self.params
        elif init_value == 'zeros':
            init_value = numpy.zeros(len(self.params))
        else:
            init_value = numpy.reshape(numpy.array(init_value),
                                       (len(self.params),))
        # Only the real part is minimized (the contraction result is complex;
        # presumably real up to numerical noise).
        res = optimize.minimize(lambda x: numpy.real(self.query(x)),
                                init_value,
                                method=method,
                                tol=1.0e-2,
                                options={
                                    'disp': False,
                                    'maxiter': kwargs.get('num_calls', 100)
                                })
        params = res.x
        value = res.fun
        self.params = params
        return value, params

    def query(self, params=None, noise_config=None, clauses_list=None, **kwargs):
        """Querying of the total energy corresponding to a specific set of values of the parameters.

        If `None`, the internal parameter values will be used.

        ``noise_config`` and ``clauses_list`` are currently unused here.
        Requires :meth:`preprocess` to have been called first.
        """

        if params is None:
            params = self.params
        self.decorate(params)
        res = []
        # Sum the per-clause energies from the precompiled contraction tasks.
        for i in tqdm.tqdm(self.query_dict):
            res.append(self.query_dict[i].execute(**kwargs))
        res = sum(res)
        print("E({}) = {}".format(list(params), res))
        return res

    def energy(self, assignment):
        """Classical energy of a complete 0/1 assignment (sum over clauses)."""
        e = 0
        for i in self.csp:
            e += self.csp[i][tuple(assignment[k] for k in i)]
        return e

    def optimum(self):
        """Brute-force the minimum-energy assignment (exponential in the
        number of variables; intended for small instances)."""
        x = min(itertools.product([0, 1], repeat=len(self.lst_var)),
                key=lambda a: self.energy(a))
        return self.energy(x), x
196 |
--------------------------------------------------------------------------------
/demo/QEC/surface_code.py:
--------------------------------------------------------------------------------
1 | from acqdp.tensor_network import TensorNetwork
2 | import numpy as np
3 | from acqdp.circuit import CNOTGate, CZGate, Circuit, HGate, Measurement, PlusState, State, Trace, ZeroMeas, ZeroState
4 | from demo.QEC.noise_model import add_idle_noise, add_noisy_surface_code
5 |
6 |
# Physical error-model parameters consumed by demo.QEC.noise_model.
# NOTE(review): units are not stated here -- the T_* / tau_* entries look
# like times (presumably ns) with the *_inv entries their inverse rates;
# confirm against add_idle_noise / add_noisy_surface_code.
params = {
    'T_1_inv': 1 / 30000.0,
    'T_phi_inv': 1 / 60000.0,
    'p_axis': 1e-4,
    'p_plane': 5e-4,
    'delta_phi': 0.01,
    'T_g_1Q': 20.0,
    'T_g_2Q': 40.0,
    'tau_m': 300.0,
    'tau_d': 300.0,
    'gamma': 0,
    'alpha0': 4,
    'kappa': 1 / 250,
    'chi': 1.3 * 1e-3}

# Network with a single tensor: the all-ones vector on one qubit wire. Used
# as a "measurement" that accepts both outcomes (it sums over the qubit).
Z = TensorNetwork([0, 0])
Z.add_node('PH', [0], np.ones(2))
PlaceHolder = Measurement(1, Z, name='PH_nz')
# State paired with PlaceHolder to re-populate the qubit wire afterwards
# (see x_stab_meas with use_ndcompmeas=True).
TraceState = State(1, TensorNetwork([0, 0], bond_dim=2))

# Qubits are grouped by their coordinates mod 4: D1-D4 are data-qubit
# groups, X1/X2 and Z1/Z2 are X- and Z-ancilla groups.
qubit_group_name = {
    (0, 0): 'D1',
    (0, 2): 'D2',
    (2, 0): 'D3',
    (2, 2): 'D4',
    (1, 1): 'X1',
    (1, 3): 'Z1',
    (3, 1): 'Z2',
    (3, 3): 'X2',
    (-1, -1): 'dummy'
}
qubit_groups = {group: [] for group in qubit_group_name.values()}
# Data qubits sit on even coordinates, ancillas on odd ones; (-1, 3) and
# (5, 1) are extra ancillas ((-1, 3) mod 4 is (3, 3) -> X2, (5, 1) -> X1).
qubit_coords = ([(x * 2, y * 2) for x in range(3) for y in range(3)]
                + [(-1, 3)]
                + [(x * 2 + 1, y * 2 + 1) for x in range(2) for y in (range(3) if x % 2 else range(-1, 2))]
                + [(5, 1)])

# Bucket every qubit coordinate into its parity group.
for x, y in qubit_coords:
    qubit_groups[qubit_group_name[x % 4, y % 4]].append((x, y))
46 |
47 |
def add_CZ_gates(circuit, high_freq_group, low_freq_group):
    """Append a CZ between each high-frequency qubit and its first diagonal
    neighbour in ``low_freq_group`` (at most one gate per high-freq qubit).

    Two qubits are diagonal neighbours when both coordinates differ by
    exactly 1.
    """
    for hq in high_freq_group:
        partner = next((lq for lq in low_freq_group
                        if abs(hq[0] - lq[0]) == 1 and abs(hq[1] - lq[1]) == 1),
                       None)
        if partner is not None:
            circuit.append(CZGate, [hq, partner])
54 |
55 |
def x_stab_meas(circuit, measure_outcome=None, use_ndcompmeas=False):
    """Append one noiseless round of X-stabilizer measurement to ``circuit``.

    :param circuit: the :class:`Circuit` to extend in place.
    :param measure_outcome: optional mapping from ancilla coordinate to a
        measurement operation recording a known outcome; ignored when
        ``use_ndcompmeas`` is True.
    :param use_ndcompmeas: if True, insert the non-demolition placeholder
        measurement (``PlaceHolder``) instead of a computational-basis one.
    """
    hadamard_groups = ['D1', 'D2', 'D3', 'D4', 'X1', 'X2']

    def hadamard_layer():
        # One H on every data qubit and every X ancilla.
        for grp in hadamard_groups:
            for q in qubit_groups[grp]:
                circuit.append(HGate, [q])

    hadamard_layer()

    # Time slot 1 ~ 4: four "flux dance" CZ layers. Only the first four
    # entries of each tuple are used (two high-/low-frequency pairings).
    for dance in [('D2', 'X1', 'X2', 'D3', 'D4'),
                  ('D1', 'X1', 'X2', 'D4', 'D3'),
                  ('D1', 'X2', 'X1', 'D4', 'D3'),
                  ('D2', 'X2', 'X1', 'D3', 'D4')]:
        for high, low in (dance[0:2], dance[2:4]):
            add_CZ_gates(circuit, qubit_groups[high], qubit_groups[low])

    # Time slot B: undo the basis change.
    hadamard_layer()

    # Time slot C: read out and reset the X ancillas.
    for q in qubit_groups['X1'] + qubit_groups['X2']:
        if use_ndcompmeas:
            circuit.append(PlaceHolder, [q])
            circuit.append(TraceState, [q])
        elif measure_outcome is not None:
            circuit.append(measure_outcome[q], [q])
            circuit.append(ZeroState, [q])
        else:
            circuit.append(ZeroMeas, [q])
            circuit.append(ZeroState, [q])
94 |
95 |
def z_stab_meas(circuit, measure_outcome=None, use_ndcompmeas=False):
    """Append one noiseless round of Z-stabilizer measurement to ``circuit``.

    :param circuit: the :class:`Circuit` to extend in place.
    :param measure_outcome: optional mapping from ancilla coordinate to a
        measurement operation (ZeroMeas/OneMeas) recording a known outcome;
        ignored when ``use_ndcompmeas`` is True.
    :param use_ndcompmeas: if True, insert the non-demolition placeholder
        measurement (``PlaceHolder``) instead of a computational-basis one.
    """
    ancillas = qubit_groups['Z1'] + qubit_groups['Z2']

    # Basis change on the Z ancillas only.
    for q in ancillas:
        circuit.append(HGate, [q])

    # Time slot 5 ~ 8: four "flux dance" CZ layers. Only the first four
    # entries of each tuple are used.
    for dance in [('D1', 'Z1', 'Z2', 'D4', 'D3'),
                  ('D2', 'Z2', 'Z1', 'D3', 'D4'),
                  ('D2', 'Z1', 'Z2', 'D3', 'D4'),
                  ('D1', 'Z2', 'Z1', 'D4', 'D3')]:
        for high, low in (dance[0:2], dance[2:4]):
            add_CZ_gates(circuit, qubit_groups[high], qubit_groups[low])

    # Undo the basis change.
    for q in ancillas:
        circuit.append(HGate, [q])

    # Read out and reset the Z ancillas.
    for q in ancillas:
        if use_ndcompmeas:
            circuit.append(PlaceHolder, [q])
            circuit.append(TraceState, [q])
        elif measure_outcome is not None:
            circuit.append(measure_outcome[q], [q])
            circuit.append(ZeroState, [q])
        else:
            circuit.append(ZeroMeas, [q])
            circuit.append(ZeroState, [q])
135 |
136 |
def initial_state(coord=(-100, -100)):
    """Prepare a maximally entangled state between the surface code and an
    ancilla qubit placed at ``coord``.

    First prepares |0>_surf |+>_anc (with one noiseless X-stabilizer round
    projecting the data qubits), then entangles the ancilla with the logical
    qubit via three CNOTs along the top data row.

    :param coord: coordinate label for the ancilla qubit.
    :returns: the constructed :class:`Circuit`.
    """
    circ = Circuit()

    for site in qubit_coords:
        circ.append(ZeroState, [site])

    x_stab_meas(circ)

    circ.append(PlusState, [coord])
    for target in [(0, 0), (2, 0), (4, 0)]:
        circ.append(CNOTGate, [coord, target])
    return circ
154 |
155 |
def final_measurement(circuit: Circuit):
    """Append the final noiseless read-out rounds to ``circuit`` and return it.

    Traces out and resets the ancilla sites, runs a noiseless X-stabilizer
    round followed by CZ-coupled ancilla read-out, then a noiseless
    Z-stabilizer round followed by CNOT-coupled ancilla read-out.
    """
    for anc in [(1, 1), (1, -1), (3, 3), (3, 1), (-1, 3), (5, 1), (1, 3), (3, 5)]:
        circuit.append(Trace, [anc])
        circuit.append(ZeroState, [anc])

    x_stab_meas(circuit, use_ndcompmeas=True)  # Add final noiseless X-stabilizer measurements
    for anc, data in [((1, 1), (0, 0)), ((3, 3), (4, 4)), ((-1, 3), (0, 4)), ((5, 1), (4, 0))]:
        circuit.append(CZGate, [anc, data])
        circuit.append(Trace, [anc])
        circuit.append(TraceState, [anc])

    z_stab_meas(circuit, use_ndcompmeas=True)  # Add final noiseless Z-stabilizer measurements
    for anc, data in [((1, -1), (0, 0)), ((3, 1), (4, 0)), ((1, 3), (0, 4)), ((3, 5), (4, 4))]:
        circuit.append(CNOTGate, [anc, data])
        circuit.append(Trace, [anc])
        circuit.append(TraceState, [anc])
    return circuit
172 |
173 |
def surface_code_tensor_network(num_layers=2, params=params):
    """Build the tensor network for ``num_layers`` noisy syndrome-measurement
    rounds followed by the noiseless final read-out, sandwiched between the
    entangled initial state and the adjoint of a second copy.

    :param num_layers: number of noisy surface-code measurement rounds.
    :param params: noise-model parameters (defaults to the module-level dict;
        note the default binds that dict itself, so callers mutating it
        affect later calls).
    :returns: the expanded :class:`TensorNetwork` of the density operator.
    """
    noisy_meas_circ = Circuit()
    end_time = 0
    for _ in range(num_layers):
        # Each round is scheduled after the previous one; the helper returns
        # the updated end time of the circuit.
        end_time = add_noisy_surface_code(noisy_meas_circ, qubit_coords, time=end_time, params=params)
    add_idle_noise(noisy_meas_circ, params=params)
    d = final_measurement(noisy_meas_circ)
    init_state = initial_state()
    # Sandwich the noisy circuit between the initial state and the adjoint of
    # an identical preparation on a second ancilla label.
    c_prob = init_state | d | initial_state(coord=(-101, -101)).adjoint()
    tn = c_prob.tensor_density.expand(recursive=True)
    # Drop the all-ones placeholder nodes, leaving their wires summed over.
    # NOTE(review): this checks node_name[-1] == 'PH' while
    # surface_code_tensor_network_with_syndrome checks node_name[0] -- confirm
    # the intended layout of names in nodes_by_name; one of the two may be
    # matching the wrong component.
    for node_name in tn.nodes_by_name:
        if node_name[-1] == 'PH':
            tn.remove_node(node_name)
    return tn
188 |
189 |
def surface_code_tensor_network_with_syndrome(syndrome=None, num_layers=2, params=params):
    """Like :func:`surface_code_tensor_network`, but pins the measurement
    placeholders to a concrete syndrome bit-string.

    :param syndrome: sequence of ``8 * (num_layers + 1)`` bits, consumed in
        node-iteration order; defaults to all zeros. Noisy-round outcomes
        ('PH' nodes) are weighted by the readout confusion matrix, while the
        final noiseless outcomes ('PH_nz') are fixed exactly.
    :param num_layers: number of noisy surface-code measurement rounds.
    :param params: noise-model parameters; ``params['e_ro']`` (default 0.01)
        is the readout assignment-error probability.
    :returns: the expanded :class:`TensorNetwork` with syndrome data inserted.
    """
    if syndrome is None:
        syndrome = [0] * (8 * (num_layers + 1))
    noisy_meas_circ = Circuit()
    e_ro = params.get('e_ro', 0.01)
    # Symmetric 2x2 readout confusion ("butterfly") matrix.
    butterfly = np.array([[1 - e_ro, e_ro], [e_ro, 1 - e_ro]])
    end_time = 0
    for _ in range(num_layers):
        end_time = add_noisy_surface_code(noisy_meas_circ,
                                          qubit_coords,
                                          time=end_time,
                                          params=params)
    add_idle_noise(noisy_meas_circ, params=params)
    d = final_measurement(noisy_meas_circ)
    init_state = initial_state()
    c_prob = init_state | d | initial_state(coord=(-101, -101)).adjoint()
    tn = c_prob.tensor_density.expand(recursive=True)
    cnt = 0  # position in the syndrome bit-string
    for node_name in tn.nodes_by_name:
        if node_name[0] == 'PH' or node_name[0] == 'PH_nz':
            # NOTE(review): syndrome can no longer be None here (defaulted
            # above), so this guard is always true.
            if syndrome is not None:
                if node_name[0] == 'PH':
                    # Noisy round: replace the placeholder with the readout
                    # likelihood row for the observed bit.
                    tn.update_node(node_name, butterfly[syndrome[cnt]])
                else:
                    # Noiseless final round: pin the edge to the observed bit.
                    tn.fix_edge(tn.network.nodes[(0, node_name)]['edges'][0][0],
                                syndrome[cnt])
            cnt += 1
    return tn
218 |
--------------------------------------------------------------------------------
/acqdp/utility/opt_einsum_helpers.pyx:
--------------------------------------------------------------------------------
1 | """
2 | Contains helper functions for opt_einsum testing scripts
3 | """
4 |
5 | import numpy as np
6 |
7 | from opt_einsum.parser import get_symbol
8 |
9 | __all__ = ["build_views", "compute_size_by_dict", "find_contraction", "flop_count"]
10 |
11 | _valid_chars = "abcdefghijklmopqABC"
12 | _sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3, 2, 5, 7, 4, 3, 2, 3, 4])
13 | _default_dim_dict = {c: s for c, s in zip(_valid_chars, _sizes)}
14 |
15 |
def build_views(string, dimension_dict=None):
    """
    Builds random numpy arrays for testing.

    Parameters
    ----------
    string : str
        An einsum-style subscript string, e.g. ``'ab,bc->ac'``; one view is
        built per comma-separated term on the left-hand side.
    dimension_dict : dict, optional
        Mapping from index symbol to its size. Defaults to the module-level
        ``_default_dim_dict``.

    Returns
    -------
    ret : list of np.ndarray
        The resulting views.

    Examples
    --------
    >>> view = build_views('abbc', {'a': 2, 'b':3, 'c':5})
    >>> view[0].shape
    (2, 3, 3, 5)

    """
    sizes = _default_dim_dict if dimension_dict is None else dimension_dict
    lhs = string.split('->')[0]
    return [np.random.rand(*[sizes[idx] for idx in term])
            for term in lhs.split(',')]
49 |
50 |
def compute_size_by_dict(indices, idx_dict):
    """
    Computes the product of the sizes of ``indices`` under ``idx_dict``.

    Parameters
    ----------
    indices : iterable
        Index labels whose sizes are multiplied together.
    idx_dict : dictionary
        Mapping from index label to its size.

    Returns
    -------
    ret : int
        The resulting product (1 for empty ``indices``).

    Examples
    --------
    >>> compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
    90

    """
    product = 1
    for size in (idx_dict[idx] for idx in indices):  # lgtm [py/iteration-string-and-sequence]
        product *= size
    return product
78 |
79 |
def find_contraction(positions, input_sets, output_set):
    """
    Finds the contraction for a given set of input and output sets.

    Parameters
    ----------
    positions : iterable
        Integer positions (distinct) of the terms used in the contraction.
    input_sets : list
        List of sets representing the lhs side of the einsum subscript.
    output_set : set
        Set representing the rhs side of the overall einsum subscript.

    Returns
    -------
    new_result : set
        The indices of the resulting contraction.
    remaining : list
        List of sets that have not been contracted; the new set is appended
        at the end.
    idx_removed : set
        Indices removed from the entire contraction.
    idx_contraction : set
        The indices used in the current contraction.

    Examples
    --------

    # A simple dot product test case
    >>> pos = (0, 1)
    >>> isets = [set('ab'), set('bc')]
    >>> oset = set('ac')
    >>> find_contraction(pos, isets, oset)
    ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})

    # A more complex case with additional terms in the contraction
    >>> pos = (0, 2)
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set('ac')
    >>> find_contraction(pos, isets, oset)
    ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
    """
    # Pop from the back so earlier positions stay valid while removing.
    remaining = list(input_sets)
    contracted = [remaining.pop(i) for i in sorted(positions, reverse=True)]

    it = iter(contracted)
    idx_contract = next(it).union(*it)
    idx_remain = output_set.union(*remaining)

    # Indices still needed downstream survive; the rest are summed away.
    new_result = idx_contract & idx_remain
    idx_removed = idx_contract - new_result
    remaining.append(new_result)

    return new_result, remaining, idx_removed, idx_contract
133 |
134 |
def flop_count(idx_contraction, inner, num_terms, size_dictionary):
    """
    Computes the number of FLOPS in the contraction.

    Parameters
    ----------
    idx_contraction : iterable
        The indices involved in the contraction.
    inner : bool
        Does this contraction require an inner product?
    num_terms : int
        The number of terms in the contraction.
    size_dictionary : dict
        The size of each index in ``idx_contraction``.

    Returns
    -------
    flop_count : int
        The total number of FLOPS required for the contraction.

    Examples
    --------

    >>> flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
    90

    >>> flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
    270

    """
    # One multiply per pairwise combination of terms, plus one extra pass for
    # the summation when an inner product is required.
    factor = max(1, num_terms - 1)
    if inner:
        factor += 1
    return factor * compute_size_by_dict(idx_contraction, size_dictionary)
172 |
173 |
def rand_equation(n, reg, n_out=0, d_min=2, d_max=9, seed=None,
                  global_dim=False, return_size_dict=False):
    """Generate a random contraction and shapes.

    Parameters
    ----------
    n : int
        Number of array arguments.
    reg : int
        'Regularity' of the contraction graph. This essentially determines how
        many indices each tensor shares with others on average.
    n_out : int, optional
        Number of output indices (i.e. the number of non-contracted indices).
        Defaults to 0, i.e., a contraction resulting in a scalar.
    d_min : int, optional
        Minimum dimension size.
    d_max : int, optional
        Maximum dimension size.
    seed: int, optional
        If not None, seed numpy's random generator with this.
        NOTE(review): exact sample output may differ across numpy versions.
    global_dim : bool, optional
        Add a global, 'broadcast', dimension to every operand.
    return_size_dict : bool, optional
        Return the mapping of indices to sizes.

    Returns
    -------
    eq : str
        The equation string.
    shapes : list[tuple[int]]
        The array shapes.
    size_dict : dict[str, int]
        The dict of index sizes, only returned if ``return_size_dict=True``.

    Examples
    --------
    >>> eq, shapes = rand_equation(n=10, reg=4, n_out=5, seed=42)
    >>> eq
    'oyeqn,tmaq,skpo,vg,hxui,n,fwxmr,hitplcj,kudlgfv,rywjsb->cebda'

    >>> shapes
    [(9, 5, 4, 5, 4),
     (4, 4, 8, 5),
     (9, 4, 6, 9),
     (6, 6),
     (6, 9, 7, 8),
     (4,),
     (9, 3, 9, 4, 9),
     (6, 8, 4, 6, 8, 6, 3),
     (4, 7, 8, 8, 6, 9, 6),
     (9, 5, 3, 3, 9, 5)]
    """

    if seed is not None:
        np.random.seed(seed)

    # total number of indices: each of the n*reg//2 bonds is shared by two
    # terms, plus one extra index per requested output dimension
    num_inds = n * reg // 2 + n_out
    inputs = ["" for _ in range(n)]
    output = []

    # each index symbol gets a random size in [d_min, d_max]
    size_dict = {
        get_symbol(i): np.random.randint(d_min, d_max + 1)
        for i in range(num_inds)
    }

    # generate a list of indices to place either once or twice
    def gen():
        for i, ix in enumerate(size_dict):
            # generate an outer index (appears once, and in the output)
            if i < n_out:
                output.append(ix)
                yield ix
            # generate a bond (appears twice, on two different operands)
            else:
                yield ix
                yield ix

    # add the indices randomly to the inputs
    for i, ix in enumerate(np.random.permutation(list(gen()))):
        # make sure all inputs have at least one index
        if i < n:
            inputs[i] += ix
        else:
            # don't add any traces on same op: resample the target operand
            # until it does not already carry this index
            where = np.random.randint(0, n)
            while ix in inputs[where]:
                where = np.random.randint(0, n)

            inputs[where] += ix

    # possibly add the same global dim to every arg
    if global_dim:
        gdim = get_symbol(num_inds)
        size_dict[gdim] = np.random.randint(d_min, d_max + 1)
        for i in range(n):
            inputs[i] += gdim
        # output is still a list here; += appends the single character gdim
        output += gdim

    # randomly transpose the output indices and form equation
    output = "".join(np.random.permutation(output))
    eq = "{}->{}".format(",".join(inputs), output)

    # make the shapes
    shapes = [tuple(size_dict[ix] for ix in op) for op in inputs]

    ret = (eq, shapes)

    if return_size_dict:
        ret += (size_dict,)

    return ret
286 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/tensor_valued.py:
--------------------------------------------------------------------------------
1 | import numpy
2 |
3 | DTYPE = complex
4 |
5 |
def conjugate(t) -> 'TensorValued':
    """Return the complex conjugation of a `TensorValued` object.

    :param t: the object to conjugate.
    :returns: :class:`TensorValued` -- the conjugation of a `TensorValued` object.
    :raises: TypeError -- if `t` is not a :class:`TensorValued`.
    """
    # Local imports avoid circular imports among the tensor submodules.
    from .tensor import Tensor
    from .tensor_sum import TensorSum
    from .tensor_network import TensorNetwork
    from .tensor_view import TensorView
    if not isinstance(t, TensorValued):
        raise TypeError()
    elif isinstance(t, Tensor):
        # An undefined tensor stays undefined; otherwise conjugate the data.
        if t._data is None:
            return Tensor()
        else:
            return Tensor(numpy.conj(t._data))
    elif isinstance(t, TensorSum):
        # Conjugation distributes over the sum: conjugate each term of a copy.
        ts = t.copy()
        for term in ts.terms_by_name:
            ts.terms_by_name[term] = conjugate(ts.terms_by_name[term])
        return ts
    elif isinstance(t, TensorNetwork):
        # Conjugating every operand conjugates the contracted network.
        tn = t.copy()
        for node in tn.nodes:
            tn.network.nodes[node]['tensor'] = conjugate(tn.network.nodes[node]['tensor'])
        return tn
    elif isinstance(t, TensorView):
        # NOTE(review): presumably TensorView(t) records a conjugating view of
        # `t` -- confirm against TensorView.__init__. Also note that any other
        # TensorValued subclass falls through and implicitly returns None.
        return TensorView(t)
34 |
35 |
def transpose(t, axes: tuple) -> 'TensorValued':
    """Return the transposition of a `TensorValued` object.

    :param t: the object to transpose.
    :param axes: the transposition on the referred object, given as a
        permutation of index positions (as in `numpy.transpose`).
    :type axes: tuple.
    :returns: :class:`TensorNetwork` -- the transposition of a `TensorValued` object can be readily expressed in terms of a
        `TensorNetwork` object.
    """
    from .tensor_network import TensorNetwork
    # Wrap `t` as a single node with all edges open, then permute the open
    # edges; no data is moved until the network is contracted.
    tn = TensorNetwork()
    tn.add_node(tensor=t, is_open=True)
    tn.open_edges = [tn.open_edges[i] for i in axes]
    return tn
49 |
50 |
def normalize(tv: 'TensorValued') -> 'TensorValued':
    """Return a copy of ``tv`` rescaled to unit Frobenius norm.

    :param tv: The :class:`TensorValued` object to normalize.
    :type tv: :class:`TensorValued`.
    :returns: :class:`TensorValued` -- the normalized :class:`TensorValued`
        object (the tensor product of ``tv`` with the scalar ``1 / ||tv||``).
    """
    scale = 1 / numpy.sqrt(tv.norm_squared)
    return tv * scale
59 |
60 |
class TensorValued(object):
    """Interface for all :class:`TensorValued` objects, including :class:`Tensor`, :class:`TensorNetwork`,
    :class:`TensorSum` and :class:`TensorView`.

    :ivar identifier: unique identifier for each :class:`TensorValued` object.
    :ivar dtype: A :class:`TensorValued` object is homogeneous, and contains elements described by a dtype object.
    """
    # Class-wide counter shared by all subclasses; bumped in __new__ so every
    # instance receives a distinct identifier regardless of how it was built.
    id_count = -1

    def __new__(cls, *args, **kwargs):
        # Increment first so __init__ can read the freshly assigned id.
        TensorValued.id_count += 1
        instance = object.__new__(cls)
        return instance

    def __init__(self, dtype: type = DTYPE) -> None:
        self.identifier = TensorValued.id_count
        self.dtype = dtype

    @property
    def shape(self):
        """The common property of all :class:`TensorValued` classes, giving the bond dimensions of the tensor
        valued object. A tensor valued object is semantically a multi-dimensional array, and its dimensions can be
        expressed as a tuple of integers. The tuple is called the shape of the tensor, whereas the length of the tuple
        is the rank of the tensor.

        In ACQDP, undetermined tensors and undetermined dimensions are allowed. In the former case, the shape of the tensor
        will return `None`; in the latter case, some of the bond_dimensions appearing in the tuple could be `None`.

        :returns: :class:`tuple` or None
        :raises: NotImplementedError, ValueError
        """
        raise NotImplementedError()

    @property
    def is_valid(self) -> bool:
        """The common property of all :class:`TensorValued` classes, indicating whether the :class:`TensorValued` object
        is valid or not. In every step of a program, all existing :class:`TensorValued` object must be valid, otherwise
        an exception should be thrown out.

        :returns: :class:`bool`
        :raises: NotImplementedError
        """
        raise NotImplementedError()

    @property
    def is_ready(self) -> bool:
        """The common property of all :class:`TensorValued` classes, indicating whether the current
        :class:`TensorValued` object is ready for contraction, i.e. whether it semantically represents a tensor with a
        definite value. A `TensorValued` object needs to be ready upon contraction, but needs not to be ready throughout
        the construction.

        :returns: :class:`bool`
        :raises: NotImplementedError()
        """
        raise NotImplementedError()

    def __str__(self) -> str:
        return "Type: " + str(type(self))\
            + ", Shape: " + str(self.shape)

    def __repr__(self) -> str:
        return "Id: " + str(self.identifier) + self.__str__()

    def __eq__(self, other) -> bool:
        """Value equality via full contraction.

        NOTE(review): comparing two distinct objects contracts both operands,
        which can be expensive; also, defining __eq__ without __hash__ makes
        instances unhashable -- confirm no subclass relies on hashing.
        """
        if self is other:
            return True
        elif isinstance(other, TensorValued):
            if self.shape != other.shape:
                return False
            return numpy.allclose(self.contract(), other.contract())
        else:
            return False

    def __mul__(self, other) -> 'TensorValued':
        """Tensor product of two tensors. For tensor multiplications, use `TensorNetwork` classes to specify how indices
        are to be contracted. By default, multiplication creates a `TensorNetwork` object with two operands as two
        components.

        :returns: :class:`TensorNetwork`
        """
        from .tensor_network import TensorNetwork
        tn = TensorNetwork()
        tn.add_node(tensor=self, is_open=True)
        tn.add_node(tensor=other, is_open=True)
        return tn

    # Tensor product is commutative at the network level, so reuse __mul__.
    __rmul__ = __mul__

    def __add__(self, other: 'TensorValued') -> 'TensorValued':
        """Addition of two `TensorValued` objects.

        By default, addition creates a `TensorSum` object with two operands as two components. The two operands of the
        addition must have compatible shapes.
        """
        from .tensor_sum import TensorSum
        tl = TensorSum()
        tl.add_term(tensor=self)
        tl.add_term(tensor=other)
        return tl

    def __neg__(self) -> 'TensorValued':
        """Negation of a `TensorValued` object."""
        return -1 * self

    def __sub__(self, other: 'TensorValued') -> 'TensorValued':
        """Subtraction of `TensorValued` objects."""
        return self + (-other)

    def __invert__(self) -> 'TensorValued':
        """Syntactic sugar for conjugation."""
        return conjugate(self)

    def __mod__(self, axes: tuple):
        """Syntactic sugar for transposition."""
        return transpose(self, axes)

    @property
    def norm_squared(self):
        """Square of the Frobenius norm of the :class:`TensorValued` object.

        Computed by contracting the network formed by the object together
        with its complex conjugate.
        """
        from .tensor_network import TensorNetwork
        tn = TensorNetwork()
        tn.add_node(tensor=self)
        tn.add_node(tensor=~self)
        return tn.contract().real

    def fix_index(self, index, fix_to=0) -> 'TensorValued':
        """Fix the given index to the given value. The result :class:`TensorValued` object would have the same type as
        the original one, with rank 1 smaller than the original.

        :param index: The index to fix.
        :type index: :class:`int`
        :param fix_to: the value to assign to the given index.
        :type fix_to: :class:`int`
        :returns: :class:`TensorValued` -- The :class:`TensorValued` object after fixing the given index.
        :raises: NotImplementedError
        """
        raise NotImplementedError()

    def expand(self):
        """Expand nested tensor network structures in the :class:`TensorValued` object if there is any.

        The base implementation is the identity; subclasses with nested
        structure override it.

        :returns: :class:`TensorValued`.
        """
        return self

    def contract(self, **kwargs) -> numpy.ndarray:
        """Evaluate the :class:`TensorValued` object to a :class:`numpy.ndarray`.

        :returns: :class:`numpy.ndarray`
        :raises: NotImplementedError
        """
        raise NotImplementedError()

    def cast(self, dtype):
        """Cast the tensor valued object to a new underlying dtype.

        NOTE(review): the base implementation only relabels ``self.dtype``
        and does not convert any stored data; subclasses presumably override
        this when conversion is needed -- confirm.
        """
        self.dtype = dtype

    def copy(self) -> 'TensorValued':
        """Make a copy of the current object.

        Data is duplicated only when necessary.
        """
        raise NotImplementedError()

    def __deepcopy__(self, memo) -> 'TensorValued':
        """Make a deepcopy of the current object with all data duplicated."""
        raise NotImplementedError()
227 |
--------------------------------------------------------------------------------
/docsource/source/tensor_network.rst:
--------------------------------------------------------------------------------
1 | Tensor network and its contractions
2 | ===================================
3 |
4 | This page serves as documentation for our main tensor network contraction engine.
5 |
6 | ===========
7 | Overview
8 | ===========
9 |
10 | .. contents::
11 | :depth: 1
12 | :local:
13 | :backlinks: none
14 |
15 | .. highlight:: console
16 |
17 |
18 | =====================
19 | Tensor Valued Objects
20 | =====================
21 |
22 | A tensor is a multi-dimensional array of numbers. The number of dimensions associated to a tensor is called the `rank` of the tensor. For example, a tensor of rank 0 represents a scalar :math:`T`. A tensor of rank 1 is a vector :math:`(T_i)_{i=0}^{d-1}`, and a tensor of rank 2 is a matrix :math:`(T_{ij})_{(i,j)\in [d_0]\times [d_1]}`. The dimension corresponding to each index is called the `bond dimension` associated with that index.
23 |
24 | A tensor network represents a multi-linear map of multiple tensors that results in a new tensor. For example:
25 |
26 | * The inner product of two vectors :math:`U, V` can be expressed as :math:`T = \sum_i U_i\cdot V_i`.
27 | * The outer product of two vectors :math:`U, V` can be expressed as :math:`T_{ij} = U_i\cdot V_j`.
28 | * The element-wise product of two vectors :math:`U, V` can be expressed as :math:`T_i = U_i\cdot V_i`.
29 | * The matrix product of two matrices :math:`M, N` can be expressed as :math:`T_{ij}=\sum_k U_{ik}\cdot V_{kj}`.
30 |
31 | Explicit expressions like above are good for sake of demonstrations, but more complex tensor networks could arise from various scenarios, including many-body physics, machine learning and quantum computation. There are several equivalent ways of expressing a tensor network:
32 |
33 | * Einstein summation. An Einstein summation takes the form `[indices_0],[indices_1],...,[indices_k]->[indices_o]`. Each term on the left hand side corresponds to the indices configuration of an operand, and the term on the right hand side gives the indices of the resulting tensor, also called the open indices. All matching indices are identified with each other, and the closed indices, i.e. the ones that do not appear on the right hand side, are summed over. The four examples above, expressed in terms of Einstein summation, are respectively `i,i->`, `i,j->ij`, `i,i->i`, `ik,kj->ij`. There is a more restrictive interpretation of the Einstein summation where each index appears exactly twice in the expression and the right hand side is thus omitted. We do not adopt such a convention. Moreover, each index can appear multiple times on the right hand side, and does not have to appear on the left hand side as long as each index is associated with a definite bond dimension.
34 | * Attributed Multi-hypergraph. A tensor network can be expressed as a multi-hypergraph, where each node corresponds to a tensor operand, and each hyperedge an index. This provides a graphical, intuitive representation of a tensor network.
35 |
36 | Our package not only supports the definition and manipulation of tensors and tensor networks, but also unary functions acting on a tensor and summation of tensors with the same shape. These extensions help us handle different scenarios where tensors appear naturally in an easier manner.
37 |
38 | .. autoclass:: acqdp.tensor_network.TensorValued
39 | :members: is_ready, is_valid, shape, contract, cast, fix_index
40 |
41 | .. autoclass:: acqdp.tensor_network.Tensor
42 | :members: __init__
43 | :show-inheritance:
44 |
45 | .. autoclass:: acqdp.tensor_network.TensorSum
46 | :members: __init__, add_term, update_term, remove_term
47 | :show-inheritance:
48 |
49 | .. autoclass:: acqdp.tensor_network.TensorView
50 | :members: __init__
51 | :show-inheritance:
52 |
53 | .. autoclass:: acqdp.tensor_network.TensorNetwork
54 | :members: add_node, pop_node, update_node, remove_node, add_edge, rewire, fix_edge, open_edge, close_edge, merge_edges, find_order, compile, contract
55 | :show-inheritance:
56 |
57 | ==================================
58 | Tensor Network Contraction Related
59 | ==================================
60 |
61 | Given the tensor operands and the network hypergraph as inputs, `tensor network contraction` aims at evaluating the tensor network. Tensor network contraction is a computationally hard problem (:math:`\#P`-complete), but cleverly contracting intermediate-sized tensor networks is of great interest.
62 |
63 | Our package aims at efficient `exact` tensor network contraction, i.e. no approximation is made throughout the process. The contraction process consists of the following steps: order finding, runtime compilation and contraction.
64 |
65 | .. image:: flowchart.png
66 | :width: 700
67 | :alt: An illustration of the tensor network contraction routine
68 |
69 | ----------------
70 | Order finding
71 | ----------------
72 |
73 | One way of tensor network contraction is to merge two tensor nodes into one at a time, until only one tensor is left as the outcome. The cost for such method is determined by the stepwise cost. Such method can be formulated as a binary contraction tree with each node representing a pairwise contraction step. Our algorithm uses the method first introduced in [GK20]_ to construct efficient contraction trees using hypergraph decomposition. A preliminary order finding scheme is given in :class:`OrderFinder` when the tensor network is relatively small. :class:`OptEinsumOrderFinder` provides more choices of order finding methods implemented in the `opt_einsum` package. More advanced, hypergraph-decomposition-based order finding approach is given in :class:`KHPOrderFinder`.
74 |
75 | Sometimes, a fully serialized contraction tree can be too costly to be carried out due to time / space constraints; for a complex tensor network, it is possible that whatever contraction tree one finds would result in an intermediate result that is well beyond the space of a laptop. We introduced the idea of index slicing in [CZH+18]_ for parallelization that helps both on running the contraction scheme concurrently, and on relieving the hard space constraint. The slicing method is provided in the class :class:`Slicer`, and integrated to the order finding scheme as :class:`SlicedOrderFinder`.
76 |
77 | As a result of its execution, a call to `find_order` would yield :class:`ContractionScheme`.
78 |
79 | .. autoclass:: acqdp.tensor_network.ContractionScheme
80 |
81 | .. autoclass:: acqdp.tensor_network.OrderFinder
82 | :members: find_order
83 |
84 | .. autoclass:: acqdp.tensor_network.OptEinsumOrderFinder
85 | :show-inheritance:
86 |
87 | .. autoclass:: acqdp.tensor_network.KHPOrderFinder
88 | :show-inheritance:
89 |
90 | .. autoclass:: acqdp.tensor_network.SlicedOrderFinder
91 | :show-inheritance:
92 |
93 | .. autoclass:: acqdp.tensor_network.Slicer
94 | :members: slice
95 |
96 | .. autoclass:: acqdp.tensor_network.MPSlicer
97 | :show-inheritance:
98 |
99 | .. autoclass:: acqdp.tensor_network.LocalOptimizer
100 | :members: optimize
101 |
102 | ------------------------------
103 | Runtime-specific optimizations
104 | ------------------------------
105 |
106 | A good contraction scheme involving sliced edges and a pairwise contraction order might seem appealing in theory, but further work needs to be done to fully unleash the potential of the hardware to carry out such a contraction efficiently. The :class:`Compiler` aims to minimize the inefficiencies introduced by small-tensor operations, which take up a majority of the steps and cannot make full use of the existing hardware. More specifically, :class:`Compiler` performs the following processing to increase the overall efficiency.
107 |
108 | * Pre-computation. A typical contraction order has a stem-branch structure, i.e. it consists of a majority of small tensor multiplications (i.e. branches), and a few operations involving a big tensor absorbing small tensors step by step (i.e. the stem). It is often the case that the stem requires so much memory that slicing becomes necessary, but the branches would then need to be recomputed many times as a result. Although this does not add much to the theoretical cost, it takes up a large amount of the total time. To avoid this, :class:`Compiler` moves the small tensor multiplications *before* slicing. As they only have to be done once prior to all slicing, the inefficiency caused by small tensor operations is almost entirely removed. These small tensor operations are then called pre-computation as they happen before slicing and the contractions on the stem.
109 |
110 | * Branch merging. A typical contraction step on the stem is that the stem tensor (typically a big one) absorbs a small one as a result of contracting the corresponding branch. The branch tensor is sometimes so small that the multiplication cannot make full use of the GPU to achieve high-efficiency tensor contraction. One way to get around this is to slightly relax the floating point operations constraint, and merge two adjacent tensors together to form a bigger tensor, and then contract it to the stem.
111 |
112 |
113 | .. autoclass:: acqdp.tensor_network.ContractionTask
114 | :members: execute
115 |
116 | .. autoclass:: acqdp.tensor_network.Compiler
117 | :members: compile
118 | :undoc-members:
119 |
120 |
121 | ------------
122 | Contraction
123 | ------------
124 |
125 |
126 | Finally, :class:`Contractor` makes use of GPU, with the help of `jax` and `opt_einsum` to further increase the efficiency.
127 |
128 | .. autoclass:: acqdp.tensor_network.Contractor
129 | :members: execute
130 |
131 |
132 |
133 | References
134 | *************************
135 |
136 | .. [GK20] Johnnie Gray, and Stefanos Kourtis, *Hyper-optimized tensor network contraction*, arXiv preprint arXiv:2002.01935, 2020.
137 | .. [CZH+18] Jianxin Chen, Fang Zhang, Cupjin Huang, Michael Newman, and Yaoyun Shi, *Classical simulation of intermediate-size quantum circuits*, arXiv preprint arXiv:1805.01450, 2018.
138 |
139 |
--------------------------------------------------------------------------------
/acqdp/circuit/converter.py:
--------------------------------------------------------------------------------
1 | from .circuit import (Operation,
2 | ImmutableOperation,
3 | PureOperation,
4 | Circuit,
5 | ControlledOperation,
6 | Controlled,
7 | ControlledCircuit,
8 | CompState,
9 | CompMeas,
10 | SuperPosition)
11 |
12 | from acqdp.tensor_network.tensor_valued import TensorValued
13 |
14 | from acqdp.tensor_network.tensor_sum import TensorSum
15 |
16 | from acqdp.tensor_network.tensor_network import TensorNetwork
17 |
18 |
class Converter:
    """Generic converter class, with method mapping a circuit to a tensor network."""
    @classmethod
    def convert_pure(cls, operation: Operation) -> TensorValued:
        """Do a pure conversion for noiseless circuit described as a :class:`Operation` object to a tensor network as a
        :class:`TensorValued` object.

        :param operation: the noiseless circuit to convert.
        :type operation: :class:`Operation`.
        :returns: :class:`TensorValued` -- the tensor network describing the input circuit as a pure operation.
        :raises ValueError: if the operation is noisy, invalid, or of an unsupported type.
        """
        if not operation.is_pure:
            raise ValueError("Invalid operation: trying to do pure conversion for noisy circuit")
        if isinstance(operation, ControlledCircuit):
            # Rebuild the inner circuit with the control distributed onto every
            # gate: qubit 0 carries the control, and the inner circuit's own
            # qubits are shifted up by one position.
            op = operation.circuit
            c = Circuit()
            qubits = op.all_qubits
            for gate in op.operations_by_name:
                c.append(Controlled(op.operations_by_name[gate]["operation"],
                                    conditioned_on=operation.conditioned_on),
                         [0] + [1 + qubits.index(i) for i in op.operations_by_name[gate]['qubits']])
            return c.tensor_pure
        elif isinstance(operation, ControlledOperation):
            # Wrap the control tensor in a single-node network whose open edges
            # list the operation's output indices followed by its input indices.
            tn = TensorNetwork()
            tn.add_node("TC", tensor=operation._tensor_control)
            out_edges = []
            in_edges = []
            # Index groups on the control tensor: control ("c"), output ("o")
            # and input ("i") indices, each returned with its count.
            lst_ctrl, len_ctrl = operation._indices_with_property("^c$")
            lst_out, len_out = operation._indices_with_property(".*o$")
            lst_in, len_in = operation._indices_with_property("^i")
            for i in operation._output_indices[0]:
                if i in lst_ctrl:
                    out_edges.append(lst_ctrl.index(i))
                else:
                    # Non-control outputs come after the control block.
                    out_edges.append(len_ctrl + lst_out.index(i))
            for i in operation._input_indices[0]:
                if i in lst_ctrl:
                    in_edges.append(lst_ctrl.index(i))
                else:
                    # Non-control inputs come after the control and output blocks.
                    in_edges.append(len_ctrl + len_out + lst_in.index(i))
            tn.open_edges = out_edges + in_edges
            return tn
        elif isinstance(operation, PureOperation):
            return operation._tensor_pure
        elif isinstance(operation, SuperPosition):
            # A superposition converts to the weighted sum of the pure tensors
            # of its component operations.
            ts = TensorSum()
            for i in range(len(operation.coefs)):
                ts.add_term(i, operation.coefs[i] * operation.operations[i].tensor_pure)
            return ts
        else:
            if not isinstance(operation, Circuit):
                raise ValueError("Invalid operation: operation is not yet supported")
            # This cannot happen normally because Circuit.is_pure checks
            # Circuit.is_valid, but is left in just in case someone subclasses
            # Circuit and overrides is_pure.
            if not operation.is_valid: # pragma: no cover
                raise ValueError("Invalid operation: trying to convert an invalid circuit into a tensor network")
            # dic tracks, per qubit, the pending edge awaiting an earlier
            # gate's output; dic_out / dic_in collect the dangling output /
            # input edges that become the open edges of the resulting network.
            dic = {}
            dic_in = {}
            dic_out = {}
            tn = TensorNetwork()
            all_edges = set()
            # Walk the circuit backwards in time so each gate's output edge can
            # be matched against the input edge recorded by the gate after it.
            for time_step in list(operation.operations_by_time)[::-1]:
                for idx, gate in enumerate(operation.operations_by_time[time_step]):
                    op = operation.operations_by_name[gate]['operation']
                    qubits = operation.operations_by_name[gate]['qubits']
                    out_edges = []
                    in_edges = []
                    for i in op._output_indices[0]:
                        q = qubits[i]
                        if q not in dic:
                            # No later gate consumes this qubit: the edge stays open.
                            dic_out[q] = (time_step + 1, idx, q)
                            out_edges.append(dic_out[q])
                        else:
                            # Connect to the input edge of the next gate on qubit q.
                            edge = dic.pop(q)
                            out_edges.append(edge)
                            dic_in.pop(q)
                    for i in op._input_indices[0]:
                        q = qubits[i]
                        dic[q] = (time_step, idx, q)
                        in_edges.append(dic[q])
                        dic_in[q] = dic[q]
                    for edge in out_edges + in_edges:
                        if edge not in all_edges:
                            # Every edge is a qubit wire of bond dimension 2.
                            tn.add_edge(edge, bond_dim=2)
                            all_edges.add(edge)
                    if op in CompState:
                        # op is one of the basis states listed in CompState:
                        # fix the output edge to that basis value instead of
                        # adding a node.
                        tn.fix_edge(out_edges[0], CompState.index(op))
                    elif op in CompMeas:
                        # Likewise for basis measurements: fix the input edge.
                        tn.fix_edge(in_edges[0], CompMeas.index(op))
                    else:
                        tn.add_node(gate, out_edges + in_edges, op.tensor_pure)
            # Open edges: all dangling outputs first, then all dangling inputs,
            # each group sorted by qubit.
            tn.open_edges = [dic_out[i] for i in sorted(dic_out)] +\
                [dic_in[i] for i in sorted(dic_in)]
            return tn

    @classmethod
    def convert_control(cls, operation: PureOperation) -> TensorValued:
        """Do a control conversion for a pure operation described as a :class:`Operation` object to a tensor network as
        a :class:`TensorValued` object.

        :param operation: the pure operation to convert.
        :type operation: :class:`Operation`.
        :returns: :class:`TensorValued` -- the control tensor when the operation has explicit control structure,
            otherwise its pure tensor.
        :raises ValueError: if the operation is noisy.
        """
        if not operation.is_pure:
            raise ValueError("Noisy operation does not have pure block-diagonal form")
        if isinstance(operation, ControlledOperation):
            return operation._tensor_control
        else:
            return operation.tensor_pure

    @classmethod
    def convert_density(cls, operation: Operation) -> TensorValued:
        """Do a density conversion for noisy circuit described as a :class:`Operation` object to a tensor network as a
        :class:`TensorValued` object.

        :param operation: the noisy circuit to convert.
        :type operation: :class:`Operation`.
        :returns: :class:`TensorValued` -- the tensor network describing the input circuit as a density operation.
        :raises ValueError: if the operation is invalid or of an unsupported type.
        """
        if isinstance(operation, PureOperation):
            # Density form of a pure operation: the pure tensor together with
            # its complex conjugate, with disjoint open edges for each copy.
            tp = operation.tensor_pure
            tn = TensorNetwork()
            len_tensor = len(tp.shape)
            tn.add_node("TP", list(range(len_tensor)), tp)
            tn.add_node("~TP", list(range(len_tensor, 2 * len_tensor)), (~tp).expand())
            tn.open_edges = list(range(2 * len_tensor))
            return tn
        elif isinstance(operation, ImmutableOperation):
            return operation._tensor_density
        else:
            if not isinstance(operation, Circuit):
                raise ValueError("Invalid operation: operation is not yet supported")
            if not operation.is_valid:
                raise ValueError('Invalid operation')
            # dic tracks, per qubit, the edge produced by the previous gate.
            # Edge names end in 'p' / 'd' to distinguish the primal wire from
            # its conjugate (density) copy.
            dic = {}
            tn = TensorNetwork()
            dic_out = {}
            dic_in = {}
            for time_step in list(operation.operations_by_time):
                for idx, gate in enumerate(operation.operations_by_time[time_step]):
                    op = operation.operations_by_name[gate]["operation"]
                    qubits = operation.operations_by_name[gate]['qubits']
                    out_edges_p = []
                    in_edges_p = []
                    out_edges_d = []
                    in_edges_d = []

                    for i in op._input_indices[0]:
                        q = qubits[i]
                        if q not in dic:
                            # First use of qubit q: its input edges stay open.
                            in_edges_p.append((time_step, idx, q, 'p'))
                            in_edges_d.append((time_step, idx, q, 'd'))
                            dic_in[q] = [time_step, idx, q]
                        else:
                            # Connect to the previous gate's output on qubit q.
                            edge = list(dic.pop(q))
                            in_edges_p.append(tuple(edge + ['p']))
                            in_edges_d.append(tuple(edge + ['d']))
                            dic_out.pop(q)
                    for i in op._output_indices[0]:
                        q = qubits[i]
                        dic[q] = (time_step + 1, idx, q)
                        out_edges_p.append((time_step + 1, idx, q, 'p'))
                        out_edges_d.append((time_step + 1, idx, q, 'd'))
                        dic_out[q] = [time_step + 1, idx, q]
                    tn.add_node(gate, out_edges_p + in_edges_p + out_edges_d + in_edges_d, op.tensor_density)
            # Open edges: primal outputs, primal inputs, then their density
            # counterparts, each group sorted by qubit.
            tn.open_edges = [tuple(dic_out[i] + ['p']) for i in sorted(dic_out)] +\
                   [tuple(dic_in[i] + ['p']) for i in sorted(dic_in)] +\
                   [tuple(dic_out[i] + ['d']) for i in sorted(dic_out)] +\
                   [tuple(dic_in[i] + ['d']) for i in sorted(dic_in)]
            return tn
191 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/local_optimizer.py:
--------------------------------------------------------------------------------
1 | from acqdp.tensor_network.contraction import OrderCounter, ContractionCost
2 | import opt_einsum
3 | import copy
4 | import numpy as np
5 |
6 | ORDER_RESOLVER_KWARGS = {'optimal': 8, 'dp': 20, 'thres': 1e8}
7 |
8 |
class OrderResolver:
    """Interfaces for conveniently converting between different formats of contraction orders. Also serves as a
    converter from non-pairwise contraction orders to pairwise contraction orders.

    :ivar optimal: The maximum number of tensors in a single step of a non-pairwise order, for which brute-force search is
        used to reconstruct the pairwise order.
    :ivar dp: The maximum number of tensors in a single step of a non-pairwise order, for which dynamic programming is used
        to reconstruct the pairwise order.
    :ivar thres: The minimum cost of a non-pairwise contraction order via default path to invoke more advanced search for a
        pairwise order, such as the `dp` or `optimal` methods.
    """

    def __init__(self,
                 optimal=8,
                 dp=20,
                 thres=1e8,
                 **kwargs):
        self.optimal = optimal
        self.dp = dp
        self.thres = thres

    def order_to_contraction_scheme(self, tn, order):
        """Convert a (possibly non-pairwise) contraction order on `tn` into a
        :class:`ContractionScheme` made of pairwise steps only.

        :param tn: the tensor network the order refers to.
        :param order: the contraction order to convert.
        :returns: :class:`ContractionScheme` -- the pairwise order with its accumulated cost.
        """
        subscripts = self.order_to_subscripts(tn, order)
        cost = ContractionCost()
        counter = OrderCounter(order)
        new_order = []
        for o, i in zip(order, subscripts):
            # Resolve the einsum path before entering the try-block so `res`
            # is always bound when the diagnostic print runs. (Previously an
            # IndexError raised inside subscript_to_path caused an
            # UnboundLocalError on `res` that masked the real error.)
            res = self.subscript_to_path(*i)
            try:
                new_order += self.path_to_paired_order(o, res[0], counter)
            except IndexError:
                print(o, i, res[0], res[1])
                raise
            cost += res[1]
        # Imported lazily to avoid a circular import at module load time.
        from acqdp.tensor_network import ContractionScheme
        return ContractionScheme(new_order, cost=cost)

    def order_to_path(self, tn, order):
        """Render a pairwise order as an `opt_einsum`-style equation and path.

        :param tn: the tensor network the order refers to.
        :param order: the pairwise contraction order.
        :returns: tuple -- ``(equation, path, symbol_to_edge)`` where `path` is a
            list of index pairs into the running operand list and
            `symbol_to_edge` maps each einsum symbol back to its edge name.
        """
        tn_copy = tn.copy()
        tn_copy.fix()
        edges_list = tn_copy.edges_by_name
        nodes_list = tn_copy.nodes_by_name
        lhs = [
            ''.join(
                opt_einsum.get_symbol(edges_list.index(e))
                for e in tn_copy.network.nodes[(0, n)]['edges'])
            for n in nodes_list
        ]
        rhs = ''.join(
            opt_einsum.get_symbol(edges_list.index(e))
            for e in tn_copy.open_edges)
        path = []
        for o in order:
            i, j = nodes_list.index(o[0][0]), nodes_list.index(o[0][1])
            path.append((i, j))
            # Mimic opt_einsum's operand bookkeeping: remove the two
            # contracted operands and append the intermediate at the end.
            nodes_list.pop(max(i, j))
            nodes_list.pop(min(i, j))
            nodes_list.append(o[1])
        return ','.join(lhs) + '->' + rhs, path, {
            opt_einsum.get_symbol(edges_list.index(e)): e for e in edges_list
        }

    def order_to_subscripts_tree(self, tn, order):
        """Return per-step subscripts and the full order via a :class:`ContractionTree`."""
        from acqdp.tensor_network.contraction_tree import ContractionTree
        tree = ContractionTree.from_order(tn, order)
        return tree.full_subscripts, tree.full_order

    def order_to_subscripts(self, tn, order):
        """Compute the einsum subscripts of every step of `order` on a copy of `tn`.

        :param tn: the tensor network the order refers to.
        :param order: the contraction order; a step whose result name is '#'
            terminates the walk early.
        :returns: list -- one subscripts entry per processed step.
        """
        tn_copy = tn.copy()
        subs = []
        for o in order:
            if o[1] == '#':
                break
            try:
                _, stn = tn_copy.encapsulate(o[0], stn_name=o[1])
                subs.append(stn.subscripts(o[0]))
            except Exception as e:
                print(e)
                raise
        if len(order) > 0:
            # The final step is taken on the remaining outer network.
            subs.append(tn_copy.subscripts(order[-1][0]))
        return subs

    def print_order(self, tn, order):
        """Return a human-readable, step-by-step description of `order`.

        Each step lists the index counts of the two operands and the result in
        the form ``lhs[a x c x d] * rhs[c x b x d] -> out[a x b x d]``.
        """
        subs, order = self.order_to_subscripts_tree(tn, order)
        steps = []
        for i, sub in enumerate(subs):
            a = len(set(sub[0][0]).difference(sub[0][1]))
            b = len(set(sub[0][1]).difference(sub[0][0]))
            c = len(set(sub[0][1]).intersection(sub[0][0]).difference(sub[1]))
            d = len(set(sub[0][1]).intersection(sub[0][0]).intersection(sub[1]))
            steps.append(
                f'Step {i}: <{a+b+c+d}> {sub[0][0]}[{a} x {c} x {d}] * {sub[0][1]}[{c} x {b} x {d}]'
                f' -> {sub[1]}[{a} x {b} x {d}] \n {order[i]}'
            )
        return '\n'.join(steps)

    def subscript_to_path(self, lhs, rhs, shapes):
        """Find a pairwise contraction path for one (multi-tensor) einsum step.

        A quick ``auto`` path is computed first; if it is expensive enough
        (above ``self.thres``) and the operand count is small enough, a more
        thorough ``optimal`` or ``dp`` search is attempted.

        :param lhs: list of subscript strings, one per operand.
        :param rhs: subscript string of the result.
        :param shapes: shapes of the operands.
        :returns: tuple -- ``(path, ContractionCost)``.
        """
        optimize = None
        try:
            path, path_info = opt_einsum.contract_path(','.join(lhs) + '->' + rhs,
                                                       *shapes,
                                                       shapes=True,
                                                       optimize='auto')
        except ValueError as e:
            print(','.join(lhs) + '->' + rhs, shapes)
            raise
        if len(lhs) > 2 and path_info.opt_cost > self.thres:
            if len(lhs) < self.optimal:
                optimize = 'optimal'
            elif len(lhs) < self.dp:
                optimize = 'dp'
        if optimize is not None:
            path, path_info = opt_einsum.contract_path(','.join(lhs) + '->' + rhs,
                                                       *shapes,
                                                       shapes=True,
                                                       optimize=optimize)
        return path, ContractionCost(path_info.opt_cost,
                                     path_info.largest_intermediate)

    def path_to_paired_order(self, order, path, counter=None):
        """Expand one multi-tensor step into a sequence of pairwise steps.

        :param order: a single step ``[inputs, result]``; `inputs` is copied,
            not mutated.
        :param path: an opt_einsum-style path of 1- or 2-tuples of operand
            positions.
        :param counter: object whose ``cnt`` attribute yields fresh names for
            intermediate tensors; a new :class:`OrderCounter` is created when
            omitted.
        :returns: list -- pairwise steps whose last step produces ``order[1]``.
        """
        lhs, rhs = copy.copy(order[0]), order[1]
        if counter is None:
            counter = OrderCounter()
        new_order = []
        for idx, i in enumerate(path):
            try:
                if len(i) == 1:
                    if idx != len(path) - 1:
                        # Single-operand step mid-path: rotate it to the back,
                        # matching opt_einsum's operand ordering.
                        lhs.append(lhs.pop(i[0]))
                    else:
                        new_order.append([[lhs.pop(i[0])], rhs])
                else:
                    new_order.append([[lhs[i[0]], lhs[i[1]]], counter.cnt])
                    lhs.pop(max(i[0], i[1]))
                    lhs.pop(min(i[0], i[1]))
                    lhs.append(new_order[-1][1])
            except IndexError:
                print(order[0], i)
                raise
        # The final intermediate is renamed to the step's declared result.
        new_order[-1][1] = rhs
        return new_order
151 |
152 |
153 | defaultOrderResolver = OrderResolver()
154 |
155 |
class LocalOptimizer:
    """

    :class:`LocalOptimizer` takes a pairwise contraction tree and do local optimization on the tree. Note that a connected
    subgraph of the contraction tree represents a sequence of intermediate steps that take some (potentially intermediate)
    tensors as input, and outputs a later intermediate tensor. This sequence of steps can be optimized based on solely the
    shapes of the input and output tensors associated to the subgraph, without looking at the rest of the contraction
    tree. This allows local reorganization of the tree.

    Each iteration of local optimization consists of two steps: In the first step, the contraction tree is divided into
    non-overlapping subgraphs each up to a given size, with the internal connections in the subgraphs removed. The internal
    connections are then reconstructed in the second phase using accelerated optimum contraction tree finding approach. The
    iterations can be repeated by each time randomly selecting a different patch of subgraph division.

    :ivar size: The maximum number of nodes to be included in one subgraph.
    :ivar resolver: The resolver to help reconstruct the pairwise order from subgraph divisions.

    """

    def __init__(self,
                 size=13,
                 **kwargs):
        # Maximum number of input tensors allowed in one merged (flattened) step.
        self.size = size
        # Resolver used to re-derive a pairwise order after flattening.
        self.resolver = OrderResolver(**kwargs.get('resolver_params', {}))

    def _flatten_order(self, tn, order, offset=0):
        """Merge producing steps into their consumers, in place, turning parts
        of the pairwise `order` into multi-tensor steps of at most
        ``self.size`` inputs.

        :param order: the pairwise order; mutated in place and also returned.
        :param offset: number of merge opportunities to skip before merging,
            used by :meth:`optimize` to randomize which patch is flattened.
        :returns: the flattened (possibly non-pairwise) order.
        """
        tn_copy = tn.copy()
        tn_copy.fix()
        # size_dic maps each tensor name appearing in the order to the number
        # of indices it carries, taken from the per-step subscripts.
        size_dic = {}
        cnt_offset = 0
        subs = self.resolver.order_to_subscripts(tn_copy, order)
        for i, o in enumerate(order):
            size_dic[o[1]] = len(subs[i][1])
            for j, k in enumerate(o[0]):
                size_dic[k] = len(subs[i][0][j])
        # Walk the order from the last step backwards; cnt is a negative index.
        cnt = -1
        while -cnt < len(order):
            # rhs_list maps an intermediate's name to (position of the step
            # producing it, that step's inputs), over all steps before order[cnt].
            rhs_list = {}
            for i, o in enumerate(order[:cnt]):
                rhs_list[o[1]] = (i, o[0])
            o = order[cnt]
            lhs = o[0]
            if len(lhs) >= self.size:
                # This step is already at the size budget; move on.
                cnt -= 1
                continue
            # choose the largest tensor
            sizes = {k: size_dic[k] for k in o[0]}
            largest_list = sorted(sizes, key=lambda x: sizes[x])[::-1]
            for l in largest_list:
                # Skip inputs that are not produced within this order, are
                # small (< 7 indices), or whose merge would exceed the budget.
                if (l not in rhs_list) or (sizes[l] < 7) or (
                        len(rhs_list[l][1]) + len(lhs) > self.size):
                    continue
                if cnt_offset < offset:
                    # Skip this merge opportunity to randomize the patch.
                    cnt_offset += 1
                    continue
                else:
                    # Inline the producing step: replace input l by the inputs
                    # that produced it, then drop the producing step.
                    o[0].remove(l)
                    o[0] += rhs_list[l][1]
                    order.pop(rhs_list[l][0])
                    break
            else:
                # No merge applied at this step; move to the previous one.
                cnt -= 1
        return order

    def optimize(self, tn, order, num_iter):
        """Run `num_iter` rounds of flatten-then-resolve local optimization.

        :param tn: the tensor network the order refers to.
        :param order: a contraction scheme whose ``order`` attribute holds the
            current pairwise order.
        :param num_iter: number of optimization iterations.
        :returns: the contraction scheme after the final iteration.
        """
        for _ in range(num_iter):
            new_order = self._flatten_order(tn,
                                            order.order,
                                            offset=np.random.randint(
                                                self.size))
            order = self.resolver.order_to_contraction_scheme(tn, new_order)
        return order
228 |
--------------------------------------------------------------------------------
/docsource/source/tutorial.rst:
--------------------------------------------------------------------------------
1 | Tutorial
2 | ===========
3 |
4 | In the tutorial, we will show you two examples of using `ACQDP`. The first one is to use the tensor network functionalities to experiment with tensor network states. The second one is to use the circuit library to experiment with the fidelity of GHZ states under various noise models.
5 |
6 | .. contents::
7 | :depth: 1
8 | :local:
9 | :backlinks: none
10 |
11 | .. highlight:: console
12 |
13 | MPS State
14 | ---------
15 | In this section, we create two random ring-MPS states and calculate their fidelity.
16 |
17 | An MPS state is a quantum state formulated as a centipede-shaped tensor network. We first define a random MPS state on a ring, with bond dimension and number of qubits given as the input:
18 |
19 | .. code-block:: python
20 |
21 | from acqdp.tensor_network import TensorNetwork, normalize
22 | import numpy
23 |
24 | def MPS(num_qubits, bond_dim):
25 | a = TensorNetwork()
26 | for i in range(num_qubits):
27 | tensor = numpy.random.normal(size=(2, bond_dim, bond_dim)) +\
28 | 1j * numpy.random.normal(size=(2, bond_dim, bond_dim))
29 | a.add_node(i, edges=[(i, 'o'), (i, 'i'), ((i+1) % num_qubits, 'i')], tensor=tensor)
30 | a.open_edge((i, 'o'))
31 | return normalize(a)
32 |
33 | This constructs an MPS state of the following form, where the internal connections `(i, 'i')` have bond dimension `bond_dim` and the outgoing wires `(i, 'o')` have bond dimension 2 representing a qubit system.
34 |
35 | .. image:: MPS.pdf
36 | :width: 700
37 | :alt: An illustration of a ring-MPS state
38 |
39 | Note that normalize() computes the Frobenius norm of the tensor network, which already involves tensor network contraction.
40 |
41 | For a further break down, in the code we first defined a tensor network:
42 |
43 | .. code-block:: python
44 |
45 | a = TensorNetwork()
46 |
47 | Each tensor `i` is of shape `(2, bond_dim, bond_dim)`. We add the tensor into the tensor network by specifying its connection in the tensor network:
48 |
49 | .. code-block:: python
50 |
51 | a.add_node(i, edges=[(i, 'o'), (i, 'i'), ((i+1) % num_qubits, 'i')], tensor=tensor)
52 |
53 | Finally, the outgoing edges `[(i, 'o')]` need to be opened:
54 |
55 | .. code-block:: python
56 |
57 | a.open_edge((i, 'o'))
58 |
59 | This allows us to get two random MPS states:
60 |
61 | .. code-block:: python
62 |
63 | a = MPS(10, 3)
64 | b = MPS(10, 3)
65 |
66 | To calculate the fidelity of the two states, we put them into a single tensor network representing their inner product:
67 |
68 | .. code-block:: python
69 |
70 | c = TensorNetwork()
71 | c.add_node('a', range(10), a)
72 | c.add_node('b', range(10), ~b)
73 |
74 | Here, the tensor network `c` is constructed by adding the two tensor valued objects `a` and `~b` into the tensor network. The outgoing edges of `a` are identified as 0 to 9 in `c`, and that matches the outgoing edges of `b`. As no open edges are indicated in `c`, it sums over all the indices 0 to 9 and yields the inner product of `a` and `b`. (Note that the complex conjugate of b is added instead of b itself.)
75 |
76 | This tensor network `c` takes two tensor valued objects `a` and `b` which are not necessarily tensors. This is a feature of the ACQDP: components in tensor networks do not have to be tensors, which allows nested structures of tensor networks to be easily constructed. The fidelity is then the absolute value of the inner product:
77 |
78 | .. code-block:: python
79 |
80 | print("Fidelity = {}".format(numpy.abs(c.contract()) ** 2))
81 |
82 |
83 | GHZ State
84 | ---------
85 | The next example features our circuit module, which allows simulation of quantum computation supported by the powerful tensor network engine. A preliminary noise model is also included.
86 |
87 | An :math:`n`-qubit GHZ state, also known as a “Schroedinger cat state” or just “cat state”, is defined as :math:`\frac{1}{\sqrt2}\left(|0\rangle^{\otimes n}+|1\rangle^{\otimes n}\right)`. An :math:`n`-qubit GHZ state can be prepared by setting the first qubit to :math:`|+\rangle`, and applying CNOT gates sequentially from the first qubit to all the other qubits. In ACQDP, we first define the circuit preparing the GHZ state:
88 |
89 | .. code-block:: python
90 |
91 | from acqdp.circuit import Circuit, HGate, CNOTGate, ZeroState
92 |
93 | def GHZState(n):
94 | a = Circuit().append(ZeroState, [0]).append(HGate, [0])
95 | for i in range(n - 1):
96 | a.append(ZeroState, [i + 1])
97 | a.append(CNOTGate, [0, i + 1])
98 | return a
99 |
100 | A GHZ state then can be constructed upon calling :math:`GHZState(n)`. A 4-qubit GHZ state is then
101 |
102 | .. code-block:: python
103 |
104 | a = GHZState(4)
105 |
106 | `a` is right now a syntactic representation of the GHZ state as a gate sequence. To examine the state as a tensor representing the pure state vector,
107 |
108 | .. code-block:: python
109 |
110 | a_tensor = a.tensor_pure
111 | print(a_tensor.contract())
112 |
113 | gives the output
114 |
115 | .. code-block:: python
116 |
117 | array([[[[0.70710678, 0. ],
118 | [0. , 0. ]],
119 |
120 | [[0. , 0. ],
121 | [0. , 0. ]]],
122 |
123 |
124 | [[[0. , 0. ],
125 | [0. , 0. ]],
126 |
127 | [[0. , 0. ],
128 | [0. , 0.70710678]]]]).
129 |
130 | The `tensor_pure` of a circuit object returns the tensor network representing it as a pure operation, i.e. a state vector, an isometry, or a projective measurement. In this case we do get the state vector; the density matrix will be returned if we choose to contract the `tensor_density`.
131 |
132 | We are now interested in how the fidelity is preserved under simplified noise models.
133 |
134 | .. code-block:: python
135 |
136 | from acqdp.circuit import add_noise, Depolarization
137 | b = add_noise(a, Depolarization(0.01))
138 |
139 | The quantum state `b` representing noisy preparation of the GHZ state is no longer pure. To compute the fidelity of `b` and `a`, we compute the probability of postselecting `b` on the state `a`, i.e. concatenate `b` with `~a`:
140 |
141 | .. code-block:: python
142 |
143 | c = (b | ~a)
144 | print(c.tensor_density.contract())
145 |
146 | which gives the result `0.7572548016539656`.
147 |
148 | The landscape of the fidelity with respect to the depolarization strength is given in the following figure:
149 |
150 | .. plot::
151 |
152 | import matplotlib.pyplot as plt
153 | import numpy as np
154 | x = np.arange(0, 0.25, 0.01)
155 | y10 = [1.0000000000000002, 0.47542461105055484, 0.23487071438231183, 0.12115735903392826, 0.0652719534015429, 0.036617680257557426, 0.021320371501238007, 0.012865239407674912, 0.008058243176286981, 0.005262103481307746, 0.0036037915311682234, 0.002603081578208944, 0.0019895879912888935, 0.0016082666392452137, 0.001368731067752561, 0.0012173606455788866, 0.001121708636479879, 0.001061702644494669, 0.0010246415790509495, 0.0010023252937947874, 0.0009893837155440777, 0.0009822806445391288, 0.0009786976371788851, 0.0009771335153513456, 0.0009766284332079827]
156 | y5 = [1.0000000000000002, 0.6999844145588493, 0.4942690316849154, 0.35324185097666616, 0.25638313196195456, 0.1895935117363198, 0.14325927357443888, 0.11086721247093151, 0.08802473314645329, 0.07177520425921743, 0.060125720084480076, 0.05172560640086923, 0.04565037199901648, 0.041258297754298824, 0.03809626462551216, 0.035838408954879976, 0.03424630001392201, 0.033143002111564755, 0.032395966801493, 0.03190548171704453, 0.031596601835520065, 0.03141327605432126, 0.03131388459310738, 0.03126771494539828, 0.0312520931895467]
157 | y9 = [1.0000000000000002, 0.5132381922266505, 0.27166900372328906, 0.14908909496638956, 0.08499787250437944, 0.0503080570674666, 0.030859873469816123, 0.01960047090479715, 0.01290249474675171, 0.008829579127738789, 0.0063089453125740646, 0.0047265524768867915, 0.003721592330004327, 0.0030776535845843216, 0.0026627047793805945, 0.002394907394164172, 0.0022227022250304526, 0.0021130478204492353, 0.0020444111128750074, 0.002002555102440312, 0.0019779765982536465, 0.0019643161156803883, 0.0019573399295379776, 0.0019542600701634096, 0.0019532566580686113]
158 | y8 = [1.0000000000000002, 0.5542898030047229, 0.31472871323487583, 0.18401831669889898, 0.11114615113427825, 0.06942896286520428, 0.04485003004992563, 0.029956265215634527, 0.020702906622970918, 0.014834003362939607, 0.011051355010576764, 0.008584138930327863, 0.006961588246175839, 0.005889338848034959, 0.005179768537503058, 0.004711361749931511, 0.004404297210961834, 0.0042054590581549825, 0.004079094318600177, 0.004000921928759586, 0.003954371863209565, 0.003928141910971797, 0.003914569170475958, 0.003908506219259532, 0.003906512899442519]
159 | y7 = [1.0000000000000002, 0.5988759886636054, 0.36520885830779715, 0.22787751587652708, 0.14603245642264728, 0.09634967216232196, 0.06553695207133448, 0.04599360960731351, 0.0333314264793019, 0.024975471552979505, 0.019381152335915943, 0.015597642695686274, 0.013023633249363637, 0.011269075600120797, 0.010075326362177048, 0.009267762911382808, 0.00872681002216888, 0.008369704573638986, 0.008138729130284681, 0.007993461983708823, 0.007905579323118082, 0.00785530286847755, 0.007828916935169336, 0.007816984595525935, 0.007813024965493753]
160 | y6 = [1.0000000000000002, 0.6473219489080633, 0.42449925291790536, 0.2831859627887721, 0.19290889674361097, 0.13461252752812014, 0.09645080103190168, 0.07108101348153528, 0.05394736164157386, 0.04220677469037928, 0.03406511183233029, 0.028371421880545485, 0.02437196355627255, 0.021561679208785108, 0.019594356328221877, 0.018227799850946545, 0.017289803616474746, 0.016656516040107903, 0.01623827845499067, 0.015970060933913902, 0.01580480241205252, 0.015708638639341736, 0.015657390490663555, 0.01563391348132136, 0.015626048264106986]
161 | plt.xlabel('Noise level')
162 | plt.ylabel('Fidelity')
163 | plt.plot(x, y5, label='5-qubit GHZ Circuit')
164 | plt.plot(x, y6, label='6-qubit GHZ Circuit')
165 | plt.plot(x, y7, label='7-qubit GHZ Circuit')
166 | plt.plot(x, y8, label='8-qubit GHZ Circuit')
167 | plt.plot(x, y9, label='9-qubit GHZ Circuit')
168 | plt.plot(x, y10, label='10-qubit GHZ Circuit')
169 | plt.title("Demonstration of Noise Model Study on the ACQDP")
170 | plt.legend()
171 | plt.show()
172 |
--------------------------------------------------------------------------------
/tests/test_tensor_network.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import numpy
3 | import unittest
4 | from acqdp.tensor_network import TensorNetwork
5 |
6 |
class TensorNetworkTestCase(unittest.TestCase):
    """Unit tests for basic :class:`TensorNetwork` construction, manipulation
    and contraction behavior."""

    def test_initialization(self):
        # An opened edge with no attached node yet reports None in the shape;
        # attaching a node on that edge resolves its bond dimension.
        a = TensorNetwork()
        a.add_node(1, [0], numpy.array([1, 2]))
        a.add_node(2, [0], numpy.array([2, 3]))
        a.open_edge(0)
        a.open_edge(1)
        a.open_edge(0)
        self.assertEqual(a.shape, (2, None, 2))
        a.add_node(3, [0, 1], numpy.array([[1, 2], [3, 4]]))
        self.assertEqual(a.shape, (2, 2, 2))

    def test_mps(self):
        # Ring MPS on 10 sites; its norm squared must be invariant under
        # expand / encapsulate restructuring of the doubled network.
        c = TensorNetwork()
        for i in range(10):
            c.add_node(i, [i, (i, 'o'), (i + 1) % 10], numpy.ones((2, 3, 2)))
            c.open_edge((i, 'o'))
        self.assertEqual(c.shape, tuple([3] * 10))

        d = TensorNetwork()
        d.add_node('C', tensor=c)
        d.add_node('~C', tensor=~c)
        norm_sq = c.norm_squared
        self.assertAlmostEqual(norm_sq, d.contract())
        d.expand(['C'])
        # d.raw_data = None
        self.assertAlmostEqual(norm_sq, d.contract())
        d.expand(['~C'])
        # d.raw_data = None
        self.assertAlmostEqual(norm_sq, d.contract())
        d.encapsulate([('C', i) for i in range(10)])
        # d.raw_data = None
        self.assertAlmostEqual(norm_sq, d.contract())
        d.encapsulate([('~C', i) for i in range(10)])
        # d.raw_data = None
        self.assertAlmostEqual(norm_sq, d.contract())

    def test_update_nodes(self):
        # A 3x3 periodic PEPS built without tensors: the network stays valid
        # but not ready until every node has been given a tensor.
        peps = TensorNetwork()
        for i in range(3):
            for j in range(3):
                peps.add_node((i, j), [(i, j, 'h'), ((i + 1) % 3, j, 'h'), (i, j, 'v'), (i, (j + 1) % 3, 'v'), (i, j, 'o')])
                peps.open_edge((i, j, 'o'))

        self.assertEqual(peps.shape, tuple([None] * 9))

        # Rank-5 "diagonal" tensor: nonzero only on all-0 and all-1 indices.
        diagonal = numpy.zeros((2, 2, 2, 2, 2))
        diagonal[0, 0, 0, 0, 0] = 1
        diagonal[1, 1, 1, 1, 1] = 1

        ready_nodes = []

        for node in peps.nodes_by_name:
            self.assertTrue(peps.is_valid)
            self.assertFalse(peps.is_ready)
            ready_nodes.append(node)
            peps.update_node(node, diagonal)

        self.assertTrue(peps.is_ready)
        self.assertTrue(peps.shape == tuple([2] * 9))

    def test_copy(self):
        # copy() shares the underlying tensor data; deepcopy() duplicates it.
        # Both must receive fresh identifiers.
        a = TensorNetwork()
        a.add_node(0, [0, 1], numpy.random.rand(10, 8))
        a.add_node(1, [1, 2], numpy.random.rand(8, 9))
        a.add_node(2, [2, 0], numpy.random.rand(9, 10))
        b = a.copy()
        c = deepcopy(a)
        self.assertNotEqual(b.identifier, a.identifier)
        self.assertNotEqual(c.identifier, a.identifier)

        node = list(b.nodes_by_name)[0]
        self.assertTrue(a.nodes[(0, node)]['tensor']._data is b.nodes[(0, node)]['tensor']._data)
        self.assertFalse(a.nodes[(0, node)]['tensor']._data is c.nodes[(0, node)]['tensor']._data)

        # Updating a node of the copy must not change the original's value.
        b.update_node(0, numpy.random.rand(10, 8))
        self.assertNotEqual(a.contract(), b.contract())

    def test_shape(self):
        # Each open_edge call appends that edge's bond dimension to the shape,
        # in order, including repeats of the same edge.
        a = TensorNetwork()
        a.add_node(0, [0, 1], numpy.zeros((2, 3)))
        self.assertEqual((), a.shape)
        sp = []
        for _ in range(1000):
            o = numpy.random.randint(2)
            a.open_edge(o)
            sp.append(o + 2)
        self.assertEqual(tuple(sp), a.shape)

    def test_basic_contraction(self):
        # Contraction of small closed networks against hand-computed values.
        a = TensorNetwork()
        a.add_node(0, [0, 1], numpy.ones((2, 2)))
        self.assertTrue(numpy.isclose(a.contract(), 4 + 0j))
        a.add_node(1, [0, 1], numpy.ones((2, 2)))
        self.assertTrue(numpy.isclose(a.contract(), 4 + 0j))
        a.update_node(1, numpy.array([[70168, 52 * 1j], [65.77, -1e-3]]))
        self.assertTrue(numpy.isclose(a.contract(), 70233.769 + 52 * 1j))

    def test_matmul(self):
        # A chain of 100 random 2x2 matrices with only the chain's two ends
        # open contracts to their ordered matrix product.
        a = TensorNetwork()
        res = numpy.eye(2)
        a.open_edge(0)
        for i in range(100):
            random_mat = numpy.random.rand(2, 2)
            a.add_node(i, [i, i + 1], random_mat)
            if i > 0:
                a.close_edge(i)
            a.open_edge(i + 1)
            res = res.dot(random_mat)
        self.assertTrue(numpy.allclose(res, a.contract()))

    def test_single_tensor(self):
        # Contracting a single fully-open tensor (here with the 'khp'
        # contractor) returns the tensor itself.
        a = TensorNetwork()
        a.add_node('X', [0], numpy.array([1, 0]), is_open=True)
        self.assertTrue(numpy.allclose(a.contract('khp'), [1, 0]))

    def test_transpose(self):
        # The % operator permutes the open edges like numpy.transpose.
        a = TensorNetwork()
        b = numpy.random.rand(3, 4, 5)
        a.add_node(0, [0, 1, 2], b)
        a.open_edge(0)
        a.open_edge(1)
        a.open_edge(2)
        perm = (2, 0, 1)
        self.assertTrue(numpy.allclose((a % perm).contract(),
                                       numpy.transpose(b, perm)))

    def test_fix_index(self):
        # After fixing 15 of the 30 opened indices, the remaining network must
        # match an explicit einsum of three delta-like tensors.
        a = TensorNetwork()
        a.add_edge(1, bond_dim=2)
        a.add_edge(2, bond_dim=2)
        a.add_edge(3, bond_dim=2)
        for i in range(10):
            a.open_edge(1)
            a.open_edge(2)
            a.open_edge(3)
        for i in range(5):
            a.fix_index(0)
            a.fix_index(10 - i)
            a.fix_index(20 - 2 * i)
        self.assertEqual(a.shape, tuple([2] * 15))
        c = numpy.zeros((2, 2, 2, 2, 2))
        c[0, 0, 0, 0, 0] = 1
        data = numpy.einsum('ABCDE, FGHIJ, KLMNO -> KAFLBCGMDHINEJO', c, c, c)
        self.assertTrue(numpy.allclose(a.contract(), data))

    def test_fix_nested_edge(self):
        # A bare edge opened twice behaves as the 2x2 identity; fixing the
        # edge on the outermost of three nested networks projects it to
        # [[1, 0], [0, 0]].
        a = TensorNetwork()
        a.add_edge(0, bond_dim=2)
        a.open_edge(0)
        a.open_edge(0)
        self.assertTrue(numpy.allclose(a.contract(), numpy.array([[1, 0], [0, 1]])))
        b = TensorNetwork()
        b.add_node(0, [0, 1], a)
        b.open_edge(0)
        b.open_edge(1)
        c = TensorNetwork()
        c.add_node(0, [0, 1], b)
        c.open_edge(0)
        c.open_edge(1)
        c.fix_edge(0)
        self.assertTrue(numpy.allclose(c.contract(), numpy.array([[1, 0], [0, 0]])))

    def test_fix_nested_edge_from_inside(self):
        # Same as test_fix_nested_edge, but the edge is fixed on the innermost
        # network and the effect must propagate out to the enclosing one.
        a = TensorNetwork()
        a.add_edge(0, bond_dim=2)
        a.open_edge(0)
        a.open_edge(0)
        b = TensorNetwork()
        b.add_node(0, [0, 1], a)
        b.open_edge(0)
        b.open_edge(1)
        c = TensorNetwork()
        c.add_node(0, [0, 1], b)
        c.open_edge(0)
        c.open_edge(1)
        a.fix_edge(0)
        self.assertTrue(numpy.allclose(c.contract(), numpy.array([[1, 0], [0, 0]])))
186 |
187 |
class TensorNetworkGraphTestCase(unittest.TestCase):
    """Graph-level queries and manipulations on a random bipartite tensor network."""

    def setUp(self):
        # Deterministic random network: 10 edges (each opened with prob. 1/2)
        # and 10 nodes whose incidence is given by the random 0/1 matrix
        # self.bipart (bipart[node][edge] == 1 <=> node touches edge).
        numpy.random.seed(517100123)
        self.a = TensorNetwork()
        self.open_edges = []
        self.closed_edges = []
        for i in range(10):
            self.a.add_edge(i, bond_dim=2)
            if numpy.random.randint(2):
                self.a.open_edge(i)
                self.open_edges.append(i)
            else:
                self.closed_edges.append(i)
        self.bipart = numpy.random.randint(2, size=(10, 10))
        for i in range(10):
            edges = numpy.where(self.bipart[i])[0]
            self.a.add_node(i,
                            edges,
                            numpy.random.rand(*([2] * len(edges))))

    def test_graph_properties(self):
        """Incidence queries on the network agree with the reference matrix self.bipart."""
        self.assertTrue(set(self.a.open_edges) == set(self.open_edges))
        self.assertTrue(self.a.closed_edges_by_name == set(self.closed_edges))

        # Edges touched by a random subset of nodes.
        nodes_loc = list(numpy.where(numpy.random.randint(2, size=10))[0])
        edges_from_nodes = set()
        for node in nodes_loc:
            edges_from_nodes |= set(numpy.where(self.bipart[node])[0])
        self.assertTrue(self.a.edges_by_name_from_nodes_by_name(nodes_loc) == edges_from_nodes)

        # Closed edges all of whose endpoints lie inside the node subset.
        closed_edges_from_nodes = set()
        for edge in edges_from_nodes:
            if set(numpy.where(self.bipart[:, edge])[0]).issubset(nodes_loc):
                if edge not in self.open_edges:
                    closed_edges_from_nodes.add(edge)
        self.assertTrue(self.a.closed_edges_by_name_from_nodes_by_name(nodes_loc) == closed_edges_from_nodes)

        # Nodes touched by a random subset of edges.
        edges_loc = list(numpy.where(numpy.random.randint(2, size=10))[0])
        nodes_from_edges = set()
        for edge in edges_loc:
            nodes_from_edges |= set(numpy.where(self.bipart[:, edge])[0])
        self.assertTrue(self.a.nodes_by_name_from_edges_by_name(edges_loc) == nodes_from_edges)

    def test_graph_manipulation(self):
        """Merging edges, rewiring them back, and popping/re-adding a node all
        preserve the contraction value."""
        data = self.a.contract()
        closed_edges = list(self.a.closed_edges_by_name)
        self.a.merge_edges(closed_edges, "merge")
        for edge in closed_edges:
            # Merged-away edges either disappear or are left as empty edge nodes.
            self.assertTrue((edge not in self.a.edges_by_name) or (len(self.a.network.nodes[(1, edge)]) == 0))
        # Rewire every leg that now points at 'merge' back to its original edge.
        for node in range(10):
            edge_list = numpy.where(self.bipart[node])[0]
            for i in range(len(edge_list)):
                if self.a.network.nodes[(0, node)]['edges'][i] == 'merge':
                    self.a.rewire(node, i, edge_list[i])

        self.assertTrue(numpy.allclose(self.a.contract(), data))
        node = numpy.random.randint(10)
        pop = self.a.pop_node(node)
        self.a.add_node(node, pop['edges'], pop['tensor'])
        self.assertTrue(numpy.allclose(self.a.contract(), data))

    def test_khp(self):
        """The KaHyPar-based contractor agrees with the default contractor."""
        data = self.a.contract()
        data_khp = self.a.contract('khp')
        self.assertTrue(numpy.allclose(data, data_khp))
253 |
--------------------------------------------------------------------------------
/demo/QEC/noise_model.py:
--------------------------------------------------------------------------------
1 | from acqdp.tensor_network import TensorNetwork
2 | from acqdp.circuit import CZGate, Channel, Circuit, Depolarization, Diagonal, HGate, Measurement, State
3 | import numpy as np
4 |
# Default physical noise parameters for the surface-code noise model.
# NOTE(review): durations (T_g_1Q, tau_m, ...) are multiplied directly by the
# rates below (see IdleGate), so all time-like entries must share one unit —
# presumably nanoseconds; confirm against the callers.
default_params = {
    'T_1_inv': 1 / 30000.0,   # amplitude-damping rate, 1 / T_1 (used in IdleGate)
    'T_phi_inv': 1 / 60000.0,  # pure-dephasing rate, 1 / T_phi (used in IdleGate)
    'p_axis': 1e-4,           # depolarization weight along the H axis (see NoisyHGate)
    'p_plane': 5e-4,          # depolarization weight in the orthogonal plane
    'delta_phi': 0.01,        # std. dev. of the per-qubit quasi-static flux angle
    'T_g_1Q': 20.0,           # single-qubit gate duration
    'T_g_2Q': 40.0,           # two-qubit gate duration
    'tau_m': 300.0,           # measurement duration
    'tau_d': 300.0,           # extra delay after measurement before end_time
    'gamma': 0,               # cross-talk CZ rotation angle (0 = no cross-talk)
    'alpha0': 4,              # initial resonator photon number (IdleGate.lamda)
    'kappa': 1 / 250,         # resonator decay rate (IdleGate.lamda)
    'chi': 1.3 * 1e-3}        # dispersive shift (IdleGate.lamda)

# NOTE(review): module-level flag, never read in this file — confirm external use.
X_TALK_GAMMA = False
21 |
22 |
class IdleGate(Channel):
    """Single-qubit idling-noise channel.

    Combines amplitude damping (rate ``params['T_1_inv']``) and pure dephasing
    (rate ``params['T_phi_inv']``) accumulated over ``duration``; when ``ts``
    is supplied, the dephasing survival probability is further multiplied by
    the measurement-induced factor computed by :meth:`lamda`.
    """

    def __init__(self, duration, params=default_params, ts=None):
        """Build the idle channel.

        :param duration: idle time, in the same unit as 1 / params['T_1_inv'].
        :param params: noise-parameter dict (see ``default_params``).
        :param ts: optional dict with keys 'start', 'end', 'Dt' describing the
            window used by :meth:`lamda`; None disables that factor.
        """
        self.duration = duration
        self.p_1_ = np.exp(-duration * params['T_1_inv'])  # 1 - p_1
        self.p_phi_ = np.exp(-duration * params['T_phi_inv'])  # 1 - p_phi
        if ts is not None:
            self.p_phi_ *= self.lamda(params, ts)
        # Survival amplitude of coherences: geometric mean of the two factors.
        p_xy = (self.p_1_ * self.p_phi_) ** 0.5
        # 4x4 channel matrix: the diagonal carries population decay, the two
        # anti-diagonal p_xy entries the decay of the off-diagonal coherences.
        # NOTE(review): exact basis convention is defined by Channel — confirm.
        data = np.diag([1, 1 - self.p_1_, 0, self.p_1_]) + np.diag([p_xy, 0, 0, p_xy])[::-1]
        super().__init__(1, data, name='Idle')

    def lamda(self, params, ts):
        """Extra dephasing factor from residual resonator photons.

        Evaluates a closed-form integral of exp(-kappa*t) against
        sin/cos(2*chi*t) over [ts['start'], ts['end']], scaled by the photon
        number alpha0 decayed over ts['Dt'].  (Intentionally spelled 'lamda'
        to avoid the Python keyword.)
        """
        chi = params['chi']
        alpha0 = params['alpha0']
        kappa = params['kappa']
        tstart = ts['start']
        tend = ts['end']
        Dt = ts['Dt']
        two_chi_alpha_Dt = 2 * chi * alpha0 * np.exp(-kappa * Dt)

        # Top half of the integral
        int_term_top = -np.exp(-kappa * tend) / (4 * chi**2 + kappa**2) * \
            (kappa * np.sin(2 * chi * tend) + 2 * chi * np.cos(2 * chi * tend))
        int_term_bot = -np.exp(-kappa * tstart) / (4 * chi**2 + kappa**2) * \
            (kappa * np.sin(2 * chi * tstart) + 2 * chi * np.cos(2 * chi * tstart))

        lamda = np.exp(-two_chi_alpha_Dt * (int_term_top - int_term_bot))
        return lamda
51 |
52 |
def NoisyHGate(params=default_params, qubit=0):
    """A Hadamard followed by an asymmetric depolarizing channel.

    With ``params=None`` the ideal ``HGate`` is returned unchanged; otherwise a
    one-qubit circuit named 'NoisyH' is built (the name is matched by
    ``add_idle_noise``).
    """
    if params is None:
        return HGate
    p_axis = params['p_axis']
    p_plane = params['p_plane']
    noisy_h = Circuit('NoisyH')
    noisy_h.append(HGate, [qubit])
    noisy_h.append(Depolarization(p_axis / 4, p_plane / 2 - p_axis / 4, p_axis / 4),
                   [qubit])
    return noisy_h
61 |
62 |
def add_idle_noise(circuit, start=None, end=None, params=default_params):
    """Insert :class:`IdleGate` channels into every idle gap of each qubit.

    Walks the circuit's operations by time and, whenever a qubit sat idle
    between two operations, appends an IdleGate of the gap's duration at the
    midpoint of the gap.  With ``start=None`` qubits are inactive until their
    first operation; qubits become inactive again after a Measurement.  If
    ``end`` is given, trailing idle time up to ``end`` is filled in as well.

    Per-qubit bookkeeping (all values are circuit times):
      - timing[q]: time of q's last operation, or None while inactive
      - time_last_meas[q]: time of the last 'ND' measurement on q, if any
      - time_start[q]: start of the window in which measurement-induced
        dephasing applies (opened/closed by 'NoisyH' gates following an 'ND'
        measurement); forwarded to IdleGate via ``ts``.

    NOTE(review): assumes circuit.operations_by_time iterates in increasing
    time order — confirm against the Circuit implementation.
    """
    timing = {q: start for q in circuit.all_qubits}
    time_last_meas = {q: None for q in circuit.all_qubits}
    time_start = {q: None for q in circuit.all_qubits}
    for t, layer in circuit.operations_by_time.items():
        for op in layer.values():
            operation = op['operation']
            for q in op['qubits']:
                if operation.name == 'ND':
                    time_last_meas[q] = t
                t0 = timing[q]
                # Only fill genuine gaps; state preparations do not end a gap.
                if t0 is not None and not isinstance(operation,
                                                     State) and (t - t0) > 0:
                    ts = None
                    if time_start[q] is not None:
                        ts = {
                            'start': t0 - time_start[q],
                            'end': t - time_start[q],
                            'Dt': time_start[q] - time_last_meas[q]
                        }
                    # Place the idle channel at the midpoint of the gap.
                    circuit.append(IdleGate(t - t0, params=params, ts=ts),
                                   qubits=[q],
                                   time_step=(t0 + t) / 2)
                if isinstance(operation, Measurement):
                    timing[q] = None
                else:
                    timing[q] = t
                # A NoisyH after a measurement toggles the dephasing window.
                if operation.name == 'NoisyH' and time_last_meas[q] is not None:
                    if time_start[q] is None:
                        time_start[q] = t
                    else:
                        time_start[q] = None
                        time_last_meas[q] = None
    if end is not None:
        for q, t0 in timing.items():
            if t0 is not None and t0 < end:
                circuit.append(IdleGate(end - t0, params=params),
                               qubits=[q],
                               time_step=(t0 + end) / 2)
102 |
103 |
def add_CZ_rotations(circuit, high_freq_group, low_freq_group, time_step=None, angles=None):
    """Append (possibly rotated) CZ gates between diagonally adjacent qubit pairs.

    ``angles is None`` gives the ideal CZ gate.  A dict maps each
    high-frequency qubit to its own rotation angle (quasi-static flux noise);
    a scalar applies one common small angle to all pairs (used to model
    cross-talk).  A pair (q1, q2) is coupled iff both coordinates differ by
    exactly 1.
    """
    for q1 in high_freq_group:
        # Pick the gate once per high-frequency qubit.
        if angles is None:
            gate = CZGate
        elif isinstance(angles, dict):
            phi = angles[q1]
            gate = Diagonal(2, np.array([1, 1, np.exp(1j * phi), -np.exp(-1j * phi)]), name='NoisyCZ')
        else:
            gate = Diagonal(2, np.array([1, 1, 1, np.exp(1j * angles)]), name='NoisyCZ')
        for q2 in low_freq_group:
            if abs(q1[0] - q2[0]) == 1 and abs(q1[1] - q2[1]) == 1:
                circuit.append(gate, [q1, q2], time_step=time_step)
122 |
123 |
# One-qubit channel used as a non-demolition measurement: four open legs of
# bond dimension 2, with an all-ones vector on leg 0.
# NOTE(review): presumably this sums over (discards) the measurement outcome —
# confirm against the Channel/TensorNetwork conventions.
C = TensorNetwork([0, 0, 0, 0], bond_dim=2)
C.add_node('PH', [0], np.ones(2))
# The name 'ND' is matched by add_idle_noise to track measurement times.
NDCompMeas = Channel(1, C, name='ND')
127 |
128 |
def add_noisy_surface_code(circuit, qubit_coords=None, connections=None, time=None, params=default_params):
    """Append one noisy surface-code stabilizer-measurement cycle to ``circuit``.

    Qubits are classified by their coordinates modulo 4 into data qubits
    (D1-D4), X-ancillas (X1, X2) and Z-ancillas (Z1, Z2).  The cycle
    interleaves noisy Hadamards, "flux-dance" CZ layers (with per-qubit
    quasi-static flux rotation errors and scalar cross-talk rotations) and
    non-demolition ancilla measurements.

    :param circuit: the Circuit to extend in place.
    :param qubit_coords: iterable of (x, y) coordinates of all qubits.
    :param connections: unused; kept for interface compatibility.
    :param time: start time of the cycle; defaults to the circuit's max_time.
    :param params: noise-parameter dict (see ``default_params``); ``None``
        selects an idealized cycle with unit gate times and no noise.
    :return: the end time of the cycle (X-ancilla measurement end plus the
        depletion time tau_d).
    """

    if time is None:
        time = max(circuit.max_time, 0)
    # Role of each qubit, determined by its coordinates modulo 4.
    qubit_group_name = {
        (0, 0): 'D1',
        (0, 2): 'D2',
        (2, 0): 'D3',
        (2, 2): 'D4',
        (1, 1): 'X1',
        (1, 3): 'Z1',
        (3, 1): 'Z2',
        (3, 3): 'X2',
        (-1, -1): 'dummy'
    }
    qubit_groups = {group: [] for group in qubit_group_name.values()}
    for x, y in qubit_coords:
        qubit_groups[qubit_group_name[x % 4, y % 4]].append((x, y))
    if params is None:
        # Idealized cycle: unit gate times, no flux noise.
        quasi_static_flux = None
        T_g_1Q = 1
        T_g_2Q = 1
        T_C = -1
        tau_m = 1
        tau_d = 4
        # BUG FIX: `gamma` was previously left unassigned on this branch,
        # causing a NameError in the cross-talk sections below whenever
        # params is None.  A zero angle makes the cross-talk gates trivial
        # (diag(1, 1, 1, 1)).
        gamma = 0
    else:
        # One random quasi-static flux angle per qubit, std. dev. delta_phi.
        quasi_static_flux = {q: np.random.randn() * params['delta_phi'] for q in qubit_coords}
        T_g_1Q = params['T_g_1Q']
        T_g_2Q = params['T_g_2Q']
        T_C = params.get('T_C', -T_g_1Q)
        tau_m = params['tau_m']
        tau_d = params['tau_d']
        gamma = params['gamma']

    # Time slot A: Hadamards on data qubits and X-ancillas.
    time += T_g_1Q / 2
    for group in ['D1', 'D2', 'D3', 'D4', 'X1', 'X2']:
        for qubit in qubit_groups[group]:
            circuit.append(NoisyHGate(params=params), [qubit], time_step=time)
    time += T_g_1Q / 2

    # Time slot 1 ~ 4: X-stabilizer flux dances.
    for flux_dance in [('D2', 'X1', 'X2', 'D3', 'D4'),
                       ('D1', 'X1', 'X2', 'D4', 'D3'),
                       ('D1', 'X2', 'X1', 'D4', 'D3'),
                       ('D2', 'X2', 'X1', 'D3', 'D4')]:
        time += T_g_2Q / 2
        for g1, g2 in [flux_dance[0:2], flux_dance[2:4], (flux_dance[4], 'dummy')]:
            add_CZ_rotations(circuit, qubit_groups[g1], qubit_groups[g2], time_step=time, angles=quasi_static_flux)
        time += T_g_2Q / 2

    # cross talk between data qubits and Z-ancilla qubits. added on the same time, so dont add too many idle errors
    for flux_dance in [('D1', 'Z1', 'Z2', 'D4', 'D3'),
                       ('D2', 'Z2', 'Z1', 'D3', 'D4'),
                       ('D2', 'Z1', 'Z2', 'D3', 'D4'),
                       ('D1', 'Z2', 'Z1', 'D4', 'D3')]:
        for g1, g2 in [flux_dance[0:2], flux_dance[2:4], (flux_dance[4], 'dummy')]:
            add_CZ_rotations(circuit, qubit_groups[g1], qubit_groups[g2], time_step=time, angles=gamma)

    # Time slot B: Hadamards on data qubits and X-ancillas.
    time += T_g_1Q / 2
    for group in ['D1', 'D2', 'D3', 'D4', 'X1', 'X2']:
        for qubit in qubit_groups[group]:
            circuit.append(NoisyHGate(params=params), [qubit], time_step=time)
    time += T_g_1Q / 2

    # Time slot C: measure the X-ancillas (non-demolition).
    for group in ['X1', 'X2']:
        for qubit in qubit_groups[group]:
            circuit.append(NDCompMeas, [qubit], time_step=time + tau_m / 2)
    end_time = time + tau_m + tau_d
    # T_C may be negative, overlapping the Z half-cycle with the measurement.
    time += T_C

    # Time slot D: Hadamards on Z-ancillas.
    time += T_g_1Q / 2
    for group in ['Z1', 'Z2']:
        for qubit in qubit_groups[group]:
            circuit.append(NoisyHGate(params=params), [qubit], time_step=time)
    time += T_g_1Q / 2

    # Time slot 5 ~ 8: Z-stabilizer flux dances.
    for flux_dance in [('D1', 'Z1', 'Z2', 'D4', 'D3'),
                       ('D2', 'Z2', 'Z1', 'D3', 'D4'),
                       ('D2', 'Z1', 'Z2', 'D3', 'D4'),
                       ('D1', 'Z2', 'Z1', 'D4', 'D3')]:
        time += T_g_2Q / 2
        for g1, g2 in [flux_dance[0:2], flux_dance[2:4], (flux_dance[4], 'dummy')]:
            add_CZ_rotations(circuit, qubit_groups[g1], qubit_groups[g2], time_step=time, angles=quasi_static_flux)
        time += T_g_2Q / 2

    # cross talk between data qubits and X-ancilla qubits. added on the same time, so don't add too many idle errors
    for flux_dance in [('D2', 'X1', 'X2', 'D3', 'D4'),
                       ('D1', 'X1', 'X2', 'D4', 'D3'),
                       ('D1', 'X2', 'X1', 'D4', 'D3'),
                       ('D2', 'X2', 'X1', 'D3', 'D4')]:
        for g1, g2 in [flux_dance[0:2], flux_dance[2:4], (flux_dance[4], 'dummy')]:
            add_CZ_rotations(circuit, qubit_groups[g1], qubit_groups[g2], time_step=time, angles=gamma)

    # Time slot E: Hadamards on Z-ancillas.
    time += T_g_1Q / 2
    for group in ['Z1', 'Z2']:
        for qubit in qubit_groups[group]:
            circuit.append(NoisyHGate(params=params), [qubit], time_step=time)
    time += T_g_1Q / 2

    # Time slot F: measure the Z-ancillas.
    for group in ['Z1', 'Z2']:
        for qubit in qubit_groups[group]:
            circuit.append(NDCompMeas, [qubit], time_step=time + tau_m / 2)

    return end_time
240 |
--------------------------------------------------------------------------------
/acqdp/tensor_network/slicer.py:
--------------------------------------------------------------------------------
1 | from acqdp.tensor_network.local_optimizer import defaultOrderResolver, LocalOptimizer
2 | from acqdp.tensor_network.contraction import ContractionScheme
3 | from multiprocessing import Pool
4 | import copy
5 | import numpy
6 |
7 |
class Slicer:
    """
    :class:`Slicer` finds slicing of an unsliced contraction scheme when called by the :class:`SlicedOrderFinder`.

    :ivar num_iter_before: Number of iterations of local optimization before slicing.
    :ivar num_iter_middle: Number of iterations of local optimization in the middle of slicing.
    :ivar num_iter_after: Number of iterations of local optimization after slicing.
    :ivar max_tw: Target log2 of the time cost; edges are sliced until the cost is at most 2 ** max_tw.
    :ivar max_num_slice: Maximum number of edges to be sliced. If set to -1, the constraint will be ignored.
    :ivar num_threads: Number of threads for multi-processing.
    :ivar slice_thres: Automatically slice edges that introduce an overhead below this threshold. Set to 0.02 by default.
    """

    def __init__(self,
                 num_iter_before=0,
                 num_iter_middle=20,
                 num_iter_after=100,
                 max_tw=29,
                 max_num_slice=-1,
                 num_threads=28,
                 slice_thres=.02,
                 **kwargs):
        self.num_iter_before = num_iter_before
        self.num_iter_middle = num_iter_middle
        self.num_iter_after = num_iter_after
        self.max_tw = max_tw
        self.max_num_slice = max_num_slice
        self.num_threads = num_threads
        self.slice_thres = slice_thres
        self.local_optimizer = LocalOptimizer(
            **kwargs.get('local_optimizer_params', {}))
        # Number of candidate orders drawn / successful slicings required.
        self.num_suc_candidates = kwargs.get('num_suc_candidates', 10)

    def _slice(self, tn, orders, num_process=0):
        """Slice edges of `tn` until the time cost drops to 2 ** max_tw.

        :param tn: the tensor network to slice (copied; not mutated).
        :param orders: candidate contraction schemes; the cheapest is used.
        :param num_process: label used only in progress printouts.
        :return: a :class:`ContractionScheme` with the sliced edges, or None
            on failure or keyboard interrupt.
        """
        tn = tn.copy()
        tnc = tn
        while True:
            try:
                tn = tnc.copy()
                # Start from the cheapest candidate (stable tie-break by position).
                y = min(orders, key=lambda a: (a.cost, orders.index(a)))
                slice_edges = []
                y = self.local_optimizer.optimize(
                    tn, y, self.num_iter_before)
                print(f'Process {num_process} initial cost: {y.cost}',
                      flush=True)
                while y.cost.t > 2**self.max_tw:
                    # BUG FIX: the slice-count limits must only apply when
                    # max_num_slice >= 0.  Previously `len(slice_edges) >= -1`
                    # was always true, so the documented sentinel -1 ("ignore
                    # the constraint") made the loop break immediately and no
                    # slicing ever happened.
                    if self.max_num_slice >= 0:
                        if len(slice_edges) >= self.max_num_slice:
                            break
                        if numpy.log2(float(y.cost.t)) - self.max_tw + len(
                                slice_edges) >= self.max_num_slice + 2:  # early termination
                            break
                    y = self.local_optimizer.optimize(
                        tn, y, self.num_iter_middle)
                    # Pick the next edge(s) to slice and fix them in the network.
                    k, order = self._biggest_weight_edge(tn, y.order)
                    slice_edges += k
                    for a in k:
                        tn.fix_edge(a)
                    tn.fix()
                    y = defaultOrderResolver.order_to_contraction_scheme(tn, order)
                    y.cost.k = len(slice_edges)
                new_y = self.local_optimizer.optimize(
                    tn, y, self.num_iter_after)
                new_y.cost.k = len(slice_edges)
                # Keep the post-optimization scheme only if it still fits.
                if new_y.cost.t <= 2**self.max_tw:
                    y = new_y
                if y.cost.t <= 2**self.max_tw:
                    print(f'Process {num_process} succeeded with {y.cost}',
                          flush=True)
                    return ContractionScheme(y.order, slice_edges, cost=y.cost)
                else:
                    print(f'Process {num_process} failed. Please consider alternative order finding methods',
                          flush=True)
                    return None
            except KeyboardInterrupt:
                return None

    def _biggest_weight_edge(self, tn, order):
        """Find an edge or a list of edges, slicing of which introduces an overhead each that is below a threshold given
        by self.slice_thres, or a minimal overhead if self.slice_thres is unattainable.

        The method enumerates all edges that appear frequently on the stem of the contraction tree. It tries to
        introduce as minimal overhead as possible by flipping branches on the stem while trying to slice the edges.

        :return: a tuple (slice_edges, new_order) — the edge names to slice and
            the contraction order rewritten for the corresponding tree.
        """
        tn_copy = tn.copy()
        tn_copy.fix()
        nodes_names = list(tn_copy.nodes_by_name)
        from acqdp.tensor_network.undirected_contraction_tree import UndirectedContractionTree
        eq, path, eedd = defaultOrderResolver.order_to_path(tn_copy, order)
        uct = UndirectedContractionTree(eq, path)
        se = set()
        edges_dic = {}
        ss = []
        # Record, for each subscript on the stem, the stem interval
        # [first appearance, disappearance) over which it is open.
        for i in range(len(uct.stem) - 1):
            new_se = uct.open_subscripts_at_edge(
                uct.graph.nodes[uct.stem[i]]['parent'], uct.stem[i])
            for a in new_se.difference(se):
                edges_dic[a] = i
            for a in se.difference(new_se):
                ss.append((a, edges_dic[a], i))
            se = new_se
        # Keep the ten subscripts that stay on the stem the longest.
        ss = sorted(ss, key=lambda x: x[1] - x[2])[:10]
        ss_dic = {}
        c = uct.cost
        slice_edges = []
        for s in ss:
            # Tentatively delete subscript s[0] everywhere and recompute costs.
            uct_copy = copy.deepcopy(uct)
            for v in range(uct_copy.n):
                uct_copy.graph.nodes[v]['subscripts'] = uct_copy.graph.nodes[v][
                    'subscripts'].difference({s[0]})
            for u, v in uct_copy.graph.edges:
                uct_copy.graph[u][v].clear()
                uct_copy.graph[v][u].clear()
                uct_copy.preprocess_edge(u, v)
                uct_copy.preprocess_edge(v, u)
            uct_copy.compute_root_cost()
            for v in range(uct_copy.n, uct_copy.n * 2 - 2):
                uct_copy.compute_node_cost(v)
            res = (uct_copy.cost, s[1], s[2], uct_copy.get_path())
            i = s[1]

            curr_cost = res[0]
            # Greedy local search: shrink the interval from the left by
            # switching stem branches, keeping any non-worsening move.
            while i > 5:
                ii = i
                while ii > 3:
                    for k in range(3, ii)[::-1]:
                        for l in range(k, ii):
                            uct_copy.switch_branches(l)
                        if uct_copy.cost <= curr_cost:
                            curr_cost = uct_copy.cost
                            ii -= 1
                            break
                        else:
                            # Undo the trial switches in reverse order.
                            for l in range(k, ii)[::-1]:
                                uct_copy.switch_branches(l)
                    else:
                        break

                k = ii
                sss = ii + 1
                while k > 5:
                    uct_copy.switch_branches(k)
                    if uct_copy.cost <= curr_cost:
                        curr_cost = uct_copy.cost
                        sss = k
                    k -= 1
                ii = sss
                for k in range(6, ii):
                    uct_copy.switch_branches(k)
                if ii >= i:
                    break
                i = ii
            res = (curr_cost, i, res[2])
            j = res[2]
            # Symmetric greedy search extending from the right end.
            while j < len(uct_copy.stem) - 5:
                jj = j
                while jj < len(uct_copy.stem) - 3:
                    for k in range(jj + 1, len(uct_copy.stem) - 1):
                        for l in range(jj, k)[::-1]:
                            uct_copy.switch_branches(l)
                        ucost = uct_copy.cost
                        if ucost <= curr_cost:
                            curr_cost = ucost
                            jj += 1
                            break
                        else:
                            for l in range(jj, k):
                                uct_copy.switch_branches(l)
                    else:
                        break
                k = jj
                sss = jj - 1
                while k < len(uct_copy.stem) - 5:
                    uct_copy.switch_branches(k)
                    ucost = uct_copy.cost
                    if ucost <= curr_cost:
                        curr_cost = uct_copy.cost
                        sss = k
                    k += 1
                jj = sss
                for k in range(jj + 1, len(uct_copy.stem) - 5)[::-1]:
                    uct_copy.switch_branches(k)
                if jj <= j:
                    break
                j = jj
            # Accept the slice when its cost is within (1 + thres)/2 of the
            # pre-slice cost, i.e. overhead below slice_thres relative to the
            # ideal halving; otherwise remember it as a fallback candidate.
            if curr_cost < (1 + self.slice_thres) / 2 * c:
                slice_edges.append(eedd[s[0]])
                c = curr_cost
                uct = uct_copy
            else:
                ss_dic[s[0]] = (curr_cost, res[1], j, uct_copy.get_path())
        if len(slice_edges) > 0:
            pp = uct.get_path()
        else:
            # Nothing met the threshold: fall back to the cheapest candidate.
            kk = sorted(ss_dic, key=lambda x: ss_dic[x][0])[0]
            slice_edges = [eedd[kk]]
            pp = ss_dic[kk][-1]
        # Translate the pairwise path back into an order over node names.
        new_order = []
        for i, p in enumerate(pp):
            new_order.append([[nodes_names[p[0]], nodes_names[p[1]]], order[i][1]])
            nodes_names.pop(max(p[0], p[1]))
            nodes_names.pop(min(p[0], p[1]))
            nodes_names.append(order[i][1])
        return slice_edges, new_order

    def slice(self, tn, order_gen):
        """Draw num_suc_candidates orders from `order_gen` and slice the best one."""
        orders = [next(order_gen) for _ in range(self.num_suc_candidates)]
        return self._slice(tn, orders)
214 |
215 |
def mpwrapper(slicer, tn, orders, num_process):
    """Module-level shim so `Slicer._slice` can be dispatched via `Pool.starmap`
    (bound methods are awkward to pickle on some platforms)."""
    result = slicer._slice(tn, orders, num_process)
    return result
218 |
219 |
class MPSlicer(Slicer):
    """Multi-processing slicing, by concurrently trying different slicing routes."""

    def slice(self, tn, order_gen):
        """Run `_slice` in parallel workers (one fresh candidate order each)
        until more than num_suc_candidates successful slicings have been
        collected; return the cheapest result (ties broken by arrival order).
        """
        candidates = []
        while len(candidates) <= self.num_suc_candidates:
            with Pool(self.num_threads) as p:
                # One task per worker; each draws its own order from the generator.
                lk = list((self, tn, [next(order_gen)], num_process) for num_process in range(self.num_threads))
                new_candidates = p.starmap(mpwrapper, lk)
                # Failed attempts return None and are discarded.
                candidates += [i for i in new_candidates if i is not None]
                print("Num of candidates now: {}".format(len(candidates)))
        res = min(candidates, key=lambda x: (x.cost, candidates.index(x)))
        return res
233 |
234 |
def get_slicer(**kwargs):
    """Factory: build a slicer by name ('default' -> Slicer, 'mp' -> MPSlicer),
    forwarding kwargs['slicer_params'] to its constructor."""
    registry = {'default': Slicer, 'mp': MPSlicer}
    chosen = registry[kwargs.get('slicer_name', 'default')]
    return chosen(**kwargs.get('slicer_params', {}))
239 |
--------------------------------------------------------------------------------