├── .all-contributorsrc ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .dockerignore ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ ├── task_description.md │ └── tc_enhancement_proposal.md ├── nightly_build │ ├── darkify.py │ └── setup.py └── workflows │ ├── ci.yml │ └── nightly_release.yml ├── .gitignore ├── .pylintrc ├── .readthedocs.yaml ├── CHANGELOG.md ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── HISTORY.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── README_cn.md ├── benchmarks ├── README.md ├── requirements.txt └── scripts │ ├── benchmark.py │ ├── qml_benchmark.py │ ├── qml_jd.py │ ├── qml_pennylane.py │ ├── qml_tc_jax.py │ ├── qml_tc_tf.py │ ├── qml_tfquantum.py │ ├── utils.py │ ├── vqe_pennylane.py │ ├── vqe_qibo.py │ ├── vqe_qiskit.py │ ├── vqe_tc.py │ └── vqe_tfquantum.py ├── check_all.sh ├── codecov.yml ├── docker ├── Dockerfile ├── Dockerfile_v2 └── README.md ├── docs ├── Makefile ├── ext │ └── toctree_filter.py └── source │ ├── advance.rst │ ├── api │ ├── about.rst │ ├── abstractcircuit.rst │ ├── applications.rst │ ├── applications │ │ ├── ai.rst │ │ ├── ai │ │ │ └── ensemble.rst │ │ ├── dqas.rst │ │ ├── finance.rst │ │ ├── finance │ │ │ └── portfolio.rst │ │ ├── graphdata.rst │ │ ├── layers.rst │ │ ├── optimization.rst │ │ ├── physics.rst │ │ ├── physics │ │ │ ├── baseline.rst │ │ │ └── fss.rst │ │ ├── utils.rst │ │ ├── vags.rst │ │ ├── van.rst │ │ └── vqes.rst │ ├── backends.rst │ ├── backends │ │ ├── backend_factory.rst │ │ ├── cupy_backend.rst │ │ ├── jax_backend.rst │ │ ├── numpy_backend.rst │ │ ├── pytorch_backend.rst │ │ └── tensorflow_backend.rst │ ├── basecircuit.rst │ ├── channels.rst │ ├── circuit.rst │ ├── cloud.rst │ ├── cloud │ │ ├── abstraction.rst │ │ ├── apis.rst │ │ ├── config.rst │ │ ├── local.rst │ │ ├── quafu_provider.rst │ │ ├── tencent.rst │ │ ├── utils.rst │ │ └── wrapper.rst │ ├── compiler.rst │ ├── compiler │ │ ├── composed_compiler.rst │ │ ├── qiskit_compiler.rst │ │ └── simple_compiler.rst │ ├── cons.rst │ ├── densitymatrix.rst │ ├── experimental.rst │ ├── fgs.rst │ ├── gates.rst │ ├── interfaces.rst │ ├── interfaces │ │ ├── numpy.rst │ │ ├── scipy.rst │ │ ├── tensorflow.rst │ │ ├── tensortrans.rst │ │ └── torch.rst │ ├── keras.rst │ ├── mps_base.rst │ ├── mpscircuit.rst │ ├── noisemodel.rst │ ├── quantum.rst │ ├── results.rst │ ├── results │ │ ├── counts.rst │ │ ├── qem.rst │ │ ├── qem │ │ │ ├── benchmark_circuits.rst │ │ │ └── qem_methods.rst │ │ └── readout_mitigation.rst │ ├── shadows.rst │ ├── simplify.rst │ ├── templates.rst │ ├── templates │ │ ├── ansatz.rst │ │ ├── blocks.rst │ │ ├── chems.rst │ │ ├── conversions.rst │ │ ├── dataset.rst │ │ ├── graphs.rst │ │ └── measurements.rst │ ├── torchnn.rst │ ├── translation.rst │ ├── utils.rst │ └── vis.rst │ ├── cnconf.py │ ├── conf.py │ ├── contribs │ ├── development_Mac.md │ ├── development_MacARM.md │ ├── development_MacM1.rst │ ├── development_MacM2.md │ ├── development_Mac_cn.md │ ├── development_windows.rst │ └── development_wsl2.rst │ ├── contribution.rst │ ├── faq.rst │ ├── generate_rst.py │ ├── index.rst │ ├── infras.rst │ ├── locale │ └── zh │ │ └── LC_MESSAGES │ │ ├── advance.po │ │ ├── api.po │ │ ├── contribs.po │ │ ├── contribution.po │ │ ├── faq.po │ │ ├── index.po │ │ ├── index_cn.po │ │ ├── infras.po │ │ ├── modules.po │ │ ├── quickstart.po │ │ ├── sharpbits.po │ │ ├── tutorial.po │ │ ├── tutorial_cn.po │ │ └── whitepapertoc.po │ ├── modules.rst │ ├── modules.rst.backup │ ├── quickstart.rst │ ├── 
sharpbits.rst │ ├── statics │ ├── bell_pair_block.png │ ├── example_block.png │ ├── landscape.jpg │ ├── logo.png │ ├── logov2.jpg │ ├── qd_alg.jpg │ ├── quop.png │ ├── teleportation.png │ ├── tianxuan_s1.png │ └── vmap_ingredients.png │ ├── textbook │ ├── chap1.ipynb │ ├── chap2.ipynb │ ├── chap3.ipynb │ ├── chap4.ipynb │ ├── chap5.ipynb │ └── img │ │ ├── 3GHZ.png │ │ ├── CNOT.png │ │ ├── Circuit_G.png │ │ ├── DJ_algorithm.png │ │ ├── Fredkin.png │ │ ├── GHZ_hist.png │ │ ├── Grover.png │ │ ├── Grover_iteration.png │ │ ├── Toffoli.png │ │ ├── Toffoli_circuit.png │ │ ├── VQE.png │ │ ├── bloch.png │ │ ├── classical_gates.png │ │ ├── densecoding.png │ │ ├── density_Bell.png │ │ ├── google_sycamore.png │ │ ├── half_adder.png │ │ ├── quantum_circuit.png │ │ ├── reversible_gates.png │ │ ├── simple_oracle.png │ │ ├── square_lattice.png │ │ ├── swing.png │ │ ├── teleportation.png │ │ └── vqa.jpg │ ├── textbooktoc.rst │ ├── tutorial.rst │ ├── tutorial_cn.rst │ ├── tutorials │ ├── barren_plateaus.ipynb │ ├── barren_plateaus_cn.ipynb │ ├── benchmark_circuits.ipynb │ ├── circuit_basics.ipynb │ ├── circuit_basics_cn.ipynb │ ├── classical_shadows.ipynb │ ├── contractors.ipynb │ ├── contractors_cn.ipynb │ ├── dqas.ipynb │ ├── dqas_cn.ipynb │ ├── error_mitigation.ipynb │ ├── gradient_benchmark.ipynb │ ├── gradient_benchmark_cn.ipynb │ ├── imag_time_evo.ipynb │ ├── mera.ipynb │ ├── mera_cn.ipynb │ ├── mnist_qml.ipynb │ ├── mnist_qml_cn.ipynb │ ├── nnvqe.ipynb │ ├── nnvqe_cn.ipynb │ ├── operator_spreading.ipynb │ ├── operator_spreading_cn.ipynb │ ├── optimization_and_expressibility.ipynb │ ├── optimization_and_expressibility_cn.ipynb │ ├── portfolio_optimization.ipynb │ ├── qaoa.ipynb │ ├── qaoa_bo.ipynb │ ├── qaoa_cn.ipynb │ ├── qaoa_nae3sat.ipynb │ ├── qaoa_quantum_dropout.ipynb │ ├── qcloud_sdk.ipynb │ ├── qcloud_sdk_demo.ipynb │ ├── qml_scenarios.ipynb │ ├── qml_scenarios_cn.ipynb │ ├── qubo_problem.ipynb │ ├── sklearn_svc.ipynb │ ├── sklearn_svc_cn.ipynb │ ├── template.ipynb │ ├── template_cn.ipynb │ ├── tfim_vqe.ipynb │ ├── tfim_vqe_cn.ipynb │ ├── tfim_vqe_diffreph.ipynb │ ├── tfim_vqe_diffreph_cn.ipynb │ ├── torch_qml.ipynb │ ├── torch_qml_cn.ipynb │ ├── vqe_h2o.ipynb │ ├── vqe_h2o_cn.ipynb │ ├── vqex_mbl.ipynb │ └── vqex_mbl_cn.ipynb │ ├── whitepaper │ ├── 3-circuits-gates.ipynb │ ├── 3-circuits-gates_cn.ipynb │ ├── 4-gradient-optimization.ipynb │ ├── 4-gradient-optimization_cn.ipynb │ ├── 5-density-matrix.ipynb │ ├── 5-density-matrix_cn.ipynb │ ├── 6-1-conditional-measurements-post-selection.ipynb │ ├── 6-1-conditional-measurements-post-selection_cn.ipynb │ ├── 6-2-pauli-string-expectation.ipynb │ ├── 6-2-pauli-string-expectation_cn.ipynb │ ├── 6-3-vmap.ipynb │ ├── 6-3-vmap_cn.ipynb │ ├── 6-4-quoperator.ipynb │ ├── 6-4-quoperator_cn.ipynb │ ├── 6-5-custom-contraction.ipynb │ ├── 6-5-custom-contraction_cn.ipynb │ ├── 6-6-advanced-automatic-differentiation.ipynb │ └── 6-6-advanced-automatic-differentiation_cn.ipynb │ ├── whitepapertoc.rst │ └── whitepapertoc_cn.rst ├── examples ├── adiabatic_vqnhe.py ├── analog_evolution_interface.py ├── analog_evolution_jax.py ├── analog_evolution_mint.py ├── apicomparison │ ├── 0_tfq_qml.py │ ├── 0_tfq_vg.py │ ├── 1_pennylane_qml.py │ ├── 1_pennylane_vg.py │ ├── 2_tc_qml.py │ ├── 2_tc_vg.py │ ├── README.md │ └── _barplot.py ├── batched_parameters_structures.py ├── bp_benchmark.py ├── bp_validation.py ├── chaotic_behavior.py ├── checkpoint_memsave.py ├── circuit_compiler.py ├── clifford_optimization.py ├── cotengra_setting_bench.py ├── ghz_dqas.py ├── 
gradient_benchmark.py ├── h6_hamiltonian.npy ├── hamiltonian_building.py ├── hchainhamiltonian.py ├── hea_scan_jit_acc.py ├── hybrid_gpu_pipeline.py ├── incremental_twoqubit.py ├── jacobian_cal.py ├── jax_scan_jit_acc.py ├── jsonio.py ├── keras3_tc_integration.py ├── lightcone_simplify.py ├── matprod_vmap.py ├── mcnoise_boost.py ├── mcnoise_boost_v2.py ├── mcnoise_check.py ├── mera_extra_mpo.py ├── mipt.py ├── mipt_pideal.py ├── mpsvsexact.py ├── noise_calibration.py ├── noisy_qml.py ├── noisy_sampling_jit.py ├── omeinsum_julia │ ├── README.md │ ├── benchmark_results.csv │ ├── circuit_n12_m14_s0_e0_pEFGH.qsim │ ├── omeinsum.jl │ ├── omeinsum_contractor_juliacall.py │ ├── omeinsum_contractor_subprocess.py │ └── omeinsum_treesa_optimizer.py ├── optperformance_comparison.py ├── parameter_shift.py ├── qaoa_dqas.py ├── qaoa_parallel_opt.py ├── qaoa_shot_noise.py ├── qem_dqas.py ├── quantumng.py ├── quditcircuit.py ├── readout_mitigation.py ├── rem_super_large_scale.py ├── sample_benchmark.py ├── sample_value_gradient.py ├── shvqe.py ├── simple_qaoa.py ├── slicing_wavefunction_vqa.py ├── stabilizer_simulation.py ├── tcgates.inc ├── time_evolution.py ├── timeevolution_trotter.py ├── training_deep_tunable_structures.py ├── universal_lr.py ├── variational_dynamics.py ├── variational_dynamics_circuit.py ├── variational_dynamics_generalized.py ├── vmap_randomness.py ├── vqe2d.py ├── vqe_extra.py ├── vqe_extra_mpo.py ├── vqe_extra_mpo_spopt.py ├── vqe_noisyopt.py ├── vqe_parallel_pmap.py ├── vqe_shot_noise.py ├── vqeh2o_benchmark.py ├── vqetfim_benchmark.py └── vqnhe_h6.py ├── mypy.ini ├── pytest.ini ├── requirements ├── requirements-dev.txt ├── requirements-docker-v2.txt ├── requirements-docker.txt ├── requirements-extra.txt ├── requirements-rtd.txt ├── requirements-types.txt └── requirements.txt ├── setup.py ├── tensorcircuit ├── __init__.py ├── about.py ├── abstractcircuit.py ├── applications │ ├── README.md │ ├── __init__.py │ ├── ai │ │ ├── __init__.py │ │ └── ensemble.py │ ├── dqas.py │ ├── finance │ │ ├── __init__.py │ │ └── portfolio.py │ ├── graphdata.py │ ├── layers.py │ ├── optimization.py │ ├── physics │ │ ├── __init__.py │ │ ├── baseline.py │ │ └── fss.py │ ├── utils.py │ ├── vags.py │ ├── van.py │ └── vqes.py ├── asciiart.py ├── backends │ ├── __init__.py │ ├── abstract_backend.py │ ├── backend_factory.py │ ├── cupy_backend.py │ ├── jax_backend.py │ ├── jax_ops.py │ ├── numpy_backend.py │ ├── pytorch_backend.py │ ├── pytorch_ops.py │ ├── tensorflow_backend.py │ └── tf_ops.py ├── basecircuit.py ├── channels.py ├── circuit.py ├── cloud │ ├── __init__.py │ ├── abstraction.py │ ├── apis.py │ ├── config.py │ ├── local.py │ ├── quafu_provider.py │ ├── tencent.py │ ├── utils.py │ └── wrapper.py ├── compiler │ ├── __init__.py │ ├── composed_compiler.py │ ├── qiskit_compiler.py │ └── simple_compiler.py ├── cons.py ├── densitymatrix.py ├── experimental.py ├── fgs.py ├── gates.py ├── interfaces │ ├── __init__.py │ ├── numpy.py │ ├── scipy.py │ ├── tensorflow.py │ ├── tensortrans.py │ └── torch.py ├── keras.py ├── mps_base.py ├── mpscircuit.py ├── noisemodel.py ├── quantum.py ├── results │ ├── __init__.py │ ├── counts.py │ ├── qem │ │ ├── __init__.py │ │ ├── benchmark_circuits.py │ │ └── qem_methods.py │ └── readout_mitigation.py ├── shadows.py ├── simplify.py ├── templates │ ├── __init__.py │ ├── ansatz.py │ ├── blocks.py │ ├── chems.py │ ├── conversions.py │ ├── dataset.py │ ├── graphs.py │ └── measurements.py ├── torchnn.py ├── translation.py ├── utils.py └── vis.py └── tests ├── __init__.py ├── 
conftest.py ├── test_backends.py ├── test_calibrating.py ├── test_channels.py ├── test_circuit.py ├── test_cloud.py ├── test_compiler.py ├── test_dmcircuit.py ├── test_ensemble.py ├── test_fgs.py ├── test_gates.py ├── test_interfaces.py ├── test_keras.py ├── test_miscs.py ├── test_mpscircuit.py ├── test_noisemodel.py ├── test_qaoa.py ├── test_qem.py ├── test_quantum.py ├── test_quantum_attr.py ├── test_results.py ├── test_shadows.py ├── test_simplify.py ├── test_templates.py ├── test_torchnn.py └── test_van.py /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.140.1/containers/python-3/.devcontainer/base.Dockerfile 2 | 3 | # [Choice] Python version: 3, 3.8, 3.7, 3.6 4 | ARG VARIANT="3" 5 | FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} 6 | 7 | # [Option] Install Node.js 8 | ARG INSTALL_NODE="true" 9 | ARG NODE_VERSION="lts/*" 10 | RUN if [ "${INSTALL_NODE}" = "true" ]; then su vscode -c "source /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi 11 | 12 | # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. 13 | COPY requirements/requirements.txt /tmp/pip-tmp/ 14 | COPY requirements/requirements-extra.txt /tmp/pip-tmp/ 15 | 16 | RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ 17 | && pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements-extra.txt \ 18 | && rm -rf /tmp/pip-tmp 19 | 20 | # [Optional] Uncomment this section to install additional OS packages. 21 | # RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 22 | # && apt-get -y install --no-install-recommends 23 | 24 | # [Optional] Uncomment this line to install global node packages. 25 | # RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1 -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.140.1/containers/python-3 3 | { 4 | "name": "TensorCircuit User", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | "context": "..", 8 | "args": { 9 | // Update 'VARIANT' to pick a Python version: 3, 3.6, 3.7, 3.8 10 | "VARIANT": "3.8", 11 | // Options 12 | "INSTALL_NODE": "true", 13 | "NODE_VERSION": "lts/*" 14 | } 15 | }, 16 | 17 | // Set *default* container specific settings.json values on container create. 
18 | "settings": { 19 | "terminal.integrated.shell.linux": "/bin/bash", 20 | "python.pythonPath": "/usr/local/bin/python", 21 | "python.linting.enabled": true, 22 | "python.linting.pylintEnabled": true, 23 | "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", 24 | "python.formatting.blackPath": "/usr/local/py-utils/bin/black", 25 | "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", 26 | "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", 27 | "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", 28 | "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", 29 | "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", 30 | "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", 31 | "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint" 32 | }, 33 | 34 | // Add the IDs of extensions you want installed when the container is created. 35 | "extensions": ["ms-python.python", "ms-toolsai.jupyter"], 36 | 37 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 38 | // "forwardPorts": [], 39 | 40 | // Use 'postCreateCommand' to run commands after the container is created. 41 | "postCreateCommand": "sudo python3 setup.py develop" 42 | 43 | // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root. 44 | // "remoteUser": "vscode" 45 | } 46 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/*DS_Store 2 | **/*coverage 3 | **/*idea 4 | **/*mypy_cache 5 | **/*pytest_cache 6 | **/*develop 7 | **/*dist 8 | **/*build 9 | **/*htmlcov 10 | **/*__pycache__ 11 | **/*.pyc 12 | **/*.outdated 13 | **/*.result 14 | **/*.results 15 | **/*.data 16 | **/*.egg-info 17 | **/*examples-ng -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: a report on a TensorCircuit-related bug 4 | title: "" 5 | labels: bug 6 | assignees: "" 7 | --- 8 | 9 | ## Issue Description 10 | 11 | <!-- Insert a short description of the bug here, along with what you expected the behavior to be. --> 12 | 13 | ## How to Reproduce 14 | 15 | ```python 16 | # Include a minimal snippet of the code that produces the error here. 17 | ``` 18 | 19 | ### Error Output 20 | 21 | ```python 22 | # Include the unexpected output or error log here 23 | ``` 24 | 25 | ## Environment Context 26 | 27 | <!-- Please report your OS version, Python environment and version, TensorCircuit version, and the versions of necessary dependent packages (NumPy, TensorFlow, Jax, Jaxlib, PyTorch) here. --> 28 | 29 | Output of `tc.about()` and `tc.__version__`.
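For the environment context requested above, a minimal sketch like the following can be used to collect the relevant details (it assumes `tensorcircuit` is importable in the environment where the bug occurs; `tc.about()` prints platform and dependency version information):

```python
# Minimal sketch for collecting the environment details requested in this template.
import sys

import tensorcircuit as tc

print(sys.version)     # Python version
print(tc.__version__)  # TensorCircuit version
tc.about()             # platform and dependency (NumPy, TensorFlow, JAX, PyTorch, ...) versions
```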
30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea or improvement for TensorCircuit 4 | title: "" 5 | labels: feature-request 6 | assignees: "" 7 | --- 8 | 9 | ## Issue Description 10 | 11 | 12 | 13 | ## Proposed Solution 14 | 15 | 16 | 17 | ## Additional References 18 | 19 | 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/task_description.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Task Description 3 | about: Tasks from TensorCircuit Maintainer 4 | title: "" 5 | labels: good first issue 6 | assignees: "" 7 | --- 8 | 9 | ## Task description 10 | 11 | 12 | 13 | ## Implementation 14 | 15 | 16 | 17 | ## Requirements 18 | 19 | 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/tc_enhancement_proposal.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: TensorCircuit Enhancement Proposal 3 | about: a design note describing a new major feature for TensorCircuit 4 | title: "" 5 | labels: tep 6 | assignees: "" 7 | --- 8 | 9 | 10 | 11 | 12 | 13 | # TEP - Title 14 | 15 | Author 16 | 17 | 18 | Status 19 | 20 | 21 | Created 22 | 23 | 24 | ## Abstract 25 | 26 | 27 | 28 | ## Motivation and Scope 29 | 30 | 31 | 32 | ## Usage and Impact 33 | 34 | 35 | 36 | ## Backward compatibility 37 | 38 | 39 | 40 | ## Related Work 41 | 42 | 43 | 44 | ## Implementation 45 | 46 | 48 | 49 | ## Alternatives 50 | 51 | 52 | 53 | ## References 54 | 55 | 56 | -------------------------------------------------------------------------------- /.github/nightly_build/darkify.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import requests 3 | 4 | 5 | def change_version(post=""): 6 | datestr = datetime.now().strftime("%Y%m%d") 7 | datestr += post 8 | with open("tensorcircuit/__init__.py", "r") as f: 9 | r = [] 10 | for l in f.readlines(): 11 | if l.startswith("__version__"): 12 | l = l[:-2] 13 | l += ".dev" + datestr + '"\n' 14 | r.append(l) 15 | # __version__ = "0.2.2.dev20220706" 16 | with open("tensorcircuit/__init__.py", "w") as f: 17 | f.writelines(r) 18 | 19 | 20 | def update_setuppy(url=None): 21 | if not url: 22 | url = "https://raw.githubusercontent.com/refraction-ray/tensorcircuit-dev/beta/.github/nightly_build/setup.py" 23 | r = requests.get(url) 24 | with open("setup.py", "w") as f: 25 | f.writelines(r.text) 26 | 27 | 28 | if __name__ == "__main__": 29 | change_version() 30 | update_setuppy() 31 | -------------------------------------------------------------------------------- /.github/nightly_build/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | from tensorcircuit import __version__, __author__ 4 | 5 | with open("README.md", "r") as fh: 6 | long_description = fh.read() 7 | 8 | 9 | setuptools.setup( 10 | name="tensorcircuit-nightly", 11 | version=__version__, 12 | author=__author__, 13 | author_email="znfesnpbh.tc@gmail.com", 14 | description="nightly release for tensorcircuit", 15 | long_description=long_description, 16 | long_description_content_type="text/markdown", 17 | url="https://github.com/refraction-ray/tensorcircuit-dev", 18 
| packages=setuptools.find_packages(), 19 | include_package_data=True, 20 | install_requires=["numpy", "scipy", "tensornetwork", "networkx"], 21 | extras_require={ 22 | "tensorflow": ["tensorflow"], 23 | "jax": ["jax", "jaxlib"], 24 | "torch": ["torch"], 25 | "qiskit": ["qiskit"], 26 | }, 27 | tests_require=[ 28 | "pytest", 29 | "pytest-lazy-fixture", 30 | "pytest-cov", 31 | "pytest-benchmark", 32 | "pytest-xdist", 33 | ], 34 | classifiers=[ 35 | "Programming Language :: Python :: 3", 36 | "Operating System :: OS Independent", 37 | ], 38 | ) 39 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: [push, pull_request] 3 | jobs: 4 | tests: 5 | runs-on: ${{ matrix.os }} 6 | name: test 7 | strategy: 8 | matrix: 9 | os: [ubuntu-20.04, macos-latest] # macos-latest disabled to save quota 10 | python-version: ["3.10"] 11 | fail-fast: false 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python ${{ matrix.python-version }} 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: ${{ matrix.python-version }} 18 | cache: "pip" 19 | - name: install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | pip install --no-cache-dir -r requirements/requirements.txt 23 | pip install --no-cache-dir -r requirements/requirements-extra.txt 24 | pip install --no-cache-dir -r requirements/requirements-dev.txt 25 | pip install --no-cache-dir -r requirements/requirements-types.txt 26 | - name: black linter 27 | run: | 28 | black . --check 29 | - name: mypy checker 30 | run: | 31 | mypy tensorcircuit 32 | - name: pylint checker 33 | run: | 34 | pylint tensorcircuit tests 35 | - name: test scripts 36 | run: | 37 | pytest --cov=tensorcircuit --cov-report=xml -svv --benchmark-skip 38 | - name: Upload coverage to Codecov 39 | if: matrix.os == 'ubuntu-20.04' 40 | uses: codecov/codecov-action@v2 41 | with: 42 | verbose: true 43 | token: ${{ secrets.CODECOV_TOKEN }} 44 | - name: run example demos 45 | run: | 46 | cd examples 47 | python mcnoise_check.py 48 | python vqnhe_h6.py 49 | python mcnoise_boost.py 50 | python quantumng.py 51 | python universal_lr.py 52 | python parameter_shift.py 53 | python mpsvsexact.py 54 | - name: setup build 55 | run: | 56 | python3 setup.py build 57 | -------------------------------------------------------------------------------- /.github/workflows/nightly_release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | on: 3 | # push: 4 | # branches: 5 | # - beta 6 | schedule: 7 | - cron: "0 12 * * *" 8 | jobs: 9 | tests: 10 | runs-on: ubuntu-20.04 11 | name: test 12 | strategy: 13 | fail-fast: false 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | ref: beta 18 | - name: Set up Python 19 | uses: actions/setup-python@v4 20 | with: 21 | python-version: "3.10" 22 | cache: "pip" 23 | - name: install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install --no-cache-dir -r requirements/requirements.txt 27 | pip install --no-cache-dir -r requirements/requirements-extra.txt 28 | pip install --no-cache-dir -r requirements/requirements-dev.txt 29 | pip install --no-cache-dir -r requirements/requirements-types.txt 30 | pip install requests 31 | - name: black linter 32 | run: | 33 | black . 
--check 34 | - name: mypy checker 35 | run: | 36 | mypy tensorcircuit 37 | - name: pylint checker 38 | run: | 39 | pylint tensorcircuit tests 40 | - name: test scripts 41 | run: | 42 | pytest --cov=tensorcircuit --cov-report=xml -svv --benchmark-skip 43 | - name: setup build 44 | run: | 45 | python3 .github/nightly_build/darkify.py 46 | cat setup.py 47 | python3 setup.py bdist_wheel sdist 48 | - name: upload to pypi 49 | # if: startsWith(github.ref, 'refs/tags') 50 | uses: pypa/gh-action-pypi-publish@release/v1 51 | with: 52 | password: ${{ secrets.PYPI_API_TOKEN }} 53 | # skip_existing: true 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | .prettierignore 3 | .idea/ 4 | dataset 5 | *.outdated 6 | *.disable 7 | *.mo 8 | develop 9 | examples-ng 10 | .coverage* 11 | tutorials.po 12 | whitepaper.po 13 | *.result 14 | .vscode/ 15 | .pytest_cache/ 16 | .DS_Store 17 | coverage.xml 18 | .mypy_cache/ 19 | .coverage 20 | .pytest_cache 21 | *.pyc 22 | build 23 | dist 24 | *.egg-info 25 | __pycache__ 26 | .ipynb_checkpoints 27 | examples/Unified AD model.ipynb 28 | docs/source/locale/zh/LC_MESSAGES/textbook.po 29 | docs/source/locale/zh/LC_MESSAGES/whitepapertoc_cn.po 30 | docs/source/locale/zh/LC_MESSAGES/textbooktoc.po 31 | test.qasm 32 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 3 | # number of processors available to use. 4 | jobs=1 5 | 6 | 7 | [MESSAGES CONTROL] 8 | 9 | # Disable the message, report, category or checker with the given id(s). 10 | disable=all 11 | 12 | # Enable the message, report, category or checker with the given id(s). 13 | enable=c-extension-no-member, 14 | bad-indentation, 15 | bare-except, 16 | broad-except, 17 | dangerous-default-value, 18 | function-redefined, 19 | len-as-condition, 20 | line-too-long, 21 | misplaced-future, 22 | missing-final-newline, 23 | mixed-line-endings, 24 | multiple-imports, 25 | multiple-statements, 26 | singleton-comparison, 27 | trailing-comma-tuple, 28 | trailing-newlines, 29 | trailing-whitespace, 30 | unexpected-line-ending-format, 31 | unused-import, 32 | unused-variable, 33 | wildcard-import, 34 | wrong-import-order 35 | 36 | 37 | [FORMAT] 38 | 39 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 40 | expected-line-ending-format=LF 41 | 42 | # Regexp for a line that is allowed to be longer than the limit. 43 | ignore-long-lines=^\s*(# )??$ 44 | 45 | # Maximum number of characters on a single line. 46 | max-line-length=120 47 | 48 | # Maximum number of lines in a module. 49 | max-module-lines=2000 50 | 51 | 52 | [EXCEPTIONS] 53 | 54 | # Exceptions that will emit a warning when being caught. Defaults to 55 | # "BaseException, Exception". 
56 | overgeneral-exceptions=BaseException, 57 | Exception 58 | 59 | # Note that codecc doesn't accept good-names in pylintrc, so we disable invalid-name with per-file pylint comments instead 60 | # it is not neat at all, but this is codecc's limitation :( 61 | # checking variable name conventions makes little sense for scientific code dealing with lots of N, Pauli or QAOA -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | formats: 9 | - pdf 10 | 11 | # Set the version of Python and other tools you might need 12 | build: 13 | os: ubuntu-20.04 14 | tools: 15 | python: "3.8" 16 | 17 | # Build documentation in the docs/ directory with Sphinx 18 | sphinx: 19 | configuration: docs/source/conf.py 20 | # We recommend specifying your dependencies to enable reproducible builds: 21 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 22 | python: 23 | install: 24 | - requirements: requirements/requirements-rtd.txt 25 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you find this software helpful in your research, please cite it as below." 3 | authors: 4 | - family-names: "Zhang" 5 | given-names: "Shi-Xin" 6 | - family-names: "Chen" 7 | given-names: "Yu-Qin" 8 | title: "TensorCircuit" 9 | version: 0.7.0 10 | date-released: 2020-04-19 11 | url: "https://github.com/tencent-quantum-lab/tensorcircuit" 12 | preferred-citation: 13 | type: article 14 | authors: 15 | - family-names: "Zhang" 16 | given-names: "Shi-Xin" 17 | - family-names: "Allcock" 18 | given-names: "Jonathan" 19 | - family-names: "Wan" 20 | given-names: "Zhou-Quan" 21 | - family-names: "Liu" 22 | given-names: "Shuo" 23 | - family-names: "Sun" 24 | given-names: "Jiace" 25 | - family-names: "Yu" 26 | given-names: "Hao" 27 | - family-names: "Yang" 28 | given-names: "Xing-Han" 29 | - family-names: "Qiu" 30 | given-names: "Jiezhong" 31 | - family-names: "Ye" 32 | given-names: "Zhaofeng" 33 | - family-names: "Chen" 34 | given-names: "Yu-Qin" 35 | - family-names: "Lee" 36 | given-names: "Chee-Kong" 37 | - family-names: "Zheng" 38 | given-names: "Yi-Cong" 39 | - family-names: "Jian" 40 | given-names: "Shao-Kai" 41 | - family-names: "Yao" 42 | given-names: "Hong" 43 | - family-names: "Hsieh" 44 | given-names: "Chang-Yu" 45 | - family-names: "Zhang" 46 | given-names: "Shengyu" 47 | doi: "10.22331/q-2023-02-02-912" 48 | journal: "Quantum" 49 | month: 2 50 | pages: 912 51 | title: "TensorCircuit: a Quantum Software Framework for the NISQ Era" 52 | volume: 7 53 | year: 2023 -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to TensorCircuit 2 | 3 | For information on how to contribute, see 4 | [Guide for Contributors](docs/source/contribution.rst). -------------------------------------------------------------------------------- /HISTORY.md: -------------------------------------------------------------------------------- 1 | TensorCircuit was initially a personal project created by @refraction-ray (Shi-Xin Zhang).
He began this project in April 2020, inspired by the MPS quantum simulator [mpsim](https://github.com/grmlarose/mpsim) and the introduction of the Google [TensorNetwork](https://github.com/google/TensorNetwork) package. He further developed the project during 2020 and the first half of 2021, when he was a Ph.D. candidate at Tsinghua University, adding multiple new features and applications for his research purposes. The original TensorCircuit project is now archived on [GitHub](https://github.com/refraction-ray/tensorcircuit/). After joining Tencent in July 2021, he decided to turn the project into a more universal open-source framework, and he has extensively refactored and optimized the codebase since then. As the creator and lead author of TensorCircuit, he thanks all the [contributors](https://github.com/tencent-quantum-lab/tensorcircuit#contributors) who have made TensorCircuit and its ecosystem better. 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include README_cn.md 3 | include HISTORY.md 4 | include LICENSE 5 | include CHANGELOG.md 6 | include docs/source/* 7 | include tests/* -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # benchmark4tc 2 | 3 | `cd scripts` 4 | 5 | `python benchmark.py -n [# of Qubits] -nlayer [# of QC layers] -nitrs [# of max iterations] -nbatch [# of batch for QML task] -t [time limitation] -gpu [0 for no gpu and 1 for gpu enabled] -tcbackend [jax or tensorflow]` 6 | 7 | A `.json` file containing the benchmarking parameters and results will then be created in the data folder; a concrete invocation is sketched below. 8 | 9 | Since tensorcircuit may be installed in a local directory, you may first need to run `export PYTHONPATH=/abs/path/for/tc` in your terminal.
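For illustration, the command template above can be filled in as follows. This is only a sketch: the flag values are placeholders rather than recommended settings, and it assumes the command is run from the `scripts` directory.

```python
# Illustrative invocation of the benchmark driver; all flag values below are placeholders.
import subprocess

cmd = [
    "python", "benchmark.py",
    "-n", "10",           # number of qubits
    "-nlayer", "3",       # number of quantum circuit layers
    "-nitrs", "200",      # maximum number of iterations
    "-t", "60",           # time limitation
    "-gpu", "0",          # 0 for no gpu, 1 for gpu enabled
    "-tcbackend", "jax",  # jax or tensorflow
]
subprocess.run(cmd, check=True)  # results are written to a .json file in the data folder
```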
10 | -------------------------------------------------------------------------------- /benchmarks/requirements.txt: -------------------------------------------------------------------------------- 1 | pennylane==0.18 2 | tensorflow==2.4.1 3 | tensorflow-quantum==0.5.1 4 | tensornetwork==0.4.5 5 | #jaxlib==0.1.71 6 | jax==0.2.21 7 | networkx 8 | sympy 9 | py-cpuinfo 10 | scikit-learn -------------------------------------------------------------------------------- /benchmarks/scripts/benchmark.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import os 3 | import utils 4 | 5 | 6 | if __name__ == "__main__": 7 | _uuid = str(uuid.uuid4()) 8 | nwires, nlayer, nitrs, timeLimit, isgpu, minus, path = utils.arg() 9 | if isgpu == 0: 10 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1" 11 | 12 | else: 13 | import tensorflow as tf 14 | 15 | gpu = tf.config.list_physical_devices("GPU") 16 | tf.config.experimental.set_memory_growth(device=gpu[0], enable=True) 17 | from vqe_pennylane import pennylane_benchmark 18 | from vqe_tc_tf import tensorcircuit_tf_benchmark 19 | from vqe_tc_jax import tensorcircuit_jax_benchmark 20 | from vqe_tfquantum import tfquantum_benchmark 21 | 22 | pl_json = pennylane_benchmark(_uuid, nwires, nlayer, nitrs, timeLimit, isgpu) 23 | tfq_json = tfquantum_benchmark(_uuid, nwires, nlayer, nitrs, timeLimit, isgpu) 24 | tc32_json = tensorcircuit_tf_benchmark( 25 | _uuid, nwires, nlayer, nitrs, timeLimit, isgpu, "32" 26 | ) 27 | tc64_json = tensorcircuit_tf_benchmark( 28 | _uuid, nwires, nlayer, nitrs, timeLimit, isgpu, "64" 29 | ) 30 | tcjax_json = tensorcircuit_jax_benchmark( 31 | _uuid, nwires, nlayer, nitrs, timeLimit, isgpu 32 | ) 33 | utils.save([pl_json, tfq_json, tc32_json, tc64_json, tcjax_json], _uuid, path) 34 | -------------------------------------------------------------------------------- /benchmarks/scripts/qml_benchmark.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import os 3 | import utils 4 | 5 | 6 | if __name__ == "__main__": 7 | _uuid = str(uuid.uuid4()) 8 | nwires, nlayer, nitrs, timeLimit, isgpu, minus, path, nbatch = utils.arg(qml=True) 9 | if isgpu == 0: 10 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1" 11 | 12 | else: 13 | import tensorflow as tf 14 | 15 | gpu = tf.config.list_physical_devices("GPU") 16 | tf.config.experimental.set_memory_growth(device=gpu[0], enable=True) 17 | 18 | train_img, test_img, train_lbl, test_lbl = utils.mnist_data_preprocessing( 19 | nwires - 1 20 | ) 21 | 22 | from qml_pennylane import pennylane_benchmark 23 | from qml_tc_tf import tensorcircuit_tf_benchmark 24 | from qml_tc_jax import tensorcircuit_jax_benchmark 25 | from qml_tfquantum import tfquantum_benchmark 26 | 27 | pl_json = pennylane_benchmark( 28 | _uuid, 29 | nwires, 30 | nlayer, 31 | nitrs, 32 | timeLimit, 33 | isgpu, 34 | train_img, 35 | test_img, 36 | train_lbl, 37 | test_lbl, 38 | nbatch, 39 | False, 40 | ) 41 | tfq_json = tfquantum_benchmark( 42 | _uuid, 43 | nwires, 44 | nlayer, 45 | nitrs, 46 | timeLimit, 47 | isgpu, 48 | train_img, 49 | test_img, 50 | train_lbl, 51 | test_lbl, 52 | nbatch, 53 | ) 54 | tc32_json = tensorcircuit_tf_benchmark( 55 | _uuid, 56 | nwires, 57 | nlayer, 58 | nitrs, 59 | timeLimit, 60 | isgpu, 61 | train_img, 62 | test_img, 63 | train_lbl, 64 | test_lbl, 65 | nbatch, 66 | "32", 67 | ) 68 | tc64_json = tensorcircuit_tf_benchmark( 69 | _uuid, 70 | nwires, 71 | nlayer, 72 | nitrs, 73 | timeLimit, 74 | isgpu, 75 | train_img, 76 | 
test_img, 77 | train_lbl, 78 | test_lbl, 79 | nbatch, 80 | "64", 81 | ) 82 | tcjax_json = tensorcircuit_jax_benchmark( 83 | _uuid, 84 | nwires, 85 | nlayer, 86 | nitrs, 87 | timeLimit, 88 | isgpu, 89 | train_img, 90 | test_img, 91 | train_lbl, 92 | test_lbl, 93 | nbatch, 94 | "64", 95 | ) 96 | utils.save([pl_json, tfq_json, tc32_json, tc64_json, tcjax_json], _uuid, path) 97 | -------------------------------------------------------------------------------- /check_all.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | set -e 3 | echo "black check" 4 | black . --check 5 | echo "mypy check" 6 | mypy tensorcircuit 7 | echo "pylint check" 8 | pylint tensorcircuit tests examples/*.py 9 | echo "pytest check" 10 | pytest -n auto --cov=tensorcircuit -vv -W ignore::DeprecationWarning 11 | # for test on gpu machine, please set `export TF_FORCE_GPU_ALLOW_GROWTH=true` for tf 12 | # and `export XLA_PYTHON_CLIENT_PREALLOCATE=false` for jax to avoid OOM in testing 13 | echo "sphinx check" 14 | cd docs && sphinx-build source build/html && sphinx-build source -D language="zh" build/html_cn 15 | echo "all checks passed, congratulation! 💐" 16 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "tensorcircuit/applications" 3 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.0.3-cudnn8-runtime-ubuntu18.04 2 | 3 | # https://gitlab.com/nvidia/container-images/cuda/-/blob/master/doc/supported-tags.md 4 | 5 | RUN apt-key del 7fa2af80 6 | RUN apt-key del 3bf863cc 7 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub 8 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub 9 | 10 | # Nvidia is breaking the docker images on Apr 2022 ... 11 | # https://github.com/NVIDIA/nvidia-docker/issues/1631 12 | 13 | RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \ 14 | wget \ 15 | cuda-command-line-tools-11-0 \ 16 | git \ 17 | vim \ 18 | pandoc 19 | 20 | RUN wget -q -P /tmp \ 21 | https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \ 22 | && bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \ 23 | && rm /tmp/Miniconda3-latest-Linux-x86_64.sh 24 | 25 | ENV PATH="/opt/conda/bin:$PATH" 26 | 27 | RUN conda install -y \ 28 | cudatoolkit=11.0 \ 29 | pip \ 30 | python=3.8 31 | 32 | COPY requirements/requirements-docker.txt /app/requirements-docker.txt 33 | 34 | RUN pip install -r /app/requirements-docker.txt 35 | 36 | RUN pip install jaxlib==0.3.2+cuda11.cudnn805 -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html 37 | 38 | RUN pip install -U git+https://github.com/jcmgray/cotengra.git 39 | 40 | RUN pip install ray 41 | 42 | # requirements conflict for ray 43 | 44 | RUN ln -s /usr/local/cuda-11.0/targets/x86_64-linux/lib/libcusolver.so.10 /usr/local/cuda-11.0/targets/x86_64-linux/lib/libcusolver.so.11 45 | 46 | RUN echo export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.0/targets/x86_64-linux/lib >> ~/.bashrc \ 47 | && echo export PYTHONPATH=/app >> ~/.bashrc \ 48 | && echo export TF_CPP_MIN_LOG_LEVEL=3 >> ~/.bashrc 49 | 50 | COPY . 
/app 51 | 52 | # RUN export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.1/targets/x86_64-linux/lib 53 | 54 | WORKDIR /app 55 | 56 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/Dockerfile_v2: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04 2 | # nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04 3 | 4 | RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \ 5 | wget \ 6 | git \ 7 | vim \ 8 | pandoc 9 | 10 | RUN wget -q -P /tmp \ 11 | https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \ 12 | && bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \ 13 | && rm /tmp/Miniconda3-latest-Linux-x86_64.sh 14 | 15 | ENV PATH="/opt/conda/bin:$PATH" 16 | 17 | RUN conda install -y \ 18 | pip \ 19 | python=3.10 20 | 21 | COPY requirements/requirements-docker-v2.txt /requirements-docker-v2.txt 22 | 23 | # RUN pip install -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html 24 | RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html 25 | 26 | # RUN pip install nvidia-cudnn-cu11==8.6.0.163 ray 27 | RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple nvidia-cudnn-cu11==8.6.0.163 ray 28 | 29 | RUN pip install tensorcircuit 30 | 31 | # requirements conflict for ray 32 | # jax must have cudnn>8.6, otherwise it fails when initializing arrays on gpu, 33 | # while torch insists on cudnn 8.5 in its setup, but 8.6 also works for torch 34 | 35 | RUN echo export TF_CPP_MIN_LOG_LEVEL=3 >> ~/.bashrc 36 | 37 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | Run the following command from the parent path to build the docker image for tensorcircuit: 2 | 3 | ```bash 4 | sudo docker build . -f docker/Dockerfile -t tensorcircuit 5 | ``` 6 | 7 | Since v0.10, we have introduced a new docker environment based on ubuntu20.04 + cuda11.7 + py3.10 (with the tensorcircuit package installed via pip). Build the new docker image with 8 | 9 | ```bash 10 | sudo docker build . -f docker/Dockerfile_v2 -t tensorcircuit 11 | ``` 12 | 13 | One can also pull the [official image](https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit) from DockerHub: 14 | 15 | ```bash 16 | sudo docker pull tensorcircuit/tensorcircuit 17 | ``` 18 | 19 | Run the docker container with the following command: 20 | 21 | ```bash 22 | sudo docker run -it --network host --gpus all tensorcircuit 23 | 24 | # if you also want to mount the local source code, add the args `-v "$(pwd)":/root` 25 | 26 | # use tensorcircuit/tensorcircuit:latest to run the prebuilt docker image from DockerHub 27 | ``` 28 | 29 | Run `export CUDA_VISIBLE_DEVICES=-1` if you want to test only on CPU. 30 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = tensorcircuit 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help".
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/ext/toctree_filter.py: -------------------------------------------------------------------------------- 1 | """ 2 | sphinx extension for conditional toc 3 | """ 4 | 5 | import re 6 | from sphinx.directives.other import TocTree 7 | 8 | # adpoted from https://stackoverflow.com/a/46600038 9 | 10 | 11 | def setup(app): 12 | app.add_directive("toctree-filt", TocTreeFilt) 13 | return {"version": "1.0.0"} 14 | 15 | 16 | class TocTreeFilt(TocTree): 17 | """ 18 | Directive to notify Sphinx about the hierarchical structure of the docs, 19 | and to include a table-of-contents like tree in the current document. This 20 | version filters the entries based on a list of prefixes. We simply filter 21 | the content of the directive and call the super's version of run. The 22 | list of exclusions is stored in the **toc_filter_exclusion** list. Any 23 | table of content entry prefixed by one of these strings will be excluded. 24 | If `toc_filter_exclusion=['secret','draft']` then all toc entries of the 25 | form `:secret:ultra-api` or `:draft:new-features` will be excuded from 26 | the final table of contents. Entries without a prefix are always included. 27 | """ 28 | 29 | hasPat = re.compile("^\s*:(.+):(.+)$") 30 | 31 | # Remove any entries in the content that we dont want and strip 32 | # out any filter prefixes that we want but obviously don't want the 33 | # prefix to mess up the file name. 34 | def filter_entries(self, entries): 35 | excl = [self.state.document.settings.env.config.language] 36 | filtered = [] 37 | for e in entries: 38 | m = self.hasPat.match(e) 39 | if m != None: 40 | if not m.groups()[0] in excl: 41 | filtered.append(m.groups()[1]) 42 | else: 43 | filtered.append(e) 44 | return filtered 45 | 46 | def run(self): 47 | # Remove all TOC entries that should not be on display 48 | self.content = self.filter_entries(self.content) 49 | return super().run() 50 | -------------------------------------------------------------------------------- /docs/source/api/about.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.about 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.about 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/abstractcircuit.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.abstractcircuit 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.abstractcircuit 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications 2 | ================================================================================ 3 | .. 
toctree:: 4 | applications/ai.rst 5 | applications/dqas.rst 6 | applications/finance.rst 7 | applications/graphdata.rst 8 | applications/layers.rst 9 | applications/optimization.rst 10 | applications/physics.rst 11 | applications/utils.rst 12 | applications/vags.rst 13 | applications/van.rst 14 | applications/vqes.rst -------------------------------------------------------------------------------- /docs/source/api/applications/ai.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.ai 2 | ================================================================================ 3 | .. toctree:: 4 | ai/ensemble.rst -------------------------------------------------------------------------------- /docs/source/api/applications/ai/ensemble.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.ai.ensemble 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.ai.ensemble 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/dqas.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.dqas 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.dqas 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/finance.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.finance 2 | ================================================================================ 3 | .. toctree:: 4 | finance/portfolio.rst -------------------------------------------------------------------------------- /docs/source/api/applications/finance/portfolio.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.finance.portfolio 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.finance.portfolio 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/graphdata.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.graphdata 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.graphdata 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/layers.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.layers 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.applications.layers 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/optimization.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.optimization 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.optimization 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/physics.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.physics 2 | ================================================================================ 3 | .. toctree:: 4 | physics/baseline.rst 5 | physics/fss.rst -------------------------------------------------------------------------------- /docs/source/api/applications/physics/baseline.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.physics.baseline 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.physics.baseline 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/physics/fss.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.physics.fss 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.physics.fss 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/utils.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.utils 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.utils 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/vags.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.vags 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.vags 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/van.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.van 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.applications.van 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/applications/vqes.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.applications.vqes 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.applications.vqes 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends 2 | ================================================================================ 3 | .. toctree:: 4 | backends/backend_factory.rst 5 | backends/cupy_backend.rst 6 | backends/jax_backend.rst 7 | backends/numpy_backend.rst 8 | backends/pytorch_backend.rst 9 | backends/tensorflow_backend.rst -------------------------------------------------------------------------------- /docs/source/api/backends/backend_factory.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.backend_factory 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.backends.backend_factory 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends/cupy_backend.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.cupy_backend 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.backends.cupy_backend 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends/jax_backend.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.jax_backend 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.backends.jax_backend 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends/numpy_backend.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.numpy_backend 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.backends.numpy_backend 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends/pytorch_backend.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.pytorch_backend 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.backends.pytorch_backend 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/backends/tensorflow_backend.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.backends.tensorflow_backend 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.backends.tensorflow_backend 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/basecircuit.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.basecircuit 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.basecircuit 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/channels.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.channels 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.channels 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/circuit.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.circuit 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.circuit 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud 2 | ================================================================================ 3 | .. toctree:: 4 | cloud/abstraction.rst 5 | cloud/apis.rst 6 | cloud/config.rst 7 | cloud/local.rst 8 | cloud/quafu_provider.rst 9 | cloud/tencent.rst 10 | cloud/utils.rst 11 | cloud/wrapper.rst -------------------------------------------------------------------------------- /docs/source/api/cloud/abstraction.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.abstraction 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.abstraction 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/apis.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.apis 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.cloud.apis 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/config.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.config 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.config 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/local.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.local 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.local 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/quafu_provider.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.quafu_provider 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.quafu_provider 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/tencent.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.tencent 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.tencent 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/utils.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.utils 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.utils 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cloud/wrapper.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cloud.wrapper 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cloud.wrapper 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/compiler.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.compiler 2 | ================================================================================ 3 | .. 
toctree:: 4 | compiler/composed_compiler.rst 5 | compiler/qiskit_compiler.rst 6 | compiler/simple_compiler.rst -------------------------------------------------------------------------------- /docs/source/api/compiler/composed_compiler.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.compiler.composed_compiler 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.compiler.composed_compiler 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/compiler/qiskit_compiler.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.compiler.qiskit_compiler 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.compiler.qiskit_compiler 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/compiler/simple_compiler.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.compiler.simple_compiler 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.compiler.simple_compiler 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/cons.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.cons 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.cons 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/densitymatrix.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.densitymatrix 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.densitymatrix 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/experimental.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.experimental 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.experimental 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/fgs.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.fgs 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.fgs 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/gates.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.gates 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.gates 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/interfaces.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces 2 | ================================================================================ 3 | .. toctree:: 4 | interfaces/numpy.rst 5 | interfaces/scipy.rst 6 | interfaces/tensorflow.rst 7 | interfaces/tensortrans.rst 8 | interfaces/torch.rst -------------------------------------------------------------------------------- /docs/source/api/interfaces/numpy.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces.numpy 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.interfaces.numpy 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/interfaces/scipy.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces.scipy 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.interfaces.scipy 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/interfaces/tensorflow.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces.tensorflow 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.interfaces.tensorflow 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/interfaces/tensortrans.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces.tensortrans 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.interfaces.tensortrans 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/interfaces/torch.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.interfaces.torch 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.interfaces.torch 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/keras.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.keras 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.keras 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/mps_base.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.mps_base 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.mps_base 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/mpscircuit.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.mpscircuit 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.mpscircuit 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/noisemodel.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.noisemodel 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.noisemodel 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/quantum.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.quantum 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.quantum 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/results.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results 2 | ================================================================================ 3 | .. toctree:: 4 | results/counts.rst 5 | results/qem.rst 6 | results/readout_mitigation.rst -------------------------------------------------------------------------------- /docs/source/api/results/counts.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results.counts 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.results.counts 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/results/qem.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results.qem 2 | ================================================================================ 3 | .. 
toctree:: 4 | qem/benchmark_circuits.rst 5 | qem/qem_methods.rst -------------------------------------------------------------------------------- /docs/source/api/results/qem/benchmark_circuits.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results.qem.benchmark_circuits 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.results.qem.benchmark_circuits 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/results/qem/qem_methods.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results.qem.qem_methods 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.results.qem.qem_methods 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/results/readout_mitigation.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.results.readout_mitigation 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.results.readout_mitigation 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/shadows.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.shadows 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.shadows 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/simplify.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.simplify 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.simplify 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates 2 | ================================================================================ 3 | .. toctree:: 4 | templates/ansatz.rst 5 | templates/blocks.rst 6 | templates/chems.rst 7 | templates/conversions.rst 8 | templates/dataset.rst 9 | templates/graphs.rst 10 | templates/measurements.rst -------------------------------------------------------------------------------- /docs/source/api/templates/ansatz.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.ansatz 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.templates.ansatz 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/blocks.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.blocks 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.blocks 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/chems.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.chems 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.chems 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/conversions.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.conversions 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.conversions 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/dataset.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.dataset 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.dataset 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/graphs.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.graphs 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.graphs 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/templates/measurements.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.templates.measurements 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.templates.measurements 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/torchnn.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.torchnn 2 | ================================================================================ 3 | .. 
automodule:: tensorcircuit.torchnn 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/translation.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.translation 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.translation 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/utils.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.utils 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.utils 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/api/vis.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit.vis 2 | ================================================================================ 3 | .. automodule:: tensorcircuit.vis 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | :inherited-members: -------------------------------------------------------------------------------- /docs/source/contribs/development_MacM2.md: -------------------------------------------------------------------------------- 1 | # TensorCircuit Installation Guide on macOS 2 | 3 | Contributed by [Hong-Ye Hu](https://github.com/hongyehu) 4 | 5 | .. warning:: 6 | This page is deprecated. Please visit `the updated tutorial `_ for the latest information. 7 | 8 | The key issue addressed in this document is **how to install both TensorFlow and Jax on an M2-chip macOS machine without conflicts**. 9 | 10 | ## Starting From Scratch 11 | 12 | ### Install Xcode Command Line Tools 13 | 14 | This step requires graphical access to the machine. 15 | 16 | If your internet connection is good, run `xcode-select --install` to install. 17 | 18 | Otherwise, download the Command Line Tools installation image from [Apple](https://developer.apple.com/download/more/) and install it manually. 19 | 20 | ## Install Miniconda 21 | 22 | Due to limitations of macOS and its packages, the latest version of Python does not always work as desired, so installing Miniconda is advised. Using a conda virtual environment is also always a good habit. 23 | 24 | ``` 25 | curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh 26 | bash ~/miniconda.sh -b -p $HOME/miniconda 27 | source ~/miniconda/bin/activate 28 | ``` 29 | 30 | ## Install Packages 31 | First, create a virtual environment and make sure the Python version is 3.8.5: 32 | ``` 33 | conda create --name NewEnv python==3.8.5 34 | conda activate NewEnv 35 | ``` 36 | Then, install TensorFlow from the `.whl` file (which can be downloaded from this [URL](https://drive.google.com/drive/folders/1oSipZLnoeQB0Awz8U68KYeCPsULy_dQ7)). This installs TensorFlow version 2.4.1: 37 | ``` 38 | pip install ~/Downloads/tensorflow-2.4.1-py3-none-any.whl 39 | ``` 40 | Next, install **Jax** and **Optax**: 41 | ``` 42 | conda install jax==0.3.0 43 | conda install optax==0.1.4 44 | ``` 45 | Now, hopefully, you should be able to use both Jax and TensorFlow in this environment.
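As a quick sanity check, you can confirm that both frameworks import and evaluate a small expression in the same environment. The snippet below is a minimal sketch, not part of the original recipe; the version numbers it expects come from the pinned installs above.

```python
# Sanity check: both frameworks should import and run a trivial computation.
# The commented version numbers are expectations from the steps above, not guarantees.
import jax
import jax.numpy as jnp
import tensorflow as tf

print("TensorFlow:", tf.__version__)  # expected 2.4.1 from the .whl install
print("Jax:", jax.__version__)        # expected 0.3.0 from conda

print(tf.reduce_sum(tf.constant([1.0, 2.0])).numpy())  # -> 3.0
print(jnp.sum(jnp.array([1.0, 2.0])))                  # -> 3.0
```

If both checks print 3.0, the two frameworks coexist in this environment.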
But sometimes it may give you the error "ERROR: package Chardet not found." 46 | If that is the case, you can install it with `conda install chardet`. 47 | Lastly, install tensorcircuit: 48 | ``` 49 | pip install tensorcircuit 50 | ``` 51 | This is the solution that seems to work for M2-chip macOS. Please let me know if there is a better solution! 52 | 53 | 54 | -------------------------------------------------------------------------------- /docs/source/contribs/development_Mac_cn.md: -------------------------------------------------------------------------------- 1 | # TensorCircuit Installation Guide on macOS 2 | 3 | Written by [_Mark (Zixuan) Song_](https://marksong.tech) 4 | 5 | Since Apple has updated TensorFlow, installation on M-series (up to M2) and Intel-series Macs follows exactly the same procedure. 6 | 7 | ## Starting From Scratch 8 | 9 | For a brand-new macOS installation or one without Xcode installed. 10 | 11 | If you already have Xcode installed, skip ahead to installing the TC backends. 12 | 13 | ### Install Xcode Command Line Tools 14 | 15 | This step requires graphical access to the machine. 16 | 17 | If your internet connection is good, run `xcode-select --install` to install. 18 | 19 | Otherwise, download the Command Line Tools installation image from [Apple](https://developer.apple.com/download/more/) and install it manually. 20 | 21 | ## Install a TC Backend 22 | 23 | Four backends are available: NumPy, TensorFlow, Jax, and Torch. 24 | 25 | ### Install Jax or PyTorch (optional) 26 | 27 | ```bash 28 | pip install [Package Name] 29 | ``` 30 | 31 | ### Install TensorFlow (optional, recommended) 32 | 33 | #### Install Miniconda (optional, recommended) 34 | 35 | If you want to use Apple's macOS-optimized TensorFlow (`tensorflow-macos`) or the TensorFlow GPU plugin (`tensorflow-metal`), install Miniconda. 36 | 37 | If you want to use the original TensorFlow developed by Google (`tensorflow`), skip this step. 38 | 39 | ```bash 40 | curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh 41 | bash ~/miniconda.sh -b -p $HOME/miniconda 42 | source ~/miniconda/bin/activate 43 | conda install -c apple tensorflow-deps 44 | ``` 45 | 46 | #### Installation 47 | 48 | ```bash 49 | pip install tensorflow 50 | ``` 51 | 52 | If you want to use Apple's Metal backend for TensorFlow, additionally run (not recommended): 53 | 54 | ```bash 55 | pip install tensorflow-metal 56 | ``` 57 | 58 | #### Verify the TensorFlow Installation 59 | 60 | ```python 61 | import tensorflow as tf 62 | 63 | cifar = tf.keras.datasets.cifar100 64 | (x_train, y_train), (x_test, y_test) = cifar.load_data() 65 | model = tf.keras.applications.ResNet50( 66 | include_top=True, 67 | weights=None, 68 | input_shape=(32, 32, 3), 69 | classes=100,) 70 | 71 | loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) 72 | model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"]) 73 | model.fit(x_train, y_train, epochs=5, batch_size=64) 74 | ``` 75 | 76 | ## Install TensorCircuit 77 | 78 | ```bash 79 | pip install tensorcircuit 80 | ``` 81 | 82 | ## Benchmark and Comparison 83 | 84 | The following data was obtained by running `benchmarks/scripts/vqe_tc.py` 10 times and averaging the results. 85 | 86 |
|  | Original TensorFlow | Apple-optimized TensorFlow | Apple-optimized TensorFlow with the TensorFlow Metal plugin |
| --- | --- | --- | --- |
| Construction time | 11.49241641s | 11.31878941s | 11.6103961s |
| Time per iteration | 0.002313011s | 0.002333004s | 0.046412581s |
| Total time | 11.72371747s | 11.55208979s | 16.25165417s |
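For reference, one rough way to reproduce such an average is sketched below. This is an assumption-laden sketch rather than the original measurement procedure: it times end-to-end wall-clock runs instead of the separate construction and per-iteration phases reported above, and it assumes `benchmarks/scripts/vqe_tc.py` runs standalone with its default settings from the repository root.

```python
# Rough reproduction sketch: launch the benchmark script 10 times
# and report the mean wall-clock duration of a full run.
import subprocess
import sys
import time

runs = 10
durations = []
for _ in range(runs):
    start = time.perf_counter()
    subprocess.run([sys.executable, "benchmarks/scripts/vqe_tc.py"], check=True)
    durations.append(time.perf_counter() - start)

print(f"mean wall-clock time over {runs} runs: {sum(durations) / runs:.3f} s")
```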
112 | 113 | 114 | As of July 2023, this has been tested on an Intel i9 Mac running Ventura, an M1 Mac running Ventura, an M2 Mac running Ventura, and an M2 Mac running the Sonoma beta. -------------------------------------------------------------------------------- /docs/source/contribs/development_wsl2.rst: -------------------------------------------------------------------------------- 1 | Run TensorCircuit on Windows with WSL2 (Windows Subsystem for Linux 2) 2 | =========================================================================== 3 | 4 | Contributed by `YHPeter `_ (Peter Yu) 5 | 6 | Reminder: if you are unable to use JAX, you can still use the Numpy/Tensorflow/Pytorch backends to run the demonstrations. 7 | 8 | Step 1. 9 | Install WSL2, following the official installation instructions: https://docs.microsoft.com/en-us/windows/wsl/install 10 | 11 | Step 2. 12 | Install CUDA for GPU support if you want to use a GPU accelerator. 13 | The official CUDA installation guide for WSL2: https://docs.nvidia.com/cuda/wsl-user-guide/index.html#ch02-getting-started 14 | 15 | Step 3. 16 | Follow the Linux installation instructions to finish the installation. 17 | 18 | .. list-table:: **System Support Summary** 19 | :header-rows: 1 20 | 21 | * - Backend 22 | - Numpy 23 | - TensorFlow 24 | - JAX 25 | - Pytorch 26 | * - Suggested Package Version 27 | - >= 1.20.0 28 | - >= 2.7.0 29 | - >= 0.3.0 30 | - >= 1.12 31 | * - OS Support without GPU Accelerator 32 | - Windows/MacOS/Linux 33 | - Windows/MacOS/Linux 34 | - Windows/MacOS/Linux 35 | - Windows/MacOS/Linux 36 | * - OS Support with GPU Accelerator 37 | - No Support for GPU 38 | - Windows(WSL2, docker)/`MacOS `_/Linux 39 | - Windows(WSL2, docker)/MacOS/Linux 40 | - Windows(WSL2, docker)/MacOS(torch>=1.12)/Linux 41 | * - Platform with TPU Accelerator 42 | - No Support for TPU 43 | - `GCP - Tensorflow with TPU `_ 44 | - `GCP - JAX with TPU `_ 45 | - `GCP - Pytorch with TPU `_ 46 | 47 | Tip: currently, we do not recommend using a TPU accelerator. -------------------------------------------------------------------------------- /docs/source/locale/zh/LC_MESSAGES/index_cn.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2020, The TensorCircuit Authors 3 | # This file is distributed under the same license as the tensorcircuit 4 | # package. 5 | # FIRST AUTHOR , 2022.
6 | # 7 | msgid "" 8 | msgstr "" 9 | "Project-Id-Version: tensorcircuit\n" 10 | "Report-Msgid-Bugs-To: \n" 11 | "POT-Creation-Date: 2022-04-08 21:05+0800\n" 12 | "PO-Revision-Date: 2022-04-16 22:35+0800\n" 13 | "Last-Translator: Xinghan Yang\n" 14 | "Language-Team: LANGUAGE \n" 15 | "MIME-Version: 1.0\n" 16 | "Content-Type: text/plain; charset=UTF-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.9.1\n" 19 | "Language: cn\n" 20 | "X-Generator: Poedit 1.6.11\n" 21 | 22 | #: ../../source/index_cn.rst:2 23 | msgid "Guide to TensorCircuit" 24 | msgstr "TensoCircuit 指南" 25 | 26 | #: ../../source/index_cn.rst:16 27 | msgid "API References" 28 | msgstr "API 参考" 29 | 30 | #: ../../source/index_cn.rst:25 31 | msgid "Indices and Tables" 32 | msgstr "索引和表格" 33 | 34 | #: ../../source/index_cn.rst:27 35 | msgid ":ref:`genindex`" 36 | msgstr ":ref:`genindex`" 37 | 38 | #: ../../source/index_cn.rst:28 39 | msgid ":ref:`modindex`" 40 | msgstr ":ref:`modindex`" 41 | 42 | #: ../../source/index_cn.rst:29 43 | msgid ":ref:`search`" 44 | msgstr ":ref:`search`" 45 | -------------------------------------------------------------------------------- /docs/source/locale/zh/LC_MESSAGES/tutorial.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2020, The TensorCircuit Authors 3 | # This file is distributed under the same license as the tensorcircuit 4 | # package. 5 | # FIRST AUTHOR , 2022. 6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: tensorcircuit \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2022-04-08 21:05+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language-Team: LANGUAGE \n" 16 | "MIME-Version: 1.0\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.9.1\n" 20 | 21 | #: ../../source/tutorial.rst:3 22 | msgid "Jupyter Tutorials" 23 | msgstr "" 24 | 25 | -------------------------------------------------------------------------------- /docs/source/locale/zh/LC_MESSAGES/tutorial_cn.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2020, The TensorCircuit Authors 3 | # This file is distributed under the same license as the tensorcircuit 4 | # package. 5 | # FIRST AUTHOR , 2022. 6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: tensorcircuit \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2022-04-08 21:05+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language-Team: LANGUAGE \n" 16 | "MIME-Version: 1.0\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.9.1\n" 20 | 21 | #: ../../source/tutorial_cn.rst:3 22 | msgid "案例教程" 23 | msgstr "" 24 | 25 | -------------------------------------------------------------------------------- /docs/source/locale/zh/LC_MESSAGES/whitepapertoc.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) 2020, The TensorCircuit Authors 3 | # This file is distributed under the same license as the tensorcircuit 4 | # package. 5 | # FIRST AUTHOR , 2022. 
6 | # 7 | #, fuzzy 8 | msgid "" 9 | msgstr "" 10 | "Project-Id-Version: tensorcircuit \n" 11 | "Report-Msgid-Bugs-To: \n" 12 | "POT-Creation-Date: 2022-05-09 17:10+0800\n" 13 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 14 | "Last-Translator: FULL NAME \n" 15 | "Language-Team: LANGUAGE \n" 16 | "MIME-Version: 1.0\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.9.1\n" 20 | 21 | #: ../../source/whitepapertoc.rst:3 22 | msgid "Whitepaper Tutorials" 23 | msgstr "" 24 | 25 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | tensorcircuit 2 | ================================================================================ 3 | .. toctree:: 4 | ./api/about.rst 5 | ./api/abstractcircuit.rst 6 | ./api/applications.rst 7 | ./api/backends.rst 8 | ./api/basecircuit.rst 9 | ./api/channels.rst 10 | ./api/circuit.rst 11 | ./api/cloud.rst 12 | ./api/compiler.rst 13 | ./api/cons.rst 14 | ./api/densitymatrix.rst 15 | ./api/experimental.rst 16 | ./api/fgs.rst 17 | ./api/gates.rst 18 | ./api/interfaces.rst 19 | ./api/keras.rst 20 | ./api/mps_base.rst 21 | ./api/mpscircuit.rst 22 | ./api/noisemodel.rst 23 | ./api/quantum.rst 24 | ./api/results.rst 25 | ./api/shadows.rst 26 | ./api/simplify.rst 27 | ./api/templates.rst 28 | ./api/torchnn.rst 29 | ./api/translation.rst 30 | ./api/utils.rst 31 | ./api/vis.rst -------------------------------------------------------------------------------- /docs/source/modules.rst.backup: -------------------------------------------------------------------------------- 1 | tensorcircuit 2 | ================================================== 3 | .. 
toctree:: 4 | ./api/about.rst 5 | ./api/abstractcircuit.rst 6 | ./api/applications.rst 7 | ./api/backends.rst 8 | ./api/basecircuit.rst 9 | ./api/channels.rst 10 | ./api/circuit.rst 11 | ./api/cloud.rst 12 | ./api/compiler.rst 13 | ./api/cons.rst 14 | ./api/densitymatrix.rst 15 | ./api/experimental.rst 16 | ./api/gates.rst 17 | ./api/interfaces.rst 18 | ./api/keras.rst 19 | ./api/mps_base.rst 20 | ./api/mpscircuit.rst 21 | ./api/noisemodel.rst 22 | ./api/quantum.rst 23 | ./api/results.rst 24 | ./api/simplify.rst 25 | ./api/templates.rst 26 | ./api/torchnn.rst 27 | ./api/translation.rst 28 | ./api/utils.rst 29 | ./api/vis.rst -------------------------------------------------------------------------------- /docs/source/statics/bell_pair_block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/bell_pair_block.png -------------------------------------------------------------------------------- /docs/source/statics/example_block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/example_block.png -------------------------------------------------------------------------------- /docs/source/statics/landscape.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/landscape.jpg -------------------------------------------------------------------------------- /docs/source/statics/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/logo.png -------------------------------------------------------------------------------- /docs/source/statics/logov2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/logov2.jpg -------------------------------------------------------------------------------- /docs/source/statics/qd_alg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/qd_alg.jpg -------------------------------------------------------------------------------- /docs/source/statics/quop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/quop.png -------------------------------------------------------------------------------- /docs/source/statics/teleportation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/teleportation.png -------------------------------------------------------------------------------- /docs/source/statics/tianxuan_s1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/tianxuan_s1.png -------------------------------------------------------------------------------- /docs/source/statics/vmap_ingredients.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/statics/vmap_ingredients.png -------------------------------------------------------------------------------- /docs/source/textbook/img/3GHZ.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/3GHZ.png -------------------------------------------------------------------------------- /docs/source/textbook/img/CNOT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/CNOT.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Circuit_G.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Circuit_G.png -------------------------------------------------------------------------------- /docs/source/textbook/img/DJ_algorithm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/DJ_algorithm.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Fredkin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Fredkin.png -------------------------------------------------------------------------------- /docs/source/textbook/img/GHZ_hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/GHZ_hist.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Grover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Grover.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Grover_iteration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Grover_iteration.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Toffoli.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Toffoli.png -------------------------------------------------------------------------------- /docs/source/textbook/img/Toffoli_circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/Toffoli_circuit.png -------------------------------------------------------------------------------- /docs/source/textbook/img/VQE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/VQE.png -------------------------------------------------------------------------------- /docs/source/textbook/img/bloch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/bloch.png -------------------------------------------------------------------------------- /docs/source/textbook/img/classical_gates.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/classical_gates.png -------------------------------------------------------------------------------- /docs/source/textbook/img/densecoding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/densecoding.png -------------------------------------------------------------------------------- /docs/source/textbook/img/density_Bell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/density_Bell.png -------------------------------------------------------------------------------- /docs/source/textbook/img/google_sycamore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/google_sycamore.png -------------------------------------------------------------------------------- /docs/source/textbook/img/half_adder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/half_adder.png -------------------------------------------------------------------------------- /docs/source/textbook/img/quantum_circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/quantum_circuit.png -------------------------------------------------------------------------------- /docs/source/textbook/img/reversible_gates.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/reversible_gates.png -------------------------------------------------------------------------------- /docs/source/textbook/img/simple_oracle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/simple_oracle.png -------------------------------------------------------------------------------- /docs/source/textbook/img/square_lattice.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/square_lattice.png -------------------------------------------------------------------------------- /docs/source/textbook/img/swing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/swing.png -------------------------------------------------------------------------------- /docs/source/textbook/img/teleportation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/teleportation.png -------------------------------------------------------------------------------- /docs/source/textbook/img/vqa.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/docs/source/textbook/img/vqa.jpg -------------------------------------------------------------------------------- /docs/source/textbooktoc.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | 量子计算教程 3 | ================================= 4 | 5 | .. toctree:: 6 | 7 | textbook/chap1.ipynb 8 | textbook/chap2.ipynb 9 | textbook/chap3.ipynb 10 | textbook/chap4.ipynb 11 | textbook/chap5.ipynb -------------------------------------------------------------------------------- /docs/source/tutorial.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | Jupyter Tutorials 3 | ================================= 4 | 5 | .. 
toctree:: 6 | 7 | tutorials/circuit_basics.ipynb 8 | tutorials/qaoa.ipynb 9 | tutorials/qaoa_bo.ipynb 10 | tutorials/qaoa_nae3sat.ipynb 11 | tutorials/qaoa_quantum_dropout.ipynb 12 | tutorials/tfim_vqe.ipynb 13 | tutorials/mnist_qml.ipynb 14 | tutorials/torch_qml.ipynb 15 | tutorials/qml_scenarios.ipynb 16 | tutorials/vqe_h2o.ipynb 17 | tutorials/tfim_vqe_diffreph.ipynb 18 | tutorials/mera.ipynb 19 | tutorials/gradient_benchmark.ipynb 20 | tutorials/contractors.ipynb 21 | tutorials/operator_spreading.ipynb 22 | tutorials/optimization_and_expressibility.ipynb 23 | tutorials/vqex_mbl.ipynb 24 | tutorials/dqas.ipynb 25 | tutorials/barren_plateaus.ipynb 26 | tutorials/qubo_problem.ipynb 27 | tutorials/portfolio_optimization.ipynb 28 | tutorials/imag_time_evo.ipynb 29 | tutorials/classical_shadows.ipynb 30 | tutorials/sklearn_svc.ipynb 31 | tutorials/qcloud_sdk_demo.ipynb -------------------------------------------------------------------------------- /docs/source/tutorial_cn.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | 案例教程 3 | ================================= 4 | 5 | .. toctree:: 6 | 7 | tutorials/circuit_basics_cn.ipynb 8 | tutorials/qaoa_cn.ipynb 9 | tutorials/tfim_vqe_cn.ipynb 10 | tutorials/mnist_qml_cn.ipynb 11 | tutorials/torch_qml_cn.ipynb 12 | tutorials/qml_scenarios_cn.ipynb 13 | tutorials/vqe_h2o_cn.ipynb 14 | tutorials/tfim_vqe_diffreph_cn.ipynb 15 | tutorials/mera_cn.ipynb 16 | tutorials/gradient_benchmark_cn.ipynb 17 | tutorials/contractors_cn.ipynb 18 | tutorials/operator_spreading_cn.ipynb 19 | tutorials/optimization_and_expressibility_cn.ipynb 20 | tutorials/vqex_mbl_cn.ipynb 21 | tutorials/dqas_cn.ipynb 22 | tutorials/barren_plateaus_cn.ipynb 23 | tutorials/sklearn_svc_cn.ipynb -------------------------------------------------------------------------------- /docs/source/whitepapertoc.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | Whitepaper Tutorials 3 | ================================= 4 | 5 | .. toctree:: 6 | 7 | whitepaper/3-circuits-gates.ipynb 8 | whitepaper/4-gradient-optimization.ipynb 9 | whitepaper/5-density-matrix.ipynb 10 | whitepaper/6-1-conditional-measurements-post-selection.ipynb 11 | whitepaper/6-2-pauli-string-expectation.ipynb 12 | whitepaper/6-3-vmap.ipynb 13 | whitepaper/6-4-quoperator.ipynb 14 | whitepaper/6-5-custom-contraction.ipynb 15 | whitepaper/6-6-advanced-automatic-differentiation.ipynb -------------------------------------------------------------------------------- /docs/source/whitepapertoc_cn.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | 白皮书教程 3 | ================================= 4 | 5 | .. 
toctree:: 6 | 7 | whitepaper/3-circuits-gates_cn.ipynb 8 | whitepaper/4-gradient-optimization_cn.ipynb 9 | whitepaper/5-density-matrix_cn.ipynb 10 | whitepaper/6-1-conditional-measurements-post-selection_cn.ipynb 11 | whitepaper/6-2-pauli-string-expectation_cn.ipynb 12 | whitepaper/6-3-vmap_cn.ipynb 13 | whitepaper/6-4-quoperator_cn.ipynb 14 | whitepaper/6-5-custom-contraction_cn.ipynb 15 | whitepaper/6-6-advanced-automatic-differentiation_cn.ipynb -------------------------------------------------------------------------------- /examples/analog_evolution_interface.py: -------------------------------------------------------------------------------- 1 | """ 2 | jax backend is required, experimental built-in interface for parameterized hamiltonian evolution 3 | """ 4 | 5 | import optax 6 | import tensorcircuit as tc 7 | from tensorcircuit.experimental import evol_global, evol_local 8 | 9 | K = tc.set_backend("jax") 10 | 11 | 12 | def h_fun(t, b): 13 | return b * tc.gates.x().tensor 14 | 15 | 16 | hy = tc.quantum.PauliStringSum2COO([[2, 0]]) 17 | 18 | 19 | def h_fun2(t, b): 20 | return b[2] * K.cos(b[0] * t + b[1]) * hy 21 | 22 | 23 | @K.jit 24 | @K.value_and_grad 25 | def hybrid_evol(params): 26 | c = tc.Circuit(2) 27 | c.x([0, 1]) 28 | c = evol_local(c, [1], h_fun, 1.0, params[0]) 29 | c.cx(1, 0) 30 | c.h(0) 31 | c = evol_global(c, h_fun2, 1.0, params[1:]) 32 | return K.real(c.expectation_ps(z=[0, 1])) 33 | 34 | 35 | opt = K.optimizer(optax.adam(0.1)) 36 | b = K.implicit_randn([4]) 37 | for _ in range(50): 38 | v, gs = hybrid_evol(b) 39 | b = opt.update(gs, b) 40 | print(v, b) 41 | -------------------------------------------------------------------------------- /examples/analog_evolution_jax.py: -------------------------------------------------------------------------------- 1 | """ 2 | Parameterized Hamiltonian (Pulse control/Analog simulation) with AD/JIT support using jax ode solver 3 | """ 4 | 5 | import optax 6 | from jax.experimental.ode import odeint 7 | import tensorcircuit as tc 8 | 9 | K = tc.set_backend("jax") 10 | tc.set_dtype("complex128") 11 | 12 | hx = tc.quantum.PauliStringSum2COO([[1]]) 13 | hz = tc.quantum.PauliStringSum2COO([[3]]) 14 | 15 | 16 | # psi = -i H psi 17 | # we want to optimize the final z expectation over parameters params 18 | # a single qubit example below 19 | 20 | 21 | def final_z(b): 22 | def f(y, t, b): 23 | h = b[3] * K.sin(b[0] * t + b[1]) * hx + K.cos(b[2]) * hz 24 | return -1.0j * K.sparse_dense_matmul(h, y) 25 | 26 | y0 = tc.array_to_tensor([1, 0]) 27 | y0 = K.reshape(y0, [-1, 1]) 28 | t = tc.array_to_tensor([0.0, 10.0], dtype=tc.rdtypestr) 29 | yf = odeint(f, y0, t, b) 30 | c = tc.Circuit(1, inputs=K.reshape(yf[-1], [-1])) 31 | return K.real(c.expectation_ps(z=[0])) 32 | 33 | 34 | vgf = K.jit(K.value_and_grad(final_z)) 35 | 36 | 37 | opt = K.optimizer(optax.adam(0.1)) 38 | b = K.implicit_randn([4]) 39 | for _ in range(50): 40 | v, gs = vgf(b) 41 | b = opt.update(gs, b) 42 | print(v, b) 43 | -------------------------------------------------------------------------------- /examples/analog_evolution_mint.py: -------------------------------------------------------------------------------- 1 | """ 2 | jax backend analog evolution targeting at minimizing evolution time 3 | """ 4 | 5 | import optax 6 | import tensorcircuit as tc 7 | from tensorcircuit.experimental import evol_global 8 | 9 | K = tc.set_backend("jax") 10 | 11 | hx = tc.quantum.PauliStringSum2COO([[1]]) 12 | 13 | 14 | def h_fun(t, b): 15 | return K.sin(b) * hx 16 | 17 | 18 | def fast_evol(t, 
b): 19 | lbd = 0.08 20 | c = tc.Circuit(1) 21 | c = evol_global(c, h_fun, t, b) 22 | loss = K.real(c.expectation_ps(z=[0])) 23 | return loss + lbd * t**2, loss 24 | # l2 regularization to minimize t while target z=-1 25 | 26 | 27 | vgf = K.jit(K.value_and_grad(fast_evol, argnums=(0, 1), has_aux=True)) 28 | 29 | opt = K.optimizer(optax.adam(0.05)) 30 | b, t = tc.array_to_tensor(0.5, 1.0, dtype=tc.rdtypestr) 31 | 32 | for i in range(500): 33 | (v, loss), gs = vgf(b, t) 34 | b, t = opt.update(gs, (b, t)) 35 | if i % 20 == 0: 36 | print(v, loss, b, t) 37 | -------------------------------------------------------------------------------- /examples/apicomparison/0_tfq_qml.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import tensorflow_quantum as tfq 3 | import cirq 4 | import sympy 5 | import numpy as np 6 | 7 | nwires, nlayers, nbatch = 6, 3, 16 8 | qubits = [cirq.GridQubit(0, i) for i in range(nwires)] 9 | my_symbol = sympy.symbols("params_0:" + str(nwires * 2 * nlayers)) 10 | model_circuit = cirq.Circuit() 11 | 12 | for j in range(nlayers): 13 | for i in range(nwires - 1): 14 | model_circuit.append( 15 | cirq.ZZPowGate(exponent=my_symbol[j * nwires * 2 + i])( 16 | qubits[i], qubits[nwires - 1] 17 | ) 18 | ) 19 | for i in range(nwires): 20 | model_circuit.append(cirq.rx(my_symbol[j * nwires * 2 + nwires + i])(qubits[i])) 21 | 22 | model = tf.keras.Sequential( 23 | [ 24 | tf.keras.layers.Input(shape=(), dtype=tf.string), 25 | tfq.layers.PQC(model_circuit, cirq.Z(qubits[nwires - 1]) * 0.5 + 0.5), 26 | ] 27 | ) 28 | 29 | 30 | def img2circuit(img): 31 | circuit = cirq.Circuit() 32 | for i in range(nwires - 1): 33 | circuit.append(cirq.rx(img[i])(qubits[i])) 34 | return circuit 35 | 36 | 37 | img = np.random.normal(size=[nbatch, nwires]).astype(np.float32) 38 | img = tfq.convert_to_tensor([img2circuit(x) for x in img]) 39 | 40 | print(model(img)) 41 | -------------------------------------------------------------------------------- /examples/apicomparison/0_tfq_vg.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import tensorflow_quantum as tfq 3 | import cirq 4 | import sympy 5 | import numpy as np 6 | 7 | nwires, nlayers = 6, 3 8 | qubits = [cirq.GridQubit(0, i) for i in range(nwires)] 9 | symbols = sympy.symbols("params_0:" + str(nlayers * nwires * 2)) 10 | 11 | circuit = cirq.Circuit() 12 | for i in range(nwires): 13 | circuit.append(cirq.H(qubits[i])) 14 | for j in range(nlayers): 15 | for i in range(nwires - 1): 16 | circuit.append( 17 | cirq.ZZPowGate(exponent=symbols[j * nwires * 2 + i])( 18 | qubits[i], qubits[(i + 1)] 19 | ) 20 | ) 21 | for i in range(nwires): 22 | circuit.append(cirq.rx(symbols[j * nwires * 2 + nwires + i])(qubits[i])) 23 | 24 | circuit = tfq.convert_to_tensor([circuit]) 25 | 26 | hamiltonian = tfq.convert_to_tensor( 27 | [ 28 | [ 29 | sum( 30 | [cirq.Z(qubits[i]) * cirq.Z(qubits[i + 1]) for i in range(nwires - 1)] 31 | + [-1.0 * cirq.X(qubits[i]) for i in range(nwires)] 32 | ) 33 | ] 34 | ] 35 | ) 36 | 37 | ep = tfq.layers.Expectation() 38 | 39 | 40 | @tf.function 41 | def tf_vg(symbol_values): 42 | with tf.GradientTape() as g: 43 | g.watch(symbol_values) 44 | expectations = ep( 45 | circuit, 46 | symbol_names=symbols, 47 | symbol_values=symbol_values, 48 | operators=hamiltonian, 49 | ) 50 | grads = g.gradient(expectations, [symbol_values]) 51 | return expectations, grads 52 | 53 | 54 | symbol_values = [np.random.normal(size=[nlayers * 
nwires * 2]).astype(np.float32)] 55 | symbol_values = tf.Variable(tf.convert_to_tensor(symbol_values)) 56 | print(tf_vg(symbol_values)) 57 | -------------------------------------------------------------------------------- /examples/apicomparison/1_pennylane_qml.py: -------------------------------------------------------------------------------- 1 | import pennylane as qml 2 | import tensorflow as tf 3 | 4 | nwires, nlayers, nbatch = 6, 3, 16 5 | dev = qml.device("default.qubit.tf", wires=nwires) 6 | 7 | 8 | @tf.function 9 | @qml.qnode(dev, interface="tf") 10 | def yp(inputs, params): 11 | for i in range(nwires - 1): 12 | qml.RX(inputs[i], wires=i) 13 | for j in range(nlayers): 14 | for i in range(nwires - 1): 15 | qml.IsingZZ(params[i + j * 2 * nwires], wires=[i, nwires - 1]) 16 | for i in range(nwires): 17 | qml.RX(params[nwires + i + j * 2 * nwires], wires=i) 18 | return qml.expval(qml.Hamiltonian([1.0], [qml.PauliZ(nwires - 1)], True)) 19 | 20 | 21 | model = qml.qnn.KerasLayer(yp, {"params": (nlayers * 2 * nwires)}, output_dim=1) 22 | 23 | imgs = tf.random.normal([nbatch, nwires]) 24 | 25 | print(model(imgs)) 26 | -------------------------------------------------------------------------------- /examples/apicomparison/1_pennylane_vg.py: -------------------------------------------------------------------------------- 1 | import pennylane as qml 2 | import tensorflow as tf 3 | 4 | nwires, nlayers = 6, 3 5 | 6 | coeffs = [-1.0] * nwires + [1.0] * (nwires - 1) 7 | obs = [qml.PauliX(i) for i in range(nwires)] + [ 8 | qml.PauliZ(i) @ qml.PauliZ((i + 1) % nwires) for i in range(nwires - 1) 9 | ] 10 | Htfim = qml.Hamiltonian(coeffs, obs, True) 11 | 12 | dev = qml.device("default.qubit.tf", wires=nwires) 13 | 14 | 15 | @tf.function 16 | @qml.qnode(dev, interface="tf") 17 | def tf_expval(params): 18 | for i in range(nwires): 19 | qml.Hadamard(wires=i) 20 | for j in range(nlayers): 21 | for i in range(nwires - 1): 22 | qml.IsingZZ(params[i + j * 2 * nwires], wires=[i, i + 1]) 23 | for i in range(nwires): 24 | qml.RX(params[nwires + i + j * 2 * nwires], wires=i) 25 | return qml.expval(Htfim) 26 | 27 | 28 | params = tf.random.normal(shape=[nlayers * 2 * nwires]) 29 | 30 | 31 | @tf.function 32 | def tf_vg(params): 33 | with tf.GradientTape() as t: 34 | t.watch(params) 35 | e = tf_expval(params) 36 | grad = t.gradient(e, [params]) 37 | return e, grad 38 | 39 | 40 | print(tf_vg(params)) 41 | -------------------------------------------------------------------------------- /examples/apicomparison/2_tc_qml.py: -------------------------------------------------------------------------------- 1 | import tensorcircuit as tc 2 | 3 | K = tc.set_backend("tensorflow") 4 | nwires, nlayer, nbatch = 6, 3, 16 5 | 6 | 7 | def yp(img, params): 8 | c = tc.Circuit(nwires) 9 | for i in range(nwires - 1): 10 | c.rx(i, theta=img[i]) 11 | for j in range(nlayer): 12 | for i in range(nwires - 1): 13 | c.rzz(i, nwires - 1, theta=params[i + j * 2 * nwires]) 14 | for i in range(nwires): 15 | c.rx(i, theta=params[nwires + i + j * 2 * nwires]) 16 | return K.real(c.expectation_ps(z=[nwires - 1])) 17 | 18 | 19 | model = tc.keras.QuantumLayer(yp, [(nlayer * 2 * nwires)]) 20 | 21 | imgs = K.implicit_randn(shape=[nbatch, nwires]) 22 | 23 | print(model(imgs)) 24 | -------------------------------------------------------------------------------- /examples/apicomparison/2_tc_vg.py: -------------------------------------------------------------------------------- 1 | import tensorcircuit as tc 2 | 3 | tc.set_backend("tensorflow") 4 | nwires, 
nlayers = 6, 3 5 | 6 | 7 | def vqe_forward(param): 8 | c = tc.Circuit(nwires) 9 | for i in range(nwires): 10 | c.H(i) 11 | for j in range(nlayers): 12 | for i in range(nwires - 1): 13 | c.exp1(i, i + 1, theta=param[2 * j, i], unitary=tc.gates._zz_matrix) 14 | for i in range(nwires): 15 | c.rx(i, theta=param[2 * j + 1, i]) 16 | e = sum( 17 | [-1.0 * c.expectation_ps(x=[i]) for i in range(nwires)] 18 | + [1.0 * c.expectation_ps(z=[i, i + 1]) for i in range(nwires - 1)] 19 | ) 20 | return e 21 | 22 | 23 | tc_vg = tc.backend.jit(tc.backend.value_and_grad(vqe_forward)) 24 | param = tc.backend.cast(tc.backend.randn([2 * nlayers, nwires]), "complex64") 25 | print(tc_vg(param)) 26 | -------------------------------------------------------------------------------- /examples/apicomparison/README.md: -------------------------------------------------------------------------------- 1 | API comparison on the same standard variational circuit evaluation task, demonstrating the advantage of TensorCircuit API design. 2 | 3 | * QML subtask refers to building a Keras model of a quantum circuit. 4 | 5 | * VQE subtask refers to getting the energy and circuit gradients. 6 | 7 | | # Lines (# Packages) | TensorFlow Quantum | Pennylane | TensorCircuit | 8 | | :------------------: | :----------------: | :-------: | :-----------: | 9 | | QML subtask | 32 (5) | 18 (2) | 16 (1) | 10 | | VQE subtask | 47 (5) | 29 (2) | 20 (1) | 11 | 12 | -------------------------------------------------------------------------------- /examples/apicomparison/_barplot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from matplotlib import pyplot as plt 3 | 4 | labels = ["VQE subtask", "QML subtask"] 5 | tfqlines = [47, 32] 6 | pllines = [29, 18] 7 | tclines = [20, 16] 8 | 9 | x = np.arange(len(labels)) # the label locations 10 | width = 0.2 # the width of the bars 11 | 12 | fig, ax = plt.subplots() 13 | rects0 = ax.bar(x - width, tfqlines, width, label="tfq") 14 | rects1 = ax.bar(x, pllines, width, label="pennylane") 15 | rects2 = ax.bar(x + width, tclines, width, label="tc (ours)") 16 | 17 | # Add some text for labels, title and custom x-axis tick labels, etc. 
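# bar_label below annotates each bar with its line count; the y-limit is padded to 51 so the tallest bar (47 lines) keeps its label inside the axes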
18 | ax.set_ylabel("Lines of code") 19 | ax.set_title("API comparison") 20 | ax.set_xticks(x, labels) 21 | ax.legend() 22 | ax.set_ylim(0, 51) 23 | ax.set_yticks([0, 10, 20, 30, 40]) 24 | ax.bar_label(rects0, padding=1) 25 | ax.bar_label(rects1, padding=1) 26 | ax.bar_label(rects2, padding=1) 27 | 28 | fig.tight_layout() 29 | 30 | # plt.show() 31 | plt.savefig("apic.pdf") 32 | -------------------------------------------------------------------------------- /examples/batched_parameters_structures.py: -------------------------------------------------------------------------------- 1 | """ 2 | VQE optimization over different parameter initializations and different circuit structures 3 | """ 4 | 5 | import tensorflow as tf 6 | import numpy as np 7 | import tensorcircuit as tc 8 | 9 | K = tc.set_backend("tensorflow") 10 | 11 | n = 6 12 | lattice = tc.templates.graphs.Line1D(n, pbc=False) 13 | h = tc.quantum.heisenberg_hamiltonian( 14 | lattice, hzz=1, hxx=0, hyy=0, hx=-1, hy=0, hz=0, sparse=False 15 | ) 16 | 17 | 18 | def gate_list(param): 19 | l = [ 20 | tc.gates.Gate(np.eye(4)), 21 | tc.gates.Gate(np.kron(tc.gates._x_matrix, np.eye(2))), 22 | tc.gates.Gate(np.kron(tc.gates._y_matrix, np.eye(2))), 23 | tc.gates.Gate(np.kron(tc.gates._z_matrix, np.eye(2))), 24 | tc.gates.Gate(np.kron(tc.gates._h_matrix, np.eye(2))), 25 | tc.gates.rx_gate.ided(before=False)(theta=param), 26 | tc.gates.ry_gate.ided(before=False)(theta=param), 27 | tc.gates.rz_gate.ided(before=False)(theta=param), 28 | tc.gates.exp1_gate(theta=param, unitary=tc.gates._xx_matrix), 29 | tc.gates.exp1_gate(theta=param, unitary=tc.gates._yy_matrix), 30 | tc.gates.exp1_gate(theta=param, unitary=tc.gates._zz_matrix), 31 | ] 32 | return [tc.backend.reshape2(m.tensor) for m in l if isinstance(m, tc.gates.Gate)] 33 | 34 | 35 | def makec(param, structure): 36 | c = tc.Circuit(n) 37 | for i in range(structure.shape[0]): 38 | for j in range(n): 39 | c.select_gate(structure[i, j], gate_list(param[i, j]), j, (j + 1) % n) 40 | return c 41 | 42 | 43 | def vqef(param, structure): 44 | c = makec(param, structure) 45 | e = tc.templates.measurements.operator_expectation(c, h) 46 | return e 47 | 48 | 49 | vqegf = tc.backend.jit( 50 | tc.backend.vmap( 51 | tc.backend.vvag(vqef, argnums=0, vectorized_argnums=0), 52 | vectorized_argnums=(0, 1), 53 | ) 54 | ) 55 | 56 | batch_structure = 2 57 | batch_weights = 8 58 | depth = 2 59 | structure1 = np.array([[0, 1, 0, 5, 0, 6], [6, 0, 6, 0, 6, 0]]) 60 | structure2 = np.array([[0, 1, 0, 5, 0, 6], [9, 0, 8, 0, 3, 0]]) 61 | structure = tc.backend.stack([structure1, structure2]) 62 | structure = tc.backend.cast(structure, "int32") 63 | weights = tc.backend.implicit_randn(shape=[batch_structure, batch_weights, depth, n]) 64 | opt = tc.backend.optimizer(tf.keras.optimizers.Adam(1e-2)) 65 | 66 | for _ in range(100): 67 | v, g = vqegf(weights, structure) 68 | print("energy: ", v) 69 | weights = opt.update(g, weights) 70 | -------------------------------------------------------------------------------- /examples/chaotic_behavior.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some chaotic properties calculations from the circuit state. 
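Covered below: half-chain entanglement entropy of the output state, a vmapped frame-potential estimate over pairs of random parameter sets, the state Jacobian via jacfwd, the Hessian of a ZZ correlator, and a plain gradient-descent optimization of that correlator.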
3 | """ 4 | 5 | from functools import partial 6 | import sys 7 | 8 | sys.path.insert(0, "../") 9 | import tensorflow as tf 10 | import tensorcircuit as tc 11 | 12 | K = tc.set_backend("tensorflow") 13 | 14 | 15 | # build the circuit 16 | 17 | 18 | @partial(K.jit, static_argnums=(1, 2)) 19 | def get_state(params, n, nlayers, inputs=None): 20 | c = tc.Circuit(n, inputs=inputs) # inputs is for input state of the circuit 21 | for i in range(nlayers): 22 | for j in range(n): 23 | c.ry(j, theta=params[i, j]) 24 | for j in range(n): 25 | c.cnot(j, (j + 1) % n) 26 | # one can further customize the layout and gate type above 27 | return c.state() # output wavefunction 28 | 29 | 30 | params = K.implicit_randn([5, 10]) 31 | 32 | s = get_state(params, n=10, nlayers=5) 33 | 34 | print(s) # output state in vector form 35 | rm = tc.quantum.reduced_density_matrix(s, cut=10 // 2) 36 | print(tc.quantum.entropy(rm)) 37 | # entanglement 38 | # for more quantum quantities functions, please refer to tc.quantum module 39 | 40 | 41 | @partial(K.jit, static_argnums=(2, 3, 4)) 42 | def frame_potential(param1, param2, t, n, nlayers): 43 | s1 = get_state(param1, n, nlayers) 44 | s2 = get_state(param2, n, nlayers) 45 | inner = K.tensordot(K.conj(s1), s2, 1) 46 | return K.abs(inner) ** (2 * t) 47 | 48 | 49 | # calculate several samples together using vmap 50 | 51 | frame_potential_vmap = K.vmap( 52 | partial(frame_potential, t=1, n=10, nlayers=5), vectorized_argnums=(0, 1) 53 | ) 54 | 55 | for _ in range(5): 56 | print( 57 | frame_potential_vmap(K.implicit_randn([3, 5, 10]), K.implicit_randn([3, 5, 10])) 58 | ) 59 | # the first dimension is the batch 60 | 61 | # get \partial \psi_i/ \partial \params_j (Jacobian) 62 | 63 | jac_func = K.jacfwd(partial(get_state, n=10, nlayers=5)) 64 | print(jac_func(params)) 65 | 66 | # correlation 67 | 68 | 69 | def get_zz(params, n, nlayers, inputs=None): 70 | s = get_state(params, n, nlayers, inputs) 71 | c = tc.Circuit(n, inputs=s) 72 | z1z2 = c.expectation([tc.gates.z(), [1]], [tc.gates.z(), [2]]) 73 | return K.real( 74 | z1z2 75 | ) # one can also add several correlations together as energy estimation 76 | 77 | 78 | # hessian matrix 79 | 80 | h_func = K.hessian(partial(get_zz, n=10, nlayers=5)) 81 | # suggest jax backend for hessian and directly use `jax.hessian` may be better 82 | print(h_func(params)) 83 | 84 | # optimization, suppose the energy we want to minimize is just z1z2 as above 85 | 86 | vg_func = K.jit(K.value_and_grad(get_zz), static_argnums=(1, 2)) 87 | opt = K.optimizer(tf.keras.optimizers.Adam(1e-2)) 88 | 89 | for i in range(200): # gradient descent 90 | energy, grads = vg_func(params, 10, 5) 91 | params = opt.update(grads, params) 92 | if i % 20 == 0: 93 | print(energy) # see energy optimization dynamics 94 | -------------------------------------------------------------------------------- /examples/circuit_compiler.py: -------------------------------------------------------------------------------- 1 | """ 2 | compilation utilities in tensorcircuit 3 | """ 4 | 5 | import tensorcircuit as tc 6 | 7 | 8 | c = tc.Circuit(3) 9 | c.rx(0, theta=0.2) 10 | c.rz(0, theta=-0.3) 11 | c.ry(1, theta=0.1) 12 | c.h(2) 13 | c.cx(0, 1) 14 | c.cz(2, 1) 15 | c.x(0) 16 | c.y(0) 17 | c.rxx(1, 2, theta=1.7) 18 | 19 | 20 | c0, _ = tc.compiler.qiskit_compiler.qiskit_compile( 21 | c, 22 | compiled_options={"optimization_level": 0, "basis_gates": ["cx", "cz", "h", "rz"]}, 23 | ) 24 | 25 | c1, _ = tc.compiler.qiskit_compiler.qiskit_compile( 26 | c, 27 | compiled_options={"optimization_level": 
1, "basis_gates": ["cx", "cz", "h", "rz"]}, 28 | ) 29 | 30 | 31 | c2, _ = tc.compiler.qiskit_compiler.qiskit_compile( 32 | c, 33 | compiled_options={"optimization_level": 2, "basis_gates": ["cx", "cz", "h", "rz"]}, 34 | ) 35 | 36 | 37 | c3, _ = tc.compiler.qiskit_compiler.qiskit_compile( 38 | c, 39 | compiled_options={"optimization_level": 3, "basis_gates": ["cx", "cz", "h", "rz"]}, 40 | ) 41 | 42 | print( 43 | "qiskit can become worse with higher level optimization when the target gate is not U3 but rz" 44 | ) 45 | print("level 0:\n") 46 | print(c0.draw()) 47 | print("level 1:\n") 48 | print(c1.draw()) 49 | print("level 2:\n") 50 | print(c2.draw()) 51 | print("level 3:\n") 52 | print(c3.draw()) 53 | 54 | 55 | compiler_wo_mapping = tc.compiler.DefaultCompiler() 56 | c4, _ = compiler_wo_mapping(c) 57 | print( 58 | "compiled with tc default compiler: combining the good from qiskit and our tc own" 59 | ) 60 | # we always uuggest using DefaultCompiler for tasks on qcloud 61 | # internally we run optimized compiling using U3 basis with qiskit which has good performance 62 | # and we unroll u3 with rz and apply replace/prune/merge loop developed in tc to further optimize the circuit 63 | print(c4.draw()) 64 | 65 | print("gate number comparison (last ours vs before qiskit (0, 1, 2, 3))") 66 | for c in [c0, c1, c2, c3, c4]: 67 | print(c.gate_count()) 68 | 69 | # if we want to apply routing/qubit mapping 70 | 71 | compiler_w_mapping = tc.compiler.DefaultCompiler( 72 | {"coupling_map": [[0, 2], [2, 0], [1, 0], [0, 1]]} 73 | ) 74 | c5, info = compiler_w_mapping(c) 75 | print("circuit with qubit mapping") 76 | print(c5.draw()) 77 | print(info) 78 | -------------------------------------------------------------------------------- /examples/ghz_dqas.py: -------------------------------------------------------------------------------- 1 | """ 2 | DQAS for GHZ state preparation circuit, deprecated DQAS implementation 3 | """ 4 | 5 | import sys 6 | 7 | sys.path.insert(0, "../") 8 | import numpy as np 9 | import tensorflow as tf 10 | import cirq 11 | 12 | import tensorcircuit as tc 13 | from tensorcircuit.applications.vags import double_qubits_initial, GHZ_vag, GHZ_vag_tfq 14 | from tensorcircuit.applications.dqas import ( 15 | set_op_pool, 16 | get_preset, 17 | DQAS_search, 18 | ) 19 | 20 | tc.set_backend("tensorflow") 21 | 22 | 23 | def main_tn(): 24 | """ 25 | DQAS with the tensorcircuit engine backend by TensorNetwork 26 | state preparation example 27 | 28 | :return: 29 | """ 30 | # multi start may be necessary 31 | ghz_pool = [ 32 | ("ry", 0), 33 | ("ry", 1), 34 | ("ry", 2), 35 | ("CNOT", 0, 1), 36 | ("CNOT", 1, 0), 37 | ("CNOT", 0, 2), 38 | ("CNOT", 2, 0), 39 | ("H", 0), 40 | ("H", 1), 41 | ("H", 2), 42 | ] 43 | set_op_pool(ghz_pool) 44 | c = len(ghz_pool) 45 | p = 4 46 | stp, nnp, _ = DQAS_search( 47 | GHZ_vag, 48 | p=p, 49 | batch=128, 50 | epochs=20, 51 | verbose=True, 52 | parallel_num=0, 53 | nnp_initial_value=np.zeros([p, c]), 54 | structure_opt=tf.keras.optimizers.Adam(learning_rate=0.15), 55 | ) 56 | preset = get_preset(stp).numpy() 57 | GHZ_vag(None, nnp, preset, verbose=True) 58 | 59 | 60 | def main_tfq(): 61 | """ 62 | DQAS with the tensorflow quantum engine. 63 | Unitary learning example. 
64 | 65 | :return: 66 | """ 67 | p = 4 68 | cset = [ 69 | cirq.H(cirq.GridQubit(0, 0)), 70 | cirq.H(cirq.GridQubit(1, 0)), 71 | cirq.CNOT(cirq.GridQubit(0, 0), cirq.GridQubit(1, 0)), 72 | cirq.CNOT(cirq.GridQubit(1, 0), cirq.GridQubit(0, 0)), 73 | cirq.X(cirq.GridQubit(0, 0)), 74 | cirq.X(cirq.GridQubit(1, 0)), 75 | ] 76 | set_op_pool(cset) 77 | c = len(cset) 78 | stp, nnp, _ = DQAS_search( 79 | GHZ_vag_tfq, 80 | g=double_qubits_initial(), 81 | p=p, 82 | batch=16, 83 | epochs=5, 84 | verbose=False, 85 | parallel_num=0, 86 | nnp_initial_value=np.zeros([p, c]), 87 | structure_opt=tf.keras.optimizers.Adam(learning_rate=0.15), 88 | ) 89 | preset = get_preset(stp).numpy() 90 | GHZ_vag_tfq(double_qubits_initial().send(None), nnp, preset, verbose=True) 91 | 92 | 93 | if __name__ == "__main__": 94 | main_tfq() 95 | # main_tn() 96 | -------------------------------------------------------------------------------- /examples/h6_hamiltonian.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/examples/h6_hamiltonian.npy -------------------------------------------------------------------------------- /examples/hchainhamiltonian.py: -------------------------------------------------------------------------------- 1 | """ 2 | Get molecule qubit format Hamiltonian from openfermion. 3 | """ 4 | 5 | import time 6 | import tensorflow as tf 7 | from openfermion.chem import MolecularData 8 | from openfermion.transforms import ( 9 | get_fermion_operator, 10 | binary_code_transform, 11 | get_fermion_operator, 12 | reorder, 13 | checksum_code, 14 | ) 15 | from openfermion.utils import up_then_down 16 | from openfermionpyscf import run_pyscf 17 | from scipy import sparse 18 | 19 | import tensorcircuit as tc 20 | 21 | 22 | n = 4 23 | multiplicity = 1 24 | geometry = [("H", (0, 0, 0.95 * i)) for i in range(n)] 25 | description = "H%s_0.95" % str(n) 26 | basis = "sto-3g" 27 | molecule = MolecularData(geometry, basis, multiplicity, description=description) 28 | molecule = run_pyscf(molecule, run_mp2=True, run_cisd=True, run_ccsd=True, run_fci=True) 29 | print(molecule.fci_energy, molecule.ccsd_energy) 30 | fermion_hamiltonian = get_fermion_operator(molecule.get_molecular_hamiltonian()) 31 | b = binary_code_transform( 32 | reorder(fermion_hamiltonian, up_then_down), 2 * checksum_code(n, 1) 33 | ) 34 | ls, w = tc.templates.chems.get_ps(b, 2 * n - 2) 35 | time0 = time.time() 36 | m = tc.quantum.PauliStringSum2COO_numpy( 37 | tf.constant(ls, dtype=tf.int64), tf.constant(w, dtype=tf.complex128) 38 | ) 39 | time1 = time.time() 40 | print(m) 41 | print("tc takes time: ", time1 - time0) 42 | tc.backend.sparse_dense_matmul(m, tc.backend.ones([2 ** (2 * n - 2)])) 43 | time2 = time.time() 44 | print("tc takes time for mvp: ", time2 - time1) 45 | sparse.save_npz("./h-" + str(n) + "-chain.npz", m) 46 | m2 = sparse.load_npz("./h-" + str(n) + "-chain.npz") 47 | print(m2) 48 | 49 | """ 50 | from openfermion.linalg import LinearQubitOperator 51 | 52 | # too slow to use 53 | h = LinearQubitOperator(b) 54 | ids = np.eye(2 ** h.n_qubits) 55 | time3 = time.time() 56 | m = h.dot(np.ones([2 ** h.n_qubits])) 57 | time4 = time.time() 58 | print("of takes time for mvp: ", time4 - time3) 59 | """ 60 | -------------------------------------------------------------------------------- /examples/hea_scan_jit_acc.py: -------------------------------------------------------------------------------- 1 | """ 2 | 
reducing jit compiling time by general scan magic 3 | """ 4 | 5 | import numpy as np 6 | import tensorcircuit as tc 7 | 8 | n = 10 9 | nlayers = 16 10 | param_np = np.random.normal(size=[nlayers, n, 2]) 11 | 12 | for backend in ["tensorflow", "jax"]: 13 | with tc.runtime_backend(backend) as K: 14 | print("running %s" % K.name) 15 | 16 | def energy_reference(param, n, nlayers): 17 | c = tc.Circuit(n) 18 | for i in range(n): 19 | c.h(i) 20 | for i in range(nlayers): 21 | for j in range(n - 1): 22 | c.rzz(j, j + 1, theta=param[i, j, 0]) 23 | for j in range(n): 24 | c.rx(j, theta=param[i, j, 1]) 25 | return K.real(c.expectation_ps(z=[0, 1]) + c.expectation_ps(x=[2])) 26 | 27 | vg_reference = K.jit( 28 | K.value_and_grad(energy_reference, argnums=0), static_argnums=(1, 2) 29 | ) 30 | 31 | # a jit efficient way to utilize scan 32 | 33 | def energy(param, n, nlayers, each): 34 | def loop_f(s_, param_): 35 | c_ = tc.Circuit(n, inputs=s_) 36 | for i in range(each): 37 | for j in range(n - 1): 38 | c_.rzz(j, j + 1, theta=param_[i, j, 0]) 39 | for j in range(n): 40 | c_.rx(j, theta=param_[i, j, 1]) 41 | s_ = c_.state() 42 | return s_ 43 | 44 | c = tc.Circuit(n) 45 | for i in range(n): 46 | c.h(i) 47 | s = c.state() 48 | s1 = K.scan(loop_f, K.reshape(param, [nlayers // each, each, n, 2]), s) 49 | c1 = tc.Circuit(n, inputs=s1) 50 | return K.real(c1.expectation_ps(z=[0, 1]) + c1.expectation_ps(x=[2])) 51 | 52 | vg = K.jit( 53 | K.value_and_grad(energy, argnums=0), 54 | static_argnums=(1, 2, 3), 55 | jit_compile=True, 56 | ) 57 | # set to False can improve compile time for tf 58 | 59 | param = K.convert_to_tensor(param_np) 60 | 61 | for each in [1, 2, 4]: 62 | print(" scan impl with each=%s" % str(each)) 63 | r1 = tc.utils.benchmark(vg, param, n, nlayers, each) 64 | print(r1[0][0]) 65 | 66 | print(" plain impl") 67 | r0 = tc.utils.benchmark(vg_reference, param, n, nlayers) # too slow 68 | np.testing.assert_allclose(r0[0][0], r1[0][0], atol=1e-5) 69 | np.testing.assert_allclose(r0[0][1], r1[0][1], atol=1e-5) 70 | # correctness check 71 | 72 | 73 | # jit_compile=True icrease runtime while degrades jit time for tensorflow 74 | # and in general jax improves better with scan methodology, 75 | # both compile time and running time can outperform tf 76 | -------------------------------------------------------------------------------- /examples/incremental_twoqubit.py: -------------------------------------------------------------------------------- 1 | """ 2 | Optimizing the parameterized circuit with progressively dense two-qubit gates, 3 | as a potential approach to alleviate barren plateau. 
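Each two-qubit slot is gated by a structure tensor: a value of 1 keeps the identity, a value of 0 applies an exp(i*theta*ZZ) rotation, and the structure values are shifted every 20 optimization steps so that entangling gates are switched on progressively.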
4 | """ 5 | 6 | import sys 7 | 8 | sys.path.insert(0, "../") 9 | import tensorflow as tf 10 | import numpy as np 11 | import tensorcircuit as tc 12 | 13 | K = tc.set_backend("tensorflow") 14 | 15 | n = 10 16 | nlayers = 3 17 | g = tc.templates.graphs.Line1D(n) 18 | 19 | 20 | def energy(params, structures, n, nlayers): 21 | structures = (K.sign(structures) + 1) / 2 # 0 or 1 22 | structures = K.cast(structures, params.dtype) 23 | c = tc.Circuit(n) 24 | for i in range(n): 25 | c.H(i) 26 | for j in range(nlayers): 27 | for i in range(n - 1): 28 | matrix = structures[j, i] * tc.gates._ii_matrix + ( 29 | 1.0 - structures[j, i] 30 | ) * ( 31 | K.cos(params[2 * j + 1, i]) * tc.gates._ii_matrix 32 | + 1.0j * K.sin(params[2 * j + 1, i]) * tc.gates._zz_matrix 33 | ) 34 | c.any( 35 | i, 36 | i + 1, 37 | unitary=matrix, 38 | ) 39 | for i in range(n): 40 | c.rx(i, theta=params[2 * j, i]) 41 | 42 | e = tc.templates.measurements.heisenberg_measurements( 43 | c, g, hzz=1, hxx=0, hyy=0, hx=-1, hy=0, hz=0 44 | ) # TFIM energy from expectation of circuit c defined on lattice given by g 45 | return e 46 | 47 | 48 | vagf = K.jit(K.value_and_grad(energy, argnums=0), static_argnums=(2, 3)) 49 | 50 | params = np.random.uniform(size=[2 * nlayers, n]) 51 | structures = np.random.uniform(size=[nlayers, n]) 52 | params, structures = tc.array_to_tensor(params, structures) 53 | 54 | opt = K.optimizer(tf.keras.optimizers.Adam(1e-2)) 55 | 56 | for i in range(300): 57 | if i % 20 == 0: 58 | structures -= 0.2 * K.ones([nlayers, n]) 59 | # one can change the structures by tune the structure tensor value 60 | # this specifically equiv to add two qubit gates 61 | e, grads = vagf(params, structures, n, nlayers) 62 | params = opt.update(grads, params) 63 | print(K.numpy(e)) 64 | -------------------------------------------------------------------------------- /examples/jacobian_cal.py: -------------------------------------------------------------------------------- 1 | """ 2 | jacobian calculation on different backend 3 | """ 4 | 5 | import numpy as np 6 | import tensorcircuit as tc 7 | 8 | 9 | def get_jac(n, nlayers): 10 | def state(params): 11 | params = K.reshape(params, [2 * nlayers, n]) 12 | c = tc.Circuit(n) 13 | c = tc.templates.blocks.example_block(c, params, nlayers=nlayers) 14 | return c.state() 15 | 16 | params = K.ones([2 * nlayers * n]) 17 | n1 = K.jacfwd(state)(params) 18 | n2 = K.jacrev(state)(params) 19 | # tf backend, jaxrev is upto conjugate with real jacobian 20 | params = K.cast(params, "float64") 21 | n3 = K.jacfwd(state)(params) 22 | n4 = K.jacrev(state)(params) 23 | # n4 is the real part of n3 24 | return n1, n2, n3, n4 25 | 26 | 27 | for b in ["jax", "tensorflow"]: 28 | with tc.runtime_backend(b) as K: 29 | with tc.runtime_dtype("complex128"): 30 | n1, n2, n3, n4 = get_jac(3, 1) 31 | 32 | print(n1) 33 | print(n2) 34 | print(n3) 35 | print(n4) 36 | 37 | np.testing.assert_allclose(K.real(n3), n4) 38 | if K.name == "tensorflow": 39 | n2 = K.conj(n2) 40 | np.testing.assert_allclose(n1, n2) 41 | -------------------------------------------------------------------------------- /examples/jsonio.py: -------------------------------------------------------------------------------- 1 | """ 2 | example showcasing how circuit can be load from and dump to json: 3 | useful for storage or restful api 4 | """ 5 | 6 | import numpy as np 7 | import tensorcircuit as tc 8 | 9 | tc.set_dtype("complex128") 10 | 11 | 12 | def make_circuit(): 13 | c = tc.Circuit(3) 14 | c.h(0) 15 | c.H(2) 16 | c.CNOT(1, 2) 17 | c.rxx(0, 2, 
theta=0.3) 18 | c.u(2, theta=0.2, lbd=-1.2, phi=0.5) 19 | c.cu(1, 0, lbd=1.0) 20 | c.crx(0, 1, theta=-0.8) 21 | c.r(1, theta=tc.backend.ones([]), alpha=0.2) 22 | c.toffoli(0, 2, 1) 23 | c.ccnot(0, 1, 2) 24 | c.any(0, 1, unitary=tc.gates._xx_matrix) 25 | c.multicontrol(1, 2, 0, ctrl=[0, 1], unitary=tc.gates._x_matrix) 26 | return c 27 | 28 | 29 | if __name__ == "__main__": 30 | c = make_circuit() 31 | s = c.to_json(simplified=True) 32 | print(s) 33 | c.to_json(file="circuit.json") 34 | # load from json string 35 | c2 = tc.Circuit.from_json(s) 36 | print("\n", c2.draw()) 37 | np.testing.assert_allclose(c.state(), c2.state(), atol=1e-5) 38 | print("test correctness 1") 39 | # load from json file 40 | c3 = tc.Circuit.from_json_file("circuit.json") 41 | print("\n", c3.draw()) 42 | np.testing.assert_allclose(c.state(), c3.state(), atol=1e-5) 43 | print("test correctness 2") 44 | -------------------------------------------------------------------------------- /examples/keras3_tc_integration.py: -------------------------------------------------------------------------------- 1 | """ 2 | keras3 is excellent to use together with tc, we will have unique features including: 3 | 1. turn OO paradigm to functional paradigm, i.e. reuse keras layer function in functional programming 4 | 2. batch on neural network weights 5 | """ 6 | 7 | import os 8 | 9 | os.environ["KERAS_BACKEND"] = "jax" 10 | import keras_core as keras 11 | import numpy as np 12 | import optax 13 | import tensorcircuit as tc 14 | 15 | K = tc.set_backend("jax") 16 | 17 | batch = 8 18 | n = 6 19 | layer = keras.layers.Dense(1, activation="sigmoid") 20 | layer.build([batch, n]) 21 | 22 | data_x = np.random.choice([0, 1], size=batch * n).reshape([batch, n]) 23 | # data_y = np.sum(data_x, axis=-1) % 2 24 | data_y = data_x[:, 0] 25 | data_y = data_y.reshape([batch, 1]) 26 | data_x = data_x.astype(np.float32) 27 | data_y = data_y.astype(np.float32) 28 | 29 | 30 | print("data", data_x, data_y) 31 | 32 | 33 | def loss(xs, ys, params, weights): 34 | c = tc.Circuit(n) 35 | c.rx(range(n), theta=xs) 36 | c.cx(range(n - 1), range(1, n)) 37 | c.rz(range(n), theta=params) 38 | outputs = K.stack([K.real(c.expectation_ps(z=[i])) for i in range(n)]) 39 | ypred, _ = layer.stateless_call(weights, [], outputs) 40 | return keras.losses.binary_crossentropy(ypred, ys), ypred 41 | 42 | 43 | # common data batch practice 44 | vgf = K.jit( 45 | K.vectorized_value_and_grad( 46 | loss, argnums=(2, 3), vectorized_argnums=(0, 1), has_aux=True 47 | ) 48 | ) 49 | 50 | params = K.implicit_randn(shape=[n]) 51 | w = K.implicit_randn(shape=[n, 1]) 52 | b = K.implicit_randn(shape=[1]) 53 | opt = K.optimizer(optax.adam(1e-2)) 54 | # seems that currently keras3'optimizer doesn't support nested list of variables 55 | 56 | for i in range(100): 57 | (v, yp), gs = vgf(data_x, data_y, params, [w, b]) 58 | params, [w, b] = opt.update(gs, (params, [w, b])) 59 | if i % 10 == 0: 60 | print(K.mean(v)) 61 | 62 | m = keras.metrics.BinaryAccuracy() 63 | m.update_state(data_y, yp[:, None]) 64 | print("acc", m.result()) 65 | 66 | 67 | # data batch with batched and quantum neural weights 68 | 69 | vgf2 = K.jit( 70 | K.vmap( 71 | K.vectorized_value_and_grad( 72 | loss, argnums=(2, 3), vectorized_argnums=(0, 1), has_aux=True 73 | ), 74 | vectorized_argnums=(2, 3), 75 | ) 76 | ) 77 | 78 | wbatch = 4 79 | params = K.implicit_randn(shape=[wbatch, n]) 80 | w = K.implicit_randn(shape=[wbatch, n, 1]) 81 | b = K.implicit_randn(shape=[wbatch, 1]) 82 | opt = K.optimizer(optax.adam(1e-2)) 83 | # seems that 
currently keras3'optimizer doesn't support nested list of variables 84 | 85 | for i in range(100): 86 | (v, yp), gs = vgf2(data_x, data_y, params, [w, b]) 87 | params, [w, b] = opt.update(gs, (params, [w, b])) 88 | if i % 10 == 0: 89 | print(K.mean(v, axis=-1)) 90 | 91 | for i in range(wbatch): 92 | m = keras.metrics.BinaryAccuracy() 93 | m.update_state(data_y, yp[0, :, None]) 94 | print("acc", m.result()) 95 | m.reset_state() 96 | -------------------------------------------------------------------------------- /examples/lightcone_simplify.py: -------------------------------------------------------------------------------- 1 | """ 2 | comparison between expectation evaluation with/wo lightcone simplification 3 | """ 4 | 5 | import numpy as np 6 | import tensorcircuit as tc 7 | 8 | K = tc.set_backend("tensorflow") 9 | 10 | 11 | def brickwall_ansatz(c, params, gatename, nlayers): 12 | n = c._nqubits 13 | params = K.reshape(params, [nlayers, n, 2]) 14 | for j in range(nlayers): 15 | for i in range(0, n, 2): 16 | getattr(c, gatename)(i, (i + 1) % n, theta=params[j, i, 0]) 17 | for i in range(1, n, 2): 18 | getattr(c, gatename)(i, (i + 1) % n, theta=params[j, i, 1]) 19 | return c 20 | 21 | 22 | def loss(params, n, nlayers, enable_lightcone): 23 | c = tc.Circuit(n) 24 | for i in range(n): 25 | c.h(i) 26 | c = brickwall_ansatz(c, params, "rzz", nlayers) 27 | expz = K.stack( 28 | [c.expectation_ps(z=[i], enable_lightcone=enable_lightcone) for i in range(n)] 29 | ) 30 | return K.real(K.sum(expz)) 31 | 32 | 33 | vg1 = K.jit(K.value_and_grad(loss), static_argnums=(1, 2, 3)) 34 | 35 | 36 | def efficiency(): 37 | for n in range(6, 40, 4): 38 | for nlayers in range(2, 6, 2): 39 | print(n, nlayers) 40 | print("w lightcone") 41 | (v2, g2), _, _ = tc.utils.benchmark( 42 | vg1, K.ones([nlayers * n * 2]), n, nlayers, True 43 | ) 44 | if n < 16: 45 | print("wo lightcone") 46 | (v1, g1), _, _ = tc.utils.benchmark( 47 | vg1, K.ones([nlayers * n * 2]), n, nlayers, False 48 | ) 49 | np.testing.assert_allclose(v1, v2, atol=1e-5) 50 | np.testing.assert_allclose(g1, g2, atol=1e-5) 51 | 52 | 53 | ## further correctness check 54 | def correctness(n, nlayers): 55 | for _ in range(5): 56 | v1, g1 = vg1(K.implicit_randn([nlayers * n * 2]), n, nlayers, False) 57 | v2, g2 = vg1(K.implicit_randn([nlayers * n * 2]), n, nlayers, True) 58 | np.testing.assert_allclose(v1, v2, atol=1e-5) 59 | np.testing.assert_allclose(g1, g2, atol=1e-5) 60 | 61 | 62 | if __name__ == "__main__": 63 | efficiency() 64 | correctness(7, 3) 65 | -------------------------------------------------------------------------------- /examples/matprod_vmap.py: -------------------------------------------------------------------------------- 1 | """ 2 | matrix product: a new twist 3 | rewrite matrix product in a vmap style 4 | """ 5 | 6 | from functools import partial 7 | 8 | import numpy as np 9 | import tensorcircuit as tc 10 | 11 | for bk in ["jax", "tensorflow"]: 12 | with tc.runtime_backend(bk) as K: 13 | print("~~~~~~~~~~~~~~~~~~~~~") 14 | print(f"using {K.name} backend") 15 | 16 | @partial(K.jit, jit_compile=True) 17 | def mul(a, b): 18 | return a @ b 19 | 20 | def ij(i, j): 21 | """ 22 | Inner product 23 | """ 24 | return K.tensordot(i, j, 1) 25 | 26 | vij = K.vmap(ij, vectorized_argnums=1) 27 | vvij = K.vmap(vij, vectorized_argnums=0) 28 | 29 | @partial(K.jit, jit_compile=True) 30 | def mul2(a, b): 31 | b = K.transpose(b) 32 | return vvij(a, b) 33 | 34 | for shape in [(256, 4096), (4096, 256), (2048, 2048)]: 35 | print(shape) 36 | a = 
K.implicit_randn(shape) 37 | b = K.implicit_randn([shape[1], shape[0]]) 38 | print("plain matprod") 39 | r1, _, _ = tc.utils.benchmark(mul, a, b, tries=10) 40 | print("vmap matprod") 41 | r2, _, _ = tc.utils.benchmark(mul2, a, b, tries=10) 42 | np.testing.assert_allclose(r1, r2, atol=1e-5) 43 | -------------------------------------------------------------------------------- /examples/mcnoise_boost.py: -------------------------------------------------------------------------------- 1 | """ 2 | Boost the Monte Carlo noise simulation (specifically the staging time) 3 | on general error with circuit layerwise slicing 4 | """ 5 | 6 | import time 7 | import sys 8 | 9 | sys.path.insert(0, "../") 10 | import tensorcircuit as tc 11 | 12 | tc.set_backend("jax") 13 | 14 | n = 3 # 10 15 | nlayer = 2 # 4 16 | 17 | 18 | def f1(key, param, n, nlayer): 19 | if key is not None: 20 | tc.backend.set_random_state(key) 21 | c = tc.Circuit(n) 22 | for i in range(n): 23 | c.H(i) 24 | for j in range(nlayer): 25 | for i in range(n - 1): 26 | c.cnot(i, i + 1) 27 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i) 28 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i + 1) 29 | for i in range(n): 30 | c.rx(i, theta=param[j, i]) 31 | return tc.backend.real(c.expectation((tc.gates.z(), [int(n / 2)]))) 32 | 33 | 34 | def templatecnot(s, param, i): 35 | c = tc.Circuit(n, inputs=s) 36 | c.cnot(i, i + 1) 37 | return c.state() 38 | 39 | 40 | def templatenoise(key, s, param, i): 41 | c = tc.Circuit(n, inputs=s) 42 | status = tc.backend.stateful_randu(key)[0] 43 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i, status=status) 44 | return c.state() 45 | 46 | 47 | def templaterz(s, param, j): 48 | c = tc.Circuit(n, inputs=s) 49 | for i in range(n): 50 | c.rx(i, theta=param[j, i]) 51 | return c.state() 52 | 53 | 54 | def f2(key, param, n, nlayer): 55 | c = tc.Circuit(n) 56 | for i in range(n): 57 | c.H(i) 58 | s = c.state() 59 | for j in range(nlayer): 60 | for i in range(n - 1): 61 | s = templatecnot(s, param, i) 62 | key, subkey = tc.backend.random_split(key) 63 | s = templatenoise(subkey, s, param, i) 64 | key, subkey = tc.backend.random_split(key) 65 | s = templatenoise(subkey, s, param, i + 1) 66 | s = templaterz(s, param, j) 67 | return tc.backend.real(tc.expectation((tc.gates.z(), [int(n / 2)]), ket=s)) 68 | 69 | 70 | vagf1 = tc.backend.jit(tc.backend.value_and_grad(f1, argnums=1), static_argnums=(2, 3)) 71 | vagf2 = tc.backend.jit(tc.backend.value_and_grad(f2, argnums=1), static_argnums=(2, 3)) 72 | 73 | param = tc.backend.ones([nlayer, n]) 74 | 75 | 76 | def benchmark(f, tries=3): 77 | time0 = time.time() 78 | key = tc.backend.get_random_state(42) 79 | print(f(key, param, n, nlayer)[0]) 80 | time1 = time.time() 81 | for _ in range(tries): 82 | print(f(key, param, n, nlayer)[0]) 83 | time2 = time.time() 84 | print( 85 | "staging time: ", 86 | time1 - time0, 87 | "running time: ", 88 | (time2 - time1) / tries, 89 | ) 90 | 91 | 92 | print("without layerwise slicing jit") 93 | benchmark(vagf1) 94 | print("=============================") 95 | print("with layerwise slicing jit") 96 | benchmark(vagf2) 97 | 98 | # 10*4: jax*T4: 235/0.36 vs. 
26/0.04 99 | -------------------------------------------------------------------------------- /examples/mcnoise_boost_v2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Boost the Monte Carlo noise simulation (specifically the staging time) 3 | on general error with circuit layerwise slicing: new paradigm, 4 | essentially the same as v1, but much simpler 5 | """ 6 | 7 | import time 8 | import sys 9 | 10 | sys.path.insert(0, "../") 11 | import tensorcircuit as tc 12 | 13 | tc.set_backend("jax") 14 | 15 | n = 6 # 10 16 | nlayer = 5 # 4 17 | 18 | 19 | def precompute(c): 20 | s = c.state() 21 | return tc.Circuit(c._nqubits, inputs=s) 22 | 23 | 24 | def f1(key, param, n, nlayer): 25 | if key is not None: 26 | tc.backend.set_random_state(key) 27 | c = tc.Circuit(n) 28 | for i in range(n): 29 | c.H(i) 30 | for j in range(nlayer): 31 | for i in range(n - 1): 32 | c.cnot(i, i + 1) 33 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i) 34 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i + 1) 35 | for i in range(n): 36 | c.rx(i, theta=param[j, i]) 37 | return tc.backend.real(c.expectation((tc.gates.z(), [int(n / 2)]))) 38 | 39 | 40 | def f2(key, param, n, nlayer): 41 | c = tc.Circuit(n) 42 | for i in range(n): 43 | c.H(i) 44 | for j in range(nlayer): 45 | for i in range(n - 1): 46 | c.cnot(i, i + 1) 47 | c = precompute(c) 48 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i) 49 | c = precompute(c) 50 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i + 1) 51 | for i in range(n): 52 | c.rx(i, theta=param[j, i]) 53 | return tc.backend.real(c.expectation((tc.gates.z(), [int(n / 2)]))) 54 | 55 | 56 | vagf1 = tc.backend.jit(tc.backend.value_and_grad(f1, argnums=1), static_argnums=(2, 3)) 57 | vagf2 = tc.backend.jit(tc.backend.value_and_grad(f2, argnums=1), static_argnums=(2, 3)) 58 | 59 | param = tc.backend.ones([nlayer, n]) 60 | 61 | 62 | def benchmark(f, tries=3): 63 | time0 = time.time() 64 | key = tc.backend.get_random_state(42) 65 | print(f(key, param, n, nlayer)[0]) 66 | time1 = time.time() 67 | for _ in range(tries): 68 | print(f(key, param, n, nlayer)[0]) 69 | time2 = time.time() 70 | print( 71 | "staging time: ", 72 | time1 - time0, 73 | "running time: ", 74 | (time2 - time1) / tries, 75 | ) 76 | 77 | 78 | print("without layerwise slicing jit") 79 | benchmark(vagf1) 80 | print("=============================") 81 | print("with layerwise slicing jit") 82 | benchmark(vagf2) 83 | 84 | # mac16 intel cpu: (6*5, jax) 1015, 0.0035; 31.68, 0.00082 85 | -------------------------------------------------------------------------------- /examples/mcnoise_check.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cross check the correctness of the density matrix simulator and the Monte Carlo trajectory state simulator. 3 | """ 4 | 5 | import os 6 | 7 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1" 8 | # cpu is fast for small scale circuit simulation 9 | import sys 10 | 11 | sys.path.insert(0, "../") 12 | 13 | from tqdm import tqdm 14 | import jax 15 | import tensorcircuit as tc 16 | 17 | tc.set_backend("jax") 18 | 19 | n = 5 20 | nlayer = 3 21 | mctries = 100 # 100000 22 | 23 | print(jax.devices()) 24 | 25 | 26 | def template(c): 27 | # dont jit me! 
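# the jit is applied in the callers instead: both `answer` (density matrix) and `f` (Monte Carlo trajectory) below are decorated with tc.backend.jit and share this circuit-building template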
28 | for i in range(n): 29 | c.H(i) 30 | for i in range(n): 31 | c.rz(i, theta=tc.num_to_tensor(i)) 32 | for _ in range(nlayer): 33 | for i in range(n - 1): 34 | c.cnot(i, i + 1) 35 | for i in range(n): 36 | c.rx(i, theta=tc.num_to_tensor(i)) 37 | for i in range(n): 38 | c.apply_general_kraus(tc.channels.phasedampingchannel(0.15), i) 39 | return c.state() 40 | 41 | 42 | @tc.backend.jit 43 | def answer(): 44 | c = tc.DMCircuit2(n) 45 | return template(c) 46 | 47 | 48 | rho0 = answer() 49 | 50 | print(rho0) 51 | 52 | 53 | @tc.backend.jit 54 | def f(key): 55 | if key is not None: 56 | tc.backend.set_random_state(key) 57 | c = tc.Circuit(n) 58 | return template(c) 59 | 60 | 61 | key = jax.random.PRNGKey(42) 62 | f(key).block_until_ready() # build the graph 63 | 64 | rho = 0.0 65 | 66 | for i in tqdm(range(mctries)): 67 | key, subkey = jax.random.split(key) 68 | psi = f(subkey) # [1, 2**n] 69 | rho += ( 70 | 1 71 | / mctries 72 | * tc.backend.reshape(psi, [-1, 1]) 73 | @ tc.backend.conj(tc.backend.reshape(psi, [1, -1])) 74 | ) 75 | 76 | print(rho) 77 | print("difference\n", tc.backend.abs(rho - rho0)) 78 | print("difference in total\n", tc.backend.sum(tc.backend.abs(rho - rho0))) 79 | print("fidelity", tc.quantum.fidelity(rho, rho0)) 80 | print("trace distance", tc.quantum.trace_distance(rho, rho0)) 81 | -------------------------------------------------------------------------------- /examples/mipt.py: -------------------------------------------------------------------------------- 1 | """ 2 | demo example of mipt in tc style 3 | """ 4 | 5 | from functools import partial 6 | import time 7 | import numpy as np 8 | from scipy import stats 9 | import tensorcircuit as tc 10 | 11 | K = tc.set_backend("jax") 12 | # tf backend is slow (at least on cpu) 13 | 14 | 15 | @partial(K.jit, static_argnums=(2, 3, 4)) 16 | def circuit_output(random_matrix, status, n, d, p): 17 | """ 18 | mipt circuit 19 | 20 | :param random_matrix: a float or complex tensor containing 4*4 random haar matrix wth size [d*n, 4, 4] 21 | :type random_matrix: _type_ 22 | :param status: a int tensor with element in 0 or 1 or 2 (no meausrement) with size d*n 23 | :type status: _type_ 24 | :param n: number of qubits 25 | :type n: _type_ 26 | :param d: number of depth 27 | :type d: _type_ 28 | :param p: measurement ratio 29 | :type p: float 30 | :return: output state 31 | """ 32 | random_matrix = K.reshape(random_matrix, [d, n, 4, 4]) 33 | status = K.reshape(status, [d, n]) 34 | inputs = None 35 | for j in range(d): 36 | if inputs is None: 37 | c = tc.Circuit(n) 38 | else: 39 | c = tc.Circuit(n, inputs=inputs) 40 | for i in range(0, n, 2): 41 | c.unitary(i, (i + 1) % n, unitary=random_matrix[j, i]) 42 | for i in range(1, n, 2): 43 | c.unitary(i, (i + 1) % n, unitary=random_matrix[j, i]) 44 | inputs = c.state() 45 | c = tc.Circuit(n, inputs=inputs) 46 | for i in range(n): 47 | c.general_kraus( 48 | [ 49 | np.sqrt(p) * np.array([[1.0, 0], [0, 0]]), 50 | np.sqrt(p) * np.array([[0, 0], [0, 1.0]]), 51 | np.sqrt(1 - p) * np.eye(2), 52 | ], 53 | i, 54 | status=status[j, i], 55 | ) 56 | inputs = c.state() 57 | c = tc.Circuit(n, inputs=inputs) 58 | inputs = c.state() 59 | inputs /= K.norm(inputs) 60 | return inputs 61 | 62 | 63 | @partial(K.jit, static_argnums=(2, 3, 4)) 64 | def cals(random_matrix, status, n, d, p): 65 | state = circuit_output(random_matrix, status, n, d, p) 66 | rho = tc.quantum.reduced_density_matrix(state, cut=[i for i in range(n // 2)]) 67 | return tc.quantum.entropy(rho), tc.quantum.renyi_entropy(rho, k=2) 68 | 69 | 70 | if 
__name__ == "__main__": 71 | n = 12 72 | d = 12 73 | st = np.random.uniform(size=[d * n]) 74 | ## assume all X gate instead 75 | rm = [stats.unitary_group.rvs(4) for _ in range(d * n)] 76 | rm = [r / np.linalg.det(r) for r in rm] 77 | rm = np.stack(rm) 78 | time0 = time.time() 79 | print(cals(rm, st, n, d, 0.1)) 80 | time1 = time.time() 81 | st = np.random.uniform(size=[d * n]) 82 | print(cals(rm, st, n, d, 0.1)) 83 | time2 = time.time() 84 | print(f"compiling time {time1-time0}, running time {time2-time1}") 85 | -------------------------------------------------------------------------------- /examples/mpsvsexact.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple script to benchmark the approximation power of the MPS simulator. 3 | """ 4 | 5 | import sys 6 | 7 | sys.path.insert(0, "../") 8 | 9 | import tensorcircuit as tc 10 | 11 | tc.set_backend("tensorflow") 12 | tc.set_dtype("complex128") 13 | 14 | 15 | def tfi_energy(c, n, j=1.0, h=-1.0): 16 | e = 0.0 17 | for i in range(n): 18 | e += h * c.expectation((tc.gates.x(), [i])) 19 | for i in range(n - 1): 20 | e += j * c.expectation((tc.gates.z(), [i]), (tc.gates.z(), [(i + 1) % n])) 21 | return e 22 | 23 | 24 | def energy(param, mpsd=None): 25 | if mpsd is None: 26 | c = tc.Circuit(n) 27 | else: 28 | c = tc.MPSCircuit(n) 29 | c.set_split_rules({"max_singular_values": mpsd}) 30 | 31 | for i in range(n): 32 | c.H(i) 33 | for j in range(nlayers): 34 | for i in range(n - 1): 35 | c.exp1( 36 | i, 37 | (i + 1) % n, 38 | theta=param[2 * j, i], 39 | unitary=tc.gates._zz_matrix, 40 | ) 41 | for i in range(n): 42 | c.rx(i, theta=param[2 * j + 1, i]) 43 | 44 | e = tfi_energy(c, n) 45 | e = tc.backend.real(e) 46 | if mpsd is not None: 47 | fidelity = c._fidelity 48 | else: 49 | fidelity = None 50 | return e, c.state(), fidelity 51 | 52 | 53 | n, nlayers = 15, 20 54 | print("number of qubits: ", n) 55 | print("number of layers: ", nlayers) 56 | 57 | param = tc.backend.implicit_randu([2 * nlayers, n]) 58 | # param = tc.backend.ones([2 * nlayers, n]) 59 | # it turns out that the mps approximation power highly depends on the 60 | # parameters, if we use ``param = tc.backend.ones``, the apprixmation ratio decays very fast 61 | # At least, the estimated fidelity is a very good proxy metric for real fidelity 62 | # as long as it is larger than 50% 63 | e0, s0, _ = energy(param) 64 | print( 65 | "entanglement: ", 66 | tc.backend.numpy( 67 | tc.quantum.entropy(tc.quantum.reduced_density_matrix(s0, cut=n // 2)) 68 | ), 69 | ) 70 | 71 | for mpsd in [2, 5, 10, 20, 50, 100]: 72 | e1, s1, f1 = energy(param, mpsd=mpsd) 73 | print("------------------------") 74 | print("bond dimension: ", mpsd) 75 | print( 76 | "exact energy: ", 77 | tc.backend.numpy(e0), 78 | "mps simulator energy: ", 79 | tc.backend.numpy(e1), 80 | ) 81 | print( 82 | "energy relative error(%): ", 83 | tc.backend.numpy(tc.backend.abs((e1 - e0) / e0)) * 100, 84 | ) 85 | print("estimated fidelity:", tc.backend.numpy(f1)) 86 | print( 87 | "real fidelity:", 88 | tc.backend.numpy( 89 | tc.backend.abs(tc.backend.tensordot(tc.backend.conj(s1), s0, 1)) 90 | ), 91 | ) 92 | -------------------------------------------------------------------------------- /examples/noisy_sampling_jit.py: -------------------------------------------------------------------------------- 1 | """ 2 | For hardware simlation, only sample interface is available and Monte Carlo simulation is enough 3 | """ 4 | 5 | import tensorcircuit as tc 6 | 7 | n = 6 8 | m = 4 9 | pn = 0.003 10 
| 11 | K = tc.set_backend("jax") 12 | 13 | 14 | def make_noise_circuit(c, weights, status=None): 15 | for j in range(m): 16 | for i in range(n - 1): 17 | c.cnot(i, i + 1) 18 | if c.is_dm is False: 19 | c.depolarizing(i, px=pn, py=pn, pz=pn, status=status[0, i, j]) 20 | c.depolarizing(i + 1, px=pn, py=pn, pz=pn, status=status[1, i, j]) 21 | else: 22 | c.depolarizing(i, px=pn, py=pn, pz=pn) 23 | c.depolarizing(i + 1, px=pn, py=pn, pz=pn) 24 | for i in range(n): 25 | c.rx(i, theta=weights[i, j]) 26 | return c 27 | 28 | 29 | @K.jit 30 | def noise_measurement(weights, status, key): 31 | c = tc.Circuit(n) 32 | c = make_noise_circuit(c, weights, status) 33 | return c.sample(allow_state=True, random_generator=key) 34 | 35 | 36 | @K.jit 37 | def exact_result(weights): 38 | c = tc.DMCircuit(n) 39 | c = make_noise_circuit(c, weights) 40 | return K.real(c.expectation_ps(z=[0, 1])) 41 | 42 | 43 | weights = K.ones([n, m]) 44 | z0z1_exact = exact_result(weights) 45 | 46 | 47 | tries = 2**15 48 | status = K.implicit_randu([tries, 2, n, m]) 49 | subkey = K.get_random_state(42) 50 | 51 | # a micro benchmarking 52 | 53 | tc.utils.benchmark(noise_measurement, weights, status[0], subkey) 54 | 55 | 56 | rs = [] 57 | for i in range(tries): 58 | # can also be vmapped, but a tradeoff between number of trials here for further jit 59 | key, subkey = K.random_split(subkey) 60 | r = noise_measurement(weights, status[i], key) 61 | rs.append(r[0]) 62 | 63 | rs = (K.stack(rs) - 0.5) * 2 64 | z0z1_mc = K.mean(rs[:, 0] * rs[:, 1]) 65 | 66 | print(z0z1_exact, z0z1_mc) 67 | 68 | assert abs(z0z1_exact - z0z1_mc) < 0.03 69 | -------------------------------------------------------------------------------- /examples/omeinsum_julia/omeinsum.jl: -------------------------------------------------------------------------------- 1 | import OMEinsum 2 | import ArgParse 3 | import JSON 4 | using KaHyPar 5 | 6 | function parse_commandline() 7 | s = ArgParse.ArgParseSettings() 8 | 9 | @ArgParse.add_arg_table s begin 10 | "--einsum_json" 11 | arg_type = String 12 | default = "einsum.json" 13 | "--result_json" 14 | arg_type = String 15 | default = "opteinsum.json" 16 | "--sc_target" 17 | arg_type = Int 18 | default = 20 19 | "--beta_start" 20 | arg_type = Float64 21 | default = 0.01 22 | "--beta_step" 23 | arg_type = Float64 24 | default = 0.01 25 | "--beta_stop" 26 | arg_type = Float64 27 | default = 15.0 28 | "--ntrials" 29 | arg_type = Int 30 | default = 10 31 | "--niters" 32 | arg_type = Int 33 | default = 50 34 | "--sc_weight" 35 | arg_type = Float64 36 | default = 1.0 37 | "--rw_weight" 38 | arg_type = Float64 39 | default = 0.2 40 | "--kahypar_init" 41 | action = :store_true 42 | end 43 | 44 | return ArgParse.parse_args(s) 45 | end 46 | 47 | function main() 48 | parsed_args = parse_commandline() 49 | # println("Parsed args:") 50 | # for (arg,val) in parsed_args 51 | # println(" $arg => $val") 52 | # end 53 | # println(Threads.nthreads()) 54 | contraction_args = JSON.parsefile(parsed_args["einsum_json"]) 55 | 56 | inputs = map(Tuple, contraction_args["inputs"]) 57 | output = contraction_args["output"] 58 | 59 | eincode = OMEinsum.EinCode(Tuple(inputs), Tuple(output)) 60 | 61 | size_dict = OMEinsum.uniformsize(eincode, 2) 62 | for (k, v) in contraction_args["size"] 63 | size_dict[k] = v 64 | end 65 | 66 | if parsed_args["kahypar_init"] 67 | eincode = OMEinsum.optimize_code(eincode, size_dict, OMEinsum.KaHyParBipartite( 68 | sc_target=parsed_args["sc_target"], 69 | max_group_size=50)) 70 | end 71 | 72 | algorithm = OMEinsum.TreeSA( 73 | 
sc_target=parsed_args["sc_target"], 74 | βs=parsed_args["beta_start"]:parsed_args["beta_step"]:parsed_args["beta_stop"], 75 | ntrials=parsed_args["ntrials"], 76 | niters=parsed_args["niters"], 77 | sc_weight=parsed_args["sc_weight"], 78 | rw_weight=parsed_args["rw_weight"], 79 | initializer=parsed_args["kahypar_init"] ? :specified : :greedy 80 | ) 81 | # println(parsed_args["beta_start"]:parsed_args["beta_step"]:parsed_args["beta_stop"]) 82 | # println(algorithm) 83 | optcode = OMEinsum.optimize_code(eincode, size_dict, algorithm) 84 | OMEinsum.writejson(parsed_args["result_json"], optcode) 85 | end 86 | 87 | main() 88 | -------------------------------------------------------------------------------- /examples/omeinsum_julia/omeinsum_treesa_optimizer.py: -------------------------------------------------------------------------------- 1 | from typing import List, Set, Dict, Tuple 2 | 3 | 4 | class OMEinsumTreeSAOptimizer(object): 5 | def __init__( 6 | self, 7 | sc_target: int = 20, 8 | betas: Tuple[float, float, float] = (0.01, 0.01, 15), 9 | ntrials: int = 10, 10 | niters: int = 50, 11 | sc_weight: float = 1.0, 12 | rw_weight: float = 0.2, 13 | ): 14 | self.sc_target = sc_target 15 | self.betas = betas 16 | self.ntrials = ntrials 17 | self.niters = niters 18 | self.sc_weight = sc_weight 19 | self.rw_weight = rw_weight 20 | 21 | def _contraction_tree_to_contraction_path(self, ei, queue, path, idx): 22 | if ei["isleaf"]: 23 | # OMEinsum provide 1-based index 24 | # but in contraction path we want 0-based index 25 | ei["tensorindex"] -= 1 26 | return idx 27 | assert len(ei["args"]) == 2, "must be a binary tree" 28 | for child in ei["args"]: 29 | idx = self._contraction_tree_to_contraction_path(child, queue, path, idx) 30 | assert "tensorindex" in child 31 | 32 | lhs_args = sorted( 33 | [queue.index(child["tensorindex"]) for child in ei["args"]], reverse=True 34 | ) 35 | for arg in lhs_args: 36 | queue.pop(arg) 37 | 38 | ei["tensorindex"] = idx 39 | path.append(lhs_args) 40 | queue.append(idx) 41 | return idx + 1 42 | 43 | def __call__( 44 | self, 45 | inputs: List[Set[str]], 46 | output: Set[str], 47 | size: Dict[str, int], 48 | memory_limit=None, 49 | ) -> List[Tuple[int, int]]: 50 | raise NotImplementedError 51 | -------------------------------------------------------------------------------- /examples/optperformance_comparison.py: -------------------------------------------------------------------------------- 1 | """ 2 | Optimization for performance comparison for different densities of two-qubit gates 3 | (random layouts averaged). 
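CNOT gates are inserted stochastically via unitary_kraus with probability p, and vectorized_value_and_grad batches the optimization over both the parameter initializations and the random layout seeds.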
4 | """ 5 | 6 | import sys 7 | 8 | sys.path.insert(0, "../") 9 | import tensorflow as tf 10 | import numpy as np 11 | import cotengra as ctg 12 | import tensorcircuit as tc 13 | 14 | K = tc.set_backend("tensorflow") 15 | optr = ctg.ReusableHyperOptimizer( 16 | methods=["greedy"], 17 | parallel=True, 18 | minimize="flops", 19 | max_time=30, 20 | max_repeats=512, 21 | progbar=True, 22 | ) 23 | tc.set_contractor("custom", optimizer=optr, preprocessing=True) 24 | 25 | eye4 = K.eye(4) 26 | cnot = tc.array_to_tensor(tc.gates._cnot_matrix) 27 | 28 | 29 | def energy_p(params, p, seed, n, nlayers): 30 | g = tc.templates.graphs.Line1D(n) 31 | c = tc.Circuit(n) 32 | for i in range(n): 33 | c.H(i) 34 | for i in range(nlayers): 35 | for k in range(n): 36 | c.ry(k, theta=params[2 * i, k]) 37 | c.rz(k, theta=params[2 * i + 1, k]) 38 | c.ry(k, theta=params[2 * i + 2, k]) 39 | for k in range(n // 2): # alternating entangler with probability 40 | c.unitary_kraus( 41 | [eye4, cnot], 42 | 2 * k + (i % 2), 43 | (2 * k + (i % 2) + 1) % n, 44 | prob=[1 - p, p], 45 | status=seed[i, k], 46 | ) 47 | 48 | e = tc.templates.measurements.heisenberg_measurements( 49 | c, g, hzz=1, hxx=0, hyy=0, hx=-1, hy=0, hz=0 50 | ) # TFIM energy from expectation of circuit c defined on lattice given by g 51 | return e 52 | 53 | 54 | vagf = K.jit( 55 | K.vectorized_value_and_grad(energy_p, argnums=0, vectorized_argnums=(0, 2)), 56 | static_argnums=(3, 4), 57 | ) 58 | 59 | energy_list = [] 60 | 61 | 62 | if __name__ == "__main__": 63 | n = 12 64 | nlayers = 12 65 | nsteps = 250 66 | sample = 10 67 | debug = True 68 | 69 | for a in np.arange(0.1, 1, 0.1): 70 | energy_sublist = [] 71 | params = K.implicit_randn(shape=[sample, 3 * nlayers, n]) 72 | seeds = K.implicit_randu(shape=[sample, nlayers, n // 2]) 73 | opt = tc.backend.optimizer(tf.keras.optimizers.Adam(2e-3)) 74 | for i in range(nsteps): 75 | p = (n * nlayers) ** (a - 1) 76 | p = tc.array_to_tensor(p, dtype="float32") 77 | e, grads = vagf(params, p, seeds, n, nlayers) 78 | params = opt.update(grads, params) 79 | if i % 50 == 0 and debug: 80 | print(a, i, e) 81 | energy_list.append(K.numpy(e)) 82 | 83 | print(energy_list) 84 | -------------------------------------------------------------------------------- /examples/qaoa_parallel_opt.py: -------------------------------------------------------------------------------- 1 | """ 2 | Depracated, using ``vectorized_value_and_grad`` instead for batched optimization 3 | """ 4 | 5 | import sys 6 | 7 | sys.path.insert(0, "../") 8 | 9 | import tensorcircuit as tc 10 | from tensorcircuit.applications.dqas import ( 11 | parallel_qaoa_train, 12 | single_generator, 13 | set_op_pool, 14 | ) 15 | from tensorcircuit.applications.layers import * # pylint: disable=wildcard-import 16 | from tensorcircuit.applications.graphdata import get_graph 17 | 18 | tc.set_backend("tensorflow") 19 | 20 | set_op_pool([Hlayer, rxlayer, rylayer, rzlayer, xxlayer, yylayer, zzlayer]) 21 | 22 | 23 | if __name__ == "__main__": 24 | # old fashion example, prefer vvag for new generation software for this task 25 | parallel_qaoa_train( 26 | [0, 6, 1, 6, 1], 27 | single_generator(get_graph("8B")), 28 | tries=4, 29 | cores=2, 30 | batch=1, 31 | epochs=5, 32 | scale=0.8, 33 | ) 34 | -------------------------------------------------------------------------------- /examples/qem_dqas.py: -------------------------------------------------------------------------------- 1 | """ 2 | DQAS for QFT QEM circuit design, deprecated DQAS implementation 3 | """ 4 | 5 | import sys 6 | 7 | 
sys.path.insert(0, "../") 8 | 9 | from functools import partial 10 | import cirq 11 | import numpy as np 12 | import tensorflow as tf 13 | from tensorcircuit.applications.vags import qft_qem_vag 14 | from tensorcircuit.applications.dqas import ( 15 | set_op_pool, 16 | DQAS_search, 17 | verbose_output, 18 | ) 19 | 20 | 21 | def main_3(): 22 | qft_3 = partial(qft_qem_vag, n=3) 23 | 24 | set_op_pool([cirq.X, cirq.Y, cirq.Z, cirq.I, cirq.T, cirq.S]) 25 | DQAS_search( 26 | qft_3, 27 | nnp_initial_value=tf.zeros([6, 6]), 28 | p=6, 29 | prethermal=0, 30 | batch=32, 31 | verbose=False, 32 | epochs=30, 33 | ) 34 | 35 | 36 | def main_4(): 37 | qft_4 = partial(qft_qem_vag, n=4) 38 | 39 | set_op_pool( 40 | [ 41 | cirq.I, 42 | cirq.X, 43 | cirq.Y, 44 | cirq.Z, 45 | cirq.H, 46 | cirq.rx(np.pi / 3), 47 | cirq.rx(np.pi * 2.0 / 3), 48 | cirq.rz(np.pi / 3), 49 | cirq.rz(np.pi * 2.0 / 3), 50 | cirq.S, 51 | cirq.T, 52 | ] 53 | ) 54 | DQAS_search( 55 | qft_4, 56 | nnp_initial_value=tf.zeros([12, 11]), 57 | p=12, 58 | prethermal=0, 59 | batch=32, 60 | verbose=False, 61 | verbose_func=verbose_output, 62 | epochs=6, 63 | ) 64 | 65 | 66 | if __name__ == "__main__": 67 | # main_3() 68 | main_4() 69 | -------------------------------------------------------------------------------- /examples/quantumng.py: -------------------------------------------------------------------------------- 1 | """ 2 | Quantum natural gradient descent demonstration with the TFIM VQE example. 3 | """ 4 | 5 | import sys 6 | import time 7 | 8 | sys.path.insert(0, "../") 9 | 10 | import optax 11 | import tensorflow as tf 12 | 13 | import tensorcircuit as tc 14 | from tensorcircuit import experimental 15 | 16 | tc.set_backend("jax") # tf, jax are both ok 17 | 18 | n, nlayers = 7, 2 19 | g = tc.templates.graphs.Line1D(n) 20 | 21 | 22 | @tc.backend.jit 23 | def state(params): 24 | params = tc.backend.reshape(params, [2 * nlayers, n]) 25 | c = tc.Circuit(n) 26 | c = tc.templates.blocks.example_block(c, params, nlayers=nlayers) 27 | return c.state() 28 | 29 | 30 | def energy(params): 31 | s = state(params) 32 | c = tc.Circuit(n, inputs=s) 33 | loss = tc.templates.measurements.heisenberg_measurements( 34 | c, g, hzz=1, hxx=0, hyy=0, hx=-1 35 | ) 36 | return tc.backend.real(loss) 37 | 38 | 39 | vags = tc.backend.jit(tc.backend.value_and_grad(energy)) 40 | lr = 1e-2 41 | if tc.backend.name == "jax": 42 | oopt = optax.sgd(lr) 43 | else: 44 | oopt = tf.keras.optimizers.SGD(lr) 45 | opt = tc.backend.optimizer(oopt) 46 | 47 | qng = tc.backend.jit(experimental.qng(state, mode="fwd")) 48 | qngr = tc.backend.jit(experimental.qng(state, mode="rev")) 49 | qng2 = tc.backend.jit(experimental.qng2(state, mode="fwd")) 50 | qng2r = tc.backend.jit(experimental.qng2(state, mode="rev")) 51 | 52 | 53 | def train_loop(params, i, qngf): 54 | qmetric = qngf(params) 55 | value, grad = vags(params) 56 | ngrad = tc.backend.solve(qmetric, grad, assume_a="sym") 57 | params = opt.update(ngrad, params) 58 | if i % 10 == 0: 59 | print(tc.backend.numpy(value)) 60 | return params 61 | 62 | 63 | def plain_train_loop(params, i): 64 | value, grad = vags(params) 65 | params = opt.update(grad, params) 66 | if i % 10 == 0: 67 | print(tc.backend.numpy(value)) 68 | return params 69 | 70 | 71 | def benchmark(f, *args): 72 | # params = tc.backend.implicit_randn([2 * nlayers * n]) 73 | params = 0.1 * tc.backend.ones([2 * nlayers * n]) 74 | params = tc.backend.cast(params, "float32") 75 | time0 = time.time() 76 | params = f(params, 0, *args) 77 | time1 = time.time() 78 | for i in range(100): 
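# the first call above pays the staging/compilation cost; the 100 calls in this loop measure the steady-state running time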
79 | params = f(params, i + 1, *args) 80 | time2 = time.time() 81 | 82 | print("staging time: ", time1 - time0, "running time: ", (time2 - time1) / 100) 83 | 84 | 85 | if __name__ == "__main__": 86 | print("quantum natural gradient descent 1+f") 87 | benchmark(train_loop, qng) 88 | print("quantum natural gradient descent 1+r") 89 | benchmark(train_loop, qngr) 90 | print("quantum natural gradient descent 2+f") 91 | benchmark(train_loop, qng2) 92 | print("quantum natural gradient descent 2+r") 93 | benchmark(train_loop, qng2r) 94 | print("plain gradient descent") 95 | benchmark(plain_train_loop) 96 | -------------------------------------------------------------------------------- /examples/quditcircuit.py: -------------------------------------------------------------------------------- 1 | """ 2 | Basic features of ``tc.Circuit`` class support qudits natively 3 | """ 4 | 5 | import numpy as np 6 | import tensorcircuit as tc 7 | 8 | K = tc.set_backend("tensorflow") 9 | 10 | n = 3 11 | 12 | # d=3 qudits 13 | ns = [tc.gates.Gate(np.array([1.0, 0.0, 0.0])) for _ in range(n)] 14 | mps = tc.quantum.QuVector([nd[0] for nd in ns]) 15 | 16 | c = tc.Circuit(n, mps_inputs=mps) 17 | 18 | ctrl1switch02 = np.kron( 19 | np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]]), np.eye(3) 20 | ) + np.kron( 21 | np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), 22 | np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]), 23 | ) 24 | print("two-qudit gate: \n", ctrl1switch02) 25 | 26 | # use unitary gate with ``Gate`` as input to avoid the matrix autoreshape to 2-base 27 | c.unitary(0, unitary=tc.gates.Gate(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]))) 28 | c.unitary(0, 2, unitary=tc.gates.Gate(np.array(ctrl1switch02.reshape([3, 3, 3, 3])))) 29 | 30 | print(c.state()) 31 | for i in range(n): 32 | print(i) 33 | print( 34 | c.expectation( 35 | [tc.gates.Gate(np.array([[1, 0, 0], [0, 0, 0], [0, 0, -1]])), [i]] 36 | ) 37 | ) 38 | -------------------------------------------------------------------------------- /examples/rem_super_large_scale.py: -------------------------------------------------------------------------------- 1 | """ 2 | Demonstrate the failure of rem when qubit number is much larger than 1/p 3 | """ 4 | 5 | from functools import partial 6 | import numpy as np 7 | import tensorcircuit as tc 8 | 9 | 10 | def simulate_engine(p, ans): 11 | fans = "" 12 | for a in ans: 13 | if p > np.random.uniform(): 14 | if a == "0": 15 | fans += "1" 16 | else: 17 | fans += "0" 18 | else: 19 | fans += a 20 | return fans 21 | 22 | 23 | def run(cs, shots, p=0.1): 24 | # we assume only all 0 and all 1 results for simplicity 25 | rds = [] 26 | for c in cs: 27 | if len(c.to_qir()) < 2: 28 | ans = "0" * c._nqubits 29 | else: 30 | ans = "1" * c._nqubits 31 | rd = {} 32 | for _ in range(shots): 33 | r = simulate_engine(p, ans) 34 | rd[r] = rd.get(r, 0) + 1 35 | rds.append(rd) 36 | return rds 37 | 38 | 39 | if __name__ == "__main__": 40 | for p in [0.1, 0.05, 0.02]: 41 | print(p) 42 | n = int(3 / p) 43 | c = tc.Circuit(n) 44 | c.x(range(n)) 45 | runp = partial(run, p=p) 46 | r = runp([c], 8192)[0] 47 | mit = tc.results.rem.ReadoutMit(runp) 48 | mit.cals_from_system(n) 49 | for i in range(n): 50 | print(i, "\n", mit.single_qubit_cals[i]) 51 | rs = [] 52 | for i in range(n): 53 | rs.append([i, np.abs(mit.expectation(r, list(range(i))))]) 54 | print(rs[i]) 55 | -------------------------------------------------------------------------------- /examples/sample_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 
2 | perfect sampling vs. state sampling 3 | the benchmark results show that perfect/tensor sampling should only be used when the wavefunction doesn't fit in memory 4 | """ 5 | 6 | import time 7 | import numpy as np 8 | import tensorcircuit as tc 9 | 10 | K = tc.set_backend("jax") 11 | # tf staging is too slow 12 | 13 | 14 | def construct_circuit(n, nlayers): 15 | c = tc.Circuit(n) 16 | for i in range(n): 17 | c.H(i) 18 | for _ in range(nlayers): 19 | for i in range(n): 20 | r = np.random.randint(n - 1) + 1 21 | c.cnot(i, (i + r) % n) 22 | return c 23 | 24 | 25 | for n in [8, 10, 12, 14, 16]: 26 | for nlayers in [2, 6, 10]: 27 | print("n: ", n, " nlayers: ", nlayers) 28 | c = construct_circuit(n, nlayers) 29 | time0 = time.time() 30 | s = c.sample(allow_state=True) 31 | time1 = time.time() 32 | # print(smp) 33 | print("state sampling time: ", time1 - time0) 34 | time0 = time.time() 35 | smp = c.sample() 36 | # print(smp) 37 | time1 = time.time() 38 | print("nonjit tensor sampling time: ", time1 - time0) 39 | time0 = time.time() 40 | s = c.sample(allow_state=True, batch=10) 41 | time1 = time.time() 42 | print("batch state sampling time: ", (time1 - time0) / 10) 43 | 44 | @K.jit 45 | def f(key): 46 | K.set_random_state(key) 47 | return c.sample() 48 | 49 | key = K.get_random_state(42) 50 | key1, key2 = K.random_split(key) 51 | time0 = time.time() 52 | smp = f(key1) 53 | time1 = time.time() 54 | for _ in range(10): 55 | key1, key2 = K.random_split(key2) 56 | smp = f(key1) 57 | # print(smp) 58 | time2 = time.time() 59 | print("jittable tensor sampling staging time: ", time1 - time0) 60 | print("jittable tensor sampling running time: ", (time2 - time1) / 10) 61 | -------------------------------------------------------------------------------- /examples/simple_qaoa.py: -------------------------------------------------------------------------------- 1 | """ 2 | A plain QAOA optimization example on a given graph using networkx. 3 | """ 4 | 5 | import sys 6 | 7 | sys.path.insert(0, "../") 8 | import networkx as nx 9 | import tensorflow as tf 10 | import tensorcircuit as tc 11 | 12 | K = tc.set_backend("tensorflow") 13 | 14 | ## 1. define the graph 15 | 16 | 17 | def dict2graph(d): 18 | g = nx.to_networkx_graph(d) 19 | for e in g.edges: 20 | if not g[e[0]][e[1]].get("weight"): 21 | g[e[0]][e[1]]["weight"] = 1.0 22 | return g 23 | 24 | 25 | # a graph instance 26 | 27 | example_graph_dict = { 28 | 0: {1: {"weight": 1.0}, 7: {"weight": 1.0}, 3: {"weight": 1.0}}, 29 | 1: {0: {"weight": 1.0}, 2: {"weight": 1.0}, 3: {"weight": 1.0}}, 30 | 2: {1: {"weight": 1.0}, 3: {"weight": 1.0}, 5: {"weight": 1.0}}, 31 | 4: {7: {"weight": 1.0}, 6: {"weight": 1.0}, 5: {"weight": 1.0}}, 32 | 7: {4: {"weight": 1.0}, 6: {"weight": 1.0}, 0: {"weight": 1.0}}, 33 | 3: {1: {"weight": 1.0}, 2: {"weight": 1.0}, 0: {"weight": 1.0}}, 34 | 6: {7: {"weight": 1.0}, 4: {"weight": 1.0}, 5: {"weight": 1.0}}, 35 | 5: {6: {"weight": 1.0}, 4: {"weight": 1.0}, 2: {"weight": 1.0}}, 36 | } 37 | 38 | example_graph = dict2graph(example_graph_dict) 39 | 40 | # 2. 
define the quantum ansatz 41 | 42 | nlayers = 3 43 | 44 | 45 | def QAOAansatz(gamma, beta, g=example_graph): 46 | n = len(g.nodes) 47 | c = tc.Circuit(n) 48 | for i in range(n): 49 | c.H(i) 50 | for j in range(nlayers): 51 | for e in g.edges: 52 | c.exp1( 53 | e[0], 54 | e[1], 55 | unitary=tc.gates._zz_matrix, 56 | theta=g[e[0]][e[1]].get("weight", 1.0) * gamma[j], 57 | ) 58 | for i in range(n): 59 | c.rx(i, theta=beta[j]) 60 | 61 | # calculate the loss function, max cut 62 | loss = 0.0 63 | for e in g.edges: 64 | loss += c.expectation_ps(z=[e[0], e[1]]) 65 | 66 | return K.real(loss) 67 | 68 | 69 | # 3. get compiled function for QAOA ansatz and its gradient 70 | 71 | QAOA_vg = K.jit(K.value_and_grad(QAOAansatz, argnums=(0, 1)), static_argnums=2) 72 | 73 | 74 | # 4. optimization loop 75 | 76 | beta = K.implicit_randn(shape=[nlayers], stddev=0.1) 77 | gamma = K.implicit_randn(shape=[nlayers], stddev=0.1) 78 | opt = K.optimizer(tf.keras.optimizers.Adam(1e-2)) 79 | 80 | for i in range(100): 81 | loss, grads = QAOA_vg(gamma, beta, example_graph) 82 | print(K.numpy(loss)) 83 | gamma, beta = opt.update(grads, [gamma, beta]) # gradient descent 84 | -------------------------------------------------------------------------------- /examples/time_evolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple static Hamiltonian evolution benchmark 3 | """ 4 | 5 | import time 6 | from functools import partial 7 | import numpy as np 8 | from scipy.integrate import solve_ivp 9 | import tensorcircuit as tc 10 | from tensorcircuit.experimental import hamiltonian_evol 11 | 12 | K = tc.set_backend("jax") 13 | tc.set_dtype("complex128") 14 | 15 | 16 | @partial(K.jit, static_argnums=1) 17 | def total_z(psi, N): 18 | return K.real( 19 | K.sum(K.stack([tc.expectation([tc.gates.z(), i], ket=psi) for i in range(N)])) 20 | ) 21 | 22 | 23 | @K.jit 24 | def naive_evol(t, h, psi0): 25 | return K.reshape(K.expm(-1j * t * h) @ K.reshape(psi0, [-1, 1]), [-1]) 26 | 27 | 28 | @K.jit 29 | def hpsi(h, y): 30 | return K.reshape(-1.0j * h @ K.reshape(y, [-1, 1]), [-1]) 31 | 32 | 33 | def main(N): 34 | psi0 = np.zeros([2**N]) 35 | psi0[0] = 1 36 | psi0 = tc.array_to_tensor(psi0) 37 | g = tc.templates.graphs.Line1D(N, pbc=False) 38 | h = tc.quantum.heisenberg_hamiltonian(g, hzz=1, hxx=0, hyy=0, hx=1, sparse=False) 39 | tlist = K.arange(0, 3, 0.1) 40 | time0 = time.time() 41 | for t in tlist: 42 | psit = naive_evol(t, h, psi0) 43 | psit /= K.norm(psit) 44 | print(total_z(psit, N)) 45 | time1 = time.time() 46 | r = hamiltonian_evol(1.0j * tlist, h, psi0, callback=partial(total_z, N=N)) 47 | print(r) 48 | time2 = time.time() 49 | 50 | def fun(t, y): 51 | y = tc.array_to_tensor(y) 52 | return K.numpy(hpsi(h, y)) 53 | 54 | r = solve_ivp( 55 | fun, (0, 3), psi0, method="DOP853", t_eval=K.numpy(tlist), rtol=1e-6, atol=1e-6 56 | ) 57 | for psit in r.y.T: 58 | print(total_z(psit, N)) 59 | time3 = time.time() 60 | print( 61 | "matrix exponential:", 62 | time1 - time0, 63 | "tc fast implementation", 64 | time2 - time1, 65 | "scipy ode", 66 | time3 - time2, 67 | ) 68 | 69 | 70 | if __name__ == "__main__": 71 | main(10) 72 | -------------------------------------------------------------------------------- /examples/timeevolution_trotter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Time evolution of Heisenberg model realized by Trotter decomposition 3 | """ 4 | 5 | import numpy as np 6 | import tensorcircuit as tc 7 | 8 | K = 
tc.set_backend("tensorflow") 9 | tc.set_dtype("complex128") 10 | 11 | xx = tc.gates._xx_matrix 12 | yy = tc.gates._yy_matrix 13 | zz = tc.gates._zz_matrix 14 | 15 | nqubit = 4 16 | t = 1.0 17 | tau = 0.1 18 | 19 | 20 | def Trotter_step_unitary(input_state, tau, nqubit): 21 | c = tc.Circuit(nqubit, inputs=input_state) 22 | for i in range(nqubit - 1): ### U_zz 23 | c.exp1(i, i + 1, theta=tau, unitary=zz) 24 | for i in range(nqubit - 1): ### U_yy 25 | c.exp1(i, i + 1, theta=tau, unitary=yy) 26 | for i in range(nqubit - 1): ### U_xx 27 | c.exp1(i, i + 1, theta=tau, unitary=xx) 28 | TSUstate = c.state() ### return state U(τ)|ψ_i> 29 | z0 = c.expectation_ps(z=[0]) 30 | return TSUstate, z0 31 | 32 | 33 | TSU_vmap = tc.backend.jit( 34 | tc.backend.vmap( 35 | Trotter_step_unitary, 36 | vectorized_argnums=0, 37 | ) 38 | ) 39 | 40 | ninput = 2 41 | input_state = np.zeros((ninput, 2**nqubit)) 42 | input_state[0, 0] = 1.0 43 | input_state[1, -1] = 1.0 44 | 45 | for _ in range(int(t / tau)): 46 | input_state, z0 = TSU_vmap(input_state, tau, nqubit) 47 | print("z: ", z0) 48 | -------------------------------------------------------------------------------- /examples/universal_lr.py: -------------------------------------------------------------------------------- 1 | """ 2 | Backend agnostic linear regression with gradient descent optimization: 3 | a demonstration on most of core features and paradigm of tensorcircuit 4 | """ 5 | 6 | # this script shows how backend agnostic magic works, no code change is required to switch backend 7 | # we also include jit, vmap and AD features in this pure classical example 8 | # this demonstrates that tensorcircuit can serve as a solid unified ML library without any "quantumness" 9 | 10 | import sys 11 | 12 | sys.path.insert(0, "../") 13 | import numpy as np 14 | import tensorcircuit as tc 15 | 16 | # (x, y) data preparation 17 | 18 | nsamples = 100 19 | k0 = 1.0 20 | b0 = 0.0 21 | 22 | xs0 = np.random.uniform(low=-1, high=1, size=[nsamples]) 23 | ys0 = k0 * xs0 + b0 + np.random.normal(scale=0.1, size=[nsamples]) 24 | 25 | 26 | def lr(xs, ys): 27 | """ 28 | fully ML backend agnostic linear regression implementation 29 | """ 30 | 31 | # construct the loss 32 | def loss_pointwise(x, y, param): 33 | k, b = param["k"], param["b"] 34 | yp = k * x + b 35 | return (yp - y) ** 2 36 | 37 | # we suppose this loss function only works for scalar, so that we can show the usage of ``vmap`` 38 | 39 | loss_vmap = tc.backend.vmap(loss_pointwise, vectorized_argnums=(0, 1)) 40 | 41 | # now we define the total loss for all data 42 | 43 | def loss(xs, ys, param): 44 | losses = loss_vmap(xs, ys, param) 45 | return tc.backend.sum(losses) 46 | 47 | # we get the jitted function to evaluate loss and its derivatives wrt. 
param 48 | 49 | loss_and_grad = tc.backend.jit(tc.backend.value_and_grad(loss, argnums=2)) 50 | 51 | # setup initial values and optimizers 52 | 53 | weight = {"k": tc.backend.implicit_randn(), "b": tc.backend.implicit_randn()} 54 | 55 | if tc.backend.name == "tensorflow": 56 | import tensorflow as tf 57 | 58 | opt = tc.backend.optimizer(tf.keras.optimizers.Adam(1e-2)) 59 | elif tc.backend.name == "jax": 60 | import optax 61 | 62 | opt = tc.backend.optimizer(optax.adam(1e-2)) 63 | else: 64 | raise ValueError("Unsupported backend") 65 | 66 | # gradient descent optimization loop 67 | maxstep = 500 68 | for i in range(maxstep): 69 | loss, grad = loss_and_grad(xs, ys, weight) 70 | weight = opt.update(grad, weight) 71 | if i % 100 == 0 or i == maxstep - 1: 72 | print("optimized MSE loss after %s round: " % i, tc.backend.numpy(loss)) 73 | 74 | return tc.backend.numpy(weight["k"]), tc.backend.numpy(weight["b"]) 75 | 76 | 77 | if __name__ == "__main__": 78 | for n in ["tensorflow", "jax"]: 79 | with tc.runtime_backend(n): # runtime backend switch with context manager 80 | print("~~~~~~~~ using %s backend ~~~~~~~~" % n) 81 | xs_tensor, ys_tensor = tc.array_to_tensor(xs0, ys0, dtype="float32") 82 | print("predicted coefficient", lr(xs_tensor, ys_tensor)) 83 | -------------------------------------------------------------------------------- /examples/vmap_randomness.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interplay between jit, vmap, randomness and backend 3 | """ 4 | 5 | import tensorcircuit as tc 6 | 7 | K = tc.set_backend("tensorflow") 8 | n = 10 9 | batch = 100 10 | 11 | print("tensorflow backend") 12 | # has serialization issue for random generation 13 | 14 | 15 | @K.jit 16 | def f(a, key): 17 | return a + K.stateful_randn(key, [n]) 18 | 19 | 20 | vf = K.jit(K.vmap(f)) 21 | 22 | key = K.get_random_state(42) 23 | 24 | r, _, _ = tc.utils.benchmark(f, K.ones([n], dtype="float32"), key) 25 | print(r) 26 | 27 | r, _, _ = tc.utils.benchmark(vf, K.ones([batch, n], dtype="float32"), key) 28 | print(r[:2]) 29 | 30 | 31 | K = tc.set_backend("jax") 32 | 33 | print("jax backend") 34 | 35 | 36 | @K.jit 37 | def f2(a, key): 38 | return a + K.stateful_randn(key, [n]) 39 | 40 | 41 | vf2 = K.jit(K.vmap(f2, vectorized_argnums=(0, 1))) 42 | 43 | 44 | key = K.get_random_state(42) 45 | 46 | r, _, _ = tc.utils.benchmark(f2, K.ones([n], dtype="float32"), key) 47 | print(r) 48 | 49 | keys = K.stack([K.get_random_state(i) for i in range(batch)]) 50 | 51 | r, _, _ = tc.utils.benchmark(vf2, K.ones([batch, n], dtype="float32"), keys) 52 | print(r[:2]) 53 | -------------------------------------------------------------------------------- /examples/vqe2d.py: -------------------------------------------------------------------------------- 1 | """ 2 | VQE on 2D square lattice Heisenberg model with size n*m 3 | """ 4 | 5 | import tensorflow as tf 6 | import tensorcircuit as tc 7 | 8 | # import cotengra as ctg 9 | 10 | # optr = ctg.ReusableHyperOptimizer( 11 | # methods=["greedy", "kahypar"], 12 | # parallel=True, 13 | # minimize="flops", 14 | # max_time=120, 15 | # max_repeats=4096, 16 | # progbar=True, 17 | # ) 18 | # tc.set_contractor("custom", optimizer=optr, preprocessing=True) 19 | 20 | tc.set_dtype("complex64") 21 | tc.set_backend("tensorflow") 22 | 23 | 24 | n, m, nlayers = 3, 2, 2 25 | coord = tc.templates.graphs.Grid2DCoord(n, m) 26 | 27 | 28 | def singlet_init(circuit): # assert n % 2 == 0 29 | nq = circuit._nqubits 30 | for i in range(0, nq - 1, 2): 31 | j = (i + 
1) % nq 32 | circuit.X(i) 33 | circuit.H(i) 34 | circuit.cnot(i, j) 35 | circuit.X(j) 36 | return circuit 37 | 38 | 39 | def vqe_forward(param): 40 | paramc = tc.backend.cast(param, dtype="complex64") 41 | c = tc.Circuit(n * m) 42 | c = singlet_init(c) 43 | for i in range(nlayers): 44 | c = tc.templates.blocks.Grid2D_entangling( 45 | c, coord, tc.gates._swap_matrix, paramc[i] 46 | ) 47 | loss = tc.templates.measurements.heisenberg_measurements(c, coord.lattice_graph()) 48 | return loss 49 | 50 | 51 | vgf = tc.backend.jit( 52 | tc.backend.value_and_grad(vqe_forward), 53 | ) 54 | param = tc.backend.implicit_randn(stddev=0.1, shape=[nlayers, 2 * n * m]) 55 | 56 | 57 | if __name__ == "__main__": 58 | lr = tf.keras.optimizers.schedules.ExponentialDecay(0.01, 100, 0.9) 59 | opt = tc.backend.optimizer(tf.keras.optimizers.Adam(lr)) 60 | for j in range(1000): 61 | loss, gr = vgf(param) 62 | param = opt.update(gr, param) 63 | if j % 50 == 0: 64 | print("loss", loss.numpy()) 65 | -------------------------------------------------------------------------------- /examples/vqe_parallel_pmap.py: -------------------------------------------------------------------------------- 1 | """ 2 | jax pmap paradigm for vqe on multiple gpus 3 | """ 4 | 5 | import os 6 | 7 | os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8" 8 | from functools import partial 9 | import jax 10 | import optax 11 | import tensorcircuit as tc 12 | 13 | K = tc.set_backend("jax") 14 | tc.set_contractor("cotengra") 15 | 16 | 17 | def vqef(param, measure, n, nlayers): 18 | c = tc.Circuit(n) 19 | c.h(range(n)) 20 | for i in range(nlayers): 21 | c.rzz(range(n - 1), range(1, n), theta=param[i, 0]) 22 | c.rx(range(n), theta=param[i, 1]) 23 | return K.real( 24 | tc.templates.measurements.parameterized_measurements(c, measure, onehot=True) 25 | ) 26 | 27 | 28 | def get_tfim_ps(n): 29 | tfim_ps = [] 30 | for i in range(n): 31 | tfim_ps.append(tc.quantum.xyz2ps({"x": [i]}, n=n)) 32 | for i in range(n): 33 | tfim_ps.append(tc.quantum.xyz2ps({"z": [i, (i + 1) % n]}, n=n)) 34 | return K.convert_to_tensor(tfim_ps) 35 | 36 | 37 | vqg_vgf = jax.vmap(K.value_and_grad(vqef), in_axes=(None, 0, None, None)) 38 | 39 | 40 | @partial( 41 | jax.pmap, 42 | axis_name="pmap", 43 | in_axes=(0, 0, None, None), 44 | static_broadcasted_argnums=(2, 3), 45 | ) 46 | def update(param, measure, n, nlayers): 47 | # Compute the gradients on the given minibatch (individually on each device). 
48 | loss, grads = vqg_vgf(param, measure, n, nlayers) 49 | grads = K.sum(grads, axis=0) 50 | grads = jax.lax.psum(grads, axis_name="pmap") 51 | loss = K.sum(loss, axis=0) 52 | loss = jax.lax.psum(loss, axis_name="pmap") 53 | param = opt.update(grads, param) 54 | return param, loss 55 | 56 | 57 | if __name__ == "__main__": 58 | n = 8 59 | nlayers = 4 60 | ndevices = 8 61 | m = get_tfim_ps(n) 62 | m = K.reshape(m, [ndevices, m.shape[0] // ndevices] + list(m.shape[1:])) 63 | param = K.stateful_randn(jax.random.PRNGKey(43), shape=[nlayers, 2, n], stddev=0.1) 64 | param = K.stack([param] * ndevices) 65 | opt = K.optimizer(optax.adam(1e-2)) 66 | for _ in range(100): 67 | param, loss = update(param, m, n, nlayers) 68 | print(loss[0]) 69 | -------------------------------------------------------------------------------- /examples/vqnhe_h6.py: -------------------------------------------------------------------------------- 1 | """ 2 | H6 molecule VQNHE with code from tc.application 3 | """ 4 | 5 | import sys 6 | 7 | sys.path.insert(0, "../") 8 | import numpy as np 9 | import tensorcircuit as tc 10 | from tensorcircuit.applications.vqes import VQNHE, JointSchedule, construct_matrix_v3 11 | 12 | tc.set_backend("tensorflow") 13 | tc.set_dtype("complex128") 14 | 15 | h6h = np.load("./h6_hamiltonian.npy") # reported in 0.99 A 16 | hamiltonian = construct_matrix_v3(h6h.tolist()) 17 | 18 | 19 | vqeinstance = VQNHE( 20 | 10, 21 | hamiltonian, 22 | {"width": 16, "stddev": 0.001, "choose": "complex-rbm"}, # model parameter 23 | {"filled_qubit": [0, 1, 3, 4, 5, 6, 8, 9], "epochs": 2}, # circuit parameter 24 | shortcut=True, # enable shortcut for full Hamiltonian matrix evaluation 25 | ) 26 | # 1110011100 27 | 28 | rs = vqeinstance.multi_training( 29 | tries=2, # 10 30 | maxiter=500, # 10000 31 | threshold=0.5e-8, 32 | optq=JointSchedule(200, 0.01, 800, 0.002, 800), 33 | optc=JointSchedule(200, 0.0006, 10000, 0.008, 5000), 34 | onlyq=0, 35 | debug=200, 36 | checkpoints=[(900, -3.18), (2600, -3.19), (4500, -3.2)], 37 | ) 38 | print(rs) 39 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.8 3 | ignore_missing_imports = True 4 | strict = True 5 | warn_unused_ignores = False 6 | disallow_untyped_calls = False 7 | local_partial_types = False 8 | implicit_reexport = True 9 | 10 | [mypy-tensorcircuit.backends.pytorch_ops] 11 | ;;mypy simply cannot ignore files with wildcard patterns... 12 | ;;only module level * works... 13 | ignore_errors = True 14 | 15 | 16 | [mypy-cirq.*] 17 | ignore_errors = True 18 | 19 | ;; doesn't work due to https://github.com/python/mypy/issues/10757 20 | ;; mypy + numpy is currently a disaster, never use mypy in your next project 21 | ;; unless you enjoy writting sth worse than C 22 | ;; both the establish status of mypy and support from other packages are just wasting your time 23 | ;; GET AWAY MYPY AND TYPE ANNOTATION !!! WRITTING PYTHON AS IT IS !!! 
24 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore::DeprecationWarning 4 | ignore:Explicitly requested dtype*:UserWarning -------------------------------------------------------------------------------- /requirements/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | mypy==1.5.1 2 | pytest==6.2.4 3 | pytest-cov 4 | pytest-benchmark 5 | pytest-xdist 6 | black[jupyter] 7 | sphinx>=4.0 8 | pytest-lazy-fixture 9 | pylint==2.17.5 10 | furo 11 | sphinx-copybutton 12 | nbsphinx 13 | myst-parser 14 | sphinx-design -------------------------------------------------------------------------------- /requirements/requirements-docker-v2.txt: -------------------------------------------------------------------------------- 1 | torch>2.0 # 2.0.1 2 | jax[cuda11_pip]==0.4.7 3 | tensorflow==2.11 4 | # tf 2.12 can make torch 2 hangs with runtime error 5 | # check with hybrid_gpu_pipeline.py example 6 | cupy-cuda11x==12.0.0 7 | tensornetwork==0.4.6 8 | graphviz 9 | numpy==1.23.5 10 | scipy==1.10.1 11 | sympy==1.12 12 | cirq==1.1.0 13 | qiskit 14 | matplotlib 15 | jupyter 16 | cotengra 17 | networkx 18 | optax==0.1.5 19 | kahypar 20 | optuna 21 | baytune 22 | nevergrad 23 | scikit-learn==1.2.2 24 | scikit-optimize 25 | openfermion 26 | quimb 27 | openfermionpyscf 28 | pennylane==0.30.0 29 | mthree==1.1.0 30 | mitiq==0.26.0 31 | # below is for development 32 | mypy==1.3.0 33 | pytest 34 | pytest-cov 35 | pytest-benchmark 36 | pytest-xdist 37 | pytest-lazy-fixture 38 | black==23.3.0 39 | sphinx>=4.0 40 | sphinx-intl 41 | sphinx-copybutton 42 | nbsphinx 43 | furo 44 | myst-parser 45 | pylint 46 | sphinx-design 47 | # made in 202306 48 | -------------------------------------------------------------------------------- /requirements/requirements-docker.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | cirq 4 | tensorflow==2.7 5 | tensornetwork==0.4.6 6 | graphviz 7 | jax==0.3.4 8 | # jaxlib indepedent specify the version in Dockerfile 9 | torch 10 | torchvision 11 | networkx 12 | optax 13 | kahypar 14 | optuna 15 | baytune 16 | nevergrad 17 | scikit-optimize 18 | matplotlib 19 | jupyter 20 | qiskit 21 | openfermion 22 | quimb 23 | openfermionpyscf 24 | pennylane 25 | # tensorflow_quantum==0.6.1 26 | mthree 27 | mitiq 28 | # below is for development 29 | mypy==1.2.0 30 | pytest 31 | pytest-cov 32 | pytest-benchmark 33 | pytest-xdist 34 | pytest-lazy-fixture 35 | black==23.3.0 36 | sphinx>=4.0 37 | sphinx-intl 38 | sphinx-copybutton 39 | nbsphinx 40 | furo 41 | myst-parser 42 | pylint 43 | -------------------------------------------------------------------------------- /requirements/requirements-extra.txt: -------------------------------------------------------------------------------- 1 | # extra dependencies for ci 2 | qiskit<1.0 3 | qiskit-aer<1.0 4 | qiskit-nature 5 | mitiq 6 | cirq 7 | torch==2.2.2 8 | # jupyter 9 | mthree==1.1.0 10 | openfermion 11 | -------------------------------------------------------------------------------- /requirements/requirements-rtd.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | cirq 4 | tensorflow 5 | tensornetwork 6 | graphviz 7 | networkx 8 | jax 9 | optax 10 | torch 11 | sphinx==4.3.2 12 | ipykernel 13 | furo==2022.4.7 14 | 
sphinx-copybutton 15 | nbsphinx 16 | myst-parser 17 | urllib3==1.26.15 18 | sphinx-design -------------------------------------------------------------------------------- /requirements/requirements-types.txt: -------------------------------------------------------------------------------- 1 | types-urllib3 2 | types-typed-ast 3 | types-toml 4 | types-setuptools 5 | types-requests 6 | types-pytz 7 | types-protobuf 8 | types-docutils -------------------------------------------------------------------------------- /requirements/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | tensorflow<2.16 # tf 2.16 with integration of keras 3 seems a disaster... 4 | tensornetwork-ng 5 | graphviz 6 | jax 7 | jaxlib 8 | networkx 9 | optax -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | from tensorcircuit import __version__, __author__ 4 | 5 | with open("README.md", "r", encoding="utf-8") as fh: 6 | long_description = fh.read() 7 | 8 | 9 | setuptools.setup( 10 | name="tensorcircuit", 11 | version=__version__, 12 | author=__author__, 13 | author_email="shixinzhang@tencent.com", 14 | description="High performance unified quantum computing framework for the NISQ era", 15 | long_description=long_description, 16 | long_description_content_type="text/markdown", 17 | url="https://github.com/tencent-quantum-lab/tensorcircuit", 18 | packages=setuptools.find_packages(), 19 | include_package_data=True, 20 | install_requires=["numpy", "scipy", "tensornetwork-ng", "networkx"], 21 | extras_require={ 22 | "tensorflow": ["tensorflow<2.16"], 23 | "jax": ["jax", "jaxlib"], 24 | "torch": ["torch"], 25 | "qiskit": ["qiskit<1.0"], 26 | "cloud": ["qiskit<1.0", "mthree"], 27 | }, 28 | classifiers=[ 29 | "Programming Language :: Python :: 3", 30 | "Operating System :: OS Independent", 31 | ], 32 | ) 33 | -------------------------------------------------------------------------------- /tensorcircuit/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.12.0" 2 | __author__ = "TensorCircuit Authors" 3 | __creator__ = "refraction-ray" 4 | 5 | from .utils import gpu_memory_share 6 | 7 | gpu_memory_share() 8 | 9 | from .about import about 10 | from .cons import ( 11 | backend, 12 | set_backend, 13 | set_dtype, 14 | set_contractor, 15 | get_backend, 16 | get_dtype, 17 | get_contractor, 18 | set_function_backend, 19 | set_function_dtype, 20 | set_function_contractor, 21 | runtime_backend, 22 | runtime_dtype, 23 | runtime_contractor, 24 | ) # prerun of set hooks 25 | from . import gates 26 | from . import basecircuit 27 | from .gates import Gate 28 | from .circuit import Circuit, expectation 29 | from .mpscircuit import MPSCircuit 30 | from .densitymatrix import DMCircuit as DMCircuit_reference 31 | from .densitymatrix import DMCircuit2 32 | 33 | DMCircuit = DMCircuit2 # compatibility issue to still expose DMCircuit2 34 | from .gates import num_to_tensor, array_to_tensor 35 | from .vis import qir2tex, render_pdf 36 | from . import interfaces 37 | from . import templates 38 | from . import results 39 | from . import quantum 40 | from .quantum import QuOperator, QuVector, QuAdjointVector, QuScalar 41 | from . import compiler 42 | from . import cloud 43 | from . import fgs 44 | from .fgs import FGSSimulator 45 | 46 | try: 47 | from . 
import keras 48 | from .keras import KerasLayer, KerasHardwareLayer 49 | except ModuleNotFoundError: 50 | pass # in case tf is not installed 51 | 52 | try: 53 | from . import torchnn 54 | from .torchnn import TorchLayer, TorchHardwareLayer 55 | except ModuleNotFoundError: 56 | pass # in case torch is not installed 57 | 58 | try: 59 | import qiskit 60 | 61 | qiskit.QuantumCircuit.cnot = qiskit.QuantumCircuit.cx 62 | qiskit.QuantumCircuit.toffoli = qiskit.QuantumCircuit.ccx 63 | qiskit.QuantumCircuit.fredkin = qiskit.QuantumCircuit.cswap 64 | 65 | # amazing qiskit 1.0 nonsense... 66 | except ModuleNotFoundError: 67 | pass 68 | 69 | # just for fun 70 | from .asciiart import set_ascii 71 | -------------------------------------------------------------------------------- /tensorcircuit/applications/README.md: -------------------------------------------------------------------------------- 1 | The code in this submodule `applications` is not very well maintained or extensively tested. There are indeed many interesting pieces, but use them at your own risk. 2 | 3 | 4 | ## Differentiable Quantum Architecture Search 5 | 6 | In `applications`, the DQAS framework and related applications are implemented, mainly in `vags.py` and `dqas.py`. 7 | 8 | The entry points of the DQAS framework are `DQAS_search` and, for the advanced probabilistic-model-based DQAS, `DQAS_search_pmb`. 9 | One can also check [examples](/examples) for integrated demos. 10 | 11 | ## Variational Quantum-Neural Hybrid Eigensolver 12 | 13 | The main library functions and utilities are in `vqes.py`. Also see [examples](/examples), including `vqnhe_h6.py` and `adiabatic_vqnhe.py`. 14 | -------------------------------------------------------------------------------- /tensorcircuit/applications/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The application codebase is related to research and previous versions of tensorcircuit, 3 | and the code inside is subject to change, so use it with caution. 4 | Most of the useful code has been or will be refactored and copied to other parts of tensorcircuit. 
5 | """ 6 | -------------------------------------------------------------------------------- /tensorcircuit/applications/ai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/tensorcircuit/applications/ai/__init__.py -------------------------------------------------------------------------------- /tensorcircuit/applications/finance/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/tensorcircuit/applications/finance/__init__.py -------------------------------------------------------------------------------- /tensorcircuit/applications/physics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/tensorcircuit/applications/physics/__init__.py -------------------------------------------------------------------------------- /tensorcircuit/applications/physics/baseline.py: -------------------------------------------------------------------------------- 1 | """ 2 | baseline calculators for physical systems 3 | """ 4 | 5 | import numpy as np 6 | 7 | 8 | def TFIM1Denergy( 9 | L: int, Jzz: float = 1.0, Jx: float = 1.0, Pauli: bool = True 10 | ) -> float: 11 | # PBC 12 | # nice tutorial: https://arxiv.org/pdf/2009.09208.pdf 13 | # further investigation on 1 TFIM solution structure is required 14 | # will fail on AFM phase Jzz>Jx and Jzz>0 and odd sites (frustration in boundary) 15 | e = 0 16 | if Pauli: 17 | Jx *= 2 18 | Jzz *= 4 19 | for i in range(L): 20 | q = np.pi * (2 * i - (1 + (-1) ** L) / 2) / L 21 | e -= np.abs(Jx) / 2 * np.sqrt(1 + Jzz**2 / 4 / Jx**2 - Jzz / Jx * np.cos(q)) 22 | return e 23 | 24 | 25 | def Heisenberg1Denergy(L: int, Pauli: bool = True, maxiters: int = 1000) -> float: 26 | # PBC 27 | error = 1e-15 28 | eps = 1e-20 # avoid zero division 29 | phi = np.zeros([L // 2, L // 2]) 30 | phi2 = np.zeros([L // 2, L // 2]) 31 | lamb = np.array([2 * i + 1 for i in range(L // 2)]) 32 | for _ in range(maxiters): 33 | k = 1 / L * (2 * np.pi * lamb + np.sum(phi, axis=-1) - np.diag(phi)) 34 | for i in range(L // 2): 35 | for j in range(L // 2): 36 | phi2[i, j] = ( 37 | np.arctan( 38 | 2 39 | / ( 40 | 1 / (np.tan(k[i] / 2) + eps) 41 | - 1 / (np.tan(k[j] / 2) + eps) 42 | + eps 43 | ) 44 | ) 45 | * 2 46 | ) 47 | if np.allclose(phi, phi2, rtol=error): # converged 48 | break 49 | phi = phi2.copy() 50 | else: 51 | raise ValueError( 52 | "the maxiters %s is too small for bethe ansatz to converge" % maxiters 53 | ) 54 | e = -np.sum(1 - np.cos(k)) + L / 4 55 | if Pauli is True: 56 | e *= 4 57 | return e # type: ignore 58 | -------------------------------------------------------------------------------- /tensorcircuit/backends/__init__.py: -------------------------------------------------------------------------------- 1 | # prerun to hack base abstract backend 2 | from .backend_factory import get_backend 3 | -------------------------------------------------------------------------------- /tensorcircuit/backends/backend_factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Backend register 3 | """ 4 | 5 | from typing import Any, Dict, Text, Union 6 | 7 | import tensornetwork as tn 8 | 9 | try: # old 
version tn compatibility 10 | from tensornetwork.backends import base_backend 11 | 12 | tnbackend = base_backend.BaseBackend 13 | 14 | except ImportError: 15 | from tensornetwork.backends import abstract_backend 16 | 17 | tnbackend = abstract_backend.AbstractBackend 18 | 19 | from .numpy_backend import NumpyBackend 20 | from .jax_backend import JaxBackend 21 | from .tensorflow_backend import TensorFlowBackend 22 | from .pytorch_backend import PyTorchBackend 23 | from .cupy_backend import CuPyBackend 24 | 25 | bk = Any # tnbackend 26 | 27 | _BACKENDS = { 28 | "numpy": NumpyBackend, 29 | "jax": JaxBackend, 30 | "tensorflow": TensorFlowBackend, 31 | "pytorch": PyTorchBackend, # no intention to fully maintain this one 32 | "cupy": CuPyBackend, # no intention to fully maintain this one 33 | } 34 | 35 | tn.backends.backend_factory._BACKENDS["cupy"] = CuPyBackend 36 | 37 | _INSTANTIATED_BACKENDS: Dict[str, bk] = dict() 38 | 39 | 40 | def get_backend(backend: Union[Text, bk]) -> bk: 41 | """ 42 | Get the `tc.backend` object. 43 | 44 | :param backend: "numpy", "tensorflow", "jax", "pytorch" 45 | :type backend: Union[Text, tnbackend] 46 | :raises ValueError: Backend doesn't exist for `backend` argument. 47 | :return: The `tc.backend` object with all registered universal functions. 48 | :rtype: backend object 49 | """ 50 | if isinstance(backend, tnbackend): 51 | return backend 52 | backend = backend.lower() 53 | if backend not in _BACKENDS: 54 | raise ValueError("Backend '{}' does not exist".format(backend)) 55 | if backend in _INSTANTIATED_BACKENDS: 56 | return _INSTANTIATED_BACKENDS[backend] 57 | _INSTANTIATED_BACKENDS[backend] = _BACKENDS[backend]() 58 | 59 | return _INSTANTIATED_BACKENDS[backend] 60 | -------------------------------------------------------------------------------- /tensorcircuit/cloud/__init__.py: -------------------------------------------------------------------------------- 1 | from . import apis 2 | from . import abstraction 3 | from . 
import wrapper 4 | from .wrapper import batch_expectation_ps 5 | from .apis import submit_task 6 | -------------------------------------------------------------------------------- /tensorcircuit/cloud/config.py: -------------------------------------------------------------------------------- 1 | tencent_base_url = "https://quantum.tencent.com/cloud/quk/" 2 | -------------------------------------------------------------------------------- /tensorcircuit/cloud/local.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cloud provider from local machine 3 | """ 4 | 5 | from typing import Any, Dict, List, Optional, Union, Sequence 6 | from uuid import uuid4 7 | import time 8 | 9 | from .abstraction import Device, sep, Task 10 | from ..utils import is_sequence 11 | from ..abstractcircuit import AbstractCircuit 12 | 13 | local_devices = ["testing"] 14 | 15 | task_list: Dict[str, Any] = {} # memory only task cache 16 | 17 | 18 | def list_devices(token: Optional[str] = None, **kws: Any) -> List[Device]: 19 | rs = [] 20 | for d in local_devices: 21 | rs.append(Device.from_name("local" + sep + d)) 22 | return rs 23 | 24 | 25 | def get_task_details( 26 | task: Task, device: Device, token: str, prettify: bool 27 | ) -> Dict[str, Any]: 28 | if task.id_ in task_list: 29 | return task_list[task.id_] # type: ignore 30 | raise ValueError("no task with id: %s" % task.id_) 31 | 32 | 33 | def submit_task( 34 | device: Device, 35 | token: str, 36 | shots: Union[int, Sequence[int]] = 1024, 37 | version: str = "1", 38 | circuit: Optional[Union[AbstractCircuit, Sequence[AbstractCircuit]]] = None, 39 | **kws: Any 40 | ) -> List[Task]: 41 | def _circuit2result(c: AbstractCircuit) -> Dict[str, Any]: 42 | if device.name in ["testing", "default"]: 43 | count = c.sample(batch=shots, allow_state=True, format="count_dict_bin") # type: ignore 44 | else: 45 | raise ValueError("Unsupported device from local provider: %s" % device.name) 46 | d = { 47 | "id": str(uuid4()), 48 | "state": "completed", 49 | "at": time.time() * 1e6, 50 | "shots": shots, 51 | "device": device.name, 52 | "results": count, 53 | } 54 | return d 55 | 56 | if is_sequence(circuit): 57 | tl = [] 58 | for c in circuit: # type: ignore 59 | d = _circuit2result(c) 60 | task_list[d["id"]] = d 61 | tl.append(Task(id_=d["id"], device=device)) 62 | return tl 63 | else: 64 | d = _circuit2result(circuit) # type: ignore 65 | task_list[d["id"]] = d 66 | 67 | return Task(id_=d["id"], device=device) # type: ignore 68 | 69 | 70 | def list_tasks(device: Device, token: str, **filter_kws: Any) -> List[Task]: 71 | r = [] 72 | for t, v in task_list.items(): 73 | if (device is not None and v["device"] == device.name) or device is None: 74 | r.append(Task(id_=t, device=Device.from_name("local" + sep + v["device"]))) 75 | return r 76 | -------------------------------------------------------------------------------- /tensorcircuit/cloud/quafu_provider.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cloud provider from QuaFu: http://quafu.baqis.ac.cn/ 3 | """ 4 | 5 | from typing import Any, Dict, List, Optional, Sequence, Union 6 | import logging 7 | 8 | from quafu import User, QuantumCircuit 9 | from quafu import Task as Task_ 10 | 11 | from .abstraction import Device, sep, Task 12 | from ..abstractcircuit import AbstractCircuit 13 | from ..utils import is_sequence 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | def list_devices(token: Optional[str] = None, **kws: Any) -> 
List[Device]: 19 | raise NotImplementedError 20 | 21 | 22 | def list_properties(device: Device, token: Optional[str] = None) -> Dict[str, Any]: 23 | raise NotImplementedError 24 | 25 | 26 | def submit_task( 27 | device: Device, 28 | token: str, 29 | shots: Union[int, Sequence[int]] = 1024, 30 | circuit: Optional[Union[AbstractCircuit, Sequence[AbstractCircuit]]] = None, 31 | source: Optional[Union[str, Sequence[str]]] = None, 32 | compile: bool = True, 33 | **kws: Any 34 | ) -> Task: 35 | if source is None: 36 | 37 | def c2qasm(c: Any) -> str: 38 | from qiskit.circuit import QuantumCircuit 39 | 40 | if isinstance(c, QuantumCircuit): 41 | s = c.qasm() 42 | # nq = c.num_qubits 43 | else: 44 | s = c.to_openqasm() 45 | return s # type: ignore 46 | 47 | if not is_sequence(circuit): 48 | source = c2qasm(circuit) 49 | else: 50 | source = [c2qasm(c) for c in circuit] # type: ignore 51 | user = User() 52 | user.save_apitoken(token) 53 | 54 | def c2task(source: str) -> Task: 55 | nq = int(source.split("\n")[2].split("[")[1].split("]")[0]) # type: ignore 56 | qc = QuantumCircuit(nq) 57 | qc.from_openqasm(source) 58 | task = Task_() 59 | device_name = device.name.split(sep)[-1] 60 | task.config(backend=device_name, shots=shots, compile=compile) 61 | res = task.send(qc, wait=False) 62 | wrapper = Task(res.taskid, device=device) 63 | return wrapper 64 | 65 | if not is_sequence(source): 66 | return c2task(source) # type: ignore 67 | else: 68 | return [c2task(s) for s in source] # type: ignore 69 | 70 | 71 | def resubmit_task(task: Task, token: str) -> Task: 72 | raise NotImplementedError 73 | 74 | 75 | def remove_task(task: Task, token: str) -> Any: 76 | raise NotImplementedError 77 | 78 | 79 | def list_tasks(device: Device, token: str, **filter_kws: Any) -> List[Task]: 80 | raise NotImplementedError 81 | 82 | 83 | def get_task_details( 84 | task: Task, device: Device, token: str, prettify: bool 85 | ) -> Dict[str, Any]: 86 | # id results 87 | r = {} 88 | r["id"] = task.id_ 89 | t = Task_() 90 | r["results"] = dict(t.retrieve(task.id_).counts) # type: ignore 91 | if r["results"]: 92 | r["state"] = "completed" 93 | else: 94 | r["state"] = "pending" 95 | return r 96 | -------------------------------------------------------------------------------- /tensorcircuit/compiler/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Experimental module, no software agnostic unified interface for now, 3 | only reserve for internal use 4 | """ 5 | 6 | from .composed_compiler import Compiler, DefaultCompiler, default_compile 7 | from . import simple_compiler 8 | from . import qiskit_compiler 9 | -------------------------------------------------------------------------------- /tensorcircuit/interfaces/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interfaces bridging different backends 3 | """ 4 | 5 | from . 
import tensortrans 6 | from .tensortrans import ( 7 | which_backend, 8 | numpy_args_to_backend, 9 | general_args_to_numpy, 10 | general_args_to_backend, 11 | args_to_tensor, 12 | ) 13 | from .numpy import numpy_interface, np_interface 14 | from .scipy import scipy_interface, scipy_optimize_interface 15 | from .torch import torch_interface, pytorch_interface, torch_interface_kws 16 | from .tensorflow import tensorflow_interface, tf_interface 17 | 18 | 19 | # TODO(@refraction-ray): jax interface using puer_callback and custom_vjp 20 | -------------------------------------------------------------------------------- /tensorcircuit/interfaces/numpy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interface wraps quantum function as a numpy function 3 | """ 4 | 5 | from typing import Any, Callable 6 | from functools import wraps 7 | 8 | from ..cons import backend 9 | from .tensortrans import general_args_to_numpy, numpy_args_to_backend 10 | 11 | Tensor = Any 12 | 13 | 14 | def numpy_interface( 15 | fun: Callable[..., Any], 16 | jit: bool = True, 17 | ) -> Callable[..., Any]: 18 | """ 19 | Convert ``fun`` on ML backend into a numpy function 20 | 21 | :Example: 22 | 23 | .. code-block:: python 24 | 25 | K = tc.set_backend("tensorflow") 26 | 27 | def f(params, n): 28 | c = tc.Circuit(n) 29 | for i in range(n): 30 | c.rx(i, theta=params[i]) 31 | for i in range(n-1): 32 | c.cnot(i, i+1) 33 | r = K.real(c.expectation_ps(z=[n-1])) 34 | return r 35 | 36 | n = 3 37 | f_np = tc.interfaces.numpy_interface(f, jit=True) 38 | f_np(np.ones([n]), n) # 0.1577285 39 | 40 | 41 | :param fun: The quantum function 42 | :type fun: Callable[..., Any] 43 | :param jit: whether to jit ``fun``, defaults to True 44 | :type jit: bool, optional 45 | :return: The numpy interface compatible version of ``fun`` 46 | :rtype: Callable[..., Any] 47 | """ 48 | if jit: 49 | fun = backend.jit(fun) 50 | 51 | @wraps(fun) 52 | def numpy_fun(*args: Any, **kws: Any) -> Any: 53 | backend_args = numpy_args_to_backend(args) 54 | r = fun(*backend_args, **kws) 55 | np_r = general_args_to_numpy(r) 56 | return np_r 57 | 58 | return numpy_fun 59 | 60 | 61 | np_interface = numpy_interface 62 | -------------------------------------------------------------------------------- /tensorcircuit/results/__init__.py: -------------------------------------------------------------------------------- 1 | from . import counts 2 | from . import readout_mitigation 3 | 4 | rem = readout_mitigation # alias 5 | -------------------------------------------------------------------------------- /tensorcircuit/results/qem/__init__.py: -------------------------------------------------------------------------------- 1 | from . import qem_methods 2 | 3 | apply_zne = qem_methods.apply_zne 4 | zne_option = qem_methods.zne_option # type: ignore 5 | 6 | apply_dd = qem_methods.apply_dd 7 | dd_option = qem_methods.dd_option # type: ignore 8 | used_qubits = qem_methods.used_qubits 9 | prune_ddcircuit = qem_methods.prune_ddcircuit 10 | add_dd = qem_methods.add_dd 11 | 12 | apply_rc = qem_methods.apply_rc 13 | rc_circuit = qem_methods.rc_circuit 14 | rc_candidates = qem_methods.rc_candidates 15 | -------------------------------------------------------------------------------- /tensorcircuit/templates/__init__.py: -------------------------------------------------------------------------------- 1 | from . import ansatz 2 | from . import blocks 3 | from . import chems 4 | from . import dataset 5 | from . import graphs 6 | from . 
import measurements 7 | from . import conversions 8 | 9 | costfunctions = measurements 10 | -------------------------------------------------------------------------------- /tensorcircuit/templates/chems.py: -------------------------------------------------------------------------------- 1 | """ 2 | Useful utilities for quantum chemistry related task 3 | """ 4 | 5 | from .conversions import get_ps # pylint:disable=unused-import 6 | 7 | # backward compatibility for the entry point 8 | -------------------------------------------------------------------------------- /tensorcircuit/templates/dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Quantum machine learning related data preprocessing and embedding 3 | """ 4 | 5 | from typing import Any, Optional, Sequence, Tuple 6 | 7 | import numpy as np 8 | 9 | from ..cons import backend, dtypestr 10 | from ..gates import array_to_tensor 11 | 12 | Tensor = Any 13 | 14 | 15 | def amplitude_encoding( 16 | fig: Tensor, nqubits: int, index: Optional[Sequence[int]] = None 17 | ) -> Tensor: 18 | # non-batch version 19 | # [WIP] 20 | fig = backend.reshape(fig, shape=[-1]) 21 | norm = backend.norm(fig) 22 | fig = fig / norm 23 | if backend.shape_tuple(fig)[0] < 2**nqubits: 24 | fig = backend.concat( 25 | [ 26 | fig, 27 | backend.zeros( 28 | [2**nqubits - backend.shape_tuple(fig)[0]], dtype=fig.dtype 29 | ), 30 | ], 31 | ) 32 | if index is not None: 33 | index = array_to_tensor(index, dtype="int32") 34 | fig = backend.gather1d(fig, index) 35 | fig = backend.cast(fig, dtypestr) 36 | return fig 37 | 38 | 39 | # batched_amplitude_encoding = backend.vmap(amplitude_encoding, vectorized_argnums=0) 40 | 41 | 42 | def mnist_pair_data( 43 | a: int, 44 | b: int, 45 | binarize: bool = False, 46 | threshold: float = 0.4, 47 | loader: Any = None, 48 | ) -> Tensor: 49 | def filter_pair(x: Tensor, y: Tensor, a: int, b: int) -> Tuple[Tensor, Tensor]: 50 | keep = (y == a) | (y == b) 51 | x, y = x[keep], y[keep] 52 | y = y == a 53 | return x, y 54 | 55 | if loader is None: 56 | import tensorflow as tf 57 | 58 | loader = tf.keras.datasets.mnist 59 | 60 | (x_train, y_train), (x_test, y_test) = loader.load_data() 61 | x_train, x_test = x_train[..., np.newaxis] / 255.0, x_test[..., np.newaxis] / 255.0 62 | 63 | if binarize: 64 | x_train[x_train > threshold] = 1.0 65 | x_train[x_train <= threshold] = 0.0 66 | x_test[x_test > threshold] = 1.0 67 | x_test[x_test <= threshold] = 0.0 68 | 69 | x_train, y_train = filter_pair(x_train, y_train, a, b) 70 | x_test, y_test = filter_pair(x_test, y_test, a, b) 71 | return (x_train, y_train), (x_test, y_test) 72 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tencent-quantum-lab/tensorcircuit/58835a31971b07f8a7ec566d714916386b187397/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import pytest 4 | 5 | thisfile = os.path.abspath(__file__) 6 | modulepath = os.path.dirname(os.path.dirname(thisfile)) 7 | 8 | sys.path.insert(0, modulepath) 9 | import tensorcircuit as tc 10 | 11 | 12 | @pytest.fixture(scope="function") 13 | def npb(): 14 | tc.set_backend("numpy") 15 | yield 16 | tc.set_backend("numpy") # default backend 17 | 18 | 19 | 
@pytest.fixture(scope="function") 20 | def tfb(): 21 | tc.set_backend("tensorflow") 22 | yield 23 | tc.set_backend("numpy") # default backend 24 | 25 | 26 | @pytest.fixture(scope="function") 27 | def jaxb(): 28 | try: 29 | tc.set_backend("jax") 30 | yield 31 | tc.set_backend("numpy") 32 | 33 | except ImportError as e: 34 | print(e) 35 | tc.set_backend("numpy") 36 | pytest.skip("****** No jax backend found, skipping test suit *******") 37 | 38 | 39 | @pytest.fixture(scope="function") 40 | def torchb(): 41 | try: 42 | tc.set_backend("pytorch") 43 | yield 44 | tc.set_backend("numpy") 45 | except ImportError as e: 46 | print(e) 47 | tc.set_backend("numpy") 48 | pytest.skip("****** No torch backend found, skipping test suit *******") 49 | 50 | 51 | @pytest.fixture(scope="function") 52 | def cpb(): 53 | try: 54 | tc.set_backend("cupy") 55 | yield 56 | tc.set_backend("numpy") 57 | except ImportError as e: 58 | print(e) 59 | tc.set_backend("numpy") 60 | pytest.skip("****** No cupy backend found, skipping test suit *******") 61 | 62 | 63 | @pytest.fixture(scope="function") 64 | def highp(): 65 | tc.set_dtype("complex128") 66 | yield 67 | tc.set_dtype("complex64") 68 | -------------------------------------------------------------------------------- /tests/test_ensemble.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import tensorflow as tf 4 | import numpy as np 5 | import pytest 6 | 7 | thisfile = os.path.abspath(__file__) 8 | modulepath = os.path.dirname(os.path.dirname(thisfile)) 9 | 10 | sys.path.insert(0, modulepath) 11 | 12 | from tensorcircuit.applications.ai.ensemble import bagging 13 | 14 | 15 | @pytest.mark.xfail( 16 | int(tf.__version__.split(".")[1]) >= 16, reason="legacy optimizer fails tf>=2.16" 17 | ) 18 | def test_ensemble_bagging(): 19 | data_amount = 100 # Amount of data to be used 20 | linear_dimension = 4 # linear demension of the data 21 | epochs = 10 22 | batch_size = 32 23 | lr = 1e-3 24 | 25 | x_train, y_train = ( 26 | np.ones([data_amount, linear_dimension]), 27 | np.ones([data_amount, 1]), 28 | ) 29 | 30 | obj_bagging = bagging() 31 | 32 | def model(): 33 | DROP = 0.1 34 | 35 | activation = "selu" 36 | inputs = tf.keras.Input(shape=(linear_dimension,), name="digits") 37 | x0 = tf.keras.layers.Dense( 38 | 1, 39 | kernel_regularizer=tf.keras.regularizers.l2(9.613e-06), 40 | activation=activation, 41 | )(inputs) 42 | x0 = tf.keras.layers.Dropout(DROP)(x0) 43 | 44 | x = tf.keras.layers.Dense( 45 | 1, 46 | kernel_regularizer=tf.keras.regularizers.l2(1e-07), 47 | activation="sigmoid", 48 | )(x0) 49 | 50 | model = tf.keras.Model(inputs, x) 51 | 52 | return model 53 | 54 | obj_bagging.append(model(), False) 55 | obj_bagging.append(model(), False) 56 | obj_bagging.append(model(), False) 57 | obj_bagging.compile( 58 | loss=tf.keras.losses.BinaryCrossentropy(), 59 | optimizer=tf.keras.optimizers.legacy.Adam(lr), 60 | metrics=[tf.keras.metrics.AUC(), "acc"], 61 | ) 62 | obj_bagging.train( 63 | x=x_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=0 64 | ) 65 | 66 | v_weight = obj_bagging.predict(x_train, "weight") 67 | v_average = obj_bagging.predict(x_train, "average") 68 | v_most = obj_bagging.predict(x_train, "most") 69 | validation_data = [] 70 | validation_data.append(obj_bagging.eval([y_train, v_weight], "acc")) 71 | validation_data.append(obj_bagging.eval([y_train, v_average], "auc")) 72 | validation_data.append(obj_bagging.eval([y_train, v_most], "acc")) 73 | 
-------------------------------------------------------------------------------- /tests/test_qaoa.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | thisfile = os.path.abspath(__file__) 5 | modulepath = os.path.dirname(os.path.dirname(thisfile)) 6 | 7 | sys.path.insert(0, modulepath) 8 | 9 | import numpy as np 10 | import pytest 11 | 12 | from tensorcircuit.applications.dqas import set_op_pool 13 | from tensorcircuit.applications.graphdata import get_graph 14 | from tensorcircuit.applications.layers import Hlayer, rxlayer, zzlayer 15 | from tensorcircuit.applications.vags import evaluate_vag 16 | from tensorcircuit.templates.ansatz import QAOA_ansatz_for_Ising 17 | from tensorcircuit.circuit import Circuit 18 | 19 | 20 | def test_vag(tfb): 21 | set_op_pool([Hlayer, rxlayer, zzlayer]) 22 | expene, ene, eneg, p = evaluate_vag( 23 | np.array([0.0, 0.3, 0.5, 0.7, -0.8]), 24 | [0, 2, 1, 2, 1], 25 | get_graph("10A"), 26 | lbd=0, 27 | overlap_threhold=11, 28 | ) 29 | print(expene, eneg, p) 30 | np.testing.assert_allclose(ene.numpy(), -7.01, rtol=1e-2) 31 | 32 | 33 | cases = [ 34 | ("X", True), 35 | ("X", False), 36 | ("XY", True), 37 | ("XY", False), 38 | ("ZZ", True), 39 | ("ZZ", False), 40 | ] 41 | 42 | 43 | @pytest.fixture 44 | def example_inputs(): 45 | params = [0.1, 0.2, 0.3, 0.4] 46 | nlayers = 2 47 | pauli_terms = [[0, 1, 0], [1, 0, 1]] 48 | weights = [0.5, -0.5] 49 | return params, nlayers, pauli_terms, weights 50 | 51 | 52 | @pytest.mark.parametrize("mixer, full_coupling", cases) 53 | def test_QAOA_ansatz_for_Ising(example_inputs, full_coupling, mixer): 54 | params, nlayers, pauli_terms, weights = example_inputs 55 | circuit = QAOA_ansatz_for_Ising( 56 | params, nlayers, pauli_terms, weights, full_coupling, mixer 57 | ) 58 | n = len(pauli_terms[0]) 59 | assert isinstance(circuit, Circuit) 60 | assert circuit._nqubits == n 61 | 62 | if mixer == "X": 63 | assert circuit.gate_count() == n + nlayers * (len(pauli_terms) + n) 64 | elif mixer == "XY": 65 | if full_coupling is False: 66 | assert circuit.gate_count() == n + nlayers * (len(pauli_terms) + 2 * n) 67 | else: 68 | assert circuit.gate_count() == n + nlayers * ( 69 | len(pauli_terms) + sum(range(n + 1)) 70 | ) 71 | else: 72 | if full_coupling is False: 73 | assert circuit.gate_count() == n + nlayers * (len(pauli_terms) + n) 74 | else: 75 | assert circuit.gate_count() == n + nlayers * ( 76 | len(pauli_terms) + sum(range(n + 1)) / 2 77 | ) 78 | 79 | 80 | @pytest.mark.parametrize("mixer, full_coupling", [("AB", True), ("XY", 1), ("TC", 5)]) 81 | def test_QAOA_ansatz_errors(example_inputs, full_coupling, mixer): 82 | params, nlayers, pauli_terms, weights = example_inputs 83 | with pytest.raises(ValueError): 84 | QAOA_ansatz_for_Ising( 85 | params, nlayers, pauli_terms, weights, full_coupling, mixer 86 | ) 87 | -------------------------------------------------------------------------------- /tests/test_quantum_attr.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | 7 | thisfile = os.path.abspath(__file__) 8 | modulepath = os.path.dirname(os.path.dirname(thisfile)) 9 | 10 | sys.path.insert(0, modulepath) 11 | from tensorcircuit.applications.vags import double_state, reduced_density_matrix 12 | 13 | 14 | def test_double_state(): 15 | s = double_state(tf.constant([[1.0, 0], [0, -1.0]]), beta=2.0) 16 | np.testing.assert_allclose(np.linalg.norm(s.numpy()), 
1.0) 17 | np.testing.assert_allclose( 18 | s.numpy(), 19 | np.array( 20 | [ 21 | np.exp(-1) / np.sqrt(np.exp(2) + np.exp(-2)), 22 | 0, 23 | 0, 24 | np.exp(1) / np.sqrt(np.exp(2) + np.exp(-2)), 25 | ] 26 | ), 27 | atol=1e-5, 28 | ) 29 | s2 = double_state(tf.constant([[0.0, 1.0], [1.0, 0.0]]), beta=1.0) 30 | np.testing.assert_allclose(np.linalg.norm(s2.numpy()), 1.0) 31 | em = np.exp(-0.5) 32 | ep = np.exp(0.5) 33 | ans = np.array([em + ep, em - ep, em - ep, em + ep]) 34 | ans /= np.linalg.norm(ans) 35 | np.testing.assert_allclose(s2.numpy(), ans, atol=1e-5) 36 | 37 | 38 | def test_reduced_dm(): 39 | rho = reduced_density_matrix( 40 | tf.random.normal(shape=[128]), freedom=7, cut=[1, 3, 5] 41 | ) 42 | np.testing.assert_allclose(np.trace(rho.numpy()), 1, atol=1e-5) 43 | -------------------------------------------------------------------------------- /tests/test_simplify.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | thisfile = os.path.abspath(__file__) 5 | modulepath = os.path.dirname(os.path.dirname(thisfile)) 6 | 7 | sys.path.insert(0, modulepath) 8 | import numpy as np 9 | import tensornetwork as tn 10 | from tensorcircuit import simplify 11 | 12 | 13 | def test_infer_shape(): 14 | a = tn.Node(np.ones([2, 3, 5])) 15 | b = tn.Node(np.ones([3, 5, 7])) 16 | a[1] ^ b[0] 17 | a[2] ^ b[1] 18 | assert simplify.infer_new_shape(a, b) == ((2, 7), (2, 3, 5), (3, 5, 7)) 19 | 20 | 21 | def test_rank_simplify(): 22 | a = tn.Node(np.ones([2, 2]), name="a") 23 | b = tn.Node(np.ones([2, 2]), name="b") 24 | c = tn.Node(np.ones([2, 2, 2, 2]), name="c") 25 | d = tn.Node(np.ones([2, 2, 2, 2, 2, 2]), name="d") 26 | e = tn.Node(np.ones([2, 2]), name="e") 27 | 28 | a[1] ^ c[0] 29 | b[1] ^ c[1] 30 | c[2] ^ d[0] 31 | c[3] ^ d[1] 32 | d[4] ^ e[0] 33 | 34 | nodes = simplify._full_rank_simplify([a, b, c, d, e]) 35 | assert nodes[0].shape == tuple([2 for _ in range(6)]) 36 | assert len(nodes) == 1 37 | 38 | f = tn.Node(np.ones([2, 2]), name="f") 39 | g = tn.Node(np.ones([2, 2, 2, 2]), name="g") 40 | h = tn.Node(np.ones([2, 2, 2, 2]), name="h") 41 | 42 | f[1] ^ g[0] 43 | g[2] ^ h[1] 44 | 45 | nodes = simplify._full_rank_simplify([f, g, h]) 46 | assert len(nodes) == 2 47 | --------------------------------------------------------------------------------
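To make concrete what the simplification utilities exercised in `tests/test_simplify.py` compute, a small self-contained sketch follows. It reuses the node setup from `test_infer_shape`; the explicit contraction via `tensornetwork.contract_between` is an extra step not in the test, under the assumption that `infer_new_shape` only inspects the nodes without mutating them.

```python
import numpy as np
import tensornetwork as tn

from tensorcircuit import simplify

# two nodes sharing two bonds, exactly as in test_infer_shape
a = tn.Node(np.ones([2, 3, 5]))
b = tn.Node(np.ones([3, 5, 7]))
a[1] ^ b[0]
a[2] ^ b[1]

# predicted shape of the contracted node, followed by the two input shapes
print(simplify.infer_new_shape(a, b))  # ((2, 7), (2, 3, 5), (3, 5, 7))

# contracting the pair indeed leaves a rank-2 node of shape (2, 7),
# which is why rank simplification can merge such node pairs eagerly
ab = tn.contract_between(a, b)
print(ab.tensor.shape)  # (2, 7)
```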