├── Project.toml
├── README.md
├── examples
│   ├── TEBD.jl
│   ├── boundarymps.jl
│   ├── loopcorrections.jl
│   ├── time-evolution.ipynb
│   ├── time-evolution.jl
│   └── time_evolution_Heisenberg.jl
└── src
    ├── Backend
    │   ├── beliefpropagation.jl
    │   ├── boundarymps.jl
    │   └── loopcorrection.jl
    ├── TensorNetworkQuantumSimulator.jl
    ├── apply.jl
    ├── constructors.jl
    ├── expect.jl
    ├── gates.jl
    ├── graph_ops.jl
    ├── imports.jl
    ├── sample.jl
    └── utils.jl

/Project.toml:
--------------------------------------------------------------------------------
 1 | name = "TensorNetworkQuantumSimulator"
 2 | uuid = "4de3b72a-362e-43dd-83ff-3f381eda9f9c"
 3 | authors = ["JoeyT1994 ", "MSRudolph "]
 4 | version = "0.0.1"
 5 | 
 6 | [deps]
 7 | Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 8 | EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5"
 9 | GraphRecipes = "bd48cda9-67a9-57be-86fa-5b3c104eda73"
10 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
11 | ITensorMPS = "0d1a4710-d33b-49a5-8f18-73bdf49b47e2"
12 | ITensorNetworks = "2919e153-833c-4bdc-8836-1ea460a35fc7"
13 | ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
14 | JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
15 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
16 | NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19"
17 | PauliPropagation = "293282d5-3c99-4fb6-92d0-fd3280a19750"
18 | SimpleGraphAlgorithms = "41400c72-0c58-5c16-8579-4ecbce768449"
19 | SimpleGraphConverter = "205b04f2-f585-4877-a239-566270b3f673"
20 | StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
21 | TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2"
22 | 
23 | [compat]
24 | Dictionaries = "0.4"
25 | EinExprs = "0.6.4"
26 | GraphRecipes = "0.5.13"
27 | Graphs = "1.8.0"
28 | ITensorMPS = "0.3.17"
29 | ITensorNetworks = "0.13.9"
30 | ITensors = "0.9"
31 | JSON = "0.21.4"
32 | LinearAlgebra = "1.11.0"
33 | NamedGraphs = "0.6.8"
34 | PauliPropagation = "0.3.0"
35 | SimpleGraphAlgorithms = "0.6.0"
36 | SimpleGraphConverter = "0.1.0"
37 | StatsBase = "0.34.4"
38 | TensorOperations = "5.2"
39 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # TensorNetworkQuantumSimulator
 2 | 
 3 | A wrapper around [ITensorNetworks](https://github.com/ITensor/ITensorNetworks.jl) for simulating quantum circuits with tensor networks (TNs) of near-arbitrary geometry.
 4 | 
 5 | The main workhorses of the simulation are _belief propagation_ (BP) for gauging the TNs, _simple update_ for applying the gates, and BP with _loop corrections_ or _boundary MPS_ for estimating expectation values. This package is an experimental collection of state-of-the-art features, some of which will be integrated into [ITensorNetworks](https://github.com/ITensor/ITensorNetworks.jl) over time, with a focus on simulating quantum circuits.
 6 | 
 7 | The workflow is that you pass the tensor network constructor a `NamedGraph` object describing the desired connectivity of your tensor network. Then you define and subsequently apply the desired gates to the TN (truncating the bonds of the tensor network down to some desired threshold), estimating expectation values with any of the available techniques along the way. These techniques make different levels of approximation and have different control parameters; the relevant literature describes them in more detail.
 8 | 
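A minimal sketch of this workflow, following `examples/time-evolution.jl` (the lattice size, rotation angles, and truncation parameters below are purely illustrative):

```julia
using TensorNetworkQuantumSimulator

g = named_grid((4, 4))      # geometry of the tensor network
ψ = zerostate(g)            # |0⟩ on every vertex

# one circuit layer: Pauli rotations are tuples (pauli_string, sites, parameter)
layer = []
append!(layer, ("Rx", [v], 0.1) for v in vertices(g))
append!(layer, ("Rzz", pair, 0.05) for pair in edges(g))

ψ, errors = apply(layer, ψ; apply_kwargs = (; maxdim = 8, cutoff = 1e-10))

expect(ψ, ("Z", [(2, 2)]))  # ⟨Z⟩ on vertex (2, 2), estimated with belief propagation
```

 9 | ## Upcoming Features
10 | - Gates beyond Pauli rotations, for example, Clifford gates.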
11 | - Applying gates to distant nodes of the TN via SWAP gates. 12 | - Sampling bitstrings from loopy networks. 13 | 14 | ## Relevant Literature 15 | - [Gauging tensor networks with belief propagation](https://www.scipost.org/SciPostPhys.15.6.222?acad_field_slug=chemistry) 16 | - [Efficient Tensor Network Simulation of IBM’s Eagle Kicked Ising Experiment](https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.5.010308) 17 | - [Loop Series Expansions for Tensor Networks](https://arxiv.org/abs/2409.03108) 18 | - [Dynamics of disordered quantum systems with two- and three-dimensional tensor networks](https://arxiv.org/abs/2503.05693) 19 | 20 | ## Authors 21 | The package was developed by Joseph Tindall ([JoeyT1994](https://github.com/JoeyT1994)), a Postdoctoral Researcher at the Center for Computational Quantum Physics, Flatiron Institute NYC and Manuel S. Rudolph ([MSRudolph](https://github.com/MSRudolph)), a PhD Candidate at EPFL, Switzerland, during a research stay at the Center for Computational Quantum Physics, Flatiron Institute NYC. 22 | 23 | -------------------------------------------------------------------------------- /examples/TEBD.jl: -------------------------------------------------------------------------------- 1 | using TensorNetworkQuantumSimulator 2 | const TN = TensorNetworkQuantumSimulator 3 | 4 | using ITensorNetworks 5 | const ITN = ITensorNetworks 6 | using ITensors 7 | 8 | using NamedGraphs 9 | using Graphs 10 | const NG = NamedGraphs 11 | const G = Graphs 12 | using NamedGraphs.NamedGraphGenerators: named_grid 13 | 14 | function main() 15 | nx, ny, nz = 3, 3, 3 16 | #Build a qubit layout of a 3x3x3 periodic cube 17 | g = named_grid((nx, ny, nz); periodic = true) 18 | 19 | nqubits = length(vertices(g)) 20 | s = ITN.siteinds("S=1/2", g) 21 | ψ = ITensorNetwork(v -> "Z+", s) 22 | 23 | maxdim, cutoff = 4, 1e-14 24 | apply_kwargs = (; maxdim, cutoff, normalize = true) 25 | 26 | ψψ = build_bp_cache(ψ) 27 | h, J = -1.0, -1.0 28 | no_trotter_steps = 25 29 | δt = 0.04 30 | 31 | #Do a 7-way edge coloring then Trotterise the Hamiltonian into commuting groups 32 | layer = [] 33 | ec = edge_color(g, 7) 34 | append!(layer, ("Rz", [v], h*δt) for v in vertices(g)) 35 | for colored_edges in ec 36 | append!(layer, ("Rxx", pair, 2*J*δt) for pair in colored_edges) 37 | end 38 | append!(layer, ("Rz", [v], h*δt) for v in vertices(g)) 39 | 40 | #Vertices to measure "Z" on 41 | vs_measure = [first(center(g))] 42 | observables = [("Z", [v]) for v in vs_measure] 43 | 44 | #Edges to measure bond entanglement on: 45 | e_ent = first(edges(g)) 46 | 47 | χinit = maxlinkdim(ψ) 48 | println("Initial bond dimension of the state is $χinit") 49 | 50 | expect_sigmaz = real.(expect(ψ, observables; (cache!) = Ref(ψψ))) 51 | println("Initial Sigma Z on selected sites is $expect_sigmaz") 52 | 53 | time = 0 54 | 55 | Zs = Float64[] 56 | 57 | # evolve! The first evaluation will take significantly longer because of compilation. 58 | for l = 1:no_trotter_steps 59 | #printing 60 | println("Layer $l") 61 | 62 | # pass BP cache manually 63 | t = @timed ψ, ψψ, errors = 64 | apply(layer, ψ, ψψ; apply_kwargs, verbose = false); 65 | 66 | # push expectation to list 67 | push!(Zs, only(real(expect(ψ, observables; (cache!) = Ref(ψψ))))) 68 | 69 | # printing 70 | println("Took time: $(t.time) [s]. 
Max bond dimension: $(maxlinkdim(ψ))") 71 | println("Maximum Gate error for layer was $(maximum(errors))") 72 | println("Sigma z on central site is $(last(Zs))") 73 | end 74 | end 75 | 76 | main() 77 | -------------------------------------------------------------------------------- /examples/boundarymps.jl: -------------------------------------------------------------------------------- 1 | using TensorNetworkQuantumSimulator 2 | const TN = TensorNetworkQuantumSimulator 3 | 4 | using ITensorNetworks 5 | const ITN = ITensorNetworks 6 | using ITensors 7 | 8 | using NamedGraphs 9 | using Graphs 10 | const NG = NamedGraphs 11 | const G = Graphs 12 | using NamedGraphs.NamedGraphGenerators: named_grid, named_hexagonal_lattice_graph 13 | 14 | using EinExprs: Greedy 15 | 16 | using Random 17 | Random.seed!(1634) 18 | 19 | function main() 20 | nx, ny = 5, 5 21 | χ = 2 22 | ITensors.disable_warn_order() 23 | 24 | gs = [ 25 | (named_grid((nx, 1)), "line"), 26 | (named_hexagonal_lattice_graph(nx - 2, ny - 2), "hexagonal"), 27 | (named_grid((nx, ny)), "square"), 28 | ] 29 | for (g, g_str) in gs 30 | println("Testing for $g_str lattice with $(nv(g)) vertices") 31 | s = siteinds("S=1/2", g) 32 | ψ = ITN.random_tensornetwork(ComplexF64, s; link_space = χ) 33 | s = ITN.siteinds(ψ) 34 | v_centre = first(G.center(g)) 35 | 36 | ψ, _ = TN.symmetric_gauge(ψ) 37 | 38 | println("Computing single site expectation value via various means") 39 | 40 | sz_bp = expect(ψ, ("Z", [v_centre]); alg = "bp") 41 | println("BP value for Z is $sz_bp") 42 | 43 | boundary_mps_ranks = [1, 2, 4, 8, 16, 32] 44 | for r in boundary_mps_ranks 45 | sz_boundarymps = expect( 46 | ψ, 47 | ("Z", [v_centre]); 48 | alg = "boundarymps", 49 | cache_construction_kwargs = (; message_rank = r), 50 | ) 51 | println("Boundary MPS Value for Z at Rank $r is $sz_boundarymps") 52 | end 53 | 54 | sz_exact = expect(ψ, ("Z", [v_centre]); alg = "exact") 55 | println("Exact value for Z is $sz_exact") 56 | 57 | if !is_tree(g) 58 | v_centre_neighbor = 59 | first(filter(v -> first(v) == first(v_centre), neighbors(g, v_centre))) 60 | println("Computing two site, neighboring, expectation value via various means") 61 | 62 | sz_bp = expect(ψ, ("ZZ", [v_centre, v_centre_neighbor]); alg = "bp") 63 | println("BP value for ZZ is $sz_bp") 64 | 65 | boundary_mps_ranks = [1, 2, 4, 8, 16, 32] 66 | for r in boundary_mps_ranks 67 | sz_boundarymps = expect( 68 | ψ, 69 | ("ZZ", [v_centre, v_centre_neighbor]); 70 | alg = "boundarymps", 71 | message_rank = r, 72 | ) 73 | println("Boundary MPS Value for ZZ at Rank $r is $sz_boundarymps") 74 | end 75 | 76 | sz_exact = expect(ψ, ("ZZ", [v_centre, v_centre_neighbor]); alg = "exact") 77 | println("Exact value for ZZ is $sz_exact") 78 | end 79 | end 80 | end 81 | 82 | main() 83 | -------------------------------------------------------------------------------- /examples/loopcorrections.jl: -------------------------------------------------------------------------------- 1 | using TensorNetworkQuantumSimulator 2 | const TN = TensorNetworkQuantumSimulator 3 | 4 | using ITensorNetworks 5 | const ITN = ITensorNetworks 6 | using ITensors 7 | 8 | using NamedGraphs 9 | using Graphs 10 | const NG = NamedGraphs 11 | const G = Graphs 12 | using NamedGraphs.NamedGraphGenerators: named_grid, named_hexagonal_lattice_graph 13 | 14 | using EinExprs: Greedy 15 | 16 | using Random 17 | Random.seed!(1634) 18 | 19 | function main() 20 | nx, ny = 5, 5 21 | χ = 3 22 | ITensors.disable_warn_order() 23 | gs = [ 24 | (named_grid((nx, 1)), "line", 0), 25 | 
(named_hexagonal_lattice_graph(nx, ny), "hexagonal", 6), 26 | (named_grid((nx, ny)), "square", 4), 27 | ] 28 | for (g, g_str, smallest_loop_size) in gs 29 | println("Testing for $g_str lattice with $(NG.nv(g)) vertices") 30 | s = siteinds("S=1/2", g) 31 | ψ = ITN.random_tensornetwork(ComplexF64, s; link_space = χ) 32 | s = ITN.siteinds(ψ) 33 | 34 | ψ = normalize(ψ; alg = "bp") 35 | 36 | norm_sqr_bp = inner(ψ, ψ; alg = "loopcorrections", max_configuration_size = 0, cache_update_kwargs = TN.default_posdef_bp_update_kwargs()) 37 | norm_sqr = inner( 38 | ψ, 39 | ψ; 40 | alg = "loopcorrections", 41 | max_configuration_size = 2*(smallest_loop_size) - 1, 42 | ) 43 | norm_sqr_exact = inner( 44 | ψ, 45 | ψ; 46 | alg = "exact", 47 | contraction_sequence_kwargs = (; alg = "einexpr", optimizer = Greedy()), 48 | ) 49 | 50 | println("Bp Value for norm is $norm_sqr_bp") 51 | println("1st Order Loop Corrected Value for norm is $norm_sqr") 52 | println("Exact Value for norm is $norm_sqr_exact") 53 | end 54 | end 55 | 56 | main() 57 | -------------------------------------------------------------------------------- /examples/time-evolution.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "b508750a-ded1-40c1-9b16-326ef2a2564c", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "using TensorNetworkQuantumSimulator\n", 11 | "const TN = TensorNetworkQuantumSimulator\n", 12 | "\n", 13 | "using ITensorNetworks\n", 14 | "\n", 15 | "using NamedGraphs.NamedGraphGenerators: named_grid" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "id": "3dd1eb1b-2253-425e-8e87-8123f9789d09", 22 | "metadata": { 23 | "scrolled": true 24 | }, 25 | "outputs": [], 26 | "source": [ 27 | "nx = 5\n", 28 | "ny = 5\n", 29 | "\n", 30 | "# the graph is your main friend in working with the TNs\n", 31 | "g = named_grid((nx, ny))\n", 32 | "nq = length(vertices(g))" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 3, 38 | "id": "56473a6f-2ca4-469c-838f-cb1d4b291130", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "dt = 0.05\n", 43 | "\n", 44 | "hx = 1.0\n", 45 | "hz = 0.8\n", 46 | "J = 0.5\n", 47 | "\n", 48 | "# pauli rotations are tuples like `(pauli_string, [site_labels], parameter)`\n", 49 | "layer = []\n", 50 | "append!(layer, (\"RX\", [v], 2*hx*dt) for v in vertices(g))\n", 51 | "append!(layer, (\"RZ\", [v], 2*hz*dt) for v in vertices(g))\n", 52 | "append!(layer, (\"RZZ\", pair, 2*J*dt) for pair in edges(g));" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "273ff2f3-f55c-48a4-8d1f-c6ba3e83947f", 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# observables are tuples like `(pauli_string, [site_labels], optional:coefficient)`\n", 63 | "# it's important that the `site_labels` match the names of the vertices of the graph `g`\n", 64 | "obs = (\"Z\", [(3, 3)]) # right in the middle" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "id": "97e62b8f-e3b4-4ae8-9085-e28a0e62bbfa", 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "# the number of circuit layers\n", 75 | "nl = 25" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "id": "42a46f00-9c53-493b-b932-4db0da47ab33", 82 | "metadata": { 83 | "scrolled": true 84 | }, 85 | "outputs": [], 86 | "source": [ 87 | "# the initial state\n", 88 | "ψ = 
zerostate(g)\n", 89 | "\n", 90 | "# an array to keep track of expectations\n", 91 | "expectations = Float64[real(expect(ψ, obs))]\n", 92 | "\n", 93 | "# evolve! The first evaluation will take significantly longer because of compulation.\n", 94 | "for l in 1:nl\n", 95 | " #printing\n", 96 | " println(\"Layer $l\")\n", 97 | "\n", 98 | " # apply layer\n", 99 | " t = @timed ψ, errors = apply(layer, ψ);\n", 100 | "\n", 101 | " # push expectation to list\n", 102 | " push!(expectations, real(expect(ψ, obs)))\n", 103 | "\n", 104 | " # printing\n", 105 | " println(\" Took time: $(t.time) [s]. Max bond dimension: $(maxlinkdim(ψ))\")\n", 106 | "end" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "id": "9b9faa8d-162d-4244-8aef-30d5ccd6f1b5", 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "using Plots\n", 117 | "plot((0:nl) .* dt, expectations, xlabel=\"t\", ylabel=\"Expectation\", lw=2, label=\"Evaluate with belief propagation\")" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "id": "b32c65a6-e8b1-44e2-8124-aee61dd27c71", 124 | "metadata": {}, 125 | "outputs": [], 126 | "source": [] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 8, 131 | "id": "3af3987e-a45c-4a6e-9229-6985bda6bc35", 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "## A few more advanced options\n", 136 | "# we will still do exactly the same evolution but also do boundary mps for expectation values" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": null, 142 | "id": "d639e470-b17e-42d2-a1d9-6a28e803ab79", 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "# max bond dimension for the TN\n", 147 | "# we will use enough and just see how\n", 148 | "apply_kwargs = (maxdim=10, cutoff=1e-10, normalize=false)" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "id": "833be3c7-d6ce-4146-8953-4d8dea8e50d8", 155 | "metadata": { 156 | "scrolled": true 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "# the initial state\n", 161 | "ψ = zerostate(g)\n", 162 | "\n", 163 | "# create the BP cache manually\n", 164 | "ψψ = build_bp_cache(ψ)\n", 165 | "\n", 166 | "# an array to keep track of expectations\n", 167 | "expectations_advanced = Float64[real(expect(ψ, obs))]\n", 168 | "boundarymps_rank = 4\n", 169 | "\n", 170 | "# evolve! The first evaluation will take significantly longer because of compulation.\n", 171 | "for l in 1:nl\n", 172 | " println(\"Layer $l\")\n", 173 | "\n", 174 | " # pass BP cache manually\n", 175 | " t1 = @timed ψ, ψψ, errors = apply(layer, ψ, ψψ; apply_kwargs, verbose=false);\n", 176 | " \n", 177 | " ## could also update outside \n", 178 | " # t2 = @timed ψψ = updatecache(ψψ)\n", 179 | "\n", 180 | " # push expectation to list\n", 181 | " # pass the cache instead of the state so that things don't have to update over and over\n", 182 | " push!(expectations_advanced, real(expect(ψ, obs; alg = \"boundarymps\", cache_construction_kwargs =(; message_rank = boundarymps_rank)))) # with some boundary mps correction\n", 183 | "\n", 184 | " \n", 185 | " println(\" Took time: $(t1.time) [s]. 
Max bond dimension: $(maxlinkdim(ψ))\")\n", 186 | "end" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "id": "89d0a34f-39ed-4d90-8a2d-d3324e516cc2", 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "plot((0:nl) .* dt, expectations, xlabel=\"t\", ylabel=\"Expectation\", lw=2, label=\"Evaluate with belief propagation\")\n", 197 | "plot!((0:nl) .* dt, expectations_advanced, lw=2, label=\"With some boundary mps rank $boundarymps_rank\")" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": null, 203 | "id": "ce2063cd-8acc-4e44-9764-9ae436c2163b", 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [] 207 | } 208 | ], 209 | "metadata": { 210 | "kernelspec": { 211 | "display_name": "Julia 1.11.2", 212 | "language": "julia", 213 | "name": "julia-1.11" 214 | }, 215 | "language_info": { 216 | "file_extension": ".jl", 217 | "mimetype": "application/julia", 218 | "name": "julia", 219 | "version": "1.11.2" 220 | } 221 | }, 222 | "nbformat": 4, 223 | "nbformat_minor": 5 224 | } 225 | -------------------------------------------------------------------------------- /examples/time-evolution.jl: -------------------------------------------------------------------------------- 1 | using TensorNetworkQuantumSimulator 2 | const TN = TensorNetworkQuantumSimulator 3 | 4 | using ITensorNetworks 5 | 6 | using NamedGraphs.NamedGraphGenerators: named_grid 7 | using Statistics 8 | 9 | function main() 10 | nx = 5 11 | ny = 5 12 | 13 | # the graph is your main friend. This will be the geometry of the TN you wull work with 14 | g = named_grid((nx, ny)) 15 | nq = length(vertices(g)) 16 | 17 | dt = 0.25 18 | 19 | hx = 1.0 20 | hz = 0.8 21 | J = 0.5 22 | 23 | #Build a layer of the circuit. Pauli rotations are tuples like `(pauli_string, [site_labels], parameter)` 24 | layer = [] 25 | append!(layer, ("Rx", [v], 2*hx*dt) for v in vertices(g)) 26 | append!(layer, ("Rz", [v], 2*hz*dt) for v in vertices(g)) 27 | append!(layer, ("Rzz", pair, 2*J*dt) for pair in edges(g)); 28 | 29 | # observables are tuples like `(pauli_string, [site_labels], optional:coefficient)` 30 | # it's important that the `site_labels` match the names of the vertices of the graph `g` 31 | obs = ("Z", [(3, 3)]) # right in the middle 32 | 33 | # the number of circuit layers 34 | nl = 20 35 | 36 | # the initial state 37 | ψ = zerostate(g) 38 | 39 | # an array to keep track of expectations 40 | expectations = Float64[real(expect(ψ, obs))] 41 | 42 | # max bond dimension for the TN 43 | # we will use enough and just see how 44 | apply_kwargs = (maxdim = 5, cutoff = 1e-10, normalize = false) 45 | 46 | # evolve! The first evaluation will take significantly longer because of compilation. 47 | for l = 1:nl 48 | #printing 49 | println("Layer $l") 50 | 51 | # apply layer 52 | t = @timed ψ, errors = apply(layer, ψ; apply_kwargs); 53 | 54 | # push expectation to list 55 | push!(expectations, real(expect(ψ, obs))) 56 | 57 | # printing 58 | println(" Took time: $(t.time) [s]. 
Max bond dimension: $(maxlinkdim(ψ))") 59 | println(" Maximum Gate error for layer was $(maximum(errors))") 60 | end 61 | 62 | 63 | ## A few more advanced options 64 | # we will still do exactly the same evolution but also do boundary mps for expectation values 65 | 66 | # the initial state 67 | ψ = zerostate(g) 68 | 69 | # create the BP cache manually 70 | ψψ = build_bp_cache(ψ) 71 | 72 | # an array to keep track of expectations 73 | expectations_advanced = Float64[real(expect(ψ, obs))] 74 | boundarymps_rank = 4 75 | 76 | # evolve! The first evaluation will take significantly longer because of compulation. 77 | for l = 1:nl 78 | println("Layer $l") 79 | 80 | # pass BP cache manually 81 | t1 = @timed ψ, ψψ, errors = 82 | apply(layer, ψ, ψψ; apply_kwargs, verbose = false); 83 | 84 | ## could also update outside 85 | # t2 = @timed ψψ = updatecache(ψψ) 86 | 87 | # push expectation to list 88 | # pass the cache instead of the state so that things don't have to update over and over 89 | push!( 90 | expectations_advanced, 91 | real( 92 | expect( 93 | ψ, 94 | obs; 95 | alg = "boundarymps", 96 | cache_construction_kwargs = (; message_rank = boundarymps_rank), 97 | ), 98 | ), 99 | ) # with some boundary mps correction 100 | 101 | 102 | println(" Took time: $(t1.time) [s]. Max bond dimension: $(maxlinkdim(ψ))") 103 | println(" Maximum Gate error for layer was $(maximum(errors))") 104 | end 105 | end 106 | 107 | main() 108 | -------------------------------------------------------------------------------- /examples/time_evolution_Heisenberg.jl: -------------------------------------------------------------------------------- 1 | using TensorNetworkQuantumSimulator 2 | const TN = TensorNetworkQuantumSimulator 3 | 4 | using ITensorNetworks 5 | const ITN = ITensorNetworks 6 | using ITensors 7 | 8 | using NamedGraphs 9 | using Graphs 10 | const NG = NamedGraphs 11 | const G = Graphs 12 | using NamedGraphs.NamedGraphGenerators: named_grid 13 | 14 | function main() 15 | nx, ny = 4,4 16 | g = named_grid((nx, ny)) 17 | 18 | nqubits = length(vertices(g)) 19 | #Physical indices represent "Identity, X, Y, Z" in that order 20 | s = ITN.siteinds(4, g) 21 | #Initial operator is Z on the designated site 22 | vz = first(center(g)) 23 | init_state = ("Z", [vz]) 24 | ψ0 = TN.topaulitensornetwork(init_state, s) 25 | 26 | maxdim, cutoff = 4, 1e-14 27 | apply_kwargs = (; maxdim, cutoff, normalize = true) 28 | #Parameters for BP, as the graph is not a tree (it has loops), we need to specify these 29 | 30 | ψ = copy(ψ0) 31 | 32 | ψψ = build_bp_cache(ψ) 33 | 34 | h, J = -1.0, -1.0 35 | no_trotter_steps = 10 36 | δt = 0.04 37 | 38 | #Do a 4-way edge coloring then Trotterise the Hamiltonian into commuting groups. 
Let's do Ising with the designated parameters
 39 |     layer = []
 40 |     ec = edge_color(g, 4)
 41 |     append!(layer, ("Rz", [v], h*δt) for v in vertices(g))
 42 |     for colored_edges in ec
 43 |         append!(layer, ("Rxx", pair, 2*J*δt) for pair in colored_edges)
 44 |     end
 45 |     append!(layer, ("Rz", [v], h*δt) for v in vertices(g))
 46 | 
 47 |     χinit = maxlinkdim(ψ)
 48 |     println("Initial bond dimension of the Heisenberg operator is $χinit")
 49 | 
 50 |     time = 0
 51 | 
 52 |     Zs = Float64[]
 53 | 
 54 |     for l = 1:no_trotter_steps
 55 |         println("Layer $l")
 56 | 
 57 |         #Apply the circuit
 58 |         t = @timed ψ, ψψ, errors =
 59 |             apply(layer, ψ, ψψ; apply_kwargs, verbose = false);
 60 |         #Reset the Frobenius norm to unity
 61 |         ψ, ψψ = normalize(ψ, ψψ)
 62 |         println("Frobenius norm of O(t) is $(scalar(ψψ))")
 63 | 
 64 |         #Take traces
 65 |         tr_ψt = inner(ψ, TN.identitytensornetwork(s); alg = "bp")
 66 |         tr_ψtψ0 = inner(ψ, ψ0; alg = "bp")
 67 |         println("Trace(O(t)) is $(tr_ψt)")
 68 |         println("Trace(O(t)O(0)) is $(tr_ψtψ0)")
 69 | 
 70 |         # printing
 71 |         println("Took time: $(t.time) [s]. Max bond dimension: $(maxlinkdim(ψ))")
 72 |         println("Maximum Gate error for layer was $(maximum(errors))")
 73 |     end
 74 | end
 75 | 
 76 | main()
 77 | 
--------------------------------------------------------------------------------
/src/Backend/beliefpropagation.jl:
--------------------------------------------------------------------------------
  1 | const _default_bp_update_maxiter = 25
  2 | const _default_bp_update_tol = 1e-10
  3 | 
  4 | ## Frontend functions
  5 | 
  6 | # `default_message_update` lives in ITensorNetworks.jl
  7 | default_posdef_message_update_function(ms) = make_hermitian.(default_message_update(ms))
  8 | 
  9 | function default_posdef_bp_update_kwargs(; cache_is_tree = false)
 10 |     message_update_function = default_posdef_message_update_function
 11 |     return (; maxiter=default_bp_update_maxiter(cache_is_tree), tol=_default_bp_update_tol, message_update_kwargs=(; message_update_function))
 12 | end
 13 | 
 14 | function default_nonposdef_bp_update_kwargs(; cache_is_tree = false)
 15 |     message_update_function = default_message_update
 16 |     return (; maxiter=default_bp_update_maxiter(cache_is_tree), tol=_default_bp_update_tol, message_update_kwargs=(; message_update_function))
 17 | end
 18 | 
 19 | function default_bp_update_maxiter(cache_is_tree::Bool = false)
 20 |     !cache_is_tree && return _default_bp_update_maxiter
 21 |     return 1
 22 | end
 23 | 
 24 | """
 25 |     updatecache(bp_cache::BeliefPropagationCache; maxiter::Int64, tol::Number, message_update_kwargs = (; message_update_function = default_message_update))
 26 | 
 27 | Update the message tensors inside a bp-cache, running over the graph up to maxiter times until convergence to the desired tolerance `tol`.
 28 | If the cache is positive definite, the message update function can be set to `default_posdef_message_update_function` (see `default_posdef_bp_update_kwargs`), which keeps the message tensors Hermitian.
 29 | """
 30 | function updatecache(bp_cache; maxiter=default_bp_update_maxiter(is_tree(partitioned_graph(bp_cache))), tol=_default_bp_update_tol, message_update_kwargs=(; message_update_function=default_message_update))
 31 |     return update(bp_cache; maxiter, tol, message_update_kwargs)
 32 | end
 33 | 
 34 | """
 35 |     build_bp_cache(ψ::ITensorNetwork, args...; kwargs...)
 36 | 
 37 | Build the tensor network and cache of message tensors for the norm-square network `ψIψ`.
 38 | """
 39 | function build_bp_cache(
 40 |     ψ::AbstractITensorNetwork,
 41 |     args...;
 42 |     update_cache=true,
 43 |     cache_update_kwargs=default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
 44 | )
 45 |     bp_cache = BeliefPropagationCache(QuadraticFormNetwork(ψ), args...)
 46 |     # TODO: QuadraticFormNetwork() builds ψIψ network, but for Pauli picture `norm_sqr_network()` is enough
 47 |     # https://github.com/ITensor/ITensorNetworks.jl/blob/main/test/test_belief_propagation.jl line 49 to construct the cache without the identities.
 48 |     if update_cache
 49 |         bp_cache = updatecache(bp_cache; cache_update_kwargs...)
 50 |     end
 51 |     return bp_cache
 52 | end
 53 | 
 54 | """
 55 |     is_flat(bpc::BeliefPropagationCache)
 56 | 
 57 | Is the network inside `bpc` flat, i.e. does every partition contain only one tensor?
 58 | """
 59 | function is_flat(bpc::BeliefPropagationCache)
 60 |     pg = partitioned_tensornetwork(bpc)
 61 |     return all([length(vertices(pg, pv)) == 1 for pv in partitionvertices(pg)])
 62 | end
 63 | 
 64 | """
 65 |     symmetric_gauge(ψ::AbstractITensorNetwork; cache_update_kwargs = default_posdef_bp_update_kwargs(), kwargs...)
 66 | 
 67 | Transform a tensor network into the symmetric gauge, where the BP message tensors are all diagonal
 68 | """
 69 | function symmetric_gauge(ψ::AbstractITensorNetwork; cache_update_kwargs=default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)), kwargs...)
 70 |     ψ_vidal = VidalITensorNetwork(ψ; cache_update_kwargs, kwargs...)
 71 |     cache_ref = Ref{BeliefPropagationCache}()
 72 |     ψ_symm = ITensorNetwork(ψ_vidal; (cache!)=cache_ref)
 73 |     bp_cache = cache_ref[]
 74 |     return ψ_symm, bp_cache
 75 | end
 76 | 
 77 | """
 78 |     LinearAlgebra.normalize(ψ::ITensorNetwork, ψψ_bpc::BeliefPropagationCache; cache_update_kwargs = default_posdef_bp_update_kwargs(), update_cache = false)
 79 | 
 80 | Scale a tensor network and its norm_sqr cache such that ψIψ = 1 under the BP approximation
 81 | """
 82 | function LinearAlgebra.normalize(
 83 |     ψ::ITensorNetwork,
 84 |     ψψ_bpc::BeliefPropagationCache;
 85 |     cache_update_kwargs=default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
 86 |     update_cache=false,
 87 | )
 88 |     ψψ_bpc_ref = Ref(copy(ψψ_bpc))
 89 |     ψ = normalize(ψ; alg="bp", (cache!)=ψψ_bpc_ref, cache_update_kwargs, update_cache)
 90 | 
 91 |     return ψ, ψψ_bpc_ref[]
 92 | end
 93 | 
 94 | """
 95 |     ITensors.scalar(bp_cache::AbstractBeliefPropagationCache; alg = "bp", cache_update_kwargs)
 96 | 
 97 | Compute the contraction of the tensor network inside the bp_cache with different algorithm choices
 98 | """
 99 | function ITensors.scalar(
100 |     bp_cache::AbstractBeliefPropagationCache,
101 |     args...;
102 |     alg="bp",
103 |     kwargs...,
104 | )
105 |     return scalar(Algorithm(alg), bp_cache, args...; kwargs...)
106 | end 107 | 108 | function ITensors.scalar(alg::Algorithm"bp", bp_cache::AbstractBeliefPropagationCache) 109 | return scalar(bp_cache) 110 | end 111 | 112 | """ 113 | ITensorNetworks.region_scalar(bpc::BeliefPropagationCache, verts::Vector) 114 | 115 | Compute contraction involving incoming messages to the contiguous set of tensors on the given vertices 116 | """ 117 | function ITensorNetworks.region_scalar(bpc::BeliefPropagationCache, verts::Vector) 118 | partitions = partitionvertices(bpc, verts) 119 | length(partitions) == 1 && return region_scalar(bpc, only(partitions)) 120 | if length(partitions) == 2 121 | p1, p2 = first(partitions), last(partitions) 122 | if parent(p1) ∉ neighbors(partitioned_graph(bpc), parent(p2)) 123 | error( 124 | "Only contractions involving neighboring partitions are currently supported", 125 | ) 126 | end 127 | ms = incoming_messages(bpc, partitions) 128 | local_tensors = factors(bpc, partitions) 129 | ts = [ms; local_tensors] 130 | seq = contraction_sequence(ts; alg="optimal") 131 | return contract(ts; sequence=seq)[] 132 | end 133 | error("Contractions involving more than 2 partitions not currently supported") 134 | return nothing 135 | end 136 | 137 | 138 | """ 139 | entanglement(ψ::ITensorNetwork, e::NamedEdge; (cache!) = nothing, cache_update_kwargs = default_posdef_bp_update_kwargs()) 140 | 141 | Bipartite Von-Neumann entanglement entropy, estimated, via BP, using the spectrum of the bond tensor on the given edge. 142 | """ 143 | function entanglement( 144 | ψ::ITensorNetwork, 145 | e::NamedEdge; 146 | (cache!)=nothing, 147 | cache_update_kwargs=default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)), 148 | ) 149 | cache = isnothing(cache!) ? build_bp_cache(ψ; cache_update_kwargs) : cache![] 150 | ψ_vidal = VidalITensorNetwork(ψ; cache) 151 | bt = ITensorNetworks.bond_tensor(ψ_vidal, e) 152 | ee = 0 153 | for d in diag(bt) 154 | ee -= abs(d) >= eps(eltype(bt)) ? d * d * log2(d * d) : 0 155 | end 156 | return abs(ee) 157 | end 158 | 159 | function make_hermitian(A::ITensor) 160 | A_inds = inds(A) 161 | @assert length(A_inds) == 2 162 | return (A + ITensors.swapind(conj(A), first(A_inds), last(A_inds))) / 2 163 | end 164 | 165 | #Delete the message tensor on partition edge pe from the cache 166 | function delete_message!(bpc::AbstractBeliefPropagationCache, pe::PartitionEdge) 167 | return delete_messages!(bpc, [pe]) 168 | end 169 | 170 | #Delete the message tensors on the vector of partition edges pes from the cache 171 | function delete_messages!(bpc::AbstractBeliefPropagationCache, pes::Vector) 172 | ms = messages(bpc) 173 | for pe in pes 174 | haskey(ms, pe) && delete!(ms, pe) 175 | end 176 | return bpc 177 | end -------------------------------------------------------------------------------- /src/Backend/boundarymps.jl: -------------------------------------------------------------------------------- 1 | struct BoundaryMPSCache{V, PV, BPC<:AbstractBeliefPropagationCache{V, PV},PG} <: AbstractBeliefPropagationCache{V, PV} 2 | bp_cache::BPC 3 | partitionedplanargraph::PG 4 | maximum_virtual_dimension::Int64 5 | end 6 | 7 | const _default_boundarymps_update_alg = "orthogonal" 8 | const _default_boundarymps_update_niters = 40 9 | const _default_boundarymps_update_tolerance = 1e-12 10 | const _default_boundarymps_update_cutoff = 1e-12 11 | 12 | function default_boundarymps_update_kwargs(; cache_is_flat = false, kwargs...) 
 13 |     alg = ITensorNetworks.default_message_update_alg(cache_is_flat)
 14 |     return (; alg, message_update_kwargs = ITensorNetworks.default_message_update_kwargs(; cache_is_flat, kwargs...))
 15 | end
 16 | 
 17 | ITensorNetworks.default_message_update_alg(cache_is_flat::Bool = false) = cache_is_flat ? "ITensorMPS" : "orthogonal"
 18 | 
 19 | function ITensorNetworks.default_message_update_kwargs(; cache_is_flat = false, cutoff = _default_boundarymps_update_cutoff, kwargs...)
 20 |     !cache_is_flat && return (; niters = _default_boundarymps_update_niters, tolerance = _default_boundarymps_update_tolerance)
 21 |     return (; cutoff = cutoff, kwargs...)
 22 | end
 23 | 
 24 | ITensorNetworks.default_cache_update_kwargs(alg::Algorithm"boundarymps") = default_boundarymps_update_kwargs()
 25 | 
 26 | ## Frontend functions
 27 | 
 28 | """
 29 |     updatecache(bmpsc::BoundaryMPSCache; alg, message_update_kwargs = (; niters, tolerance))
 30 | 
 31 | Update the MPS messages inside a boundaryMPS-cache.
 32 | """
 33 | function updatecache(bmpsc::BoundaryMPSCache, args...; alg = ITensorNetworks.default_message_update_alg(is_flat(bmpsc)),
 34 |     message_update_kwargs = ITensorNetworks.default_message_update_kwargs(; cache_is_flat = is_flat(bmpsc), maxdim = maximum_virtual_dimension(bmpsc)), kwargs...)
 35 |     return update(bmpsc, args...; alg, message_update_kwargs, kwargs...)
 36 | end
 37 | 
 38 | """
 39 |     build_boundarymps_cache(ψ::AbstractITensorNetwork, message_rank::Int64; cache_construction_kwargs = (;), cache_update_kwargs = default_boundarymps_update_kwargs())
 40 | 
 41 | Build the Boundary MPS cache for ψIψ and update it appropriately
 42 | """
 43 | function build_boundarymps_cache(
 44 |     ψ::AbstractITensorNetwork,
 45 |     message_rank::Int64;
 46 |     cache_construction_kwargs = (;),
 47 |     cache_update_kwargs = default_boundarymps_update_kwargs(; cache_is_flat = false, maxdim = message_rank),
 48 |     update_bp_cache = false,
 49 |     update_cache = true
 50 | )
 51 |     # build the BP cache
 52 |     ψIψ = build_bp_cache(ψ; update_cache = update_bp_cache)
 53 | 
 54 |     # convert the BP cache to a boundary MPS cache and update its MPS messages if requested
 55 |     return build_boundarymps_cache(
 56 |         ψIψ,
 57 |         message_rank;
 58 |         cache_construction_kwargs,
 59 |         cache_update_kwargs,
 60 |         update_cache
 61 |     )
 62 | end
 63 | 
 64 | function build_boundarymps_cache(
 65 |     ψIψ::AbstractBeliefPropagationCache,
 66 |     message_rank::Int64;
 67 |     update_cache = true,
 68 |     cache_construction_kwargs = (;),
 69 |     cache_update_kwargs = default_boundarymps_update_kwargs(; cache_is_flat = is_flat(ψIψ), maxdim = message_rank),
 70 | )
 71 | 
 72 |     ψIψ = BoundaryMPSCache(ψIψ; message_rank, cache_construction_kwargs...)
 73 | 
 74 |     if update_cache
 75 |         ψIψ = updatecache(ψIψ; cache_update_kwargs...)
76 | end 77 | 78 | return ψIψ 79 | end 80 | 81 | is_flat(bmpsc::BoundaryMPSCache) = is_flat(bp_cache(bmpsc)) 82 | 83 | ## Backend functions 84 | bp_cache(bmpsc::BoundaryMPSCache) = bmpsc.bp_cache 85 | partitionedplanargraph(bmpsc::BoundaryMPSCache) = bmpsc.partitionedplanargraph 86 | ppg(bmpsc) = partitionedplanargraph(bmpsc) 87 | maximum_virtual_dimension(bmpsc::BoundaryMPSCache) = bmpsc.maximum_virtual_dimension 88 | planargraph(bmpsc::BoundaryMPSCache) = unpartitioned_graph(partitionedplanargraph(bmpsc)) 89 | 90 | function ITensorNetworks.partitioned_tensornetwork(bmpsc::BoundaryMPSCache) 91 | return partitioned_tensornetwork(bp_cache(bmpsc)) 92 | end 93 | ITensorNetworks.messages(bmpsc::BoundaryMPSCache) = messages(bp_cache(bmpsc)) 94 | 95 | function ITensorNetworks.default_bp_maxiter( 96 | alg::Algorithm, 97 | bmpsc::BoundaryMPSCache, 98 | ) 99 | return default_bp_maxiter(partitioned_graph(ppg(bmpsc))) 100 | end 101 | function ITensorNetworks.default_edge_sequence(alg::Algorithm, bmpsc::BoundaryMPSCache) 102 | return pair.(default_edge_sequence(ppg(bmpsc))) 103 | end 104 | 105 | default_boundarymps_message_rank(tn::AbstractITensorNetwork) = maxlinkdim(tn)^2 106 | ITensorNetworks.partitions(bmpsc::BoundaryMPSCache) = 107 | parent.(collect(partitionvertices(ppg(bmpsc)))) 108 | NamedGraphs.PartitionedGraphs.partitionedges(bmpsc::BoundaryMPSCache) = pair.(partitionedges(ppg(bmpsc))) 109 | 110 | function ITensorNetworks.cache( 111 | alg::Algorithm"boundarymps", 112 | tn; 113 | bp_cache_construction_kwargs = default_cache_construction_kwargs(Algorithm("bp"), tn), 114 | kwargs..., 115 | ) 116 | return BoundaryMPSCache( 117 | BeliefPropagationCache(tn; bp_cache_construction_kwargs...); 118 | kwargs..., 119 | ) 120 | end 121 | 122 | function ITensorNetworks.default_cache_construction_kwargs(alg::Algorithm"boundarymps", tn) 123 | return (; 124 | bp_cache_construction_kwargs = default_cache_construction_kwargs( 125 | Algorithm("bp"), 126 | tn, 127 | ) 128 | ) 129 | end 130 | 131 | function Base.copy(bmpsc::BoundaryMPSCache) 132 | return BoundaryMPSCache( 133 | copy(bp_cache(bmpsc)), 134 | copy(ppg(bmpsc)), 135 | maximum_virtual_dimension(bmpsc), 136 | ) 137 | end 138 | 139 | function ITensorNetworks.default_message( 140 | bmpsc::BoundaryMPSCache, 141 | pe::PartitionEdge; 142 | kwargs..., 143 | ) 144 | return default_message(bp_cache(bmpsc), pe::PartitionEdge; kwargs...) 
145 | end 146 | 147 | #Get the dimension of the virtual index between the two message tensors on pe1 and pe2 148 | function virtual_index_dimension( 149 | bmpsc::BoundaryMPSCache, 150 | pe1::PartitionEdge, 151 | pe2::PartitionEdge, 152 | ) 153 | pes = planargraph_sorted_partitionedges(bmpsc, planargraph_partitionpair(bmpsc, pe1)) 154 | 155 | if findfirst(x -> x == pe1, pes) > findfirst(x -> x == pe2, pes) 156 | lower_pe, upper_pe = pe2, pe1 157 | else 158 | lower_pe, upper_pe = pe1, pe2 159 | end 160 | inds_above = reduce(vcat, linkinds.((bmpsc,), partitionedges_above(bmpsc, lower_pe))) 161 | inds_below = reduce(vcat, linkinds.((bmpsc,), partitionedges_below(bmpsc, upper_pe))) 162 | 163 | return Int(minimum(( 164 | prod(Float64.(dim.(inds_above))), 165 | prod(Float64.(dim.(inds_below))), 166 | Float64(maximum_virtual_dimension(bmpsc)), 167 | ))) 168 | end 169 | 170 | #Vertices of the planargraph 171 | function planargraph_vertices(bmpsc::BoundaryMPSCache, partition) 172 | return vertices(ppg(bmpsc), PartitionVertex(partition)) 173 | end 174 | 175 | #Get partition(s) of vertices of the planargraph 176 | function planargraph_partitions(bmpsc::BoundaryMPSCache, vertices::Vector) 177 | return parent.(partitionvertices(ppg(bmpsc), vertices)) 178 | end 179 | 180 | function planargraph_partition(bmpsc::BoundaryMPSCache, vertex) 181 | return only(planargraph_partitions(bmpsc, [vertex])) 182 | end 183 | 184 | #Get interpartition pairs of partition edges in the underlying partitioned tensornetwork 185 | function planargraph_partitionpair(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 186 | return pair(partitionedge(ppg(bmpsc), parent(pe))) 187 | end 188 | 189 | #Sort (bottom to top) partitoonedges between pair of partitions in the planargraph 190 | function planargraph_sorted_partitionedges(bmpsc::BoundaryMPSCache, partitionpair::Pair) 191 | pg = ppg(bmpsc) 192 | src_vs, dst_vs = vertices(pg, PartitionVertex(first(partitionpair))), 193 | vertices(pg, PartitionVertex(last(partitionpair))) 194 | es = reduce( 195 | vcat, 196 | [ 197 | [src_v => dst_v for dst_v in intersect(neighbors(pg, src_v), dst_vs)] for 198 | src_v in src_vs 199 | ], 200 | ) 201 | es = sort(NamedEdge.(es); by = x -> findfirst(isequal(src(x)), src_vs)) 202 | return PartitionEdge.(es) 203 | end 204 | 205 | #Constructor, inserts missing edge in the planar graph to ensure each partition is connected 206 | #allowing the code to work for arbitrary grids and not just square grids 207 | function BoundaryMPSCache( 208 | bpc::BeliefPropagationCache; 209 | grouping_function::Function = v -> first(v), 210 | group_sorting_function::Function = v -> last(v), 211 | message_rank::Int64 = default_boundarymps_message_rank(tensornetwork(bpc)), 212 | 213 | ) 214 | bpc = insert_pseudo_planar_edges(bpc; grouping_function) 215 | planar_graph = partitioned_graph(bpc) 216 | vertex_groups = group(grouping_function, collect(vertices(planar_graph))) 217 | vertex_groups = map(x -> sort(x; by = group_sorting_function), vertex_groups) 218 | ppg = PartitionedGraph(planar_graph, vertex_groups) 219 | bmpsc = BoundaryMPSCache(bpc, ppg, message_rank) 220 | set_interpartition_messages!(bmpsc) 221 | return bmpsc 222 | end 223 | 224 | function BoundaryMPSCache(tn, args...; kwargs...) 225 | return BoundaryMPSCache(BeliefPropagationCache(tn, args...); kwargs...) 
226 | end 227 | 228 | #Functions to get the parellel partitionedges sitting above and below a partitionedge 229 | function partitionedges_above(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 230 | pes = planargraph_sorted_partitionedges(bmpsc, planargraph_partitionpair(bmpsc, pe)) 231 | pe_pos = only(findall(x -> x == pe, pes)) 232 | return PartitionEdge[pes[i] for i = (pe_pos+1):length(pes)] 233 | end 234 | 235 | function partitionedges_below(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 236 | pes = planargraph_sorted_partitionedges(bmpsc, planargraph_partitionpair(bmpsc, pe)) 237 | pe_pos = only(findall(x -> x == pe, pes)) 238 | return PartitionEdge[pes[i] for i = 1:(pe_pos-1)] 239 | end 240 | 241 | function partitionedge_above(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 242 | pes_above = partitionedges_above(bmpsc, pe) 243 | isempty(pes_above) && return nothing 244 | return first(pes_above) 245 | end 246 | 247 | function partitionedge_below(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 248 | pes_below = partitionedges_below(bmpsc, pe) 249 | isempty(pes_below) && return nothing 250 | return last(pes_below) 251 | end 252 | 253 | #Initialise all the interpartition message tensors 254 | function set_interpartition_messages!( 255 | bmpsc::BoundaryMPSCache, 256 | partitionpairs::Vector{<:Pair}, 257 | ) 258 | m_keys = keys(messages(bmpsc)) 259 | for partitionpair in partitionpairs 260 | pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 261 | for pe in pes 262 | if pe ∉ m_keys 263 | set_message!(bmpsc, pe, ITensor[dense(delta(linkinds(bmpsc, pe)))]) 264 | end 265 | end 266 | for i = 1:(length(pes)-1) 267 | virt_dim = virtual_index_dimension(bmpsc, pes[i], pes[i+1]) 268 | ind = Index(virt_dim, "m$(i)$(i+1)") 269 | m1, m2 = only(message(bmpsc, pes[i])), only(message(bmpsc, pes[i+1])) 270 | set_message!(bmpsc, pes[i], ITensor[m1*delta(ind)]) 271 | set_message!(bmpsc, pes[i+1], ITensor[m2*delta(ind)]) 272 | end 273 | end 274 | return bmpsc 275 | end 276 | 277 | function set_interpartition_messages!(bmpsc::BoundaryMPSCache) 278 | partitionpairs = pair.(partitionedges(ppg(bmpsc))) 279 | return set_interpartition_messages!( 280 | bmpsc, 281 | vcat(partitionpairs, reverse.(partitionpairs)), 282 | ) 283 | end 284 | 285 | #Switch the message tensors on partition edges with their reverse (and dagger them) 286 | function switch_message!(bmpsc::BoundaryMPSCache, pe::PartitionEdge) 287 | ms = messages(bmpsc) 288 | me, mer = message(bmpsc, pe), message(bmpsc, reverse(pe)) 289 | set!(ms, pe, dag.(mer)) 290 | set!(ms, reverse(pe), dag.(me)) 291 | return bmpsc 292 | end 293 | 294 | function switch_messages!(bmpsc::BoundaryMPSCache, partitionpair::Pair) 295 | for pe in planargraph_sorted_partitionedges(bmpsc, partitionpair) 296 | switch_message!(bmpsc, pe) 297 | end 298 | return bmpsc 299 | end 300 | 301 | function partition_graph(bmpsc::BoundaryMPSCache, partition) 302 | vs = planargraph_vertices(bmpsc, partition) 303 | return subgraph(unpartitioned_graph(ppg(bmpsc)), vs) 304 | end 305 | 306 | function partition_update!(bmpsc::BoundaryMPSCache, seq::Vector{<:PartitionEdge}) 307 | message_update_function_kwargs = (; normalize = false) 308 | for pe in seq 309 | m = updated_message(bp_cache(bmpsc), pe; message_update_function_kwargs) 310 | set_message!(bmpsc, pe, m) 311 | end 312 | return bmpsc 313 | end 314 | 315 | #Out-of-place version 316 | function partition_update(bmpsc::BoundaryMPSCache, seq::Vector{<:PartitionEdge}) 317 | bmpsc = copy(bmpsc) 318 | return partition_update!(bmpsc, seq) 319 | end 320 | 321 
| #Move the orthogonality centre one step on an interpartition from the message tensor on pe1 to that on pe2 322 | function gauge_step!( 323 | alg::Algorithm"orthogonal", 324 | bmpsc::BoundaryMPSCache, 325 | pe1::PartitionEdge, 326 | pe2::PartitionEdge; 327 | kwargs..., 328 | ) 329 | m1, m2 = only(message(bmpsc, pe1)), only(message(bmpsc, pe2)) 330 | @assert !isempty(commoninds(m1, m2)) 331 | left_inds = uniqueinds(m1, m2) 332 | m1, Y = factorize(m1, left_inds; ortho = "left", kwargs...) 333 | m2 = m2 * Y 334 | set_message!(bmpsc, pe1, ITensor[m1]) 335 | set_message!(bmpsc, pe2, ITensor[m2]) 336 | return bmpsc 337 | end 338 | 339 | #Move the orthogonality centre via a sequence of steps between message tensors 340 | function gauge_walk!( 341 | alg::Algorithm, 342 | bmpsc::BoundaryMPSCache, 343 | seq::Vector; 344 | kwargs..., 345 | ) 346 | for (pe1, pe2) in seq 347 | gauge_step!(alg::Algorithm, bmpsc, pe1, pe2; kwargs...) 348 | end 349 | return bmpsc 350 | end 351 | 352 | function inserter!( 353 | alg::Algorithm, 354 | bmpsc::BoundaryMPSCache, 355 | update_pe::PartitionEdge, 356 | m::ITensor; 357 | ) 358 | set_message!(bmpsc, reverse(update_pe), ITensor[dag(m)]) 359 | return bmpsc 360 | end 361 | 362 | #Default 1-site extracter 363 | function extracter( 364 | alg::Algorithm"orthogonal", 365 | bmpsc::BoundaryMPSCache, 366 | update_pe::PartitionEdge; 367 | ) 368 | m = only(updated_message( 369 | bmpsc, 370 | update_pe; 371 | message_update_function_kwargs = (; normalize = false))) 372 | return m 373 | end 374 | 375 | function ITensors.commonind(bmpsc::BoundaryMPSCache, pe1::PartitionEdge, pe2::PartitionEdge) 376 | m1, m2 = message(bmpsc, pe1), message(bmpsc, pe2) 377 | return commonind(only(m1), only(m2)) 378 | end 379 | 380 | function merge_internal_tensors(O::Union{MPS, MPO}) 381 | internal_inds = filter(i -> isempty(ITensorMPS.siteinds(O, i)), [i for i in 1:length(O)]) 382 | 383 | while !isempty(internal_inds) 384 | site = first(internal_inds) 385 | tensors = [O[i] for i in setdiff([i for i in 1:length(O)], [site])] 386 | if site != length(O) 387 | tensors[site] = tensors[site] * O[site] 388 | else 389 | tensors[site - 1] = tensors[site - 1] * O[site] 390 | end 391 | 392 | O = typeof(O)(tensors) 393 | 394 | internal_inds = filter(i -> isempty(ITensorMPS.siteinds(O, i)), [i for i in 1:length(O)]) 395 | end 396 | return O 397 | end 398 | 399 | function ITensorMPS.MPO(bmpsc::BoundaryMPSCache, partition) 400 | sorted_vs = sort(planargraph_vertices(bmpsc, partition)) 401 | ts = [copy(bmpsc[v]) for v in sorted_vs] 402 | O = ITensorMPS.MPO(ts) 403 | return O 404 | end 405 | 406 | function ITensorMPS.MPS(bmpsc::BoundaryMPSCache, partitionpair::Pair) 407 | sorted_pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 408 | ms = [only(message(bmpsc, pe)) for pe in sorted_pes] 409 | return ITensorMPS.MPS(ms) 410 | end 411 | 412 | function truncate!(bmpsc::BoundaryMPSCache, partitionpair::Pair; truncate_kwargs...) 413 | M = ITensorMPS.MPS(bmpsc, partitionpair) 414 | M = ITensorMPS.truncate(M; truncate_kwargs...) 
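    # write the truncated MPS back onto the message tensors of this interpartition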
415 | return set_interpartition_message!(bmpsc, M, partitionpair) 416 | end 417 | 418 | function set_interpartition_message!(bmpsc::BoundaryMPSCache, M::Union{MPS, MPO}, partitionpair::Pair) 419 | sorted_pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 420 | for i in 1:length(M) 421 | set_message!(bmpsc, sorted_pes[i], ITensor[M[i]]) 422 | end 423 | return bmpsc 424 | end 425 | 426 | function updater!(alg::Algorithm"orthogonal", bmpsc::BoundaryMPSCache, partition_graph, prev_pe, update_pe) 427 | prev_pe == nothing && return bmpsc 428 | 429 | gauge_step!(alg, bmpsc, reverse(prev_pe), reverse(update_pe)) 430 | pupdate_seq = a_star(partition_graph, parent(src(prev_pe)), parent(src(update_pe))) 431 | partition_update!(bmpsc, PartitionEdge.(pupdate_seq)) 432 | return bmpsc 433 | end 434 | 435 | #Update all the message tensors on an interpartition via an n-site fitting procedure 436 | function ITensorNetworks.update( 437 | alg::Algorithm"orthogonal", 438 | bmpsc::BoundaryMPSCache, 439 | partitionpair::Pair; 440 | niters::Int64, 441 | tolerance, 442 | normalize = true, 443 | ) 444 | bmpsc = copy(bmpsc) 445 | delete_partition_messages!(bmpsc, first(partitionpair)) 446 | switch_messages!(bmpsc, partitionpair) 447 | 448 | pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 449 | pg = partition_graph(bmpsc, first(partitionpair)) 450 | update_seq = vcat([pes[i] for i in 1:length(pes)], [pes[i] for i in (length(pes) - 1):-1:2]) 451 | 452 | init_gauge_seq = [(reverse(pes[i]), reverse(pes[i-1])) for i in length(pes):-1:2] 453 | init_update_seq = post_order_dfs_edges(pg, parent(src(first(update_seq)))) 454 | !isempty(init_gauge_seq) && gauge_walk!(alg, bmpsc, init_gauge_seq) 455 | !isempty(init_update_seq) && partition_update!(bmpsc, PartitionEdge.(init_update_seq)) 456 | 457 | prev_cf, prev_pe = 0, nothing 458 | for i = 1:niters 459 | cf = 0 460 | if i == niters 461 | update_seq = vcat(update_seq, pes[1]) 462 | end 463 | for update_pe in update_seq 464 | updater!(alg, bmpsc, pg, prev_pe, update_pe) 465 | m = extracter(alg, bmpsc, update_pe) 466 | n = sqrt((m * dag(m))[]) 467 | cf += n 468 | if normalize 469 | m /= n 470 | end 471 | inserter!(alg, bmpsc, update_pe, m) 472 | prev_pe = update_pe 473 | end 474 | epsilon = abs(cf - prev_cf) / length(update_seq) 475 | !isnothing(tolerance) && epsilon < tolerance && break 476 | prev_cf = cf 477 | end 478 | delete_partition_messages!(bmpsc, first(partitionpair)) 479 | switch_messages!(bmpsc, partitionpair) 480 | return bmpsc 481 | end 482 | 483 | function prev_partitionpair(bmpsc::BoundaryMPSCache, partitionpair::Pair) 484 | pppg = partitioned_graph(ppg(bmpsc)) 485 | vns = neighbors(pppg, first(partitionpair)) 486 | length(vns) == 1 && return nothing 487 | 488 | @assert length(vns) == 2 489 | v1, v2 = first(vns), last(vns) 490 | last(partitionpair) == v1 && return v2 => first(partitionpair) 491 | last(partitionpair) == v2 && return v1 => first(partitionpair) 492 | end 493 | 494 | function generic_apply(O::MPO, M::MPS; kwargs...) 495 | is_simple_mpo = (length(O) == length(M) && all([length(ITensors.siteinds(O, i)) == 2 for i in 1:length(O)])) 496 | is_simple_mpo && return ITensorMPS.apply(O, M; kwargs...) 
497 | 498 | O_tensors = ITensor[] 499 | for i in 1:length(O) 500 | m_ind = filter(j -> !isempty(ITensors.commoninds(O[i], M[j])), [j for j in 1:length(M)]) 501 | if isempty(m_ind) 502 | push!(O_tensors, O[i]) 503 | else 504 | m_ind = only(m_ind) 505 | push!(O_tensors, O[i] * M[m_ind]) 506 | end 507 | end 508 | O = ITensorNetwork([i for i in 1:length(O_tensors)], O_tensors) 509 | O = ITensorNetworks.combine_linkinds(O) 510 | O = ITensorMPS.MPS([O[v] for v in vertices(O)]) 511 | O = merge_internal_tensors(O) 512 | return truncate(O; kwargs...) 513 | end 514 | 515 | #Update all the message tensors on an interpartition via the ITensorMPS apply function 516 | function ITensorNetworks.update( 517 | alg::Algorithm"ITensorMPS", 518 | bmpsc::BoundaryMPSCache, 519 | partitionpair::Pair; 520 | cutoff::Number = _default_boundarymps_update_cutoff, 521 | maxdim::Int = maximum_virtual_dimension(bmpsc), 522 | kwargs... 523 | ) 524 | bmpsc = copy(bmpsc) 525 | prev_pp = prev_partitionpair(bmpsc, partitionpair) 526 | O = ITensorMPS.MPO(bmpsc, first(partitionpair)) 527 | O = ITensorMPS.truncate(O; cutoff, maxdim) 528 | isnothing(prev_pp) && return set_interpartition_message!(bmpsc, merge_internal_tensors(O), partitionpair) 529 | 530 | M = ITensorMPS.MPS(bmpsc, prev_pp) 531 | M_out = generic_apply(O, M; cutoff, maxdim) 532 | return set_interpartition_message!(bmpsc, M_out, partitionpair) 533 | end 534 | 535 | #Environment support, assume all vertices live in the same partition for now 536 | function ITensorNetworks.environment(bmpsc::BoundaryMPSCache, verts::Vector; kwargs...) 537 | vs = parent.((partitionvertices(bp_cache(bmpsc), verts))) 538 | partition = only(planargraph_partitions(bmpsc, parent.(partitionvertices(bmpsc, verts)))) 539 | pg = partition_graph(bmpsc, partition) 540 | update_seq = post_order_dfs_edges(pg,first(vs)) 541 | bmpsc = partition_update(bmpsc, PartitionEdge.(update_seq)) 542 | return environment(bp_cache(bmpsc), verts; kwargs...) 
543 | end 544 | 545 | function ITensorNetworks.region_scalar(bmpsc::BoundaryMPSCache, partition) 546 | pg = partition_graph(bmpsc, partition) 547 | v = first(center(pg)) 548 | update_seq = post_order_dfs_edges(pg,v) 549 | bmpsc = partition_update(bmpsc, PartitionEdge.(update_seq)) 550 | return region_scalar(bp_cache(bmpsc), PartitionVertex(v)) 551 | end 552 | 553 | function ITensorNetworks.region_scalar(bmpsc::BoundaryMPSCache, verts::Vector) 554 | partitions = planargraph_partitions(bmpsc, parent.(partitionvertices(bmpsc, verts))) 555 | if length(partitions) == 1 556 | return region_scalar(bmpsc, only(partitions)) 557 | end 558 | error("Contractions involving more than 1 partition not currently supported") 559 | end 560 | 561 | function ITensorNetworks.region_scalar(bmpsc::BoundaryMPSCache, partitionpair::Pair) 562 | pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 563 | out = ITensor(one(Bool)) 564 | for pe in pes 565 | out = (out * (only(message(bmpsc, pe)))) * only(message(bmpsc, reverse(pe))) 566 | end 567 | return out[] 568 | end 569 | 570 | function add_partitionedges(pg::PartitionedGraph, pes::Vector{<:PartitionEdge}) 571 | g = partitioned_graph(pg) 572 | g = add_edges(g, parent.(pes)) 573 | return PartitionedGraph( 574 | unpartitioned_graph(pg), 575 | g, 576 | partitioned_vertices(pg), 577 | which_partition(pg), 578 | ) 579 | end 580 | 581 | function add_partitionedges(bpc::BeliefPropagationCache, pes::Vector{<:PartitionEdge}) 582 | pg = add_partitionedges(partitioned_tensornetwork(bpc), pes) 583 | return BeliefPropagationCache(pg, messages(bpc)) 584 | end 585 | 586 | #Add partition edges necessary to connect up all vertices in a partition in the planar graph created by the sort function 587 | function insert_pseudo_planar_edges( 588 | bpc::BeliefPropagationCache; 589 | grouping_function = v -> first(v), 590 | ) 591 | pg = partitioned_graph(bpc) 592 | partitions = unique(grouping_function.(collect(vertices(pg)))) 593 | pseudo_edges = PartitionEdge[] 594 | for p in partitions 595 | vs = sort(filter(v -> grouping_function(v) == p, collect(vertices(pg)))) 596 | for i = 1:(length(vs)-1) 597 | if vs[i] ∉ neighbors(pg, vs[i+1]) 598 | push!(pseudo_edges, PartitionEdge(NamedEdge(vs[i] => vs[i+1]))) 599 | end 600 | end 601 | end 602 | return add_partitionedges(bpc, pseudo_edges) 603 | end 604 | 605 | pair(pe::PartitionEdge) = parent(src(pe)) => parent(dst(pe)) 606 | 607 | function delete_partition_messages!(bmpsc::BoundaryMPSCache, partition) 608 | pg = partition_graph(bmpsc, partition) 609 | pes = PartitionEdge.(edges(pg)) 610 | pes = vcat(pes, reverse.(pes)) 611 | return delete_messages!(bmpsc, pes) 612 | end 613 | 614 | function delete_partitionpair_messages!(bmpsc::BoundaryMPSCache, partitionpair::Pair) 615 | pes = planargraph_sorted_partitionedges(bmpsc, partitionpair) 616 | return delete_messages!(bmpsc, pes) 617 | end -------------------------------------------------------------------------------- /src/Backend/loopcorrection.jl: -------------------------------------------------------------------------------- 1 | """ 2 | ITensors.scalar(alg::Algorithm"loopcorrections", bp_cache::AbstractBeliefPropagationCache; max_configuration_size::Int64) 3 | 4 | Compute the contraction of the tensor network in the bp cache with loop corrections, up to configurations of a specific size 5 | """ 6 | function ITensors.scalar( 7 | alg::Algorithm"loopcorrections", 8 | bp_cache::AbstractBeliefPropagationCache; 9 | max_configuration_size::Int64, 10 | ) 11 | zbp = scalar(bp_cache; alg = "bp") 
12 | bp_cache = rescale(bp_cache) 13 | #Count the cycles using NamedGraphs 14 | egs = 15 | edgeinduced_subgraphs_no_leaves(partitioned_graph(bp_cache), max_configuration_size) 16 | isempty(egs) && return zbp 17 | ws = weights(bp_cache, egs) 18 | return zbp*(1 + sum(ws)) 19 | end 20 | 21 | #Function for allowing ITensorNetwork scalar() and inner() to work with alg = "loopcorrections" 22 | function ITensors.scalar( 23 | alg::Algorithm"loopcorrections", 24 | tn::AbstractITensorNetwork; 25 | max_configuration_size::Int64, 26 | (cache!) = nothing, 27 | cache_construction_kwargs = default_cache_construction_kwargs(Algorithm("bp"), tn), 28 | update_cache = isnothing(cache!), 29 | cache_update_kwargs = default_nonposdef_bp_update_kwargs(; cache_is_tree = is_tree(tn)), 30 | ) 31 | if isnothing(cache!) 32 | cache! = Ref(cache(Algorithm("bp"), tn; cache_construction_kwargs...)) 33 | end 34 | 35 | if update_cache 36 | cache![] = update(cache![]; cache_update_kwargs...) 37 | end 38 | 39 | return scalar(cache![]; alg, max_configuration_size) 40 | end 41 | 42 | #Transform the indices in the given subgraph of the tensornetwork so that antiprojectors can be inserted without duplicate indices appearing 43 | function sim_edgeinduced_subgraph(bpc::BeliefPropagationCache, eg) 44 | bpc = copy(bpc) 45 | pvs = PartitionVertex.(collect(vertices(eg))) 46 | pes = 47 | unique(reduce(vcat, [boundary_partitionedges(bpc, [pv]; dir = :out) for pv in pvs])) 48 | updated_pes = PartitionEdge[] 49 | antiprojectors = ITensor[] 50 | for pe in pes 51 | if reverse(pe) ∉ updated_pes 52 | mer = only(message(bpc, reverse(pe))) 53 | linds = inds(mer) 54 | linds_sim = sim.(linds) 55 | mer = replaceinds(mer, linds, linds_sim) 56 | ms = messages(bpc) 57 | set!(ms, reverse(pe), ITensor[mer]) 58 | verts = vertices(bpc, src(pe)) 59 | for v in verts 60 | t = bpc[v] 61 | t_inds = filter(i -> i ∈ linds, inds(t)) 62 | if !isempty(t_inds) 63 | t_ind = only(t_inds) 64 | t_ind_pos = findfirst(x -> x == t_ind, linds) 65 | t = replaceind(t, t_ind, linds_sim[t_ind_pos]) 66 | setindex_preserve_graph!(bpc, t, v) 67 | end 68 | end 69 | push!(updated_pes, pe) 70 | 71 | if pe ∈ PartitionEdge.(edges(eg)) || reverse(pe) ∈ PartitionEdge.(edges(eg)) 72 | row_inds, col_inds = linds, linds_sim 73 | row_combiner, col_combiner = combiner(row_inds), combiner(col_inds) 74 | ap = 75 | denseblocks(delta(combinedind(col_combiner), dag(combinedind(row_combiner)))) 76 | ap = ap * row_combiner * dag(col_combiner) 77 | ap = ap - only(message(bpc, pe)) * mer 78 | push!(antiprojectors, ap) 79 | end 80 | end 81 | end 82 | return bpc, antiprojectors 83 | end 84 | 85 | #Get the all edges incident to the region specified by the vector of edges passed 86 | function ITensorNetworks.boundary_partitionedges( 87 | bpc::BeliefPropagationCache, 88 | pes::Vector{<:PartitionEdge}, 89 | ) 90 | pvs = unique(vcat(src.(pes), dst.(pes))) 91 | bpes = PartitionEdge[] 92 | for pv in pvs 93 | incoming_es = boundary_partitionedges(bpc, pv; dir = :in) 94 | incoming_es = filter(e -> e ∉ pes && reverse(e) ∉ pes, incoming_es) 95 | append!(bpes, incoming_es) 96 | end 97 | return bpes 98 | end 99 | 100 | #Compute the contraction of the bp configuration specified by the edge induced subgraph eg 101 | function weight(bpc::BeliefPropagationCache, eg) 102 | pvs = PartitionVertex.(collect(vertices(eg))) 103 | pes = PartitionEdge.(collect(edges(eg))) 104 | bpc, antiprojectors = sim_edgeinduced_subgraph(bpc, eg) 105 | incoming_ms = 106 | ITensor[only(message(bpc, pe)) for pe in boundary_partitionedges(bpc, 
pes)]
107 | local_tensors = factors(bpc, vertices(bpc, pvs))
108 | ts = [incoming_ms; local_tensors; antiprojectors]
109 | seq = any(hasqns.(ts)) ? contraction_sequence(ts; alg = "optimal") : contraction_sequence(ts; alg = "einexpr", optimizer = Greedy())
110 | return contract(ts; sequence = seq)[]
111 | end
112 | 
113 | #Vectorized version of weight
114 | function weights(bpc, egs, args...)
115 | return [weight(bpc, eg, args...) for eg in egs]
116 | end
117 | 
-------------------------------------------------------------------------------- /src/TensorNetworkQuantumSimulator.jl: --------------------------------------------------------------------------------
1 | module TensorNetworkQuantumSimulator
2 | 
3 | 
4 | include("imports.jl")
5 | include("Backend/beliefpropagation.jl")
6 | include("Backend/loopcorrection.jl")
7 | include("Backend/boundarymps.jl")
8 | 
9 | # helpful union types for the caches that we use
10 | const CacheNetwork = Union{AbstractBeliefPropagationCache,BoundaryMPSCache}
11 | const TensorNetwork = Union{AbstractITensorNetwork,CacheNetwork}
12 | 
13 | 
14 | include("graph_ops.jl")
15 | include("utils.jl")
16 | include("constructors.jl")
17 | include("gates.jl")
18 | include("apply.jl")
19 | include("expect.jl")
20 | include("sample.jl")
21 | 
22 | 
23 | export
24 | updatecache,
25 | build_bp_cache,
26 | vertices,
27 | edges,
28 | apply,
29 | truncate,
30 | expect,
31 | expect_boundarymps,
32 | expect_loopcorrect,
33 | fidelity,
34 | fidelity_boundarymps,
35 | fidelity_loopcorrect,
36 | make_hermitian,
37 | build_boundarymps_cache,
38 | maxlinkdim,
39 | siteinds,
40 | edge_color,
41 | zerostate,
42 | getnqubits,
43 | named_grid,
44 | sample
45 | 
46 | end
47 | 
-------------------------------------------------------------------------------- /src/apply.jl: --------------------------------------------------------------------------------
1 | const _default_apply_kwargs =
2 | (maxdim = Inf, cutoff = 1e-10, normalize = true)
3 | 
4 | """
5 | ITensors.apply(circuit::AbstractVector, ψ::ITensorNetwork; bp_update_kwargs = default_posdef_bp_update_kwargs(), apply_kwargs = (; maxdim, cutoff))
6 | 
7 | Apply a circuit to a tensor network, with the BP cache built and updated internally.
8 | """
9 | function ITensors.apply(
10 | circuit::AbstractVector,
11 | ψ::ITensorNetwork;
12 | bp_update_kwargs = default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
13 | kwargs...,
14 | )
15 | ψψ = build_bp_cache(ψ; cache_update_kwargs = bp_update_kwargs)
16 | ψ, ψψ, truncation_errors = apply(circuit, ψ, ψψ; kwargs...)
17 | return ψ, truncation_errors
18 | end
19 | 
20 | #Convert a circuit in (gate_str, sites_to_act_on, params) form to ITensors and then apply it
21 | function ITensors.apply(
22 | circuit::AbstractVector,
23 | ψ::ITensorNetwork,
24 | ψψ::BeliefPropagationCache;
25 | kwargs...,
26 | )
27 | circuit = toitensor(circuit, siteinds(ψ))
28 | return apply(circuit, ψ, ψψ; kwargs...)
29 | end
30 | 
31 | """
32 | ITensors.apply(circuit::AbstractVector{<:ITensor}, ψ::ITensorNetwork, ψψ::BeliefPropagationCache; apply_kwargs = _default_apply_kwargs, bp_update_kwargs = default_posdef_bp_update_kwargs(), update_cache = true, verbose = false)
33 | 
34 | Apply a sequence of gate ITensors to the network together with its corresponding cache. `apply_kwargs` should be a NamedTuple containing the desired `maxdim` and `cutoff`. The cache is updated whenever a gate overlapping previously affected sites is encountered.
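For example (an illustrative sketch, not a test: it assumes `ψ` is an `ITensorNetwork`, `ψψ = build_bp_cache(ψ)` its norm cache, and `circuit` a vector of gate ITensors on matching site indices; the `maxdim` and `cutoff` values are arbitrary):

    ψ, ψψ, errors = apply(circuit, ψ, ψψ; apply_kwargs = (; maxdim = 8, cutoff = 1e-12), verbose = true)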
35 | """
36 | function ITensors.apply(
37 | circuit::AbstractVector{<:ITensor},
38 | ψ::ITensorNetwork,
39 | ψψ::BeliefPropagationCache;
40 | apply_kwargs = _default_apply_kwargs,
41 | bp_update_kwargs = default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
42 | update_cache = true,
43 | verbose = false,
44 | )
45 | 
46 | ψ, ψψ = copy(ψ), copy(ψψ)
47 | # merge all the kwargs with the defaults
48 | apply_kwargs = merge(_default_apply_kwargs, apply_kwargs)
49 | 
50 | # we keep track of the vertices that have been acted on by 2-qubit gates
51 | # only they increase the counter
52 | # this is the set that keeps track.
53 | affected_indices = Set{Index{Int64}}()
54 | truncation_errors = zeros((length(circuit)))
55 | 
56 | # If the circuit is applied in the Heisenberg picture, the circuit needs to already be reversed
57 | for (ii, gate) in enumerate(circuit)
58 | 
59 | # check if the gate is a 2-qubit gate and whether it affects the counter
60 | # we currently only increment the counter if the gate affects vertices that have already been affected
61 | cache_update_required = _cacheupdate_check(affected_indices, gate)
62 | 
63 | # update the BP cache
64 | if update_cache && cache_update_required
65 | if verbose
66 | println("Updating BP cache")
67 | end
68 | 
69 | t = @timed ψψ = updatecache(ψψ; bp_update_kwargs...)
70 | 
71 | affected_indices = Set{Index{Int64}}()
72 | 
73 | if verbose
74 | println("Done in $(t.time) secs")
75 | end
76 | 
77 | end
78 | 
79 | # actually apply the gate
80 | t = @timed ψ, ψψ, truncation_errors[ii] = apply!(gate, ψ, ψψ; apply_kwargs)
81 | affected_indices = union(affected_indices, Set(inds(gate)))
82 | 
83 | if verbose
84 | println(
85 | "Gate $ii: Simulation time: $(t.time) secs, Max χ: $(maxlinkdim(ψ)), Error: $(truncation_errors[ii])",
86 | )
87 | end
88 | 
89 | end
90 | 
91 | ψψ = updatecache(ψψ; bp_update_kwargs...)
92 | 
93 | return ψ, ψψ, truncation_errors
94 | end
95 | 
96 | #Apply function for a single gate
97 | function ITensors.apply(
98 | gate::Tuple,
99 | ψ::ITensorNetwork;
100 | apply_kwargs = _default_apply_kwargs,
101 | bp_update_kwargs = default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
102 | )
103 | ψ, ψψ, truncation_error =
104 | apply(gate, ψ, build_bp_cache(ψ; cache_update_kwargs = bp_update_kwargs); apply_kwargs)
105 | # because the cache is not passed, we return the state only
106 | return ψ, truncation_error
107 | end
108 | 
109 | #Apply function for a single gate
110 | function ITensors.apply(
111 | gate::Tuple,
112 | ψ::ITensorNetwork,
113 | ψψ::BeliefPropagationCache;
114 | apply_kwargs = _default_apply_kwargs,
115 | bp_update_kwargs = default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ))
116 | )
117 | ψ, ψψ, truncation_error = apply(
118 | toitensor(gate, siteinds(ψ)),
119 | ψ,
120 | ψψ;
121 | apply_kwargs,
122 | )
123 | ψψ = updatecache(ψψ; bp_update_kwargs...)
124 | return ψ, ψψ, truncation_error
125 | end
126 | 
127 | function ITensors.apply(gate::ITensor,
128 | ψ::AbstractITensorNetwork,
129 | ψψ::BeliefPropagationCache;
130 | apply_kwargs = _default_apply_kwargs,
131 | )
132 | ψ, ψψ = copy(ψ), copy(ψψ)
133 | return apply!(gate, ψ, ψψ; apply_kwargs)
134 | end
135 | 
136 | #Apply function for a single gate. All apply functions will pass through here
137 | function apply!(
138 | gate::ITensor,
139 | ψ::AbstractITensorNetwork,
140 | ψψ::BeliefPropagationCache;
141 | apply_kwargs = _default_apply_kwargs,
142 | )
143 | # TODO: document each line
144 | 
145 | vs = neighbor_vertices(ψ, gate)
146 | envs = length(vs) == 1 ?
nothing : incoming_messages(ψψ, PartitionVertex.(vs)) 147 | 148 | err = 0.0 149 | s_values = ITensor(1.0) 150 | function callback(; singular_values, truncation_error) 151 | err = truncation_error 152 | s_values = singular_values 153 | return nothing 154 | end 155 | 156 | # this is the only call to a lower-level apply that we currently do. 157 | ψ = ITensorNetworks.apply(gate, ψ; envs, callback, apply_kwargs...) 158 | 159 | if length(vs) == 2 160 | v1, v2 = vs 161 | setindex_preserve_graph!(ψ, noprime(ψ[v1]), v1) 162 | setindex_preserve_graph!(ψ, noprime(ψ[v2]), v2) 163 | pe = partitionedge(ψψ, (v1, "bra") => (v2, "bra")) 164 | ind2 = commonind(s_values, ψ[v1]) 165 | δuv = dag(copy(s_values)) 166 | δuv = replaceind(δuv, ind2, ind2') 167 | map_diag!(sign, δuv, δuv) 168 | s_values = denseblocks(s_values) * denseblocks(δuv) 169 | set_message!(ψψ, pe, dag.(ITensor[s_values])) 170 | set_message!(ψψ, reverse(pe), ITensor[s_values]) 171 | end 172 | for v in vs 173 | setindex_preserve_graph!(ψψ, ψ[v], (v, "ket")) 174 | setindex_preserve_graph!(ψψ, prime(dag(ψ[v])), (v, "bra")) 175 | end 176 | return ψ, ψψ, err 177 | end 178 | 179 | #Checker for whether the cache needs updating (overlapping gate encountered) 180 | function _cacheupdate_check(affected_indices::Set, gate::ITensor) 181 | indices = inds(gate) 182 | 183 | # check if we have a two-site gate and any of the qinds are in the affected_indices. If so update cache 184 | length(indices) == 4 && any(ind in affected_indices for ind in indices) && return true 185 | return false 186 | end 187 | -------------------------------------------------------------------------------- /src/constructors.jl: -------------------------------------------------------------------------------- 1 | const stringtointmap = Dict("I" => 1, "X" => 2, "Y" => 3, "Z" => 4) 2 | 3 | """ 4 | zerostate(g::NamedGraph; pauli_basis = false) 5 | 6 | Tensor network for vacuum state on given graph, i.e all spins up 7 | """ 8 | function zerostate(g::NamedGraph; pauli_basis = false) 9 | if !pauli_basis 10 | # the most common case 11 | return zerostate(siteinds("Qubit", g)) 12 | else 13 | return zerostate(siteinds(4, g)) 14 | end 15 | end 16 | 17 | """ 18 | zerostate(indices::IndsNetwork) 19 | 20 | Tensor network for vacuum state on given indsnetwork 21 | """ 22 | function zerostate(indices::IndsNetwork) 23 | inds = reduce(vcat, [indices[v] for v in vertices(indices)]) 24 | dims = dim.(inds) 25 | if all(d -> d== 2, dims) 26 | return ITensorNetwork(v -> [1.0, 0.0], indices) 27 | elseif all(d -> d== 4, dims) 28 | return ITensorNetwork(v -> [1.0, 0.0, 0.0, 1.0], indices) 29 | else 30 | throw(ArgumentError("Only physical dimensions 2 and 4 are supported.")) 31 | end 32 | end 33 | 34 | """ 35 | topaulitensornetwork(op, g::NamedGraph) 36 | 37 | Tensor network (in Heisenberg picture) for given pauli string on given graph 38 | """ 39 | function topaulitensornetwork(op, g::NamedGraph) 40 | return topaulitensornetwork(op, siteinds(4, g)) 41 | end 42 | 43 | """ 44 | topaulitensornetwork(op, tninds::IndsNetwork) 45 | 46 | Tensor network (in Heisenberg picture) for given pauli string on given IndsNetwork 47 | """ 48 | function topaulitensornetwork(op, tninds::IndsNetwork) 49 | nq = getnqubits(tninds) 50 | 51 | op_string = op[1] # could be "XX", "Y", "Z" 52 | op_inds = op[2] # could be [1, 2], [1], [2] 53 | if length(op) == 2 54 | op_coeff = 1.0 55 | elseif length(op) == 3 56 | op_coeff = op[3] 57 | else 58 | throw(ArgumentError("Wrong Operator format")) 59 | end 60 | 61 | # verify that the operator is 
acting on the correct number of qubits 62 | @assert length(op_inds) == length(op_string) "Pauli string $(op_string) does not match the number of indices $(op_inds)." 63 | 64 | # verify that all op_inds are actually in tninds 65 | all_inds = vertices(tninds) 66 | for ind in op_inds 67 | if !(ind in all_inds) 68 | throw( 69 | ArgumentError( 70 | "Index $ind of the operator is not in the IndsNetwork $tninds.", 71 | ), 72 | ) 73 | end 74 | end 75 | 76 | function map_f(ind) 77 | pos = findfirst(i -> i == ind, op_inds) 78 | if isnothing(pos) 79 | #the identity case 80 | return [1.0, 0.0, 0.0, 0.0] 81 | end 82 | 83 | # only give the first element the op coefficient 84 | if pos == 1 85 | coeff = op_coeff 86 | else 87 | coeff = 1.0 88 | end 89 | 90 | # pos should now be "I", "X", "Y", or "Z" 91 | pauli = string(op_string[pos]) 92 | one_pos = stringtointmap[pauli] 93 | vec = zeros(4) 94 | coeff 95 | vec[one_pos] = coeff 96 | return vec 97 | end 98 | 99 | 100 | return ITensorNetwork(map_f, tninds) 101 | end 102 | 103 | """ 104 | identitytensornetwork(tninds::IndsNetwork) 105 | 106 | Tensor network (in Heisenberg picture) for identity matrix on given IndsNetwork 107 | """ 108 | function identitytensornetwork(tninds::IndsNetwork) 109 | return topaulitensornetwork(("I", [first(vertices(tninds))]), tninds) 110 | end -------------------------------------------------------------------------------- /src/expect.jl: -------------------------------------------------------------------------------- 1 | default_expect_alg() = "bp" 2 | 3 | """ 4 | ITensorNetworks.expect(alg::Algorithm"exact", ψ::AbstractITensorNetwork, observables::Vector{<:Tuple}, contraction_sequence_kwargs = (; alg = "einexpr", optimizer = Greedy())) 5 | 6 | Function for computing expectation values for any vector of pauli strings via exact contraction. 7 | This will be infeasible for larger networks with high bond dimension. 8 | """ 9 | function ITensorNetworks.expect( 10 | alg::Algorithm"exact", 11 | ψ::AbstractITensorNetwork, 12 | observables::Vector{<:Tuple}; 13 | contraction_sequence_kwargs=(; alg="einexpr", optimizer=Greedy()), 14 | ) 15 | 16 | s = siteinds(ψ) 17 | ψIψ = QuadraticFormNetwork(ψ) 18 | 19 | out = [] 20 | for obs in observables 21 | op_strings, vs, coeff = collectobservable(obs) 22 | if iszero(coeff) 23 | push!(out, 0.0) 24 | continue 25 | end 26 | ψOψ = copy(ψIψ) 27 | for (op_string, v) in zip(op_strings, vs) 28 | ψOψ[(v, "operator")] = ITensors.op(op_string, s[v]) 29 | end 30 | 31 | numer_seq = contraction_sequence(ψOψ; contraction_sequence_kwargs...) 32 | denom_seq = contraction_sequence(ψIψ; contraction_sequence_kwargs...) 33 | numer, denom = 34 | contract(ψOψ; sequence=numer_seq)[], contract(ψIψ; sequence=denom_seq)[] 35 | push!(out, numer / denom) 36 | end 37 | return out 38 | end 39 | 40 | function ITensorNetworks.expect(alg::Algorithm"exact", 41 | ψ::AbstractITensorNetwork, 42 | observable::Tuple; 43 | kwargs... 44 | ) 45 | return only(expect(alg, ψ, [observable]; kwargs...)) 46 | end 47 | 48 | 49 | """ 50 | ITensorNetworks.expect(alg::Algorithm, ψ::AbstractITensorNetwork, observables::Vector{<:Tuple}; (cache!) = nothing, 51 | update_cache = isnothing(cache!), cache_update_kwargs = alg == Algorithm("bp") ? default_posdef_bp_update_kwargs() : ITensorNetworks.default_cache_update_kwargs(alg), 52 | cache_construction_kwargs = default_cache_construction_kwargs(alg, QuadraticFormNetwork(ψ), ), kwargs...) 
53 | 54 | Function for computing expectation values for any vector of pauli strings via different cached based algorithms. 55 | Support: alg = "bp" and alg = "boundarymps". The latter takes cache_construction_kwargs = (; message_rank::Int) as a constructor. 56 | """ 57 | function ITensorNetworks.expect( 58 | alg::Algorithm, 59 | ψ::AbstractITensorNetwork, 60 | observables::Vector{<:Tuple}; 61 | (cache!)=nothing, 62 | update_cache=isnothing(cache!), 63 | cache_update_kwargs=alg == Algorithm("bp") ? default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)) : ITensorNetworks.default_cache_update_kwargs(alg), 64 | cache_construction_kwargs= (;), 65 | message_rank = nothing, 66 | kwargs..., 67 | ) 68 | 69 | if alg == Algorithm("boundarymps") && !isnothing(message_rank) 70 | cache_construction_kwargs = merge(cache_construction_kwargs, (; message_rank)) 71 | end 72 | if isnothing(cache!) 73 | ψIψ = QuadraticFormNetwork(ψ) 74 | cache! = Ref(cache(alg, ψIψ; cache_construction_kwargs...)) 75 | end 76 | 77 | if update_cache 78 | cache![] = update(cache![]; cache_update_kwargs...) 79 | end 80 | 81 | return expect(cache![], observables; alg, kwargs...) 82 | end 83 | 84 | # Here we turn a single tuple observable into a vector of tuples -- the expected format in ITensorNetworks 85 | function ITensorNetworks.expect( 86 | alg::Algorithm, 87 | ψ::AbstractITensorNetwork, 88 | observable::Tuple; 89 | kwargs..., 90 | ) 91 | return only(expect(alg, ψ, [observable]; kwargs...)) 92 | end 93 | 94 | 95 | """ 96 | expect(ψ::AbstractITensorNetwork, obs; alg="bp", kwargs...) 97 | 98 | Calculate the expectation value of an `ITensorNetwork` `ψ` with an observable or vector of observables `obs` using the desired algorithm `alg`. 99 | Currently supported: alg = "bp", "boundarymps" or "exact". 100 | "bp" will be imprecise for networks with strong loop correlations, but is otherwise fast. 101 | "boundarymps" is more precise and slower, and can only be used if the network is planar with coordinate vertex labels like (1, 1), (1, 2), etc. 102 | "exact" will be infeasible for larger networks with high bond dimension. 103 | """ 104 | function ITensorNetworks.expect( 105 | ψ::AbstractITensorNetwork, 106 | obs::Union{Tuple, Vector{<:Tuple}}; 107 | alg=default_expect_alg(), 108 | kwargs..., 109 | ) 110 | return expect(Algorithm(alg), ψ, obs; kwargs...) 111 | end 112 | 113 | """ 114 | expect(ψIψ::AbstractBeliefPropagationCache, obs::Tuple; kwargs...) 115 | 116 | Foundational expectation function for a given (norm) cache network with an observable. 117 | This can be a `BeliefPropagationCache` or a `BoundaryMPSCache`. 118 | Valid observables are tuples of the form `(op, qinds)` or `(op, qinds, coeff)`, 119 | where `op` is a string or vector of strings, `qinds` is a vector of indices, and `coeff` is a coefficient (default 1.0). 120 | The `kwargs` are not used. 121 | """ 122 | function ITensorNetworks.expect( 123 | ψIψ::AbstractBeliefPropagationCache, 124 | obs::Tuple; 125 | kwargs... 
126 | )
127 | 
128 | op_strings, vs, coeff = collectobservable(obs)
129 | iszero(coeff) && return 0.0
130 | 
131 | ψOψ = insert_observable(ψIψ, obs)
132 | 
133 | numerator = region_scalar(ψOψ, [(v, "ket") for v in vs])
134 | denominator = region_scalar(ψIψ, [(v, "ket") for v in vs])
135 | 
136 | return coeff * numerator / denominator
137 | end
138 | 
139 | function ITensorNetworks.expect(
140 | ψIψ::AbstractBeliefPropagationCache,
141 | observables::Vector{<:Tuple};
142 | kwargs...,
143 | )
144 | return map(obs -> expect(ψIψ, obs; kwargs...), observables)
145 | end
146 | 
147 | """
148 | insert_observable(ψIψ::AbstractBeliefPropagationCache, obs)
149 | 
150 | Insert an observable O into ψIψ to create the cache containing ψOψ.
151 | Drops the coefficient of the observable in the third slot of the obs tuple.
152 | Example: obs = ("XX", [1, 2]) or obs = ("XX", [1, 2], 0.5) -> ("XX", [1, 2])
153 | """
154 | function insert_observable(ψIψ::AbstractBeliefPropagationCache, obs)
155 | op_strings, verts, _ = collectobservable(obs)
156 | 
157 | ψIψ_vs = [ψIψ[(v, "operator")] for v in verts]
158 | sinds =
159 | [commonind(ψIψ[(v, "ket")], ψIψ_vs[i]) for (i, v) in enumerate(verts)]
160 | operators = [ITensors.op(op_strings[i], sinds[i]) for i in eachindex(op_strings)]
161 | 
162 | ψOψ = update_factors(ψIψ, Dictionary([(v, "operator") for v in verts], operators))
163 | return ψOψ
164 | end
165 | 
166 | #Process an observable into a more readable form
167 | function collectobservable(obs::Tuple)
168 | # unpack
169 | op = obs[1]
170 | qinds = obs[2]
171 | coeff = length(obs) == 2 ? 1.0 : last(obs)
172 | 
173 | @assert !(op == "" && isempty(qinds))
174 | 
175 | op_vec = [string(o) for o in op]
176 | qinds_vec = _tovec(qinds)
177 | return op_vec, qinds_vec, coeff
178 | end
179 | 
180 | _tovec(qinds) = vec(collect(qinds))
181 | _tovec(qinds::NamedEdge) = [qinds.src, qinds.dst]
182 | 
-------------------------------------------------------------------------------- /src/gates.jl: --------------------------------------------------------------------------------
1 | # conversion of a tuple circuit to an ITensor circuit
2 | function toitensor(circuit, sinds::IndsNetwork)
3 | return [toitensor(gate, sinds) for gate in circuit]
4 | end
5 | 
6 | #Determine if a string represents a Pauli string
7 | function _ispaulistring(string::String)
8 | return all(s ∈ ['X', 'Y', 'Z', 'x', 'y', 'z'] for s in string)
9 | end
10 | 
11 | #Gates which take a single theta argument (rotation argument)
12 | function _takes_theta_argument(string::String)
13 | return string ∈ ["Rx", "Ry", "Rz", "CRx", "CRy", "CRz", "Rxxyy", "Rxxyyzz"]
14 | end
15 | 
16 | #Gates which take a single phi argument (rotation argument)
17 | function _takes_phi_argument(string::String)
18 | return string ∈ ["Rxx", "Ryy", "Rzz", "P", "CPHASE"]
19 | end
20 | 
21 | #Convert a gate to the corresponding ITensor
22 | function toitensor(gate::Tuple, sinds::IndsNetwork)
23 | 
24 | gate_symbol = gate[1]
25 | gate_inds = gate[2]
26 | # if it is a NamedEdge, we need to convert it to a tuple
27 | gate_inds = _ensuretuple(gate_inds)
28 | s_inds = [only(sinds[v]) for v in gate_inds]
29 | 
30 | all(map(sind -> dim(sind) == 4, s_inds)) &&
31 | return toitensor_heisenberg(gate_symbol, gate[3], s_inds)
32 | 
33 | if _ispaulistring(gate_symbol)
34 | gate =
35 | prod(ITensors.op(string(op), sind) for (op, sind) in zip(gate_symbol, s_inds))
36 | elseif length(gate) == 2
37 | gate = ITensors.op(gate_symbol, s_inds...)
38 | elseif _takes_theta_argument(gate_symbol) 39 | gate = ITensors.op(gate_symbol, s_inds...; θ = gate[3]) 40 | elseif _takes_phi_argument(gate_symbol) 41 | gate = ITensors.op(gate_symbol, s_inds...; ϕ = 0.5 * gate[3]) 42 | else 43 | throw(ArgumentError("Wrong gate format")) 44 | end 45 | 46 | return gate 47 | 48 | end 49 | 50 | 51 | """ 52 | paulirotationmatrix(generator, θ) 53 | """ 54 | function paulirotationmatrix(generator, θ) 55 | symbols = [Symbol(s) for s in generator] 56 | pauli_rot = PP.PauliRotation(symbols, 1:length(symbols)) 57 | return PP.tomatrix(pauli_rot, θ) 58 | end 59 | 60 | #Convert a gate that's in the Heisenberg picture to an ITensor for the Pauli Transfer Matrix 61 | function toitensor_heisenberg(generator, θ, indices) 62 | @assert first(generator) == 'R' 63 | generator = generator[2:length(generator)] 64 | @assert _ispaulistring(generator) 65 | generator = uppercase.(generator) 66 | U = paulirotationmatrix(generator, θ) 67 | U = PP.calculateptm(U, heisenberg = true) 68 | 69 | # check for physical dimension matching 70 | # TODO 71 | 72 | # define legs of the tensor 73 | legs = (indices..., [ind' for ind in indices]...) 74 | 75 | # create the ITensor 76 | return itensor(transpose(U), legs) 77 | end 78 | 79 | #Return itself as the type is already correct 80 | function toitensor(gate::ITensor, sinds::IndsNetwork) 81 | return gate 82 | end 83 | 84 | #Conversion of the gate indices to a tuple 85 | function _ensuretuple(gate_inds::Union{Tuple,AbstractArray}) 86 | return gate_inds 87 | end 88 | 89 | #Conversion of a NamedEdge to a tuple 90 | function _ensuretuple(gate_inds::NamedEdge) 91 | return (gate_inds.src, gate_inds.dst) 92 | end 93 | 94 | """ 95 | ITensors.op(::OpName"Rxxyy", ::SiteType"S=1/2"; θ::Float64) 96 | 97 | Gate for rotation by XXYY at a given angle 98 | """ 99 | function ITensors.op( 100 | ::OpName"Rxxyy", ::SiteType"S=1/2"; θ::Float64 101 | ) 102 | mat = zeros(ComplexF64, 4, 4) 103 | mat[1, 1] = 1 104 | mat[4, 4] = 1 105 | mat[2, 2] = cos(θ) 106 | mat[2, 3] = -1.0 * im * sin(θ) 107 | mat[3, 2] = -1.0 * im * sin(θ) 108 | mat[3, 3] = cos(θ) 109 | return mat 110 | end 111 | 112 | """ 113 | ITensors.op(::OpName"Rxxyyzz", ::SiteType"S=1/2"; θ::Float64) 114 | 115 | Gate for rotation by XXYYZZ at a given angle 116 | """ 117 | function ITensors.op( 118 | ::OpName"Rxxyyzz", ::SiteType"S=1/2"; θ::Float64 119 | ) 120 | a = exp( im * θ * 0.5) 121 | mat = zeros(ComplexF64, 4, 4) 122 | mat[1, 1] = conj(a) 123 | mat[2, 2] = cos(θ) * a 124 | mat[2, 3] = -1.0 * im * a * sin(θ) 125 | mat[3, 2] = -1.0 * im * a * sin(θ) 126 | mat[3, 3] = cos(θ) * a 127 | mat[4,4] = conj(a) 128 | return mat 129 | end 130 | -------------------------------------------------------------------------------- /src/graph_ops.jl: -------------------------------------------------------------------------------- 1 | """ 2 | heavy_hexagonal_lattice(nx::Int64, ny::Int64) 3 | 4 | Create heavy-hexagonal lattice geometry with nx columns of heavy-hexes and ny rows 5 | """ 6 | function heavy_hexagonal_lattice(nx::Int64, ny::Int64) 7 | g = named_hexagonal_lattice_graph(nx, ny) 8 | # create some space for inserting the new vertices 9 | g = rename_vertices(v -> (2 * first(v) - 1, 2 * last(v) - 1), g) 10 | for e in edges(g) 11 | vsrc, vdst = src(e), dst(e) 12 | v_new = ((first(vsrc) + first(vdst)) / 2, (last(vsrc) + last(vdst)) / 2) 13 | g = add_vertex(g, v_new) 14 | g = rem_edge(g, e) 15 | g = add_edges(g, [NamedEdge(vsrc => v_new), NamedEdge(v_new => vdst)]) 16 | end 17 | return g 18 | end 19 | 20 | """ 21 | 
lieb_lattice(nx::Int64, ny::Int64; periodic = false)
22 | 
23 | Create Lieb lattice geometry with nx columns of decorated squares and ny rows
24 | """
25 | function lieb_lattice(nx::Int64, ny::Int64; periodic = false)
26 | @assert (!periodic && isodd(nx) && isodd(ny)) || (periodic && iseven(nx) && iseven(ny))
27 | g = named_grid((nx, ny); periodic)
28 | for v in vertices(g)
29 | if iseven(first(v)) && iseven(last(v))
30 | g = rem_vertex(g, v)
31 | end
32 | end
33 | return g
34 | 
35 | end
36 | 
37 | function topologytograph(topology)
38 | # TODO: adapt this to named graphs with non-integer labels
39 | # find number of vertices
40 | nq = maximum(maximum.(topology))
41 | adjm = zeros(Int, nq, nq)
42 | for (ii, jj) in topology
43 | adjm[ii, jj] = adjm[jj, ii] = 1
44 | end
45 | return NamedGraph(SimpleGraph(adjm))
46 | end
47 | 
48 | 
49 | function graphtotopology(g)
50 | return [[edge.src, edge.dst] for edge in edges(g)]
51 | end
-------------------------------------------------------------------------------- /src/imports.jl: --------------------------------------------------------------------------------
1 | using LinearAlgebra
2 | using StatsBase
3 | 
4 | using Dictionaries: Dictionary, set!
5 | 
6 | using Graphs: simplecycles_limited_length, has_edge, SimpleGraph, center, steiner_tree, is_tree
7 | 
8 | using SimpleGraphConverter
9 | using SimpleGraphAlgorithms: edge_color
10 | 
11 | using NamedGraphs
12 | using NamedGraphs:
13 | AbstractNamedGraph,
14 | AbstractGraph,
15 | position_graph,
16 | rename_vertices,
17 | edges,
18 | vertextype,
19 | add_vertex!,
20 | neighbors,
21 | edgeinduced_subgraphs_no_leaves,
22 | unique_cyclesubgraphs_limited_length
23 | using NamedGraphs.GraphsExtensions:
24 | src,
25 | dst,
26 | subgraph,
27 | is_connected,
28 | degree,
29 | add_edge,
30 | a_star,
31 | add_edge!,
32 | edgetype,
33 | leaf_vertices,
34 | post_order_dfs_edges,
35 | decorate_graph_edges,
36 | add_vertex!,
37 | add_vertex,
38 | rem_edge,
39 | rem_vertex,
40 | add_edges,
41 | rem_vertices
42 | 
43 | using NamedGraphs.PartitionedGraphs:
44 | PartitionedGraphs,
45 | partitioned_vertices,
46 | partitionedges,
47 | unpartitioned_graph,
48 | which_partition
49 | 
50 | using NamedGraphs.NamedGraphGenerators: named_grid, named_hexagonal_lattice_graph
51 | 
52 | using TensorOperations
53 | 
54 | using ITensors
55 | using ITensors: Index, ITensor, inner, itensor, apply, map_diag!, @Algorithm_str, scalar, @OpName_str, @SiteType_str
56 | using ITensorMPS
57 | 
58 | using ITensorNetworks
59 | using ITensorNetworks:
60 | AbstractBeliefPropagationCache,
61 | AbstractFormNetwork,
62 | AbstractITensorNetwork,
63 | AbstractIndsNetwork,
64 | Indices,
65 | BeliefPropagationCache,
66 | QuadraticFormNetwork,
67 | PartitionedGraph,
68 | IndsNetwork,
69 | ITensorNetwork,
70 | inner_network,
71 | PartitionVertex,
72 | PartitionEdge,
73 | Algorithm,
74 | VidalITensorNetwork,
75 | expect,
76 | default_cache_construction_kwargs,
77 | cache,
78 | norm_sqr_network,
79 | update,
80 | updated_message,
81 | set_message,
82 | set_message!,
83 | set_messages!,
84 | siteinds,
85 | vertices,
86 | dim,
87 | apply,
88 | neighbor_vertices,
89 | environment,
90 | incoming_messages,
91 | partitionedge,
92 | messages,
93 | update_factor,
94 | logscalar,
95 | partitioned_tensornetwork,
96 | tensornetwork,
97 | operator_vertex,
98 | ket_vertex,
99 | update_factors,
100 | scalar_factors_quotient,
101 | partitionedges,
102 | region_scalar,
103 | rescale,
104 | 
partitionvertices, 105 | partitioned_graph, 106 | powerset, 107 | boundary_partitionedges, 108 | message, 109 | factors, 110 | contraction_sequence, 111 | group, 112 | partitionedges, 113 | linkinds, 114 | generic_state, 115 | setindex_preserve_graph!, 116 | edge_tag, 117 | default_edge_sequence, 118 | default_bp_maxiter, 119 | default_message_update, 120 | tree_orthogonalize, 121 | gauge_walk, 122 | maxlinkdim, 123 | default_cache_construction_kwargs 124 | 125 | 126 | 127 | 128 | using ITensorNetworks.ITensorsExtensions: map_eigvals 129 | 130 | using EinExprs: Greedy 131 | 132 | import PauliPropagation 133 | const PP = PauliPropagation 134 | -------------------------------------------------------------------------------- /src/sample.jl: -------------------------------------------------------------------------------- 1 | using StatsBase 2 | 3 | #Take nsamples bitstrings from a 2D open boundary tensornetwork using boundary MPS with relevant ranks 4 | #Computes logq (logarithm of the probability of generating that sample conditioned on the specified message ranks) 5 | #And an approximation to p/q which is good if the left message rank is high so the left environments converge 6 | function _sample( 7 | ψ::ITensorNetwork, 8 | nsamples::Int64; 9 | projected_message_rank::Int64, 10 | norm_message_rank::Int64, 11 | norm_message_update_kwargs=(; niters = _default_boundarymps_update_niters, tolerance = _default_boundarymps_update_tolerance), 12 | projected_message_update_kwargs = (;cutoff = _default_boundarymps_update_cutoff, maxdim = projected_message_rank), 13 | kwargs..., 14 | ) 15 | ψ, ψψ = symmetric_gauge(ψ) 16 | ψ, ψψ = normalize(ψ, ψψ) 17 | 18 | norm_MPScache = BoundaryMPSCache(ψψ; message_rank=norm_message_rank) 19 | sorted_partitions = sort(ITensorNetworks.partitions(norm_MPScache)) 20 | seq = [ 21 | sorted_partitions[i] => sorted_partitions[i-1] for 22 | i = length(sorted_partitions):-1:2 23 | ] 24 | norm_message_update_kwargs = (; norm_message_update_kwargs..., normalize=false) 25 | norm_MPScache = update(Algorithm("orthogonal"), norm_MPScache, seq; norm_message_update_kwargs...) 26 | 27 | projected_MPScache = BoundaryMPSCache(ψ; message_rank=projected_message_rank) 28 | 29 | #Generate the bit_strings moving left to right through the network 30 | probs_and_bitstrings = NamedTuple[] 31 | for j = 1:nsamples 32 | p_over_q_approx, logq, bitstring = _get_one_sample( 33 | norm_MPScache, projected_MPScache, sorted_partitions; projected_message_update_kwargs, kwargs...) 34 | push!(probs_and_bitstrings, (poverq=p_over_q_approx, logq=logq, bitstring=bitstring)) 35 | end 36 | 37 | return probs_and_bitstrings, ψ 38 | end 39 | 40 | #Compute bitstrings conditioned on whatever kwargs used 41 | function sample(ψ::ITensorNetwork, nsamples::Int64; kwargs...) 42 | probs_and_bitstrings, _ = _sample(ψ::ITensorNetwork, nsamples::Int64; kwargs...) 43 | # returns just the bitstrings 44 | return getindex.(probs_and_bitstrings, :bitstring) 45 | end 46 | 47 | #Compute bitstrings and corresponding p/qs : a sufficiently large left message rank should be used 48 | function sample_directly_certified(ψ::ITensorNetwork, nsamples::Int64; projected_message_rank=5 * maxlinkdim(ψ), kwargs...) 49 | probs_and_bitstrings, _ = _sample(ψ::ITensorNetwork, nsamples::Int64; projected_message_rank, kwargs...) 
50 | # returns the self-certified p/q, logq and bitstrings 51 | return probs_and_bitstrings 52 | end 53 | 54 | #Compute bitstrings and independently computed p/qs : a sufficiently large certification message rank should be used 55 | function sample_certified(ψ::ITensorNetwork, nsamples::Int64; certification_message_rank=5 * maxlinkdim(ψ), certification_message_update_kwargs = (; maxdim = certification_message_rank, cutoff = _default_boundarymps_update_cutoff), kwargs...) 56 | probs_and_bitstrings, ψ = _sample(ψ::ITensorNetwork, nsamples::Int64; kwargs...) 57 | # send the bitstrings and the logq to the certification function 58 | return certify_samples(ψ, probs_and_bitstrings; certification_message_rank, certification_message_update_kwargs, symmetrize_and_normalize=false) 59 | end 60 | 61 | function _get_one_sample( 62 | norm_MPScache::BoundaryMPSCache, 63 | projected_MPScache::BoundaryMPSCache, 64 | sorted_partitions; 65 | projected_message_update_kwargs= (; cutoff = _default_boundarymps_update_cutoff, maxdim = maximum_virtual_dimension(projected_MPScache)), 66 | kwargs..., 67 | ) 68 | 69 | projected_message_update_kwargs = (; projected_message_update_kwargs..., normalize=false) 70 | 71 | norm_MPScache = copy(norm_MPScache) 72 | 73 | bit_string = Dictionary{keytype(vertices(projected_MPScache)),Int}() 74 | p_over_q_approx = nothing 75 | logq = 0 76 | for (i, partition) in enumerate(sorted_partitions) 77 | 78 | p_over_q_approx, _logq, bit_string, = 79 | sample_partition!(norm_MPScache, partition, bit_string; kwargs...) 80 | vs = planargraph_vertices(norm_MPScache, partition) 81 | logq += _logq 82 | 83 | projected_MPScache = update_factors( 84 | projected_MPScache, 85 | Dict(zip(vs, [copy(norm_MPScache[(v, "ket")]) for v in vs])), 86 | ) 87 | 88 | 89 | if i < length(sorted_partitions) 90 | next_partition = sorted_partitions[i+1] 91 | 92 | #Alternate fitting procedure here which is faster for small bond dimensions but slower for large 93 | projected_MPScache = update(Algorithm("ITensorMPS"), 94 | projected_MPScache, 95 | partition => next_partition; 96 | projected_message_update_kwargs...) 97 | 98 | pes = planargraph_sorted_partitionedges(norm_MPScache, partition => next_partition) 99 | 100 | for pe in pes 101 | m = only(message(projected_MPScache, pe)) 102 | set_message!(norm_MPScache, pe, [m, dag(prime(m))]) 103 | end 104 | end 105 | 106 | i > 1 && delete_partitionpair_messages!(projected_MPScache, sorted_partitions[i-1] => sorted_partitions[i]) 107 | i > 2 && delete_partitionpair_messages!(norm_MPScache, sorted_partitions[i-2] => sorted_partitions[i-1]) 108 | end 109 | 110 | return p_over_q_approx, logq, bit_string 111 | end 112 | 113 | function certify_sample( 114 | ψ::ITensorNetwork, bitstring, logq::Number; 115 | certification_message_rank::Int64, 116 | certification_message_update_kwargs = (; maxdim = certification_message_rank, cutoff = _default_boundarymps_update_cutoff), 117 | symmetrize_and_normalize=true, 118 | ) 119 | if symmetrize_and_normalize 120 | ψ, ψψ = symmetric_gauge(ψ) 121 | ψ = normalize(ψ, cache! 
= Ref(ψψ)) 122 | end 123 | 124 | ψproj = copy(ψ) 125 | s = siteinds(ψ) 126 | qv = sqrt(exp((1 / length(vertices(ψ))) * logq)) 127 | for v in vertices(ψ) 128 | ψproj[v] = ψ[v] * onehot(only(s[v]) => bitstring[v] + 1) / qv 129 | end 130 | 131 | bmpsc = BoundaryMPSCache(ψproj; message_rank=certification_message_rank) 132 | 133 | pg = partitioned_graph(ppg(bmpsc)) 134 | partition = first(center(pg)) 135 | seq = [src(e) => dst(e) for e in post_order_dfs_edges(pg, partition)] 136 | 137 | bmpsc = update(Algorithm("ITensorMPS"), bmpsc, seq; certification_message_update_kwargs...) 138 | 139 | p_over_q = region_scalar(bmpsc, partition) 140 | p_over_q *= conj(p_over_q) 141 | 142 | return (poverq=p_over_q, bitstring=bitstring) 143 | end 144 | 145 | certify_sample(ψ, logq_and_bitstring::NamedTuple; kwargs...) = certify_sample(ψ, logq_and_bitstring.bitstring, logq_and_bitstring.logq; kwargs...) 146 | 147 | function certify_samples(ψ::ITensorNetwork, bitstrings, logqs::Vector{<:Number}; kwargs...) 148 | return [certify_sample(ψ, bitstring, logq; kwargs...) for (bitstring, logq) in zip(bitstrings, logqs)] 149 | end 150 | 151 | function certify_samples(ψ::ITensorNetwork, probs_and_bitstrings::Vector{<:NamedTuple}; kwargs...) 152 | return [certify_sample(ψ, prob_and_bitstring; kwargs...) for prob_and_bitstring in probs_and_bitstrings] 153 | end 154 | 155 | #Sample along the column/ row specified by pv with the left incoming MPS message input and the right extractable from the cache 156 | function sample_partition!( 157 | ψIψ::BoundaryMPSCache, 158 | partition, 159 | bit_string::Dictionary; 160 | kwargs..., 161 | ) 162 | vs = sort(planargraph_vertices(ψIψ, partition)) 163 | seq = PartitionEdge[PartitionEdge(vs[i] => vs[i-1]) for i in length(vs):-1:2] 164 | !isempty(seq) && partition_update!(ψIψ, seq) 165 | prev_v, traces = nothing, [] 166 | logq = 0 167 | for v in vs 168 | !isnothing(prev_v) && partition_update!(ψIψ, [PartitionEdge(prev_v => v)]) 169 | env = environment(bp_cache(ψIψ), [(v, "operator")]) 170 | seq = contraction_sequence(env; alg="optimal") 171 | ρ = contract(env; sequence=seq) 172 | ρ_tr = tr(ρ) 173 | push!(traces, ρ_tr) 174 | ρ /= ρ_tr 175 | # the usual case of single-site 176 | config = StatsBase.sample(1:length(diag(ρ)), Weights(real.(diag(ρ)))) 177 | # config is 1 or 2, but we want 0 or 1 for the sample itself 178 | set!(bit_string, v, config - 1) 179 | s_ind = only(filter(i -> plev(i) == 0, inds(ρ))) 180 | P = onehot(s_ind => config) 181 | q = diag(ρ)[config] 182 | logq += log(q) 183 | ψv = copy(ψIψ[(v, "ket")]) / sqrt(q) 184 | ψv = P * ψv 185 | setindex_preserve_graph!(ψIψ, ITensor(one(Bool)), (v, "operator")) 186 | setindex_preserve_graph!(ψIψ, copy(ψv), (v, "ket")) 187 | setindex_preserve_graph!(ψIψ, dag(prime(copy(ψv))), (v, "bra")) 188 | prev_v = v 189 | end 190 | 191 | delete_partition_messages!(ψIψ, partition) 192 | 193 | return first(traces), logq, bit_string 194 | end 195 | -------------------------------------------------------------------------------- /src/utils.jl: -------------------------------------------------------------------------------- 1 | getnqubits(g::NamedGraph) = length(g.vertices) 2 | getnqubits(tninds::IndsNetwork) = length(tninds.data_graph.vertex_data) 3 | 4 | """ 5 | trace(Q::ITensorNetwork) 6 | 7 | Take the trace of an ITensorNetwork. In the Pauli basis this is the direct trace. 
In the Schrödinger picture it is the sum of the coefficients.
8 | """
9 | function trace(Q::ITensorNetwork)
10 | d = getphysicaldim(siteinds(Q))
11 | if d == 2
12 | vec = [1.0, 1.0]
13 | elseif d == 4
14 | vec = [1.0, 0.0, 0.0, 0.0]
15 | else
16 | throwdimensionerror()
17 | end
18 | 
19 | val = ITensorNetworks.inner(ITensorNetwork(v -> vec, siteinds(Q)), Q; alg = "bp")
20 | return val
21 | end
22 | 
23 | """
24 | truncate(ψ::ITensorNetwork; maxdim, cutoff = nothing, cache_update_kwargs = (...))
25 | 
26 | Truncate the ITensorNetwork `ψ` to a maximum bond dimension `maxdim` using the specified singular value `cutoff`.
27 | """
28 | function ITensorNetworks.truncate(
29 | ψ::ITensorNetwork;
30 | cache_update_kwargs = default_posdef_bp_update_kwargs(; cache_is_tree = is_tree(ψ)),
31 | kwargs...,
32 | )
33 | ψ_vidal = VidalITensorNetwork(ψ; kwargs...)
34 | return ITensorNetwork(ψ_vidal)
35 | end
36 | 
37 | 
--------------------------------------------------------------------------------
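A minimal end-to-end usage sketch of the expectation-value API defined in src/expect.jl (illustrative only: the grid size, `message_rank`, and observables are arbitrary choices, and the "boundarymps" path relies on the boundary-MPS cache constructor provided in src/Backend/boundarymps.jl):

    using TensorNetworkQuantumSimulator

    # a small planar grid with coordinate vertex labels, initialised in the |0...0> product state
    g = named_grid((3, 3))
    ψ = zerostate(g)

    # observables are (pauli_string, vertices[, coefficient]) tuples
    z = expect(ψ, ("Z", [(2, 2)]))  # default alg = "bp"
    xx = expect(ψ, ("XX", [(1, 1), (1, 2)], 0.5); alg = "boundarymps", message_rank = 4)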