├── .github └── workflows │ ├── cache.yml │ ├── preview.yml │ └── publish.yml ├── .gitignore ├── README.md ├── _notebook_repo ├── .gitignore └── README.md ├── build.sh └── lectures ├── 0-Preliminaries ├── Resources.md └── Software.md ├── 1-Introduction ├── FockSpace.md ├── ManyBody.md ├── Observables.md ├── QuantumMechanics.md └── QuantumToClassical.md ├── 2-TensorNetworks ├── LinearAlgebra.md ├── Symmetries.md ├── TensorNetworkStates.md └── TensorNetworks.md ├── 3-MatrixProductStates ├── Algorithms.md ├── InfiniteMPS.md ├── MatrixProductOperators.md └── MatrixProductStates.md ├── 4-Algorithms ├── FixedpointAlgorithms.md └── TimeEvolutionAlgorithms.md ├── 5-Tutorials ├── FiniteEntanglementScaling.md └── SymmetricTensors.md ├── Project.toml ├── References.md ├── _config.yml ├── _static ├── FiniteMPS │ ├── corrFunc.svg │ ├── fmps.svg │ ├── gauge.svg │ ├── leftOrth.svg │ ├── peps.svg │ ├── pfmps.svg │ ├── svd.svg │ └── transfer.svg ├── FixedpointAlgorithms │ ├── H_AC.svg │ ├── H_C.svg │ ├── effHam.svg │ ├── energyOpt.svg │ ├── env.svg │ ├── envNorm.svg │ ├── fixedpoint.svg │ ├── localUpdate.svg │ ├── localUpdate2.svg │ └── mpoHam.svg ├── InfiniteMPS │ ├── corrFunc.svg │ ├── corrFunc2.svg │ ├── diagC.svg │ ├── expVal.svg │ ├── expVal2.svg │ ├── expVal3.svg │ ├── fixedPoints.svg │ ├── gaugeTransform.svg │ ├── leftGauge.svg │ ├── leftOrth.svg │ ├── mixedGauge.svg │ ├── mixedGauge2.svg │ ├── mpsNorm.svg │ ├── normAC.svg │ ├── qrConv.svg │ ├── qrStep.svg │ ├── rightGauge.svg │ ├── rightOrth.svg │ ├── tm.svg │ ├── tmDecomp.svg │ ├── tmLimit.svg │ ├── tmPower.svg │ ├── traceNorm.svg │ ├── truncMPS.svg │ ├── umps.svg │ ├── umps3.svg │ └── unitaryGauge.svg ├── MPO │ ├── boltzmann.svg │ ├── boltzmann_mpo.svg │ ├── boundary_mps.svg │ ├── expectation_value.svg │ ├── expectation_value2.svg │ ├── kronecker.svg │ ├── mpo.svg │ ├── partition_function.svg │ ├── partition_function_1.svg │ └── transfer.svg ├── SymmetricTensors │ ├── Fib_fusiontrees.svg │ ├── SU2_fusiontrees.svg │ ├── SUN_fusiontrees.svg │ ├── U1_fusiontrees.svg │ ├── X.svg │ ├── Z2_fusiontrees.svg │ ├── ZZ.svg │ ├── ZZX_symm.svg │ ├── anotherfusiontree.svg │ ├── bosonham.svg │ ├── bosonops.svg │ ├── fZ2_fusiontrees.svg │ ├── fermioncomm.svg │ ├── fermionham.svg │ ├── fermionham_bis.svg │ ├── fermionops.svg │ ├── fusiontree.svg │ ├── none2symm.svg │ ├── symmetric_tensor.svg │ ├── symmtens.svg │ └── wignereckart.svg ├── TensorNetworkStates │ ├── full_state.svg │ ├── peps.svg │ ├── svd1.svg │ └── tn_state.svg ├── TensorNetworks │ ├── R-tensor.svg │ ├── contraction.svg │ ├── eig.svg │ ├── grouping.svg │ ├── indexing.svg │ ├── ladder1.svg │ ├── ladder2.svg │ ├── leftOrth.svg │ ├── matmat.svg │ ├── matvec.svg │ ├── network.svg │ ├── outer-product.svg │ ├── polar.svg │ ├── qr.svg │ ├── svd.svg │ ├── tr.svg │ ├── trace-cyclic.svg │ ├── trace.svg │ ├── unitary.svg │ └── vecvec.svg ├── TimeEvolution │ ├── AncillaMPS.svg │ ├── DensityMatrix.svg │ ├── FirstOrderTrick.svg │ ├── Heffs.svg │ ├── ImagTimeEvolution.svg │ ├── MPOHam.svg │ ├── Mconstraint.svg │ ├── TDVPProjector.svg │ ├── TimeEvolution.svg │ └── TimeMPO_1stOrder.svg ├── figures │ └── alg │ │ ├── boundary_mps.svg │ │ ├── ctmrg.svg │ │ ├── lambda.svg │ │ ├── lattice.svg │ │ ├── simple_update.svg │ │ ├── tebd.svg │ │ ├── tebd_mps.svg │ │ ├── tebd_trunc.svg │ │ ├── tensor_network.svg │ │ └── transfer.svg └── lectures-favicon.ico ├── _toc.yml ├── assets └── references.bib ├── intro.md ├── logo.png └── requirements.txt /.github/workflows/cache.yml: 
-------------------------------------------------------------------------------- 1 | name: Build Cache 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | cache: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v2 12 | - name: Set up Python 13 | uses: actions/setup-python@v2 14 | with: 15 | python-version: '3.9' 16 | cache: 'pip' 17 | - run: pip install -r lectures/requirements.txt 18 | - name: Install LaTeX dependencies 19 | run: | 20 | sudo apt-get -qq update 21 | sudo apt-get install -y \ 22 | texlive-latex-recommended \ 23 | texlive-latex-extra \ 24 | texlive-fonts-recommended \ 25 | texlive-fonts-extra \ 26 | texlive-xetex \ 27 | latexmk \ 28 | xindy \ 29 | texlive-luatex \ 30 | dvipng \ 31 | ghostscript \ 32 | cm-super 33 | - name: Set up Julia 34 | uses: julia-actions/setup-julia@v1 35 | with: 36 | version: 1.9 37 | - name: Install IJulia and Setup Project 38 | shell: bash 39 | run: | 40 | julia -e 'using Pkg; Pkg.add("IJulia");' 41 | julia --project=lectures --threads auto -e 'using Pkg; Pkg.instantiate();' 42 | - uses: julia-actions/cache@v1 43 | - name: Build HTML 44 | shell: bash -l {0} 45 | run: | 46 | jb build lectures --path-output ./ 47 | - name: Upload the "_build" folder (cache) 48 | uses: actions/upload-artifact@v2 49 | with: 50 | name: build-cache 51 | path: _build -------------------------------------------------------------------------------- /.github/workflows/preview.yml: -------------------------------------------------------------------------------- 1 | name: Deploy PR previews 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - opened 7 | - reopened 8 | - synchronize 9 | - closed 10 | 11 | concurrency: preview-${{ github.ref }} 12 | 13 | jobs: 14 | deploy-preview: 15 | runs-on: ubuntu-20.04 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v3 19 | 20 | # Install dependencies 21 | - name: Set up Python 3.9 22 | uses: actions/setup-python@v4 23 | with: 24 | python-version: '3.9' 25 | cache: 'pip' 26 | - run: pip install -r lectures/requirements.txt 27 | - name: Install LaTeX dependencies 28 | run: | 29 | sudo apt-get -qq update 30 | sudo apt-get install -y \ 31 | texlive-latex-recommended \ 32 | texlive-latex-extra \ 33 | texlive-fonts-recommended \ 34 | texlive-fonts-extra \ 35 | texlive-xetex \ 36 | latexmk \ 37 | xindy \ 38 | texlive-luatex \ 39 | dvipng \ 40 | ghostscript \ 41 | cm-super 42 | - name: Set up Julia 43 | uses: julia-actions/setup-julia@v1 44 | with: 45 | version: 1.9 46 | - name: Install IJulia and Setup Project 47 | shell: bash 48 | run: | 49 | julia -e 'using Pkg; Pkg.add("IJulia");' 50 | julia --project=lectures --threads auto -e 'using Pkg; Pkg.instantiate();' 51 | - uses: julia-actions/cache@v1 52 | - name: Build the book 53 | run: | 54 | jupyter-book build lectures --path-output ./ 55 | - name: Deploy preview 56 | uses: rossjrw/pr-preview-action@v1 57 | with: 58 | source-dir: _build/html -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: deploy-book 2 | 3 | # Only run this when the master branch changes 4 | on: 5 | push: 6 | branches: 7 | - main 8 | workflow_dispatch: 9 | 10 | # This job installs dependencies, builds the book, and pushes it to `gh-pages` 11 | jobs: 12 | deploy-book: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | # Install dependencies 18 | - name: Set up Python 3.9 19 | uses: 
actions/setup-python@v4 20 | with: 21 | python-version: '3.9' 22 | cache: 'pip' 23 | - run: pip install -r lectures/requirements.txt 24 | - name: Install LaTeX dependencies 25 | run: | 26 | sudo apt-get -qq update 27 | sudo apt-get install -y \ 28 | texlive-latex-recommended \ 29 | texlive-latex-extra \ 30 | texlive-fonts-recommended \ 31 | texlive-fonts-extra \ 32 | texlive-xetex \ 33 | latexmk \ 34 | xindy \ 35 | texlive-luatex \ 36 | dvipng \ 37 | ghostscript \ 38 | cm-super 39 | - name: Set up Julia 40 | uses: julia-actions/setup-julia@v1 41 | with: 42 | version: 1.9 43 | - name: Install IJulia and Setup Project 44 | shell: bash 45 | run: | 46 | julia -e 'using Pkg; Pkg.add("IJulia");' 47 | julia --project=lectures --threads auto -e 'using Pkg; Pkg.instantiate();' 48 | - uses: julia-actions/cache@v1 49 | - name: Download "build" folder (cache) 50 | uses: dawidd6/action-download-artifact@v2 51 | with: 52 | workflow: cache.yml 53 | branch: main 54 | name: build-cache 55 | path: _build 56 | - name: Build Download Notebooks (sphinx-tojupyter) 57 | shell: bash -l {0} 58 | run: | 59 | jb build lectures --path-output ./ --builder=custom --custom-builder=jupyter 60 | zip -r download-notebooks.zip _build/jupyter 61 | - uses: actions/upload-artifact@v2 62 | with: 63 | name: download-notebooks 64 | path: download-notebooks.zip 65 | - name: Copy Download Notebooks for GH-PAGES 66 | shell: bash -l {0} 67 | run: | 68 | mkdir -p _build/html/_notebooks 69 | rsync -r _build/jupyter/ _build/html/_notebooks/ 70 | - name: Build the book 71 | run: | 72 | jupyter-book build lectures --path-output ./ 73 | 74 | # Push the book's HTML to github-pages 75 | - name: GitHub Pages action 76 | uses: JamesIves/github-pages-deploy-action@v4 77 | with: 78 | token: ${{ secrets.GITHUB_TOKEN }} 79 | folder: _build/html 80 | clean-exclude: pr-preview 81 | 82 | - name: Prepare notebooks sync 83 | shell: bash 84 | run: | 85 | mkdir -p _build/lecture-julia.notebooks 86 | cp -a _notebook_repo/. _build/lecture-julia.notebooks 87 | cp -a _build/jupyter/. _build/lecture-julia.notebooks 88 | rm -rf _build/lecture-julia.notebooks/_static 89 | rm -rf _build/lecture-julia.notebooks/_panels_static 90 | cp lectures/Project.toml _build/lecture-julia.notebooks 91 | cp lectures/Manifest.toml _build/lecture-julia.notebooks 92 | ls -a _build/lecture-julia.notebooks 93 | 94 | - name: Commit notebooks to lecture-julia.notebooks 95 | uses: cpina/github-action-push-to-another-repository@v1.7.2 96 | env: 97 | SSH_DEPLOY_KEY: ${{ secrets.SSH_DEPLOY_KEY }} 98 | with: 99 | source-directory: _build/lecture-julia.notebooks 100 | destination-github-username: quantumghent 101 | destination-repository-name: TensorTutorials.notebooks 102 | user-email: lukas.devos@ugent.be 103 | target-branch: main 104 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | Manifest.toml 3 | .vscode/ 4 | *.bak 5 | *.sav.tmp -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tensor Tutorials 2 | 3 | This is a repository containing the source material for several tutorials on tensor networks 4 | in the context of quantum many-body theory and statistical mechanics. The tutorials are 5 | hosted as a Jupyter book at https://quantumghent.github.io/TensorTutorials/. 
6 | 7 | # Local development: 8 | 9 | Run the `build.sh` script to build the pages locally. Optionally, you can pass the `-d` flag 10 | to update the dependencies. WARNING: the dependencies will overwrite your installation of 11 | LateX, and the script is only tested on Ubuntu systems. Use this feature at your own risk. 12 | 13 | ```bash 14 | ./build.sh [-d] 15 | ``` 16 | -------------------------------------------------------------------------------- /_notebook_repo/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantumghent/TensorTutorials/c941aef5c7c1dad0d692afc2d6143dfb75a1339a/_notebook_repo/.gitignore -------------------------------------------------------------------------------- /_notebook_repo/README.md: -------------------------------------------------------------------------------- 1 | # Tensor Tutorials - Notebooks 2 | 3 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/quantumghent/TensorTutorials.notebooks/main) 4 | 5 | - [Lecture source](https://github.com/quantumghent/TensorTutorials) 6 | - [README source code](https://github.com/quantumghent/TensorTutorials.notebooks/blob/main/_notebook_repo/README.md) -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Building the project..." 3 | 4 | usage() { echo "Usage: $0 [-h] [-d]" 1>&2; exit 1; } 5 | 6 | while getopts 'hd' arg; do 7 | case "${arg}" in 8 | h) 9 | usage 10 | ;; 11 | d) 12 | echo "Installing dependencies:" 13 | 14 | echo "Installing Python dependencies" 15 | pip install -r lectures/requirements.txt 16 | 17 | echo "Installing LaTeX dependencies" 18 | sudo apt-get -qq update 19 | sudo apt-get install -y \ 20 | texlive-latex-recommended \ 21 | texlive-latex-extra \ 22 | texlive-fonts-recommended \ 23 | texlive-fonts-extra \ 24 | texlive-xetex \ 25 | latexmk \ 26 | xindy \ 27 | texlive-luatex \ 28 | dvipng \ 29 | ghostscript \ 30 | cm-super 31 | 32 | echo "Install IJulia and Setup Project" 33 | julia -e 'using Pkg; Pkg.add("IJulia");' 34 | julia --project=lectures --threads auto -e 'using Pkg; Pkg.instantiate();' 35 | ;; 36 | *) 37 | usage 38 | ;; 39 | esac 40 | done 41 | 42 | echo "Building the lectures" 43 | jupyter-book build lectures/ -------------------------------------------------------------------------------- /lectures/0-Preliminaries/Resources.md: -------------------------------------------------------------------------------- 1 | # Useful Resources 2 | 3 | Other useful resources for learning about tensor networks include (but are certainly not limited to): 4 | 5 | - [tensornetwork.org](http://tensornetwork.org/) 6 | - [tensors.net](https://www.tensors.net/) -------------------------------------------------------------------------------- /lectures/0-Preliminaries/Software.md: -------------------------------------------------------------------------------- 1 | --- 2 | jupytext: 3 | formats: md:myst 4 | text_representation: 5 | extension: .md 6 | format_name: myst 7 | kernelspec: 8 | display_name: Julia 9 | language: julia 10 | name: julia-1.9 11 | --- 12 | 13 | # Getting Started with Numerics 14 | 15 | On this page, there are some links with relevant information for getting started with 16 | numerical computing. We point to some references for the Julia programming language, as well 17 | as some resources for learning about version control software. 
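If you would like to run the code examples from these lectures on your own machine, a minimal way to set up the required environment is sketched below. It assumes a Julia 1.9 installation and that Julia is started from the root of this repository, which ships the environment file `lectures/Project.toml`; this mirrors what the repository's `build.sh` script and CI workflows do before building the lectures.

```julia
using Pkg
Pkg.activate("lectures")   # activate the environment defined by lectures/Project.toml
Pkg.instantiate()          # install TensorKit, MPSKit, MPSKitModels and the other dependencies
```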
18 | 19 | ## Version Control 20 | 21 | Version control software is a tool used in software development (and sometimes in other 22 | fields) to manage and track changes made to a project's source code, documents, or any other 23 | set of files. It allows multiple contributors to work collaboratively on a project, keeping 24 | a history of changes, and facilitating the organization and synchronization of different 25 | versions of the project. The most popular version control system is 26 | [git](https://git-scm.com/), which is a free tool developed by Linus Torvalds in 2005, and 27 | has become the de facto standard in the software development industry. 28 | 29 | Again, multiple resources are available for learning about git. From the official website, 30 | the book [Pro Git](https://git-scm.com/book/en/v2) is a good place to start. For a more 31 | dynamic approach, you can learn git through 32 | [this interactive tutorial](https://learngitbranching.js.org/). 33 | 34 | ## Julia 35 | 36 | In order to get started with Julia, there are many resources already available. The 37 | [official documentation](https://docs.julialang.org/en/v1/) is a good place to start, and a 38 | full _getting started_ exposition can be found for example 39 | [here](https://julia.quantecon.org/intro.html). There is also a 40 | [learning page](https://julialang.org/learning/) that has tutorials on different topics, a 41 | list of books, and much more. 42 | 43 | Additionally, there is an active [forum](https://discourse.julialang.org/) for asking 44 | questions, as well as a [slack channel](https://julialang.org/slack/) and a stack overflow 45 | page. 46 | 47 | ## Julia Packages 48 | 49 | Julia has a very active open-source community, and many packages are available for different 50 | purposes. These typically have their own documentation, and are hosted on GitHub. An 51 | (incomplete) list of packages that are relevant for this course are given below: 52 | 53 | - [TensorKit.jl](https://github.com/Jutho/TensorKit.jl) 54 | - [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl) 55 | - [KrylovKit.jl](https://github.com/Jutho/KrylovKit.jl) 56 | - [OptimKit.jl](https://github.com/Jutho/OptimKit.jl) 57 | - [MPSKit.jl](https://github.com/maartenvd/MPSKit.jl) 58 | - [PEPSKit.jl](https://github.com/quantumghent/PEPSKit.jl) 59 | 60 | Also check out the [GitHub page for our organization](https://github.com/quantumghent), 61 | which hosts and/or links many of the relevant software repositories. 62 | 63 | ## Noteworthy Tensor Network Software 64 | 65 | There are many additional software libraries available for tensor network computations, or 66 | more generally for quantum physics research. Below you can find an incomplete list of some 67 | of these. 
68 | 69 | - [ITensor](https://itensor.org/) Julia/C++ library for tensor network calculations 70 | - [TenPy](https://tenpy.readthedocs.io/en/latest/) Python library for tensor network calculations 71 | - [QUIMB](https://quimb.readthedocs.io/en/latest/) Python library for quantum information many-body calculations 72 | -------------------------------------------------------------------------------- /lectures/2-TensorNetworks/TensorNetworkStates.md: -------------------------------------------------------------------------------- 1 | --- 2 | jupytext: 3 | formats: md:myst 4 | text_representation: 5 | extension: .md 6 | format_name: myst 7 | kernelspec: 8 | display_name: Julia 9 | language: julia 10 | name: julia-1.9 11 | --- 12 | 13 | (tensor_network_states)= 14 | # Tensor Network States 15 | 16 | After our introduction on [quantum many body systems](many_body) and 17 | [tensor networks](tensor_networks), we move on to considering how tensor networks can 18 | characterize many-body systems. We start with a constructive approach to approximating an 19 | arbitrary quantum state by a *tensor network state*. We then qualify in what settings such a 20 | representation is efficient, and introduce several classes of tensor network states used in 21 | different settings. We end this section by broadly commenting on how efficient manipulations 22 | of tensor network states can be used to simulate quantum systems. 23 | 24 | ## Quantum States as Tensor Networks 25 | 26 | Consider a quantum many body system which consists of physical spins with a local Hilbert 27 | space $ \mathbb H_i = \mathbb C^d $ of dimension $ d $, which we will call the *physical 28 | dimension*, are located at every site $ i $ of some lattice $ \Lambda $. This gives rise to 29 | a total Hilbert space of the system $ \mathbb H = \bigotimes_{i = 1}^{N} \mathbb H_i = 30 | \left( \mathbb C^d \right)^{\otimes N}$ where $ N = |\Lambda| $ is the total number of sites 31 | in the lattice. A general quantum state in this [many-body Hilbert space](many_body) can be 32 | represented in terms a set of $d^N$ complex coefficients $C_{s_1,s_2,...,s_N} \in \mathbb 33 | C$, where $s_i\in \{0,...,d-1\}$, with respect to the computational basis as 34 | 35 | ```{math} 36 | \ket{\psi} = \sum_{s_1,s_2,...,s_N} C_{s_1,s_2,...,s_N}\ket{s_1,s_2,...,s_N}. 37 | ``` 38 | 39 | The exponential increase in the number of coefficients with the system size means that it is 40 | entirely impossible to store the full state vector of a quantum system of any reasonable 41 | size in this way. For example, a system of $N=100$ spins with $d=2$ has $2^{100} \approx 42 | 10^{30}$ coefficients, which is far more than the number of atoms in the universe. 43 | 44 | Instead of directly storing this full state vector, we can alternatively parametrize it as a 45 | tensor network. Consider for example the case $N=4$. We can then represent the state vector 46 | as a tensor $C_{s_1,s_2,s_3,s_4}$ with four indices, where each index corresponds to a 47 | physical spin. The full state is then recovered as 48 | 49 | ```{figure} ../_static/TensorNetworkStates/full_state.svg 50 | :scale: 12% 51 | :name: full_state 52 | :align: center 53 | ``` 54 | We can now split the full tensor $C$ into separate components by consecutively applying the 55 | SVD between pairs of physical indices. 
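To make this concrete, the sketch below performs one such splitting in code. It is a minimal illustration assuming TensorKit.jl (the tensor library used throughout these tutorials) and an arbitrary choice of dimensions; the text below illustrates the analogous splitting of the first index diagrammatically.

```julia
using TensorKit

d, D = 2, 2   # physical dimension and (arbitrarily chosen) target bond dimension

# random state tensor C_{s1 s2 s3 s4} for N = 4 sites
C = Tensor(randn, ComplexF64, ℂ^d ⊗ ℂ^d ⊗ ℂ^d ⊗ ℂ^d)

# SVD across the cut between sites (1,2) and (3,4), keeping at most D singular values
U, S, V, ϵ = tsvd(C, (1, 2), (3, 4); trunc = truncdim(D))

@show space(S)   # the virtual bond space carrying the kept singular values
@show ϵ          # the truncation error made by discarding the rest
```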
For example, splitting out the first index we can rewrite $C$ as

```{figure} ../_static/TensorNetworkStates/svd1.svg
:scale: 12%
:name: svd1
:align: center
```

In this expression we can interpret $L^{(1)}$ as a $d \times D$ matrix, $\lambda^{(1)}$ as a $D \times D$ matrix and $R^{(1)}$ as a $D \times d^{N-1}$ matrix. The horizontal edge in this diagram is called a *virtual bond* and the dimension $D$ of this bond is called the *bond dimension*. The bond dimension is a measure of the entanglement in the state, and in this case encodes the amount of entanglement between the first site and the rest of the system. So far we have not actually done anything significant, since this decomposition of $C$ actually increased the total number of required coefficients, instead of reducing it. The key point is that we can reduce the number of parameters by *truncating* $\lambda^{(1)}$ to only keep the $D$ largest singular values. This results in a *low-rank approximation* of the original state, where the quality of the approximation is controlled by the chosen final bond dimension $D$.

By repeatedly applying this procedure, grouping and splitting indices in the resulting diagrams and absorbing the bond tensors $\lambda^{(i)}$ into the site tensors, we can decompose $C$ into a tensor network of any geometry. For example, we can approximate $C$ as the contraction of a square network to end up with a *tensor network state* of the form

```{figure} ../_static/TensorNetworkStates/tn_state.svg
:scale: 12%
:name: tn_state
:align: center
```

In words, this expression means that for every basis state $\ket{s_1,s_2,s_3,s_4}$ its corresponding coefficient in the superposition is obtained by indexing all of the *physical legs* pointing downward according to the corresponding physical basis state and contracting the resulting network.

We can therefore parametrize an arbitrary quantum state in terms of a set of local tensors $A^{(i)}$, where each of these tensors encodes a number of parameters that is polynomial in its physical dimension $d$ and bond dimensions $D$ (which can in principle be different for every virtual bond). For a general quantum state however, a good tensor network state approximation requires a bond dimension which scales exponentially with the system size, meaning that we have not actually gained anything in terms of efficiency. However, it turns out that for many physically relevant states the bond dimension can be bounded by a constant independent of the system size, in which case the tensor network representation leads to an exponential reduction in the number of variational parameters.

(area_laws)=
## Area Laws and Tensor Network States

To see why this is the case, let us study the entanglement entropy of a tensor network state. Consider the following two-dimensional network, where all physical indices have a dimension $d$ and we assume all virtual bonds have the same dimension $D$,

```{figure} ../_static/TensorNetworkStates/peps.svg
:scale: 12%
:name: peps
:align: center
```

We now want to quantify the entanglement between the shaded region $\mathcal A$ and the rest of the system for this specific state.
To this end, we first recall the formula for the bipartite entanglement entropy Eq. {eq}`entanglement_entropy`, and note that the number of terms in this expression is determined by the number of non-zero Schmidt coefficients, the latter of which is referred to as the *Schmidt rank*. Looking back now at our initial decomposition of the full state tensor $C$ by splitting out its first index above, we see that the Schmidt rank is precisely given by the bond dimension $D$ across this cut. From this, you should be able to convince yourself that the maximal entanglement entropy across this cut is determined by the bond dimension as $S \sim \log(D)$. Extending this line of reasoning to our question of the entanglement between the region $\mathcal A$ and the rest of the system, we see that each virtual leg connecting $\mathcal A$ to the rest of the system can contribute a term $\log(D)$ to the entanglement entropy. Therefore we arrive at

```{math}
S(\mathcal A) \sim \log(D) \; \partial \mathcal A,
```

where $\partial \mathcal A$ is the size of the boundary of $\mathcal A$ (which in this two-dimensional case is its perimeter).

Clearly, this tensor network state then naturally obeys an area law for its entanglement entropy. In our discussion of the [low temperature properties of quantum many body systems](zero_temp) however, we have already seen that low-energy states of locally interacting Hamiltonians obey exactly such an area law. It is precisely because tensor network states inherently encode area-law entanglement that they are so well suited for representing low-energy states of quantum systems. They can only target a tiny corner of the full exponentially large Hilbert space, but this corner is precisely where the most relevant physics happens. This observation has given rise to a large family of tensor network states which allow for an efficient parametrization of states with varying geometries.

```{note}
An equally important feature of tensor networks is that they, aside from providing an efficient parametrization of states, also allow for efficient *manipulations* of these states. This means that they can be used to compute interesting features of quantum systems, and can be optimized to target states of specific interest such as ground states and low-lying excitations. For all of the network geometries depicted above there exist corresponding algorithms that put them to efficient use, some of which will be highlighted in future sections of this tutorial.
```

--------------------------------------------------------------------------------
/lectures/3-MatrixProductStates/Algorithms.md:
--------------------------------------------------------------------------------
---
jupytext:
  formats: md:myst
  text_representation:
    extension: .md
    format_name: myst
kernelspec:
  display_name: Julia
  language: julia
  name: julia-1.9
---

# A Simple Tensor Network Algorithm

Having introduced tensor networks in general, with a focus on the case of MPS, we now turn to the question of how to use them to solve specific problems.
While a large number of 17 | tensor network algorithms have been developed, many of them more advanced and/or efficient 18 | than the ones we will discuss here, we will focus on a few simple algorithms that are easy 19 | to understand and implement. Importantly, these algorithms are also the building blocks of 20 | more advanced algorithms, for example in higher spatial dimensions. 21 | 22 | Effectively, we have already seen how to use MPS to compute expectation values or 23 | correlation functions, or derive all kind of properties. Here, we focus on how to obtain the 24 | desired MPS in the first place. In other words, given a certain problem, how can we optimize 25 | an MPS, or a more general tensor network, to solve it? 26 | 27 | ## Simulating Quantum Systems 28 | 29 | As a first example, let us consider the problem of simulating a quantum system. We can 30 | formalize this idea as follows: Given a Hamiltonian $H$, and some initial state 31 | $\ket{\psi_0}$ at time $t=0$, is there a way to compute the _time-evolved state_ 32 | $\ket{\psi(t)} = e^{-i H t} \ket{\psi_0}$ at some later time $t$. 33 | 34 | In general, this is a very hard problem. For example, one could naively try to 35 | [compute the matrix exponential](https://en.wikipedia.org/wiki/Matrix_exponential#Computing_the_matrix_exponential), 36 | but this quickly becomes prohibitively expensive, as the dimension of the Hamiltonian scales 37 | exponentially with the number of particles. However, for physically relevant systems the 38 | Hamiltonian does not consist of a random matrix, but rather exhibits additional structure 39 | that can be used to simplify the problem. 40 | 41 | (tebd)= 42 | ## Time-Evolving Block Decimation (TEBD) 43 | 44 | A particularly powerful example can be found for systems with local interactions, where the 45 | Hamiltonian is of the form: 46 | 47 | ```{math} 48 | H = \sum_{i,j} h_{ij} 49 | ``` 50 | 51 | where $h_{ij}$ denotes a local operator, acting only on a small number of sites. In this 52 | case, although $e^{-i H t}$ is unfeasible to compute, each of the constituent terms act only 53 | on a much smaller subsystem and therefore $e^{-ih_{ij}t}$ can be computed efficiently. 54 | However, as these terms generally do not commute, we cannot simply apply them one after the 55 | other. Instead, we can use the first-order Suzuki-Trotter decomposition to approximate the 56 | time-evolution operator, which states that for any two Hermitian operators $A$ and $B$, and 57 | any real number $\Delta t$, we have: 58 | 59 | ```{math} 60 | :label: trotter_first 61 | e^{(A + B) \Delta t} = e^{A \Delta t} e^{B \Delta t} + \mathcal O(\Delta t^2). 62 | ``` 63 | 64 | If we now split the full time interval $t$ into $m$ steps, we obtain the approximation 65 | 66 | ```{math} 67 | e^{-i H t} = \left( e^{-i H_e \frac{t}{m}} e^{-i H_o \frac{t}{m}} \right)^m + \mathcal O 68 | \left( \frac{t^2}{m}\right) 69 | ``` 70 | 71 | where the approximation error can be managed by choosing a sufficiently large $m$. 72 | 73 | ```{note} 74 | There actually exist entire families of such exponential product approximations up to a 75 | given order {cite}`hatano2005finding`. For our purposes however, it is sufficient to 76 | illustrate a simulation procedure using this first-order approximation. 
```

### Example: One-Dimensional Nearest-Neighbor Hamiltonians

We can put the discussion above into practice by applying it to the example of a nearest-neighbor Hamiltonian on a one-dimensional lattice:

```{math}
H = \sum_{n=1}^N h_{n,n+1}
```

where $N$ is the number of sites and we are assuming periodic boundary conditions. We now want to simulate the dynamics of this Hamiltonian in an efficient way using the aforementioned approximation Eq. {eq}`trotter_first`. The simplest way to do this is to split the local terms into two groups, where terms within a group commute with each other, but not with terms in the other group. For example, we could split the Hamiltonian into even ($H_e$) and odd terms ($H_o$):

```{math}
:label: hamsplit
H_e = \sum_n h_{(2n, 2n+1)}, \qquad H_o = \sum_n h_{(2n+1, 2n+2)}.
```

It is a simple exercise to show that the local terms within a group commute, as they act on non-overlapping sites. Therefore, if we can find an MPS representation of the initial state, the procedure for simulating the time evolution is as follows:

```{image} /_static/figures/alg/tebd_mps.svg
:scale: 10%
:name: tebd_mps
:align: center
```

This procedure does not solve the problem as-is, as evaluating this network exactly would still require a bond dimension which grows exponentially with the number of layers $m$. Instead, we can retain an efficient description by locally truncating the bond dimension, by computing an SVD and retaining only the largest $\chi$ singular values.

```{figure} /_static/figures/alg/tebd_trunc.svg
:scale: 10%
:name: tebd_trunc
:align: center
```

## Groundstate Search

Another important problem in quantum physics is the determination of the groundstate of a given Hamiltonian. Again, this can be made more formal as follows: Given a Hamiltonian $H$, is there a way to find the state $\ket{\psi_0}$ that minimizes the expectation value $\bra{\psi} H \ket{\psi}$?

In fact, this problem faces the same difficulty as the one discussed above, namely that the naive solution strategy involves finding the eigenvector of the Hamiltonian matrix with the smallest eigenvalue, which again scales exponentially with the number of particles. However, as before, we can exploit the structure of the Hamiltonian to find a more efficient solution.

### Imaginary Time Evolution

As it turns out, the problem of finding groundstates can be mapped to the problem of simulating dynamics, by making use of a trick known as _imaginary time evolution_. The idea is to consider the time evolution operator $e^{-i H t}$, but to replace the real time $t$ by an imaginary time $\tau = i t$. If we now consider the limit $\tau \to \infty$ and deal with the normalization appropriately, we can see that applying the evolution operator to a state $\ket{\psi_0}$ will effectively project it onto its lowest-energy eigenstate, as all other eigenstates will be damped out exponentially. In other words, we can find the groundstate of a Hamiltonian by simulating its dynamics for a sufficiently long imaginary time.
```{math}
\lim_{\tau \to \infty} e^{- H \tau} &= \lim_{\tau \to \infty} \sum_{i=0}^\infty e^{-E_i \tau} \ket{\psi_i} \bra{\psi_i} \\
&= \lim_{\tau \to \infty} e^{- E_0 \tau} \left(\ket{\psi_0} \bra{\psi_0} + \sum_{i>0} e^{(-E_i + E_0) \tau}\ket{\psi_i}\bra{\psi_i} \right) \\
&\approx e^{-E_0 \tau} \ket{\psi_0} \bra{\psi_0}
```

where we have made use of the fact that all but the first term in the sum are damped out. In this regard, the groundstate search problem can also be tackled with the TEBD algorithm discussed above, by simply replacing the real time $t$ by an imaginary time $\tau$ and continuing time-evolution until convergence is reached.

## Conclusion

We have now seen a first example of algorithms that can be used for optimizing tensor networks, either to simulate dynamics or to find groundstates. We conclude by mentioning that this is only the tip of the iceberg, and that there exist many more algorithms that can be used to solve a variety of problems.

````{admonition} Outlook
To close out this lecture, we briefly comment on the higher-dimensional generalizations of the TEBD procedure and the difficulties this brings with it. For local quantum Hamiltonians in higher dimensions we can follow a similar procedure, where we split the full Hamiltonian into a sum of parts that each only contain non-overlapping local terms. Time evolution can then be simulated by applying a similar sequence of layers, where in each layer we evolve with all local operators in a given Hamiltonian part in parallel.

The problem with this approach however is that the local update step {ref}`tebd_trunc` is ill-conditioned for higher-dimensional networks if the full quantum state is not taken into account for the truncation. Indeed, while in the one-dimensional case the rest of the network surrounding the sites we want to update can be brought into account exactly by working in an appropriate gauge, this is not possible in general. Consider for example a general network where we want to apply some update to the central site,
```{image} /_static/figures/alg/tensor_network.svg
:scale: 6%
:name: tensor_network
:align: center
```

Since this network contains loops, there is no way to exactly capture the surrounding network in general. One instead has to resort to approximation techniques for the *environments* of a given update site, where the quality of the environment approximations directly affects the stability of the local update. The simplest way of doing this is to use the so-called *simple update* procedure {cite}`jiang2008accurate` where all loops in the network are simply ignored and the environment is approximated by a product state,
```{image} /_static/figures/alg/simple_update.svg
:scale: 6%
:name: simple_update
:align: center
```

More accurate results can be obtained by taking into account the full quantum state of the system in each local update by means of the *full update* procedure {cite}`jordan2008classical`. However, this gain in accuracy comes with a substantial increase in computational cost due to the full environment approximation at each step.
````
--------------------------------------------------------------------------------
/lectures/4-Algorithms/FixedpointAlgorithms.md:
--------------------------------------------------------------------------------
---
jupytext:
  formats: md:myst
  text_representation:
    extension: .md
    format_name: myst
kernelspec:
  display_name: Julia
  language: julia
  name: julia-1.9
---

(fixed_point_algorithms)=
# Fixed-Point Algorithms

In this section we introduce two algorithms for approximating the ground state of local gapped Hamiltonians using matrix product state techniques. Approximating ground states in a variational manner boils down to minimizing
```{math}
\min_{\ket{\psi}\in D} \frac{\braket{\psi|H|\psi}}{\braket{\psi|\psi}},
```
over a restricted class of states $D$. For simplicity, we will assume the Hamiltonian under consideration has an MPO representation of the form
```{image} /_static/FixedpointAlgorithms/mpoHam.svg
:scale: 12%
:name: mpoHam
:align: center
```
which can encode interactions of arbitrary range, as discussed in the previous section. In this formulation, approximating the ground state of $H$ is equivalent to finding the MPS fixed point of the MPO Hamiltonian corresponding to the eigenvalue $\Lambda$ with the smallest real part,
```{image} /_static/FixedpointAlgorithms/fixedpoint.svg
:scale: 12%
:name: fixedpoint
:align: center
```

In the algorithms discussed below we optimize over matrix product states of a fixed finite bond dimension. In the first algorithm, known as DMRG (density matrix renormalization group), the states we consider are finite MPS, whereas the second algorithm, VUMPS (variational uniform matrix product state algorithm), as the name suggests, optimizes over uniform MPS. Hence, VUMPS enables direct optimization in the thermodynamic limit, without breaking translation invariance.

Our exposition of DMRG closely follows the one in {cite}`bridgeman2017handwaving`, and that of VUMPS closely follows the excellent set of lecture notes {cite}`vanderstraeten2019tangentspace`.

## DMRG
Starting from a random MPS ansatz, DMRG tries to approximate the ground state by sequentially optimizing over the MPS tensors one by one, sweeping back and forth through the chain until convergence is reached. Let us discuss this algorithm in a bit more detail step by step.

### Algorithm

Let us consider a random ansatz, by taking random tensors $\{A_1,A_2,...,A_L\}$, $L$ being the number of sites. Fixing all tensors but the one at site $i$, the local tensor $A_i$ is updated according to

```{image} /_static/FixedpointAlgorithms/localUpdate.svg
:scale: 12%
:name: localUpdate
:align: center
```

Though seemingly daunting, we can turn this problem into a simple eigenvalue problem by making full use of the mixed gauge.
By bringing all tensors to the right of $A_i$ into right canonical form and those to the left into left canonical form, the denominator simply becomes $\braket{A_i|A_i}$ and the update reduces to

```{image} /_static/FixedpointAlgorithms/localUpdate2.svg
:scale: 12%
:name: localUpdate2
:align: center
```

Here the *effective Hamiltonian* $\mathcal H_i$, defined as

```{image} /_static/FixedpointAlgorithms/effHam.svg
:scale: 12%
:name: effHam
:align: center
```

encodes the effect of the full system Hamiltonian on the current center site $i$. The variational problem of the local update can then be solved by finding the eigenvector of $\mathcal{H}_i$ corresponding to the smallest real eigenvalue, and this is repeated for every site while sweeping back and forth through the chain, each time moving the orthogonality center of the MPS. At each update step a large part of the effective Hamiltonian can simply be reused, making the routine very efficient. Notice however that DMRG manifestly breaks translation invariance by updating one tensor at a time. As we will see, VUMPS does not suffer from this artefact.

From this brief explanation it should be clear that DMRG is a surprisingly simple algorithm. Nevertheless DMRG has proven itself time and time again, and is the most successful algorithm for variationally approximating the ground state of local gapped (1+1)d Hamiltonians. DMRG is implemented in MPSKit and can be called by `DMRG()`.

### Example

Let us illustrate the use of DMRG in MPSKit by approximating the ground state of the transverse field Ising model. The Ising model is implemented in MPSKitModels as follows
```{math}
H = -J\left(\sum_{\langle i,j \rangle} Z_i Z_j + \sum_i \left(h_x X_i + h_z Z_i\right)\right),
```
where we are free to choose the parameters $J$, $h_x$ and $h_z$, and $X$ and $Z$ are the generators of $\mathfrak{su}(2)$, and thus differ from the usual Pauli matrices by a factor of $\frac{1}{2}$.

Let us consider 16 lattice sites, bond dimension 12, open boundary conditions, and let us stick to the default critical values of $h_x=0.5$ and $h_z=0$. Finding the ground state using DMRG then only takes a handful of iterations!

```{code-cell} julia
using TensorKit, MPSKit, MPSKitModels

d = 2 # Physical dimension
L = 16 # Length of the spin chain
D = 12 # Bond dimension

H = transverse_field_ising()

algorithm = DMRG(); # Summon DMRG
Ψ = FiniteMPS(L, ℂ^d, ℂ^D) # Random MPS ansatz with bond dimension D
Ψ₀,_ = find_groundstate(Ψ, H, algorithm);
```

## VUMPS

As mentioned above, VUMPS optimizes uniform MPS directly in the thermodynamic limit. Since the total energy becomes unbounded in this limit, our objective should rather be to minimize the energy density.
When working in the mixed gauge, this minimization problem can be represented diagrammatically as

```{image} /_static/FixedpointAlgorithms/energyOpt.svg
:scale: 12%
:name: energyOpt
:align: center
```

where we have introduced the left and right fixed points $F_L$ and $F_R$ defined as

```{image} /_static/FixedpointAlgorithms/env.svg
:scale: 12%
:name: env
:align: center
```

which obey the normalization condition

```{image} /_static/FixedpointAlgorithms/envNorm.svg
:scale: 12%
:name: envNorm
:align: center
```

The VUMPS algorithm offers the advantage of global optimization by design, since the algorithm, contrary to DMRG, does not rely on individual updates of local tensors.

Given a Hamiltonian of the form mentioned above and an initial random uniform MPS defined by $\{A_L, A_R, C\}$, VUMPS approximates the ground state by finding an approximate solution to the fixed-point equations
```{math}
A_C' = H_{A_C}(A_C), \\
C' = H_C(C), \\
A_C = A_LC = CA_R.
```
A detailed derivation that these equations characterize the variational minimum in the manifold of uniform MPS is beyond the scope of these notes, but see {cite}`vanderstraeten2019tangentspace`.

In these equations the effective Hamiltonians $H_{A_C}$ and $H_{C}$, acting on $A_C$ and $C$ respectively, are given by

```{image} /_static/FixedpointAlgorithms/H_AC.svg
:scale: 12%
:name: H_AC
:align: center
```

```{image} /_static/FixedpointAlgorithms/H_C.svg
:scale: 12%
:name: H_C
:align: center
```

The last equation then simply states that $C$ intertwines the left- and right-orthonormal forms of the tensor $A$.


### Algorithm

Let us now explain step by step how VUMPS finds an approximate solution to the fixed-point equations in an iterative way.


1. We initialize the algorithm with the random guess $\{A_L, A_R, C\}$, and choose a tolerance $\eta$.

2. We first solve the first two eigenvalue equations
```{math}
A_C = H_{A_C}(A_C), \\
C = H_C(C),
```
using for example an Arnoldi algorithm with the previous approximations of $A_C$ and $C$ as initial guess. This yields two tensors $\tilde A_C$ and $\tilde C$.

3. From $\tilde A_C$ and $\tilde C$ we compute $\tilde A_L$ and $\tilde A_R$ that minimize the following two-norms

```{math}
\epsilon_L = \min_{A_L^\dagger A_L=1} ||\tilde A_C-\tilde A_L\tilde C||_2, \\
\epsilon_R = \min_{A_R A_R^\dagger=1} ||\tilde A_C-\tilde C\tilde A_R||_2,
```
and thus approximately solve the last equation. Note that the minimum is taken over left- and right-isometric matrices, respectively. We comment below on the analytic solution of these equations and how this analytic solution can be approximated efficiently.

4. Update $A_L\leftarrow\tilde A_L$, $A_R\leftarrow\tilde A_R$ and $C\leftarrow\tilde C$.

5. Evaluate $\epsilon=\max(\epsilon_L,\epsilon_R)$ and repeat until $\epsilon$ is below the tolerance $\eta$.

Let us finally comment on solving the minimization problem to approximate $\tilde A_{L/R}$.
A beautiful result in linear algebra states that the minimum is exactly given by $\tilde A_L=U_lV_l^\dagger$, where $U_l$ and $V_l$ are the isometries arising from the singular value decomposition $\tilde A_C\tilde C^\dagger=U_l\Sigma_lV_l^\dagger$, and similarly $\tilde A_R=U_rV_r^\dagger$, where $\tilde C^\dagger\tilde A_C=U_r\Sigma_rV_r^\dagger$. Even though this approach works well for the first iteration steps, it might not be the best solution close to convergence. When approaching the exact solution $A^s_C=A^s_LC=CA^s_R$, the singular values in $\Sigma_{l/r}$ become very small, so that in finite precision arithmetic the singular vectors in the isometries $U_{l/r}$ and $V_{l/r}$ are poor approximations of the exact singular vectors. A robust and close to optimal solution turns out to be
```{math}
\tilde A_L = U^l_{A_C}(U^l_C)^\dagger,\qquad \tilde A_R = (U^r_C)^\dagger U^r_{A_C},
```
where the $U$'s are the unitaries appearing in the left and right polar decompositions
```{math}
\tilde A_C = U^l_{A_C}P^l_{A_C},\qquad \tilde C = U^l_CP^l_C,\\
\tilde A_C = P^r_{A_C}U^r_{A_C},\qquad \tilde C = P^r_CU^r_C.
```

### Example

Let us demonstrate the algorithm using MPSKit by estimating the ground state energy density of the spin-1 XXX model. The VUMPS algorithm is called in the same way as we called DMRG. We initialize a random initial MPS with bond dimension 12 and physical dimension 3 (because the spin-1 representation of SU(2) is $2\cdot1+1=3$-dimensional). Obviously we don't have to specify a system size, because we work directly in the thermodynamic limit.

```{code-cell} julia
H = heisenberg_XYZ()

Ψ = InfiniteMPS(ℂ^3, ℂ^D)
algorithm = VUMPS()
Ψ₀, envs = find_groundstate(Ψ, H, algorithm);
```

It takes about 30 iterations and a second or two to reach convergence. Let us gauge how well the ground state energy density was approximated by calling

```{code-cell} julia
expectation_value(Ψ₀, H)
```

The value we obtain here is to be compared with the quasi-exact value -1.401 484 038 971 2(2) obtained in {cite}`haegeman2011time`. As you can see, even with such a small bond dimension we can easily approximate the ground state energy up to 3 decimals.
--------------------------------------------------------------------------------
/lectures/5-Tutorials/FiniteEntanglementScaling.md:
--------------------------------------------------------------------------------
---
jupytext:
  formats: md:myst
  text_representation:
    extension: .md
    format_name: myst
kernelspec:
  display_name: Julia
  language: julia
  name: julia-1.9
---

# Finite Entanglement Scaling

Tutorial on finite entanglement scaling with uniform MPS, using [MPSKit.jl](https://github.com/maartenvd/MPSKit.jl).

*Coming soon*.
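In the meantime, the sketch below indicates the flavor of computation this tutorial will cover: optimizing uniform MPS of increasing bond dimension and tracking how a ground-state quantity converges. It is a minimal sketch assuming MPSKit.jl and MPSKitModels.jl as used in the previous sections, with an arbitrary choice of model and bond dimensions; the actual tutorial will study genuine scaling quantities such as correlation lengths and entanglement entropies.

```julia
using TensorKit, MPSKit, MPSKitModels

H = transverse_field_ising()   # its default parameters are the critical ones used earlier

for D in (4, 8, 16)            # an arbitrary sequence of bond dimensions
    Ψ = InfiniteMPS(ℂ^2, ℂ^D)
    Ψ₀, _ = find_groundstate(Ψ, H, VUMPS())
    @show D, expectation_value(Ψ₀, H)   # ground-state energy density at this bond dimension
end
```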
18 | -------------------------------------------------------------------------------- /lectures/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" 3 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 4 | MPSKit = "bb1c41ca-d63c-52ed-829e-0820dda26502" 5 | MPSKitModels = "ca635005-6f8c-4cd1-b51d-8491250ef2ab" 6 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" 7 | SUNRepresentations = "1a50b95c-7aac-476d-a9ce-2bfc675fc617" 8 | Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" 9 | TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec" 10 | TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" 11 | WignerSymbols = "9f57e263-0b3d-5e2e-b1be-24f2bb48858b" 12 | -------------------------------------------------------------------------------- /lectures/References.md: -------------------------------------------------------------------------------- 1 | --- 2 | jupytext: 3 | text_representation: 4 | extension: .md 5 | format_name: myst 6 | kernelspec: 7 | display_name: Julia 8 | language: julia 9 | name: julia-1.9 10 | --- 11 | 12 | (references)= 13 | # References 14 | 15 | ```{bibliography} 16 | :style: alpha 17 | ``` 18 | -------------------------------------------------------------------------------- /lectures/_config.yml: -------------------------------------------------------------------------------- 1 | # Book settings 2 | # Learn more at https://jupyterbook.org/customize/config.html 3 | 4 | title: TensorTutorials 5 | author: Jacob Bridgeman, Lander Burgelman, Lukas Devos, Jutho Haegeman, Daan Maertens, Bram Vancraeynest-De Cuiper and Kevin Vervoort 6 | logo: logo.png 7 | 8 | # Force re-execution of notebooks on each build. 9 | # See https://jupyterbook.org/content/execute.html 10 | execute: 11 | execute_notebooks: cache 12 | timeout: 300 13 | 14 | # Define the name of the latex output file for PDF builds 15 | latex: 16 | latex_documents: 17 | targetname: book.tex 18 | 19 | # Add a bibtex file so that we can create citations 20 | bibtex_bibfiles: 21 | - assets/references.bib 22 | 23 | 24 | # Information about where the book exists on the web 25 | repository: 26 | url: https://github.com/quantumghent/TensorTutorials # Online location of your book 27 | path_to_book: lectures/ # Optional path to your book, relative to the repository root 28 | branch: main # Which branch of the repository should be used when creating links (optional) 29 | 30 | sphinx: 31 | extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_tojupyter] 32 | config: 33 | bibtex_reference_style: author_year 34 | bibtex_default_style: alpha 35 | nb_mime_priority_overrides: [ 36 | # HTML 37 | ['html', 'application/vnd.jupyter.widget-view+json', 10], 38 | ['html', 'application/javascript', 20], 39 | ['html', 'text/html', 30], 40 | ['html', 'text/latex', 40], 41 | ['html', 'image/svg+xml', 50], 42 | ['html', 'image/png', 60], 43 | ['html', 'image/jpeg', 70], 44 | ['html', 'text/markdown', 80], 45 | ['html', 'text/plain', 90], 46 | # Jupyter Notebooks 47 | ['jupyter', 'application/vnd.jupyter.widget-view+json', 10], 48 | ['jupyter', 'application/javascript', 20], 49 | ['jupyter', 'text/html', 30], 50 | ['jupyter', 'text/latex', 40], 51 | ['jupyter', 'image/svg+xml', 50], 52 | ['jupyter', 'image/png', 60], 53 | ['jupyter', 'image/jpeg', 70], 54 | ['jupyter', 'text/markdown', 80], 55 | ['jupyter', 'text/plain', 90], 56 | # LaTeX 57 | ['latex', 'text/latex', 10], 58 | ['latex', 'application/pdf', 20], 59 | 
['latex', 'image/png', 30], 60 | ['latex', 'image/jpeg', 40], 61 | ['latex', 'text/markdown', 50], 62 | ['latex', 'text/plain', 60], 63 | # Link Checker 64 | ['linkcheck', 'text/plain', 10], 65 | ] 66 | highlight_language: julia 67 | html_favicon: _static/lectures-favicon.ico 68 | html_baseurl: https://quantumghent.github.io/TensorTutorials 69 | html_theme: quantumghent_book_theme 70 | html_static_path: ['_static'] 71 | html_theme_options: 72 | header_organisation_url: https://quantumghent.github.io/ 73 | header_organisation_github: https://github.com/quantumghent/ 74 | header_organisation: QuantumGroup@UGent 75 | repository_url: https://github.com/quantumghent/TensorTutorials/ 76 | repository_branch: main 77 | nb_repository_url: https://github.com/quantumghent/TensorTutorials.notebooks/ 78 | nb_branch: main 79 | nb_url: https://quantumghent.github.io/TensorTutorials/ 80 | description: This website presents a set of lectures on Tensor Network methods 81 | keywords: Julia, Tensor Networks, Quantum Many-Body Physics, Statistical Mechanics 82 | analytics: 83 | google_analytics_id: G-3PCWRLGWND 84 | launch_buttons: 85 | colab_url: https://colab.research.google.com 86 | mathjax3_config: 87 | tex: 88 | macros: 89 | "argmax" : "arg\\,max" 90 | "argmin" : "arg\\,min" 91 | packages: 92 | '[+]': ['physics'] 93 | mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js 94 | rediraffe_redirects: 95 | index_toc.md: intro.md 96 | tojupyter_default_lang: julia 97 | tojupyter_lang_synonyms: ['julia-1.9'] 98 | tojupyter_static_file_path: ["source/_static", "_static"] 99 | tojupyter_target_html: true 100 | tojupyter_urlpath: "https://quantumghent.github.io/TensorTutorials/" 101 | tojupyter_image_urlpath: "https://quantumghent.github.io/TensorTutorials/_static/" 102 | tojupyter_kernels: 103 | julia: 104 | kernelspec: 105 | display_name: Julia 106 | language: julia 107 | name: julia-1.9 108 | tojupyter_images_markdown: true 109 | -------------------------------------------------------------------------------- /lectures/_static/FiniteMPS/leftOrth.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /lectures/_static/InfiniteMPS/expVal2.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /lectures/_static/InfiniteMPS/expVal3.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- 
/lectures/_static/InfiniteMPS/gaugeTransform.svg, leftGauge.svg, leftOrth.svg, rightOrth.svg, traceNorm.svg; /lectures/_static/MPO/mpo.svg; /lectures/_static/SymmetricTensors/Fib_fusiontrees.svg: SVG figure sources (markup omitted from this dump).
| 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /lectures/_static/SymmetricTensors/X.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /lectures/_static/SymmetricTensors/ZZ.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /lectures/_static/SymmetricTensors/fusiontree.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/grouping.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/leftOrth.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/matmat.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/matvec.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/outer-product.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /lectures/_static/TensorNetworks/tr.svg: 
/lectures/_static/TensorNetworks/vecvec.svg: [SVG figure; image data omitted in this export]
/lectures/_static/figures/alg/simple_update.svg: [SVG figure; image data omitted in this export]
/lectures/_static/figures/alg/tensor_network.svg: [SVG figure; image data omitted in this export]
/lectures/_static/lectures-favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantumghent/TensorTutorials/c941aef5c7c1dad0d692afc2d6143dfb75a1339a/lectures/_static/lectures-favicon.ico -------------------------------------------------------------------------------- /lectures/_toc.yml: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | # Learn more at https://jupyterbook.org/customize/toc.html 3 | 4 | format: jb-book 5 | root: intro 6 | parts: 7 | - caption: Preliminaries 8 | numbered: true 9 | chapters: 10 | - file: 0-Preliminaries/Software 11 | - file: 0-Preliminaries/Resources 12 | - caption: Introduction to Quantum Many-Body Physics 13 | numbered: true 14 | chapters: 15 | - file: 1-Introduction/QuantumMechanics 16 | - file: 1-Introduction/ManyBody 17 | - file: 1-Introduction/FockSpace 18 | - file: 1-Introduction/Observables 19 | - file: 1-Introduction/QuantumToClassical 20 | - caption: Tensors and Tensor Networks 21 | numbered: true 22 | chapters: 23 | - file: 2-TensorNetworks/LinearAlgebra 24 | - file: 2-TensorNetworks/TensorNetworks 25 | - file: 2-TensorNetworks/TensorNetworkStates 26 | - file: 2-TensorNetworks/Symmetries 27 | - caption: Matrix Product States 28 | numbered: true 29 | chapters: 30 | - file: 3-MatrixProductStates/MatrixProductStates 31 | - file: 3-MatrixProductStates/InfiniteMPS 32 | - file: 3-MatrixProductStates/Algorithms 33 | - file: 3-MatrixProductStates/MatrixProductOperators 34 | - caption: Tensor Network Algorithms 35 | numbered: true 36 | chapters: 37 | - file: 4-Algorithms/FixedpointAlgorithms 38 | - file: 4-Algorithms/TimeEvolutionAlgorithms 39 | - caption: In-Depth Tutorials 40 | numbered: true 41 | chapters: 42 | - file: 5-Tutorials/SymmetricTensors 43 | - file: 5-Tutorials/FiniteEntanglementScaling 44 | - caption: Other 45 | numbered: true 46 | chapters: 47 | - file: References 48 | --------------------------------------------------------------------------------
/lectures/assets/references.bib: -------------------------------------------------------------------------------- 1 | @Article{pfeifer2014faster, 2 | author = {Pfeifer, Robert N. C. and Haegeman, Jutho and Verstraete, Frank}, 3 | journal = {Physical Review E}, 4 | title = {Faster identification of optimal contraction sequences for tensor networks}, 5 | year = {2014}, 6 | pages = {033315}, 7 | volume = {90}, 8 | doi = {10.1103/PhysRevE.90.033315}, 9 | eprint = {1304.6112}, 10 | issue = {3}, 11 | numpages = {18}, 12 | } 13 | 14 | @Article{vanderstraeten2019tangentspace, 15 | author = {Vanderstraeten, Laurens and Haegeman, Jutho and Verstraete, Frank}, 16 | journal = {SciPost Physics Lecture Notes}, 17 | title = {Tangent-Space Methods for Uniform Matrix Product States}, 18 | year = {2019}, 19 | issn = {2590-1990}, 20 | pages = {007}, 21 | doi = {10.21468/SciPostPhysLectNotes.7}, 22 | eprint = {1810.07006}, 23 | langid = {english}, 24 | } 25 | 26 | @Article{haegeman2011time, 27 | author = {Haegeman, Jutho and Cirac, J Ignacio and Osborne, Tobias J and Pi{\v{z}}orn, Iztok and Verschelde, Henri and Verstraete, Frank}, 28 | journal = {Physical Review Letters}, 29 | title = {Time-dependent variational principle for quantum lattices}, 30 | year = {2011}, 31 | pages = {070601}, 32 | volume = {107}, 33 | doi = {10.1103/PhysRevLett.107.070601}, 34 | eprint = {1103.0936}, 35 | number = {7}, 36 | } 37 | 38 | @Article{bridgeman2017handwaving, 39 | author = {Jacob C Bridgeman and Christopher T Chubb}, 40 | journal = {Journal of Physics A: Mathematical and Theoretical}, 41 | title = {Hand-waving and interpretive dance: an introductory course on tensor networks}, 42 | year = {2017}, 43 | pages = {223001}, 44 | volume = {50}, 45 | doi = {10.1088/1751-8121/aa6dc3}, 46 | eprint = {1603.03039}, 47 | number = {22}, 48 | } 49 | 50 | @Article{feiguin2007interacting, 51 | author = {Feiguin, Adrian and Trebst, Simon and Ludwig, Andreas WW and Troyer, Matthias and Kitaev, Alexei and Wang, Zhenghan and Freedman, Michael H}, 52 | journal = {Physical Review Letters}, 53 | title = {Interacting anyons in topological quantum liquids: The golden chain}, 54 | year = {2007}, 55 | pages = {160409}, 56 | volume = {98}, 57 | doi = {10.1103/PhysRevLett.98.160409}, 58 | eprint = {cond-mat/0612341}, 59 | number = {16}, 60 | } 61 | 62 | @Article{rams2018precise, 63 | author = {Rams, Marek M. and Czarnik, Piotr and Cincio, Lukasz}, 64 | journal = {Physical Review X}, 65 | title = {Precise Extrapolation of the Correlation Function Asymptotics in Uniform Tensor Network States with Application to the {{Bose-Hubbard}} and {{XXZ}} Models}, 66 | year = {2018}, 67 | issn = {2160-3308}, 68 | pages = {041033}, 69 | volume = {8}, 70 | doi = {10.1103/PhysRevX.8.041033}, 71 | eprint = {1801.08554}, 72 | number = {4}, 73 | } 74 | 75 | @Misc{vandamme2023efficient, 76 | author = {Maarten Van Damme and Jutho Haegeman and Ian McCulloch and Laurens Vanderstraeten}, 77 | title = {Efficient higher-order matrix product operators for time evolution}, 78 | year = {2023}, 79 | eprint = {2302.14181}, 80 | } 81 | 82 | @Article{FiniteTemperature, 83 | author = {Verstraete, F. and Garc\'{\i}a-Ripoll, J. J. and Cirac, J. 
I.}, 84 | journal = {Physical Review Letters}, 85 | title = {Matrix Product Density Operators: Simulation of Finite-Temperature and Dissipative Systems}, 86 | year = {2004}, 87 | pages = {207204}, 88 | volume = {93}, 89 | doi = {10.1103/PhysRevLett.93.207204}, 90 | eprint = {cond-mat/0406426}, 91 | issue = {20}, 92 | numpages = {4}, 93 | } 94 | 95 | @Article{Calabrese_2005, 96 | author = {Pasquale Calabrese and John Cardy}, 97 | journal = {Journal of Statistical Mechanics: Theory and Experiment}, 98 | title = {Evolution of entanglement entropy in one-dimensional systems}, 99 | year = {2005}, 100 | pages = {P04010}, 101 | volume = {2005}, 102 | doi = {10.1088/1742-5468/2005/04/p04010}, 103 | eprint = {cond-mat/0503393}, 104 | number = {04}, 105 | } 106 | 107 | @Article{Lubich_2015, 108 | author = {Christian Lubich and Ivan V. Oseledets and Bart Vandereycken}, 109 | journal = {{SIAM} Journal on Numerical Analysis}, 110 | title = {Time Integration of Tensor Trains}, 111 | year = {2015}, 112 | pages = {917--941}, 113 | volume = {53}, 114 | doi = {10.1137/140976546}, 115 | eprint = {1407.2042}, 116 | number = {2}, 117 | } 118 | 119 | @Article{HaegemanTDVP, 120 | author = {Haegeman, Jutho and Lubich, Christian and Oseledets, Ivan and Vandereycken, Bart and Verstraete, Frank}, 121 | journal = {Physical Review B}, 122 | title = {Unifying time evolution and optimization with matrix product states}, 123 | year = {2016}, 124 | pages = {165116}, 125 | volume = {94}, 126 | doi = {10.1103/PhysRevB.94.165116}, 127 | eprint = {1408.5056}, 128 | issue = {16}, 129 | numpages = {10}, 130 | } 131 | 132 | @Article{zauner-stauber2018variational, 133 | author = {{Zauner-Stauber}, V. and Vanderstraeten, L. and Fishman, M. T. and Verstraete, F. and Haegeman, J.}, 134 | journal = {Physical Review B}, 135 | title = {Variational Optimization Algorithms for Uniform Matrix Product States}, 136 | year = {2018}, 137 | issn = {2469-9950, 2469-9969}, 138 | pages = {045145}, 139 | volume = {97}, 140 | doi = {10.1103/PhysRevB.97.045145}, 141 | eprint = {1701.07035}, 142 | number = {4}, 143 | } 144 | 145 | @Article{nishino1996corner, 146 | author = {Nishino, T. and Okunishi, K.}, 147 | journal = {Journal of the Physical Society of Japan}, 148 | title = {Corner {{Transfer Matrix Renormalization Group Method}}}, 149 | year = {1996}, 150 | issn = {0031-9015, 1347-4073}, 151 | pages = {891--894}, 152 | volume = {65}, 153 | doi = {10.1143/JPSJ.65.891}, 154 | eprint = {cond-mat/9507087}, 155 | number = {4}, 156 | } 157 | 158 | @InCollection{hatano2005finding, 159 | author = {Hatano, Naomichi and Suzuki, Masuo}, 160 | booktitle = {Quantum Annealing and Other Optimization Methods}, 161 | title = {Finding {{Exponential Product Formulas}} of {{Higher Orders}}}, 162 | year = {2005}, 163 | pages = {37--68}, 164 | volume = {679}, 165 | doi = {10.1007/11526216_2}, 166 | eprint = {math-ph/0506007}, 167 | } 168 | 169 | @Article{vidal2003efficient, 170 | author = {Vidal, Guifre}, 171 | journal = {Physical Review Letters}, 172 | title = {Efficient Classical Simulation of Slightly Entangled Quantum Computations}, 173 | year = {2003}, 174 | issn = {0031-9007, 1079-7114}, 175 | pages = {147902}, 176 | volume = {91}, 177 | doi = {10.1103/PhysRevLett.91.147902}, 178 | eprint = {quant-ph/0301063}, 179 | number = {14}, 180 | } 181 | 182 | @Article{jiang2008accurate, 183 | author = {Jiang, H. C. and Weng, Z. Y. 
and Xiang, T.}, 184 | journal = {Physical Review Letters}, 185 | title = {Accurate Determination of Tensor Network State of Quantum Lattice Models in Two Dimensions}, 186 | year = {2008}, 187 | issn = {0031-9007, 1079-7114}, 188 | pages = {090603}, 189 | volume = {101}, 190 | doi = {10.1103/PhysRevLett.101.090603}, 191 | eprint = {0806.3719}, 192 | number = {9}, 193 | } 194 | 195 | @Article{jordan2008classical, 196 | author = {Jordan, J. and Or{\'u}s, R. and Vidal, G. and Verstraete, F. and Cirac, J. I.}, 197 | journal = {Physical Review Letters}, 198 | title = {Classical {{Simulation}} of {{Infinite-Size Quantum Lattice Systems}} in {{Two Spatial Dimensions}}}, 199 | year = {2008}, 200 | pages = {250602}, 201 | volume = {101}, 202 | doi = {10.1103/PhysRevLett.101.250602}, 203 | eprint = {cond-mat/0703788}, 204 | number = {25}, 205 | } 206 | 207 | @Article{onsager1944crystal, 208 | author = {Onsager, Lars}, 209 | journal = {Physical Review}, 210 | title = {Crystal Statistics. I. A Two-Dimensional Model with an Order-Disorder Transition}, 211 | year = {1944}, 212 | pages = {117--149}, 213 | volume = {65}, 214 | doi = {10.1103/PhysRev.65.117}, 215 | issue = {3-4}, 216 | numpages = {0}, 217 | } 218 | 219 | @Article{Landau:1937obd, 220 | author = {Landau, L. D.}, 221 | journal = {Journal of Experimental and Theoretical Physics}, 222 | title = {{On the theory of phase transitions}}, 223 | year = {1937}, 224 | pages = {19--32}, 225 | volume = {7}, 226 | doi = {10.1016/B978-0-08-010586-4.50034-1}, 227 | editor = {ter Haar, D.}, 228 | } 229 | 230 | @Article{hubig17generic, 231 | title = {Generic construction of efficient matrix product operators}, 232 | author = {Hubig, C. and McCulloch, I. P. and Schollw\"ock, U.}, 233 | journal = {Phys. Rev. B}, 234 | volume = {95}, 235 | issue = {3}, 236 | pages = {035129}, 237 | numpages = {12}, 238 | year = {2017}, 239 | month = {Jan}, 240 | publisher = {American Physical Society}, 241 | doi = {10.1103/PhysRevB.95.035129}, 242 | url = {https://link.aps.org/doi/10.1103/PhysRevB.95.035129} 243 | } 244 | 245 | @Article{eckart1936approximation, 246 | title = {The Approximation of One Matrix by Another of Lower Rank}, 247 | author = {Eckart, Carl and Young, Gale}, 248 | year = {1936}, 249 | month = {Sep}, 250 | journal = {Psychometrika}, 251 | volume = {1}, 252 | number = {3}, 253 | pages = {211--218}, 254 | issn = {1860-0980}, 255 | doi = {10.1007/BF02288367}, 256 | langid = {english}, 257 | keywords = {Canonic Form,Lower Rank,Mathematical Problem,Public Policy,Statistical Theory} 258 | } 259 | 260 | @Comment{jabref-meta: databaseType:bibtex;} 261 | -------------------------------------------------------------------------------- /lectures/intro.md: -------------------------------------------------------------------------------- 1 | # Tensor Network Methods with Julia 2 | 3 | This is a series of tutorials on tensor network methods and their applications in quantum 4 | physics as well as statistical mechanics. It illustrates the theory and aims to give 5 | hands-on practice by providing Julia code examples, as well as showcasing the software 6 | libraries that have been developed. 7 | 8 | The lectures have been designed and written by Jacob Bridgeman, Lander Burgelman, Lukas 9 | Devos, Jutho Haegeman, Daan Maertens, Bram Vancraeynest-De Cuiper and Kevin Vervoort.
10 | 11 | ```{tableofcontents} 12 | ``` 13 | -------------------------------------------------------------------------------- /lectures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantumghent/TensorTutorials/c941aef5c7c1dad0d692afc2d6143dfb75a1339a/lectures/logo.png -------------------------------------------------------------------------------- /lectures/requirements.txt: -------------------------------------------------------------------------------- 1 | docutils==0.16 2 | sphinx==5.0.2 3 | jupyter-book==0.15.1 4 | quantumghent-book-theme @ git+https://github.com/quantumghent/quantumghent-book-theme@main 5 | sphinx-tojupyter==0.3.0 6 | sphinxext.rediraffe==0.2.7 --------------------------------------------------------------------------------
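The `requirements.txt` above pins the Python tooling used to render the lectures as a Jupyter Book, and the `_toc.yml` shown earlier defines the chapter layout it follows. As a minimal sketch of a local build — assuming the standard `jupyter-book` command-line interface shipped by the pinned `jupyter-book==0.15.1` package, that the book source lives in the `lectures/` directory as the paths in this repository suggest, and that `--path-output ./` is an acceptable local choice — the steps might look like:

```bash
# Install the pinned documentation tooling listed in lectures/requirements.txt
pip install -r lectures/requirements.txt

# Render the book; with --path-output ./ the generated HTML ends up under ./_build/html
jupyter-book build lectures --path-output ./
```

The resulting site can then be previewed locally by opening `_build/html/index.html` in a browser; the exact invocation on a given machine may differ, so treat this as an illustrative sketch rather than the project's canonical build procedure.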