├── .gitignore ├── 0_resources ├── 1502.05767.pdf ├── NA-08-01.pdf ├── README.md ├── adjoint.pdf └── deeplearning2017_johnson_automatic_differentiation_01.pdf ├── 1_deep_learning ├── README.md ├── brachistochrone │ ├── README.md │ ├── brachistochrone.py │ └── exact_solver.py ├── computation_graph.py ├── lecture_notes.pdf ├── realnvp │ ├── README.md │ ├── main.py │ ├── objectives │ │ ├── __init__.py │ │ ├── gaussian.py │ │ ├── mog.py │ │ ├── ring2ds.py │ │ ├── template.py │ │ └── wave.py │ └── realnvp.py ├── schrodinger.py ├── simple_ad.py ├── slides │ ├── SSSS-1-DLReview.pdf │ ├── SSSS-2-GenerativeModels.pdf │ ├── SSSS-3-DiffProgm.pdf │ └── SSSS-4-DLApplication.pdf └── tanh.py ├── 2_tensor_network ├── README.md ├── Tutorial_tensor_network.pdf ├── assets │ ├── Z.png │ ├── Z2.png │ ├── gen1.png │ ├── gen2.png │ ├── gen3.png │ ├── gen4.png │ ├── gradients.png │ ├── image-20190430141119642.png │ ├── image-20190430143433180.png │ ├── image-20190430144239877.png │ ├── image-20190430144449213.png │ ├── image-20190430145634644.png │ ├── image-20190430150813001.png │ ├── image-20190430152031663.png │ ├── init.png │ ├── init2.png │ └── px.png ├── imgs │ ├── L.png │ ├── L0.png │ ├── Z.png │ ├── born_machine.png │ ├── cond_prob.png │ ├── cp.png │ ├── joint_prob.png │ ├── mnist_mps.png │ ├── mps.png │ ├── mps1.png │ ├── mps2.png │ ├── mps_left.png │ ├── psi_prime.png │ ├── rank_one.png │ ├── recon0.png │ ├── recon1.png │ ├── supunsup.png │ ├── tensor.png │ ├── tensor_diagram.png │ ├── tensor_networks.png │ ├── training.png │ ├── tucker.png │ └── two_qubits.png ├── kacward.py ├── mnist ├── mnist28.npy ├── mnist784_bin_1000.npy ├── mnist_100_28x28_p0.5.npy ├── mps_tutorial.ipynb ├── tensor_contraction_methods.pdf └── tensor_contraction_simple.ipynb ├── 3_julia └── julia-hands-on.ipynb ├── 4_quantum ├── QC-with-Yao.ipynb ├── README.md ├── VQE_action.ipynb ├── bloch_sphere.jl ├── graph_embeding.ipynb ├── images │ ├── diff_circuit.png │ ├── differentiable.png │ ├── fourqubit.png │ ├── hgate.png │ ├── landscape.pdf │ ├── mreset.png │ ├── qfttn.png │ ├── twoqubit.png │ └── twoqubit.py ├── kernel_learn.ipynb ├── qc_tensor_mapping.ipynb ├── qcbm_gaussian.ipynb ├── quantum_lecture_note.pdf ├── variational_quantum_circuit.ipynb └── yao-talk-2019.pdf ├── Challenge.md ├── LICENSE ├── Manifest.toml ├── Project.toml ├── README.md ├── _assets ├── SongShanHu2019.jpeg ├── SongShanHu2019.key └── c60.jpg └── src └── SSSS.jl /.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.cov 2 | *.jl.*.cov 3 | *.jl.mem 4 | *.jl~ 5 | *~ 6 | .DS_Store 7 | 8 | *.jl.cov 9 | *.jl.*.cov 10 | *.jl.mem 11 | 12 | docs/build/ 13 | docs/site/ 14 | 15 | _local/ 16 | .ipynb_checkpoints/ 17 | .vscode/ 18 | *.ipynb_checkpoints 19 | **/*.ipynb_checkpoints 20 | **/**/*.ipynb_checkpoints 21 | *.out 22 | *.log 23 | 24 | *.jld2 25 | **/*.jld2 26 | **/**/*.jld2 27 | 28 | *.jld 29 | **/*.jld 30 | **/**/*.jld 31 | 32 | _*.dat 33 | *.swp 34 | __pycache__/ 35 | 36 | *.aux 37 | -------------------------------------------------------------------------------- /0_resources/1502.05767.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/0_resources/1502.05767.pdf -------------------------------------------------------------------------------- /0_resources/NA-08-01.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/0_resources/NA-08-01.pdf -------------------------------------------------------------------------------- /0_resources/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Automatic Differentiation 4 | 5 | ### Theory 6 | 7 | https://timvieira.github.io/blog/post/2017/08/18/backprop-is-not-just-the-chain-rule/ 8 | 9 | https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf 10 | 11 | https://math.mit.edu/~stevenj/18.336/adjoint.pdf 12 | 13 | https://colah.github.io/posts/2015-08-Backprop/ 14 | 15 | http://www.cs.cornell.edu/courses/cs5740/2017sp/lectures/04-nn-compgraph.pdf 16 | 17 | ### Implementation 18 | 19 | http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/ 20 | 21 | https://github.com/mattjj/autodidact 22 | 23 | ### Applications 24 | 25 | https://arxiv.org/abs/1502.05767 26 | 27 | ## Generative Models 28 | 29 | https://openai.com/blog/generative-models/ 30 | 31 | https://deepmind.com/blog/wavenet-generative-model-raw-audio/ 32 | 33 | https://deepmind.com/blog/high-fidelity-speech-synthesis-wavenet/ 34 | 35 | https://deepgenerativemodels.github.io/notes/index.html 36 | 37 | ## Probabilistic Graphical Models 38 | 39 | https://ermongroup.github.io/cs228-notes/ 40 | 41 | ## Differentiable Programming 42 | 43 | https://www.edge.org/response-detail/26794 44 | 45 | https://medium.com/@karpathy/software-2-0-a64152b37c35 46 | 47 | https://www.youtube.com/watch?v=LjWzgTPFu14 48 | 49 | ## Neural ODE 50 | 51 | https://github.com/rtqichen/torchdiffeq 52 | 53 | https://news.ycombinator.com/item?id=18676986 54 | 55 | ## Deep Learning with PyTorch 56 | 57 | https://fleuret.org/ee559/ 58 | 59 | 60 | -------------------------------------------------------------------------------- /0_resources/adjoint.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/0_resources/adjoint.pdf -------------------------------------------------------------------------------- /0_resources/deeplearning2017_johnson_automatic_differentiation_01.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/0_resources/deeplearning2017_johnson_automatic_differentiation_01.pdf -------------------------------------------------------------------------------- /1_deep_learning/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning 2 | 3 | ## Table of Contents 4 | * [`lecture_notes.pdf`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/lecture_notes.pdf) and [`slides/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/slides) 5 | * Demo codes 6 | * Poor man's computation graph: [`computation_graph.py`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/computation_graph.py) 7 | * Variational free energy with flow model: [`realnvp/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/realnvp) 8 | * Hamiltonian inverse design with reverse mode AD: [`schrodinger.py`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/schrodinger.py) 9 | * Solving the fastest descent problem with NeuralODE [`brachistochrone/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/brachistochrone) 10 | 11 | 12 | Welcome for 
pull requests and issues! 13 | 14 | -------------------------------------------------------------------------------- /1_deep_learning/brachistochrone/README.md: -------------------------------------------------------------------------------- 1 | ## Solving the brachistochrone problem with [NeuralODE](https://github.com/rtqichen/torchdiffeq) 2 | 3 | Do you still remember the "quickest descent" problem we learned as a kid? If not, please [remind yourself](http://mathworld.wolfram.com/BrachistochroneProblem.html) a bit. 4 | 5 | Here, we are going to solve the problem by minimizing the travel time over paths parametrized by a neural network. The unusual thing is that our objective function involves an integral 6 | $$ 7 | t = \int_{x_0}^{x_1} \sqrt{\frac{1+(dy/dx)^2}{2 g (y - y_0) + v_0^2}} d x 8 | $$ 9 | We assume the particle moves from $(x_0, y_0)=(0, 0)$ to $(x_1, y_1)$ along the path $y(x)$, where $g$ is the gravitational acceleration and $v_0$ is the initial velocity. 10 | 11 | Computing the objective function amounts to integrating an ordinary differential equation, and NeuralODE computes its gradient with respect to the path accurately and efficiently! 12 | 13 | Play with the code and think about the following 14 | 15 | - [ ] Change the value of $g$ and $v_0$. Does the solution agree with your intuition? 16 | - [ ] What happens when $v_0\rightarrow 0$? Why does this happen? Could you fix it? 17 | - [ ] Is there any other cool application you can think of? You are welcome to share it with us. 18 | 19 | -------------------------------------------------------------------------------- /1_deep_learning/brachistochrone/brachistochrone.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torch.optim as optim 5 | import numpy as np 6 | 7 | from torchdiffeq import odeint_adjoint as odeint 8 | from exact_solver import solver 9 | 10 | class MLP(nn.Module): 11 | def __init__(self, hidden_size, y1=1.0): 12 | super(MLP, self).__init__() 13 | self.fc1 = nn.Linear(1, hidden_size) 14 | self.fc2 = nn.Linear(hidden_size, hidden_size) 15 | self.fc3 = nn.Linear(hidden_size, 1) 16 | 17 | self.y1 = y1 18 | 19 | def _f(self, x): 20 | out = F.softplus(self.fc1(x)) 21 | out = F.softplus(self.fc2(out)) 22 | out = self.fc3(out) 23 | return out.sum() 24 | 25 | def forward(self, x): 26 | ''' 27 | y(0) = 0 28 | y(1) = y1 29 | ''' 30 | f0 = self._f(torch.tensor([0.0])) 31 | f1 = self._f(torch.tensor([1.0])) 32 | return self._f(x) - (f0 + self.y1)*(1.0-x) - f1*x + self.y1 33 | 34 | def value_and_grad(self, x): 35 | y = self.forward(x) 36 | return y, torch.autograd.grad(y, x, grad_outputs=torch.ones(x.shape[0]), create_graph=True)[0] 37 | 38 | class Brachistochrone(nn.Module): 39 | def __init__(self, g, v0, net): 40 | super(Brachistochrone, self).__init__() 41 | self.v0 = v0 42 | self.g = g 43 | self.net = net 44 | 45 | def forward(self, x, t): 46 | with torch.enable_grad(): 47 | y, dydx = self.net.value_and_grad(x.view(-1).detach().requires_grad_()) 48 | return torch.sqrt((1+dydx**2)/(2*self.g*y+ self.v0**2)) 49 | 50 | def plot(model,para): 51 | plt.cla() 52 | xlist = torch.linspace(0.0, 1.0, 21) 53 | ylist = [model.net(torch.tensor([x])) for x in xlist] 54 | plt.plot(xlist.numpy(), ylist, lw=2,label='learned curve') 55 | plt.plot([0.0, 1.0], [0.0, model.net.y1], 'r*', ms=20) 56 | plt.gca().invert_yaxis() 57 | 58 | tlist = np.linspace(para[2],para[3],21) 59 | xlist = para[0]*(tlist- np.sin(tlist)) - para[1] 60 | ylist =
para[0]*(1 - np.cos(tlist)) -para[4] 61 | plt.plot(xlist,ylist,lw=2,label='exact') 62 | plt.legend(loc='upper right') 63 | 64 | plt.xlabel('$x$') 65 | plt.ylabel('$y$') 66 | 67 | plt.draw() 68 | plt.pause(0.01) 69 | 70 | if __name__ == '__main__': 71 | 72 | g = 10.0 #gravity 73 | v0 = 1.0 #initial velocity 74 | nh = 32 #number of hidden neurons 75 | y1 = 1.0 #fininal y coordinate 76 | 77 | para = np.append(solver(v0,g,y1),v0**2/(2*g)) # exact solution as a reference 78 | tbest = (para[3]-para[2])*np.sqrt(para[0]/g) 79 | 80 | model = Brachistochrone(g, v0, MLP(nh, y1)) 81 | optimizer = optim.Adam(model.parameters(), lr=1E-2) 82 | 83 | import matplotlib.pyplot as plt 84 | # Set up figure. 85 | fig = plt.figure(figsize=(8,8), facecolor='white') 86 | ax = fig.add_subplot(111, frameon=False) 87 | plt.ion() 88 | plt.show(block=False) 89 | 90 | for epoch in range(100): 91 | optimizer.zero_grad() 92 | t = odeint(model, torch.tensor([0.0]), torch.tensor([0.0, 1.0])) 93 | loss = t[1] - t[0] 94 | loss.backward() 95 | optimizer.step() 96 | print ("step = %d"%epoch, "time = %.5f"%loss.item(), " (exact = %.5f)"%tbest) 97 | plot(model,para) 98 | -------------------------------------------------------------------------------- /1_deep_learning/brachistochrone/exact_solver.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | def solver(v0,g,y1): 3 | #'''the number of points to draw 4 | # y = k(1-cos(theta)) 5 | # x = k(theta- sin(theta)) 6 | # Point 1:(-b,-v0**2/(2*g)) theta=0 7 | # Point 2:(0,0) theta0 8 | # Point 3:(1,1) theta1''' 9 | a = v0**2.0/(2*g) 10 | def f(para): 11 | [k,b,theta0,theta1] = para 12 | out = np.zeros(4) 13 | out[0] = k*(1-np.cos(theta0))-a 14 | out[1] = k*(theta0 - np.sin(theta0))-b 15 | out[2] = k*(1-np.cos(theta1))-y1-a 16 | out[3] = k*(theta1 - np.sin(theta1))-1-b 17 | return np.array(out) 18 | from scipy.optimize import fsolve 19 | return fsolve(f,[2.0, 0.5 , 0.5 , 2.0]) #start point 20 | 21 | if __name__ == "__main__": 22 | y=1.0 23 | v0=0 24 | g=10 25 | para = solver(v0,g,y) 26 | para = np.append(para,v0**2.0/(2*g)) 27 | theta_list = np.linspace(para[2],para[3],3) 28 | x_exact = para[0]*(theta_list-np.sin(theta_list))-para[1] 29 | y_exact = para[0]*(1-np.cos(theta_list))-para[4] 30 | print(para) 31 | print(theta_list) 32 | print(x_exact) 33 | print(y_exact) 34 | -------------------------------------------------------------------------------- /1_deep_learning/computation_graph.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import numpy as np 3 | 4 | class Dense(object): 5 | ''' 6 | linear node f(x) = xW + b. 7 | 8 | Attributes: 9 | params (list): variables (input nodes) that directly feed into this node, W and b. 10 | params_delta (list): gradients for parameters. 11 | ''' 12 | def __init__(self, input_shape, output_shape, mean=0, variance=0.01): 13 | self.params = [mean + variance * np.random.randn(input_shape, output_shape), 14 | mean + variance * np.random.randn(output_shape)] 15 | self.params_delta = [None, None] 16 | 17 | def forward(self, x, *args): 18 | '''function itself.''' 19 | self.x = x # store for backward 20 | W, b = self.params 21 | return np.dot(x, W) + b 22 | 23 | def backward(self, delta): 24 | ''' 25 | Args: 26 | delta (ndarray): gradient of L with repect to node's output, dL/dy. 
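For the linear node y = xW + b this means the code below stores dL/dW = x.T @ delta and dL/db = delta.sum(axis=0) in params_delta, and returns dL/dx = delta @ W.T.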
27 | 28 | Returns: 29 | ndarray: gradient of L with respect to node's input, dL/dx 30 | ''' 31 | self.params_delta[0] = np.dot(self.x.T, delta) 32 | self.params_delta[1] = np.sum(delta, 0) 33 | return np.dot(delta, self.params[0].T) 34 | 35 | class F(object): 36 | '''base class for functions with no parameters.''' 37 | def __init__(self): 38 | self.params = [] 39 | self.params_delta = [] 40 | 41 | class Sigmoid(F): 42 | '''Sigmoid activation function module''' 43 | def forward(self, x): 44 | self.y = 1.0 / (1.0 + np.exp(-x)) 45 | return self.y 46 | 47 | def backward(self, delta): 48 | return delta * ((1 - self.y) * self.y) 49 | 50 | class MSE(F): 51 | '''Mean squared error loss module''' 52 | def __init__(self, y): 53 | super(MSE, self).__init__() 54 | self.y = y 55 | 56 | def forward(self, x): 57 | self.x = x 58 | return ((x-self.y)**2).mean() 59 | 60 | def backward(self, delta): 61 | return delta*2*(self.x-self.y)/np.prod(self.x.shape) 62 | 63 | class Sequential(object): 64 | def __init__(self, layers): 65 | self.layers = layers 66 | 67 | def forward(self, x): 68 | for l in self.layers: 69 | x = l.forward(x) 70 | return x 71 | 72 | def backward(self): 73 | delta = 1.0 74 | for l in self.layers[::-1]: 75 | delta = l.backward(delta) 76 | return delta 77 | 78 | if __name__=='__main__': 79 | np.random.seed(42) 80 | 81 | n_batch = 32 82 | n_in = 1 83 | n_hidden = 100 84 | 85 | x = np.random.rand(n_batch, n_in) 86 | y = (x**2).sum(axis=1, keepdims=True) # size = (n_batch, 1) 87 | 88 | model = Sequential([Dense(n_in, n_hidden), Sigmoid(), Dense(n_hidden, 1), MSE(y)]) 89 | 90 | def func(x): 91 | x = x.reshape(n_batch, n_in) 92 | return model.forward(x) 93 | 94 | def grad(x): 95 | x = x.reshape(n_batch, n_in) 96 | model.forward(x) 97 | return model.backward().reshape(n_batch*n_in) 98 | 99 | from scipy.optimize import check_grad 100 | x = np.random.randn(n_batch*n_in) 101 | print ('gradient check:', check_grad(func, grad, x) ) 102 | -------------------------------------------------------------------------------- /1_deep_learning/lecture_notes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/1_deep_learning/lecture_notes.pdf -------------------------------------------------------------------------------- /1_deep_learning/realnvp/README.md: -------------------------------------------------------------------------------- 1 | # Fun with Normalizing Flows 2 | 3 | Normalizing Flows are iterative mappings from the latent space of a simple *base distribution* (e.g. independent Gaussians) to a data space with a complex distribution. These flows are invertible, which means you can also use them to map a complex data distribution back to the normal distribution, hence the name **Normalizing Flow**. Normalizing Flows are simple yet elegant generative models which demonstrate **representation learning**. 4 | 5 | Some background reading before you start: 6 | 7 | - Rui Shu's [Precursor to Normalizing Flows](http://ruishu.io/2018/05/19/change-of-variables/) 8 | 9 | - Eric Jang's tutorial [1](https://blog.evjang.com/2018/01/nf1.html) and [2](https://blog.evjang.com/2018/01/nf2.html) 10 | 11 | - OpenAI's [Glow](https://blog.openai.com/glow/) 12 | 13 | Here we employ the Real NVP network introduced in [this paper](https://arxiv.org/abs/1605.08803) for variational calculation of toy target densities.
The goal is to minimize the following loss 14 | $$ 15 | \mathcal{L} = \int d x\, q(x) [\ln q(x) + E (x)], 16 | $$ 17 | where $q(x)$ is the model density, and $E(x)$ is a given energy function. One can show that the loss function is lower bounded $\mathcal{L} \ge -\ln Z$, where $Z = \int d x \, e^{-E(x)}$ is the partition function. One will arrive at the equality only when the variational density matches to the target density $q(x) = e^{-E(x)}/Z$. 18 | 19 | Please play with the code and finish the following tasks 20 | 21 | - [ ] Make a plot of the loss versus training epochs, and compare with exactly computed $-\ln Z$ 22 | - [ ] How to make sense of the learned latent space ? Could you do something fun with it ? 23 | 24 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/main.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | from realnvp import NVPNet 8 | import objectives 9 | 10 | if __name__=='__main__': 11 | import argparse 12 | parser = argparse.ArgumentParser(description='') 13 | parser.add_argument("-cuda", type=int, default=-1, help="use GPU") 14 | parser.add_argument("-target", default='Ring2D', 15 | choices=['Ring2D', 'Ring5', 'Wave', 'Gaussian', 'Mog2'], help="target distribution") 16 | parser.add_argument("-batchsize", type=int, default=1024, help="batchsize") 17 | args = parser.parse_args() 18 | device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda)) 19 | 20 | xlimits=[-4, 4] 21 | ylimits=[-4, 4] 22 | numticks=31 23 | x = np.linspace(*xlimits, num=numticks, dtype=np.float32) 24 | y = np.linspace(*ylimits, num=numticks, dtype=np.float32) 25 | X, Y = np.meshgrid(x, y) 26 | xy = np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T 27 | xy = torch.from_numpy(xy).contiguous().to(device) 28 | 29 | # Set up plotting code 30 | def plot_isocontours(ax, func, alpha=1.0): 31 | zs = np.exp(func(xy).cpu().detach().numpy()) 32 | Z = zs.reshape(X.shape) 33 | plt.contour(X, Y, Z, alpha=alpha) 34 | ax.set_yticks([]) 35 | ax.set_xticks([]) 36 | plt.xlim(xlimits) 37 | plt.ylim(ylimits) 38 | 39 | target = getattr(objectives, args.target)() 40 | target.to(device) 41 | 42 | # Set up figure. 
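# Note on the objective (cf. realnvp/README.md): the training loop below minimizes a
# Monte Carlo estimate of L = E_{x~q}[ln q(x) + E(x)]. model.sample() draws x together
# with ln q(x), and target(x) returns the unnormalized log density -E(x), so the
# estimator is loss = logp.mean() - target(x).mean().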
43 | fig = plt.figure(figsize=(8,8), facecolor='white') 44 | ax = fig.add_subplot(111, frameon=False) 45 | plt.ion() 46 | plt.show(block=False) 47 | 48 | model = NVPNet(dim = 2, hdim = 10, depth = 8) 49 | model.to(device) 50 | 51 | optimizer = torch.optim.Adam(model.parameters(), lr = 1e-2) 52 | 53 | params = list(model.parameters()) 54 | params = list(filter(lambda p: p.requires_grad, params)) 55 | nparams = sum([np.prod(p.size()) for p in params]) 56 | print ('total nubmer of trainable parameters:', nparams) 57 | 58 | np_losses = [] 59 | for e in range(200): 60 | x, logp = model.sample(args.batchsize) 61 | loss = logp.mean() - target(x).mean() 62 | 63 | model.zero_grad() 64 | loss.backward() 65 | optimizer.step() 66 | 67 | with torch.no_grad(): 68 | print (e, loss.item()) 69 | np_losses.append([loss.item()]) 70 | 71 | plt.cla() 72 | plot_isocontours(ax, target, alpha=0.5) 73 | plot_isocontours(ax, model.logprob) 74 | 75 | samples = x.cpu().detach().numpy() 76 | plt.plot(samples[:, 0], samples[:,1],'o', alpha=0.8) 77 | 78 | plt.draw() 79 | plt.pause(0.01) 80 | 81 | np_losses = np.array(np_losses) 82 | fig = plt.figure(figsize=(8,8), facecolor='white') 83 | plt.ioff() 84 | plt.plot(np_losses) 85 | plt.show() 86 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/__init__.py: -------------------------------------------------------------------------------- 1 | from .ring2ds import Ring2D,Ring5 2 | from .mog import Mog2 3 | from .wave import Wave 4 | from .gaussian import Gaussian 5 | 6 | __all__ =['Ring2D','Ring5','Mog2','Gaussian', 'Wave'] 7 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/gaussian.py: -------------------------------------------------------------------------------- 1 | from .template import Target 2 | 3 | class Gaussian(Target): 4 | def __init__(self): 5 | super(Gaussian, self).__init__(2,'Gaussian') 6 | 7 | def energy(self, x): 8 | return (-x[:,0]**2 - 2*x[:, 1]**2 - 2.0*x[:, 0] * x[:, 1]) 9 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/mog.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from .template import Target 4 | 5 | class Mog2(Target): 6 | 7 | def __init__(self, offset=0.8): 8 | super(Mog2, self).__init__(2,'Mog2') 9 | self.offset = offset 10 | 11 | def energy(self, x): 12 | 13 | v1 = torch.sqrt((x[:,0]-self.offset)**2 + (x[:, 1]-self.offset)**2)*2. 14 | v2 = torch.sqrt((x[:,0]+self.offset)**2 + (x[:, 1]+self.offset)**2)*2. 15 | 16 | pdf1 = torch.exp(-0.5* v1*v1) /np.sqrt(2*np.pi * 0.25) 17 | pdf2 = torch.exp(-0.5* v2*v2) /np.sqrt(2*np.pi * 0.25) 18 | 19 | return torch.log(0.5*pdf1 + 0.5* pdf2) 20 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/ring2ds.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from .template import Target 4 | 5 | class Ring2D(Target): 6 | 7 | def __init__(self): 8 | super(Ring2D, self).__init__(2,'Ring2D') 9 | 10 | def energy(self, x): 11 | return -(torch.sqrt((x**2).sum(dim=1))-2.0)**2/0.32 12 | 13 | class Ring5(Target): 14 | 15 | def __init__(self): 16 | super(Ring5, self).__init__(2,'Ring5') 17 | 18 | def energy(self, x): 19 | x2 = torch.sqrt((x**2).sum(dim=1)) 20 | u1 = (x2 - 1.) 
**2 /0.04 21 | u2 = (x2 - 2.) **2 /0.04 22 | u3 = (x2 - 3.) **2 /0.04 23 | u4 = (x2 - 4.) **2 /0.04 24 | u5 = (x2 - 5.) **2 /0.04 25 | 26 | u1 = u1.view(-1, 1) 27 | u2 = u2.view(-1, 1) 28 | u3 = u3.view(-1, 1) 29 | u4 = u4.view(-1, 1) 30 | u5 = u5.view(-1, 1) 31 | 32 | u = torch.cat((u1, u2, u3, u4, u5), dim=1) 33 | return -torch.min(u, dim=1)[0] -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/template.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | class Target(nn.Module): 5 | ''' 6 | base class for target 7 | ''' 8 | def __init__(self,nvars,name = "Target"): 9 | super(Target, self).__init__() 10 | self.nvars = nvars 11 | self.name = name 12 | 13 | def __call__(self, x): 14 | return self.energy(x) 15 | 16 | def energy(self,z): 17 | raise NotImplementedError(str(type(self))) 18 | -------------------------------------------------------------------------------- /1_deep_learning/realnvp/objectives/wave.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from .template import Target 4 | 5 | class Wave(Target): 6 | 7 | def __init__(self): 8 | super(Wave, self).__init__(2,'Wave') 9 | 10 | def energy(self, x): 11 | w = torch.sin(np.pi*x[:, 0]/2.) 12 | return -0.5*((x[:, 1] -w)/0.4)**2 -------------------------------------------------------------------------------- /1_deep_learning/realnvp/realnvp.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | import torch.nn as nn 4 | 5 | class NVPCouplingLayer(nn.Module): 6 | 7 | def __init__(self, map_s, map_t, b): 8 | super(NVPCouplingLayer , self).__init__() 9 | self.map_s = map_s 10 | self.map_t = map_t 11 | self.b = b.clone().unsqueeze(0) 12 | 13 | def forward(self, x): 14 | self.logjac = x.new_zeros(x.shape[0]) 15 | s, t = self.map_s(self.b * x), self.map_t(self.b * x) 16 | y = self.b * x + (1 - self.b) * (torch.exp(s) * x + t) 17 | self.logjac += ((1 - self.b) * s).sum(1) 18 | return y 19 | 20 | def inverse(self, y): 21 | self.logjac = y.new_zeros(y.shape[0]) 22 | s, t = self.map_s(self.b * y), self.map_t(self.b * y) 23 | self.logjac -= ((1 - self.b) * s).sum(1) 24 | y = self.b * y + (1 - self.b) * (torch.exp(-s) * (y - t)) 25 | return y 26 | 27 | class NVPNet(nn.Module): 28 | def __init__(self, dim, hdim, depth, device='cpu'): 29 | super(NVPNet, self).__init__() 30 | self.dim = dim 31 | self.device = device 32 | b = torch.Tensor(dim).to(device) 33 | self.layers = nn.ModuleList() 34 | for d in range(depth): 35 | if d%2 == 0: 36 | # Tag half the dimensions 37 | i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2) 38 | b.zero_()[i] = 1 39 | else: 40 | b=1-b 41 | 42 | map_s = nn.Sequential(nn.Linear(dim, hdim), 43 | nn.ELU(), 44 | nn.Linear(hdim, hdim), 45 | nn.ELU(), 46 | nn.Linear(hdim, dim) 47 | ) 48 | map_t = nn.Sequential(nn.Linear(dim, hdim), 49 | nn.ELU(), 50 | nn.Linear(hdim, hdim), 51 | nn.ELU(), 52 | nn.Linear(hdim, dim) 53 | ) 54 | self.layers.append(NVPCouplingLayer(map_s, map_t, b)) 55 | 56 | def forward(self, x): 57 | self.logjac = x.new_zeros(x.shape[0]) 58 | for m in self.layers: 59 | x = m(x) 60 | self.logjac += m.logjac 61 | return x 62 | 63 | def inverse(self, y): 64 | self.logjac = y.new_zeros(y.shape[0]) 65 | for m in reversed(self.layers): 66 | y = m.inverse(y) 67 | self.logjac += m.logjac 68 | return y 69 | 70 | def 
sample(self, batch_size): 71 | z = torch.Tensor(batch_size, self.dim).normal_() 72 | x = self.forward(z) 73 | logp = - 0.5 * z.pow(2).add(math.log(2 * math.pi)).sum(1) - self.logjac 74 | return x, logp 75 | 76 | def logprob(self, x): 77 | z = self.inverse(x) 78 | return - 0.5 * z.pow(2).add(math.log(2 * math.pi)).sum(1) + self.logjac 79 | 80 | def save(self, save_dict): 81 | for d, layer in enumerate(self.layers): 82 | save_dict['map_s'+str(d)] = layer.map_s.state_dict() 83 | save_dict['map_t'+str(d)] = layer.map_t.state_dict() 84 | save_dict['mask'+str(d)] = layer.b.to('cpu') 85 | return save_dict 86 | 87 | def load(self, save_dict): 88 | for d, layer in enumerate(self.layers): 89 | layer.map_s.load_state_dict(save_dict['map_s'+str(d)]) 90 | layer.map_t.load_state_dict(save_dict['map_t'+str(d)]) 91 | layer.b = save_dict['mask'+str(d)].to(self.device) 92 | return save_dict 93 | 94 | if __name__=='__main__': 95 | 96 | batch_size = 100 97 | dim = 2 98 | 99 | model = NVPNet(dim = dim, hdim = 4, depth = 8) 100 | z = torch.randn(batch_size, dim, requires_grad=True) 101 | 102 | x = model.forward(z) # generate a new dataset. 103 | x_logjac = model.logjac # record log(Jacobian) in generate process. 104 | print (model.logjac) 105 | 106 | z_infer = model.inverse(x) # inference back to the original dataset. 107 | z_infer_logjac = model.logjac # record log(Jacobian) in inference process. 108 | print (model.logjac) 109 | 110 | from numpy.testing import assert_array_almost_equal 111 | assert_array_almost_equal(z_infer.data.numpy(),z.data.numpy()) # test if they are the same. 112 | assert_array_almost_equal(x_logjac.data.numpy(),-z_infer_logjac.data.numpy()) # abs(log(Jacobian)) 113 | -------------------------------------------------------------------------------- /1_deep_learning/schrodinger.py: -------------------------------------------------------------------------------- 1 | ''' 2 | idea taken from https://math.mit.edu/~stevenj/18.336/adjoint.pdf 3 | ''' 4 | 5 | import numpy as np 6 | import torch 7 | torch.set_default_dtype(torch.float64) 8 | import torch.nn as nn 9 | 10 | class Schrodinger1D(nn.Module): 11 | def __init__(self, xmesh): 12 | super(Schrodinger1D, self).__init__() 13 | 14 | self.xmesh = xmesh 15 | self.potential = nn.Parameter(xmesh**2) 16 | 17 | nmesh = xmesh.shape[0] 18 | h2 = (xmesh[1]-xmesh[0])**2 19 | self.K = torch.diag(1/h2*torch.ones(nmesh, dtype=xmesh.dtype), diagonal=0) \ 20 | - torch.diag(0.5/h2*torch.ones(nmesh-1, dtype=xmesh.dtype), diagonal=1) \ 21 | - torch.diag(0.5/h2*torch.ones(nmesh-1, dtype=xmesh.dtype), diagonal=-1) 22 | 23 | def _solve(self): 24 | 25 | H = torch.diag(self.potential) + self.K 26 | _, psi = torch.symeig(H, eigenvectors=True) 27 | 28 | return psi[:, 0] # 0 for ground state 29 | 30 | def forward(self, target): 31 | psi = self._solve() 32 | return (psi**2 - target).abs().sum() 33 | 34 | def plot(self, target): 35 | psi = self._solve() 36 | 37 | plt.cla() 38 | plt.plot(self.xmesh.numpy(), target.numpy(), label='target') 39 | plt.plot(self.xmesh.numpy(), psi.square().detach().numpy(), label='current') 40 | plt.plot(self.xmesh.numpy(), self.potential.detach().numpy()/10000, label='V/10000') 41 | plt.legend() 42 | plt.draw() 43 | 44 | if __name__=='__main__': 45 | #prepare mesh and target density 46 | xmin = -1; xmax = 1; Nmesh = 500 47 | xmesh = torch.linspace(xmin, xmax, Nmesh) 48 | 49 | target = torch.zeros(Nmesh) 50 | idx = torch.where(torch.abs(xmesh)<0.5) 51 | target[idx] = 1.-torch.abs(xmesh[idx]) 52 | target = (target/torch.norm(target))**2 53 | 54 
| model = Schrodinger1D(xmesh) 55 | optimizer = torch.optim.LBFGS(model.parameters(), max_iter=10, tolerance_change = 1E-7, tolerance_grad=1E-7, line_search_fn='strong_wolfe') 56 | 57 | def closure(): 58 | optimizer.zero_grad() 59 | loss = model(target) # density difference 60 | loss.backward() 61 | return loss 62 | 63 | import matplotlib.pyplot as plt 64 | plt.ion() 65 | for epoch in range(50): 66 | loss = optimizer.step(closure) 67 | print (epoch, loss.item()) 68 | model.plot(target) 69 | plt.pause(0.01) 70 | plt.ioff() 71 | 72 | model.plot(target) 73 | plt.show() 74 | -------------------------------------------------------------------------------- /1_deep_learning/simple_ad.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class NodeBase(object): 4 | 5 | def __init__(self, data): 6 | super(NodeBase, self).__init__() 7 | self._data = data 8 | 9 | def backward(self, delta): 10 | raise NotImplementedError 11 | 12 | @property 13 | def data(self): 14 | return self._data 15 | 16 | @data.setter 17 | def data(self, value): 18 | self._data = value 19 | 20 | def __repr__(self): 21 | return 'tracked ' + repr(self.data) 22 | 23 | def __radd__(self, rhs): 24 | return Node(Add, self, rhs) 25 | 26 | def __ladd__(self, lhs): 27 | return Node(Add, lhs, self) 28 | 29 | 30 | class Variable(NodeBase): 31 | 32 | def __init__(self, x, grad=None): 33 | super(Variable, self).__init__(x) 34 | self.grad = grad 35 | 36 | def backward(self, delta): 37 | if self.grad is None: 38 | self.grad = delta 39 | else: 40 | self.grad += delta 41 | return None 42 | 43 | 44 | class Node(NodeBase): 45 | 46 | def __init__(self, f, *args): 47 | data = [each.data if isinstance(each, NodeBase) else each for each in args] 48 | super(Node, self).__init__(f.eval(*data)) 49 | self.args = args 50 | self.args_data = data 51 | self.f = f 52 | 53 | def backward(self, delta): 54 | grads = self.f.gradient(delta, self.data, *self.args_data) 55 | for each, grad in zip(self.args, grads): 56 | if isinstance(each, NodeBase): 57 | each.backward(grad) 58 | return 59 | 60 | 61 | class Functional: 62 | 63 | def eval(self, *args): 64 | raise NotImplementedError 65 | 66 | def gradient(self, delta, output, *args): 67 | raise NotImplementedError 68 | 69 | 70 | class MatMul(Functional): 71 | 72 | @staticmethod 73 | def eval(A, B): 74 | return np.matmul(A, B) 75 | 76 | @staticmethod 77 | def gradient(delta, output, A, B): 78 | 79 | def adjoint(x): 80 | return np.conj(x.T) 81 | 82 | return np.matmul(delta, adjoint(B)), np.matmul(adjoint(A), delta) 83 | 84 | 85 | class Add(Functional): 86 | 87 | @staticmethod 88 | def eval(A, B): 89 | return A + B 90 | 91 | @staticmethod 92 | def gradient(delta, output, A, B): 93 | return delta, delta 94 | 95 | 96 | class Sigmoid(Functional): 97 | 98 | @staticmethod 99 | def eval(X): 100 | return 1. / (1. 
+ np.exp(-X)) 101 | 102 | @staticmethod 103 | def gradient(delta, output, X): 104 | return delta * (1 - output) * output, 105 | 106 | 107 | def matmul(A, B): 108 | return Node(MatMul, A, B) 109 | 110 | def sigmoid(X): 111 | return Node(Sigmoid, X) 112 | 113 | 114 | if __name__ == '__main__': 115 | A = Variable(np.random.rand(2, 3)) 116 | B = Variable(np.random.rand(3, 4)) 117 | C = Variable(np.random.rand(2, 4)) 118 | 119 | # Dense 120 | Z = sigmoid(matmul(A, B) + C) 121 | Z.backward(np.random.rand(2, 4)) 122 | print(A.grad) 123 | -------------------------------------------------------------------------------- /1_deep_learning/slides/SSSS-1-DLReview.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/1_deep_learning/slides/SSSS-1-DLReview.pdf -------------------------------------------------------------------------------- /1_deep_learning/slides/SSSS-2-GenerativeModels.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/1_deep_learning/slides/SSSS-2-GenerativeModels.pdf -------------------------------------------------------------------------------- /1_deep_learning/slides/SSSS-3-DiffProgm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/1_deep_learning/slides/SSSS-3-DiffProgm.pdf -------------------------------------------------------------------------------- /1_deep_learning/slides/SSSS-4-DLApplication.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/1_deep_learning/slides/SSSS-4-DLApplication.pdf -------------------------------------------------------------------------------- /1_deep_learning/tanh.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Follows HIPS autograd https://github.com/HIPS/autograd/blob/master/examples/tanh.py 3 | ''' 4 | 5 | import torch 6 | import matplotlib.pyplot as plt 7 | 8 | x = torch.linspace(-7, 7, 100, requires_grad=True) 9 | 10 | for i in range(7): 11 | if (i==0): 12 | y = torch.tanh(x/2) 13 | else: 14 | y, = torch.autograd.grad(y, x, grad_outputs=torch.ones(y.shape[0]), create_graph=True) 15 | 16 | plt.plot(x.detach().numpy(), y.detach().numpy(), '-', label='$%g$'%(i)) 17 | 18 | plt.legend() 19 | plt.show() 20 | -------------------------------------------------------------------------------- /2_tensor_network/README.md: -------------------------------------------------------------------------------- 1 | # Tensor Networks 2 | 3 | ## Table of Contents 4 | 5 | * [`Slides on tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/Tutorial_tensor_network.pdf) 6 | * [`Slides on contraction methods for infinite tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/tensor_contraction_methods.pdf) 7 | * [`Tutorial and demo codes on computing $2$-D Ising model partition function using tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/tensor_contraction_simple.ipynb) 8 | * [`Tutorial and demo codes on the MPS Born machine`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/mps_tutorial.ipynb) 9 | 10 | 
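If tensor network notation is new to you, the following minimal sketch (not part of the repo; plain NumPy with made-up toy sizes) shows the one operation both notebooks build on: contracting the shared bond indices of a chain of 3-way tensors, here to read off the amplitude an MPS assigns to a single bit string.

```python
import numpy as np

np.random.seed(0)
n, d, D = 6, 2, 4                      # sites, physical dim, bond dim (toy values)

# A list of 3-way tensors with indices (left bond, physical, right bond),
# analogous to the `tensors` list built in mps_tutorial.ipynb.
mps = ([np.random.randn(1, d, D)]
       + [np.random.randn(D, d, D) for _ in range(n - 2)]
       + [np.random.randn(D, d, 1)])

bits = [0, 1, 1, 0, 1, 0]              # one spin/bit configuration x
vec = np.ones((1, 1))                  # trivial left boundary
for A, x in zip(mps, bits):
    vec = vec @ A[:, x, :]             # contract away the shared bond index
amplitude = vec.item()                 # psi(x); a Born machine uses p(x) = psi(x)**2 / Z
print(amplitude)
```

The MPS Born machine tutorial works in the same spirit, only with PyTorch tensors, a much longer chain (784 pixels), and the squared amplitudes normalized into probabilities.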
-------------------------------------------------------------------------------- /2_tensor_network/Tutorial_tensor_network.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/Tutorial_tensor_network.pdf -------------------------------------------------------------------------------- /2_tensor_network/assets/Z.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/Z.png -------------------------------------------------------------------------------- /2_tensor_network/assets/Z2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/Z2.png -------------------------------------------------------------------------------- /2_tensor_network/assets/gen1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/gen1.png -------------------------------------------------------------------------------- /2_tensor_network/assets/gen2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/gen2.png -------------------------------------------------------------------------------- /2_tensor_network/assets/gen3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/gen3.png -------------------------------------------------------------------------------- /2_tensor_network/assets/gen4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/gen4.png -------------------------------------------------------------------------------- /2_tensor_network/assets/gradients.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/gradients.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430141119642.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430141119642.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430143433180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430143433180.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430144239877.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430144239877.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430144449213.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430144449213.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430145634644.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430145634644.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430150813001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430150813001.png -------------------------------------------------------------------------------- /2_tensor_network/assets/image-20190430152031663.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/image-20190430152031663.png -------------------------------------------------------------------------------- /2_tensor_network/assets/init.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/init.png -------------------------------------------------------------------------------- /2_tensor_network/assets/init2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/init2.png -------------------------------------------------------------------------------- /2_tensor_network/assets/px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/assets/px.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/L.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/L.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/L0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/L0.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/Z.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/Z.png 
-------------------------------------------------------------------------------- /2_tensor_network/imgs/born_machine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/born_machine.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/cond_prob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/cond_prob.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/cp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/cp.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/joint_prob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/joint_prob.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/mnist_mps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/mnist_mps.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/mps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/mps.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/mps1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/mps1.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/mps2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/mps2.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/mps_left.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/mps_left.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/psi_prime.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/psi_prime.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/rank_one.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/rank_one.png 
-------------------------------------------------------------------------------- /2_tensor_network/imgs/recon0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/recon0.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/recon1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/recon1.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/supunsup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/supunsup.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/tensor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/tensor.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/tensor_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/tensor_diagram.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/tensor_networks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/tensor_networks.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/training.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/tucker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/tucker.png -------------------------------------------------------------------------------- /2_tensor_network/imgs/two_qubits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/imgs/two_qubits.png -------------------------------------------------------------------------------- /2_tensor_network/kacward.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | ''' 4 | Kac-Ward exact Ising 5 | See Theorem 1 of https://arxiv.org/abs/1011.3494 6 | ''' 7 | 8 | phi = np.array([[0., np.pi/2, -np.pi/2, np.nan ], 9 | [-np.pi/2, 0.0, np.nan, np.pi/2], 10 | [np.pi/2, np.nan, 0.0, -np.pi/2], 11 | [np.nan, -np.pi/2, np.pi/2, 0] 12 | ]) 13 | 14 | def logcosh(x): 15 | xp = np.abs(x) 16 | if (xp< 12): 17 | return np.log( np.cosh(x) ) 18 | else: 19 | return xp 
- np.log(2.) 20 | 21 | def neighborsite(i, n, L): 22 | """ 23 | The coordinate system is geometrically left->right, down -> up 24 | y| 25 | | 26 | | 27 | |________ x 28 | (0,0) 29 | So as a definition, l means x-1, r means x+1, u means y+1, and d means y-1 30 | """ 31 | x = i%L 32 | y = i//L # y denotes 33 | site = None 34 | # ludr : 35 | if (n==0): 36 | if (x-1>=0): 37 | site = (x-1) + y*L 38 | elif (n==1): 39 | if (y+1=0): 43 | site = x + (y-1)*L 44 | elif (n==3): 45 | if (x+1" 153 | ] 154 | }, 155 | "metadata": {}, 156 | "output_type": "display_data" 157 | } 158 | ], 159 | "source": [ 160 | "def show_imgs(imgs,l1=4,l2=5,s1=6,s2=6):\n", 161 | " \"\"\" Plot images \"\"\"\n", 162 | " plt.rcParams['figure.figsize']=(s1,s2)\n", 163 | " imgs=imgs.cpu().reshape([-1,28,28])\n", 164 | " g, ax = plt.subplots(l1,l2)\n", 165 | " for i in range(l1): \n", 166 | " for j in range(l2):\n", 167 | " a=i*l2+j\n", 168 | " if(a>=imgs.shape[0]):\n", 169 | " break\n", 170 | " ax[i][j].imshow(imgs[a,:,:],cmap='summer')\n", 171 | " ax[i][j].set_xticks([])\n", 172 | " ax[i][j].set_yticks([])\n", 173 | " plt.show()\n", 174 | " \n", 175 | "show_imgs(data,2,10,10,2)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 23, 181 | "metadata": { 182 | "slideshow": { 183 | "slide_type": "subslide" 184 | } 185 | }, 186 | "outputs": [ 187 | { 188 | "name": "stdout", 189 | "output_type": "stream", 190 | "text": [ 191 | "shape of data is torch.Size([20, 784])\n", 192 | "tensor([[0, 0, 0, ..., 0, 0, 0],\n", 193 | " [0, 0, 0, ..., 0, 0, 0],\n", 194 | " [0, 0, 0, ..., 0, 0, 0],\n", 195 | " ...,\n", 196 | " [0, 0, 0, ..., 0, 0, 0],\n", 197 | " [0, 0, 0, ..., 0, 0, 0],\n", 198 | " [0, 0, 0, ..., 0, 0, 0]])\n" 199 | ] 200 | } 201 | ], 202 | "source": [ 203 | "print(\"shape of data is \",data.shape )\n", 204 | "print(data)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "metadata": { 210 | "slideshow": { 211 | "slide_type": "slide" 212 | } 213 | }, 214 | "source": [ 215 | "### MPS initialization\n", 216 | "Define the mps, which is a list of 3-way tensors containing random values \n", 217 | "" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 20, 223 | "metadata": { 224 | "slideshow": { 225 | "slide_type": "fragment" 226 | } 227 | }, 228 | "outputs": [], 229 | "source": [ 230 | "Dmax=30 # maximum bond dimension\n", 231 | "#mydevice=torch.device(\"cuda:0\")\n", 232 | "mydevice=torch.device(\"cpu\")\n", 233 | "data=data.to(mydevice)\n", 234 | "bond_dims=[Dmax for i in range(n-1)]+[1]\n", 235 | "tensors= [ torch.randn(bond_dims[i-1],2,bond_dims[i],device=mydevice) for i in range(n)]\n", 236 | "\n" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 24, 242 | "metadata": { 243 | "slideshow": { 244 | "slide_type": "subslide" 245 | } 246 | }, 247 | "outputs": [ 248 | { 249 | "name": "stdout", 250 | "output_type": "stream", 251 | "text": [ 252 | "shape of tensors is 784\n", 253 | "[torch.Size([1, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), 
…(output truncated; the remaining printed shapes repeat torch.Size([30, 2, 30]))
torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), 
torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), 
torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 30]), torch.Size([30, 2, 1])]\n" 254 | ] 255 | } 256 | ], 257 | "source": [ 258 | "print(\"shape of tensors is\",len(tensors))\n", 259 | "print([i.shape for i in tensors])" 260 | ] 261 | }, 262 | { 263 | "cell_type": "markdown", 264 | "metadata": { 265 | "slideshow": { 266 | "slide_type": "slide" 267 | } 268 | }, 269 | "source": [ 270 | "Now check the bond dimensions and tensors" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": 25, 276 | "metadata": { 277 | "scrolled": true, 278 | "slideshow": { 279 | "slide_type": "fragment" 280 | } 281 | }, 282 | "outputs": [ 283 | { 284 | "name": "stdout", 285 | "output_type": "stream", 286 | "text": [ 287 | "There are 784 tensors\n", 288 | "[30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 1]\n" 289 | ] 290 | } 291 | ], 292 | "source": [ 293 | "print(\"There are\",len(tensors), \"tensors\")\n", 294 | "print(bond_dims)" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 26, 300 | "metadata": { 301 | "slideshow": { 302 | "slide_type": "subslide" 303 | } 304 | }, 305 | "outputs": [ 306 | { 307 | "name": "stdout", 308 | "output_type": "stream", 309 | "text": [ 310 | "shape of a tensor is torch.Size([30, 2, 30])\n" 311 | ] 312 | } 313 | ], 314 | "source": [ 315 | "print(\"shape of a tensor is\",tensors[5].shape)" 316 | ] 317 | }, 318 | { 319 | "cell_type": "markdown", 320 | "metadata": { 321 | "slideshow": { 322 | "slide_type": "slide" 323 | } 324 | }, 325 | "source": [ 326 | "Question: does the contration with one image give a probability of the image?" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": { 332 | "slideshow": { 333 | "slide_type": "fragment" 334 | } 335 | }, 336 | "source": [ 337 | "Answer: No" 338 | ] 339 | }, 340 | { 341 | "cell_type": "markdown", 342 | "metadata": { 343 | "slideshow": { 344 | "slide_type": "fragment" 345 | } 346 | }, 347 | "source": [ 348 | "\n", 349 | "" 350 | ] 351 | }, 352 | { 353 | "cell_type": "markdown", 354 | "metadata": { 355 | "slideshow": { 356 | "slide_type": "slide" 357 | } 358 | }, 359 | "source": [ 360 | "Canonicalization using QR decompositions\n", 361 | "* Left canonicalization using sequential QR decompositions. 
After that, all tensors except the rightmost one are isometries, with a column-orthogonal unfolded matrix\n", 362 | "\n" 363 | ] 364 | }, 365 | { 366 | "cell_type": "markdown", 367 | "metadata": { 368 | "slideshow": { 369 | "slide_type": "slide" 370 | } 371 | }, 372 | "source": [ 373 | "" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": 28, 379 | "metadata": { 380 | "slideshow": { 381 | "slide_type": "subslide" 382 | } 383 | }, 384 | "outputs": [], 385 | "source": [ 386 | "def orthogonalize(site,going_right):\n", 387 | " dl=bond_dims[site-1] # left bond dimension\n", 388 | " d=bond_dims[site] # current bond dimension\n", 389 | " if(going_right):\n", 390 | " A=tensors[site].cpu().view(dl*2,d) # A is a matrix unfolded from the current tensor\n", 391 | " Q,R=torch.qr(A)\n", 392 | " R/=R.norm() # divided by norm \n", 393 | " tensors[site] = Q.contiguous().view(dl,2,-1).to(mydevice)\n", 394 | " tensors[site+1] = (R.to(mydevice)@tensors[site+1].view(d,-1)).view(-1,2,bond_dims[site+1])\n", 395 | " bond_dims[site] = Q.shape[1] # economy QR, so the right dimension could be either dl or d\n", 396 | " else: # going left\n", 397 | " A=tensors[site].cpu().view(dl,d*2).t()\n", 398 | " Q,R=torch.qr(A)\n", 399 | " R/=R.norm() \n", 400 | " tensors[site]=Q.t().contiguous().view(-1,2,d).to(mydevice)\n", 401 | " tensors[site-1] = (tensors[site-1].view(-1,dl)@R.t().to(mydevice)).view(bond_dims[site-2],2,-1)\n", 402 | " bond_dims[site-1] = Q.shape[1]\n" 403 | ] 404 | }, 405 | { 406 | "cell_type": "markdown", 407 | "metadata": { 408 | "slideshow": { 409 | "slide_type": "subslide" 410 | } 411 | }, 412 | "source": [ 413 | "After the canonicalization:\n", 414 | "* It makes the partition function of the model equal to $1$. \n", 415 | "* The isometries have condition number $1$, which preserves the computational precision well.\n", 416 | "\n", 417 | "" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": 30, 423 | "metadata": { 424 | "slideshow": { 425 | "slide_type": "subslide" 426 | } 427 | }, 428 | "outputs": [ 429 | { 430 | "name": "stdout", 431 | "output_type": "stream", 432 | "text": [ 433 | " site #783 / 784 time_used=0.67 Sec.\n" 434 | ] 435 | } 436 | ], 437 | "source": [ 438 | "t0=time.time()\n", 439 | "sys.stdout.write(\"Orthogonalizing...\\t\")\n", 440 | "for site in range(n-1):\n", 441 | " sys.stdout.write(\"\\r site #%d / %d \"%(site+1,n)); sys.stdout.flush()\n", 442 | " orthogonalize(site,True) \n", 443 | "sys.stdout.write(\"time_used=%.2f Sec.\\n\"%(time.time()-t0) ) " 444 | ] 445 | }, 446 | { 447 | "cell_type": "code", 448 | "execution_count": 31, 449 | "metadata": { 450 | "slideshow": { 451 | "slide_type": "fragment" 452 | } 453 | }, 454 | "outputs": [ 455 | { 456 | "name": "stdout", 457 | "output_type": "stream", 458 | "text": [ 459 | "The shape of the last tensor is torch.Size([30, 2, 1])\n" 460 | ] 461 | } 462 | ], 463 | "source": [ 464 | "print(\"The shape of the last tensor is\",tensors[783].shape)" 465 | ] 466 | }, 467 | { 468 | "cell_type": "markdown", 469 | "metadata": { 470 | "slideshow": { 471 | "slide_type": "slide" 472 | } 473 | }, 474 | "source": [ 475 | "Canonicalization changes the bond dimensions from\n", 476 | "\n", 477 | "to\n", 478 | "" 479 | ] 480 | }, 481 | { 482 | "cell_type": "code", 483 | "execution_count": 33, 484 | "metadata": { 485 | "scrolled": true, 486 | "slideshow": { 487 | "slide_type": "subslide" 488 | } 489 | }, 490 | "outputs": [ 491 | { 492 | "name": "stdout", 493 | "output_type": "stream", 494 | "text": [
"[2, 4, 8, 16, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 1]\n" 496 | ] 497 | } 498 | ], 499 | "source": [ 500 | "print(bond_dims)" 501 | ] 502 | }, 503 | { 504 | "cell_type": "markdown", 505 | "metadata": { 506 | "slideshow": { 507 | "slide_type": "slide" 508 | } 509 | }, 510 | "source": [ 511 | "Now contracting mps with one image gives the probability amplitude of the image, becuase\n", 512 | "\n", 513 | "and ?" 
514 | ] 515 | }, 516 | { 517 | "cell_type": "code", 518 | "execution_count": 36, 519 | "metadata": { 520 | "slideshow": { 521 | "slide_type": "fragment" 522 | } 523 | }, 524 | "outputs": [], 525 | "source": [ 526 | "def get_psi():\n", 527 | " psi=torch.ones([m,1,1],device=mydevice)\n", 528 | " for site in range(n):\n", 529 | " psi = psi @ tensors[site][:,data[:,site],:].permute(1,0,2)\n", 530 | " return(psi)" 531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "metadata": { 536 | "slideshow": { 537 | "slide_type": "slide" 538 | } 539 | }, 540 | "source": [ 541 | "Generating samples using *ancestral* sampling\n", 542 | "" 543 | ] 544 | }, 545 | { 546 | "cell_type": "markdown", 547 | "metadata": { 548 | "slideshow": { 549 | "slide_type": "subslide" 550 | } 551 | }, 552 | "source": [ 553 | "" 554 | ] 555 | }, 556 | { 557 | "cell_type": "markdown", 558 | "metadata": { 559 | "slideshow": { 560 | "slide_type": "subslide" 561 | } 562 | }, 563 | "source": [ 564 | "" 565 | ] 566 | }, 567 | { 568 | "cell_type": "markdown", 569 | "metadata": { 570 | "slideshow": { 571 | "slide_type": "subslide" 572 | } 573 | }, 574 | "source": [ 575 | "" 576 | ] 577 | }, 578 | { 579 | "cell_type": "code", 580 | "execution_count": 38, 581 | "metadata": { 582 | "slideshow": { 583 | "slide_type": "fragment" 584 | } 585 | }, 586 | "outputs": [], 587 | "source": [ 588 | "def gen_samples(ns):\n", 589 | " samples=torch.zeros([ns,n],device=mydevice)\n", 590 | " for site in range(n-1):# left canonicalize\n", 591 | " orthogonalize(site,True) \n", 592 | " for s in range(ns):\n", 593 | " vec=torch.ones(1,1,device=mydevice)\n", 594 | " for site in range(n-1,-1,-1):\n", 595 | " vec = (tensors[site].view(-1,bond_dims[site])@vec).view(-1,2)\n", 596 | " p0 = vec[:,0].norm()**2/ (vec.norm()**2)\n", 597 | " x = (0 if np.random.rand() < p0 else 1)\n", 598 | " vec = vec[:,x]\n", 599 | " samples[s][site]=x\n", 600 | " return samples" 601 | ] 602 | }, 603 | { 604 | "cell_type": "markdown", 605 | "metadata": { 606 | "slideshow": { 607 | "slide_type": "slide" 608 | } 609 | }, 610 | "source": [ 611 | "### Initialize cache for MPS\n", 612 | "* Computing the probability of an image --> contracting from the first tensor to the last tensor. " 613 | ] 614 | }, 615 | { 616 | "cell_type": "markdown", 617 | "metadata": { 618 | "slideshow": { 619 | "slide_type": "fragment" 620 | } 621 | }, 622 | "source": [ 623 | "* Many of the intermediate results can be re-used in later computations." 624 | ] 625 | }, 626 | { 627 | "cell_type": "markdown", 628 | "metadata": { 629 | "slideshow": { 630 | "slide_type": "fragment" 631 | } 632 | }, 633 | "source": [ 634 | "* We will store these partial contraction results in a cache." 635 | ] 636 | }, 637 | { 638 | "cell_type": "markdown", 639 | "metadata": { 640 | "slideshow": { 641 | "slide_type": "fragment" 642 | } 643 | }, 644 | "source": [ 645 | "* Notice that the cache is maintained for all images at once." 646 | ] 647 | }, 648 | { 649 | "cell_type": "code", 650 | "execution_count": 39, 651 | "metadata": { 652 | "slideshow": { 653 | "slide_type": "subslide" 654 | } 655 | }, 656 | "outputs": [ 657 | { 658 | "name": "stdout", 659 | "output_type": "stream", 660 | "text": [ 661 | "Caching...\ttime_used=0.13 Sec."
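One detail worth unpacking (a standalone check, not notebook code; the sizes below are arbitrary): the expression `tensors[site][:, data[:, site], :].permute(1, 0, 2)` used in `get_psi` above and in the caching cell that follows selects, for every image at once, the matrix slice belonging to that image's pixel value, and `@` then acts as a batched matrix multiplication. An explicit per-image loop gives the same result:

```python
import torch

m_imgs, dl, dr = 5, 3, 4                      # arbitrary illustrative sizes
A = torch.randn(dl, 2, dr)                    # one MPS tensor
pixels = torch.randint(0, 2, (m_imgs,))       # the current pixel of each image
left = torch.randn(m_imgs, 1, dl)             # left environments, one per image

# Batched form used in the notebook: pick each image's matrix, then batch-multiply.
batched = left @ A[:, pixels, :].permute(1, 0, 2)          # shape (m_imgs, 1, dr)

# Explicit per-image loop doing the same thing.
looped = torch.stack([left[a] @ A[:, pixels[a], :] for a in range(m_imgs)])

print(torch.allclose(batched, looped, atol=1e-6))           # True
```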
662 | ] 663 | } 664 | ], 665 | "source": [ 666 | "t0=time.time()\n", 667 | "cache=[] \n", 668 | "sys.stdout.write(\"Caching...\\t\")\n", 669 | "sys.stdout.flush()\n", 670 | "cache.append( torch.ones([m,1,1],device=mydevice)) # The initial elements, all images have cache 1\n", 671 | "for site in range(n-1):\n", 672 | " B=cache[site] @ tensors[site][:,data[:,site],:].permute(1,0,2)\n", 673 | " B /= B.abs().max()\n", 674 | " cache.append( B ) # batched matrix multiplications\n", 675 | "cache.append( torch.ones(m,1,1,device=mydevice)) # the last element, matrix [1,1] for all images\n", 676 | "sys.stdout.write(\"time_used=%.2f Sec.\"%(time.time()-t0) )" 677 | ] 678 | }, 679 | { 680 | "cell_type": "markdown", 681 | "metadata": { 682 | "slideshow": { 683 | "slide_type": "slide" 684 | } 685 | }, 686 | "source": [ 687 | "* ```B /= B.abs().max()``` is used to preserve computational precision" 688 | ] 689 | }, 690 | { 691 | "cell_type": "markdown", 692 | "metadata": { 693 | "slideshow": { 694 | "slide_type": "fragment" 695 | } 696 | }, 697 | "source": [ 698 | "* Because, for each image, the gradient is invariant under multiplying its cache by a constant. " 699 | ] 700 | }, 701 | { 702 | "cell_type": "markdown", 703 | "metadata": { 704 | "slideshow": { 705 | "slide_type": "fragment" 706 | } 707 | }, 708 | "source": [ 709 | "* The length of the cache is $n+1$. " 710 | ] 711 | }, 712 | { 713 | "cell_type": "markdown", 714 | "metadata": { 715 | "slideshow": { 716 | "slide_type": "fragment" 717 | } 718 | }, 719 | "source": [ 720 | "* For an image, each pixel has a corresponding vector, denoting the temporary results in tensor contractions (from left to right or from right to left)." 721 | ] 722 | }, 723 | { 724 | "cell_type": "markdown", 725 | "metadata": { 726 | "slideshow": { 727 | "slide_type": "subslide" 728 | } 729 | }, 730 | "source": [ 731 | "Let us look at the content of the cache for image alpha=1:" 732 | ] 733 | }, 734 | { 735 | "cell_type": "code", 736 | "execution_count": 44, 737 | "metadata": { 738 | "scrolled": false, 739 | "slideshow": { 740 | "slide_type": "fragment" 741 | } 742 | }, 743 | "outputs": [ 744 | { 745 | "name": "stdout", 746 | "output_type": "stream", 747 | "text": [ 748 | "cache site 1 tensor([[1.]])\n", 749 | "cache site 2 tensor([[-0.0853, 1.0000]])\n", 750 | "cache site 3 tensor([[ 1.0000, 0.0972, -0.4895, 0.7524]])\n" 751 | ] 752 | } 753 | ], 754 | "source": [ 755 | "alpha=1 # the image 1\n", 756 | "print(\"cache site 1 \",cache[0][alpha])\n", 757 | "print(\"cache site 2 \",cache[1][alpha])\n", 758 | "print(\"cache site 3 \",cache[2][alpha])" 759 | ] 760 | }, 761 | { 762 | "cell_type": "markdown", 763 | "metadata": { 764 | "slideshow": { 765 | "slide_type": "subslide" 766 | } 767 | }, 768 | "source": [ 769 | "Then the probability of an image with amplitude $\\psi$ is given by $|\\psi|^2$; let us check!"
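Before that check, a quick numerical verification of the rescaling-invariance claim made above (again standalone, with made-up sizes, not notebook code): the per-image quantity entering the gradient, the outer product of the cached left and right vectors divided by `psi`, does not change when those cached vectors are rescaled by arbitrary constants, which is why `B /= B.abs().max()` is harmless.

```python
import torch

d = 4
A = torch.randn(d, d)        # slice of the current tensor for one pixel value
left = torch.randn(1, d)     # cached left vector of one image
right = torch.randn(d, 1)    # cached right vector of one image

def grad_term(lv, rv):
    psi = (lv @ A @ rv).squeeze()      # amplitude, up to the cache rescaling
    return (lv.t() @ rv.t()) / psi     # outer product divided by psi, shape (d, d)

# Rescaling the cached vectors by arbitrary constants leaves the term unchanged.
print(torch.allclose(grad_term(left, right),
                     grad_term(3.7 * left, 0.01 * right), atol=1e-5))   # True
```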
770 | ] 771 | }, 772 | { 773 | "cell_type": "code", 774 | "execution_count": 42, 775 | "metadata": { 776 | "scrolled": true, 777 | "slideshow": { 778 | "slide_type": "fragment" 779 | } 780 | }, 781 | "outputs": [ 782 | { 783 | "name": "stdout", 784 | "output_type": "stream", 785 | "text": [ 786 | "Probability of generating image 3 = 0.00000\n" 787 | ] 788 | } 789 | ], 790 | "source": [ 791 | "psi=get_psi()\n", 792 | "print(\"Probability of generating image 3 = %.5f\"%(psi*psi)[3])" 793 | ] 794 | }, 795 | { 796 | "cell_type": "markdown", 797 | "metadata": { 798 | "slideshow": { 799 | "slide_type": "subslide" 800 | } 801 | }, 802 | "source": [ 803 | "**Let us plot probability of all training images**" 804 | ] 805 | }, 806 | { 807 | "cell_type": "code", 808 | "execution_count": 43, 809 | "metadata": { 810 | "slideshow": { 811 | "slide_type": "fragment" 812 | } 813 | }, 814 | "outputs": [ 815 | { 816 | "data": { 817 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAmsAAACPCAYAAAClBpxZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAEMlJREFUeJzt3X2wXHV9x/H3pwlgtRYCCZom2EBJ28E+IN1BW6vDFIHgKKGVtnEYjRUnpZWxjtOZxmEUB/uH9MmOHaoTBA2MSizWkladGEDamU5FbjA8RMRcqJZACtEwSMeONPbbP/Zcu1x2cy/uJvfs5v2aObPn4XvO/f1ydvd8cs6e3VQVkiRJaqcfW+gGSJIkaTDDmiRJUosZ1iRJklrMsCZJktRihjVJkqQWM6xJkiS1mGFNkiSpxQxrkiRJLWZYkyRJarHFC92AUVq6dGmtWrVqoZshSZI0px07dny7qpbNVTdRYW3VqlVMTU0tdDMkSZLmlORb86kbyWXQJGuSPJBkOsnGPsuPSbKlWX5HklXN/FVJ/jvJzmb4SM86v5Lk3madDyXJKNoqSZI0ToYOa0kWAVcD5wOnAW9MctqsskuAJ6rqVOCDwFU9yx6sqtOb4dKe+R8GNgCrm2HNsG2VJEkaN6M4s3YmMF1VD1XV08CNwNpZNWuBzc34TcDZBztTlmQ58JNV9W9VVcD1wIUjaKskSdJYGUVYWwE83DO9p5nXt6aqDgBPAic0y05O8tUk/5zkVT31e+bYJgBJNiSZSjK1b9++4XoiSZLUMqMIa/3OkNU8a/YCL6mqlwHvAj6Z5Cfnuc3uzKpNVdWpqs6yZXPeUCFJkjRWRhHW9gAn9UyvBB4dVJNkMXAssL+qvl9V3wGoqh3Ag8DPNvUr59imJEnSxBtFWLsTWJ3k5CRHA+uArbNqtgLrm/GLgNuqqpIsa25QIMkpdG8keKiq9gJPJXlF89m2NwM3j6CtkiRJY2Xo71mrqgNJLgO2AYuA66pqV5Irgamq2gpcC9yQZBrYTzfQAbwauDLJAeAHwKVVtb9Z9gfAx4EfB77QDJIkSUeUdG+2nAydTqf8UlxJkjQOkuyoqs5cdf42qCRJUosZ1iRJklrMsCZJktRihjVJkqQWM6xJkiS1mGFNkiSpxQxrkiRJLWZYkyRJajHDmiRJUosZ1iRJklrMsCZJktRihjVJkqQWM6xJkiS1mGFNkiSpxQxrkiRJLTaSsJZkTZIHkkwn2dhn+TFJtjTL70iyqpl/TpIdSe5tHn+jZ53bm23ubIYTR9FWSZKkcbJ42A0kWQRcDZwD7AHuTLK1qr7WU3YJ8ERVnZpkHXAV8LvAt4HXV9WjSX4B2Aas6Fnv4qqaGraNkiRJ42oUZ9bOBKar6qGqehq4EVg7q2YtsLkZvwk4O0mq6qtV9WgzfxfwvCTHjKBNkiRJE2EUYW0F8HDP9B6eeXbsGTVVdQB4EjhhVs0bgK9W1fd75n2suQT6niQZQVslSZLGyijCWr8QVc+lJslL6V4a/f2e5RdX1S8Cr2qGN/X948mGJFNJpvbt2/ecGi5JktR2owhre4CTeqZXAo8OqkmyGDgW2N9MrwQ+C7y5qh6cWaGqHmkenwI+Sfdy67NU1aaq6lRVZ9myZSPojiRJUnuMIqzdCaxOcnKSo4F1wNZZNVuB9c34RcBtVVVJjgM+B7y7qv51pjjJ4iRLm/GjgNcB942grZIkSWNl6LDWfAbtMrp3ct4PfLqqdiW5MskFTdm1wAlJpoF3ATNf73EZcCrwnllf0XEMsC3JPcBO4BHgmmHbKkmSNG5SNfvjZeOr0+nU1JTf9CFJktovyY6q6sxV5y8YSJIktZhhTZIkqcUMa5IkSS1mWJMkSWoxw5okSVKLGdYkSZJazLAmSZLUYoY1SZKkFjOsSZIktZhhTZIkqcUMa5IkSS1mWJMkSWoxw5okSVKLGdYkSZJazLAmSZLUYoY1SZKkFhtJWEuyJskDSaaTbOyz/JgkW5rldyRZ1bPs3c38B5KcN99tSpIkHQmGDmtJFgFXA+cDpwFvTHLarLJLgCeq6lTgg8BVzbqnAeuAlwJrgL9Nsmie25QkSZp4ozizdiYwXVUPVdXTwI3A2lk1a4HNzfhNwNlJ0sy/saq+X1X/Dkw325vPNiVJkibeKMLaCuDhnuk9zby+NVV1AHgSOOEg685nmwAk2ZBkKsnUvn37huiGJElS+4wirKXPvJpnzXOd/+yZVZuqqlNVnWXLlh20oZIkSeNmFGFtD3BSz/RK4NFBNUkWA8cC+w+y7ny2KUmSNPFGEdbuBFYnOTnJ0XRvGNg6q2YrsL4Zvwi4raqqmb+uuVv0ZGA18JV5blOSJGniLR52A1V1IMllwDZgEXBdVe1KciUwVVVbgWuBG5JM0z2jtq5Zd1eSTwNfAw4Ab6+qHwD02+awbZUkSRo36Z7gmgydTqempqYWuhmSJElzSrKjqjpz1fkLBpIkSS1mWJMkSWoxw5okSVKLGdYkSZJazLAmSZLUYoY1SZKkFjOsSZIktZhhTZIkqcUMa5IkSS1mWJMkSWoxw5okSVKLGdYkSZJazLAmSZLUYoY1SZKkF
jOsSZIktZhhTZIkqcWGCmtJjk+yPcnu5nHJgLr1Tc3uJOubec9P8rkkX0+yK8kHeurfkmRfkp3N8LZh2ilJkjSuhj2zthG4tapWA7c208+Q5HjgCuDlwJnAFT2h7i+q6ueBlwGvTHJ+z6pbqur0ZvjokO2UJEkaS8OGtbXA5mZ8M3Bhn5rzgO1Vtb+qngC2A2uq6ntV9SWAqnoauAtYOWR7JEmSJsqwYe1FVbUXoHk8sU/NCuDhnuk9zbwfSnIc8Hq6Z+dmvCHJPUluSnLSoAYk2ZBkKsnUvn37ftR+SJIktdKcYS3JLUnu6zOsneffSJ951bP9xcCngA9V1UPN7H8EVlXVLwG38P9n7569oapNVdWpqs6yZcvm2SRJkqTxsHiugqp6zaBlSR5Lsryq9iZZDjzep2wPcFbP9Erg9p7pTcDuqvrrnr/5nZ7l1wBXzdVOSZKkSTTsZdCtwPpmfD1wc5+abcC5SZY0Nxac28wjyZ8CxwLv7F2hCX4zLgDuH7KdkiRJY2nYsPYB4Jwku4FzmmmSdJJ8FKCq9gPvB+5shiuran+SlcDlwGnAXbO+ouMdzdd53A28A3jLkO2UJEkaS6mquavGRKfTqampqYVuhiRJ0pyS7Kiqzlx1/oKBJElSixnWJEmSWsywJkmS1GKGNUmSpBYzrEmSJLWYYU2SJKnFDGuSJEktZliTJElqMcOaJElSixnWJEmSWsywJkmS1GKGNUmSpBYzrEmSJLWYYU2SJKnFDGuSJEktNlRYS3J8ku1JdjePSwbUrW9qdidZ3zP/9iQPJNnZDCc2849JsiXJdJI7kqwapp2SJEnjatgzaxuBW6tqNXBrM/0MSY4HrgBeDpwJXDEr1F1cVac3w+PNvEuAJ6rqVOCDwFVDtlOSJGksDRvW1gKbm/HNwIV9as4DtlfV/qp6AtgOrHkO270JODtJhmyrJEnS2Bk2rL2oqvYCNI8n9qlZATzcM72nmTfjY80l0Pf0BLIfrlNVB4AngROGbKskSdLYWTxXQZJbgBf3WXT5PP9GvzNi1TxeXFWPJHkh8BngTcD1c6wzu30bgA0AL3nJS+bZJEmSpPEwZ1irqtcMWpbksSTLq2pvkuXA433K9gBn9UyvBG5vtv1I8/hUkk/S/Uzb9c06JwF7kiwGjgX2D2jfJmATQKfT6RvoJEmSxtWwl0G3AjN3d64Hbu5Tsw04N8mS5saCc4FtSRYnWQqQ5CjgdcB9fbZ7EXBbVRnEJEnSEWfOM2tz+ADw6SSXAP8B/DZAkg5waVW9rar2J3k/cGezzpXNvBfQDW1HAYuAW4BrmpprgRuSTNM9o7ZuyHZKkiSNpUzSCatOp1NTU1ML3QxJkqQ5JdlRVZ056yYprCXZB3xrHqVLgW8f4ua0mf23//b/yGX/7b/9b4+frqplcxVNVFibryRT80myk8r+23/7b/8Xuh0Lxf7b/3Hsv78NKkmS1GKGNUmSpBY7UsPapoVuwAKz/0c2+39ks/9HNvs/ho7Iz6xJkiSNiyP1zJokSdJYmOiwlmRNkgeSTCfZ2Gf5MUm2NMvvSLLq8Lfy0EhyUpIvJbk/ya4kf9Sn5qwkTybZ2QzvXYi2HipJvpnk3qZvz/oCvnR9qNn/9yQ5YyHaeSgk+bme/bozyXeTvHNWzUTt/yTXJXk8yX09845Psj3J7uZxyYB11zc1u5Os71fTdgP6/+dJvt48vz+b5LgB6x70tTIOBvT/fUke6XmOv3bAugc9VoyDAf3f0tP3bybZOWDdSdj/fY95E/MeUFUTOdD9VYQHgVOAo4G7gdNm1fwh8JFmfB2wZaHbPcL+LwfOaMZfCHyjT//PAv5podt6CP8NvgksPcjy1wJfAAK8Arhjodt8iP4dFgH/Sff7fCZ2/wOvBs4A7uuZ92fAxmZ8I3BVn/WOBx5qHpc040sWuj8j6v+5wOJm/Kp+/W+WHfS1Mg7DgP6/D/jjOdab81gxDkO//s9a/pfAeyd4//c95k3Ke8Akn1k7E5iuqoeq6mngRmDtrJq1wOZm/Cbg7CQ5jG08ZKpqb1Xd1Yw/BdwPrFjYVrXOWuD66voycFyS5QvdqEPgbODBqprPF0aPrar6F7o/T9er9zW+Gbiwz6rnAduran9VPQFsB9YcsoYeIv36X1VfrKoDzeSXgZWHvWGHyYD9Px/zOVa03sH63xzXfgf41GFt1GF0kGPeRLwHTHJYWwE83DO9h2eHlR/WNG9oTwInHJbWHUbN5d2XAXf0WfyrSe5O8oUkLz2sDTv0Cvhikh1JNvRZPp/nyCRYx+A36Une/wAvqqq90H0zB07sU3OkPA/eSvdMcj9zvVbG2WXNZeDrBlwCOxL2/6uAx6pq94DlE7X/Zx3zJuI9YJLDWr8zZLNvfZ1PzVhL8hPAZ4B3VtV3Zy2+i+6lsV8G/gb4h8PdvkPslVV1BnA+8PYkr561/EjY/0cDFwB/12fxpO//+ToSngeXAweATwwomeu1Mq4+DPwMcDqwl+6lwNkmfv8Db+TgZ9UmZv/PccwbuFqfea16DkxyWNsDnNQzvRJ4dFBNksXAsfxop9FbKclRdJ+0n6iqv5+9vKq+W1X/1Yx/HjgqydLD3MxDpqoebR4fBz5L93JHr/k8R8bd+cBdVfXY7AWTvv8bj81c2m4eH+9TM9HPg+bD0q8DLq7mAzqzzeO1Mpaq6rGq+kFV/S9wDf37Nen7fzHwW8CWQTWTsv8HHPMm4j1gksPancDqJCc3ZxfWAVtn1WwFZu76uAi4bdCb2bhpPqNwLXB/Vf3VgJoXz3xGL8mZdJ8P3zl8rTx0krwgyQtnxul+0Pq+WWVbgTen6xXAkzOnyyfIwP9RT/L+79H7Gl8P3NynZhtwbpIlzWWyc5t5Yy/JGuBPgAuq6nsDaubzWhlLsz6D+pv079d8jhXj7DXA16tqT7+Fk7L/D3LMm4z3gIW+w+FQDnTv9vsG3Tt9Lm/mXUn3jQvgeXQvD00DXwFOWeg2j7Dvv073NO49wM5meC1wKXBpU3MZsIvu3U9fBn5tods9wv6f0vTr7qaPM/u/t/8Brm6eH/cCnYVu94j/DZ5PN3wd2zNvYvc/3VC6F/gfuv9TvoTuZ1BvBXY3j8c3tR3goz3rvrV5H5gGfm+h+zLC/k/T/SzOzHvAzN3vPwV8vhnv+1oZt2FA/29oXtv30D1oL5/d/2b6WceKcRv69b+Z//GZ13xP7STu/0HHvIl4D/AXDCRJklpski+DSpIkjT3DmiRJUosZ1iRJklrMsCZJktRihjVJkqQWM6xJkiS1mGFNkiSpxQxrkiRJLfZ/1l95Mhk4XhwAAAAASUVORK5CYII=\n", 818 | "text/plain": [ 819 | "
" 820 | ] 821 | }, 822 | "metadata": { 823 | "needs_background": "light" 824 | }, 825 | "output_type": "display_data" 826 | } 827 | ], 828 | "source": [ 829 | "ax=plt.bar(range(1,psi.shape[0]+1),(psi.cpu()**2).squeeze())" 830 | ] 831 | }, 832 | { 833 | "cell_type": "markdown", 834 | "metadata": { 835 | "slideshow": { 836 | "slide_type": "subslide" 837 | } 838 | }, 839 | "source": [ 840 | "**Hey, the probability is $0$, what is wrong?** " 841 | ] 842 | }, 843 | { 844 | "cell_type": "markdown", 845 | "metadata": { 846 | "slideshow": { 847 | "slide_type": "fragment" 848 | } 849 | }, 850 | "source": [ 851 | "* Because the space is too large and the model is randomly initialized!" 852 | ] 853 | }, 854 | { 855 | "cell_type": "markdown", 856 | "metadata": { 857 | "slideshow": { 858 | "slide_type": "fragment" 859 | } 860 | }, 861 | "source": [ 862 | "* Indeed, the purpose of training is exactly to increase the probability of given images. " 863 | ] 864 | }, 865 | { 866 | "cell_type": "markdown", 867 | "metadata": { 868 | "slideshow": { 869 | "slide_type": "fragment" 870 | } 871 | }, 872 | "source": [ 873 | "* This is the so-called *maximum likelihood learning*." 874 | ] 875 | }, 876 | { 877 | "cell_type": "markdown", 878 | "metadata": { 879 | "slideshow": { 880 | "slide_type": "fragment" 881 | } 882 | }, 883 | "source": [ 884 | "* The procedure is sweeping back and force, from right to left, then from left to right. " 885 | ] 886 | }, 887 | { 888 | "cell_type": "markdown", 889 | "metadata": { 890 | "slideshow": { 891 | "slide_type": "fragment" 892 | } 893 | }, 894 | "source": [ 895 | "* During each sweep, the visited tensor is updated according to the *gradients* of the log-probability with respect to tensor elements." 896 | ] 897 | }, 898 | { 899 | "cell_type": "markdown", 900 | "metadata": { 901 | "slideshow": { 902 | "slide_type": "slide" 903 | } 904 | }, 905 | "source": [ 906 | "## Training MPS\n", 907 | "" 908 | ] 909 | }, 910 | { 911 | "cell_type": "markdown", 912 | "metadata": { 913 | "slideshow": { 914 | "slide_type": "slide" 915 | } 916 | }, 917 | "source": [ 918 | "** Computing gradient **\n", 919 | "\n", 920 | "$\\mathcal{L}=-\\frac{1}{m}\\sum_{\\mathbf{x}\\in\\mathrm{data}}\\ln P(\\mathbf{x})$\n", 921 | "\n", 922 | "$\\nabla \\mathcal{L}=-\\frac{2}{m}\\sum_{\\mathbf{x}\\in\\mathrm{data}}\\frac{\\psi'(\\mathbf{x})}{\\psi(\\mathbf{x})}+\\frac{Z'}{Z}$\n", 923 | "" 924 | ] 925 | }, 926 | { 927 | "cell_type": "markdown", 928 | "metadata": { 929 | "slideshow": { 930 | "slide_type": "slide" 931 | } 932 | }, 933 | "source": [ 934 | "Some tips on the code:\n", 935 | "* Only images with $i^{\\mathrm{th}}$ element $v_i$ contribute to tensor element $A_{w_{i-1},v_i,w_i}$.\n", 936 | "* ```torch.sum(left_vec.permute(0,2,1) @ right_vec.permute(0,2,1)``` computes $\\Psi'(\\mathbf{x})$.\n", 937 | "* ```psi = left_vec @ A.permute(1,0,2) @right_vec ``` does not compute _real_ $\\Psi$, as cache is rescaled to preserve precision. 
Thus one needs to call ```psi=get_psi()``` in order to compute correct probability amplitude $\\Psi$.\n" 938 | ] 939 | }, 940 | { 941 | "cell_type": "markdown", 942 | "metadata": { 943 | "slideshow": { 944 | "slide_type": "subslide" 945 | } 946 | }, 947 | "source": [ 948 | "* ```tensors_bak=tensors.copy()``` is to backup the training environment for restoring after generating images.\n", 949 | "* ```@``` operator in ```left_vec @ A.permute(1,0,2) @right_vec ``` and in ```cache[site] @ tensors[site][:,data[:,site],:].permute(1,0,2)``` works as batched matrix-matrix multiplications.\n", 950 | "* In ```orthogonalize()```, imposing matrix $R$ to be norm-one using ```R/=R.norm() ``` actually let the partition function $Z=1$." 951 | ] 952 | }, 953 | { 954 | "cell_type": "markdown", 955 | "metadata": {}, 956 | "source": [ 957 | "**Begin Learning:**" 958 | ] 959 | }, 960 | { 961 | "cell_type": "code", 962 | "execution_count": 45, 963 | "metadata": { 964 | "scrolled": false, 965 | "slideshow": { 966 | "slide_type": "slide" 967 | } 968 | }, 969 | "outputs": [ 970 | { 971 | "name": "stdout", 972 | "output_type": "stream", 973 | "text": [ 974 | " Epoch #0, site #783 / 784 NLL=72.145, LowerBound=2.996, total_prob=0.000 time_used=3.40 Sec.\n", 975 | " generating samples..." 976 | ] 977 | }, 978 | { 979 | "data": { 980 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlYAAACXCAYAAADXjhUnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAADexJREFUeJzt3X2sZPVdx/H3RxZqtC0P7tZuedqC+ECrrXRDaLWIVinQBrS2FqIp0JoNtVj7RxNpmgAhMRaNmlQQQu3KQwgQUHBtFylaDf1DkAtZnsrTgjQsILuFFiS1VuTrH3MumQwzewf2d3ce7vuVTO6Z8/vNmd9vfjPnfO45Z86kqpAkSdKu+6FJN0CSJGleGKwkSZIaMVhJkiQ1YrCSJElqxGAlSZLUiMFKkiSpkYkGqyQbk2xPcm+DZb0zyb8luS/J3Uk+2leWJH+U5KEk9yf59K4+nyRJ0qBM8jpWSY4GXgAur6q37+KyfhKoqno4yVuAO4CfqarvJjkd+GXgtKp6Kcmbqmr7LndAkiSpz0T3WFXVLcCz/fOSHJrkH5PckeQbSX56zGU9VFUPd9NPAtuBNV3xJ4HzquqlrtxQJUmSmpvGc6wuAX6/qt4FfBb4q1e7gCRHAnsBj3SzDgU+mmQhyY1JDmvWWkmSpM6qSTegX5LXA+8Brk2yOPt1XdmHgPOGPOyJqnp/3zLWAlcApy7uoeqW8f2qWt8tZyPw3uXphSRJWqkmeo4VQJJ1wFeq6u1J3gg8WFVrX+Oy3gj8K/DHVXVt3/wHgOOq6rH0Ett3q2rvXW68JElSn6k6FFhVzwP/keQj8PK3+d4xzmOT7AVcT+9E+GsHim8AfqWb/iXgoUZNliRJetmkvxV4FXAMsBp4GjgH+DpwEbAW2BO4uqqGHQIcXNbvAH8D3Nc3+7Sq2pJkH+BK4CB630I8o6ruatgVSZKkyR8KlCRJmhdTdShQkiRplhmsJEmSGpnY5RZWr15d69atm9TTS5Im5J4nnmu+zJ/d3y96a3ndcccd366qNUvVm1iwWrduHQsLC5N6eknShKw766vNl7nwhQ80X6bUL8m3xqnnoUBJkqRGDFaSJEmNGKwkSZIaMVhJkiQ1YrCSJElqxGAlSZLUiMFKkiSpEYOVJElSIwYrSZKkRgxWkiRJjRisJEmSGjFYSZIkNWKwkiRJasRgJUmS1IjBSpIkqRGDlSRJUiMGK0mSpEYMVpIkSY0sGaySbEyyPcm9I8qPSfJcki3d7ez2zZQkSZp+q8aocylwAXD5Tup8o6o+2KRFkiRJM2rJPVZVdQvw7G5oiyRJ0kxrdY7Vu5PcleTGJG9rtExJkqSZMs6hwKXcCRxcVS8kOQG4AThsWMUkG4ANAAcddFCDp5YkSZoeu7zHqqqer6oXuunNwJ5JVo+oe0lVra+q9WvWrNnVp5YkSZoquxyskrw5SbrpI7tlPrOry5UkSZo1Sx4KTHIVcAywOsk24BxgT4Cquhj4MPDJJC8C/w2cXFW1bC2WJEmaUksGq6o6ZYnyC+hdjkGSJGlF88rrkiRJjRisJEmSGjFYSZIkNWKwkiRJasRgJUmS1IjBSpIkqRGDlSRJUiMGK0mSpEYMVpIkSY0YrCRJkhoxWEmSJDVisJIkSWrEYCVJktSIwUqSJKkRg5UkSVIjBitJkqRGDFaSJEmNGKwkSZIaMVhJkiQ1smSwSrIxyfYk944oT5IvJtma5O4kR7RvpiRJ0vQbZ4/VpcBxOyk/Hjisu20ALtr1ZkmSJM2eJYNVVd0CPLuTKicBl1fPrcA+Sda2aqAkSdKsaHGO1f7A4333t3XzJEmSVpQWwSpD5tXQismGJAtJFnbs2NHgqSVJkqZHi2C1DTiw7/4BwJPDKlbVJVW1vqrWr1mzpsFTS5IkTY8WwWoT8LHu24FHAc9V1VMNlitJkjRTVi1VIclVwDHA6iTbgHOAPQGq6mJgM3ACsBX4HnD6cjVWkiRpmi0ZrKrqlCXKC/hUsxZJkiTNKK+8LkmS1IjBSpIkqRGDlSRJUiMGK0mSpEYMVpIkSY0YrCRJkhoxWEmSJDVisJIkSWrEYCVJktSIwUqSJKkRg5UkSVIjBitJkqRGDFaSJEmNGKwkSZIaMVhJkiQ1YrCSJElqxGAlSZLUiMFKkiSpEYOVJElSIwYrSZKkRsYKVkmOS/Jgkq1JzhpSflqSH
Um2dLffbd9USZKk6bZqqQpJ9gAuBH4N2AbcnmRTVX1zoOo1VXXmMrRRkiRpJoyzx+pIYGtVPVpVPwCuBk5a3mZJkiTNnnGC1f7A4333t3XzBv1mkruTXJfkwCatkyRJmiHjBKsMmVcD9/8BWFdVPwf8E3DZ0AUlG5IsJFnYsWPHq2upJEnSlBsnWG0D+vdAHQA82V+hqp6pqv/p7n4JeNewBVXVJVW1vqrWr1mz5rW0V5IkaWqNE6xuBw5L8tYkewEnA5v6KyRZ23f3ROD+dk2UJEmaDUt+K7CqXkxyJnATsAewsaruS3IesFBVm4BPJzkReBF4FjhtGdssSZI0lZYMVgBVtRnYPDDv7L7pzwGfa9s0SZKk2eKV1yVJkhoxWEmSJDVisJIkSWrEYCVJktSIwUqSJKkRg5UkSVIjBitJkqRGDFaSJEmNGKwkSZIaMVhJkiQ1YrCSJElqxGAlSZLUiMFKkiSpEYOVJElSI6sm3QCNb91ZX226vMe+8IGmy5MkaaVzj5UkSVIjBitJkqRGDFaSJEmNGKwkSZIaGStYJTkuyYNJtiY5a0j565Jc05XflmRd64ZKkiRNuyWDVZI9gAuB44HDgVOSHD5Q7RPAd6rqJ4C/AM5v3VBJkqRpN87lFo4EtlbVowBJrgZOAr7ZV+ck4Nxu+jrggiSpqmrYVulV8xIVkqTdaZxDgfsDj/fd39bNG1qnql4EngN+rEUDJUmSZsU4e6wyZN7gnqhx6pBkA7Chu/tCkgfHeP7VwLfHqDevlq3/mY0DtlM1/hN4zaaq/xNg/+3/WP2fkfXZa+F7YHr6f/A4lcYJVtuAA/vuHwA8OaLOtiSrgL2BZwcXVFWXAJeM07BFSRaqav2recw8sf/23/7b/0m3Y1JWev/B12AW+z/OocDbgcOSvDXJXsDJwKaBOpuAU7vpDwNf9/wqSZK00iy5x6qqXkxyJnATsAewsaruS3IesFBVm4AvA1ck2UpvT9XJy9loSZKkaTTWjzBX1WZg88C8s/umvw98pG3TXvaqDh3OIfu/stn/lc3+a6W/BjPX/3jETpIkqQ1/0kaSJKmRqQlWK/lnc5IcmORfktyf5L4kfzCkzjFJnkuypbudPWxZsyrJY0nu6fq2MKQ8Sb7Yjf/dSY6YRDuXQ5Kf6hvXLUmeT/KZgTpzNf5JNibZnuTevnn7Jbk5ycPd331HPPbUrs7DSU4dVmfajej/nyZ5oHt/X59knxGP3elnZRaM6P+5SZ7oe4+fMOKxO91WzIIR/b+mr++PJdky4rHzMP5Dt3lzsw6oqonf6J0U/whwCLAXcBdw+ECd3wMu7qZPBq6ZdLsb9n8tcEQ3/QbgoSH9Pwb4yqTbuoyvwWPA6p2UnwDcSO+aaUcBt026zcv0OuwB/Cdw8DyPP3A0cARwb9+8PwHO6qbPAs4f8rj9gEe7v/t20/tOuj+N+n8ssKqbPn9Y/7uynX5WZuE2ov/nAp9d4nFLbitm4Tas/wPlfwacPcfjP3SbNy/rgGnZY/Xyz+ZU1Q+AxZ/N6XcScFk3fR3wviTDLkw6c6rqqaq6s5v+L+B+Xnl1+5XuJODy6rkV2CfJ2kk3ahm8D3ikqr416YYsp6q6hVde667/M34Z8OtDHvp+4OaqeraqvgPcDBy3bA1dJsP6X1Vfq94vVwDcSu+agXNpxPiPY5xtxdTbWf+77dpvAVft1kbtRjvZ5s3FOmBagpU/m9PpDnH+PHDbkOJ3J7kryY1J3rZbG7b8CvhakjvSu0L/oHHeI/PgZEavUOd5/AF+vKqegt6KF3jTkDor5X3wcXp7aIdZ6rMyy87sDoVuHHEYaCWM/3uBp6vq4RHlczX+A9u8uVgHTEuwavazObMsyeuBvwU+U1XPDxTfSe/w0DuAvwRu2N3tW2a/UFVHAMcDn0py9ED5Shj/vYATgWuHFM/7+I9rJbwPPg+8CFw5ospSn5VZdRFwKPBO4Cl6h8MGzf34A6ew871VczP+S2zzRj5syLypeg9MS7B6NT+bQ3byszmzKsme9N5gV1bV3w2WV9XzVfVCN70Z2DPJ6t3czGVTVU92f7cD19Pb5d9vnPfIrDseuLOqnh4smPfx7zy9eHi3+7t9SJ25fh90J+J+EPjt6k4oGTTGZ2UmVdXTVfV/VfUS8CWG92vex38V8CHgmlF15mX8R2zz5mIdMC3BakX/bE53TP3LwP1V9ecj6rx58ZyyJEfSG7tndl8rl0+SH03yhsVpeifx3jtQbRPwsfQcBTy3uMt4joz8T3Wex79P/2f8VODvh9S5CTg2yb7doaJju3kzL8lxwB8CJ1bV90bUGeezMpMGzpn8DYb3a5xtxSz7VeCBqto2rHBexn8n27z5WAdM+uz5xRu9b309RO8bH5/v5p1HbyUD8MP0DpFsBf4dOGTSbW7Y91+ktyvzbmBLdzsBOAM4o6tzJnAfvW/B3Aq8Z9Ltbtj/Q7p+3dX1cXH8+/sf4MLu/XEPsH7S7W78GvwIvaC0d9+8uR1/egHyKeB/6f0H+gl650z+M/Bw93e/ru564K/7Hvvxbj2wFTh90n1p2P+t9M4dWVwHLH4L+i3A5m566Gdl1m4j+n9F99m+m94Gdu1g/7v7r9hWzNptWP+7+Zcufub76s7j+I/a5s3FOsArr0uSJDUyLYcCJUmSZp7BSpIkqRGDlSRJUiMGK0mSpEYMVpIkSY0YrCRJkhoxWEmSJDVisJIkSWrk/wE08Mn1XCyHCwAAAABJRU5ErkJggg==\n", 981 | "text/plain": [ 982 | "
" 983 | ] 984 | }, 985 | "metadata": { 986 | "needs_background": "light" 987 | }, 988 | "output_type": "display_data" 989 | }, 990 | { 991 | "data": { 992 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1oAAAB/CAYAAAD2K7OGAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAE1NJREFUeJzt3UHO5LaVAGD+Ay+MAQxk4WyySd8hPkkOMefoc8whssoxOndwNt7EiwEMDLKrLDplCxWJeqSeJEr1fRu7/1KpxBJFiXyPrI/H41EAAADI819nHwAAAMDd6GgBAAAk09ECAABIpqMFAACQTEcLAAAgmY4WAABAMh0tAACAZDpaAAAAyXS0AAAAkn3TsvH333/7+PTpu72OZVc//vhL+fnnf37Utrl7+Uq5fxmVb1zq6Fd3L18p9y/j3ctXyv3LqHzjUke/unv5SnmPMjZ1tD59+q58+fLn/qM60Q8//GV1m7uXr5T7l1H5xqWOfnX38pVy/zLevXyl3L+MyjcudfSru5evlPcoo9RBAACAZDpaAAAAyXS0AAAAkuloAQAAJNPRAgAASKajBQAAkExHCwAAIJmOFgAAQDIdLQAAgGTfnH0AAEDMx+eNO/gp4ygAiBDRAgAASCaiBQAN/vZTf2TpEXjf3L6f73v+d3NkC4Dd6WgNrOlGKh1kV89zEXlI6t6/cwi31Ns5qrU3c6/pfHGmtPrnXpjm9ZwstSnpbccNzuHqdxIso9RBAACAZCJaE1vSQVodNqrwZnrP4ePzctRqaX+v2zt3wJxp29DSTvREwLRDlDJ/L3y9V03vddNtezM3ovfC6HG8u4xn0ui5POuZtFbGuXoxV8dGj+6LaAEAACS7TUQrK5dyT6P3ut9J74T0uVGUuVHDdz6vi2Uf4BrcInROL15Gjlcboa2N2q61T1zXXs8zr/tdy9Z4FX2GWYtsRY8jS8b+WyI+oe9p53tF7XhrxzdShk60jasdazSit2d5RbQAAACSNUW0WvNFty5jG91+dK35zr3fG3G172/r6Mie52aPeYRrEbijR7j2KmOE64qj1UZoZUHsZ4/nmVL22ecoomU7qv5umZNd0xI1udp8+6X7/Ws57vIcHom4Pj7v+2yXnjq4ZVJjJJw5YlpWdMLedJvX30R5fZ3jRRrMuVSe0Rqknmsw0pmKlnOk1IOnpca2Nlk8uijK3lrPybQNSn/guXFq5K/fxwBlvNoD+J5Gq6etbWptHyO1kVO9nYcRn2einarpdr3X39llnYocy2jPLkuy+xWvr7V2op+v/fDX2DFIHQQAAEiWHtFa6xn2Thzs7cU+R6ajPc8ePZPzRhotyJhI2PI5S+d7acnXI0aaa+dkS2rAGdGdI1McrjIhf6nstajVXuXISFmaGxGf264lUrlHeZcmXr++tlcbNGJdfIrUvdo5fX1PtiNTeEc9T2vff+Z+R/oO7hQNWWsjW7erGTGbY85I9e9Pfyjly+f2aUTT1yNTQXrazdX9+sFiAACAc3RFtFrnNqzJnMs19++WnmeP1pGdaLSrNvcrsq85z1HKuXzkubkdvWpzmZb2vTWKudUei5ZMtz17lKt3lPLs496iZ9Jzy34zr5kle8ybmx7vHsc9jYZERhu3uFL9jMwXmBp97mOWsxcZav3MPebQjTg/tNXSc+HrayPX5aXnrsj8+7PXEojcv5eeMfd+bl9Si5xH732tz+BHX19dHa21zsPWsHPLZ42gFnJeSj2IVKw9y16rfNGUntf3rf0t8lr0M7NlpC6NVi/XjD7hPrPDn13WUc51bUBjabu5fz/fF3mY2EMkPXKtPoycyhQdwGj93kd6IJ879tHbmF57tCtHpC3PqQ3CLKXzT9U6UD0P0HtqbT9qnd8R63b0fpCZ/jr6uTwq1bpG6iAAAECy9MUwlpzd+z1KRhpk7762io6ozZ3L1u2vZC3loZYaOXIqXnSU8iy9o/rT9x5VtiO+r2jEKTqpeDXd+kAZEZwR6uyS6Ejz6zZL+6j97SwZkaxoBsXa/WbPxa+en1HK9tTjEUTbiyU9EZOR6u3T3D2j9rc5I6V/7lE3RztvvRkAR5dDRAsAACDZrhGtjKU0Rx1tX7NlxGr08tUmUo4+6voqIxqy92T/qMjiKdPXpv9uycVfem2pDjz/njnKvNeI/5b2JnskvXWkuGXeyMjX5JK1Y+6dB/U6Cr13NGTp+ljadikCduT8uZZjLiUWee0dXV7b/uNzOWzxq4zI1BWuxd55LiNF7qZa24roe1sjYEdovZcvbXuEljau5zrsXXOg5f1zRLQAAACSHTZH62nLSMIoIiNxvfMkrmTLeRul3FuiUWfmYM+NmNUiOmvRx9p8nbkR9Zbo0a9/Txxl7pnTE6mvW85h9kh6ZDSvdeT0qDo6/RHKyHd8ZHu/Ojdtx2jInFCEppx3T+yZB9iyQtvo83nmVtKLZAA8nX382VoikaOUPXLtbLm+zp6PVVOLRrdGjPbO7mj53ubuLVv32bN9RHpHq/XGP9oyjBFbw4/TfYx0gWZMYG5NuxtZJO3lLNEGsiaaahj5zFHNdRivsoDCq550yZYH3qX99Yj8jtbSogatqUlXP4c1V62rS3rSP4+4R7akvmenDp6ttwxXule03sPnzvmIz2wt1upt5rPbHgNWkeMatQ8hdRAAACBZekRrS5hupB5oxJaw5dJ7zxyhzYySRMO6c/tfGn3ae5L6kmia3hGmKVlLNqe/DWhuFHFuFHqpzn18bp/QP7ePM/V8fqT9GaG9ibQ90XO+NiG9Vkf21trGXi0Vu5R4GbdGMzO1pizV1NIKRzpPpaxHCa4avYnozbi66ncQPe5Ry3elVNVXIloAAADJDl8Mo+Zq83oyjm3k8k21jlK2RuZCE8IHmKR+9gjzdO7LO8mYy/P6vpaJvc//jvrdt7adS6PWI5avFvmqRc7XolijlXUtYhedNzJyVCia4THauWkxV0efttbD5/uzsjtC992Nrnxur3Ssr0Zbuv0stbaxloGV+d2c9jtaU73pWJGbS3bDFDmWqbmb59wN8moPBVNLDwVXD7XPuVpZWhvbq9W9V5EHudZ6OVL5M9PejpjIvzbpuiV9Z2nbK6eUlNK/QMmV0+5fnTGpfS4Nu3XCfautZfn1/TsPOm5tG+5UN6+qtrjF05UWNOkVHXDcs7xSBwEAAJJ1R7Qio8J7RKha93/UCFDL97DWi77DSMLIaVbvpPUc3PWcXblce0UZz1784YoRxUxXTqna2xHfxVwa9tbnjjMXWskUTfFc2/4K7n4dRiKydyvzaES0AAAAknVHtPSAmaNeQC7X1D05r/8psjR6LVvi7EyKtfleV3fHOnvHMtXUFhBiHyJaAAAAyYZa
3v3dRhYA4G5q815qPz7+qnUO80jPECMdC0ypm8caqqMFAFxba2fIgx+wZITfCdxC6iAAAECyj8fjEd/44+MfpZS/73c4u/rj4/H4fW2Du5evlPuXUfmGpo6W+5evlPuX8e7lK+X+ZVS+oamj5f7lK+VNytjS0QIAAGCd1EEAAIBkOloAAADJdLQAAACS6WgBAAAk09ECAABIpqMFAACQTEcLAAAgmY4WAABAMh0tAACAZDpaAAAAyb5p2fj77799fPr03V7Hsqsff/yl/PzzPz9q29y9fKXcv4zKNy519Ku7l6+U+5fx7uUr5f5lVL5xqaNfffz3t4/yu2uWr/zfL+Xx/85hKY0drU+fvitfvvy5/6hO9MMPf1nd5u7lK+X+ZVS+camjX929fKXcv4x3L18p9y+j8o1LHf23331Xyv9cs3zlf53DJ6mDAAAAyXS0AAAAkuloAQAAJNPRAgAASKajBQAAkExHCwAAIJmOFgAAQLKm39ECAADO8/jc976PzvfRT0QLAAAgmYjWgLpGHH7KPgoAAEazV2TqGSkT+cojogUAAJBMRGvibz/l9+Ln8mijnzF9r9EFAK7ked9qnU+y+X4nwwNKKf957a1dW5418+lonSB601Hhf/Pxubh5whtYaveW2s30dlI7U0rJHXicdrh6O1/AfpZSBg34byd1EAAAIJmI1s5GGwGYjlLWRojnws1LIei1kWaTK+G97JGGPd3fY+H/a+/hGLV0+en5iEa5WlOfgN+0Xi+ut3wiWgAAAMlEtAZz2DyElX2+jjBuGTWu5fy27Ad4P9E5Au8492fEnwLpWezp9ZyZFwLtWrOHtizW9k4Wv5NgWyqiBQAAkOwWEa1QD/zElaQiowYjjODtNboRGWE+q8xbPrdllZ7pPIRFF1ntbGmuzF3Kd2dbRjqPMFef5o55Lnr1ej3uWYbWOWjRNnCuTanNmbjifIrWY75iGd+d5fnP0fq9R9rRvWW0pbU2P3o/mNsu6zto6mjt9TtTtX2OHNqMptPNPYjX3nvWjWXuc3pTBs96UIvU0S3HNm2EIuclu9Hacg1GOoY1tQfeo0Qa1OnrGR3ppc+YFXhAyHxI36vjtDVVolf2wM4oD+ktbcXSe1vP4VzHc4R7ZyQtvXb/JGZLO7O1fZrWvasthrV2zGnPpIMN/j+NeJ6i9/u115YWaFt7Jp87npbvSeogAABAskNSB9eWe73j5OWMMo0wshAdiX3drjWqdORoa20kJKq2JPFcnT5rNDkySrl0DS5FvvYuR2Qkdu4ctkSJX/cxt90oiytkfN+RKOYI7U3NXRbRiVxHtRTKufdHos1Hfk+Rnw7ZM1VnTz3PLOnl2jkaktXW733f2yPLqpT2rI8Ro3Zbnm2W7F2+pcjU2jNVLQVyuq9IenmkjK3fg4gWAABAskMiWtERoEg+bDRScsTIwh4j3iONiLxqGVFeGxVpmd+VMaq2tp/MeYJnR0CmenOQM6J+R4mOQEWijCPMQ+uREYkbYQ5PT11bai9Gj5RE2rnadbs0ejtCWVvnpJ1xzFuiIb3zXEe2Zb7nWdkPe8iaw1177fG5lB/+2v85EVlzuc/KNmp5rbbNXDmic/Cyyr25oxUJz03//zXNZ/rQPTeBcun9r/uv/W1vu06YTPanP5Ty5XN8kmpLo7qktdzZ31Nrys1TbUJvT7rkmbakco5crqi1h9W5On1mqtXSZ/des63HcdY57ylXLSX2bK1tzqvWMozywF+7f49wXqKWnjd6B+8y0rL2ekhfu989tdz/z+5Mb9Vbb2vPTVnXaO15OXv/VxR5PjvqOV3qIAAAQLLNEa1oVCmS/rC2j9rfR7MWOl6y94TvabpE1qS/q6SZ1ULIa9s/jRqp3KpW1tHLFzknrRHctVHC7PS01lSrva6zM6N4S8dwxYnoT1npO3Ovzb0+4ndQyvok9TM8szumau3GXD1srXsZZf34XHZZDKMWdY0+z0QWG8g0dw6fWrM41rZpOdehSOfGc9iamdOyvzsZ4ZlNRAsAACDZIYthlHKvyaOtC3u0jISNNqrQOmoVHVk5s5wZc7Smr9f2nz2aMh3Bix7789+Ra3C0+rek59hro6xzUd6siG+W6Ahtax09S08UIDrZfFQZi308jVzOrAjtnmXs/XH7lrlUz+2vUDefWutoZoT1ua/oHLTogiatz2y1fVztvvl631u6P1ypji4ZtQwiWgAAAMkOi2i9yp7jcJTW6M1ajn10dOTs1dFqUbm1v13VXHQjmtddmwe3ZdRlOoJXm+sQnV+2dGxzeldg2sPcHKqeXPQrjFJG2pylkcmlujLCOewRmvvwb0vndCkiv+f30XoOp39rvc+cNarb2/bPfTejjLRnz7s8sizR7Ien3rk8mfPvsuYv9XjHuUyjXW+1yPZIbV2rQzpa0Qq81uBO/772vr20phospWx1N0CNx7On1gmYo18MEbW0rCMnRNf2s9bRq4kuLHGUWsPbko484vUzp6etjDi6nHM/I/F6LFs6e1vq6Rn1OSMNde57nHvPWXU6krK6tH3ttdGu0R6RAY+WQddIal3r74S1HkfktTlnDK5vTZtvbavODiDU6tXIg+Gt9SzrOXpvUgcBAACSnZ46eDetI5dbQ+17WBvxyBidvYNRy5id7vJqpAhy77aj6o2KLl2zLQvXZJ7XyM9IRCM4cxGwO5zrqBHvEU+9EZnpf88qT2R592y1aGrTtX9wal1mhspI9Tdq9MhJ7VqqXZ+Rfc4Z9Ry23lOOIqIFAACQrDui1TJS2tN7bBnFG7V3XTPyKGXWsfSW8a7zu0ZWix7MnceWRSRGWXihVsYrqH3XrVGj2typUZw5N5CY7ojMAOYWFopMuD8qKryn3pH9nnmEI+p5FqndD88WaStHOM4zzF3LR0e2ujtae98E1yYmtmyfKWtlmqtU+i2Tmt8pteDq5q7nyKqGPfs+S2ubVWtnlh7Ior//0iPje7zLIgNXTGvpNXJ57rJSW8tA7h0GfTPK8A7XYDSV/szy3uUaPILFMAAAAG7itMUwtjizZ/5uowLvVl5+M8K5zwrx15aJX9K11OwJv/9SM2KaS6+7pxRfsXyjHx/M6YlCjdqWjnIcLBPRAgAASHbJiBbwHrJG69511O9O5b5TWebMzYvkuq6yMEavK5ep59ivXF7OJaIFAACQTEQLAAZg1Pw+nEuglMaOVu2X1M/+lXcAgBZXXISEfUV+c9EzL1FSBwEAAJJ9PB6P+MYfH/8opfx9v8PZ1R8fj8fvaxvcvXyl3L+Myjc0dbTcv3yl3L+Mdy9fKfcvo/INTR0t9y9fKW9SxpaOFgAAAOukDgIAACTT0QIAAEimowUAAJBMRwsAACCZjhYAAEAyHS0AAIBkOloAAADJdLQAAACS6WgBAAAk+xcnATbgbjxkMgAAAABJRU5ErkJggg==\n", 993 | "text/plain": [ 994 | "
" 995 | ] 996 | }, 997 | "metadata": {}, 998 | "output_type": "display_data" 999 | }, 1000 | { 1001 | "name": "stdout", 1002 | "output_type": "stream", 1003 | "text": [ 1004 | "Press any key to continue, or 'stop' to quit stop\n" 1005 | ] 1006 | } 1007 | ], 1008 | "source": [ 1009 | "learning_rate=0.08\n", 1010 | "for epoch in range(9): # one sweep, from right to left, then from left to right\n", 1011 | " going_right=False\n", 1012 | " t0=time.time()\n", 1013 | " for site in [i for i in range(n-1,0,-1)]+[i for i in range(n-1)]:\n", 1014 | " # to update tensors[site] which is a 3-way tensor of size [dl,2,dr]\n", 1015 | " sys.stdout.write(\"\\r Epoch #%d, site #%d / %d \"%(epoch, site+1,n)); sys.stdout.flush()\n", 1016 | " if(site==0): going_right=True\n", 1017 | " gradients = torch.zeros_like(tensors[site],device=mydevice)\n", 1018 | " for i in [0,1]: # the pixel could be either 0 or 1\n", 1019 | " idx=(data[:,site]==i).nonzero().type(torch.LongTensor).squeeze() # this returns indices of non-zero elements\n", 1020 | " if(idx.numel()==0): continue\n", 1021 | " left_vec = cache[site][idx,:,:] # a vector on the left of the site\n", 1022 | " right_vec = cache[site+1][idx,:,:] # a vector on the right of the site\n", 1023 | " A=tensors[site][:,data[idx,site],:]\n", 1024 | " if(idx.numel()==1): \n", 1025 | " A=A.view(A.shape[0],1,A.shape[1])\n", 1026 | " left_vec=left_vec.view(1,left_vec.shape[0],left_vec.shape[1])\n", 1027 | " right_vec=right_vec.view(1,right_vec.shape[0],right_vec.shape[1])\n", 1028 | " psi = left_vec @ A.permute(1,0,2) @right_vec \n", 1029 | " gradients[:,i,:] = torch.sum(left_vec.permute(0,2,1) @ right_vec.permute(0,2,1) / psi,0) \n", 1030 | " gradients = 2.0*(gradients/m-tensors[site]) \n", 1031 | " tensors[site] += learning_rate * gradients/gradients.norm()\n", 1032 | " orthogonalize(site,going_right)\n", 1033 | " if(going_right): # Update cache\n", 1034 | " cache[site+1] = cache[site] @ tensors[site][:,data[:,site],:].permute(1,0,2)\n", 1035 | " cache[site+1] /= cache[site+1].abs().max()\n", 1036 | " else:\n", 1037 | " cache[site] = tensors[site][:,data[:,site],:].permute(1,0,2) @ cache[site+1]\n", 1038 | " cache[site] /= cache[site].abs().max()\n", 1039 | " psi=get_psi()\n", 1040 | " tensors_bak=tensors.copy()\n", 1041 | " sys.stdout.write(\"NLL=%.3f, LowerBound=%.3f, total_prob=%.3f time_used=%.2f Sec.\"%(-torch.mean(torch.log(psi*psi)),math.log(m),torch.sum(psi.squeeze()**2),time.time()-t0 ))\n", 1042 | " sys.stdout.write(\"\\n generating samples...\")\n", 1043 | " ax=plt.bar(range(1,psi.shape[0]+1),psi.cpu().squeeze()**2)\n", 1044 | " imgs=gen_samples(30)\n", 1045 | " show_imgs(imgs,2,15,15,2)\n", 1046 | " tensors=tensors_bak.copy()\n", 1047 | " if(epoch < 8):\n", 1048 | " a=input(\"Press any key to continue, or 'stop' to quit \")\n", 1049 | " if (a==\"stop\"):\n", 1050 | " break\n" 1051 | ] 1052 | }, 1053 | { 1054 | "cell_type": "markdown", 1055 | "metadata": {}, 1056 | "source": [ 1057 | "### References for further reading:\n", 1058 | "* Ulrich Schollwock, “The density-matrix renormalization group in the age of matrix product states,” Annals of Physics 326, 96– 192 (2011).\n", 1059 | "* Tamara G. Kolda, and Brett W. Bader, \"Tensor decompositions and applications\", SIAM review 51, 455 (2009).\n", 1060 | "* Ivan V Oseledets, “Tensor-train decomposition,” SIAM Journal on Scientific Computing 33, 2295–2317 (2011).\n", 1061 | "* Edwin Miles Miles Stoudenmire and David J. 
Schwab, “Supervised Learning with Quantum-Inspired Tensor Networks,” Advances in Neural Information Processing Systems 29, 4799 (2016), arXiv:1605.05775.\n", 1062 | "* Alexander Novikov, Mikhail Trofimov, and Ivan Oseledets, “Exponential machines,” arXiv:1605.05775 (2016).\n", 1063 | "* Jinguo Liu, and Lei Wang,Differentiable Learning of Quantum Circuit Born Machine, arXiv:1804.04168 (2018).\n", 1064 | "* Edwin Miles Stoudenmire, “Learning relevant features of data with multi-scale tensor networks,” Quantum Science and Technology (2018)." 1065 | ] 1066 | }, 1067 | { 1068 | "cell_type": "code", 1069 | "execution_count": null, 1070 | "metadata": {}, 1071 | "outputs": [], 1072 | "source": [] 1073 | } 1074 | ], 1075 | "metadata": { 1076 | "celltoolbar": "Slideshow", 1077 | "kernelspec": { 1078 | "display_name": "Python 3", 1079 | "language": "python", 1080 | "name": "python3" 1081 | }, 1082 | "language_info": { 1083 | "codemirror_mode": { 1084 | "name": "ipython", 1085 | "version": 3 1086 | }, 1087 | "file_extension": ".py", 1088 | "mimetype": "text/x-python", 1089 | "name": "python", 1090 | "nbconvert_exporter": "python", 1091 | "pygments_lexer": "ipython3", 1092 | "version": "3.7.3" 1093 | } 1094 | }, 1095 | "nbformat": 4, 1096 | "nbformat_minor": 2 1097 | } 1098 | -------------------------------------------------------------------------------- /2_tensor_network/tensor_contraction_methods.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/2_tensor_network/tensor_contraction_methods.pdf -------------------------------------------------------------------------------- /4_quantum/README.md: -------------------------------------------------------------------------------- 1 | # Quantum Computing 2 | 3 | ## Table of Contents 4 | * Lecture Note: [quantum_lecture_note.pdf](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/quantum_lecture_note.pdf) 5 | * Slides: [google slides](https://docs.google.com/presentation/d/1jUTpa8pB3jEOWDW1U0rDTDQ-kpri8j8S4y77GQCo3iM/edit?usp=sharing) 6 | * Notebooks 7 | * The solution to the graph embeding problem: [graph_embeding.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/graph_embeding.ipynb) 8 | * Quantum circuit computing with Yao.jl: [QC-with-Yao.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/QC-with-Yao.ipynb) 9 | * Landscape of a quantum circuit: [variational_quantum_circuit.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) 10 | * Variational quantum eigensolver: [variational_quantum_circuit.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) 11 | * Matrix Product state inspired variational quantum eigensolver [VQE_action.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/VQE_action.ipynb) 12 | * Quantum circuit born machine: [qcbm_gaussian.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/qcbm_gaussian.ipynb) 13 | * Gradient vanishing problem: [variational_quantum_circuit.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) and [VQE_action.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/VQE_action.ipynb) 14 | * Mapping a quantum circuit to tensor networks: [qc_tensor_mapping.ipynb](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/qc_tensor_mapping.ipynb) 15 | 16 | Welcome for pull requests and issues! 
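For a quick taste of what these notebooks cover before opening them, here is a minimal Yao.jl sketch (a hedged example: it assumes a working Yao installation as described in the top-level README, and the exact printed form of the measurement results may vary between Yao versions):

```julia
using Yao

# A two-qubit Bell-state circuit: Hadamard on qubit 1, then CNOT with qubit 1 as control.
circuit = chain(2, put(1 => H), control(1, 2 => X))

reg = zero_state(2)      # start from |00⟩
apply!(reg, circuit)     # the state is now (|00⟩ + |11⟩)/√2

# Sample the register; only the bit strings 00 and 11 should appear.
println(measure(reg; nshots = 10))
```

The notebooks listed above build on exactly these primitives (`chain`, `put`, `control`, `apply!`, `measure`) to construct variational circuits and Born machines.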
17 | -------------------------------------------------------------------------------- /4_quantum/bloch_sphere.jl: -------------------------------------------------------------------------------- 1 | using Makie, GLMakie, Observables, Colors, Yao 2 | AbstractPlotting.inline!(true) 3 | 4 | function bloch_arrow(θ, ϕ) 5 | x = sin(2θ) * cos(ϕ) 6 | y = sin(2θ) * sin(ϕ) 7 | z = cos(2θ) 8 | return (x, y, z) 9 | end 10 | 11 | function bloch_arrow(r::ArrayReg) 12 | @assert nqubits(r) == 1 "invalid quantum state, expect only one qubit" 13 | st = statevec(r) 14 | global_phase = exp(im * angle(st[1])) 15 | st = st ./ global_phase 16 | θ = acos(real(st[1])) # st[1] is real 17 | ϕ = iszero(θ) ? zero(θ) : angle(st[2] / sin(θ)) 18 | return bloch_arrow(θ, ϕ) 19 | end 20 | 21 | function plot_arrow!(scene, r::ArrayReg; kwargs...) 22 | x, y, z = bloch_arrow(r) 23 | lines!(scene, [0.0, x], [0.0, y], [0.0, z]; kwargs...) 24 | return scene 25 | end 26 | 27 | function plot_block!(scene, x::AbstractBlock; linewidth=5.0, kwargs...) 28 | r = ArrayReg(bit"0") 29 | plot_arrow!(scene, r; color=:red, linewidth=linewidth, kwargs...) 30 | rand_color = RGB(rand(0:255)/255, rand(0:255)/255, rand(0:255)/255) 31 | plot_arrow!(scene, apply!(r, x); color=rand_color ,linewidth=linewidth, kwargs...) 32 | return scene 33 | end 34 | 35 | function bloch_sphere!(scene) 36 | wireframe!(scene, Sphere(Point3f0(0), 1f0), color=:cyan3, linewidth=0.1, thickness = 0.6f0, transparency = true) 37 | return scene 38 | end 39 | 40 | 41 | function plotapply(x::AbstractBlock) 42 | AbstractPlotting.inline!(false) 43 | scene = Scene(resolution=(1000, 1000), center=false) 44 | bloch_sphere!(scene) 45 | plot_block!(scene, x) 46 | rotate_cam!(scene, 0.5, 0.0, 0.0) 47 | return scene 48 | end 49 | 50 | function animate_rot() 51 | scene = Scene(resolution=(1000, 1000)) 52 | bloch_sphere!(scene) 53 | scene.center = false 54 | 55 | time = Node(0.0) 56 | scene = lift() 57 | bloch_arrow!(scene, x) 58 | rotate_cam!(scene, 0.5, 0.0, 0.0) 59 | end 60 | 61 | # Pauli Gates 62 | function animate_rot(blk::AbstractBlock) 63 | AbstractPlotting.inline!(false) 64 | screen = GLMakie.Screen() 65 | GLMakie.GLFW.set_visibility!(GLMakie.to_native(screen), AbstractPlotting.use_display[]) 66 | 67 | AbstractPlotting.inline!(true) 68 | scene = Scene(resolution=(1000, 1000), center=false) 69 | 70 | 71 | bloch_sphere!(scene) 72 | 73 | r = ArrayReg(bit"0") 74 | plot_arrow!(scene, r; color=:red, linewidth=5.0) 75 | time = Node(0.0) 76 | 77 | lifted = lift(time) do t 78 | x, y, z = bloch_arrow(apply!(r, rot(blk, t))) 79 | [0.0, x], [0.0, y], [0.0, z] 80 | end 81 | 82 | lines!(scene, lifted; color=:blue, linewidth=5.0) 83 | rotate_cam!(scene, 0.1, 0.0, 0.0) 84 | 85 | for _ in 1:100 86 | sleep(0.1) 87 | push!(time, to_value(time) + 0.001) 88 | display(screen, scene) 89 | end 90 | return scene 91 | end 92 | 93 | # 1. plot pauli gates 94 | plotapply(X) 95 | 96 | # 2. plot rotate animation 97 | scene = animate_rot(chain(X, H, X)) 98 | 99 | # 3. 
make video 100 | scene = Scene(resolution=(1000, 1000), center=false) 101 | bloch_sphere!(scene) 102 | r = ArrayReg(bit"0") 103 | plot_arrow!(scene, r; color=:red, linewidth=5.0) 104 | 105 | T = Node(0.0) 106 | lifted = lift(T) do t 107 | x, y, z = bloch_arrow(apply!(r, rot(X, t))) 108 | [0.0, x], [0.0, y], [0.0, z] 109 | end 110 | 111 | lines!(scene, lifted; color=:blue, linewidth=5.0) 112 | rotate_cam!(scene, 0.1, 0.0, 0.0) 113 | 114 | record(scene, "bloch.gif", 1:100) do i 115 | push!(T, 0.001 * i) 116 | end 117 | -------------------------------------------------------------------------------- /4_quantum/graph_embeding.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "scrolled": true 8 | }, 9 | "outputs": [ 10 | { 11 | "name": "stderr", 12 | "output_type": "stream", 13 | "text": [ 14 | "┌ Info: Recompiling stale cache file /home/leo/.julia/compiled/v1.1/Zygote/4kbLI.ji for Zygote [e88e6eb3-aa80-5325-afca-941959d7151f]\n", 15 | "└ @ Base loading.jl:1184\n", 16 | "┌ Info: Recompiling stale cache file /home/leo/.julia/compiled/v1.1/Flux/QdkVy.ji for Flux [587475ba-b771-5e3f-ad9e-33799f191a9c]\n", 17 | "└ @ Base loading.jl:1184\n" 18 | ] 19 | } 20 | ], 21 | "source": [ 22 | "using Zygote\n", 23 | "using Statistics: var, mean\n", 24 | "using LinearAlgebra: norm\n", 25 | "using Flux.NNlib: relu\n", 26 | "using Flux.Optimise" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "data": { 36 | "text/plain": [ 37 | "myvar (generic function with 1 method)" 38 | ] 39 | }, 40 | "execution_count": 2, 41 | "metadata": {}, 42 | "output_type": "execute_result" 43 | } 44 | ], 45 | "source": [ 46 | "function myvar(v)\n", 47 | " mv = mean(v)\n", 48 | " sum((v .- mv).^2)./(length(v)-1)\n", 49 | "end" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 3, 55 | "metadata": {}, 56 | "outputs": [ 57 | { 58 | "data": { 59 | "text/plain": [ 60 | "train (generic function with 1 method)" 61 | ] 62 | }, 63 | "execution_count": 3, 64 | "metadata": {}, 65 | "output_type": "execute_result" 66 | } 67 | ], 68 | "source": [ 69 | "function train(params)\n", 70 | " opt = ADAM(0.01)\n", 71 | " V = 8\n", 72 | " maxiter = 20000\n", 73 | " msk = [false, true, true, true, false, true, true, true]\n", 74 | " pp = params[:,msk]\n", 75 | " for i=1:maxiter\n", 76 | " grad = view(loss'(params), :,msk)\n", 77 | " Optimise.update!(opt, pp, grad)\n", 78 | " view(params, :, msk) .= pp\n", 79 | " if i%100 == 0\n", 80 | " @show loss(params)\n", 81 | " end\n", 82 | " end\n", 83 | " params\n", 84 | "end" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 4, 90 | "metadata": {}, 91 | "outputs": [ 92 | { 93 | "data": { 94 | "text/plain": [ 95 | "15-element Array{Tuple{Int64,Int64},1}:\n", 96 | " (1, 6) \n", 97 | " (2, 7) \n", 98 | " (3, 8) \n", 99 | " (4, 9) \n", 100 | " (5, 10)\n", 101 | " (1, 2) \n", 102 | " (2, 3) \n", 103 | " (3, 4) \n", 104 | " (4, 5) \n", 105 | " (1, 5) \n", 106 | " (6, 8) \n", 107 | " (8, 10)\n", 108 | " (7, 10)\n", 109 | " (7, 9) \n", 110 | " (6, 9) " 111 | ] 112 | }, 113 | "execution_count": 4, 114 | "metadata": {}, 115 | "output_type": "execute_result" 116 | } 117 | ], 118 | "source": [ 119 | "L1 = [(1,6), (2,7), (3,8), (4,9), (5,10), (1,2), (2,3), (3,4), (4,5), (5,1), (6,8), (8,10), (10,7), (7,9), (9,6)]\n", 120 | "L1 = [i 4 | 5 | ## 赛题 6 | 
在如图所示的Buckyball结构中，在每一个顶点上有一个 的伊辛自旋。近邻的自旋之间有单位强度的反铁磁耦合。 7 | 8 | 1. 求，其中 是顶点数，而 9 | 10 | 11 | 12 | 2. 求基态简并度。 13 | 14 | ----------------------------------------- 15 | 16 | *translation*: In the Buckyball structure shown in the figure, we attach an Ising spin to each vertex. Neighboring spins interact with an anti-ferromagnetic coupling of unit strength. 17 | 18 | 1. Get , where  is the number of vertices, and 19 | 20 | 21 | 22 | 2. Count the ground-state degeneracy. 23 | 24 | ## Rules 25 | - You may enter individually or as a team. For teams, how the prize is shared is decided internally. 26 | - The deadline is 0:00 on May 10, Beijing time. Only the solver code and results committed to GitHub before that moment will be judged. 27 | - Any programming language is allowed, but commercial software must be properly licensed. Compilation, JIT warm-up and program start-up are not counted in the running time. 28 | - Question 1 is mandatory; question 2 is a bonus question. 29 | - Exploring multiple solution methods is encouraged. Numerical accuracy of the results, program efficiency and originality are all important grading criteria. The winning team is selected by a vote of the lecturers. Only the single winning team receives the prize. 30 | - The solver code must be released under an open-source license. The copyright of the code remains with the participants. 31 | - Collaborators who made substantial contributions to the entry must be explicitly credited in the code. 32 | 33 | ## Procedure 34 | 35 | - Create a private repository on GitHub and add the user `quantumbfs0` as a collaborator (Settings/Collaborators). 36 | - Develop your solver :soccer:. 37 | - Besides the solver code, store the answers to the two questions as plain text in the files `ans1.txt` and `ans2.txt`, respectively. 38 | - Wednesday afternoon is free coding time; the lecturers will be at the venue to help with technical problems. 39 | - After 0:00 on May 10, make the repository public for review and study. 40 | - The winning team, or the teams that obtained correct results, will present on Friday morning and share their solution ideas and implementations. 41 | 42 | ## [Submissions](https://github.com/quantumbfs0?tab=stars) 43 | 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 QuantumBFS 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Manifest.toml: -------------------------------------------------------------------------------- 1 | # This file is machine-generated - editing it directly is not advised 2 | 3 | [[AbstractFFTs]] 4 | deps = ["LinearAlgebra", "Test"] 5 | git-tree-sha1 = "dfaf23dba016254bb0eed6de326510caea2889bf" 6 | uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" 7 | version = "0.4.0" 8 | 9 | [[AbstractTrees]] 10 | deps = ["Markdown", "Test"] 11 | git-tree-sha1 = "6621d9645702c1c4e6970cc6a3eae440c768000b" 12 | uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" 13 | version = "0.2.1" 14 | 15 | [[Adapt]] 16 | deps = ["LinearAlgebra", "Test"] 17 | git-tree-sha1 = "53d8fec4f662088c1202530e338a11a919407f3b" 18 | uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" 19 | version = "0.4.2" 20 | 21 | [[ArrayInterface]] 22 | deps = ["Requires", "Test"] 23 | git-tree-sha1 = "6a1a371393e56f5e8d5657fe4da4b11aea0bfbae" 24 | uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" 25 | version = "0.1.1" 26 | 27 | [[AssetRegistry]] 28 | deps = ["Distributed", "JSON", "Pidfile", "SHA", "Test"] 29 | git-tree-sha1 = "b25e88db7944f98789130d7b503276bc34bc098e" 30 | uuid = "bf4720bc-e11a-5d0c-854e-bdca1663c893" 31 | version = "0.1.0" 32 | 33 | [[Base64]] 34 | uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" 35 | 36 | [[BinDeps]] 37 | deps = ["Compat", "Libdl", "SHA", "URIParser"] 38 | git-tree-sha1 = "12093ca6cdd0ee547c39b1870e0c9c3f154d9ca9" 39 | uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee" 40 | version = "0.8.10" 41 | 42 | [[BinaryProvider]] 43 | deps = ["Libdl", "SHA"] 44 | git-tree-sha1 = "c7361ce8a2129f20b0e05a89f7070820cfed6648" 45 | uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232" 46 | version = "0.5.4" 47 | 48 | [[BitBasis]] 49 | deps = ["LinearAlgebra", "LuxurySparse", "StaticArrays"] 50 | git-tree-sha1 = "cae9734b45bad789307959a30cba955c47bd7174" 51 | uuid = "50ba71b6-fa0f-514d-ae9a-0916efc90dcf" 52 | version = "0.4.4" 53 | 54 | [[CSSUtil]] 55 | deps = ["Colors", "Compat", "JSON", "Measures", "Pkg", "WebIO"] 56 | git-tree-sha1 = "ff13fd99e4dd54f56eb064815f843bc992a871a2" 57 | uuid = "70588ee8-6100-5070-97c1-3cb50ed05fe8" 58 | version = "0.1.0" 59 | 60 | [[CSTParser]] 61 | deps = ["LibGit2", "Test", "Tokenize"] 62 | git-tree-sha1 = "437c93bc191cd55957b3f8dee7794b6131997c56" 63 | uuid = "00ebfdb7-1f24-5e51-bd34-a7502290713f" 64 | version = "0.5.2" 65 | 66 | [[CacheServers]] 67 | deps = ["Distributed", "Test"] 68 | git-tree-sha1 = "b584b04f236d3677b4334fab095796a128445bf8" 69 | uuid = "a921213e-d44a-5460-ac04-5d720a99ba71" 70 | version = "0.2.0" 71 | 72 | [[CodeTracking]] 73 | deps = ["InteractiveUtils", "Test", "UUIDs"] 74 | git-tree-sha1 = "9b21a2dfe51ba71fdc5688039075819196595367" 75 | uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" 76 | version = "0.5.7" 77 | 78 | [[CodecZlib]] 79 | deps = ["BinaryProvider", "Libdl", "Test", "TranscodingStreams"] 80 | git-tree-sha1 = "36bbf5374c661054d41410dc53ff752972583b9b" 81 | uuid = "944b1d66-785c-5afd-91f1-9de20f533193" 82 | version = "0.5.2" 83 | 84 | [[ColorTypes]] 85 | deps = ["FixedPointNumbers", "Random", "Test"] 86 | git-tree-sha1 = "f73b0e10f2a5756de7019818a41654686da06b09" 87 | uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" 88 | version = "0.7.5" 89 | 90 | [[Colors]] 91 | deps = ["ColorTypes", "FixedPointNumbers", "InteractiveUtils", "Printf", "Reexport", "Test"] 92 | git-tree-sha1 = "9f0a0210450acb91c730b730a994f8eef1d3d543" 93 | uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" 94 | version = "0.9.5" 95 | 96 | 
[[CommonSubexpressions]] 97 | deps = ["Test"] 98 | git-tree-sha1 = "efdaf19ab11c7889334ca247ff4c9f7c322817b0" 99 | uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" 100 | version = "0.2.0" 101 | 102 | [[Compat]] 103 | deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] 104 | git-tree-sha1 = "84aa74986c5b9b898b0d1acaf3258741ee64754f" 105 | uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" 106 | version = "2.1.0" 107 | 108 | [[Conda]] 109 | deps = ["Compat", "JSON", "VersionParsing"] 110 | git-tree-sha1 = "b625d802587c2150c279a40a646fba63f9bd8187" 111 | uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" 112 | version = "1.2.0" 113 | 114 | [[Contour]] 115 | deps = ["LinearAlgebra", "StaticArrays", "Test"] 116 | git-tree-sha1 = "b974e164358fea753ef853ce7bad97afec15bb80" 117 | uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" 118 | version = "0.5.1" 119 | 120 | [[Crayons]] 121 | deps = ["Test"] 122 | git-tree-sha1 = "f621b8ef51fd2004c7cf157ea47f027fdeac5523" 123 | uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" 124 | version = "4.0.0" 125 | 126 | [[DataStructures]] 127 | deps = ["InteractiveUtils", "OrderedCollections", "Random", "Serialization", "Test"] 128 | git-tree-sha1 = "ca971f03e146cf144a9e2f2ce59674f5bf0e8038" 129 | uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" 130 | version = "0.15.0" 131 | 132 | [[Dates]] 133 | deps = ["Printf"] 134 | uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" 135 | 136 | [[DelimitedFiles]] 137 | deps = ["Mmap"] 138 | uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" 139 | 140 | [[DiffEqBase]] 141 | deps = ["Compat", "DocStringExtensions", "IterativeSolvers", "IteratorInterfaceExtensions", "LinearAlgebra", "RecipesBase", "RecursiveArrayTools", "RecursiveFactorization", "Requires", "Roots", "SparseArrays", "StaticArrays", "Statistics", "SuiteSparse", "TableTraits", "TreeViews"] 142 | git-tree-sha1 = "c4560b4ae808770bc58a31f8c796b3f195e6239a" 143 | uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" 144 | version = "5.7.0" 145 | 146 | [[DiffResults]] 147 | deps = ["Compat", "StaticArrays"] 148 | git-tree-sha1 = "34a4a1e8be7bc99bc9c611b895b5baf37a80584c" 149 | uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" 150 | version = "0.0.4" 151 | 152 | [[DiffRules]] 153 | deps = ["Random", "Test"] 154 | git-tree-sha1 = "dc0869fb2f5b23466b32ea799bd82c76480167f7" 155 | uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" 156 | version = "0.0.10" 157 | 158 | [[Distributed]] 159 | deps = ["Random", "Serialization", "Sockets"] 160 | uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" 161 | 162 | [[DocStringExtensions]] 163 | deps = ["LibGit2", "Markdown", "Pkg", "Test"] 164 | git-tree-sha1 = "4d30e889c9f106a51ffa4791a88ffd4765bf20c3" 165 | uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" 166 | version = "0.7.0" 167 | 168 | [[ExponentialUtilities]] 169 | deps = ["LinearAlgebra", "Printf", "Random", "SparseArrays", "Test"] 170 | git-tree-sha1 = "6fad21cd7637d0ad6a7661f8abea1149922d6c9c" 171 | uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" 172 | version = "1.4.0" 173 | 174 | [[FFTW]] 175 | deps = ["AbstractFFTs", "BinaryProvider", "Compat", "Conda", "Libdl", "LinearAlgebra", "Reexport", "Test"] 176 | git-tree-sha1 = "29cda58afbf62f35b1a094882ad6c745a47b2eaa" 177 | uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 178 | version = "0.2.4" 179 | 180 | [[FileWatching]] 181 | uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" 
182 | 183 | [[FixedPointNumbers]] 184 | deps = ["Test"] 185 | git-tree-sha1 = "b8045033701c3b10bf2324d7203404be7aef88ba" 186 | uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" 187 | version = "0.5.3" 188 | 189 | [[Flux]] 190 | deps = ["AbstractTrees", "Adapt", "CodecZlib", "Colors", "DelimitedFiles", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "Pkg", "Printf", "Random", "Reexport", "Requires", "SHA", "Statistics", "StatsBase", "Tracker", "ZipFile"] 191 | git-tree-sha1 = "e0740e96abfc84c305a1d3f3bb7cb4eade842027" 192 | repo-rev = "master" 193 | repo-url = "https://github.com/FluxML/Flux.jl.git" 194 | uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" 195 | version = "0.8.3" 196 | 197 | [[ForwardDiff]] 198 | deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "InteractiveUtils", "LinearAlgebra", "NaNMath", "Random", "SparseArrays", "SpecialFunctions", "StaticArrays", "Test"] 199 | git-tree-sha1 = "4c4d727f1b7e0092134fabfab6396b8945c1ea5b" 200 | uuid = "f6369f11-7733-5829-9624-2563aa707210" 201 | version = "0.10.3" 202 | 203 | [[FunctionalCollections]] 204 | deps = ["Test"] 205 | git-tree-sha1 = "04cb9cfaa6ba5311973994fe3496ddec19b6292a" 206 | uuid = "de31a74c-ac4f-5751-b3fd-e18cd04993ca" 207 | version = "0.5.0" 208 | 209 | [[GR]] 210 | deps = ["Base64", "DelimitedFiles", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test"] 211 | git-tree-sha1 = "537b61986d8a5fd21e577541f8f0735e481eb3ed" 212 | uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" 213 | version = "0.39.1" 214 | 215 | [[IJulia]] 216 | deps = ["Base64", "Conda", "Dates", "InteractiveUtils", "JSON", "Markdown", "MbedTLS", "Pkg", "Printf", "REPL", "Random", "SoftGlobalScope", "Test", "UUIDs", "ZMQ"] 217 | git-tree-sha1 = "8eb8459d806de665f1347b25e9ad9428c2609f0f" 218 | uuid = "7073ff75-c697-5162-941a-fcdaad2a7d2a" 219 | version = "1.18.1" 220 | 221 | [[IRTools]] 222 | deps = ["InteractiveUtils", "MacroTools", "Test"] 223 | git-tree-sha1 = "1a946e462d8a86cc60df2cbfd4ff9c2ed9ee2615" 224 | repo-rev = "master" 225 | repo-url = "https://github.com/MikeInnes/IRTools.jl.git" 226 | uuid = "7869d1d1-7146-5819-86e3-90919afe41df" 227 | version = "0.2.0" 228 | 229 | [[Interact]] 230 | deps = ["CSSUtil", "InteractBase", "JSON", "Knockout", "Observables", "OrderedCollections", "Reexport", "Test", "WebIO", "Widgets"] 231 | git-tree-sha1 = "f6531bc554990a6659966afcb8f05543c032068b" 232 | uuid = "c601a237-2ae4-5e1e-952c-7a85b0c7eef1" 233 | version = "0.10.2" 234 | 235 | [[InteractBase]] 236 | deps = ["Base64", "CSSUtil", "Colors", "Dates", "JSExpr", "JSON", "Knockout", "Observables", "OrderedCollections", "Random", "Test", "WebIO", "Widgets"] 237 | git-tree-sha1 = "83fc45f21bfec97e5cd56e790f6945497ab0f095" 238 | uuid = "d3863d7c-f0c8-5437-a7b4-3ae773c01009" 239 | version = "0.10.1" 240 | 241 | [[InteractiveUtils]] 242 | deps = ["Markdown"] 243 | uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" 244 | 245 | [[IterativeSolvers]] 246 | deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays", "Test"] 247 | git-tree-sha1 = "5687f68018b4f14c0da54d402bb23eecaec17f37" 248 | uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" 249 | version = "0.8.1" 250 | 251 | [[IteratorInterfaceExtensions]] 252 | git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" 253 | uuid = "82899510-4779-5014-852e-03e436cf321d" 254 | version = "1.0.0" 255 | 256 | [[JSExpr]] 257 | deps = ["JSON", "MacroTools", "Observables", "Test", "WebIO"] 258 | git-tree-sha1 = "013bc2143a2e84ea489365cf30db3407deb540c2" 259 | uuid = 
"97c1335a-c9c5-57fe-bc5d-ec35cebe8660" 260 | version = "0.5.0" 261 | 262 | [[JSON]] 263 | deps = ["Dates", "Distributed", "Mmap", "Sockets", "Test", "Unicode"] 264 | git-tree-sha1 = "1f7a25b53ec67f5e9422f1f551ee216503f4a0fa" 265 | uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" 266 | version = "0.20.0" 267 | 268 | [[JuliaInterpreter]] 269 | deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"] 270 | git-tree-sha1 = "553281260578bb50d6d550977f438754e7e93397" 271 | uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a" 272 | version = "0.5.1" 273 | 274 | [[Juno]] 275 | deps = ["Base64", "Logging", "Media", "Profile", "Test"] 276 | git-tree-sha1 = "4e4a8d43aa7ecec66cadaf311fbd1e5c9d7b9175" 277 | uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d" 278 | version = "0.7.0" 279 | 280 | [[Knockout]] 281 | deps = ["JSExpr", "JSON", "Observables", "Pkg", "Test", "WebIO"] 282 | git-tree-sha1 = "5cca7f070f85392cd0f42220c8ea5f7be96dacf4" 283 | uuid = "bcebb21b-c2e3-54f8-a781-646b90f6d2cc" 284 | version = "0.2.2" 285 | 286 | [[KrylovKit]] 287 | deps = ["LinearAlgebra", "Printf", "Random", "Test"] 288 | git-tree-sha1 = "b20b380cc5e28f079ba5212450d7d42a10a85194" 289 | uuid = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" 290 | version = "0.3.4" 291 | 292 | [[LaTeXStrings]] 293 | deps = ["Compat"] 294 | git-tree-sha1 = "7ab9b8788cfab2bdde22adf9004bda7ad9954b6c" 295 | uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" 296 | version = "1.0.3" 297 | 298 | [[Latexify]] 299 | deps = ["InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires", "Test"] 300 | git-tree-sha1 = "5ea2cc735d06ef3abbac12dcc8403cf1f4999bf7" 301 | uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" 302 | version = "0.8.2" 303 | 304 | [[LegibleLambdas]] 305 | deps = ["MLStyle", "MacroTools", "Test"] 306 | git-tree-sha1 = "987298a681f4946b0d5d87ded1e89b53957d3b07" 307 | uuid = "f1f30506-32fe-5131-bd72-7c197988f9e5" 308 | version = "0.2.0" 309 | 310 | [[LibGit2]] 311 | uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" 312 | 313 | [[Libdl]] 314 | uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" 315 | 316 | [[LinearAlgebra]] 317 | deps = ["Libdl"] 318 | uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 319 | 320 | [[LinearMaps]] 321 | deps = ["LinearAlgebra", "SparseArrays", "Test"] 322 | git-tree-sha1 = "ea9bb3d793917bced9c98da5ed8b6f449c4a74ba" 323 | uuid = "7a12625a-238d-50fd-b39a-03d52299707e" 324 | version = "2.3.0" 325 | 326 | [[Logging]] 327 | uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" 328 | 329 | [[LoweredCodeUtils]] 330 | deps = ["JuliaInterpreter"] 331 | git-tree-sha1 = "c530b90b64249b525f1e40e71f64d5382c62f073" 332 | uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b" 333 | version = "0.3.4" 334 | 335 | [[LuxurySparse]] 336 | deps = ["LinearAlgebra", "Random", "SparseArrays", "StaticArrays"] 337 | git-tree-sha1 = "003ae3a72b221a5fbe08e59965288268b4661efb" 338 | uuid = "d05aeea4-b7d4-55ac-b691-9e7fabb07ba2" 339 | version = "0.4.0" 340 | 341 | [[MLStyle]] 342 | deps = ["Statistics", "Test"] 343 | git-tree-sha1 = "12d2f421dcff9c12b2aebcc25759f0cdca16a16b" 344 | uuid = "d8e11817-5142-5d16-987a-aa16d5891078" 345 | version = "0.3.0" 346 | 347 | [[MacroTools]] 348 | deps = ["CSTParser", "Compat", "DataStructures", "Test"] 349 | git-tree-sha1 = "daecd9e452f38297c686eba90dba2a6d5da52162" 350 | uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" 351 | version = "0.5.0" 352 | 353 | [[Markdown]] 354 | deps = ["Base64"] 355 | uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" 356 | 357 | [[MbedTLS]] 358 | deps = ["BinaryProvider", "Dates", "Distributed", "Libdl", "Random", 
"Sockets", "Test"] 359 | git-tree-sha1 = "2d94286a9c2f52c63a16146bb86fd6cdfbf677c6" 360 | uuid = "739be429-bea8-5141-9913-cc70e7f3736d" 361 | version = "0.6.8" 362 | 363 | [[Measures]] 364 | deps = ["Test"] 365 | git-tree-sha1 = "ddfd6d13e330beacdde2c80de27c1c671945e7d9" 366 | uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" 367 | version = "0.3.0" 368 | 369 | [[Media]] 370 | deps = ["MacroTools", "Test"] 371 | git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58" 372 | uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27" 373 | version = "0.5.0" 374 | 375 | [[Missings]] 376 | deps = ["Dates", "InteractiveUtils", "SparseArrays", "Test"] 377 | git-tree-sha1 = "d1d2585677f2bd93a97cfeb8faa7a0de0f982042" 378 | uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" 379 | version = "0.4.0" 380 | 381 | [[Mmap]] 382 | uuid = "a63ad114-7e13-5084-954f-fe012c677804" 383 | 384 | [[NNlib]] 385 | deps = ["BinaryProvider", "Libdl", "LinearAlgebra", "Requires", "Statistics", "TimerOutputs"] 386 | git-tree-sha1 = "b7d9624a6c67b6cef981322692e2d0b35a010c63" 387 | repo-rev = "master" 388 | repo-url = "https://github.com/FluxML/NNlib.jl.git" 389 | uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" 390 | version = "0.6.0" 391 | 392 | [[NaNMath]] 393 | deps = ["Compat"] 394 | git-tree-sha1 = "ce3b85e484a5d4c71dd5316215069311135fa9f2" 395 | uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" 396 | version = "0.3.2" 397 | 398 | [[Observables]] 399 | deps = ["Test"] 400 | git-tree-sha1 = "dc02cec22747d1d10d9f70d8a1c03432b5bfbcd0" 401 | uuid = "510215fc-4207-5dde-b226-833fc4488ee2" 402 | version = "0.2.3" 403 | 404 | [[OrderedCollections]] 405 | deps = ["Random", "Serialization", "Test"] 406 | git-tree-sha1 = "c4c13474d23c60d20a67b217f1d7f22a40edf8f1" 407 | uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" 408 | version = "1.1.0" 409 | 410 | [[Pidfile]] 411 | deps = ["FileWatching", "Test"] 412 | git-tree-sha1 = "1ffd82728498b5071cde851bbb7abd780d4445f3" 413 | uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307" 414 | version = "1.1.0" 415 | 416 | [[Pkg]] 417 | deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] 418 | uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" 419 | 420 | [[PlotThemes]] 421 | deps = ["PlotUtils", "Requires", "Test"] 422 | git-tree-sha1 = "f3afd2d58e1f6ac9be2cea46e4a9083ccc1d990b" 423 | uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" 424 | version = "0.3.0" 425 | 426 | [[PlotUtils]] 427 | deps = ["Colors", "Dates", "Printf", "Random", "Reexport", "Test"] 428 | git-tree-sha1 = "8e87bbb778c26f575fbe47fd7a49c7b5ca37c0c6" 429 | uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" 430 | version = "0.5.8" 431 | 432 | [[Plots]] 433 | deps = ["Base64", "Contour", "Dates", "FixedPointNumbers", "GR", "JSON", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "Reexport", "Requires", "Showoff", "SparseArrays", "StaticArrays", "Statistics", "StatsBase", "Test", "UUIDs"] 434 | git-tree-sha1 = "5bcc6dbc8b7fe0823d9a63dde87d7d4542149693" 435 | uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" 436 | version = "0.24.0" 437 | 438 | [[Printf]] 439 | deps = ["Unicode"] 440 | uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" 441 | 442 | [[Profile]] 443 | deps = ["Printf"] 444 | uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" 445 | 446 | [[PyCall]] 447 | deps = ["Conda", "Dates", "Libdl", "LinearAlgebra", "MacroTools", "Pkg", "Serialization", "Statistics", "Test", "VersionParsing"] 448 | git-tree-sha1 = "6e5bac1b1faf3575731a6a5b76f638f2389561d3" 449 | uuid = 
"438e738f-606a-5dbb-bf0a-cddfbfd45ab0" 450 | version = "1.91.2" 451 | 452 | [[QuAlgorithmZoo]] 453 | deps = ["BitBasis", "DiffEqBase", "FFTW", "LinearAlgebra", "LuxurySparse", "MacroTools", "SparseArrays", "StatsBase", "Yao", "YaoArrayRegister", "YaoBlocks"] 454 | git-tree-sha1 = "10d05ccd1032ed269a15b0c5fc4077dbf98460e4" 455 | repo-rev = "master" 456 | repo-url = "https://github.com/QuantumBFS/QuAlgorithmZoo.jl.git" 457 | uuid = "65c24e16-9b0a-11e8-1353-efc5bc5f6586" 458 | version = "0.1.0" 459 | 460 | [[REPL]] 461 | deps = ["InteractiveUtils", "Markdown", "Sockets"] 462 | uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" 463 | 464 | [[Random]] 465 | deps = ["Serialization"] 466 | uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 467 | 468 | [[RecipesBase]] 469 | deps = ["Random", "Test"] 470 | git-tree-sha1 = "0b3cb370ee4dc00f47f1193101600949f3dcf884" 471 | uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" 472 | version = "0.6.0" 473 | 474 | [[RecursiveArrayTools]] 475 | deps = ["ArrayInterface", "RecipesBase", "Requires", "StaticArrays", "Statistics", "Test"] 476 | git-tree-sha1 = "187ea7dd541955102c7035a6668613bdf52022ca" 477 | uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" 478 | version = "0.20.0" 479 | 480 | [[RecursiveFactorization]] 481 | deps = ["LinearAlgebra", "Random", "Test"] 482 | git-tree-sha1 = "54410ebd72cbb84d7b7678eb3da643f8e71181fc" 483 | uuid = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" 484 | version = "0.0.1" 485 | 486 | [[Reexport]] 487 | deps = ["Pkg"] 488 | git-tree-sha1 = "7b1d07f411bc8ddb7977ec7f377b97b158514fe0" 489 | uuid = "189a3867-3050-52da-a836-e630ba90ab69" 490 | version = "0.2.0" 491 | 492 | [[Requires]] 493 | deps = ["Test"] 494 | git-tree-sha1 = "f6fbf4ba64d295e146e49e021207993b6b48c7d1" 495 | uuid = "ae029012-a4dd-5104-9daa-d747884805df" 496 | version = "0.5.2" 497 | 498 | [[Revise]] 499 | deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "UUIDs", "Unicode"] 500 | git-tree-sha1 = "84f29e1b3670ade5c1b4f6fc80eac166bd37bb59" 501 | uuid = "295af30f-e4ad-537b-8983-00126c2a3abe" 502 | version = "2.1.3" 503 | 504 | [[Roots]] 505 | deps = ["Printf", "Statistics", "Test"] 506 | git-tree-sha1 = "7228278e31d6d0e22a1ae0b41ea9a0df2859f33d" 507 | uuid = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" 508 | version = "0.8.1" 509 | 510 | [[SHA]] 511 | uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" 512 | 513 | [[Serialization]] 514 | uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" 515 | 516 | [[SharedArrays]] 517 | deps = ["Distributed", "Mmap", "Random", "Serialization"] 518 | uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" 519 | 520 | [[Showoff]] 521 | deps = ["Compat"] 522 | git-tree-sha1 = "276b24f3ace98bec911be7ff2928d497dc759085" 523 | uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" 524 | version = "0.2.1" 525 | 526 | [[SimpleTraits]] 527 | deps = ["InteractiveUtils", "MacroTools", "Test"] 528 | git-tree-sha1 = "c0a542b8d5e369b179ccd296b2ca987f6da5da0a" 529 | uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" 530 | version = "0.8.0" 531 | 532 | [[Sockets]] 533 | uuid = "6462fe0b-24de-5631-8697-dd941f90decc" 534 | 535 | [[SoftGlobalScope]] 536 | deps = ["Test"] 537 | git-tree-sha1 = "012661b70364840fcd380912d878d96f7bf95ff3" 538 | uuid = "b85f4697-e234-5449-a836-ec8e2f98b302" 539 | version = "1.0.10" 540 | 541 | [[SortingAlgorithms]] 542 | deps = ["DataStructures", "Random", "Test"] 543 | git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd" 544 | uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" 545 | 
version = "0.3.1" 546 | 547 | [[SparseArrays]] 548 | deps = ["LinearAlgebra", "Random"] 549 | uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 550 | 551 | [[SpecialFunctions]] 552 | deps = ["BinDeps", "BinaryProvider", "Libdl", "Test"] 553 | git-tree-sha1 = "0b45dc2e45ed77f445617b99ff2adf0f5b0f23ea" 554 | uuid = "276daf66-3868-5448-9aa4-cd146d93841b" 555 | version = "0.7.2" 556 | 557 | [[StaticArrays]] 558 | deps = ["InteractiveUtils", "LinearAlgebra", "Random", "Statistics", "Test"] 559 | git-tree-sha1 = "3841b39ed5f047db1162627bf5f80a9cd3e39ae2" 560 | uuid = "90137ffa-7385-5640-81b9-e52037218182" 561 | version = "0.10.3" 562 | 563 | [[Statistics]] 564 | deps = ["LinearAlgebra", "SparseArrays"] 565 | uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 566 | 567 | [[StatsBase]] 568 | deps = ["DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics"] 569 | git-tree-sha1 = "8a0f4b09c7426478ab677245ab2b0b68552143c7" 570 | uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 571 | version = "0.30.0" 572 | 573 | [[SuiteSparse]] 574 | deps = ["Libdl", "LinearAlgebra", "SparseArrays"] 575 | uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" 576 | 577 | [[TableTraits]] 578 | deps = ["IteratorInterfaceExtensions"] 579 | git-tree-sha1 = "b1ad568ba658d8cbb3b892ed5380a6f3e781a81e" 580 | uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" 581 | version = "1.0.0" 582 | 583 | [[Test]] 584 | deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] 585 | uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 586 | 587 | [[TimerOutputs]] 588 | deps = ["Crayons", "Printf", "Test", "Unicode"] 589 | git-tree-sha1 = "b80671c06f8f8bae08c55d67b5ce292c5ae2660c" 590 | uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" 591 | version = "0.5.0" 592 | 593 | [[Tokenize]] 594 | deps = ["Printf", "Test"] 595 | git-tree-sha1 = "3e83f60b74911d3042d3550884ca2776386a02b8" 596 | uuid = "0796e94c-ce3b-5d07-9a54-7f471281c624" 597 | version = "0.5.3" 598 | 599 | [[Tracker]] 600 | deps = ["Adapt", "DiffRules", "ForwardDiff", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Printf", "Random", "Requires", "SpecialFunctions", "Statistics", "Test"] 601 | git-tree-sha1 = "0bec1b68c63a0e8a58d3944261cbf4cc9577c8a1" 602 | uuid = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" 603 | version = "0.2.0" 604 | 605 | [[TranscodingStreams]] 606 | deps = ["Random", "Test"] 607 | git-tree-sha1 = "a25d8e5a28c3b1b06d3859f30757d43106791919" 608 | uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" 609 | version = "0.9.4" 610 | 611 | [[TreeViews]] 612 | deps = ["Test"] 613 | git-tree-sha1 = "8d0d7a3fe2f30d6a7f833a5f19f7c7a5b396eae6" 614 | uuid = "a2a6695c-b41b-5b7d-aed9-dbfdeacea5d7" 615 | version = "0.3.0" 616 | 617 | [[TupleTools]] 618 | deps = ["Random", "Test"] 619 | git-tree-sha1 = "b006524003142128cc6d36189dce337729aa0050" 620 | uuid = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" 621 | version = "1.1.0" 622 | 623 | [[URIParser]] 624 | deps = ["Test", "Unicode"] 625 | git-tree-sha1 = "6ddf8244220dfda2f17539fa8c9de20d6c575b69" 626 | uuid = "30578b45-9adc-5946-b283-645ec420af67" 627 | version = "0.4.0" 628 | 629 | [[UUIDs]] 630 | deps = ["Random", "SHA"] 631 | uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" 632 | 633 | [[Unicode]] 634 | uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" 635 | 636 | [[VersionParsing]] 637 | deps = ["Compat"] 638 | git-tree-sha1 = "c9d5aa108588b978bd859554660c8a5c4f2f7669" 639 | uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" 640 | version = "1.1.3" 641 | 642 | [[WebIO]] 643 | deps = ["AssetRegistry", "Base64", 
"Compat", "Distributed", "FunctionalCollections", "JSON", "Logging", "Observables", "Random", "Requires", "Sockets", "Test", "UUIDs", "Widgets"] 644 | git-tree-sha1 = "125dc746e5b36424c6a7e694889a7f3d434a160a" 645 | uuid = "0f1e0344-ec1d-5b48-a673-e5cf874b6c29" 646 | version = "0.8.1" 647 | 648 | [[Widgets]] 649 | deps = ["Colors", "Dates", "Observables", "OrderedCollections", "Test"] 650 | git-tree-sha1 = "c53befc70c6b91eaa2a9888c2f6ac2d92720a81b" 651 | uuid = "cc8bc4a8-27d6-5769-a93b-9d913e69aa62" 652 | version = "0.6.1" 653 | 654 | [[Yao]] 655 | deps = ["BitBasis", "Reexport", "YaoArrayRegister", "YaoBase", "YaoBlocks"] 656 | git-tree-sha1 = "3e8b2f69e70ac83778c77d097cdb3b0b116c17b8" 657 | repo-rev = "master" 658 | repo-url = "https://github.com/QuantumBFS/Yao.jl.git" 659 | uuid = "5872b779-8223-5990-8dd0-5abbb0748c8c" 660 | version = "0.4.2" 661 | 662 | [[YaoArrayRegister]] 663 | deps = ["BitBasis", "LinearAlgebra", "LuxurySparse", "StaticArrays", "StatsBase", "TupleTools", "YaoBase"] 664 | git-tree-sha1 = "e87a6f0dd4f68cdac1629fc2eeb5d2423faa47f1" 665 | repo-rev = "master" 666 | repo-url = "https://github.com/QuantumBFS/YaoArrayRegister.jl.git" 667 | uuid = "e600142f-9330-5003-8abb-0ebd767abc51" 668 | version = "0.3.7" 669 | 670 | [[YaoBase]] 671 | deps = ["BitBasis", "LegibleLambdas", "LinearAlgebra", "LuxurySparse", "MLStyle", "MacroTools", "SparseArrays", "Test", "TupleTools"] 672 | git-tree-sha1 = "0bbe0640e5cb2d1998f3fc32b939095e78199ca6" 673 | uuid = "a8f54c17-34bc-5a9d-b050-f522fe3f755f" 674 | version = "0.9.1" 675 | 676 | [[YaoBlocks]] 677 | deps = ["BitBasis", "CacheServers", "ExponentialUtilities", "LegibleLambdas", "LinearAlgebra", "LinearMaps", "LuxurySparse", "MLStyle", "Random", "SimpleTraits", "SparseArrays", "StaticArrays", "StatsBase", "YaoArrayRegister", "YaoBase"] 678 | git-tree-sha1 = "53a9668a425e2d83e1946b21445ed5266a952dc9" 679 | repo-rev = "master" 680 | repo-url = "https://github.com/QuantumBFS/YaoBlocks.jl.git" 681 | uuid = "418bc28f-b43b-5e0b-a6e7-61bbc1a2c1df" 682 | version = "0.3.4" 683 | 684 | [[ZMQ]] 685 | deps = ["BinaryProvider", "FileWatching", "Libdl", "Sockets", "Test"] 686 | git-tree-sha1 = "34e7ac2d1d59d19d0e86bde99f1f02262bfa1613" 687 | uuid = "c2297ded-f4af-51ae-bb23-16f91089e4e1" 688 | version = "1.0.0" 689 | 690 | [[ZipFile]] 691 | deps = ["BinaryProvider", "Libdl", "Printf", "Test"] 692 | git-tree-sha1 = "5f6f663890dfb9bad6af75a86a43f67904e5050e" 693 | uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea" 694 | version = "0.8.1" 695 | 696 | [[Zygote]] 697 | deps = ["DiffRules", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics"] 698 | git-tree-sha1 = "ac4a5b2d7a6f2b2925a64150d46479a558c26667" 699 | repo-rev = "master" 700 | repo-url = "https://github.com/FluxML/Zygote.jl.git" 701 | uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" 702 | version = "0.3.0" 703 | -------------------------------------------------------------------------------- /Project.toml: -------------------------------------------------------------------------------- 1 | name = "SSSS" 2 | uuid = "038cdfb8-671c-11e9-3f17-89122373f6c7" 3 | version = "0.1.0" 4 | 5 | [deps] 6 | FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 7 | IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" 8 | Interact = "c601a237-2ae4-5e1e-952c-7a85b0c7eef1" 9 | Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" 10 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Deep Learning and Quantum Programming: A Spring School 2 | 3 | Song Shan Lake Spring School features lectures, a code challenge, an install party and happy fatty night. 4 | 5 | *South Bay Interdisciplinary Science Center, Songshan Lake Materials Laboratory, Dongguan, China, 5th-10th May, 2019* 6 | 7 | ## Table of Contents 8 | 1. Deep Learning 9 | * [`lecture_notes.pdf`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/lecture_notes.pdf) and [`slides/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/slides) 10 | * Demo codes 11 | * Poor man's computation graph: [`computation_graph.py`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/computation_graph.py) 12 | * Variational free energy with flow model: [`realnvp/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/realnvp) 13 | * Hamiltonian inverse design with reverse mode AD: [`schrodinger.py`](https://github.com/QuantumBFS/SSSS/blob/master/1_deep_learning/schrodinger.py) 14 | * Solving the fastest descent problem with NeuralODE: [`brachistochrone/`](https://github.com/QuantumBFS/SSSS/tree/master/1_deep_learning/brachistochrone) 15 | 2. Tensor Networks 16 | * [`Slides on tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/Tutorial_tensor_network.pdf) 17 | * [`Slides on contraction methods for infinite tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/tensor_contraction_methods.pdf) 18 | * [`Tutorial and demo codes on computing the 2D Ising model partition function using tensor networks`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/tensor_contraction_simple.ipynb) 19 | * [`Tutorial and demo codes on the MPS Born machine`](https://github.com/QuantumBFS/SSSS/blob/master/2_tensor_network/mps_tutorial.ipynb) 20 | 3. Julia language 21 | * [`julia-hands-on.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/3_julia/julia-hands-on.ipynb) 22 | 4. 
Quantum Computing with Yao.jl 23 | * Lecture Note: [`quantum_lecture_note.pdf`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/quantum_lecture_note.pdf) 24 | * Slides: [`google slides`](https://docs.google.com/presentation/d/1jUTpa8pB3jEOWDW1U0rDTDQ-kpri8j8S4y77GQCo3iM/edit?usp=sharing) 25 | * Notebooks 26 | * The solution to the graph embedding problem: [`graph_embeding.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/graph_embeding.ipynb) 27 | * Quantum circuit computing with Yao.jl: [`QC-with-Yao.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/QC-with-Yao.ipynb), [`Yao-talk`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/yao-talk-2019.pdf) 28 | * Landscape of a quantum circuit: [`variational_quantum_circuit.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) 29 | * Variational quantum eigensolver: [`variational_quantum_circuit.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) 30 | * Matrix product state inspired variational quantum eigensolver: [`VQE_action.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/VQE_action.ipynb) 31 | * Quantum circuit Born machine: [`qcbm_gaussian.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/qcbm_gaussian.ipynb) 32 | * Gradient vanishing problem: [`variational_quantum_circuit.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/variational_quantum_circuit.ipynb) and [`VQE_action.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/VQE_action.ipynb) 33 | * Mapping a quantum circuit to tensor networks: [`qc_tensor_mapping.ipynb`](https://github.com/QuantumBFS/SSSS/blob/master/4_quantum/qc_tensor_mapping.ipynb) 34 | 35 | Welcome for pull requests and issues! 36 | 37 | 38 | ## Challenge 39 | 40 | [Song-Shan-Hu Spring School Coding Challenge](Challenge.md) 41 | 42 | ## Preparation 43 | 44 | ### Quick start 45 | 46 | - [quick start for git](http://rogerdudler.github.io/git-guide/) 47 | - [quick start for command line interface](https://www.makeuseof.com/tag/a-quick-guide-to-get-started-with-the-linux-command-line/) 48 | 49 | ### Installation 50 | - [how to install Ubuntu](https://tutorials.ubuntu.com/tutorial/tutorial-install-ubuntu-desktop) 51 | - [install Anaconda](https://www.anaconda.com/distribution/) 52 | - [install PyTorch](https://pytorch.org/) 53 | - [install Julia](https://julialang.org) 54 | - [install Yao.jl](https://github.com/QuantumBFS/Yao.jl#installation) 55 | 56 | ### Julia 57 | - [Julia语言的中文教程](https://github.com/Roger-luo/TutorialZH.jl) (a Chinese-language Julia tutorial) 58 | - [快速入门 Julia 语言](https://www.bilibili.com/video/av28248187?from=search&seid=5171149583764025744) (a quick-start video on the Julia language, in Chinese) 59 | - [Julia入门指引](https://discourse.juliacn.com/t/topic/159) (a getting-started guide for Julia, in Chinese) 60 | 61 | 62 | ## Usage 63 | 64 | You can open this repo as a Julia package if you have Julia installed: 65 | 66 | 1. Open your Julia REPL and press `]` 67 | 2. Type the following 68 | 69 | ```julia 70 | (1.0) pkg> add https://github.com/QuantumBFS/SSSS.git 71 | ``` 72 | 73 | 3. Press backspace 74 | 4. Type the following 75 | 76 | ```julia 77 | julia> using SSSS 78 | 79 | julia> notebooks() 80 | ``` 81 | 82 | ## License 83 | 84 | The code is released under the MIT License. The remaining material is released under [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/). 85 | 86 | 

87 | poster 88 |

89 | -------------------------------------------------------------------------------- /_assets/SongShanHu2019.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/_assets/SongShanHu2019.jpeg -------------------------------------------------------------------------------- /_assets/SongShanHu2019.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/_assets/SongShanHu2019.key -------------------------------------------------------------------------------- /_assets/c60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantumBFS/SSSS/b75ecbbfc19a954816164e7081737bd152f48070/_assets/c60.jpg -------------------------------------------------------------------------------- /src/SSSS.jl: -------------------------------------------------------------------------------- 1 | module SSSS 2 | 3 | using IJulia, Pkg 4 | 5 | export notebooks 6 | 7 | # function tutorial() 8 | # cmd = IJulia.find_jupyter_subcommand("notebook") 9 | # push!(cmd.exec, joinpath(@__DIR__, "..", "notebooks", "tutorial.ipynb")) 10 | # return IJulia.launch(cmd, joinpath(@__DIR__, "..", "notebooks"), false) 11 | # end 12 | 13 | function notebooks() 14 | return IJulia.notebook(dir=joinpath(@__DIR__, "..")) 15 | end 16 | 17 | # REQUIRE = [ 18 | # "GR", 19 | # "PyCall", 20 | # "IJulia", 21 | # "Revise", 22 | # "Plots", 23 | # "Latexify", 24 | # "FFTW", 25 | # PackageSpec(name="Flux", rev="master"), 26 | # "BitBasis", 27 | # "KrylovKit", 28 | # PackageSpec(url="https://github.com/QuantumBFS/QuAlgorithmZoo.jl.git", rev="master"), 29 | # PackageSpec(name="IRTools", rev="master"), 30 | # PackageSpec(name="NNlib", rev="master"), 31 | # PackageSpec(name="Zygote", rev="master"), 32 | # PackageSpec(name="Yao", rev="master"), 33 | # PackageSpec(name="YaoBlocks", rev="master"), 34 | # PackageSpec(name="YaoArrayRegister", rev="master"), 35 | # ] 36 | 37 | # function __init__() 38 | # for each in REQUIRE 39 | # if each in keys(Pkg.installed()) 40 | # continue 41 | # else 42 | # Pkg.add(each) 43 | # end 44 | # end 45 | # end 46 | 47 | end # module 48 | --------------------------------------------------------------------------------