├── .gitignore
├── Project.toml
├── onsager.jl
├── svd_backward.patch
├── trg.jl
├── torchsvd.py
├── trg.py
└── HOTRG.jl

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*~

--------------------------------------------------------------------------------
/Project.toml:
--------------------------------------------------------------------------------
[deps]
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
HCubature = "19dc6840-f33b-545b-b366-655c7e3ffd49"
TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2"

--------------------------------------------------------------------------------
/onsager.jl:
--------------------------------------------------------------------------------
using HCubature
using ForwardDiff

# Integrand of Onsager's exact solution for the square-lattice Ising model;
# see https://en.wikipedia.org/wiki/Ising_model
function integrand(x, K)
    log(cosh(2*K)^2 - sinh(2*K)*cos(x[1]) - sinh(2*K)*cos(x[2]))
end

function lnZ(K)
    log(2) + hcubature(x -> integrand(x, K), (0.0, 0.0), (2*pi, 2*pi), rtol=1e-10)[1] / (8*pi^2)
end

# first and second derivatives of lnZ via forward-mode AD,
# differentiating straight through the numerical integration
dlnZ(K::Vector) = ForwardDiff.gradient(K -> lnZ(K[1]), K)[1]
dlnZ2(K::Vector) = ForwardDiff.gradient(K -> dlnZ(K), K)[1]

# print K, lnZ, energy density -dlnZ/dK, and specific heat K^2 * d^2lnZ/dK^2
for K in 0.0:0.1:2.0
    println(K, " ", lnZ(K), " ", -dlnZ([K]), " ", dlnZ2([K])*K^2)
end
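For reference, the quantity that lnZ above evaluates is Onsager's exact free-energy density of the square-lattice Ising model (the formula cited in integrand), in the code's notation with dimensionless coupling K:

\[
\ln Z = \ln 2 + \frac{1}{8\pi^{2}} \int_{0}^{2\pi}\!\!\int_{0}^{2\pi}
\ln\!\left[\cosh^{2}(2K) - \sinh(2K)\left(\cos\theta_{1}+\cos\theta_{2}\right)\right]
d\theta_{1}\,d\theta_{2} .
\]

ForwardDiff differentiates straight through the hcubature call, so the energy density \(-\partial_K \ln Z\) and the specific heat \(K^{2}\,\partial_K^{2} \ln Z\) printed by the loop need no hand-derived formulas. This exact curve serves as the benchmark for the TRG and HOTRG results below.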
--------------------------------------------------------------------------------
/svd_backward.patch:
--------------------------------------------------------------------------------
diff --git a/tools/autograd/templates/Functions.cpp b/tools/autograd/templates/Functions.cpp
index 3062e0021..9fdc4a792 100644
--- a/tools/autograd/templates/Functions.cpp
+++ b/tools/autograd/templates/Functions.cpp
@@ -1557,11 +1557,12 @@ Tensor svd_backward(const std::vector<Tensor> &grads, const T
   auto sigma_mat_inv = sigma.pow(-1).diag();
   auto sigma_expanded_sq = sigma.pow(2).expand_as(sigma_mat);
   auto F = sigma_expanded_sq - sigma_expanded_sq.t();
+  F = F.div(F.pow(2) + 1E-12);
   // The following two lines invert values of F, and fills the diagonal with 0s.
   // Notice that F currently has 0s on diagonal. So we fill diagonal with +inf
   // first to prevent nan from appearing in backward of this function.
-  F.diagonal().fill_(INFINITY);
-  F = F.pow(-1);
+  //F.diagonal().fill_(INFINITY);
+  //F = F.pow(-1);
 
   Tensor u_term, v_term;
 

--------------------------------------------------------------------------------
/trg.jl:
--------------------------------------------------------------------------------
using LinearAlgebra: svd, Diagonal
using TensorOperations

function TRG(K::Float64, Dcut::Int, no_iter::Int)
    D = 2
    inds = 1:D

    # initial tensor of the 2D Ising model:
    # T[i,j,k,l] = sum_a M[a,i] M[a,j] M[a,k] M[a,l]
    T = zeros(Float64, D, D, D, D)
    M = [sqrt(cosh(K))  sqrt(sinh(K));
         sqrt(cosh(K)) -sqrt(sinh(K))]
    for i in inds, j in inds, k in inds, l in inds
        for a in inds
            T[i, j, k, l] += M[a, i] * M[a, j] * M[a, k] * M[a, l]
        end
    end

    lnZ = 0.0
    for n in 1:no_iter
        # pull out the largest element so T stays well conditioned;
        # the lattice still holds 2^(no_iter-n+1) tensors at this step
        maxval = maximum(T)
        T = T / maxval
        lnZ += 2^(no_iter - n + 1) * log(maxval)

        D_new = min(D^2, Dcut)

        Ma = reshape(permutedims(T, (3, 2, 1, 4)), (D^2, D^2))
        Mb = reshape(permutedims(T, (4, 3, 2, 1)), (D^2, D^2))

        # split each four-leg tensor into two three-leg tensors by truncated SVD
        F = svd(Ma)
        S1 = reshape(F.U[:, 1:D_new] * Diagonal(sqrt.(F.S[1:D_new])), (D, D, D_new))
        S3 = reshape(Diagonal(sqrt.(F.S[1:D_new])) * F.Vt[1:D_new, :], (D_new, D, D))
        F = svd(Mb)
        S2 = reshape(F.U[:, 1:D_new] * Diagonal(sqrt.(F.S[1:D_new])), (D, D, D_new))
        S4 = reshape(Diagonal(sqrt.(F.S[1:D_new])) * F.Vt[1:D_new, :], (D_new, D, D))

        # contract the four three-leg tensors into the coarse-grained tensor
        @tensor T_new[r, u, l, d] := S1[w, a, r] * S2[a, b, u] * S3[l, b, g] * S4[d, g, w]

        D = D_new
        T = T_new
    end

    trace = 0.0
    for i in 1:D
        trace += T[i, i, i, i]
    end
    lnZ += log(trace)
    return lnZ
end

Dcut = 10
n = 30

ts = 0.1:0.1:3
β = inv.(ts)
println("=====TRG======")
lnZ = Float64[]
for K in β
    y = TRG(K, Dcut, n)
    println(1/K, " ", y/2^n)   # lnZ per site
    push!(lnZ, y/2^n)
end
F = -ts .* lnZ   # free energy density F = -T * lnZ per site
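A quick consistency check, as a sketch that is not part of the repo: the matrix M seeding the TRG tensor satisfies M * M' = bond Boltzmann matrix, because cosh(K) ± sinh(K) = exp(±K). This is the same M = L R^T factorization that trg.py below obtains from an explicit SVD of the Boltzmann matrix.

using LinearAlgebra

K = 0.7
M = [sqrt(cosh(K))  sqrt(sinh(K));
     sqrt(cosh(K)) -sqrt(sinh(K))]

# bond Boltzmann matrix exp(K*s*s') for spins s, s' = ±1 (rows index the spin)
B = [exp(K)   exp(-K);
     exp(-K)  exp(K)]

@assert M * M' ≈ B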
--------------------------------------------------------------------------------
/torchsvd.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

def safe_inverse(x, epsilon=1E-12):
    """Regularized reciprocal: 1/x -> x/(x^2 + eps), finite at x = 0."""
    return x/(x**2 + epsilon)

class SVD(torch.autograd.Function):
    @staticmethod
    def forward(ctx, A):
        U, S, V = torch.svd(A)
        # If gesdd fails to converge, one can fall back to scipy's more
        # robust gesvd driver instead:
        #import scipy.linalg
        #U, S, Vt = scipy.linalg.svd(A.detach().numpy(), full_matrices=False, lapack_driver='gesvd')
        #U = torch.as_tensor(U, dtype=A.dtype, device=A.device)
        #S = torch.as_tensor(S, dtype=A.dtype, device=A.device)
        #V = torch.as_tensor(np.transpose(Vt), dtype=A.dtype, device=A.device)
        ctx.save_for_backward(U, S, V)
        return U, S, V

    @staticmethod
    def backward(ctx, dU, dS, dV):
        U, S, V = ctx.saved_tensors
        Vt = V.t()
        Ut = U.t()
        M = U.size(0)
        N = V.size(0)
        NS = len(S)

        # F[i,j] = 1/(S[j] - S[i]), regularized; zero on the diagonal
        F = (S - S[:, None])
        F = safe_inverse(F)
        F.diagonal().fill_(0)

        # G[i,j] = 1/(S[i] + S[j]); zero on the diagonal
        G = (S + S[:, None])
        G.diagonal().fill_(np.inf)
        G = 1/G

        UdU = Ut @ dU
        VdV = Vt @ dV

        Su = (F+G)*(UdU-UdU.t())/2
        Sv = (F-G)*(VdV-VdV.t())/2

        dA = U @ (Su + Sv + torch.diag(dS)) @ Vt
        # projector corrections for rectangular A
        if M > NS:
            dA = dA + (torch.eye(M, dtype=dU.dtype, device=dU.device) - U@Ut) @ (dU/S) @ Vt
        if N > NS:
            dA = dA + (U/S) @ dV.t() @ (torch.eye(N, dtype=dU.dtype, device=dU.device) - V@Vt)
        return dA

def test_svd():
    M, N = 50, 40
    torch.manual_seed(2)
    input = torch.rand(M, N, dtype=torch.float64, requires_grad=True)
    assert torch.autograd.gradcheck(SVD.apply, input, eps=1e-6, atol=1e-4)
    print("Test Pass!")

if __name__ == '__main__':
    test_svd()

--------------------------------------------------------------------------------
/trg.py:
--------------------------------------------------------------------------------
import torch
from torchsvd import SVD
svd = SVD.apply

def TRG(K, Dcut, no_iter, device='cpu', epsilon=1E-12):
    D = 2

    # Boltzmann factor on a bond, factorized as M = L R^T
    M = torch.stack([torch.cat([torch.exp(K), torch.exp(-K)]),
                     torch.cat([torch.exp(-K), torch.exp(K)])])
    U, S, V = svd(M)
    L = U*torch.sqrt(S)
    R = V*torch.sqrt(S)

    #     L
    #     |
    # T = R^{T}-o-L
    #     |
    #     R^{T}

    T = torch.einsum('ai,aj,ak,al->ijkl', (L, L, R, R))

    lnZ = 0.0
    for n in range(no_iter):
        # rescale to avoid overflow; absorb the factor into lnZ
        maxval = T.abs().max()
        T = T/maxval
        lnZ += 2**(no_iter-n)*torch.log(maxval)

        Ma = T.permute(2, 1, 0, 3).contiguous().view(D**2, D**2)
        Mb = T.permute(3, 2, 1, 0).contiguous().view(D**2, D**2)

        Ua, Sa, Va = svd(Ma)
        Ub, Sb, Vb = svd(Mb)

        # keep at most Dcut states, and never keep numerically zero singular values
        D_new = min(min(D**2, Dcut),
                    min((Sa > epsilon).sum().item(), (Sb > epsilon).sum().item()))

        S1 = (Ua[:, :D_new]*torch.sqrt(Sa[:D_new])).view(D, D, D_new)
        S3 = (Va[:, :D_new]*torch.sqrt(Sa[:D_new])).view(D, D, D_new)
        S2 = (Ub[:, :D_new]*torch.sqrt(Sb[:D_new])).view(D, D, D_new)
        S4 = (Vb[:, :D_new]*torch.sqrt(Sb[:D_new])).view(D, D, D_new)

        T_new = torch.einsum('war,abu,bgl,gwd->ruld', (S1, S2, S3, S4))

        D = D_new
        T = T_new

    trace = 0.0
    for i in range(D):
        trace += T[i, i, i, i]
    lnZ += torch.log(trace)

    return lnZ

if __name__ == "__main__":
    import numpy as np
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-float32", action='store_true', help="use float32")
    parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
    args = parser.parse_args()
    device = torch.device("cpu" if args.cuda < 0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.float32 else torch.float64

    Dcut = 24
    n = 20

    for K in np.linspace(0.4, 0.5, 101):
        beta = torch.tensor([K], dtype=dtype, device=device).requires_grad_()
        lnZ = TRG(beta, Dcut, n, device=device)
        dlnZ, = torch.autograd.grad(lnZ, beta, create_graph=True)  # energy: E = -dlnZ/dbeta
        dlnZ2, = torch.autograd.grad(dlnZ, beta)                   # Cv = beta^2 * d^2lnZ/dbeta^2
        print(K, lnZ.item()/2**n, -dlnZ.item()/2**n, dlnZ2.item()*beta.item()**2/2**n)
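Written out, the backward pass of torchsvd.py is the standard SVD adjoint with the same regularization that svd_backward.patch applies inside PyTorch. For \(A = U S V^{T}\) with incoming gradients \(dU, dS, dV\), and with F and G zero on the diagonal:

\[
F_{ij} = \frac{1}{s_{j} - s_{i}}, \qquad
G_{ij} = \frac{1}{s_{i} + s_{j}},
\]
\[
S_{U} = \tfrac{1}{2}\,(F+G)\circ\bigl(U^{T}dU - dU^{T}U\bigr), \qquad
S_{V} = \tfrac{1}{2}\,(F-G)\circ\bigl(V^{T}dV - dV^{T}V\bigr),
\]
\[
dA = U\bigl(S_{U} + S_{V} + \operatorname{diag}(dS)\bigr)V^{T}
   + \bigl(I - UU^{T}\bigr)\,dU\,S^{-1}V^{T}
   + U\,S^{-1}\,dV^{T}\bigl(I - VV^{T}\bigr),
\]

where the last two terms contribute only for rectangular A. safe_inverse replaces \(1/x\) by \(x/(x^{2}+\epsilon)\) in F, so nearly degenerate singular values no longer produce infinities in the gradient; this is what makes the repeated SVDs inside TRG differentiable in practice.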
--------------------------------------------------------------------------------
/HOTRG.jl:
--------------------------------------------------------------------------------
using TensorOperations
using LinearAlgebra

function IsingMatrix(K::Float64)
    [sqrt(cosh(K))  sqrt(sinh(K));
     sqrt(cosh(K)) -sqrt(sinh(K))]
end

function Ising(K::Float64)
    D = 2
    inds = 1:D
    T = zeros(Float64, D, D, D, D)
    M = IsingMatrix(K)
    for i in inds, j in inds, k in inds, l in inds
        for a in inds
            T[i, j, k, l] += M[a, i] * M[a, j] * M[a, k] * M[a, l]
        end
    end
    T
end

function Gauge(T::Array{Float64}, Dcut::Int, s::Char)
    # T is a D*D*D*D tensor with leg order u, l, d, r.
    # Build the density matrix of the left (or right) legs of two stacked
    # tensors and keep its Dcut leading eigenvectors as the projector.
    if s == 'l' || s == 'L'
        @tensor M_l[a, A, c, C] := (T[x, a, z, b] * T[x, c, w, b]) * (T[w, C, y, B] * T[z, A, y, B])
        D = size(M_l, 1)
        M_l = reshape(M_l, (D*D, D*D))
        M_l = (M_l + M_l') / 2   # symmetrize against round-off
        vl, Ul = eigen(M_l)
        D_new = min(D^2, Dcut)
        inds_new = 1:D_new
        p = sortperm(vl, rev=true)
        TrunErrLeft = 1 - sum(vl[p[inds_new]]) / sum(vl)
        Ul = reshape(Ul[:, p[inds_new]], (D, D, D_new))
        return Ul, TrunErrLeft
    elseif s == 'r' || s == 'R'
        @tensor M_r[a, A, c, C] := (T[x, b, z, a] * T[x, b, w, c]) * (T[w, B, y, C] * T[z, B, y, A])
        D = size(M_r, 1)
        M_r = reshape(M_r, (D*D, D*D))
        M_r = (M_r + M_r') / 2
        vr, Ur = eigen(M_r)
        D_new = min(D^2, Dcut)
        inds_new = 1:D_new
        p = sortperm(vr, rev=true)
        TrunErrRight = 1 - sum(vr[p[inds_new]]) / sum(vr)
        Ur = reshape(Ur[:, p[inds_new]], (D, D, D_new))
        return Ur, TrunErrRight
    end
end

function HOTRG(T::Array{Float64}, Dcut::Int, no_iter::Int)
    lnZ = 0.0
    for k = 1:no_iter
        Ul, TrunErrLeft = Gauge(T, Dcut, 'l')
        Ur, TrunErrRight = Gauge(T, Dcut, 'r')
        # keep the projector with the smaller truncation error
        U = TrunErrLeft < TrunErrRight ? Ul : Ur
        @tensoropt T[w, x, z, y] := T[x, a, o, b] * U[a, A, z] * T[o, A, y, B] * U[b, B, w]
        f = norm(T)
        lnZ += log(f) / 2^k   # the tensor now stands for 2^k sites
        T = T / f
    end
    trace = 0.0
    D1 = size(T, 1)
    D2 = size(T, 2)
    for x = 1:D1, y = 1:D2
        trace += T[x, y, x, y]
    end
    lnZ += log(trace) / 2^no_iter
    return lnZ
end

function OneIsingTensor(Ku::Float64, Kl::Float64, Kd::Float64, Kr::Float64)
    T = zeros(Float64, 2, 2, 2, 2)
    Mu = IsingMatrix(Ku)
    Md = IsingMatrix(Kd)
    Ml = IsingMatrix(Kl)
    Mr = IsingMatrix(Kr)
    inds = 1:2
    for i in inds, j in inds, k in inds, l in inds
        for a in inds
            T[i, j, k, l] += Mu[a, i] * Ml[a, j] * Md[a, k] * Mr[a, l]
        end
    end
    return T
end

function SpinGlassTensor(Ks::Array{Tuple{Float64,Float64},2}, ix::Int, iy::Int)
    # Construct the tensor at site (ix,iy).
    # Ks is an Lx * Ly matrix of bond couplings (Jx,Jy), where Jx is the
    # rightward (horizontal) and Jy the downward (vertical) bond of each site.
    # Square lattice of size (Lx,Ly) with PBC:
    # 1,1 -- 1,2 -- ... 1,Ly ---
    #  |      |           |
    # 2,1 -- 2,2 -- ... 2,Ly ---
    #  |      |           |
    #  .      .    ...    .
    # Lx,1 -- Lx,2 .... Lx,Ly ---
    #  |      |           |
    Lx, Ly = size(Ks)
    Kd = Ks[ix, iy][2]
    Kr = Ks[ix, iy][1]
    # bonds shared with the upper and left neighbours (wrapping around)
    Ku = Ks[ix == 1 ? Lx : ix-1, iy][2]
    Kl = Ks[ix, iy == 1 ? Ly : iy-1][1]
    OneIsingTensor(Ku, Kl, Kd, Kr)
end

function test()
    Dcut = 30
    n = 30
    println("=====HOTRG======")
    ts = 0.1:0.1:3
    β = inv.(ts)
    lnZ = Float64[]
    for K in β
        T = Ising(K)
        y = HOTRG(T, Dcut, n)
        println(1/K, " ", y)   # HOTRG already returns lnZ per site
        push!(lnZ, y)
    end
    F = -ts .* lnZ   # free energy density
end
--------------------------------------------------------------------------------
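A usage sketch under assumptions, not part of the repo: HOTRG returns the free-energy density directly, so a single coupling can be cross-checked against the exact Onsager value from onsager.jl (note that including onsager.jl also runs its own scan over K as a side effect).

include("onsager.jl")   # defines lnZ(K); also prints its own K-scan
include("HOTRG.jl")     # defines Ising, HOTRG, test

K = 0.4
println("exact Onsager lnZ: ", lnZ(K))
println("HOTRG lnZ:         ", HOTRG(Ising(K), 30, 30))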