├── .gitignore
├── README.md
├── TrialModel.py
├── models.py
├── requirements.txt
└── utils.py

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | .idea/
6 | # C extensions
7 | *.so
8 | 
9 | # Model Weights
10 | *.pt
11 | 
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | pip-wheel-metadata/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 | 
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 | 
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 | 
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | *.py,cover
54 | .hypothesis/
55 | .pytest_cache/
56 | 
57 | # Translations
58 | *.mo
59 | *.pot
60 | 
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 | 
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 | 
71 | # Scrapy stuff:
72 | .scrapy
73 | 
74 | # Sphinx documentation
75 | docs/_build/
76 | 
77 | # PyBuilder
78 | target/
79 | 
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 | 
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 | 
87 | # pyenv
88 | .python-version
89 | 
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 | 
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 | 
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 | 
104 | # SageMath parsed files
105 | *.sage.py
106 | 
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 | env1/
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 | 
120 | # Rope project settings
121 | .ropeproject
122 | 
123 | # mkdocs documentation
124 | /site
125 | 
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 | 
131 | # Pyre type checker
132 | .pyre/
133 | 
134 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GCN Partitioning
2 | Graph Partitioning Using Graph Convolutional Networks as described in [GAP: Generalizable Approximate Graph Partitioning Framework](https://arxiv.org/abs/1903.00614)
3 | 
4 | ## Loss Backward Equations
5 | To handle large graphs, the loss function is implemented with sparse torch tensors in a custom loss class.
6 | 
7 | If $Z = (Y / \Gamma)(1 - Y)^{T} \circ A$
8 | 
9 | where $Y_{ij}$ is the probability of node $i$ being in partition $j$.
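Here $\Gamma$ is the vector of expected partition volumes; following `Gamma = torch.mm(Y.t(), D.unsqueeze(1))` in `CutLoss.forward`,

$$\Gamma_{j} = \sum_{i} Y_{ij} D_{i} \qquad \text{i.e.} \qquad \Gamma = Y^{T}D,$$

where $D_{i}$ is the degree of node $i$.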
10 | 
11 | $$L = \sum_{A_{lm} \neq 0} Z_{lm}$$
12 | 
13 | Then the gradients can be calculated by the equations:
14 | 
15 | $$\frac{\partial z_{i \alpha}}{\partial y_{ij}} = A_{i \alpha} \left(\frac{\Gamma_{j} (1 - y_{\alpha j}) - y_{ij}(1 - y_{\alpha j})D_{i}}{\Gamma_{j}^{2}}\right)$$
16 | 
17 | $$\frac{\partial z_{\alpha i}}{\partial y_{ij}} = A_{\alpha i} \left(\frac{\Gamma_{j} (- y_{\alpha j}) - y_{\alpha j}(1 - y_{ij})D_{i}}{\Gamma_{j}^{2}}\right)$$
18 | 
19 | $$\frac{\partial z_{i^{'} \alpha}}{\partial y_{ij}} = -A_{i^{'} \alpha} \left(\frac{(1 - y_{\alpha j}) y_{i^{'}j}D_{i}}{\Gamma_{j}^{2}}\right) \;\;\; i^{'}, \alpha \neq i$$
20 | 
21 | ## Installation
22 | Create a virtual environment using venv
23 | 
24 | ```bash
25 | python3 -m venv env
26 | ```
27 | 
28 | Activate the virtual environment
29 | 
30 | ```bash
31 | source env/bin/activate
32 | ```
33 | 
34 | Use the package manager [pip](https://pip.pypa.io/en/stable/) to install the requirements.
35 | 
36 | ```bash
37 | pip install -r requirements.txt
38 | ```
39 | 
40 | ## Usage
41 | ```bash
42 | python TrialModel.py
43 | ```
44 | ## Limitations
45 | The code has only been tested on small custom graphs, and it assumes a CUDA-capable GPU (all tensors are moved to `cuda` in `TrialModel.py`).
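To try a different graph, one option is to edit `input_matrix()` in `utils.py`, which returns the adjacency matrix as a SciPy CSR matrix; `main()` in `TrialModel.py` then adds self-loops, normalises it, and trains on it. A minimal sketch (the 4-node cycle below is made up for illustration):

```python
import numpy as np
import scipy.sparse as sp

def input_matrix():
    '''
    Returns a test sparse SciPy adjacency matrix (here: a 4-node cycle)
    '''
    N = 4
    # list every undirected edge in both directions
    row = np.array([0, 1, 1, 2, 2, 3, 3, 0])
    col = np.array([1, 0, 2, 1, 3, 2, 0, 3])
    data = np.ones(len(row))
    return sp.csr_matrix((data, (row, col)), shape=(N, N))
```

The number of partitions is set by the last entry of `ll` in `TrialModel.py` (currently 2).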
46 | 
47 | ## License
48 | [MIT](https://choosealicense.com/licenses/mit/)
49 | 
--------------------------------------------------------------------------------
/TrialModel.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | import tqdm
4 | import time
5 | import argparse
6 | import numpy as np
7 | import scipy.sparse as sp
8 | import torch
9 | import torch.nn.functional as F
10 | import torch.optim as optim
11 | from utils import *
12 | from scipy import sparse
13 | from models import *
14 | 
15 | 
16 | def Train(model, x, adj, A, optimizer):
17 |     '''
18 |     Training Specifications
19 |     '''
20 | 
21 |     max_epochs = 100
22 |     min_loss = 100
23 |     for epoch in (range(max_epochs)):
24 |         Y = model(x, adj)
25 |         loss = CutLoss.apply(Y,A)
26 |         # loss = custom_loss(Y, A)
27 |         print('Epoch {}: Loss = {}'.format(epoch, loss.item()))
28 |         if loss < min_loss:
29 |             min_loss = loss.item()
30 |             torch.save(model.state_dict(), "./trial_weights.pt")
31 |         optimizer.zero_grad()  # clear gradients accumulated in the previous epoch
32 |         loss.backward()
33 |         optimizer.step()
34 | 
35 | 
36 | def Test(model, x, adj, A, *argv):
37 |     '''
38 |     Test Final Results
39 |     '''
40 |     model.load_state_dict(torch.load("./trial_weights.pt"))
41 |     Y = model(x, adj)
42 |     node_idx = test_partition(Y)
43 |     print(node_idx)
44 |     if argv != ():
45 |         if argv[0] == 'debug':
46 |             print('Normalized Cut obtained using the above partition is : {0:.3f}'.format(custom_loss(Y,A).item()))
47 |     else:
48 |         print('Normalized Cut obtained using the above partition is : {0:.3f}'.format(CutLoss.apply(Y,A).item()))
49 | 
50 | def main():
51 |     '''
52 |     Adjacency matrix and modifications
53 |     '''
54 |     A = input_matrix()
55 | 
56 |     # Modifications
57 |     A_mod = A + sp.eye(A.shape[0])  # Adding Self Loop
58 |     norm_adj = symnormalise(A_mod)  # Normalization using D^(-1/2) A D^(-1/2)
59 |     adj = sparse_mx_to_torch_sparse_tensor(norm_adj).to('cuda')  # SciPy to Torch sparse
60 |     As = sparse_mx_to_torch_sparse_tensor(A).to('cuda')  # SciPy to sparse Tensor
61 |     A = sparse_mx_to_torch_sparse_tensor(A).to_dense().to('cuda')  # SciPy to Torch Tensor
62 |     print(A)
63 | 
64 |     '''
65 |     Declare Input Size and Tensor
66 |     '''
67 |     N = A.shape[0]
68 |     d = 512
69 | 
70 |     torch.manual_seed(100)
71 |     x = torch.randn(N, d)
72 |     x = x.to('cuda')
73 | 
74 |     '''
75 |     Model Definition
76 |     '''
77 |     gl = [d, 64, 16]
78 |     ll = [16, 2]
79 | 
80 |     model = GCN(gl, ll, dropout=0.5).to('cuda')
81 |     optimizer = optim.Adam(model.parameters(), lr=5e-4, weight_decay=5e-6)
82 |     print(model)
83 | 
84 |     # check_grad(model, x, adj, A, As)
85 | 
86 |     #Train
87 |     Train(model, x, adj, As, optimizer)
88 | 
89 |     # Test the best partition
90 |     Test(model, x, adj, As)
91 | 
92 | if __name__ == '__main__':
93 |     main()
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import torch
4 | import scipy.sparse as sp
5 | from torch.nn.parameter import Parameter
6 | from torch.nn.modules.module import Module
7 | import torch.nn.functional as F
8 | import torch.nn as nn
9 | from utils import *
10 | 
11 | 
12 | class GraphConvolution(Module):
13 |     """
14 |     Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
15 |     """
16 | 
17 |     def __init__(self, in_features, out_features, bias=True):
18 |         super(GraphConvolution, self).__init__()
19 |         self.in_features = in_features
20 |         self.out_features = out_features
21 |         self.weight =
Parameter(torch.FloatTensor(in_features, out_features)) 22 | if bias: 23 | self.bias = Parameter(torch.FloatTensor(out_features)) 24 | else: 25 | self.register_parameter('bias', None) 26 | self.reset_parameters() 27 | 28 | def reset_parameters(self): 29 | stdv = 1. / math.sqrt(self.weight.size(1)) 30 | self.weight.data.uniform_(-stdv, stdv) 31 | if self.bias is not None: 32 | self.bias.data.uniform_(-stdv, stdv) 33 | 34 | def forward(self, H, A): 35 | W = self.weight 36 | b = self.bias 37 | 38 | HW = torch.mm(H, W) 39 | # AHW = SparseMM.apply(A, HW) 40 | AHW = torch.spmm(A, HW) 41 | if self.bias is not None: 42 | return AHW + b 43 | else: 44 | return AHW 45 | 46 | def __repr__(self): 47 | return self.__class__.__name__ + ' (' \ 48 | + str(self.in_features) + ' -> ' \ 49 | + str(self.out_features) + ')' 50 | 51 | 52 | class GCN(torch.nn.Module): 53 | 54 | def __init__(self, gl, ll, dropout): 55 | super(GCN, self).__init__() 56 | if ll[0] != gl[-1]: 57 | assert 'Graph Conv Last layer and Linear first layer sizes dont match' 58 | # self.gc1 = GraphConvolution(nfeat, nhid) 59 | # self.gc2 = GraphConvolution(nhid, nclass) 60 | self.dropout = dropout 61 | self.graphlayers = nn.ModuleList([GraphConvolution(gl[i], gl[i+1], bias=True) for i in range(len(gl)-1)]) 62 | self.linlayers = nn.ModuleList([nn.Linear(ll[i], ll[i+1]) for i in range(len(ll)-1)]) 63 | 64 | def forward(self, H, A): 65 | # x = F.relu(self.gc1(x, adj)) 66 | # x = F.dropout(x, self.dropout, training=self.training) 67 | # x = self.gc2(x, adj) 68 | for idx, hidden in enumerate(self.graphlayers): 69 | H = F.relu(hidden(H,A)) 70 | if idx < len(self.graphlayers) - 2: 71 | H = F.dropout(H, self.dropout, training=self.training) 72 | 73 | H_emb = H 74 | 75 | for idx, hidden in enumerate(self.linlayers): 76 | H = F.relu(hidden(H)) 77 | 78 | # print(H) 79 | return F.softmax(H, dim=1) 80 | 81 | def __repr__(self): 82 | return str([self.graphlayers[i] for i in range(len(self.graphlayers))] + [self.linlayers[i] for i in range(len(self.linlayers))]) 83 | 84 | 85 | class CutLoss(torch.autograd.Function): 86 | ''' 87 | Class for forward and backward pass for the loss function described in https://arxiv.org/abs/1903.00614 88 | 89 | arguments: 90 | Y_ij : Probability that a node i belongs to partition j 91 | A : sparse adjecency matrix 92 | 93 | Returns: 94 | Loss : Y/Gamma * (1 - Y)^T dot A 95 | ''' 96 | 97 | @staticmethod 98 | def forward(ctx, Y, A): 99 | ctx.save_for_backward(Y,A) 100 | D = torch.sparse.sum(A, dim=1).to_dense() 101 | Gamma = torch.mm(Y.t(), D.unsqueeze(1)) 102 | YbyGamma = torch.div(Y, Gamma.t()) 103 | # print(Gamma) 104 | Y_t = (1 - Y).t() 105 | loss = torch.tensor([0.], requires_grad=True).to('cuda') 106 | idx = A._indices() 107 | data = A._values() 108 | for i in range(idx.shape[1]): 109 | # print(YbyGamma[idx[0,i],:].dtype) 110 | # print(Y_t[:,idx[1,i]].dtype) 111 | # print(torch.dot(YbyGamma[idx[0, i], :], Y_t[:, idx[1, i]]) * data[i]) 112 | loss += torch.dot(YbyGamma[idx[0, i], :], Y_t[:, idx[1, i]]) * data[i] 113 | # print(loss) 114 | # loss = torch.sum(torch.mm(YbyGamma, Y_t) * A) 115 | return loss 116 | 117 | @staticmethod 118 | def backward(ctx, grad_out): 119 | Y, A, = ctx.saved_tensors 120 | idx = A._indices() 121 | data = A._values() 122 | D = torch.sparse.sum(A, dim=1).to_dense() 123 | Gamma = torch.mm(Y.t(), D.unsqueeze(1)) 124 | # print(Gamma.shape) 125 | gradient = torch.zeros_like(Y) 126 | # print(gradient.shape) 127 | for i in range(gradient.shape[0]): 128 | for j in range(gradient.shape[1]): 129 | alpha_ind = 
(idx[0, :] == i).nonzero() 130 | alpha = idx[1, alpha_ind] 131 | A_i_alpha = data[alpha_ind] 132 | temp = A_i_alpha / torch.pow(Gamma[j], 2) * (Gamma[j] * (1 - 2 * Y[alpha, j]) - D[i] * ( 133 | Y[i, j] * (1 - Y[alpha, j]) + (1 - Y[i, j]) * (Y[alpha, j]))) 134 | gradient[i, j] = torch.sum(temp) 135 | 136 | l_idx = list(idx.t()) 137 | l2 = [] 138 | l2_val = [] 139 | # [l2.append(mem) for mem in l_idx if((mem[0] != i).item() and (mem[1] != i).item())] 140 | for ptr, mem in enumerate(l_idx): 141 | if ((mem[0] != i).item() and (mem[1] != i).item()): 142 | l2.append(mem) 143 | l2_val.append(data[ptr]) 144 | extra_gradient = 0 145 | if (l2 != []): 146 | for val, mem in zip(l2_val, l2): 147 | extra_gradient += (-D[i] * torch.sum( 148 | Y[mem[0], j] * (1 - Y[mem[1], j]) / torch.pow(Gamma[j], 2))) * val 149 | 150 | gradient[i, j] += extra_gradient 151 | 152 | # print(gradient) 153 | return gradient, None -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ConfigArgParse==1.0 2 | numpy==1.18.1 3 | pkg-resources==0.0.0 4 | PyYAML==5.3 5 | scipy==1.4.1 6 | torch==1.4.0 7 | tqdm==4.42.1 8 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import torch 4 | import scipy.sparse as sp 5 | from torch.nn.parameter import Parameter 6 | from torch.nn.modules.module import Module 7 | import torch.nn.functional as F 8 | import torch.nn as nn 9 | from models import * 10 | 11 | def input_matrix(): 12 | ''' 13 | Returns a test sparse SciPy adjecency matrix 14 | ''' 15 | # N = 8 16 | # data = np.ones(2 * 11) 17 | # row = np.array([0,0,1,1,1,2,2,2,3,3,3,4,4,4,4,5,5,6,6,6,7,7]) 18 | # col = np.array([1,2,0,2,3,0,1,3,1,2,4,3,5,6,7,4,6,4,5,7,4,6]) 19 | 20 | N = 7 21 | data = np.ones(2 * 9) 22 | row = np.array([0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6]) 23 | col = np.array([2, 3, 4, 6, 0, 4, 5, 6, 0, 4, 5, 1, 2, 3, 2, 3, 1, 2]) 24 | 25 | # N = 3 26 | # data = np.array([1/2,1/2,1/3,1/3]) 27 | # row = np.array([0,1,1,2]) 28 | # col = np.array([1,0,2,1]) 29 | 30 | A = sp.csr_matrix((data, (row, col)), shape=(N, N)) 31 | 32 | return A 33 | 34 | def check_grad(model, x, adj, A, As): 35 | Y = model(x, adj) 36 | Y.register_hook(print) 37 | print(Y) 38 | print('\n') 39 | loss1 = CutLoss.apply(Y,As) 40 | loss = custom_loss(Y, A) 41 | print('\n') 42 | loss.backward() 43 | print('\n') 44 | loss1.backward() 45 | # test_backward(Y,As) 46 | # test = torch.autograd.gradcheck(CutLoss.apply, (Y.double(), As.double()), check_sparse_nnz=True) 47 | 48 | 49 | class SparseMM(torch.autograd.Function): 50 | """ 51 | Sparse x dense matrix multiplication with autograd support. 
52 | 53 | Implementation by Soumith Chintala: 54 | https://discuss.pytorch.org/t/ 55 | does-pytorch-support-autograd-on-sparse-matrix/6156/7 56 | """ 57 | @staticmethod 58 | def forward(ctx, M1, M2): 59 | ctx.save_for_backward(M1, M2) 60 | return torch.mm(M1, M2) 61 | 62 | @staticmethod 63 | def backward(ctx, g): 64 | M1, M2 = ctx.saved_tensors 65 | g1 = g2 = None 66 | 67 | if ctx.needs_input_grad[0]: 68 | g1 = torch.mm(g, M2.t()) 69 | 70 | if ctx.needs_input_grad[1]: 71 | g2 = torch.mm(M1.t(), g) 72 | 73 | return g1, g2 74 | 75 | 76 | class GCN(torch.nn.Module): 77 | 78 | def __init__(self, gl, ll, dropout): 79 | super(GCN, self).__init__() 80 | if ll[0] != gl[-1]: 81 | assert 'Graph Conv Last layer and Linear first layer sizes dont match' 82 | # self.gc1 = GraphConvolution(nfeat, nhid) 83 | # self.gc2 = GraphConvolution(nhid, nclass) 84 | self.dropout = dropout 85 | self.graphlayers = nn.ModuleList([GraphConvolution(gl[i], gl[i+1], bias=True) for i in range(len(gl)-1)]) 86 | self.linlayers = nn.ModuleList([nn.Linear(ll[i], ll[i+1]) for i in range(len(ll)-1)]) 87 | 88 | def forward(self, H, A): 89 | # x = F.relu(self.gc1(x, adj)) 90 | # x = F.dropout(x, self.dropout, training=self.training) 91 | # x = self.gc2(x, adj) 92 | for idx, hidden in enumerate(self.graphlayers): 93 | H = F.relu(hidden(H,A)) 94 | if idx < len(self.graphlayers) - 2: 95 | H = F.dropout(H, self.dropout, training=self.training) 96 | 97 | H_emb = H 98 | 99 | for idx, hidden in enumerate(self.linlayers): 100 | H = F.relu(hidden(H)) 101 | 102 | # print(H) 103 | return F.softmax(H, dim=1) 104 | 105 | def __repr__(self): 106 | return str([self.graphlayers[i] for i in range(len(self.graphlayers))] + [self.linlayers[i] for i in range(len(self.linlayers))]) 107 | 108 | 109 | def custom_loss(Y, A): 110 | ''' 111 | loss function described in https://arxiv.org/abs/1903.00614 112 | 113 | arguments: 114 | Y_ij : Probability that a node i belongs to partition j 115 | A : dense adjecency matrix 116 | 117 | Returns: 118 | Loss : Y/Gamma * (1 - Y)^T dot A 119 | ''' 120 | D = torch.sum(A, dim=1) 121 | Gamma = torch.mm(Y.t(), D.unsqueeze(1)) 122 | # print(Gamma) 123 | loss = torch.sum(torch.mm(torch.div(Y.float(), Gamma.t()), (1 - Y).t().float()) * A.float()) 124 | return loss 125 | 126 | # loss = custom_loss(Y, A) 127 | def to_sparse(x): 128 | """ converts dense tensor x to sparse format """ 129 | x_typename = torch.typename(x).split('.')[-1] 130 | sparse_tensortype = getattr(torch.sparse, x_typename) 131 | 132 | indices = torch.nonzero(x) 133 | if len(indices.shape) == 0: # if all elements are zeros 134 | return sparse_tensortype(*x.shape) 135 | indices = indices.t() 136 | values = x[tuple(indices[i] for i in range(indices.shape[0]))] 137 | return sparse_tensortype(indices, values, x.size()) 138 | 139 | def custom_loss_sparse(Y, A): 140 | ''' 141 | loss function described in https://arxiv.org/abs/1903.00614 142 | 143 | arguments: 144 | Y_ij : Probability that a node i belongs to partition j 145 | A : sparse adjecency matrix 146 | 147 | Returns: 148 | Loss : Y/Gamma * (1 - Y)^T dot A 149 | ''' 150 | D = torch.sparse.sum(A, dim=1).to_dense() 151 | Gamma = torch.mm(Y.t(), D.unsqueeze(1).float()) 152 | YbyGamma = torch.div(Y, Gamma.t()) 153 | Y_t = (1 - Y).t() 154 | loss = torch.tensor([0.]) 155 | idx = A._indices() 156 | for i in range(idx.shape[1]): 157 | loss += torch.dot(YbyGamma[idx[0,i],:], Y_t[:,idx[1,i]]) 158 | return loss 159 | 160 | def RandLargeGraph(N,c): 161 | ''' 162 | Creates large random graphs with c fraction connections 
compared to the actual graph size 163 | ''' 164 | i = (torch.LongTensor(2,int(c * N)).random_(0, N)) 165 | v = 1. * torch.ones(int(c * N)) 166 | return torch.sparse.FloatTensor(i, v, torch.Size([N, N])) 167 | 168 | 169 | def test_backward(Y,A): 170 | ''' 171 | This a function to debug if the gradients from the CutLoss class match the actual gradients 172 | ''' 173 | idx = A._indices() 174 | data = A._values() 175 | D = torch.sparse.sum(A, dim=1).to_dense() 176 | Gamma = torch.mm(Y.t(), D.unsqueeze(1)) 177 | # print(Gamma.shape) 178 | gradient = torch.zeros_like(Y, requires_grad=True) 179 | # print(gradient.shape) 180 | # print(idx) 181 | for i in range(gradient.shape[0]): 182 | for j in range(gradient.shape[1]): 183 | # if i == 1 and j == 0: 184 | alpha_ind = (idx[0, :] == i).nonzero() 185 | alpha = idx[1, alpha_ind] 186 | A_i_alpha = data[alpha_ind] 187 | temp = A_i_alpha/ torch.pow(Gamma[j], 2) * ( Gamma[j] * (1 - 2 * Y[alpha, j]) - D[i] * ( Y[i, j] * (1 - Y[alpha, j]) + (1 - Y[i, j]) * (Y[alpha, j]) ) ) 188 | gradient[i, j] = torch.sum(temp) 189 | 190 | l_idx = list(idx.t()) 191 | l2 = [] 192 | l2_val = [] 193 | # [l2.append(mem) for mem in l_idx if((mem[0] != i).item() and (mem[1] != i).item())] 194 | for ptr, mem in enumerate(l_idx): 195 | if ((mem[0] != i).item() and (mem[1] != i).item()): 196 | l2.append(mem) 197 | l2_val.append(data[ptr]) 198 | extra_gradient = 0 199 | if(l2 != []): 200 | for val, mem in zip(l2_val, l2): 201 | extra_gradient += (-D[i] * torch.sum(Y[mem[0],j] * (1 - Y[mem[1],j]) / torch.pow(Gamma[j],2))) * val 202 | 203 | gradient[i,j] += extra_gradient 204 | 205 | print(gradient) 206 | 207 | 208 | 209 | 210 | 211 | def normalize(mx): 212 | """Row-normalize sparse matrix""" 213 | rowsum = np.array(mx.sum(1)) 214 | r_inv = np.power(rowsum, -1).flatten() 215 | r_inv[np.isinf(r_inv)] = 0. 216 | r_mat_inv = sp.diags(r_inv) 217 | mx = r_mat_inv.dot(mx) 218 | return mx 219 | 220 | 221 | def symnormalise(M): 222 | """ 223 | symmetrically normalise sparse matrix 224 | 225 | arguments: 226 | M: scipy sparse matrix 227 | 228 | returns: 229 | D^{-1/2} M D^{-1/2} 230 | where D is the diagonal node-degree matrix 231 | """ 232 | 233 | d = np.array(M.sum(1)) 234 | 235 | dhi = np.power(d, -1 / 2).flatten() 236 | dhi[np.isinf(dhi)] = 0. 237 | DHI = sp.diags(dhi) # D half inverse i.e. D^{-1/2} 238 | 239 | return (DHI.dot(M)).dot(DHI) 240 | 241 | def sparse_mx_to_torch_sparse_tensor(sparse_mx): 242 | """Convert a scipy sparse matrix to a torch sparse tensor.""" 243 | sparse_mx = sparse_mx.tocoo().astype(np.float32) 244 | indices = torch.from_numpy( 245 | np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)) 246 | values = torch.from_numpy(sparse_mx.data) 247 | shape = torch.Size(sparse_mx.shape) 248 | return torch.sparse.FloatTensor(indices, values, shape) 249 | 250 | def test_partition(Y): 251 | _, idx = torch.max(Y, 1) 252 | return idx 253 | 254 | def Train_dense(model, x, adj, A, optimizer): 255 | ''' 256 | Training Specifications 257 | ''' 258 | 259 | max_epochs = 100 260 | min_loss = 100 261 | for epoch in (range(max_epochs)): 262 | Y = model(x, adj) 263 | # loss = CutLoss.apply(Y,A) 264 | loss = custom_loss(Y, A) 265 | print('Epoch {}: Loss = {}'.format(epoch, loss.item())) 266 | if loss < min_loss: 267 | min_loss = loss.item() 268 | torch.save(model.state_dict(), "./trial_weights.pt") 269 | loss.backward() 270 | optimizer.step() 271 | --------------------------------------------------------------------------------