├── DBLP
│   ├── DBLP_GCN.py
│   ├── DBLP_SCT.py
│   ├── DBLP_utils.py
│   ├── README.md
│   ├── layers.py
│   ├── normalization.py
│   ├── utils.py
│   └── utils_sct.py
├── Figures
│   ├── h116h251.png
│   └── readme
├── README.md
├── data
│   ├── ind.citeseer.allx
│   ├── ind.citeseer.ally
│   ├── ind.citeseer.graph
│   ├── ind.citeseer.test.index
│   ├── ind.citeseer.tx
│   ├── ind.citeseer.ty
│   ├── ind.citeseer.x
│   ├── ind.citeseer.y
│   ├── ind.cora.allx
│   ├── ind.cora.ally
│   ├── ind.cora.graph
│   ├── ind.cora.test.index
│   ├── ind.cora.tx
│   ├── ind.cora.ty
│   ├── ind.cora.x
│   ├── ind.cora.y
│   ├── ind.pubmed.allx
│   ├── ind.pubmed.ally
│   ├── ind.pubmed.graph
│   ├── ind.pubmed.test.index
│   ├── ind.pubmed.tx
│   ├── ind.pubmed.ty
│   ├── ind.pubmed.x
│   └── ind.pubmed.y
├── layers.py
├── load_pretrain_model.py
├── models.py
├── normalization.py
├── pytorchtools.py
├── state_dict_model.pt
├── train.py
└── utils.py
/DBLP/DBLP_GCN.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import scipy.sparse as sp
3 | import argparse
4 | import torch
5 | import torch.nn.functional as F
6 | import torch.nn as nn
7 | from torch_geometric.datasets import CitationFull
8 | from torch_geometric.utils import to_scipy_sparse_matrix
9 | import torch_geometric.transforms as T
10 | path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'DBLP')
11 | from torch_geometric.utils import to_scipy_sparse_matrix
12 | from utils import normalize_adjacency_matrix,normalizemx
13 | from DBLP_utils import SCAT_Red
14 | from utils import normalize_adjacency_matrix,sparse_mx_to_torch_sparse_tensor
15 | from layers import GC_withres,GraphConvolution
16 | #from torch_geometric.nn import GATConv
17 | from torch.optim.lr_scheduler import MultiStepLR,StepLR
18 |
19 | #dataset = TUDataset(root= path,name='REDDIT-BINARY')
20 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
21 |
22 | dataset = CitationFull(path,name = 'dblp',transform=T.TargetIndegree())
23 | data = dataset[0]
24 | # Num of feat:1639
25 | adj = to_scipy_sparse_matrix(edge_index = data.edge_index)
26 | adj = adj+ adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
27 | A_tilde = sparse_mx_to_torch_sparse_tensor(normalize_adjacency_matrix(adj,sp.eye(adj.shape[0]))).to(device)
28 | adj = sparse_mx_to_torch_sparse_tensor(adj).to(device)
29 | #print(dataset)
30 | #print(data.x.shape)
31 | #print(data.y.shape)
32 |
33 |
34 | #tp = SCAT_Red(in_features=1639,med_f0=10,med_f1=10,med_f2=10,med_f3=10,med_f4=10).to(device)
35 | #tp2 = SCAT_Red(in_features=40,med_f0=30,med_f1=10,med_f2=10,med_f3=10,med_f4=10).to(device)
36 | train_mask = torch.cat((torch.ones(10000),torch.zeros(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
37 | val_mask = torch.cat((torch.zeros(10000),torch.ones(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
38 | test_mask = torch.cat((torch.zeros(10000),torch.zeros(2000),torch.ones(2000),torch.zeros(3716)),0)>0
39 |
40 | class GCN(nn.Module):
41 | def __init__(self, nfeat, nhid, nclass, dropout):
42 | super(GCN, self).__init__()
43 |
44 | self.gc1 = GraphConvolution(nfeat, nhid)
45 | # self.gc12 = GraphConvolution(nhid, nhid)
46 | self.gc2 = GraphConvolution(nhid, nclass)
47 | self.dropout = dropout
48 |
49 | def forward(self, x, adj):
50 | x = F.relu(self.gc1(x, adj))
51 | x = F.dropout(x, self.dropout, training=self.training)
52 | # x = F.relu(self.gc12(x, adj))
53 | # x = F.dropout(x, self.dropout, training=self.training)
54 | x = self.gc2(x, adj)
55 | return F.log_softmax(x, dim=1)
56 |
57 |
58 | #class Net(torch.nn.Module):
59 | # def __init__(self,dropout=0.6):
60 | # super(Net, self).__init__()
61 | # self.sct1 = SCAT_Red(in_features=1639,med_f0=40,med_f1=20,med_f2=20,med_f3=20,med_f4=20)
62 | # self.sct2 = SCAT_Red(in_features=120,med_f0=40,med_f1=20,med_f2=20,med_f3=20,med_f4=20)
63 | # self.res1 = GC_withres(120,4,smooth=0.1)
64 | # self.dropout = dropout
65 | # def forward(self):
66 | # x = torch.FloatTensor.abs_(self.sct1(data.x,A_tilde= A_tilde,adj = adj))**1
67 | # x = torch.FloatTensor.abs_(self.sct2(x,A_tilde= A_tilde,adj = adj))**1
68 | # x = F.dropout(x, self.dropout, training=self.training)
69 | # x = self.res1(x, A_tilde)
70 | # return F.log_softmax(x, dim=1)
71 | import numpy as np
72 | features = data.x
  73 | features = torch.FloatTensor(np.array(features)).to(device)
74 |
75 | model = GCN(nfeat=1639,nhid=64,nclass=4,dropout=0.3)
76 | model, data = model.to(device), data.to(device)
77 | optimizer = torch.optim.Adam(model.parameters(), lr=0.05, weight_decay=5e-4)
78 | scheduler = StepLR(optimizer, step_size=100, gamma=0.9)
79 |
80 |
81 | def train():
82 | model.train()
83 | # output = model(features, adj)
84 | output = model(features,A_tilde)
85 | optimizer.zero_grad()
86 | F.nll_loss(output[train_mask], data.y[train_mask]).backward()
87 | optimizer.step()
88 |
89 |
90 | def test():
91 | model.eval()
  92 | logits = model(features, A_tilde)  # evaluate with the same normalized adjacency used in train()
93 | accs = []
94 | for mask in [train_mask, val_mask,test_mask]:
95 | pred = logits[mask].max(1)[1]
96 | acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
97 | accs.append(acc)
98 | return accs
99 |
100 | import time
101 | accu_list = []
102 | time_list = []
103 | start_time = time.time()
104 |
105 | for epoch in range(1, 2001):
106 | train()
107 | log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
108 | print(log.format(epoch, *test()))
109 | val_acc = test()[1]
110 | # print(val_acc)
111 | accu_list.append(float(val_acc))
112 | time_list.append(time.time()-start_time)
113 | scheduler.step()
114 | import numpy as np
115 | #np.savetxt('sct_time.txt',time_list)
116 | #np.savetxt('sct_accu.txt',accu_list)
117 |
118 |
--------------------------------------------------------------------------------
/DBLP/DBLP_SCT.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import scipy.sparse as sp
3 | import argparse
4 | import torch
5 | import torch.nn.functional as F
6 | import torch.nn as nn
7 | from torch_geometric.datasets import CitationFull
8 | from torch_geometric.utils import to_scipy_sparse_matrix
9 | import torch_geometric.transforms as T
10 | path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'DBLP')
11 | from torch_geometric.utils import to_scipy_sparse_matrix
12 | from utils import normalize_adjacency_matrix,normalizemx
13 | from DBLP_utils import SCAT_Red
14 | from utils import normalize_adjacency_matrix,sparse_mx_to_torch_sparse_tensor
15 | from layers import GC_withres
16 | #from torch_geometric.nn import GATConv
17 | from torch.optim.lr_scheduler import MultiStepLR,StepLR
18 |
19 | #dataset = TUDataset(root= path,name='REDDIT-BINARY')
20 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
21 |
22 | dataset = CitationFull(path,name = 'dblp',transform=T.TargetIndegree())
23 | data = dataset[0]
24 | # Num of feat:1639
25 | adj = to_scipy_sparse_matrix(edge_index = data.edge_index)
26 | adj = adj+ adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
27 | A_tilde = sparse_mx_to_torch_sparse_tensor(normalize_adjacency_matrix(adj,sp.eye(adj.shape[0]))).to(device)
28 | adj = sparse_mx_to_torch_sparse_tensor(adj).to(device)
29 | #print(dataset)
30 | #print(data.x.shape)
31 | #print(data.y.shape)
32 |
33 |
34 | #tp = SCAT_Red(in_features=1639,med_f0=10,med_f1=10,med_f2=10,med_f3=10,med_f4=10).to(device)
35 | #tp2 = SCAT_Red(in_features=40,med_f0=30,med_f1=10,med_f2=10,med_f3=10,med_f4=10).to(device)
36 | train_mask = torch.cat((torch.ones(10000),torch.zeros(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
37 | val_mask = torch.cat((torch.zeros(10000),torch.ones(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
38 | test_mask = torch.cat((torch.zeros(10000),torch.zeros(2000),torch.ones(2000),torch.zeros(3716)),0)>0
39 |
40 |
41 | class Net(torch.nn.Module):
42 | def __init__(self,dropout=0.6):
43 | super(Net, self).__init__()
44 | self.sct1 = SCAT_Red(in_features=1639,med_f0=40,med_f1=20,med_f2=20,med_f3=20,med_f4=20)
45 | self.sct2 = SCAT_Red(in_features=120,med_f0=40,med_f1=20,med_f2=20,med_f3=20,med_f4=20)
46 | self.res1 = GC_withres(120,4,smooth=0.1)
47 | self.dropout = dropout
48 | def forward(self):
49 | x = torch.FloatTensor.abs_(self.sct1(data.x,A_tilde= A_tilde,adj = adj))**1
50 | x = torch.FloatTensor.abs_(self.sct2(x,A_tilde= A_tilde,adj = adj))**1
51 | x = F.dropout(x, self.dropout, training=self.training)
52 | x = self.res1(x, A_tilde)
53 | return F.log_softmax(x, dim=1)
54 |
55 | model, data = Net().to(device), data.to(device)
56 | optimizer = torch.optim.Adam(model.parameters(), lr=0.1, weight_decay=5e-4)
57 | scheduler = MultiStepLR(optimizer, milestones=[10], gamma=0.5)
58 |
59 |
60 | def train():
61 | model.train()
62 | optimizer.zero_grad()
63 | F.nll_loss(model()[train_mask], data.y[train_mask]).backward()
64 | optimizer.step()
65 |
66 |
67 | def test():
68 | model.eval()
69 | logits, accs = model(), []
70 | for mask in [train_mask, val_mask,test_mask]:
71 | pred = logits[mask].max(1)[1]
72 | acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
73 | accs.append(acc)
74 | return accs
75 |
76 | import time
77 | accu_list = []
78 | time_list = []
79 | start_time = time.time()
80 |
81 | for epoch in range(1, 101):
82 | train()
83 | log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
84 | print(log.format(epoch, *test()))
85 | val_acc = test()[1]
86 | print(val_acc)
87 | accu_list.append(float(val_acc))
88 | time_list.append(time.time()-start_time)
89 | scheduler.step()
90 | import numpy as np
91 | np.savetxt('sct_time.txt',time_list)
92 | np.savetxt('sct_accu.txt',accu_list)
93 |
94 |
--------------------------------------------------------------------------------
/DBLP/DBLP_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.sparse
3 | import math
4 | import numpy as np
5 | import scipy.sparse as sp
6 | from scipy.sparse import csr_matrix
7 | import torch.nn.functional as F
8 | from torch.nn.parameter import Parameter
9 | from torch.nn.modules.module import Module
10 | import torch.nn as nn
11 | from utils import *
12 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
13 |
14 | # def normalize_adjacency_tensor_matrix(A, I_n):
15 | # # A is a torch sparse tensor
16 | # A_tilde = torch.sparse.FloatTensor.add(A,I_n)
17 | # degrees = torch.sparse.sum(A_tilde,dim=0).to_dense().pow(-0.5)
18 | # D_inv = torch.sparse_coo_tensor(I_n._indices(),degrees,size=I_n.size())
19 | # A_tilde_hat = torch.sparse.mm(A_tilde,D_inv.to_dense())
20 | # A_tilde_hat = torch.sparse.mm(D_inv.to_sparse(),A_tilde_hat)
21 | # return A_tilde_hat
22 |
23 |
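  # Note: the function below right-multiplies by D^(-1), i.e. it returns the
  # column-normalized random-walk matrix A D^(-1) as a dense tensor.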
24 | def normalizem_tentor_mx(mx,I_n):
25 | # mx is a torch sparse tensor
26 | degrees = torch.sparse.sum(mx,dim=0).to_dense().pow(-1)
27 | D_inv = torch.sparse_coo_tensor(I_n._indices(),degrees,size=I_n.size())
28 | # torch.sparse_coo_tensor(t._indices(), t._values(), size = (n,n))
29 | # turn degree into a dense tensor
30 | mx = torch.sparse.mm(mx,D_inv.to_dense())
31 | # return a dense tensor
32 | return mx
33 |
34 | def red_gene_sct(sparse_tensor,dense_tensor,order):
35 | for i in range(0,order):
36 | dense_tensor = torch.sparse.mm(sparse_tensor,dense_tensor)
37 | return dense_tensor
38 | class SCAT_Red(nn.Module):
39 | def __init__(self,in_features,med_f0,med_f1,med_f2,med_f3,med_f4,bias=True):
40 | super(SCAT_Red, self).__init__()
41 | # self.features = features
42 | self.in_features = in_features
43 | # self.adjacency_mx = adjacency_mx
44 | self.med_f0 = med_f0
45 | self.med_f1 = med_f1
46 | self.med_f2 = med_f2
47 | self.bias = bias
48 | self.med_f3 = med_f3
49 | self.med_f4 = med_f4
50 | # features shape (N_of_nodes,N_of_feature)
51 | # adjacency_mx shape(N_of_nodes,N_of_nodes)
52 | # in_features is N_of_feature
53 | self.weight0 = Parameter(torch.FloatTensor(in_features, med_f0))
54 | self.weight1 = Parameter(torch.FloatTensor(in_features, med_f1))
55 | self.weight2 = Parameter(torch.FloatTensor(in_features, med_f2))
56 | self.weight3 = Parameter(torch.FloatTensor(in_features, med_f3))
57 | self.weight4 = Parameter(torch.FloatTensor(in_features, med_f4))
58 | if bias:
59 | print('Processing first three')
60 | self.bias1 = Parameter(torch.FloatTensor(med_f1))
61 | self.bias0 = Parameter(torch.FloatTensor(med_f0))
62 | self.bias2 = Parameter(torch.FloatTensor(med_f2))
63 | self.bias3 = Parameter(torch.FloatTensor(med_f3))
64 | self.bias4 = Parameter(torch.FloatTensor(med_f4))
65 |
66 | else:
67 | self.register_parameter('bias', None)
68 | self.reset_parameters()
69 | def reset_parameters(self):
70 | stdv0 = 1. / math.sqrt(self.weight0.size(1))
71 | stdv1 = 1. / math.sqrt(self.weight1.size(1))
72 | stdv2 = 1. / math.sqrt(self.weight2.size(1))
73 |
74 | stdv3 = 1. / math.sqrt(self.weight3.size(1))
75 | stdv4 = 1. / math.sqrt(self.weight4.size(1))
76 | torch.nn.init.xavier_uniform_(self.weight0)
77 | torch.nn.init.xavier_uniform_(self.weight2)
78 | torch.nn.init.xavier_uniform_(self.weight1)
79 | torch.nn.init.xavier_uniform_(self.weight3)
80 | torch.nn.init.xavier_uniform_(self.weight4)
81 | if self.bias is not None:
82 | self.bias1.data.uniform_(-stdv1, stdv1)
83 | self.bias0.data.uniform_(-stdv0, stdv0)
84 | self.bias2.data.uniform_(-stdv2, stdv2)
85 | self.bias3.data.uniform_(-stdv3, stdv3)
86 | self.bias4.data.uniform_(-stdv4, stdv4)
87 | def forward(self,features,A_tilde,adj,order1 = 1,order2 = 2):
88 | # adj is extracted from the graph structure
89 | # features: torch tensor
90 | # adjacency_mx: sparse tensor
91 | # adj = adjacency_mx
92 | # adj = adj+ adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
93 | # A_tilde = normalize_adjacency_matrix(adj,sp.eye(adj.shape[0]))
94 | # adj = normalizemx(adj)
95 | I_n = sp.eye(adj.size()[0]) # requires GPU here
96 | I_n = sparse_mx_to_torch_sparse_tensor(I_n).to(device)
97 | input = features
98 | # A_tilde = normalize_adjacency_tensor_matrix(adj,I_n)
99 | # A_tilde = A_tilde
100 | # A_tilde = sparse_mx_to_torch_sparse_tensor(A_tilde)
101 | # do A \cdot Feature_Matrix
102 | # torch.sparse.mm(mat1, mat2) sparse matrix mat1 and dense matrix mat2
103 | # print('Processing first three')
104 | support0 = torch.mm(torch.sparse.mm(A_tilde,input),self.weight0) + self.bias0
105 | support1 = torch.mm(torch.sparse.mm(A_tilde,torch.sparse.mm(A_tilde,input)),self.weight1) + self.bias1
106 | support2 = torch.mm(torch.sparse.mm(A_tilde,torch.sparse.mm(A_tilde,torch.sparse.mm(A_tilde,input))),self.weight2) + self.bias2
107 |
108 |
109 | # torch.sparse.FloatTensor.mul_(A,B)
110 | # A,B sparse tensor
 111 |         # A and B have to have the same size (n,n); performs A \cdot B
112 | # return a sparse tensor
113 | # torch.sparse.FloatTensor.matmul(mx,mx.to_dense())
114 | # support0 = torch.sparse.FloatTensor.matmul(torch.sparse.mm(A_tilde,input),self.weight0) + self.bias0
115 | # support1 = torch.sparse.FloatTensor.matmul(torch.sparse.FloatTensor.matmul(A_tilde,torch.sparse.FloatTensor.matmul(A_tilde,input)).to_sparse(),self.weight1) + self.bias1
116 | # # support1 = torch.sparse.mm(torch.sparse.FloatTensor.mul_(A_tilde,torch.sparse.mm(A_tilde,input)),self.weight1) + self.bias1
117 | # support2 = torch.sparse.mm(torch.sparse.FloatTensor.mul_(A_tilde,torch.sparse.FloatTensor.mul_(A_tilde,torch.sparse.mm(A_tilde,input))),self.weight2) + self.bias2
118 | #
119 | #
120 |
121 |
122 |
123 | # scattering 1
 124 |         # generate the first scattering feature layer
125 | # input adj: a torch tensor
126 | adj = normalizem_tentor_mx(adj,I_n) #A \cdot D^(-1)
127 | adj_power = 0.5 * torch.sparse.FloatTensor.add(adj.to_sparse(),I_n) # the P, transfer adj to adj.to_sparse() saves a lots of time
128 | support3 = torch.mm(red_gene_sct(adj_power,input,order1),self.weight3)-\
129 | torch.mm(red_gene_sct(adj_power,input,2*order1),self.weight3)
130 | support3 = support3 + self.bias3
131 |
132 |
133 | support4 = torch.mm(red_gene_sct(adj_power,input,order2),self.weight4)-\
134 | torch.mm(red_gene_sct(adj_power,input,2*order2),self.weight4)
135 | support4 = support4 + self.bias4
136 |
137 | # support2 = torch.sparse.mm(torch.sparse.mm(A_tilde,torch.sparse.mm(A_tilde,torch.sparse.mm(A_tilde,input))),self.weight2) + self.bias2
138 | support_3hop = torch.cat((support0,support1,support2,support3,support4), 1)
139 |
140 | output_3hop = support_3hop
141 | return output_3hop
142 |
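# Note: SCAT_Red concatenates five channels per node:
#   - three low-pass GCN-style channels  A_tilde^k X W_k  for k = 1, 2, 3 (support0..support2),
#   - two band-pass scattering channels built from the lazy random walk P = 0.5*(A D^(-1) + I),
#     (P^t - P^(2t)) X W  with t = order1 and t = order2 (support3, support4).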
--------------------------------------------------------------------------------
/DBLP/README.md:
--------------------------------------------------------------------------------
1 | ```
2 | python DBLP_SCT.py
3 | ```
4 |
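DBLP_SCT.py downloads the DBLP citation graph via `torch_geometric.datasets.CitationFull` and trains the scattering model with a fixed node split of 10000 train / 2000 validation / 2000 test nodes (the remaining 3716 nodes are unused); the masks are defined directly in the script:

```
train_mask = torch.cat((torch.ones(10000),torch.zeros(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
val_mask = torch.cat((torch.zeros(10000),torch.ones(2000),torch.zeros(2000),torch.zeros(3716)),0)>0
test_mask = torch.cat((torch.zeros(10000),torch.zeros(2000),torch.ones(2000),torch.zeros(3716)),0)>0
```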
--------------------------------------------------------------------------------
/DBLP/layers.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import scipy.sparse as sp
4 | from scipy.sparse import csr_matrix
5 |
6 |
7 | import torch.nn.functional as F
8 | import torch
9 | from torch.nn.parameter import Parameter
10 | from torch.nn.modules.module import Module
11 | from utils_sct import sparse_mx_to_torch_sparse_tensor
12 | from utils_sct import normalize
13 |
14 | class GraphConvolution(Module):
15 | """
16 | Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
17 | """
18 | def __init__(self, in_features, out_features, bias=True):
19 | super(GraphConvolution, self).__init__()
20 | self.in_features = in_features
21 | self.out_features = out_features
22 | self.weight = Parameter(torch.FloatTensor(in_features, out_features))
23 | if bias:
24 | self.bias = Parameter(torch.FloatTensor(out_features))
25 | else:
26 | self.register_parameter('bias', None)
27 | self.reset_parameters()
28 | def reset_parameters(self):
29 | stdv = 1. / math.sqrt(self.weight.size(1))
30 | self.weight.data.uniform_(-stdv, stdv)
31 | if self.bias is not None:
32 | self.bias.data.uniform_(-stdv, stdv)
33 | def forward(self, input, adj):
34 | # adj is extracted from the graph structure
35 | support = torch.mm(input, self.weight)
36 | output = torch.spmm(adj, support)
37 | if self.bias is not None:
38 | return output + self.bias
39 | else:
40 | return output
41 | def __repr__(self):
42 | return self.__class__.__name__ + ' (' \
43 | + str(self.in_features) + ' -> ' \
44 | + str(self.out_features) + ')'
45 |
46 |
47 |
48 |
49 | class GC_withres(Module):
50 | """
  51 |     GCN layer with a smoothed residual (identity) connection, similar to https://arxiv.org/abs/1609.02907
52 | """
53 | def __init__(self, in_features, out_features,smooth,bias=True):
54 | super(GC_withres, self).__init__()
55 | self.in_features = in_features
56 | self.out_features = out_features
57 | self.smooth = smooth
58 | self.weight = Parameter(torch.FloatTensor(in_features, out_features))
59 | self.bias = Parameter(torch.FloatTensor(out_features))
60 | self.reset_parameters()
61 | def reset_parameters(self):
62 | stdv = 1. / math.sqrt(self.weight.size(1))
63 | self.weight.data.uniform_(-stdv, stdv)
64 | self.bias.data.uniform_(-stdv, stdv)
65 | def forward(self, input, adj):
66 | # adj is extracted from the graph structure
67 | support = torch.mm(input, self.weight)
68 | I_n = sp.eye(adj.shape[0])
69 | I_n = sparse_mx_to_torch_sparse_tensor(I_n).cuda()
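        # propagate with a smoothed residual: output = ((I + smooth * adj) / (1 + smooth)) @ (X W) + b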
70 | output = torch.spmm((self.smooth*adj+I_n)/(1+self.smooth), support) + self.bias
71 | return output
72 | def __repr__(self):
73 | return self.__class__.__name__ + ' (' \
74 | + str(self.in_features) + ' -> ' \
75 | + str(self.out_features) + ')'
76 |
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/DBLP/normalization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 |
5 | def aug_normalized_adjacency(adj):
6 | adj = adj + sp.eye(adj.shape[0])
7 | adj = sp.coo_matrix(adj)
8 | row_sum = np.array(adj.sum(1))
9 | d_inv_sqrt = np.power(row_sum, -0.5).flatten()
10 | d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
11 | d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
12 | return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo()
13 |
14 | def fetch_normalization(type):
15 | switcher = {
16 | 'AugNormAdj': aug_normalized_adjacency, # A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
17 | }
18 | func = switcher.get(type, lambda: "Invalid normalization technique.")
19 | return func
20 |
21 | def row_normalize(mx):
22 | """Row-normalize sparse matrix"""
23 | rowsum = np.array(mx.sum(1))
24 | r_inv = np.power(rowsum, -1).flatten()
25 | r_inv[np.isinf(r_inv)] = 0.
26 | r_mat_inv = sp.diags(r_inv)
27 | mx = r_mat_inv.dot(mx)
28 | return mx
29 |
--------------------------------------------------------------------------------
/DBLP/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 | import sys
5 | import pickle as pkl
6 | import networkx as nx
7 | from normalization import fetch_normalization, row_normalize
8 | from time import perf_counter
9 | def normalize_adjacency_matrix(A, I):
10 | """
11 | Creating a normalized adjacency matrix with self loops.
12 | :param A: Sparse adjacency matrix.
13 | :param I: Identity matrix.
  14 |     :return A_tilde_hat: Normalized adjacency matrix.
15 | """
16 | A_tilde = A + I
17 | degrees = A_tilde.sum(axis=0)[0].tolist()
18 | D = sp.diags(degrees, [0])
19 | D = D.power(-0.5)
20 | A_tilde_hat = D.dot(A_tilde).dot(D)
21 | return A_tilde_hat
22 | def normalize(mx):
23 | """Row-normalize sparse matrix"""
24 | rowsum = np.array(mx.sum(1))
25 | r_inv = np.power(rowsum, -1).flatten()
26 | r_inv[np.isinf(r_inv)] = 0.
27 | r_mat_inv = sp.diags(r_inv)
28 | mx = r_mat_inv.dot(mx)
29 | return mx
30 |
31 | def normalizemx(mx):
32 | degrees = mx.sum(axis=0)[0].tolist()
33 | # print(degrees)
34 | D = sp.diags(degrees, [0])
35 | D = D.power(-1)
36 | mx = mx.dot(D)
37 | return mx
38 |
39 | def scattering1st(spmx,order):
40 | # print(type(spmx))
41 | I_n = sp.eye(spmx.shape[0])
42 | adj_sct = 0.5*(spmx+I_n)
43 | adj_power = adj_sct
44 | adj_power = sparse_mx_to_torch_sparse_tensor(adj_power)
45 | adj_sct = sparse_mx_to_torch_sparse_tensor(adj_sct)
46 | I_n = sparse_mx_to_torch_sparse_tensor(I_n)
47 | for i in range(order-1):
48 | adj_power = torch.spmm(adj_power.cuda(),adj_sct.cuda().to_dense())
49 | adj_int = torch.spmm((adj_power-I_n.cuda()),adj_power.cuda())
50 | # adj_int.data=abs(adj_int.data)
51 | #return -1*adj_int
52 | return adj_int
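# Note: with P = 0.5*(A D^(-1) + I) (lazy random walk) and t = order, the matrix
# returned above is (P^t - I) P^t = -(P^t - P^(2t)), i.e. the band-pass scattering
# wavelet up to sign; the sign is immaterial under the |.|^q activation applied downstream.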
53 |
54 | def scattering2nd(m1,m2):
55 | _m2 = m2
56 | _m2.data=abs(_m2.data)
57 | m3 = torch.spmm(m1,_m2)
58 | # m3.data = abs(m3.data)
59 | # adj_power.data = abs(adj_power.data)
60 | return m3
61 |
62 | def parse_index_file(filename):
63 | """Parse index file."""
64 | index = []
65 | for line in open(filename):
66 | index.append(int(line.strip()))
67 | return index
68 |
69 | def preprocess_citation(adj, features, normalization="FirstOrderGCN"):
70 | adj_normalizer = fetch_normalization(normalization)
71 | adj = adj_normalizer(adj)
72 | features = row_normalize(features)
73 | return adj, features
74 |
75 | def sparse_mx_to_torch_sparse_tensor(sparse_mx):
76 | """Convert a scipy sparse matrix to a torch sparse tensor."""
77 | sparse_mx = sparse_mx.tocoo().astype(np.float32)
78 | indices = torch.from_numpy(
79 | np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
80 | values = torch.from_numpy(sparse_mx.data)
81 | shape = torch.Size(sparse_mx.shape)
82 | return torch.sparse.FloatTensor(indices, values, shape)
83 |
84 | def load_citation(dataset_str="cora", normalization="AugNormAdj", cuda=True):
85 | """
86 | Load Citation Networks Datasets.
87 | """
88 | names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
89 | objects = []
90 | for i in range(len(names)):
91 | with open("data/ind.{}.{}".format(dataset_str.lower(), names[i]), 'rb') as f:
92 | if sys.version_info > (3, 0):
93 | objects.append(pkl.load(f, encoding='latin1'))
94 | else:
95 | objects.append(pkl.load(f))
96 |
97 | x, y, tx, ty, allx, ally, graph = tuple(objects)
98 | test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
99 | test_idx_range = np.sort(test_idx_reorder)
100 |
101 | if dataset_str == 'citeseer':
102 | # Fix citeseer dataset (there are some isolated nodes in the graph)
103 | # Find isolated nodes, add them as zero-vecs into the right position
104 | test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
105 | tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
106 | tx_extended[test_idx_range-min(test_idx_range), :] = tx
107 | tx = tx_extended
108 | ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
109 | ty_extended[test_idx_range-min(test_idx_range), :] = ty
110 | ty = ty_extended
111 |
112 | features = sp.vstack((allx, tx)).tolil()
113 | features[test_idx_reorder, :] = features[test_idx_range, :]
114 | adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
115 | adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
116 | labels = np.vstack((ally, ty))
117 | labels[test_idx_reorder, :] = labels[test_idx_range, :]
118 |
119 | idx_test = test_idx_range.tolist()
120 | idx_train = range(len(y))
121 | idx_val = range(len(y), len(y)+500)
122 |
123 | # adj, features = preprocess_citation(adj, features, normalization)
124 |
125 | # porting to pytorch
126 | #features = torch.FloatTensor(np.array(features.todense())).float()
127 | labels = torch.LongTensor(labels)
128 | labels = torch.max(labels, dim=1)[1]
129 | #adj = sparse_mx_to_torch_sparse_tensor(adj).float()
130 | idx_train = torch.LongTensor(idx_train)
131 | idx_val = torch.LongTensor(idx_val)
132 | idx_test = torch.LongTensor(idx_test)
133 |
134 | features = normalize(features)
135 | A_tilde = normalize_adjacency_matrix(adj,sp.eye(adj.shape[0]))
136 | adj = normalizemx(adj)
137 | features = torch.FloatTensor(np.array(features.todense()))
138 |
139 | print('Loading')
140 | adj_sct1 = scattering1st(adj,2)
141 | adj_sct2 = scattering1st(adj,4)
142 | adj_sct4 = scattering1st(adj,8)
143 | adj_sct8=scattering2nd(adj_sct4,adj_sct2)
144 | adj_sct16=scattering2nd(adj_sct2,adj_sct1)
145 |
146 | adj = sparse_mx_to_torch_sparse_tensor(adj)
147 | A_tilde = sparse_mx_to_torch_sparse_tensor(A_tilde)
148 | return adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,adj_sct8,adj_sct16, features, labels, idx_train, idx_val, idx_test
149 |
150 | def sgc_precompute(features, adj, degree):
151 | t = perf_counter()
152 | for i in range(degree):
153 | features = torch.spmm(adj, features)
154 | precompute_time = perf_counter()-t
155 | return features, precompute_time
156 |
157 | def set_seed(seed, cuda):
158 | np.random.seed(seed)
159 | torch.manual_seed(seed)
160 | if cuda: torch.cuda.manual_seed(seed)
161 |
162 | def loadRedditFromNPZ(dataset_dir):
163 | adj = sp.load_npz(dataset_dir+"reddit_adj.npz")
164 | data = np.load(dataset_dir+"reddit.npz")
165 |
166 | return adj, data['feats'], data['y_train'], data['y_val'], data['y_test'], data['train_index'], data['val_index'], data['test_index']
167 | def accuracy(output, labels):
168 | preds = output.max(1)[1].type_as(labels)
169 | correct = preds.eq(labels).double()
170 | correct = correct.sum()
171 | return correct / len(labels)
172 |
--------------------------------------------------------------------------------
/DBLP/utils_sct.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 |
5 | def normalize(mx):
6 | """Row-normalize sparse matrix"""
7 | rowsum = np.array(mx.sum(1))
8 | r_inv = np.power(rowsum, -1).flatten()
9 | r_inv[np.isinf(r_inv)] = 0.
10 | r_mat_inv = sp.diags(r_inv)
11 | mx = r_mat_inv.dot(mx)
12 | return mx
13 |
14 |
15 |
16 | def sparse_mx_to_torch_sparse_tensor(sparse_mx):
17 | """Convert a scipy sparse matrix to a torch sparse tensor."""
18 | sparse_mx = sparse_mx.tocoo().astype(np.float32)
19 | indices = torch.from_numpy(
20 | np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
21 | values = torch.from_numpy(sparse_mx.data)
22 | shape = torch.Size(sparse_mx.shape)
23 | return torch.sparse.FloatTensor(indices, values, shape)
24 |
--------------------------------------------------------------------------------
/Figures/h116h251.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/Figures/h116h251.png
--------------------------------------------------------------------------------
/Figures/readme:
--------------------------------------------------------------------------------
1 | Figures
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Scattering GCN
2 |
3 |
4 |
   5 | The follow-up work is here:
   6 | (an attention-based architecture that produces adaptive node representations)
   7 |
   8 | https://arxiv.org/abs/2010.15010 (to appear at ICASSP)
9 | https://github.com/dms-net/Attention-based-Scattering
10 |
11 | ```
12 | python train.py
13 | ```
14 | ```
15 | @article{min2020scattering,
16 | title={Scattering GCN: Overcoming Oversmoothness in Graph Convolutional Networks},
17 | author={Min, Yimeng and Wenkel, Frederik and Wolf, Guy},
18 | journal={arXiv preprint arXiv:2003.08414},
19 | year={2020}
20 | }
21 | ```
  22 | During training, we found that we can assign different channel widths and achieve similar performance (sometimes it even seems better):
  23 | e.g. here is the training history for hid1:16, hid2:51, dropout:0.92
24 | ```
25 | python train.py --hid1 16 --hid2 51 --dropout 0.92
26 | ```
27 | 
28 |
  29 | The highest validation accuracy (at epoch 175) corresponds to a test accuracy of 84.2.
  30 | During the grid search, we search over the widths of the scattering channels, the dropout rate, and the smoothing parameter of the graph residual layer (a minimal sweep sketch is shown after this paragraph). Tuning the widths of the three low-pass channels may also yield better performance.
  31 | Very different widths, e.g. `python train.py --hid1 5 --hid2 50 --smoo 0.6`, can also perform relatively well on Cora.
  32 | Scattering GCN relies on a handcrafted design and requires careful selection of frequency bands.
  33 | We recommend the attention-based scattering model, which learns node-wise weights for combining multiple scattering and GCN channels, though this may hurt performance.
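A minimal sketch of such a sweep (the values below are illustrative; it only assumes the `--hid1`, `--hid2`, `--smoo`, and `--dropout` flags shown above):
```
import itertools
import subprocess

# illustrative grid; the exact ranges used in our search are not reproduced here
for hid1, hid2, smoo, dropout in itertools.product([5, 16, 30], [30, 51], [0.1, 0.3, 0.6], [0.9, 0.92]):
    subprocess.run(
        ["python", "train.py",
         "--hid1", str(hid1), "--hid2", str(hid2),
         "--smoo", str(smoo), "--dropout", str(dropout)],
        check=True,
    )
```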
34 |
  35 | Another thing we want to re-emphasize is that the activation used in this paper is |·|^q; we do not use ReLU/tanh, etc.
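In code this is just an element-wise absolute value raised to a power q (cf. `DBLP/DBLP_SCT.py`, where q = 1):
```
import torch

q = 1  # exponent of the |.|^q activation; a tunable hyperparameter
x = torch.randn(4, 3)
x = torch.abs(x) ** q  # used in place of ReLU/tanh
```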
36 |
37 |
  38 | ## Requirements:
  39 | PyTorch\
  40 | CUDA\
  41 | SciPy: for the sparse matrix operations
42 |
43 | ## Reference
44 | https://github.com/tkipf/pygcn \
45 | https://github.com/PetarV-/GAT \
46 | https://github.com/liqimai/Efficient-SSL
47 |
48 |
--------------------------------------------------------------------------------
/data/ind.citeseer.allx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.allx
--------------------------------------------------------------------------------
/data/ind.citeseer.ally:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.ally
--------------------------------------------------------------------------------
/data/ind.citeseer.graph:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.graph
--------------------------------------------------------------------------------
/data/ind.citeseer.test.index:
--------------------------------------------------------------------------------
1 | 2488
2 | 2644
3 | 3261
4 | 2804
5 | 3176
6 | 2432
7 | 3310
8 | 2410
9 | 2812
10 | 2520
11 | 2994
12 | 3282
13 | 2680
14 | 2848
15 | 2670
16 | 3005
17 | 2977
18 | 2592
19 | 2967
20 | 2461
21 | 3184
22 | 2852
23 | 2768
24 | 2905
25 | 2851
26 | 3129
27 | 3164
28 | 2438
29 | 2793
30 | 2763
31 | 2528
32 | 2954
33 | 2347
34 | 2640
35 | 3265
36 | 2874
37 | 2446
38 | 2856
39 | 3149
40 | 2374
41 | 3097
42 | 3301
43 | 2664
44 | 2418
45 | 2655
46 | 2464
47 | 2596
48 | 3262
49 | 3278
50 | 2320
51 | 2612
52 | 2614
53 | 2550
54 | 2626
55 | 2772
56 | 3007
57 | 2733
58 | 2516
59 | 2476
60 | 2798
61 | 2561
62 | 2839
63 | 2685
64 | 2391
65 | 2705
66 | 3098
67 | 2754
68 | 3251
69 | 2767
70 | 2630
71 | 2727
72 | 2513
73 | 2701
74 | 3264
75 | 2792
76 | 2821
77 | 3260
78 | 2462
79 | 3307
80 | 2639
81 | 2900
82 | 3060
83 | 2672
84 | 3116
85 | 2731
86 | 3316
87 | 2386
88 | 2425
89 | 2518
90 | 3151
91 | 2586
92 | 2797
93 | 2479
94 | 3117
95 | 2580
96 | 3182
97 | 2459
98 | 2508
99 | 3052
100 | 3230
101 | 3215
102 | 2803
103 | 2969
104 | 2562
105 | 2398
106 | 3325
107 | 2343
108 | 3030
109 | 2414
110 | 2776
111 | 2383
112 | 3173
113 | 2850
114 | 2499
115 | 3312
116 | 2648
117 | 2784
118 | 2898
119 | 3056
120 | 2484
121 | 3179
122 | 3132
123 | 2577
124 | 2563
125 | 2867
126 | 3317
127 | 2355
128 | 3207
129 | 3178
130 | 2968
131 | 3319
132 | 2358
133 | 2764
134 | 3001
135 | 2683
136 | 3271
137 | 2321
138 | 2567
139 | 2502
140 | 3246
141 | 2715
142 | 3066
143 | 2390
144 | 2381
145 | 3162
146 | 2741
147 | 2498
148 | 2790
149 | 3038
150 | 3321
151 | 2481
152 | 3050
153 | 3161
154 | 3122
155 | 2801
156 | 2957
157 | 3177
158 | 2965
159 | 2621
160 | 3208
161 | 2921
162 | 2802
163 | 2357
164 | 2677
165 | 2519
166 | 2860
167 | 2696
168 | 2368
169 | 3241
170 | 2858
171 | 2419
172 | 2762
173 | 2875
174 | 3222
175 | 3064
176 | 2827
177 | 3044
178 | 2471
179 | 3062
180 | 2982
181 | 2736
182 | 2322
183 | 2709
184 | 2766
185 | 2424
186 | 2602
187 | 2970
188 | 2675
189 | 3299
190 | 2554
191 | 2964
192 | 2597
193 | 2753
194 | 2979
195 | 2523
196 | 2912
197 | 2896
198 | 2317
199 | 3167
200 | 2813
201 | 2482
202 | 2557
203 | 3043
204 | 3244
205 | 2985
206 | 2460
207 | 2363
208 | 3272
209 | 3045
210 | 3192
211 | 2453
212 | 2656
213 | 2834
214 | 2443
215 | 3202
216 | 2926
217 | 2711
218 | 2633
219 | 2384
220 | 2752
221 | 3285
222 | 2817
223 | 2483
224 | 2919
225 | 2924
226 | 2661
227 | 2698
228 | 2361
229 | 2662
230 | 2819
231 | 3143
232 | 2316
233 | 3196
234 | 2739
235 | 2345
236 | 2578
237 | 2822
238 | 3229
239 | 2908
240 | 2917
241 | 2692
242 | 3200
243 | 2324
244 | 2522
245 | 3322
246 | 2697
247 | 3163
248 | 3093
249 | 3233
250 | 2774
251 | 2371
252 | 2835
253 | 2652
254 | 2539
255 | 2843
256 | 3231
257 | 2976
258 | 2429
259 | 2367
260 | 3144
261 | 2564
262 | 3283
263 | 3217
264 | 3035
265 | 2962
266 | 2433
267 | 2415
268 | 2387
269 | 3021
270 | 2595
271 | 2517
272 | 2468
273 | 3061
274 | 2673
275 | 2348
276 | 3027
277 | 2467
278 | 3318
279 | 2959
280 | 3273
281 | 2392
282 | 2779
283 | 2678
284 | 3004
285 | 2634
286 | 2974
287 | 3198
288 | 2342
289 | 2376
290 | 3249
291 | 2868
292 | 2952
293 | 2710
294 | 2838
295 | 2335
296 | 2524
297 | 2650
298 | 3186
299 | 2743
300 | 2545
301 | 2841
302 | 2515
303 | 2505
304 | 3181
305 | 2945
306 | 2738
307 | 2933
308 | 3303
309 | 2611
310 | 3090
311 | 2328
312 | 3010
313 | 3016
314 | 2504
315 | 2936
316 | 3266
317 | 3253
318 | 2840
319 | 3034
320 | 2581
321 | 2344
322 | 2452
323 | 2654
324 | 3199
325 | 3137
326 | 2514
327 | 2394
328 | 2544
329 | 2641
330 | 2613
331 | 2618
332 | 2558
333 | 2593
334 | 2532
335 | 2512
336 | 2975
337 | 3267
338 | 2566
339 | 2951
340 | 3300
341 | 2869
342 | 2629
343 | 2747
344 | 3055
345 | 2831
346 | 3105
347 | 3168
348 | 3100
349 | 2431
350 | 2828
351 | 2684
352 | 3269
353 | 2910
354 | 2865
355 | 2693
356 | 2884
357 | 3228
358 | 2783
359 | 3247
360 | 2770
361 | 3157
362 | 2421
363 | 2382
364 | 2331
365 | 3203
366 | 3240
367 | 2351
368 | 3114
369 | 2986
370 | 2688
371 | 2439
372 | 2996
373 | 3079
374 | 3103
375 | 3296
376 | 2349
377 | 2372
378 | 3096
379 | 2422
380 | 2551
381 | 3069
382 | 2737
383 | 3084
384 | 3304
385 | 3022
386 | 2542
387 | 3204
388 | 2949
389 | 2318
390 | 2450
391 | 3140
392 | 2734
393 | 2881
394 | 2576
395 | 3054
396 | 3089
397 | 3125
398 | 2761
399 | 3136
400 | 3111
401 | 2427
402 | 2466
403 | 3101
404 | 3104
405 | 3259
406 | 2534
407 | 2961
408 | 3191
409 | 3000
410 | 3036
411 | 2356
412 | 2800
413 | 3155
414 | 3224
415 | 2646
416 | 2735
417 | 3020
418 | 2866
419 | 2426
420 | 2448
421 | 3226
422 | 3219
423 | 2749
424 | 3183
425 | 2906
426 | 2360
427 | 2440
428 | 2946
429 | 2313
430 | 2859
431 | 2340
432 | 3008
433 | 2719
434 | 3058
435 | 2653
436 | 3023
437 | 2888
438 | 3243
439 | 2913
440 | 3242
441 | 3067
442 | 2409
443 | 3227
444 | 2380
445 | 2353
446 | 2686
447 | 2971
448 | 2847
449 | 2947
450 | 2857
451 | 3263
452 | 3218
453 | 2861
454 | 3323
455 | 2635
456 | 2966
457 | 2604
458 | 2456
459 | 2832
460 | 2694
461 | 3245
462 | 3119
463 | 2942
464 | 3153
465 | 2894
466 | 2555
467 | 3128
468 | 2703
469 | 2323
470 | 2631
471 | 2732
472 | 2699
473 | 2314
474 | 2590
475 | 3127
476 | 2891
477 | 2873
478 | 2814
479 | 2326
480 | 3026
481 | 3288
482 | 3095
483 | 2706
484 | 2457
485 | 2377
486 | 2620
487 | 2526
488 | 2674
489 | 3190
490 | 2923
491 | 3032
492 | 2334
493 | 3254
494 | 2991
495 | 3277
496 | 2973
497 | 2599
498 | 2658
499 | 2636
500 | 2826
501 | 3148
502 | 2958
503 | 3258
504 | 2990
505 | 3180
506 | 2538
507 | 2748
508 | 2625
509 | 2565
510 | 3011
511 | 3057
512 | 2354
513 | 3158
514 | 2622
515 | 3308
516 | 2983
517 | 2560
518 | 3169
519 | 3059
520 | 2480
521 | 3194
522 | 3291
523 | 3216
524 | 2643
525 | 3172
526 | 2352
527 | 2724
528 | 2485
529 | 2411
530 | 2948
531 | 2445
532 | 2362
533 | 2668
534 | 3275
535 | 3107
536 | 2496
537 | 2529
538 | 2700
539 | 2541
540 | 3028
541 | 2879
542 | 2660
543 | 3324
544 | 2755
545 | 2436
546 | 3048
547 | 2623
548 | 2920
549 | 3040
550 | 2568
551 | 3221
552 | 3003
553 | 3295
554 | 2473
555 | 3232
556 | 3213
557 | 2823
558 | 2897
559 | 2573
560 | 2645
561 | 3018
562 | 3326
563 | 2795
564 | 2915
565 | 3109
566 | 3086
567 | 2463
568 | 3118
569 | 2671
570 | 2909
571 | 2393
572 | 2325
573 | 3029
574 | 2972
575 | 3110
576 | 2870
577 | 3284
578 | 2816
579 | 2647
580 | 2667
581 | 2955
582 | 2333
583 | 2960
584 | 2864
585 | 2893
586 | 2458
587 | 2441
588 | 2359
589 | 2327
590 | 3256
591 | 3099
592 | 3073
593 | 3138
594 | 2511
595 | 2666
596 | 2548
597 | 2364
598 | 2451
599 | 2911
600 | 3237
601 | 3206
602 | 3080
603 | 3279
604 | 2934
605 | 2981
606 | 2878
607 | 3130
608 | 2830
609 | 3091
610 | 2659
611 | 2449
612 | 3152
613 | 2413
614 | 2722
615 | 2796
616 | 3220
617 | 2751
618 | 2935
619 | 3238
620 | 2491
621 | 2730
622 | 2842
623 | 3223
624 | 2492
625 | 3074
626 | 3094
627 | 2833
628 | 2521
629 | 2883
630 | 3315
631 | 2845
632 | 2907
633 | 3083
634 | 2572
635 | 3092
636 | 2903
637 | 2918
638 | 3039
639 | 3286
640 | 2587
641 | 3068
642 | 2338
643 | 3166
644 | 3134
645 | 2455
646 | 2497
647 | 2992
648 | 2775
649 | 2681
650 | 2430
651 | 2932
652 | 2931
653 | 2434
654 | 3154
655 | 3046
656 | 2598
657 | 2366
658 | 3015
659 | 3147
660 | 2944
661 | 2582
662 | 3274
663 | 2987
664 | 2642
665 | 2547
666 | 2420
667 | 2930
668 | 2750
669 | 2417
670 | 2808
671 | 3141
672 | 2997
673 | 2995
674 | 2584
675 | 2312
676 | 3033
677 | 3070
678 | 3065
679 | 2509
680 | 3314
681 | 2396
682 | 2543
683 | 2423
684 | 3170
685 | 2389
686 | 3289
687 | 2728
688 | 2540
689 | 2437
690 | 2486
691 | 2895
692 | 3017
693 | 2853
694 | 2406
695 | 2346
696 | 2877
697 | 2472
698 | 3210
699 | 2637
700 | 2927
701 | 2789
702 | 2330
703 | 3088
704 | 3102
705 | 2616
706 | 3081
707 | 2902
708 | 3205
709 | 3320
710 | 3165
711 | 2984
712 | 3185
713 | 2707
714 | 3255
715 | 2583
716 | 2773
717 | 2742
718 | 3024
719 | 2402
720 | 2718
721 | 2882
722 | 2575
723 | 3281
724 | 2786
725 | 2855
726 | 3014
727 | 2401
728 | 2535
729 | 2687
730 | 2495
731 | 3113
732 | 2609
733 | 2559
734 | 2665
735 | 2530
736 | 3293
737 | 2399
738 | 2605
739 | 2690
740 | 3133
741 | 2799
742 | 2533
743 | 2695
744 | 2713
745 | 2886
746 | 2691
747 | 2549
748 | 3077
749 | 3002
750 | 3049
751 | 3051
752 | 3087
753 | 2444
754 | 3085
755 | 3135
756 | 2702
757 | 3211
758 | 3108
759 | 2501
760 | 2769
761 | 3290
762 | 2465
763 | 3025
764 | 3019
765 | 2385
766 | 2940
767 | 2657
768 | 2610
769 | 2525
770 | 2941
771 | 3078
772 | 2341
773 | 2916
774 | 2956
775 | 2375
776 | 2880
777 | 3009
778 | 2780
779 | 2370
780 | 2925
781 | 2332
782 | 3146
783 | 2315
784 | 2809
785 | 3145
786 | 3106
787 | 2782
788 | 2760
789 | 2493
790 | 2765
791 | 2556
792 | 2890
793 | 2400
794 | 2339
795 | 3201
796 | 2818
797 | 3248
798 | 3280
799 | 2570
800 | 2569
801 | 2937
802 | 3174
803 | 2836
804 | 2708
805 | 2820
806 | 3195
807 | 2617
808 | 3197
809 | 2319
810 | 2744
811 | 2615
812 | 2825
813 | 2603
814 | 2914
815 | 2531
816 | 3193
817 | 2624
818 | 2365
819 | 2810
820 | 3239
821 | 3159
822 | 2537
823 | 2844
824 | 2758
825 | 2938
826 | 3037
827 | 2503
828 | 3297
829 | 2885
830 | 2608
831 | 2494
832 | 2712
833 | 2408
834 | 2901
835 | 2704
836 | 2536
837 | 2373
838 | 2478
839 | 2723
840 | 3076
841 | 2627
842 | 2369
843 | 2669
844 | 3006
845 | 2628
846 | 2788
847 | 3276
848 | 2435
849 | 3139
850 | 3235
851 | 2527
852 | 2571
853 | 2815
854 | 2442
855 | 2892
856 | 2978
857 | 2746
858 | 3150
859 | 2574
860 | 2725
861 | 3188
862 | 2601
863 | 2378
864 | 3075
865 | 2632
866 | 2794
867 | 3270
868 | 3071
869 | 2506
870 | 3126
871 | 3236
872 | 3257
873 | 2824
874 | 2989
875 | 2950
876 | 2428
877 | 2405
878 | 3156
879 | 2447
880 | 2787
881 | 2805
882 | 2720
883 | 2403
884 | 2811
885 | 2329
886 | 2474
887 | 2785
888 | 2350
889 | 2507
890 | 2416
891 | 3112
892 | 2475
893 | 2876
894 | 2585
895 | 2487
896 | 3072
897 | 3082
898 | 2943
899 | 2757
900 | 2388
901 | 2600
902 | 3294
903 | 2756
904 | 3142
905 | 3041
906 | 2594
907 | 2998
908 | 3047
909 | 2379
910 | 2980
911 | 2454
912 | 2862
913 | 3175
914 | 2588
915 | 3031
916 | 3012
917 | 2889
918 | 2500
919 | 2791
920 | 2854
921 | 2619
922 | 2395
923 | 2807
924 | 2740
925 | 2412
926 | 3131
927 | 3013
928 | 2939
929 | 2651
930 | 2490
931 | 2988
932 | 2863
933 | 3225
934 | 2745
935 | 2714
936 | 3160
937 | 3124
938 | 2849
939 | 2676
940 | 2872
941 | 3287
942 | 3189
943 | 2716
944 | 3115
945 | 2928
946 | 2871
947 | 2591
948 | 2717
949 | 2546
950 | 2777
951 | 3298
952 | 2397
953 | 3187
954 | 2726
955 | 2336
956 | 3268
957 | 2477
958 | 2904
959 | 2846
960 | 3121
961 | 2899
962 | 2510
963 | 2806
964 | 2963
965 | 3313
966 | 2679
967 | 3302
968 | 2663
969 | 3053
970 | 2469
971 | 2999
972 | 3311
973 | 2470
974 | 2638
975 | 3120
976 | 3171
977 | 2689
978 | 2922
979 | 2607
980 | 2721
981 | 2993
982 | 2887
983 | 2837
984 | 2929
985 | 2829
986 | 3234
987 | 2649
988 | 2337
989 | 2759
990 | 2778
991 | 2771
992 | 2404
993 | 2589
994 | 3123
995 | 3209
996 | 2729
997 | 3252
998 | 2606
999 | 2579
1000 | 2552
1001 |
--------------------------------------------------------------------------------
/data/ind.citeseer.tx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.tx
--------------------------------------------------------------------------------
/data/ind.citeseer.ty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.ty
--------------------------------------------------------------------------------
/data/ind.citeseer.x:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.x
--------------------------------------------------------------------------------
/data/ind.citeseer.y:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.citeseer.y
--------------------------------------------------------------------------------
/data/ind.cora.allx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.allx
--------------------------------------------------------------------------------
/data/ind.cora.ally:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.ally
--------------------------------------------------------------------------------
/data/ind.cora.graph:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.graph
--------------------------------------------------------------------------------
/data/ind.cora.test.index:
--------------------------------------------------------------------------------
1 | 2692
2 | 2532
3 | 2050
4 | 1715
5 | 2362
6 | 2609
7 | 2622
8 | 1975
9 | 2081
10 | 1767
11 | 2263
12 | 1725
13 | 2588
14 | 2259
15 | 2357
16 | 1998
17 | 2574
18 | 2179
19 | 2291
20 | 2382
21 | 1812
22 | 1751
23 | 2422
24 | 1937
25 | 2631
26 | 2510
27 | 2378
28 | 2589
29 | 2345
30 | 1943
31 | 1850
32 | 2298
33 | 1825
34 | 2035
35 | 2507
36 | 2313
37 | 1906
38 | 1797
39 | 2023
40 | 2159
41 | 2495
42 | 1886
43 | 2122
44 | 2369
45 | 2461
46 | 1925
47 | 2565
48 | 1858
49 | 2234
50 | 2000
51 | 1846
52 | 2318
53 | 1723
54 | 2559
55 | 2258
56 | 1763
57 | 1991
58 | 1922
59 | 2003
60 | 2662
61 | 2250
62 | 2064
63 | 2529
64 | 1888
65 | 2499
66 | 2454
67 | 2320
68 | 2287
69 | 2203
70 | 2018
71 | 2002
72 | 2632
73 | 2554
74 | 2314
75 | 2537
76 | 1760
77 | 2088
78 | 2086
79 | 2218
80 | 2605
81 | 1953
82 | 2403
83 | 1920
84 | 2015
85 | 2335
86 | 2535
87 | 1837
88 | 2009
89 | 1905
90 | 2636
91 | 1942
92 | 2193
93 | 2576
94 | 2373
95 | 1873
96 | 2463
97 | 2509
98 | 1954
99 | 2656
100 | 2455
101 | 2494
102 | 2295
103 | 2114
104 | 2561
105 | 2176
106 | 2275
107 | 2635
108 | 2442
109 | 2704
110 | 2127
111 | 2085
112 | 2214
113 | 2487
114 | 1739
115 | 2543
116 | 1783
117 | 2485
118 | 2262
119 | 2472
120 | 2326
121 | 1738
122 | 2170
123 | 2100
124 | 2384
125 | 2152
126 | 2647
127 | 2693
128 | 2376
129 | 1775
130 | 1726
131 | 2476
132 | 2195
133 | 1773
134 | 1793
135 | 2194
136 | 2581
137 | 1854
138 | 2524
139 | 1945
140 | 1781
141 | 1987
142 | 2599
143 | 1744
144 | 2225
145 | 2300
146 | 1928
147 | 2042
148 | 2202
149 | 1958
150 | 1816
151 | 1916
152 | 2679
153 | 2190
154 | 1733
155 | 2034
156 | 2643
157 | 2177
158 | 1883
159 | 1917
160 | 1996
161 | 2491
162 | 2268
163 | 2231
164 | 2471
165 | 1919
166 | 1909
167 | 2012
168 | 2522
169 | 1865
170 | 2466
171 | 2469
172 | 2087
173 | 2584
174 | 2563
175 | 1924
176 | 2143
177 | 1736
178 | 1966
179 | 2533
180 | 2490
181 | 2630
182 | 1973
183 | 2568
184 | 1978
185 | 2664
186 | 2633
187 | 2312
188 | 2178
189 | 1754
190 | 2307
191 | 2480
192 | 1960
193 | 1742
194 | 1962
195 | 2160
196 | 2070
197 | 2553
198 | 2433
199 | 1768
200 | 2659
201 | 2379
202 | 2271
203 | 1776
204 | 2153
205 | 1877
206 | 2027
207 | 2028
208 | 2155
209 | 2196
210 | 2483
211 | 2026
212 | 2158
213 | 2407
214 | 1821
215 | 2131
216 | 2676
217 | 2277
218 | 2489
219 | 2424
220 | 1963
221 | 1808
222 | 1859
223 | 2597
224 | 2548
225 | 2368
226 | 1817
227 | 2405
228 | 2413
229 | 2603
230 | 2350
231 | 2118
232 | 2329
233 | 1969
234 | 2577
235 | 2475
236 | 2467
237 | 2425
238 | 1769
239 | 2092
240 | 2044
241 | 2586
242 | 2608
243 | 1983
244 | 2109
245 | 2649
246 | 1964
247 | 2144
248 | 1902
249 | 2411
250 | 2508
251 | 2360
252 | 1721
253 | 2005
254 | 2014
255 | 2308
256 | 2646
257 | 1949
258 | 1830
259 | 2212
260 | 2596
261 | 1832
262 | 1735
263 | 1866
264 | 2695
265 | 1941
266 | 2546
267 | 2498
268 | 2686
269 | 2665
270 | 1784
271 | 2613
272 | 1970
273 | 2021
274 | 2211
275 | 2516
276 | 2185
277 | 2479
278 | 2699
279 | 2150
280 | 1990
281 | 2063
282 | 2075
283 | 1979
284 | 2094
285 | 1787
286 | 2571
287 | 2690
288 | 1926
289 | 2341
290 | 2566
291 | 1957
292 | 1709
293 | 1955
294 | 2570
295 | 2387
296 | 1811
297 | 2025
298 | 2447
299 | 2696
300 | 2052
301 | 2366
302 | 1857
303 | 2273
304 | 2245
305 | 2672
306 | 2133
307 | 2421
308 | 1929
309 | 2125
310 | 2319
311 | 2641
312 | 2167
313 | 2418
314 | 1765
315 | 1761
316 | 1828
317 | 2188
318 | 1972
319 | 1997
320 | 2419
321 | 2289
322 | 2296
323 | 2587
324 | 2051
325 | 2440
326 | 2053
327 | 2191
328 | 1923
329 | 2164
330 | 1861
331 | 2339
332 | 2333
333 | 2523
334 | 2670
335 | 2121
336 | 1921
337 | 1724
338 | 2253
339 | 2374
340 | 1940
341 | 2545
342 | 2301
343 | 2244
344 | 2156
345 | 1849
346 | 2551
347 | 2011
348 | 2279
349 | 2572
350 | 1757
351 | 2400
352 | 2569
353 | 2072
354 | 2526
355 | 2173
356 | 2069
357 | 2036
358 | 1819
359 | 1734
360 | 1880
361 | 2137
362 | 2408
363 | 2226
364 | 2604
365 | 1771
366 | 2698
367 | 2187
368 | 2060
369 | 1756
370 | 2201
371 | 2066
372 | 2439
373 | 1844
374 | 1772
375 | 2383
376 | 2398
377 | 1708
378 | 1992
379 | 1959
380 | 1794
381 | 2426
382 | 2702
383 | 2444
384 | 1944
385 | 1829
386 | 2660
387 | 2497
388 | 2607
389 | 2343
390 | 1730
391 | 2624
392 | 1790
393 | 1935
394 | 1967
395 | 2401
396 | 2255
397 | 2355
398 | 2348
399 | 1931
400 | 2183
401 | 2161
402 | 2701
403 | 1948
404 | 2501
405 | 2192
406 | 2404
407 | 2209
408 | 2331
409 | 1810
410 | 2363
411 | 2334
412 | 1887
413 | 2393
414 | 2557
415 | 1719
416 | 1732
417 | 1986
418 | 2037
419 | 2056
420 | 1867
421 | 2126
422 | 1932
423 | 2117
424 | 1807
425 | 1801
426 | 1743
427 | 2041
428 | 1843
429 | 2388
430 | 2221
431 | 1833
432 | 2677
433 | 1778
434 | 2661
435 | 2306
436 | 2394
437 | 2106
438 | 2430
439 | 2371
440 | 2606
441 | 2353
442 | 2269
443 | 2317
444 | 2645
445 | 2372
446 | 2550
447 | 2043
448 | 1968
449 | 2165
450 | 2310
451 | 1985
452 | 2446
453 | 1982
454 | 2377
455 | 2207
456 | 1818
457 | 1913
458 | 1766
459 | 1722
460 | 1894
461 | 2020
462 | 1881
463 | 2621
464 | 2409
465 | 2261
466 | 2458
467 | 2096
468 | 1712
469 | 2594
470 | 2293
471 | 2048
472 | 2359
473 | 1839
474 | 2392
475 | 2254
476 | 1911
477 | 2101
478 | 2367
479 | 1889
480 | 1753
481 | 2555
482 | 2246
483 | 2264
484 | 2010
485 | 2336
486 | 2651
487 | 2017
488 | 2140
489 | 1842
490 | 2019
491 | 1890
492 | 2525
493 | 2134
494 | 2492
495 | 2652
496 | 2040
497 | 2145
498 | 2575
499 | 2166
500 | 1999
501 | 2434
502 | 1711
503 | 2276
504 | 2450
505 | 2389
506 | 2669
507 | 2595
508 | 1814
509 | 2039
510 | 2502
511 | 1896
512 | 2168
513 | 2344
514 | 2637
515 | 2031
516 | 1977
517 | 2380
518 | 1936
519 | 2047
520 | 2460
521 | 2102
522 | 1745
523 | 2650
524 | 2046
525 | 2514
526 | 1980
527 | 2352
528 | 2113
529 | 1713
530 | 2058
531 | 2558
532 | 1718
533 | 1864
534 | 1876
535 | 2338
536 | 1879
537 | 1891
538 | 2186
539 | 2451
540 | 2181
541 | 2638
542 | 2644
543 | 2103
544 | 2591
545 | 2266
546 | 2468
547 | 1869
548 | 2582
549 | 2674
550 | 2361
551 | 2462
552 | 1748
553 | 2215
554 | 2615
555 | 2236
556 | 2248
557 | 2493
558 | 2342
559 | 2449
560 | 2274
561 | 1824
562 | 1852
563 | 1870
564 | 2441
565 | 2356
566 | 1835
567 | 2694
568 | 2602
569 | 2685
570 | 1893
571 | 2544
572 | 2536
573 | 1994
574 | 1853
575 | 1838
576 | 1786
577 | 1930
578 | 2539
579 | 1892
580 | 2265
581 | 2618
582 | 2486
583 | 2583
584 | 2061
585 | 1796
586 | 1806
587 | 2084
588 | 1933
589 | 2095
590 | 2136
591 | 2078
592 | 1884
593 | 2438
594 | 2286
595 | 2138
596 | 1750
597 | 2184
598 | 1799
599 | 2278
600 | 2410
601 | 2642
602 | 2435
603 | 1956
604 | 2399
605 | 1774
606 | 2129
607 | 1898
608 | 1823
609 | 1938
610 | 2299
611 | 1862
612 | 2420
613 | 2673
614 | 1984
615 | 2204
616 | 1717
617 | 2074
618 | 2213
619 | 2436
620 | 2297
621 | 2592
622 | 2667
623 | 2703
624 | 2511
625 | 1779
626 | 1782
627 | 2625
628 | 2365
629 | 2315
630 | 2381
631 | 1788
632 | 1714
633 | 2302
634 | 1927
635 | 2325
636 | 2506
637 | 2169
638 | 2328
639 | 2629
640 | 2128
641 | 2655
642 | 2282
643 | 2073
644 | 2395
645 | 2247
646 | 2521
647 | 2260
648 | 1868
649 | 1988
650 | 2324
651 | 2705
652 | 2541
653 | 1731
654 | 2681
655 | 2707
656 | 2465
657 | 1785
658 | 2149
659 | 2045
660 | 2505
661 | 2611
662 | 2217
663 | 2180
664 | 1904
665 | 2453
666 | 2484
667 | 1871
668 | 2309
669 | 2349
670 | 2482
671 | 2004
672 | 1965
673 | 2406
674 | 2162
675 | 1805
676 | 2654
677 | 2007
678 | 1947
679 | 1981
680 | 2112
681 | 2141
682 | 1720
683 | 1758
684 | 2080
685 | 2330
686 | 2030
687 | 2432
688 | 2089
689 | 2547
690 | 1820
691 | 1815
692 | 2675
693 | 1840
694 | 2658
695 | 2370
696 | 2251
697 | 1908
698 | 2029
699 | 2068
700 | 2513
701 | 2549
702 | 2267
703 | 2580
704 | 2327
705 | 2351
706 | 2111
707 | 2022
708 | 2321
709 | 2614
710 | 2252
711 | 2104
712 | 1822
713 | 2552
714 | 2243
715 | 1798
716 | 2396
717 | 2663
718 | 2564
719 | 2148
720 | 2562
721 | 2684
722 | 2001
723 | 2151
724 | 2706
725 | 2240
726 | 2474
727 | 2303
728 | 2634
729 | 2680
730 | 2055
731 | 2090
732 | 2503
733 | 2347
734 | 2402
735 | 2238
736 | 1950
737 | 2054
738 | 2016
739 | 1872
740 | 2233
741 | 1710
742 | 2032
743 | 2540
744 | 2628
745 | 1795
746 | 2616
747 | 1903
748 | 2531
749 | 2567
750 | 1946
751 | 1897
752 | 2222
753 | 2227
754 | 2627
755 | 1856
756 | 2464
757 | 2241
758 | 2481
759 | 2130
760 | 2311
761 | 2083
762 | 2223
763 | 2284
764 | 2235
765 | 2097
766 | 1752
767 | 2515
768 | 2527
769 | 2385
770 | 2189
771 | 2283
772 | 2182
773 | 2079
774 | 2375
775 | 2174
776 | 2437
777 | 1993
778 | 2517
779 | 2443
780 | 2224
781 | 2648
782 | 2171
783 | 2290
784 | 2542
785 | 2038
786 | 1855
787 | 1831
788 | 1759
789 | 1848
790 | 2445
791 | 1827
792 | 2429
793 | 2205
794 | 2598
795 | 2657
796 | 1728
797 | 2065
798 | 1918
799 | 2427
800 | 2573
801 | 2620
802 | 2292
803 | 1777
804 | 2008
805 | 1875
806 | 2288
807 | 2256
808 | 2033
809 | 2470
810 | 2585
811 | 2610
812 | 2082
813 | 2230
814 | 1915
815 | 1847
816 | 2337
817 | 2512
818 | 2386
819 | 2006
820 | 2653
821 | 2346
822 | 1951
823 | 2110
824 | 2639
825 | 2520
826 | 1939
827 | 2683
828 | 2139
829 | 2220
830 | 1910
831 | 2237
832 | 1900
833 | 1836
834 | 2197
835 | 1716
836 | 1860
837 | 2077
838 | 2519
839 | 2538
840 | 2323
841 | 1914
842 | 1971
843 | 1845
844 | 2132
845 | 1802
846 | 1907
847 | 2640
848 | 2496
849 | 2281
850 | 2198
851 | 2416
852 | 2285
853 | 1755
854 | 2431
855 | 2071
856 | 2249
857 | 2123
858 | 1727
859 | 2459
860 | 2304
861 | 2199
862 | 1791
863 | 1809
864 | 1780
865 | 2210
866 | 2417
867 | 1874
868 | 1878
869 | 2116
870 | 1961
871 | 1863
872 | 2579
873 | 2477
874 | 2228
875 | 2332
876 | 2578
877 | 2457
878 | 2024
879 | 1934
880 | 2316
881 | 1841
882 | 1764
883 | 1737
884 | 2322
885 | 2239
886 | 2294
887 | 1729
888 | 2488
889 | 1974
890 | 2473
891 | 2098
892 | 2612
893 | 1834
894 | 2340
895 | 2423
896 | 2175
897 | 2280
898 | 2617
899 | 2208
900 | 2560
901 | 1741
902 | 2600
903 | 2059
904 | 1747
905 | 2242
906 | 2700
907 | 2232
908 | 2057
909 | 2147
910 | 2682
911 | 1792
912 | 1826
913 | 2120
914 | 1895
915 | 2364
916 | 2163
917 | 1851
918 | 2391
919 | 2414
920 | 2452
921 | 1803
922 | 1989
923 | 2623
924 | 2200
925 | 2528
926 | 2415
927 | 1804
928 | 2146
929 | 2619
930 | 2687
931 | 1762
932 | 2172
933 | 2270
934 | 2678
935 | 2593
936 | 2448
937 | 1882
938 | 2257
939 | 2500
940 | 1899
941 | 2478
942 | 2412
943 | 2107
944 | 1746
945 | 2428
946 | 2115
947 | 1800
948 | 1901
949 | 2397
950 | 2530
951 | 1912
952 | 2108
953 | 2206
954 | 2091
955 | 1740
956 | 2219
957 | 1976
958 | 2099
959 | 2142
960 | 2671
961 | 2668
962 | 2216
963 | 2272
964 | 2229
965 | 2666
966 | 2456
967 | 2534
968 | 2697
969 | 2688
970 | 2062
971 | 2691
972 | 2689
973 | 2154
974 | 2590
975 | 2626
976 | 2390
977 | 1813
978 | 2067
979 | 1952
980 | 2518
981 | 2358
982 | 1789
983 | 2076
984 | 2049
985 | 2119
986 | 2013
987 | 2124
988 | 2556
989 | 2105
990 | 2093
991 | 1885
992 | 2305
993 | 2354
994 | 2135
995 | 2601
996 | 1770
997 | 1995
998 | 2504
999 | 1749
1000 | 2157
1001 |
--------------------------------------------------------------------------------
/data/ind.cora.tx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.tx
--------------------------------------------------------------------------------
/data/ind.cora.ty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.ty
--------------------------------------------------------------------------------
/data/ind.cora.x:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.x
--------------------------------------------------------------------------------
/data/ind.cora.y:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.cora.y
--------------------------------------------------------------------------------
/data/ind.pubmed.allx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.allx
--------------------------------------------------------------------------------
/data/ind.pubmed.ally:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.ally
--------------------------------------------------------------------------------
/data/ind.pubmed.graph:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.graph
--------------------------------------------------------------------------------
/data/ind.pubmed.test.index:
--------------------------------------------------------------------------------
1 | 18747
2 | 19392
3 | 19181
4 | 18843
5 | 19221
6 | 18962
7 | 19560
8 | 19097
9 | 18966
10 | 19014
11 | 18756
12 | 19313
13 | 19000
14 | 19569
15 | 19359
16 | 18854
17 | 18970
18 | 19073
19 | 19661
20 | 19180
21 | 19377
22 | 18750
23 | 19401
24 | 18788
25 | 19224
26 | 19447
27 | 19017
28 | 19241
29 | 18890
30 | 18908
31 | 18965
32 | 19001
33 | 18849
34 | 19641
35 | 18852
36 | 19222
37 | 19172
38 | 18762
39 | 19156
40 | 19162
41 | 18856
42 | 18763
43 | 19318
44 | 18826
45 | 19712
46 | 19192
47 | 19695
48 | 19030
49 | 19523
50 | 19249
51 | 19079
52 | 19232
53 | 19455
54 | 18743
55 | 18800
56 | 19071
57 | 18885
58 | 19593
59 | 19394
60 | 19390
61 | 18832
62 | 19445
63 | 18838
64 | 19632
65 | 19548
66 | 19546
67 | 18825
68 | 19498
69 | 19266
70 | 19117
71 | 19595
72 | 19252
73 | 18730
74 | 18913
75 | 18809
76 | 19452
77 | 19520
78 | 19274
79 | 19555
80 | 19388
81 | 18919
82 | 19099
83 | 19637
84 | 19403
85 | 18720
86 | 19526
87 | 18905
88 | 19451
89 | 19408
90 | 18923
91 | 18794
92 | 19322
93 | 19431
94 | 18912
95 | 18841
96 | 19239
97 | 19125
98 | 19258
99 | 19565
100 | 18898
101 | 19482
102 | 19029
103 | 18778
104 | 19096
105 | 19684
106 | 19552
107 | 18765
108 | 19361
109 | 19171
110 | 19367
111 | 19623
112 | 19402
113 | 19327
114 | 19118
115 | 18888
116 | 18726
117 | 19510
118 | 18831
119 | 19490
120 | 19576
121 | 19050
122 | 18729
123 | 18896
124 | 19246
125 | 19012
126 | 18862
127 | 18873
128 | 19193
129 | 19693
130 | 19474
131 | 18953
132 | 19115
133 | 19182
134 | 19269
135 | 19116
136 | 18837
137 | 18872
138 | 19007
139 | 19212
140 | 18798
141 | 19102
142 | 18772
143 | 19660
144 | 19511
145 | 18914
146 | 18886
147 | 19672
148 | 19360
149 | 19213
150 | 18810
151 | 19420
152 | 19512
153 | 18719
154 | 19432
155 | 19350
156 | 19127
157 | 18782
158 | 19587
159 | 18924
160 | 19488
161 | 18781
162 | 19340
163 | 19190
164 | 19383
165 | 19094
166 | 18835
167 | 19487
168 | 19230
169 | 18791
170 | 18882
171 | 18937
172 | 18928
173 | 18755
174 | 18802
175 | 19516
176 | 18795
177 | 18786
178 | 19273
179 | 19349
180 | 19398
181 | 19626
182 | 19130
183 | 19351
184 | 19489
185 | 19446
186 | 18959
187 | 19025
188 | 18792
189 | 18878
190 | 19304
191 | 19629
192 | 19061
193 | 18785
194 | 19194
195 | 19179
196 | 19210
197 | 19417
198 | 19583
199 | 19415
200 | 19443
201 | 18739
202 | 19662
203 | 18904
204 | 18910
205 | 18901
206 | 18960
207 | 18722
208 | 18827
209 | 19290
210 | 18842
211 | 19389
212 | 19344
213 | 18961
214 | 19098
215 | 19147
216 | 19334
217 | 19358
218 | 18829
219 | 18984
220 | 18931
221 | 18742
222 | 19320
223 | 19111
224 | 19196
225 | 18887
226 | 18991
227 | 19469
228 | 18990
229 | 18876
230 | 19261
231 | 19270
232 | 19522
233 | 19088
234 | 19284
235 | 19646
236 | 19493
237 | 19225
238 | 19615
239 | 19449
240 | 19043
241 | 19674
242 | 19391
243 | 18918
244 | 19155
245 | 19110
246 | 18815
247 | 19131
248 | 18834
249 | 19715
250 | 19603
251 | 19688
252 | 19133
253 | 19053
254 | 19166
255 | 19066
256 | 18893
257 | 18757
258 | 19582
259 | 19282
260 | 19257
261 | 18869
262 | 19467
263 | 18954
264 | 19371
265 | 19151
266 | 19462
267 | 19598
268 | 19653
269 | 19187
270 | 19624
271 | 19564
272 | 19534
273 | 19581
274 | 19478
275 | 18985
276 | 18746
277 | 19342
278 | 18777
279 | 19696
280 | 18824
281 | 19138
282 | 18728
283 | 19643
284 | 19199
285 | 18731
286 | 19168
287 | 18948
288 | 19216
289 | 19697
290 | 19347
291 | 18808
292 | 18725
293 | 19134
294 | 18847
295 | 18828
296 | 18996
297 | 19106
298 | 19485
299 | 18917
300 | 18911
301 | 18776
302 | 19203
303 | 19158
304 | 18895
305 | 19165
306 | 19382
307 | 18780
308 | 18836
309 | 19373
310 | 19659
311 | 18947
312 | 19375
313 | 19299
314 | 18761
315 | 19366
316 | 18754
317 | 19248
318 | 19416
319 | 19658
320 | 19638
321 | 19034
322 | 19281
323 | 18844
324 | 18922
325 | 19491
326 | 19272
327 | 19341
328 | 19068
329 | 19332
330 | 19559
331 | 19293
332 | 18804
333 | 18933
334 | 18935
335 | 19405
336 | 18936
337 | 18945
338 | 18943
339 | 18818
340 | 18797
341 | 19570
342 | 19464
343 | 19428
344 | 19093
345 | 19433
346 | 18986
347 | 19161
348 | 19255
349 | 19157
350 | 19046
351 | 19292
352 | 19434
353 | 19298
354 | 18724
355 | 19410
356 | 19694
357 | 19214
358 | 19640
359 | 19189
360 | 18963
361 | 19218
362 | 19585
363 | 19041
364 | 19550
365 | 19123
366 | 19620
367 | 19376
368 | 19561
369 | 18944
370 | 19706
371 | 19056
372 | 19283
373 | 18741
374 | 19319
375 | 19144
376 | 19542
377 | 18821
378 | 19404
379 | 19080
380 | 19303
381 | 18793
382 | 19306
383 | 19678
384 | 19435
385 | 19519
386 | 19566
387 | 19278
388 | 18946
389 | 19536
390 | 19020
391 | 19057
392 | 19198
393 | 19333
394 | 19649
395 | 19699
396 | 19399
397 | 19654
398 | 19136
399 | 19465
400 | 19321
401 | 19577
402 | 18907
403 | 19665
404 | 19386
405 | 19596
406 | 19247
407 | 19473
408 | 19568
409 | 19355
410 | 18925
411 | 19586
412 | 18982
413 | 19616
414 | 19495
415 | 19612
416 | 19023
417 | 19438
418 | 18817
419 | 19692
420 | 19295
421 | 19414
422 | 19676
423 | 19472
424 | 19107
425 | 19062
426 | 19035
427 | 18883
428 | 19409
429 | 19052
430 | 19606
431 | 19091
432 | 19651
433 | 19475
434 | 19413
435 | 18796
436 | 19369
437 | 19639
438 | 19701
439 | 19461
440 | 19645
441 | 19251
442 | 19063
443 | 19679
444 | 19545
445 | 19081
446 | 19363
447 | 18995
448 | 19549
449 | 18790
450 | 18855
451 | 18833
452 | 18899
453 | 19395
454 | 18717
455 | 19647
456 | 18768
457 | 19103
458 | 19245
459 | 18819
460 | 18779
461 | 19656
462 | 19076
463 | 18745
464 | 18971
465 | 19197
466 | 19711
467 | 19074
468 | 19128
469 | 19466
470 | 19139
471 | 19309
472 | 19324
473 | 18814
474 | 19092
475 | 19627
476 | 19060
477 | 18806
478 | 18929
479 | 18737
480 | 18942
481 | 18906
482 | 18858
483 | 19456
484 | 19253
485 | 19716
486 | 19104
487 | 19667
488 | 19574
489 | 18903
490 | 19237
491 | 18864
492 | 19556
493 | 19364
494 | 18952
495 | 19008
496 | 19323
497 | 19700
498 | 19170
499 | 19267
500 | 19345
501 | 19238
502 | 18909
503 | 18892
504 | 19109
505 | 19704
506 | 18902
507 | 19275
508 | 19680
509 | 18723
510 | 19242
511 | 19112
512 | 19169
513 | 18956
514 | 19343
515 | 19650
516 | 19541
517 | 19698
518 | 19521
519 | 19087
520 | 18976
521 | 19038
522 | 18775
523 | 18968
524 | 19671
525 | 19412
526 | 19407
527 | 19573
528 | 19027
529 | 18813
530 | 19357
531 | 19460
532 | 19673
533 | 19481
534 | 19036
535 | 19614
536 | 18787
537 | 19195
538 | 18732
539 | 18884
540 | 19613
541 | 19657
542 | 19575
543 | 19226
544 | 19589
545 | 19234
546 | 19617
547 | 19707
548 | 19484
549 | 18740
550 | 19424
551 | 18784
552 | 19419
553 | 19159
554 | 18865
555 | 19105
556 | 19315
557 | 19480
558 | 19664
559 | 19378
560 | 18803
561 | 19605
562 | 18870
563 | 19042
564 | 19426
565 | 18848
566 | 19223
567 | 19509
568 | 19532
569 | 18752
570 | 19691
571 | 18718
572 | 19209
573 | 19362
574 | 19090
575 | 19492
576 | 19567
577 | 19687
578 | 19018
579 | 18830
580 | 19530
581 | 19554
582 | 19119
583 | 19442
584 | 19558
585 | 19527
586 | 19427
587 | 19291
588 | 19543
589 | 19422
590 | 19142
591 | 18897
592 | 18950
593 | 19425
594 | 19002
595 | 19588
596 | 18978
597 | 19551
598 | 18930
599 | 18736
600 | 19101
601 | 19215
602 | 19150
603 | 19263
604 | 18949
605 | 18974
606 | 18759
607 | 19335
608 | 19200
609 | 19129
610 | 19328
611 | 19437
612 | 18988
613 | 19429
614 | 19368
615 | 19406
616 | 19049
617 | 18811
618 | 19296
619 | 19256
620 | 19385
621 | 19602
622 | 18770
623 | 19337
624 | 19580
625 | 19476
626 | 19045
627 | 19132
628 | 19089
629 | 19120
630 | 19265
631 | 19483
632 | 18767
633 | 19227
634 | 18934
635 | 19069
636 | 18820
637 | 19006
638 | 19459
639 | 18927
640 | 19037
641 | 19280
642 | 19441
643 | 18823
644 | 19015
645 | 19114
646 | 19618
647 | 18957
648 | 19176
649 | 18853
650 | 19648
651 | 19201
652 | 19444
653 | 19279
654 | 18751
655 | 19302
656 | 19505
657 | 18733
658 | 19601
659 | 19533
660 | 18863
661 | 19708
662 | 19387
663 | 19346
664 | 19152
665 | 19206
666 | 18851
667 | 19338
668 | 19681
669 | 19380
670 | 19055
671 | 18766
672 | 19085
673 | 19591
674 | 19547
675 | 18958
676 | 19146
677 | 18840
678 | 19051
679 | 19021
680 | 19207
681 | 19235
682 | 19086
683 | 18979
684 | 19300
685 | 18939
686 | 19100
687 | 19619
688 | 19287
689 | 18980
690 | 19277
691 | 19326
692 | 19108
693 | 18920
694 | 19625
695 | 19374
696 | 19078
697 | 18734
698 | 19634
699 | 19339
700 | 18877
701 | 19423
702 | 19652
703 | 19683
704 | 19044
705 | 18983
706 | 19330
707 | 19529
708 | 19714
709 | 19468
710 | 19075
711 | 19540
712 | 18839
713 | 19022
714 | 19286
715 | 19537
716 | 19175
717 | 19463
718 | 19167
719 | 19705
720 | 19562
721 | 19244
722 | 19486
723 | 19611
724 | 18801
725 | 19178
726 | 19590
727 | 18846
728 | 19450
729 | 19205
730 | 19381
731 | 18941
732 | 19670
733 | 19185
734 | 19504
735 | 19633
736 | 18997
737 | 19113
738 | 19397
739 | 19636
740 | 19709
741 | 19289
742 | 19264
743 | 19353
744 | 19584
745 | 19126
746 | 18938
747 | 19669
748 | 18964
749 | 19276
750 | 18774
751 | 19173
752 | 19231
753 | 18973
754 | 18769
755 | 19064
756 | 19040
757 | 19668
758 | 18738
759 | 19082
760 | 19655
761 | 19236
762 | 19352
763 | 19609
764 | 19628
765 | 18951
766 | 19384
767 | 19122
768 | 18875
769 | 18992
770 | 18753
771 | 19379
772 | 19254
773 | 19301
774 | 19506
775 | 19135
776 | 19010
777 | 19682
778 | 19400
779 | 19579
780 | 19316
781 | 19553
782 | 19208
783 | 19635
784 | 19644
785 | 18891
786 | 19024
787 | 18989
788 | 19250
789 | 18850
790 | 19317
791 | 18915
792 | 19607
793 | 18799
794 | 18881
795 | 19479
796 | 19031
797 | 19365
798 | 19164
799 | 18744
800 | 18760
801 | 19502
802 | 19058
803 | 19517
804 | 18735
805 | 19448
806 | 19243
807 | 19453
808 | 19285
809 | 18857
810 | 19439
811 | 19016
812 | 18975
813 | 19503
814 | 18998
815 | 18981
816 | 19186
817 | 18994
818 | 19240
819 | 19631
820 | 19070
821 | 19174
822 | 18900
823 | 19065
824 | 19220
825 | 19229
826 | 18880
827 | 19308
828 | 19372
829 | 19496
830 | 18771
831 | 19325
832 | 19538
833 | 19033
834 | 18874
835 | 19077
836 | 19211
837 | 18764
838 | 19458
839 | 19571
840 | 19121
841 | 19019
842 | 19059
843 | 19497
844 | 18969
845 | 19666
846 | 19297
847 | 19219
848 | 19622
849 | 19184
850 | 18977
851 | 19702
852 | 19539
853 | 19329
854 | 19095
855 | 19675
856 | 18972
857 | 19514
858 | 19703
859 | 19188
860 | 18866
861 | 18812
862 | 19314
863 | 18822
864 | 18845
865 | 19494
866 | 19411
867 | 18916
868 | 19686
869 | 18967
870 | 19294
871 | 19143
872 | 19204
873 | 18805
874 | 19689
875 | 19233
876 | 18758
877 | 18748
878 | 19011
879 | 19685
880 | 19336
881 | 19608
882 | 19454
883 | 19124
884 | 18868
885 | 18807
886 | 19544
887 | 19621
888 | 19228
889 | 19154
890 | 19141
891 | 19145
892 | 19153
893 | 18860
894 | 19163
895 | 19393
896 | 19268
897 | 19160
898 | 19305
899 | 19259
900 | 19471
901 | 19524
902 | 18783
903 | 19396
904 | 18894
905 | 19430
906 | 19690
907 | 19348
908 | 19597
909 | 19592
910 | 19677
911 | 18889
912 | 19331
913 | 18773
914 | 19137
915 | 19009
916 | 18932
917 | 19599
918 | 18816
919 | 19054
920 | 19067
921 | 19477
922 | 19191
923 | 18921
924 | 18940
925 | 19578
926 | 19183
927 | 19004
928 | 19072
929 | 19710
930 | 19005
931 | 19610
932 | 18955
933 | 19457
934 | 19148
935 | 18859
936 | 18993
937 | 19642
938 | 19047
939 | 19418
940 | 19535
941 | 19600
942 | 19312
943 | 19039
944 | 19028
945 | 18879
946 | 19003
947 | 19026
948 | 19013
949 | 19149
950 | 19177
951 | 19217
952 | 18987
953 | 19354
954 | 19525
955 | 19202
956 | 19084
957 | 19032
958 | 18749
959 | 18867
960 | 19048
961 | 18999
962 | 19260
963 | 19630
964 | 18727
965 | 19356
966 | 19083
967 | 18926
968 | 18789
969 | 19370
970 | 18861
971 | 19311
972 | 19557
973 | 19531
974 | 19436
975 | 19140
976 | 19310
977 | 19501
978 | 18721
979 | 19604
980 | 19713
981 | 19262
982 | 19563
983 | 19507
984 | 19440
985 | 19572
986 | 19513
987 | 19515
988 | 19518
989 | 19421
990 | 19470
991 | 19499
992 | 19663
993 | 19508
994 | 18871
995 | 19528
996 | 19500
997 | 19307
998 | 19288
999 | 19594
1000 | 19271
1001 |
--------------------------------------------------------------------------------
/data/ind.pubmed.tx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.tx
--------------------------------------------------------------------------------
/data/ind.pubmed.ty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.ty
--------------------------------------------------------------------------------
/data/ind.pubmed.x:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.x
--------------------------------------------------------------------------------
/data/ind.pubmed.y:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/data/ind.pubmed.y
--------------------------------------------------------------------------------
/layers.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import scipy.sparse as sp
4 | from scipy.sparse import csr_matrix
5 |
6 |
7 | import torch.nn.functional as F
8 | import torch
9 | from torch.nn.parameter import Parameter
10 | from torch.nn.modules.module import Module
11 | from utils import sparse_mx_to_torch_sparse_tensor
12 | from utils import normalize
13 | class GC(Module):
14 | """
15 | Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
16 | """
17 | def __init__(self, in_features, out_features, bias=True):
18 | super(GC, self).__init__()
19 | self.in_features = in_features
20 | self.out_features = out_features
21 | self.weight = Parameter(torch.FloatTensor(in_features, out_features))
22 | if bias:
23 | self.bias = Parameter(torch.FloatTensor(out_features))
24 | else:
25 | self.register_parameter('bias', None)
26 | self.reset_parameters()
27 | def reset_parameters(self):
28 | stdv = 1. / math.sqrt(self.weight.size(1))
29 | self.weight.data.uniform_(-stdv, stdv)
30 | if self.bias is not None:
31 | self.bias.data.uniform_(-stdv, stdv)
32 | def forward(self, input, adj):
33 | # adj is extracted from the graph structure
34 | support = torch.mm(input, self.weight)
35 | output = torch.spmm(adj, support)
36 | if self.bias is not None:
37 | return output + self.bias
38 | else:
39 | return output
40 | def __repr__(self):
41 | return self.__class__.__name__ + ' (' \
42 | + str(self.in_features) + ' -> ' \
43 | + str(self.out_features) + ')'
44 |
45 |
46 | class GC_withres(Module):
47 | """
48 |     Graph convolution with a smoothed residual: propagates with (I + smooth*adj)/(1 + smooth).
49 | """
50 | def __init__(self, in_features, out_features,smooth,bias=True):
51 | super(GC_withres, self).__init__()
52 | self.in_features = in_features
53 | self.out_features = out_features
54 | self.smooth = smooth
55 | self.weight = Parameter(torch.FloatTensor(in_features, out_features))
56 | if bias:
57 | self.bias = Parameter(torch.FloatTensor(out_features))
58 | else:
59 | self.register_parameter('bias', None)
60 | self.reset_parameters()
61 | def reset_parameters(self):
62 | stdv = 1. / math.sqrt(self.weight.size(1))
63 | self.weight.data.uniform_(-stdv, stdv)
64 | if self.bias is not None:
65 | self.bias.data.uniform_(-stdv, stdv)
66 | def forward(self, input, adj):
67 | # adj is extracted from the graph structure
68 | support = torch.mm(input, self.weight)
69 | I_n = sp.eye(adj.shape[0])
70 | I_n = sparse_mx_to_torch_sparse_tensor(I_n).cuda()
71 | output = torch.spmm((I_n+self.smooth*adj)/(1+self.smooth), support)
72 | if self.bias is not None:
73 | return output + self.bias
74 | else:
75 | return output
76 | def __repr__(self):
77 | return self.__class__.__name__ + ' (' \
78 | + str(self.in_features) + ' -> ' \
79 | + str(self.out_features) + ')'
80 |
81 |
82 |
83 |
84 |
85 |
86 | class NGCN(Module):
87 | """
88 |     Hybrid low-pass/band-pass layer: three channels built from powers of A_tilde plus two scattering channels.
89 | """
90 | def __init__(self, in_features,med_f0,med_f1,med_f2,med_f3,med_f4,bias=True):
91 | super(NGCN, self).__init__()
92 | self.in_features = in_features
93 | self.med_f0 = med_f0
94 | self.med_f1 = med_f1
95 | self.med_f2 = med_f2
96 | self.med_f3 = med_f3
97 | self.med_f4 = med_f4
98 |
99 | self.weight0 = Parameter(torch.FloatTensor(in_features, med_f0))
100 | self.weight1 = Parameter(torch.FloatTensor(in_features, med_f1))
101 | self.weight2 = Parameter(torch.FloatTensor(in_features, med_f2))
102 | self.weight3 = Parameter(torch.FloatTensor(in_features, med_f3))
103 | self.weight4 = Parameter(torch.FloatTensor(in_features, med_f4))
104 |
105 |
106 | #self.weight = Parameter(torch.FloatTensor((med_f0+med_f1+med_f2), out_features))
107 |
108 | if bias:
109 | self.bias1 = Parameter(torch.FloatTensor(med_f1))
110 | self.bias0 = Parameter(torch.FloatTensor(med_f0))
111 | self.bias2 = Parameter(torch.FloatTensor(med_f2))
112 | self.bias3 = Parameter(torch.FloatTensor(med_f3))
113 | self.bias4 = Parameter(torch.FloatTensor(med_f4))
114 |
115 | else:
116 | self.register_parameter('bias', None)
117 | self.reset_parameters()
118 | def reset_parameters(self):
119 | stdv0 = 1. / math.sqrt(self.weight0.size(1))
120 | stdv1 = 1. / math.sqrt(self.weight1.size(1))
121 | stdv2 = 1. / math.sqrt(self.weight2.size(1))
122 |
123 | stdv3 = 1. / math.sqrt(self.weight3.size(1))
124 | stdv4 = 1. / math.sqrt(self.weight4.size(1))
125 | torch.nn.init.xavier_uniform(self.weight0)
126 | torch.nn.init.xavier_uniform(self.weight2)
127 | torch.nn.init.xavier_uniform(self.weight1)
128 | torch.nn.init.xavier_uniform(self.weight3)
129 | torch.nn.init.xavier_uniform(self.weight4)
130 | if self.bias0 is not None:
131 | self.bias1.data.uniform_(-stdv1, stdv1)
132 | self.bias0.data.uniform_(-stdv0, stdv0)
133 | self.bias2.data.uniform_(-stdv2, stdv2)
134 |
135 | self.bias3.data.uniform_(-stdv3, stdv3)
136 | self.bias4.data.uniform_(-stdv4, stdv4)
137 |
138 | def forward(self, input, adj,A_tilde,s1_sct,s2_sct,s3_sct,adj_sct_o1,adj_sct_o2):
139 | # adj is extracted from the graph structure
140 |         # adj_sct_o1, adj_sct_o2: [order, index] selectors for the two scattering channels
141 |         # e.g. adj_sct_o1 = [1,1] --> 1st-order scattering, wavelet index 1
142 |         # e.g. adj_sct_o1 = [2,1] --> 2nd-order scattering, wavelet pair index 1
143 |         # s1_sct, s2_sct, s3_sct: the three first-order scattering matrices
144 | support0 = torch.mm(input, self.weight0)
145 | output0 = torch.spmm(A_tilde, support0) + self.bias0
146 | support1 = torch.mm(input, self.weight1)
147 | output1 = torch.spmm(A_tilde, support1)
148 | output1 = torch.spmm(A_tilde, output1)+ self.bias1
149 |
150 | support2 = torch.mm(input, self.weight2)
151 | output2 = torch.spmm(A_tilde, support2)
152 | output2 = torch.spmm(A_tilde, output2)
153 | output2 = torch.spmm(A_tilde, output2)+ self.bias2
154 | support3 = torch.mm(input, self.weight3)
155 | if adj_sct_o1[0] == 1:
156 | if adj_sct_o1[1] == 1:
157 | output3 = torch.spmm(s1_sct.cuda(), support3)+ self.bias3
158 | elif adj_sct_o1[1] == 2:
159 | output3 = torch.spmm(s2_sct.cuda(), support3)+ self.bias3
160 | elif adj_sct_o1[1] == 3:
161 | output3 = torch.spmm(s3_sct.cuda(), support3)+ self.bias3
162 | else:
163 | print('Please type in the right index!')
164 |
165 | elif adj_sct_o1[0] == 2:
166 | # second order scatt
167 | # adj_sct_o1[1] == 1----> psi_2|psi_1 x |
168 | # adj_sct_o1[1] == 2----> psi_3|psi_1 x |
169 | # adj_sct_o1[1] == 3----> psi_3|psi_2 x |
170 | if adj_sct_o1[1] == 1:
171 | output3 = torch.spmm(s2_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s1_sct.cuda(), support3)))+ self.bias3
172 | elif adj_sct_o1[1] == 2:
173 | output3 = torch.spmm(s3_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s1_sct.cuda(), support3)))+ self.bias3
174 | elif adj_sct_o1[1] == 3:
175 | output3 = torch.spmm(s3_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s2_sct.cuda(), support3)))+ self.bias3
176 | else:
177 | print('Please type in the right index!')
178 | else:
179 | print('Please type in the right index!')
180 |
181 |
182 | support4 = torch.mm(input, self.weight4)
183 | if adj_sct_o2[0] == 1:
184 | if adj_sct_o2[1] == 1:
185 | output4 = torch.spmm(s1_sct.cuda(), support4)+ self.bias4
186 | elif adj_sct_o2[1] == 2:
187 | output4 = torch.spmm(s2_sct.cuda(), support4)+ self.bias4
188 | elif adj_sct_o2[1] == 3:
189 | output4 = torch.spmm(s3_sct.cuda(), support4)+ self.bias4
190 | else:
191 | print('Please type in the right index!')
192 |
193 | elif adj_sct_o2[0] == 2:
194 | # second order scatt
195 | # adj_sct_o1[1] == 1----> psi_2|psi_1 x |
196 | # adj_sct_o1[1] == 2----> psi_3|psi_1 x |
197 | # adj_sct_o1[1] == 3----> psi_3|psi_2 x |
198 | if adj_sct_o2[1] == 1:
199 | output4 = torch.spmm(s2_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s1_sct.cuda(), support4)))+ self.bias4
200 | elif adj_sct_o2[1] == 2:
201 | output4 = torch.spmm(s3_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s1_sct.cuda(), support4)))+ self.bias4
202 | elif adj_sct_o2[1] == 3:
203 | output4 = torch.spmm(s3_sct.cuda(),torch.FloatTensor.abs(torch.spmm(s2_sct.cuda(), support4)))+ self.bias4
204 | else:
205 | print('Please type in the right index!')
206 | else:
207 | print('Please type in the right index!')
208 |
209 |
210 |
211 |
212 | support_3hop = torch.cat((output0,output1,output2,output3,output4), 1)
213 | output_3hop = support_3hop
214 | if self.bias0 is not None:
215 | return output_3hop
216 | #return output_3hop
217 | else:
218 | return output_3hop
219 | def __repr__(self):
220 |         # NGCN has no single out_features; report the concatenated channel width instead
221 |         return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' \
222 |                + str(self.med_f0+self.med_f1+self.med_f2+self.med_f3+self.med_f4) + ')'
223 |
224 |
225 |
226 |
227 |
228 |
--------------------------------------------------------------------------------
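Note (not part of the repository): a minimal usage sketch for the plain GC layer on a toy two-node graph, assuming layers.py and utils.py are importable from the repository root; GC itself does not call .cuda(), so this runs on CPU.

    import numpy as np
    import scipy.sparse as sp
    import torch
    from layers import GC
    from utils import normalize_adjacency_matrix, sparse_mx_to_torch_sparse_tensor

    # toy 2-node graph with a single edge
    A = sp.csr_matrix(np.array([[0., 1.], [1., 0.]], dtype=np.float32))
    # augmented normalized adjacency: (D+I)^-1/2 (A+I) (D+I)^-1/2
    A_tilde = sparse_mx_to_torch_sparse_tensor(
        normalize_adjacency_matrix(A, sp.eye(A.shape[0])))
    x = torch.randn(2, 5)                       # 2 nodes, 5 input features
    layer = GC(in_features=5, out_features=3)
    print(layer(x, A_tilde).shape)              # torch.Size([2, 3])
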
/load_pretrain_model.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from utils import load_citation, accuracy
4 | import time
5 | import argparse
6 | import torch
7 | import numpy as np
8 | torch.manual_seed(42)
9 | torch.cuda.manual_seed(42)
10 | np.random.seed(42)
11 | torch.backends.cudnn.deterministic = True
12 | from scipy import sparse
13 | from torch.optim.lr_scheduler import MultiStepLR,StepLR
14 |
15 | import torch.nn.functional as F
16 | import torch.optim as optim
17 | from models import GCN
18 | # Training settings
19 | parser = argparse.ArgumentParser()
20 | parser.add_argument('--dataset', type=str, default="cora",help='Dataset to use.')
21 | parser.add_argument('--no-cuda', action='store_true', default=False,
22 | help='Disables CUDA training.')
23 | parser.add_argument('--fastmode', action='store_true', default=False,
24 | help='Validate during training pass.')
25 | parser.add_argument('--seed', type=int, default=42, help='Random seed.')
26 | parser.add_argument('--epochs', type=int, default=200,
27 | help='Number of epochs to train.')
28 | parser.add_argument('--patience', type=int, default=200,
29 |                     help='Patience (in epochs) for early stopping.')
30 | parser.add_argument('--lr', type=float, default=0.005,
31 | help='Initial learning rate.')
32 | parser.add_argument('--weight_decay', type=float, default=0.0,
33 | help='Weight decay (L2 loss on parameters).')
34 | parser.add_argument('--l1', type=float, default=0.05,
35 |                     help='L1 regularization coefficient (L1 penalty on parameters).')
36 | parser.add_argument('--hid1', type=int, default=13,
37 | help='Number of hidden units.')
38 | parser.add_argument('--hid2', type=int, default=25,
39 | help='Number of hidden units.')
40 | parser.add_argument('--smoo', type=float, default=0.5,
41 | help='Smooth for Res layer')
42 | parser.add_argument('--dropout', type=float, default=0.9,
43 | help='Dropout rate (1 - keep probability).')
44 | parser.add_argument('--normalization', type=str, default='AugNormAdj',
45 | choices=['AugNormAdj'],
46 | help='Normalization method for the adjacency matrix.')
47 |
48 | parser.add_argument('--order_1',type=int, default=1)
49 | parser.add_argument('--sct_inx1', type=int, default=1)
50 | parser.add_argument('--order_2',type=int, default=1)
51 | parser.add_argument('--sct_inx2', type=int, default=3)
52 | args = parser.parse_args()
53 | args.cuda = not args.no_cuda and torch.cuda.is_available()
54 |
55 | np.random.seed(args.seed)
56 | torch.manual_seed(args.seed)
57 | if args.cuda:
58 | torch.cuda.manual_seed(args.seed)
59 |
60 | # Load data
61 | #adj, features, labels, idx_train, idx_val, idx_test = load_data()
62 | adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, args.normalization,args.cuda)
63 | # Model and optimizer
64 | model = GCN(nfeat=features.shape[1],
65 | para3=args.hid1,
66 | para4=args.hid2,
67 | nclass=labels.max().item() + 1,
68 | dropout=args.dropout,
69 | smoo=args.smoo)
70 |
71 |
72 |
73 | PATH = "state_dict_model.pt"
74 | model.load_state_dict(torch.load(PATH))
75 | if args.cuda:
76 | model = model.cuda()
77 | features = features.cuda()
78 | A_tilde = A_tilde.cuda()
79 | adj = adj.cuda()
80 | labels = labels.cuda()
81 | idx_train = idx_train.cuda()
82 | idx_val = idx_val.cuda()
83 | idx_test = idx_test.cuda()
84 |
85 | optimizer = optim.Adam(model.parameters(),
86 | lr=args.lr, weight_decay=args.weight_decay)
87 | scheduler = StepLR(optimizer, step_size=50, gamma=0.9)
88 |
89 | def test():
90 | model.eval()
91 | output = model(features,adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,[args.order_1,args.sct_inx1],[args.order_2,args.sct_inx2])
92 | loss_test = F.nll_loss(output[idx_test], labels[idx_test])
93 | acc_test = accuracy(output[idx_test], labels[idx_test])
94 | print("Test set results:",
95 | "loss= {:.4f}".format(loss_test.item()),
96 | "accuracy= {:.4f}".format(acc_test.item()))
97 | # Testing
98 | test()
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
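Usage note: load_pretrain_model.py rebuilds the same GCN as train.py, restores the weights from state_dict_model.pt, and only runs test(). The hidden sizes (--hid1/--hid2) and the scattering selectors (--order_1/--sct_inx1/--order_2/--sct_inx2) must match the values used when the checkpoint was trained, otherwise load_state_dict fails or a different operator is evaluated. With the defaults above, an evaluation run is for example:

    python load_pretrain_model.py --dataset cora --hid1 13 --hid2 25 --smoo 0.5
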
/models.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 | from layers import GC_withres,NGCN
5 | class GCN(nn.Module):
6 | def __init__(self, nfeat, para3,para4, nclass, dropout,smoo):
7 | super(GCN, self).__init__()
8 |
9 | self.gc1 = NGCN(nfeat,med_f0=10,med_f1=10,med_f2=10,med_f3=para3,med_f4=para4)
10 | # self.gc1 = NGCN(nfeat,med_f0=28,med_f1=1,med_f2=1,med_f3=para3,med_f4=para4)
11 | # self.gc2 = NGCN(30+para3+para4,med_f0=28,med_f1=1,med_f2=1,med_f3=para3,med_f4=para4)
12 | self.gc11 = GC_withres(30+para3+para4, nclass,smooth=smoo)
13 | self.dropout = dropout
14 |
15 | def forward(self, x,adj, A_tilde,s1_sct,s2_sct,s3_sct,\
16 | sct_index1,\
17 | sct_index2):
18 | x = torch.FloatTensor.abs_(self.gc1(x,adj,A_tilde,\
19 | s1_sct,s2_sct,s3_sct,\
20 | adj_sct_o1 = sct_index1,\
21 | adj_sct_o2 = sct_index2))**4
22 | x = F.dropout(x, self.dropout, training=self.training)
23 | x = self.gc11(x, adj)
24 | return F.log_softmax(x, dim=1)
25 |
--------------------------------------------------------------------------------
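Note (illustrative, not from the repository): the channel widths in GCN have to line up. NGCN concatenates five channels of width 10, 10, 10, para3 and para4, so the residual layer gc11 takes 30 + para3 + para4 inputs. A quick CPU-only check with Cora-sized example values:

    from models import GCN

    model = GCN(nfeat=1433, para3=13, para4=25, nclass=7, dropout=0.9, smoo=0.5)
    print(model.gc11.weight.shape)   # torch.Size([68, 7]); 68 = 30 + 13 + 25
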
/normalization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 |
5 | def aug_normalized_adjacency(adj):
6 | adj = adj + sp.eye(adj.shape[0])
7 | adj = sp.coo_matrix(adj)
8 | row_sum = np.array(adj.sum(1))
9 | d_inv_sqrt = np.power(row_sum, -0.5).flatten()
10 | d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
11 | d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
12 | return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo()
13 |
14 | def fetch_normalization(type):
15 | switcher = {
16 | 'AugNormAdj': aug_normalized_adjacency, # A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
17 | }
18 | func = switcher.get(type, lambda: "Invalid normalization technique.")
19 | return func
20 |
21 | def row_normalize(mx):
22 | """Row-normalize sparse matrix"""
23 | rowsum = np.array(mx.sum(1))
24 | r_inv = np.power(rowsum, -1).flatten()
25 | r_inv[np.isinf(r_inv)] = 0.
26 | r_mat_inv = sp.diags(r_inv)
27 | mx = r_mat_inv.dot(mx)
28 | return mx
29 |
--------------------------------------------------------------------------------
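Note (illustrative only): a tiny check of aug_normalized_adjacency, which realizes A' = (D + I)^-1/2 (A + I) (D + I)^-1/2, here on a 3-node path graph:

    import numpy as np
    import scipy.sparse as sp
    from normalization import aug_normalized_adjacency

    A = sp.csr_matrix(np.array([[0, 1, 0],
                                [1, 0, 1],
                                [0, 1, 0]], dtype=np.float32))
    A_hat = aug_normalized_adjacency(A)
    print(A_hat.toarray())   # symmetric; entry (i, j) equals (A+I)_ij / sqrt((d_i+1)(d_j+1))
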
/pytorchtools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | class EarlyStopping:
5 | """Early stops the training if validation loss doesn't improve after a given patience."""
6 | def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt'):
7 | """
8 | Args:
9 | patience (int): How long to wait after last time validation loss improved.
10 | Default: 7
11 | verbose (bool): If True, prints a message for each validation loss improvement.
12 | Default: False
13 | delta (float): Minimum change in the monitored quantity to qualify as an improvement.
14 | Default: 0
15 | path (str): Path for the checkpoint to be saved to.
16 | Default: 'checkpoint.pt'
17 | """
18 | self.patience = patience
19 | self.verbose = verbose
20 | self.counter = 0
21 | self.best_score = None
22 | self.early_stop = False
23 | self.val_loss_min = np.Inf
24 | self.delta = delta
25 | self.path = path
26 |
27 | def __call__(self, val_loss, model):
28 |
29 | score = -val_loss
30 |
31 | if self.best_score is None:
32 | self.best_score = score
33 | self.save_checkpoint(val_loss, model)
34 | elif score < self.best_score + self.delta:
35 | self.counter += 1
36 | print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
37 | if self.counter >= self.patience:
38 | self.early_stop = True
39 | else:
40 | self.best_score = score
41 | self.save_checkpoint(val_loss, model)
42 | self.counter = 0
43 |
44 | def save_checkpoint(self, val_loss, model):
45 |         '''Saves the model when the validation loss decreases.'''
46 | if self.verbose:
47 | print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
48 | torch.save(model.state_dict(), self.path)
49 | self.val_loss_min = val_loss
50 |
--------------------------------------------------------------------------------
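Note (illustrative only): how EarlyStopping is meant to be wired into a training loop; the model and validation loss below are stand-ins, not code from this repository.

    import torch
    import torch.nn as nn
    from pytorchtools import EarlyStopping

    model = nn.Linear(10, 2)                             # stand-in for the real model
    stopper = EarlyStopping(patience=5, verbose=True, path='checkpoint.pt')
    for epoch in range(50):
        val_loss = float(torch.rand(1))                  # stand-in for a real validation loss
        stopper(val_loss, model)                         # checkpoints the model when val_loss improves
        if stopper.early_stop:
            print('Stopped early at epoch', epoch)
            break
    model.load_state_dict(torch.load('checkpoint.pt'))   # restore the best checkpoint
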
/state_dict_model.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dms-net/scatteringGCN/7dff3bb20068a17d17ed726c5b159f44065b589c/state_dict_model.pt
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from utils import load_citation, accuracy
4 | import time
5 | import argparse
6 | import torch
7 | import numpy as np
8 | torch.manual_seed(42)
9 | torch.cuda.manual_seed(42)
10 | np.random.seed(42)
11 | torch.backends.cudnn.deterministic = True
12 | from scipy import sparse
13 | from torch.optim.lr_scheduler import MultiStepLR,StepLR
14 |
15 | import torch.nn.functional as F
16 | import torch.optim as optim
17 | from models import GCN
18 | # Training settings
19 | parser = argparse.ArgumentParser()
20 | parser.add_argument('--dataset', type=str, default="cora",help='Dataset to use.')
21 | parser.add_argument('--no-cuda', action='store_true', default=False,
22 | help='Disables CUDA training.')
23 | parser.add_argument('--fastmode', action='store_true', default=False,
24 | help='Validate during training pass.')
25 | parser.add_argument('--seed', type=int, default=42, help='Random seed.')
26 | parser.add_argument('--epochs', type=int, default=200,
27 | help='Number of epochs to train.')
28 | parser.add_argument('--patience', type=int, default=200,
29 |                     help='Patience (in epochs) for early stopping.')
30 | parser.add_argument('--lr', type=float, default=0.005,
31 | help='Initial learning rate.')
32 | parser.add_argument('--weight_decay', type=float, default=0.0,
33 | help='Weight decay (L2 loss on parameters).')
34 | parser.add_argument('--l1', type=float, default=0.05,
35 |                     help='L1 regularization coefficient (L1 penalty on parameters).')
36 | parser.add_argument('--hid1', type=int, default=13,
37 | help='Number of hidden units.')
38 | parser.add_argument('--hid2', type=int, default=25,
39 | help='Number of hidden units.')
40 | parser.add_argument('--smoo', type=float, default=0.5,
41 | help='Smooth for Res layer')
42 | parser.add_argument('--dropout', type=float, default=0.9,
43 | help='Dropout rate (1 - keep probability).')
44 | parser.add_argument('--normalization', type=str, default='AugNormAdj',
45 | choices=['AugNormAdj'],
46 | help='Normalization method for the adjacency matrix.')
47 |
48 | parser.add_argument('--order_1',type=int, default=1)
49 | parser.add_argument('--sct_inx1', type=int, default=1)
50 | parser.add_argument('--order_2',type=int, default=1)
51 | parser.add_argument('--sct_inx2', type=int, default=3)
52 | args = parser.parse_args()
53 | args.cuda = not args.no_cuda and torch.cuda.is_available()
54 |
55 | np.random.seed(args.seed)
56 | torch.manual_seed(args.seed)
57 | if args.cuda:
58 | torch.cuda.manual_seed(args.seed)
59 |
60 | # Load data
61 | #adj, features, labels, idx_train, idx_val, idx_test = load_data()
62 | adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, args.normalization,args.cuda)
63 | # Model and optimizer
64 | model = GCN(nfeat=features.shape[1],
65 | para3=args.hid1,
66 | para4=args.hid2,
67 | nclass=labels.max().item() + 1,
68 | dropout=args.dropout,
69 | smoo=args.smoo)
70 |
71 |
72 |
73 | if args.cuda:
74 | model = model.cuda()
75 | features = features.cuda()
76 | A_tilde = A_tilde.cuda()
77 | adj = adj.cuda()
78 | labels = labels.cuda()
79 | idx_train = idx_train.cuda()
80 | idx_val = idx_val.cuda()
81 | idx_test = idx_test.cuda()
82 |
83 | optimizer = optim.Adam(model.parameters(),
84 | lr=args.lr, weight_decay=args.weight_decay)
85 | scheduler = StepLR(optimizer, step_size=50, gamma=0.9)
86 |
87 | acc_val_list = []
88 | def train(epoch):
89 | global valid_error
90 | t = time.time()
91 | model.train()
92 | optimizer.zero_grad()
93 | output = model(features,adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,[args.order_1,args.sct_inx1],[args.order_2,args.sct_inx2])
94 | loss_train = F.nll_loss(output[idx_train], labels[idx_train])
95 |
96 | regularization_loss = 0
97 | for param in model.parameters():
98 |         regularization_loss += torch.sum(torch.abs(param))  # accumulate the L1 norm over all parameters
99 |
100 | loss_train = regularization_loss*args.l1+loss_train
101 | acc_train = accuracy(output[idx_train], labels[idx_train])
102 | loss_train.backward()
103 | optimizer.step()
104 | if not args.fastmode:
105 | # Evaluate validation set performance separately,
106 | # deactivates dropout during validation run.
107 | model.eval()
108 | output = model(features,adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,[args.order_1,args.sct_inx1],[args.order_2,args.sct_inx2])
109 | loss_val = F.nll_loss(output[idx_val], labels[idx_val])
110 | acc_val = accuracy(output[idx_val], labels[idx_val])
111 | print('Epoch: {:04d}'.format(epoch+1),
112 | 'Hid1: {:04d}'.format(args.hid1),
113 | 'Hid2: {:04d}'.format(args.hid2),
114 | 'loss_train: {:.4f}'.format(loss_train.item()),
115 | 'acc_train: {:.4f}'.format(acc_train.item()),
116 | 'loss_val: {:.4f}'.format(loss_val.item()),
117 | 'acc_val: {:.4f}'.format(acc_val.item()),
118 | 'time: {:.4f}s'.format(time.time() - t))
119 | acc_val_list.append(acc_val.item())
120 | valid_error = 1.0 - acc_val.item()
121 |
122 |
123 | def test():
124 | model.eval()
125 | output = model(features,adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,[args.order_1,args.sct_inx1],[args.order_2,args.sct_inx2])
126 | loss_test = F.nll_loss(output[idx_test], labels[idx_test])
127 | acc_test = accuracy(output[idx_test], labels[idx_test])
128 | print("Test set results:",
129 | "loss= {:.4f}".format(loss_test.item()),
130 | "accuracy= {:.4f}".format(acc_test.item()))
131 | acc_val_list.append(acc_test.item())
132 |
133 | # Train model
134 | t_total = time.time()
135 | #from pytorchtools import EarlyStopping
136 |
137 | #patience = args.patience
138 | #early_stopping = EarlyStopping(patience=patience, verbose=True)
139 |
140 | for epoch in range(args.epochs):
141 | train(epoch)
142 | scheduler.step()
143 | # print(valid_error)
144 | # early_stopping(valid_error, model)
145 | # if early_stopping.early_stop:
146 | # print("Early stopping")
147 | # break
148 | print("Optimization Finished!")
149 | print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
150 |
151 | # Testing
152 | test()
153 |
154 |
--------------------------------------------------------------------------------
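Usage note: train.py expects the Planetoid files under data/ (as listed above); with the flags defined in its argument parser, a Cora run using the defaults is for example:

    python train.py --dataset cora --hid1 13 --hid2 25 --dropout 0.9 --smoo 0.5 --epochs 200

The two scattering channels are selected with --order_1/--sct_inx1 and --order_2/--sct_inx2 (order in {1, 2}, index in {1, 2, 3}), matching the branching in NGCN.forward. Note also that scattering1st and GC_withres call .cuda() unconditionally, so in practice a GPU is required even though a --no-cuda flag exists.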
/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 | import sys
5 | import pickle as pkl
6 | import networkx as nx
7 | from normalization import fetch_normalization, row_normalize
8 | from time import perf_counter
9 | def normalize_adjacency_matrix(A, I):
10 | """
11 | Creating a normalized adjacency matrix with self loops.
12 | :param A: Sparse adjacency matrix.
13 | :param I: Identity matrix.
14 |     :return A_tilde_hat: Normalized adjacency matrix.
15 | """
16 | A_tilde = A + I
17 | degrees = A_tilde.sum(axis=0)[0].tolist()
18 | D = sp.diags(degrees, [0])
19 | D = D.power(-0.5)
20 | A_tilde_hat = D.dot(A_tilde).dot(D)
21 | return A_tilde_hat
22 | def normalize(mx):
23 | """Row-normalize sparse matrix"""
24 | rowsum = np.array(mx.sum(1))
25 | r_inv = np.power(rowsum, -1).flatten()
26 | r_inv[np.isinf(r_inv)] = 0.
27 | r_mat_inv = sp.diags(r_inv)
28 | mx = r_mat_inv.dot(mx)
29 | return mx
30 |
31 | def normalizemx(mx):
32 | degrees = mx.sum(axis=0)[0].tolist()
33 | # print(degrees)
34 | D = sp.diags(degrees, [0])
35 | D = D.power(-1)
36 | mx = mx.dot(D)
37 | return mx
38 | def scattering1st(spmx,order):
39 | I_n = sp.eye(spmx.shape[0])
40 | adj_sct = 0.5*(spmx+I_n)
41 | adj_power = adj_sct
42 | adj_power = sparse_mx_to_torch_sparse_tensor(adj_power).cuda()
43 | adj_sct = sparse_mx_to_torch_sparse_tensor(adj_sct).cuda()
44 | I_n = sparse_mx_to_torch_sparse_tensor(I_n)
45 | if order>1:
46 | for i in range(order-1):
47 | adj_power = torch.spmm(adj_power,adj_sct.to_dense())
48 | print('Generating SCT')
49 | adj_int = torch.spmm((adj_power-I_n.cuda()),adj_power)
50 | else:
51 | adj_int = torch.spmm((adj_power-I_n.cuda()),adj_power.to_dense())
52 | return adj_int
53 |
54 | def parse_index_file(filename):
55 | """Parse index file."""
56 | index = []
57 | for line in open(filename):
58 | index.append(int(line.strip()))
59 | return index
60 |
61 | def preprocess_citation(adj, features, normalization="FirstOrderGCN"):
62 | adj_normalizer = fetch_normalization(normalization)
63 | adj = adj_normalizer(adj)
64 | features = row_normalize(features)
65 | return adj, features
66 |
67 | def sparse_mx_to_torch_sparse_tensor(sparse_mx):
68 | """Convert a scipy sparse matrix to a torch sparse tensor."""
69 | sparse_mx = sparse_mx.tocoo().astype(np.float32)
70 | indices = torch.from_numpy(
71 | np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
72 | values = torch.from_numpy(sparse_mx.data)
73 | shape = torch.Size(sparse_mx.shape)
74 | return torch.sparse.FloatTensor(indices, values, shape)
75 |
76 | def load_citation(dataset_str="cora", normalization="AugNormAdj", cuda=True):
77 | """
78 | Load Citation Networks Datasets.
79 | """
80 | names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
81 | objects = []
82 | for i in range(len(names)):
83 | with open("data/ind.{}.{}".format(dataset_str.lower(), names[i]), 'rb') as f:
84 | if sys.version_info > (3, 0):
85 | objects.append(pkl.load(f, encoding='latin1'))
86 | else:
87 | objects.append(pkl.load(f))
88 |
89 | x, y, tx, ty, allx, ally, graph = tuple(objects)
90 | test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
91 | test_idx_range = np.sort(test_idx_reorder)
92 |
93 | if dataset_str == 'citeseer':
94 | # Fix citeseer dataset (there are some isolated nodes in the graph)
95 | # Find isolated nodes, add them as zero-vecs into the right position
96 | test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
97 | tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
98 | tx_extended[test_idx_range-min(test_idx_range), :] = tx
99 | tx = tx_extended
100 | ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
101 | ty_extended[test_idx_range-min(test_idx_range), :] = ty
102 | ty = ty_extended
103 |
104 | features = sp.vstack((allx, tx)).tolil()
105 | features[test_idx_reorder, :] = features[test_idx_range, :]
106 | adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
107 | adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
108 | labels = np.vstack((ally, ty))
109 | labels[test_idx_reorder, :] = labels[test_idx_range, :]
110 |
111 |
112 | idx_test = test_idx_range.tolist()
113 | idx_train = range(len(y))
114 | idx_val = range(len(y), len(y)+500)
115 |
116 | # take from https://github.com/tkipf/pygcn/blob/master/pygcn/utils.py
117 | # idx_train = range(140)
118 | # idx_val = range(200, 500)
119 | # idx_test = range(500, 1500)
120 |
121 |
122 | labels = torch.LongTensor(labels)
123 | labels = torch.max(labels, dim=1)[1]
124 | idx_train = torch.LongTensor(idx_train)
125 | idx_val = torch.LongTensor(idx_val)
126 | idx_test = torch.LongTensor(idx_test)
127 |
128 | features = normalize(features)
129 | A_tilde = normalize_adjacency_matrix(adj,sp.eye(adj.shape[0]))
130 | adj = normalizemx(adj)
131 | features = torch.FloatTensor(np.array(features.todense()))
132 | print('Loading')
133 | adj_sct1 = scattering1st(adj,1) ## psi_1 = P(I-P)
134 | adj_sct2 = scattering1st(adj,2) # psi_2 = P^2(I-P^2)
135 | adj_sct4 = scattering1st(adj,4) # psi_3 = P^4(I-P^4)
136 | adj = sparse_mx_to_torch_sparse_tensor(adj)
137 | A_tilde = sparse_mx_to_torch_sparse_tensor(A_tilde)
138 | return adj,A_tilde,adj_sct1,adj_sct2,adj_sct4,features, labels, idx_train, idx_val, idx_test
139 |
140 | def sgc_precompute(features, adj, degree):
141 | t = perf_counter()
142 | for i in range(degree):
143 | features = torch.spmm(adj, features)
144 | precompute_time = perf_counter()-t
145 | return features, precompute_time
146 |
147 | def set_seed(seed, cuda):
148 | np.random.seed(seed)
149 | torch.manual_seed(seed)
150 | if cuda: torch.cuda.manual_seed(seed)
151 |
152 | def accuracy(output, labels):
153 | preds = output.max(1)[1].type_as(labels)
154 | correct = preds.eq(labels).double()
155 | correct = correct.sum()
156 | return correct / len(labels)
157 |
--------------------------------------------------------------------------------
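Note (illustrative only; requires CUDA and the data/ directory): load_citation returns the normalized adjacency, the three first-order scattering operators, and the standard Planetoid split in one call.

    from utils import load_citation

    adj, A_tilde, psi1, psi2, psi3, features, labels, idx_train, idx_val, idx_test = \
        load_citation('cora', 'AugNormAdj', cuda=True)
    print(features.shape, labels.shape)   # torch.Size([2708, 1433]) torch.Size([2708])
    print(psi1.shape)                     # each scattering operator is 2708 x 2708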