├── src
│   ├── __init__.py
│   ├── DPGGAN
│   │   ├── __init__.py
│   │   ├── DPCounter.py
│   │   ├── gcn_layer.py
│   │   ├── px_expander.py
│   │   ├── linear.py
│   │   ├── adadp.py
│   │   ├── utils_dp.py
│   │   ├── functional.py
│   │   ├── dp_aggregators.py
│   │   ├── dp_encoders.py
│   │   ├── model.py
│   │   ├── gaussian_moments.py
│   │   └── data_utils.py
│   ├── GGAN
│   │   ├── __init__.py
│   │   ├── draw.py
│   │   ├── linear.py
│   │   ├── encoders.py
│   │   ├── functional.py
│   │   ├── logger.py
│   │   ├── utils.py
│   │   ├── graph_drawer.py
│   │   ├── aggregators.py
│   │   ├── dataloader.py
│   │   ├── train.py
│   │   ├── config.py
│   │   ├── main.py
│   │   └── model.py
│   ├── logger.py
│   ├── utils.py
│   ├── graph_drawer.py
│   ├── dataloader.py
│   ├── test.py
│   ├── train.py
│   ├── config.py
│   ├── main.py
│   └── eval.py
├── graph_classification_exp
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── mlp.py
│   │   └── graphcnn.py
│   ├── README.md
│   ├── sample_IMDBMULTI.py
│   ├── preprocess_nx_data.py
│   ├── random_pred.py
│   ├── util.py
│   └── main.py
├── data.zip
├── link_classification_exp
│   ├── node2vec
│   │   ├── requirements.txt
│   │   ├── .gitignore
│   │   ├── graph
│   │   │   └── karate.edgelist
│   │   ├── LICENSE.md
│   │   ├── README.md
│   │   └── src
│   │       ├── main.py
│   │       └── node2vec.py
│   ├── test.png
│   ├── draw.py
│   ├── preprocess.py
│   └── main.py
├── run_graph_classification_exp.sh
├── .gitignore
├── run.sh
├── run_link_classification_exp.sh
├── requirements.txt
├── README.md
└── environment.yml
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/DPGGAN/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/GGAN/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/graph_classification_exp/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/graph_classification_exp/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haonan3/Secure-Network-Release-with-Link-Privacy/HEAD/data.zip
--------------------------------------------------------------------------------
/link_classification_exp/node2vec/requirements.txt:
--------------------------------------------------------------------------------
1 | networkx==1.11
2 | numpy==1.11.2
3 | gensim==0.13.3
4 |
--------------------------------------------------------------------------------
/link_classification_exp/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haonan3/Secure-Network-Release-with-Link-Privacy/HEAD/link_classification_exp/test.png
--------------------------------------------------------------------------------
/link_classification_exp/node2vec/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .DS_Store
3 | target
4 | bin
5 | build
6 | .gradle
7 | *.iml
8 | *.ipr
9 | *.iws
10 | *.log
11 | .classpath
12 | .project
13 | .settings
14 | .idea
--------------------------------------------------------------------------------
/src/DPGGAN/DPCounter.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class DPCounter:
4 |     """Tracks the moments-accountant state: steps taken, epsilon spent, sampling ratio."""
5 |     def __init__(self, args, model_args):
6 |         self.T = 0                             # number of noisy training steps taken
7 |         self.eps = 0                           # privacy budget spent so far
8 |         self.delta = model_args.delta          # target delta
9 |         self.should_stop = False               # set once eps exceeds the requirement
10 |         self.sigma = model_args.noise_sigma    # Gaussian noise multiplier
11 |         self.q = float(args.batch_size) / (args.num_samples)  # per-step sampling ratio
--------------------------------------------------------------------------------
/run_graph_classification_exp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export PYTHONPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
3 |
4 | python graph_classification_exp/preprocess_nx_data.py --dataset relabeled_dblp2 --model orig
5 |
6 | CUDA_VISIBLE_DEVICES=1 python graph_classification_exp/main.py --hidden_dim 256 --epochs 300 --lr 0.0005 --dataset relabeled_dblp2 &
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | log/
2 | data/
3 | src/__pycache__
4 | src/graph_drawer.py
5 | src/test.py
6 | src/GGAN/__pycache__
7 | src/DPGGAN/__pycache__
8 | graph_classification_exp/__pycache__
9 | graph_classification_exp/models/__pycache__
10 | graph_classification_exp/logs
11 | graph_classification_exp/dataset
12 | link_classification_exp/dataset
13 | link_classification_exp/log
14 | .DS_Store
15 | .idea
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export PYTHONPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
3 |
4 | python src/main.py --model_name GGAN --dataset new_dblp2 &
5 |
6 | python src/main.py --model_name GGAN --dataset new_IMDB_MULTI &
7 |
8 | python src/main.py --model_name GVAE --dataset new_dblp2 &
9 |
10 | python src/main.py --model_name GVAE --dataset new_IMDB_MULTI &
--------------------------------------------------------------------------------
/run_link_classification_exp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export PYTHONPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
3 |
4 | CUDA_VISIBLE_DEVICES=1 python link_classification_exp/main.py --graph_name new_IMDB_MULTI --graph_type 'GGAN_{}' --epochs 40 &
5 |
6 | CUDA_VISIBLE_DEVICES=2 python link_classification_exp/main.py --graph_name new_dblp2 --graph_type 'GGAN_{}' --epochs 40 &
--------------------------------------------------------------------------------
/graph_classification_exp/README.md:
--------------------------------------------------------------------------------
1 | The model we use for the graph classification experiment is GIN, from "How Powerful are Graph Neural Networks?".
2 |
3 | The code is from the official PyTorch implementation of the experiments in the following paper:
4 |
5 | Keyulu Xu*, Weihua Hu*, Jure Leskovec, Stefanie Jegelka. How Powerful are Graph Neural Networks? ICLR 2019.
6 |
7 | [arXiv](https://arxiv.org/abs/1810.00826) [OpenReview](https://openreview.net/forum?id=ryGs6iA5Km)
--------------------------------------------------------------------------------
/link_classification_exp/node2vec/graph/karate.edgelist:
--------------------------------------------------------------------------------
1 | 1 32
2 | 1 22
3 | 1 20
4 | 1 18
5 | 1 14
6 | 1 13
7 | 1 12
8 | 1 11
9 | 1 9
10 | 1 8
11 | 1 7
12 | 1 6
13 | 1 5
14 | 1 4
15 | 1 3
16 | 1 2
17 | 2 31
18 | 2 22
19 | 2 20
20 | 2 18
21 | 2 14
22 | 2 8
23 | 2 4
24 | 2 3
25 | 3 14
26 | 3 9
27 | 3 10
28 | 3 33
29 | 3 29
30 | 3 28
31 | 3 8
32 | 3 4
33 | 4 14
34 | 4 13
35 | 4 8
36 | 5 11
37 | 5 7
38 | 6 17
39 | 6 11
40 | 6 7
41 | 7 17
42 | 9 34
43 | 9 33
44 | 9 31
45 | 10 34
46 | 14 34
47 | 15 34
48 | 15 33
49 | 16 34
50 | 16 33
51 | 19 34
52 | 19 33
53 | 20 34
54 | 21 34
55 | 21 33
56 | 23 34
57 | 23 33
58 | 24 30
59 | 24 34
60 | 24 33
61 | 24 28
62 | 24 26
63 | 25 32
64 | 25 28
65 | 25 26
66 | 26 32
67 | 27 34
68 | 27 30
69 | 28 34
70 | 29 34
71 | 29 32
72 | 30 34
73 | 30 33
74 | 31 34
75 | 31 33
76 | 32 34
77 | 32 33
78 | 33 34
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2020.12.5
2 | cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1606601121339/work
3 | cycler==0.10.0
4 | decorator==4.4.2
5 | gensim==3.8.3
6 | joblib==1.0.0
7 | kiwisolver==1.3.1
8 | matplotlib==3.3.3
9 | mpmath==1.1.0
10 | networkx==2.5
11 | numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1610324545699/work
12 | olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1602866521163/work
13 | Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1610407356860/work
14 | powerlaw==1.4.6
15 | pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1593275161868/work
16 | pyparsing==2.4.7
17 | python-dateutil==2.8.1
18 | python-igraph==0.8.3
19 | scikit-learn==0.24.0
20 | scipy==1.5.4
21 | six @ file:///home/conda/feedstock_root/build_artifacts/six_1590081179328/work
22 | smart-open==4.1.2
23 | texttable==1.6.3
24 | threadpoolctl==2.1.0
25 | torch==1.1.0
26 | torchvision==0.3.0
27 | tqdm==4.56.0
28 |
--------------------------------------------------------------------------------
/link_classification_exp/draw.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import sys
5 | import pickle
6 | from scipy import stats
7 |
8 | models = ['Original', 'GGAN', 'DPGGAN']
9 | styles = ['k1-', 'g1-', 'r.--'] #, 'g*--', 'b^--']
10 | mean = {}
11 | mean['Original'] = [[0.1, 1.0, 10.0], [0.8661, 0.8661, 0.8661]]
12 | mean['GGAN'] = [[0.1, 1.0, 10.0], [0.6316, 0.6316, 0.6316]]
13 | mean['DPGGAN'] = [[0.1, 1.0, 10.0], [0.5798, 0.5889, 0.5931]]
14 |
15 | for m in models:
16 | plt.plot(mean[m][0], mean[m][1], styles[models.index(m)], label=m)
17 |
18 | plt.grid(linestyle='--', linewidth=0.5)
19 | plt.xlim(0, 10.1) # ind.
20 | plt.ylim(0.5, 0.9) #.MSG
21 |
22 | plt.xlabel('epsilon', fontsize=12)
23 | plt.ylabel('AUC', fontsize=12)
24 | plt.yticks(fontsize=12)
25 | plt.xticks(fontsize=12)
26 | plt.legend(fontsize=10, loc='lower right', ncol=1)
27 | plt.tight_layout()
28 |
29 | plt.savefig("test.png", format='png', dpi=200, bbox_inches='tight')
30 | plt.show()
--------------------------------------------------------------------------------
/src/GGAN/draw.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 |
5 |
6 | models = ['Original','GGAN (no DP)',
7 | 'GVAE','NetGAN',#'GraphRNN',
8 | 'DPGGAN\nepsilon=10', 'DPGGAN\nepsilon=1', 'DPGGAN\nepsilon=0.1']
9 |
10 |
11 |
12 | mean_imdb = [0.8661,0.7743,
13 | 0.7714,0.7619,
14 | 0.5931,0.5889,0.5798]
15 | mean_dblp = [0.6824,0.6637,
16 | 0.7463,0.6536,
17 | 0.5527,0.5328,0.5137]
18 | y = range(7)
19 |
20 | plt.figure(figsize=(6,3))
21 | bar_color=plt.get_cmap('RdYlGn')(np.linspace(0.15, 0.85, 2))
22 |
23 | bar1 = plt.barh(y=[i + 0.2 for i in y], height=0.4, width=mean_dblp,
24 |                 alpha=0.8, color=bar_color[0], label='DBLP')
25 |
26 | bar2 = plt.barh(y=[i - 0.2 for i in y], height=0.4, width=mean_imdb,
27 |                 alpha=0.8, color=bar_color[1], label='IMDB')
28 |
29 | plt.yticks(y,models)
30 | plt.xlim(0.5,0.9)
31 | plt.ylabel('Models')
32 | plt.xlabel('Accuracy')
33 | plt.legend()
34 | plt.tight_layout()
35 |
36 |
37 | plt.savefig("link_pred.png", format='png', dpi=200, bbox_inches='tight')
38 | plt.show()
--------------------------------------------------------------------------------
/link_classification_exp/node2vec/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 Aditya Grover
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/DPGGAN/gcn_layer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from torch.nn.modules.module import Module
4 | from torch.nn.parameter import Parameter
5 |
6 |
7 | class GraphConvolution(Module):
8 | """
9 | Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
10 | """
11 |
12 | def __init__(self, in_features, out_features, dropout=0., act=F.relu):
13 | super(GraphConvolution, self).__init__()
14 | self.in_features = in_features
15 | self.out_features = out_features
16 | self.dropout = dropout
17 | self.act = act
18 | self.weight = Parameter(torch.FloatTensor(in_features, out_features))
19 | self.reset_parameters()
20 |
21 | def reset_parameters(self):
22 | torch.nn.init.xavier_uniform_(self.weight)
23 |
24 | def forward(self, input, adj):
25 | input = F.dropout(input, self.dropout, self.training)
26 | support = torch.mm(input, self.weight)
27 | output = torch.mm(adj, support)
28 | output = self.act(output)
29 | return output
30 |
31 | def __repr__(self):
32 | return self.__class__.__name__ + ' (' \
33 | + str(self.in_features) + ' -> ' \
34 | + str(self.out_features) + ')'
--------------------------------------------------------------------------------
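A quick sanity check of the layer above; the shapes are illustrative only, and the identity matrix stands in for a normalized adjacency:

```python
# Illustrative usage of GraphConvolution (hypothetical shapes).
import torch
from src.DPGGAN.gcn_layer import GraphConvolution

layer = GraphConvolution(in_features=16, out_features=8)
x = torch.randn(5, 16)      # 5 nodes with 16-dim features
adj = torch.eye(5)          # normalized adjacency; identity = no message passing
out = layer(x, adj)         # support = x @ W, output = adj @ support, then ReLU
print(out.shape)            # torch.Size([5, 8])
```
--------------------------------------------------------------------------------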
/README.md:
--------------------------------------------------------------------------------
1 | # Secure Deep Graph Generation with Link Differential Privacy
2 |
3 | This repository is the PyTorch implementation of DPGGAN (IJCAI 2021).
4 |
5 | [arXiv](https://arxiv.org/abs/2005.00455)
6 |
7 | If you make use of the code or experiments, please cite our paper (BibTeX below).
8 |
9 | ```
10 | @inproceedings{yang2020secure,
11 | title={Secure Deep Graph Generation with Link Differential Privacy},
12 | author={Carl Yang and Haonan Wang and Ke Zhang and Liang Chen and Lichao Sun},
13 | year={2021},
14 | booktitle={The International Joint Conference on Artificial Intelligence (IJCAI)},
15 | }
16 |
17 | ```
18 |
19 | Contact: Haonan Wang (haonan3@illinois.edu), Carl Yang (yangji9181@gmail.com)
20 |
21 |
22 | ## Installation
23 | Install PyTorch following the instructions on the [official website](https://pytorch.org/). The code has been tested with PyTorch 1.1.0.
24 |
25 | Then install the other dependencies.
26 | ```
27 | conda env create -f environment.yml
28 |
29 | conda activate dpggan
30 |
31 | pip install -r requirements.txt
32 | ```
33 |
34 | ## Test run
35 | Unzip the dataset file
36 | ```
37 | unzip data.zip
38 | ```
39 |
40 | and run
41 |
42 | ```
43 | sh run.sh
44 | ```
45 |
46 | The default parameters are not the best-performing hyperparameters; specify hyperparameters through command-line arguments (see the example command below).
47 |
48 | For the graph classification and link prediction experiments, please refer to `run_graph_classification_exp.sh` and `run_link_classification_exp.sh`.
49 |
--------------------------------------------------------------------------------
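For reference, a run with explicit hyperparameters might look like the following; `--model_name` and `--dataset` are confirmed by `run.sh`, while `--batch_size` is an assumption to be checked against `src/config.py`:

```
python src/main.py --model_name GGAN --dataset new_dblp2 --batch_size 32
```
--------------------------------------------------------------------------------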
/src/DPGGAN/px_expander.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable
3 |
4 | # clip and accumulate clipped gradients
5 | def acc_scaled_grads(model, C, cum_grads):
6 |     # these two batch sizes must be equal.
7 | assert model.batch_size == model.batch_proc_size
8 | batch_size = model.batch_proc_size
9 |     counter1 = 0
10 |     counter2 = 0
11 |     # per-parameter gradient norms, keyed by parameter position
12 |     g_norm = {}
13 | for p in filter(lambda p: p.requires_grad, model.parameters()):
14 | if len(p.data.shape) == 2:
15 | continue
16 | counter2 += 1
17 | if p.grad is not None:
18 | counter1 += 1
19 | g_norm[str(counter2)] = torch.sqrt(torch.sum(p.grad.view(p.shape[0], -1) ** 2, 1))
20 |
21 | # do clipping and accumulate
22 | for p, key in zip(filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys()):
23 | if len(p.data.shape) == 2:
24 | continue
25 |         if p.grad is not None:
26 | cum_grads[key] = torch.sum((p.grad / torch.clamp(g_norm[key].contiguous().view(-1, 1, 1) / C, min=1)), dim=0)
27 |
28 |
29 | # add noise and replace model grads with cumulative grads
30 | def add_noise_with_cum_grads(model, C, sigma, cum_grads, samp_num):
31 | for p, key in zip(filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys()):
32 | if len(p.data.shape) == 2:
33 | continue
34 | proc_size = model.batch_size
35 | if key == '1':
36 | proc_size = proc_size * (samp_num+1)
37 | if p.grad is not None:
38 | # add noise to summed clipped pars
39 | if proc_size > 1:
40 | p.grad = ((cum_grads[key].expand(proc_size, -1, -1) +Variable((sigma * C)*torch.normal(mean=torch.zeros_like(p.grad[0]).data, std=1.0).expand(proc_size, -1, -1))) / proc_size)
41 | # p.grad = (torch.sum((p.grad), dim=0).expand(proc_size, -1, -1)) / proc_size
--------------------------------------------------------------------------------
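The clip-then-noise rule above can be seen in isolation with a small self-contained toy; this is an analogue of the pattern, not the repo's actual training path:

```python
# Self-contained toy of the DP-SGD pattern implemented above: per-example
# clipping with the same clamp rule as acc_scaled_grads, then Gaussian noise
# scaled by sigma * C as in add_noise_with_cum_grads.
import torch

C, sigma = 1.0, 1.0
per_example_grads = torch.randn(8, 5)                 # 8 examples, 5 parameters

norms = per_example_grads.norm(dim=1, keepdim=True)   # per-example L2 norms
clipped = per_example_grads / torch.clamp(norms / C, min=1)
noisy_sum = clipped.sum(dim=0) + sigma * C * torch.randn(5)
grad_estimate = noisy_sum / per_example_grads.shape[0]
print(grad_estimate)
```
--------------------------------------------------------------------------------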
/graph_classification_exp/models/mlp.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 | ### MLP with linear output
6 | class MLP(nn.Module):
7 | def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
8 | '''
9 | num_layers: number of layers in the neural networks (EXCLUDING the input layer). If num_layers=1, this reduces to linear model.
10 | input_dim: dimensionality of input features
11 | hidden_dim: dimensionality of hidden units at ALL layers
12 | output_dim: number of classes for prediction
13 |
14 | '''
15 |
16 | super(MLP, self).__init__()
17 |
18 | self.linear_or_not = True #default is linear model
19 | self.num_layers = num_layers
20 |
21 | if num_layers < 1:
22 | raise ValueError("number of layers should be positive!")
23 | elif num_layers == 1:
24 | #Linear model
25 | self.linear = nn.Linear(input_dim, output_dim)
26 | else:
27 | #Multi-layer model
28 | self.linear_or_not = False
29 | self.linears = torch.nn.ModuleList()
30 | self.batch_norms = torch.nn.ModuleList()
31 |
32 | self.linears.append(nn.Linear(input_dim, hidden_dim))
33 | for layer in range(num_layers - 2):
34 | self.linears.append(nn.Linear(hidden_dim, hidden_dim))
35 | self.linears.append(nn.Linear(hidden_dim, output_dim))
36 |
37 | for layer in range(num_layers - 1):
38 | self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
39 |
40 | def forward(self, x):
41 | if self.linear_or_not:
42 | #If linear model
43 | return self.linear(x)
44 | else:
45 | #If MLP
46 | h = x
47 | for layer in range(self.num_layers - 1):
48 | h = F.relu(self.batch_norms[layer](self.linears[layer](h)))
49 | return self.linears[self.num_layers - 1](h)
--------------------------------------------------------------------------------
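A minimal shape check for the MLP above; the dimensions are arbitrary, and a batch size above 1 keeps BatchNorm1d happy in training mode:

```python
# Hypothetical smoke test for the MLP above.
import torch
from graph_classification_exp.models.mlp import MLP

mlp = MLP(num_layers=3, input_dim=16, hidden_dim=32, output_dim=4)
x = torch.randn(10, 16)     # batch of 10 feature vectors
out = mlp(x)                # two hidden layers with BatchNorm + ReLU, then a linear head
print(out.shape)            # torch.Size([10, 4])
```
--------------------------------------------------------------------------------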
/src/DPGGAN/linear.py:
--------------------------------------------------------------------------------
1 | '''
2 |
3 | Linear module modified for the expander and clipping individual gradients.
4 |
5 | This code is due to Mikko Heikkilä (@mixheikk)
6 |
7 | '''
8 | import math
9 |
10 | import torch
11 | from torch.nn.parameter import Parameter
12 | import src.DPGGAN.functional as F
13 | from torch.nn.modules import Module
14 |
15 |
16 | # Unlike the standard Linear, this custom Linear creates a weight/bias slice for each item in the batch
17 | class Linear(Module):
18 | def __init__(self, in_features, out_features, bias=True, batch_size = None):
19 | super(Linear, self).__init__()
20 | self.in_features = in_features
21 | self.out_features = out_features
22 | self.batch_size = batch_size
23 | if batch_size is not None:
24 | self.weight = Parameter(torch.Tensor(batch_size, out_features, in_features))
25 | else:
26 | self.weight = Parameter(torch.Tensor(out_features, in_features))
27 | if bias:
28 | if batch_size is not None:
29 | self.bias = Parameter(torch.Tensor(batch_size, out_features))
30 | else:
31 | self.bias = Parameter(torch.Tensor(out_features))
32 | else:
33 | self.register_parameter('bias', None)
34 | self.reset_parameters()
35 |
36 | def reset_parameters(self):
37 | stdv = 1. / math.sqrt(self.weight.size(1))
38 | self.weight.data.uniform_(-stdv, stdv)
39 | if self.bias is not None:
40 | self.bias.data.uniform_(-stdv, stdv)
41 |
42 | def forward(self, input, for_test=False):
43 | if len(input.shape) == 2 and not for_test:
44 | input = input.view(input.shape[0],1,input.shape[1])
45 | return F.linear(input, self.weight, self.bias, for_test=for_test)
46 |
47 | def __repr__(self):
48 | return self.__class__.__name__ + '(' \
49 | + 'in_features=' + str(self.in_features) \
50 | + ', out_features=' + str(self.out_features) \
51 | + ', bias=' + str(self.bias is not None) + ')'
52 |
--------------------------------------------------------------------------------
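How the expanded (per-example) weights above are exercised; a sketch assuming the training path (`for_test=False`) and the companion `src/DPGGAN/functional.py` later in this dump:

```python
# Sketch of the per-example Linear: with batch_size set, each example gets its
# own (out_features, in_features) weight slice, so per-example gradients exist.
import torch
from src.DPGGAN.linear import Linear

lin = Linear(in_features=4, out_features=3, batch_size=8)
x = torch.randn(8, 4)       # one row per example
y = lin(x)                  # viewed as (8, 1, 4), bmm'd with the (8, 3, 4) weights
print(y.shape)              # torch.Size([8, 3])
print(lin.weight.shape)     # torch.Size([8, 3, 4]) -- one weight per example
```
--------------------------------------------------------------------------------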
/src/GGAN/linear.py:
--------------------------------------------------------------------------------
1 | '''
2 |
3 | Linear module modified for the expander and clipping individual gradients.
4 |
5 | This code is due to Mikko Heikkilä (@mixheikk)
6 |
7 | '''
8 | import math
9 |
10 | import torch
11 | from torch.nn.parameter import Parameter
12 | from torch.nn.modules import Module
13 |
14 |
15 | # Unlike the standard Linear, this custom Linear creates a weight/bias slice for each item in the batch
16 | class Linear(Module):
17 | def __init__(self, in_features, out_features, bias=True, batch_size = None):
18 | super(Linear, self).__init__()
19 | self.in_features = in_features
20 | self.out_features = out_features
21 | self.batch_size = batch_size
22 | if batch_size is not None:
23 | self.weight = Parameter(torch.Tensor(batch_size, in_features, out_features))
24 | else:
25 | self.weight = Parameter(torch.Tensor(in_features, out_features))
26 | if bias:
27 | if batch_size is not None:
28 | self.bias = Parameter(torch.Tensor(batch_size, out_features))
29 | else:
30 | self.bias = Parameter(torch.Tensor(out_features))
31 | else:
32 | self.register_parameter('bias', None)
33 | self.reset_parameters()
34 |
35 | def reset_parameters(self):
36 | stdv = 1. / math.sqrt(self.weight.size(1))
37 | self.weight.data.uniform_(-stdv, stdv)
38 | if self.bias is not None:
39 | self.bias.data.uniform_(-stdv, stdv)
40 |
41 | def forward(self, input, nodes):
42 | hidden = input.view(input.shape[0], 1, input.shape[1])
43 | sub_weight = self.weight[nodes,]
44 | output = hidden.matmul(sub_weight)
45 | output = output.view(output.shape[0], output.shape[2])
46 | return output
47 |
48 | def __repr__(self):
49 | return self.__class__.__name__ + '(' \
50 | + 'in_features=' + str(self.in_features) \
51 | + ', out_features=' + str(self.out_features) \
52 | + ', bias=' + str(self.bias is not None) + ')'
53 |
--------------------------------------------------------------------------------
/src/GGAN/encoders.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import init
4 | import torch.nn.functional as F
5 |
6 | class Encoder(nn.Module):
7 | """
8 |     Encodes a node's features using the 'convolutional' GraphSAGE approach.
9 | """
10 | def __init__(self, feature_dim, embed_dim, adj_lists, aggregator, first_layer,num_sample=10, gcn=False, cuda=False):
11 | super(Encoder, self).__init__()
12 | self.feat_dim = feature_dim
13 | self.adj_lists = adj_lists
14 | self.aggregator = aggregator
15 | self.num_sample = num_sample
16 | self.first_layer = first_layer
17 |
18 | self.gcn = gcn
19 | self.embed_dim = embed_dim
20 | self.cuda = cuda
21 | self.aggregator.cuda = cuda
22 | if self.gcn:
23 | output_dim = self.feat_dim
24 | else:
25 | output_dim = 2 * self.feat_dim
26 | self.weight = nn.Parameter(torch.FloatTensor(embed_dim, output_dim))
27 | init.xavier_uniform_(self.weight)
28 |
29 | def forward(self, features, nodes, samp_neighs=None, feature_dict=None):
30 | """
31 | Generates embeddings for a batch of nodes.
32 |
33 | nodes -- list of nodes
34 | """
35 | if self.first_layer:
36 | neigh_feats = self.aggregator.forward(features, nodes,
37 | [self.adj_lists[int(node)] for node in nodes],
38 | self.num_sample)
39 | else:
40 |             assert (samp_neighs is not None)
41 |             assert (feature_dict is not None)
42 | neigh_feats = self.aggregator.forward(features, nodes, samp_neighs,
43 | self.num_sample, feature_dict=feature_dict)
44 | if not self.gcn:
45 | if self.first_layer:
46 | self_feats = features(torch.LongTensor(nodes))
47 | else:
48 | self_feats = features[nodes]
49 | combined = torch.cat([self_feats, neigh_feats], dim=1)
50 | else:
51 | combined = neigh_feats
52 | combined = F.relu(self.weight.mm(combined.t()))
53 | return combined.t()
54 |
--------------------------------------------------------------------------------
/src/GGAN/functional.py:
--------------------------------------------------------------------------------
1 |
2 | """Functional interface"""
3 |
4 | import warnings
5 | import math
6 | from operator import mul
7 | from functools import reduce
8 | import sys
9 |
10 | import torch
11 | #from torch._C import _infer_size, _add_docstr
12 | #from . import _functions
13 | from torch.nn import _functions
14 | #from .modules import utils
15 | from torch.nn.modules import utils
16 | #from ._functions.linear import Bilinear
17 | #from torch.nn._functions.linear import Bilinear
18 | #from ._functions.padding import ConstantPadNd
19 | #from torch.nn._functions.padding import ConstantPadNd
20 | #from ._functions import vision
21 | #from torch.nn._functions import vision
22 | #from ._functions.thnn.fold import Col2Im, Im2Col
23 | #from torch.nn._functions.thnn.fold import Col2Im,Im2Col
24 | from torch.autograd import Variable
25 | #from .modules.utils import _single, _pair, _triple
26 | #from torch.nn.modules.utils import _single, _pair, _triple
27 |
28 |
29 | '''
30 | Linear layer modified for PX gradients
31 |
32 | The code is due to Mikko Heikkilä (@mixheikk)
33 | '''
34 |
35 |
36 | # Note: bias not checked yet
37 | def linear(input, weight, bias=None, batch_size=None, nodes=None):
38 | """
39 | Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
40 |
41 | Shape:
42 | - Input: :math:`(N, *, in\_features)` where `*` means any number of
43 | additional dimensions
44 | - Weight: :math:`(out\_features, in\_features)`
45 | - Bias: :math:`(out\_features)`
46 | - Output: :math:`(N, *, out\_features)`
47 | """
48 |     if input.dim() == 2 and bias is not None:
49 |         # the fused 2D path is not supported for per-example (PX) weights
50 |         print('fused op in functional.linear not implemented yet!')
51 |         sys.exit(1)
52 |
53 |     # pick each node's weight slice and apply it batch-wise
54 |     sub_weight = weight[nodes,]
55 |     output = input.matmul(torch.transpose(sub_weight, -2, -1))
56 |
57 |     # TODO: check bias handling once the rest works
58 |     if bias is not None:
59 |         output += bias
60 |     return output
61 |
--------------------------------------------------------------------------------
/src/logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | dir_path = os.path.dirname(os.path.realpath(__file__))
5 | root_path = os.path.abspath(os.path.join(dir_path, os.pardir))
6 |
7 |
8 | class stat_logger:
9 | def __init__(self, args, current_time):
10 | self.log_file_name = '{}_{}_{}.txt'.format(current_time, args.model_name, args.dataset_str)
11 | self.log_save_folder = root_path + '/log/txt_log/'
12 | self.save_path = self.log_save_folder + self.log_file_name
13 |
14 |
15 |     def check_folder(self):
16 |         os.makedirs(self.log_save_folder, exist_ok=True)
17 |
18 | def write(self, log_info):
19 | with open(self.save_path, 'a') as log_file:
20 | log_file.write(log_info + '\n')
21 |
22 |
23 | def form_generated_stat_log(self, epoch, property_cache):
24 | stat_vec_cache = []
25 | assert len(property_cache) > 1
26 | for property in property_cache:
27 | _, stat_vec = self.dict_to_vec(property)
28 | stat_vec_cache.append(stat_vec)
29 | stat_vec_mean = np.array(stat_vec_cache).mean(axis=0)
30 | stat_vec_str = ["%.3f" % number for number in stat_vec_mean]
31 | stat_vec_log = ' '.join(stat_vec_str)
32 | log = 'Epoch@{}: '.format(epoch) + stat_vec_log
33 | return log
34 |
35 |
36 | def from_dp_log(self, model):
37 | counter = model.dp_counter
38 |
39 | def form_original_stat_log(self, property):
40 | stat_name, stat_vec = self.dict_to_vec(property)
41 | stat_vec_str = ["%.3f" % number for number in stat_vec]
42 | stat_name_log = ' '.join(stat_name)
43 | stat_vec_log = ' '.join(stat_vec_str)
44 | return stat_name_log + '\n' + 'original_graph: ' + stat_vec_log
45 |
46 |
47 | def form_args_log_content(self, args, model_args):
48 | args_info_str = str(args).split('Namespace')[1].split('(')[1].split(')')[0]
49 | model_args_info_str = str(model_args.__dict__).split('{')[1].split('}')[0]
50 | return 'Args: {}.\nModel_Args: {}.\n'.format(args_info_str, model_args_info_str)
51 |
52 |
53 | def dict_to_vec(self, stat_dict):
54 | stat_name = list(stat_dict.keys())
55 | stat_vec = np.array(list(stat_dict.values()))
56 | return stat_name, stat_vec
--------------------------------------------------------------------------------
/src/GGAN/logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | dir_path = os.path.dirname(os.path.realpath(__file__))
5 | root_path = os.path.abspath(os.path.join(dir_path, os.pardir))
6 |
7 |
8 | class stat_logger:
9 | def __init__(self, args, current_time):
10 | self.log_file_name = '{}_{}_{}.txt'.format(current_time, args.model_name, args.dataset_str)
11 | self.log_save_folder = root_path + '/log/txt_log/'
12 | self.save_path = self.log_save_folder + self.log_file_name
13 |
14 |
15 |     def check_folder(self):
16 |         os.makedirs(self.log_save_folder, exist_ok=True)
17 |
18 | def write(self, log_info):
19 | with open(self.save_path, 'a') as log_file:
20 | log_file.write(log_info + '\n')
21 |
22 |
23 | def form_generated_stat_log(self, epoch, property_cache):
24 | stat_vec_cache = []
25 | assert len(property_cache) > 1
26 | for property in property_cache:
27 | _, stat_vec = self.dict_to_vec(property)
28 | stat_vec_cache.append(stat_vec)
29 | stat_vec_mean = np.array(stat_vec_cache).mean(axis=0)
30 | stat_vec_str = ["%.3f" % number for number in stat_vec_mean]
31 | stat_vec_log = ' '.join(stat_vec_str)
32 | log = 'Epoch@{}: '.format(epoch) + stat_vec_log
33 | return log
34 |
35 |
36 | def from_dp_log(self, model):
37 | counter = model.dp_counter
38 |
39 | def form_original_stat_log(self, property):
40 | stat_name, stat_vec = self.dict_to_vec(property)
41 | stat_vec_str = ["%.3f" % number for number in stat_vec]
42 | stat_name_log = ' '.join(stat_name)
43 | stat_vec_log = ' '.join(stat_vec_str)
44 | return stat_name_log + '\n' + 'original_graph: ' + stat_vec_log
45 |
46 |
47 | def form_args_log_content(self, args, model_args):
48 | args_info_str = str(args).split('Namespace')[1].split('(')[1].split(')')[0]
49 | model_args_info_str = str(model_args.__dict__).split('{')[1].split('}')[0]
50 | return 'Args: {}.\nModel_Args: {}.\n'.format(args_info_str, model_args_info_str)
51 |
52 |
53 | def dict_to_vec(self, stat_dict):
54 | stat_name = list(stat_dict.keys())
55 | stat_vec = np.array(list(stat_dict.values()))
56 | return stat_name, stat_vec
--------------------------------------------------------------------------------
/link_classification_exp/node2vec/README.md:
--------------------------------------------------------------------------------
1 | # node2vec
2 |
3 | This repository provides a reference implementation of *node2vec* as described in the paper:
4 | > node2vec: Scalable Feature Learning for Networks.
5 | > Aditya Grover and Jure Leskovec.
6 | > Knowledge Discovery and Data Mining, 2016.
7 | >
8 |
9 | The *node2vec* algorithm learns continuous representations for nodes in any (un)directed, (un)weighted graph. Please check the [project page](https://snap.stanford.edu/node2vec/) for more details.
10 |
11 | ### Basic Usage
12 |
13 | #### Example
14 | To run *node2vec* on Zachary's karate club network, execute the following command from the project home directory:
15 | ``python src/main.py --input graph/karate.edgelist --output emb/karate.emd``
16 |
17 | #### Options
18 | You can check out the other options available to use with *node2vec* using:
19 | ``python src/main.py --help``
20 |
21 | #### Input
22 | The supported input format is an edgelist:
23 |
24 | node1_id_int node2_id_int
25 |
26 | The graph is assumed to be undirected and unweighted by default. These options can be changed by setting the appropriate flags.
27 |
28 | #### Output
29 | The output file has *n+1* lines for a graph with *n* vertices.
30 | The first line has the following format:
31 |
32 | num_of_nodes dim_of_representation
33 |
34 | The next *n* lines are as follows:
35 |
36 | node_id dim1 dim2 ... dimd
37 |
38 | where dim1, ... , dimd is the *d*-dimensional representation learned by *node2vec*.
39 |
40 | ### Citing
41 | If you find *node2vec* useful for your research, please consider citing the following paper:
42 |
43 | @inproceedings{node2vec-kdd2016,
44 | author = {Grover, Aditya and Leskovec, Jure},
45 | title = {node2vec: Scalable Feature Learning for Networks},
46 | booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
47 | year = {2016}
48 | }
49 |
50 |
51 | ### Miscellaneous
52 |
53 | Please send any questions you might have about the code and/or the algorithm to the author.
54 |
55 | *Note:* This is only a reference implementation of the *node2vec* algorithm and could benefit from several performance enhancement schemes, some of which are discussed in the paper.
56 |
--------------------------------------------------------------------------------
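To make the input/output formats above concrete, a hedged example round trip; the embedding values are made up, with `--dimensions 4` assumed:

```
# input edgelist (graph/karate.edgelist), one edge per line
1 32
1 22

# output embedding file (emb/karate.emd): header, then one line per node
34 4
1 0.0231 -0.1142 0.3391 0.0077
```
--------------------------------------------------------------------------------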
/src/utils.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import numpy as np
3 | import networkx as nx
4 | import scipy
5 | import scipy.sparse as sp
6 | import matplotlib.pyplot as plt
7 | from src.DPGGAN.data_utils import make_adj_label
8 |
9 | def sigmoid(x):
10 | return 1 / (1 + np.exp(-x))
11 |
12 |
13 | def graph_to_adj_list(adj):
14 | # Sparse adj matrix to adj lists
15 | G = nx.from_scipy_sparse_matrix(adj)
16 | adj_lists = defaultdict(set)
17 |
18 | # Check isolated node before training
19 | for node, adjacencies in enumerate(G.adjacency()):
20 | if len(list(adjacencies[1].keys())) == 0:
21 | print("Node %d is isolated !!!" % node)
22 | assert False
23 | adj_lists[node] = set(list(adjacencies[1].keys()))
24 |
25 | return adj_lists
26 |
27 |
28 | def save_top_n(adj, n, threshold=None):
29 | if threshold is None:
30 | il1 = np.tril_indices(adj.shape[0])
31 | adj[il1] = float("-infinity")
32 | index = adj.reshape((-1,)).argsort()[-n:]
33 | (row, col) = divmod(index, adj.shape[0])
34 | top_n = np.zeros_like(adj)
35 | top_n[row,col] = 1
36 | m = np.ones_like(adj)
37 | m[(top_n + top_n.T) == 0] = 0
38 | return m, 0
39 | else:
40 | # find neck_value
41 | adj_ = adj.copy()
42 | il1 = np.tril_indices(adj_.shape[0])
43 | adj_[il1] = float("-infinity")
44 | index = adj_.reshape((-1,)).argsort()[-n:]
45 | last_one = index[0]
46 | (row, col) = divmod(last_one, adj_.shape[0])
47 | neck_value = adj[row,col]
48 | # convert predict adj to adj(0,1)
49 | adj[adj >= threshold] = 1
50 | adj[adj < threshold] = 0
51 | return adj, neck_value
52 |
53 |
54 | def save_edge_num(graph):
55 | graph = graph - sp.dia_matrix((graph.diagonal()[np.newaxis, :], [0]), shape=graph.shape)
56 | graph.eliminate_zeros()
57 | assert np.diag(graph.todense()).sum() == 0
58 |     original_graph = nx.from_scipy_sparse_matrix(graph)
59 |     n = original_graph.number_of_edges()
60 | return n
61 |
62 |
63 |
64 | def sample_subgraph(args, node_num, dataset):
65 | index = np.random.choice(node_num, args.batch_size, replace=False)
66 | sub_adj = make_adj_label(index, dataset.adj)
67 | return index, sub_adj
--------------------------------------------------------------------------------
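A small usage sketch for `graph_to_adj_list` above, using the networkx 2.5 API pinned in requirements.txt:

```python
# Illustrative: build adjacency lists from a scipy-sparse adjacency matrix.
import networkx as nx
from src.utils import graph_to_adj_list

G = nx.karate_club_graph()            # connected, so the isolated-node assert passes
adj = nx.to_scipy_sparse_matrix(G)
adj_lists = graph_to_adj_list(adj)    # {node_id: set(neighbor_ids)}
print(sorted(adj_lists[0])[:5])
```
--------------------------------------------------------------------------------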
/src/GGAN/utils.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import numpy as np
3 | import networkx as nx
4 | import scipy
5 | import scipy.sparse as sp
6 | import matplotlib.pyplot as plt
7 | from src.DPGGAN.data_utils import make_adj_label
8 |
9 | def sigmoid(x):
10 | return 1 / (1 + np.exp(-x))
11 |
12 |
13 | def graph_to_adj_list(adj):
14 | # Sparse adj matrix to adj lists
15 | G = nx.from_scipy_sparse_matrix(adj)
16 | adj_lists = defaultdict(set)
17 |
18 | # Check isolated node before training
19 | for node, adjacencies in enumerate(G.adjacency()):
20 | if len(list(adjacencies[1].keys())) == 0:
21 | print("Node %d is isolated !!!" % node)
22 | assert False
23 | adj_lists[node] = set(list(adjacencies[1].keys()))
24 |
25 | return adj_lists
26 |
27 |
28 | def save_top_n(adj, n, threshold=None):
29 | if threshold is None:
30 | il1 = np.tril_indices(adj.shape[0])
31 | adj[il1] = float("-infinity")
32 | index = adj.reshape((-1,)).argsort()[-n:]
33 | (row, col) = divmod(index, adj.shape[0])
34 | top_n = np.zeros_like(adj)
35 | top_n[row,col] = 1
36 | m = np.ones_like(adj)
37 | m[(top_n + top_n.T) == 0] = 0
38 | return m, 0
39 | else:
40 | # find neck_value
41 | adj_ = adj.copy()
42 | il1 = np.tril_indices(adj_.shape[0])
43 | adj_[il1] = float("-infinity")
44 | index = adj_.reshape((-1,)).argsort()[-n:]
45 | last_one = index[0]
46 | (row, col) = divmod(last_one, adj_.shape[0])
47 | neck_value = adj[row,col]
48 | # convert predict adj to adj(0,1)
49 | adj[adj >= threshold] = 1
50 | adj[adj < threshold] = 0
51 | return adj, neck_value
52 |
53 |
54 | def save_edge_num(graph):
55 | graph = graph - sp.dia_matrix((graph.diagonal()[np.newaxis, :], [0]), shape=graph.shape)
56 | graph.eliminate_zeros()
57 | assert np.diag(graph.todense()).sum() == 0
58 |     original_graph = nx.from_scipy_sparse_matrix(graph)
59 |     n = original_graph.number_of_edges()
60 | return n
61 |
62 |
63 |
64 | def sample_subgraph(args, node_num, dataset):
65 | index = np.random.choice(node_num, args.batch_size, replace=False)
66 | sub_adj = make_adj_label(index, dataset.adj)
67 | return index, sub_adj
--------------------------------------------------------------------------------
/src/DPGGAN/adadp.py:
--------------------------------------------------------------------------------
1 | '''
2 | An implementation of the ADADP algorithm for neural networks,
3 | described in
4 |
5 | Koskela, A. and Honkela, A.,
6 | Learning rate adaptation for differentially private stochastic gradient descent.
7 | arXiv preprint arXiv:1809.03832. (2018)
8 |
9 | The code is due to Antti Koskela (@koskeant)
10 |
11 | '''
12 | import torch
13 | from torch.optim.optimizer import Optimizer
14 | import numpy as np
15 |
16 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
17 |
18 | class ADADP(Optimizer):
19 |
20 | def __init__(self, params, lr=1e-3):
21 | defaults = dict(lr=lr)
22 | self.p0 = None
23 | self.p1 = None
24 | self.lrs = lr
25 | self.accepted = 0
26 | self.failed = 0
27 |
28 | self.lrs_history = []
29 |
30 | super(ADADP, self).__init__(params, defaults)
31 |
32 | def step1(self):
33 |
34 | del self.p0
35 | self.p0 = []
36 |
37 | del self.p1
38 | self.p1 = []
39 |
40 | for group in self.param_groups:
41 |
42 | for p in group['params']:
43 | if p.grad is None:
44 | continue
45 |
46 | dd = p.data.clone()
47 | self.p0.append(dd)
48 |
49 | self.p1.append(p.data - self.lrs*p.grad.data)
50 | p.data.add_(-0.5*self.lrs, p.grad.data)
51 |
52 | def step2(self, tol=1.0):
53 |
54 | for group in self.param_groups:
55 |
56 | err_e = 0.0
57 |
58 | for ijk,p in enumerate(group['params']):
59 | p.data.add_(-0.5*self.lrs, p.grad.data)
60 | err_e += (((self.p1[ijk] - p.data)**2/(torch.max(torch.ones(self.p1[ijk].size()).to(device),self.p1[ijk]**2))).norm(1))
61 |
62 | err_e = np.sqrt(err_e)
63 |
64 | self.lrs = float(self.lrs*min(max(np.sqrt(tol/err_e),0.9), 1.1))
65 |
66 | ## Accept the step only if err < tol.
67 | #if err_e > 1.0*tol:
68 | # for ijk,p in enumerate(group['params']):
69 | # p.data = self.p0[ijk]
70 | #if err_e < tol:
71 | # self.accepted += 1
72 | #else :
73 | # self.failed += 1
74 |
75 | self.lrs_history.append(self.lrs)
76 |
77 |
78 |
--------------------------------------------------------------------------------
/src/DPGGAN/utils_dp.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from collections import OrderedDict
3 | from torch.autograd import Variable
4 | from src.DPGGAN import gaussian_moments as gm, px_expander
5 |
6 | '''
7 | Update the privacy budget (epsilon) via the moments accountant.
8 |
9 | dp_counter: DPCounter holding q, sigma, T, and delta; its eps field is updated in place.
10 | '''
11 | def update_privacy_pars(dp_counter):
12 | verify = False
13 | max_lmbd = 32
14 | lmbds = range(1, max_lmbd + 1)
15 | log_moments = []
16 | for lmbd in lmbds:
17 | log_moment = 0
18 | '''
19 | print('Here q = ' + str(priv_pars['q']))
20 | print('Here sigma = ' + str(priv_pars['sigma']))
21 | print('Here T = ' + str(priv_pars['T']))
22 | '''
23 | log_moment += gm.compute_log_moment(dp_counter.q, dp_counter.sigma, dp_counter.T, lmbd, verify=verify)
24 | log_moments.append((lmbd, log_moment))
25 | dp_counter.eps, _ = gm.get_privacy_spent(log_moments, target_delta=dp_counter.delta)
26 | return dp_counter
27 |
28 |
29 | '''
30 | create container for accumulated gradient
31 |
32 | :return is the gradient container
33 | '''
34 | def create_cum_grads(model):
35 | cum_grads = OrderedDict()
36 | for i, p in enumerate(model.parameters()):
37 | if p.requires_grad:
38 | cum_grads[str(i)] = Variable(torch.zeros(p.shape[1:]), requires_grad=False)
39 | return cum_grads
40 |
41 |
42 |
43 | def update_privacy_account(model_args, model):
44 | stop_signal = False
45 | if 'dp_counter' in set(model.__dict__.keys()):
46 | model.dp_counter.T += 1
47 | update_privacy_pars(model.dp_counter)
48 | model_args.grad_norm_max *= model_args.C_decay
49 | if model.dp_counter.eps > model_args.eps_requirement:
50 | model.dp_counter.should_stop = True
51 | stop_signal = model.dp_counter.should_stop
52 | return stop_signal
53 |
54 |
55 |
56 | def perturb_grad(model_args, model):
57 |     # For the DP model: accumulate clipped grads in the container cum_grads, then add noise to their sum
58 | px_expander.acc_scaled_grads(model=model, C=model_args.grad_norm_max, cum_grads=model.cum_grads)
59 |
60 |     # we do not use the lot/batch structure, so noise is added right after accumulating grads
61 | px_expander.add_noise_with_cum_grads(model=model, C=model_args.grad_norm_max,
62 | sigma=model_args.noise_sigma, cum_grads=model.cum_grads,
63 | samp_num=model_args.samp_num)
--------------------------------------------------------------------------------
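How `DPCounter` and `update_privacy_pars` fit together; a minimal sketch, with the argument objects faked via `SimpleNamespace` rather than the repo's real configs:

```python
# Sketch: driving the moments accountant with DPCounter (src/DPGGAN/DPCounter.py).
from types import SimpleNamespace
from src.DPGGAN.DPCounter import DPCounter
from src.DPGGAN.utils_dp import update_privacy_pars

args = SimpleNamespace(batch_size=32, num_samples=1024)    # sampling ratio q = 0.03125
model_args = SimpleNamespace(delta=1e-5, noise_sigma=1.0)

counter = DPCounter(args, model_args)
counter.T = 100                          # pretend 100 noisy steps were taken
counter = update_privacy_pars(counter)   # eps from log-moments at this (q, sigma, T)
print(counter.eps)
```
--------------------------------------------------------------------------------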
/src/DPGGAN/functional.py:
--------------------------------------------------------------------------------
1 |
2 | """Functional interface"""
3 |
4 | import warnings
5 | import math
6 | from operator import mul
7 | from functools import reduce
8 | import sys
9 |
10 | import torch
11 | #from torch._C import _infer_size, _add_docstr
12 | #from . import _functions
13 | from torch.nn import _functions
14 | #from .modules import utils
15 | from torch.nn.modules import utils
16 | #from ._functions.linear import Bilinear
17 | #from torch.nn._functions.linear import Bilinear
18 | #from ._functions.padding import ConstantPadNd
19 | #from torch.nn._functions.padding import ConstantPadNd
20 | #from ._functions import vision
21 | #from torch.nn._functions import vision
22 | #from ._functions.thnn.fold import Col2Im, Im2Col
23 | #from torch.nn._functions.thnn.fold import Col2Im,Im2Col
24 | from torch.autograd import Variable
25 | #from .modules.utils import _single, _pair, _triple
26 | #from torch.nn.modules.utils import _single, _pair, _triple
27 |
28 |
29 | '''
30 | Linear layer modified for PX gradients
31 |
32 | The code is due to Mikko Heikkilä (@mixheikk)
33 | '''
34 |
35 |
36 | # Note: bias not checked yet
37 | def linear(input, weight, bias=None, batch_size=None, for_test=None):
38 | """
39 | Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
40 |
41 | Shape:
42 | - Input: :math:`(N, *, in\_features)` where `*` means any number of
43 | additional dimensions
44 | - Weight: :math:`(out\_features, in\_features)`
45 | - Bias: :math:`(out\_features)`
46 | - Output: :math:`(N, *, out\_features)`
47 | """
48 | if input.dim() == 2 and bias is not None:
49 |         # fused op is marginally faster (note: bias is not applied on this 2D path)
50 | if batch_size is None:
51 | return torch.mm(input, weight.t())
52 | else:
53 | print('fused op in functional.linear not implemented yet!')
54 | sys.exit(1)
55 |
56 |
57 | if for_test:
58 | if len(list(input.shape)) == 3:
59 | input = input.view(input.shape[0], input.shape[2])
60 | # output = input.matmul(torch.transpose(weight,-2,-1)[0])
61 | output = torch.mm(input, weight[0].t())
62 | assert len(list(output.shape)) == 2
63 | else:
64 | # output = input.matmul(torch.transpose(weight,-2,-1))
65 | output = torch.bmm(input, weight.permute(0,2,1))
66 | output = output.view(output.shape[0], output.shape[2])
67 | assert len(list(output.shape)) == 2
68 |
69 |     # TODO: check bias handling once the rest works
70 | if bias is not None:
71 | output += bias
72 | return output
73 |
--------------------------------------------------------------------------------
/graph_classification_exp/sample_IMDBMULTI.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import pickle
4 | import random
5 | from collections import defaultdict
6 |
7 | import networkx as nx
8 | dir_path = os.path.dirname(os.path.realpath(__file__))
9 | root_path = os.path.abspath(os.path.join(dir_path, os.pardir))
10 |
11 | def relabel_dblp2(data_list):
12 | # according to the method that we create dblp2 data, we can relabel dblp2 data to three classes
13 | # For (label<24) ->0; (24