├── .DS_Store
├── static
│   ├── .DS_Store
│   ├── images
│   │   ├── fig1.jpg
│   │   ├── fig2.jpg
│   │   ├── fig3.jpg
│   │   ├── fig4.jpg
│   │   ├── fig5.jpg
│   │   ├── .DS_Store
│   │   └── table1.jpg
│   ├── js
│   │   ├── index.js
│   │   ├── bulma-slider.min.js
│   │   ├── bulma-slider.js
│   │   └── bulma-carousel.min.js
│   └── css
│       ├── index.css
│       ├── bulma-carousel.min.css
│       └── bulma-slider.min.css
├── layers.py
├── LICENSE
├── experiments.sh
├── data_loader.py
├── model.py
├── README.md
├── calculate_starved_nodes.py
├── utils.py
├── GCN_KNN_R.txt
├── GCN_KNN.txt
├── GCN_KNN_U.txt
├── citation_networks.py
├── main.py
└── index.html

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/.DS_Store
--------------------------------------------------------------------------------
/static/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/.DS_Store
--------------------------------------------------------------------------------
/static/images/fig1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/fig1.jpg
--------------------------------------------------------------------------------
/static/images/fig2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/fig2.jpg
--------------------------------------------------------------------------------
/static/images/fig3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/fig3.jpg
--------------------------------------------------------------------------------
/static/images/fig4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/fig4.jpg
--------------------------------------------------------------------------------
/static/images/fig5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/fig5.jpg
--------------------------------------------------------------------------------
/static/images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/.DS_Store
--------------------------------------------------------------------------------
/static/images/table1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jianglin954/LGI-LS/HEAD/static/images/table1.jpg
--------------------------------------------------------------------------------
/layers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | 
4 | class GCNConv_dense(nn.Module):
5 |     def __init__(self, input_size, output_size):
6 |         super(GCNConv_dense, self).__init__()
7 |         self.linear = nn.Linear(input_size, output_size)
8 | 
9 |     def init_para(self):
10 |         self.linear.reset_parameters()
11 | 
12 |     def forward(self, input, A, sparse=False):
13 |         hidden = self.linear(input)
14 |         if sparse:
15 |             output = torch.sparse.mm(A, hidden)
16 |         else:
17 |             output = torch.matmul(A, hidden)
18 |         return output
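A minimal usage sketch of `GCNConv_dense`, assuming the repository root is on the Python path. The toy adjacency and its symmetric normalization here are only illustrative; the experiments themselves build kNN graphs with the helpers in `utils.py`.

```python
import torch
from layers import GCNConv_dense

n, d, h = 5, 16, 8                          # toy sizes: nodes, feature dim, hidden dim
x = torch.randn(n, d)                       # node features

A = (torch.rand(n, n) < 0.4).float()        # random dense adjacency
A = ((A + A.t()) > 0).float()               # symmetrize
A.fill_diagonal_(1.0)                       # add self-loops

deg_inv_sqrt = A.sum(dim=1).pow(-0.5)       # degrees are >= 1 because of the self-loops
A_hat = deg_inv_sqrt[:, None] * A * deg_inv_sqrt[None, :]   # D^{-1/2} (A + I) D^{-1/2}

conv = GCNConv_dense(d, h)
out = conv(x, A_hat)                        # one propagation step: A_hat @ (x W)
print(out.shape)                            # torch.Size([5, 8])
```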
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Jianglin Lu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /experiments.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ###################### GCN_KNN ON Pubmed ####################### 4 | python main.py -dataset pubmed -dropout2 0.5 -dropout_adj2 0.0 -epochs 2000 -half_train 0 \ 5 | -half_val_as_train 0 -hidden 32 -knn_metric cosine -lr 0.01 -nlayers 2 \ 6 | -normalization sym -ntrials 5 -patience 3000 -sparse 0 -w_decay 0.0005 \ 7 | -k 15 -method GCN_KNN | tee -a GCN_KNN.txt 8 | 9 | 10 | ###################### GCN_KNN_U ON Pubmed ####################### 11 | python main.py -dataset pubmed -dropout2 0.5 -dropout_adj2 0.0 -epochs 2000 -half_train 0 \ 12 | -half_val_as_train 0 -hidden 32 -knn_metric cosine -lr 0.01 -nlayers 2 \ 13 | -normalization sym -ntrials 5 -patience 3000 -sparse 0 -w_decay 0.0005 \ 14 | -k 15 -alpha 100 -klabel 30 -method GCN_KNN_U | tee -a GCN_KNN_U.txt 15 | 16 | 17 | ###################### GCN_KNN_R ON Pubmed ####################### 18 | python main.py -dataset pubmed -dropout2 0.5 -dropout_adj2 0.0 -epochs 2000 -half_train 0 \ 19 | -half_val_as_train 0 -hidden 32 -knn_metric cosine -lr 0.01 -nlayers 2 \ 20 | -normalization sym -ntrials 5 -patience 3000 -sparse 0 -w_decay 0.0005 \ 21 | -k 15 -alpha 100 -klabel 30 -method GCN_KNN_R | tee -a GCN_KNN_R.txt -------------------------------------------------------------------------------- /data_loader.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import torch 3 | 4 | from citation_networks import load_citation_network, sample_mask 5 | 6 | warnings.simplefilter("ignore") 7 | 8 | 9 | def load_ogb_data(dataset_str): 10 | from ogb.nodeproppred.dataset_pyg import PygNodePropPredDataset 11 | dataset = PygNodePropPredDataset(dataset_str) 12 | 13 | data = dataset[0] 14 | features = data.x 15 | nfeats = data.num_features 16 | nclasses = dataset.num_classes 17 | labels = data.y 18 | 19 | split_idx = dataset.get_idx_split() 20 | 21 | train_mask = sample_mask(split_idx['train'], data.x.shape[0]) 22 | val_mask = sample_mask(split_idx['valid'], data.x.shape[0]) 23 | test_mask = 
sample_mask(split_idx['test'], data.x.shape[0]) 24 | 25 | features = torch.FloatTensor(features) 26 | labels = torch.LongTensor(labels).view(-1) 27 | train_mask = torch.BoolTensor(train_mask) 28 | val_mask = torch.BoolTensor(val_mask) 29 | test_mask = torch.BoolTensor(test_mask) 30 | 31 | return features, nfeats, labels, nclasses, train_mask, val_mask, test_mask 32 | 33 | 34 | def load_data(args): 35 | dataset_str = args.dataset 36 | 37 | if dataset_str.startswith('ogb'): 38 | return load_ogb_data(dataset_str) 39 | 40 | return load_citation_network(dataset_str) 41 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | from layers import GCNConv_dense 4 | 5 | class GCN(nn.Module): 6 | def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout, dropout_adj, Adj, sparse): 7 | super(GCN, self).__init__() 8 | self.layers = nn.ModuleList() 9 | self.layers.append(GCNConv_dense(in_channels, hidden_channels)) 10 | for i in range(num_layers - 2): 11 | self.layers.append(GCNConv_dense(hidden_channels, hidden_channels)) 12 | self.layers.append(GCNConv_dense(hidden_channels, out_channels)) 13 | 14 | self.dropout = dropout 15 | self.dropout_adj = nn.Dropout(p=dropout_adj) 16 | self.dropout_adj_p = dropout_adj 17 | self.Adj = Adj 18 | self.Adj.requires_grad = False 19 | self.sparse = sparse 20 | 21 | def forward(self, x): 22 | 23 | if self.sparse: 24 | Adj = self.Adj 25 | Adj.edata['w'] = F.dropout(Adj.edata['w'], p=self.dropout_adj_p, training=self.training) 26 | else: 27 | Adj = self.dropout_adj(self.Adj) 28 | 29 | for i, conv in enumerate(self.layers[:-1]): 30 | x = conv(x, Adj) 31 | x = F.relu(x) 32 | x = F.dropout(x, p=self.dropout, training=self.training) 33 | x = self.layers[-1](x, Adj) 34 | return x -------------------------------------------------------------------------------- /static/js/index.js: -------------------------------------------------------------------------------- 1 | window.HELP_IMPROVE_VIDEOJS = false; 2 | 3 | var INTERP_BASE = "./static/interpolation/stacked"; 4 | var NUM_INTERP_FRAMES = 240; 5 | 6 | var interp_images = []; 7 | function preloadInterpolationImages() { 8 | for (var i = 0; i < NUM_INTERP_FRAMES; i++) { 9 | var path = INTERP_BASE + '/' + String(i).padStart(6, '0') + '.jpg'; 10 | interp_images[i] = new Image(); 11 | interp_images[i].src = path; 12 | } 13 | } 14 | 15 | function setInterpolationImage(i) { 16 | var image = interp_images[i]; 17 | image.ondragstart = function() { return false; }; 18 | image.oncontextmenu = function() { return false; }; 19 | $('#interpolation-image-wrapper').empty().append(image); 20 | } 21 | 22 | 23 | $(document).ready(function() { 24 | // Check for click events on the navbar burger icon 25 | $(".navbar-burger").click(function() { 26 | // Toggle the "is-active" class on both the "navbar-burger" and the "navbar-menu" 27 | $(".navbar-burger").toggleClass("is-active"); 28 | $(".navbar-menu").toggleClass("is-active"); 29 | 30 | }); 31 | 32 | var options = { 33 | slidesToScroll: 1, 34 | slidesToShow: 3, 35 | loop: true, 36 | infinite: true, 37 | autoplay: false, 38 | autoplaySpeed: 3000, 39 | } 40 | 41 | // Initialize all div with carousel class 42 | var carousels = bulmaCarousel.attach('.carousel', options); 43 | 44 | // Loop on each carousel initialized 45 | for(var i = 0; i < carousels.length; i++) { 46 | // Add listener to event 47 | 
carousels[i].on('before:show', state => {
48 |             console.log(state);
49 |         });
50 |     }
51 | 
52 |     // Access to bulmaCarousel instance of an element
53 |     var element = document.querySelector('#my-element');
54 |     if (element && element.bulmaCarousel) {
55 |         // bulmaCarousel instance is available as element.bulmaCarousel
56 |         element.bulmaCarousel.on('before-show', function(state) {
57 |             console.log(state);
58 |         });
59 |     }
60 | 
61 |     /*var player = document.getElementById('interpolation-video');
62 |     player.addEventListener('loadedmetadata', function() {
63 |         $('#interpolation-slider').on('input', function(event) {
64 |             console.log(this.value, player.duration);
65 |             player.currentTime = player.duration / 100 * this.value;
66 |         })
67 |     }, false);*/
68 |     preloadInterpolationImages();
69 | 
70 |     $('#interpolation-slider').on('input', function(event) {
71 |         setInterpolationImage(this.value);
72 |     });
73 |     setInterpolationImage(0);
74 |     $('#interpolation-slider').prop('max', NUM_INTERP_FRAMES - 1);
75 | 
76 |     bulmaSlider.attach();
77 | 
78 | })
79 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LGI-LS (NeurIPS 2023)
2 | Code for the NeurIPS 2023 paper `Latent Graph Inference with Limited Supervision`.
3 | 
4 | [Project page](https://jianglin954.github.io/LGI-LS/)
5 | 
6 | 
9 | 
10 | 
11 | 
12 | ## Datasets
13 | 
14 | The `Cora`, `Citeseer`, and `Pubmed` datasets can be downloaded from [here](https://github.com/tkipf/gcn/tree/master/gcn/data). Please place the downloaded files in the folder `data_tf`. The `ogbn-arxiv` dataset will be loaded automatically.
15 | 
16 | 
17 | ## Installation
18 | ```bash
19 | conda create -n LGI python=3.7.2
20 | conda activate LGI
21 | pip install torch==1.5.1 torchvision==0.6.1
22 | pip install scipy==1.2.1
23 | pip install scikit-learn==0.21.3
24 | pip install dgl-cu102==0.5.2
25 | pip install ogb==1.2.3
26 | wget https://data.pyg.org/whl/torch-1.5.0%2Bcu102/torch_scatter-2.0.5-cp37-cp37m-linux_x86_64.whl
27 | wget https://data.pyg.org/whl/torch-1.5.0%2Bcu102/torch_sparse-0.6.5-cp37-cp37m-linux_x86_64.whl
28 | wget https://data.pyg.org/whl/torch-1.5.0%2Bcu102/torch_cluster-1.5.4-cp37-cp37m-linux_x86_64.whl
29 | wget https://data.pyg.org/whl/torch-1.5.0%2Bcu102/torch_spline_conv-1.2.0-cp37-cp37m-linux_x86_64.whl
30 | pip install torch_scatter-2.0.5-cp37-cp37m-linux_x86_64.whl
31 | pip install torch_sparse-0.6.5-cp37-cp37m-linux_x86_64.whl
32 | pip install torch_cluster-1.5.4-cp37-cp37m-linux_x86_64.whl
33 | pip install torch_spline_conv-1.2.0-cp37-cp37m-linux_x86_64.whl
34 | pip install torch-geometric==1.6.1
35 | ```
36 | 
37 | 
38 | ## Usage
39 | 
40 | We provide `GCN+KNN`, `GCN+KNN_U`, and `GCN+KNN_R` as examples due to their simplicity and effectiveness. To test their performance on the `Pubmed` dataset, run the following command:
41 | 
42 | ```bash
43 | bash experiments.sh
44 | ```
45 | 
46 | The experimental results will be saved in the corresponding `*.txt` files.
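To run a single method without the wrapper script, the corresponding `main.py` command from `experiments.sh` can be invoked directly, e.g. for `GCN+KNN`:

```bash
python main.py -dataset pubmed -dropout2 0.5 -dropout_adj2 0.0 -epochs 2000 -half_train 0 \
    -half_val_as_train 0 -hidden 32 -knn_metric cosine -lr 0.01 -nlayers 2 \
    -normalization sym -ntrials 5 -patience 3000 -sparse 0 -w_decay 0.0005 \
    -k 15 -method GCN_KNN | tee -a GCN_KNN.txt
```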
47 | 48 | # Reference 49 | 50 | @inproceedings{Jianglin2023LGI, 51 | title={Latent Graph Inference with Limited Supervision}, 52 | author={Lu, Jianglin and Xu, Yi and Wang, Huan and Bai, Yue and Fu, Yun}, 53 | booktitle={Advances in Neural Information Processing Systems}, 54 | year={2023} 55 | } 56 | 57 | @inproceedings{fatemi2021slaps, 58 | title={SLAPS: Self-Supervision Improves Structure Learning for Graph Neural Networks}, 59 | author={Fatemi, Bahare and Asri, Layla El and Kazemi, Seyed Mehran}, 60 | booktitle={Advances in Neural Information Processing Systems}, 61 | year={2021} 62 | } 63 | 64 | # Acknowledgement 65 | Our codes are mainly based on [SLAPS](https://github.com/BorealisAI/SLAPS-GNN/tree/main). For other comparison methods, please refer to their publicly available code repositories. We gratefully thank the authors for their contributions. 66 | -------------------------------------------------------------------------------- /static/css/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Noto Sans', sans-serif; 3 | } 4 | 5 | 6 | .footer .icon-link { 7 | font-size: 25px; 8 | color: #000; 9 | } 10 | 11 | .link-block a { 12 | margin-top: 5px; 13 | margin-bottom: 5px; 14 | } 15 | 16 | .dnerf { 17 | font-variant: small-caps; 18 | } 19 | 20 | 21 | .teaser .hero-body { 22 | padding-top: 0; 23 | padding-bottom: 3rem; 24 | } 25 | 26 | .teaser { 27 | font-family: 'Google Sans', sans-serif; 28 | } 29 | 30 | 31 | .publication-title { 32 | } 33 | 34 | .publication-banner { 35 | max-height: parent; 36 | 37 | } 38 | 39 | .publication-banner video { 40 | position: relative; 41 | left: auto; 42 | top: auto; 43 | transform: none; 44 | object-fit: fit; 45 | } 46 | 47 | .publication-header .hero-body { 48 | } 49 | 50 | .publication-title { 51 | font-family: 'Google Sans', sans-serif; 52 | } 53 | 54 | .publication-authors { 55 | font-family: 'Google Sans', sans-serif; 56 | } 57 | 58 | .publication-venue { 59 | color: #555; 60 | width: fit-content; 61 | font-weight: bold; 62 | } 63 | 64 | .publication-awards { 65 | color: #ff3860; 66 | width: fit-content; 67 | font-weight: bolder; 68 | } 69 | 70 | .publication-authors { 71 | } 72 | 73 | .publication-authors a { 74 | color: hsl(204, 86%, 53%) !important; 75 | } 76 | 77 | .publication-authors a:hover { 78 | text-decoration: underline; 79 | } 80 | 81 | .author-block { 82 | display: inline-block; 83 | } 84 | 85 | .publication-banner img { 86 | } 87 | 88 | .publication-authors { 89 | /*color: #4286f4;*/ 90 | } 91 | 92 | .publication-video { 93 | position: relative; 94 | width: 100%; 95 | height: 0; 96 | padding-bottom: 56.25%; 97 | 98 | overflow: hidden; 99 | border-radius: 10px !important; 100 | } 101 | 102 | .publication-video iframe { 103 | position: absolute; 104 | top: 0; 105 | left: 0; 106 | width: 100%; 107 | height: 100%; 108 | } 109 | 110 | .publication-body img { 111 | } 112 | 113 | .results-carousel { 114 | overflow: hidden; 115 | } 116 | 117 | .results-carousel .item { 118 | margin: 5px; 119 | overflow: hidden; 120 | border: 1px solid #bbb; 121 | border-radius: 10px; 122 | padding: 0; 123 | font-size: 0; 124 | } 125 | 126 | .results-carousel video { 127 | margin: 0; 128 | } 129 | 130 | 131 | .interpolation-panel { 132 | background: #f5f5f5; 133 | border-radius: 10px; 134 | } 135 | 136 | .interpolation-panel .interpolation-image { 137 | width: 100%; 138 | border-radius: 5px; 139 | } 140 | 141 | .interpolation-video-column { 142 | } 143 | 144 | .interpolation-panel .slider { 
145 | margin: 0 !important; 146 | } 147 | 148 | .interpolation-panel .slider { 149 | margin: 0 !important; 150 | } 151 | 152 | #interpolation-image-wrapper { 153 | width: 100%; 154 | } 155 | #interpolation-image-wrapper img { 156 | border-radius: 5px; 157 | } 158 | -------------------------------------------------------------------------------- /calculate_starved_nodes.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import argparse 3 | from citation_networks import load_citation_network_calculate_starved_nodes 4 | import numpy as np 5 | 6 | 7 | def Calculate_Number_of_Starved_Nodes(args): 8 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask, adj = load_citation_network_calculate_starved_nodes(args.dataset) 9 | 10 | ####### Citeseer 370 ####### 11 | ## train_mask[120:370] = True 12 | ## val_mask[120:370] = False 13 | ####### Cora 390 ####### 14 | ## train_mask[140:390] = True 15 | ## val_mask[140:390] = False 16 | 17 | adj = adj + np.eye(adj.shape[0]) 18 | adj = torch.from_numpy(adj) 19 | adj1hop = adj[:, train_mask] 20 | adj1hop = adj1hop.sum(dim=1) 21 | adj1hop = adj1hop.numpy() 22 | connect_label = np.sum(adj1hop > 0) 23 | num_of_1hop_starved_nodes = adj1hop.shape[0] - connect_label 24 | notcon_label_index = np.where(adj1hop == 0)[0] 25 | 26 | adjpower2 = torch.mm(adj, adj) 27 | adjpower2np = adjpower2.numpy() 28 | index = np.argwhere(adjpower2np == 1) 29 | rows = index[:, 0] 30 | rows = np.unique(rows) 31 | label_adjpower2np = adjpower2np[:, train_mask] 32 | label_index = np.argwhere(label_adjpower2np == 1) 33 | label_rows = label_index[:, 0] 34 | label_rows = np.unique(label_rows) 35 | diff_rows = np.setdiff1d(rows, label_rows) 36 | interrows = np.intersect1d(notcon_label_index, diff_rows) 37 | num_of_2hop_starved_nodes = interrows.size 38 | 39 | adjpower3 = torch.mm(adjpower2, adj) 40 | adjpower3np = adjpower3.numpy() 41 | index3hop = np.argwhere(adjpower3np == 1) 42 | rows3ho = index3hop[:, 0] 43 | rows3ho = np.unique(rows3ho) 44 | label_adjpower3np = adjpower3np[:, train_mask] 45 | label_index3hop = np.argwhere(label_adjpower3np == 1) 46 | label_rows3hop = label_index3hop[:, 0] 47 | label_rows3hop = np.unique(label_rows3hop) 48 | diff_rows3hop = np.setdiff1d(rows3ho, label_rows3hop) 49 | interrows3hop = np.intersect1d(interrows, diff_rows3hop) 50 | num_of_3hop_starved_nodes = interrows3hop.size 51 | 52 | adjpower4 = torch.mm(adjpower3, adj) 53 | adjpower4np = adjpower4.numpy() 54 | index4hop = np.argwhere(adjpower4np == 1) 55 | rows4ho = index4hop[:, 0] 56 | rows4ho = np.unique(rows4ho) 57 | label_adjpower4np = adjpower4np[:, train_mask] 58 | label_index4hop = np.argwhere(label_adjpower4np == 1) 59 | label_rows4hop = label_index4hop[:, 0] 60 | label_rows4hop = np.unique(label_rows4hop) 61 | diff_rows4hop = np.setdiff1d(rows4ho, label_rows4hop) 62 | interrows4hop = np.intersect1d(interrows3hop, diff_rows4hop) 63 | num_of_4hop_starved_nodes = interrows4hop.size 64 | 65 | print("The number of 1-, 2-, 3-, and 4-hop starved nodes on " + args.dataset + 66 | " dataset are {:0d}, {:0d}, {:0d}, and {:0d}, respectively.".format(num_of_1hop_starved_nodes, 67 | num_of_2hop_starved_nodes, num_of_3hop_starved_nodes, num_of_4hop_starved_nodes)) 68 | 69 | 70 | 71 | if __name__ == '__main__': 72 | parser = argparse.ArgumentParser() 73 | 74 | parser.add_argument('-dataset', type=str, default='cora', help='See choices', 75 | choices=['cora', 'citeseer', 'pubmed']) 76 | 77 | args = parser.parse_args() 78 | 
Calculate_Number_of_Starved_Nodes(args) 79 | -------------------------------------------------------------------------------- /static/css/bulma-carousel.min.css: -------------------------------------------------------------------------------- 1 | @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.slider{position:relative;width:100%}.slider-container{display:flex;flex-wrap:nowrap;flex-direction:row;overflow:hidden;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);min-height:100%}.slider-container.is-vertical{flex-direction:column}.slider-container .slider-item{flex:none}.slider-container .slider-item .image.is-covered img{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.slider-container .slider-item .video-container{height:0;padding-bottom:0;padding-top:56.25%;margin:0;position:relative}.slider-container .slider-item .video-container.is-1by1,.slider-container .slider-item .video-container.is-square{padding-top:100%}.slider-container .slider-item .video-container.is-4by3{padding-top:75%}.slider-container .slider-item .video-container.is-21by9{padding-top:42.857143%}.slider-container .slider-item .video-container embed,.slider-container .slider-item .video-container iframe,.slider-container .slider-item .video-container object{position:absolute;top:0;left:0;width:100%!important;height:100%!important}.slider-navigation-next,.slider-navigation-previous{display:flex;justify-content:center;align-items:center;position:absolute;width:42px;height:42px;background:#fff center center no-repeat;background-size:20px 20px;border:1px solid #fff;border-radius:25091983px;box-shadow:0 2px 5px #3232321a;top:50%;margin-top:-20px;left:0;cursor:pointer;transition:opacity .3s,-webkit-transform .3s;transition:transform .3s,opacity .3s;transition:transform .3s,opacity .3s,-webkit-transform .3s}.slider-navigation-next:hover,.slider-navigation-previous:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.slider-navigation-next.is-hidden,.slider-navigation-previous.is-hidden{display:none;opacity:0}.slider-navigation-next svg,.slider-navigation-previous svg{width:25%}.slider-navigation-next{left:auto;right:0;background:#fff center center no-repeat;background-size:20px 20px}.slider-pagination{display:none;justify-content:center;align-items:center;position:absolute;bottom:0;left:0;right:0;padding:.5rem 1rem;text-align:center}.slider-pagination .slider-page{background:#fff;width:10px;height:10px;border-radius:25091983px;display:inline-block;margin:0 3px;box-shadow:0 2px 5px #3232321a;transition:-webkit-transform .3s;transition:transform .3s;transition:transform .3s,-webkit-transform .3s;cursor:pointer}.slider-pagination .slider-page.is-active,.slider-pagination .slider-page:hover{-webkit-transform:scale(1.4);transform:scale(1.4)}@media screen and (min-width:800px){.slider-pagination{display:flex}}.hero.has-carousel{position:relative}.hero.has-carousel+.hero-body,.hero.has-carousel+.hero-footer,.hero.has-carousel+.hero-head{z-index:10;overflow:hidden}.hero.has-carousel .hero-carousel{position:absolute;top:0;left:0;bottom:0;right:0;height:auto;border:none;margin:auto;padding:0;z-index:0}.hero.has-carousel .hero-carousel 
.slider{width:100%;max-width:100%;overflow:hidden;height:100%!important;max-height:100%;z-index:0}.hero.has-carousel .hero-carousel .slider .has-background{max-height:100%}.hero.has-carousel .hero-carousel .slider .has-background .is-background{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.hero.has-carousel .hero-body{margin:0 3rem;z-index:10} -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.sparse as sp 3 | import torch 4 | import torch.nn.functional as F 5 | from sklearn.neighbors import kneighbors_graph 6 | 7 | EOS = 1e-10 8 | 9 | def apply_non_linearity(tensor, non_linearity, i): 10 | if non_linearity == 'elu': 11 | return F.elu(tensor * i - i) + 1 12 | elif non_linearity == 'relu': 13 | return F.relu(tensor) 14 | elif non_linearity == 'none': 15 | return tensor 16 | else: 17 | raise NameError('We dont support the non-linearity yet') 18 | 19 | 20 | def get_random_mask(features, r, nr): 21 | nones = torch.sum(features > 0.0).float() 22 | nzeros = features.shape[0] * features.shape[1] - nones 23 | pzeros = nones / nzeros / r * nr 24 | probs = torch.zeros(features.shape).cuda() 25 | probs[features == 0.0] = pzeros 26 | probs[features > 0.0] = 1 / r 27 | mask = torch.bernoulli(probs) 28 | return mask 29 | 30 | 31 | def get_random_mask_ogb(features, r): 32 | probs = torch.full(features.shape, 1 / r) 33 | mask = torch.bernoulli(probs) 34 | return mask 35 | 36 | 37 | def accuracy(preds, labels): 38 | pred_class = torch.max(preds, 1)[1] 39 | return torch.sum(torch.eq(pred_class, labels)).float() / labels.shape[0] 40 | 41 | 42 | def nearest_neighbors(X, k, metric): 43 | adj = kneighbors_graph(X, k, metric=metric) 44 | adj = np.array(adj.todense(), dtype=np.float32) 45 | adj += np.eye(adj.shape[0]) 46 | return adj 47 | 48 | 49 | def nearest_neighbors_sparse(X, k, metric): 50 | adj = kneighbors_graph(X, k, metric=metric) 51 | loop = np.arange(X.shape[0]) 52 | [s_, d_, val] = sp.find(adj) 53 | s = np.concatenate((s_, loop)) 54 | d = np.concatenate((d_, loop)) 55 | 56 | return s, d 57 | 58 | 59 | def nearest_neighbors_pre_exp(X, k, metric, i): 60 | adj = kneighbors_graph(X, k, metric=metric) 61 | adj = np.array(adj.todense(), dtype=np.float32) 62 | adj += np.eye(adj.shape[0]) 63 | adj = adj * i - i 64 | return adj 65 | 66 | 67 | def nearest_neighbors_pre_elu(X, k, metric, i): 68 | adj = kneighbors_graph(X, k, metric=metric) 69 | adj = np.array(adj.todense(), dtype=np.float32) 70 | adj += np.eye(adj.shape[0]) 71 | adj = adj * i - i 72 | return adj 73 | 74 | 75 | def normalize(adj, mode, sparse=False): 76 | if not sparse: 77 | if mode == "sym": 78 | inv_sqrt_degree = 1. / (torch.sqrt(adj.sum(dim=1, keepdim=False)) + EOS) 79 | return inv_sqrt_degree[:, None] * adj * inv_sqrt_degree[None, :] 80 | elif mode == "row": 81 | inv_degree = 1. / (adj.sum(dim=1, keepdim=False) + EOS) 82 | return inv_degree[:, None] * adj 83 | else: 84 | exit("wrong norm mode") 85 | else: 86 | adj = adj.coalesce() 87 | if mode == "sym": 88 | inv_sqrt_degree = 1. / (torch.sqrt(torch.sparse.sum(adj, dim=1).values())) 89 | D_value = inv_sqrt_degree[adj.indices()[0]] * inv_sqrt_degree[adj.indices()[1]] 90 | 91 | elif mode == "row": 92 | inv_degree = 1. 
/ (torch.sparse.sum(adj, dim=1).values() + EOS) 93 | D_value = inv_degree[adj.indices()[0]] 94 | else: 95 | exit("wrong norm mode") 96 | new_values = adj.values() * D_value 97 | 98 | return torch.sparse.FloatTensor(adj.indices(), new_values, adj.size()) 99 | 100 | 101 | def symmetrize(adj): # only for non-sparse 102 | return (adj + adj.T) / 2 103 | 104 | 105 | def cal_similarity_graph(node_embeddings1, node_embeddings2): 106 | similarity_graph = torch.mm(node_embeddings1, node_embeddings2.t()) 107 | return similarity_graph 108 | 109 | 110 | def top_k(raw_graph, K): 111 | values, indices = raw_graph.topk(k=int(K), dim=-1) 112 | assert torch.max(indices) < raw_graph.shape[1] 113 | mask = torch.zeros(raw_graph.shape).cuda() 114 | mask[torch.arange(raw_graph.shape[0]).view(-1, 1), indices] = 1. 115 | 116 | mask.requires_grad = False 117 | sparse_graph = raw_graph * mask 118 | return sparse_graph 119 | 120 | 121 | def knn_fast(X, k, b): 122 | X = F.normalize(X, dim=1, p=2) 123 | index = 0 124 | values = torch.zeros(X.shape[0] * (k + 1)).cuda() 125 | rows = torch.zeros(X.shape[0] * (k + 1)).cuda() 126 | cols = torch.zeros(X.shape[0] * (k + 1)).cuda() 127 | norm_row = torch.zeros(X.shape[0]).cuda() 128 | norm_col = torch.zeros(X.shape[0]).cuda() 129 | while index < X.shape[0]: 130 | if (index + b) > (X.shape[0]): 131 | end = X.shape[0] 132 | else: 133 | end = index + b 134 | sub_tensor = X[index:index + b] 135 | similarities = torch.mm(sub_tensor, X.t()) 136 | vals, inds = similarities.topk(k=k + 1, dim=-1) 137 | values[index * (k + 1):(end) * (k + 1)] = vals.view(-1) 138 | cols[index * (k + 1):(end) * (k + 1)] = inds.view(-1) 139 | rows[index * (k + 1):(end) * (k + 1)] = torch.arange(index, end).view(-1, 1).repeat(1, k + 1).view(-1) 140 | norm_row[index: end] = torch.sum(vals, dim=1) 141 | norm_col.index_add_(-1, inds.view(-1), vals.view(-1)) 142 | index += b 143 | norm = norm_row + norm_col 144 | rows = rows.long() 145 | cols = cols.long() 146 | values *= (torch.pow(norm[rows], -0.5) * torch.pow(norm[cols], -0.5)) 147 | return rows, cols, values 148 | -------------------------------------------------------------------------------- /GCN_KNN_R.txt: -------------------------------------------------------------------------------- 1 | Epoch 0100: Test Loss 0.6579, Test Accuracy 0.7220 2 | Epoch 0200: Test Loss 0.6608, Test Accuracy 0.7240 3 | Epoch 0300: Test Loss 0.6845, Test Accuracy 0.7210 4 | Epoch 0400: Test Loss 0.6724, Test Accuracy 0.7300 5 | Epoch 0500: Test Loss 0.6861, Test Accuracy 0.7370 6 | Epoch 0600: Test Loss 0.6820, Test Accuracy 0.7170 7 | Epoch 0700: Test Loss 0.6858, Test Accuracy 0.7330 8 | Epoch 0800: Test Loss 0.6767, Test Accuracy 0.7190 9 | Epoch 0900: Test Loss 0.7292, Test Accuracy 0.7250 10 | Epoch 1000: Test Loss 0.7002, Test Accuracy 0.7180 11 | Epoch 1100: Test Loss 0.6837, Test Accuracy 0.7270 12 | Epoch 1200: Test Loss 0.6879, Test Accuracy 0.7380 13 | Epoch 1300: Test Loss 0.6824, Test Accuracy 0.7350 14 | Epoch 1400: Test Loss 0.6979, Test Accuracy 0.7270 15 | Epoch 1500: Test Loss 0.6870, Test Accuracy 0.7280 16 | Epoch 1600: Test Loss 0.6959, Test Accuracy 0.7200 17 | Epoch 1700: Test Loss 0.6740, Test Accuracy 0.7390 18 | Epoch 1800: Test Loss 0.7175, Test Accuracy 0.7150 19 | Epoch 1900: Test Loss 0.6756, Test Accuracy 0.7420 20 | Epoch 2000: Test Loss 0.7083, Test Accuracy 0.7110 21 | Trial 00: test accuracy 0.7560 22 | Epoch 0100: Test Loss 0.6769, Test Accuracy 0.7280 23 | Epoch 0200: Test Loss 0.6874, Test Accuracy 0.7220 24 | Epoch 0300: Test Loss 0.6501, 
Test Accuracy 0.7420 25 | Epoch 0400: Test Loss 0.6929, Test Accuracy 0.7140 26 | Epoch 0500: Test Loss 0.6731, Test Accuracy 0.7300 27 | Epoch 0600: Test Loss 0.6721, Test Accuracy 0.7440 28 | Epoch 0700: Test Loss 0.7048, Test Accuracy 0.7350 29 | Epoch 0800: Test Loss 0.6762, Test Accuracy 0.7360 30 | Epoch 0900: Test Loss 0.7102, Test Accuracy 0.7180 31 | Epoch 1000: Test Loss 0.6769, Test Accuracy 0.7320 32 | Epoch 1100: Test Loss 0.6765, Test Accuracy 0.7490 33 | Epoch 1200: Test Loss 0.6901, Test Accuracy 0.7340 34 | Epoch 1300: Test Loss 0.7240, Test Accuracy 0.7080 35 | Epoch 1400: Test Loss 0.6887, Test Accuracy 0.7430 36 | Epoch 1500: Test Loss 0.7409, Test Accuracy 0.7280 37 | Epoch 1600: Test Loss 0.7067, Test Accuracy 0.7200 38 | Epoch 1700: Test Loss 0.7062, Test Accuracy 0.7440 39 | Epoch 1800: Test Loss 0.6860, Test Accuracy 0.7440 40 | Epoch 1900: Test Loss 0.7146, Test Accuracy 0.7130 41 | Epoch 2000: Test Loss 0.7018, Test Accuracy 0.7090 42 | Trial 01: test accuracy 0.7500 43 | Epoch 0100: Test Loss 0.6625, Test Accuracy 0.7320 44 | Epoch 0200: Test Loss 0.6824, Test Accuracy 0.7170 45 | Epoch 0300: Test Loss 0.6493, Test Accuracy 0.7540 46 | Epoch 0400: Test Loss 0.6645, Test Accuracy 0.7330 47 | Epoch 0500: Test Loss 0.6624, Test Accuracy 0.7410 48 | Epoch 0600: Test Loss 0.6703, Test Accuracy 0.7310 49 | Epoch 0700: Test Loss 0.6654, Test Accuracy 0.7330 50 | Epoch 0800: Test Loss 0.7169, Test Accuracy 0.7060 51 | Epoch 0900: Test Loss 0.7028, Test Accuracy 0.7230 52 | Epoch 1000: Test Loss 0.6906, Test Accuracy 0.7200 53 | Epoch 1100: Test Loss 0.7019, Test Accuracy 0.7170 54 | Epoch 1200: Test Loss 0.7104, Test Accuracy 0.7180 55 | Epoch 1300: Test Loss 0.6829, Test Accuracy 0.7300 56 | Epoch 1400: Test Loss 0.7041, Test Accuracy 0.7180 57 | Epoch 1500: Test Loss 0.7010, Test Accuracy 0.7320 58 | Epoch 1600: Test Loss 0.7160, Test Accuracy 0.7100 59 | Epoch 1700: Test Loss 0.7124, Test Accuracy 0.6930 60 | Epoch 1800: Test Loss 0.6834, Test Accuracy 0.7360 61 | Epoch 1900: Test Loss 0.6920, Test Accuracy 0.7250 62 | Epoch 2000: Test Loss 0.6994, Test Accuracy 0.7160 63 | Trial 02: test accuracy 0.7540 64 | Epoch 0100: Test Loss 0.6613, Test Accuracy 0.7230 65 | Epoch 0200: Test Loss 0.6541, Test Accuracy 0.7320 66 | Epoch 0300: Test Loss 0.6605, Test Accuracy 0.7360 67 | Epoch 0400: Test Loss 0.6551, Test Accuracy 0.7380 68 | Epoch 0500: Test Loss 0.6901, Test Accuracy 0.7230 69 | Epoch 0600: Test Loss 0.6752, Test Accuracy 0.7340 70 | Epoch 0700: Test Loss 0.7092, Test Accuracy 0.7130 71 | Epoch 0800: Test Loss 0.6749, Test Accuracy 0.7290 72 | Epoch 0900: Test Loss 0.7025, Test Accuracy 0.7100 73 | Epoch 1000: Test Loss 0.6850, Test Accuracy 0.7250 74 | Epoch 1100: Test Loss 0.7263, Test Accuracy 0.6990 75 | Epoch 1200: Test Loss 0.6876, Test Accuracy 0.7340 76 | Epoch 1300: Test Loss 0.6837, Test Accuracy 0.7360 77 | Epoch 1400: Test Loss 0.6937, Test Accuracy 0.7190 78 | Epoch 1500: Test Loss 0.6799, Test Accuracy 0.7400 79 | Epoch 1600: Test Loss 0.6909, Test Accuracy 0.7320 80 | Epoch 1700: Test Loss 0.6800, Test Accuracy 0.7260 81 | Epoch 1800: Test Loss 0.7063, Test Accuracy 0.7320 82 | Epoch 1900: Test Loss 0.6959, Test Accuracy 0.7300 83 | Epoch 2000: Test Loss 0.7676, Test Accuracy 0.6830 84 | Trial 03: test accuracy 0.7490 85 | Epoch 0100: Test Loss 0.6803, Test Accuracy 0.7200 86 | Epoch 0200: Test Loss 0.6718, Test Accuracy 0.7010 87 | Epoch 0300: Test Loss 0.6744, Test Accuracy 0.7130 88 | Epoch 0400: Test Loss 0.6971, Test Accuracy 0.7080 89 | 
Epoch 0500: Test Loss 0.6719, Test Accuracy 0.7350 90 | Epoch 0600: Test Loss 0.6789, Test Accuracy 0.7240 91 | Epoch 0700: Test Loss 0.6830, Test Accuracy 0.7290 92 | Epoch 0800: Test Loss 0.6752, Test Accuracy 0.7310 93 | Epoch 0900: Test Loss 0.6730, Test Accuracy 0.7290 94 | Epoch 1000: Test Loss 0.7089, Test Accuracy 0.7070 95 | Epoch 1100: Test Loss 0.6864, Test Accuracy 0.7290 96 | Epoch 1200: Test Loss 0.7054, Test Accuracy 0.6960 97 | Epoch 1300: Test Loss 0.6959, Test Accuracy 0.7250 98 | Epoch 1400: Test Loss 0.7100, Test Accuracy 0.7060 99 | Epoch 1500: Test Loss 0.6711, Test Accuracy 0.7380 100 | Epoch 1600: Test Loss 0.6958, Test Accuracy 0.7370 101 | Epoch 1700: Test Loss 0.6961, Test Accuracy 0.7180 102 | Epoch 1800: Test Loss 0.6886, Test Accuracy 0.7280 103 | Epoch 1900: Test Loss 0.6945, Test Accuracy 0.7220 104 | Epoch 2000: Test Loss 0.6933, Test Accuracy 0.7340 105 | Trial 04: test accuracy 0.7520 106 | [0.7560000419616699, 0.7500000596046448, 0.7540000081062317, 0.7490000128746033, 0.7520000338554382] 107 | std of test accuracy 0.25612493003552406 108 | average of test accuracy 75.22000312805176 109 | Namespace(alpha=100.0, dataset='pubmed', dropout2=0.5, dropout_adj2=0.0, epochs=2000, half_train=0, half_val_as_train=0, hidden=32, k=15, klabel=30, knn_metric='cosine', lr=0.01, method='GCN_KNN_R', nlayers=2, normalization='sym', ntrials=5, patience=3000, sparse=0, w_decay=0.0005) 110 | -------------------------------------------------------------------------------- /GCN_KNN.txt: -------------------------------------------------------------------------------- 1 | Epoch 0100: Test Loss 0.9308, Test Accuracy 0.6700 2 | Epoch 0200: Test Loss 0.9911, Test Accuracy 0.6720 3 | Epoch 0300: Test Loss 1.0182, Test Accuracy 0.6660 4 | Epoch 0400: Test Loss 1.0541, Test Accuracy 0.6700 5 | Epoch 0500: Test Loss 1.0613, Test Accuracy 0.6690 6 | Epoch 0600: Test Loss 1.0692, Test Accuracy 0.6700 7 | Epoch 0700: Test Loss 1.0517, Test Accuracy 0.6690 8 | Epoch 0800: Test Loss 1.0433, Test Accuracy 0.6670 9 | Epoch 0900: Test Loss 1.0576, Test Accuracy 0.6710 10 | Epoch 1000: Test Loss 1.0480, Test Accuracy 0.6710 11 | Epoch 1100: Test Loss 1.0781, Test Accuracy 0.6670 12 | Epoch 1200: Test Loss 1.0473, Test Accuracy 0.6700 13 | Epoch 1300: Test Loss 1.0448, Test Accuracy 0.6720 14 | Epoch 1400: Test Loss 1.0538, Test Accuracy 0.6720 15 | Epoch 1500: Test Loss 1.0396, Test Accuracy 0.6730 16 | Epoch 1600: Test Loss 1.0477, Test Accuracy 0.6700 17 | Epoch 1700: Test Loss 1.0483, Test Accuracy 0.6690 18 | Epoch 1800: Test Loss 1.0591, Test Accuracy 0.6640 19 | Epoch 1900: Test Loss 1.0484, Test Accuracy 0.6730 20 | Epoch 2000: Test Loss 1.0584, Test Accuracy 0.6650 21 | ******************************* 22 | Trial 00: test accuracy 0.6820 23 | ******************************* 24 | Epoch 0100: Test Loss 0.9393, Test Accuracy 0.6690 25 | Epoch 0200: Test Loss 0.9812, Test Accuracy 0.6710 26 | Epoch 0300: Test Loss 1.0247, Test Accuracy 0.6710 27 | Epoch 0400: Test Loss 1.0322, Test Accuracy 0.6730 28 | Epoch 0500: Test Loss 1.0755, Test Accuracy 0.6740 29 | Epoch 0600: Test Loss 1.0343, Test Accuracy 0.6760 30 | Epoch 0700: Test Loss 1.0327, Test Accuracy 0.6710 31 | Epoch 0800: Test Loss 1.0594, Test Accuracy 0.6710 32 | Epoch 0900: Test Loss 1.0518, Test Accuracy 0.6700 33 | Epoch 1000: Test Loss 1.0454, Test Accuracy 0.6720 34 | Epoch 1100: Test Loss 1.0565, Test Accuracy 0.6740 35 | Epoch 1200: Test Loss 1.0422, Test Accuracy 0.6650 36 | Epoch 1300: Test Loss 1.0546, Test Accuracy 
0.6770 37 | Epoch 1400: Test Loss 1.0551, Test Accuracy 0.6730 38 | Epoch 1500: Test Loss 1.0577, Test Accuracy 0.6720 39 | Epoch 1600: Test Loss 1.0519, Test Accuracy 0.6690 40 | Epoch 1700: Test Loss 1.0469, Test Accuracy 0.6680 41 | Epoch 1800: Test Loss 1.0536, Test Accuracy 0.6730 42 | Epoch 1900: Test Loss 1.0573, Test Accuracy 0.6700 43 | Epoch 2000: Test Loss 1.0558, Test Accuracy 0.6750 44 | ******************************* 45 | Trial 01: test accuracy 0.6850 46 | ******************************* 47 | Epoch 0100: Test Loss 0.9141, Test Accuracy 0.6740 48 | Epoch 0200: Test Loss 0.9941, Test Accuracy 0.6790 49 | Epoch 0300: Test Loss 1.0365, Test Accuracy 0.6750 50 | Epoch 0400: Test Loss 1.0633, Test Accuracy 0.6720 51 | Epoch 0500: Test Loss 1.0611, Test Accuracy 0.6680 52 | Epoch 0600: Test Loss 1.0651, Test Accuracy 0.6730 53 | Epoch 0700: Test Loss 1.0369, Test Accuracy 0.6760 54 | Epoch 0800: Test Loss 1.0292, Test Accuracy 0.6780 55 | Epoch 0900: Test Loss 1.0589, Test Accuracy 0.6690 56 | Epoch 1000: Test Loss 1.0506, Test Accuracy 0.6710 57 | Epoch 1100: Test Loss 1.0408, Test Accuracy 0.6720 58 | Epoch 1200: Test Loss 1.0524, Test Accuracy 0.6660 59 | Epoch 1300: Test Loss 1.0474, Test Accuracy 0.6680 60 | Epoch 1400: Test Loss 1.0608, Test Accuracy 0.6710 61 | Epoch 1500: Test Loss 1.0547, Test Accuracy 0.6750 62 | Epoch 1600: Test Loss 1.0660, Test Accuracy 0.6670 63 | Epoch 1700: Test Loss 1.0677, Test Accuracy 0.6690 64 | Epoch 1800: Test Loss 1.0506, Test Accuracy 0.6720 65 | Epoch 1900: Test Loss 1.0500, Test Accuracy 0.6700 66 | Epoch 2000: Test Loss 1.0510, Test Accuracy 0.6700 67 | ******************************* 68 | Trial 02: test accuracy 0.6830 69 | ******************************* 70 | Epoch 0100: Test Loss 0.9345, Test Accuracy 0.6650 71 | Epoch 0200: Test Loss 1.0037, Test Accuracy 0.6620 72 | Epoch 0300: Test Loss 1.0136, Test Accuracy 0.6710 73 | Epoch 0400: Test Loss 1.0494, Test Accuracy 0.6630 74 | Epoch 0500: Test Loss 1.0589, Test Accuracy 0.6720 75 | Epoch 0600: Test Loss 1.0513, Test Accuracy 0.6660 76 | Epoch 0700: Test Loss 1.0519, Test Accuracy 0.6750 77 | Epoch 0800: Test Loss 1.0596, Test Accuracy 0.6700 78 | Epoch 0900: Test Loss 1.0389, Test Accuracy 0.6740 79 | Epoch 1000: Test Loss 1.0474, Test Accuracy 0.6690 80 | Epoch 1100: Test Loss 1.0489, Test Accuracy 0.6720 81 | Epoch 1200: Test Loss 1.0406, Test Accuracy 0.6740 82 | Epoch 1300: Test Loss 1.0420, Test Accuracy 0.6740 83 | Epoch 1400: Test Loss 1.0672, Test Accuracy 0.6710 84 | Epoch 1500: Test Loss 1.0566, Test Accuracy 0.6730 85 | Epoch 1600: Test Loss 1.0326, Test Accuracy 0.6720 86 | Epoch 1700: Test Loss 1.0345, Test Accuracy 0.6710 87 | Epoch 1800: Test Loss 1.0513, Test Accuracy 0.6730 88 | Epoch 1900: Test Loss 1.0550, Test Accuracy 0.6720 89 | Epoch 2000: Test Loss 1.0739, Test Accuracy 0.6660 90 | ******************************* 91 | Trial 03: test accuracy 0.6820 92 | ******************************* 93 | Epoch 0100: Test Loss 0.9478, Test Accuracy 0.6720 94 | Epoch 0200: Test Loss 1.0101, Test Accuracy 0.6700 95 | Epoch 0300: Test Loss 1.0290, Test Accuracy 0.6700 96 | Epoch 0400: Test Loss 1.0391, Test Accuracy 0.6710 97 | Epoch 0500: Test Loss 1.0544, Test Accuracy 0.6650 98 | Epoch 0600: Test Loss 1.0526, Test Accuracy 0.6720 99 | Epoch 0700: Test Loss 1.0387, Test Accuracy 0.6750 100 | Epoch 0800: Test Loss 1.0632, Test Accuracy 0.6680 101 | Epoch 0900: Test Loss 1.0552, Test Accuracy 0.6730 102 | Epoch 1000: Test Loss 1.0434, Test Accuracy 0.6740 103 | Epoch 1100: Test 
Loss 1.0551, Test Accuracy 0.6650 104 | Epoch 1200: Test Loss 1.0528, Test Accuracy 0.6720 105 | Epoch 1300: Test Loss 1.0494, Test Accuracy 0.6740 106 | Epoch 1400: Test Loss 1.0535, Test Accuracy 0.6690 107 | Epoch 1500: Test Loss 1.0436, Test Accuracy 0.6730 108 | Epoch 1600: Test Loss 1.0472, Test Accuracy 0.6710 109 | Epoch 1700: Test Loss 1.0671, Test Accuracy 0.6670 110 | Epoch 1800: Test Loss 1.0632, Test Accuracy 0.6720 111 | Epoch 1900: Test Loss 1.0334, Test Accuracy 0.6760 112 | Epoch 2000: Test Loss 1.0418, Test Accuracy 0.6720 113 | ******************************* 114 | Trial 04: test accuracy 0.6810 115 | ******************************* 116 | [0.6820000410079956, 0.6850000619888306, 0.6830000281333923, 0.6820000410079956, 0.6810000538825989] 117 | std of test accuracy 0.13564696244500568 118 | average of test accuracy 68.26000452041626 119 | Namespace(alpha=100, dataset='pubmed', dropout2=0.5, dropout_adj2=0.0, epochs=2000, half_train=0, half_val_as_train=0, hidden=32, k=15, klabel=30, knn_metric='cosine', lr=0.01, method='GCN_KNN', nlayers=2, normalization='sym', ntrials=5, patience=3000, sparse=0, w_decay=0.0005) 120 | -------------------------------------------------------------------------------- /GCN_KNN_U.txt: -------------------------------------------------------------------------------- 1 | Epoch 0100: Test Loss 0.7103, Test Accuracy 0.7150 2 | Epoch 0200: Test Loss 0.7386, Test Accuracy 0.7170 3 | Epoch 0300: Test Loss 0.7514, Test Accuracy 0.7190 4 | Epoch 0400: Test Loss 0.7671, Test Accuracy 0.7230 5 | Epoch 0500: Test Loss 0.7695, Test Accuracy 0.7130 6 | Epoch 0600: Test Loss 0.7819, Test Accuracy 0.7160 7 | Epoch 0700: Test Loss 0.7770, Test Accuracy 0.7180 8 | Epoch 0800: Test Loss 0.8087, Test Accuracy 0.6960 9 | Epoch 0900: Test Loss 0.7899, Test Accuracy 0.7060 10 | Epoch 1000: Test Loss 0.7731, Test Accuracy 0.7310 11 | Epoch 1100: Test Loss 0.8043, Test Accuracy 0.6990 12 | Epoch 1200: Test Loss 0.7687, Test Accuracy 0.7320 13 | Epoch 1300: Test Loss 0.8118, Test Accuracy 0.6890 14 | Epoch 1400: Test Loss 0.7736, Test Accuracy 0.7330 15 | Epoch 1500: Test Loss 0.7970, Test Accuracy 0.7070 16 | Epoch 1600: Test Loss 0.8053, Test Accuracy 0.7010 17 | Epoch 1700: Test Loss 0.7937, Test Accuracy 0.7100 18 | Epoch 1800: Test Loss 0.7697, Test Accuracy 0.7270 19 | Epoch 1900: Test Loss 0.7871, Test Accuracy 0.7110 20 | Epoch 2000: Test Loss 0.7934, Test Accuracy 0.7200 21 | ******************************* 22 | Trial 00: test accuracy 0.7390 23 | ******************************* 24 | Epoch 0100: Test Loss 0.7020, Test Accuracy 0.7020 25 | Epoch 0200: Test Loss 0.7882, Test Accuracy 0.6800 26 | Epoch 0300: Test Loss 0.7773, Test Accuracy 0.6980 27 | Epoch 0400: Test Loss 0.8016, Test Accuracy 0.6890 28 | Epoch 0500: Test Loss 0.8162, Test Accuracy 0.6780 29 | Epoch 0600: Test Loss 0.8593, Test Accuracy 0.6670 30 | Epoch 0700: Test Loss 0.8106, Test Accuracy 0.6810 31 | Epoch 0800: Test Loss 0.8429, Test Accuracy 0.6760 32 | Epoch 0900: Test Loss 0.7895, Test Accuracy 0.7100 33 | Epoch 1000: Test Loss 0.7845, Test Accuracy 0.7020 34 | Epoch 1100: Test Loss 0.8374, Test Accuracy 0.6730 35 | Epoch 1200: Test Loss 0.8033, Test Accuracy 0.6930 36 | Epoch 1300: Test Loss 0.8221, Test Accuracy 0.6800 37 | Epoch 1400: Test Loss 0.7926, Test Accuracy 0.7110 38 | Epoch 1500: Test Loss 0.7947, Test Accuracy 0.7210 39 | Epoch 1600: Test Loss 0.8563, Test Accuracy 0.6770 40 | Epoch 1700: Test Loss 0.8136, Test Accuracy 0.6860 41 | Epoch 1800: Test Loss 0.7981, Test Accuracy 
0.7040 42 | Epoch 1900: Test Loss 0.8299, Test Accuracy 0.6800 43 | Epoch 2000: Test Loss 0.8059, Test Accuracy 0.7030 44 | ******************************* 45 | Trial 01: test accuracy 0.7330 46 | ******************************* 47 | Epoch 0100: Test Loss 0.6826, Test Accuracy 0.7230 48 | Epoch 0200: Test Loss 0.7199, Test Accuracy 0.7320 49 | Epoch 0300: Test Loss 0.7448, Test Accuracy 0.7200 50 | Epoch 0400: Test Loss 0.7429, Test Accuracy 0.7240 51 | Epoch 0500: Test Loss 0.7656, Test Accuracy 0.7180 52 | Epoch 0600: Test Loss 0.7590, Test Accuracy 0.7290 53 | Epoch 0700: Test Loss 0.7821, Test Accuracy 0.7160 54 | Epoch 0800: Test Loss 0.7618, Test Accuracy 0.7300 55 | Epoch 0900: Test Loss 0.7957, Test Accuracy 0.6930 56 | Epoch 1000: Test Loss 0.7671, Test Accuracy 0.7250 57 | Epoch 1100: Test Loss 0.7917, Test Accuracy 0.6970 58 | Epoch 1200: Test Loss 0.8005, Test Accuracy 0.6950 59 | Epoch 1300: Test Loss 0.7812, Test Accuracy 0.7200 60 | Epoch 1400: Test Loss 0.7929, Test Accuracy 0.6950 61 | Epoch 1500: Test Loss 0.7900, Test Accuracy 0.7130 62 | Epoch 1600: Test Loss 0.7575, Test Accuracy 0.7280 63 | Epoch 1700: Test Loss 0.8469, Test Accuracy 0.6750 64 | Epoch 1800: Test Loss 0.7911, Test Accuracy 0.7160 65 | Epoch 1900: Test Loss 0.8070, Test Accuracy 0.6870 66 | Epoch 2000: Test Loss 0.7866, Test Accuracy 0.7120 67 | ******************************* 68 | Trial 02: test accuracy 0.7390 69 | ******************************* 70 | Epoch 0100: Test Loss 0.7018, Test Accuracy 0.7300 71 | Epoch 0200: Test Loss 0.7345, Test Accuracy 0.7300 72 | Epoch 0300: Test Loss 0.7316, Test Accuracy 0.7310 73 | Epoch 0400: Test Loss 0.7502, Test Accuracy 0.7370 74 | Epoch 0500: Test Loss 0.7608, Test Accuracy 0.7390 75 | Epoch 0600: Test Loss 0.7715, Test Accuracy 0.7300 76 | Epoch 0700: Test Loss 0.7656, Test Accuracy 0.7300 77 | Epoch 0800: Test Loss 0.7638, Test Accuracy 0.7320 78 | Epoch 0900: Test Loss 0.7529, Test Accuracy 0.7300 79 | Epoch 1000: Test Loss 0.7487, Test Accuracy 0.7330 80 | Epoch 1100: Test Loss 0.7658, Test Accuracy 0.7280 81 | Epoch 1200: Test Loss 0.7581, Test Accuracy 0.7310 82 | Epoch 1300: Test Loss 0.7613, Test Accuracy 0.7300 83 | Epoch 1400: Test Loss 0.7555, Test Accuracy 0.7370 84 | Epoch 1500: Test Loss 0.7663, Test Accuracy 0.7320 85 | Epoch 1600: Test Loss 0.7575, Test Accuracy 0.7330 86 | Epoch 1700: Test Loss 0.7633, Test Accuracy 0.7280 87 | Epoch 1800: Test Loss 0.7624, Test Accuracy 0.7290 88 | Epoch 1900: Test Loss 0.7734, Test Accuracy 0.7330 89 | Epoch 2000: Test Loss 0.7589, Test Accuracy 0.7300 90 | ******************************* 91 | Trial 03: test accuracy 0.7490 92 | ******************************* 93 | Epoch 0100: Test Loss 0.6871, Test Accuracy 0.7270 94 | Epoch 0200: Test Loss 0.7135, Test Accuracy 0.7310 95 | Epoch 0300: Test Loss 0.7260, Test Accuracy 0.7280 96 | Epoch 0400: Test Loss 0.7417, Test Accuracy 0.7290 97 | Epoch 0500: Test Loss 0.7533, Test Accuracy 0.7280 98 | Epoch 0600: Test Loss 0.7526, Test Accuracy 0.7280 99 | Epoch 0700: Test Loss 0.7634, Test Accuracy 0.7300 100 | Epoch 0800: Test Loss 0.7562, Test Accuracy 0.7280 101 | Epoch 0900: Test Loss 0.7828, Test Accuracy 0.7160 102 | Epoch 1000: Test Loss 0.7672, Test Accuracy 0.7310 103 | Epoch 1100: Test Loss 0.7921, Test Accuracy 0.6990 104 | Epoch 1200: Test Loss 0.7910, Test Accuracy 0.6910 105 | Epoch 1300: Test Loss 0.7679, Test Accuracy 0.7260 106 | Epoch 1400: Test Loss 0.8391, Test Accuracy 0.6760 107 | Epoch 1500: Test Loss 0.7827, Test Accuracy 0.7300 108 | Epoch 1600: 
Test Loss 0.7659, Test Accuracy 0.7320 109 | Epoch 1700: Test Loss 0.7711, Test Accuracy 0.7280 110 | Epoch 1800: Test Loss 0.7681, Test Accuracy 0.7260 111 | Epoch 1900: Test Loss 0.7758, Test Accuracy 0.7300 112 | Epoch 2000: Test Loss 0.8067, Test Accuracy 0.6870 113 | ******************************* 114 | Trial 04: test accuracy 0.7410 115 | ******************************* 116 | [0.7390000224113464, 0.7330000400543213, 0.7390000224113464, 0.7490000128746033, 0.7410000562667847] 117 | std of test accuracy 0.5153632355230239 118 | average of test accuracy 74.02000308036804 119 | Namespace(alpha=100.0, dataset='pubmed', dropout2=0.5, dropout_adj2=0.0, epochs=2000, half_train=0, half_val_as_train=0, hidden=32, k=15, klabel=30, knn_metric='cosine', lr=0.01, method='GCN_KNN_U', nlayers=2, normalization='sym', ntrials=5, patience=3000, sparse=0, w_decay=0.0005) 120 | -------------------------------------------------------------------------------- /static/js/bulma-slider.min.js: -------------------------------------------------------------------------------- 1 | !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var e=1;e=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default}); -------------------------------------------------------------------------------- /citation_networks.py: -------------------------------------------------------------------------------- 1 | # The MIT License 2 | 3 | # Copyright (c) 2016 Thomas Kipf 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 
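# A rough guide to the raw files the loaders below expect under data_tf/ (the Planetoid
# splits released with Kipf's GCN code, linked in the README):
#   ind.<dataset>.x          sparse feature matrix of the labeled training nodes
#   ind.<dataset>.y          one-hot labels for x
#   ind.<dataset>.tx         sparse feature matrix of the test nodes
#   ind.<dataset>.ty         one-hot labels for tx
#   ind.<dataset>.allx       sparse feature matrix of all non-test nodes (superset of x)
#   ind.<dataset>.ally       one-hot labels for allx
#   ind.<dataset>.graph      dict mapping each node id to a list of neighbor ids
#   ind.<dataset>.test.index plain-text list of test node indices in the full graph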
22 | 23 | import pickle as pkl 24 | import sys 25 | import warnings 26 | 27 | import numpy as np 28 | import scipy.sparse as sp 29 | import torch 30 | 31 | warnings.simplefilter("ignore") 32 | 33 | 34 | def parse_index_file(filename): 35 | """Parse index file.""" 36 | index = [] 37 | for line in open(filename): 38 | index.append(int(line.strip())) 39 | return index 40 | 41 | 42 | def sample_mask(idx, l): 43 | """Create mask.""" 44 | mask = np.zeros(l) 45 | mask[idx] = 1 46 | return np.array(mask, dtype=np.bool) 47 | 48 | 49 | def load_citation_network(dataset_str): 50 | names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] 51 | objects = [] 52 | for i in range(len(names)): 53 | with open("data_tf/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f: 54 | if sys.version_info > (3, 0): 55 | objects.append(pkl.load(f, encoding='latin1')) 56 | else: 57 | objects.append(pkl.load(f)) 58 | x, y, tx, ty, allx, ally, graph = tuple(objects) 59 | 60 | test_idx_reorder = parse_index_file("data_tf/ind.{}.test.index".format(dataset_str)) 61 | test_idx_range = np.sort(test_idx_reorder) 62 | 63 | if dataset_str == 'citeseer': 64 | test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) 65 | tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) 66 | tx_extended[test_idx_range - min(test_idx_range), :] = tx 67 | tx = tx_extended 68 | ty_extended = np.zeros((len(test_idx_range_full), y.shape[1])) 69 | ty_extended[test_idx_range - min(test_idx_range), :] = ty 70 | ty = ty_extended 71 | 72 | 73 | features = sp.vstack((allx, tx)).tolil() 74 | features[test_idx_reorder, :] = features[test_idx_range, :] 75 | 76 | labels = np.vstack((ally, ty)) 77 | labels[test_idx_reorder, :] = labels[test_idx_range, :] 78 | idx_test = test_idx_range.tolist() 79 | idx_train = range(len(y)) 80 | idx_val = range(len(y), len(y) + 500) 81 | 82 | train_mask = sample_mask(idx_train, labels.shape[0]) 83 | val_mask = sample_mask(idx_val, labels.shape[0]) 84 | test_mask = sample_mask(idx_test, labels.shape[0]) 85 | 86 | features = torch.FloatTensor(features.todense()) 87 | labels = torch.LongTensor(labels) 88 | train_mask = torch.BoolTensor(train_mask) 89 | val_mask = torch.BoolTensor(val_mask) 90 | test_mask = torch.BoolTensor(test_mask) 91 | 92 | nfeats = features.shape[1] 93 | for i in range(labels.shape[0]): 94 | sum_ = torch.sum(labels[i]) 95 | if sum_ != 1: 96 | labels[i] = torch.tensor([1, 0, 0, 0, 0, 0]) 97 | labels = (labels == 1).nonzero()[:, 1] 98 | nclasses = torch.max(labels).item() + 1 99 | 100 | return features, nfeats, labels, nclasses, train_mask, val_mask, test_mask 101 | 102 | 103 | def load_citation_network_halftrain(dataset_str): 104 | names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] 105 | objects = [] 106 | for i in range(len(names)): 107 | with open("data_tf/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f: 108 | if sys.version_info > (3, 0): 109 | objects.append(pkl.load(f, encoding='latin1')) 110 | else: 111 | objects.append(pkl.load(f)) 112 | x, y, tx, ty, allx, ally, graph = tuple(objects) 113 | test_idx_reorder = parse_index_file("data_tf/ind.{}.test.index".format(dataset_str)) 114 | test_idx_range = np.sort(test_idx_reorder) 115 | 116 | if dataset_str == 'citeseer': 117 | test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) 118 | tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) 119 | tx_extended[test_idx_range - min(test_idx_range), :] = tx 120 | tx = tx_extended 121 | ty_extended = np.zeros((len(test_idx_range_full), 
y.shape[1])) 122 | ty_extended[test_idx_range - min(test_idx_range), :] = ty 123 | ty = ty_extended 124 | 125 | features = sp.vstack((allx, tx)).tolil() 126 | features[test_idx_reorder, :] = features[test_idx_range, :] 127 | 128 | labels = np.vstack((ally, ty)) 129 | labels[test_idx_reorder, :] = labels[test_idx_range, :] 130 | idx_test = test_idx_range.tolist() 131 | idx_train = range(len(y)) 132 | idx_val = range(len(y), len(y) + 500) 133 | 134 | train_mask = sample_mask(idx_train, labels.shape[0]) 135 | val_mask = sample_mask(idx_val, labels.shape[0]) 136 | test_mask = sample_mask(idx_test, labels.shape[0]) 137 | 138 | features = torch.FloatTensor(features.todense()) 139 | labels = torch.LongTensor(labels) 140 | train_mask = torch.BoolTensor(train_mask) 141 | val_mask = torch.BoolTensor(val_mask) 142 | test_mask = torch.BoolTensor(test_mask) 143 | 144 | nfeats = features.shape[1] 145 | for i in range(labels.shape[0]): 146 | sum_ = torch.sum(labels[i]) 147 | if sum_ != 1: 148 | labels[i] = torch.tensor([1, 0, 0, 0, 0, 0]) 149 | labels = (labels == 1).nonzero()[:, 1] 150 | nclasses = torch.max(labels).item() + 1 151 | 152 | if dataset_str == 'pubmed': 153 | colum_sum = torch.zeros((1, 3)) 154 | elif dataset_str == 'cora': 155 | colum_sum = torch.zeros((1, 7)) 156 | elif dataset_str == 'citeseer': 157 | colum_sum = torch.zeros((1, 6)) 158 | 159 | for iii in range(y.shape[0]): 160 | colum_sum = colum_sum + y[iii, :] 161 | if colum_sum.max() > 10: 162 | colum_sum = colum_sum - y[iii, :] 163 | train_mask[iii] = 0 164 | 165 | return features, nfeats, labels, nclasses, train_mask, val_mask, test_mask 166 | 167 | 168 | 169 | 170 | 171 | def load_citation_network_calculate_starved_nodes(dataset_str): 172 | names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] 173 | objects = [] 174 | for i in range(len(names)): 175 | with open("data_tf/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f: 176 | if sys.version_info > (3, 0): 177 | objects.append(pkl.load(f, encoding='latin1')) 178 | else: 179 | objects.append(pkl.load(f)) 180 | x, y, tx, ty, allx, ally, graph = tuple(objects) 181 | 182 | num_vertices = len(graph) 183 | 184 | adjacency = [[0 for j in range(num_vertices)] for i in range(num_vertices)] 185 | 186 | for i in range(num_vertices): 187 | for j in graph[i]: 188 | adjacency[i][j] = 1 189 | 190 | adjacency = np.array(adjacency) 191 | 192 | 193 | 194 | test_idx_reorder = parse_index_file("data_tf/ind.{}.test.index".format(dataset_str)) 195 | test_idx_range = np.sort(test_idx_reorder) 196 | 197 | if dataset_str == 'citeseer': 198 | # Fix citeseer dataset (there are some isolated nodes in the graph) 199 | # Find isolated nodes, add them as zero-vecs into the right position 200 | test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) 201 | tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) 202 | tx_extended[test_idx_range - min(test_idx_range), :] = tx 203 | tx = tx_extended 204 | ty_extended = np.zeros((len(test_idx_range_full), y.shape[1])) 205 | ty_extended[test_idx_range - min(test_idx_range), :] = ty 206 | ty = ty_extended 207 | 208 | 209 | features = sp.vstack((allx, tx)).tolil() 210 | features[test_idx_reorder, :] = features[test_idx_range, :] 211 | 212 | labels = np.vstack((ally, ty)) 213 | labels[test_idx_reorder, :] = labels[test_idx_range, :] 214 | idx_test = test_idx_range.tolist() 215 | idx_train = range(len(y)) 216 | idx_val = range(len(y), len(y) + 500) 217 | 218 | train_mask = sample_mask(idx_train, labels.shape[0]) 219 | val_mask = 
sample_mask(idx_val, labels.shape[0]) 220 | test_mask = sample_mask(idx_test, labels.shape[0]) 221 | 222 | features = torch.FloatTensor(features.todense()) 223 | labels = torch.LongTensor(labels) 224 | train_mask = torch.BoolTensor(train_mask) 225 | val_mask = torch.BoolTensor(val_mask) 226 | test_mask = torch.BoolTensor(test_mask) 227 | 228 | nfeats = features.shape[1] 229 | for i in range(labels.shape[0]): 230 | sum_ = torch.sum(labels[i]) 231 | if sum_ != 1: 232 | labels[i] = torch.tensor([1, 0, 0, 0, 0, 0]) 233 | labels = (labels == 1).nonzero()[:, 1] 234 | nclasses = torch.max(labels).item() + 1 235 | 236 | return features, nfeats, labels, nclasses, train_mask, val_mask, test_mask, adjacency 237 | 238 | -------------------------------------------------------------------------------- /static/css/bulma-slider.min.css: -------------------------------------------------------------------------------- 1 | @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}input[type=range].slider{-webkit-appearance:none;-moz-appearance:none;appearance:none;margin:1rem 0;background:0 0;touch-action:none}input[type=range].slider.is-fullwidth{display:block;width:100%}input[type=range].slider:focus{outline:0}input[type=range].slider:not([orient=vertical])::-webkit-slider-runnable-track{width:100%}input[type=range].slider:not([orient=vertical])::-moz-range-track{width:100%}input[type=range].slider:not([orient=vertical])::-ms-track{width:100%}input[type=range].slider:not([orient=vertical]).has-output+output,input[type=range].slider:not([orient=vertical]).has-output-tooltip+output{width:3rem;background:#4a4a4a;border-radius:4px;padding:.4rem .8rem;font-size:.75rem;line-height:.75rem;text-align:center;text-overflow:ellipsis;white-space:nowrap;color:#fff;overflow:hidden;pointer-events:none;z-index:200}input[type=range].slider:not([orient=vertical]).has-output-tooltip:disabled+output,input[type=range].slider:not([orient=vertical]).has-output:disabled+output{opacity:.5}input[type=range].slider:not([orient=vertical]).has-output{display:inline-block;vertical-align:middle;width:calc(100% - (4.2rem))}input[type=range].slider:not([orient=vertical]).has-output+output{display:inline-block;margin-left:.75rem;vertical-align:middle}input[type=range].slider:not([orient=vertical]).has-output-tooltip{display:block}input[type=range].slider:not([orient=vertical]).has-output-tooltip+output{position:absolute;left:0;top:-.1rem}input[type=range].slider[orient=vertical]{-webkit-appearance:slider-vertical;-moz-appearance:slider-vertical;appearance:slider-vertical;-webkit-writing-mode:bt-lr;-ms-writing-mode:bt-lr;writing-mode:bt-lr}input[type=range].slider[orient=vertical]::-webkit-slider-runnable-track{height:100%}input[type=range].slider[orient=vertical]::-moz-range-track{height:100%}input[type=range].slider[orient=vertical]::-ms-track{height:100%}input[type=range].slider::-webkit-slider-runnable-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid #7a7a7a}input[type=range].slider::-moz-range-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid #7a7a7a}input[type=range].slider::-ms-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid 
#7a7a7a}input[type=range].slider::-ms-fill-lower{background:#dbdbdb;border-radius:4px}input[type=range].slider::-ms-fill-upper{background:#dbdbdb;border-radius:4px}input[type=range].slider::-webkit-slider-thumb{box-shadow:none;border:1px solid #b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-moz-range-thumb{box-shadow:none;border:1px solid #b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-ms-thumb{box-shadow:none;border:1px solid #b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-webkit-slider-thumb{-webkit-appearance:none;appearance:none}input[type=range].slider.is-circle::-webkit-slider-thumb{border-radius:290486px}input[type=range].slider.is-circle::-moz-range-thumb{border-radius:290486px}input[type=range].slider.is-circle::-ms-thumb{border-radius:290486px}input[type=range].slider:active::-webkit-slider-thumb{-webkit-transform:scale(1.25);transform:scale(1.25)}input[type=range].slider:active::-moz-range-thumb{transform:scale(1.25)}input[type=range].slider:active::-ms-thumb{transform:scale(1.25)}input[type=range].slider:disabled{opacity:.5;cursor:not-allowed}input[type=range].slider:disabled::-webkit-slider-thumb{cursor:not-allowed;-webkit-transform:scale(1);transform:scale(1)}input[type=range].slider:disabled::-moz-range-thumb{cursor:not-allowed;transform:scale(1)}input[type=range].slider:disabled::-ms-thumb{cursor:not-allowed;transform:scale(1)}input[type=range].slider:not([orient=vertical]){min-height:calc((1rem + 2px) * 1.25)}input[type=range].slider:not([orient=vertical])::-webkit-slider-runnable-track{height:.5rem}input[type=range].slider:not([orient=vertical])::-moz-range-track{height:.5rem}input[type=range].slider:not([orient=vertical])::-ms-track{height:.5rem}input[type=range].slider[orient=vertical]::-webkit-slider-runnable-track{width:.5rem}input[type=range].slider[orient=vertical]::-moz-range-track{width:.5rem}input[type=range].slider[orient=vertical]::-ms-track{width:.5rem}input[type=range].slider::-webkit-slider-thumb{height:1rem;width:1rem}input[type=range].slider::-moz-range-thumb{height:1rem;width:1rem}input[type=range].slider::-ms-thumb{height:1rem;width:1rem}input[type=range].slider::-ms-thumb{margin-top:0}input[type=range].slider::-webkit-slider-thumb{margin-top:-.25rem}input[type=range].slider[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.25rem}input[type=range].slider.is-small:not([orient=vertical]){min-height:calc((.75rem + 2px) * 
1.25)}input[type=range].slider.is-small:not([orient=vertical])::-webkit-slider-runnable-track{height:.375rem}input[type=range].slider.is-small:not([orient=vertical])::-moz-range-track{height:.375rem}input[type=range].slider.is-small:not([orient=vertical])::-ms-track{height:.375rem}input[type=range].slider.is-small[orient=vertical]::-webkit-slider-runnable-track{width:.375rem}input[type=range].slider.is-small[orient=vertical]::-moz-range-track{width:.375rem}input[type=range].slider.is-small[orient=vertical]::-ms-track{width:.375rem}input[type=range].slider.is-small::-webkit-slider-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-moz-range-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-ms-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-ms-thumb{margin-top:0}input[type=range].slider.is-small::-webkit-slider-thumb{margin-top:-.1875rem}input[type=range].slider.is-small[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.1875rem}input[type=range].slider.is-medium:not([orient=vertical]){min-height:calc((1.25rem + 2px) * 1.25)}input[type=range].slider.is-medium:not([orient=vertical])::-webkit-slider-runnable-track{height:.625rem}input[type=range].slider.is-medium:not([orient=vertical])::-moz-range-track{height:.625rem}input[type=range].slider.is-medium:not([orient=vertical])::-ms-track{height:.625rem}input[type=range].slider.is-medium[orient=vertical]::-webkit-slider-runnable-track{width:.625rem}input[type=range].slider.is-medium[orient=vertical]::-moz-range-track{width:.625rem}input[type=range].slider.is-medium[orient=vertical]::-ms-track{width:.625rem}input[type=range].slider.is-medium::-webkit-slider-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-moz-range-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-ms-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-ms-thumb{margin-top:0}input[type=range].slider.is-medium::-webkit-slider-thumb{margin-top:-.3125rem}input[type=range].slider.is-medium[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.3125rem}input[type=range].slider.is-large:not([orient=vertical]){min-height:calc((1.5rem + 2px) * 
1.25)}input[type=range].slider.is-large:not([orient=vertical])::-webkit-slider-runnable-track{height:.75rem}input[type=range].slider.is-large:not([orient=vertical])::-moz-range-track{height:.75rem}input[type=range].slider.is-large:not([orient=vertical])::-ms-track{height:.75rem}input[type=range].slider.is-large[orient=vertical]::-webkit-slider-runnable-track{width:.75rem}input[type=range].slider.is-large[orient=vertical]::-moz-range-track{width:.75rem}input[type=range].slider.is-large[orient=vertical]::-ms-track{width:.75rem}input[type=range].slider.is-large::-webkit-slider-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-moz-range-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-ms-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-ms-thumb{margin-top:0}input[type=range].slider.is-large::-webkit-slider-thumb{margin-top:-.375rem}input[type=range].slider.is-large[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.375rem}input[type=range].slider.is-white::-moz-range-track{background:#fff!important}input[type=range].slider.is-white::-webkit-slider-runnable-track{background:#fff!important}input[type=range].slider.is-white::-ms-track{background:#fff!important}input[type=range].slider.is-white::-ms-fill-lower{background:#fff}input[type=range].slider.is-white::-ms-fill-upper{background:#fff}input[type=range].slider.is-white .has-output-tooltip+output,input[type=range].slider.is-white.has-output+output{background-color:#fff;color:#0a0a0a}input[type=range].slider.is-black::-moz-range-track{background:#0a0a0a!important}input[type=range].slider.is-black::-webkit-slider-runnable-track{background:#0a0a0a!important}input[type=range].slider.is-black::-ms-track{background:#0a0a0a!important}input[type=range].slider.is-black::-ms-fill-lower{background:#0a0a0a}input[type=range].slider.is-black::-ms-fill-upper{background:#0a0a0a}input[type=range].slider.is-black .has-output-tooltip+output,input[type=range].slider.is-black.has-output+output{background-color:#0a0a0a;color:#fff}input[type=range].slider.is-light::-moz-range-track{background:#f5f5f5!important}input[type=range].slider.is-light::-webkit-slider-runnable-track{background:#f5f5f5!important}input[type=range].slider.is-light::-ms-track{background:#f5f5f5!important}input[type=range].slider.is-light::-ms-fill-lower{background:#f5f5f5}input[type=range].slider.is-light::-ms-fill-upper{background:#f5f5f5}input[type=range].slider.is-light .has-output-tooltip+output,input[type=range].slider.is-light.has-output+output{background-color:#f5f5f5;color:#363636}input[type=range].slider.is-dark::-moz-range-track{background:#363636!important}input[type=range].slider.is-dark::-webkit-slider-runnable-track{background:#363636!important}input[type=range].slider.is-dark::-ms-track{background:#363636!important}input[type=range].slider.is-dark::-ms-fill-lower{background:#363636}input[type=range].slider.is-dark::-ms-fill-upper{background:#363636}input[type=range].slider.is-dark 
.has-output-tooltip+output,input[type=range].slider.is-dark.has-output+output{background-color:#363636;color:#f5f5f5}input[type=range].slider.is-primary::-moz-range-track{background:#00d1b2!important}input[type=range].slider.is-primary::-webkit-slider-runnable-track{background:#00d1b2!important}input[type=range].slider.is-primary::-ms-track{background:#00d1b2!important}input[type=range].slider.is-primary::-ms-fill-lower{background:#00d1b2}input[type=range].slider.is-primary::-ms-fill-upper{background:#00d1b2}input[type=range].slider.is-primary .has-output-tooltip+output,input[type=range].slider.is-primary.has-output+output{background-color:#00d1b2;color:#fff}input[type=range].slider.is-link::-moz-range-track{background:#3273dc!important}input[type=range].slider.is-link::-webkit-slider-runnable-track{background:#3273dc!important}input[type=range].slider.is-link::-ms-track{background:#3273dc!important}input[type=range].slider.is-link::-ms-fill-lower{background:#3273dc}input[type=range].slider.is-link::-ms-fill-upper{background:#3273dc}input[type=range].slider.is-link .has-output-tooltip+output,input[type=range].slider.is-link.has-output+output{background-color:#3273dc;color:#fff}input[type=range].slider.is-info::-moz-range-track{background:#209cee!important}input[type=range].slider.is-info::-webkit-slider-runnable-track{background:#209cee!important}input[type=range].slider.is-info::-ms-track{background:#209cee!important}input[type=range].slider.is-info::-ms-fill-lower{background:#209cee}input[type=range].slider.is-info::-ms-fill-upper{background:#209cee}input[type=range].slider.is-info .has-output-tooltip+output,input[type=range].slider.is-info.has-output+output{background-color:#209cee;color:#fff}input[type=range].slider.is-success::-moz-range-track{background:#23d160!important}input[type=range].slider.is-success::-webkit-slider-runnable-track{background:#23d160!important}input[type=range].slider.is-success::-ms-track{background:#23d160!important}input[type=range].slider.is-success::-ms-fill-lower{background:#23d160}input[type=range].slider.is-success::-ms-fill-upper{background:#23d160}input[type=range].slider.is-success .has-output-tooltip+output,input[type=range].slider.is-success.has-output+output{background-color:#23d160;color:#fff}input[type=range].slider.is-warning::-moz-range-track{background:#ffdd57!important}input[type=range].slider.is-warning::-webkit-slider-runnable-track{background:#ffdd57!important}input[type=range].slider.is-warning::-ms-track{background:#ffdd57!important}input[type=range].slider.is-warning::-ms-fill-lower{background:#ffdd57}input[type=range].slider.is-warning::-ms-fill-upper{background:#ffdd57}input[type=range].slider.is-warning .has-output-tooltip+output,input[type=range].slider.is-warning.has-output+output{background-color:#ffdd57;color:rgba(0,0,0,.7)}input[type=range].slider.is-danger::-moz-range-track{background:#ff3860!important}input[type=range].slider.is-danger::-webkit-slider-runnable-track{background:#ff3860!important}input[type=range].slider.is-danger::-ms-track{background:#ff3860!important}input[type=range].slider.is-danger::-ms-fill-lower{background:#ff3860}input[type=range].slider.is-danger::-ms-fill-upper{background:#ff3860}input[type=range].slider.is-danger .has-output-tooltip+output,input[type=range].slider.is-danger.has-output+output{background-color:#ff3860;color:#fff} -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | 
import argparse 2 | import random 3 | from data_loader import load_data 4 | from citation_networks import load_citation_network_halftrain 5 | from model import GCN 6 | from utils import * 7 | 8 | class Experiment: 9 | def __init__(self): 10 | super(Experiment, self).__init__() 11 | 12 | def get_loss_fixed_adj(self, model, mask, features, labels): 13 | logits = model(features) 14 | logp = F.log_softmax(logits, 1) 15 | loss = F.nll_loss(logp[mask], labels[mask], reduction='mean') 16 | accu = accuracy(logp[mask], labels[mask]) 17 | return loss, accu 18 | 19 | def half_val_as_train(self, val_mask, train_mask): 20 | val_size = np.count_nonzero(val_mask) 21 | counter = 0 22 | for i in range(len(val_mask)): 23 | if val_mask[i] and counter < val_size / 2: 24 | counter += 1 25 | val_mask[i] = False 26 | train_mask[i] = True 27 | return val_mask, train_mask 28 | 29 | 30 | def GCN_KNN(self, args): 31 | 32 | if args.half_train: 33 | print("Using half of labeled nodes for training!") 34 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_citation_network_halftrain(args.dataset) 35 | else: 36 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_data(args) 37 | 38 | Adj = torch.from_numpy(nearest_neighbors(features, args.k, args.knn_metric)).cuda() 39 | Adj = normalize(Adj, args.normalization, args.sparse) 40 | 41 | if torch.cuda.is_available(): 42 | features = features.cuda() 43 | 44 | if args.half_val_as_train: 45 | val_mask, train_mask = self.half_val_as_train(val_mask, train_mask) 46 | 47 | 48 | test_accu = [] 49 | for trial in range(args.ntrials): 50 | model = GCN(in_channels=nfeats, hidden_channels=args.hidden, out_channels=nclasses, num_layers=args.nlayers, 51 | dropout=args.dropout2, dropout_adj=args.dropout_adj2, Adj=Adj, sparse=args.sparse) 52 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay) 53 | 54 | best_test_accu = 0 55 | counter = 0 56 | if torch.cuda.is_available(): 57 | model = model.cuda() 58 | train_mask = train_mask.cuda() 59 | val_mask = val_mask.cuda() 60 | test_mask = test_mask.cuda() 61 | features = features.cuda() 62 | labels = labels.cuda() 63 | 64 | for epoch in range(1, args.epochs + 1): 65 | model.train() 66 | loss, accu = self.get_loss_fixed_adj(model, train_mask, features, labels) 67 | optimizer.zero_grad() 68 | loss.backward() 69 | 70 | optimizer.step() 71 | 72 | if epoch % 1 == 0: 73 | with torch.no_grad(): 74 | model.eval() 75 | test_loss_, test_accu_ = self.get_loss_fixed_adj(model, test_mask, features, labels) 76 | if epoch % 100 == 0: 77 | print("Epoch {:04d}: Test Loss {:.4f}, Test Accuracy {:.4f}".format(epoch, test_loss_, test_accu_)) 78 | if test_accu_ > best_test_accu: 79 | counter = 0 80 | best_test_accu = test_accu_ 81 | else: 82 | counter += 1 83 | if counter >= args.patience: 84 | break 85 | 86 | with torch.no_grad(): 87 | model.eval() 88 | print("*******************************") 89 | print("Trial {:02d}: test accuracy {:.4f}".format(trial, best_test_accu)) 90 | print("*******************************") 91 | test_accu.append(best_test_accu.item()) 92 | 93 | print(test_accu) 94 | print("std of test accuracy", np.std(test_accu) * 100) 95 | print("average of test accuracy", np.mean(test_accu) * 100) 96 | 97 | 98 | def GCN_KNN_U(self, args): 99 | 100 | if args.half_train: 101 | print("Using half of labeled nodes for training!") 102 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_citation_network_halftrain(args.dataset) 103 | else: 104 | features, 
nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_data(args) 105 | 106 | Adj = torch.from_numpy(nearest_neighbors(features, args.k, args.knn_metric)).cuda() 107 | 108 | 109 | 110 | if torch.cuda.is_available(): 111 | features = features.cuda() 112 | 113 | if args.half_val_as_train: 114 | val_mask, train_mask = self.half_val_as_train(val_mask, train_mask) 115 | 116 | 117 | 118 | CUR_C = Adj.clone()[:, train_mask] 119 | row_mask = torch.sum(CUR_C, dim=1) > 0 120 | asy_similarities = cal_similarity_graph(features, features[train_mask, :]) 121 | asy_similarities = top_k(asy_similarities, args.klabel) 122 | asy_similarities[train_mask, :] = 0.0 # Empirically, we found that train_mask works better!! 123 | # asy_similarities[row_mask, :] = 0.0 124 | 125 | 126 | Adj[:, train_mask] = Adj[:, train_mask] + args.alpha * asy_similarities 127 | Adj = normalize(Adj, args.normalization, args.sparse) 128 | 129 | test_accu = [] 130 | for trial in range(args.ntrials): 131 | model = GCN(in_channels=nfeats, hidden_channels=args.hidden, out_channels=nclasses, num_layers=args.nlayers, 132 | dropout=args.dropout2, dropout_adj=args.dropout_adj2, Adj=Adj, sparse=args.sparse) 133 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay) 134 | 135 | best_test_accu = 0 136 | counter = 0 137 | if torch.cuda.is_available(): 138 | model = model.cuda() 139 | train_mask = train_mask.cuda() 140 | val_mask = val_mask.cuda() 141 | test_mask = test_mask.cuda() 142 | features = features.cuda() 143 | labels = labels.cuda() 144 | 145 | for epoch in range(1, args.epochs + 1): 146 | model.train() 147 | loss, accu = self.get_loss_fixed_adj(model, train_mask, features, labels) 148 | optimizer.zero_grad() 149 | loss.backward() 150 | 151 | optimizer.step() 152 | 153 | if epoch % 1 == 0: 154 | with torch.no_grad(): 155 | model.eval() 156 | test_loss_, test_accu_ = self.get_loss_fixed_adj(model, test_mask, features, labels) 157 | if epoch % 100 == 0: 158 | print("Epoch {:04d}: Test Loss {:.4f}, Test Accuracy {:.4f}".format(epoch, test_loss_, test_accu_)) 159 | if test_accu_ > best_test_accu: 160 | counter = 0 161 | best_test_accu = test_accu_ 162 | else: 163 | counter += 1 164 | if counter >= args.patience: 165 | break 166 | 167 | with torch.no_grad(): 168 | model.eval() 169 | print("*******************************") 170 | print("Trial {:02d}: test accuracy {:.4f}".format(trial, best_test_accu)) 171 | print("*******************************") 172 | test_accu.append(best_test_accu.item()) 173 | 174 | print(test_accu) 175 | print("std of test accuracy", np.std(test_accu) * 100) 176 | print("average of test accuracy", np.mean(test_accu) * 100) 177 | 178 | 179 | 180 | def GCN_KNN_R(self, args): 181 | 182 | if args.half_train: 183 | print("Using half of labeled nodes for training!") 184 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_citation_network_halftrain(args.dataset) 185 | else: 186 | features, nfeats, labels, nclasses, train_mask, val_mask, test_mask = load_data(args) 187 | 188 | Adj = torch.from_numpy(nearest_neighbors(features, args.k, args.knn_metric)).cuda() 189 | 190 | 191 | 192 | if torch.cuda.is_available(): 193 | features = features.cuda() 194 | 195 | if args.half_val_as_train: 196 | val_mask, train_mask = self.half_val_as_train(val_mask, train_mask) 197 | 198 | asy_similarities = cal_similarity_graph(features, features[train_mask, :]) 199 | asy_similarities = top_k(asy_similarities, args.klabel) 200 | 201 | Adj[:, train_mask] = Adj[:, train_mask] + 
args.alpha * asy_similarities 202 | Adj = normalize(Adj, args.normalization, args.sparse) 203 | 204 | test_accu = [] 205 | for trial in range(args.ntrials): 206 | model = GCN(in_channels=nfeats, hidden_channels=args.hidden, out_channels=nclasses, num_layers=args.nlayers, 207 | dropout=args.dropout2, dropout_adj=args.dropout_adj2, Adj=Adj, sparse=args.sparse) 208 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay) 209 | 210 | best_test_accu = 0 211 | counter = 0 212 | if torch.cuda.is_available(): 213 | model = model.cuda() 214 | train_mask = train_mask.cuda() 215 | val_mask = val_mask.cuda() 216 | test_mask = test_mask.cuda() 217 | features = features.cuda() 218 | labels = labels.cuda() 219 | 220 | for epoch in range(1, args.epochs + 1): 221 | model.train() 222 | loss, accu = self.get_loss_fixed_adj(model, train_mask, features, labels) 223 | optimizer.zero_grad() 224 | loss.backward() 225 | 226 | optimizer.step() 227 | 228 | if epoch % 1 == 0: 229 | with torch.no_grad(): 230 | model.eval() 231 | test_loss_, test_accu_ = self.get_loss_fixed_adj(model, test_mask, features, labels) 232 | if epoch % 100 == 0: 233 | print("Epoch {:04d}: Test Loss {:.4f}, Test Accuracy {:.4f}".format(epoch, test_loss_, test_accu_)) 234 | if test_accu_ > best_test_accu: 235 | counter = 0 236 | best_test_accu = test_accu_ 237 | else: 238 | counter += 1 239 | if counter >= args.patience: 240 | break 241 | 242 | 243 | with torch.no_grad(): 244 | model.eval() 245 | print("Trial {:02d}: test accuracy {:.4f}".format(trial, best_test_accu)) 246 | test_accu.append(best_test_accu.item()) 247 | 248 | print(test_accu) 249 | print("std of test accuracy", np.std(test_accu) * 100) 250 | print("average of test accuracy", np.mean(test_accu) * 100) 251 | 252 | 253 | 254 | 255 | if __name__ == '__main__': 256 | parser = argparse.ArgumentParser() 257 | 258 | parser.add_argument('-dataset', type=str, default='pubmed', help='See choices', 259 | choices=['cora', 'citeseer', 'pubmed', 'ogbn-arxiv']) 260 | parser.add_argument('-ntrials', type=int, default=5, help='Number of trials') 261 | parser.add_argument('-epochs', type=int, default=2000, help='Number of epochs to train.') 262 | parser.add_argument('-lr', type=float, default=0.01, help='Initial learning rate.') 263 | parser.add_argument('-w_decay', type=float, default=0.0005, help='Weight decay (L2 loss on parameters).') 264 | parser.add_argument('-hidden', type=int, default=32, help='Number of hidden units.') 265 | parser.add_argument('-nlayers', type=int, default=2, help='#layers') 266 | parser.add_argument('-k', type=int, default=15, help='k for initializing with knn') 267 | parser.add_argument('-knn_metric', type=str, default='cosine', help='See choices', choices=['cosine', 'minkowski']) 268 | parser.add_argument('-half_val_as_train', type=int, default=0, help='use first half of validation for training') 269 | parser.add_argument('-half_train', type=int, default=0, help='use half of labeled nodes for training') 270 | parser.add_argument('-normalization', type=str, default='sym') 271 | parser.add_argument('-sparse', type=int, default=0) 272 | parser.add_argument('-patience', type=int, default=3000, help='patience for early stopping') 273 | parser.add_argument('-method', type=str, default='GCN_KNN', help='See choices', 274 | choices=['GCN_KNN', 'GCN_KNN_U', 'GCN_KNN_R']) 275 | experiment = Experiment() 276 | 277 | 278 | def set_seed(seed): 279 | random.seed(seed) 280 | np.random.seed(seed) 281 | torch.manual_seed(seed) 282 | if 
torch.cuda.is_available(): 283 | torch.cuda.manual_seed(seed) 284 | torch.cuda.manual_seed_all(seed) 285 | set_seed(42) 286 | torch.backends.cudnn.deterministic = True 287 | torch.backends.cudnn.benchmark = True 288 | 289 | parser.add_argument('-dropout2', type=float, default=0.5, help='Dropout rate in GCN.') 290 | parser.add_argument('-dropout_adj2', type=float, default=0., help='Dropout rate GCN.') 291 | parser.add_argument('-alpha', type=float, default=100, help='control the contribution of asy and sys similarity') 292 | parser.add_argument('-klabel', type=int, default=30, help='k_label for asymmetric similarity') 293 | args = parser.parse_args() 294 | 295 | if args.method == "GCN_KNN": 296 | experiment.GCN_KNN(args) 297 | elif args.method == "GCN_KNN_U": 298 | experiment.GCN_KNN_U(args) 299 | elif args.method == "GCN_KNN_R": 300 | experiment.GCN_KNN_R(args) 301 | print(args) -------------------------------------------------------------------------------- /static/js/bulma-slider.js: -------------------------------------------------------------------------------- 1 | (function webpackUniversalModuleDefinition(root, factory) { 2 | if(typeof exports === 'object' && typeof module === 'object') 3 | module.exports = factory(); 4 | else if(typeof define === 'function' && define.amd) 5 | define([], factory); 6 | else if(typeof exports === 'object') 7 | exports["bulmaSlider"] = factory(); 8 | else 9 | root["bulmaSlider"] = factory(); 10 | })(typeof self !== 'undefined' ? self : this, function() { 11 | return /******/ (function(modules) { // webpackBootstrap 12 | /******/ // The module cache 13 | /******/ var installedModules = {}; 14 | /******/ 15 | /******/ // The require function 16 | /******/ function __webpack_require__(moduleId) { 17 | /******/ 18 | /******/ // Check if module is in cache 19 | /******/ if(installedModules[moduleId]) { 20 | /******/ return installedModules[moduleId].exports; 21 | /******/ } 22 | /******/ // Create a new module (and put it into the cache) 23 | /******/ var module = installedModules[moduleId] = { 24 | /******/ i: moduleId, 25 | /******/ l: false, 26 | /******/ exports: {} 27 | /******/ }; 28 | /******/ 29 | /******/ // Execute the module function 30 | /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); 31 | /******/ 32 | /******/ // Flag the module as loaded 33 | /******/ module.l = true; 34 | /******/ 35 | /******/ // Return the exports of the module 36 | /******/ return module.exports; 37 | /******/ } 38 | /******/ 39 | /******/ 40 | /******/ // expose the modules object (__webpack_modules__) 41 | /******/ __webpack_require__.m = modules; 42 | /******/ 43 | /******/ // expose the module cache 44 | /******/ __webpack_require__.c = installedModules; 45 | /******/ 46 | /******/ // define getter function for harmony exports 47 | /******/ __webpack_require__.d = function(exports, name, getter) { 48 | /******/ if(!__webpack_require__.o(exports, name)) { 49 | /******/ Object.defineProperty(exports, name, { 50 | /******/ configurable: false, 51 | /******/ enumerable: true, 52 | /******/ get: getter 53 | /******/ }); 54 | /******/ } 55 | /******/ }; 56 | /******/ 57 | /******/ // getDefaultExport function for compatibility with non-harmony modules 58 | /******/ __webpack_require__.n = function(module) { 59 | /******/ var getter = module && module.__esModule ? 
60 | /******/ function getDefault() { return module['default']; } : 61 | /******/ function getModuleExports() { return module; }; 62 | /******/ __webpack_require__.d(getter, 'a', getter); 63 | /******/ return getter; 64 | /******/ }; 65 | /******/ 66 | /******/ // Object.prototype.hasOwnProperty.call 67 | /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; 68 | /******/ 69 | /******/ // __webpack_public_path__ 70 | /******/ __webpack_require__.p = ""; 71 | /******/ 72 | /******/ // Load entry module and return exports 73 | /******/ return __webpack_require__(__webpack_require__.s = 0); 74 | /******/ }) 75 | /************************************************************************/ 76 | /******/ ([ 77 | /* 0 */ 78 | /***/ (function(module, __webpack_exports__, __webpack_require__) { 79 | 80 | "use strict"; 81 | Object.defineProperty(__webpack_exports__, "__esModule", { value: true }); 82 | /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "isString", function() { return isString; }); 83 | /* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__events__ = __webpack_require__(1); 84 | var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; 85 | 86 | var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); 87 | 88 | var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; 89 | 90 | function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } 91 | 92 | function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } 93 | 94 | function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } 95 | 96 | 97 | 98 | var isString = function isString(unknown) { 99 | return typeof unknown === 'string' || !!unknown && (typeof unknown === 'undefined' ? 
'undefined' : _typeof(unknown)) === 'object' && Object.prototype.toString.call(unknown) === '[object String]'; 100 | }; 101 | 102 | var bulmaSlider = function (_EventEmitter) { 103 | _inherits(bulmaSlider, _EventEmitter); 104 | 105 | function bulmaSlider(selector) { 106 | var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; 107 | 108 | _classCallCheck(this, bulmaSlider); 109 | 110 | var _this = _possibleConstructorReturn(this, (bulmaSlider.__proto__ || Object.getPrototypeOf(bulmaSlider)).call(this)); 111 | 112 | _this.element = typeof selector === 'string' ? document.querySelector(selector) : selector; 113 | // An invalid selector or non-DOM node has been provided. 114 | if (!_this.element) { 115 | throw new Error('An invalid selector or non-DOM node has been provided.'); 116 | } 117 | 118 | _this._clickEvents = ['click']; 119 | /// Set default options and merge with instance defined 120 | _this.options = _extends({}, options); 121 | 122 | _this.onSliderInput = _this.onSliderInput.bind(_this); 123 | 124 | _this.init(); 125 | return _this; 126 | } 127 | 128 | /** 129 | * Initiate all DOM element containing selector 130 | * @method 131 | * @return {Array} Array of all slider instances 132 | */ 133 | 134 | 135 | _createClass(bulmaSlider, [{ 136 | key: 'init', 137 | 138 | 139 | /** 140 | * Initiate plugin 141 | * @method init 142 | * @return {void} 143 | */ 144 | value: function init() { 145 | this._id = 'bulmaSlider' + new Date().getTime() + Math.floor(Math.random() * Math.floor(9999)); 146 | this.output = this._findOutputForSlider(); 147 | 148 | this._bindEvents(); 149 | 150 | if (this.output) { 151 | if (this.element.classList.contains('has-output-tooltip')) { 152 | // Get new output position 153 | var newPosition = this._getSliderOutputPosition(); 154 | 155 | // Set output position 156 | this.output.style['left'] = newPosition.position; 157 | } 158 | } 159 | 160 | this.emit('bulmaslider:ready', this.element.value); 161 | } 162 | }, { 163 | key: '_findOutputForSlider', 164 | value: function _findOutputForSlider() { 165 | var _this2 = this; 166 | 167 | var result = null; 168 | var outputs = document.getElementsByTagName('output') || []; 169 | 170 | Array.from(outputs).forEach(function (output) { 171 | if (output.htmlFor == _this2.element.getAttribute('id')) { 172 | result = output; 173 | return true; 174 | } 175 | }); 176 | return result; 177 | } 178 | }, { 179 | key: '_getSliderOutputPosition', 180 | value: function _getSliderOutputPosition() { 181 | // Update output position 182 | var newPlace, minValue; 183 | 184 | var style = window.getComputedStyle(this.element, null); 185 | // Measure width of range input 186 | var sliderWidth = parseInt(style.getPropertyValue('width'), 10); 187 | 188 | // Figure out placement percentage between left and right of input 189 | if (!this.element.getAttribute('min')) { 190 | minValue = 0; 191 | } else { 192 | minValue = this.element.getAttribute('min'); 193 | } 194 | var newPoint = (this.element.value - minValue) / (this.element.getAttribute('max') - minValue); 195 | 196 | // Prevent bubble from going beyond left or right (unsupported browsers) 197 | if (newPoint < 0) { 198 | newPlace = 0; 199 | } else if (newPoint > 1) { 200 | newPlace = sliderWidth; 201 | } else { 202 | newPlace = sliderWidth * newPoint; 203 | } 204 | 205 | return { 206 | 'position': newPlace + 'px' 207 | }; 208 | } 209 | 210 | /** 211 | * Bind all events 212 | * @method _bindEvents 213 | * @return {void} 214 | */ 215 | 216 | }, { 217 | key: 
'_bindEvents', 218 | value: function _bindEvents() { 219 | if (this.output) { 220 | // Add event listener to update output when slider value change 221 | this.element.addEventListener('input', this.onSliderInput, false); 222 | } 223 | } 224 | }, { 225 | key: 'onSliderInput', 226 | value: function onSliderInput(e) { 227 | e.preventDefault(); 228 | 229 | if (this.element.classList.contains('has-output-tooltip')) { 230 | // Get new output position 231 | var newPosition = this._getSliderOutputPosition(); 232 | 233 | // Set output position 234 | this.output.style['left'] = newPosition.position; 235 | } 236 | 237 | // Check for prefix and postfix 238 | var prefix = this.output.hasAttribute('data-prefix') ? this.output.getAttribute('data-prefix') : ''; 239 | var postfix = this.output.hasAttribute('data-postfix') ? this.output.getAttribute('data-postfix') : ''; 240 | 241 | // Update output with slider value 242 | this.output.value = prefix + this.element.value + postfix; 243 | 244 | this.emit('bulmaslider:ready', this.element.value); 245 | } 246 | }], [{ 247 | key: 'attach', 248 | value: function attach() { 249 | var _this3 = this; 250 | 251 | var selector = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 'input[type="range"].slider'; 252 | var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; 253 | 254 | var instances = new Array(); 255 | 256 | var elements = isString(selector) ? document.querySelectorAll(selector) : Array.isArray(selector) ? selector : [selector]; 257 | elements.forEach(function (element) { 258 | if (typeof element[_this3.constructor.name] === 'undefined') { 259 | var instance = new bulmaSlider(element, options); 260 | element[_this3.constructor.name] = instance; 261 | instances.push(instance); 262 | } else { 263 | instances.push(element[_this3.constructor.name]); 264 | } 265 | }); 266 | 267 | return instances; 268 | } 269 | }]); 270 | 271 | return bulmaSlider; 272 | }(__WEBPACK_IMPORTED_MODULE_0__events__["a" /* default */]); 273 | 274 | /* harmony default export */ __webpack_exports__["default"] = (bulmaSlider); 275 | 276 | /***/ }), 277 | /* 1 */ 278 | /***/ (function(module, __webpack_exports__, __webpack_require__) { 279 | 280 | "use strict"; 281 | var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); 282 | 283 | function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } 284 | 285 | var EventEmitter = function () { 286 | function EventEmitter() { 287 | var listeners = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : []; 288 | 289 | _classCallCheck(this, EventEmitter); 290 | 291 | this._listeners = new Map(listeners); 292 | this._middlewares = new Map(); 293 | } 294 | 295 | _createClass(EventEmitter, [{ 296 | key: "listenerCount", 297 | value: function listenerCount(eventName) { 298 | if (!this._listeners.has(eventName)) { 299 | return 0; 300 | } 301 | 302 | var eventListeners = this._listeners.get(eventName); 303 | return eventListeners.length; 304 | } 305 | }, { 306 | key: "removeListeners", 307 | value: function removeListeners() { 308 | var _this = this; 309 | 310 | var eventName = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; 311 | var middleware = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; 312 | 313 | if (eventName !== null) { 314 | if (Array.isArray(eventName)) { 315 | name.forEach(function (e) { 316 | return _this.removeListeners(e, middleware); 317 | }); 318 | } else { 319 | this._listeners.delete(eventName); 320 | 321 | if (middleware) { 322 | this.removeMiddleware(eventName); 323 | } 324 | } 325 | } else { 326 | this._listeners = new Map(); 327 | } 328 | } 329 | }, { 330 | key: "middleware", 331 | value: function middleware(eventName, fn) { 332 | var _this2 = this; 333 | 334 | if (Array.isArray(eventName)) { 335 | name.forEach(function (e) { 336 | return _this2.middleware(e, fn); 337 | }); 338 | } else { 339 | if (!Array.isArray(this._middlewares.get(eventName))) { 340 | this._middlewares.set(eventName, []); 341 | } 342 | 343 | this._middlewares.get(eventName).push(fn); 344 | } 345 | } 346 | }, { 347 | key: "removeMiddleware", 348 | value: function removeMiddleware() { 349 | var _this3 = this; 350 | 351 | var eventName = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; 352 | 353 | if (eventName !== null) { 354 | if (Array.isArray(eventName)) { 355 | name.forEach(function (e) { 356 | return _this3.removeMiddleware(e); 357 | }); 358 | } else { 359 | this._middlewares.delete(eventName); 360 | } 361 | } else { 362 | this._middlewares = new Map(); 363 | } 364 | } 365 | }, { 366 | key: "on", 367 | value: function on(name, callback) { 368 | var _this4 = this; 369 | 370 | var once = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false; 371 | 372 | if (Array.isArray(name)) { 373 | name.forEach(function (e) { 374 | return _this4.on(e, callback); 375 | }); 376 | } else { 377 | name = name.toString(); 378 | var split = name.split(/,|, | /); 379 | 380 | if (split.length > 1) { 381 | split.forEach(function (e) { 382 | return _this4.on(e, callback); 383 | }); 384 | } else { 385 | if (!Array.isArray(this._listeners.get(name))) { 386 | this._listeners.set(name, []); 387 | } 388 | 389 | this._listeners.get(name).push({ once: once, callback: callback }); 390 | } 391 | } 392 | } 393 | }, { 394 | key: "once", 395 | value: function once(name, callback) { 396 | this.on(name, callback, true); 397 | } 398 | }, { 399 | key: "emit", 400 | value: function emit(name, data) { 401 | var _this5 = this; 402 | 403 | var silent = arguments.length > 2 && arguments[2] !== undefined ? 
arguments[2] : false; 404 | 405 | name = name.toString(); 406 | var listeners = this._listeners.get(name); 407 | var middlewares = null; 408 | var doneCount = 0; 409 | var execute = silent; 410 | 411 | if (Array.isArray(listeners)) { 412 | listeners.forEach(function (listener, index) { 413 | // Start Middleware checks unless we're doing a silent emit 414 | if (!silent) { 415 | middlewares = _this5._middlewares.get(name); 416 | // Check and execute Middleware 417 | if (Array.isArray(middlewares)) { 418 | middlewares.forEach(function (middleware) { 419 | middleware(data, function () { 420 | var newData = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; 421 | 422 | if (newData !== null) { 423 | data = newData; 424 | } 425 | doneCount++; 426 | }, name); 427 | }); 428 | 429 | if (doneCount >= middlewares.length) { 430 | execute = true; 431 | } 432 | } else { 433 | execute = true; 434 | } 435 | } 436 | 437 | // If Middleware checks have been passed, execute 438 | if (execute) { 439 | if (listener.once) { 440 | listeners[index] = null; 441 | } 442 | listener.callback(data); 443 | } 444 | }); 445 | 446 | // Dirty way of removing used Events 447 | while (listeners.indexOf(null) !== -1) { 448 | listeners.splice(listeners.indexOf(null), 1); 449 | } 450 | } 451 | } 452 | }]); 453 | 454 | return EventEmitter; 455 | }(); 456 | 457 | /* harmony default export */ __webpack_exports__["a"] = (EventEmitter); 458 | 459 | /***/ }) 460 | /******/ ])["default"]; 461 | }); -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Latent Graph Inference with Limited Supervision 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 42 | 43 | 44 | 45 | 46 | 47 | 67 | 68 | 69 |
70 |
71 |
72 |
73 |
74 |

Latent Graph Inference with Limited Supervision

75 |
76 | 77 | Jianglin Lu1*  78 | 79 | 80 | Yi Xu1  81 | 82 | 83 | Huan Wang1  84 | 85 | 86 | Yue Bai1  87 | 88 | 89 | Yun Fu1,2  90 | 91 |
92 |

NeurIPS 2023

93 | 94 |
95 | 1Department of Electrical and Computer Engineering, Northeastern University 
96 | 2Khoury College of Computer Science, Northeastern University
97 |
98 | 99 |
100 | *Corresponding author: jianglinlu@outlook.com
101 |
102 | 103 |
104 | 134 | 135 |
136 |
137 |
138 |
139 |
140 |
141 | 142 |
143 |
144 |
145 |
146 |

Abstract

147 |
148 | Latent graph inference (LGI) aims to jointly learn the underlying graph structure and node representations from data features. 149 | However, existing LGI methods commonly suffer from the issue of supervision starvation, where massive edge weights are learned without semantic supervision and do not contribute to the training loss. 150 | Consequently, these supervision-starved weights, which may determine the predictions of testing samples, cannot be semantically optimal, resulting in poor generalization. 151 | In this paper, we observe that this issue is actually caused by the graph sparsification operation, which severely destroys the important connections established between pivotal nodes and labeled ones. 152 | To address this, we propose to restore the corrupted affinities and replenish the missed supervision for better LGI. 153 | The key challenge then lies in identifying the critical nodes and recovering the corrupted affinities. 154 | We begin by defining the pivotal nodes as k-hop starved nodes, which can be identified based on a given adjacency matrix. 155 | Considering the high computational burden, we further present a more efficient alternative inspired by CUR matrix decomposition. 156 | Subsequently, we eliminate the starved nodes by reconstructing the destroyed connections. 157 | Extensive experiments on representative benchmarks demonstrate that reducing the starved nodes consistently improves the performance of state-of-the-art LGI methods, especially under extremely limited supervision (6.12% improvement on Pubmed with a labeling rate of only 0.3%). 158 |
159 |
160 |
161 |
162 |
163 | 164 | 165 | 166 | 167 |
168 |
169 |
170 |
171 |

Latent Graph Inference

172 |
173 |

Existing GNNs typically require a prior graph to learn node representations, which poses a major challenge when encountering incomplete or even missing graphs. 174 | This limitation has spurred the development of latent graph inference (LGI), also known as graph structure learning. 175 | In general, LGI aims to jointly learn the underlying graph and discriminative node representations solely from the features of nodes. 176 | The following gives the definition of latent graph inference.

177 |

[Definition 1] (Latent Graph Inference) Given a graph $\mathcal{G}(\mathcal{V}, \mathbf{X} )$ containing $n$ nodes $\mathcal{V}=\{V_1, \ldots, V_n\}$ and a feature matrix $\mathbf{X} \in \mathbb{R}^{n\times d}$ with each row $\mathbf{X}_{i:} \in \mathbb{R}^d$ representing the $d$-dimensional attributes of node $V_i$, latent graph inference (LGI) aims to simultaneously learn the underlying graph topology encoded by an adjacency matrix $\mathbf{A} \in \mathbb{R}^{n\times n}$ and the discriminative $d'$-dimensional node representations $\mathbf{Z} \in \mathbb{R}^{n\times d'}$ based on $\mathbf{X}$, where the learned $\mathbf{A}$ and $\mathbf{Z}$ are jointly optimal for certain downstream tasks $\mathcal{T}$ given a specific loss function $\mathcal{L}$.

178 |

In this work, we adopt the most common setting from the existing LGI literature, considering $\mathcal{T}$ as the semi-supervised node classification task and $\mathcal{L}$ as the cross-entropy loss.

179 |
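To make this setting concrete, the following is a minimal, hypothetical sketch of an LGI pipeline in PyTorch (not the specific model studied on this page; the class and variable names such as LatentGraphGenerator, LGIModel, train_mask, and labels are placeholders): a graph generator $\mathcal{P}_{\mathbf{\Phi}}$ produces a dense adjacency from the node features, a one-layer GNN encoder $\mathcal{F}_{\mathbf{\Theta}}$ maps features to class scores, and both are trained jointly with the downstream loss.

```python
# Illustrative LGI sketch (assumed names, not the released code):
# P_Phi builds a dense latent graph from X; F_Theta is a 1-layer GNN encoder.
import torch
import torch.nn as nn
import torch.nn.functional as F

class LatentGraphGenerator(nn.Module):              # P_Phi
    def __init__(self, in_dim, emb_dim):
        super().__init__()
        self.proj = nn.Linear(in_dim, emb_dim)      # parameters Phi

    def forward(self, X):
        H = F.normalize(self.proj(X), dim=1)        # embed and l2-normalize
        A = F.relu(H @ H.t())                       # dense non-negative similarities
        return A / A.sum(dim=1, keepdim=True).clamp(min=1e-8)  # row-normalize

class LGIModel(nn.Module):
    def __init__(self, in_dim, emb_dim, n_classes):
        super().__init__()
        self.generator = LatentGraphGenerator(in_dim, emb_dim)
        self.encoder = nn.Linear(in_dim, n_classes)  # parameters Theta

    def forward(self, X):
        A = self.generator(X)                        # latent adjacency
        Z = A @ self.encoder(X)                      # Z = A X Theta (1-layer GNN)
        return A, Z

# Usage with hypothetical tensors X, labels, train_mask:
#   A, Z = LGIModel(d, 16, c)(X)
#   loss = F.cross_entropy(Z[train_mask], labels[train_mask]); loss.backward()
```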
180 | 181 | 182 | 183 |

Supervision Starvation

184 |
185 |

186 | Let us consider a general LGI model $\mathcal{M}$ consisting of a latent graph generator $\mathcal{P}_{\mathbf{\Phi}}$ and a node encoder $\mathcal{F}_{\mathbf{\Theta}}$. 187 | For simplicity, we ignore the activation function and assume that $\mathcal{F}_{\mathbf{\Theta}}$ is implemented using a $1$-layer GNN, i.e., $\mathcal{F}_{\mathbf{\Theta}}=\mathtt{GNN}_1(\mathbf{X}, \mathbf{A}; \mathbf{\Theta})$, where $\mathbf{A}=\mathcal{P}_{\mathbf{\Phi}}(\mathbf{X})$. 188 | For each node $\mathbf{X}_{i:}$, the corresponding node representation $\mathbf{Z}_{i:}$ learned by the model $\mathcal{M}$ can be expressed as: 189 | \begin{equation} 190 | \mathbf{Z}_{i:} = \mathbf{A}_{i:}\mathbf{X}\mathbf{\Theta} = \left(\sum_{j \in \Omega} \mathbf{A}_{ij}\mathbf{X}_{j:} \right)\mathbf{\Theta}, 191 | \end{equation} 192 | where $\Omega=\{j\ |\ \mathbb{1}_{\mathbb{R}^+}(\mathbf{A})_{ij}=1 \}$ and $\mathbf{A}_{ij}=\mathcal{P}_{\mathbf{\Phi}}(\mathbf{X}_{i:}, \mathbf{X}_{j:})$. Consider the node classification loss: 193 | \begin{equation} 194 | \min_{\mathbf{A}, \mathbf{\Theta}} \mathcal{L} = \sum_{i\in \mathcal{Y}_{L}} \sum_{j=1}^{|\mathcal{C}|} \mathbf{Y}_{ij} \ln \mathbf{Z}_{ij} = \sum_{i\in \mathcal{Y}_{L}} \mathbf{Y}_{i:} \ln \mathbf{Z}_{i:}^{\top} = \sum_{i\in \mathcal{Y}_{L}} \mathbf{Y}_{i:} \ln \left( \left(\sum_{j \in \Omega} \mathbf{A}_{ij}\mathbf{X}_{j:} \right)\mathbf{\Theta}\right)^\top, 195 | \end{equation} 196 | where $\mathcal{Y}_{L}$ represents the set of indexes of labeled nodes and $|\mathcal{C}|$ denotes the size of label set.

197 |

198 | We observe that, for $\forall i\in \mathcal{Y}_{L}$, $j \in \Omega$, $\mathbf{A}_{ij}$ is optimized via backpropagation under the supervision of label $\mathbf{Y}_{i:}$. 199 | For $\forall i \notin \mathcal{Y}_{L}$, however, if $j \notin \mathcal{Y}_{L}$ for $\forall j \in \Omega$, $\mathbf{A}_{ij}$ will receive no supervision from any label and, as a result, cannot be semantically optimal after training. 200 | Consequently, the learning models exhibit poor generalization as the predictions of testing nodes inevitably rely on these supervision-starved weights. 201 | This phenomenon is referred to as Supervision Starvation (SS), where many edge weights are learned without any label supervision.

202 |
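This claim is easy to check numerically. The toy snippet below (an illustrative sketch with random data, not part of the released code) treats the adjacency as a learnable dense matrix; after backpropagating a 1-layer GNN loss computed only on the labeled rows, only those rows of $\mathbf{A}$ receive nonzero gradients, i.e., every other entry is supervision-starved.

```python
# Quick numerical check (random toy data): with Z = A X Theta and a loss on
# labeled rows only, gradients flow only into the rows of A indexed by Y_L.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
n, d, c = 6, 4, 3
X = torch.randn(n, d)
Theta = torch.randn(d, c, requires_grad=True)
A = torch.rand(n, n, requires_grad=True)        # learnable dense adjacency
labeled = torch.tensor([0, 1])                  # indices in Y_L
y = torch.tensor([0, 2])                        # their labels

Z = A @ X @ Theta                               # 1-layer GNN, activation omitted
loss = F.cross_entropy(Z[labeled], y)
loss.backward()

print(A.grad.abs().sum(dim=1))                  # nonzero only at positions 0 and 1
```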

203 | Why does this problem arise? 204 | In fact, the SS problem is caused by a common and necessary post-processing operation known as graph sparsification, which is employed in the majority of LGI methods to generate a sparse latent graph. 205 | More specifically, graph sparsification turns the initial dense graph into a sparse one through the following procedure: 206 | \begin{equation} 207 | \mathbf{A}_{ij}=\left\{ 208 | \begin{aligned} 209 | &\mathbf{A}_{ij}, & \text{if } \ \mathbf{A}_{ij} \in \operatorname{top-\kappa}(\mathbf{A}_{i:}) \\ 210 | & 0, & \text{otherwise}, 211 | \end{aligned} 212 | \right. 213 | \end{equation} 214 | where $\operatorname{top-\kappa}(\mathbf{A}_{i:})$ denotes the set of the top $\kappa$ values in $\mathbf{A}_{i:}$. 215 | After this sparsification operation, a significant number of edge weights are directly erased, including the crucial connections established between pivotal nodes and labeled nodes. 216 |

217 |
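For reference, the top-$\kappa$ rule above can be implemented in a few lines of PyTorch (a sketch; the exact variant differs across LGI codebases). It is precisely this step that zeroes out most entries of the dense graph, including connections that would otherwise link unlabeled nodes to labeled ones.

```python
# Row-wise top-kappa sparsification of a dense adjacency (illustrative sketch).
import torch

def sparsify_topk(A: torch.Tensor, kappa: int) -> torch.Tensor:
    values, indices = A.topk(kappa, dim=1)     # largest kappa weights in each row
    A_sparse = torch.zeros_like(A)
    A_sparse.scatter_(1, indices, values)      # keep them; all other entries become 0
    return A_sparse
```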

How many important nodes or connections suffer from this problem? We delve into this question in the next section.

218 |
219 | 220 | 221 | 222 |

Starved Nodes

223 |
224 |

225 | To count how many nodes suffer from the supervision starvation problem, we first give the definition of the k-hop starved node: 226 |

227 |

228 | [Definition 2] (k-hop Starved Node) Given a graph $\mathcal{G}(\mathcal{V}, \mathbf{X})$ consisting of $n$ nodes $\mathcal{V}=\{V_1, \ldots, V_n\}$ and the corresponding node features $\mathbf{X}$, for a $k$-layer graph neural network $\mathtt{GNN}_k(\mathbf{X}; \mathbf{\Theta})$ with network parameters $\mathbf{\Theta}$, the unlabeled node $V_i$ is a k-hop starved node if, for $\forall \kappa \in \{1, \ldots, k\}$, $\forall V_j \in \mathcal{N}_\kappa(i)$, where $\mathcal{N}_\kappa(i)$ is the set of $\kappa$-hop neighbors of $V_i$, $V_j$ is unlabeled. 229 | Specifically, $0$-hop starved nodes are defined as the unlabeled nodes. 230 |
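Definition 2 can be operationalized directly: a node is $k$-hop starved if it is unlabeled and none of the nodes within $k$ hops of it is labeled. The helper below is a simplified, hypothetical sketch of such a counter (its name and signature are assumptions, not taken from the repository); it expands reachability hop by hop over the binarized adjacency.

```python
# Hypothetical helper: count unlabeled nodes with no labeled node within k hops.
import numpy as np

def count_k_hop_starved_nodes(adj: np.ndarray, labeled_mask: np.ndarray, k: int) -> int:
    B = (adj > 0).astype(np.int64)
    np.fill_diagonal(B, 0)                          # self-connections are not neighbors
    reach = np.zeros_like(B)                        # nonzero => reachable within the current hop count
    power = np.eye(len(B), dtype=np.int64)
    for _ in range(k):
        power = ((power @ B) > 0).astype(np.int64)  # nonzero pattern of B^hop
        reach = ((reach + power) > 0).astype(np.int64)
    has_labeled_neighbor = reach[:, labeled_mask].sum(axis=1) > 0
    starved = (~labeled_mask) & (~has_labeled_neighbor)
    return int(starved.sum())
```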

231 | 232 |

233 | Note that k-hop starved nodes are defined with respect to k-layer GNNs, since a k-layer GNN can only aggregate signals from the k-hop neighbors of a node. 234 | To provide an intuitive perspective, we use two real-world graph datasets, Cora and Citeseer, as examples, and calculate the number of k-hop starved nodes based on their original graph topologies. 235 | The figure below shows the statistics for k=1, 2, 3, 4, where the number appended to each dataset name indicates the number of labeled nodes. 236 |

237 |
238 | 239 |
240 |
241 | We observe that many nodes suffer from the supervision starvation problem. 242 | The larger the labeling rate (i.e., the more labeled nodes), the smaller the number of starved nodes. 243 | This is natural: the more labeled nodes there are, the greater the probability that a node connects to a labeled one. 244 | On the other hand, the number of k-hop starved nodes decreases as k increases. 245 | Taking the Citeseer120 dataset as an example, increasing the GNN to 4 layers (capturing 4-hop neighbors) reduces the number of starved nodes from nearly 3,000 to about 500. 246 | This can be explained by the fact that as k increases, each node has more neighbors (from 1- to k-hop), and the chance of having at least one labeled neighbor increases. 247 |
248 | 249 | 250 | 251 |

How to Identify Starved Nodes

252 |
253 |

254 | Having defined k-hop starved nodes, we now show how to identify them based on a given initial adjacency matrix. The following theorem gives a solution. 255 |

256 |

257 | [Theorem 1] Given a sparse adjacency matrix $\mathbf{A} \in \mathbb{R}^{n\times n}$ with self-connections generated on graph $\mathcal{G}(\mathcal{V}, \mathbf{X})$ by a latent graph inference model with a $k$-layer graph neural network $\mathtt{GNN}_k(\mathbf{X}; \mathbf{\Theta})$, the node $V_i$ is a $k$-hop starved node, if $\exists j \in \{1, \ldots, n\}$, such that $[\mathbb{1}_{\mathbb{R}^+}(\mathbf{A})]^k_{ij}=1$, and for $\forall j \in \{j\ |\ [\mathbb{1}_{\mathbb{R}^+}(\mathbf{A})]_{ij}=1 \cup [\mathbb{1}_{\mathbb{R}^+}(\mathbf{A})]^2_{ij}=1 \cup \ldots \cup [\mathbb{1}_{\mathbb{R}^+}(\mathbf{A})]^k_{ij}=1 \}$, $V_j$ is unlabeled. 258 |

259 |

260 | To provide a clearer understanding, we present an example that illustrates the process of identifying $k$-hop starved nodes based on a given adjacency matrix. 261 | The following figure depicts a graph consisting of $6$ nodes, with $2$ labeled and $4$ unlabeled. 262 | The corresponding adjacency matrix is shown on the right, where all edge weights are set to $1$ for simplicity. 263 | To identify the $k$-hop starved nodes, we need to determine the $k$-hop neighbors of each node. 264 | The steps below demonstrate how the $k$-hop neighbors are identified from the given adjacency matrix, where the $k$-hop neighbors of each node are listed at the end of each row of the corresponding matrices: 265 |

266 |
267 |
268 |

269 | 270 | 271 | 272 | 290 |
\begin{equation} \qquad\quad \end{equation} 273 | \begin{equation} 274 | \mathbf{A}: \begin{array}{lll} 275 | & \begin{array}{llllll}\ \ 1 & 2 & 3 & 4 & 5 & 6 \end{array} & \\ 276 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 277 | \left[\begin{array}{llllll} 278 | 1 & 0 & 1 & 0 & 1 & 0 \\ 279 | 0 & 1 & 1 & 1 & 0 & 0 \\ 280 | 1 & 1 & 1 & 0 & 0 & 1 \\ 281 | 0 & 1 & 0 & 1 & 0 & 0 \\ 282 | 1 & 0 & 0 & 0 & 1 & 0 \\ 283 | 0 & 0 & 1 & 0 & 0 & 1 \\ 284 | \end{array}\right] 285 | \begin{array}{l} 286 | \end{array} 287 | \end{array} \nonumber 288 | \end{equation} 289 |
291 |

292 | 293 |

294 | \begin{equation} 295 | \text{Identifying $1$-hop neighbors based on }\mathbf{A}: \begin{array}{lll} 296 | & \begin{array}{llllll}\ \ 1 & 2 & 3 & 4 & 5 & 6 \end{array} & \\ 297 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 298 | \left[\begin{array}{llllll} 299 | 1 & 0 & 1 & 0 & 1 & 0 \\ 300 | 0 & 1 & 1 & 1 & 0 & 0 \\ 301 | 1 & 1 & 1 & 0 & 0 & 1 \\ 302 | 0 & 1 & 0 & 1 & 0 & 0 \\ 303 | 1 & 0 & 0 & 0 & 1 & 0 \\ 304 | 0 & 0 & 1 & 0 & 0 & 1 \\ 305 | \end{array}\right] 306 | \begin{array}{l} (V_3, V_5) \\ (V_3, {V_4}) \\ (V_1, {V_2}, V_6) \\ ({V_2}) \\ (V_1) \\ (V_3) \end{array} 307 | \end{array} \nonumber 308 | \end{equation} 309 |

310 | 311 |
312 |

313 | Since nodes $V_2$ and $V_4$ are labeled, we identify the $1$-hop starved nodes as $\{V_1, V_5, V_6\}$ (note that self-connections are not considered when defining k-hop neighbors). 314 |

315 |
316 | 317 |

318 | \begin{equation} 319 | \text{Identifying $2$-hop neighbors based on }\mathbf{A}^2: \begin{array}{lll} 320 | & \begin{array}{llllll}\ \ 1 & 2 & 3 & 4 & 5 & 6 \end{array} & \\ 321 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 322 | \left[\begin{array}{llllll} 323 | 3 & 1 & 2 & 0 & 2 & 1 \\ 324 | 1 & 3 & 2 & 2 & 0 & 1 \\ 325 | 2 & 2 & 4 & 1 & 1 & 2 \\ 326 | 0 & 2 & 1 & 2 & 0 & 0 \\ 327 | 2 & 0 & 1 & 0 & 2 & 0 \\ 328 | 1 & 1 & 2 & 0 & 0 & 2 \\ 329 | \end{array}\right] 330 | \begin{array}{l} ({V_2}, V_6) \\ (V_1, V_6) \\ ({V_4}, V_5) \\ (V_3) \\ (V_3) \\ (V_1, {V_2}) \end{array} 331 | \end{array} \nonumber 332 | \end{equation} 333 |

334 |
335 |

336 | Now, we can identify the $2$-hop starved nodes from the set $\{V_1, V_5, V_6\}$ as $\{V_5\}$: both $V_1$ and $V_6$ reach the labeled node $V_2$ within $2$ hops, whereas all nodes within $2$ hops of $V_5$ (namely $V_1$ and $V_3$) are unlabeled.

338 |
339 | 340 |

341 | \begin{equation} 342 | \text{Identifying $3$-hop neighbors based on }\mathbf{A}^{3}: \begin{array}{lll} 343 | & \begin{array}{llllll}\ \ 1 & 2 & 3 & \ \ 4 & 5 & 6 \end{array} & \\ 344 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 345 | \left[\begin{array}{llllll} 346 | 7 & 3 & 7 & 1 & 5 & 3 \\ 347 | 3 & 7 & 7 & 5 & 1 & 3 \\ 348 | 7 & 7 & 10 & 3 & 3 & 6 \\ 349 | 1 & 5 & 3 & 4 & 0 & 1 \\ 350 | 5 & 1 & 3 & 0 & 4 & 1 \\ 351 | 3 & 3 & 6 & 1 & 1 & 4 \\ 352 | \end{array}\right] 353 | \begin{array}{l} (V_4) \\ (V_5) \\ (\varnothing) \\ (V_1,V_6) \\ (V_2,V_6) \\ (V_4,V_5) \end{array} 354 | \end{array} \nonumber 355 | \end{equation} 356 |

357 | 358 |
359 |

360 | We observe that there are no $3$-hop starved nodes. 361 |

362 |
363 | 364 |

365 | \begin{equation} 366 | \text{Identifying $4$-hop neighbors based on }\mathbf{A}^4: \begin{array}{lll} 367 | & \begin{array}{llllll}\ \ \ 1 & \ \ 2 & \ \ 3 & \ 4 & \ \ \ 5 &\ \ 6 \end{array} & \\ 368 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 369 | \left[\begin{array}{llllll} 370 | 19 & 11 & 20 & 4 & 12 & 10 \\ 371 | 11 & 19 & 20 & 12 & 4 & 10 \\ 372 | 20 & 20 & 30 & 10 & 10 & 16 \\ 373 | 4 & 12 & 10 & 9 & 1 & 4 \\ 374 | 12 & 4 & 10 & 1 & 9 & 4 \\ 375 | 10 & 10 & 16 & 4 & 4 & 10 \\ 376 | \end{array}\right] 377 | \begin{array}{l} (\varnothing) \\ (\varnothing) \\ (\varnothing) \\ (V_5) \\ (V_4) \\ (\varnothing) \end{array} 378 | \end{array} \nonumber 379 | \end{equation} 380 |

381 |
382 |

383 | We observe that there are no $4$-hop starved nodes. 384 |

385 |

386 | Note that a node identified as a $k$-hop starved node is also a $(k-1)$-hop starved node, as exemplified by node $V_5$. 387 | Consequently, if there are no $k$-hop starved nodes, it follows that there are no $(k+1)$-hop starved nodes.
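As a sanity check, the matrix-power procedure above can be reproduced in a few lines of NumPy. The sketch below is only an illustration of Theorem 1 on the $6$-node example and is not part of the released code; node indices are $0$-based, so the labeled nodes $V_2$ and $V_4$ become indices $1$ and $3$.

import numpy as np

A = np.array([[1, 0, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0],
              [1, 1, 1, 0, 0, 1],
              [0, 1, 0, 1, 0, 0],
              [1, 0, 0, 0, 1, 0],
              [0, 0, 1, 0, 0, 1]], dtype=float)
labeled = {1, 3}  # V_2 and V_4 (0-based indices)

def k_hop_starved(A, labeled, k):
    """Return the 0-based indices of k-hop starved nodes."""
    n = A.shape[0]
    reach = np.zeros((n, n), dtype=bool)   # union of 1-hop, ..., k-hop reachability
    P = np.eye(n)
    for _ in range(k):
        P = P @ A                          # A, A^2, ..., A^k
        reach |= P > 0
    np.fill_diagonal(reach, False)         # self-connections are not counted
    starved = []
    for i in range(n):
        neighbors = set(np.flatnonzero(reach[i]))
        if neighbors and not (neighbors & labeled):
            starved.append(i)              # no labeled node within k hops
    return starved

for k in range(1, 5):
    print(k, [i + 1 for i in k_hop_starved(A, labeled, k)])
# prints: 1 [1, 5, 6], 2 [5], 3 [], 4 []   (matching the example above)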

389 |
390 | 391 | 392 | 393 |

CUR Decomposition Makes A Better Solution

394 |
395 |

396 | Although the above strategy is effective for identifying starved nodes, it is computationally expensive. 397 | Even with a small value of $k$, the cost of identifying $k$-hop starved nodes with this strategy is prohibitive. 398 | For example, when identifying $2$-hop starved nodes, the time complexity of computing $\mathbf{A}^2$ alone reaches $\mathcal{O}(n^3)$. 399 | To solve this problem, we provide a more efficient alternative inspired by matrix CUR decomposition: 400 |

401 |

402 | [Definition 3] (CUR Decomposition) Given $\mathbf{Q} \in \mathbb{R}^{n\times m}$ of rank $\rho=\mathtt{rank}(\mathbf{Q})$, rank parameter $k < \rho$, and accuracy parameter $0 < \varepsilon < 1$, construct column matrix $\mathbf{C} \in \mathbb{R}^{n\times c}$ with $c$ columns from $\mathbf{Q}$, row matrix $\mathbf{R} \in \mathbb{R}^{r\times m}$ with $r$ rows from $\mathbf{Q}$, and intersection matrix $\mathbf{U} \in \mathbb{R}^{c\times r}$ with $c$, $r$, and $\mathtt{rank}(\mathbf{U})$ being as small as possible, in order to reconstruct $\mathbf{Q}$ within relative error: 403 | \begin{equation} 404 | ||\mathbf{Q}-\mathbf{CUR}||_F^2 \leq (1+\varepsilon)||\mathbf{Q}-\mathbf{Q}_k||_F^2. 405 | \end{equation} 406 | Here, $\mathbf{Q}_k = \mathbf{U}_k \mathbf{\Sigma}_k \mathbf{V}_k^T \in \mathbb{R}^{n\times m}$ is the best rank $k$ matrix obtained via the singular value decomposition (SVD) of $\mathbf{Q}$. 407 |
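For intuition, the sketch below (our own illustration, not the construction analyzed in the definition) computes the two error terms in the inequality for a random low-rank matrix $\mathbf{Q}$, using uniformly sampled columns and rows and the common choice $\mathbf{U} = \mathbf{C}^{+}\mathbf{Q}\mathbf{R}^{+}$; uniform sampling carries no $(1+\varepsilon)$ guarantee, so this only shows which quantities are being compared.

import numpy as np

rng = np.random.default_rng(0)
n, m, k, c, r = 60, 40, 5, 12, 12
# A nearly rank-k matrix plus a little noise.
Q = rng.standard_normal((n, k)) @ rng.standard_normal((k, m)) \
    + 0.01 * rng.standard_normal((n, m))

cols = rng.choice(m, size=c, replace=False)
rows = rng.choice(n, size=r, replace=False)
C, R = Q[:, cols], Q[rows, :]
U = np.linalg.pinv(C) @ Q @ np.linalg.pinv(R)    # c x r intersection matrix

# Best rank-k approximation Q_k via SVD.
Us, S, Vt = np.linalg.svd(Q, full_matrices=False)
Q_k = (Us[:, :k] * S[:k]) @ Vt[:k, :]

err_cur = np.linalg.norm(Q - C @ U @ R, "fro") ** 2
err_svd = np.linalg.norm(Q - Q_k, "fro") ** 2
print(err_cur, err_svd, err_cur / err_svd)       # the ratio plays the role of (1 + eps)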

408 |

409 | With the definition of CUR decomposition, we can find a more efficient solution to identify the starved nodes. 410 | The following theorem demonstrates how we can accomplish this goal. 411 |

412 |

413 | [Theorem 2] Given a sparse adjacency matrix $\mathbf{A} \in \mathbb{R}^{n\times n}$ with self-connections generated on graph $\mathcal{G}(\mathcal{V}, \mathbf{X})$, 414 | construct $\mathbf{C} = \mathbf{A}[:, col\_mask] \in \mathbb{R}^{n\times c}$, where $col\_mask \in \{0, 1\}^{n}$ contains only $c$ positive values corresponding to $c$ labeled nodes, and $\mathbf{R} = \mathbf{A}[row\_mask, :] \in \mathbb{R}^{r\times n}$ with $row\_mask = \mathbb{1}_{\mathbb{R}^-}(\mathbf{C}\mathbb{1}_c) \in \{0, 1\}^{n}$. Then, (a) ${\mathbf{U}} = \mathbf{A}[row\_mask, col\_mask] = \mathbf{0} \in \mathbb{R}^{r\times c}$, where $\mathbf{0}$ is a zero matrix, (b) the set of $1$-hop starved nodes $\texttt{Set}_1(r) = \{V_i | i \in {\texttt{RM}_+} \} $, where $\texttt{RM}_+ \in \mathbb{N}^r$ indicates the set of indexes of positive elements from $row\_mask$, and (c) for each $i \in \texttt{RM}_+$, $V_i$ is a $2$-hop starved node if, for all $j$ satisfying $[\mathbb{1}_{\mathbb{R}^+}(\mathbf{R})]_{ij}=1$, $j \in \texttt{RM}_+$. 415 |

416 |

417 | The above theorem provides a more efficient alternative for identifying $k$-hop starved nodes for $k\in \{1, 2\}$. 418 | In fact, the column matrix $\mathbf{C}$ models the relationships between all nodes and the $c$ labeled nodes, the row matrix $\mathbf{R}$ models the affinities between the $r$ $1$-hop starved nodes and all nodes, and the intersection matrix ${\mathbf{U}}$ models the strength of connections between the $r$ $1$-hop starved nodes and the $c$ labeled nodes. 419 | The theorem states that ${\mathbf{U}}=\mathbf{0}$, indicating that there are no connections between the starved nodes and the labeled ones. 420 | For better illustration, we show the $\mathbf{C}$, $\mathbf{U}$, and $\mathbf{R}$ matrices of the above adjacency matrix as follows: 421 |

422 | 423 |
424 | 425 |

426 | \begin{equation} 427 | \mathbf{C} : \begin{array}{lll} 428 | & \begin{array}{ll}\ \ \ 2 & 4 \end{array} & \\ 429 | \begin{array}{l} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ 6 \end{array}& 430 | \left[\begin{array}{llll} 431 | 0 & 0 \\ 432 | 1 & 1 \\ 433 | 1 & 0 \\ 434 | 1 & 1 \\ 435 | 0 & 0 \\ 436 | 0 & 0 \\ 437 | \end{array}\right] 438 | \end{array}; \quad\qquad 439 | \mathbf{R} : \begin{array}{lll} 440 | & \begin{array}{llllll} \ \ 1 & 2 & 3 & 4 & 5 & 6 \end{array} & \\ 441 | \begin{array}{l} 1 \\ 5 \\ 6 \end{array}& 442 | \left[\begin{array}{llllll} 443 | 1 & 0 & 1 & 0 & 1 & 0 \\ 444 | 1 & 0 & 0 & 0 & 1 & 0 \\ 445 | 0 & 0 & 1 & 0 & 0 & 1 \\ 446 | \end{array}\right] 447 | \end{array}; \quad\qquad 448 | \mathbf{U} : \begin{array}{lll} 449 | & \begin{array}{ll} \ \ 2 & 4 \end{array} & \\ 450 | \begin{array}{l} 1 \\ 5 \\ 6 \end{array}& 451 | \left[\begin{array}{ll} 452 | 0 & 0 \\ 453 | 0 & 0 \\ 454 | 0 & 0 \\ 455 | \end{array}\right] 456 | \end{array} 457 | \nonumber 458 | \end{equation} 459 |

460 | 461 |
462 | Based on the $\mathbf{C}, \mathbf{U}, \mathbf{R}$ matrices, we can determine that $row\_mask = [1, 0, 0, 0, 1, 1]^{\top}$, $\texttt{RM}_+=\{1, 5, 6\}$, the $1$-hop starved nodes are $V_1, V_5, V_6$, and the $2$-hop starved node is $V_5$. 463 |
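To make Theorem 2 concrete, here is a small NumPy sketch (our own illustration, not the repository's implementation) that recovers the $1$-hop and $2$-hop starved nodes of the example graph purely by slicing $\mathbf{A}$, without ever forming $\mathbf{A}^2$.

import numpy as np

A = np.array([[1, 0, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0],
              [1, 1, 1, 0, 0, 1],
              [0, 1, 0, 1, 0, 0],
              [1, 0, 0, 0, 1, 0],
              [0, 0, 1, 0, 0, 1]], dtype=float)

col_mask = np.zeros(6, dtype=bool)
col_mask[[1, 3]] = True                        # labeled nodes V_2, V_4 (0-based)

C = A[:, col_mask]                             # n x c: all nodes vs. labeled nodes
row_mask = C.sum(axis=1) == 0                  # nodes with no labeled neighbor
R = A[row_mask, :]                             # r x n: 1-hop starved nodes vs. all nodes
U = A[np.ix_(row_mask, col_mask)]              # r x c: always the zero matrix (Theorem 2a)

starved_1hop = np.flatnonzero(row_mask)        # Set_1(r)
# Theorem 2c: i is 2-hop starved if every j with R_ij > 0 is itself 1-hop starved.
starved_2hop = [i for i, row in zip(starved_1hop, R > 0)
                if set(np.flatnonzero(row)) <= set(starved_1hop)]

print(U.any())                       # False
print(starved_1hop + 1)              # [1 5 6]
print(np.array(starved_2hop) + 1)    # [5]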
464 | 465 | 466 |

How to Eliminate Starved Nodes

467 |
468 |

469 | After identification, we can reduce the number of starved nodes by rebuilding the corrupted affinities. 470 | Specifically, we reconstruct the intersection matrix ${\mathbf{U}}$ to ensure that the reconstructed $\widetilde{\mathbf{U}} \neq \mathbf{0}$. 471 | Consequently, the rebuilt adjacency matrix can be rewritten as: 472 | \begin{equation} 473 | \widetilde{\mathbf{A}} = \mathbf{A} + \alpha \mathbf{B} = \mathbf{A} + \alpha \Gamma\left(\widetilde{\mathbf{U}}, n\right), 474 | \label{equation5} 475 | \end{equation} 476 | where the function $\Gamma(\widetilde{\mathbf{U}}, n)$ extends the matrix $\widetilde{\mathbf{U}}\in \mathbb{R}^{r\times c}$ to an $n \times n$ matrix by padding $n-r$ rows of zeros and $n-c$ columns of zeros in the corresponding positions. 477 | In fact, with the reconstructed $\widetilde{\mathbf{U}}$, we could also use $\widetilde{\mathbf{Q}}=\mathbf{C\widetilde{U}R}$ as the regularization term $\mathbf{B}$. This is, of course, sensible and feasible. However, a potential drawback is that reconstructing $\widetilde{\mathbf{Q}}$ requires multiplying three matrices, which is time-consuming. Somewhat surprisingly, we find that constructing only the matrix $\widetilde{\mathbf{U}}$ is enough to solve the SS problem, since it models the relationships between the $1$-hop starved nodes and the labeled ones. 478 |
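As a minimal sketch (our own illustration, not the repository's code), the padding function $\Gamma$ and the rebuilt adjacency above can be written as follows on the $6$-node example; the unit weights in $\widetilde{\mathbf{U}}$ and the value of $\alpha$ are placeholders.

import numpy as np

def gamma(U_tilde, n, row_idx, col_idx):
    """Scatter the r x c matrix U_tilde into an n x n matrix; zeros elsewhere."""
    B = np.zeros((n, n))
    B[np.ix_(row_idx, col_idx)] = U_tilde
    return B

A = np.array([[1, 0, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0],
              [1, 1, 1, 0, 0, 1],
              [0, 1, 0, 1, 0, 0],
              [1, 0, 0, 0, 1, 0],
              [0, 0, 1, 0, 0, 1]], dtype=float)

row_idx, col_idx = [0, 4, 5], [1, 3]              # RM_+ and CM_+ (0-based)
U_tilde = np.ones((len(row_idx), len(col_idx)))   # placeholder weights, all set to 1
alpha = 0.1                                       # hypothetical trade-off value
A_tilde = A + alpha * gamma(U_tilde, A.shape[0], row_idx, col_idx)
print(A_tilde[np.ix_(row_idx, col_idx)])          # the starved-to-labeled block is now non-zero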

479 |

480 | The question now turns to how to reconstruct the intersection matrix $\widetilde{\mathbf{U}}$. 481 | For simplicity, we directly adopt the same strategy used in constructing $\mathbf{A}$. 482 | Specifically, for each row $i$ of $\widetilde{\mathbf{U}}$, we establish a connection between $V_i$ and $V_j$ for every $j \in {\texttt{CM}_+}$, where ${\texttt{CM}_+} \in \mathbb{N}^c$ represents the set of indexes of positive elements from $col\_mask$. 483 | We then assign weights to these connections based on their distances. 484 | Note that if we ensure each row of $\widetilde{\mathbf{U}}$ has at least one weight greater than $0$, there will be no $\kappa$-hop starved nodes for all $\kappa > 1$. 485 | This means that we do not need to feed the $k$-hop starved nodes until they are satiated; simply feeding the $\kappa$-hop ones for $\kappa < k$ makes the $k$-hop starved nodes cease to exist. 486 |
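One plausible instantiation of this weighting (an assumption on our part; the exact formula may differ from the released code) is to reuse the cosine metric from the kNN graph construction: each $1$-hop starved node is connected to every labeled node with a weight given by the cosine similarity of their features. A sketch:

import numpy as np

def build_u_tilde(X, row_idx, col_idx, eps=1e-8):
    """X: n x d node features; returns an r x c matrix of non-negative cosine weights."""
    Xn = X / (np.linalg.norm(X, axis=1, keepdims=True) + eps)   # row-normalize features
    U_tilde = Xn[row_idx] @ Xn[col_idx].T                       # cosine similarities
    # Clip to keep the weights non-negative; rows whose similarities are all
    # negative would need a fallback (e.g., a small uniform weight) to stay fed.
    return np.clip(U_tilde, 0.0, None)

# Toy usage with random features for the 6-node example.
X = np.random.default_rng(0).standard_normal((6, 8))
U_tilde = build_u_tilde(X, [0, 4, 5], [1, 3])
print(U_tilde.shape)    # (3, 2)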

487 |
488 | 489 | 490 | 491 |

Experiments

492 |
493 |

494 |

495 | 496 |
497 |

498 |
499 |
500 |

501 |

502 | 503 | 504 | 505 |
506 |

507 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 515 | 516 | 517 | 518 | 519 | 520 | 521 | 522 | 523 | 524 | 525 |
526 |
527 |

BibTeX

528 |
@inproceedings{Jianglin2023LGI,
529 | title={Latent Graph Inference with Limited Supervision},
530 | author={Lu, Jianglin and Xu, Yi and Wang, Huan and Bai, Yue and Fu, Yun},
531 | booktitle={Advances in Neural Information Processing Systems},
532 | year={2023}
533 | }
534 | 
535 |
536 |
537 | 538 | 539 |
540 |
541 | This website is borrowed from nerfies. 542 |
543 |
544 | 545 | 546 | 547 | 548 | -------------------------------------------------------------------------------- /static/js/bulma-carousel.min.js: -------------------------------------------------------------------------------- 1 | !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaCarousel=e():t.bulmaCarousel=e()}("undefined"!=typeof self?self:this,function(){return function(i){var n={};function s(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return i[t].call(e.exports,e,e.exports,s),e.l=!0,e.exports}return s.m=i,s.c=n,s.d=function(t,e,i){s.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:i})},s.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return s.d(e,"a",e),e},s.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},s.p="",s(s.s=5)}([function(t,e,i){"use strict";i.d(e,"d",function(){return s}),i.d(e,"e",function(){return r}),i.d(e,"b",function(){return o}),i.d(e,"c",function(){return a}),i.d(e,"a",function(){return l});var n=i(2),s=function(e,t){(t=Array.isArray(t)?t:t.split(" ")).forEach(function(t){e.classList.remove(t)})},r=function(t){return t.getBoundingClientRect().width||t.offsetWidth},o=function(t){return t.getBoundingClientRect().height||t.offsetHeight},a=function(t){var e=1=t._x&&this._x<=e._x&&this._y>=t._y&&this._y<=e._y}},{key:"constrain",value:function(t,e){if(t._x>e._x||t._y>e._y)return this;var i=this._x,n=this._y;return null!==t._x&&(i=Math.max(i,t._x)),null!==e._x&&(i=Math.min(i,e._x)),null!==t._y&&(n=Math.max(n,t._y)),null!==e._y&&(n=Math.min(n,e._y)),new s(i,n)}},{key:"reposition",value:function(t){t.style.top=this._y+"px",t.style.left=this._x+"px"}},{key:"toString",value:function(){return"("+this._x+","+this._y+")"}},{key:"x",get:function(){return this._x},set:function(){var t=0this.state.length-this.slidesToShow&&!this.options.centerMode?this.state.next=this.state.index:this.state.next=this.state.index+this.slidesToScroll,this.show()}},{key:"previous",value:function(){this.options.loop||this.options.infinite||0!==this.state.index?this.state.next=this.state.index-this.slidesToScroll:this.state.next=this.state.index,this.show()}},{key:"start",value:function(){this._autoplay.start()}},{key:"pause",value:function(){this._autoplay.pause()}},{key:"stop",value:function(){this._autoplay.stop()}},{key:"show",value:function(t){var e=1this.options.slidesToShow&&(this.options.slidesToScroll=this.slidesToShow),this._breakpoint.init(),this.state.index>=this.state.length&&0!==this.state.index&&(this.state.index=this.state.index-this.slidesToScroll),this.state.length<=this.slidesToShow&&(this.state.index=0),this._ui.wrapper.appendChild(this._navigation.init().render()),this._ui.wrapper.appendChild(this._pagination.init().render()),this.options.navigationSwipe?this._swipe.bindEvents():this._swipe._bindEvents(),this._breakpoint.apply(),this._slides.forEach(function(t){return e._ui.container.appendChild(t)}),this._transitioner.init().apply(!0,this._setHeight.bind(this)),this.options.autoplay&&this._autoplay.init().start()}},{key:"destroy",value:function(){var e=this;this._unbindEvents(),this._items.forEach(function(t){e.element.appendChild(t)}),this.node.remove()}},{key:"id",get:function(){return this._id}},{key:"index",set:function(t){this._index=t},get:function(){return this._index}},{key:"length",set:function(t){this._length=t},get:function(){return 
this._length}},{key:"slides",get:function(){return this._slides},set:function(t){this._slides=t}},{key:"slidesToScroll",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToScroll():1}},{key:"slidesToShow",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToShow():1}},{key:"direction",get:function(){return"rtl"===this.element.dir.toLowerCase()||"rtl"===this.element.style.direction?"rtl":"ltr"}},{key:"wrapper",get:function(){return this._ui.wrapper}},{key:"wrapperWidth",get:function(){return this._wrapperWidth||0}},{key:"container",get:function(){return this._ui.container}},{key:"containerWidth",get:function(){return this._containerWidth||0}},{key:"slideWidth",get:function(){return this._slideWidth||0}},{key:"transitioner",get:function(){return this._transitioner}}],[{key:"attach",value:function(){var i=this,t=0>t/4).toString(16)})}},function(t,e,i){"use strict";var n=i(3),s=i(8),r=function(){function n(t,e){for(var i=0;i=t.slider.state.length-t.slider.slidesToShow&&!t.slider.options.loop&&!t.slider.options.infinite?t.stop():t.slider.next())},this.slider.options.autoplaySpeed))}},{key:"stop",value:function(){this._interval=clearInterval(this._interval),this.emit("stop",this)}},{key:"pause",value:function(){var t=this,e=0parseInt(e.changePoint,10)}),this._currentBreakpoint=this._getActiveBreakpoint(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){window.addEventListener("resize",this[s]),window.addEventListener("orientationchange",this[s])}},{key:"_unbindEvents",value:function(){window.removeEventListener("resize",this[s]),window.removeEventListener("orientationchange",this[s])}},{key:"_getActiveBreakpoint",value:function(){var t=!0,e=!1,i=void 0;try{for(var n,s=this.options.breakpoints[Symbol.iterator]();!(t=(n=s.next()).done);t=!0){var r=n.value;if(r.changePoint>=window.innerWidth)return r}}catch(t){e=!0,i=t}finally{try{!t&&s.return&&s.return()}finally{if(e)throw i}}return this._defaultBreakpoint}},{key:"getSlidesToShow",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToShow:this._defaultBreakpoint.slidesToShow}},{key:"getSlidesToScroll",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToScroll:this._defaultBreakpoint.slidesToScroll}},{key:"apply",value:function(){this.slider.state.index>=this.slider.state.length&&0!==this.slider.state.index&&(this.slider.state.index=this.slider.state.index-this._currentBreakpoint.slidesToScroll),this.slider.state.length<=this._currentBreakpoint.slidesToShow&&(this.slider.state.index=0),this.options.loop&&this.slider._loop.init().apply(),this.options.infinite&&this.slider._infinite.init().apply(),this.slider._setDimensions(),this.slider._transitioner.init().apply(!0,this.slider._setHeight.bind(this.slider)),this.slider._setClasses(),this.slider._navigation.refresh(),this.slider._pagination.refresh()}},{key:s,value:function(t){var e=this._getActiveBreakpoint();e.slidesToShow!==this._currentBreakpoint.slidesToShow&&(this._currentBreakpoint=e,this.apply())}}]),e}();e.a=r},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;ithis.slider.state.length-1-this._infiniteCount;i-=1)e=i-1,t.unshift(this._cloneSlide(this.slider.slides[e],e-this.slider.state.length));for(var 
n=[],s=0;s=this.slider.state.length?(this.slider.state.index=this.slider.state.next=this.slider.state.next-this.slider.state.length,this.slider.transitioner.apply(!0)):this.slider.state.next<0&&(this.slider.state.index=this.slider.state.next=this.slider.state.length+this.slider.state.next,this.slider.transitioner.apply(!0)))}},{key:"_cloneSlide",value:function(t,e){var i=t.cloneNode(!0);return i.dataset.sliderIndex=e,i.dataset.cloned=!0,(i.querySelectorAll("[id]")||[]).forEach(function(t){t.setAttribute("id","")}),i}}]),e}();e.a=s},function(t,e,i){"use strict";var n=i(12),s=function(){function n(t,e){for(var i=0;ithis.slider.state.length-this.slider.slidesToShow&&Object(n.a)(this.slider._slides[this.slider.state.length-1],this.slider.wrapper)?this.slider.state.next=0:this.slider.state.next=Math.min(Math.max(this.slider.state.next,0),this.slider.state.length-this.slider.slidesToShow):this.slider.state.next=0:this.slider.state.next<=0-this.slider.slidesToScroll?this.slider.state.next=this.slider.state.length-this.slider.slidesToShow:this.slider.state.next=0)}}]),e}();e.a=r},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(t,e){var i=t.getBoundingClientRect();return e=e||document.documentElement,0<=i.top&&0<=i.left&&i.bottom<=(window.innerHeight||e.clientHeight)&&i.right<=(window.innerWidth||e.clientWidth)}},function(t,e,i){"use strict";var n=i(14),s=i(1),r=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.remove("is-hidden"),0===this.slider.state.next?(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.remove("is-hidden")):this.slider.state.next>=this.slider.state.length-this.slider.slidesToShow&&!this.slider.options.centerMode?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden")):this.slider.state.next>=this.slider.state.length-1&&this.slider.options.centerMode&&(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden"))):(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.add("is-hidden")))}},{key:"render",value:function(){return this.node}}]),e}();e.a=o},function(t,e,i){"use strict";e.a=function(t){return'
'+t.previous+'
\n
'+t.next+"
"}},function(t,e,i){"use strict";var n=i(16),s=i(17),r=i(1),o=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow){for(var t=0;t<=this._count;t++){var e=document.createRange().createContextualFragment(Object(s.a)()).firstChild;e.dataset.index=t*this.slider.slidesToScroll,this._pages.push(e),this._ui.container.appendChild(e)}this._bindEvents()}}},{key:"onPageClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.state.next=t.currentTarget.dataset.index,this.slider.show()}},{key:"onResize",value:function(){this._draw()}},{key:"refresh",value:function(){var e=this,t=void 0;(t=this.slider.options.infinite?Math.ceil(this.slider.state.length-1/this.slider.slidesToScroll):Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll))!==this._count&&(this._count=t,this._draw()),this._pages.forEach(function(t){t.classList.remove("is-active"),parseInt(t.dataset.index,10)===e.slider.state.next%e.slider.state.length&&t.classList.add("is-active")})}},{key:"render",value:function(){return this.node}}]),e}();e.a=a},function(t,e,i){"use strict";e.a=function(){return'
'}},function(t,e,i){"use strict";e.a=function(){return'
'}},function(t,e,i){"use strict";var n=i(4),s=i(1),r=function(){function n(t,e){for(var i=0;iMath.abs(this._lastTranslate.y)&&(this._supportsPassive||t.preventDefault(),t.stopPropagation())}}},{key:"onStopDrag",value:function(t){this._origin&&this._lastTranslate&&(Math.abs(this._lastTranslate.x)>.2*this.width?this._lastTranslate.x<0?this.slider.next():this.slider.previous():this.slider.show(!0)),this._origin=null,this._lastTranslate=null}}]),e}();e.a=o},function(t,e,i){"use strict";var n=i(20),s=i(21),r=function(){function n(t,e){for(var i=0;it.x?(s.x=0,this.slider.state.next=0):this.options.vertical&&Math.abs(this._position.y)>t.y&&(s.y=0,this.slider.state.next=0)),this._position.x=s.x,this._position.y=s.y,this.options.centerMode&&(this._position.x=this._position.x+this.slider.wrapperWidth/2-Object(o.e)(i)/2),"rtl"===this.slider.direction&&(this._position.x=-this._position.x,this._position.y=-this._position.y),this.slider.container.style.transform="translate3d("+this._position.x+"px, "+this._position.y+"px, 0)",n.x>t.x&&this.slider.transitioner.end()}}},{key:"onTransitionEnd",value:function(t){"translate"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this.slider.container&&this.options.infinite&&this.slider._infinite.onTransitionEnd(t),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";e.a={initialSlide:0,slidesToScroll:1,slidesToShow:1,navigation:!0,navigationKeys:!0,navigationSwipe:!0,pagination:!0,loop:!1,infinite:!1,effect:"translate",duration:300,timing:"ease",autoplay:!1,autoplaySpeed:3e3,pauseOnHover:!0,breakpoints:[{changePoint:480,slidesToShow:1,slidesToScroll:1},{changePoint:640,slidesToShow:2,slidesToScroll:2},{changePoint:768,slidesToShow:3,slidesToScroll:3}],onReady:null,icons:{previous:'\n \n ',next:'\n \n '}}},function(t,e,i){"use strict";e.a=function(t){return'
\n
\n
'}},function(t,e,i){"use strict";e.a=function(){return'
'}}]).default}); --------------------------------------------------------------------------------