├── .DS_Store ├── .gitignore ├── .readthedocs.yaml ├── Dynamic ├── dynamicGnn.py └── main-dynamic.py ├── LICENSE.txt ├── NeuroGraph ├── .DS_Store ├── __init__.py ├── data │ ├── HCP_behavioral.csv │ ├── fc.npy │ └── ids.pkl ├── datasets.py ├── preprocess.py ├── test.py └── utils.py ├── README.md ├── __init__.py ├── doc ├── .DS_Store ├── Makefile ├── NeuroGraph.rst ├── _build │ ├── .DS_Store │ ├── doctrees │ │ ├── NeuroGraph.doctree │ │ ├── datasets.doctree │ │ ├── environment.pickle │ │ ├── get_started.doctree │ │ ├── index.doctree │ │ ├── install.doctree │ │ ├── preprocess.doctree │ │ └── utils.doctree │ └── html │ │ ├── .DS_Store │ │ ├── .buildinfo │ │ ├── NeuroGraph.html │ │ ├── _modules │ │ ├── NeuroGraph │ │ │ ├── datasets.html │ │ │ ├── preprocess.html │ │ │ └── utils.html │ │ └── index.html │ │ ├── _sources │ │ ├── NeuroGraph.rst.txt │ │ ├── datasets.rst.txt │ │ ├── get_started.rst.txt │ │ ├── index.rst.txt │ │ ├── install.rst.txt │ │ ├── preprocess.rst.txt │ │ └── utils.rst.txt │ │ ├── _static │ │ ├── _sphinx_javascript_frameworks_compat.js │ │ ├── basic.css │ │ ├── css │ │ │ ├── badge_only.css │ │ │ ├── fonts │ │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ │ ├── fontawesome-webfont.eot │ │ │ │ ├── fontawesome-webfont.svg │ │ │ │ ├── fontawesome-webfont.ttf │ │ │ │ ├── fontawesome-webfont.woff │ │ │ │ ├── fontawesome-webfont.woff2 │ │ │ │ ├── lato-bold-italic.woff │ │ │ │ ├── lato-bold-italic.woff2 │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-normal-italic.woff │ │ │ │ ├── lato-normal-italic.woff2 │ │ │ │ ├── lato-normal.woff │ │ │ │ └── lato-normal.woff2 │ │ │ └── theme.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── file.png │ │ ├── jquery-3.6.0.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── badge_only.js │ │ │ ├── html5shiv-printshiv.min.js │ │ │ ├── html5shiv.min.js │ │ │ └── theme.js │ │ ├── language_data.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sphinx_highlight.js │ │ ├── underscore-1.13.1.js │ │ └── underscore.js │ │ ├── datasets.html │ │ ├── genindex.html │ │ ├── get_started.html │ │ ├── index.html │ │ ├── install.html │ │ ├── objects.inv │ │ ├── preprocess.html │ │ ├── py-modindex.html │ │ ├── search.html │ │ ├── searchindex.js │ │ └── utils.html ├── conf.py ├── datasets.rst ├── get_started.rst ├── index.rst ├── install.rst ├── make.bat ├── preprocess.rst └── utils.rst ├── main.py ├── main_dynamic.py ├── requirements.txt ├── run_baseline.sh ├── setup.py ├── test.py └── utils.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so 
as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/
161 | 
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 | 
5 | # Required
6 | version: 2
7 | formats: all
8 | 
9 | # Set the version of Python and other tools you might need
10 | build:
11 |   os: ubuntu-22.04
12 |   tools:
13 |     python: "3.11"
14 | 
15 | # Build documentation in the doc/ directory with Sphinx
16 | # (output formats are configured once, via the top-level "formats" key above)
17 | sphinx:
18 |   configuration: doc/conf.py
19 | 
20 | # We recommend specifying your dependencies to enable reproducible builds:
21 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
22 | python:
23 |   install:
24 |     # - requirements: doc/requirements.txt
25 |     - requirements: requirements.txt
--------------------------------------------------------------------------------
/Dynamic/dynamicGnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch_geometric.nn import aggr
5 | from torch_geometric.nn import APPNP, MLP, GCNConv, GINConv, SAGEConv, GraphConv, TransformerConv, ChebConv, GATConv, SGConv, GeneralConv
6 | from torch.nn import Conv1d, MaxPool1d, ModuleList
7 | 
8 | 
9 | class DynamicGNN(nn.Module):
10 |     """A GNN over dynamic-graph snapshots, followed by multi-head self-attention across time.
11 | 
12 |     The signature matches the call site in main-dynamic.py:
13 |     DynamicGNN(num_features, hidden1, hidden2, num_heads, num_layers, GNN, dropout, num_classes).
14 |     """
15 |     def __init__(self, input_dim, hidden_channels, hidden_dim, num_heads, num_layers, GNN, dropout, num_classes):
16 |         super(DynamicGNN, self).__init__()
17 |         self.aggr = aggr.SumAggregation()
18 |         # The first layer uses the chosen GNN operator; deeper layers are GCNConv.
19 |         self.convs = ModuleList()
20 |         self.convs.append(GNN(input_dim, hidden_channels))
21 |         for i in range(num_layers - 1):
22 |             self.convs.append(GCNConv(hidden_channels, hidden_channels))
23 | 
24 |         self.hidden_dim = hidden_dim
25 |         self.multihead_attn = nn.MultiheadAttention(hidden_channels, num_heads)
26 |         self.layer_norm1 = nn.LayerNorm(hidden_channels)
27 |         self.layer_norm2 = nn.LayerNorm(hidden_channels)
28 |         self.dropout1 = nn.Dropout(dropout)
29 |         self.dropout2 = nn.Dropout(dropout)
30 |         self.mlp = nn.Sequential(nn.Linear(hidden_channels, hidden_dim), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_dim, hidden_channels))
31 |         self.linear = nn.Linear(hidden_channels, num_classes)
32 | 
33 |     def forward(self, batch):
34 |         # Encode each snapshot graph independently and sum-pool its node embeddings.
35 |         xs = []
36 |         for b in batch:
37 |             x = b.x
38 |             for conv in self.convs:
39 |                 x = conv(x, b.edge_index).tanh()
40 |             x = self.aggr(x)
41 |             xs.append(x)
42 |         x = torch.stack(xs, dim=0)  # (num_snapshots, 1, hidden_channels)
43 |         x = x.squeeze(dim=1)
44 |         # Transformer-style block over the temporal sequence of snapshot embeddings.
45 |         x, attn_matrix = self.multihead_attn(x, x, x)
46 |         x_attend = self.dropout1(x)
47 |         x = self.layer_norm1(x_attend)
48 |         x_attend2 = self.mlp(x_attend)
49 |         x_attend = x_attend + self.dropout2(x_attend2)
50 |         x_attend = self.layer_norm2(x_attend)
51 |         x = x_attend.relu()
52 |         x = torch.sum(x, dim=0)  # pool over time
53 |         x = self.linear(x)
54 |         return x
55 | 
--------------------------------------------------------------------------------
/Dynamic/main-dynamic.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import torch
3 | import
torch.nn.functional as F
4 | from dynamicGnn import *
5 | from torch import tensor
6 | from torch.optim import Adam
7 | import numpy as np
8 | import os, random
9 | from torch_geometric.data import Data
10 | from sklearn.metrics import accuracy_score
11 | from sklearn.model_selection import train_test_split
12 | from torch_geometric.nn import GCNConv, GINConv, SAGEConv, GraphConv, TransformerConv, ResGatedGraphConv, ChebConv, GATConv, SGConv, GeneralConv
13 | from torch_geometric.loader import DataLoader
14 | import os.path as osp
15 | from utils import *
16 | import sys
17 | import time
18 | from torch_geometric.data import Batch
19 | from sklearn.decomposition import PCA
20 | parser = argparse.ArgumentParser()
21 | parser.add_argument('--dataset', type=str, default='DynHCPGender') # e.g., DynHCPActivity, DynHCPAge
22 | parser.add_argument('--runs', type=int, default=1)
23 | parser.add_argument('--x', type=str, default="corr")
24 | parser.add_argument('--device', type=str, default='cuda')
25 | parser.add_argument('--seed', type=int, default=123)
26 | parser.add_argument('--model', type=str, default="TransformerConv")
27 | parser.add_argument('--hidden1', type=int, default=128)
28 | parser.add_argument('--hidden2', type=int, default=32)
29 | parser.add_argument('--num_heads', type=int, default=1)
30 | parser.add_argument('--num_layers', type=int, default=3)
31 | parser.add_argument('--epochs', type=int, default=50)
32 | parser.add_argument('--echo_epoch', type=int, default=30)
33 | parser.add_argument('--batch_size', type=int, default=16)
34 | parser.add_argument('--early_stopping', type=int, default=50)
35 | parser.add_argument('--lr', type=float, default=1e-4)
36 | parser.add_argument('--weight_decay', type=float, default=0.0005)
37 | parser.add_argument('--dropout', type=float, default=0.5)
38 | args = parser.parse_args()
39 | path = "base_params/"
40 | res_path = "base_results/"
41 | path_data = "../../data/"
42 | if not os.path.isdir(path):
43 |     os.mkdir(path)
44 | if not os.path.isdir(res_path):
45 |     os.mkdir(res_path)
46 | def logger(info):
47 |     f = open(os.path.join(res_path, 'dynamic_results.csv'), 'a')
48 |     print(info, file=f)
49 | log = "dataset,model,hidden,num_layers,epochs,batch_size,loss,acc,std"
50 | logger(log)
51 | torch.manual_seed(args.seed)
52 | if torch.cuda.is_available():
53 |     torch.cuda.manual_seed(args.seed)
54 | random.seed(args.seed)
55 | np.random.seed(args.seed)
56 | 
57 | start = time.time()
58 | # Please see NeuroGraph's documentation for downloading the processed datasets. This block of code (data loading) may need to be revised to incorporate the latest datasets.
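59 | # The processed .pt files come in two on-disk layouts (see NeuroGraph/datasets.py):
60 | #   - DynHCPGender is a list of dicts, each holding a 'batches' list of PyG Batch
61 | #     objects (one dynamic-graph sequence per subject), with the label stored in batch.y.
62 | #   - The other dynamic benchmarks are a single dict with 'labels' and 'batches' keys.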
63 | if args.dataset=='DynHCPGender':
64 |     dataset_raw = torch.load(os.path.join(path_data, args.dataset, "processed", args.dataset+".pt"))
65 |     dataset, labels = [], []
66 |     for v in dataset_raw:
67 |         batches = v.get('batches')
68 |         if len(batches) > 0:
69 |             for b in batches:
70 |                 y = b.y[0].item()
71 |                 dataset.append(b)
72 |                 labels.append(y)
73 | else:
74 |     dataset = torch.load(os.path.join(path_data, args.dataset, "processed", args.dataset+".pt"))
75 |     labels = dataset['labels']
76 |     dataset = dataset['batches']
77 | 
78 | 
79 | print("dataset loaded successfully!", args.dataset)
80 | 
81 | 
82 | train_tmp, test_indices = train_test_split(list(range(len(labels))),
83 |     test_size=0.2, stratify=labels, random_state=123, shuffle=True)
84 | 
85 | tmp = [dataset[i] for i in train_tmp]
86 | labels_tmp = [labels[i] for i in train_tmp]
87 | train_indices, val_indices = train_test_split(list(range(len(labels_tmp))),
88 |     test_size=0.125, stratify=labels_tmp, random_state=123, shuffle=True)
89 | train_dataset = [tmp[i] for i in train_indices]
90 | val_dataset = [tmp[i] for i in val_indices]
91 | train_labels = [labels_tmp[i] for i in train_indices]
92 | val_labels = [labels_tmp[i] for i in val_indices]
93 | test_dataset = [dataset[i] for i in test_indices]
94 | test_labels = [labels[i] for i in test_indices]
95 | 
96 | 
97 | print("dataset {} loaded with train {} val {} test {} splits".format(args.dataset, len(train_dataset), len(val_dataset), len(test_dataset)))
98 | 
99 | args.num_features, args.num_classes = 100, len(np.unique(labels))
100 | print("number of features and classes", args.num_features, args.num_classes)
101 | criterion = torch.nn.CrossEntropyLoss()
102 | def train(train_loader):
103 |     model.train()
104 |     total_loss = 0
105 |     for data in train_loader:
106 |         data = data.to(args.device)
107 |         out = model(data).reshape(1, -1)
108 |         loss = criterion(out, data[0].y)
109 |         total_loss += loss.item()  # accumulate a float, not the computation graph
110 |         loss.backward()
111 |         optimizer.step()
112 |         optimizer.zero_grad()
113 |     return total_loss / len(train_loader)
114 | 
115 | def test(loader):
116 |     model.eval()
117 |     correct = 0
118 |     for data in loader:
119 |         data = data.to(args.device)
120 |         out = model(data)
121 |         out = torch.argmax(out)
122 |         if out == data[0].y:
123 |             correct += 1
124 |     return correct / len(loader)
125 | 
126 | def eval_loss(loader):
127 |     # average loss in eval mode, without gradient updates (for reporting only)
128 |     model.eval()
129 |     total_loss = 0
130 |     with torch.no_grad():
131 |         for data in loader:
132 |             data = data.to(args.device)
133 |             out = model(data).reshape(1, -1)
134 |             total_loss += criterion(out, data[0].y).item()
135 |     return total_loss / len(loader)
136 | 
137 | 
138 | seeds = [123, 124]
139 | for index in range(args.runs):
140 |     # this block of code needs to be updated for recent PyTorch versions
141 |     # torch.manual_seed(seeds[index])
142 |     # if torch.cuda.is_available():
143 |     #     torch.cuda.manual_seed(seeds[index])
144 |     # random.seed(seeds[index])
145 |     # np.random.seed(seeds[index])
146 |     # torch.backends.cudnn.deterministic = True
147 |     # torch.backends.cudnn.benchmark = False
148 | 
149 |     gnn = eval(args.model)
150 |     model = DynamicGNN(args.num_features, args.hidden1, args.hidden2, args.num_heads, args.num_layers, gnn, args.dropout, args.num_classes).to(args.device)
151 |     optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
152 |     loss, test_acc = [], []
153 |     best_val_acc, best_val_loss, pat = 0.0, 0.0, 0
154 |     for epoch in range(args.epochs):
155 |         ep_start = time.time()
156 |         loss = train(train_dataset)
157 |         val_acc = test(val_dataset)
158 |         test_acc = test(test_dataset)
159 |         if epoch % 10 == 0:
160 |             print("epoch: {}, loss: {}, val_acc:{}, test_acc:{}".format(epoch, np.round(loss, 6), np.round(val_acc, 2), np.round(test_acc, 2)))
161 |         # val_acc_history.append(val_acc)
162 |         if val_acc > best_val_acc:
163 |             best_val_acc = val_acc
164 |             pat = 0
165 |             torch.save(model.state_dict(), path + args.dataset + args.model + '-checkpoint-best-acc.pkl')
166 |         else:
167 |             pat += 1
168 |         if pat >= args.early_stopping and epoch > args.epochs // 2:
169 |             print("early stopped!")
170 |             break
171 |         ep_end = time.time()
172 |         print("epoch time:", ep_end - ep_start)
173 |     model.load_state_dict(torch.load(path + args.dataset + args.model + '-checkpoint-best-acc.pkl'))
174 |     model.eval()
175 |     test_acc = test(test_dataset)
176 |     test_loss = eval_loss(test_dataset)  # evaluate only; the test set must not be trained on
177 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2023 Anwar Said
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/NeuroGraph/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/NeuroGraph/.DS_Store
--------------------------------------------------------------------------------
/NeuroGraph/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/NeuroGraph/__init__.py
--------------------------------------------------------------------------------
/NeuroGraph/data/fc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/NeuroGraph/data/fc.npy
--------------------------------------------------------------------------------
/NeuroGraph/data/ids.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/NeuroGraph/data/ids.pkl
--------------------------------------------------------------------------------
/NeuroGraph/datasets.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path as osp
3 | import shutil
4 | from typing import Callable, List, Optional
5 | 
6 | import torch
7 | 
8 | from torch_geometric.data import (
9 |     Data,
10 |     InMemoryDataset,
11 |     download_url,
12 |     extract_zip
13 | )
14 | 
15 | class NeuroGraphDataset(InMemoryDataset):
16 |     r"""The NeuroGraph benchmark datasets from the
17 |     `"NeuroGraph: Benchmarks for Graph Machine Learning in Brain Connectomics"
18 |     <https://arxiv.org/abs/2306.06202>`_ paper.
19 |     :class:`NeuroGraphDataset` holds a collection of five neuroimaging graph
20 |     learning datasets that span multiple categories of demographics, mental
21 |     states, and cognitive traits.
22 |     See the `documentation
23 |     <https://neurograph.readthedocs.io/en/latest/>`_ and the
24 |     `Github <https://github.com/Anwar-Said/NeuroGraph>`_ for more details.
25 | 
26 |     +--------------------+---------+----------------------+
27 |     | Dataset            | #Graphs | Task                 |
28 |     +====================+=========+======================+
29 |     | :obj:`HCPTask`     | 7,443   | Graph Classification |
30 |     +--------------------+---------+----------------------+
31 |     | :obj:`HCPGender`   | 1,078   | Graph Classification |
32 |     +--------------------+---------+----------------------+
33 |     | :obj:`HCPAge`      | 1,065   | Graph Classification |
34 |     +--------------------+---------+----------------------+
35 |     | :obj:`HCPFI`       | 1,071   | Graph Regression     |
36 |     +--------------------+---------+----------------------+
37 |     | :obj:`HCPWM`       | 1,078   | Graph Regression     |
38 |     +--------------------+---------+----------------------+
39 | 
40 |     Args:
41 |         root (str): Root directory where the dataset should be saved.
42 |         name (str): The name of the dataset (one of :obj:`"HCPGender"`,
43 |             :obj:`"HCPTask"`, :obj:`"HCPAge"`, :obj:`"HCPFI"`,
44 |             :obj:`"HCPWM"`).
45 |         transform (callable, optional): A function/transform that takes in an
46 |             :obj:`torch_geometric.data.Data` object and returns a transformed
47 |             version. The data object will be transformed before every access.
48 |             (default: :obj:`None`)
49 |         pre_transform (callable, optional): A function/transform that takes in
50 |             an :obj:`torch_geometric.data.Data` object and returns a
51 |             transformed version. The data object will be transformed before
52 |             being saved to disk. (default: :obj:`None`)
53 |         pre_filter (callable, optional): A function that takes in an
54 |             :obj:`torch_geometric.data.Data` object and returns a boolean
55 |             value, indicating whether the data object should be included in the
56 |             final dataset. (default: :obj:`None`)
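57 | 
58 |     Example (mirroring the quick-start in the NeuroGraph documentation; the
59 |     first access downloads the data to :obj:`root`)::
60 | 
61 |         from NeuroGraph.datasets import NeuroGraphDataset
62 | 
63 |         dataset = NeuroGraphDataset(root="data/", name="HCPGender")
64 |         print(dataset.num_classes, dataset.num_features)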
65 |     """
66 | 
67 |     url = 'https://vanderbilt.box.com/shared/static'
68 |     filenames = {
69 |         'HCPGender': 'r6hlz2arm7yiy6v6981cv2nzq3b0meax.zip',
70 |         'HCPTask': '8wzz4y17wpxg2stip7iybtmymnybwvma.zip',
71 |         'HCPAge': 'lzzks4472czy9f9vc8aikp7pdbknmtfe.zip',
72 |         'HCPWM': 'xtmpa6712fidi94x6kevpsddf9skuoxy.zip',
73 |         'HCPFI': 'g2md9h9snh7jh6eeay02k1kr9m4ido9f.zip',
74 |     }
75 | 
76 |     def __init__(
77 |         self,
78 |         root: str,
79 |         name: str,
80 |         transform: Optional[Callable] = None,
81 |         pre_transform: Optional[Callable] = None,
82 |         pre_filter: Optional[Callable] = None,
83 |     ):
84 |         assert name in self.filenames.keys()
85 |         self.name = name
86 | 
87 |         super().__init__(root, transform, pre_transform, pre_filter)
88 |         self.data, self.slices = torch.load(self.processed_paths[0])
89 | 
90 |     @property
91 |     def raw_dir(self) -> str:
92 |         return os.path.join(self.root, self.name, 'raw')
93 | 
94 |     @property
95 |     def raw_file_names(self) -> str:
96 |         return 'data.pt'
97 | 
98 |     @property
99 |     def processed_dir(self) -> str:
100 |         return os.path.join(self.root, self.name, 'processed')
101 | 
102 |     @property
103 |     def processed_file_names(self) -> str:
104 |         return 'data.pt'
105 | 
106 |     def download(self):
107 |         url = f'{self.url}/{self.filenames[self.name]}'
108 |         path = download_url(url, self.raw_dir)
109 |         extract_zip(path, self.raw_dir)
110 |         os.unlink(path)
111 |         os.rename(
112 |             osp.join(self.raw_dir, self.name, 'processed', f'{self.name}.pt'),
113 |             osp.join(self.raw_dir, 'data.pt'))
114 |         shutil.rmtree(osp.join(self.raw_dir, self.name))
115 | 
116 |     def process(self):
117 |         data, slices = torch.load(self.raw_paths[0])
118 | 
119 |         num_samples = slices['x'].size(0) - 1
120 |         data_list: List[Data] = []
121 |         for i in range(num_samples):
122 |             x = data.x[slices['x'][i]:slices['x'][i + 1]]
123 |             edge_index = data.edge_index[
124 |                 :,
125 |                 slices['edge_index'][i]:slices['edge_index'][i + 1],
126 |             ]
127 |             sample = Data(x=x, edge_index=edge_index, y=data.y[i])
128 | 
129 |             if self.pre_filter is not None and not self.pre_filter(sample):
130 |                 continue
131 | 
132 |             if self.pre_transform is not None:
133 |                 sample = self.pre_transform(sample)
134 | 
135 |             data_list.append(sample)
136 |         data, slices = self.collate(data_list)
137 |         torch.save((data, slices), self.processed_paths[0])
138 | 
139 | class NeuroGraphDynamic():
140 |     r"""Graph-based neuroimaging benchmark datasets, e.g.,
141 |     :obj:`"DynHCPGender"`, :obj:`"DynHCPAge"`, :obj:`"DynHCPActivity"`,
142 |     :obj:`"DynHCPWM"`, or :obj:`"DynHCPFI"`
143 | 
144 |     Args:
145 |         root (str): Root directory where the dataset should be saved.
146 |         name (str): The name of the dataset.
147 | 
148 |     Returns:
149 |         list: A list of subjects, where each element is a PyG :obj:`Batch` object containing the dynamic graph snapshots of one subject.
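150 | 
151 |     Example (mirroring the documentation; the data is downloaded on first
152 |     use)::
153 | 
154 |         data_obj = NeuroGraphDynamic(root="data/", name="DynHCPGender")
155 |         dataset, labels = data_obj.dataset, data_obj.labels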
142 | """ 143 | url = 'https://vanderbilt.box.com/shared/static' 144 | filenames = { 145 | 'DynHCPGender': 'mj0z6unea34lfz1hkdwsinj7g22yohxn.zip', 146 | 'DynHCPActivity': '2so3fnfqakeu6hktz322o3nm2c8ocus7.zip', 147 | 'DynHCPAge': '195f9teg4t4apn6kl6hbc4ib4g9addtq.zip', 148 | 'DynHCPWM': 'mxy8fq3ghm60q6h7uhnu80pgvfxs6xo2.zip', 149 | 'DynHCPFI': 'un7w3ohb2mmyjqt1ou2wm3g87y1lfuuo.zip', 150 | } 151 | def __init__(self,root, name): 152 | self.root = root 153 | self.name = name 154 | assert name in self.filenames.keys() 155 | self.name = name 156 | file_path = os.path.join(self.root,self.name,'processed', self.name+".pt") 157 | if not os.path.exists(file_path): 158 | self.download() 159 | self.dataset, self.labels = self.load_data() 160 | 161 | def download(self): 162 | url = f'{self.url}/{self.filenames[self.name]}' 163 | path = download_url(url, os.path.join(self.root, self.name)) 164 | extract_zip(path, self.root) 165 | os.unlink(path) 166 | 167 | def load_data(self): 168 | if self.name=='DynHCPActivity': 169 | dataset_raw = torch.load(os.path.join(self.root,self.name,'processed', self.name+".pt")) 170 | dataset,labels = [],[] 171 | for v in dataset_raw: 172 | batches = v.get('batches') 173 | if len(batches)>0: 174 | for b in batches: 175 | y = b.y[0].item() 176 | dataset.append(b) 177 | labels.append(y) 178 | else: 179 | dataset = torch.load(os.path.join(self.root,self.name,'processed', self.name+".pt")) 180 | labels = dataset['labels'] 181 | dataset = dataset['batches'] 182 | return dataset,labels 183 | 184 | 185 | -------------------------------------------------------------------------------- /NeuroGraph/test.py: -------------------------------------------------------------------------------- 1 | from NeuroGraph import utils 2 | import numpy as np 3 | from nilearn.image import load_img 4 | 5 | 6 | 7 | img = load_img("NeuroGraph/data/raw/1.nii.gz") 8 | regs = np.loadtxt("NeuroGraph/data/raw/1.txt") 9 | fmri = img.get_fdata() 10 | fc = utils.preprocess(fmri, regs,100) 11 | # fc = np.load("NeuroGraph/data/fc.npy") 12 | print(fc.shape) 13 | data = utils.construct_data(fc, 1) 14 | print(data) -------------------------------------------------------------------------------- /NeuroGraph/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from nilearn.datasets import fetch_atlas_schaefer_2018 4 | from nilearn.image import load_img 5 | from nilearn.connectome import ConnectivityMeasure 6 | from scipy.stats import zscore 7 | from torch_geometric.data import Data 8 | 9 | 10 | def construct_corr(m): 11 | """ 12 | This function construct correlation matrix from the preprocessed fmri matrix 13 | Args. 
15 |         m (numpy array): a preprocessed fMRI time-series matrix
16 | 
17 |     Returns:
18 |         correlation matrix (numpy array)
19 |     """
20 |     zd_Ytm = (m - np.nanmean(m, axis=0)) / np.nanstd(m, axis=0, ddof=1)
21 |     conn = ConnectivityMeasure(kind='correlation')
22 |     fc = conn.fit_transform([m])[0]
23 |     zd_fc = conn.fit_transform([zd_Ytm])[0]
24 |     fc *= np.tri(*fc.shape)
25 |     np.fill_diagonal(fc, 0)
26 |     # zscored upper triangle
27 |     zd_fc *= 1 - np.tri(*zd_fc.shape, k=-1)
28 |     np.fill_diagonal(zd_fc, 0)
29 |     corr = fc + zd_fc
30 |     return corr
31 | 
32 | def regress_head_motions(Y, regs):
33 |     """
34 |     Regress out the six rigid-body head motion parameters, along with their derivatives, from the fMRI data.
35 | 
36 |     Args:
37 |         Y (numpy array): fMRI time-series matrix
38 |         regs (numpy array): movement regressors
39 |     """
40 |     B2 = np.matmul(np.linalg.pinv(regs), Y)
41 |     m = Y - np.matmul(regs, B2)
42 |     return m
43 | 
44 | 
45 | def remove_drifts(Y):
46 |     """
47 |     Remove the scanner drifts in the fMRI signals that arise from instrumental factors, using linear and quadratic detrending. By eliminating these trends, we enhance the signal-to-noise ratio and increase the sensitivity to neural activity.
48 | 
49 |     Args:
50 |         Y (numpy array): parcellated fMRI time-series matrix
51 |     """
52 |     start = 1
53 |     stop = Y.shape[0]
54 |     step = 1
55 |     t = np.arange(start, stop + step, step)
56 |     tzd = zscore(np.vstack((t, t**2)), axis=1)
57 |     XX = np.vstack((np.ones(Y.shape[0]), tzd))
58 |     B = np.matmul(np.linalg.pinv(XX).T, Y)
59 |     Yt = Y - np.matmul(XX.T, B)
60 |     return Yt
61 | 
62 | def parcellation(fmri, n_rois= 1000):
63 |     """
64 |     Perform brain parcellation using the Schaefer atlas.
65 | 
66 |     Args:
67 | 
68 |         fmri (numpy array): fmri image
69 |         n_rois (int): {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000}, optional,
70 |             Number of regions of interest. Default=1000.
71 | 
72 |     """
73 |     roi = fetch_atlas_schaefer_2018(n_rois=n_rois, yeo_networks=17, resolution_mm=2)
74 |     atlas = load_img(roi['maps'])
75 |     volume = atlas.get_fdata()
76 |     subcor_ts = []
77 |     for i in np.unique(volume):
78 |         if i != 0:
79 |             bool_roi = np.zeros(volume.shape, dtype=int)
80 |             bool_roi[volume == i] = 1
81 |             bool_roi = bool_roi.astype(bool)  # np.bool was removed in recent NumPy releases
82 |             roi_ts_mean = []
83 |             for t in range(fmri.shape[-1]):
84 |                 roi_ts_mean.append(np.mean(fmri[:, :, :, t][bool_roi]))
85 |             subcor_ts.append(np.array(roi_ts_mean))
86 | 
87 |     Y = np.array(subcor_ts).T
88 |     return Y
89 | 
90 | def preprocess(fmri, regs, n_rois= 1000):
91 | 
92 |     """
93 |     Preprocess fMRI data using the NeuroGraph preprocessing pipeline.
94 | 
95 |     Args:
96 | 
97 |         fmri (numpy array): fmri image
98 |         regs (numpy array): movement regressor array
99 |         n_rois (int): {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000}, optional,
100 |             Number of regions of interest. Default=1000.
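101 | 
102 |     The pipeline is equivalent to applying the individual steps in sequence
103 |     (a short sketch mirroring the documentation)::
104 | 
105 |         Y = parcellation(fmri, n_rois)
106 |         Yt = remove_drifts(Y)
107 |         Ytm = regress_head_motions(Yt, regs)
108 |         corr = construct_corr(Ytm)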
109 | 
110 |     """
111 |     roi = fetch_atlas_schaefer_2018(n_rois=n_rois, yeo_networks=17, resolution_mm=2)
112 |     atlas = load_img(roi['maps'])
113 |     volume = atlas.get_fdata()
114 |     subcor_ts = []
115 |     for i in np.unique(volume):
116 |         if i != 0:
117 |             bool_roi = np.zeros(volume.shape, dtype=int)
118 |             bool_roi[volume == i] = 1
119 |             bool_roi = bool_roi.astype(bool)  # np.bool was removed in recent NumPy releases
120 |             roi_ts_mean = []
121 |             for t in range(fmri.shape[-1]):
122 |                 roi_ts_mean.append(np.mean(fmri[:, :, :, t][bool_roi]))
123 |             subcor_ts.append(np.array(roi_ts_mean))
124 | 
125 |     Y = np.array(subcor_ts).T
126 |     start = 1
127 |     stop = Y.shape[0]
128 |     step = 1
129 |     # detrending
130 |     t = np.arange(start, stop + step, step)
131 |     tzd = zscore(np.vstack((t, t**2)), axis=1)
132 |     XX = np.vstack((np.ones(Y.shape[0]), tzd))
133 |     B = np.matmul(np.linalg.pinv(XX).T, Y)
134 |     Yt = Y - np.matmul(XX.T, B)
135 |     # regress out head motion regressors
136 |     B2 = np.matmul(np.linalg.pinv(regs), Yt)
137 |     Ytm = Yt - np.matmul(regs, B2)
138 |     # zscore over axis=0 (time)
139 |     zd_Ytm = (Ytm - np.nanmean(Ytm, axis=0)) / np.nanstd(Ytm, axis=0, ddof=1)
140 |     conn = ConnectivityMeasure(kind='correlation')
141 |     fc = conn.fit_transform([Ytm])[0]
142 |     zd_fc = conn.fit_transform([zd_Ytm])[0]
143 |     fc *= np.tri(*fc.shape)
144 |     np.fill_diagonal(fc, 0)
145 | 
146 |     # zscored upper triangle
147 |     zd_fc *= 1 - np.tri(*zd_fc.shape, k=-1)
148 |     np.fill_diagonal(zd_fc, 0)
149 |     corr = fc + zd_fc
150 |     return corr
151 | 
152 | 
153 | def construct_adj(corr, threshold=5):
154 |     """
155 |     Create an adjacency matrix from the functional connectome matrix.
156 | 
157 |     Args:
158 | 
159 |         corr (n x n numpy matrix): functional connectome matrix
160 | 
161 |         threshold (int, 1-100): percentile threshold for controlling graph density;
162 |             the higher the threshold, the denser the graph. Default: 5
163 |     """
164 | 
165 |     corr_matrix_copy = corr.copy()
166 |     threshold = np.percentile(corr_matrix_copy[corr_matrix_copy > 0], 100 - threshold)
167 |     corr_matrix_copy[corr_matrix_copy < threshold] = 0
168 |     corr_matrix_copy[corr_matrix_copy >= threshold] = 1
169 |     return corr_matrix_copy
170 | 
171 | def construct_data(corr, label, threshold= 5):
172 |     """
173 |     Create a PyG data object from the functional connectome matrix. We use the correlations as node features.
174 | 
175 |     Args:
176 | 
177 |         corr (n x n numpy matrix): functional connectome matrix
178 | 
179 |         label (int): graph label
180 | 
181 |         threshold (int, 1-100): percentile threshold for controlling graph density;
182 |             the higher the threshold, the denser the graph. Default: 5
183 |     """
184 | 
185 |     A = torch.tensor(corr.copy())
186 |     threshold = np.percentile(A[A > 0], 100 - threshold)
187 |     A[A < threshold] = 0
188 |     A[A >= threshold] = 1
189 |     edge_index = A.nonzero().t().to(torch.long)
190 |     # the correlations serve as node features; convert them to a float tensor for PyG
191 |     data = Data(x=torch.tensor(corr, dtype=torch.float), edge_index=edge_index, y=label)
192 |     return data
193 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NeuroGraph
2 | 
3 | [Documentation](https://neurograph.readthedocs.io/en/latest/) | [Paper](https://arxiv.org/pdf/2306.06202.pdf) | [Website](https://anwar-said.github.io/anwarsaid/neurograph.html)
4 | 
5 | NeuroGraph presents a comprehensive compilation of neuroimaging datasets organized in a graph-based format, encompassing a wide range of demographic factors, mental states, and cognitive traits. Additionally, NeuroGraph offers convenient preprocessing tools for fMRI datasets, facilitating seamless predictive modeling. Readers are referred to the detailed documentation, paper, and website for further details on how to use NeuroGraph in their projects. Please cite the following paper if you use NeuroGraph in your work.
6 | 
7 | For training GNNs on the benchmarks, please use the following script:
8 | 
9 | ```
10 | ./run_baseline.sh
11 | ```
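12 | 
13 | To load one of the static benchmarks in Python (a minimal sketch; it assumes NeuroGraph and PyTorch Geometric are installed, see the documentation for details):
14 | 
15 | ```
16 | from NeuroGraph.datasets import NeuroGraphDataset
17 | 
18 | dataset = NeuroGraphDataset(root="data/", name="HCPGender")
19 | print(dataset.num_classes, dataset.num_features)
20 | ```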
21 | 
22 | 
23 | ## Cite
24 | 
25 | ```
26 | @article{said2023neurograph,
27 |   title={NeuroGraph: Benchmarks for Graph Machine Learning in Brain Connectomics},
28 |   author={Said, Anwar and Bayrak, Roza G and Derr, Tyler and Shabbir, Mudassir and Moyer, Daniel and Chang, Catie and Koutsoukos, Xenofon},
29 |   journal={arXiv preprint arXiv:2306.06202},
30 |   year={2023}
31 | }
32 | ```
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/__init__.py
--------------------------------------------------------------------------------
/doc/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/.DS_Store
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS    ?=
7 | SPHINXBUILD   ?= sphinx-build
8 | SOURCEDIR     = .
9 | BUILDDIR      = _build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
--------------------------------------------------------------------------------
/doc/NeuroGraph.rst:
--------------------------------------------------------------------------------
1 | NeuroGraph is a collection of graph-based neuroimaging datasets that span multiple categories of demographics, mental states, and cognitive traits. The following provides an overview of these categories and their associated datasets.
2 | 
3 | The data is made available in accordance with the WU-Minn HCP Consortium Open Access Data Use Terms (Step 4), which can be found at https://www.humanconnectome.org/study/hcp-young-adult/document/wu-minn-hcp-consortium-open-access-data-use-terms.
4 | 
5 | Demographics
6 | --------------------------
7 | 
8 | The demographics category includes gender and age estimation. The gender attribute facilitates a binary classification task, with the categories being male and female. Age is categorized into three distinct groups: 22-25, 26-30, and 31-35 years. Under this category, we introduce four datasets: HCP-Gender, HCP-Age, DynHCP-Gender, and DynHCP-Age. The first two are
9 | static graph datasets, while the last two are the corresponding dynamic graph datasets.
10 | 
11 | Mental States
12 | -------------------------------
13 | Mental state decoding involves seven tasks: Emotion Processing, Gambling, Language, Motor, Relational Processing, Social Cognition, and Working Memory. Each task is designed to help delineate a core set of functions relevant to different facets of the relation between the human brain, cognition, and behavior. Under this category, we present two datasets: HCP-Activity, a static representation, and DynHCP-Activity, its dynamic counterpart.
14 | 
15 | Cognitive Traits
16 | -----------------------------------
17 | The cognitive traits category of our dataset comprises two significant traits: working memory (List Sorting) and fluid intelligence evaluation with PMAT24. Working memory refers to an individual's capacity to temporarily hold and manipulate information, a crucial aspect that influences higher cognitive functions such as reasoning, comprehension, and learning. Fluid intelligence represents the ability to solve novel problems, independent of any knowledge from the past. It demonstrates the capacity to analyze complex relationships, identify patterns, and derive solutions in dynamic situations. The prediction of both of these traits, quantified as continuous variables in our dataset, is treated as a regression problem. We aim to predict
18 | the performance or scores related to these cognitive traits based on the functional connectome graphs. We generate four datasets under cognitive traits: HCP Fluid Intelligence (HCP-FI), HCP Working Memory (HCP-WM), DynHCP-FI, and DynHCP-WM.
--------------------------------------------------------------------------------
/doc/_build/html/_sources/datasets.rst.txt:
--------------------------------------------------------------------------------
1 | Load Benchmark Datasets
2 | ==================================
3 | 
4 | 
5 | .. automodule:: NeuroGraph.datasets
6 |     :members:
7 |     :undoc-members:
8 |     :show-inheritance:
--------------------------------------------------------------------------------
/doc/_build/html/_sources/get_started.rst.txt:
--------------------------------------------------------------------------------
1 | Introduction by Example
2 | ================================
3 | 
4 | We will briefly introduce the fundamental concepts of NeuroGraph through self-contained examples. We closely follow the data representation format of `PyG <https://pytorch-geometric.readthedocs.io/en/latest/>`_. Therefore, interested readers are referred to the `PyG <https://pytorch-geometric.readthedocs.io/en/latest/>`_ documentation for an introduction to graph machine learning and PyG's data representation formats.
5 | 
6 | 
7 | Loading Benchmark Datasets
8 | ----------------------------------
9 | 
10 | NeuroGraph provides two classes for loading static and dynamic benchmark datasets.
11 | 
12 | Loading Static Benchmarks
13 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
14 | NeuroGraph utilizes the `PyG` `InMemoryDataset` class to facilitate the loading of datasets. This allows an easy-to-use interface for applying graph machine learning pipelines. For example, the `HCPGender` benchmark can be loaded as follows:
15 | 
16 | 
17 | .. code-block:: python
18 |     :linenos:
19 | 
20 |     from NeuroGraph.datasets import NeuroGraphDataset
21 |     dataset = NeuroGraphDataset(root="data/", name= "HCPGender")
22 |     print(dataset.num_classes)
23 |     print(dataset.num_features)
24 | 
25 | 
26 | Loading Dynamic Datasets
27 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
28 | 
29 | To efficiently store and utilize the dynamic datasets in `PyG` Batch format, we provide the corresponding functionality. Here is an example of loading the `DynHCPGender` dataset:
30 | 
31 | 
32 | .. code-block:: python
33 |     :linenos:
34 | 
35 |     from NeuroGraph.datasets import NeuroGraphDynamic
36 |     data_obj = NeuroGraphDynamic(root="data/", name= "DynHCPGender")
37 |     dataset = data_obj.dataset
38 |     labels = data_obj.labels
39 |     print(len(dataset), len(labels))
40 | 
41 | The dataset is a list of dynamic graphs represented in the `PyG` batch format, making it compatible with graph machine learning pipelines.
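42 | 
43 | Each element of the list is a standard PyG batch that holds all the dynamic graph snapshots of one subject, so it can be fed directly to a graph model. The following sketch (with a hypothetical ``model``; any graph classifier works) iterates over subjects:
44 | 
45 | .. code-block:: python
46 |     :linenos:
47 | 
48 |     for batch, label in zip(dataset, labels):
49 |         out = model(batch)  # batch contains one subject's snapshot graphs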
50 | 
51 | 
52 | Preprocessing Examples
53 | ====================================
54 | 
55 | To bridge the gap between NeuroGraph and graph machine learning domains, NeuroGraph offers tools to easily preprocess and construct graph-based neuroimaging datasets. Here, we demonstrate how to preprocess your own data to construct functional connectomes and generate the corresponding graph-based representations.
56 | 
57 | 
58 | .. code-block:: python
59 |     :linenos:
60 | 
61 |     from NeuroGraph import utils
62 |     fc = utils.preprocess(fmri, regs, n_rois= 1000) # fmri and regs could be numpy arrays
63 | 
64 | The corresponding `Adjacency matrix` and `PyG` data objects can be created from the functional connectome as follows.
65 | 
66 | .. code-block:: python
67 |     :linenos:
68 | 
69 |     from NeuroGraph import utils
70 |     adj = utils.construct_adj(fc, threshold= 5) # construct the adjacency matrix
71 |     data = utils.construct_data(fc, label= 1, threshold= 5) # construct PyG data object
72 | 
73 | We use correlation as node features while constructing the data object from the functional connectome.
74 | 
75 | The following is the source code for processing one fMRI scan with its corresponding movement regressor using our preprocessing pipeline.
76 | 
77 | .. code-block:: python
78 |     :linenos:
79 | 
80 |     from NeuroGraph import utils
81 |     import numpy as np
82 |     from nilearn.image import load_img
83 |     img = load_img("data/raw/1.nii.gz") # 1.nii.gz is the fMRI scan
84 |     regs = np.loadtxt("data/raw/1.txt") # 1.txt is the movement regressor
85 |     fmri = img.get_fdata()
86 |     fc = utils.preprocess(fmri, regs, n_rois= 100)
87 |     adj = utils.construct_adj(fc, threshold= 5) # construct the adjacency matrix
88 |     data = utils.construct_data(fc, label= 1, threshold= 5) # construct torch Data object
89 | 
90 | 
91 | Our preprocessing pipeline consists of five steps and can also be applied separately, step by step.
92 | 
93 | .. code-block:: python
94 |     :linenos:
95 | 
96 |     from NeuroGraph import utils
97 |     import numpy as np
98 |     from nilearn.image import load_img
99 | 
100 |     img = load_img("data/raw/1.nii.gz")
101 |     regs = np.loadtxt("data/raw/1.txt")
102 |     fmri = img.get_fdata()
103 |     parcells = utils.parcellation(fmri, n_rois= 100) ## this uses the Schaefer atlas by default
104 |     Y = utils.remove_drifts(parcells)
105 |     Y = utils.regress_head_motions(Y, regs)
106 |     fc = utils.construct_corr(Y)
107 |     adj = utils.construct_adj(fc, threshold= 5) # construct the adjacency matrix
108 |     data = utils.construct_data(fc, label= 1, threshold= 5)
109 | 
110 | 
111 | 
112 | Preprocessing Human Connectome Project (HCP1200) Dataset
113 | ==============================================================================
114 | 
115 | NeuroGraph utilizes the HCP1200 dataset as a primary data source for exploring the dataset generation search space and constructing benchmarks. The HCP1200 dataset can be accessed from the `HCP website <https://www.humanconnectome.org/>`_ by accepting the data usage terms. Additionally, the dataset is also available on an AWS S3 bucket, which can be accessed once authorization has been obtained from HCP. In this section, we provide various functions that allow you to crawl and preprocess the HCP datasets, enabling the construction of graph-based neuroimaging datasets. These functions streamline the process of obtaining and preparing the data for further analysis and modeling.
116 | 
117 | 
118 | Download and preprocess static datasets
119 | ---------------------------------------------------
120 | 
121 | .. code-block:: python
122 |     :linenos:
123 | 
124 |     from NeuroGraph.preprocess import Brain_Connectome_Rest_Download
125 |     import boto3
126 | 
127 |     root = "data/"
128 |     name = "HCPGender"
129 |     threshold = 5
130 |     path_to_data = "data/raw/HCPGender" # store the raw downloaded scans
131 |     n_rois = 100
132 |     n_jobs = 5 # this script runs in parallel and requires the number of jobs as an input
133 | 
134 |     ACCESS_KEY = '' # your connectomeDB credentials
135 |     SECRET_KEY = ''
136 |     s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
137 |     # this function requires both HCP_behavioral.csv and ids.pkl files under the root directory. Both files have been provided and can be found under the data directory
138 |     rest_dataset = Brain_Connectome_Rest_Download(root, name, n_rois, threshold, path_to_data, n_jobs, s3)
139 | 
140 | 
141 | The provided function facilitates the download of data from the AWS S3 bucket, performs the preprocessing steps, and generates a graph-based dataset. It is important to note that the `rest_dataset` used in this function consists of four labels: gender, age, working memory, and fluid intelligence. To create separate datasets based on these labels, the following functionalities can be used.
142 | 
143 | .. code-block:: python
144 |     :linenos:
145 | 
146 |     from NeuroGraph import preprocess
147 | 
148 |     rest_dataset = preprocess.Brain_Connectome_Rest_Download(root, name, n_rois, threshold, path_to_data, n_jobs, s3)
149 |     gender_dataset = preprocess.Gender_Dataset(root, "HCPGender", rest_dataset)
150 |     age_dataset = preprocess.Age_Dataset(root, "HCPAge", rest_dataset)
151 |     wm_dataset = preprocess.WM_Dataset(root, "HCPWM", rest_dataset)
152 |     fi_dataset = preprocess.FI_Dataset(root, "HCPFI", rest_dataset)
153 | 
154 | To construct the State dataset, the following functionalities can be used.
155 | 
156 | .. code-block:: python
157 |     :linenos:
158 | 
159 |     from NeuroGraph import preprocess
160 | 
161 |     state_dataset = preprocess.Brain_Connectome_State_Download(root, dataset_name, rois, threshold, path_to_data, n_jobs, s3)
162 | 
163 | If you have the data locally, then the following functionalities can be used to preprocess the data.
164 | 
165 | 
166 | .. code-block:: python
167 |     :linenos:
168 | 
169 |     from NeuroGraph import preprocess
170 | 
171 |     rest_dataset = preprocess.Brain_Connectome_Rest(root, name, n_rois, threshold, path_to_data, n_jobs)
172 | 
173 | Similarly, for constructing the State dataset, the following function can be used.
174 | 
175 | .. code-block:: python
176 |     :linenos:
177 | 
178 |     from NeuroGraph import preprocess
179 | 
180 |     state_dataset = preprocess.Brain_Connectome_State(root, name, n_rois, threshold, path_to_data, n_jobs)
181 | 
182 | 
183 | Download and preprocess dynamic datasets
184 | ---------------------------------------------------
185 | 
186 | We also offer similar functionalities for constructing dynamic datasets. You can create a dynamic REST dataset from the data stored locally as follows.
187 | 
188 | 
189 | 
190 | .. code-block:: python
191 |     :linenos:
192 | 
193 |     from NeuroGraph import preprocess
194 | 
195 |     ngd = preprocess.Dyn_Prep(fmri, regs, n_rois=100, window_size=50, stride=3, dynamic_length=None)
196 |     dataset = ngd.dataset
197 |     labels = ngd.labels
198 |     print(len(dataset), len(labels))
199 | 
200 | Here the dataset is a list containing dynamic graphs in the form of PyG Batch objects, which can be easily fed into graph machine learning pipelines. The following examples demonstrate how a dynamic REST dataset can be downloaded and preprocessed on the fly.
201 | 
202 | .. code-block:: python
203 |     :linenos:
204 | 
205 |     from NeuroGraph import preprocess
206 | 
207 |     dyn_obj = preprocess.Dyn_Down_Prep(root, name, s3, n_rois= 100, threshold= 10, window_size= 50, stride= 3, dynamic_length=150)
208 |     dataset = dyn_obj.data_dict
209 | 
210 | The Dyn_Down_Prep class downloads and preprocesses the REST dataset and provides a dictionary that contains a list of dynamic graphs for each subject id. The dataset can be further processed as follows to construct each benchmark.
211 | 
The following examples demonstrate how a dynamic REST dataset can be downloaded and preprocessed on the fly.

.. code-block:: python
   :linenos:

    from NeuroGraph import preprocess

    dyn_obj = preprocess.Dyn_Down_Prep(root, name, s3, n_rois=100, threshold=10, window_size=50, stride=3, dynamic_length=150)
    dataset = dyn_obj.data_dict

The Dyn_Down_Prep class downloads and preprocesses the REST dataset and provides a dictionary that contains a list of dynamic graphs for each subject id. The dataset can be further processed as follows to construct each benchmark.

.. code-block:: python
   :linenos:

    from NeuroGraph import preprocess
    from torch_geometric.data import Data, Batch

    dyn_obj = preprocess.Dyn_Down_Prep(root, name, s3, n_rois=100, threshold=10, window_size=50, stride=3, dynamic_length=150)
    dataset = dyn_obj.data_dict
    gender_dataset, labels = [], []
    for k, v in dataset.items():
        if v is None:
            continue
        l = v[0].y
        gender = int(l[0].item())
        sub = []
        for d in v:
            new_data = Data(x=d.x, edge_index=d.edge_index, y=gender)
            sub.append(new_data)
        batch = Batch.from_data_list(sub)
        gender_dataset.append(batch)
        labels.append(gender)
    print("gender dataset created with {} batches and {} labels".format(len(gender_dataset), len(labels)))
    new_dataset = {'labels': labels, "batches": gender_dataset}

    age_dataset, labels = [], []
    for k, v in dataset.items():
        if v is None:
            continue
        l = v[0].y
        age = int(l[1].item())
        if age <= 2:  # ignoring subjects with age >= 36
            sub = []
            for d in v:
                new_data = Data(x=d.x, edge_index=d.edge_index, y=age)
                sub.append(new_data)
            batch = Batch.from_data_list(sub)
            age_dataset.append(batch)
            labels.append(age)
    print("age dataset created with {} batches and {} labels".format(len(age_dataset), len(labels)))
    new_dataset = {'labels': labels, "batches": age_dataset}

--------------------------------------------------------------------------------
/doc/_build/html/_sources/index.rst.txt:
--------------------------------------------------------------------------------
.. NeuroGraph documentation master file, created by
   sphinx-quickstart on Sun Jun 25 13:46:36 2023.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

.. toctree::
   :maxdepth: 2
   :caption: NeuroGraph:

   NeuroGraph

.. toctree::
   :maxdepth: 2
   :caption: Installations:

   install

.. toctree::
   :maxdepth: 2
   :caption: GET STARTED:

   get_started

.. toctree::
   :maxdepth: 2
   :caption: Loading Benchmarks:

   datasets


.. toctree::
   :maxdepth: 2
   :caption: Preprocessing:


   preprocess


.. toctree::
   :maxdepth: 2
   :caption: Utilities:

   utils


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
--------------------------------------------------------------------------------
/doc/_build/html/_sources/install.rst.txt:
--------------------------------------------------------------------------------
Installation
=====================

NeuroGraph is available for Python 3 and can be easily installed with pip:

.. code-block:: console

    pip install NeuroGraph

NeuroGraph is developed on top of PyG and requires PyG to be installed. To install PyG, please follow the instructions provided in the PyG documentation `here `_.
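As a concrete example, a CPU-only environment could be set up as follows; adjust the PyTorch wheel to your CUDA version as described in the linked instructions.

.. code-block:: console

    pip install torch            # install PyTorch first (CPU wheel shown)
    pip install torch_geometric  # core PyG package
    pip install NeuroGraph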
--------------------------------------------------------------------------------
/doc/_build/html/_sources/preprocess.rst.txt:
--------------------------------------------------------------------------------
NeuroGraph Preprocessing Functionalities
============================================================


.. automodule:: NeuroGraph.preprocess
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/doc/_build/html/_sources/utils.rst.txt:
--------------------------------------------------------------------------------
NeuroGraph Utilities
=====================================================================


.. automodule:: NeuroGraph.utils
   :members:
   :undoc-members:
   :show-inheritance:
-------------------------------------------------------------------------------- /doc/_build/html/_static/_sphinx_javascript_frameworks_compat.js: -------------------------------------------------------------------------------- 1 | /* 2 | * _sphinx_javascript_frameworks_compat.js 3 | * ~~~~~~~~~~ 4 | * 5 | * Compatability shim for jQuery and underscores.js. 6 | * 7 | * WILL BE REMOVED IN Sphinx 6.0 8 | * xref RemovedInSphinx60Warning 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | 18 | /** 19 | * small helper function to urldecode strings 20 | * 21 | * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL 22 | */ 23 | jQuery.urldecode = function(x) { 24 | if (!x) { 25 | return x 26 | } 27 | return decodeURIComponent(x.replace(/\+/g, ' ')); 28 | }; 29 | 30 | /** 31 | * small helper function to urlencode strings 32 | */ 33 | jQuery.urlencode = encodeURIComponent; 34 | 35 | /** 36 | * This function returns the parsed url parameters of the 37 | * current request. Multiple values per key are supported, 38 | * it will always return arrays of strings for the value parts. 39 | */ 40 | jQuery.getQueryParameters = function(s) { 41 | if (typeof s === 'undefined') 42 | s = document.location.search; 43 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 44 | var result = {}; 45 | for (var i = 0; i < parts.length; i++) { 46 | var tmp = parts[i].split('=', 2); 47 | var key = jQuery.urldecode(tmp[0]); 48 | var value = jQuery.urldecode(tmp[1]); 49 | if (key in result) 50 | result[key].push(value); 51 | else 52 | result[key] = [value]; 53 | } 54 | return result; 55 | }; 56 | 57 | /** 58 | * highlight a given string on a jquery object by wrapping it in 59 | * span elements with the given class name.
60 | */ 61 | jQuery.fn.highlightText = function(text, className) { 62 | function highlight(node, addItems) { 63 | if (node.nodeType === 3) { 64 | var val = node.nodeValue; 65 | var pos = val.toLowerCase().indexOf(text); 66 | if (pos >= 0 && 67 | !jQuery(node.parentNode).hasClass(className) && 68 | !jQuery(node.parentNode).hasClass("nohighlight")) { 69 | var span; 70 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 71 | if (isInSVG) { 72 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 73 | } else { 74 | span = document.createElement("span"); 75 | span.className = className; 76 | } 77 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 78 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 79 | document.createTextNode(val.substr(pos + text.length)), 80 | node.nextSibling)); 81 | node.nodeValue = val.substr(0, pos); 82 | if (isInSVG) { 83 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 84 | var bbox = node.parentElement.getBBox(); 85 | rect.x.baseVal.value = bbox.x; 86 | rect.y.baseVal.value = bbox.y; 87 | rect.width.baseVal.value = bbox.width; 88 | rect.height.baseVal.value = bbox.height; 89 | rect.setAttribute('class', className); 90 | addItems.push({ 91 | "parent": node.parentNode, 92 | "target": rect}); 93 | } 94 | } 95 | } 96 | else if (!jQuery(node).is("button, select, textarea")) { 97 | jQuery.each(node.childNodes, function() { 98 | highlight(this, addItems); 99 | }); 100 | } 101 | } 102 | var addItems = []; 103 | var result = this.each(function() { 104 | highlight(this, addItems); 105 | }); 106 | for (var i = 0; i < addItems.length; ++i) { 107 | jQuery(addItems[i].parent).before(addItems[i].target); 108 | } 109 | return result; 110 | }; 111 | 112 | /* 113 | * backward compatibility for jQuery.browser 114 | * This will be supported until firefox bug is fixed. 115 | */ 116 | if (!jQuery.browser) { 117 | jQuery.uaMatch = function(ua) { 118 | ua = ua.toLowerCase(); 119 | 120 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 121 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 122 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 123 | /(msie) ([\w.]+)/.exec(ua) || 124 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || 125 | []; 126 | 127 | return { 128 | browser: match[ 1 ] || "", 129 | version: match[ 2 ] || "0" 130 | }; 131 | }; 132 | jQuery.browser = {}; 133 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 134 | } 135 | -------------------------------------------------------------------------------- /doc/_build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 
6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-bold-italic.woff 
-------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /doc/_build/html/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /doc/_build/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Base JavaScript utilities for all Sphinx HTML documentation. 6 | * 7 | * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | "use strict"; 12 | 13 | const _ready = (callback) => { 14 | if (document.readyState !== "loading") { 15 | callback(); 16 | } else { 17 | document.addEventListener("DOMContentLoaded", callback); 18 | } 19 | }; 20 | 21 | /** 22 | * highlight a given string on a node by wrapping it in 23 | * span elements with the given class name. 
24 | */ 25 | const _highlight = (node, addItems, text, className) => { 26 | if (node.nodeType === Node.TEXT_NODE) { 27 | const val = node.nodeValue; 28 | const parent = node.parentNode; 29 | const pos = val.toLowerCase().indexOf(text); 30 | if ( 31 | pos >= 0 && 32 | !parent.classList.contains(className) && 33 | !parent.classList.contains("nohighlight") 34 | ) { 35 | let span; 36 | 37 | const closestNode = parent.closest("body, svg, foreignObject"); 38 | const isInSVG = closestNode && closestNode.matches("svg"); 39 | if (isInSVG) { 40 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 41 | } else { 42 | span = document.createElement("span"); 43 | span.classList.add(className); 44 | } 45 | 46 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 47 | parent.insertBefore( 48 | span, 49 | parent.insertBefore( 50 | document.createTextNode(val.substr(pos + text.length)), 51 | node.nextSibling 52 | ) 53 | ); 54 | node.nodeValue = val.substr(0, pos); 55 | 56 | if (isInSVG) { 57 | const rect = document.createElementNS( 58 | "http://www.w3.org/2000/svg", 59 | "rect" 60 | ); 61 | const bbox = parent.getBBox(); 62 | rect.x.baseVal.value = bbox.x; 63 | rect.y.baseVal.value = bbox.y; 64 | rect.width.baseVal.value = bbox.width; 65 | rect.height.baseVal.value = bbox.height; 66 | rect.setAttribute("class", className); 67 | addItems.push({ parent: parent, target: rect }); 68 | } 69 | } 70 | } else if (node.matches && !node.matches("button, select, textarea")) { 71 | node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); 72 | } 73 | }; 74 | const _highlightText = (thisNode, text, className) => { 75 | let addItems = []; 76 | _highlight(thisNode, addItems, text, className); 77 | addItems.forEach((obj) => 78 | obj.parent.insertAdjacentElement("beforebegin", obj.target) 79 | ); 80 | }; 81 | 82 | /** 83 | * Small JavaScript module for the documentation. 84 | */ 85 | const Documentation = { 86 | init: () => { 87 | Documentation.highlightSearchWords(); 88 | Documentation.initDomainIndexTable(); 89 | Documentation.initOnKeyListeners(); 90 | }, 91 | 92 | /** 93 | * i18n support 94 | */ 95 | TRANSLATIONS: {}, 96 | PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), 97 | LOCALE: "unknown", 98 | 99 | // gettext and ngettext don't access this so that the functions 100 | // can safely bound to a different name (_ = Documentation.gettext) 101 | gettext: (string) => { 102 | const translated = Documentation.TRANSLATIONS[string]; 103 | switch (typeof translated) { 104 | case "undefined": 105 | return string; // no translation 106 | case "string": 107 | return translated; // translation exists 108 | default: 109 | return translated[0]; // (singular, plural) translation tuple exists 110 | } 111 | }, 112 | 113 | ngettext: (singular, plural, n) => { 114 | const translated = Documentation.TRANSLATIONS[singular]; 115 | if (typeof translated !== "undefined") 116 | return translated[Documentation.PLURAL_EXPR(n)]; 117 | return n === 1 ? 
singular : plural; 118 | }, 119 | 120 | addTranslations: (catalog) => { 121 | Object.assign(Documentation.TRANSLATIONS, catalog.messages); 122 | Documentation.PLURAL_EXPR = new Function( 123 | "n", 124 | `return (${catalog.plural_expr})` 125 | ); 126 | Documentation.LOCALE = catalog.locale; 127 | }, 128 | 129 | /** 130 | * highlight the search words provided in the url in the text 131 | */ 132 | highlightSearchWords: () => { 133 | const highlight = 134 | new URLSearchParams(window.location.search).get("highlight") || ""; 135 | const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); 136 | if (terms.length === 0) return; // nothing to do 137 | 138 | // There should never be more than one element matching "div.body" 139 | const divBody = document.querySelectorAll("div.body"); 140 | const body = divBody.length ? divBody[0] : document.querySelector("body"); 141 | window.setTimeout(() => { 142 | terms.forEach((term) => _highlightText(body, term, "highlighted")); 143 | }, 10); 144 | 145 | const searchBox = document.getElementById("searchbox"); 146 | if (searchBox === null) return; 147 | searchBox.appendChild( 148 | document 149 | .createRange() 150 | .createContextualFragment( 151 | '" 155 | ) 156 | ); 157 | }, 158 | 159 | /** 160 | * helper function to hide the search marks again 161 | */ 162 | hideSearchWords: () => { 163 | document 164 | .querySelectorAll("#searchbox .highlight-link") 165 | .forEach((el) => el.remove()); 166 | document 167 | .querySelectorAll("span.highlighted") 168 | .forEach((el) => el.classList.remove("highlighted")); 169 | const url = new URL(window.location); 170 | url.searchParams.delete("highlight"); 171 | window.history.replaceState({}, "", url); 172 | }, 173 | 174 | /** 175 | * helper function to focus on search bar 176 | */ 177 | focusSearchBar: () => { 178 | document.querySelectorAll("input[name=q]")[0]?.focus(); 179 | }, 180 | 181 | /** 182 | * Initialise the domain index toggle buttons 183 | */ 184 | initDomainIndexTable: () => { 185 | const toggler = (el) => { 186 | const idNumber = el.id.substr(7); 187 | const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); 188 | if (el.src.substr(-9) === "minus.png") { 189 | el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; 190 | toggledRows.forEach((el) => (el.style.display = "none")); 191 | } else { 192 | el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; 193 | toggledRows.forEach((el) => (el.style.display = "")); 194 | } 195 | }; 196 | 197 | const togglerElements = document.querySelectorAll("img.toggler"); 198 | togglerElements.forEach((el) => 199 | el.addEventListener("click", (event) => toggler(event.currentTarget)) 200 | ); 201 | togglerElements.forEach((el) => (el.style.display = "")); 202 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); 203 | }, 204 | 205 | initOnKeyListeners: () => { 206 | // only install a listener if it is really needed 207 | if ( 208 | !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && 209 | !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS 210 | ) 211 | return; 212 | 213 | const blacklistedElements = new Set([ 214 | "TEXTAREA", 215 | "INPUT", 216 | "SELECT", 217 | "BUTTON", 218 | ]); 219 | document.addEventListener("keydown", (event) => { 220 | if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements 221 | if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys 222 | 223 | if (!event.shiftKey) { 224 | switch (event.key) { 225 | case "ArrowLeft": 226 | if 
(!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 227 | 228 | const prevLink = document.querySelector('link[rel="prev"]'); 229 | if (prevLink && prevLink.href) { 230 | window.location.href = prevLink.href; 231 | event.preventDefault(); 232 | } 233 | break; 234 | case "ArrowRight": 235 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 236 | 237 | const nextLink = document.querySelector('link[rel="next"]'); 238 | if (nextLink && nextLink.href) { 239 | window.location.href = nextLink.href; 240 | event.preventDefault(); 241 | } 242 | break; 243 | case "Escape": 244 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; 245 | Documentation.hideSearchWords(); 246 | event.preventDefault(); 247 | } 248 | } 249 | 250 | // some keyboard layouts may need Shift to get / 251 | switch (event.key) { 252 | case "/": 253 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; 254 | Documentation.focusSearchBar(); 255 | event.preventDefault(); 256 | } 257 | }); 258 | }, 259 | }; 260 | 261 | // quick alias for translations 262 | const _ = Documentation.gettext; 263 | 264 | _ready(Documentation.init); 265 | -------------------------------------------------------------------------------- /doc/_build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '2.1.0', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: true, 14 | }; -------------------------------------------------------------------------------- /doc/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/file.png -------------------------------------------------------------------------------- /doc/_build/html/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /doc/_build/html/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 
Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure 
footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /doc/_build/html/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof 
module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /doc/_build/html/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 63 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 64 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 65 | var s_v = "^(" + C + ")?" 
+ v; // vowel in stem 66 | 67 | this.stemWord = function (w) { 68 | var stem; 69 | var suffix; 70 | var firstch; 71 | var origword = w; 72 | 73 | if (w.length < 3) 74 | return w; 75 | 76 | var re; 77 | var re2; 78 | var re3; 79 | var re4; 80 | 81 | firstch = w.substr(0,1); 82 | if (firstch == "y") 83 | w = firstch.toUpperCase() + w.substr(1); 84 | 85 | // Step 1a 86 | re = /^(.+?)(ss|i)es$/; 87 | re2 = /^(.+?)([^s])s$/; 88 | 89 | if (re.test(w)) 90 | w = w.replace(re,"$1$2"); 91 | else if (re2.test(w)) 92 | w = w.replace(re2,"$1$2"); 93 | 94 | // Step 1b 95 | re = /^(.+?)eed$/; 96 | re2 = /^(.+?)(ed|ing)$/; 97 | if (re.test(w)) { 98 | var fp = re.exec(w); 99 | re = new RegExp(mgr0); 100 | if (re.test(fp[1])) { 101 | re = /.$/; 102 | w = w.replace(re,""); 103 | } 104 | } 105 | else if (re2.test(w)) { 106 | var fp = re2.exec(w); 107 | stem = fp[1]; 108 | re2 = new RegExp(s_v); 109 | if (re2.test(stem)) { 110 | w = stem; 111 | re2 = /(at|bl|iz)$/; 112 | re3 = new RegExp("([^aeiouylsz])\\1$"); 113 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 114 | if (re2.test(w)) 115 | w = w + "e"; 116 | else if (re3.test(w)) { 117 | re = /.$/; 118 | w = w.replace(re,""); 119 | } 120 | else if (re4.test(w)) 121 | w = w + "e"; 122 | } 123 | } 124 | 125 | // Step 1c 126 | re = /^(.+?)y$/; 127 | if (re.test(w)) { 128 | var fp = re.exec(w); 129 | stem = fp[1]; 130 | re = new RegExp(s_v); 131 | if (re.test(stem)) 132 | w = stem + "i"; 133 | } 134 | 135 | // Step 2 136 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 137 | if (re.test(w)) { 138 | var fp = re.exec(w); 139 | stem = fp[1]; 140 | suffix = fp[2]; 141 | re = new RegExp(mgr0); 142 | if (re.test(stem)) 143 | w = stem + step2list[suffix]; 144 | } 145 | 146 | // Step 3 147 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 148 | if (re.test(w)) { 149 | var fp = re.exec(w); 150 | stem = fp[1]; 151 | suffix = fp[2]; 152 | re = new RegExp(mgr0); 153 | if (re.test(stem)) 154 | w = stem + step3list[suffix]; 155 | } 156 | 157 | // Step 4 158 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 159 | re2 = /^(.+?)(s|t)(ion)$/; 160 | if (re.test(w)) { 161 | var fp = re.exec(w); 162 | stem = fp[1]; 163 | re = new RegExp(mgr1); 164 | if (re.test(stem)) 165 | w = stem; 166 | } 167 | else if (re2.test(w)) { 168 | var fp = re2.exec(w); 169 | stem = fp[1] + fp[2]; 170 | re2 = new RegExp(mgr1); 171 | if (re2.test(stem)) 172 | w = stem; 173 | } 174 | 175 | // Step 5 176 | re = /^(.+?)e$/; 177 | if (re.test(w)) { 178 | var fp = re.exec(w); 179 | stem = fp[1]; 180 | re = new RegExp(mgr1); 181 | re2 = new RegExp(meq1); 182 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 183 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 184 | w = stem; 185 | } 186 | re = /ll$/; 187 | re2 = new RegExp(mgr1); 188 | if (re.test(w) && re2.test(w)) { 189 | re = /.$/; 190 | w = w.replace(re,""); 191 | } 192 | 193 | // and turn initial Y back to y 194 | if (firstch == "y") 195 | w = firstch.toLowerCase() + w.substr(1); 196 | return w; 197 | } 198 | } 199 | 200 | -------------------------------------------------------------------------------- /doc/_build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/minus.png 
-------------------------------------------------------------------------------- /doc/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/_static/plus.png -------------------------------------------------------------------------------- /doc/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | pre { line-height: 125%; } 2 | td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 3 | span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 4 | td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 5 | span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 6 | .highlight .hll { background-color: #ffffcc } 7 | .highlight { background: #eeffcc; } 8 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 9 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 10 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 11 | .highlight .o { color: #666666 } /* Operator */ 12 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ 13 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 14 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 15 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ 16 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 17 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 18 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 19 | .highlight .ge { font-style: italic } /* Generic.Emph */ 20 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 21 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 22 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 23 | .highlight .go { color: #333333 } /* Generic.Output */ 24 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 25 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 26 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 27 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 28 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 29 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 30 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 31 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 32 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 33 | .highlight .kt { color: #902000 } /* Keyword.Type */ 34 | .highlight .m { color: #208050 } /* Literal.Number */ 35 | .highlight .s { color: #4070a0 } /* Literal.String */ 36 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 37 | .highlight .nb { color: #007020 } /* Name.Builtin */ 38 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 39 | .highlight .no { color: #60add5 } /* Name.Constant */ 40 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 41 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 42 | .highlight .ne { color: #007020 } /* Name.Exception */ 43 | .highlight .nf { 
color: #06287e } /* Name.Function */ 44 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 45 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 46 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 47 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 48 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 49 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 50 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 51 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 52 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 53 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 54 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 55 | .highlight .sa { color: #4070a0 } /* Literal.String.Affix */ 56 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 57 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 58 | .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ 59 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 60 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 61 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 62 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 63 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 64 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 65 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 66 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 67 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 68 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 69 | .highlight .fm { color: #06287e } /* Name.Function.Magic */ 70 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 71 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 72 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 73 | .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ 74 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /doc/_build/html/_static/sphinx_highlight.js: -------------------------------------------------------------------------------- 1 | /* Highlighting utilities for Sphinx HTML documentation. */ 2 | "use strict"; 3 | 4 | const SPHINX_HIGHLIGHT_ENABLED = true 5 | 6 | /** 7 | * highlight a given string on a node by wrapping it in 8 | * span elements with the given class name. 
9 | */ 10 | const _highlight = (node, addItems, text, className) => { 11 | if (node.nodeType === Node.TEXT_NODE) { 12 | const val = node.nodeValue; 13 | const parent = node.parentNode; 14 | const pos = val.toLowerCase().indexOf(text); 15 | if ( 16 | pos >= 0 && 17 | !parent.classList.contains(className) && 18 | !parent.classList.contains("nohighlight") 19 | ) { 20 | let span; 21 | 22 | const closestNode = parent.closest("body, svg, foreignObject"); 23 | const isInSVG = closestNode && closestNode.matches("svg"); 24 | if (isInSVG) { 25 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 26 | } else { 27 | span = document.createElement("span"); 28 | span.classList.add(className); 29 | } 30 | 31 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 32 | const rest = document.createTextNode(val.substr(pos + text.length)); 33 | parent.insertBefore( 34 | span, 35 | parent.insertBefore( 36 | rest, 37 | node.nextSibling 38 | ) 39 | ); 40 | node.nodeValue = val.substr(0, pos); 41 | /* There may be more occurrences of search term in this node. So call this 42 | * function recursively on the remaining fragment. 43 | */ 44 | _highlight(rest, addItems, text, className); 45 | 46 | if (isInSVG) { 47 | const rect = document.createElementNS( 48 | "http://www.w3.org/2000/svg", 49 | "rect" 50 | ); 51 | const bbox = parent.getBBox(); 52 | rect.x.baseVal.value = bbox.x; 53 | rect.y.baseVal.value = bbox.y; 54 | rect.width.baseVal.value = bbox.width; 55 | rect.height.baseVal.value = bbox.height; 56 | rect.setAttribute("class", className); 57 | addItems.push({ parent: parent, target: rect }); 58 | } 59 | } 60 | } else if (node.matches && !node.matches("button, select, textarea")) { 61 | node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); 62 | } 63 | }; 64 | const _highlightText = (thisNode, text, className) => { 65 | let addItems = []; 66 | _highlight(thisNode, addItems, text, className); 67 | addItems.forEach((obj) => 68 | obj.parent.insertAdjacentElement("beforebegin", obj.target) 69 | ); 70 | }; 71 | 72 | /** 73 | * Small JavaScript module for the documentation. 74 | */ 75 | const SphinxHighlight = { 76 | 77 | /** 78 | * highlight the search words provided in localstorage in the text 79 | */ 80 | highlightSearchWords: () => { 81 | if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight 82 | 83 | // get and clear terms from localstorage 84 | const url = new URL(window.location); 85 | const highlight = 86 | localStorage.getItem("sphinx_highlight_terms") 87 | || url.searchParams.get("highlight") 88 | || ""; 89 | localStorage.removeItem("sphinx_highlight_terms") 90 | url.searchParams.delete("highlight"); 91 | window.history.replaceState({}, "", url); 92 | 93 | // get individual terms from highlight string 94 | const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); 95 | if (terms.length === 0) return; // nothing to do 96 | 97 | // There should never be more than one element matching "div.body" 98 | const divBody = document.querySelectorAll("div.body"); 99 | const body = divBody.length ? 
divBody[0] : document.querySelector("body"); 100 | window.setTimeout(() => { 101 | terms.forEach((term) => _highlightText(body, term, "highlighted")); 102 | }, 10); 103 | 104 | const searchBox = document.getElementById("searchbox"); 105 | if (searchBox === null) return; 106 | searchBox.appendChild( 107 | document 108 | .createRange() 109 | .createContextualFragment( 110 | '" 114 | ) 115 | ); 116 | }, 117 | 118 | /** 119 | * helper function to hide the search marks again 120 | */ 121 | hideSearchWords: () => { 122 | document 123 | .querySelectorAll("#searchbox .highlight-link") 124 | .forEach((el) => el.remove()); 125 | document 126 | .querySelectorAll("span.highlighted") 127 | .forEach((el) => el.classList.remove("highlighted")); 128 | localStorage.removeItem("sphinx_highlight_terms") 129 | }, 130 | 131 | initEscapeListener: () => { 132 | // only install a listener if it is really needed 133 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; 134 | 135 | document.addEventListener("keydown", (event) => { 136 | // bail for input elements 137 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; 138 | // bail with special keys 139 | if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; 140 | if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { 141 | SphinxHighlight.hideSearchWords(); 142 | event.preventDefault(); 143 | } 144 | }); 145 | }, 146 | }; 147 | 148 | _ready(() => { 149 | /* Do not call highlightSearchWords() when we are on the search page. 150 | * It will highlight words from the *previous* search query. 151 | */ 152 | if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); 153 | SphinxHighlight.initEscapeListener(); 154 | }); 155 | -------------------------------------------------------------------------------- /doc/_build/html/datasets.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Load Benchmark Datasets — NeuroGraph 2.1.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
[generated page body omitted: only theme navigation, search box, and footer residue remained after markup stripping]
--------------------------------------------------------------------------------
/doc/_build/html/genindex.html:
--------------------------------------------------------------------------------
Index — NeuroGraph 2.1.0 documentation
[generated index page omitted: only theme navigation and footer residue remained after markup stripping]
--------------------------------------------------------------------------------
/doc/_build/html/index.html:
--------------------------------------------------------------------------------
Indices and tables — NeuroGraph 2.1.0 documentation
[generated start page omitted: its table-of-contents text duplicates index.rst above; only navigation and footer residue remained after markup stripping]
--------------------------------------------------------------------------------
/doc/_build/html/install.html:
--------------------------------------------------------------------------------
Installation — NeuroGraph 2.1.0 documentation
28 | 77 | 78 |
82 | 83 |
84 |
85 |
86 | 93 |
94 |
95 |
96 |
97 | 98 |
99 |

Installation

100 |

NeuroGraph is available for Python 3 and can be easily installed with pip

101 |
pip install NeuroGraph
102 | 
103 |
104 |

NeuroGraph is developed on top of PyG and requires PyG to be installed. To install PyG, please follow the instructions provided in the PyG documentation here.

105 |
106 | 107 | 108 |
109 |
110 |
114 | 115 |
116 | 117 |
118 |

© Copyright 2023, Anwar Said.

119 |
120 | 121 | Built with Sphinx using a 122 | theme 123 | provided by Read the Docs. 124 | 125 | 126 |
127 |
128 |
129 |
130 |
131 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /doc/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Anwar-Said/NeuroGraph/0a2abf5a6fdd8ebda15e73def1f6426c5839c3c8/doc/_build/html/objects.inv -------------------------------------------------------------------------------- /doc/_build/html/preprocess.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | NeuroGraph Preprocessing Functionalities — NeuroGraph 2.1.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 77 | 78 |
82 | 83 |
84 |
85 |
86 |
    87 |
  • 88 | 89 |
  • 90 | View page source 91 |
  • 92 |
93 |
94 |
95 |
96 |
97 | 98 |
99 |

NeuroGraph Preprocessing Functionalities

100 |
101 | 102 | 103 |
104 |
105 |
109 | 110 |
111 | 112 |
113 |

© Copyright 2023, Anwar Said.

114 |
115 | 116 | Built with Sphinx using a 117 | theme 118 | provided by Read the Docs. 119 | 120 | 121 |
122 |
123 |
124 |
125 |
126 | 131 | 132 | 133 | -------------------------------------------------------------------------------- /doc/_build/html/py-modindex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Python Module Index — NeuroGraph 2.1.0 documentation 7 | 8 | 9 | 10 | 11 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 77 | 78 |
82 | 83 |
84 |
85 |
86 |
    87 |
  • 88 | 89 |
  • 90 |
  • 91 |
92 |
93 |
94 |
95 |
96 | 97 | 98 |

Python Module Index

99 | 100 |
101 | n 102 |
103 | 104 | 105 | 106 | 108 | 109 | 111 | 114 | 115 | 116 | 119 | 120 | 121 | 124 | 125 | 126 | 129 |
 
107 | n
112 | NeuroGraph 113 |
    117 | NeuroGraph.datasets 118 |
    122 | NeuroGraph.preprocess 123 |
    127 | NeuroGraph.utils 128 |
130 | 131 | 132 |
133 |
134 |
135 | 136 |
137 | 138 |
139 |

© Copyright 2023, Anwar Said.

140 |
141 | 142 | Built with Sphinx using a 143 | theme 144 | provided by Read the Docs. 145 | 146 | 147 |
148 |
149 |
150 |
151 |
152 | 157 | 158 | 159 | -------------------------------------------------------------------------------- /doc/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Search — NeuroGraph 2.1.0 documentation 7 | 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 77 | 78 |
82 | 83 |
84 |
85 |
86 |
    87 |
  • 88 | 89 |
  • 90 |
  • 91 |
92 |
93 |
94 |
95 |
96 | 97 | 104 | 105 | 106 |
107 | 108 |
109 | 110 |
111 |
112 |
113 | 114 |
115 | 116 |
117 |

© Copyright 2023, Anwar Said.

118 |
119 | 120 | Built with Sphinx using a 121 | theme 122 | provided by Read the Docs. 123 | 124 | 125 |
126 |
127 |
128 |
129 |
130 | 135 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /doc/_build/html/searchindex.js: -------------------------------------------------------------------------------- 1 | Search.setIndex({"docnames": ["NeuroGraph", "datasets", "get_started", "index", "install", "preprocess", "utils"], "filenames": ["NeuroGraph.rst", "datasets.rst", "get_started.rst", "index.rst", "install.rst", "preprocess.rst", "utils.rst"], "titles": ["Demographics", "Load Benchmark Datasets", "Introduction by Example", "Indices and tables", "Installation", "NeuroGraph Preprocessing Functionalities", "NeuroGraph Utilities"], "terms": {"neurograph": [0, 2, 4], "collect": 0, "graph": [0, 2], "base": [0, 2], "neuroimag": [0, 2], "dataset": [0, 3], "span": 0, "multipl": 0, "categori": 0, "The": [0, 2], "follow": [0, 2, 4], "provid": [0, 2, 4], "an": [0, 2], "overview": 0, "associ": 0, "data": [0, 2], "made": 0, "avail": [0, 2, 4], "accord": 0, "wu": 0, "minn": 0, "hcp": [0, 2], "consortium": 0, "open": 0, "access": [0, 2], "us": [0, 2], "term": [0, 2], "step": [0, 2], "4": 0, "which": [0, 2], "can": [0, 2, 4], "found": [0, 2], "http": 0, "www": 0, "humanconnectom": 0, "org": 0, "studi": 0, "young": 0, "adult": 0, "document": [0, 2, 4], "includ": 0, "gender": [0, 2], "ag": [0, 2], "estim": 0, "attribut": 0, "facilit": [0, 2], "binari": 0, "classif": 0, "being": 0, "male": 0, "femal": 0, "categor": 0, "three": 0, "distinct": 0, "group": 0, "22": 0, "25": 0, "26": 0, "30": 0, "31": 0, "35": 0, "year": 0, "we": [0, 2], "introduc": [0, 2], "four": [0, 2], "name": [0, 2], "dynhcp": 0, "under": [0, 2], "thi": [0, 2], "first": 0, "two": [0, 2], "ar": [0, 2], "static": [0, 3], "while": [0, 2], "last": 0, "correspond": [0, 2], "dynam": [0, 3], "decod": 0, "involv": 0, "seven": 0, "task": 0, "emot": 0, "process": [0, 2], "gambl": 0, "languag": 0, "motor": 0, "relat": 0, "social": 0, "work": [0, 2], "memori": [0, 2], "each": [0, 2], "design": 0, "help": 0, "delin": 0, "core": 0, "set": 0, "function": [0, 2, 3], "relev": 0, "differ": 0, "facet": 0, "between": 0, "human": [0, 3], "brain": 0, "behavior": 0, "present": 0, "activ": 0, "represent": [0, 2], "its": 0, "counterpart": 0, "our": [0, 2], "compris": 0, "signific": 0, "list": [0, 2], "sort": 0, "fluid": [0, 2], "intellig": [0, 2], "evalu": 0, "pmat24": 0, "refer": [0, 2], "individu": 0, "s": [], "capac": 0, "temporarili": 0, "hold": 0, "manipul": 0, "inform": 0, "crucial": 0, "aspect": 0, "influenc": 0, "higher": 0, "reason": 0, "comprehens": 0, "learn": [0, 2], "repres": [0, 2], "abil": 0, "solv": 0, "novel": 0, "problem": 0, "independ": 0, "ani": 0, "knowledg": 0, "from": [0, 2], "past": 0, "It": [0, 2], "demonstr": [0, 2], "analyz": 0, "complex": 0, "relationship": 0, "identifi": 0, "pattern": 0, "deriv": 0, "solut": 0, "situat": 0, "predict": 0, "both": [0, 2], "quantifi": 0, "continu": [0, 2], "variabl": 0, "treat": 0, "regress": 0, "aim": 0, "perform": [0, 2], "score": 0, "connectom": [0, 3], "gener": [0, 2], "fi": 0, "wm": 0, "briefli": 2, "fundament": 2, "concept": 2, "through": 2, "self": 2, "contain": 2, "close": 2, "format": 2, "pyg": [2, 4], "therefor": 2, "interest": 2, "reader": 2, "machin": 2, "class": 2, "datast": 2, "util": 2, "inmemorydataset": 2, "allow": 2, "easi": 2, "interfac": 2, "appli": 2, "pipelin": 2, "For": 2, "hcpgender": 2, "import": 2, "neurographdataset": 2, "root": 2, "print": 2, "num_class": 2, "num_featur": 2, "To": [2, 4], "effici": 2, "store": 2, "batch": 2, "here": 
[2, 4], "dynhcpgend": 2, "make": 2, "compat": 2, "bridg": 2, "gap": 2, "betwe": 2, "domain": 2, "offer": 2, "tool": 2, "easili": [2, 4], "construct": 2, "how": 2, "your": 2, "own": 2, "adjac": 2, "matrix": 2, "object": 2, "creat": 2, "functional_connectom": 2, "adj": 2, "construct_adj": 2, "fc": 2, "threshold": 2, "5": 2, "construct_data": 2, "label": 2, "1": 2, "correl": 2, "node": 2, "featur": 2, "sourc": 2, "code": 2, "one": 2, "fmri": 2, "scan": 2, "regressor": 2, "numpi": 2, "np": 2, "nilearn": 2, "imag": 2, "load_img": 2, "img": 2, "raw": 2, "nii": 2, "gz": 2, "reg": 2, "loadtxt": 2, "txt": 2, "movement": 2, "get_fdata": 2, "n_roi": 2, "100": 2, "torch": 2, "consist": 2, "five": 2, "also": 2, "seper": 2, "parcel": 2, "schaefer": 2, "atla": 2, "default": 2, "y": 2, "remove_drift": 2, "regress_head_mot": 2, "construct_corr": 2, "primari": 2, "explor": 2, "search": [2, 3], "space": 2, "websit": 2, "accept": 2, "usag": 2, "addition": 2, "aw": 2, "s3": 2, "bucket": 2, "onc": 2, "author": 2, "ha": 2, "been": 2, "obtain": 2, "In": 2, "section": 2, "variou": 2, "you": 2, "crawl": 2, "enabl": 2, "These": 2, "streamlin": 2, "prepar": 2, "further": 2, "analysi": 2, "model": 2, "brain_connectome_rest_download": 2, "boto3": 2, "path_to_data": 2, "n_job": 2, "script": 2, "run": 2, "parallel": 2, "requir": [2, 4], "number": 2, "job": 2, "input": 2, "access_kei": 2, "connectomedb": 2, "credenti": 2, "secret_kei": 2, "client": 2, "aws_access_key_id": 2, "aws_secret_access_kei": 2, "hcp_behavior": 2, "csv": 2, "id": 2, "pkl": 2, "file": 2, "directori": 2, "have": 2, "rest_dataset": 2, "note": 2, "separ": 2, "gender_dataset": 2, "age_dataset": 2, "hcpage": 2, "wm_datast": 2, "wm_dataset": 2, "hcpwm": 2, "fi_datast": 2, "fi_dataset": 2, "hcpfi": 2, "state": [2, 3], "state_dataset": 2, "brain_connectome_state_download": 2, "dataset_nam": 2, "roi": 2, "If": 2, "local": 2, "brain_connectome_rest": 2, "similarli": 2, "brain_connectome_st": 2, "similar": 2, "rest": 2, "ngd": 2, "dyn_prep": 2, "window_s": 2, "50": 2, "stride": 2, "3": [2, 4], "dynamic_length": 2, "none": 2, "len": 2, "form": 2, "fed": 2, "fly": 2, "dyn_obj": 2, "preporcess": 2, "dyn_down_prep": 2, "10": 2, "150": 2, "data_dict": 2, "dictionari": 2, "against": 2, "prprocess": 2, "k": 2, "v": 2, "item": 2, "l": 2, "0": 2, "int": 2, "sub": 2, "d": 2, "new_data": 2, "x": 2, "edge_index": 2, "append": 2, "from_data_list": 2, "instanc": 2, "new_dataset": 2, "2": 2, "ignor": 2, "subject": 2, "36": 2, "demograph": 3, "mental": 3, "cognit": 3, "trait": 3, "introduct": 3, "exampl": 3, "project": 3, "hcp1200": 3, "download": 3, "index": 3, "modul": 3, "page": 3, "python": 4, "pip": 4, "develop": 4, "top": 4, "pleas": 4, "instruct": 4, "i": [0, 2, 4], "": [0, 2]}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"demograph": 0, "mental": 0, "state": 0, "cognit": 0, "trait": 0, "load": [1, 2, 3], "benchmark": [1, 2, 3], "dataset": [1, 2], "introduct": 2, "exampl": 2, "static": 2, "dynam": 2, "preprocess": [2, 3, 5], "human": 2, "connectom": 2, "project": 2, "hcp1200": 2, "download": 2, "neurograph": [3, 5, 6], "instal": [3, 4], "get": 3, "start": 3, "util": [3, 6], "indic": 3, "tabl": 3, "function": 5}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 6, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 
56}}) -------------------------------------------------------------------------------- /doc/_build/html/utils.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | NeuroGraph Utilities — NeuroGraph 2.1.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 76 | 77 |
81 | 82 |
83 |
84 |
85 | 92 |
93 |
94 |
95 |
96 | 97 |
98 |

NeuroGraph Utilities

99 |
100 | 101 | 102 |
103 |
104 |
107 | 108 |
109 | 110 |
111 |

© Copyright 2023, Anwar Said.

112 |
113 | 114 | Built with Sphinx using a 115 | theme 116 | provided by Read the Docs. 117 | 118 | 119 |
120 |
121 |
122 |
123 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
import sphinx_rtd_theme
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

import os, sys
sys.path.insert(0, os.path.abspath(".."))

source_suffix = '.rst'
master_doc = 'index'

project = 'NeuroGraph'
copyright = '2023, Anwar Said'
author = 'Anwar Said'
release = '2.1.0'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = ["sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx.ext.autodoc"]
html_output_path = '$READTHEDOCS_OUTPUT/html'
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

add_module_names = False
language = "en"

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

pygments_style = 'sphinx'
todo_include_todos = False

html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

html_static_path = ['_static']
--------------------------------------------------------------------------------
/doc/datasets.rst:
--------------------------------------------------------------------------------
Load Benchmark Datasets
==================================


.. automodule:: NeuroGraph.datasets
    :members:
    :undoc-members:
    :show-inheritance:
--------------------------------------------------------------------------------
/doc/get_started.rst:
--------------------------------------------------------------------------------
Introduction by Example
================================

We will briefly introduce the fundamental concepts of NeuroGraph through self-contained examples. We closely follow the data representation format of `PyG <https://pytorch-geometric.readthedocs.io/en/latest/>`_. Interested readers are therefore referred to the PyG documentation for an introduction to graph machine learning and to PyG's data representation formats.


Loading Benchmark Datasets
----------------------------------

NeuroGraph provides two classes for loading static and dynamic benchmark datasets.

Loading Static Benchmarks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
NeuroGraph utilizes the `PyG` `InMemoryDataset` class to facilitate the loading of datasets. This provides an easy-to-use interface for applying graph machine learning pipelines. For example, the `HCPGender` benchmark can be loaded as follows:

.. code-block:: python
    :linenos:

    from NeuroGraph.datasets import NeuroGraphDataset
    dataset = NeuroGraphDataset(root="data/", name="HCPGender")
    print(dataset.num_classes)
    print(dataset.num_features)
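Since `NeuroGraphDataset` is a standard `PyG` `InMemoryDataset`, the loaded benchmark can be handed straight to PyG's mini-batch loader. The following sketch is purely illustrative (the batch size of 16 mirrors our baseline script) and is not an additional NeuroGraph API:

.. code-block:: python
    :linenos:

    from torch_geometric.loader import DataLoader

    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    for batch in loader:
        print(batch.num_graphs, batch.x.shape)  # mini-batches of whole graphs
        break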
Loading Dynamic Dataset
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To efficiently store and utilize the dynamic datasets in `PyG` Batch format, we provide the corresponding functionality. Here is an example of loading the `DynHCPGender` dataset:

.. code-block:: python
    :linenos:

    from NeuroGraph.datasets import NeuroGraphDynamic
    data_obj = NeuroGraphDynamic(root="data/", name="DynHCPGender")
    dataset = data_obj.dataset
    labels = data_obj.labels
    print(len(dataset), len(labels))

The dataset is a list of dynamic graphs represented in the `PyG` batch format, making it compatible with graph machine learning pipelines.
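Each element of this list bundles the snapshot graphs of one scan into a single `Batch` object. A quick sanity check of what was loaded might look as follows (a sketch relying only on standard `PyG` `Batch` attributes):

.. code-block:: python
    :linenos:

    first = dataset[0]       # one subject: a Batch of snapshot graphs
    print(first.num_graphs)  # number of sliding-window snapshots
    print(first.x.shape, labels[0])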
Preprocessing Examples
====================================

To bridge the gap between NeuroGraph and graph machine learning domains, NeuroGraph offers tools to easily preprocess and construct graph-based neuroimaging datasets. Here, we demonstrate how to preprocess your own data to construct functional connectomes and generate the corresponding graph-based representations.

.. code-block:: python
    :linenos:

    from NeuroGraph import utils
    fc = utils.preprocess(fmri, regs, n_rois=1000)  # fmri and regs could be numpy arrays

The corresponding `Adjacency matrix` and `PyG` data objects can be created from the functional connectome as follows.

.. code-block:: python
    :linenos:

    from NeuroGraph import utils
    adj = utils.construct_adj(fc, threshold=5)  # construct the adjacency matrix
    data = utils.construct_data(fc, label=1, threshold=5)  # construct PyG data object

We use correlations as node features while constructing the data object from the functional connectome.
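For intuition, the `threshold` argument controls the sparsity of the resulting graph. The sketch below illustrates the idea with plain NumPy, under the assumption that the threshold is read as the percentage of strongest correlations to keep; it is an illustration only, not the package's implementation:

.. code-block:: python
    :linenos:

    import numpy as np

    def sparsify(fc, threshold=5.0):
        """Keep only the strongest `threshold` percent of correlations."""
        cutoff = np.percentile(np.abs(fc), 100 - threshold)
        adj = np.where(np.abs(fc) >= cutoff, fc, 0.0)
        np.fill_diagonal(adj, 0.0)  # no self-loops
        return adj

    sparse_fc = sparsify(fc)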
The following is the source code for processing one fMRI scan with its corresponding movement regressor using our preprocessing pipeline.

.. code-block:: python
    :linenos:

    from NeuroGraph import utils
    import numpy as np
    from nilearn.image import load_img

    img = load_img("data/raw/1.nii.gz")  # 1.nii.gz is the fMRI scan
    regs = np.loadtxt("data/raw/1.txt")  # 1.txt is the movement regressor
    fmri = img.get_fdata()
    fc = utils.preprocess(fmri, regs, n_rois=100)
    adj = utils.construct_adj(fc, threshold=5)  # construct the adjacency matrix
    data = utils.construct_data(fc, label=1, threshold=5)  # construct torch Data object

Our preprocessing pipeline consists of five steps and can also be applied separately, step by step.

.. code-block:: python
    :linenos:

    from NeuroGraph import utils
    import numpy as np
    from nilearn.image import load_img

    img = load_img("data/raw/1.nii.gz")
    regs = np.loadtxt("data/raw/1.txt")
    fmri = img.get_fdata()
    parcells = utils.parcellation(fmri, n_rois=100)  ## this uses the Schaefer atlas by default
    Y = utils.remove_drifts(parcells)
    Y = utils.regress_head_motions(Y, regs)
    fc = utils.construct_corr(Y)
    adj = utils.construct_adj(fc, threshold=5)  # construct the adjacency matrix
    data = utils.construct_data(fc, label=1, threshold=5)
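Conceptually, the correlation step that produces the functional connectome is the matrix of Pearson correlations between all pairs of ROI time series. The following self-contained sketch shows the idea on synthetic data (it is not the package code; `np.corrcoef` expects one row per variable, hence the transpose):

.. code-block:: python
    :linenos:

    import numpy as np

    rng = np.random.default_rng(0)
    timeseries = rng.standard_normal((1200, 100))  # time points x ROIs (hypothetical sizes)
    fc = np.corrcoef(timeseries.T)                 # 100 x 100 functional connectome
    print(fc.shape)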
Preprocessing Human Connectome Project (HCP1200) Dataset
==============================================================================

NeuroGraph utilizes the HCP1200 dataset as a primary data source for exploring the dataset generation search space and constructing benchmarks. The HCP1200 dataset can be accessed from the `HCP website <https://www.humanconnectome.org/>`_ by accepting the data usage terms. Additionally, the dataset is also available on an AWS S3 bucket, which can be accessed once authorization has been obtained from HCP. In this section, we provide various functions that allow you to crawl and preprocess the HCP datasets, enabling the construction of graph-based neuroimaging datasets. These functions streamline the process of obtaining and preparing the data for further analysis and modeling.


Download and preprocess static datasets
---------------------------------------------------

.. code-block:: python
    :linenos:

    from NeuroGraph.preprocess import Brain_Connectome_Rest_Download
    import boto3

    root = "data/"
    name = "HCPGender"
    threshold = 5
    path_to_data = "data/raw/HCPGender"  # store the raw downloaded scans here
    n_rois = 100
    n_jobs = 5  # this script runs in parallel and requires the number of jobs as an input

    ACCESS_KEY = ''  # your connectomeDB credentials
    SECRET_KEY = ''
    s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
    # this function requires both the HCP_behavioral.csv and ids.pkl files under the root
    # directory; both files have been provided and can be found under the data directory
    rest_dataset = Brain_Connectome_Rest_Download(root, name, n_rois, threshold, path_to_data, n_jobs, s3)

The provided function facilitates the download of data from the AWS S3 bucket, performs the preprocessing steps, and generates a graph-based dataset. It is important to note that the `rest_dataset` used in this function carries four labels: gender, age, working memory, and fluid intelligence. To create separate datasets based on these labels, the following functionalities can be used.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    rest_dataset = preprocess.Brain_Connectome_Rest_Download(root, name, n_rois, threshold, path_to_data, n_jobs, s3)
    gender_dataset = preprocess.Gender_Dataset(root, "HCPGender", rest_dataset)
    age_dataset = preprocess.Age_Dataset(root, "HCPAge", rest_dataset)
    wm_dataset = preprocess.WM_Dataset(root, "HCPWM", rest_dataset)
    fi_dataset = preprocess.FI_Dataset(root, "HCPFI", rest_dataset)

To construct the State dataset, the following functionality can be used.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    state_dataset = preprocess.Brain_Connectome_State_Download(root, dataset_name, n_rois, threshold, path_to_data, n_jobs, s3)

If you have the data locally, the following functionalities can be used to preprocess it.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    rest_dataset = preprocess.Brain_Connectome_Rest(root, name, n_rois, threshold, path_to_data, n_jobs)

Similarly, for constructing the State dataset, the following function can be used.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    state_dataset = preprocess.Brain_Connectome_State(root, name, n_rois, threshold, path_to_data, n_jobs)


Download and preprocess dynamic datasets
---------------------------------------------------

We also offer similar functionalities for constructing dynamic datasets. You can create a dynamic REST dataset from data stored locally as follows.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    ngd = preprocess.Dyn_Prep(fmri, regs, n_rois=100, window_size=50, stride=3, dynamic_length=None)
    dataset = ngd.dataset
    labels = ngd.labels
    print(len(dataset), len(labels))
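The `window_size`, `stride`, and `dynamic_length` arguments determine how many snapshot graphs each scan yields. The arithmetic below is a sketch under the usual sliding-window convention; the exact boundary handling inside the preprocessing classes may differ:

.. code-block:: python
    :linenos:

    window_size, stride, dynamic_length = 50, 3, 150

    # windows start at 0, stride, 2*stride, ... for as long as they still fit
    n_windows = (dynamic_length - window_size) // stride + 1
    print(n_windows)  # 34 snapshot graphs for these settings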
Here the dataset is a list containing dynamic graphs in the form of PyG Batch objects, which can easily be fed into graph machine learning pipelines. The following examples demonstrate how a dynamic REST dataset can be downloaded and preprocessed on the fly.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess

    dyn_obj = preprocess.Dyn_Down_Prep(root, name, s3, n_rois=100, threshold=10, window_size=50, stride=3, dynamic_length=150)
    dataset = dyn_obj.data_dict

The `Dyn_Down_Prep` class downloads and preprocesses the rest dataset and provides a dictionary that contains a list of dynamic graphs for each subject id. The dataset can then be processed further as follows to construct each benchmark.

.. code-block:: python
    :linenos:

    from NeuroGraph import preprocess
    from torch_geometric.data import Data, Batch

    dyn_obj = preprocess.Dyn_Down_Prep(root, name, s3, n_rois=100, threshold=10, window_size=50, stride=3, dynamic_length=150)
    dataset = dyn_obj.data_dict
    gender_dataset, labels = [], []
    for k, v in dataset.items():
        if v is None:
            continue
        l = v[0].y
        gender = int(l[0].item())
        sub = []
        for d in v:
            new_data = Data(x=d.x, edge_index=d.edge_index, y=gender)
            sub.append(new_data)
        batch = Batch.from_data_list(sub)
        gender_dataset.append(batch)
        labels.append(gender)
    print("gender dataset created with {} batches and {} labels".format(len(gender_dataset), len(labels)))
    new_dataset = {'labels': labels, "batches": gender_dataset}

    age_dataset, labels = [], []
    for k, v in dataset.items():
        if v is None:
            continue
        l = v[0].y
        age = int(l[1].item())
        if age <= 2:  ### ignore subjects aged 36 and above; only the three younger age groups are kept
            sub = []
            for d in v:
                new_data = Data(x=d.x, edge_index=d.edge_index, y=age)
                sub.append(new_data)
            batch = Batch.from_data_list(sub)
            age_dataset.append(batch)
            labels.append(age)
    print("age dataset created with {} batches and {} labels".format(len(age_dataset), len(labels)))
    new_dataset = {'labels': labels, "batches": age_dataset}
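Once constructed, such a dictionary can be persisted and reloaded without repeating the download. A minimal sketch using plain `torch` serialization follows (the file name is illustrative; newer torch versions may additionally require `weights_only=False` when loading pickled PyG objects):

.. code-block:: python
    :linenos:

    import torch

    torch.save(new_dataset, "data/DynHCPGender.pt")
    reloaded = torch.load("data/DynHCPGender.pt")
    print(len(reloaded["labels"]), len(reloaded["batches"]))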
successfully!",args.dataset) 50 | labels = [d.y.item() for d in dataset] 51 | 52 | train_tmp, test_indices = train_test_split(list(range(len(labels))), 53 | test_size=0.2, stratify=labels,random_state=args.seed,shuffle= True) 54 | tmp = dataset[train_tmp] 55 | train_labels = [d.y.item() for d in tmp] 56 | train_indices, val_indices = train_test_split(list(range(len(train_labels))), 57 | test_size=0.125, stratify=train_labels,random_state=args.seed,shuffle = True) 58 | train_dataset = tmp[train_indices] 59 | val_dataset = tmp[val_indices] 60 | test_dataset = dataset[test_indices] 61 | print("dataset {} loaded with train {} val {} test {} splits".format(args.dataset,len(train_dataset), len(val_dataset), len(test_dataset))) 62 | train_loader = DataLoader(train_dataset, args.batch_size, shuffle=False) 63 | val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False) 64 | test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False) 65 | args.num_features,args.num_classes = dataset.num_features,dataset.num_classes 66 | 67 | criterion = torch.nn.CrossEntropyLoss() 68 | #criterion = torch.nn.L1Loss() 69 | def train(train_loader): 70 | model.train() 71 | total_loss = 0 72 | for data in train_loader: 73 | data = data.to(args.device) 74 | out = model(data) 75 | loss = criterion(out, data.y) 76 | total_loss +=loss 77 | loss.backward() 78 | optimizer.step() 79 | optimizer.zero_grad() 80 | return total_loss/len(train_loader.dataset) 81 | # return total_loss/len(train_loader) # For L1 loss. This may retun higher loss on the regression tasks since the paper used (total_loss/len(train_loader.dataset)) 82 | 83 | @torch.no_grad() 84 | def test(loader): 85 | model.eval() 86 | correct = 0 87 | for data in loader: 88 | data = data.to(args.device) 89 | out = model(data) 90 | pred = out.argmax(dim=1) 91 | correct += int((pred == data.y).sum()) 92 | return correct / len(loader.dataset) 93 | 94 | val_acc_history, test_acc_history, test_loss_history = [],[],[] 95 | seeds = [123,124] 96 | for index in range(args.runs): 97 | start = time.time() 98 | fix_seed(seeds[index]) 99 | gnn = eval(args.model) 100 | model = ResidualGNNs(args,train_dataset,args.hidden,args.hidden_mlp,args.num_layers,gnn).to(args.device) ## apply GNN* 101 | print(model) 102 | total_params = sum(p.numel() for p in model.parameters()) 103 | print(f"Total number of parameters is: {total_params}") 104 | optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) 105 | loss, test_acc = [],[] 106 | best_val_acc,best_val_loss = 0.0,0.0 107 | for epoch in range(args.epochs): 108 | loss = train(train_loader) 109 | val_acc = test(val_loader) 110 | test_acc = test(test_loader) 111 | # if epoch%10==0: 112 | print("epoch: {}, loss: {}, val_acc:{}, test_acc:{}".format(epoch, np.round(loss.item(),6), np.round(val_acc,2),np.round(test_acc,2))) 113 | val_acc_history.append(val_acc) 114 | if val_acc > best_val_acc: 115 | best_val_acc = val_acc 116 | if epoch> int(args.epochs/2):## save the best model 117 | torch.save(model.state_dict(), path + args.dataset+args.model+'task-checkpoint-best-acc.pkl') 118 | 119 | 120 | #test the model 121 | model.load_state_dict(torch.load(path + args.dataset+args.model+'task-checkpoint-best-acc.pkl')) 122 | model.eval() 123 | test_acc = test(test_loader) 124 | test_loss = train(test_loader).item() 125 | test_acc_history.append(test_acc) 126 | test_loss_history.append(test_loss) 127 | -------------------------------------------------------------------------------- /main_dynamic.py: 
labels = [d.y.item() for d in dataset]

train_tmp, test_indices = train_test_split(list(range(len(labels))),
    test_size=0.2, stratify=labels, random_state=args.seed, shuffle=True)
tmp = dataset[train_tmp]
train_labels = [d.y.item() for d in tmp]
# 0.125 of the remaining 80% equals 10% of the full data, giving a 70/10/20 train/val/test split
train_indices, val_indices = train_test_split(list(range(len(train_labels))),
    test_size=0.125, stratify=train_labels, random_state=args.seed, shuffle=True)
train_dataset = tmp[train_indices]
val_dataset = tmp[val_indices]
test_dataset = dataset[test_indices]
print("dataset {} loaded with train {} val {} test {} splits".format(args.dataset, len(train_dataset), len(val_dataset), len(test_dataset)))
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=False)
val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False)
args.num_features, args.num_classes = dataset.num_features, dataset.num_classes

criterion = torch.nn.CrossEntropyLoss()
# criterion = torch.nn.L1Loss()

def train(train_loader):
    model.train()
    total_loss = 0
    for data in train_loader:
        data = data.to(args.device)
        out = model(data)
        loss = criterion(out, data.y)
        total_loss += loss.item()  # accumulate a float rather than the graph-carrying tensor
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    return total_loss/len(train_loader.dataset)
    # return total_loss/len(train_loader) # For L1 loss. This may return a higher loss on the
    # regression tasks since the paper used (total_loss/len(train_loader.dataset))

@torch.no_grad()
def evaluate_loss(loader):
    # average loss without gradient updates; calling train() here would also step the optimizer
    model.eval()
    total_loss = 0
    for data in loader:
        data = data.to(args.device)
        out = model(data)
        total_loss += criterion(out, data.y).item()
    return total_loss/len(loader.dataset)

@torch.no_grad()
def test(loader):
    model.eval()
    correct = 0
    for data in loader:
        data = data.to(args.device)
        out = model(data)
        pred = out.argmax(dim=1)
        correct += int((pred == data.y).sum())
    return correct / len(loader.dataset)

val_acc_history, test_acc_history, test_loss_history = [], [], []
seeds = [123, 124]  # one seed per run; extend this list if args.runs > 2
for index in range(args.runs):
    start = time.time()
    fix_seed(seeds[index])
    gnn = eval(args.model)  # resolve the convolution class imported from utils, e.g. GCNConv
    model = ResidualGNNs(args, train_dataset, args.hidden, args.hidden_mlp, args.num_layers, gnn).to(args.device)  ## apply GNN*
    print(model)
    total_params = sum(p.numel() for p in model.parameters())
    print(f"Total number of parameters is: {total_params}")
    optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    loss, test_acc = [], []
    best_val_acc, best_val_loss = 0.0, 0.0
    for epoch in range(args.epochs):
        loss = train(train_loader)
        val_acc = test(val_loader)
        test_acc = test(test_loader)
        # if epoch%10==0:
        print("epoch: {}, loss: {}, val_acc: {}, test_acc: {}".format(epoch, np.round(loss, 6), np.round(val_acc, 2), np.round(test_acc, 2)))
        val_acc_history.append(val_acc)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            if epoch > int(args.epochs/2):  ## save the best model from the second half of training
                torch.save(model.state_dict(), path + args.dataset + args.model + 'task-checkpoint-best-acc.pkl')

    # test the model
    model.load_state_dict(torch.load(path + args.dataset + args.model + 'task-checkpoint-best-acc.pkl'))
    model.eval()
    test_acc = test(test_loader)
    test_loss = evaluate_loss(test_loader)
    test_acc_history.append(test_acc)
    test_loss_history.append(test_loss)
--------------------------------------------------------------------------------
/main_dynamic.py:
--------------------------------------------------------------------------------
# to be uploaded!
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
sphinx_rtd_theme
boto3
nilearn
pandas
torch
torch_geometric
networkx
nibabel
--------------------------------------------------------------------------------
/run_baseline.sh:
--------------------------------------------------------------------------------
dataset="HCPGender"
batch_size="16"
model="GCNConv"
hidden="64"
main="main.py"
python $main --dataset $dataset --model $model --device 'cuda' --batch_size $batch_size --hidden $hidden --runs 10
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

setup(
    name='NeuroGraph',
    version='3.0.0',
    long_description='A Python package for graph-based neuroimaging benchmarks and tools',
    long_description_content_type='text/markdown',
    author='Anwar Said',
    author_email='',
    packages=find_packages(),
    install_requires=[
        # list any dependencies your package requires
        'boto3',
        'nilearn',
        'nibabel',
        'networkx',
        'pandas',
        'sphinx_rtd_theme'
    ],
    keywords=['python', 'neuroimaging', 'graph machine learning'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering',
    ],
)
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
from NeuroGraph import utils
import numpy as np
from nilearn.image import load_img
import sys


def main(file, roi):
    img = load_img(file)
    regs = np.loadtxt("NeuroGraph/data/raw/1.txt")  # movement regressor for the scan
    fmri = img.get_fdata()
    Y = utils.parcellation(fmri, roi)
    Y = utils.remove_drifts(Y)
    print(Y.shape)
    M = utils.regress_head_motions(Y, regs)
    print(M.shape)
    fc = utils.construct_corr(M)
    np.save("NeuroGraph/data/fc.npy", fc)  # np.save requires a file name as its first argument
    # fc = np.load("NeuroGraph/data/fc.npy")
    print(fc.shape)
    data = utils.construct_data(fc, 1)
    print(data)


if __name__ == "__main__":
    file_path = sys.argv[1]
    main(file_path, 100)
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import torch
from torch.nn import Linear
from torch import nn
from torch_geometric.nn import global_max_pool
from torch_geometric.nn import aggr
import torch.nn.functional as F
from torch_geometric.nn import APPNP, MLP, GCNConv, GINConv, SAGEConv, GraphConv, TransformerConv, ChebConv, GATConv, SGConv, GeneralConv
from torch.nn import Conv1d, MaxPool1d, ModuleList
import random
import numpy as np

softmax = torch.nn.LogSoftmax(dim=1)

def fix_seed(seed):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

class ResidualGNNs(torch.nn.Module):
    # GNN*: concatenates the flattened upper triangle of each connectome with
    # mean-aggregated hidden GNN embeddings before the MLP classification head
    def __init__(self, args, train_dataset, hidden_channels, hidden, num_layers, GNN, k=0.6):
        super().__init__()
        self.convs = ModuleList()
        self.aggr = aggr.MeanAggregation()
        self.hidden_channels = hidden_channels
        num_features = train_dataset.num_features
        if args.model == "ChebConv":
            if num_layers > 0:
                self.convs.append(GNN(num_features, hidden_channels, K=5))
                for i in range(0, num_layers - 1):
                    self.convs.append(GNN(hidden_channels, hidden_channels, K=5))
        else:
            if num_layers > 0:
                self.convs.append(GNN(num_features, hidden_channels))
                for i in range(0, num_layers - 1):
                    self.convs.append(GNN(hidden_channels, hidden_channels))

        # the strict upper triangle of an n x n connectome has n(n-1)/2 entries
        input_dim1 = int(((num_features * num_features)/2) - (num_features/2) + (hidden_channels*num_layers))
        input_dim = int(((num_features * num_features)/2) - (num_features/2))
        self.bn = nn.BatchNorm1d(input_dim)
        self.bnh = nn.BatchNorm1d(hidden_channels*num_layers)
        self.mlp = nn.Sequential(
            nn.Linear(input_dim1, hidden),
            nn.BatchNorm1d(hidden),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(hidden, hidden//2),
            nn.BatchNorm1d(hidden//2),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(hidden//2, hidden//2),
            nn.BatchNorm1d(hidden//2),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear((hidden//2), args.num_classes),
        )

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        xs = [x]
        for conv in self.convs:
            xs += [conv(xs[-1], edge_index).tanh()]
        h = []
        for i, xx in enumerate(xs):
            if i == 0:
                # raw node features: keep only the upper-triangular connectome entries per graph
                xx = xx.reshape(data.num_graphs, x.shape[1], -1)
                x = torch.stack([t.triu().flatten()[t.triu().flatten().nonzero(as_tuple=True)] for t in xx])
                x = self.bn(x)
            else:
                # hidden GNN layers: mean-pool the node embeddings of each graph
                xx = self.aggr(xx, batch)
                h.append(xx)

        h = torch.cat(h, dim=1)
        h = self.bnh(h)
        x = torch.cat((x, h), dim=1)  # concatenate connectome features with GNN features
        x = self.mlp(x)
        return softmax(x)  # log-probabilities
--------------------------------------------------------------------------------
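For reference, a minimal sketch of how main.py wires this model together. The argument object below is a hypothetical stand-in for the script's argparse namespace, and the snippet assumes the HCPGender benchmark has already been downloaded as shown in the documentation above:

from types import SimpleNamespace
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GCNConv
from NeuroGraph.datasets import NeuroGraphDataset
from utils import ResidualGNNs

args = SimpleNamespace(model="GCNConv", num_classes=2)  # mocked argparse namespace
dataset = NeuroGraphDataset(root="data/", name="HCPGender")
model = ResidualGNNs(args, dataset, hidden_channels=32, hidden=64, num_layers=3, GNN=GCNConv)
batch = next(iter(DataLoader(dataset, batch_size=16)))
out = model(batch)  # log-probabilities with shape [num_graphs, num_classes]
print(out.shape)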