├── code
│   ├── RF_modules
│   │   ├── __init__.py
│   │   ├── RFMapping.py
│   │   ├── graph_representation.py
│   │   ├── Datasets.py
│   │   └── sampling.py
│   └── examples
│       ├── __init__.py
│       ├── OPU_SBM_varying_r.py
│       └── Gs_DD_varying_m.py
└── README.md
/code/RF_modules/__init__.py:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/code/examples/__init__.py:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# OPU_Graph_Classifier
## Fast graph classifier with optical random features

The package we built in this work is available in the directory <./code/RF_modules>.


To run one of the example codes in the <./code/examples> directory (say, example_name.py), place yourself in the <./code> directory and run the example as a module from there, i.e. run the following command from <./code>:

python -m examples.example_name

In all examples, we use one of the techniques (OPU RFs, Gs RFs, Gs+EIG RFs) to represent graphs, then a linear SVM model is used to learn how to classify them.
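
For concreteness, here is a minimal sketch of that pipeline using Gaussian RFs, so no OPU access is needed (the lightonml package must still be installed, since RF_modules.RFMapping imports it). All parameter values below are illustrative:

```python
from sklearn.svm import SVC

from RF_modules.sampling import graph_sampler
from RF_modules.RFMapping import Gaussian_random_features
from RF_modules.Datasets import dataset_loading
from RF_modules.graph_representation import graphlet_avg_features

k, m, s = 6, 1000, 500                                   # graphlet size, RFs number, samples per graph
(G_train, y_train), (G_test, y_test) = dataset_loading().generate_SBM(r=1.2)

sampler = graph_sampler("simple_random_sampling", k)     # S_k
feat_map = Gaussian_random_features(k**2, m, sigma=0.1)  # Gs RFs
rep = graphlet_avg_features(s, sampler, feat_map)

z_train, z_test = rep.apply(G_train), rep.apply(G_test)  # one column per graph
clf = SVC(kernel='linear').fit(z_train.T, y_train)
print('Test accuracy:', clf.score(z_test.T, y_test))
```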

**Note:** In order to execute the codes that use OPU RFs, you must have access to [LightOn](https://docs.lighton.ai/) servers. Please refer to this guiding [tutorial](https://community.lighton.ai/t/how-to-use-lighton-cloud-general-guide/20) to help you set that up correctly.


## Available examples:
### 1. OPU_SBM_varying_r.py
In this example, we classify graphs in the SBM-based dataset. We vary the value of the inter-class similarity parameter r, generate the corresponding graph dataset, and learn how to classify its graphs. We choose the following:
* S_k: uniform sampling
* s = 2000, m = 5000, k = 6
* OPU RFs
### 2. Gs_DD_varying_m.py
In this example, we classify graphs in the D&D dataset. We vary the number of RFs m and, for each value, learn how to classify the D&D graphs. The test accuracy is expected to improve as m grows. We choose the following:
* S_k: induced random walk
* s = 4000, m in {100, 500, 1000, 5000}, k = 7
* Gaussian RFs

The sampler S_k and the graphlet embedding are swappable; see the sketch below.
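As a sketch (with an illustrative p_flyback value), the other options implemented in ./code/RF_modules/sampling.py are selected like this:

```python
# random walk with fly-back: pass a (name, p_flyback) tuple
sampler = graph_sampler(("random_walk_flyback_sampling", 0.2), k)

# Gs+EIG RFs: embed each graphlet by its sorted adjacency spectrum, so the
# downstream feature map takes input_dim = k instead of k**2
sampler = graph_sampler("simple_random_sampling", k, preprocess="Eigen_values")
```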
--------------------------------------------------------------------------------
/code/RF_modules/RFMapping.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 15:10:11 2020

@author: hashemghanem
"""
from abc import ABC, abstractmethod
import numpy as np


# LightOn related packages.
# Note that you should have access to LightOn company's servers.
# This can be done by signing up and paying for the necessary credits.

import warnings
warnings.filterwarnings('ignore')
from lightonml.projections.sklearn import OPUMap


class feature_map(ABC):
    '''
    Abstract class for (random) feature mappings.
    Ensures that the transform method is implemented.
    '''
    def __init__(self, input_dim, features_num):
        self.input_dim = input_dim
        self.output_dim = features_num

    @abstractmethod
    def transform(self, A):
        '''
        In:  A (input_dim * batch_size)
        Out: B (output_dim * batch_size)
        '''
        pass

class Gaussian_random_features(feature_map):
    '''
    This class computes Gaussian random features.
    When initializing a new instance, you should pass:
        sigma: scale (std) of the random projection entries
        input_dim, features_num: size of the projection matrix
    Note that transform outputs 2*features_num features, since the
    cos and sin parts are concatenated.
    '''
    def __init__(self, input_dim, features_num, sigma):
        self.proj_mat = sigma * np.random.randn(features_num, input_dim)
        self.features_num = features_num

    def transform(self, A):
        temp = self.proj_mat.dot(A)
        return np.concatenate((np.cos(temp), np.sin(temp)))
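
# A minimal usage sketch for Gaussian_random_features (illustrative sizes):
#
#     phi = Gaussian_random_features(input_dim=36, features_num=1000, sigma=0.1)
#     A = np.random.rand(36, 64)   # batch of 64 flattened 6x6 graphlet adjacencies
#     Z = phi.transform(A)         # shape (2000, 64): cos part stacked on sin part
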
class Lighton_random_features(feature_map):
    '''
    This class computes optical random features with
    the help of the OPU technology developed by LightOn.
    When initializing a new instance, you should pass:
        input_dim, features_num: size of the projection matrix
    '''
    def __init__(self, input_dim, features_num):
        self.features_num = features_num
        self.random_mapping = OPUMap(n_components=features_num)
        self.random_mapping.opu.open()

    def transform(self, A):
        # the OPU projects binary inputs, hence the uint8 cast:
        # A is expected to be 0/1 valued
        A = np.uint8(A.T)
        train_random_features = self.random_mapping.transform(A)
        return train_random_features.astype('float32').T

    def close(self):
        self.random_mapping.opu.close()
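
# Usage sketch (requires OPU access on LightOn's servers; sizes illustrative):
#
#     phi = Lighton_random_features(input_dim=36, features_num=5000)
#     Z = phi.transform(A)   # A binary, shape (36, n) -> Z of shape (5000, n)
#     phi.close()            # release the OPU when done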
--------------------------------------------------------------------------------
/code/RF_modules/graph_representation.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 17:43:48 2020

@author: hashemghanem
"""
import numpy as np

class graphlet_avg_features():
    '''
    Main class for graphlet (random) feature averaging.
    Instantiated with a graph_sampler and a feature_map.
    With the 'apply' method, this class:
        1. takes a set of graphs Gnx
        2. samples samples_num subgraphs from each graph
        3. computes a random feature vector for each subgraph
        4. represents each graph by the average of its subgraphs' feature
           vectors.

    For each graph, graphlet sampling can be done in batches until samples_num
    is reached (by default, a single batch). This is controlled by the
    batch_size argument, which should satisfy 0 < batch_size <= samples_num.

    The subgraph size is implicitly contained in sampler and feat_map
    (of course, they should match).
    Formally, to instantiate the class you pass the following arguments:
        1. samples_num: number of subgraphs to be sampled from each graph
        2. sampler: an instance of the graph_sampler class
        3. feat_map: an instance of the feature_map class
        4. batch_size: how many subgraphs per batch,
           0 < batch_size <= samples_num
    '''
    def __init__(self, samples_num, sampler, feat_map, batch_size=None,
                 verbose=False):
        if batch_size is None:
            batch_size = samples_num
        self.num_batches = int(samples_num / batch_size)
        self.samples_num = self.num_batches * batch_size
        self.batch_size = batch_size
        self.sampler = sampler
        self.feat_map = feat_map
        self.verbose = verbose

    def calc_one_graph(self, G):
        # accumulate the feature vectors of all sampled subgraphs, batch by batch
        result = 0
        for _ in range(self.num_batches):
            graphlets = self.sampler.sample(G, self.batch_size)    # d * batch_size
            random_feature = self.feat_map.transform(graphlets)    # m * batch_size
            result = result + random_feature.sum(axis=1)
        return result / self.samples_num

    def apply(self, Gnx):
        columns = []
        for (i, G) in enumerate(Gnx):
            if self.verbose and np.mod(i, 10) == 0:
                print('Graph {}/{}'.format(i, len(Gnx)))
            columns.append(self.calc_one_graph(G)[:, None])
        return np.concatenate(columns, axis=1)
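
# A minimal usage sketch (illustrative sizes), assuming the classes from
# RF_modules.sampling and RF_modules.RFMapping:
#
#     sampler = graph_sampler("simple_random_sampling", 6)
#     phi = Gaussian_random_features(36, 1000, sigma=0.1)
#     rep = graphlet_avg_features(500, sampler, phi, batch_size=100)
#     Z = rep.apply(Gnx)   # shape (2000, len(Gnx)): one column per graph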
--------------------------------------------------------------------------------
/code/examples/OPU_SBM_varying_r.py:
--------------------------------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker as mtic
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold

from RF_modules.sampling import graph_sampler
from RF_modules.RFMapping import Lighton_random_features
from RF_modules.Datasets import dataset_loading
from RF_modules.graph_representation import graphlet_avg_features

'''
In this code, we do the following:
    1. set up the random feature mapping parameters
    2. choose the graphlet sampler we want
    3. vary the value of the inter-class similarity parameter r, and for each
       value:
        1. generate the corresponding SBM-based dataset (training/testing)
        2. calculate a representation vector for each graph in it
        3. train an SVM (linear kernel) classifier on the resulting dataset
        4. evaluate the trained model on the new testing dataset
    4. plot the accuracy curve
'''

# model selection: cross-validated grid search over the SVM penalty parameter C
def run_grid(z_train, z_test, y_train, y_test, C_range=10. ** np.arange(-2, 6)):
    param_grid = dict(C=C_range)
    grid = GridSearchCV(SVC(kernel='linear'),
                        param_grid=param_grid, cv=StratifiedKFold())
    print('Fit...')
    grid.fit(z_train, y_train)
    # predictions on the held-out test set
    y_pred = grid.predict(z_test)

    # compute and print the classification accuracy
    acc = accuracy_score(y_test, y_pred)
    print("Accuracy:", str(round(acc * 100, 2)) + "%")
    return acc

# setting up the parameters k, m, s, r, S_k
k, features_num, samples_num = 6, 5000, 2000
r = 1 + np.linspace(0.2, 1.3, 4)
sampler_type = "simple_random_sampling"

# creating an instance of the required sampler
sampler = graph_sampler(sampler_type, k)

# creating an instance of the required feature mapping class (OPU RFs)
feat_map = Lighton_random_features(k**2, features_num)

accur = np.zeros(len(r))
for (f_ind, factor) in enumerate(r):
    print('Processing r={}, remaining experiments: {}/{}'.format(factor, len(r) - f_ind - 1, len(r)))
    # generate a new synthetic dataset (SBM generator)
    (G_train, y_train), (G_test, y_test) = dataset_loading().generate_SBM(r=factor)
    # calculate a representation vector z for each graph
    graphletRF = graphlet_avg_features(samples_num, sampler, feat_map, batch_size=None, verbose=True)
    z_train = graphletRF.apply(G_train)
    z_test = graphletRF.apply(G_test)
    # train, then evaluate a linear SVM model on the training/testing data
    accur[f_ind] = run_grid(z_train.T, z_test.T, y_train, y_test)
# cutting access to the OPU
feat_map.close()

fig, ax = plt.subplots()
fmt = '%.0f%%'
yticks = mtic.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
# accur holds fractions, so scale to percentages to match the axis format
plt.plot(r, 100 * accur, 's', linewidth=2.8)
np.savetxt('accur.csv', accur, delimiter=',')
plt.xlabel('Inter-class similarity parameter (r)')
plt.ylabel('Test accuracy')
plt.grid()
plt.show()
--------------------------------------------------------------------------------
/code/examples/Gs_DD_varying_m.py:
--------------------------------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker as mtic
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold

from RF_modules.sampling import graph_sampler
from RF_modules.RFMapping import Gaussian_random_features
from RF_modules.Datasets import dataset_loading
from RF_modules.graph_representation import graphlet_avg_features

'''
In this code, we do the following:
    1. set up the random feature mapping parameters (Gaussian RFs here)
    2. choose the graphlet sampler we want (induced random walk sampler here)
    3. classify graphs in the D&D dataset: we vary the number of random
       features m, and for each value we:
        1. load the D&D dataset (training/testing)
        2. calculate a representation vector for each graph in it
        3. train an SVM (linear kernel) classifier on the resulting dataset
        4. evaluate the trained model on the testing dataset
    4. plot the accuracy curve
'''

# model selection: cross-validated grid search over the SVM penalty parameter C
def run_grid(z_train, z_test, y_train, y_test, C_range=10. ** np.arange(-2, 6)):
    param_grid = dict(C=C_range)
    grid = GridSearchCV(SVC(kernel='linear'),
                        param_grid=param_grid, cv=StratifiedKFold())
    print('Fit...')
    grid.fit(z_train, y_train)
    # predictions on the held-out test set
    y_pred = grid.predict(z_test)

    # compute and print the classification accuracy
    acc = accuracy_score(y_test, y_pred)
    print("Accuracy:", str(round(acc * 100, 2)) + "%")
    return acc

# setting up the parameters k, m, s, S_k
k, features_num, samples_num = 7, np.array([100, 500, 1000, 5000]), 4000
sigma_Gs = 0.1
sampler_type = "random_walk_induced_graph_sampling"

# creating an instance of the required sampler (induced RW)
sampler = graph_sampler(sampler_type, k)

accur = np.zeros(len(features_num))

for (f_ind, feat) in enumerate(features_num):
    print('Processing m={}, remaining experiments: {}/{}'.format(feat, len(features_num) - f_ind - 1, len(features_num)))
    # creating an instance of the required feature mapping class (Gaussian RFs)
    feat_map = Gaussian_random_features(k**2, feat, sigma=sigma_Gs)
    # load the D&D dataset
    (G_train, y_train), (G_test, y_test) = dataset_loading().DD(test_size=0.1)
    # calculate a representation vector z for each graph
    graphletRF = graphlet_avg_features(samples_num, sampler, feat_map, batch_size=None, verbose=True)
    z_train = graphletRF.apply(G_train)
    z_test = graphletRF.apply(G_test)
    # train, then evaluate a linear SVM model on the training/testing data
    accur[f_ind] = run_grid(z_train.T, z_test.T, y_train, y_test)

fig, ax = plt.subplots()
fmt = '%.0f%%'
yticks = mtic.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
# accur holds fractions, so scale to percentages to match the axis format
plt.plot(features_num, 100 * accur, 's', linewidth=2.8)
np.savetxt('accur.csv', accur, delimiter=',')
plt.xlabel('Number of RFs (m)')
plt.ylabel('Test accuracy')
plt.grid()
plt.show()
--------------------------------------------------------------------------------
/code/RF_modules/Datasets.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 17:02:01 2020

@author: hashemghanem
"""

from grakel.datasets import fetch_dataset
from sklearn.model_selection import train_test_split
import networkx as nx
import numpy as np

class dataset_loading:

    '''
    This class provides loading of the following datasets in Networkx graph form:
        1. MUTAG dataset
        2. Stochastic Block Model (SBM) graph generator
        3. D&D dataset
        4. Reddit-B dataset
    For each dataset you pass the test_size as one of the arguments.
    Based on that, the output is as follows:
        (training graphs, training labels), (testing graphs, testing labels)
    '''
    def __init__(self):
        pass
    # MUTAG dataset
    def Mutag(self, test_size=0.1):
        Gnx_train = []
        Gnx_test = []
        MUTAG = fetch_dataset("MUTAG", verbose=True, as_graphs=False)
        G, y = MUTAG.data, MUTAG.target
        G_train, G_test, y_train, y_test = train_test_split(G, y, test_size=test_size)
        for i in range(len(G_train)):
            g_current = nx.Graph(list(G_train[i][2]))
            g_current.add_nodes_from(G_train[i][1])
            Gnx_train.append(g_current)
        for i in range(len(G_test)):
            g_current = nx.Graph(list(G_test[i][2]))
            g_current.add_nodes_from(G_test[i][1])
            Gnx_test.append(g_current)
        return (Gnx_train, y_train), (Gnx_test, y_test)

    # SBM generator
    def generate_SBM(self, Graphs_num=300, nodes_per_graph=60, block_size=10,
                     fraction=0.3, r=1.2, avg_deg=10, test_size=0.2):
        blocks_num = int(nodes_per_graph / block_size)
        sizes = [block_size] * blocks_num
        G, y = [], []
        for i in range(Graphs_num):
            p_in=fraction if i
--------------------------------------------------------------------------------
/code/RF_modules/sampling.py:
--------------------------------------------------------------------------------
import random
import numpy as np
import networkx as nx
from numpy import linalg as LA


class graph_sampler:
    '''
    Main class for sampling graphlets from a graph. Supported sampler_type values:
        1. "simple_random_sampling": uniform node sampling
        2. "simple_random_walk_sampling": simple random walk sampling
        3. "random_walk_flyback_sampling": random walk with fly-back; pass the
           tuple ("random_walk_flyback_sampling", p_flyback) as sampler_type
        4. "random_walk_induced_graph_sampling": induced random walk sampling
    By default, each sampled subgraph is represented by its flattened adjacency
    matrix. Otherwise, a preprocess function f: adj_matrix -> embedding vector
    should be provided. We included Eigenvalues decomposition, i.e. f: adj_matrix -> sorted_eigenvalues(adj_matrix).
    To apply it, pass preprocess="Eigen_values" as an argument when initializing an instance.

    After you initialize an instance, you can sample a graph G by calling the sample
    method.
    '''
    def __init__(self, sampler_type, nodes_num, preprocess=None):
        if preprocess is None:
            preprocess = lambda x: x.flatten()
        elif preprocess == "Eigen_values":
            preprocess = lambda x: np.sort(np.real(LA.eig(x)[0])).flatten()
        self.preprocess = preprocess
        self.nodes_num = nodes_num
        # a tuple is passed for random_walk_flyback: (name, p_flyback)
        if type(sampler_type) == tuple:
            self.sampler_type = sampler_type[0]
            if sampler_type[0] == "random_walk_flyback_sampling":
                self.p_flyback = sampler_type[1]
        else:
            self.sampler_type = sampler_type

    def simple_sampling(self, G, nodes_num):
        # one uniform random sample: nodes_num nodes with their induced subgraph
        return G.subgraph(random.sample(list(G.nodes()), nodes_num))

    def random_walk_sampling_simple(self, complete_graph, nodes_to_sample):
        # T: check period (iterations); growth_size: minimal growth per period
        T, growth_size = 100, 2
        complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)
        # give every node a unique id equal to its (relabeled) index
        for n, data in complete_graph.nodes(data=True):
            complete_graph.nodes[n]['id'] = n

        nr_nodes = len(complete_graph.nodes())
        upper_bound_nr_nodes_to_sample = nodes_to_sample
        index_of_first_random_node = random.randint(0, nr_nodes - 1)
        sampled_graph = nx.Graph()

        sampled_graph.add_node(complete_graph.nodes[index_of_first_random_node]['id'])

        iteration = 1
        edges_before_t_iter = 0
        curr_node = index_of_first_random_node
        while sampled_graph.number_of_nodes() != upper_bound_nr_nodes_to_sample:
            edges = [n for n in complete_graph.neighbors(curr_node)]
            index_of_edge = random.randint(0, len(edges) - 1)
            chosen_node = edges[index_of_edge]
            sampled_graph.add_node(chosen_node)
            sampled_graph.add_edge(curr_node, chosen_node)
            curr_node = chosen_node
            iteration = iteration + 1

            # if the walk is stuck, restart from a fresh random node
            if iteration % T == 0:
                if (sampled_graph.number_of_edges() - edges_before_t_iter) < growth_size:
                    curr_node = random.randint(0, nr_nodes - 1)
                edges_before_t_iter = sampled_graph.number_of_edges()
        return sampled_graph

    def random_walk_sampling_with_fly_back(self, complete_graph, nodes_to_sample, fly_back_prob):
        # T: check period (iterations); growth_size: minimal growth per period
        growth_size, T = 2, 100
        complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)
        # give every node a unique id equal to its (relabeled) index
        for n, data in complete_graph.nodes(data=True):
            complete_graph.nodes[n]['id'] = n

        nr_nodes = len(complete_graph.nodes())
        upper_bound_nr_nodes_to_sample = nodes_to_sample

        index_of_first_random_node = random.randint(0, nr_nodes - 1)
        sampled_graph = nx.Graph()

        sampled_graph.add_node(complete_graph.nodes[index_of_first_random_node]['id'])

        iteration = 1
        edges_before_t_iter = 0
        curr_node = index_of_first_random_node
        while sampled_graph.number_of_nodes() != upper_bound_nr_nodes_to_sample:
            edges = [n for n in complete_graph.neighbors(curr_node)]
            index_of_edge = random.randint(0, len(edges) - 1)
            chosen_node = edges[index_of_edge]
            sampled_graph.add_node(chosen_node)
            sampled_graph.add_edge(curr_node, chosen_node)
            # with probability fly_back_prob, fly back to the previous node
            choice = np.random.choice(['prev', 'neigh'], 1, p=[fly_back_prob, 1 - fly_back_prob])
            if choice == 'neigh':
                curr_node = chosen_node
            iteration = iteration + 1

            # if the walk is stuck, restart from a fresh random node
            if iteration % T == 0:
                if (sampled_graph.number_of_edges() - edges_before_t_iter) < growth_size:
                    curr_node = random.randint(0, nr_nodes - 1)
                    print("Choosing another random node to continue random walk ")
                edges_before_t_iter = sampled_graph.number_of_edges()

        return sampled_graph

    def random_walk_induced_graph_sampling(self, complete_graph, nodes_to_sample):
        # T: check period (iterations); growth_size: minimal growth per period
        growth_size, T = 2, 100
        complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)
        # give every node a unique id equal to its (relabeled) index
        for n, data in complete_graph.nodes(data=True):
            complete_graph.nodes[n]['id'] = n

        nr_nodes = len(complete_graph.nodes())
        upper_bound_nr_nodes_to_sample = nodes_to_sample
        index_of_first_random_node = random.randint(0, nr_nodes - 1)

        Sampled_nodes = set([complete_graph.nodes[index_of_first_random_node]['id']])

        iteration = 1
        nodes_before_t_iter = 0
        curr_node = index_of_first_random_node
        while len(Sampled_nodes) != upper_bound_nr_nodes_to_sample:
            edges = [n for n in complete_graph.neighbors(curr_node)]
            index_of_edge = random.randint(0, len(edges) - 1)
            chosen_node = edges[index_of_edge]
            Sampled_nodes.add(complete_graph.nodes[chosen_node]['id'])
            curr_node = chosen_node
            iteration = iteration + 1

            # if the walk is stuck, restart from a fresh random node
            if iteration % T == 0:
                if (len(Sampled_nodes) - nodes_before_t_iter) < growth_size:
                    curr_node = random.randint(0, nr_nodes - 1)
                nodes_before_t_iter = len(Sampled_nodes)

        # return the subgraph induced by the sampled nodes
        sampled_graph = complete_graph.subgraph(Sampled_nodes)

        return sampled_graph

    def sample(self, G, samples_num):
        # returns a (d x samples_num) matrix: one preprocessed graphlet per column
        columns = []
        for _ in range(samples_num):
            if self.sampler_type == "simple_random_sampling":
                sampled_subgraph = self.simple_sampling(G, self.nodes_num)
            elif self.sampler_type == "simple_random_walk_sampling":
                sampled_subgraph = self.random_walk_sampling_simple(G, self.nodes_num)
            elif self.sampler_type == "random_walk_flyback_sampling":
                sampled_subgraph = self.random_walk_sampling_with_fly_back(G, self.nodes_num, self.p_flyback)
            elif self.sampler_type == "random_walk_induced_graph_sampling":
                sampled_subgraph = self.random_walk_induced_graph_sampling(G, self.nodes_num)
            else:
                raise ValueError("Unknown sampler_type: {}".format(self.sampler_type))
            columns.append(self.preprocess(nx.to_numpy_array(sampled_subgraph))[:, None])
        return np.concatenate(columns, axis=1)
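
# Usage sketch (illustrative sizes):
#
#     sampler = graph_sampler("random_walk_induced_graph_sampling", nodes_num=7)
#     X = sampler.sample(G, samples_num=100)   # shape (49, 100): one flattened
#                                              # 7x7 graphlet adjacency per column
#
#     # with preprocess="Eigen_values", each column is instead the sorted
#     # spectrum of the graphlet, so X would have shape (7, 100)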
--------------------------------------------------------------------------------