├── GAE ├── images │ ├── result.png │ └── graphsage_routing.png ├── sampling.py ├── data │ └── cora │ │ └── README ├── dataset.py ├── train.py ├── model.py └── gae.ipynb ├── GAT ├── images │ ├── result.png │ ├── mark_attention.png │ ├── graph_attention.png │ └── multi_head_attention.png ├── README.md ├── data │ └── cora │ │ └── README ├── train_batch.py ├── dataset.py └── model.py ├── GCN ├── images │ ├── graph.png │ ├── gcn_web.png │ ├── cnn_sample.jpg │ └── social_non_struct.jpg ├── README.md ├── data │ └── cora │ │ └── README ├── graph.py ├── train.py └── dataset.py ├── Social-LSTM ├── pred_results.pkl ├── data │ ├── trajectories.cpkl │ ├── pixel_pos_format.md │ └── ucy │ │ └── zara │ │ └── zara01 │ │ └── pixel_pos.csv ├── visual.py ├── social_model.py ├── grid.py ├── model.py ├── dataset.py ├── train.py ├── social_train.py └── social_dataset.py └── README.md /GAE/images/result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAE/images/result.png -------------------------------------------------------------------------------- /GAT/images/result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAT/images/result.png -------------------------------------------------------------------------------- /GCN/images/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GCN/images/graph.png -------------------------------------------------------------------------------- /GCN/images/gcn_web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GCN/images/gcn_web.png -------------------------------------------------------------------------------- /GCN/images/cnn_sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GCN/images/cnn_sample.jpg -------------------------------------------------------------------------------- /GAT/images/mark_attention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAT/images/mark_attention.png -------------------------------------------------------------------------------- /Social-LSTM/pred_results.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/Social-LSTM/pred_results.pkl -------------------------------------------------------------------------------- /GAT/images/graph_attention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAT/images/graph_attention.png -------------------------------------------------------------------------------- /GAE/images/graphsage_routing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAE/images/graphsage_routing.png -------------------------------------------------------------------------------- /GCN/images/social_non_struct.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GCN/images/social_non_struct.jpg 
--------------------------------------------------------------------------------
/Social-LSTM/data/trajectories.cpkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/Social-LSTM/data/trajectories.cpkl
--------------------------------------------------------------------------------
/GAT/images/multi_head_attention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YoungTimes/GNN/HEAD/GAT/images/multi_head_attention.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GNN
2 | 
3 | [TensorFlow 2.0 implementation of Graph Convolutional Networks (GCN)](GCN/gcn.ipynb)
4 | 
5 | 
6 | [TensorFlow 2.0 implementation of GraphSAGE](GAE/gae.ipynb)
--------------------------------------------------------------------------------
/GCN/README.md:
--------------------------------------------------------------------------------
1 | # Acknowledgements
2 | 
3 | This implementation drew on the following articles and repositories; many thanks to their authors!
4 | 
5 | https://github.com/FighterLYL/GraphNeuralNetwork
6 | 
7 | https://github.com/tkipf/keras-gcn
8 | 
9 | https://blog.csdn.net/qq_41995574/article/details/99712339
10 | 
11 | https://blog.csdn.net/weixin_40013463/article/details/81089223
12 | 
13 | 
--------------------------------------------------------------------------------
/Social-LSTM/data/pixel_pos_format.md:
--------------------------------------------------------------------------------
1 | # Format of pixel_pos.csv file for each dataset
2 | 
3 | Size of the matrix is 4 x numTrajectoryPoints
4 | 
5 | The first row contains all the frame numbers
6 | 
7 | The second row contains all the pedestrian IDs
8 | 
9 | The third row contains all the y-coordinates
10 | 
11 | The fourth row contains all the x-coordinates
12 | 
--------------------------------------------------------------------------------
/GAT/README.md:
--------------------------------------------------------------------------------
1 | # References:
2 | 
3 | 1.https://arxiv.org/pdf/1710.10903.pdf
4 | 
5 | 2.https://zhuanlan.zhihu.com/p/81350196
6 | 
7 | 3.https://blog.csdn.net/qq_41995574/article/details/99931294
8 | 
9 | 4.https://blog.csdn.net/weixin_36474809/article/details/89401552
10 | 
11 | 5.https://github.com/dmlc/dgl/blob/master/examples/pytorch/gat/gat.py
12 | 
13 | 6.https://github.com/coolsunxu/GAT_Pytorch/blob/master/gat_conv.py
14 | 
15 | 7.https://github.com/Diego999/pyGAT/blob/master/layers.py
--------------------------------------------------------------------------------
/GAE/sampling.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | 
4 | def sampling(src_nodes, sample_num, neighbor_table):
5 |     """Sample a fixed number of neighbors for each source node. Note that sampling is done with replacement;
6 |     when a node has fewer neighbors than sample_num, the result will contain duplicate nodes.
7 | 
8 |     Arguments:
9 |         src_nodes {list, ndarray} -- list of source nodes
10 |         sample_num {int} -- number of neighbors to sample per node
11 |         neighbor_table {dict} -- mapping from each node to its neighbors
12 | 
13 |     Returns:
14 |         np.ndarray -- flattened array of the sampled node ids
15 |     """
16 |     results = []
17 |     for sid in src_nodes:
18 |         # Sample with replacement from the node's neighbors
19 |         res = np.random.choice(neighbor_table[sid], size=(sample_num, ))
20 |         results.append(res)
21 |     return np.asarray(results).flatten()
22 | 
23 | 
24 | def multihop_sampling(src_nodes, sample_nums, neighbor_table):
25 |     """Multi-hop sampling starting from the source nodes
26 | 
27 |     Arguments:
28 |         src_nodes {list, np.ndarray} -- ids of the source nodes
29 |         sample_nums {list of int} -- number of neighbors to sample at each hop
30 |         neighbor_table {dict} -- mapping from each node to its neighbors
31 | 
32 |     Returns:
33 |         [list of ndarray] -- sampling results for each hop
34 |     """
35 |     sampling_result = [src_nodes]
36 | 
37 |     for k, hopk_num in enumerate(sample_nums):
38 |         hopk_result = sampling(sampling_result[k], hopk_num, neighbor_table)
39 |         sampling_result.append(hopk_result)
40 |     return sampling_result
--------------------------------------------------------------------------------
/GAE/data/cora/README:
--------------------------------------------------------------------------------
1 | This directory contains a selection of the Cora dataset (www.research.whizbang.com/data).
2 | 
3 | The Cora dataset consists of Machine Learning papers. These papers are classified into one of the following seven classes:
4 | 		Case_Based
5 | 		Genetic_Algorithms
6 | 		Neural_Networks
7 | 		Probabilistic_Methods
8 | 		Reinforcement_Learning
9 | 		Rule_Learning
10 | 		Theory
11 | 
12 | The papers were selected in a way such that in the final corpus every paper cites or is cited by at least one other paper. There are 2708 papers in the whole corpus. 
13 | 
14 | After stemming and removing stopwords we were left with a vocabulary of size 1433 unique words. All words with document frequency less than 10 were removed.
15 | 
16 | 
17 | THE DIRECTORY CONTAINS TWO FILES:
18 | 
19 | The .content file contains descriptions of the papers in the following format:
20 | 
21 | 		<paper_id> <word_attributes>+ <class_label>
22 | 
23 | The first entry in each line contains the unique string ID of the paper followed by binary values indicating whether each word in the vocabulary is present (indicated by 1) or absent (indicated by 0) in the paper. Finally, the last entry in the line contains the class label of the paper.
24 | 
25 | The .cites file contains the citation graph of the corpus. Each line describes a link in the following format:
26 | 
27 | 		<ID of cited paper> <ID of citing paper>
28 | 
29 | Each line contains two paper IDs. The first entry is the ID of the paper being cited and the second ID stands for the paper which contains the citation. The direction of the link is from right to left. If a line is represented by "paper1 paper2" then the link is "paper2->paper1".
--------------------------------------------------------------------------------
/GAT/data/cora/README:
--------------------------------------------------------------------------------
1 | This directory contains a selection of the Cora dataset (www.research.whizbang.com/data).
2 | 
3 | The Cora dataset consists of Machine Learning papers. These papers are classified into one of the following seven classes:
4 | 		Case_Based
5 | 		Genetic_Algorithms
6 | 		Neural_Networks
7 | 		Probabilistic_Methods
8 | 		Reinforcement_Learning
9 | 		Rule_Learning
10 | 		Theory
11 | 
12 | The papers were selected in a way such that in the final corpus every paper cites or is cited by at least one other paper. There are 2708 papers in the whole corpus. 
13 | 
14 | After stemming and removing stopwords we were left with a vocabulary of size 1433 unique words. All words with document frequency less than 10 were removed.
15 | 
16 | 
17 | THE DIRECTORY CONTAINS TWO FILES:
18 | 
19 | The .content file contains descriptions of the papers in the following format:
20 | 
21 | 		<paper_id> <word_attributes>+ <class_label>
22 | 
23 | The first entry in each line contains the unique string ID of the paper followed by binary values indicating whether each word in the vocabulary is present (indicated by 1) or absent (indicated by 0) in the paper. Finally, the last entry in the line contains the class label of the paper.
24 | 
25 | The .cites file contains the citation graph of the corpus. Each line describes a link in the following format:
26 | 
27 | 		<ID of cited paper> <ID of citing paper>
28 | 
29 | Each line contains two paper IDs. The first entry is the ID of the paper being cited and the second ID stands for the paper which contains the citation. The direction of the link is from right to left. If a line is represented by "paper1 paper2" then the link is "paper2->paper1".
--------------------------------------------------------------------------------
/GCN/data/cora/README:
--------------------------------------------------------------------------------
1 | This directory contains a selection of the Cora dataset (www.research.whizbang.com/data).
2 | 
3 | The Cora dataset consists of Machine Learning papers. These papers are classified into one of the following seven classes:
4 | 		Case_Based
5 | 		Genetic_Algorithms
6 | 		Neural_Networks
7 | 		Probabilistic_Methods
8 | 		Reinforcement_Learning
9 | 		Rule_Learning
10 | 		Theory
11 | 
12 | The papers were selected in a way such that in the final corpus every paper cites or is cited by at least one other paper. There are 2708 papers in the whole corpus. 
13 | 
14 | After stemming and removing stopwords we were left with a vocabulary of size 1433 unique words. All words with document frequency less than 10 were removed.
15 | 
16 | 
17 | THE DIRECTORY CONTAINS TWO FILES:
18 | 
19 | The .content file contains descriptions of the papers in the following format:
20 | 
21 | 		<paper_id> <word_attributes>+ <class_label>
22 | 
23 | The first entry in each line contains the unique string ID of the paper followed by binary values indicating whether each word in the vocabulary is present (indicated by 1) or absent (indicated by 0) in the paper. Finally, the last entry in the line contains the class label of the paper.
24 | 
25 | The .cites file contains the citation graph of the corpus. Each line describes a link in the following format:
26 | 
27 | 		<ID of cited paper> <ID of citing paper>
28 | 
29 | Each line contains two paper IDs. The first entry is the ID of the paper being cited and the second ID stands for the paper which contains the citation. The direction of the link is from right to left. If a line is represented by "paper1 paper2" then the link is "paper2->paper1".
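A minimal sketch of how the two files described above can be loaded with NumPy, mirroring what the dataset.py in each model directory does (the "data/cora/" path and the variable names are illustrative assumptions, not part of the dataset README):

    import numpy as np

    # Each row of cora.content: <paper_id> <word_attributes>+ <class_label>
    content = np.genfromtxt("data/cora/cora.content", dtype=str)
    features = content[:, 1:-1].astype("float32")   # 2708 x 1433 binary word indicators
    labels = content[:, -1]                         # one of the seven class names

    # Each row of cora.cites: <ID of cited paper> <ID of citing paper>
    paper_ids = content[:, 0].astype("int64")
    id_to_index = {pid: i for i, pid in enumerate(paper_ids)}
    cites = np.genfromtxt("data/cora/cora.cites", dtype="int64")
    edges = np.array([[id_to_index[a], id_to_index[b]] for a, b in cites])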
--------------------------------------------------------------------------------
/Social-LSTM/visual.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import pickle
4 | from scipy.stats import multivariate_normal
5 | 
6 | def draw_heatmap(mux, muy, sx, sy, rho, ax = plt, bound = 0.1):
7 |     x, y = np.meshgrid(np.linspace(mux - bound, mux + bound, 200),
8 |                        np.linspace(muy - bound, muy + bound, 200))
9 | 
10 |     mean = [mux, muy]
11 | 
12 |     # Build the covariance matrix from the std devs and the correlation
13 |     cov = [[sx * sx, rho * sx * sy], [rho * sx * sy, sy * sy]]
14 | 
15 |     gaussian = multivariate_normal(mean = mean, cov = cov)
16 |     d = np.dstack([x, y])
17 |     z = gaussian.pdf(d)
18 | 
19 |     z_min, z_max = -np.abs(z).max(), np.abs(z).max()
20 | 
21 |     ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max, alpha = 0.5)
22 | 
23 | def visual():
24 |     data_file = "./pred_results.pkl"
25 | 
26 |     f = open(data_file, "rb")
27 |     visual_data = pickle.load(f)
28 |     f.close()
29 | 
30 |     pred_trajs = visual_data[0]
31 |     truth_trajs = visual_data[1]
32 |     gauss_params = visual_data[2]
33 | 
34 |     traj_num = len(pred_trajs)
35 | 
36 |     for index in range(traj_num):
37 |         visual_trajectories(pred_trajs[index], truth_trajs[index], gauss_params[index])
38 | 
39 | 
40 | 
41 | def visual_trajectories(pred_traj, true_traj, gauss_param):
42 |     fig_width = 10
43 |     fig_height = 10
44 | 
45 |     fig = plt.figure(figsize=(fig_width, fig_height))
46 | 
47 |     plt.plot(true_traj[:, 0], true_traj[:, 1], color = 'g', linestyle = '-.', linewidth = 3,
48 |              marker = 'p', markersize = 15, markeredgecolor = 'g', markerfacecolor = 'g')
49 | 
50 |     plt.plot(pred_traj[:, 0], pred_traj[:, 1], color = 'r', linestyle = '-.', linewidth = 3,
51 |              marker = 'p', markersize = 10, markeredgecolor = 'r', markerfacecolor = 'r')
52 | 
53 |     plt.show()
54 | 
55 | 
56 | visual()
--------------------------------------------------------------------------------
/GCN/graph.py:
--------------------------------------------------------------------------------
1 | # https://github.com/tkipf/keras-gcn
2 | import tensorflow as tf
3 | 
4 | class GraphConvolutionLayer(tf.keras.layers.Layer):
5 |     """Basic graph convolution layer as in https://arxiv.org/abs/1609.02907"""
6 |     def __init__(self, input_dim, output_dim, support=1,
7 |                  activation=None,
8 |                  use_bias=True,
9 |                  kernel_initializer='glorot_uniform',
10 |                  bias_initializer='zeros',
11 |                  kernel_regularizer=None,
12 |                  bias_regularizer=None):
13 |         super(GraphConvolutionLayer, self).__init__()
14 | 
15 |         self.input_dim = input_dim
16 |         self.output_dim = output_dim
17 |         self.use_bias = use_bias
18 |         self.kernel_initializer = kernel_initializer
19 |         self.bias_initializer = bias_initializer
20 |         self.kernel_regularizer = kernel_regularizer
21 |         self.bias_regularizer = bias_regularizer
22 |         self.activation = activation
23 | 
24 |     def build(self, nodes_shape):
25 |         self.kernel = self.add_weight(shape = (self.input_dim, self.output_dim),
26 |                                       initializer = self.kernel_initializer,
27 |                                       name = 'kernel',
28 |                                       regularizer = self.kernel_regularizer)
29 |         if self.use_bias:
30 |             self.bias = self.add_weight(shape=(self.output_dim, ),
31 |                                         initializer=self.bias_initializer,
32 |                                         name='bias',
33 |                                         regularizer = self.bias_regularizer)
34 |         else:
35 |             self.bias = None
36 | 
37 |         self.built = True
38 | 
39 |     def call(self, nodes, edges):
40 |         support = tf.matmul(nodes, self.kernel)  # X · W
41 | 
42 |         output = tf.matmul(edges, support)  # propagate: Â · (X · W)
43 | 
44 |         if self.use_bias:
45 |             output += self.bias
46 | 
47 |         if self.activation is not None:
48 |             output = self.activation(output)
49 | 
50 |         return output
51 | 
52 | 
53 | class GraphConvolutionModel(tf.keras.Model):
54 |     def __init__(self):
55 |         super(GraphConvolutionModel, self).__init__()
56 | 
57 |         self.graph_conv_1 = GraphConvolutionLayer(1433, 16,
58 |                                                   activation=tf.keras.activations.relu,
59 |                                                   kernel_regularizer=tf.keras.regularizers.l2(0.01))
60 | 
61 |         self.graph_conv_2 = GraphConvolutionLayer(16, 7)
62 | 
63 |     def call(self, x, training=False):
64 | 
65 |         nodes = x[0]
66 |         edges = x[1]
67 | 
68 |         h = self.graph_conv_1(nodes, edges)
69 |         logit = self.graph_conv_2(h, edges)
70 | 
71 |         return logit
--------------------------------------------------------------------------------
/GCN/train.py:
--------------------------------------------------------------------------------
1 | from graph import GraphConvolutionLayer, GraphConvolutionModel
2 | from dataset import CoraData
3 | 
4 | import time
5 | import tensorflow as tf
6 | import matplotlib.pyplot as plt
7 | 
8 | dataset = CoraData()
9 | features, labels, adj, train_mask, val_mask, test_mask = dataset.data()
10 | 
11 | graph = [features, adj]
12 | 
13 | model = GraphConvolutionModel()
14 | 
15 | loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
16 | 
17 | def loss(model, x, y, train_mask, training):
18 | 
19 |     y_ = model(x, training=training)
20 | 
21 |     masked_logits = tf.gather_nd(y_, tf.where(train_mask))
22 |     masked_labels = tf.gather_nd(y, tf.where(train_mask))
23 | 
24 |     return loss_object(y_true=masked_labels, y_pred=masked_logits)
25 | 
26 | 
27 | def grad(model, inputs, targets, train_mask):
28 |     with tf.GradientTape() as tape:
29 |         loss_value = loss(model, inputs, targets, train_mask, training=True)
30 | 
31 |     return loss_value, tape.gradient(loss_value, model.trainable_variables)
32 | 
33 | def test(mask):
34 |     logits = model(graph)
35 | 
36 |     masked_logits = tf.gather_nd(logits, tf.where(mask))
37 |     masked_labels = tf.gather_nd(labels, tf.where(mask))
38 | 
39 |     ll = tf.math.equal(tf.math.argmax(masked_labels, -1), tf.math.argmax(masked_logits, -1))
40 |     accuracy = tf.reduce_mean(tf.cast(ll, dtype=tf.float32))
41 | 
42 |     return accuracy
43 | 
44 | optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, decay=5e-5)
45 | 
46 | # Record metrics during training for visualization at the end
47 | train_loss_results = []
48 | train_accuracy_results = []
49 | train_val_results = []
50 | train_test_results = []
51 | 
52 | num_epochs = 200
53 | 
54 | for epoch in range(num_epochs):
55 | 
56 |     loss_value, grads = grad(model, graph, labels, train_mask)
57 |     optimizer.apply_gradients(zip(grads, model.trainable_variables))
58 | 
59 |     accuracy = test(train_mask)
60 |     val_acc = test(val_mask)
61 |     test_acc = test(test_mask)
62 | 
63 |     train_loss_results.append(loss_value)
64 |     train_accuracy_results.append(accuracy)
65 |     train_val_results.append(val_acc)
66 |     train_test_results.append(test_acc)
67 | 
68 |     print("Epoch {} loss={} accuracy={} val_acc={} test_acc={}".format(epoch, loss_value, accuracy, val_acc, test_acc))
69 | 
70 | # Visualize the training process
71 | fig, axes = plt.subplots(4, sharex=True, figsize=(12, 8))
72 | fig.suptitle('Training Metrics')
73 | 
74 | axes[0].set_ylabel("Loss", fontsize=14)
75 | axes[0].plot(train_loss_results)
76 | 
77 | axes[1].set_ylabel("Accuracy", fontsize=14)
78 | axes[1].plot(train_accuracy_results)
79 | 
80 | axes[2].set_ylabel("Val Acc", fontsize=14)
81 | axes[2].plot(train_val_results)
82 | 
83 | axes[3].set_ylabel("Test Acc", fontsize=14)
84 | axes[3].plot(train_test_results)
85 | 
86 | plt.show()
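A note on the masking idiom used in train.py above: tf.gather_nd(y, tf.where(mask)) keeps exactly the rows of y whose mask entry is True, so the loss and accuracy are computed only on the current split while the model still runs on the full graph. A small self-contained sketch of the same idiom (tf.boolean_mask is an equivalent one-call alternative; the toy shapes here are illustrative, not from the repo):

    import numpy as np
    import tensorflow as tf

    logits = tf.random.normal((5, 7))                    # 5 nodes, 7 classes
    labels = tf.one_hot([0, 2, 1, 6, 3], depth=7)
    mask = tf.constant([True, True, False, False, True])

    a = tf.gather_nd(logits, tf.where(mask))             # rows 0, 1, 4
    b = tf.boolean_mask(logits, mask)                    # same rows
    assert np.allclose(a.numpy(), b.numpy())

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)(
        tf.boolean_mask(labels, mask), b)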
--------------------------------------------------------------------------------
/Social-LSTM/social_model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | 
4 | class SocialModel(tf.keras.Model):
5 | 
6 |     def __init__(self, args):
7 |         super(SocialModel, self).__init__()
8 |         self.args = args
9 | 
10 |         self.cell = tf.keras.layers.LSTMCell(args.rnn_size)
11 | 
12 |         self.spatial_embedding = tf.keras.layers.Dense(args.embedding_size, activation = tf.keras.activations.relu)
13 |         self.tensor_embedding = tf.keras.layers.Dense(args.embedding_size, activation = tf.keras.activations.relu)
14 | 
15 |         self.output_size = 5
16 |         self.output_layer = tf.keras.layers.Dense(self.output_size)
17 | 
18 |     def get_social_tensor(self, grid, hidden_states):
19 |         # grid: (N, N, grid_size^2) occupancy mask; hidden_states: (N, rnn_size)
20 |         grid = tf.cast(grid, tf.float32)
21 |         num_peds = grid.shape[0]
22 | 
23 |         # For each ped, pool the hidden states of the peds occupying each
24 |         # cell of its neighborhood grid: (grid_size^2 x N) @ (N x rnn_size)
25 |         social_tensor = []
26 |         for node in range(num_peds):
27 |             social_tensor.append(tf.matmul(tf.transpose(grid[node]), hidden_states))
28 |         social_tensor = tf.stack(social_tensor)
29 | 
30 |         # Flatten to (N, grid_size * grid_size * rnn_size)
31 |         return tf.reshape(social_tensor,
32 |                           (num_peds, self.args.grid_size * self.args.grid_size * self.args.rnn_size))
33 | 
34 |     def call(self, frame_datas, ped_lists, grid_frame_datas, ped_indexs):
35 |         num_peds = len(ped_indexs)
36 | 
37 |         # Hidden and cell states for every tracked ped, updated frame by frame
38 |         hidden_states = tf.zeros((num_peds, self.args.rnn_size))
39 |         cell_states = tf.zeros((num_peds, self.args.rnn_size))
40 | 
41 |         # One set of output parameters per ped per frame
42 |         outputs = tf.zeros((self.args.seq_length * num_peds, self.output_size))
43 | 
44 |         # frame_datas: [args.seq_length, args.max_num_peds, 3]
45 |         for frame_num, frame in enumerate(frame_datas):
46 |             node_ids = [int(node_id) for node_id in ped_lists[frame_num]]
47 | 
48 |             if len(node_ids) == 0:
49 |                 continue
50 | 
51 |             list_of_nodes = [ped_indexs[x] for x in node_ids]
52 |             corr_index = tf.constant(list_of_nodes, dtype=tf.int32)
53 | 
54 |             nodes_current = frame[list_of_nodes, :]
55 |             # Grid masks of the current frame, restricted to the peds present
56 |             # (assumes the masks are indexed by the same ped slots as frame)
57 |             grid_current = grid_frame_datas[frame_num][np.ix_(list_of_nodes, list_of_nodes)]
58 | 
59 |             hidden_states_current = tf.gather(hidden_states, corr_index)
60 |             cell_states_current = tf.gather(cell_states, corr_index)
61 | 
62 |             social_tensor = self.get_social_tensor(grid_current, hidden_states_current)
63 | 
64 |             # Embed the (x, y) inputs and the social tensor
65 |             input_embedded = self.spatial_embedding(nodes_current)
66 |             tensor_embedded = self.tensor_embedding(social_tensor)
67 | 
68 |             # Concatenate the two embeddings
69 |             concat_embedded = tf.concat([input_embedded, tensor_embedded], axis = 1)
70 | 
71 |             h_nodes, [h_state, c_state] = self.cell(concat_embedded,
72 |                                                     [hidden_states_current, cell_states_current])
73 | 
74 |             # Compute the output for the peds present in this frame
75 |             out_index = tf.expand_dims(frame_num * num_peds + corr_index, 1)
76 |             outputs = tf.tensor_scatter_nd_update(outputs, out_index, self.output_layer(h_nodes))
77 | 
78 |             # Update hidden and cell states
79 |             state_index = tf.expand_dims(corr_index, 1)
80 |             hidden_states = tf.tensor_scatter_nd_update(hidden_states, state_index, h_state)
81 |             cell_states = tf.tensor_scatter_nd_update(cell_states, state_index, c_state)
82 | 
83 |         # Regroup as (seq_length, num_peds, output_size)
84 |         return tf.reshape(outputs, (self.args.seq_length, num_peds, self.output_size))
--------------------------------------------------------------------------------
/GCN/dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import scipy.sparse as sp
4 | import numpy as np
5 | 
6 | class CoraData():
7 |     def __init__(self, data_root="data/cora/"):
8 | 
9 |         self._data_root = data_root
10 | 
11 |         self._data = self.process_data()
12 | 
13 | 
14 |     def load_data(self, dataset="cora"):
15 | 
16 |         print('Loading {} dataset...'.format(dataset))
17 | 
18 |         idx_features_labels = np.genfromtxt("{}{}.content".format(self._data_root, dataset), dtype=np.dtype(str))
19 | 
20 |         edges = np.genfromtxt("{}{}.cites".format(self._data_root, dataset), dtype=np.int32)
21 | 
22 |         return idx_features_labels, edges
23 | 
24 |     def process_data(self):
25 | 
26 |         print("Process data ...")
27 | 
28 |         idx_features_labels, edges = self.load_data()
29 | 
30 |         features = idx_features_labels[:, 1:-1].astype(np.float32)
31 |         features = self.normalize_feature(features)
32 | 
33 |         y = idx_features_labels[:, -1]
34 |         labels = self.encode_onehot(y)
35 | 
36 |         idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
37 |         idx_map = {j: i for i, j in enumerate(idx)}
38 |         edge_indexs = np.array(list(map(idx_map.get, edges.flatten())), dtype=np.int32)
39 |         edge_indexs = edge_indexs.reshape(edges.shape)
40 | 
41 |         edge_index_len = len(edge_indexs)
42 |         for i in range(edge_index_len):  # add the reverse of every edge (make the graph undirected)
43 |             edge_indexs = np.concatenate((edge_indexs, [[edge_indexs[i][1], edge_indexs[i][0]]]))
44 | 
45 |         adjacency = sp.coo_matrix((np.ones(len(edge_indexs)),
46 |                                    (edge_indexs[:, 0], edge_indexs[:, 1])),
47 |                                   shape=(features.shape[0], features.shape[0]), dtype="float32")
48 | 
49 |         adjacency = self.normalize_adj(adjacency)
50 | 
51 | 
52 |         train_index = np.arange(150)
53 |         val_index = np.arange(150, 500)
54 |         test_index = np.arange(500, 2708)
55 | 
56 |         train_mask = np.zeros(features.shape[0], dtype = bool)  # one mask entry per node
57 |         val_mask = np.zeros(features.shape[0], dtype = bool)
58 |         test_mask = np.zeros(features.shape[0], dtype = bool)
59 |         train_mask[train_index] = True
60 |         val_mask[val_index] = True
61 |         test_mask[test_index] = True
62 | 
63 |         print('Dataset has {} nodes, {} edges, {} features.'.format(features.shape[0], len(edge_indexs), features.shape[1]))
64 | 
65 |         return features, labels, adjacency, train_mask, val_mask, test_mask
66 | 
67 |     def encode_onehot(self, labels):
68 |         classes = set(labels)
69 |         classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
70 |         labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
71 |         return labels_onehot
72 | 
73 |     def normalize_adj(self, adjacency):
74 | 
75 |         """Compute L = D^-0.5 * (A + I) * D^-0.5"""
76 |         adjacency += sp.eye(adjacency.shape[0])  # add self-loops
77 |         degree = np.array(adjacency.sum(1))
78 |         d_hat = sp.diags(np.power(degree, -0.5).flatten())
79 | 
80 |         return d_hat.dot(adjacency).dot(d_hat).tocsr().todense()
81 | 
82 |     def normalize_feature(self, features):
83 | 
84 |         normal_features = features / features.sum(1).reshape(-1, 1)
85 | 
86 |         return normal_features
87 | 
88 |     def data(self):
89 |         """Return the processed data: (features, labels, adjacency, train_mask, val_mask, test_mask)"""
90 |         return self._data
--------------------------------------------------------------------------------
/Social-LSTM/grid.py:
--------------------------------------------------------------------------------
1 | '''
2 | Helper functions to compute the masks relevant to social grid
3 | Author : Anirudh Vemula
4 | Date : 29th October 2016
5 | '''
6 | import numpy as np
7 | 
8 | def get_grid_mask(frame, dimensions, neighborhood_size, grid_size):
9 |     '''
10 |     This function computes the binary mask that represents the
11 |     occupancy of each ped in the other's grid
12 |     params:
13 |     frame : This will be a MNP x 3 matrix with each row being [pedID, x, y]
14 |     dimensions : This will be a list [width, height]
15 |     neighborhood_size : Scalar value representing the size of neighborhood considered
16 |     grid_size : Scalar value representing the size of the grid discretization
17 |     '''
18 | 
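    # Note (an assumed reading of the code below): frame_mask[i, j, c] == 1
    # iff ped j falls inside cell c of ped i's neighborhood grid. The
    # neighborhood spans neighborhood_size units (normalized by the frame
    # dimensions) and is discretized into grid_size x grid_size cells, so
    # frame_mask[i].T @ hidden_states pools the neighbors' hidden states per
    # cell, which is the social tensor consumed by SocialModel.get_social_tensor.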
19 | # Maximum number of pedestrians 20 | mnp = frame.shape[0] 21 | width, height = dimensions[0], dimensions[1] 22 | 23 | frame_mask = np.zeros((mnp, mnp, grid_size**2)) 24 | 25 | width_bound, height_bound = neighborhood_size / (width * 1.0), neighborhood_size / (height * 1.0) 26 | 27 | # For each ped in the frame (existent and non-existent) 28 | for ped_index in range(mnp): 29 | # If pedID is zero, then non-existent ped 30 | if frame[ped_index, 0] == 0: 31 | # Binary mask should be zero for non-existent ped 32 | continue 33 | 34 | # Get x and y of the current ped 35 | current_x, current_y = frame[ped_index, 1], frame[ped_index, 2] 36 | 37 | width_low, width_high = current_x - width_bound / 2, current_x + width_bound / 2 38 | height_low, height_high = current_y - height_bound / 2, current_y + height_bound / 2 39 | 40 | # For all the other peds 41 | for otherped_index in range(mnp): 42 | # If other pedID is zero, then non-existent ped 43 | if frame[otherped_index, 0] == 0: 44 | # Binary mask should be zero 45 | continue 46 | 47 | # If the other pedID is the same as current pedID 48 | if frame[otherped_index, 0] == frame[ped_index, 0]: 49 | # The ped cannot be counted in his own grid 50 | continue 51 | 52 | # Get x and y of the other ped 53 | other_x, other_y = frame[otherped_index, 1], frame[otherped_index, 2] 54 | if other_x >= width_high or other_x < width_low or other_y >= height_high or other_y < height_low: 55 | # Ped not in surrounding, so binary mask should be zero 56 | continue 57 | 58 | # If in surrounding, calculate the grid cell 59 | cell_x = int(np.floor(((other_x - width_low) / width_bound) * grid_size)) 60 | cell_y = int(np.floor(((other_y - height_low) / height_bound) * grid_size)) 61 | 62 | # Other ped is in the corresponding grid cell of current ped 63 | frame_mask[ped_index, otherped_index, cell_x + cell_y * grid_size] = 1 64 | 65 | return frame_mask 66 | 67 | 68 | def get_sequence_grid_mask(sequence, dimensions, neighborhood_size, grid_size): 69 | ''' 70 | Get the grid masks for all the frames in the sequence 71 | params: 72 | sequence : A numpy matrix of shape SL x MNP x 3 73 | dimensions : This will be a list [width, height] 74 | neighborhood_size : Scalar value representing the size of neighborhood considered 75 | grid_size : Scalar value representing the size of the grid discretization 76 | ''' 77 | sl = sequence.shape[0] 78 | mnp = sequence.shape[1] 79 | sequence_mask = np.zeros((sl, mnp, mnp, grid_size**2)) 80 | 81 | for i in range(sl): 82 | sequence_mask[i, :, :, :] = get_grid_mask(sequence[i, :, :], dimensions, neighborhood_size, grid_size) 83 | 84 | return sequence_mask -------------------------------------------------------------------------------- /GAE/dataset.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import scipy.sparse as sp 4 | import numpy as np 5 | from collections import namedtuple 6 | 7 | 8 | Data = namedtuple('Data', ['x', 'y', 'adjacency_dict', 9 | 'train_mask', 'val_mask', 'test_mask']) 10 | 11 | class CoraData(): 12 | def __init__(self, data_root="data/cora/"): 13 | 14 | self._data_root = data_root 15 | 16 | self._data = self.process_data() 17 | 18 | 19 | def load_data(self, dataset="cora"): 20 | 21 | print('Loading {} dataset...'.format(dataset)) 22 | 23 | idx_features_labels = np.genfromtxt("{}{}.content".format(self._data_root, dataset), dtype=np.dtype(str)) 24 | 25 | edges = np.genfromtxt("{}{}.cites".format(self._data_root, dataset), dtype=np.int32) 26 
| 27 | return idx_features_labels, edges 28 | 29 | def process_data(self): 30 | 31 | print("Process data ...") 32 | 33 | idx_features_labels, edges = self.load_data() 34 | 35 | features = idx_features_labels[:, 1:-1].astype(np.float32) 36 | features = self.normalize_feature(features) 37 | 38 | y = idx_features_labels[:, -1] 39 | labels = self.encode_onehot(y) 40 | 41 | idx = np.array(idx_features_labels[:, 0], dtype=np.int32) 42 | 43 | for self_idx in idx: 44 | edges = np.vstack((edges, [self_idx, self_idx])) 45 | 46 | idx_map = {j: i for i, j in enumerate(idx)} 47 | edge_indexs = np.array(list(map(idx_map.get, edges.flatten())), dtype=np.int32) 48 | edge_indexs = edge_indexs.reshape(edges.shape) 49 | 50 | adjacency = {} 51 | for edge in edge_indexs: 52 | key = edge[0].astype(np.int32) 53 | value = edge[1].astype(np.int32) 54 | 55 | target_value = np.array([]) 56 | if key in adjacency.keys(): 57 | target_value = adjacency[key] 58 | 59 | target_value = np.append(target_value, value) 60 | 61 | adjacency.update({key : target_value}) 62 | 63 | 64 | train_index = np.arange(150) 65 | val_index = np.arange(150, 500) 66 | test_index = np.arange(500, 2708) 67 | 68 | train_mask = np.zeros(edge_indexs.shape[0], dtype = np.bool) 69 | val_mask = np.zeros(edge_indexs.shape[0], dtype = np.bool) 70 | test_mask = np.zeros(edge_indexs.shape[0], dtype = np.bool) 71 | train_mask[train_index] = True 72 | val_mask[val_index] = True 73 | test_mask[test_index] = True 74 | 75 | print('Dataset has {} nodes, {} edges, {} features.'.format(features.shape[0], len(adjacency), features.shape[1])) 76 | 77 | return Data(x=features, y=labels, adjacency_dict=adjacency, 78 | train_mask=train_mask, val_mask=val_mask, test_mask=test_mask) 79 | 80 | def encode_onehot(self, labels): 81 | classes = set(labels) 82 | classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)} 83 | labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32) 84 | return labels_onehot 85 | 86 | def normalize_adj(self, adjacency): 87 | 88 | """计算 L=D^-0.5 * (A+I) * D^-0.5""" 89 | adjacency += sp.eye(adjacency.shape[0]) # 增加自连接 90 | degree = np.array(adjacency.sum(1)) 91 | d_hat = sp.diags(np.power(degree, -0.5).flatten()) 92 | 93 | return d_hat.dot(adjacency).dot(d_hat).tocsr().todense() 94 | 95 | def normalize_feature(self, features): 96 | 97 | normal_features = features / features.sum(1).reshape(-1, 1) 98 | 99 | return normal_features 100 | 101 | def data(self): 102 | """返回Data数据对象,包括features, labes, adjacency, train_mask, val_mask, test_mask""" 103 | return self._data -------------------------------------------------------------------------------- /GAT/train_batch.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | import numpy as np 4 | from model import GraphAttentionModel 5 | from dataset import CoraData 6 | from collections import namedtuple 7 | 8 | import matplotlib.pyplot as plt 9 | 10 | # INPUT_DIM = 1433 # 输入维度 11 | # # Note: 采样的邻居阶数需要与GCN的层数保持一致 12 | # HIDDEN_DIM = [128, 7] # 隐藏单元节点数 13 | # NUM_NEIGHBORS_LIST = [10, 10] # 每阶采样邻居的节点数 14 | # assert len(HIDDEN_DIM) == len(NUM_NEIGHBORS_LIST) 15 | BTACH_SIZE = 16 # 批处理大小 16 | EPOCHS = 20 17 | NUM_BATCH_PER_EPOCH = 20 # 每个epoch循环的批次数 18 | LEARNING_RATE = 0.01 # 学习率 19 | NUM_HEADS = 8 20 | INPUT_DIM = 1433 21 | HIDDEN_DIM = 24 22 | OUTPUT_DIM = 7 23 | 24 | Data = namedtuple('Data', ['x', 'y', 'traj','train_mask', 'val_mask', 'test_mask']) 25 | 26 | data = CoraData().data() 27 | 28 | 
train_index = np.where(data.train_mask)[0] 29 | train_label = data.y[train_index] 30 | test_index = np.where(data.test_mask)[0] 31 | val_index = np.where(data.val_mask)[0] 32 | 33 | model = GraphAttentionModel(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, NUM_HEADS) 34 | 35 | loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) 36 | optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, decay=5e-4) 37 | 38 | # 记录过程值,以便最后可视化 39 | train_loss_results = [] 40 | train_accuracy_results = [] 41 | train_val_results = [] 42 | train_test_results = [] 43 | 44 | def train(): 45 | for e in range(EPOCHS): 46 | for batch in range(NUM_BATCH_PER_EPOCH): 47 | batch_src_index = np.random.choice(train_index, size=(BTACH_SIZE,)) 48 | batch_src_label = train_label[batch_src_index].astype(float) 49 | 50 | batch_sampling_x = data.x[batch_src_index] 51 | batch_adj = data.adj[np.ix_(batch_src_index, batch_src_index)] 52 | 53 | loss = 0.0 54 | with tf.GradientTape() as tape: 55 | batch_train_logits = model([batch_sampling_x, batch_adj], training = True) 56 | loss = loss_object(batch_src_label, batch_train_logits) 57 | grads = tape.gradient(loss, model.trainable_variables) 58 | 59 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 60 | 61 | # print("Epoch {:03d} Batch {:03d} Loss: {:.4f}".format(e, batch, loss)) 62 | 63 | train_accuracy = test(train_index) 64 | val_accuracy = test(val_index) 65 | test_accuracy = test(test_index) 66 | 67 | train_loss_results.append(loss) 68 | train_accuracy_results.append(train_accuracy) 69 | train_val_results.append(val_accuracy) 70 | train_test_results.append(test_accuracy) 71 | 72 | print("Epoch {:03d} train accuracy: {} val accuracy: {} test accuracy:{}".format(e, train_accuracy, val_accuracy, test_accuracy)) 73 | 74 | # ISSUE: https://stackoverflow.com/questions/58947679/no-gradients-provided-for-any-variable-in-tensorflow2-0 75 | 76 | # 训练过程可视化 77 | fig, axes = plt.subplots(4, sharex=True, figsize=(12, 8)) 78 | fig.suptitle('Training Metrics') 79 | 80 | axes[0].set_ylabel("Loss", fontsize=14) 81 | axes[0].plot(train_loss_results) 82 | 83 | axes[1].set_ylabel("Accuracy", fontsize=14) 84 | axes[1].plot(train_accuracy_results) 85 | 86 | axes[2].set_ylabel("Val Acc", fontsize=14) 87 | axes[2].plot(train_val_results) 88 | 89 | axes[3].set_ylabel("Test Acc", fontsize=14) 90 | axes[3].plot(train_test_results) 91 | 92 | plt.show() 93 | 94 | def test(index): 95 | test_x = data.x[index] 96 | 97 | test_adj = data.adj[np.ix_(index, index)] 98 | 99 | test_logits = model([test_x, test_adj], training = False) 100 | test_label = data.y[index] 101 | 102 | ll = tf.math.equal(tf.math.argmax(test_label, -1), tf.math.argmax(test_logits, -1)) 103 | accuarcy = tf.reduce_mean(tf.cast(ll, dtype=tf.float32)) 104 | 105 | return accuarcy 106 | 107 | 108 | if __name__ == '__main__': 109 | train() -------------------------------------------------------------------------------- /GAE/train.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | import numpy as np 4 | from model import GraphSage 5 | from dataset import CoraData 6 | from sampling import multihop_sampling 7 | from collections import namedtuple 8 | 9 | import matplotlib.pyplot as plt 10 | 11 | INPUT_DIM = 1433 # 输入维度 12 | # Note: 采样的邻居阶数需要与GCN的层数保持一致 13 | HIDDEN_DIM = [128, 7] # 隐藏单元节点数 14 | NUM_NEIGHBORS_LIST = [10, 10] # 每阶采样邻居的节点数 15 | assert len(HIDDEN_DIM) == len(NUM_NEIGHBORS_LIST) 16 | BTACH_SIZE = 16 # 批处理大小 17 | EPOCHS = 20 18 | NUM_BATCH_PER_EPOCH = 
20 # 每个epoch循环的批次数 19 | LEARNING_RATE = 0.01 # 学习率 20 | 21 | Data = namedtuple('Data', ['x', 'y', 'adjacency_dict', 22 | 'train_mask', 'val_mask', 'test_mask']) 23 | 24 | data = CoraData().data() 25 | 26 | train_index = np.where(data.train_mask)[0] 27 | train_label = data.y[train_index] 28 | test_index = np.where(data.test_mask)[0] 29 | val_index = np.where(data.val_mask)[0] 30 | 31 | model = GraphSage(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, 32 | num_neighbors_list=NUM_NEIGHBORS_LIST) 33 | 34 | loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) 35 | optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, decay=5e-4) 36 | 37 | # 记录过程值,以便最后可视化 38 | train_loss_results = [] 39 | train_accuracy_results = [] 40 | train_val_results = [] 41 | train_test_results = [] 42 | 43 | def train(): 44 | for e in range(EPOCHS): 45 | for batch in range(NUM_BATCH_PER_EPOCH): 46 | batch_src_index = np.random.choice(train_index, size=(BTACH_SIZE,)) 47 | batch_src_label = train_label[batch_src_index].astype(float) 48 | 49 | batch_sampling_result = multihop_sampling(batch_src_index, NUM_NEIGHBORS_LIST, data.adjacency_dict) 50 | batch_sampling_x = [data.x[np.array(idx.astype(np.int32))] for idx in batch_sampling_result] 51 | 52 | loss = 0.0 53 | with tf.GradientTape() as tape: 54 | batch_train_logits = model(batch_sampling_x) 55 | loss = loss_object(batch_src_label, batch_train_logits) 56 | grads = tape.gradient(loss, model.trainable_variables) 57 | 58 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 59 | 60 | # print("Epoch {:03d} Batch {:03d} Loss: {:.4f}".format(e, batch, loss)) 61 | 62 | train_accuracy = test(train_index) 63 | val_accuracy = test(val_index) 64 | test_accuracy = test(test_index) 65 | 66 | train_loss_results.append(loss) 67 | train_accuracy_results.append(train_accuracy) 68 | train_val_results.append(val_accuracy) 69 | train_test_results.append(test_accuracy) 70 | 71 | print("Epoch {:03d} train accuracy: {} val accuracy: {} test accuracy:{}".format(e, train_accuracy, val_accuracy, test_accuracy)) 72 | 73 | # ISSUE: https://stackoverflow.com/questions/58947679/no-gradients-provided-for-any-variable-in-tensorflow2-0 74 | 75 | # 训练过程可视化 76 | fig, axes = plt.subplots(4, sharex=True, figsize=(12, 8)) 77 | fig.suptitle('Training Metrics') 78 | 79 | axes[0].set_ylabel("Loss", fontsize=14) 80 | axes[0].plot(train_loss_results) 81 | 82 | axes[1].set_ylabel("Accuracy", fontsize=14) 83 | axes[1].plot(train_accuracy_results) 84 | 85 | axes[2].set_ylabel("Val Acc", fontsize=14) 86 | axes[2].plot(train_val_results) 87 | 88 | axes[3].set_ylabel("Test Acc", fontsize=14) 89 | axes[3].plot(train_test_results) 90 | 91 | plt.show() 92 | 93 | def test(index): 94 | test_sampling_result = multihop_sampling(index, NUM_NEIGHBORS_LIST, data.adjacency_dict) 95 | test_x = [data.x[idx.astype(np.int32)] for idx in test_sampling_result] 96 | test_logits = model(test_x) 97 | test_label = data.y[index] 98 | 99 | ll = tf.math.equal(tf.math.argmax(test_label, -1), tf.math.argmax(test_logits, -1)) 100 | accuarcy = tf.reduce_mean(tf.cast(ll, dtype=tf.float32)) 101 | 102 | return accuarcy 103 | 104 | 105 | if __name__ == '__main__': 106 | train() -------------------------------------------------------------------------------- /GAT/dataset.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import scipy.sparse as sp 4 | import numpy as np 5 | from collections import namedtuple 6 | 7 | 8 | Data = 
10 | 
11 | class CoraData():
12 |     def __init__(self, data_root="data/cora/"):
13 | 
14 |         self._data_root = data_root
15 | 
16 |         self._data = self.process_data()
17 | 
18 | 
19 |     def load_data(self, dataset="cora"):
20 | 
21 |         print('Loading {} dataset...'.format(dataset))
22 | 
23 |         idx_features_labels = np.genfromtxt("{}{}.content".format(self._data_root, dataset), dtype=np.dtype(str))
24 | 
25 |         edges = np.genfromtxt("{}{}.cites".format(self._data_root, dataset), dtype=np.int32)
26 | 
27 |         return idx_features_labels, edges
28 | 
29 |     def process_data(self):
30 | 
31 |         print("Process data ...")
32 | 
33 |         idx_features_labels, edges = self.load_data()
34 | 
35 |         features = idx_features_labels[:, 1:-1].astype(np.float32)
36 |         features = self.normalize_feature(features)
37 | 
38 |         y = idx_features_labels[:, -1]
39 |         labels = self.encode_onehot(y)
40 | 
41 |         # idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
42 | 
43 |         # for self_idx in idx:
44 |         #     edges = np.vstack((edges, [self_idx, self_idx]))
45 | 
46 |         # idx_map = {j: i for i, j in enumerate(idx)}
47 |         # edge_indexs = np.array(list(map(idx_map.get, edges.flatten())), dtype=np.int32)
48 |         # edge_indexs = edge_indexs.reshape(edges.shape)
49 | 
50 |         idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
51 |         idx_map = {j: i for i, j in enumerate(idx)}
52 |         edge_indexs = np.array(list(map(idx_map.get, edges.flatten())), dtype=np.int32)
53 |         edge_indexs = edge_indexs.reshape(edges.shape)
54 |         adjacency = sp.coo_matrix((np.ones(len(edge_indexs)),
55 |                                    (edge_indexs[:, 0], edge_indexs[:, 1])),
56 |                                   shape=(features.shape[0], features.shape[0]), dtype="float32")
57 |         adjacency += sp.eye(adjacency.shape[0])  # add self-loops
58 | 
59 | 
60 |         # adjacency = {}
61 |         # for edge in edge_indexs:
62 |         #     key = edge[0].astype(np.int32)
63 |         #     value = edge[1].astype(np.int32)
64 | 
65 |         #     target_value = np.array([])
66 |         #     if key in adjacency.keys():
67 |         #         target_value = adjacency[key]
68 | 
69 |         #     target_value = np.append(target_value, value)
70 | 
71 |         #     adjacency.update({key : target_value})
72 | 
73 | 
74 |         train_index = np.arange(150)
75 |         val_index = np.arange(150, 500)
76 |         test_index = np.arange(500, 2708)
77 | 
78 |         train_mask = np.zeros(features.shape[0], dtype = bool)  # one mask entry per node
79 |         val_mask = np.zeros(features.shape[0], dtype = bool)
80 |         test_mask = np.zeros(features.shape[0], dtype = bool)
81 |         train_mask[train_index] = True
82 |         val_mask[val_index] = True
83 |         test_mask[test_index] = True
84 | 
85 |         print('Dataset has {} nodes, {} edges, {} features.'.format(features.shape[0], edges.shape[0], features.shape[1]))
86 | 
87 |         return Data(x=features, y=labels, adj=adjacency.toarray(),
88 |                     train_mask=train_mask, val_mask=val_mask, test_mask=test_mask)
89 | 
90 |     def encode_onehot(self, labels):
91 |         classes = set(labels)
92 |         classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
93 |         labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
94 |         return labels_onehot
95 | 
96 |     def normalize_adj(self, adjacency):
97 | 
98 |         """Compute L = D^-0.5 * (A + I) * D^-0.5"""
99 |         adjacency += sp.eye(adjacency.shape[0])  # add self-loops
100 |         degree = np.array(adjacency.sum(1))
101 |         d_hat = sp.diags(np.power(degree, -0.5).flatten())
102 | 
103 |         return d_hat.dot(adjacency).dot(d_hat).tocsr().todense()
104 | 
105 |     def normalize_feature(self, features):
106 | 
107 |         normal_features = features / features.sum(1).reshape(-1, 1)
108 | 
109 |         return normal_features
110 | 
111 |     def data(self):
112 |         """Return the Data namedtuple: features, labels, adj, train_mask, val_mask, test_mask"""
113 |         return self._data
--------------------------------------------------------------------------------
/Social-LSTM/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | 
3 | # class EmbeddingLayer(tf.keras.layers.Layer):
4 | #     def __init__(self, args):
5 | #         super(EmbeddingLayer, self).__init__()
6 | #         self.in_dim = 2
7 | #         self.out_dim = args.embedding_size
8 | 
9 | #         self.weight = self.add_weight(shape=(self.in_dim, self.out_dim),
10 | #                                       initializer='glorot_uniform',
11 | #                                       name='embedding_weight')
12 | 
13 | #         self.bias = self.add_weight(shape=(self.out_dim,),
14 | #                                     initializer='zeros',
15 | #                                     name='embedding_bias')
16 | 
17 | #         self.activation = tf.keras.activations.relu
18 | 
19 | #     def call(self, x):
20 | #         x = tf.matmul(x, self.weight)
21 | #         x = tf.add(x, self.bias)
22 | 
23 | #         output = self.activation(x)
24 | 
25 | #         return output
26 | 
27 | # https://stackoverflow.com/questions/60624960/tf-keras-layers-rnn-vs-tf-keras-layers-stackedrnncells-tensorflow-2
28 | 
29 | class Model(tf.keras.Model):
30 | 
31 |     def __init__(self, args):
32 |         super(Model, self).__init__()
33 |         self.args = args
34 | 
35 |         # dim = tf.zeros([args.batch_size, args.rnn_size])
36 |         # rnn_cells = [tf.keras.layers.LSTMCell(args.rnn_size) for _ in range(args.num_layers)]
37 | 
38 |         # for rnn_cell in rnn_cells:
39 |         #     print("+++++++++++++{}".format(rnn_cell.state_size))
40 | 
41 |         # # stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
42 |         # # initial_state = stacked_lstm.get_initial_state(batch_size=args.batch_size, dtype=tf.float32)
43 |         # self.lstm_layer = tf.keras.layers.RNN(rnn_cells, return_sequences=True, return_state=True)
44 |         # Output size is the set of parameters (mu, sigma, corr)
45 |         self.output_size = 5 # 2 mu, 2 sigma and 1 corr
46 | 
47 |         # self.input_layer = tf.keras.layers.Input(batch_shape=(batch_size, None, 2))
48 | 
49 |         self.embedding_layer = tf.keras.layers.Dense(args.embedding_size, activation = tf.keras.activations.relu) # EmbeddingLayer(args)
50 | 
51 |         self.lstm_layer = tf.keras.layers.LSTM(args.rnn_size, return_sequences=True,
52 |                                                return_state = True,
53 |                                                stateful=True)
54 | 
55 |         self.dense = tf.keras.layers.Dense(self.output_size)
56 | 
57 |         # self.initial_state = None
58 | 
59 |         # self.lstm_layer = tf.keras.Sequential([
60 |         #     tf.keras.layers.Dense(args.embedding_size,
61 |         #                           activation = tf.keras.activations.relu),
62 |         # ])
63 | 
64 |         self.model = tf.keras.Sequential([
65 |             tf.keras.layers.Dense(args.embedding_size, activation = tf.keras.activations.relu,
66 |                                   batch_input_shape = [args.batch_size, None, 2]),
67 |             # self.model.add(tf.keras.layers.LSTM(args.rnn_size, return_sequences=True,
68 |             #                                     return_state = True,
69 |             #                                     stateful=True))
70 |             tf.keras.layers.GRU(args.rnn_size,
71 |                                 return_sequences=True,
72 |                                 stateful=True,
73 |                                 recurrent_initializer='glorot_uniform'),
74 |             tf.keras.layers.Dense(self.output_size)
75 |         ])
76 | 
77 |     def call(self, x):
78 |         # print("=========================x shape:{}".format(x.shape))
79 | 
80 |         # inputs = tf.split(x, self.args.seq_length, 1)
81 |         # inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
82 | 
83 |         # x = self.input_layer(x)
84 |         # IN:(50, 10, 2), OUT:(50, 10, 128)
85 |         # x = self.embedding_layer(x)
86 | 
87 |         # # print("=========================x11 shape:{}".format(x.shape))
88 |         # # # IN: (50, 10, 128), OUT:()
89 |         # outputs, state_h, state_c = self.lstm_layer(x)
90 | 
91 |         # # self.initial_state = [state_h, state_c]
92 | 
93 |         # # print("===================outputs shape:{}".format(outputs.shape))
94 | 
95 |         # output = self.dense(outputs)
96 | 
97 |         output = self.model(x)
98 | 
99 |         # print("=====================output shape:{}".format(output.shape))
100 | 
101 |         return output
102 |         # inputs = tf.split(x, self.args.seq_length, 1)
103 |         # inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
104 | 
105 |         # embedded_inputs = []
106 |         # for input in inputs:
107 |         #     # Each x is a 2D tensor of size numPoints x 2
108 |         #     # Embedding layer
109 |         #     embedded_x = self.embedding_layer(input)
110 | 
111 |         #     print("===================embedded shape:{}".format(embedded_x.shape))
112 | 
113 |         #     embedded_inputs.append(embedded_x)
114 | 
115 |         # whole_seq_output, final_memory_state, final_carry_state = self.lstm_layer(embedded_inputs)
116 | 
117 |         # outputs, last_state = self.lstm_layer(embedded_inputs)
118 | 
119 |         # output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
120 | 
121 |         # output = self.dense(output)
122 | 
123 | 
--------------------------------------------------------------------------------
/GAE/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | 
3 | class NeighborAggregator(tf.keras.Model):
4 |     def __init__(self, input_dim, output_dim,
5 |                  use_bias=False, aggr_method="mean"):
6 |         """Aggregate the features of neighboring nodes
7 |         Args:
8 |             input_dim: dimension of the input features
9 |             output_dim: dimension of the output features
10 |             use_bias: whether to use a bias (default: {False})
11 |             aggr_method: neighbor aggregation method (default: {mean})
12 |         """
13 |         super(NeighborAggregator, self).__init__()
14 | 
15 |         self.input_dim = input_dim
16 |         self.output_dim = output_dim
17 |         self.use_bias = use_bias
18 |         self.aggr_method = aggr_method
19 | 
20 |         self.weight = self.add_weight(shape = (self.input_dim, self.output_dim),
21 |                                       initializer = 'glorot_uniform',
22 |                                       name = 'kernel')
23 | 
24 |         if self.use_bias:
25 |             self.bias = self.add_weight(shape = (self.output_dim, ),
26 |                                         initializer = 'zeros',
27 |                                         name = 'bias')
28 | 
29 |     def call(self, neighbor_feature):
30 |         if self.aggr_method == "mean":
31 |             aggr_neighbor = tf.math.reduce_mean(neighbor_feature, axis = 1)
32 |         elif self.aggr_method == "sum":
33 |             aggr_neighbor = tf.math.reduce_sum(neighbor_feature, axis = 1)
34 |         elif self.aggr_method == "max":
35 |             aggr_neighbor = tf.math.reduce_max(neighbor_feature, axis = 1)
36 |         else:
37 |             raise ValueError("Unknown aggr type, expected sum, max, or mean, but got {}"
38 |                              .format(self.aggr_method))
39 | 
40 |         neighbor_hidden = tf.matmul(aggr_neighbor, self.weight)
41 |         if self.use_bias:
42 |             neighbor_hidden += self.bias
43 | 
44 |         return neighbor_hidden
45 | 
46 | 
47 | class SageGCN(tf.keras.Model):
48 |     def __init__(self, input_dim, hidden_dim,
49 |                  activation=tf.keras.activations.relu,
50 |                  aggr_neighbor_method="mean",
51 |                  aggr_hidden_method="sum"):
52 |         """Definition of a SageGCN layer
53 |         Args:
54 |             input_dim: dimension of the input features
55 |             hidden_dim: dimension of the hidden features;
56 |                 with aggr_hidden_method=sum the output dimension is hidden_dim,
57 |                 with aggr_hidden_method=concat the output dimension is hidden_dim*2
58 |             activation: activation function
59 |             aggr_neighbor_method: neighbor aggregation method, one of ["mean", "sum", "max"]
60 |             aggr_hidden_method: node feature update method, one of ["sum", "concat"]
61 |         """
62 |         super(SageGCN, self).__init__()
63 | 
64 |         assert aggr_neighbor_method in ["mean", "sum", "max"]
65 |         assert aggr_hidden_method in ["sum", "concat"]
66 | 
67 |         self.input_dim = input_dim
68 |         self.hidden_dim = hidden_dim
69 |         self.aggr_neighbor_method = aggr_neighbor_method
70 |         self.aggr_hidden_method = aggr_hidden_method
71 |         self.activation = activation
72 |         self.aggregator = NeighborAggregator(input_dim, hidden_dim,
73 |                                              aggr_method=aggr_neighbor_method)
74 | 
75 |         self.weight = self.add_weight(shape = (self.input_dim, self.hidden_dim),
76 |                                       initializer = 'glorot_uniform',
77 |                                       name = 'kernel')
78 | 
79 | 
80 |     def call(self, src_node_features, neighbor_node_features):
81 |         neighbor_hidden = self.aggregator(neighbor_node_features)
82 |         self_hidden = tf.matmul(src_node_features, self.weight)
83 | 
84 |         if self.aggr_hidden_method == "sum":
85 |             hidden = self_hidden + neighbor_hidden
86 |         elif self.aggr_hidden_method == "concat":
87 |             hidden = tf.concat([self_hidden, neighbor_hidden], axis = 1)
88 |         else:
89 |             raise ValueError("Expected sum or concat, got {}"
90 |                              .format(self.aggr_hidden_method))
91 |         if self.activation:
92 |             return self.activation(hidden)
93 |         else:
94 |             return hidden
95 | 
96 | 
97 | class GraphSage(tf.keras.Model):
98 |     def __init__(self, input_dim, hidden_dim,
99 |                  num_neighbors_list):
100 | 
101 |         super(GraphSage, self).__init__()
102 | 
103 |         self.input_dim = input_dim
104 |         self.hidden_dim = hidden_dim
105 |         self.num_neighbors_list = num_neighbors_list
106 |         self.num_layers = len(num_neighbors_list)
107 |         self.gcn = []
108 |         self.gcn.append(SageGCN(input_dim, hidden_dim[0]))
109 | 
110 |         for index in range(0, len(hidden_dim) - 2):
111 |             self.gcn.append(SageGCN(hidden_dim[index], hidden_dim[index + 1]))
112 | 
113 |         self.gcn.append(SageGCN(hidden_dim[-2], hidden_dim[-1], activation=None))
114 | 
115 |     def call(self, node_features_list):
116 |         hidden = node_features_list
117 | 
118 |         for l in range(self.num_layers):
119 |             next_hidden = []
120 |             gcn = self.gcn[l]
121 |             for hop in range(self.num_layers - l):
122 |                 src_node_features = hidden[hop]
123 |                 src_node_num = len(src_node_features)
124 |                 neighbor_node_features = tf.reshape(hidden[hop + 1], (src_node_num, self.num_neighbors_list[hop], -1))
125 |                 h = gcn(src_node_features, neighbor_node_features)
126 |                 next_hidden.append(h)
127 |             hidden = next_hidden
128 |         return hidden[0]
--------------------------------------------------------------------------------
/GAT/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | 
3 | # Reference: https://github.com/danielegrattarola/keras-gat/blob/master/keras_gat/graph_attention_layer.py
4 | 
5 | class MultiHeadGATLayer(tf.keras.layers.Layer):
6 |     def __init__(self, in_dim, out_dim,
7 |                  attn_heads = 1,
8 |                  attn_heads_reduction = 'concat',  # {'concat', 'average'}
9 |                  dropout_rate = 0.1,
10 |                  activation = None,
11 |                  use_bias = True,
12 |                  kernel_initializer = 'glorot_uniform',
13 |                  bias_initializer = 'zeros',
14 |                  attn_kernel_initializer = 'glorot_uniform',
15 |                  kernel_regularizer = None,
16 |                  bias_regularizer = None,
17 |                  attn_kernel_regularizer = None,
18 |                  activity_regularizer = None,
19 |                  **kwargs):
20 | 
21 |         if attn_heads_reduction not in {'concat', 'average'}:
22 |             raise ValueError('Possible reduction methods: concat, average')
23 | 
24 |         self.in_dim = in_dim
25 |         self.out_dim = out_dim
26 | 
27 |         self.attn_heads = attn_heads
28 |         self.attn_heads_reduction = attn_heads_reduction
29 |         self.dropout_rate = dropout_rate
30 |         self.activation = activation
31 |         self.use_bias = use_bias
32 | 
33 |         self.kernel_initializer = kernel_initializer
34 |         self.bias_initializer = bias_initializer
35 |         self.attn_kernel_initializer = attn_kernel_initializer
36 | 
37 |         self.kernel_regularizer = kernel_regularizer
38 |         self.bias_regularizer = bias_regularizer
39 |         self.attn_kernel_regularizer = attn_kernel_regularizer
40 |         self.activity_regularizer = activity_regularizer
41 | 
42 |         self.kernels = []
43 |         self.biases = []
44 |         self.atten_kernels = []
45 | 
46 |         super(MultiHeadGATLayer, self).__init__(**kwargs)
47 | 
48 |     def build(self, input_shape):
49 |         assert len(input_shape) >= 2
50 | 
51 |         for head in range(self.attn_heads):
52 |             kernel = self.add_weight(shape=(self.in_dim, self.out_dim),
53 |                                      initializer=self.kernel_initializer,
54 |                                      regularizer=self.kernel_regularizer,
55 |                                      name='kernel_{}'.format(head))
56 |             self.kernels.append(kernel)
57 | 
58 |             if self.use_bias:
59 |                 bias = self.add_weight(shape=(self.out_dim, ),
60 |                                        initializer=self.bias_initializer,
61 |                                        regularizer=self.bias_regularizer,
62 |                                        name='bias_{}'.format(head))
63 |                 self.biases.append(bias)
64 | 
65 | 
66 |             atten_kernel = self.add_weight(shape=(2 * self.out_dim, 1),
67 |                                            initializer=self.attn_kernel_initializer,
68 |                                            regularizer=self.attn_kernel_regularizer,
69 |                                            name='attn_kernel_{}'.format(head))
70 |             self.atten_kernels.append(atten_kernel)
71 | 
72 |         self.built = True
73 | 
74 |     def call(self, inputs, training):
75 |         X = inputs[0]
76 |         A = inputs[1]
77 | 
78 |         N = X.shape[0]
79 | 
80 |         outputs = []
81 |         for head in range(self.attn_heads):
82 | 
83 |             kernel = self.kernels[head]
84 | 
85 |             features = tf.matmul(X, kernel)
86 | 
87 |             concat_features = tf.concat(\
88 |                 [tf.reshape(tf.tile(features, [1, N]), [N * N, -1]),\
89 |                 tf.tile(features, [N, 1])], axis = 1)  # all N*N pairwise concatenations [h_i || h_j]
90 | 
91 |             concat_features = tf.reshape(concat_features, [N, -1, 2 * self.out_dim])
92 | 
93 |             atten_kernel = self.atten_kernels[head]
94 | 
95 |             dense = tf.matmul(concat_features, atten_kernel)
96 | 
97 |             dense = tf.keras.layers.LeakyReLU(alpha=0.2)(dense)
98 | 
99 |             dense = tf.reshape(dense, [N, -1])
100 | 
101 |             zero_vec = -9e15 * tf.ones_like(dense)  # mask value for non-edges
102 |             attention = tf.where(A > 0, dense, zero_vec)
103 | 
104 |             dense = tf.keras.activations.softmax(attention, axis = -1)
105 | 
106 |             dropout_attn = tf.keras.layers.Dropout(self.dropout_rate)(dense, training = training)
107 |             dropout_feat = tf.keras.layers.Dropout(self.dropout_rate)(features, training = training)
108 | 
109 |             node_features = tf.matmul(dropout_attn, dropout_feat)
110 | 
111 |             if self.use_bias:
112 |                 node_features = tf.add(node_features, self.biases[head])
113 | 
114 |             outputs.append(node_features)
115 | 
116 |         if self.attn_heads_reduction == 'concat':
117 |             output = tf.concat(outputs, axis = -1)
118 |         else:
119 |             output = tf.reduce_mean(tf.stack(outputs), axis=0)  # average over the heads
120 | 
121 |         if self.activation is not None:
122 |             output = self.activation(output)
123 | 
124 |         return output
125 | 
126 | 
127 | class GraphAttentionModel(tf.keras.Model):
128 |     def __init__(self, in_dim, hidden_dim, out_dim, num_heads):
129 |         super(GraphAttentionModel, self).__init__()
130 | 
131 |         self.attention_layer1 = MultiHeadGATLayer(in_dim, hidden_dim, attn_heads = num_heads, activation = tf.keras.activations.elu)
132 | 
133 |         self.attention_layer2 = MultiHeadGATLayer(hidden_dim * num_heads, out_dim, attn_heads = 1)
134 | 
135 |     def call(self, x, training = False):
136 |         adj = x[1]
137 | 
138 |         x = self.attention_layer1(x, training)
139 | 
140 |         output = self.attention_layer2([x, adj], training)
141 | 
142 |         return output
--------------------------------------------------------------------------------
/Social-LSTM/dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pickle
3 | import numpy as np
4 | import random
5 | 
6 | class DataLoader():
7 | 
8 |     def __init__(self, batch_size=50, seq_length=5, 
--------------------------------------------------------------------------------
/Social-LSTM/dataset.py:
--------------------------------------------------------------------------------
import os
import pickle
import numpy as np
import random

class DataLoader():

    def __init__(self, batch_size=50, seq_length=5, datasets=[0, 1, 2, 3, 4], forcePreProcess=False):
        # List of data directories where raw data resides
        self.data_dirs = ['./data/eth/univ', './data/eth/hotel',
                          './data/ucy/zara/zara01', './data/ucy/zara/zara02',
                          './data/ucy/univ']
        # self.data_dirs = ['./data/eth/univ', './data/eth/hotel']

        self.used_data_dirs = [self.data_dirs[x] for x in datasets]

        # Data directory where the pre-processed pickle file resides
        self.data_dir = './data'

        # Store the batch size and the sequence length arguments
        self.batch_size = batch_size
        self.seq_length = seq_length

        # Define the path of the file in which the data needs to be stored
        data_file = os.path.join(self.data_dir, "trajectories.cpkl")

        # If the file doesn't exist already or if forcePreProcess is true
        if not(os.path.exists(data_file)) or forcePreProcess:
            print("Creating pre-processed data from raw data")
            # Preprocess the data from the csv files
            self.preprocess(self.used_data_dirs, data_file)

        # Load the data from the pickled file
        self.load_preprocessed(data_file)
        # Reset all the pointers
        self.reset_batch_pointer()

    def preprocess(self, data_dirs, data_file):
        all_ped_data = {}
        dataset_indices = []
        current_ped = 0
        # For each dataset
        for directory in data_dirs:
            # Define the path to its respective csv file
            file_path = os.path.join(directory, 'pixel_pos.csv')

            print("processing {}".format(file_path))

            # Load data from the csv file
            # Data is a 4 x numTrajPoints matrix
            # where each column is a (frameId, pedId, y, x) vector
            data = np.genfromtxt(file_path, delimiter=',')

            # Get the number of pedestrians in the current dataset
            numPeds = np.size(np.unique(data[1, :]))

            # For each pedestrian in the dataset
            for ped in range(1, numPeds+1):
                # Extract the trajectory of the current ped
                traj = data[:, data[1, :] == ped]
                # Format it as (x, y, frameId)
                traj = traj[[3, 2, 0], :]

                # Store this in the dictionary
                all_ped_data[current_ped + ped] = traj

            # Current dataset done
            dataset_indices.append(current_ped+numPeds)
            current_ped += numPeds

            print("peds in this dataset: {}".format(numPeds))

        # The complete data is a tuple of all pedestrian data, and dataset ped indices
        complete_data = (all_ped_data, dataset_indices)
        # Store the complete data into the pickle file
        f = open(data_file, "wb")
        pickle.dump(complete_data, f, protocol=2)
        f.close()

    def load_preprocessed(self, data_file):
        # Load data from the pickled file
        f = open(data_file, "rb")
        self.raw_data = pickle.load(f)
        f.close()

        # Get the pedestrian data from the pickle file
        all_ped_data = self.raw_data[0]
        # Not using dataset_indices for now
        # dataset_indices = self.raw_data[1]

        # Construct the data with sequences (or trajectories) longer than seq_length
        self.data = []
        counter = 0

        # For each pedestrian in the data
        for ped in all_ped_data:
            # Extract the trajectory
            traj = all_ped_data[ped]
            # If the length of the trajectory is greater than seq_length (+2 as we need both source and target data)
            if traj.shape[1] > (self.seq_length+2):
                # TODO: (Improve) Store only the (x,y) coordinates for now
                self.data.append(traj[[0, 1], :].T)
                # Number of batches this datapoint is worth
                counter += int(traj.shape[1] / (self.seq_length+2))

        print("all ped data len: {}, seq length: {}".format(len(all_ped_data), self.seq_length))
        # Calculate the number of batches (each of batch_size) in the data
        self.num_batches = int(counter / self.batch_size)

    def next_batch(self):
        '''
        Function to get the next batch of points
        '''
        # List of source and target data for the current batch
        x_batch = []
        y_batch = []
        # For each sequence in the batch
        for i in range(self.batch_size):
            # Extract the trajectory of the pedestrian pointed out by self.pointer
            traj = self.data[self.pointer]
            # Number of sequences corresponding to this trajectory
            n_batch = int(traj.shape[0] / (self.seq_length+2))
            # Randomly sample an index from which the trajectory is to be considered
            idx = random.randint(0, traj.shape[0] - self.seq_length - 2)
            # Append the trajectory from idx until seq_length into source and target data
            x_batch.append(np.copy(traj[idx:idx+self.seq_length, :]))
            y_batch.append(np.copy(traj[idx+1:idx+self.seq_length+1, :]))

            if random.random() < (1.0/float(n_batch)):
                # Adjust sampling probability:
                # if this is a long datapoint, sample this data more with
                # higher probability
                self.tick_batch_pointer()

        return x_batch, y_batch

    def tick_batch_pointer(self):
        '''
        Advance the data pointer
        '''
        self.pointer += 1
        if (self.pointer >= len(self.data)):
            self.pointer = 0

    def reset_batch_pointer(self):
        '''
        Reset the data pointer
        '''
        self.pointer = 0
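
# Added sketch (not from the original repo): the source/target slicing used by
# next_batch above, on a toy trajectory. The target y is simply x shifted one
# step forward, since the model learns to predict the next position.
if __name__ == "__main__":
    demo_traj = np.arange(20).reshape(10, 2)  # 10 fake (x, y) points
    demo_len, demo_idx = 5, 2
    demo_x = demo_traj[demo_idx:demo_idx + demo_len, :]
    demo_y = demo_traj[demo_idx + 1:demo_idx + demo_len + 1, :]
    print(demo_x[0], demo_y[0])  # y starts one point after x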
--------------------------------------------------------------------------------
/Social-LSTM/train.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import argparse
import os
import time
import pickle
import datetime

from model import Model
from dataset import DataLoader

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rnn_size', type=int, default=128,
                        help='size of RNN hidden state')
    parser.add_argument('--num_layers', type=int, default=1,
                        help='number of layers in the RNN')
    parser.add_argument('--model', type=str, default='lstm',
                        help='rnn, gru, or lstm')
    parser.add_argument('--batch_size', type=int, default=50,
                        help='minibatch size')
    parser.add_argument('--seq_length', type=int, default=8,
                        help='RNN sequence length')
    parser.add_argument('--num_epochs', type=int, default=200,
                        help='number of epochs')
    parser.add_argument('--save_every', type=int, default=500,
                        help='save frequency')
    parser.add_argument('--grad_clip', type=float, default=10.,
                        help='clip gradients at this value')
    parser.add_argument('--learning_rate', type=float, default=0.003,
                        help='learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.95,
                        help='decay rate for rmsprop')
    parser.add_argument('--keep_prob', type=float, default=0.8,
                        help='dropout keep probability')
    parser.add_argument('--embedding_size', type=int, default=64,
                        help='Embedding dimension for the spatial coordinates')
    parser.add_argument('--leaveDataset', type=int, default=1,
                        help='The dataset index to be left out in training')
    parser.add_argument('--test_dataset', type=int, default=4,
                        help='Dataset to be tested on')
    parser.add_argument('--obs_length', type=int, default=5,
                        help='Observed length of the trajectory')
    parser.add_argument('--pred_length', type=int, default=3,
                        help='Predicted length of the trajectory')

    args = parser.parse_args()
    # test(args)
    train(args)


def tf_2d_normal(x, y, mux, muy, sx, sy, rho):
    # Distances from the means
    normx = tf.math.subtract(x, mux)
    normy = tf.math.subtract(y, muy)
    # Calculate sx*sy
    sxsy = tf.math.multiply(sx, sy)
    # Calculate the exponential factor
    z = tf.math.square(tf.math.divide(normx, sx)) + tf.math.square(tf.math.divide(normy, sy)) - 2*tf.math.divide(tf.math.multiply(rho, tf.math.multiply(normx, normy)), sxsy)

    negRho = 1 - tf.math.square(rho)
    # Numerator
    result = tf.math.exp(tf.math.divide(-z, 2*negRho))
    # Normalization constant
    denom = 2 * np.pi * tf.math.multiply(sxsy, tf.math.sqrt(negRho))
    # Final PDF calculation
    result = tf.math.divide(result, denom)

    return result

def get_coef(output):
    z = output

    # Split the network output into the 5 distribution parameters
    z_mux, z_muy, z_sx, z_sy, z_corr = tf.split(z, 5, -1)

    # Exponentiate so the standard deviations are positive
    z_sx = tf.exp(z_sx)
    z_sy = tf.exp(z_sy)
    # Tanh keeps the correlation in (-1, 1)
    z_corr = tf.tanh(z_corr)

    return [z_mux, z_muy, z_sx, z_sy, z_corr]

def get_lossfunc(z_mux, z_muy, z_sx, z_sy, z_corr, x_data, y_data):

    result0 = tf_2d_normal(x_data, y_data, z_mux, z_muy, z_sx, z_sy, z_corr)

    epsilon = 1e-20

    result1 = -tf.math.log(tf.math.maximum(result0, epsilon))  # Numerical stability

    return tf.reduce_sum(result1)
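
# Added sketch (not from the original repo): a quick numeric check of
# tf_2d_normal. At the mean the density reduces to 1 / (2*pi*sx*sy*sqrt(1-rho^2)),
# so with sx = sy = 1 and rho = 0 it should print roughly 1/(2*pi) ~= 0.1592.
# Never called by the training code; invoke manually if desired.
def _check_tf_2d_normal():
    zero = tf.constant(0.0)
    one = tf.constant(1.0)
    print(tf_2d_normal(zero, zero, zero, zero, one, one, zero).numpy())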
def get_mean_error(pred_traj, true_traj, observed_length):
    error = np.zeros(len(true_traj) - observed_length)
    for i in range(observed_length, len(true_traj)):
        # The predicted position
        pred_pos = pred_traj[i, :]
        # The true position
        true_pos = true_traj[i, :]

        # The euclidean distance is the error
        error[i-observed_length] = np.linalg.norm(true_pos - pred_pos)

    # Return the mean error
    return np.mean(error)

def get_final_error(pred_traj, true_traj):

    error = np.linalg.norm(pred_traj[-1, :] - true_traj[-1, :])

    # Return the final-position error
    return error


def sample_gaussian_2d(mux, muy, sx, sy, rho):
    # Build the mean vector
    mean = [mux, muy]

    # Build the covariance matrix
    cov = [[sx*sx, rho*sx*sy], [rho*sx*sy, sy*sy]]
    # Sample a point from the multivariate normal distribution
    x = np.random.multivariate_normal(mean, cov, 1)
    return x[0][0], x[0][1]

def test(args):
    checkpoint_dir = './training_checkpoints'

    # Dataset to get data from
    dataset = [args.test_dataset]

    # Initialize the dataloader object to
    # get sequences of length obs_length+pred_length
    data_loader = DataLoader(1, args.pred_length + args.obs_length, dataset, True)

    # Reset the data pointers of the data loader object
    data_loader.reset_batch_pointer()

    args.batch_size = 1

    test_model = build_model(args)  # Model(args)

    # Restore the most recent checkpoint
    test_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))

    test_model.build(tf.TensorShape([1, None, 2]))

    # Maintain the total_error until now
    total_error = 0
    counter = 0
    final_error = 0.0

    truth_trajs = []
    pred_trajs = []
    gauss_params = []

    for b in range(data_loader.num_batches):
        # Get the source, target data for the next batch
        x, y = data_loader.next_batch()

        # Offset each trajectory so that it starts at the origin
        base_pos = np.array([[e_x[0] for _ in range(len(e_x))] for e_x in x])
        x = x - base_pos

        # The observed part of the trajectory
        obs_observed_traj = x[0][:args.obs_length]
        obs_observed_traj = tf.expand_dims(obs_observed_traj, 0)

        complete_traj = x[0][:args.obs_length]

        test_model.reset_states()

        # test_model.initial_state = None
        gauss_param = np.array([])

        for idx in range(args.pred_length):
            tensor_x = tf.convert_to_tensor(obs_observed_traj)

            logits = test_model(tensor_x)

            [o_mux, o_muy, o_sx, o_sy, o_corr] = get_coef(logits)

            next_x, next_y = sample_gaussian_2d(o_mux[0][-1][0], o_muy[0][-1][0], o_sx[0][-1][0], o_sy[0][-1][0], o_corr[0][-1][0])

            obs_observed_traj = tf.expand_dims([[next_x, next_y]], 0)

            if len(gauss_param) <= 0:
                gauss_param = np.array([o_mux[0][-1][0], o_muy[0][-1][0], o_sx[0][-1][0], o_sy[0][-1][0], o_corr[0][-1][0]])
            else:
                gauss_param = np.vstack((gauss_param, [o_mux[0][-1][0], o_muy[0][-1][0], o_sx[0][-1][0], o_sy[0][-1][0], o_corr[0][-1][0]]))

            complete_traj = np.vstack((complete_traj, [next_x, next_y]))

        total_error += get_mean_error(complete_traj + base_pos[0], x[0] + base_pos[0], args.obs_length)
        final_error += get_final_error(complete_traj + base_pos[0], x[0] + base_pos[0])

        pred_trajs.append(complete_traj)
        truth_trajs.append(x[0])
        gauss_params.append(gauss_param)

        print("Processed trajectory number: {} out of {} trajectories".format(b, data_loader.num_batches))

    # Print the mean error across all the batches
    print("Total mean error of the model is {}".format(total_error/data_loader.num_batches))
    print("Total final error of the model is {}".format(final_error/data_loader.num_batches))

    data_file = "./pred_results.pkl"
    f = open(data_file, "wb")
    pickle.dump([pred_trajs, truth_trajs, gauss_params], f)
    f.close()
def build_model(args):
    output_size = 5
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(args.embedding_size, activation=tf.keras.activations.relu,
                              batch_input_shape=[args.batch_size, None, 2]),
        tf.keras.layers.GRU(args.rnn_size,
                            return_sequences=True,
                            stateful=True,
                            recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(output_size)
    ])

    return model

def calc_prediction_error(mux, muy, sx, sy, corr, offset_positions, args):

    traj_nums = mux.shape[0]

    pred_nums = mux.shape[1]

    mean_error = 0.0
    final_error = 0.0
    for index in range(traj_nums):
        pred_traj = np.zeros((pred_nums, 2))
        for pt_index in range(pred_nums):
            next_x, next_y = sample_gaussian_2d(mux[index][pt_index][0],
                                                muy[index][pt_index][0], sx[index][pt_index][0],
                                                sy[index][pt_index][0], corr[index][pt_index][0])

            pred_traj[pt_index][0] = next_x
            pred_traj[pt_index][1] = next_y

        mean_error += get_mean_error(pred_traj, offset_positions[index], args.obs_length)
        final_error += get_final_error(pred_traj, offset_positions[index])

    mean_error = mean_error / traj_nums
    final_error = final_error / traj_nums

    return mean_error, final_error
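
# Added sketch (not from the original repo): train() and test() subtract each
# trajectory's first point ("base_pos") from every point, so the model only
# ever sees offsets relative to the start of the sequence. Toy illustration
# with one hypothetical trajectory; never called by the training code.
def _demo_base_pos_offset():
    x = [np.array([[3.0, 4.0], [3.5, 4.5], [4.0, 5.0]])]  # one fake trajectory
    base_pos = np.array([[e_x[0] for _ in range(len(e_x))] for e_x in x])
    print(x - base_pos)  # [[[0. 0.] [0.5 0.5] [1. 1.]]] -- starts at the origin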
def train(args):
    datasets = list(range(4))

    data_loader = DataLoader(args.batch_size, args.seq_length, datasets, forcePreProcess=True)

    # Create the plain (non-social) GRU model with the arguments
    model = build_model(args)

    train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
    optimizer = tf.keras.optimizers.RMSprop(args.learning_rate)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
    # test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    # test_summary_writer = tf.summary.create_file_writer(test_log_dir)

    # Directory where the checkpoints will be saved
    checkpoint_dir = './training_checkpoints'
    # Checkpoint file name (one per epoch)
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")

    for e in range(args.num_epochs):

        data_loader.reset_batch_pointer()
        model.reset_states()

        for batch in range(data_loader.num_batches):
            start = time.time()

            x, y = data_loader.next_batch()

            # Offset both source and target by the first point of each trajectory
            base_pos = np.array([[e_x[0] for _ in range(len(e_x))] for e_x in x])

            x_offset = x - base_pos
            y_offset = y - base_pos

            with tf.GradientTape() as tape:
                tensor_x = tf.convert_to_tensor(x_offset, dtype=tf.float32)

                logits = model(tensor_x)

                [o_mux, o_muy, o_sx, o_sy, o_corr] = get_coef(logits)

                tensor_y = tf.convert_to_tensor(y_offset, dtype=tf.float32)

                [x_data, y_data] = tf.split(tensor_y, 2, -1)

                # Compute the loss function
                loss = get_lossfunc(o_mux, o_muy, o_sx, o_sy, o_corr, x_data, y_data)

                mean_error, final_error = calc_prediction_error(o_mux, o_muy, o_sx, o_sy, o_corr, tensor_y, args)

                loss = tf.math.divide(loss, (args.batch_size * args.seq_length))

            grads = tape.gradient(loss, model.trainable_variables)

            # Exponential learning-rate decay, per epoch
            optimizer.lr.assign(args.learning_rate * (args.decay_rate ** e))
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            train_loss(loss)

            end = time.time()
            # Print step, epoch, loss and time taken
            print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}, mean error = {}, final_error = {}"
                  .format(e * data_loader.num_batches + batch,
                          args.num_epochs * data_loader.num_batches,
                          e, loss, end - start, mean_error, final_error))

        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=e)
            # tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)

        model.save_weights(checkpoint_prefix.format(epoch=e))

    test(args)


if __name__ == '__main__':
    main()
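
# Added note (assumed usage, based only on the flags defined in main() above):
#
#   python train.py --batch_size 50 --seq_length 8 --num_epochs 200
#
# Training writes TensorBoard logs under logs/gradient_tape/ and checkpoints
# under ./training_checkpoints, then runs test() on --test_dataset.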
--------------------------------------------------------------------------------
/Social-LSTM/social_train.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import argparse
import os
import time
import pickle

from social_model import SocialModel
from social_dataset import SocialDataLoader
from grid import get_sequence_grid_mask

def main():
    parser = argparse.ArgumentParser()
    # RNN size parameter (dimension of the output/hidden state)
    parser.add_argument('--rnn_size', type=int, default=128,
                        help='size of RNN hidden state')
    # TODO: (improve) Number of layers not used. Only a single layer implemented
    # Number of layers parameter
    parser.add_argument('--num_layers', type=int, default=1,
                        help='number of layers in the RNN')
    # Model currently not used. Only LSTM implemented
    # Type of recurrent unit parameter
    parser.add_argument('--model', type=str, default='lstm',
                        help='rnn, gru, or lstm')
    # Size of each batch parameter
    parser.add_argument('--batch_size', type=int, default=10,
                        help='minibatch size')
    # Length of sequence to be considered parameter
    parser.add_argument('--seq_length', type=int, default=5,
                        help='RNN sequence length')
    # Number of epochs parameter
    parser.add_argument('--num_epochs', type=int, default=50,
                        help='number of epochs')
    # Frequency at which the model should be saved parameter
    parser.add_argument('--save_every', type=int, default=400,
                        help='save frequency')
    # TODO: (resolve) Clipping gradients for now. No idea whether we should
    # Gradient value at which it should be clipped
    parser.add_argument('--grad_clip', type=float, default=10.,
                        help='clip gradients at this value')
    # Learning rate parameter
    parser.add_argument('--learning_rate', type=float, default=0.003,
                        help='learning rate')
    # Decay rate for the learning rate parameter
    parser.add_argument('--decay_rate', type=float, default=0.95,
                        help='decay rate for rmsprop')
    # Dropout not implemented.
    # Dropout probability parameter
    parser.add_argument('--keep_prob', type=float, default=0.8,
                        help='dropout keep probability')
    # Dimension of the embeddings parameter
    parser.add_argument('--embedding_size', type=int, default=64,
                        help='Embedding dimension for the spatial coordinates')
    # Size of neighborhood to be considered parameter
    parser.add_argument('--neighborhood_size', type=int, default=32,
                        help='Neighborhood size to be considered for social grid')
    # Size of the social grid parameter
    parser.add_argument('--grid_size', type=int, default=2,
                        help='Grid size of the social grid')
    # Maximum number of pedestrians to be considered
    parser.add_argument('--max_num_peds', type=int, default=27,
                        help='Maximum Number of Pedestrians')
    # The leave-out dataset
    parser.add_argument('--leaveDataset', type=int, default=1,
                        help='The dataset index to be left out in training')
    args = parser.parse_args()
    train(args)

def tf_2d_normal(x, y, mux, muy, sx, sy, rho):
    '''
    Function that implements the PDF of a 2D normal distribution
    params:
    x : input x points
    y : input y points
    mux : mean of the distribution in x
    muy : mean of the distribution in y
    sx : std dev of the distribution in x
    sy : std dev of the distribution in y
    rho : Correlation factor of the distribution
    '''

    # eq 3 in the paper
    # and eq 24 & 25 in Graves (2013)
    # Calculate (x - mux) and (y - muy)
    normx = tf.math.subtract(x, mux)
    normy = tf.math.subtract(y, muy)
    # Calculate sx*sy
    sxsy = tf.math.multiply(sx, sy)
    # Calculate the exponential factor
    z = tf.math.square(tf.math.divide(normx, sx)) + tf.math.square(tf.math.divide(normy, sy)) - 2*tf.math.divide(tf.math.multiply(rho, tf.math.multiply(normx, normy)), sxsy)
    negRho = 1 - tf.math.square(rho)
    # Numerator
    result = tf.math.exp(tf.math.divide(-z, 2*negRho))
    # Normalization constant
    denom = 2 * np.pi * tf.math.multiply(sxsy, tf.math.sqrt(negRho))
    # Final PDF calculation
    result = tf.math.divide(result, denom)

    return result

def get_coef(output):
    # eq 20 -> 22 of Graves (2013)
    # TODO : (resolve) Does Social LSTM paper do this as well?
    # the paper says otherwise but this is essential as we cannot
    # have negative standard deviation and correlation needs to be between
    # -1 and 1

    z = output
    # Split the output into 5 parts corresponding to means, std devs and corr
    z_mux, z_muy, z_sx, z_sy, z_corr = tf.split(z, 5, -1)

    # The output must be exponentiated for the std devs
    z_sx = tf.exp(z_sx)
    z_sy = tf.exp(z_sy)
    # Tanh applied to keep it in the range [-1, 1]
    z_corr = tf.tanh(z_corr)

    return [z_mux, z_muy, z_sx, z_sy, z_corr]

def get_lossfunc(z_mux, z_muy, z_sx, z_sy, z_corr, x_data, y_data):
    '''
    Function to calculate the negative log-likelihood of the observed (x, y)
    target points under the predicted 2D distribution
    params:
    z_mux : mean of the distribution in x
    z_muy : mean of the distribution in y
    z_sx : std dev of the distribution in x
    z_sy : std dev of the distribution in y
    z_corr : Correlation factor of the distribution
    x_data : target x points
    y_data : target y points
    '''
    step = tf.constant(1e-3, dtype=tf.float32, shape=(1, 1))

    # Calculate the PDF of the data w.r.t. the distribution at the target point
    # and at three neighbouring points, one integration step away
    result0_1 = tf_2d_normal(x_data, y_data, z_mux, z_muy, z_sx, z_sy, z_corr)
    result0_2 = tf_2d_normal(tf.add(x_data, step), y_data, z_mux, z_muy, z_sx, z_sy, z_corr)
    result0_3 = tf_2d_normal(x_data, tf.add(y_data, step), z_mux, z_muy, z_sx, z_sy, z_corr)
    result0_4 = tf_2d_normal(tf.add(x_data, step), tf.add(y_data, step), z_mux, z_muy, z_sx, z_sy, z_corr)

    # Average the four densities and multiply by the cell area (step^2) to
    # approximate the probability mass in a step x step cell around the target
    result0 = tf.math.divide(tf.add(tf.add(tf.add(result0_1, result0_2), result0_3), result0_4), tf.constant(4.0, dtype=tf.float32, shape=(1, 1)))
    result0 = tf.math.multiply(tf.math.multiply(result0, step), step)

    # For numerical stability purposes
    epsilon = 1e-20

    # TODO: (resolve) I don't think we need this as we don't have the inner
    # summation
    # result1 = tf.reduce_sum(result0, 1, keep_dims=True)
    # Apply the log operation
    result1 = -tf.math.log(tf.math.maximum(result0, epsilon))  # Numerical stability

    # TODO: For now, implementing loss func over all time-steps
    # Sum up all log probabilities for each data point
    return tf.reduce_sum(result1)
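
# Added note (worked arithmetic, not from the original repo): unlike the
# raw-density loss in train.py, the version above integrates the density over a
# 1e-3 x 1e-3 cell. With a unit-variance, zero-correlation prediction centred on
# the target, the density is about 1/(2*pi) ~= 0.159, so the cell mass is about
# 0.159 * 1e-6 ~= 1.6e-7 and the per-point loss is -ln(1.6e-7) ~= 15.7. Since
# multiplying by the cell area only shifts the loss by the constant -ln(step^2),
# the gradients essentially match the raw-density loss (up to the four-point
# averaging and the epsilon clamp).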
def sample_gaussian_2d(mux, muy, sx, sy, rho):
    '''
    Function to sample a point from a given 2D normal distribution
    params:
    mux : mean of the distribution in x
    muy : mean of the distribution in y
    sx : std dev of the distribution in x
    sy : std dev of the distribution in y
    rho : Correlation factor of the distribution
    '''
    # Build the mean vector
    mean = [mux, muy]
    # Build the covariance matrix
    cov = [[sx*sx, rho*sx*sy], [rho*sx*sy, sy*sy]]
    # Sample a point from the multivariate normal distribution
    x = np.random.multivariate_normal(mean, cov, 1)
    return x[0][0], x[0][1]

# NOTE: sample() below appears to be leftover TF1-style (session/feed_dict)
# code; it is not called anywhere in this TF2 training script.
def sample(self, sess, traj, num=10):
    '''
    Given an initial trajectory (as a list of tuples of points), predict the future trajectory
    until a few timesteps
    Params:
    sess: Current session of Tensorflow
    traj: List of past trajectory points
    num: Number of time-steps into the future to be predicted
    '''
    # Initial state with zeros
    state = sess.run(self.cell.zero_state(1, tf.float32))

    # Iterate over all the positions seen in the trajectory
    for pos in traj[:-1]:
        # Create the input data tensor
        data = np.zeros((1, 1, 2), dtype=np.float32)
        data[0, 0, 0] = pos[0]  # x
        data[0, 0, 1] = pos[1]  # y

        # Create the feed dict
        feed = {self.input_data: data, self.initial_state: state}
        # Get the final state after processing the current position
        [state] = sess.run([self.final_state], feed)

    ret = traj

    # Last position in the observed trajectory
    last_pos = traj[-1]

    # Construct the input data tensor for the last point
    prev_data = np.zeros((1, 1, 2), dtype=np.float32)
    prev_data[0, 0, 0] = last_pos[0]  # x
    prev_data[0, 0, 1] = last_pos[1]  # y

    for t in range(num):
        # Create the feed dict
        feed = {self.input_data: prev_data, self.initial_state: state}

        # Get the final state and also the coef of the distribution of the next point
        [o_mux, o_muy, o_sx, o_sy, o_corr, state] = sess.run([self.mux, self.muy, self.sx, self.sy, self.corr, self.final_state], feed)

        # Sample the next point from the distribution
        next_x, next_y = sample_gaussian_2d(o_mux[0][0], o_muy[0][0], o_sx[0][0], o_sy[0][0], o_corr[0][0])
        # Append the new point to the trajectory
        ret = np.vstack((ret, [next_x, next_y]))

        # Set the current sampled position as the last observed position
        prev_data[0, 0, 0] = next_x
        prev_data[0, 0, 1] = next_y

    return ret
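
# Added sketch (not from the original repo): empirical check that
# sample_gaussian_2d reproduces the requested correlation. Parameters are
# hypothetical; never called by the training code.
def _check_sample_gaussian_2d():
    pts = np.array([sample_gaussian_2d(0.0, 0.0, 1.0, 1.0, 0.8)
                    for _ in range(5000)])
    print(np.corrcoef(pts[:, 0], pts[:, 1])[0, 1])  # should be close to 0.8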
def train(args):
    datasets = list(range(2))

    data_loader = SocialDataLoader(args.batch_size, args.seq_length, args.max_num_peds, datasets, forcePreProcess=True)

    model = SocialModel(args)

    optimizer = tf.keras.optimizers.RMSprop(args.learning_rate, decay=5e-4)

    for e in range(args.num_epochs):

        data_loader.reset_batch_pointer()

        for batch in range(data_loader.num_batches):
            start = time.time()

            x, y, d, num_peds, ped_ids = data_loader.next_batch()

            # Iterate over the sequences in this batch (renamed from `batch`,
            # which shadowed the outer batch counter used in the print below)
            for seq_index in range(data_loader.batch_size):

                x_batch, y_batch, d_batch, num_ped_batch, ped_id_batch = x[seq_index], y[seq_index], d[seq_index], num_peds[seq_index], ped_ids[seq_index]

                # Dataset-dependent frame dimensions
                if d_batch == 0 and datasets[0] == 0:
                    dataset_data = [640, 480]
                else:
                    dataset_data = [720, 576]

                # print(ped_id_batch)
                # print(num_ped_batch)

                grid_batch = get_sequence_grid_mask(x_batch, dataset_data, args.neighborhood_size, args.grid_size)

                # print("grid batch size:{}".format(grid_batch.shape))
                # print(np.where(grid_batch > 0))

                # ped_ids_index = dict(zip(ped_id_batch, range(0, len(ped_id_batch))))
                x_batch, ped_ids_index = data_loader.convert_proper_array(x_batch, num_ped_batch, ped_id_batch)

                train_loss = 0.0
                with tf.GradientTape() as tape:
                    tensor_x = tf.convert_to_tensor(x_batch, dtype=tf.float32)

                    logits = model(tensor_x, ped_id_batch, ped_ids_index)

                    [o_mux, o_muy, o_sx, o_sy, o_corr] = get_coef(logits)

                    # reshape target data so that it aligns with predictions
                    tensor_y = tf.convert_to_tensor(y, dtype=tf.float32)

                    # flat_target_data = tf.reshape(tensor_y, [-1, 2])
                    # Extract the x-coordinates and y-coordinates from the target data
                    [x_data, y_data] = tf.split(tensor_y, 2, -1)

                    # Compute the loss function
                    loss = get_lossfunc(o_mux, o_muy, o_sx, o_sy, o_corr, x_data, y_data)

                    # Compute the cost
                    train_loss = tf.math.divide(loss, (args.batch_size * args.seq_length))

                grads = tape.gradient(train_loss, model.trainable_variables)

                optimizer.apply_gradients(zip(grads, model.trainable_variables))

            end = time.time()
            # Print step, epoch, loss and time taken
            print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                  .format(e * data_loader.num_batches + batch,
                          args.num_epochs * data_loader.num_batches,
                          e, train_loss, end - start))


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/Social-LSTM/social_dataset.py:
--------------------------------------------------------------------------------
import os
import pickle
import numpy as np
import random

class SocialDataLoader():

    def __init__(self, batch_size=50, seq_length=5, max_num_peds=40, datasets=[0, 1, 2, 3, 4], forcePreProcess=False):

        # List of data directories where raw data resides
        # NOTE: only the two eth directories are active here, so valid
        # dataset indices are 0-1 (the default argument above predates this)
        # self.data_dirs = ['./data/eth/univ', './data/eth/hotel',
        #                   './data/ucy/zara/zara01', './data/ucy/zara/zara02',
        #                   './data/ucy/univ']
        self.data_dirs = ['./data/eth/univ', './data/eth/hotel']

        self.used_data_dirs = [self.data_dirs[x] for x in datasets]

        # Data directory where the pre-processed pickle file resides
        self.data_dir = './data'

        # Store the batch size and the sequence length arguments
        self.batch_size = batch_size
        self.seq_length = seq_length

        self.max_num_peds = max_num_peds

        # Define the path of the file in which the data needs to be stored
        data_file = os.path.join(self.data_dir, "social_trajectories.cpkl")

        # If the file doesn't exist already or if forcePreProcess is true
        if not(os.path.exists(data_file)) or forcePreProcess:
            print("Creating pre-processed data from raw data")
            # Preprocess the data from the csv files
            self.frame_preprocess(self.used_data_dirs, data_file)

        # Load the data from the pickled file
        self.load_preprocessed(data_file)
        # Reset all the pointers
        self.reset_batch_pointer()

    def frame_preprocess(self, data_dirs, data_file):
        '''
        Function that will pre-process the pixel_pos.csv files of each dataset
        into data with occupancy grid that can be used
        params:
        data_dirs : List of directories where raw data resides
        data_file : The file into which all the pre-processed data needs to be stored
        '''

        # all_frame_data would be a list of numpy arrays corresponding to each dataset
        # Each numpy array would be of size (numFrames, max_num_peds, 3) where each pedestrian's
        # pedId, x, y in each frame is stored
        all_frame_data = []
        # frame_ids_data would be a list of lists corresponding to each dataset
        # Each list would contain the frameIds of all the frames in the dataset
        frame_ids_data = []
        # num_peds_data would be a list of lists corresponding to each dataset
        # Each list would contain the number of pedestrians in each frame in the dataset
        num_peds_data = []

        ped_ids_data = []
        # Index of the current dataset
        dataset_index = 0

        # For each dataset
        for directory in data_dirs:

            # Define path of the csv file of the current dataset
            file_path = os.path.join(directory, 'pixel_pos.csv')

            # Load the data from the csv file
            data = np.genfromtxt(file_path, delimiter=',')

            # print("shape of data:{}".format(data.shape))

            # Frame IDs of the frames in the current dataset
            frame_id_data = np.unique(data[0, :]).tolist()

            # print("frame_id_data")
            # print(frame_id_data)

            # Number of frames
            num_frames = len(frame_id_data)

            # Add the list of frameIDs to frame_ids_data
            frame_ids_data.append(frame_id_data)
            # Initialize the list of numPeds for the current dataset

            # ped_id_data = np.unique(data[1, :]).tolist()

            num_peds_data.append([])
            ped_ids_data.append([])
            # Initialize the numpy array for the current dataset
            all_frame_data.append(np.zeros((num_frames, self.max_num_peds, 3)))

            # index to maintain the current frame
            curr_frame = 0
            for frame in frame_id_data:
                # Extract all pedestrians in current frame
                peds_in_frame = data[:, data[0, :] == frame]

                # print("peds in frame:{}".format(peds_in_frame.shape))
                # print(peds_in_frame)

                # Extract peds list
                peds_list = peds_in_frame[1, :].tolist()

                # Helper print statement to figure out the maximum number of peds in any frame in any dataset
                # if len(peds_list) > 1:
                #     print(len(peds_list))

                # print("ped_ids_data[dataset_index] before append:")
                # print(ped_ids_data[dataset_index])

                # Add number of peds in the current frame to the stored data
                num_peds_data[dataset_index].append(len(peds_list))
                ped_ids_data[dataset_index].append(peds_list)

                # print("peds_list:")
                # print(peds_list)

                # Initialize the row of the numpy array
                peds_with_pos = []

                # For each ped in the current frame
                for ped in peds_list:
                    # Extract their x and y positions
                    current_x = peds_in_frame[3, peds_in_frame[1, :] == ped][0]
                    current_y = peds_in_frame[2, peds_in_frame[1, :] == ped][0]

                    # Add their pedID, x, y to the row of the numpy array
                    peds_with_pos.append([ped, current_x, current_y])

                # Add the details of all the peds in the current frame to all_frame_data
                all_frame_data[dataset_index][curr_frame, 0 : len(peds_list), :] = np.array(peds_with_pos)
                # Increment the frame index
                curr_frame += 1
            # Increment the dataset index
            dataset_index += 1

        # print("==================ped_ids_data========================")
        # print(ped_ids_data)

        # Save the tuple (all_frame_data, frame_ids_data, num_peds_data, ped_ids_data) in the pickle file
        f = open(data_file, "wb")
        pickle.dump((all_frame_data, frame_ids_data, num_peds_data, ped_ids_data), f, protocol=2)
        f.close()
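
    # Added note (worked example with hypothetical numbers, not from the
    # original repo): for a dataset with 3 frames and max_num_peds = 4,
    # frame_preprocess stores a (3, 4, 3) array. If frame 0 contains peds 5
    # and 9, row 0 starts with [5, x5, y5] and [9, x9, y9] and the remaining
    # 2 slots stay all-zero, so a leading 0 pedId marks an empty slot.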
    def load_preprocessed(self, data_file):
        '''
        Function to load the pre-processed data into the DataLoader object
        params:
        data_file : the path to the pickled data file
        '''
        # Load data from the pickled file
        f = open(data_file, 'rb')
        self.raw_data = pickle.load(f)
        f.close()

        # Get all the data from the pickle file
        self.data = self.raw_data[0]
        self.frame_list = self.raw_data[1]
        self.num_peds_list = self.raw_data[2]
        self.peds_list = self.raw_data[3]

        counter = 0

        # For each dataset
        for dataset in range(len(self.data)):
            # get the frame data for the current dataset
            all_frame_data = self.data[dataset]
            # Increment the counter with the number of sequences in the current dataset
            counter += int(len(all_frame_data) / (self.seq_length+2))

        # Calculate the number of batches
        self.num_batches = int(counter/self.batch_size)

    def next_batch(self):
        '''
        Function to get the next batch of points
        '''
        # Source data
        x_batch = []
        # Target data
        y_batch = []
        # Dataset data
        d = []

        # ped count per sequence
        num_peds_batch = []
        # ped id list per sequence
        peds_batch = []
        # return target_id
        # target_id_batch = []

        # Iteration index
        i = 0
        while i < self.batch_size:
            # Extract the frame data of the current dataset
            frame_data = self.data[self.dataset_pointer]
            num_peds_list = self.num_peds_list[self.dataset_pointer]
            peds_list = self.peds_list[self.dataset_pointer]

            # Get the frame pointer for the current dataset
            idx = self.frame_pointer
            # While there is still seq_length number of frames left in the current dataset
            if idx + self.seq_length + 1 < frame_data.shape[0]:
                # All the data in this sequence
                # seq_frame_data = frame_data[idx:idx+self.seq_length+1, :]
                seq_source_frame_data = frame_data[idx : idx + self.seq_length, :]
                seq_target_frame_data = frame_data[idx+1 : idx + self.seq_length + 1, :]

                seq_num_peds_list = num_peds_list[idx : idx + self.seq_length]
                seq_peds_list = peds_list[idx : idx + self.seq_length]
                # Number of unique peds in this sequence of frames
                # pedID_list = np.unique(seq_frame_data[:, :, 0])
                # numUniquePeds = pedID_list.shape[0]

                # sourceData = np.zeros((self.seq_length, self.max_num_peds, 3))
                # targetData = np.zeros((self.seq_length, self.max_num_peds, 3))

                # for seq in range(self.seq_length):
                #     sseq_frame_data = seq_source_frame_data[seq, :]
                #     tseq_frame_data = seq_target_frame_data[seq, :]
                #     for ped in range(numUniquePeds):
                #         pedID = pedID_list[ped]

                #         if pedID == 0:
                #             continue
                #         else:
                #             sped = sseq_frame_data[sseq_frame_data[:, 0] == pedID, :]
                #             tped = np.squeeze(tseq_frame_data[tseq_frame_data[:, 0] == pedID, :])
                #             if sped.size != 0:
                #                 sourceData[seq, ped, :] = sped
                #             if tped.size != 0:
                #                 targetData[seq, ped, :] = tped

                x_batch.append(seq_source_frame_data)
                y_batch.append(seq_target_frame_data)
                num_peds_batch.append(seq_num_peds_list)
                peds_batch.append(seq_peds_list)

                self.frame_pointer += self.seq_length
                d.append(self.dataset_pointer)
                i += 1
            else:
                # Not enough frames left
                # Increment the dataset pointer and set the frame_pointer to zero
                self.tick_batch_pointer()

        return x_batch, y_batch, d, num_peds_batch, peds_batch

    def tick_batch_pointer(self):
        '''
        Advance the dataset pointer
        '''
        # Go to the next dataset
        self.dataset_pointer += 1
        # Set the frame pointer to zero for the current dataset
        self.frame_pointer = 0
        # If all datasets are done, then go to the first one again
        if self.dataset_pointer >= len(self.data):
            self.dataset_pointer = 0

    def reset_batch_pointer(self):
        '''
        Reset all pointers
        '''
        # Go to the first frame of the first dataset
        self.dataset_pointer = 0
        self.frame_pointer = 0
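
    # Added note (worked example with hypothetical IDs, not from the original
    # repo): if the frames of one sequence contain pedestrian IDs {3, 7, 12},
    # convert_proper_array below builds lookup_table = {3: 0, 7: 1, 12: 2} and
    # returns seq_data of shape (seq_length, 3, 2), where row
    # lookup_table[ped_id] of each frame holds that pedestrian's (x, y);
    # pedestrians absent from a frame stay at (0, 0).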
    def convert_proper_array(self, x_seq, num_ped_list, ped_list):
        # Converter function to the appropriate format. Instead of directly
        # using ped ids, we map ped ids to array indices via a lookup table
        # built for each sequence -> speed
        # output: seq_length x (number of unique peds in the sequence) x 2 (x, y)

        # get unique ids from the sequence
        # unique_ids = np.unique(ped_list)
        unique_ids = np.unique(np.concatenate(ped_list).ravel().tolist()).astype(int)

        # create a lookup table which maps ped ids -> array indices
        lookup_table = dict(zip(unique_ids.tolist(), list(range(0, len(unique_ids)))))

        seq_data = np.zeros(shape=(self.seq_length, len(lookup_table), 2))

        # create new structure of array
        for ind, frame in enumerate(x_seq):
            ped_ids = frame[:, 0]

            for ped_id_index in range(len(ped_ids)):
                ped_id = ped_ids[ped_id_index].astype(int)

                # a pedId of 0 marks an empty slot in the frame array
                if ped_id == 0:
                    continue

                # copy this frame row's (x, y) into the ped's lookup slot
                # (indexing the frame by its own row position, not by the
                # lookup index, keeps rows and peds aligned)
                seq_data[ind, lookup_table[ped_id], :] = frame[ped_id_index, 1 : 3]

        # return_arr = Variable(torch.from_numpy(np.array(seq_data)).float())

        # print(seq_data)  # debug

        return seq_data, lookup_table

    # def preprocess(self, data_dirs, data_file):
    #     '''
    #     The function that pre-processes the pixel_pos.csv files of each dataset
    #     into data that can be used
    #     params:
    #     data_dirs : List of directories where raw data resides
    #     data_file : The file into which all the pre-processed data needs to be stored
    #     '''
    #     # all_ped_data would be a dictionary with mapping from each ped to their
    #     # trajectories given by matrix 3 x numPoints with each column
    #     # in the order x, y, frameId
    #     # Pedestrians from all datasets are combined
    #     # Dataset pedestrian indices are stored in dataset_indices
    #     all_ped_data = {}
    #     dataset_indices = []
    #     current_ped = 0
    #     # For each dataset
    #     for directory in data_dirs:
    #         # Define the path to its respective csv file
    #         file_path = os.path.join(directory, 'pixel_pos.csv')

    #         print("processing data: {}".format(file_path))
    #         # Load data from the csv file
    #         # Data is a 4 x numTrajPoints matrix
    #         # where each column is a (frameId, pedId, y, x) vector
    #         data = np.genfromtxt(file_path, delimiter=',')

    #         # Get the number of pedestrians in the current dataset
    #         numPeds = np.size(np.unique(data[1, 
:])) 360 | 361 | # # For each pedestrian in the dataset 362 | # for ped in range(1, numPeds+1): 363 | # # Extract trajectory of the current ped 364 | # traj = data[:, data[1, :] == ped] 365 | # # Format it as (x, y, frameId) 366 | # traj = traj[[3, 2, 0], :] 367 | 368 | # # Store this in the dictionary 369 | # all_ped_data[current_ped + ped] = traj 370 | 371 | # # Current dataset done 372 | # dataset_indices.append(current_ped+numPeds) 373 | # current_ped += numPeds 374 | 375 | # # The complete data is a tuple of all pedestrian data, and dataset ped indices 376 | # complete_data = (all_ped_data, dataset_indices) 377 | # # Store the complete data into the pickle file 378 | # f = open(data_file, "wb") 379 | # pickle.dump(complete_data, f, protocol=2) 380 | # f.close() 381 | 382 | # def load_preprocessed(self, data_file): 383 | # ''' 384 | # Function to load the pre-processed data into the DataLoader object 385 | # params: 386 | # data_file : The path to the pickled data file 387 | # ''' 388 | 389 | # # Load data from the pickled file 390 | # f = open(data_file, "rb") 391 | # self.raw_data = pickle.load(f) 392 | # f.close() 393 | 394 | # # Get the pedestrian data from the pickle file 395 | # all_ped_data = self.raw_data[0] 396 | # # Not using dataset_indices for now 397 | # # dataset_indices = self.raw_data[1] 398 | 399 | # # Construct the data with sequences(or trajectories) longer than seq_length 400 | # self.data = [] 401 | # counter = 0 402 | 403 | # # For each pedestrian in the data 404 | # for ped in all_ped_data: 405 | # # Extract his trajectory 406 | # traj = all_ped_data[ped] 407 | # # If the length of the trajectory is greater than seq_length (+2 as we need both source and target data) 408 | # if traj.shape[1] > (self.seq_length+2): 409 | # # TODO: (Improve) Store only the (x,y) coordinates for now 410 | # self.data.append(traj[[0, 1], :].T) 411 | # # Number of batches this datapoint is worth 412 | # counter += int(traj.shape[1] / ((self.seq_length+2))) 413 | 414 | # # Calculate the number of batches (each of batch_size) in the data 415 | # self.num_batches = int(counter / self.batch_size) 416 | 417 | # def next_batch(self): 418 | # ''' 419 | # Function to get the next batch of points 420 | # ''' 421 | # # List of source and target data for the current batch 422 | # x_batch = [] 423 | # y_batch = [] 424 | # # For each sequence in the batch 425 | # for i in range(self.batch_size): 426 | # # Extract the trajectory of the pedestrian pointed out by self.pointer 427 | # traj = self.data[self.pointer] 428 | # # Number of sequences corresponding to his trajectory 429 | # n_batch = int(traj.shape[0] / (self.seq_length+2)) 430 | # # Randomly sample a index from which his trajectory is to be considered 431 | # idx = random.randint(0, traj.shape[0] - self.seq_length - 2) 432 | # # Append the trajectory from idx until seq_length into source and target data 433 | # x_batch.append(np.copy(traj[idx:idx+self.seq_length, :])) 434 | # y_batch.append(np.copy(traj[idx+1:idx+self.seq_length+1, :])) 435 | 436 | # if random.random() < (1.0/float(n_batch)): 437 | # # Adjust sampling probability 438 | # # if this is a long datapoint, sample this data more with 439 | # # higher probability 440 | # self.tick_batch_pointer() 441 | 442 | # return x_batch, y_batch 443 | 444 | # def tick_batch_pointer(self): 445 | # ''' 446 | # Advance the data pointer 447 | # ''' 448 | # self.pointer += 1 449 | # if (self.pointer >= len(self.data)): 450 | # self.pointer = 0 451 | 452 | # def reset_batch_pointer(self): 453 | # 
''' 454 | # Reset the data pointer 455 | # ''' 456 | # self.pointer = 0 -------------------------------------------------------------------------------- /Social-LSTM/data/ucy/zara/zara01/pixel_pos.csv: -------------------------------------------------------------------------------- 1 | 0,0,0,0,0,0,0,0,2,3,3,3,3,4,4,4,4,5,6,7,7,7,8,8,8,8,9,10,10,10,10,11,12,12,13,14,14,14,14,15,15,16,16,17,17,17,18,18,18,19,19,20,20,21,21,22,22,22,22,22,24,24,25,25,26,26,27,27,27,27,29,29,29,30,31,31,31,32,32,33,33,33,35,35,36,36,36,37,38,40,40,40,40,41,41,41,44,44,45,45,47,48,48,48,50,51,52,52,52,53,54,56,57,57,57,58,60,61,61,61,62,62,64,64,65,65,66,67,67,68,68,68,69,69,71,72,72,72,72,73,74,74,75,75,75,75,76,76,77,78,79,79,79,79,79,80,80,80,80,81,82,82,82,84,84,84,84,84,84,85,85,86,86,87,87,88,88,89,89,90,91,91,92,92,92,92,92,94,94,95,95,96,96,97,97,99,99,100,101,101,102,103,103,105,105,106,106,106,107,110,110,110,111,111,111,114,114,114,115,115,116,116,118,118,118,119,120,120,122,122,122,124,124,124,125,126,126,127,129,130,130,131,131,132,134,134,135,135,136,137,137,138,139,140,141,141,142,143,144,144,145,145,145,147,148,149,149,150,150,150,151,153,153,153,155,156,156,157,158,159,160,161,162,162,162,163,165,166,166,166,168,168,168,169,170,170,172,173,173,173,173,174,177,177,178,181,184,185,189,189,191,193,193,194,195,195,195,197,197,198,198,198,199,200,201,202,202,203,204,205,206,207,207,207,208,208,208,209,209,211,211,211,211,212,212,214,214,215,215,217,217,217,217,217,218,218,220,220,221,221,221,222,223,223,224,226,226,226,227,228,229,230,231,235,236,236,236,239,240,240,240,244,245,245,245,246,248,248,250,250,251,253,253,253,255,255,257,258,258,258,259,260,260,261,261,262,263,264,264,264,264,267,267,268,268,269,271,272,272,272,272,274,276,278,278,279,280,284,284,286,288,291,292,293,299,302,302,315,315,318,319,323,323,328,329,334,334,338,339,342,343,346,346,349,350,358,359,362,363,366,366,367,370,371,372,374,375,377,378,379,379,380,380,381,381,383,383,384,385,385,386,387,388,388,389,390,390,390,390,391,392,393,393,394,394,395,395,396,397,397,398,398,398,400,400,400,400,400,400,401,401,402,403,403,403,403,403,404,404,404,405,406,406,407,407,407,408,408,409,411,411,412,412,413,414,415,415,416,416,417,417,418,420,420,421,421,421,422,422,424,424,424,425,426,426,426,427,428,428,429,430,431,431,431,432,433,434,435,435,435,437,437,438,438,439,442,442,446,446,447,448,448,448,449,450,451,452,452,453,453,453,454,455,455,455,456,456,457,458,459,459,459,460,461,462,462,463,463,463,464,466,466,466,467,467,467,469,469,469,470,470,470,471,471,472,472,473,473,473,474,474,475,475,476,476,477,477,477,477,477,477,478,479,479,480,481,481,481,481,481,481,482,482,483,483,484,484,484,484,485,485,485,486,487,487,487,488,488,488,489,490,490,490,491,491,491,492,492,493,493,494,495,496,496,496,498,500,500,501,501,501,506,507,508,508,508,512,512,518,521,524,525,527,528,531,532,535,537,539,540,541,543,543,544,546,546,546,548,548,550,550,551,551,552,553,554,555,556,557,558,558,558,559,561,562,562,564,564,564,565,566,566,568,569,569,570,570,570,570,572,574,575,576,577,577,577,580,580,581,583,584,585,587,589,589,591,593,593,593,594,596,597,597,598,599,600,602,603,604,605,606,607,609,609,610,613,613,613,616,618,618,618,622,625,626,627,627,628,629,630,631,631,632,633,633,634,634,636,636,636,637,637,638,639,639,640,640,640,641,641,641,642,643,643,644,644,645,645,645,645,646,647,649,649,649,649,649,649,650,650,651,651,653,653,654,654,654,655,655,655,656,656,658,658,658,659,659,659,660,660,660,661,661,662,662,663,663,663,664,664,664,664,664,
666,666,666,666,667,667,667,667,667,667,667,668,668,668,669,670,670,671,671,671,671,671,672,672,672,672,672,672,672,672,672,673,674,674,674,674,675,675,675,676,676,676,676,676,676,676,676,676,677,678,678,678,678,678,679,679,679,679,680,680,680,680,681,681,681,681,681,681,681,682,682,682,682,683,683,684,684,684,684,684,685,685,685,685,686,686,686,686,686,686,687,687,687,687,687,687,689,689,689,689,690,690,690,691,691,691,692,692,692,692,692,692,692,692,693,694,694,694,694,694,695,695,696,696,696,696,697,697,697,697,698,698,698,699,699,700,700,700,700,701,701,701,702,702,702,702,703,703,703,704,704,704,705,705,705,706,706,706,707,707,708,709,709,709,709,711,713,713,713,713,714,714,714,715,716,717,717,717,718,720,720,721,722,722,723,727,727,727,732,732,734,735,736,737,739,739,741,742,743,743,746,746,747,748,748,749,751,753,753,754,755,755,758,758,759,760,761,761,762,762,765,766,766,766,766,767,767,767,769,770,771,771,772,772,772,774,774,775,776,777,777,780,781,782,783,784,785,786,786,787,788,788,790,790,791,792,795,795,796,796,797,797,797,799,799,800,802,803,804,806,809,810,811,811,813,813,814,815,816,816,816,816,817,817,818,818,819,819,819,819,820,821,821,822,822,823,824,824,825,825,825,826,826,826,826,827,829,829,829,830,830,830,830,832,834,834,834,834,834,834,835,835,835,836,838,838,838,839,839,839,840,842,842,842,842,843,844,844,845,846,846,846,846,847,849,849,849,849,850,850,851,851,851,852,852,853,854,854,854,854,854,855,855,856,856,856,857,857,858,858,859,860,861,862,863,864,864,866,868,868,872,872,873,873,874,875,875,876,876,876,878,879,880,880,880,880,880,883,884,884,884,884,885,888,888,889,890,890,891,892,894,896,896,897,898,902,902,903,903,906,907,907,907,911,912,912,916,916,916,917,920,920,920,921,921,924,925,925,925,926,929,929,930,930,932,933,934,935,935,937,939,939,939,941,942,942,944,944,945,946,948,948,949,949,950,952,953,954,954,954,954,956,958,958,959,959,959,959,960,962,962,963,963,963,964,964,965,966,967,967,967,969,969,970,970,970,971,972,975,975,976,976,976,977,979,980,982,982,982,986,986,986,988,990,990,991,991,995,995,995,998,999,1000,1000,1001,1001,1003,1004,1005,1005,1005,1008,1008,1010,1010,1011,1012,1013,1013,1014,1016,1016,1018,1018,1022,1023,1044,1049,1050,1054,1054,1057,1059,1060,1063,1064,1065,1068,1069,1069,1072,1072,1073,1074,1075,1075,1077,1077,1078,1079,1079,1079,1080,1081,1081,1081,1082,1082,1082,1082,1082,1083,1084,1084,1084,1084,1085,1085,1085,1085,1085,1086,1087,1087,1087,1087,1088,1088,1088,1089,1089,1089,1090,1091,1091,1091,1092,1092,1093,1094,1094,1095,1095,1096,1096,1098,1099,1099,1100,1100,1100,1101,1101,1105,1105,1105,1106,1106,1109,1109,1110,1111,1113,1114,1114,1114,1115,1116,1118,1119,1120,1121,1122,1124,1125,1125,1126 2 | 
1,2,3,4,5,6,7,8,9,1,7,8,2,3,6,4,5,9,1,6,2,7,8,3,5,9,6,4,7,1,2,9,6,8,7,1,2,3,10,5,9,4,8,7,6,1,2,9,10,6,8,3,7,5,1,4,2,9,6,10,11,8,2,3,1,9,4,11,6,10,8,2,5,11,3,9,10,4,6,1,2,11,8,3,10,9,11,4,3,8,4,11,9,12,13,10,11,13,12,8,10,13,11,12,8,11,13,10,12,8,10,13,14,12,15,8,15,14,16,13,12,8,15,17,14,16,8,12,13,17,9,15,16,14,17,13,12,15,18,9,14,16,17,18,19,13,12,15,18,13,12,9,18,20,14,15,16,17,19,12,15,18,20,14,15,17,16,18,19,20,15,14,18,15,19,16,14,17,15,19,15,14,16,18,20,15,17,20,15,19,18,15,16,20,17,18,19,20,17,16,18,19,20,16,17,22,19,18,21,16,17,21,18,19,22,21,19,16,17,22,18,8,21,19,22,16,17,8,21,19,22,16,8,21,17,19,22,21,8,19,16,22,21,17,23,19,22,21,24,25,23,22,21,24,23,25,22,21,22,26,23,24,25,26,21,23,24,8,26,22,25,26,23,8,24,26,25,23,8,26,24,23,25,26,8,27,27,24,26,23,25,27,8,26,23,27,24,26,27,25,23,8,26,27,8,27,8,27,8,27,27,28,29,8,30,27,31,28,8,27,30,29,31,28,30,31,8,29,30,28,31,30,32,33,29,8,31,28,30,33,32,31,30,29,28,30,33,31,32,29,28,30,33,32,8,31,32,33,30,31,28,29,32,33,8,28,31,30,34,29,28,34,35,34,32,8,35,32,34,35,8,35,33,34,8,32,33,35,32,34,33,36,32,35,33,36,32,34,35,36,37,33,32,37,36,34,35,32,37,33,36,37,34,35,36,37,32,36,33,34,37,35,37,36,34,35,37,37,35,34,37,37,34,35,37,34,35,38,39,38,39,39,38,39,38,39,38,39,38,38,39,38,39,38,39,40,41,40,41,40,42,41,42,40,41,42,40,41,43,42,44,45,40,41,43,44,45,42,40,43,41,44,45,42,43,40,46,47,41,44,45,43,42,46,47,41,40,44,43,42,45,46,48,47,40,41,42,43,49,44,50,48,40,46,42,45,50,49,47,44,41,43,48,50,46,45,49,44,47,48,50,45,46,49,47,44,45,48,46,51,50,49,51,53,47,48,52,50,49,53,51,52,49,48,46,50,53,47,52,51,49,48,50,53,51,52,53,49,48,50,49,51,52,53,50,52,53,54,53,52,55,56,51,54,53,52,55,56,53,58,54,53,55,56,58,59,54,57,55,56,58,54,59,57,55,58,56,54,59,57,58,55,59,56,57,54,59,58,60,55,61,62,57,56,54,59,60,58,62,61,57,56,63,55,59,63,54,60,58,62,57,61,63,56,55,62,58,60,57,54,59,63,64,55,61,56,64,60,63,57,54,62,58,59,55,64,63,56,60,57,61,62,55,59,54,64,63,58,57,60,59,64,62,61,63,60,62,64,65,66,61,65,66,60,61,62,66,65,66,65,66,65,66,65,66,65,66,65,67,66,65,67,68,69,66,67,68,65,69,68,67,66,69,65,68,67,69,70,66,68,67,65,69,70,68,69,65,66,67,70,71,68,69,71,70,67,65,66,68,71,70,68,71,68,67,70,70,67,68,72,70,68,72,70,68,72,73,74,68,70,73,72,74,70,73,72,74,75,73,72,75,74,73,75,72,74,73,75,72,75,74,73,74,76,73,74,75,77,76,78,77,75,79,78,76,75,77,79,81,80,78,82,76,77,85,80,78,81,79,82,83,85,77,84,80,76,81,83,78,82,79,84,81,83,76,77,78,80,82,84,85,79,81,78,80,83,84,82,76,77,79,81,82,84,80,83,85,78,81,86,76,79,77,82,84,80,83,85,86,87,88,89,81,78,76,79,82,80,84,77,90,86,89,83,87,88,85,81,78,89,84,91,92,86,82,80,79,83,87,76,77,93,90,88,85,89,79,81,84,86,78,92,88,83,93,94,87,91,82,77,95,80,88,89,96,84,97,83,94,76,87,86,82,93,95,81,88,92,78,91,84,89,77,90,96,83,97,94,87,86,89,84,88,83,93,82,95,81,85,90,92,78,91,77,96,88,89,97,94,86,76,87,93,81,88,95,89,78,82,90,77,97,91,85,81,92,94,96,86,88,87,95,93,89,78,97,76,92,94,91,90,88,77,89,87,95,96,93,90,97,86,94,78,92,88,76,91,89,95,90,77,78,87,93,88,94,97,92,96,90,89,93,95,89,91,90,92,94,93,90,95,94,98,96,91,97,92,93,90,95,98,94,91,92,90,95,93,98,95,90,98,99,100,90,98,99,100,101,90,98,100,99,101,90,102,98,101,100,99,102,98,101,100,99,102,103,104,101,102,100,99,104,103,101,99,104,105,103,106,100,102,105,106,99,103,100,104,102,105,100,99,106,103,104,105,106,104,103,107,105,108,104,106,103,107,108,105,104,106,107,105,108,103,104,105,106,109,110,107,108,109,110,107,108,111,110,107,112,111,113,108,109,114,115,116,112,111,113,107,110,115,108,114,111,112,116,113,115,111,114,117,109,116,118,112,113,111,115,110,114,117,118,113,116,111,112,
(remaining rows of raw trajectory data omitted; pixel_pos.csv stores a 4 x numTrajectoryPoints matrix: row 1 frame numbers, row 2 pedestrian IDs, row 3 y-coordinates, row 4 x-coordinates)
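A minimal sketch of loading this layout with NumPy (illustrative, not part of the repo; the path assumes you run from the Social-LSTM directory):

```python
import numpy as np

# pixel_pos.csv is a 4 x numTrajectoryPoints matrix:
# row 0: frame numbers, row 1: pedestrian IDs,
# row 2: y-coordinates, row 3: x-coordinates
data = np.genfromtxt("data/ucy/zara/zara01/pixel_pos.csv", delimiter=",")
frames, ped_ids, ys, xs = data

# e.g. extract one pedestrian's trajectory, ordered by frame
mask = ped_ids == ped_ids[0]
order = np.argsort(frames[mask])
traj = np.stack([xs[mask], ys[mask]], axis=1)[order]  # (T, 2) points
```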
--------------------------------------------------------------------------------
/GAE/gae.ipynb:
--------------------------------------------------------------------------------
## 1. Problems with GCN

GCN is trained in a full-graph fashion. In many real-world scenarios, however, graphs are extremely large, and a single GPU cannot hold the memory required to train on the whole graph at once, so mini-batch training is essential for large-scale graph data.

In addition, GCN requires the structure of the entire graph to be known before training, which greatly limits the scenarios where it can be applied.
## 2. GraphSage

By sampling neighbors, GraphSage changes GCN's training from a full-batch scheme to a node-centric mini-batch scheme, which makes distributed training on large-scale graph data possible.

![Visual illustration of the GraphSAGE sample and aggregate approach](images/graphsage_routing.png)

### 2.1 Sampling neighbors

In a GNN model, information is aggregated along the graph edges: a node's features at layer (k+1) depend only on its neighbors' features at layer (k). This locality means a node's features at layer (k) depend only on its own k-hop subgraph, so for a k-layer network it is enough to sample each node's k-hop subgraph.

To improve computational efficiency, each node's neighbors are sampled with replacement, which guarantees that every node ends up with the same number of sampled neighbors; fixed-size inputs are a common data-handling strategy in neural networks. A usage sketch follows the code cell below.

```python
import numpy as np


def sampling(src_nodes, sample_num, neighbor_table):
    """Sample a fixed number of neighbors for each source node.
    Sampling is done with replacement, so if a node has fewer
    neighbors than sample_num, duplicates appear in the result.

    Arguments:
        src_nodes {list, ndarray} -- list of source node ids
        sample_num {int} -- number of neighbors to sample per node
        neighbor_table {dict} -- mapping from a node to its neighbors

    Returns:
        np.ndarray -- flattened array of sampled node ids
    """
    results = []
    for sid in src_nodes:
        # sample with replacement from the node's neighbors
        res = np.random.choice(neighbor_table[sid], size=(sample_num, ))
        results.append(res)
    return np.asarray(results).flatten()


def multihop_sampling(src_nodes, sample_nums, neighbor_table):
    """Multi-hop sampling starting from the source nodes.

    Arguments:
        src_nodes {list, np.ndarray} -- source node ids
        sample_nums {list of int} -- number of samples per hop
        neighbor_table {dict} -- mapping from a node to its neighbors

    Returns:
        [list of ndarray] -- sampling result for each hop
    """
    sampling_result = [src_nodes]

    for k, hopk_num in enumerate(sample_nums):
        hopk_result = sampling(sampling_result[k], hopk_num, neighbor_table)
        sampling_result.append(hopk_result)
    return sampling_result
```
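A quick usage sketch on a toy graph (not from the notebook; the graph and sample counts are made up for illustration):

```python
import numpy as np

# toy undirected graph: 0--1, 0--2, 1--2, 2--3
neighbor_table = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}

# sample 2 first-hop and 3 second-hop neighbors per node
src = np.array([0, 3])
result = multihop_sampling(src, sample_nums=[2, 3], neighbor_table=neighbor_table)

print(len(result))      # 3: [source nodes, hop-1 samples, hop-2 samples]
print(result[0].shape)  # (2,)
print(result[1].shape)  # (4,)  -> 2 source nodes x 2 samples
print(result[2].shape)  # (12,) -> 4 hop-1 nodes x 3 samples
```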
### 2.2 Aggregating neighbor features

Because a node's neighbors in a graph are unordered, the aggregation function should ideally be symmetric (permutation-invariant): reordering its inputs must not change its output.

Commonly used aggregation operators include:

1) Mean / sum aggregators:

$$Agg_{sum} = \sigma (\text{SUM} \{W h_j + b\}), \forall v_j \in N(v_i)$$

$$Agg_{mean} = \sigma (\text{MEAN} \{W h_j + b\}), \forall v_j \in N(v_i)$$

2) Pooling aggregator:

$$Agg_{pool} = \sigma (\text{MAX} \{W h_j + b\}), \forall v_j \in N(v_i)$$

Note that the code below pools first and then applies the linear map; for mean and sum the two orders are equivalent by linearity.

```python
import tensorflow as tf


class NeighborAggregator(tf.keras.Model):
    def __init__(self, input_dim, output_dim,
                 use_bias=False, aggr_method="mean"):
        """Aggregate the features of a node's neighbors.

        Args:
            input_dim: dimension of the input features
            output_dim: dimension of the output features
            use_bias: whether to add a bias term (default: {False})
            aggr_method: neighbor aggregation method (default: {mean})
        """
        super(NeighborAggregator, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.use_bias = use_bias
        self.aggr_method = aggr_method

        self.weight = self.add_weight(shape=(self.input_dim, self.output_dim),
                                      initializer='glorot_uniform',
                                      name='kernel')

        if self.use_bias:
            # the bias is added to the projected features,
            # so its shape must match the output dimension
            self.bias = self.add_weight(shape=(self.output_dim, ),
                                        initializer='zeros',
                                        name='bias')

    def call(self, neighbor_feature):
        # neighbor_feature: (num_nodes, num_neighbors, input_dim);
        # reduce over the (unordered) neighbor axis
        if self.aggr_method == "mean":
            aggr_neighbor = tf.math.reduce_mean(neighbor_feature, axis=1)
        elif self.aggr_method == "sum":
            aggr_neighbor = tf.math.reduce_sum(neighbor_feature, axis=1)
        elif self.aggr_method == "max":
            aggr_neighbor = tf.math.reduce_max(neighbor_feature, axis=1)
        else:
            raise ValueError("Unknown aggr type, expected sum, max, or mean, but got {}"
                             .format(self.aggr_method))

        neighbor_hidden = tf.matmul(aggr_neighbor, self.weight)
        if self.use_bias:
            neighbor_hidden += self.bias

        return neighbor_hidden
```
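Section 3 below instantiates a `GraphSage(...)` model whose definition is not shown in this part of the notebook. Here is a minimal sketch of how such a model can be assembled from `NeighborAggregator` and the multi-hop sampling results; it is an illustration under assumptions (per-layer widths passed as a `hidden_dim` list, sum combination of self and neighbor features), not the repo's exact `model.py`:

```python
import tensorflow as tf


class SageGCN(tf.keras.Model):
    """One GraphSage layer (sketch): combine a node's own projected
    features with the aggregated features of its sampled neighbors."""

    def __init__(self, input_dim, output_dim, aggr_method="mean",
                 activation=tf.nn.relu):
        super(SageGCN, self).__init__()
        self.aggregator = NeighborAggregator(input_dim, output_dim,
                                             aggr_method=aggr_method)
        self.self_weight = self.add_weight(shape=(input_dim, output_dim),
                                           initializer='glorot_uniform',
                                           name='self_kernel')
        self.activation = activation

    def call(self, inputs):
        src_features, neighbor_features = inputs
        self_hidden = tf.matmul(src_features, self.self_weight)
        neighbor_hidden = self.aggregator(neighbor_features)
        # sum combination of the self and neighbor terms;
        # concatenation is another common choice
        return self.activation(self_hidden + neighbor_hidden)


class GraphSage(tf.keras.Model):
    """Sketch of the model used in section 3. `x` is a list with one
    feature matrix per hop, as produced by multihop_sampling; hidden_dim
    is assumed to be a list of per-layer widths whose last entry equals
    the number of classes."""

    def __init__(self, input_dim, hidden_dim, num_neighbors_list):
        super(GraphSage, self).__init__()
        self.num_neighbors_list = num_neighbors_list
        self.num_layers = len(num_neighbors_list)
        dims = [input_dim] + list(hidden_dim)
        # note: "layers" is a reserved attribute on tf.keras.Model
        self.sage_layers = [SageGCN(dims[i], dims[i + 1])
                            for i in range(self.num_layers)]

    def call(self, x):
        # ensure a consistent float dtype for the raw numpy inputs
        hidden = [tf.cast(h, tf.float32) for h in x]
        for l, layer in enumerate(self.sage_layers):
            next_hidden = []
            # at stage l, hop h nodes aggregate their sampled neighbors,
            # which live one hop further out in the hidden list
            for hop in range(self.num_layers - l):
                src = hidden[hop]
                neigh = tf.reshape(
                    hidden[hop + 1],
                    (tf.shape(src)[0], self.num_neighbors_list[hop], -1))
                next_hidden.append(layer([src, neigh]))
            hidden = next_hidden
        return hidden[0]  # logits for the batch's source nodes
```

With, e.g., `HIDDEN_DIM = [128, 7]` and `NUM_NEIGHBORS_LIST = [10, 10]` (assumed values), `model(batch_sampling_x)` returns logits of shape `(batch_size, 7)`, matching what the training loop below feeds to the cross-entropy loss.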
## 3. Model training

We use the Cora dataset here: 2708 papers (their features and class labels) plus the 5429 citation edges between them. The papers fall into 7 classes: Case_Based, Genetic_Algorithms, Neural_Networks, Probabilistic_Methods, Reinforcement_Learning, Rule_Learning, and Theory. The goal is to take a paper's features as input and output which class the paper belongs to.

Note that we no longer feed the entire Cora dataset to the network; instead, the training data is fed to the model batch by batch.

```python
data = CoraData().data()

train_index = np.where(data.train_mask)[0]
train_label = data.y[train_index]
test_index = np.where(data.test_mask)[0]
val_index = np.where(data.val_mask)[0]

model = GraphSage(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
                  num_neighbors_list=NUM_NEIGHBORS_LIST)

loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, decay=5e-4)

def train():
    for e in range(EPOCHS):
        for batch in range(NUM_BATCH_PER_EPOCH):
            batch_src_index = np.random.choice(train_index, size=(BTACH_SIZE,))
            batch_src_label = train_label[batch_src_index].astype(float)

            batch_sampling_result = multihop_sampling(batch_src_index, NUM_NEIGHBORS_LIST, data.adjacency_dict)
            batch_sampling_x = [data.x[np.array(idx.astype(np.int32))] for idx in batch_sampling_result]

            with tf.GradientTape() as tape:
                batch_train_logits = model(batch_sampling_x)
                loss = loss_object(batch_src_label, batch_train_logits)
                grads = tape.gradient(loss, model.trainable_variables)

            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            print("Epoch {:03d} Batch {:03d} Loss: {:.4f}".format(e, batch, loss))
```

While implementing this in TensorFlow 2.0, a "No gradients provided for any variable" error came up; after a long chase, the cause turned up on Stack Overflow:

https://stackoverflow.com/questions/58947679/no-gradients-provided-for-any-variable-in-tensorflow2-0

> Yup, this is a mildly annoying thing about GradientTape. You cannot do anything to the tensors outside the tape context (with...) or the tape will "lose track". You can fix it by simply moving the addition into the context:

This is why, in the training loop above, both the forward pass and the loss computation sit inside the `with tf.GradientTape()` block.

The code for measuring the model's accuracy on the test and validation sets is as follows:

```python
def test(index):
    test_sampling_result = multihop_sampling(index, NUM_NEIGHBORS_LIST, data.adjacency_dict)
    test_x = [data.x[idx.astype(np.int32)] for idx in test_sampling_result]
    test_logits = model(test_x)
    test_label = data.y[index]

    ll = tf.math.equal(tf.math.argmax(test_label, -1), tf.math.argmax(test_logits, -1))
    accuracy = tf.reduce_mean(tf.cast(ll, dtype=tf.float32))

    return accuracy
```

Finally, let's run the model and see how it performs.

```
Process data ...
Loading cora dataset...
Dataset has 2708 nodes, 2708 edges, 1433 features.
Epoch 000 train accuracy: 0.7733333110809326 val accuracy: 0.508571445941925 test accuracy:0.4180253744125366
Epoch 001 train accuracy: 0.9599999785423279 val accuracy: 0.7142857313156128 test accuracy:0.6607789993286133
Epoch 002 train accuracy: 0.9933333396911621 val accuracy: 0.7457143068313599 test accuracy:0.648097813129425
Epoch 003 train accuracy: 1.0 val accuracy: 0.7142857313156128 test accuracy:0.6503623127937317
Epoch 004 train accuracy: 1.0 val accuracy: 0.7142857313156128 test accuracy:0.6557971239089966
Epoch 005 train accuracy: 1.0 val accuracy: 0.7200000286102295 test accuracy:0.6557971239089966
Epoch 006 train accuracy: 1.0 val accuracy: 0.7171428799629211 test accuracy:0.65625
Epoch 007 train accuracy: 1.0 val accuracy: 0.7342857122421265 test accuracy:0.65625
Epoch 008 train accuracy: 1.0 val accuracy: 0.7285714149475098 test accuracy:0.6598731875419617
Epoch 009 train accuracy: 1.0 val accuracy: 0.7142857313156128 test accuracy:0.6607789993286133
Epoch 010 train accuracy: 1.0 val accuracy: 0.7200000286102295 test accuracy:0.6675724387168884
Epoch 011 train accuracy: 1.0 val accuracy: 0.7171428799629211 test accuracy:0.6634963750839233
Epoch 012 train accuracy: 1.0 val accuracy: 0.7314285635948181 test accuracy:0.6616848111152649
Epoch 013 train accuracy: 1.0 val accuracy: 0.7114285826683044 test accuracy:0.6612318754196167
Epoch 014 train accuracy: 1.0 val accuracy: 0.7114285826683044 test accuracy:0.6612318754196167
Epoch 015 train accuracy: 1.0 val accuracy: 0.7028571367263794 test accuracy:0.6598731875419617
Epoch 016 train accuracy: 1.0 val accuracy: 0.7171428799629211 test accuracy:0.6662137508392334
Epoch 017 train accuracy: 1.0 val accuracy: 0.7142857313156128 test accuracy:0.6630434989929199
Epoch 018 train accuracy: 1.0 val accuracy: 0.7228571176528931 test accuracy:0.6630434989929199
Epoch 019 train accuracy: 1.0 val accuracy: 0.7171428799629211 test accuracy:0.6630434989929199
```
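The embedded output that followed (omitted below) is the accuracy plot. A sketch of how such a curve can be recorded and drawn; this is an assumption for illustration, not the notebook's exact plotting code, and the inner training step is elided:

```python
import matplotlib.pyplot as plt

history = {"train": [], "val": [], "test": []}

for e in range(EPOCHS):
    # ... one epoch of mini-batch training, as in train() above ...
    history["train"].append(float(test(train_index)))
    history["val"].append(float(test(val_index)))
    history["test"].append(float(test(test_index)))

for split, accs in history.items():
    plt.plot(accs, label="{} accuracy".format(split))
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.legend()
plt.show()
```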
"iVBORw0KGgoAAAANSUhEUgAAAtsAAAILCAYAAAA5TlCHAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOzdeZhc1X3n//e3d0mtXS2k1tYCiUVsEgjZRuAlNjZ4QXjDYCcBx5hJ4mXGWZ44k8WJM78kk0zssSdOJhhj4g0Z2xMgBox3s4MkEAKxytrVQmqhfev1/P6oaqm6VS211FVdvbxfz1NPV91769a3r0rVn3vq3HMipYQkSZKkwisrdQGSJEnSUGXYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbktTPIqI8IvZHxMxCbjuQRcTpEbG/1HVIUn8zbEvSCWTDbuetIyIO5Tz+yMnuL6XUnlKqTSltLOS2Jysi/kdEpIj4/W7L/yi7/M97uZ/NEfHm422TUlqbUqrtQ7mSNCgZtiXpBLJhtzYbFjcC78lZ9u3u20dERf9XecpeBm7otuy3sssLYpAdD0kqKMO2JPVRtoX4uxFxR0TsA34zIt4QEY9HxO6I2BoRX46Iyuz2FdmW44bs429l198fEfsi4rGImH2y22bXXxURL0fEnoj4PxHxSETceJzyHwMmRMRZ2efPJ/O34eluv+PVEfFM9vd5OCLOyy6/A6gH7s+29P9BRMzJ1vzRiNgI/LhzWc7+JkbE7dljsysifpBdPjki7su+zs6IePCU/2EkaQAwbEtSYbwX+A4wFvgu0Ab8V2ASsBi4Evgvx3n+h4G/ACaQaT3/m5PdNiImA3cCf5x93XXAol7U/k3gt7P3fxv4Ru7KiLgE+CpwEzARuA24OyKqUkrXA43AVdmW/i/kPPWNwNnAu/K85neAKmAecBrwpezyPwbWAnXAlOzvKUmDlmFbkgrj4ZTSf6aUOlJKh1JKy1JKT6SU2lJKa4FbgDcd5/nfTyktTym1At8G5p/Ctu8GVqaU7s6u+yKwoxe1fxP4SLbl/drsPnPdDPxL9ndqTyndll1+yQn2+7mU0sGU0qHchRExA3gr8HsppV0ppZaUUmcLdiuZlvKZ2eW/6kX9kjRgGbYlqTA25T6IiLMj4t6IeDUi9gKfJ9Pa3JNXc+4fBI53MWFP29bn1pFSSsDmExWeUlpHpoX8b4HVKaXGbpvMAv4k27Vjd0TsBqYC006w6009LJ8B7Egp7cmz7u+BDcDPIuLXEfHHJ6pfkgYyw7YkFUbq9vjfgOeAOSmlMcBfAlHkGrYC0zsfRERw4kDc6RvAH9KtC0nWJuCvU0rjcm4jU0p3Ztd3/90zCzNhP59NwKSIGJPnOXtTSp9JKTUA15AJ+cf7RkCSBjTDtiQVx2hgD3AgIs7h+P21C+WHwEUR8Z7sCCD/lUzf5974DvB24Ad51t0CfCIiLomM2uxrjMqu3wac3tsiU0qbgJ8CX4mIcRFRGRFvBMju94zsicIeoD17k6RBybAtScXxh2SG1NtHppX7u8V+wZTSNuBDwBeA14AzyIwq0tyL5x5MKf00pXQ4z7ongN8D/hXYRWZYwN/M2eRvgb/OdjH5b70st/P5L5MJ65/KPj4L+DmwH3gE+FJK6eFe7lOSBpzo+Vs+SdJgFhHlZEYK+UBK6aFS1yNJw5Et25I0hETElRExNiKqyQyb1wY8WeKyJGnYMmxL0tByGZlxqneQGdv7mpTSCbuRSJKKw24kkiRJUpHYsi1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbkiRJUpEYtiVJkqQiMWxLkiRJRWLYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbkiRJUpEYtiVJkqQiMWxLkiRJRWLYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbkiRJUpEYtiVJkqQiMWxLkiRJRWLYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbkiRJUpEYtiVJkqQiMWxLkiRJRWLYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkhm1JkiSpSAzbkiRJUpEYtiVJkqQiMWxLkiRJRWLYliRJkorEsC1JkiQViWFbkiRJKhLDtiRJklQkFaUuoFgmTZqUGhoaSl2GJEmShrgVK1bsSCnV5Vs3ZMN2Q0MDy5cvL3UZkiRJGuIiYkNP6+xGIkmSJBWJYbuA2to7uO3hddz/7NZSlyJJkqQBYMh2IymF8rLgeys209bewZXnTSEiSl2SJEmSSsiW7QKKCG66bDavbN/Pg6/sKHU5kiRJKjHDdoG958J6Jo+u5taH1pa6FEmSJJWYYbvAqirKuOHSBh56ZQcvvbqv1OVIkiSphAzbRfDhRTOpqSzjaw/bui1JkjSc9WvYjogrI+KliFgTEZ/Ns/6LEbEye3s5InbnrGvPWXdPf9Z9ssaPquIDF0/nrqcbadrXXOpyJEmSVCL9FrYjohz4CnAVMA+4PiLm5W6TUvpMSml+Smk+8H+A/5ez+lDnupTS1f1V96n6ncWzaWnv4JuP9zjGuSRJkoa4/mzZXgSsSSmtTSm1AEuBJcfZ/nrgjn6prAhOr6vlbedM5luPb+Bwa3upy5EkSVIJ9GfYngZsynm8ObvsGBExC5gN/DxncU1ELI+IxyPimh6ed3N2m+VNTU2FqvuUfeyy09l5oIX/eHpLqUuRJElSCfRn2M43w0vqYdvrgO+nlHKbhGemlBYCHwb+d0SccczOUrolpbQwpbSwrq6u7xX30etPn8C59WP42sPr6Ojo6VeVJEnSUNWfYXszMCPn8XSgsYdtr6NbF5KUUmP251rgl8CCwpdYWBHBTZfPZs32/fzqldK3tEuSJKl/9WfYXgbMjYjZEVFFJlAfM6pIRJwFjAcey1k2PiKqs/cnAYuB5/ul6j561/n1nDammq89tK7UpUiSJKmf9VvYTim1AZ8EHgBeAO5MKa2OiM9HRO7oItcDS1NKuf0uzgGWR8QzwC+Av08pDYqw3TnJzcNrdvDC1r2lLkeSJEn9KLpm2qFj4cKFafny5aUuA4DdB1t4w9/9nHddMJX/9cELS12OJEmSCigiVmSvLTyGM0j2g3Ejq/jgwuncvXIL2/ceLnU5kiRJ6ieG7X7y0cWzaetITnIjSZI0jBi2+8nsSaN42zmn8a3HN3CoxUluJEmShgPDdj+66bLZ7DrYyv97enOpS5EkSVI/MGz3o0WzJ3D+tLFOciNJkjRMGLb7UeckN2ubDvDLl7eXuhxJkiQVmWG7n73z/KlMGVPDrU5yI0mSNOQZtvtZZXkZNy5u4NFfv8bqxj2lLkeSJElFZNgugesvmcnIqnK+9rCt25IkSUOZYbsExo6s5NqFM/jPZxrZ5iQ3kiRJQ5Zhu0Q+uriBto7ENx5bX+pSJEmSVCSG7RKZNXEUb593Gt9+YiMHW9pKXY4kSZKKwLBdQjddfjq7D7
byg6e2lLoUSZIkFYFhu4QWzhrPhdPHcpuT3EiSJA1Jhu0Sigg+dvnprNtxgJ+/6CQ3kiRJQ41hu8SuOm8K9WNruPXhtaUuRZIkSQVm2C6xzkluHl+7k+e2OMmNJEnSUNKvYTsiroyIlyJiTUR8Ns/6GyOiKSJWZm835ay7ISJeyd5u6M+6i+1Dl8xklJPcSJIkDTl9CtsRMSIi3hYRs3qxbTnwFeAqYB5wfUTMy7Ppd1NK87O3W7PPnQB8DngdsAj4XESM70vtA8nYEZVce0lmkptX9zjJjSRJ0lBxUmE7Im6PiN/P3q8CngR+DLwUEVed4OmLgDUppbUppRZgKbCkly/9DuAnKaWdKaVdwE+AK0+m9oHuo5fOpiMl/v2x9aUuRZIkSQVysi3b7wAez96/GhgNTAH+Kns7nmnAppzHm7PLunt/RKyKiO9HxIyTeW5E3BwRyyNieVNT0wnKGVhmThzJO86dwrcf38CBZie5kSRJGgpONmyPBzrHqLsS+EFKaTuZVup8XUJyRZ5l3QeX/k+gIaV0AfBT4N9P4rmklG5JKS1MKS2sq6s7QTkDz02Xz2bv4TZ+8NTmUpciSZKkAjjZsP0qcF62//U7yARigFqg9QTP3QzMyHk8HWjM3SCl9FpKqTn78KvAxb197lBw0czxzJ8xjtseXke7k9xIkiQNeicbtm8Dvgs8B7QDP8sufx3w4gmeuwyYGxGzs/29rwPuyd0gIqbmPLwaeCF7/wHg7RExPnth5Nuzy4aUiOCmy2ez/rWD/OyFbaUuR5IkSX1UcTIbp5Q+HxGrgZnA97IXOgK0Af/zBM9ti4hPkgnJ5cBtKaXVEfF5YHlK6R7g0xFxdXZ/O4Ebs8/dGRF/QyawA3w+pbTzZGofLK48dwrTxo3g1ofX8fZzp5S6HEmSJPVBpDQ0uyssXLgwLV++vNRlnJJbH1rL/7j3Be755GIumD6u1OVIkiTpOCJiRUppYb51Jzv037UR8facx38ZEZsj4oFuXUDUB9deMoPa6gonuZEkSRrkTrbP9l913omIi4D/DnwZqAT+qXBlDW9jair50CUzuHfVVhp3Hyp1OZIkSTpFJxu2ZwEvZe+/F7grpfQPwB8Aby1kYcPdjZc2OMmNJEnSIHeyYfswmYlsIBOuO4f+25OzXAUwY8JIrjpvKt95YqOT3EiSJA1SJxu2HwL+KSL+AlgI3JddfiZdZ3hUAXzs8tnsO9zG95Z7aCVJkgajkw3bnwRagA8Av5tS6pxY5iqG4LjXpXbRzPFcNHMctz2y3kluJEmSBqGTCtsppc0ppfeklC5MKd2Ws/y/pZQ+XfjydNPlp7Nx50F+8ryT3EiSJA02J9uyDUBE/EZEfDIiPhERbyl0UTrq7fNOY/r4EXzt4bWlLkWSJEkn6WTH2Z4WEU8CPwH+BPgs8NOIeCIi6otR4HBXUV7GRxfPZtn6XazctLvU5UiSJOkknGzL9peBdmBOSmlGSmkGMDe77MuFLk4Z1y6czmgnuZEkSRp0TjZsXwF8IqV0JPWllNYCn86uUxGMrqnkukUzuO/ZrWxxkhtJkqRB45T6bOfRUaD9qAc3XNoAwL8/ur6kdUiSJKn3TjZs/wz4ckTM6FwQETOBLwE/L2Rh6mr6+JFcdd4U7nhiI/ud5EaSJGlQONmw/WlgJLA2IjZExHrg18AI4FMFrk3d3HT56exrbuPOZU5yI0mSNBhUnMzGKaVNwEURcQVwNhDA88Aa4AvAtQWvUEfMnzGOhbPGc9sj67jh0gbKy6LUJUmSJOk4TqnPdkrpJyml/5NS+nJK6afAWOD9hS1N+dx0+Ww27zrEj1e/WupSJEmSdAKFukBS/eSKeVOYMWEEtzoMoCRJ0oDXr2E7Iq6MiJciYk1EfDbP+j+IiOcjYlVE/CwiZuWsa4+IldnbPf1Z90BSXhb8zuLZrNiwi6c27ip1OZIkSTqOfgvbEVEOfAW4CpgHXB8R87pt9jSwMKV0AfB94B9y1h1KKc3P3q7ul6IHqA8unMHoGie5kSRJGuh6dYFkL1qSx/RiN4uANdlJcIiIpcASMhdYApBS+kXO9o8Dv9mb+oab2uoKPrxoJl99aC2bdh5kxoSRpS5JkiRJefS2Zfu1E9zWAd84wT6mAblj1m3OLuvJx4D7cx7XRMTyiHg8Iq7J94SIuDm7zfKmpqYTlDO43XBpAxHhJDeSJEkDWK9atlNKHy3Aa+Ubpy7l3TDiN4GFwJtyFs9MKTVGxOnAzyPi2ZTSr7vVeQtwC8DChQvz7nuoqB83gnedP5WlyzbxX982l9E1laUuSZIkSd305wWSm4EZOY+nA43dN4qItwF/BlydUmruXJ5Sasz+XAv8ElhQzGIHg5sun83+5ja+6yQ3kiRJA1J/hu1lwNyImB0RVcB1QJe+4BGxAPg3MkF7e87y8RFRnb0/CVhMTl/v4eqC6eNY1DCBrz+ynrb2jlKXI0mSpG76LWynlNqATwIPAC8Ad6aUVkfE5yOic3SRfwRqge91G+LvHGB5RDwD/AL4+5TSsA/bAB+7fDZbdh/igdXbSl2KJEmSuomUhmbX5oULF6bly5eXuoyia+9I/MY//ZIJo6r4j99fXOpyJEmShp2IWJFSWphvnTNIDnKdk9w8vXE3KzY4yY0kSdJAYtgeAj5w8XTG1FRwm5PcSJIkDSiG7SFgVHUFH37dLO5/biubdh4sdTmSJEnKMmwPETdcOouyCG53khtJkqQBw7A9REwdO4J3XzCV7y7bxN7DraUuR5IkSRi2h5SPXXY6+5vb+IcfvciO/c0nfoIkSZKKqlfTtWtwOH/6WK6+sJ5vPb6RO57cxOVzJ/HeBdO4Yt5pjKzyn1qSJKm/Oc72EPTSq/u4a+UW7n56C417DjOyqpy3zzuNaxZM47I5k6go9wsNSZKkQjneONuG7SGsoyOxbP1O7lrZyL2rGtl7uI1JtVW8+4J6rlkwjQunjyUiSl2mJEnSoGbYFs1t7fzypSbuenoLP3txOy1tHcyeNIol8+u5Zv40GiaNKnWJkiRJg5JhW13sOdTKj57byl1PN/L4utdICebPGMc18+t594X1TKqtLnWJkiRJg4ZhWz3auucQ96xs5D+e3sKLr+6jvCy8sFKSJOkkGLbVK/kurHzHuVNYMr/eCyslSZJ6YNjWSTl6YeUW7l211QsrJUmSjsOwrVPW3NbOL15s4u6VW/jZC9tpaffCSkmSpFyGbRVETxdWvnfBNN59wVQmemGlJEkahgzbKrjjXVh58azx1FSWZ24VZfb1liRJQ9qACdsRcSXwJaAcuDWl9Pfd1lcD3wAuBl4DPpRSWp9d96fAx4B24NMppQeO91qG7f7z4qt7uevpRu5ZmbmwsruKssiG7zKqK8oZUZW5X1NRfnR5ZXn2cRkjKo8ur6ksz64rOxrgu21TXdF1+0rDvSRJ6kcDImxHRDnwMnAFsBlYBlyfUno+Z5vfBy5IKf1uRFwHvDel9KGImAfcASwC6oGfAmemlNp7ej3Ddv/r6Egs37CL9TsOcLitncOt7Rxu7eBQ69H7za3t2
XUd2WXd7rcdvd9xim/N8rKgpqLsSEivriynOhvWe/p5NLR3/Vmd8/h4z6/uRQt+Son2jkRHgo7s/faU6OjIvU+XZR0pZbflyON8z0uJI/chc4JTUVZGRXlQXhZUlGV+VpaXHfdx53MqysKLYCVJ6qXjhe3+HER5EbAmpbQ2W9RSYAnwfM42S4C/yt7/PvDPkfmLvwRYmlJqBtZFxJrs/h7rp9rVC2VlwaLZE1g0e0Kf95VSorU9HQntzd1Ce/dw3pwT7JuzYf7oz6PbN7d1sPtgy5Fl3X+easCHoy34ZQEdOeG3IycQDyZlARVl2TBe3hnIyzJBvrxrQM8E98zjsghOlNODE27Ql9VHXr/zdXLrOd66o9tEl9fp8vxebJMr379790X5Gj3yvV26b3ait1RuSUd/79xl0eN25Dt2x2xz/GN8bD3H+Zfr8Tk9LD/OmyyldPTYJEhk/v+l7P3MNpnjlzmm3ZcdfX7uMjj6b9DTPo/Wl/l9I7re76w9yPwf67yfWZfdvofnc2Rd5Gxz9DFxgmPc/Tid8B109BgOKvn+T/dwXPL+/+/ldj3tdyC0U+S+F/O9P9NxtiPvdinPsmO3y9X986Knz06OrD/6WXLsc7t+zvT0ORXABy6ezoKZ4/NUVDr9GbanAZtyHm8GXtfTNimltojYA0zMLn+823OndX+BiLgZuBlg5syZBStc/S8iqKoIqirKGFNT2S+vmVKirSMdbYU/JrRnQn/35d1De3tHoiyC8rLMCUh5HA2h5WVH75cFXZYf3ZYu20Ycuzzffssi83qQCfpt7ZmW77aOTE2t3R63tWd+3/aOjuzPzm2OPm7rSLS1H/9xe3tmn53L2k9wxnKik44TBYATPz+7TercW84fiW5/S44EqDz7T90XcOwfmbx/xNKxf2zz/0GPXmyTR7cNe/q73uUwHff3zHN8jv2bm/+E4Dj7OG49x+wn/9oen3OcnSXICZ4ZXcNp15PBrmG3W8g9utEx4Tb7El2CcOeyBKQOSHQcE+A7H5N93JFyQ3vXUH8k0Oc8n7z7O7qPk9XbYDgQAmRv5D25PYnj0tsT3x5fq8RnJt0/f44JqTnb5jthPTbM9nBCnWcfuXvr6TO2p8/Orp+hvXwO3Z+buXPpGZNYMMAiYH+G7Xz/Vbu/K3vapjfPJaV0C3ALZLqRnGyBGt4iMq2zleVljK4pdTWSJGko6M8ryTYDM3IeTwcae9omIiqAscDOXj5XkiRJGlD6M2wvA+ZGxOyIqAKuA+7pts09wA3Z+x8Afp4y3yfcA1wXEdURMRuYCzzZT3VLkiRJp6TfupFk+2B/EniAzNB/t6WUVkfE54HlKaV7gK8B38xeALmTTCAnu92dZC6mbAM+cbyRSABWrFixIyI2FPFXOp5JwI4SvfZQ4PHrG49f33j8+sbj1zcev77x+PWdx/DUzOppxZCd1KaUImJ5T8O/6MQ8fn3j8esbj1/fePz6xuPXNx6/vvMYFp6zf0iSJElFYtiWJEmSisSwXRy3lLqAQc7j1zcev77x+PWNx69vPH594/HrO49hgdlnW5IkSSoSW7YlSZKkIjFsS5IkSUVi2JYkSZKKxLAtSZIkFYlhW5IkSSoSw7YkSZJUJIZtSZIkqUgM25IkSVKRGLYlSZKkIjFsS5IkSUVi2JYkSZKKxLAtSZIkFYlhW5IkSSqSilIXUCyTJk1KDQ0NpS5DkiRJQ9yKFSt2pJTq8q0bsmG7oaGB5cuXl7oMSZIkDXERsaGndQOiG0lE3BYR2yPiuR7WR0R8OSLWRMSqiLiov2uUJEmSTtZAadm+Hfhn4Bs9rL8KmJu9vQ741+xP6YiUEgda2jnc2l7qUiRJUgnUVldQU1le6jK6GBBhO6X0YEQ0HGeTJcA3UkoJeDwixkXE1JTS1n4pUCXR3NbOrgOt7DzQwq6DLUd+vra/6+OdB1rZeaCZXQdaaWnvKHXZkiSpRL5w7YW876LppS6jiwERtnthGrAp5/Hm7LIuYTsibgZuBpg5c2a/FacTa+9I7DnUmj8wH2hhZ7f7uw60sr+5rcf9jRtZyYSRVYwfVcW0cSM4f9oYJoyqZsKoSkYMsDNaSZLUPy6cMa7UJRxjsITtyLMsHbMgpVuAWwAWLlx4zHoV1+6DLdz19BZefHVfl9bonQda2H2oldTDv8jIqnLGj6xiYm0V40dWcXpdLeNHVjFhVCXjR1UxcVRV9nEmXI8bUUlF+YC43ECSJOm4BkvY3gzMyHk8HWgsUS3KkVLi8bU7WbpsI/c/9yotbR1Mqq1mUjY4nz1lzJGQPGFkJjxPyLmNH1k14PpWSZIkFcpgCdv3AJ+MiKVkLozcY3/t0mra18z3V2zmu8s2sv61g4ypqeD6S2Zw3aKZnDN1TKnLkyRJGhAGRNiOiDuANwOTImIz8DmgEiCl9H+B+4B3AmuAg8BHS1Pp8NbekXjolSaWPrmJn76wjbaOxKKGCXz6rXN55/lTbaGWJEnqZkCE7ZTS9SdYn4BP9FM56mbrnkPcuWwzdy7fxJbdh5gwqoqPLm7gQ5fMZM7k2lKXJ0mSNGANiLCtgaetvYOfv7idpcs28cuXttOR4LI5k/jTd57NFfNOo7rCVmxJkqQTMWyri007D7J02Ua+t3wz2/c1M3l0Nb/35jP40MKZzJw4stTlSZIkDSqGbdHS1sGPn3+VpU9u4uE1OygLePNZk7nukhn8xtmTHWZPkiTpFBm2h7FfN+1n6ZMb+cFTW9h5oIVp40bwmbedybWXTGfq2BGlLk+SJGnQM2wPM4db27nv2a0sfXITT67fSUVZ8LZzTuO6RTO4fG4d5WX55g+SJEnSqTBsDxMvbN3L0ic38h9Pb2Hv4TYaJo7kT648m/dfPI3Jo2tKXZ4kSdKQZNgewg40t/GfzzRyx7JNPLNpN1XlZVx53hSuWzSD18+eSJmt2JIkSUVl2B5iUkqs2ryHpcs2cs/KRg60tDN3ci1/8e55vG/BNMaPqip1iZIkScNGn8N2RPxv4NaU0nMFqEd99Kk7nuaHq7ZSU1nGuy+o5/pFM7ho5ngibMWWJEnqb4Vo2b4E+FRErABuBZamlPYWYL86Sc9s2s0PV23lxksb+IO3n8mYmspSlyRJkjSs9XkA5ZTSYmAe8Avgc0BjRHwjIt7U133r5Hz1obWMrq7gDw3akiRJA0JBZitJKb2UUvoTYAZwHVAL/DgiXomIz0bEhEK8jnq2aedB7n/uVT78upmMNmhLkiQNCIWeGrASGAOMBcqBjcBvARsj4sMFfi3l+Poj6wngxsUNpS5FkiRJWQUJ2xGxMCL+BdgK/APwODA3pfTWlNK5wJ8BXyzEa+lYew62snTZRt5zYb0zP0qSJA0gfQ7bEfEs8CiZLiQ3ArNSSn+WUlqXs9l3gLq+vpby+86TGznY0s5Nl88udSmSJEnKUYjRSO4Ebkspbelpg5RSE4XvsiKgpa2Drz+yjsvmTOLc+rGlLkeSJEk5ChGA/yfwWveFEVETEc6gUmT3PNPI9n3NfPyNp5e6FEmSJHVTiLD9PeD38yz/XTKt3iqS
lBJffXAtZ502mjfOnVTqciRJktRNIcL2YuDHeZb/BLi0APtXDx58ZQcvbdvHx994ujNESpIkDUCFCNsjgbY8yzuA0QXYv3rw1QfXctqYaq6+sL7UpUiSJCmPQoTtVcD1eZZ/GHiuAPtXHs837uXhNTu48dLZVFV47akkSdJAVIjRSP4GuCsi5gA/zy57K/BB4L0F2L/yuPWhtYyqKufDr5tZ6lIkSZLUgz43iaaU7gXeA8wCvpy9zQSuTin9sK/717G27jnEPc80cu0lMxg7wqnZJUmSBqpCtGyTUvoR8KNC7Esndvsj6+lIid9Z7CQ2kiRJA5mdfQeZfYdb+c4TG3nn+VOZMWFkqcuRJEnScRRiuvaqiPjriHg5Ig5HRHvurRBF6qjvLtvEvuY2bnYSG0mSpAGvEC3bfwPcAPwTmeH+/hj4CplZJfNNdqNT1NrewW0Pr7EM0TEAACAASURBVON1sydwwfRxpS5HkiRJJ1CIsH0t8LsppX8D2oG7U0qfBj4HXFGA/Svrvme30rjnsK3akiRJg0QhwvZpwPPZ+/uBzibXHwFv780OIuLKiHgpItZExGfzrJ8VET+LiFUR8cuImF6AugeVlBJffWgtZ9SN4i1nTS51OZIkSeqFQoTtjUDnFIZrgHdk778BOHSiJ0dEOZluJ1cB84DrI2Jet83+F/CNlNIFwOeBvytA3YPKY2tf47kte/n45adTVubU7JIkSYNBIcL2f5CZxAbgS8BfR8Q64Hbg1l48fxGwJqW0NqXUAiwFlnTbZh7ws+z9X+RZP+R99cG1TKqt4poF00pdiiRJknqpz+Nsp5T+NOf+9yNiE7AYeLmXk9pMAzblPN4MvK7bNs8A7ycT5t8LjI6IiSml1/pU/CDx8rZ9/OKlJv7gijOpqSwvdTmSJEnqpT61bEdEZUR8NyLO6FyWUnoipfSFk5g9Ml+fiNTt8R8Bb4qIp4E3AVuAtjz13BwRyyNieVNTUy9ffuC79aG11FSW8Zuvn1XqUiRJknQS+hS2U0qtZC6C7B6OT8ZmYEbO4+lAY7fXaUwpvS+ltAD4s+yyPXnquSWltDCltLCurq4PJQ0c2/ce5q6nG/ngxTOYMKqq1OVIkiTpJBSiz/b/A97Xh+cvA+ZGxOyIqAKuA+7J3SAiJkVEZ61/CtzWh9cbVP79sfW0dnTwscucml2SJGmw6XOfbTKjkfx5RFwOLAcO5K5MKX3heE9OKbVFxCeBB4By4LaU0uqI+DywPKV0D/Bm4O8iIgEPAp8oQN0D3oHmNr71+EbeMW8KDZNGlbocSZIknaRChO0bgV3ABdlbrgQcN2wDpJTuA+7rtuwvc+5/H/h+XwsdbL63fBN7DrXycSexkSRJGpQKMRqJ/RuKoL0j8bVH1nHxrPFcPGt8qcuRJEnSKShEn20VwQOrX2XTzkN8/HJbtSVJkgarPrdsR8SXj7c+pfTpvr7GcJNS4t8eXMusiSO5Yt5ppS5HkiRJp6gQfbbP7/a4Ejg7u++nCrD/YWf5hl08s2k3f7PkXMqdml2SJGnQKkSf7bd0XxYRNcDXgIf6uv/h6JYH1zJ+ZCUfuHjGiTeWJEnSgFWUPtsppcPA/0d2Ahr13q+b9vPTF7bxW6+fxYgqp2aXJEkazIp5gWQdUFvE/Q9JX3t4HZXlZfzWGxpKXYokSZL6qBAXSP5B90XAVOAjdBs7W8f32v5mfrBiM++/aBp1o6tLXY4kSZL6qBAXSH6q2+MOoAn4OvB3Bdj/sPHNxzfQ3NbBxy5zuD9JkqShwEltBojDre1847ENvO2cycyZbO8bSZKkoaDPfbYjoio7+kj35TURUdXX/Q8XP3hqMzsPtDiJjSRJ0hBSiAskvwf8fp7lvwvcWYD9D3kdHYlbH1rHBdPHsmj2hFKXI0mSpAIpRNheDPw4z/KfAJcWYP9D3k9f2Ma6HQf4+OWnE+EkNpIkSUNFIcL2SKAtz/IOYHQB9j/kffWhtUwbN4KrzptS6lIkSZJUQIUI26uA6/Ms/zDwXAH2P6Q9tXEXy9bv4mOXzaaivJjDnkuSJKm/FWLov78B7oqIOcDPs8veCnwQeG8B9j+k3frQWsbUVHDtJU7NLkmSNNT0uSk1pXQv8B5gFvDl7G0mcHVK6Yd93f9QtvG1g/zouVf5yOtnUVtdiPMeSZIkDSQFSXgppR8BPyrEvoaT2x5ZR3lZcOOlDaUuRZIkSUVQiHG23xQRb+ph+Rv7uv+havfBFr67bBNL5k/jtDHHDFMuSZKkIaAQV+R9ERifZ/mY7Drl8e0nNnKotZ2bLncCTkmSpKGqEGH7LOCZPMufza5TN81t7Xz9kfW88cw6zp4yptTlSJIkqUgKEbYPAfV5lk8HWgqw/yHn7qcb2bG/mZudml2SJGlIK0TYfgD4+4g40pUkIiYAf5tdpxwdHYlbHlrLOVPHsHjOxFKXI0mSpCIqxGgkfwQ8CKyPiFXZZRcATcB1Bdj/kPKrl5tYs30/X/zQhU7NLkmSNMQVYpztrcCFZEL3KjJ9tf8QOB+Y19f9DzW3PLiWKWNqePcF+XreSJIkaSgp1DjbB4GvAkTENOCjwGoyE92UF+I1hoLntuzhsbWv8d/feTaVTs0uSZI05BUk8UVEeUS8NyLuBdaTmab9/wJzCrH/oeKrD62ltrqC6xbNLHUpkiRJ6gd9atmOiLOAm4DfBg4A3wHeAfxWSun5vpc3dGzZfYgfrtrK7yxuYExNZanLkSRJUj845ZbtiHgIeBwYB1ybUjo9pfTnQCpUcUPJ1x9eB8CNi53ERpIkabjoSzeSNwDfAL6UUvpVX4qIiCsj4qWIWBMRn82zfmZE/CIino6IVRHxzr68Xn/bc6iVO57cyLsvmMq0cSNKXY4kSZL6SV/C9kIy3VAeyobgz0TElJPdSUSUA18BriIzesn1EdF9FJM/B+5MKS0gM5zgv/Sh7n639MmNHGhp5+NOYiNJkjSsnHLYTimtTCl9ApgKfAFYAmzK7vNduZPcnMAiYE1KaW1KqQVYmt1Xl5cDOuc1Hws0nmrd/a2lrYOvP7KeS8+YyHnTxpa6HEmSJPWjQoyzfTil9M2U0puBc4B/BD4DvBoR9/diF9PIhPROm7PLcv0V8JsRsRm4D/hUvh1FxM0RsTwiljc1NZ3cL1IkP1zVyKt7D/PxN9qqLUmSNNwUdLDnlNKalNJngRnAtUBLL56WbxrF7hdZXg/cnlKaDrwT+GZEHFN7SumWlNLClNLCurq6k6y+8FJKfPWhdcydXMubzyx9PZIkSepfRZlZJaXUnlK6O6XUvTtIPpvJhPNO0zm2m8jHgDuz+34MqAEmFaLWYnpkzWu8sHUvH3/j6U7NLkmSNAwNhGkMlwFzI2J2RFSRuQDynm7bbATeChAR55AJ2wOjn8hx3PLQWupGV7NkvlOzS5IkDUclD9sppTbgk8ADwAtkRh1ZHRGfj4irs5v9IfDxiHgGuAO4MaU0oMfzfmHrXh58uYkbL22gusIZ6yVJkoajPs0gWSgppfvIXPiYu+wvc+4/Dyzu77r64taH1jGispyPvM6
p2SVJkoarkrdsD0Wv7jnMPc9s4UOXzGDcyKpSlyNJkqQSMWwXwe2Prqe9I/E7Ts0uSZI0rBm2C2x/cxvffmIDV503lZkTR5a6HEmSJJWQYbvAvrtsE/sOt3HT5bZqS5IkDXeG7QJqa+/gtofXsahhAgtm9na2ekmSJA1VA2I0kqGivCz4xw9cQHWl5zCSJEkybBdURHDpnAE/saUkSZL6iU2wkiRJUpEYtiVJkqQiiQE+6/kpi4gmYEOJXn4SsKNErz0UePz6xuPXNx6/vvH49Y3Hr288fn3nMTw1s1JKdflWDNmwXUoRsTyltLDUdQxWHr++8fj1jcevbzx+fePx6xuPX995DAvPbiSSJElSkRi2JUmSpCIxbBfHLaUuYJDz+PWNx69vPH594/HrG49f33j8+s5jWGD22ZYkSZKKxJZtSZIkqUgM25IkSVKRGLYlSZKkIjFsS5IkSUVi2JYkSZKKxLAtSZIkFYlhW5IkSSoSw7YkSZJUJIZtSZIkqUgM25IkSVKRGLYlSZKkIjFsS5IkSUVi2JYkSZKKpKLUBRTLpEmTUkNDQ6nLkCRJ0hC3YsWKHSmlunzrhmzYbmhoYPny5aUuQ5IkSUNcRGzoaZ3dSCRJkqQiGbIt2+p/L2/bxw+faaSivIyrL6ynYdKoUpckSZJUUoZt9cnWPYe4Z2Ujd61s5IWteykLSMAXfvIyF84YxzXz63n3BfXUja4udamSJEn9LlJKpa6hKBYuXJjss10cew61cv+zW7lr5RaeWLeTlGB+Nli/64J62jo6+M9nGrnr6Uae37qX8rJg8ZxJXDO/nrefO4Xaas/xJEnS0BERK1JKC/OuM2yrNw63tvOLF7dz18ot/OLFJlraOzh90iiWzJ/Gkvk9dxl5Zds+7lq5hbtXNrJ51yFqKsu4Yt4UrplfzxvPrKOy3MsGJEnS4GbY1ilp70g8sfY17lq5hfufe5V9h9uoG13Ney6o55oF9Zw/bSwR0at9pZRYsWEXd63cwr2rtrLrYCvjR1byrgumcs38aVw8a3yv9yV119zWzivb9rO6cQ879rcwpqaCMSMqM7eaSsaOqGTMiArGjqikuqK81OVKkoYYw7Z6LaXE6sa93L1yC/c808i2vc3UVlfwjnOncM2Cet5w+kQq+tga3dLWwUOvNHHXykZ+8vyrHG7tYPr4ESyZX88186cx97TRBfptNBQdbGnjha37WN24h+e27GF1415e3raP1vbefZbVVJblBPDsz5qKbo+zQT0b0DvX1VZVUFbmSaG6am5rZ92OA7yybT+7DrZw3rSxnFs/xhM7aRgxbOuENu08yN0rt3DXykbWbN9PZXnwpjMnc82Cet52zmnUVBbnj8b+5jZ+vPpV7lrZyMOvNNGRYN7UMVyzoJ6rL5zGlLE1RXldDQ57DrWyunEPq7fszYTrxr2sbdpPR/Zja8KoKs6tH8O59WM5b1rm59SxNew73MaeQ63sPdya+Zm9ZZa1sedgzroj27Sx93Arx/tILAsY3a2lvHtwn1RbxRl1tcyZXMu4kVX9c6DUL/Y3t/Hr7ft5Zft+1hy57WPjzoNH3pOdqirKuGDaWC6aNZ6LZo7n4lnjvVBcGsIM28pr54EW7l2VGUlkxYZdACxqmMCSBfW887ypjB/Vv0GhaV8zP8zW88ym3UTA62dP5JoF9Vx53lTGjqjs13rUv5r2NfNc4x6eb9zLc1v28FzjHjbtPHRk/dSxNTnBOtNyOHVsTUG7H3V0JPa3ZMJ4ZxDfmw3i3cP7ntzwnl3W3NbRZX+TaquZM3kUcyePZs7kWuZOzoTwutHVdpsawHYdaDkSqF/Zvo812/fz6+37adxz+Mg2leXB7EmjmDO5ljnZf985dbWMG1nJqs17eGrjLlZs2MWzm/fQ0p55X8ycMJKLZ43nolnjuXjmeM6aMppyvymRhgTDto442NLGT57fxt0rG3nw5SbaOhJnnTaaJQvqufrCeqaPH1nqEgFYt+MAd2cvrFy34wBVFWX8xlmZlvY3nzW5aC3tKr6UElt2H2J1415Wb8m0Vq9u3MO2vc1Htpk1cSTn1Y/l3Gxr9bn1Y5hUO/BbBQ+3trN9bzNrmvbxyrbOsJYJavua245sN7qm4kjwzoTwTFibNm6E3VT6SUqJbXubj4Tp3H+r1w60HNluRGU5Z+ScMHXeZk4Y2asLvJvb2nluy16e2pAJ3ys27qJpX+a9PqqqnAUzx2dbv8exYOZ4GxU0qKSU2Hu4jaZ9h9m+t5mdB1sYUVnepUve2BGV1FSWDfkGBsP2MNfW3sHDa3Zw98pGHlj9Kgdb2pk6toars32kz5k6ptQl9iilxKrNe7hr5Rb+85mt7NjfzOiaCt553lSWLKjn9bMnDutw0tzWfkyra2tbB9WV5VRXlFGT/VldUUZ1ZTk12Z/VFWX9MhJMR0di/WsHMoE627/6ucY97D7YCmS6ZcyZXMt59WOZVz+G86Zlfo6pGVqBI6XE9n3N2QC+jzVN+3ll235+3bSfHfuPBruayrIjXVByw/isiaMG5Mg9re0dR7vsZFv6D7W2U1VRRk1FOdWVZd3eh5llNRXlVJZHv/zxbe9IbN518EiY7gzW3U+Axo6o7HLcz8jerx9b2BOglBKbdx060vK9YsMuXti6l44EETB3cm2m9Tvb9WT2pFFDPqRo4GnvSLx2oJnte5tp2tfM9n2Hsz8zy7bvO8z2fZl13b/Ry6eqvIwxIyqOXA9ztOtdRZ5raLp21RtdUzkovgEybA9DKSVWbtrN3Ssb+eGqxiMjNLzrgqksmT+NRQ0TBl1IbWvv4NFfZ0ZHeeC5VznQ0s6UMZmThiXz65k3dcyg+6OU222hS9/hPF0W9nYLNXsPt3K49cQfcj0pL4ujQbyinJrKrmGoMyhV5wal3ODUZbuj63YfbMm0Wme7hBxoaQcyH7ZnTRnNedPGMK9+LOfVj+HsKWMYUTW8v6XYdaCFNU3ZltVt+1nTlAmCW3Yf7UJTURY0TBrVJYDPmVzLGXW1ffqWJ6XEwZb2I++1zPvw2PdZT+/Lzn/bUxHBsUG8F++v3JPH3J+d69raE79uOhqs1zbt7xIG6kZXHzmOc7Ohes7kWupqS9e150BzG89s2n2k5fupDbvYezhzIjB+ZCUXzxrPgmz4vnD6uGH/f2Yg6jyp27jzIOUROe/Lo5+rne/j6orStfIebm0/EpqbegjQ2/c189r+5mOuQ4DMSWnd6Gomd97G1DB5dDV12dvEUdUcbm3v8rmx55jPkuwt57OmLd+L5RhdfXSEqfwBvYKxIzNB/bxpYzltTP9f72XYHkYadx9i6bJN3L1yCxteO0hVRRlvO2cyS+ZP481n1Q2Zq+MPtbTz0xe2cffKLfzypUx3mLmTa7lmwTQWzBxHULrQfbitPe+HSe6HTeeH0L7DrXk/0DrlXpDX00V5Y7qNplFVXkZzWwfNre2Zn22Zn4c7H7fm3M+3rq2d5tY867L7PNzWQcsJWjJGVpUzb+qYTB/raWM5r34scybXUlUx8FpnB6oDzW2Z0JgN4J0tshteO3DkPR
MB08eP6NLFYfLoavY3t3X5Q1eMP3Jd/tDlvC9rKstpae848h46nPNe6vK+zL6XmnPeX7nvyyPPa82/7kQyxyX35GQ0c+pqGTty4H9r0tGROWFYkdP1ZG3TASBz4jWvfsyRlu+LZ42nftyIElc8fLS0dbDhtQNdviV5Jc9J3Yl0hu6ayp5PKI/5Rug46zp/VpQFOw+05IToowG6aV8zew61HlNLWWSuLzkaomuYPKY6J0QfDdTF6MLZ/aT/yOdWl2tjen/S/4VrL+R9F00veJ0nYtgeJtraO3jLP/2SzbsOcekZE1kyfxpXnjdlyH0l393OAy3c++xW7n56C8uzF3oOJMcbaq5rYO4WXAbwUHMdHalLoMoNQSOqymmYOGpQfO03GDW3tbN+x8EjfY07+xmvbTpw5EK8XJXl0W04w86wPHi/vk0p+/7Lc/IYBA2TRjKyamjNVLvzQAtPZ7uePLVxFys37T5y0jF1bA0XZft+n1c/pqQnteVlwcTaaupqqwf1yfXBljbWNh04pk//htcO0p5zgjp9/Igu3Y9mTRxFSnQ5wTycc4LZ3O0E80QnlvkaQ3ob26orypg8JvNvkRugJ4+uOdISPXlMpjV6IP4/763u3dmmjx/BxBJc42PYHibuf3Yrv/ftp/jXj1zEVedPLXU5JbF518EuI1iUQlVFWZeW6KHybYIGtrb2DjbtOsSO/c1dWp+Hw4VJw1Frewcvbt3Hig07WbFxN09t2NWl69FAMH5k5ZFgN3l0NXVjsqFv9NFW08ljaqitLt2J0Z6DrV0uaO68niL3WJaXBQ0TRx5zQfPpdaP6/aQupURre8obxJvb2mltT0wYVUnd6BrG1FT4f78fGbaHiWv/7TEadx/iV3/8lkF9lipJOnlb9xzi5W376Sjh3/W29sSO/Ucvqsv0Bc48btrXnPfbl5FV5XlbXI8E82zr7PiRVaf0TV9KiaZ9zV3CdOf9zpFhINMS3HmBcvfW6sHcSq/+cbywPSC+Z4uIK4EvAeXArSmlv++2/ovAW7IPRwKTU0rj+rfKgW114x6eXLeTP3vnOQZtSRqGpo4dwdSxA7f/dkqJ3Qdbadrf7YK8vUdHu3hh615+9XIz+3NGiulUWR5Mqu1sFa852kUiJ5jX1lSw8bWDOV0/Mt1AOi84hcx1CGdMruXNZ9ZlQvVptcypG8208SP8+6miKHnYjohy4CvAFcBmYFlE3JNSer5zm5TSZ3K2/xSwoN8LHeD+/dH1jKgs59qFM0pdiiRJx4gIxo+qYvyoKs48bfRxtz3Y0nakVTzfsHObdx3kqY272JkzJnp3nbO5Xj2/njl1mYtk556WuYjY7hXqTyUP28AiYE1KaS1ARCwFlgDP97D99cDn+qm2QWHngRbuWtnIBy+ePiiutJck6XhGVlXQMKmChkmjjrtda3sHO460lDez91ArMyeOZE5dbb/Pgiz1ZCCE7WnAppzHm4HX5dswImYBs4Gf97D+ZuBmgJkzZxa2ygHsjic30tLWwY2XNpS6FEmS+k1ledmA7z4jDYQe//m+y+np6o7rgO+nlPLOpJBSuiWltDCltLCurq5gBQ5kre0dfOvxDVw2ZxJzT/C1nCRJkvrXQAjbm4HcjsbTgcYetr0OuKPoFQ0iP169ja17DtuqLUmSNAD1OWxHxF9HxH/Js/zmiOhN3+plwNyImB0RVWQC9T159ncWMB54rK81DyW3P7qOmRNG8pazJ5e6FEmSJHVTiJbtG4Fn8ixfCXz0RE9OKbUBnwQeAF4A7kwprY6Iz0fE1TmbXg8sTUN1YPBT8NyWPSxbv4vffsMshyuSJEkagApxgeRpwLY8y5uAKb3ZQUrpPuC+bsv+stvjvzrF+oas2x9dz8iqcj7ocH+SJEkDUiFatjcCl+VZfjmwpQD7Vx479jdzz8pG3n/RdMaOcLg/SZKkgagQLdu3Al+MiAqODsn3VuAfgP9VgP0rj6VPbqSlvYMbLp1V6lIkSZLUg0KE7X8E6oB/BTqbWFuBf+4+7boKo7W9g28+voHL505izmSH+5MkSRqo+tyNJGX8MTCZTHeSy4HJKaU/6uu+ld+PnnuVbXub+ejihlKXIkmSpOPoc8t2RNQBlSmlRnKG5YuIeqA1pdTU19dQV7c/up5ZE0fy5jMd7k+SJGkgK8QFkt8G3pNn+buAbxVg/8qxavNuVmzYxQ1vaKDM4f4kSZIGtEKE7UuAX+VZ/qvsOhXQ7Y+uZ1RVOR9YOL3UpUiSJOkEChG2Kzl6YWSu6uxNBdK0r5kfPrOVD1w8nTE1DvcnSZI00BUibD8JHDNdO/B7wIoC7F9Zd2SH+/vtSxtKXYokSZJ6oRBD//0F8NOIuAD4WXbZW8l0IbmiAPsX0NLWwbce38CbzqzjjLraUpcjSZKkXijE0H+PAIuBRuDDwEey9xenlB7u6/6Vcf9zW9m+r5kbHe5PkiRp0ChEyzYppaeA67ovj4hRKaUDhXiN4e72R9cze9Io3jS3rtSlSJIkqZcK0Wf7GBHx+oj4GrC1GPsfblZu2s3TG3dzwxtmOdyfJEnSIFKwsB0REyPiMxHxHJlh/2YAf1io/Q9n//7oemqrK3j/xQ73J0mSNJgUYgbJtwMfB94NPAOcDVyaUnqyr/sWbN93mB+uauQjr5vFaIf7kyRJGlROuWU7Ij4XEeuBfwNeBC5IKb0eSMD+wpSn7zyxkdb2xA0O9ydJkjTo9KVl+y+AvwM+l1LqKFA9ypEZ7m8jbzmrjtmTRpW6HEmSJJ2kvvTZ/u/AB4CNEfGPEXF+gWpS1n3PbmXH/mZuXDy71KVIkiTpFJxy2E4p/UNK6RzgeqAOeCwingECmFSg+oa1rz+6ntPrRnH5HA+nJEnSYFSISW0eSindCEwF/hV4GvhFRDweEY5Gcoqe3riLZzbt5sZLGxzuT5IkaZAq2NB/KaV9KaX/m1K6BFgALAP+tFD7H25uf3Q9o6sreN9FDvcnSZI0WBVlUpuU0qqU0qeA+mLsf6jbtvcw967aygcXzqC2uiCTfEqSJKkEihK2O6WUWoq5/6Hq209spD0lfvsNs0pdiiRJkvqgqGFbJ6+5rZ3vPLGB3zhrMg0O9ydJkjSoGbYHmHtXbWXH/hZuXNxQ6lIkSZLUR4btASSlxNcfWc+cybVc5nB/kiRJg55hewB5auNunt2yhxsubSDC4f4kSZIGu1MK2xGxKyJ29ubWy/1dGREvRcSaiPhsD9tcGxHPR8TqiPjOqdQ90N3+6HpG11TwvgXTSl2KJEmSCuBUx5X7o0IVEBHlwFeAK4DNwLKIuCel9HzONnPJjNm9OKW0KyImF+r1B4pX9xzm/me3cuOlDYxyuD9JkqQh4ZRSXUrpawWsYRGwJqW0FiAilgJLgOdztvk48JWU0q7s628v4OsPCN9+YkN2uL+GUpciSZKkAhkIfbanAZtyHm/OLst1JnBmRDySnQb+ynw7ioibI+L/b+/Oo+wqy3yPf58z1EhlTshM0higGQQkQDeIC0EGucigEqBtBSfa7outq1u7pb3aNvYfevt612172d0LEcXuF
ijmqFHEqV2iaCphTACJkKSqEpKQOanhTM/9Y++qnFROpYra+9SuOuf3WavW2fvd737rqTf7pJ56z7vf3WFmHTt27KhSuPHryxf5zm82c/FJx7J4ZkvS4YiIiIhITCIn22aWNbPPhfOpD5hZrvxrNE1UKPMh+xlgGXAhcCNwp5lNO+Ik9zvcfbm7L589e/Yb/VES871nt7LzYI4Park/ERERkZoSx8j27YTTPIA08FngTmAv8IlRnN8FLCrbXwhsqVDnUXfPu/urwEsEyfekFyz39yrL5hzDecfPTDocEREREYlRHMn29cCfufvXgALwkLv/BfAPwNtHcf5qYJmZLTWzBuAGYOWQOo8MtGVmswimlbwSQ+yJW7NpN+u27OPm87Xcn4iIiEitiSPZngusC7cPAAPTO1YBl410srsXgFuBx4AXgHZ3X2dmt5vZVWG1x4CdZrYe+BnwaXffGUPsifvmrzYypSnDtVruT0RERKTmxLHGXCcwD9gM/J5gCb81BKuM9I2mAXdfRZCcl5d9vmzbgb8Kv2rG1r29/PD51/jwW5fS0qDl/kRERERqTRwj2ysJEmyAfwG+aGYvA3cD34yh/Zr1n09uwt15/x8dl3QoIiIiIlIFkYdT3f3TZdv3mVk3cB7wO3d/JGr7tWpgub93/OGxLJqh5f5EREREatGYk20zu9jdfzK03N1/CfwyUlR1YOUzW9jdk+dmLfcnIiIi8dKmBgAAHiNJREFUUrOiTCN53MxeMbPPmpnu7nsD3J1vPbGRE49t44//QMv9iYiIiNSqKMn2KcBDwMeBjWb2fTO7xszS8YRWu1Zv3M36rVruT0RERKTWjTnZdvcX3P1TBA+huZ7gqY/3A91m9mUzOzGmGGvOt371KlObs1xzhj4QEBEREallkVcjcfeCuz/k7lcCxwFfBd4NrDezX0Rtv9Z07+nlsXXbuOGcRTQ36EMAERERkVoWx9J/g9x9C/CvBAn3HuD8ONuvBVruT0RERKR+xPYkFTN7B/Ah4BqCh9ncA9wZV/u1oC9f5J7fbubSk+eycLqW+xMRERGpdZGSbTNbDHwQuJlgCskvgFuAB9x9VE+PrCePPt3NHi33JyIiIlI3oqyz/TjwdmA7wdMiv+HuG+IKrNa4O998YiMnzW3j3KUzkg5HRERERMZBlJHtXoIbIb/v7sWY4qlZv3l1Fy++tp8vv+c0LfcnIiIiUifGnGy7+1VxBlLrvvXERqa1ZLlay/2JiIiI1I1YVyORyrp29/Cj9a9x4zmLacpquT8RERGReqFkexz8x5ObMDP+VMv9iYiIiNQVJdtV1psrcu9vO7nslGNZMK056XBEREREZBwp2a6yR57uZm9vnpvPW5p0KCIiIiIyzpRsV5G7860nNnLyvCmcvWR60uGIiIiIyDhTsl1Fv35lJy9t28/N5y/Rcn8iIiIidUjJdhV964mNzGht4KrT5ycdioiIiIgkQMl2lXTu6uHHL2zjxnMWabk/ERERkTqlZLtKtNyfiIiIiCjZroKeXIF7f7uZy0+dy7ypWu5PREREpF4p2a6Ch5/qZl9fgQ+etyTpUEREREQkQUq2Yzaw3N+pC6Zw1nFa7k9ERESkninZjtmvfr+Tl7cf4Obzlmq5PxEREZE6p2Q7Zt98YiMzWxu48s3zkg5FRERERBI2IZJtM7vczF4ysw1m9pkKx282sx1m9nT49ZEk4hxJoVgCnPedu1jL/YmIiIgImaQDMLM08DXgEqALWG1mK919/ZCq97n7reMe4BuQSae486azcfekQxERERGRCWAijGyfA2xw91fcPQfcC1ydcEyRaK62iIiIiMDESLYXAJ1l+11h2VDvMbNnzewBM1tUqSEzu8XMOsysY8eOHdWIVURERERk1BKfRgJUGgYeOg/ju8A97t5vZh8D7gYuOuIk9zuAOwDCOd6b4g52lGYBryf0vWuB+i8a9V806r9o1H/RqP+iUf9Fpz4cm2EfGT4Rku0uoHykeiGwpbyCu+8s2/068OWRGnX32bFENwZm1uHuy5P6/pOd+i8a9V806r9o1H/RqP+iUf9Fpz6M30SYRrIaWGZmS82sAbgBWFlewczK19G7CnhhHOMTERERERmTxEe23b1gZrcCjwFp4C53X2dmtwMd7r4S+EszuwooALuAmxMLWERERERklBJPtgHcfRWwakjZ58u2bwNuG++4Irgj6QAmOfVfNOq/aNR/0aj/olH/RaP+i059GDPTmtAiIiIiItUxEeZsi4iIiIjUJCXbIiIiIiJVomRbRERERKRKlGyLiIiIiFSJkm0RERERkSpRsi0iIiIiUiVKtkVEREREqkTJtoiIiIhIlSjZFhERERGpEiXbIiIiIiJVomRbRERERKRKlGyLiIiIiFSJkm0RERERkSrJJB1AtcyaNcuXLFmSdBgiIiIiUuPWrFnzurvPrnSsZpPtJUuW0NHRkXQYIiIiIlLjzGzTcMc0jUREREREpEpqdmRbRETGR6nk7OvLs6cnT0+uSEtDmpaGNM0NaVoaMqRTlnSIIiKJUbItIiIAuDs9uSK7e3Ls6QmS5909Ofb05tlzMMfunjx7wv1DdXLs7c1T8uHbbcikaA0T7+aBRDybprUx3M8OJOcZWsuS9IGkvfy8lrJjzdk0KSXyUmUD74u9vcF7Ym9vnr29ucP29/QGrwf6CmTTKRqzKZoyaZqyKZqyaRozwWtTNkXjkPLGbHqw7tBjwTlp/cE6ySnZFhGpQblC6VBiXClRPphnT29ZeZhc54qlYdtsbUgzraWBaS1Zprc0sGBa8+D2tJYGprdkac6m6c0X6ckV6c0Frz35Aj39YVm+EJT1F9m+v+/werkC+eJRsvYKmrKpwxLz5myaTDpFJmVk0ykyaSOTSpFNG5l0imzKgrLB7aBONhW+hudm0uE5g+Vl7YxQN8iLDDMwIGUD2+GrgZlhBNupcJuwTmrIcbNDbZmFx8O2OKz9Q+claaL+AdRfCBLmfWFiPJgoDybQA/u5wQR6X3i8cJS/JjMpY2pzlqktWY5pzFAoOn2FIv35En35Iv2F4PVobYwkm7bBRLwxkx42mW/MpEjZxOz/8fIn5y5m+ZIZSYdxGCXbNeYLK9fxdOceFs9o4biZLeFrK8fNbGFOWyNW529CkdEqlpy+fJHefJAMlm/35sv3S4f2c0X6C0VKHpzv7hTdKXkw1aLkTrHEMOXhvh/a97CdgbIj2i0dWb8/TLIP5orD/mwN6RTTWrLhVwNLZ7UyvaWBqWHiPL0ly9Tm4HV6a5BcT23O0phJV73f88VSWQIeJubhdm+uyMFckd6y8t58kYP9hcGEvTdfpFAqkS86PbkChZKTLzqFYincLlEo+mCdQrFEvhS8RsiFZIhMykinjEzKSIWv6VRqsDw9WHa0/dQIx4M20ykG6wLs76uUQOfpzQ//ngBoa8oMXuvTmhuYN7WZqYP7wetAUj21OXjvTG3O0tqQHtXv1kKxNJh494Wv/fkSfYXiYFLeny/SNyRJ7wvrHK3u/r7CYLlT3xfypafMTTqEIyjZriGdu3q4
+9cbWTqzlbWbd/O9Z7cc9sujKZti8YwWFs8Iku/jZrawaEYLx81oYeH0Fhoyul92rNydg7ki+/vy5ArDjwyOTyxBAubhNgRJmjs4fui4V6pf9uoMtlEa3A8KS2VtOcFxBo4nrFD0w5Lf3nxpyH6YLJdtDyTTfWHd3nxxTP+OKQumTKQtSDBSFiQFqXD0cmDfjLD80LGg3EinDtVNhfXMgsQjlTq8nUrtNmRSTAsT5WmtDUxrHhh5DhPn5iwto0wOkpBNp5janGJqc3bcv3ep5ORLYTJePLSdDxP1QjFM0MsS9aEJfKF06D0Gh953pRIjv8fCEzyMxQfPL68/0P6hbYbUT1LwswZ/DBZKTrEYvJaG2w/7+NB+0PdF9/APp6BOsURQN6wz8FUoBd9voNzdaWs6lBQvmtHCac0DyfFAecMRCfSU5mzVp2oEn6KkaG1U6lVv9C9eQx5c2wXAtz98Dgunt5ArlOje08umnQfp3NXDpp09bNrVw+adPfxyww768oeSiZTBvKnNg0n4QEI+MELe1jT+v/jGS6nkHMgV2N9XYH9f/rDXfRXKKm0f6C9oVGyCa84Gc4GbMimawukGzdk0xzRmmHVM4+B+c0MwRzLYTtEczplsLjun/PyB+k3ZFA3p1IRNYmVkqZTRmEqjXEhE4qT/UmpEqeTc39HF+cfPYuH0FiAY4Vo6q5Wls1qPqO/u7Njfz6YwCd+88yCbdwXJ+I/WbWPnwdxh9We0NgyOgicxPaUYjh71F0rki+FXwckVi+QKPliWKwQfQe/vP5QM7xuaGA9JmA/kCiOOBmVSRltThrambPiaYdGMFtqaMkwpK2trytI4AT4hGJjDWb49MMdz2Pmkh80pHcV80vI5ohNovmg6ZYclxs0NwTxGJcEiIpIEJds14tev7KR7Ty9/c/mJo6pvZsyZ0sScKU2cXeFGgv19eTaHo+ADCXnnrh6e6jz69JTFYQJanvzmiocnw/liWFYokTuiPEiiB7YHyqOMGjekU4clw21NGZbMailLnLO0NWaOSKbbmrJMCV+bskrWRERE5I1Tsl0j2js6mdKU4bKYbgxoa8pyyvypnDJ/6hHH8sUS3bt7wykpBwenp3Tu6uGJDa/Tmy+SDe/Uz6ZTNGSCj9cHyhoyYXk6RVM2xZSmTFC3rF55nfJzBo4NlAfnWNnx4GP/8sS5KVv9m7pEREREKlGyXQP29uT5wfOvcf3yReOSWGbTKZbMamXJrFZg9mHHPJyPoVFgERERESXbNWHlM93kCiVWLF+UdChKskVERETKJH8nl0TW3tHFSXPbOHXBlKRDEREREZEySrYnufVb9vFc916uP3uRRpVFREREJhgl25Pc/Ws6aUinuOaMBUmHIiIiIiJDKNmexPoLRR55qptLTj6W6a0NSYcjIiIiIkMo2Z7EfvLCdnb35Llu+cKkQxERERGRCpRsT2LtHZ3MndLEBctmj1xZRERERMZd5GTbzG4xsxsrlN9oZh+N2r5UtnVvL7/43Q7ee9ZC0indGCkiIiIyEcUxsv0p4LUK5d3hMamCh9Z2U3J471maQiIiIiIyUcWRbC8GXq1Qvjk8JjFzd9o7Ojl36YzwKY4iIiIiMhHFkWxvB06rUH46sDOG9mWI37y6i007eybEEyNFREREZHhxJNv3Al81swvskLcB/w+4L4b2ZYj2jk6OacxwxWnzkg5FRERERI4ijmT7c8DTwH8DveHXz4BngL8bTQNmdrmZvWRmG8zsM8PUWWFm681snZl9J4a4J6X9fXlWPbeVd50+n+aGdNLhiIiIiMhRZKI24O79wLVmdipwJmDAWnd/fjTnm1ka+BpwCdAFrDazle6+vqzOMuA24Hx3321mc6LGPVl979mt9OVLrNDa2iIiIiITXuRk28xSgIXJ9fNl5WnA3b00QhPnABvc/ZXwvHuBq4H1ZXU+CnzN3XcTNLo9atyTVXtHJ8vmHMMZi6YlHYqIiIiIjCCOaSQPAH9VofyTwP2jOH8B0Fm23xWWlTsBOMHMnjCzJ83s8koNhWt+d5hZx44dO0bxrSeXl7ft56nNe1ixfBFmWltbREREZKKLI9m+APhhhfLHgLeO4vxKWaMP2c8Ay4ALgRuBO83siKFdd7/D3Ze7+/LZs2vvqYr3r+kikzKuOXPo3yIiIiIiMhHFkWwfA+QqlBeAKaM4vwsoX8NuIbClQp1H3T3v7q8CLxEk33UjXyzx0NouLjppDrPbGpMOR0RERERGIY5k+3lgRYXyFRw+73o4q4FlZrbUzBqAG4CVQ+o8ArwdwMxmEUwreWXMEU9CP3txO68fyGltbREREZFJJPINksA/Ag+Y2RLgp2HZxcCfAtePdLK7F8zsVoJpJ2ngLndfZ2a3Ax3uvjI8dqmZrQeKwKfdva4emNPe0cXstkYuPLH2pseIiIiI1Ko4lv571MyuA/4X8IGw+Bngend/eJRtrAJWDSn7fNm2E9yEWelGzJq3fV8fP3tpOx+94A/IpOP4MEJERERExkMcI9u4+yMEUz2kCh56qptiyblOa2uLiIiITCpVGSY1s7lm9hkz+1012q8n7k57RyfLj5vO8bOPSTocEREREXkDYku2zSxlZu8ys0eBzQQPohnVNBIZ3trNu3llx0HdGCkiIiIyCcXxBMnjgQ8DNxEk77OB94ZTSySi9tVdtDSkueLN85IORURERETeoDGPbJvZ+8zs58CzwFLgIwTrZTug6SMxONhf4HvPbuF/nDaPYxpjmV4vIiIiIuMoSgZ3N/Al4Ep3PzBQqMeIx2fVc1s5mCuy4mxNIRERERGZjKLM2b4LuBX4gZndYmZTY4pJQvd3dLF0VivLj5uedCgiIiIiMgZjTrbd/RZgPkHS/QHgNTN7ELAo7UrglR0H+O3GXVy3fKE+LRARERGZpCIlxe7e4+7fdPe3Am8heIT668ATZvafZnZ1HEHWowfWdJFOGe99i9bWFhEREZmsYhuBdvcX3P3TwELgQ8BM4IG42q8nhWKJB9d2ceEJs5kzpSnpcERERERkjGKf7uHuBXd/0N3fCSyJu/168IuXd7BtXz/XaW1tERERkUmtqnOr3b27mu3XqvbVXcxsbeCik+YkHYqIiIiIRKAbGSeYnQf6+fEL27j2zAU0ZPTPIyIiIjKZKZubYB5+qptCyTWFRERERKQGKNmeQNyd9o5OTl80jRPntiUdjoiIiIhEFDnZNrNVlR5oY2ZtZrYqavv15Nmuvfxu2wFWLNdyfyIiIiK1II6R7cuAxgrlTcAlMbRfN9o7OmnKpnjX6fOTDkVEREREYpAZ64lmdvLAJnCCmc0qO5wGLge2RIitrvTmiqx8egtXnDqPKU3ZpMMRERERkRiMOdkGngc8/PrvIccMyAGfjNB+XXls3Wvs7y/oxkgRERGRGhIl2f5DgqR6PXABwWPaB+SAre7eF6H9utLe0cniGS2cu3RG0qGIiIiISEzGnGy7+0sAZtbs7v3xhVR/Nu/s4Ve/38lfX3ICqZQlHY6IiIiIxCSOGyTfaWYXDeyY2d+Y2QYze9TMZsfQfs17YE0
nZvCes7QKiYiIiEgtiSPZ/kegAcDMTge+CHwbmAF8JYb2a1qx5DywposLls1m/rTmpMMRERERkRjFkWwvAV4Mt98NPOrutwOfAC6Nof2a9sSG19myt09ra4uIiIjUoDiS7RzQEm5fDPw43N4FTImh/ZrW3tHJtJYsl5x8bNKhiIiIiEjM4ki2nwC+bGafBs4BBp4auQzoHk0DZna5mb0UzvX+TIXjN5vZDjN7Ovz6SAxxJ25PT44frdvGNWcsoDGTTjocEREREYlZHMn2xwmeFvkR4BPu3hWWXwX8ZKSTzSwNfA14J3AycGPZA3PK3efuZ4Rfd8YQd+IefXoLuWKJFVpbW0RERKQmRVlnGwB330iFx7K7+8dH2cQ5wAZ3fwXAzO4FriZYv7umtXd0cuqCKZw8X7NtRERERGpRHCPbmFnWzK40s0+Y2ZSwbNHA9ggWAJ1l+11h2VDvMbNnzewBM6s4FGxmt5hZh5l17Nix4w3/HOPp+e69rNuyT6PaIiIiIjUscrJtZksIRqG/Q7DU36zw0F8D/zSaJiqU+ZD97wJL3P3NBDdg3l2pIXe/w92Xu/vy2bMn9hLfD6zpoiGT4qrT5ycdioiIiIhUSRwj2/9McJPkTKC3rPxhgtVJRtIFlA/vLgS2lFdw951lT6n8OnDWmKOdAPryRR5+qpvLTpnLtJaGpMMRERERkSqJPGcbOB84393zZocNUm8CRjNsuxpYZmZLCVYvuQH4k/IKZjbP3beGu1cBL0SOOkGPr9/G3t681tYWERERqXFxJNvp8GuohcD+kU5294KZ3Qo8FrZzl7uvM7PbgQ53Xwn8pZldBRQI1u++OYa4E9Pe0cmCac2cd/yskSuLiIiIyKQVR7L9OMHyf38e7ruZtQJ/D/xwNA24+yoOrc89UPb5su3bgNtiiDVx3Xt6+eWG1/n4RctIpypNVxcRERGRWhFHsv0p4Odm9izBetvfBk4gGNV+fwzt15QH13ThDtedpSkkIiIiIrUujnW2N5vZmwkS67MIbrq8D7jb3UecRlJPSiXn/jWdnP+mmSya0TLyCSIiIiIyqY052TazuwieGLnf3Q8A/xZfWLXpyVd30rmrl09demLSoYiIiIjIOIiy9N9NQHNcgdSD+zu6aGvKcNkpc5MORURERETGQZRkW3f3vQH7+vKsem4rV58xn6ZspcVbRERERKTWRH2ozdAnPcowvvvMFvoLJT2eXURERKSORL1B8rUhD7I5grtrGBdo7+jipLltnLZgatKhiIiIiMg4iZps3wLsiSOQWvbSa/t5pnMPn7vyZEb640REREREakfUZPu77r49lkhqWHtHJ9m0ce2ZC5IORURERETGUZQ525qvPQq5QomHn+rmkpOPZUZrQ9LhiIiIiMg40mokVfbTF7ex62CO63RjpIiIiEjdGfM0EnePupJJXWjv6GLulCbetmx20qGIiIiIyDhTwlxF2/b18fOXtvOesxaQTumDABEREZF6o2S7ih5c20XJ4bqzNIVEREREpB4p2a4Sd+f+ji7OWTqDJbNakw5HRERERBKgZLtKOjbt5tXXD+qJkSIiIiJ1TMl2lbSv7qS1Ic0Vp81NOhQRERERSYiS7So40F/g+89t5V2nz6elIepzg0RERERkslKyXQXff3YLPbkiK87WFBIRERGReqZkuwraO7p405xjOHPRtKRDEREREZEEKdmO2YbtB1izaTcrli/ETGtri4iIiNQzJdsxu39NJ+mUce2ZC5MORUREREQSpmQ7RsWS89Dabi46aQ6z2xqTDkdEREREEqalMmKUThn3fPRcCiVPOhQRERERmQCUbMfsTXPakg5BRERERCYITSMREREREakSJdsiIiIiIlVi7rU5v9jMdgCbEvr2s4DXE/retUD9F436Lxr1XzTqv2jUf9Go/6JTH47Nce4+u9KBmk22k2RmHe6+POk4Jiv1XzTqv2jUf9Go/6JR/0Wj/otOfRg/TSMREREREakSJdsiIiIiIlWiZLs67kg6gElO/ReN+i8a9V806r9o1H/RqP+iUx/GTHO2RURERESqRCPbIiIiIiJVomRbRERERKRKlGxHYGaXm9lLZrbBzD5T4Xijmd0XHv+NmS0Z/ygnJjNbZGY/M7MXzGydmX2iQp0LzWyvmT0dfn0+iVgnKjPbaGbPhX3TUeG4mdlXw+vvWTN7SxJxTkRmdmLZdfW0me0zs08OqaPrr4yZ3WVm283s+bKyGWb2uJm9HL5OH+bcm8I6L5vZTeMX9cQxTP/9k5m9GL4/HzazacOce9T3ej0Ypv++YGbdZe/RK4Y596i/q+vBMP13X1nfbTSzp4c5t+6vv6g0Z3uMzCwN/A64BOgCVgM3uvv6sjp/AbzZ3T9mZjcA17r79YkEPMGY2TxgnruvNbM2YA1wzZD+uxD4lLtfmVCYE5qZbQSWu3vFhw+Ev3g+DlwBnAv8s7ufO34RTg7he7kbONfdN5WVX4iuv0Fm9jbgAPBtdz81LPvfwC53/1KYxEx3978dct4MoANYDjjBe/0sd989rj9Awobpv0uBn7p7wcy+DDC0/8J6GznKe70eDNN/XwAOuPv/Ocp5I/6urgeV+m/I8a8Ae9399grHNlLn119UGtkeu3OADe7+irvngHuBq4fUuRq4O9x+ALjYzGwcY5yw3H2ru68Nt/cDLwALko2q5lxN8B+ru/uTwLTwjxw53MXA78sTbTmSu/8C2DWkuPz/uLuBayqcehnwuLvvChPsx4HLqxboBFWp/9z9R+5eCHefBBaOe2CTxDDX32iM5nd1zTta/4V5yQrgnnENqo4o2R67BUBn2X4XRyaLg3XC/1D3AjPHJbpJJJxecybwmwqH/9jMnjGzH5jZKeMa2MTnwI/MbI2Z3VLh+GiuUYEbGP6XjK6/ozvW3bdC8Ac0MKdCHV2Ho/Mh4AfDHBvpvV7Pbg2n4dw1zDQmXX8juwDY5u4vD3Nc119ESrbHrtII9dA5OaOpU9fM7BjgQeCT7r5vyOG1wHHufjrwL8Aj4x3fBHe+u78FeCfwP8OPCcvp+huBmTUAVwH3Vzis6y8eug5HYGafBQrAfw1TZaT3er36N+B44AxgK/CVCnV0/Y3sRo4+qq3rLyIl22PXBSwq218IbBmujpllgKmM7WOwmmRmWYJE+7/c/aGhx919n7sfCLdXAVkzmzXOYU5Y7r4lfN0OPEzwcWm50Vyj9e6dwFp33zb0gK6/Udk2MDUpfN1eoY6uw6MIbxi9EnifD3MT1Sje63XJ3be5e9HdS8DXqdwvuv6OIsxN3g3cN1wdXX/RKdkeu9XAMjNbGo6O3QCsHFJnJTBw5/17CW6E0V/UDM4R+wbwgrv/32HqzB2Y425m5xBcrzvHL8qJy8xawxtLMbNW4FLg+SHVVgIfsMAfEdz8snWcQ53ohh3R0fU3KuX/x90EPFqhzmPApWY2PfyY/9KwrO6Z2eXA3wJXuXvPMHVG816vS0PuQbmWyv0ymt/V9ewdwIvu3lXpoK6/eGSSDmCyCu8ev5Xgl0YauMvd15nZ7UCHu68kSCb/w8
w2EIxo35BcxBPO+cD7gefKlhv6O2AxgLv/O8EfKH9uZgWgF7hBf6wMOhZ4OMwFM8B33P2HZvYxGOy/VQQrkWwAeoAPJhTrhGRmLQQrFPxZWVl5/+n6K2Nm9wAXArPMrAv4e+BLQLuZfRjYDFwX1l0OfMzdP+Luu8zsiwRJD8Dt7l53n/AN03+3AY3A4+F7+clw9ar5wJ3ufgXDvNcT+BESNUz/XWhmZxBMC9lI+F4u77/hflcn8CMkqlL/ufs3qHDPiq6/+GnpPxERERGRKtE0EhERERGRKlGyLSIiIiJSJUq2RURERESqRMm2iIiIiEiVKNkWEREREakSJdsiIiIiIlWiZFtEREREpEr+P4PSUo163/j3AAAAAElFTkSuQmCC\n", 268 | "text/plain": [ 269 | "
" 270 | ] 271 | }, 272 | "metadata": { 273 | "needs_background": "light" 274 | }, 275 | "output_type": "display_data" 276 | } 277 | ], 278 | "source": [ 279 | "from train import train\n", 280 | "\n", 281 | "train()" 282 | ] 283 | } 284 | ], 285 | "metadata": { 286 | "kernelspec": { 287 | "display_name": "Python 3", 288 | "language": "python", 289 | "name": "python3" 290 | }, 291 | "language_info": { 292 | "codemirror_mode": { 293 | "name": "ipython", 294 | "version": 3 295 | }, 296 | "file_extension": ".py", 297 | "mimetype": "text/x-python", 298 | "name": "python", 299 | "nbconvert_exporter": "python", 300 | "pygments_lexer": "ipython3", 301 | "version": "3.8.3" 302 | } 303 | }, 304 | "nbformat": 4, 305 | "nbformat_minor": 4 306 | } 307 | --------------------------------------------------------------------------------