├── figure.jpg ├── src ├── data_config.json ├── .DS_Store ├── experiments │ ├── .DS_Store │ ├── in-tandem-regularization │ │ ├── results │ │ │ └── .gitignore │ │ ├── configs │ │ │ ├── regularizers.json │ │ │ └── datasets.json │ │ └── main.py │ ├── stand-alone-regularization │ │ ├── results │ │ │ └── .gitignore │ │ ├── configs │ │ │ ├── regularizers.json │ │ │ └── datasets.json │ │ └── main.py │ └── larger-data │ │ └── main.py ├── legacy │ ├── comment.md │ └── legacy.py ├── regularizers.py ├── models.py ├── losses.py └── load_data.py ├── requirements.txt ├── LICENSE ├── README.md └── TANGOS_quickstart.ipynb /figure.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alanjeffares/TANGOS/HEAD/figure.jpg -------------------------------------------------------------------------------- /src/data_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "path_to_data": "path/to/your/data/folder/" 3 | } -------------------------------------------------------------------------------- /src/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alanjeffares/TANGOS/HEAD/src/.DS_Store -------------------------------------------------------------------------------- /src/experiments/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alanjeffares/TANGOS/HEAD/src/experiments/.DS_Store -------------------------------------------------------------------------------- /src/experiments/in-tandem-regularization/results/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /src/experiments/stand-alone-regularization/results/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /src/experiments/in-tandem-regularization/configs/regularizers.json: -------------------------------------------------------------------------------- 1 | { 2 | "no_reg": { 3 | "placeholder": [0] 4 | }, 5 | "l2": { 6 | "weight": [0.1, 0.01, 0.001] 7 | }, 8 | "l1": { 9 | "weight": [0.1, 0.01, 0.001] 10 | }, 11 | "dropout": { 12 | "p": [0.2, 0.5] 13 | }, 14 | "input_noise": { 15 | "std": [0.1, 0.01] 16 | }, 17 | "mixup": { 18 | "alpha": [1] 19 | }, 20 | "TANGOS": { 21 | "lambda_1": [1, 10, 100], 22 | "lambda_2":[0.1, 1], 23 | "param_schedule": [3], 24 | "subsample": [50] 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | liac-arff==2.5.0 2 | charset-normalizer==3.0.1 3 | functorch==1.13.1 4 | idna==3.4 5 | joblib==1.2.0 6 | numpy==1.24.2 7 | nvidia-cublas-cu11==11.10.3.66 8 | nvidia-cuda-nvrtc-cu11==11.7.99 9 | nvidia-cuda-runtime-cu11==11.7.99 10 | nvidia-cudnn-cu11==8.5.0.96 11 | pandas==1.5.3 12 | Pillow==9.4.0 13 | python-dateutil==2.8.2 14 | pytz==2022.7.1 15 | requests==2.28.2 16 | scikit-learn==1.2.1 17 | scipy==1.10.0 18 | six==1.16.0 19 | threadpoolctl==3.1.0 20 | torch==1.13.1 21 | torchaudio==0.13.1 22 | 
torchvision==0.14.1 23 | typing_extensions==4.4.0 24 | urllib3==1.26.14 25 | -------------------------------------------------------------------------------- /src/experiments/stand-alone-regularization/configs/regularizers.json: -------------------------------------------------------------------------------- 1 | { 2 | "no_reg": { 3 | "placeholder": [0] 4 | }, 5 | "l2": { 6 | "weight": [0.1, 0.01, 0.001] 7 | }, 8 | "l1": { 9 | "weight": [0.1, 0.01, 0.001] 10 | }, 11 | "dropout": { 12 | "p": [0.2, 0.5] 13 | }, 14 | "input_noise": { 15 | "std": [0.1, 0.01] 16 | }, 17 | "batch_norm": { 18 | "placeholder": [0] 19 | }, 20 | "mixup": { 21 | "alpha": [1] 22 | }, 23 | "TANGOS": { 24 | "lambda_1": [1, 10, 100], 25 | "lambda_2":[0.1, 1], 26 | "param_schedule": [3], 27 | "subsample": [50] 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/legacy/comment.md: -------------------------------------------------------------------------------- 1 | **Comment on functorch and batch norm** 2 | 3 | The original implementation of TANGOS used a different, less efficient method for the calculation of gradient attributions. This was updated after running the in tandem experiments. Although both methods produce identical results, there is a compatibility issue between functorch (the new library for calculating attributions) and batch norm. More details on this issue are discussed [here](https://pytorch.org/functorch/stable/batch_norm.html) and [here](https://github.com/pytorch/functorch/issues/384). We have therefore removed the combination of TANGOS and batch norm from the config for this experiment by default. In case this particular combination is required by someone in the future, we have included [the original implementation](https://github.com/alanjeffares/TANGOS/blob/main/src/legacy/legacy.py) for calculating the attribution loss in this folder. 4 | 5 | -------------------------------------------------------------------------------- /src/regularizers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | device = 'cuda:0' 5 | 6 | def l1(model): 7 | l1_regularisation = 0. 
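    # Sum of absolute values over every parameter tensor (weights and biases).
    # The experiment scripts scale the returned value by the 'weight' entry in
    # configs/regularizers.json before adding it to the prediction loss.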
8 | for param in model.parameters(): 9 | l1_regularisation += param.abs().sum() 10 | return l1_regularisation 11 | 12 | 13 | def add_input_noise(input, std, mean=0): 14 | return input + torch.randn(input.size()).to(device) * std + mean 15 | 16 | # adapted from https://github.com/facebookresearch/mixup-cifar10/blob/main/train.py 17 | def mixup_data(x, y, alpha=1.0, device='cpu'): 18 | '''Returns mixed inputs, pairs of targets, and lambda''' 19 | if alpha > 0: 20 | lam = np.random.beta(alpha, alpha) 21 | else: 22 | lam = 1 23 | 24 | batch_size = x.size()[0] 25 | if device=='cuda': 26 | index = torch.randperm(batch_size).cuda() 27 | else: 28 | index = torch.randperm(batch_size) 29 | 30 | mixed_x = lam * x + (1 - lam) * x[index, :] 31 | y_a, y_b = y, y[index] 32 | return mixed_x, y_a, y_b, lam 33 | 34 | 35 | def mixup_criterion(criterion, pred, y_a, y_b, lam): 36 | return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) -------------------------------------------------------------------------------- /src/models.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class UCI_MLP(nn.Module): 5 | def __init__(self, num_features, num_outputs, dropout=0, batch_norm=False): 6 | super(UCI_MLP, self).__init__() 7 | self.dropout = torch.nn.Dropout(p=dropout) 8 | self.batch_norm = batch_norm 9 | d = num_features + 1 10 | self.fc1 = nn.Linear(num_features, d) 11 | self.bn1 = nn.BatchNorm1d(d) 12 | self.relu1 = nn.ReLU(inplace=False) 13 | self.fc2 = nn.Linear(d, d) 14 | self.bn2 = nn.BatchNorm1d(d) 15 | self.relu2 = nn.ReLU(inplace=False) 16 | self.fc3 = nn.Linear(d, num_outputs) 17 | 18 | def forward(self, x): 19 | batch_size = x.shape[0] 20 | out = self.fc1(x) 21 | if self.batch_norm and batch_size > 1: 22 | out = self.bn1(out) 23 | out = self.relu1(out) 24 | out = self.dropout(out) 25 | out = self.fc2(out) 26 | if self.batch_norm and batch_size > 1: 27 | out = self.bn2(out) 28 | h_output = self.relu2(out) 29 | h_output = self.dropout(h_output) 30 | out = self.fc3(h_output) 31 | return out, h_output -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, Alan Jeffares 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /src/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | from functorch import jacrev 5 | from functorch import vmap 6 | 7 | def MSE(output, label): 8 | return nn.MSELoss()(output.squeeze(), label) 9 | 10 | 11 | class parameter_schedule: 12 | def __init__(self, lambda_1, lambda_2, epoch): 13 | self.lambda_1 = lambda_1 14 | self.lambda_2 = lambda_2 15 | self.switch_epoch = epoch 16 | 17 | def get_reg(self, epoch): 18 | if epoch < self.switch_epoch: 19 | return 0, 0 20 | else: 21 | return self.lambda_1, self.lambda_2 22 | 23 | 24 | def cosine_similarity(w1, w2): 25 | return torch.dot(w1, w2).abs() / (torch.norm(w1, 2) * torch.norm(w2, 2)) 26 | 27 | 28 | def weight_correlation(weights, device='cpu'): 29 | h_dim = weights.shape[0] 30 | 31 | weight_corr = torch.tensor(0., requires_grad=True).to(device) 32 | weights = weights.clone().requires_grad_(True) 33 | 34 | cos = nn.CosineSimilarity(dim=0, eps=1e-6) 35 | 36 | for neuron_i in range(1, h_dim): 37 | for neuron_j in range(0, neuron_i): 38 | pairwise_corr = cosine_similarity(weights[neuron_i, :], weights[neuron_j, :]) 39 | weight_corr = weight_corr + pairwise_corr.norm(p=1) 40 | 41 | return weight_corr / (h_dim * (h_dim - 1) / 2) 42 | 43 | 44 | def kl_divergence(mu, logvar): 45 | KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) 46 | return KLD 47 | 48 | 49 | 50 | def attr_loss(forward_func, data_input, device='cpu', subsample=-1): 51 | ########## UPDATE functools ############ 52 | batch_size = data_input.shape[0] 53 | def to_latent(input_): 54 | _, h_out = forward_func(input_) 55 | return h_out 56 | 57 | data_input = data_input.clone().requires_grad_(True) 58 | jacobian = vmap(jacrev(to_latent), randomness='same')(data_input) 59 | neuron_attr = jacobian.swapaxes(0, 1) 60 | h_dim = neuron_attr.shape[0] 61 | 62 | if len(neuron_attr.shape) > 3: 63 | # h_dim x batch_size x features 64 | neuron_attr = neuron_attr.flatten(start_dim=2) 65 | 66 | sparsity_loss = torch.norm(neuron_attr, p=1) / (batch_size * h_dim * neuron_attr.shape[2]) 67 | 68 | cos = nn.CosineSimilarity(dim=1, eps=1e-6) 69 | correlation_loss = torch.tensor(0., requires_grad=True).to(device) 70 | 71 | if subsample > 0 and subsample < h_dim * (h_dim - 1) / 2: 72 | tensor_pairs = [list(np.random.choice(h_dim, size=(2), replace=False)) for i in range(subsample)] 73 | for tensor_pair in tensor_pairs: 74 | pairwise_corr = cos(neuron_attr[tensor_pair[0], :, :], neuron_attr[tensor_pair[1], :, :]).norm(p=1) 75 | correlation_loss = correlation_loss + pairwise_corr 76 | 77 | correlation_loss = correlation_loss / (batch_size * subsample) 78 | 79 | else: 80 | for neuron_i in range(1, h_dim): 81 | for neuron_j in range(0, neuron_i): 82 | pairwise_corr = cos(neuron_attr[neuron_i, :, :], neuron_attr[neuron_j, :, :]).norm(p=1) 83 | correlation_loss = 
correlation_loss + pairwise_corr 84 | num_pairs = h_dim * (h_dim - 1) / 2 85 | correlation_loss = correlation_loss / (batch_size * num_pairs) 86 | 87 | return sparsity_loss, correlation_loss -------------------------------------------------------------------------------- /src/legacy/legacy.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Depreciated implementation of attribution loss 3 | ''' 4 | 5 | import torch 6 | import numpy as np 7 | import torch.nn as nn 8 | 9 | 10 | def MSE(output, label): 11 | return nn.MSELoss()(output.squeeze(), label) 12 | 13 | 14 | def cosine_similarity(w1, w2): 15 | return torch.dot(w1, w2).abs() / (torch.norm(w1, 2) * torch.norm(w2, 2)) 16 | 17 | 18 | def weight_correlation(weights, device='cpu'): 19 | h_dim = weights.shape[0] 20 | 21 | weight_corr = torch.tensor(0., requires_grad=True).to(device) 22 | weights = weights.clone().requires_grad_(True) 23 | 24 | cos = nn.CosineSimilarity(dim=0, eps=1e-6) 25 | 26 | for neuron_i in range(1, h_dim): 27 | for neuron_j in range(0, neuron_i): 28 | pairwise_corr = cosine_similarity(weights[neuron_i, :], weights[neuron_j, :]) 29 | weight_corr = weight_corr + pairwise_corr.norm(p=1) 30 | 31 | return weight_corr / (h_dim * (h_dim - 1) / 2) 32 | 33 | 34 | def kl_divergence(mu, logvar): 35 | KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) 36 | return KLD 37 | 38 | 39 | def attr_loss(forward_func, data_input, device='cpu', subsample=-1): 40 | # data_input = data_input.clone().detach().requires_grad_(True) 41 | 42 | #### CHANGED THISSS 43 | data_input = data_input.clone().requires_grad_(True) 44 | 45 | _, h_output = forward_func(data_input) 46 | 47 | batch_size = data_input.shape[0] 48 | h_dim = h_output.shape[1] 49 | 50 | neuron_attr = [] 51 | 52 | for neuron in range(h_dim): 53 | grad_outputs = torch.nn.functional.one_hot(torch.tensor([neuron]), h_dim).repeat((batch_size, 1)).to(device) 54 | grad = torch.autograd.grad(outputs=h_output, inputs=data_input, 55 | grad_outputs=grad_outputs, 56 | create_graph=True)[0] 57 | 58 | neuron_attr.append(grad) 59 | 60 | neuron_attr = torch.stack(neuron_attr) 61 | 62 | if len(neuron_attr.shape) > 3: 63 | # h_dim x batch_size x features 64 | neuron_attr = neuron_attr.flatten(start_dim=2) 65 | 66 | sparsity_loss = torch.norm(neuron_attr, p=1) / (batch_size * h_dim * neuron_attr.shape[2]) 67 | 68 | cos = nn.CosineSimilarity(dim=1, eps=1e-6) 69 | correlation_loss = torch.tensor(0., requires_grad=True).to(device) 70 | 71 | if subsample > 0 and subsample < h_dim * (h_dim - 1) / 2: 72 | tensor_pairs = [list(np.random.choice(h_dim, size=(2), replace=False)) for i in range(subsample)] 73 | for tensor_pair in tensor_pairs: 74 | pairwise_corr = cos(neuron_attr[tensor_pair[0], :, :], neuron_attr[tensor_pair[1], :, :]).norm(p=1) 75 | correlation_loss = correlation_loss + pairwise_corr 76 | 77 | correlation_loss = correlation_loss / (batch_size * subsample) 78 | 79 | else: 80 | for neuron_i in range(1, h_dim): 81 | for neuron_j in range(0, neuron_i): 82 | pairwise_corr = cos(neuron_attr[neuron_i, :, :], neuron_attr[neuron_j, :, :]).norm(p=1) 83 | correlation_loss = correlation_loss + pairwise_corr 84 | num_pairs = h_dim * (h_dim - 1) / 2 85 | correlation_loss = correlation_loss / (batch_size * num_pairs) 86 | 87 | return sparsity_loss, correlation_loss -------------------------------------------------------------------------------- /src/experiments/in-tandem-regularization/configs/datasets.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "student": { 3 | "type": "regression", 4 | "loader": "load_student", 5 | "num_features": 56, 6 | "num_outputs": 1, 7 | "lr": 0.0001 8 | }, 9 | "bioconcentration": { 10 | "type": "regression", 11 | "loader": "load_bioconcentration", 12 | "num_features": 45, 13 | "num_outputs": 1, 14 | "lr": 0.001 15 | }, 16 | "facebook": { 17 | "type": "regression", 18 | "loader": "load_facebook", 19 | "num_features": 21, 20 | "num_outputs": 1, 21 | "lr": 0.01 22 | }, 23 | "wine": { 24 | "type": "regression", 25 | "loader": "load_wine", 26 | "num_features": 11, 27 | "num_outputs": 1, 28 | "lr": 0.001 29 | }, 30 | "abalone": { 31 | "type": "regression", 32 | "loader": "load_abalone", 33 | "num_features": 9, 34 | "num_outputs": 1, 35 | "lr": 0.01 36 | }, 37 | "skillcraft": { 38 | "type": "regression", 39 | "loader": "load_skillcraft", 40 | "num_features": 18, 41 | "num_outputs": 1, 42 | "lr": 0.01 43 | }, 44 | "weather": { 45 | "type": "regression", 46 | "loader": "load_weather", 47 | "num_features": 45, 48 | "num_outputs": 1, 49 | "lr": 0.01 50 | }, 51 | "forest": { 52 | "type": "regression", 53 | "loader": "load_forest", 54 | "num_features": 39, 55 | "num_outputs": 1, 56 | "lr": 0.0001 57 | }, 58 | "protein": { 59 | "type": "regression", 60 | "loader": "load_protein", 61 | "num_features": 9, 62 | "num_outputs": 1, 63 | "lr": 0.01 64 | }, 65 | "heart": { 66 | "type": "classification", 67 | "loader": "load_heart", 68 | "num_features": 20, 69 | "num_outputs": 2, 70 | "lr": 0.01 71 | }, 72 | "breast": { 73 | "type": "classification", 74 | "loader": "load_breast", 75 | "num_features": 9, 76 | "num_outputs": 2, 77 | "lr": 0.01 78 | }, 79 | "cervical": { 80 | "type": "classification", 81 | "loader": "load_cervical", 82 | "num_features": 136, 83 | "num_outputs": 5, 84 | "lr": 0.01 85 | }, 86 | "credit": { 87 | "type": "classification", 88 | "loader": "load_credit", 89 | "num_features": 40, 90 | "num_outputs": 2, 91 | "lr": 0.001 92 | }, 93 | "hcv": { 94 | "type": "classification", 95 | "loader": "load_hcv", 96 | "num_features": 12, 97 | "num_outputs": 4, 98 | "lr": 0.001 99 | }, 100 | "tumor": { 101 | "type": "classification", 102 | "loader": "load_tumor", 103 | "num_features": 25, 104 | "num_outputs": 22, 105 | "lr": 0.001 106 | }, 107 | "soybean": { 108 | "type": "classification", 109 | "loader": "load_soybean", 110 | "num_features": 484, 111 | "num_outputs": 19, 112 | "lr": 0.001 113 | }, 114 | "australian": { 115 | "type": "classification", 116 | "loader": "load_australian", 117 | "num_features": 55, 118 | "num_outputs": 2, 119 | "lr": 0.001 120 | }, 121 | "entrance": { 122 | "type": "classification", 123 | "loader": "load_entrance", 124 | "num_features": 38, 125 | "num_outputs": 4, 126 | "lr": 0.001 127 | }, 128 | "thoracic": { 129 | "type": "classification", 130 | "loader": "load_thoracic", 131 | "num_features": 24, 132 | "num_outputs": 2, 133 | "lr": 0.001 134 | } 135 | } -------------------------------------------------------------------------------- /src/experiments/stand-alone-regularization/configs/datasets.json: -------------------------------------------------------------------------------- 1 | { 2 | "student": { 3 | "type": "regression", 4 | "loader": "load_student", 5 | "num_features": 56, 6 | "num_outputs": 1, 7 | "lr": 0.0001 8 | }, 9 | "bioconcentration": { 10 | "type": "regression", 11 | "loader": "load_bioconcentration", 12 | "num_features": 45, 13 | "num_outputs": 1, 14 | "lr": 0.001 15 | }, 16 | 
"facebook": { 17 | "type": "regression", 18 | "loader": "load_facebook", 19 | "num_features": 21, 20 | "num_outputs": 1, 21 | "lr": 0.01 22 | }, 23 | "wine": { 24 | "type": "regression", 25 | "loader": "load_wine", 26 | "num_features": 11, 27 | "num_outputs": 1, 28 | "lr": 0.001 29 | }, 30 | "abalone": { 31 | "type": "regression", 32 | "loader": "load_abalone", 33 | "num_features": 9, 34 | "num_outputs": 1, 35 | "lr": 0.01 36 | }, 37 | "skillcraft": { 38 | "type": "regression", 39 | "loader": "load_skillcraft", 40 | "num_features": 18, 41 | "num_outputs": 1, 42 | "lr": 0.01 43 | }, 44 | "weather": { 45 | "type": "regression", 46 | "loader": "load_weather", 47 | "num_features": 45, 48 | "num_outputs": 1, 49 | "lr": 0.01 50 | }, 51 | "forest": { 52 | "type": "regression", 53 | "loader": "load_forest", 54 | "num_features": 39, 55 | "num_outputs": 1, 56 | "lr": 0.0001 57 | }, 58 | "protein": { 59 | "type": "regression", 60 | "loader": "load_protein", 61 | "num_features": 9, 62 | "num_outputs": 1, 63 | "lr": 0.01 64 | }, 65 | "heart": { 66 | "type": "classification", 67 | "loader": "load_heart", 68 | "num_features": 20, 69 | "num_outputs": 2, 70 | "lr": 0.01 71 | }, 72 | "breast": { 73 | "type": "classification", 74 | "loader": "load_breast", 75 | "num_features": 9, 76 | "num_outputs": 2, 77 | "lr": 0.01 78 | }, 79 | "cervical": { 80 | "type": "classification", 81 | "loader": "load_cervical", 82 | "num_features": 136, 83 | "num_outputs": 5, 84 | "lr": 0.01 85 | }, 86 | "credit": { 87 | "type": "classification", 88 | "loader": "load_credit", 89 | "num_features": 40, 90 | "num_outputs": 2, 91 | "lr": 0.001 92 | }, 93 | "hcv": { 94 | "type": "classification", 95 | "loader": "load_hcv", 96 | "num_features": 12, 97 | "num_outputs": 4, 98 | "lr": 0.001 99 | }, 100 | "tumor": { 101 | "type": "classification", 102 | "loader": "load_tumor", 103 | "num_features": 25, 104 | "num_outputs": 22, 105 | "lr": 0.001 106 | }, 107 | "soybean": { 108 | "type": "classification", 109 | "loader": "load_soybean", 110 | "num_features": 484, 111 | "num_outputs": 19, 112 | "lr": 0.001 113 | }, 114 | "australian": { 115 | "type": "classification", 116 | "loader": "load_australian", 117 | "num_features": 55, 118 | "num_outputs": 2, 119 | "lr": 0.001 120 | }, 121 | "entrance": { 122 | "type": "classification", 123 | "loader": "load_entrance", 124 | "num_features": 38, 125 | "num_outputs": 4, 126 | "lr": 0.001 127 | }, 128 | "thoracic": { 129 | "type": "classification", 130 | "loader": "load_thoracic", 131 | "num_features": 24, 132 | "num_outputs": 2, 133 | "lr": 0.001 134 | } 135 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## TANGOS: Regularizing Tabular Neural Networks through Gradient Orthogonalization and Specialization 2 | 3 | [![pdf](https://img.shields.io/badge/PDF-ICLR%202023-red)](https://openreview.net/forum?id=n6H86gW8u0d) 4 | [![License: BSD 3-Clause](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/alanjeffares/TANGOS/blob/main/LICENSE) 5 | 6 | ![TANGOS](figure.jpg?raw=true "TANGOS") 7 | 8 | This repository contains the code associated with [our ICLR 2023 paper](https://openreview.net/forum?id=n6H86gW8u0d) where we introduce a novel regularizer for training deep neural networks. Tabular Neural Gradient Orthogonalization and Specialization (TANGOS) provides a framework for regularization in the tabular setting built on latent unit attributions. 
For further details, please see our paper. 9 | 10 | 11 | ### Getting Started With TANGOS 12 | To quickly get started with integrating TANGOS into a PyTorch workflow, we have provided a handy quickstart guide. This consists of a simple MLP training routine with a drop-in function that calculates and applies TANGOS regularization. This example notebook can be found in [TANGOS_quickstart.ipynb](https://github.com/alanjeffares/TANGOS/blob/main/TANGOS_quickstart.ipynb). 13 | 14 | ### Experiments 15 | **Setup** 16 | 17 | Clone this repository and navigate to the root folder. 18 | ``` 19 | git clone https://github.com/alanjeffares/TANGOS.git 20 | cd TANGOS 21 | ``` 22 | Ensure PYTHONPATH is also set to the root folder. 23 | ``` 24 | export PYTHONPATH="/your/path/to/TANGOS" 25 | ``` 26 | Using conda, create and activate a new environment. 27 | ``` 28 | conda create -n pip python 29 | conda activate 30 | ``` 31 | Then install the repository requirements. 32 | ``` 33 | pip install -r requirements.txt 34 | ``` 35 | 36 | **Data** 37 | 38 | Datasets can be downloaded using `wget` and the `` described in Appendix L of the paper. 39 | ``` 40 | wget -P /path/to/data/folder/ https://archive.ics.uci.edu/ml/machine- 41 | learning-databases// 42 | ``` 43 | Then set the path to your local data folder in `src/data_config.json`. 44 | ``` 45 | {"path_to_data": "path/to/data/folder/"} 46 | ``` 47 | 48 | 49 | **Running** 50 | 51 | These folders are associated with the commented experiments from the paper. 52 | ``` 53 | └── src 54 | └── experiments 55 | ├── behavior-analysis # TANGOS Behavior Analysis. 56 | ├── compute # Approximation and Algorithm. 57 | ├── in-tandem-regularization # Generalisaton: In Tandem Regularization. 58 | ├── larger-data # Performance With Increasing Data Size. 59 | └── stand-alone-regularization # Generalization: Stand-Alone Regularization. 60 | ``` 61 | 62 | The main experiments can be run by navigating to the root folder and running the following command. 63 | 64 | ```python src/experiments//main.py``` 65 | 66 | Results and hyperparameters of these experiments are saved in json format to the results folder. 67 | 68 | ```src/experiments//results``` 69 | 70 | The behavior analysis and compute experiments are included in ```.ipynb``` notebooks with instructions included. Please note that all jupyter notebooks are self contained and designed to be run in colab by clicking the link at the top of each notebook (e.g. [![open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/alanjeffares/TANGOS/blob/main/TANGOS_quickstart.ipynb)). 71 | 72 | _Note: To run in tandem experiments with batch norm please see [our comment](https://github.com/alanjeffares/TANGOS/blob/main/src/legacy/comment.md)._ 73 | 74 | ### Citation 75 | If you use this code, please cite the associated paper. 
76 | ``` 77 | @inproceedings{ 78 | jeffares2023tangos, 79 | title={{TANGOS}: Regularizing Tabular Neural Networks through Gradient Orthogonalization and Specialization}, 80 | author={Alan Jeffares and Tennison Liu and Jonathan Crabb{\'e} and Fergus Imrie and Mihaela van der Schaar}, 81 | booktitle={International Conference on Learning Representations}, 82 | year={2023}, 83 | url={https://openreview.net/forum?id=n6H86gW8u0d} 84 | } 85 | ``` 86 | -------------------------------------------------------------------------------- /src/experiments/stand-alone-regularization/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import itertools 4 | import argparse 5 | import json 6 | import copy 7 | from pathlib import Path 8 | import numpy as np 9 | import random 10 | import torch.nn as nn 11 | from torch import optim 12 | from sklearn.model_selection import KFold 13 | from src.losses import parameter_schedule, attr_loss, MSE 14 | from src.models import UCI_MLP 15 | from src.regularizers import l1, add_input_noise, mixup_data, mixup_criterion 16 | import src.load_data 17 | 18 | import warnings 19 | 20 | warnings.filterwarnings("ignore") 21 | 22 | EPOCHS = 200 23 | TRAINING_PATIENCE = 30 24 | BATCH_SIZE = 32 25 | K_FOLDS = 5 26 | DEVICE = 'cuda:0' 27 | 28 | 29 | def train_epoch(model, train_loader, optimiser, loss_func, params, regulariser, device='cpu'): 30 | reg_loss = 0 31 | for i, (data, label) in enumerate(train_loader): 32 | model.train() 33 | data, label = data.to(device), label.to(device) 34 | if regulariser == 'input_noise': 35 | data = add_input_noise(data, params['std']) 36 | optimiser.zero_grad() 37 | output, _ = model(data) 38 | pred_loss = loss_func(output, label) 39 | 40 | if regulariser == 'TANGOS' and (params['lambda_1_curr'] > 0 or params['lambda_2_curr'] > 0): 41 | sparsity_loss, correlation_loss = attr_loss(model, data, device=device, subsample=params['subsample']) 42 | reg_loss = params['lambda_1_curr'] * sparsity_loss + params['lambda_2_curr'] * correlation_loss 43 | 44 | elif regulariser == 'l1': 45 | reg_loss = params['weight'] * l1(model) 46 | 47 | elif regulariser == 'mixup': 48 | X_mixup, y_a, y_b, lam = mixup_data(data, label, alpha=params['alpha'], device=device) 49 | output_mixup, _ = model(X_mixup) 50 | reg_loss = mixup_criterion(loss_func, output_mixup, y_a, y_b, lam) 51 | 52 | loss = pred_loss + reg_loss 53 | loss.backward() 54 | optimiser.step() 55 | return model 56 | 57 | 58 | def evaluate(model, test_loader, loss_func, device=DEVICE): 59 | running_loss, running_pred_loss = 0, 0 60 | for epoch, (data, label) in enumerate(test_loader): 61 | model.eval() 62 | data, label = data.to(device), label.to(device) 63 | 64 | output, _ = model(data) 65 | 66 | # compute metric 67 | pred_loss = loss_func(output, label) 68 | loss = pred_loss 69 | 70 | running_loss += loss.item() 71 | running_pred_loss += pred_loss.item() 72 | 73 | return running_pred_loss/(epoch + 1), running_loss/(epoch + 1) 74 | 75 | def seed_worker(worker_id): 76 | worker_seed = torch.initial_seed() % 2 ** 32 77 | np.random.seed(worker_seed) 78 | random.seed(worker_seed) 79 | 80 | def ids_to_dataloader_split(data, train_ids, val_ids, seed): 81 | train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids) 82 | val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids) 83 | 84 | g = torch.Generator() 85 | g.manual_seed(seed) 86 | 87 | trainloader = torch.utils.data.DataLoader( 88 | data, 89 | batch_size=BATCH_SIZE, 
sampler=train_subsampler, worker_init_fn=seed_worker, generator=g) 90 | valloader = torch.utils.data.DataLoader( 91 | data, 92 | batch_size=BATCH_SIZE, sampler=val_subsampler, worker_init_fn=seed_worker, generator=g) 93 | return trainloader, valloader 94 | 95 | 96 | def init_results(tag, seed, datasets, config_regs, overwrite=False): 97 | """Helper function to initialise an empty dictionary for storing results""" 98 | results = load_results(f'experiment_{tag}_seed_{seed}') 99 | regularisers = config_regs.keys() 100 | if results and not overwrite: 101 | raise ValueError('Results already exist, to overwrite pass overwrite = True') 102 | else: 103 | results = {} 104 | for dataset in datasets: 105 | results[dataset] = {} 106 | for regulariser in regularisers: 107 | results[dataset][regulariser] = {} 108 | num_combinations = 1 109 | for value in config_regs[regulariser].values(): 110 | num_combinations *= len(value) 111 | for i in range(num_combinations): 112 | results[dataset][regulariser][i] = {} 113 | results[dataset][regulariser][i]['val_loss'] = [] 114 | 115 | save_results(results, f'experiment_{tag}_seed_{seed}') 116 | 117 | def load_results(file_name): 118 | curr_dir = os.path.dirname(__file__) 119 | results_dir = os.path.join(curr_dir, f'results/{file_name}.json') 120 | file_obj = Path(results_dir) 121 | if file_obj.is_file(): 122 | with open(results_dir) as f: 123 | results = json.load(f) 124 | return results 125 | else: 126 | print(f'{file_name}.json not found in results folder, generating new file.') 127 | return {} 128 | 129 | def save_results(results, file_name): 130 | curr_dir = os.path.dirname(__file__) 131 | results_dir = os.path.join(curr_dir, f'results/{file_name}.json') 132 | with open(results_dir, 'w') as f: 133 | json.dump(results, f) 134 | 135 | def run_fold(fold_name, model, trainloader, valloader, config, config_dataset, seed): 136 | tag, dataset, regulariser, params, fold = fold_name.split(':') 137 | loss_func = MSE if config_dataset['type'] == 'regression' else nn.CrossEntropyLoss() 138 | l2_weight = config['weight'] if regulariser == 'l2' else 0 139 | optimiser = optim.Adam(model.parameters(), lr=config_dataset['lr'], weight_decay=l2_weight) 140 | if regulariser == 'TANGOS': 141 | parameter_scheduler = parameter_schedule(config['lambda_1'], config['lambda_2'], config['param_schedule']) 142 | else: 143 | parameter_scheduler = None 144 | best_val_loss = np.inf; last_update = 0 145 | for epoch in range(EPOCHS): 146 | if regulariser == 'TANGOS': 147 | lambda_1, lambda_2 = parameter_scheduler.get_reg(epoch) 148 | config['lambda_1_curr'] = lambda_1 149 | config['lambda_2_curr'] = lambda_2 150 | 151 | model = train_epoch(model, trainloader, optimiser, loss_func, config, regulariser, device=DEVICE) 152 | val_loss, _ = evaluate(model, valloader, loss_func, device=DEVICE) 153 | 154 | if (val_loss < best_val_loss) or (epoch < 5): 155 | best_val_loss = val_loss 156 | best_model = copy.deepcopy(model) 157 | last_update = epoch 158 | 159 | # early stopping criteria 160 | if epoch - last_update == TRAINING_PATIENCE: 161 | break 162 | 163 | # save best model results for this fold 164 | results = load_results(f'experiment_{tag}_seed_{seed}') 165 | results[dataset][regulariser][params]['val_loss'].append(best_val_loss) 166 | save_results(results, f'experiment_{tag}_seed_{seed}') 167 | 168 | return best_val_loss, best_model, last_update 169 | 170 | def run_cv(config_dataset: dict, regulariser: str, params: dict, run_name: str, seed: int): 171 | data_fetcher = getattr(src.load_data, 
config_dataset['loader']) 172 | loaders = data_fetcher(seed=0) 173 | dropout = params['p'] if regulariser == 'dropout' else 0 174 | batch_norm = True if regulariser == 'batch_norm' else False 175 | kfold = KFold(n_splits=K_FOLDS, shuffle=False) 176 | best_loss = np.inf 177 | # loop through folds 178 | for fold, (train_ids, val_ids) in enumerate(kfold.split(loaders['train'])): 179 | torch.manual_seed(seed); np.random.seed(seed) 180 | trainloader, valloader = ids_to_dataloader_split(loaders['train'], train_ids, val_ids, seed=seed) 181 | fold_name = run_name + f':{fold}' 182 | model = UCI_MLP(num_features=config_dataset['num_features'], num_outputs=config_dataset['num_outputs'], 183 | dropout=dropout, batch_norm=batch_norm).to(DEVICE) 184 | fold_loss, fold_model, fold_epoch = run_fold(fold_name, model, trainloader, valloader, params, 185 | config_dataset, seed=seed) 186 | if fold_loss < best_loss: 187 | best_loss = fold_loss 188 | best_model = copy.deepcopy(fold_model) 189 | best_epoch = fold_epoch 190 | 191 | # evalutate best performing model on held out test set 192 | loss_func = MSE if config_dataset['type'] == 'regression' else nn.CrossEntropyLoss() 193 | test_loss, _ = evaluate(best_model, loaders['test'], loss_func) 194 | tag, dataset, regulariser, params = run_name.split(':') 195 | results = load_results(f'experiment_{tag}_seed_{seed}') 196 | results[dataset][regulariser][params]['test_loss'] = test_loss 197 | results[dataset][regulariser][params]['train_final_epoch'] = best_epoch 198 | print(test_loss) 199 | save_results(results, f'experiment_{tag}_seed_{seed}') 200 | 201 | def grid_search_iterable(parameter_dict: dict) -> list: 202 | """Generate an iterable list of hyperparameters from a dictionary containing the values to be considered""" 203 | keys, values = zip(*parameter_dict.items()) 204 | parameter_grid = [dict(zip(keys, v)) for v in itertools.product(*values)] 205 | return parameter_grid 206 | 207 | def load_config(name): 208 | curr_dir = os.path.dirname(__file__) 209 | config_dir = os.path.join(curr_dir, f'configs/{name}.json') 210 | with open(config_dir) as f: 211 | config_dict = json.load(f) 212 | config_keys = config_dict.keys() 213 | return config_dict, config_keys 214 | 215 | def run_experiment(seeds: list, tag: str): 216 | # load config files 217 | config_regs, regularisers = load_config('regularizers') 218 | config_data, datasets = load_config('datasets') 219 | 220 | for seed in seeds: 221 | # initialise results file 222 | init_results(tag, seed, datasets, config_regs, overwrite=True) 223 | for dataset in datasets: 224 | for regulariser in regularisers: 225 | parmaeter_iterable = grid_search_iterable(config_regs[regulariser]) 226 | for idx, param_set in enumerate(parmaeter_iterable): 227 | run_name = f'{tag}:{dataset}:{regulariser}:{idx}' 228 | # run CV on this combination 229 | print(run_name) 230 | run_cv(config_data[dataset], regulariser, param_set, run_name, seed) 231 | # save record of parameters used for this run 232 | param_record = load_results(f'params_record') 233 | param_record[f'id_:{seed}:{run_name}'] = param_set 234 | save_results(param_record, f'params_record') 235 | 236 | 237 | if __name__ == '__main__': 238 | parser = argparse.ArgumentParser() 239 | parser.add_argument('-seeds', default=[0], help='Set of seeds to use for experiments') 240 | parser.add_argument('-tag', default='tag', help='Tag name for set of experiments') 241 | args = parser.parse_args() 242 | print(args.seeds) 243 | run_experiment(seeds=args.seeds, tag=args.tag) 
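# ---------------------------------------------------------------------------
# Rough usage sketch of the pieces above outside the full grid-search loop.
# Everything below is illustrative only: the dataset choice (wine, which has
# 11 features per configs/datasets.json), the lambda values and the subsample
# size are placeholders, not settings recommended by the paper.
#
#   loaders = src.load_data.load_wine(seed=0)
#   model = UCI_MLP(num_features=11, num_outputs=1).to(DEVICE)
#   optimiser = optim.Adam(model.parameters(), lr=0.001)
#   trainloader = torch.utils.data.DataLoader(loaders['train'], batch_size=BATCH_SIZE)
#   for data, label in trainloader:
#       data, label = data.to(DEVICE), label.to(DEVICE)
#       optimiser.zero_grad()
#       output, _ = model(data)
#       sparsity_loss, correlation_loss = attr_loss(model, data, device=DEVICE, subsample=50)
#       loss = MSE(output, label) + 1.0 * sparsity_loss + 0.1 * correlation_loss
#       loss.backward()
#       optimiser.step()
# ---------------------------------------------------------------------------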
-------------------------------------------------------------------------------- /src/experiments/larger-data/main.py: -------------------------------------------------------------------------------- 1 | from functorch import jacrev 2 | from functorch import vmap 3 | from torch import optim 4 | import numpy as np 5 | from sklearn.experimental import enable_iterative_imputer # noqa: F401,E402 6 | import pandas as pd 7 | from torch.utils.data import Dataset, DataLoader, TensorDataset 8 | from sklearn.model_selection import train_test_split 9 | from sklearn.preprocessing import StandardScaler 10 | import torch 11 | import torch.nn as nn 12 | import json 13 | 14 | d = pd.read_csv('path/to/data/dionis', header = None) 15 | TRAINING_RATIO = 0.1 # change this for different ratios of training data 16 | 17 | y = d.iloc[:,0] 18 | X = d.iloc[:, 1:] 19 | 20 | 21 | SEED = 0 22 | BATCH_SIZE = 256 23 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED) 24 | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=SEED) 25 | 26 | 27 | num = int(X_train.shape[0] * TRAINING_RATIO) 28 | X_train = X_train[:num] 29 | y_train = y_train[:num] 30 | 31 | scaler_train = StandardScaler() 32 | X_train = scaler_train.fit_transform(X_train) 33 | X_val = scaler_train.transform(X_val) 34 | X_test = scaler_train.transform(X_test) 35 | 36 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train.to_numpy())) 37 | val_dataset = TensorDataset(torch.Tensor(X_val), torch.Tensor(y_val.to_numpy())) 38 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test.to_numpy())) 39 | loaders = { 40 | 'train': DataLoader(train_dataset, 41 | batch_size=BATCH_SIZE, 42 | shuffle=True, 43 | num_workers=1), 44 | 45 | 'val': DataLoader(val_dataset, 46 | batch_size=BATCH_SIZE, 47 | shuffle=False, 48 | num_workers=1), 49 | 50 | 'test': DataLoader(test_dataset, 51 | batch_size=int(BATCH_SIZE), 52 | shuffle=False, 53 | num_workers=1) 54 | } 55 | 56 | 57 | class UCI_MLP(nn.Module): 58 | def __init__(self, num_features, num_outputs, dropout=0, batch_norm=False): 59 | super(UCI_MLP, self).__init__() 60 | self.dropout = torch.nn.Dropout(p=dropout) 61 | self.batch_norm = batch_norm 62 | d = num_features + 1 63 | self.fc1 = nn.Linear(num_features, 400) 64 | self.bn1 = nn.BatchNorm1d(d) 65 | self.relu1 = nn.ReLU(inplace=False) 66 | self.fc2 = nn.Linear(400, 100) 67 | self.bn2 = nn.BatchNorm1d(d) 68 | self.relu2 = nn.ReLU(inplace=False) 69 | self.fc3 = nn.Linear(100, 10) 70 | self.relu3 = nn.ReLU(inplace=False) 71 | self.fc4 = nn.Linear(10, num_outputs) 72 | 73 | def forward(self, x): 74 | batch_size = x.shape[0] 75 | out = self.fc1(x) 76 | if self.batch_norm and batch_size > 1: 77 | out = self.bn1(out) 78 | out = self.relu1(out) 79 | out = self.dropout(out) 80 | out = self.fc2(out) 81 | if self.batch_norm and batch_size > 1: 82 | out = self.bn2(out) 83 | out = self.relu2(out) 84 | out = self.dropout(out) 85 | out = self.fc3(out) 86 | h_output = self.relu3(out) 87 | out = self.fc4(h_output) 88 | return out, h_output 89 | 90 | 91 | def attr_loss(forward_func, data_input, device='cpu', subsample=-1): 92 | ########## UPDATE functools ############ 93 | batch_size = data_input.shape[0] 94 | 95 | def test(input_): 96 | _, h_out = forward_func(input_) 97 | return h_out 98 | 99 | data_input = data_input.clone().requires_grad_(True) 100 | jacobian = vmap(jacrev(test))(data_input) 101 | neuron_attr = jacobian.swapaxes(0, 1) 102 | h_dim = 
neuron_attr.shape[0] 103 | 104 | if len(neuron_attr.shape) > 3: 105 | # h_dim x batch_size x features 106 | neuron_attr = neuron_attr.flatten(start_dim=2) 107 | 108 | sparsity_loss = torch.norm(neuron_attr, p=1) / (batch_size * h_dim * neuron_attr.shape[2]) 109 | 110 | cos = nn.CosineSimilarity(dim=1, eps=1e-6) 111 | correlation_loss = torch.tensor(0., requires_grad=True).to(device) 112 | if subsample > 0 and subsample < h_dim * (h_dim - 1) / 2: 113 | tensor_pairs = [list(np.random.choice(h_dim, size=(2), replace=False)) for i in range(subsample)] 114 | for tensor_pair in tensor_pairs: 115 | pairwise_corr = cos(neuron_attr[tensor_pair[0], :, :], neuron_attr[tensor_pair[1], :, :]).norm(p=1) 116 | correlation_loss = correlation_loss + pairwise_corr 117 | 118 | correlation_loss = correlation_loss / (batch_size * subsample) 119 | 120 | else: 121 | for neuron_i in range(1, h_dim): 122 | for neuron_j in range(0, neuron_i): 123 | pairwise_corr = cos(neuron_attr[neuron_i, :, :], neuron_attr[neuron_j, :, :]).norm(p=1) 124 | correlation_loss = correlation_loss + pairwise_corr 125 | num_pairs = h_dim * (h_dim - 1) / 2 126 | correlation_loss = correlation_loss / (batch_size * num_pairs) 127 | 128 | return sparsity_loss, correlation_loss 129 | 130 | 131 | def train_epoch(model, loader, loss_func, optimiser, epoch, 132 | lambda_1=0, lambda_2=0, device='cpu', subsample=-1): 133 | running_loss = 0 134 | for i, (data, label) in enumerate(loader): 135 | model.train() 136 | data, label = data.to(device), label.type(torch.LongTensor).to(device) 137 | optimiser.zero_grad() 138 | output, _ = model(data) 139 | 140 | pred_loss = loss_func(output.squeeze(), label) 141 | 142 | if lambda_1 + lambda_2 > 0: 143 | sparsity_loss, correlation_loss = attr_loss(model, data, device=device, subsample=subsample) 144 | else: 145 | sparsity_loss, correlation_loss = 0, 0 146 | 147 | loss = pred_loss + lambda_1 * sparsity_loss + lambda_2 * correlation_loss 148 | running_loss += loss.item() 149 | 150 | loss.backward() 151 | optimiser.step() 152 | 153 | if (i + 1) % 100 == 0: 154 | print('Epoch [{}], Step [{}/{}], Loss: {:.4f}' 155 | .format(epoch + 1, i + 1, len(loader), running_loss / (i + 1))) 156 | print(f"Lambda1: {lambda_1}, Lambda2: {lambda_2}") 157 | 158 | return model 159 | 160 | 161 | def evaluate(model, loader, loss_func, epoch, 162 | lambda_1=0, lambda_2=0, device='cpu', subsample=-1, log_set='test'): 163 | correct, total = 0, 0 164 | running_loss, running_pred_loss = 0, 0 165 | running_pred, running_gt = np.array([]), np.array([]) 166 | 167 | for i, (data, label) in enumerate(loader): 168 | model.eval() 169 | data, label = data.to(device), label.type(torch.LongTensor).to(device) 170 | 171 | output, _ = model(data) 172 | pred_loss = loss_func(output.squeeze(), label) 173 | 174 | sparsity_loss, correlation_loss = attr_loss(model, data, device=device, subsample=subsample) 175 | 176 | loss = pred_loss + lambda_1 * sparsity_loss + lambda_2 * correlation_loss 177 | 178 | running_loss += loss.item() 179 | running_pred_loss += pred_loss.item() 180 | 181 | pred_probs = torch.sigmoid(output) 182 | pred_y = torch.argmax(pred_probs, 1) 183 | correct += (pred_y == label).sum().item() 184 | total += float(label.size()[0]) 185 | 186 | 187 | accuracy = correct / total 188 | 189 | average_loss = running_loss / len(loader) 190 | averge_pred_loss = running_pred_loss / len(loader) 191 | 192 | print(f'[Test] Epoch: {epoch + 1}, accuracy: {accuracy:.4f}, ' \ 193 | f'average test loss: {average_loss:.4f}, ' \ 194 | f'pred loss: 
{averge_pred_loss:.4f}, ' \ 195 | f'sparsity loss: {sparsity_loss.item():.4f}, correlation loss: {correlation_loss.item():.4f}') 196 | 197 | return averge_pred_loss, accuracy 198 | 199 | 200 | def train_full(seed, lambda_1, lambda_2, LR=0): 201 | EPOCHS = 100 202 | TRAINING_PATIENCE = 5 203 | BATCH_SIZE = 256 204 | DEVICE = 'cuda:0' 205 | 206 | runs = 1 207 | 208 | learning_rate = 0.001 209 | weight_decay = LR 210 | num_features = 60 211 | num_outputs = 355 212 | subsample = 50 213 | model_save_path = 'model_weights' 214 | min_epoch = 1 215 | best_acc = 0 216 | accuracy_val_ls = [] 217 | 218 | for _ in range(runs): 219 | torch.random.manual_seed(seed) 220 | 221 | model = UCI_MLP(num_features, num_outputs, dropout=0, batch_norm=False).to(DEVICE) 222 | print(f'Training on {DEVICE}...') 223 | 224 | loss_func = nn.CrossEntropyLoss() 225 | optimiser = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) 226 | 227 | patience = 0 228 | 229 | for epoch in range(EPOCHS): 230 | 231 | model = train_epoch(model, loaders['train'], loss_func, optimiser, 232 | epoch=epoch, lambda_1=lambda_1, lambda_2=lambda_2, 233 | device=DEVICE, subsample=subsample) 234 | 235 | val_loss, accuracy = evaluate(model, loaders['val'], loss_func, epoch=epoch, 236 | lambda_1=lambda_1, lambda_2=lambda_2, device=DEVICE, subsample=subsample) 237 | accuracy_val_ls.append(accuracy) 238 | 239 | if epoch >= min_epoch: 240 | if best_acc < accuracy: 241 | print(f'Epoch {epoch + 1} - Validation performance improved, saving model...') 242 | best_acc = accuracy 243 | torch.save(model.state_dict(), model_save_path) 244 | patience = 0 245 | else: 246 | patience += 1 247 | 248 | if patience == TRAINING_PATIENCE: 249 | print(f'Epoch {epoch + 1} - Early stopping since no improvement after {patience} epochs') 250 | break 251 | 252 | # evaluate on cutract dataset 253 | # load best model 254 | model.load_state_dict(torch.load(model_save_path)) 255 | averge_pred_loss, accuracy = evaluate(model, loaders['test'], loss_func, epoch=0, 256 | lambda_1=lambda_1, lambda_2=lambda_2, device=DEVICE, subsample=subsample, 257 | log_set='target') 258 | return accuracy 259 | 260 | 261 | 262 | # main logic for training baseline, tangos regularization and l2 regularization 263 | baseline_ls = [] 264 | for seed in range(6): 265 | acc = train_full(seed, 0, 0, LR=0) 266 | baseline_ls.append(acc) 267 | with open('src/experiments/larger-data/baseline.json', 'w') as f: 268 | json.dump({'test_acc': baseline_ls}, f) 269 | 270 | TANGOS_ls = [] 271 | for seed in range(6): 272 | acc = train_full(seed, 1, 0.01, LR=0) 273 | TANGOS_ls.append(acc) 274 | with open('src/experiments/larger-data/TANGOS.json', 'w') as f: 275 | json.dump({'test_acc': TANGOS_ls}, f) 276 | 277 | l2_ls = [] 278 | for seed in range(6): 279 | acc = train_full(seed, 0, 0, LR=0.0001) 280 | l2_ls.append(acc) 281 | with open('src/experiments/larger-data/l2.json', 'w') as f: 282 | json.dump({'test_acc': l2_ls}, f) 283 | 284 | -------------------------------------------------------------------------------- /src/experiments/in-tandem-regularization/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import itertools 4 | import argparse 5 | import json 6 | import copy 7 | from pathlib import Path 8 | import numpy as np 9 | import random 10 | import torch.nn as nn 11 | from torch import optim 12 | from sklearn.model_selection import KFold 13 | from src.losses import parameter_schedule, attr_loss, MSE 14 | from 
src.models import UCI_MLP 15 | from src.regularizers import l1, add_input_noise, mixup_data, mixup_criterion 16 | import src.load_data 17 | 18 | import warnings 19 | 20 | warnings.filterwarnings("ignore") 21 | 22 | EPOCHS = 200 23 | TRAINING_PATIENCE = 30 24 | BATCH_SIZE = 32 25 | K_FOLDS = 5 26 | DEVICE = 'cuda:0' 27 | 28 | 29 | def train_epoch(model, train_loader, optimiser, loss_func, params, params_ls, regulariser, device='cpu'): 30 | reg_loss = 0 31 | for i, (data, label) in enumerate(train_loader): 32 | model.train() 33 | data, label = data.to(device), label.to(device) 34 | if regulariser == 'input_noise': 35 | data = add_input_noise(data, params['std']) 36 | optimiser.zero_grad() 37 | output, _ = model(data) 38 | pred_loss = loss_func(output, label) 39 | 40 | if (params_ls['lambda_1_curr'] > 0) or (params_ls['lambda_2_curr'] > 0): 41 | sparsity_loss, correlation_loss = attr_loss(model, data, device=device, subsample=params_ls['subsample']) 42 | reg_loss = params_ls['lambda_1_curr'] * sparsity_loss + params_ls['lambda_2_curr'] * correlation_loss 43 | 44 | if regulariser == 'l1': 45 | reg_loss = params['weight'] * l1(model) 46 | 47 | elif regulariser == 'mixup': 48 | X_mixup, y_a, y_b, lam = mixup_data(data, label, alpha=params['alpha'], device=device) 49 | output_mixup, _ = model(X_mixup) 50 | reg_loss = mixup_criterion(loss_func, output_mixup, y_a, y_b, lam) 51 | 52 | loss = pred_loss + reg_loss 53 | loss.backward() 54 | optimiser.step() 55 | return model 56 | 57 | 58 | def evaluate(model, test_loader, loss_func, device=DEVICE): 59 | running_loss, running_pred_loss = 0, 0 60 | for epoch, (data, label) in enumerate(test_loader): 61 | model.eval() 62 | data, label = data.to(device), label.to(device) 63 | 64 | output, _ = model(data) 65 | 66 | # compute metric 67 | pred_loss = loss_func(output, label) 68 | loss = pred_loss 69 | 70 | running_loss += loss.item() 71 | running_pred_loss += pred_loss.item() 72 | 73 | return running_pred_loss/(epoch + 1), running_loss/(epoch + 1) 74 | 75 | def seed_worker(worker_id): 76 | worker_seed = torch.initial_seed() % 2 ** 32 77 | np.random.seed(worker_seed) 78 | random.seed(worker_seed) 79 | 80 | def ids_to_dataloader_split(data, train_ids, val_ids, seed): 81 | train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids) 82 | val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids) 83 | 84 | g = torch.Generator() 85 | g.manual_seed(seed) 86 | 87 | trainloader = torch.utils.data.DataLoader( 88 | data, 89 | batch_size=BATCH_SIZE, sampler=train_subsampler, worker_init_fn=seed_worker, generator=g) 90 | valloader = torch.utils.data.DataLoader( 91 | data, 92 | batch_size=BATCH_SIZE, sampler=val_subsampler, worker_init_fn=seed_worker, generator=g) 93 | return trainloader, valloader 94 | 95 | 96 | def init_results(tag, seed, datasets, config_regs, latent_sniper_regs, overwrite=False): 97 | """Helper function to initialise an empty dictionary for storing results""" 98 | results = load_results(f'experiment_{tag}_seed_{seed}') 99 | regularisers = config_regs.keys() 100 | num_combinations_ls = 1 101 | for i, j in latent_sniper_regs.items(): 102 | num_combinations_ls *= len(j) 103 | 104 | if results and not overwrite: 105 | raise ValueError('Results already exist, to overwrite pass overwrite = True') 106 | else: 107 | results = {} 108 | for dataset in datasets: 109 | results[dataset] = {} 110 | for regulariser in regularisers: 111 | results[dataset][regulariser] = {} 112 | num_combinations = 1 113 | for value in config_regs[regulariser].values(): 114 
| num_combinations *= len(value) 115 | for i in range(num_combinations * num_combinations_ls): 116 | results[dataset][regulariser][i] = {} 117 | results[dataset][regulariser][i]['val_loss'] = [] 118 | 119 | save_results(results, f'experiment_{tag}_seed_{seed}') 120 | 121 | def load_results(file_name): 122 | curr_dir = os.path.dirname(__file__) 123 | results_dir = os.path.join(curr_dir, f'results/{file_name}.json') 124 | file_obj = Path(results_dir) 125 | if file_obj.is_file(): 126 | with open(results_dir) as f: 127 | results = json.load(f) 128 | return results 129 | else: 130 | print(f'{file_name}.json not found in results folder, generating new file.') 131 | return {} 132 | 133 | def save_results(results, file_name): 134 | curr_dir = os.path.dirname(__file__) 135 | results_dir = os.path.join(curr_dir, f'results/{file_name}.json') 136 | with open(results_dir, 'w') as f: 137 | json.dump(results, f) 138 | 139 | def run_fold(fold_name, model, trainloader, valloader, config, config_ls, config_dataset, seed): 140 | tag, dataset, regulariser, params, fold = fold_name.split(':') 141 | loss_func = MSE if config_dataset['type'] == 'regression' else nn.CrossEntropyLoss() 142 | l2_weight = config['weight'] if regulariser == 'l2' else 0 143 | optimiser = optim.Adam(model.parameters(), lr=config_dataset['lr'], weight_decay=l2_weight) 144 | parameter_scheduler = parameter_schedule(config_ls['lambda_1'], config_ls['lambda_2'], config_ls['param_schedule']) 145 | best_val_loss = np.inf; last_update = 0 146 | for epoch in range(EPOCHS): 147 | lambda_1, lambda_2 = parameter_scheduler.get_reg(epoch) 148 | config_ls['lambda_1_curr'] = lambda_1 149 | config_ls['lambda_2_curr'] = lambda_2 150 | 151 | model = train_epoch(model, trainloader, optimiser, loss_func, config, config_ls, regulariser, device=DEVICE) 152 | val_loss, _ = evaluate(model, valloader, loss_func, device=DEVICE) 153 | 154 | if (val_loss < best_val_loss) or (epoch < 5): 155 | best_val_loss = val_loss 156 | best_model = copy.deepcopy(model) 157 | last_update = epoch 158 | 159 | # early stopping criteria 160 | if epoch - last_update == TRAINING_PATIENCE: 161 | break 162 | 163 | # save best model results for this fold 164 | results = load_results(f'experiment_{tag}_seed_{seed}') 165 | results[dataset][regulariser][params]['val_loss'].append(best_val_loss) 166 | save_results(results, f'experiment_{tag}_seed_{seed}') 167 | 168 | return best_val_loss, best_model, last_update 169 | 170 | def run_cv(config_dataset: dict, regulariser: str, params: dict, params_ls: dict, run_name: str, seed: int): 171 | data_fetcher = getattr(src.load_data, config_dataset['loader']) 172 | loaders = data_fetcher(seed=0) 173 | dropout = params['p'] if regulariser == 'dropout' else 0 174 | batch_norm = True if regulariser == 'batch_norm' else False 175 | kfold = KFold(n_splits=K_FOLDS, shuffle=False) 176 | best_loss = np.inf 177 | # loop through folds 178 | for fold, (train_ids, val_ids) in enumerate(kfold.split(loaders['train'])): 179 | torch.manual_seed(seed); np.random.seed(seed) 180 | trainloader, valloader = ids_to_dataloader_split(loaders['train'], train_ids, val_ids, seed=seed) 181 | fold_name = run_name + f':{fold}' 182 | model = UCI_MLP(num_features=config_dataset['num_features'], num_outputs=config_dataset['num_outputs'], 183 | dropout=dropout, batch_norm=batch_norm).to(DEVICE) 184 | fold_loss, fold_model, fold_epoch = run_fold(fold_name, model, trainloader, valloader, params, params_ls, 185 | config_dataset, seed=seed) 186 | if fold_loss < best_loss: 187 | 
best_loss = fold_loss 188 | best_model = copy.deepcopy(fold_model) 189 | best_epoch = fold_epoch 190 | 191 | # evalutate best performing model on held out test set 192 | loss_func = MSE if config_dataset['type'] == 'regression' else nn.CrossEntropyLoss() 193 | test_loss, _ = evaluate(best_model, loaders['test'], loss_func) 194 | tag, dataset, regulariser, params = run_name.split(':') 195 | results = load_results(f'experiment_{tag}_seed_{seed}') 196 | results[dataset][regulariser][params]['test_loss'] = test_loss 197 | results[dataset][regulariser][params]['train_final_epoch'] = best_epoch 198 | print(test_loss) 199 | save_results(results, f'experiment_{tag}_seed_{seed}') 200 | 201 | def grid_search_iterable(parameter_dict: dict) -> list: 202 | """Generate an iterable list of hyperparameters from a dictionary containing the values to be considered""" 203 | keys, values = zip(*parameter_dict.items()) 204 | parameter_grid = [dict(zip(keys, v)) for v in itertools.product(*values)] 205 | return parameter_grid 206 | 207 | def load_config(name): 208 | curr_dir = os.path.dirname(__file__) 209 | config_dir = os.path.join(curr_dir, f'configs/{name}.json') 210 | with open(config_dir) as f: 211 | config_dict = json.load(f) 212 | config_keys = list(config_dict) 213 | return config_dict, config_keys 214 | 215 | def run_experiment(seeds: list, tag: str): 216 | # load config files 217 | config_regs, regularisers = load_config('regularizers') 218 | config_data, datasets = load_config('datasets') 219 | latent_sniper_regs = config_regs.pop('TANGOS', None) 220 | regularisers.remove('TANGOS') 221 | latent_sniper_iterable = grid_search_iterable(latent_sniper_regs) 222 | for seed in seeds: 223 | # initialise results file 224 | init_results(tag, seed, datasets, config_regs, latent_sniper_regs, overwrite=True) 225 | for dataset in datasets: 226 | for regulariser in regularisers: 227 | parmaeter_iterable = grid_search_iterable(config_regs[regulariser]) 228 | idx = 0 229 | for param_set in parmaeter_iterable: 230 | for param_set_ls in latent_sniper_iterable: 231 | run_name = f'{tag}:{dataset}:{regulariser}:{idx}' 232 | # run CV on this combination 233 | print(run_name) 234 | run_cv(config_data[dataset], regulariser, param_set, param_set_ls, run_name, seed) 235 | # save record of parameters used for this run 236 | param_record = load_results(f'params_record') 237 | param_record[f'id_:{seed}:{run_name}'] = param_set 238 | save_results(param_record, f'params_record') 239 | idx +=1 240 | 241 | 242 | if __name__ == '__main__': 243 | parser = argparse.ArgumentParser() 244 | parser.add_argument('-seeds', default=[0], help='Set of seeds to use for experiments') 245 | parser.add_argument('-tag', default='tag', help='Tag name for set of experiments') 246 | args = parser.parse_args() 247 | print(args.seeds) 248 | run_experiment(seeds=args.seeds, tag=args.tag) -------------------------------------------------------------------------------- /src/load_data.py: -------------------------------------------------------------------------------- 1 | import arff 2 | import os 3 | import re 4 | import torch 5 | import pandas as pd 6 | import numpy as np 7 | from torch.utils.data import Dataset, DataLoader, TensorDataset 8 | from sklearn.model_selection import train_test_split 9 | from sklearn.preprocessing import StandardScaler 10 | import json 11 | 12 | 13 | def get_path(): 14 | """Get path to data dir""" 15 | results_dir = 'src/data_config.json' 16 | with open(results_dir) as f: 17 | results = json.load(f) 18 | return 
results['path_to_data'] 19 | 20 | 21 | def load_wine(seed, train_prop=0.8, batch_size=64): 22 | data = pd.read_csv(get_path() + 'winequality-red.csv') 23 | data = data[:1000] 24 | 25 | X = data.drop('quality', axis=1) 26 | y = data.quality 27 | X, y = X.to_numpy(), y.to_numpy() 28 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 29 | 30 | X_scaler = StandardScaler() 31 | X_train = X_scaler.fit_transform(X_train) 32 | X_test = X_scaler.transform(X_test) 33 | y_scaler = StandardScaler() 34 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 35 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 36 | 37 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 38 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 39 | 40 | loaders = { 41 | 'train': train_dataset, 42 | 43 | 'test': DataLoader(test_dataset, 44 | batch_size=batch_size, 45 | shuffle=False, 46 | num_workers=1) 47 | } 48 | return loaders 49 | 50 | def load_facebook(seed, train_prop=0.8, batch_size=64): 51 | data = pd.read_csv(get_path() + 'dataset_facebook.csv', sep=';') 52 | data.dropna(inplace=True) # drop missing values 53 | one_hot = pd.get_dummies(data['Type']) # onehotencode categorical column 54 | data = data.drop('Type', axis=1) 55 | data = data.join(one_hot) 56 | X = data.drop('Lifetime Post Total Impressions', axis = 1) 57 | y = data['Lifetime Post Total Impressions'] 58 | X, y = X.to_numpy(), y.to_numpy() 59 | 60 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 61 | 62 | X_scaler = StandardScaler() 63 | X_train = X_scaler.fit_transform(X_train) 64 | X_test = X_scaler.transform(X_test) 65 | y_scaler = StandardScaler() 66 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 67 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 68 | 69 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 70 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 71 | 72 | loaders = { 73 | 'train': train_dataset, 74 | 75 | 'test': DataLoader(test_dataset, 76 | batch_size=batch_size, 77 | shuffle=False, 78 | num_workers=1) 79 | } 80 | return loaders 81 | 82 | 83 | def load_bioconcentration(seed, train_prop=0.8, batch_size=64): 84 | data = pd.read_csv(get_path() + 'Grisoni_et_al_2016_EnvInt88.csv', sep=',') 85 | 86 | X = data[['nHM', 'piPC09', 'PCD', 'X2Av', 'MLOGP', 'ON1V', 'N-072', 'B02[C-N]', 'F04[C-O]']] 87 | for var in ['nHM', 'N-072', 'B02[C-N]', 'F04[C-O]']: 88 | one_hot = pd.get_dummies(X[var], prefix=var) # onehotencode categorical column 89 | X = X.drop(var, axis=1) 90 | X = X.join(one_hot) 91 | 92 | y = data['logBCF'] 93 | X, y = X.to_numpy(), y.to_numpy() 94 | 95 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 96 | 97 | X_scaler = StandardScaler() 98 | X_train = X_scaler.fit_transform(X_train) 99 | X_test = X_scaler.transform(X_test) 100 | y_scaler = StandardScaler() 101 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 102 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 103 | 104 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 105 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 106 | 107 | loaders = { 108 | 'train': train_dataset, 109 | 110 | 'test': DataLoader(test_dataset, 111 | batch_size=batch_size, 112 | shuffle=False, 113 | 
num_workers=1) 114 | } 115 | return loaders 116 | 117 | def load_student(seed, train_prop=0.8, batch_size=64): 118 | data = pd.read_csv(get_path() + 'student-por.csv', sep=';') 119 | 120 | X = data.drop(['G1', 'G2', 'G3'], axis = 1) 121 | for var in ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob', 122 | 'reason', 'guardian','schoolsup', 'famsup', 'paid', 'activities', 123 | 'nursery', 'higher', 'internet', 'romantic',]: 124 | one_hot = pd.get_dummies(X[var], prefix=var) # onehotencode categorical column 125 | X = X.drop(var, axis=1) 126 | X = X.join(one_hot) 127 | 128 | y = data['G3'] 129 | X, y = X.to_numpy(), y.to_numpy() 130 | 131 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 132 | 133 | X_scaler = StandardScaler() 134 | X_train = X_scaler.fit_transform(X_train) 135 | X_test = X_scaler.transform(X_test) 136 | y_scaler = StandardScaler() 137 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 138 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 139 | 140 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 141 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 142 | 143 | loaders = { 144 | 'train': train_dataset, 145 | 146 | 'test': DataLoader(test_dataset, 147 | batch_size=batch_size, 148 | shuffle=False, 149 | num_workers=1) 150 | } 151 | return loaders 152 | 153 | 154 | def load_abalone(seed, train_prop=0.8, batch_size=64): 155 | data = pd.read_csv(get_path() + 'abalone.data', sep=',', header=None) 156 | data = data[:1000] 157 | one_hot = pd.get_dummies(data[0], drop_first=True) # onehotencode categorical column 158 | data = data.drop(0, axis=1) 159 | data = data.join(one_hot) 160 | 161 | X = data.drop(8, axis=1) 162 | y = data[8] 163 | 164 | X, y = X.to_numpy(), y.to_numpy() 165 | 166 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 167 | 168 | X_scaler = StandardScaler() 169 | X_train = X_scaler.fit_transform(X_train) 170 | X_test = X_scaler.transform(X_test) 171 | y_scaler = StandardScaler() 172 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 173 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 174 | 175 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 176 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 177 | 178 | loaders = { 179 | 'train': train_dataset, 180 | 181 | 'test': DataLoader(test_dataset, 182 | batch_size=batch_size, 183 | shuffle=False, 184 | num_workers=1) 185 | } 186 | return loaders 187 | 188 | 189 | def load_skillcraft(seed, train_prop=0.8, batch_size=64): 190 | data = pd.read_csv(get_path() + 'SkillCraft1_Dataset.csv', sep=',') 191 | data = data.replace('?', np.NaN) 192 | data = data.dropna() 193 | data = data[:1000] 194 | X = data.drop(['GameID', 'LeagueIndex'], axis=1) 195 | y = data['LeagueIndex'] 196 | 197 | X, y = X.to_numpy(), y.to_numpy() 198 | 199 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 200 | 201 | X_scaler = StandardScaler() 202 | X_train = X_scaler.fit_transform(X_train) 203 | X_test = X_scaler.transform(X_test) 204 | y_scaler = StandardScaler() 205 | y_train = y_scaler.fit_transform(y_train.reshape(-1, 1)).reshape(-1) 206 | y_test = y_scaler.transform(y_test.reshape(-1, 1)).reshape(-1) 207 | 208 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 209 | test_dataset = 
TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 210 | 211 | loaders = { 212 | 'train': train_dataset, 213 | 214 | 'test': DataLoader(test_dataset, 215 | batch_size=batch_size, 216 | shuffle=False, 217 | num_workers=1) 218 | } 219 | return loaders 220 | 221 | 222 | def load_weather(seed, train_prop=0.8, batch_size=64): 223 | data = pd.read_csv(get_path() + 'Bias_correction_ucl.csv', sep=',') 224 | data = data.dropna() 225 | data = data[:1000] 226 | one_hot = pd.get_dummies(data['station'], drop_first=True) # onehotencode categorical column 227 | data = data.drop('station', axis=1) 228 | data = data.join(one_hot) 229 | X = data.drop(['Date', 'Next_Tmax', 'Next_Tmin'], axis=1) 230 | y = data['Next_Tmax'] 231 | 232 | X, y = X.to_numpy(), y.to_numpy() 233 | 234 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 235 | 236 | X_scaler = StandardScaler() 237 | X_train = X_scaler.fit_transform(X_train) 238 | X_test = X_scaler.transform(X_test) 239 | y_scaler = StandardScaler() 240 | y_train = y_scaler.fit_transform(y_train.reshape(-1, 1)).reshape(-1) 241 | y_test = y_scaler.transform(y_test.reshape(-1, 1)).reshape(-1) 242 | 243 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 244 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 245 | 246 | loaders = { 247 | 'train': train_dataset, 248 | 249 | 'test': DataLoader(test_dataset, 250 | batch_size=batch_size, 251 | shuffle=False, 252 | num_workers=1) 253 | } 254 | return loaders 255 | 256 | 257 | def load_forest(seed, train_prop=0.8, batch_size=64): 258 | data = pd.read_csv(get_path() + 'forestfires.csv', sep=',') 259 | X = data.drop('area', axis=1) 260 | for var in ['X', 'Y', 'month', 'day']: 261 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 262 | X = X.drop(var, axis=1) 263 | X = X.join(one_hot) 264 | y = np.log(data['area'] + 1) 265 | 266 | X, y = X.to_numpy(), y.to_numpy() 267 | 268 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 269 | 270 | X_scaler = StandardScaler() 271 | X_train = X_scaler.fit_transform(X_train) 272 | X_test = X_scaler.transform(X_test) 273 | y_scaler = StandardScaler() 274 | y_train = y_scaler.fit_transform(y_train.reshape(-1, 1)).reshape(-1) 275 | y_test = y_scaler.transform(y_test.reshape(-1, 1)).reshape(-1) 276 | 277 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) 278 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 279 | 280 | loaders = { 281 | 'train': train_dataset, 282 | 283 | 'test': DataLoader(test_dataset, 284 | batch_size=batch_size, 285 | shuffle=False, 286 | num_workers=1) 287 | } 288 | return loaders 289 | 290 | def load_protein(seed, train_prop=0.8, batch_size=64): 291 | data = pd.read_csv(get_path() + 'CASP.csv', sep=',') 292 | data = data[:1000] 293 | X = data.drop('RMSD', axis=1) 294 | y = data['RMSD'] 295 | 296 | X, y = X.to_numpy(), y.to_numpy() 297 | 298 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 299 | 300 | X_scaler = StandardScaler() 301 | X_train = X_scaler.fit_transform(X_train) 302 | X_test = X_scaler.transform(X_test) 303 | y_scaler = StandardScaler() 304 | y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1) 305 | y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1) 306 | 307 | train_dataset = TensorDataset(torch.Tensor(X_train), 
torch.Tensor(y_train)) 308 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test)) 309 | 310 | loaders = { 311 | 'train': train_dataset, 312 | 313 | 'test': DataLoader(test_dataset, 314 | batch_size=batch_size, 315 | shuffle=False, 316 | num_workers=1) 317 | } 318 | return loaders 319 | 320 | 321 | def load_heart(seed, train_prop=0.8, batch_size=64): 322 | data = pd.read_csv(get_path() + 'heart.dat', sep=' ', header=None) 323 | X = data.drop(13, axis=1) 324 | y = data[13] - 1 325 | data[9] = np.log(data[9] + 1) 326 | for var in [1, 2, 5, 6, 8, 10, 11, 12]: 327 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 328 | X = X.drop(var, axis=1) 329 | X = X.join(one_hot) 330 | 331 | X, y = X.to_numpy(), y.to_numpy() 332 | 333 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 334 | 335 | X_scaler = StandardScaler() 336 | X_train = X_scaler.fit_transform(X_train) 337 | X_test = X_scaler.transform(X_test) 338 | 339 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 340 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 341 | 342 | loaders = { 343 | 'train': train_dataset, 344 | 345 | 'test': DataLoader(test_dataset, 346 | batch_size=batch_size, 347 | shuffle=False, 348 | num_workers=1) 349 | } 350 | return loaders 351 | 352 | 353 | def load_breast(seed, train_prop=0.8, batch_size=64): 354 | data = pd.read_csv(get_path() + 'breast-cancer-wisconsin.data', header=None) 355 | X = data.drop([0, 10], axis=1) 356 | X[6].replace('?', np.nan, inplace=True) 357 | X[6].fillna((X[6].median()), inplace=True) 358 | y = data[10]/2 - 1 359 | 360 | X, y = X.to_numpy(), y.to_numpy() 361 | 362 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 363 | 364 | X_scaler = StandardScaler() 365 | X_train = X_scaler.fit_transform(X_train) 366 | X_test = X_scaler.transform(X_test) 367 | 368 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 369 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 370 | 371 | loaders = { 372 | 'train': train_dataset, 373 | 374 | 'test': DataLoader(test_dataset, 375 | batch_size=batch_size, 376 | shuffle=False, 377 | num_workers=1) 378 | } 379 | return loaders 380 | 381 | def load_cervical(seed, train_prop=0.8, batch_size=64): 382 | data = pd.read_csv(get_path() + 'risk_factors_cervical_cancer.csv') 383 | X = data[['Age', 'Number of sexual partners', 'First sexual intercourse', 384 | 'Num of pregnancies', 'Smokes', 'Smokes (years)', 'Smokes (packs/year)', 385 | 'Hormonal Contraceptives', 'Hormonal Contraceptives (years)', 'IUD', 386 | 'IUD (years)', 'STDs', 'STDs (number)', 'STDs:condylomatosis', 387 | 'STDs:cervical condylomatosis', 'STDs:vaginal condylomatosis', 388 | 'STDs:vulvo-perineal condylomatosis', 'STDs:syphilis', 389 | 'STDs:pelvic inflammatory disease', 'STDs:genital herpes', 390 | 'STDs:molluscum contagiosum', 'STDs:AIDS', 'STDs:HIV', 391 | 'STDs:Hepatitis B', 'STDs:HPV', 'STDs: Number of diagnosis', 392 | 'STDs: Time since first diagnosis', 'STDs: Time since last diagnosis', 393 | 'Dx:Cancer', 'Dx:CIN', 'Dx:HPV', 'Dx']] 394 | X = X.replace('?', np.nan) 395 | X.fillna((X.median()), inplace=True) 396 | mapping = {'Hinselmann': 1, 'Schiller': 2, 'Citology': 3, 'Biopsy': 4} 397 | y = data[['Hinselmann', 'Schiller', 'Citology', 
'Biopsy']].idxmax(axis=1) 398 | y = y.replace(mapping) 399 | 400 | for var in ['Smokes', 'Smokes (years)', 'Smokes (packs/year)', 'Hormonal Contraceptives', 401 | 'IUD', 'STDs', 'STDs:condylomatosis', 'STDs:cervical condylomatosis', 402 | 'STDs:vaginal condylomatosis', 'STDs:vulvo-perineal condylomatosis', 403 | 'STDs:syphilis','STDs:pelvic inflammatory disease', 'STDs:genital herpes', 404 | 'STDs:molluscum contagiosum', 'STDs:AIDS', 'STDs:HIV', 'STDs:Hepatitis B', 405 | 'STDs:HPV', 'Dx:Cancer', 'Dx:CIN', 'Dx:HPV', 'Dx']: 406 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 407 | X = X.drop(var, axis=1) 408 | X = X.join(one_hot) 409 | 410 | X, y = X.to_numpy(), y.to_numpy() 411 | 412 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 413 | 414 | X_scaler = StandardScaler() 415 | X_train = X_scaler.fit_transform(X_train) 416 | X_test = X_scaler.transform(X_test) 417 | 418 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 419 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 420 | 421 | loaders = { 422 | 'train': train_dataset, 423 | 424 | 'test': DataLoader(test_dataset, 425 | batch_size=batch_size, 426 | shuffle=False, 427 | num_workers=1) 428 | } 429 | return loaders 430 | 431 | def load_credit(seed, train_prop=0.8, batch_size=64): 432 | data = pd.read_csv(get_path() + "crx.data", header = None) 433 | data = data[data[13] != '?'] 434 | data[13] = np.log(data[13].astype(int) + 1) 435 | data[14] = np.log(data[14] + 1) 436 | data[7] = np.log(data[7] + 1) 437 | data[1].replace('?', np.nan, inplace=True) 438 | data[1].fillna((data[1].median()), inplace=True) 439 | X = data.drop(15, axis=1) 440 | y = data[15] 441 | mapping = {'+': 1, '-': 0} 442 | y.replace(mapping, inplace=True) 443 | for var in [0, 3, 4, 5, 6, 8, 9, 11, 12]: 444 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 445 | X = X.drop(var, axis=1) 446 | X = X.join(one_hot) 447 | 448 | X, y = X.to_numpy(), y.to_numpy() 449 | 450 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 451 | 452 | X_scaler = StandardScaler() 453 | X_train = X_scaler.fit_transform(X_train) 454 | X_test = X_scaler.transform(X_test) 455 | 456 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 457 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 458 | 459 | loaders = { 460 | 'train': train_dataset, 461 | 462 | 'test': DataLoader(test_dataset, 463 | batch_size=batch_size, 464 | shuffle=False, 465 | num_workers=1) 466 | } 467 | return loaders 468 | 469 | 470 | def load_hcv(seed, train_prop=0.8, batch_size=64): 471 | data = pd.read_csv(get_path() + "hcvdat0.csv", index_col=0) 472 | y = data['Category'].apply(lambda x: int(x[0])) 473 | X = data.drop('Category', axis=1) 474 | one_hot = pd.get_dummies(X['Sex'], prefix='Sex', drop_first=True) # onehotencode categorical column 475 | X = X.drop('Sex', axis=1) 476 | X = X.join(one_hot) 477 | X = X.fillna(X.mean()) 478 | 479 | X, y = X.to_numpy(), y.to_numpy() 480 | 481 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 482 | 483 | X_scaler = StandardScaler() 484 | X_train = X_scaler.fit_transform(X_train) 485 | X_test = X_scaler.transform(X_test) 486 | 487 | train_dataset = 
TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 488 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 489 | 490 | loaders = { 491 | 'train': train_dataset, 492 | 493 | 'test': DataLoader(test_dataset, 494 | batch_size=batch_size, 495 | shuffle=False, 496 | num_workers=1) 497 | } 498 | return loaders 499 | 500 | 501 | def load_tumor(seed, train_prop=0.8, batch_size=64): 502 | data = pd.read_csv(get_path() + 'primary-tumor.data', header=None) 503 | y = data[0] - 1 504 | X = data.drop(0, axis=1) 505 | for var in X.columns: 506 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 507 | X = X.drop(var, axis=1) 508 | X = X.join(one_hot) 509 | 510 | X, y = X.to_numpy(), y.to_numpy() 511 | 512 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 513 | 514 | X_scaler = StandardScaler() 515 | X_train = X_scaler.fit_transform(X_train) 516 | X_test = X_scaler.transform(X_test) 517 | 518 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 519 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 520 | 521 | loaders = { 522 | 'train': train_dataset, 523 | 524 | 'test': DataLoader(test_dataset, 525 | batch_size=batch_size, 526 | shuffle=False, 527 | num_workers=1) 528 | } 529 | return loaders 530 | 531 | def load_soybean(seed, train_prop=0.8, batch_size=64): 532 | data0 = pd.read_csv(get_path() + 'soybean-large.data', header=None) 533 | data1 = pd.read_csv(get_path() + 'soybean-large.test', header=None) 534 | data = pd.concat([data0, data1], axis=0) 535 | data.reset_index(inplace=True) 536 | y = data[0].rank(method='dense', ascending=False).astype(int) - 1 537 | X = data.drop(0, axis=1) 538 | for var in X.columns: 539 | one_hot = pd.get_dummies(X[var], prefix=f'dum_{var}', drop_first=True) # onehotencode categorical column 540 | X = X.drop(var, axis=1) 541 | X = X.join(one_hot) 542 | 543 | X, y = X.to_numpy(), y.to_numpy() 544 | 545 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 546 | 547 | X_scaler = StandardScaler() 548 | X_train = X_scaler.fit_transform(X_train) 549 | X_test = X_scaler.transform(X_test) 550 | 551 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 552 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 553 | 554 | loaders = { 555 | 'train': train_dataset, 556 | 557 | 'test': DataLoader(test_dataset, 558 | batch_size=batch_size, 559 | shuffle=False, 560 | num_workers=1) 561 | } 562 | return loaders 563 | 564 | def load_australian(seed, train_prop=0.8, batch_size=64): 565 | data = pd.read_csv(get_path() + 'australian.dat', sep=' ', header=None) 566 | data[2] = np.log(data[2] + 1) 567 | data[6] = np.log(data[6] + 1) 568 | data[12] = np.log(data[12] + 1) 569 | data[13] = np.log(data[13] + 1) 570 | y = data[14] 571 | X = data.drop(14, axis=1) 572 | for var in [0,3,4,5,7,8,9,10,11]: 573 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 574 | X = X.drop(var, axis=1) 575 | X = X.join(one_hot) 576 | X, y = X.to_numpy(), y.to_numpy() 577 | 578 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 579 | 580 | X_scaler = StandardScaler() 581 | X_train = X_scaler.fit_transform(X_train) 582 | X_test = 
X_scaler.transform(X_test) 583 | 584 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 585 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 586 | 587 | loaders = { 588 | 'train': train_dataset, 589 | 590 | 'test': DataLoader(test_dataset, 591 | batch_size=batch_size, 592 | shuffle=False, 593 | num_workers=1) 594 | } 595 | return loaders 596 | 597 | def load_entrance(seed, train_prop=0.8, batch_size=64): 598 | data_arff = arff.load(open(get_path() + 'CEE_DATA.arff')) 599 | data = pd.DataFrame(data_arff['data']) 600 | y = data[0].rank(method='dense', ascending=False).astype(int) - 1 601 | X = data.drop(0, axis=1) 602 | for var in X.columns: 603 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 604 | X = X.drop(var, axis=1) 605 | X = X.join(one_hot) 606 | X, y = X.to_numpy(), y.to_numpy() 607 | 608 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 609 | 610 | X_scaler = StandardScaler() 611 | X_train = X_scaler.fit_transform(X_train) 612 | X_test = X_scaler.transform(X_test) 613 | 614 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 615 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 616 | 617 | loaders = { 618 | 'train': train_dataset, 619 | 620 | 'test': DataLoader(test_dataset, 621 | batch_size=batch_size, 622 | shuffle=False, 623 | num_workers=1) 624 | } 625 | return loaders 626 | 627 | def load_thoracic(seed, train_prop=0.8, batch_size=64): 628 | data_arff = arff.load(open(get_path() + 'ThoraricSurgery.arff')) 629 | data = pd.DataFrame(data_arff['data']) 630 | y = data[16] 631 | y = y.replace({'T':1, 'F':0}) 632 | X = data.drop(16, axis=1) 633 | X[2] = np.log(X[2] + 1) 634 | for var in [0,3,4,5,6,7,8,9,10,11,12,13,14]: 635 | one_hot = pd.get_dummies(X[var], prefix=var, drop_first=True) # onehotencode categorical column 636 | X = X.drop(var, axis=1) 637 | X = X.join(one_hot) 638 | X, y = X.to_numpy(), y.to_numpy() 639 | 640 | X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed) 641 | 642 | X_scaler = StandardScaler() 643 | X_train = X_scaler.fit_transform(X_train) 644 | X_test = X_scaler.transform(X_test) 645 | 646 | train_dataset = TensorDataset(torch.Tensor(X_train), torch.tensor(y_train, dtype=torch.long)) 647 | test_dataset = TensorDataset(torch.Tensor(X_test), torch.tensor(y_test, dtype=torch.long)) 648 | 649 | loaders = { 650 | 'train': train_dataset, 651 | 652 | 'test': DataLoader(test_dataset, 653 | batch_size=batch_size, 654 | shuffle=False, 655 | num_workers=1) 656 | } 657 | return loaders -------------------------------------------------------------------------------- /TANGOS_quickstart.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [] 7 | }, 8 | "kernelspec": { 9 | "name": "python3", 10 | "display_name": "Python 3" 11 | }, 12 | "language_info": { 13 | "name": "python" 14 | }, 15 | "accelerator": "GPU", 16 | "gpuClass": "standard" 17 | }, 18 | "cells": [ 19 | { 20 | "cell_type": "markdown", 21 | "source": [ 22 | "[![open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/alanjeffares/TANGOS/blob/main/TANGOS_quickstart.ipynb)" 23 | ], 24 | "metadata": { 25 | 
"id": "8mgNk6CZwB2r" 26 | } 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "source": [ 31 | "# TANGOS quickstart guide\n", 32 | "This script provides a simple example of applying TANGOS as a drop in regularizer in a standard pytorch workflow. We begin by defining a dataloader and a simple MLP architecture before providing a straightforward function for calculating the two TANGOS loss terms - specialization loss and orthogonalization loss. We then provide an example of this loss being applied to train a model in a standard training loop." 33 | ], 34 | "metadata": { 35 | "id": "zorOJBNUG7Cr" 36 | } 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 1, 41 | "metadata": { 42 | "id": "2FGTU5LWGYXR" 43 | }, 44 | "outputs": [], 45 | "source": [ 46 | "# !pip install functorch\n", 47 | "from functorch import jacrev\n", 48 | "from functorch import vmap\n", 49 | "import pandas as pd\n", 50 | "from sklearn.model_selection import train_test_split\n", 51 | "from sklearn.preprocessing import StandardScaler\n", 52 | "from torch.utils.data import DataLoader, TensorDataset\n", 53 | "import torch\n", 54 | "import torch.nn as nn\n", 55 | "from torch import optim\n", 56 | "import numpy as np\n", 57 | "import matplotlib.pyplot as plt\n", 58 | "from typing import Callable" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "source": [ 64 | "# download a dataset from the UCI repository\n", 65 | "!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00510/Grisoni_et_al_2016_EnvInt88.csv" 66 | ], 67 | "metadata": { 68 | "id": "uJ3SBBJmGkPX" 69 | }, 70 | "execution_count": null, 71 | "outputs": [] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "source": [], 76 | "metadata": { 77 | "id": "ZM3mM_rxGw7E" 78 | }, 79 | "execution_count": 2, 80 | "outputs": [] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "source": [ 85 | "## First define a simple data loader " 86 | ], 87 | "metadata": { 88 | "id": "1MJLYMjnGyFK" 89 | } 90 | }, 91 | { 92 | "cell_type": "code", 93 | "source": [ 94 | "# a data loader for the data\n", 95 | "def load_bioconcentration(seed, train_prop=0.8, batch_size=64):\n", 96 | " \"\"\"Returns dataloaders for the bioconcentration dataset\"\"\"\n", 97 | " data = pd.read_csv('Grisoni_et_al_2016_EnvInt88.csv', sep=',')\n", 98 | "\n", 99 | " # apply onehotencoding where appropriate\n", 100 | " X = data[['nHM', 'piPC09', 'PCD', 'X2Av', 'MLOGP', 'ON1V', 'N-072', 'B02[C-N]', 'F04[C-O]']]\n", 101 | " for var in ['nHM', 'N-072', 'B02[C-N]', 'F04[C-O]']:\n", 102 | " one_hot = pd.get_dummies(X[var], prefix=var) # onehotencode categorical columns\n", 103 | " X = X.drop(var, axis=1)\n", 104 | " X = X.join(one_hot)\n", 105 | "\n", 106 | " y = data['logBCF']\n", 107 | " X, y = X.to_numpy(), y.to_numpy()\n", 108 | "\n", 109 | " # split data\n", 110 | " X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_prop, random_state=seed)\n", 111 | "\n", 112 | " # rescale data\n", 113 | " X_scaler = StandardScaler()\n", 114 | " X_train = X_scaler.fit_transform(X_train)\n", 115 | " X_test = X_scaler.transform(X_test)\n", 116 | " y_scaler = StandardScaler()\n", 117 | " y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).reshape(-1)\n", 118 | " y_test = y_scaler.transform(y_test.reshape(-1,1)).reshape(-1)\n", 119 | "\n", 120 | " train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))\n", 121 | " test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test))\n", 122 | "\n", 123 | " loaders = {\n", 124 | " 'train': DataLoader(train_dataset,\n", 125 | 
" batch_size=batch_size,\n", 126 | " shuffle=True,\n", 127 | " num_workers=1),\n", 128 | "\n", 129 | " 'test': DataLoader(test_dataset,\n", 130 | " batch_size=batch_size,\n", 131 | " shuffle=False,\n", 132 | " num_workers=1)\n", 133 | " }\n", 134 | " return loaders\n", 135 | " \n" 136 | ], 137 | "metadata": { 138 | "id": "ACmrnDmbGxUN" 139 | }, 140 | "execution_count": 3, 141 | "outputs": [] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "source": [ 146 | "# Next design a simple MLP architecture" 147 | ], 148 | "metadata": { 149 | "id": "b5nbQ-5ZdV7y" 150 | } 151 | }, 152 | { 153 | "cell_type": "code", 154 | "source": [ 155 | "class SimpleMLP(nn.Module):\n", 156 | " def __init__(self, num_features):\n", 157 | " super(SimpleMLP, self).__init__()\n", 158 | " d = num_features + 1\n", 159 | " num_outputs = 1\n", 160 | " self.fc1 = nn.Linear(num_features, d)\n", 161 | " self.bn1 = nn.BatchNorm1d(d)\n", 162 | " self.relu1 = nn.ReLU(inplace=False)\n", 163 | " self.fc2 = nn.Linear(d, d)\n", 164 | " self.bn2 = nn.BatchNorm1d(d)\n", 165 | " self.relu2 = nn.ReLU(inplace=False)\n", 166 | " self.fc3 = nn.Linear(d, num_outputs)\n", 167 | "\n", 168 | " def forward(self, x):\n", 169 | " out = self.fc1(x)\n", 170 | " out = self.relu1(out)\n", 171 | " out = self.fc2(out)\n", 172 | " h_output = self.relu2(out)\n", 173 | " out = self.fc3(h_output)\n", 174 | " return out, h_output # note that we ensure the model outputs both predictions and a latent representation" 175 | ], 176 | "metadata": { 177 | "id": "aFjcO1xHHhvM" 178 | }, 179 | "execution_count": 4, 180 | "outputs": [] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "source": [ 185 | "# Finally, we define a drop in function that calculates the TANGOS loss - outputting both the specialization and the orthogonalization components." 186 | ], 187 | "metadata": { 188 | "id": "S29uMPHFdkLR" 189 | } 190 | }, 191 | { 192 | "cell_type": "code", 193 | "source": [ 194 | "def TANGOS_loss(forward_func: Callable, data_input: torch.tensor, \n", 195 | " subsample: int = 50, device: str ='cpu'):\n", 196 | " \"\"\"\n", 197 | " A drop in function for calculating the TANGOS regularization loss. 
The loss\n", 198 | " consists of two components (specialization and orthogonalization) which are\n", 199 | " described in more detail in the main paper.\n", 200 | "\n", 201 | " Args:\n", 202 | " forward_func (Callable): The forward function from a pytorch model with \n", 203 | " an output tuple consisting of (_, latent_representation).\n", 204 | " data_input (torch.tensor): A batch of data.\n", 205 | " subsample (int): Number of pairs to subsample for the orthogonalization\n", 206 | " component.\n", 207 | " device (str): Indicating what device to run on.\n", 208 | "\n", 209 | " Returns:\n", 210 | " tuple containing the specialization loss and the orthogonalization loss\n", 211 | " both in torch tensor format.\n", 212 | " \"\"\"\n", 213 | "\n", 214 | " batch_size = data_input.shape[0]\n", 215 | " def wrapper(input_):\n", 216 | " \"\"\"A simple wrapper required by functorch\"\"\"\n", 217 | " _, h_out = forward_func(input_)\n", 218 | " return h_out\n", 219 | " data_input = data_input.clone().requires_grad_(True)\n", 220 | " jacobian = vmap(jacrev(wrapper))(data_input)\n", 221 | " neuron_attr = jacobian.swapaxes(0,1)\n", 222 | " h_dim = neuron_attr.shape[0]\n", 223 | " \n", 224 | " if len(neuron_attr.shape) > 3:\n", 225 | " # h_dim x batch_size x features\n", 226 | " neuron_attr = neuron_attr.flatten(start_dim=2)\n", 227 | "\n", 228 | " # calculate specialization loss component\n", 229 | " spec_loss = torch.norm(neuron_attr, p=1)/(batch_size*h_dim*neuron_attr.shape[2])\n", 230 | "\n", 231 | " cos = nn.CosineSimilarity(dim=1, eps=1e-6) \n", 232 | " orth_loss = torch.tensor(0., requires_grad=True).to(device)\n", 233 | " \n", 234 | " # apply subsampling routine for orthogonalization loss\n", 235 | " if subsample > 0 and subsample < h_dim*(h_dim-1)/2:\n", 236 | " tensor_pairs = [list(np.random.choice(h_dim, size=(2), replace=False)) for i in range(subsample)]\n", 237 | " for tensor_pair in tensor_pairs:\n", 238 | " pairwise_corr = cos(neuron_attr[tensor_pair[0], :, :], \n", 239 | " neuron_attr[tensor_pair[1], :, :]).norm(p=1)\n", 240 | " orth_loss = orth_loss + pairwise_corr\n", 241 | "\n", 242 | " orth_loss = orth_loss/(batch_size*subsample)\n", 243 | "\n", 244 | " else:\n", 245 | " for neuron_i in range(1, h_dim):\n", 246 | " for neuron_j in range(0, neuron_i):\n", 247 | " pairwise_corr = cos(neuron_attr[neuron_i, :, :],\n", 248 | " neuron_attr[neuron_j, :, :]).norm(p=1)\n", 249 | " orth_loss = orth_loss + pairwise_corr\n", 250 | " num_pairs = h_dim*(h_dim-1)/2\n", 251 | " orth_loss = orth_loss/(batch_size*num_pairs)\n", 252 | "\n", 253 | " return spec_loss, orth_loss\n" 254 | ], 255 | "metadata": { 256 | "id": "8FEPP5KKH9bt" 257 | }, 258 | "execution_count": 5, 259 | "outputs": [] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "source": [], 264 | "metadata": { 265 | "id": "S5Yr13ebH3p-" 266 | }, 267 | "execution_count": 5, 268 | "outputs": [] 269 | }, 270 | { 271 | "cell_type": "markdown", 272 | "source": [ 273 | "# Train a model with TANGOS regularization and another with L2 regularization. 
" 274 | ], 275 | "metadata": { 276 | "id": "Rr1EkGx4okOr" 277 | } 278 | }, 279 | { 280 | "cell_type": "code", 281 | "source": [ 282 | "# set seed for reproducibility\n", 283 | "torch.manual_seed(0)\n", 284 | "torch.cuda.manual_seed(0)\n", 285 | "np.random.seed(0)\n", 286 | "\n", 287 | "loss_func = nn.MSELoss()\n", 288 | "data = load_bioconcentration(0)\n", 289 | "train_loader = data['train']\n", 290 | "val_loader = data['test']\n", 291 | "\n", 292 | "lambda_1, lambda_2 = 100, 0.1\n", 293 | "lr = 0.001\n", 294 | "device = 'cuda'\n", 295 | "n_epochs = 100\n", 296 | "\n", 297 | "# instantiate models and optimizers \n", 298 | "TANGOS_model = SimpleMLP(num_features=45).to(device)\n", 299 | "L2_model = SimpleMLP(num_features=45).to(device)\n", 300 | "TANGOS_optimiser = optim.Adam(TANGOS_model.parameters(), lr=lr, weight_decay=0)\n", 301 | "L2_optimiser = optim.Adam(L2_model.parameters(), lr=lr, weight_decay=0.1)\n", 302 | "\n", 303 | "TANGOS_train_loss_ls = []; L2_train_loss_ls = []\n", 304 | "TANGOS_val_loss_ls = []; L2_val_loss_ls = []\n", 305 | "\n", 306 | "for epoch in range(n_epochs):\n", 307 | " TANGOS_running_loss = 0; L2_running_loss = 0\n", 308 | " # training epoch\n", 309 | " for data, label in train_loader:\n", 310 | " TANGOS_model.train(); L2_model.train()\n", 311 | " data, label = data.to(device), label.to(device)\n", 312 | " TANGOS_optimiser.zero_grad(); L2_optimiser.zero_grad()\n", 313 | "\n", 314 | " # forward and backward pass for TANGOS model\n", 315 | " TANGOS_output, _ = TANGOS_model(data)\n", 316 | " MSE_loss = loss_func(TANGOS_output.squeeze(), label)\n", 317 | "\n", 318 | " spec_loss, orth_loss = TANGOS_loss(TANGOS_model, data, subsample=50,\n", 319 | " device=device) # calculate TANGOS loss\n", 320 | " TANGOS_reg_loss = lambda_1 * spec_loss + lambda_2 * orth_loss # weight the two terms\n", 321 | " TANGOS_loss_val = MSE_loss + TANGOS_reg_loss # add TANGOS loss to MSE loss\n", 322 | "\n", 323 | " TANGOS_running_loss += MSE_loss.item()\n", 324 | " TANGOS_loss_val.backward()\n", 325 | " TANGOS_optimiser.step()\n", 326 | "\n", 327 | " # forward and backward pass for L2 model\n", 328 | " L2_output, _ = L2_model(data)\n", 329 | " MSE_loss = loss_func(L2_output.squeeze(), label)\n", 330 | "\n", 331 | " L2_running_loss += MSE_loss.item()\n", 332 | " MSE_loss.backward()\n", 333 | " L2_optimiser.step()\n", 334 | "\n", 335 | " TANGOS_train_loss_ls.append(TANGOS_running_loss)\n", 336 | " L2_train_loss_ls.append(L2_running_loss)\n", 337 | "\n", 338 | " TANGOS_running_val_loss = 0; L2_running_val_loss = 0\n", 339 | " # validation epoch\n", 340 | " for data, label in val_loader:\n", 341 | " TANGOS_model.eval(); L2_model.eval()\n", 342 | " data, label = data.to(device), label.to(device)\n", 343 | "\n", 344 | " # evaluate TANGOS model\n", 345 | " TANGOS_output, _ = TANGOS_model(data)\n", 346 | " TANGOS_reg_loss = loss_func(TANGOS_output.squeeze(), label)\n", 347 | " TANGOS_running_val_loss += TANGOS_reg_loss.item()\n", 348 | "\n", 349 | " # evaluate L2 model\n", 350 | " L2_output, _ = L2_model(data)\n", 351 | " L2_loss = loss_func(L2_output.squeeze(), label)\n", 352 | " L2_running_val_loss += L2_loss.item()\n", 353 | "\n", 354 | " TANGOS_val_loss_ls.append(TANGOS_running_val_loss)\n", 355 | " L2_val_loss_ls.append(L2_running_val_loss)" 356 | ], 357 | "metadata": { 358 | "id": "P-Ky9Km1H3sQ" 359 | }, 360 | "execution_count": 6, 361 | "outputs": [] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "source": [], 366 | "metadata": { 367 | "id": "tZM6XeeHK4f-" 368 | }, 369 |
"execution_count": 6, 370 | "outputs": [] 371 | }, 372 | { 373 | "cell_type": "markdown", 374 | "source": [ 375 | "# Plot the training and validation loss plots" 376 | ], 377 | "metadata": { 378 | "id": "Yu_PgrtZow1w" 379 | } 380 | }, 381 | { 382 | "cell_type": "code", 383 | "source": [ 384 | "plt.figure(figsize=(15,4))\n", 385 | "\n", 386 | "plt.subplot(1, 2, 1)\n", 387 | "plt.title('Train Loss')\n", 388 | "plt.plot(TANGOS_train_loss_ls, label='TANGOS')\n", 389 | "plt.plot(L2_train_loss_ls, label='L2')\n", 390 | "plt.xlabel('Epochs')\n", 391 | "plt.ylabel('Loss')\n", 392 | "plt.ylim(1.5,6)\n", 393 | "plt.legend()\n", 394 | "\n", 395 | "plt.subplot(1, 2, 2)\n", 396 | "plt.title('Val Loss')\n", 397 | "plt.plot(TANGOS_val_loss_ls, label='TANGOS')\n", 398 | "plt.plot(L2_val_loss_ls, label='L2')\n", 399 | "plt.legend()\n", 400 | "plt.xlabel('Epochs')\n", 401 | "plt.ylabel('Loss')\n", 402 | "plt.ylim(.6,1.8)\n", 403 | "plt.show()" 404 | ], 405 | "metadata": { 406 | "colab": { 407 | "base_uri": "https://localhost:8080/", 408 | "height": 295 409 | }, 410 | "id": "PubGcagQJj5_", 411 | "outputId": "9e253623-9b5b-4cb9-88f3-759dfe2972a5" 412 | }, 413 | "execution_count": 7, 414 | "outputs": [ 415 | { 416 | "output_type": "display_data", 417 | "data": { 418 | "text/plain": [ 419 | "
" 420 | ], 421 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3gAAAEWCAYAAAA0DzVNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXzcVb3/8ddnklmyTJI2SRe676UrSy1VuECprGJRWWRRQFFEuahsKl5/CCjX5aICFwERkNWK1oVeRDZllTWF0oWWUkpL0y1JtyTNnpzfH99JyDKTZpuZTPJ+Ph55TOb7PTP5pGhPP/M553PMOYeIiIiIiIikPl+yAxAREREREZG+oQRPRERERERkgFCCJyIiIiIiMkAowRMRERERERkglOCJiIiIiIgMEErwREREREREBggleCJJZGb/MLMLkh2HiIhIf2VmzswmJzsOkVShBE+km8ysstVXk5lVt3p+Xnfeyzl3snPu/h7GscnMPtmT14qIiCSKmT1hZjdEuX6ame0ws/RevPdzZvaV3kUoMrAowRPpJudcdvMX8CHw6VbXHm4e15sJS0REZAC5H/iCmVm7618EHnbONSQhJpEBSwmeSB8xs2PNrNjMvmtmO4DfmdkQM3vMzErNbE/k+9GtXtPyyaOZXWhmL5nZTZGxH5jZyT2II2hmN5vZtsjXzWYWjNwriMSw18x2m9mLZuaL3PuumW01swoze9fMFvXRH42IiAxufwPygf9ovmBmQ4BTgQfMbL6ZvRKZm7ab2W1mFujNDzQzn5n9wMw2m1mJmT1gZrmReyEze8jMdkV+5htmNjxy70Iz2xiZCz/o7sockf5ACZ5I3xoBDAXGARfj/X/sd5HnY4Fq4LZOXn8E8C5QAPwcuCfKJ54H8l/AAuAQYC4wH/hB5N6VQDFQCAwHvg84M5sG/CfwMedcGDgR2NTNnysiItKBc64a+CNwfqvLZwHrnHNvA43A5Xhz38eBRcA3evljL4x8LQQmAtl8NP9eAOQCY/ASz0uAajPLAm4FTo7MhZ8AVvQyDpGEU4In0reagB8652qdc9XOuV3OuT8756qccxXAjcAxnbx+s3Put865RrwlLSPxErHuOA+4wTlX4pwrBa7HWwYDUB95z3HOuXrn3IvOOYc3uQaBGWbmd85tcs69382fKyIiEsv9wBlmFoo8Pz9yDefccufcq865BufcJuA3dD5XdsV5wC+dcxudc5XANcDZke0T9XiJ3WTnXGPk55dHXtcEzDKzDOfcdufcml7GIZJwSvBE+lapc66m+YmZZZrZbyJLRMqBF4A8M0uL8fodzd8456oi32Z3M4aDgM2tnm+OXAP4H2AD8FRkCcr3Ij9rA/Bt4DqgxMz+YGYHISIi0geccy8BZcBnzGwS3uqS3wOY2dTI9oEdkbnyv/Gqeb0RbS5Mx/vQ9EHgSeAPka0MP498uLkf+DxeRW+7mf3dzKb3Mg6RhFOCJ9K3XLvnVwLTgCOccznA0ZHr3V122R3b8JaENhsbuYZzrsI5d6VzbiKwGLiiea+dc+73zrmjIq91wM/iGKOIiAw+D+BV7r4APOmc2xm5fgewDpgSmSu/T+/nyWhzYQOwM7KC5Xrn3Ay8ZZinRuLCOfekc+54vNUu64Df9jIOkYRTgicSX2G8fXd7zWwo8MM+fn9/ZLN481c6sAT4gZkVmlkBcC3wEICZnWpmkyP7+vbhLc1sMrNpZnZcpBlLTSTmpj6OVUREBrcHgE8CXyWyPDMiDJQDlZGK2de7+b7p7eZCP95ceLmZTTCzbLyq4CPOuQYzW2hmsyOracrxlmw2mdnwyNENWUAtUInmQklBSvBE4utmIANvWcqrwBN9/P6P4yVjzV/XAT8GioCVwCrgzcg1gCnAM3iT1ivA7c65Z/H23/00EucOYBjefgUREZE+Edlf9zKQBSxrdesq4FygAq9i9kg33/oO2s6FvwPuxVuK+QLwAd6Hl5dFxo8AluIld2uB5yNjfcAVeNW/3Xj7ALubbIoknXn9FURERERERCTVqYInIiIiIiIyQMQ1wTOzPDNbambrzGytmX283X0zs1vNbIOZrTSzw+IZj4iISH9gZvdGDl9eHeN+rpn9n5m9bWZrzOxLiY5RRERSU7wreLcATzjnpuMduLy23f2T8fYETcE7FPqOOMcjIiLSH9wHnNTJ/UuBd5xzc4FjgV+YWSABcYmISIqLW4JnZrl4LeHvAXDO1Tnn9rYbdhrwgPO8inc+2Mh4xSQiItIfOOdewGviEHMIEI50vM2OjG1IRGwiIpLa0uP43hOAUuB3ZjYXWA58K3KIZLNRwJZWz4sj17a3fiMzuxivwkdWVtbh06cn4czJ2nLY9T4UTIVAFuDNvqu37mNYOMjwnFDiYxIRGeCWL19e5pwrTHYcSXAbXpfBbXgt5D/vnIvarj1pc+TONRAMQ95YACpqGti0az+TCrPJDKQlJgYRkUGqs/kxngleOnAYcJlz7jUzuwX4HvD/uvtGzrm7gLsA5s2b54qKivo00C4pLoK7F8G5t8DUE1ouz77uSU4/bDTXLZ6Z+JhERAY4M9uc7BiS5ERgBXAcMAl42sxedM6Vtx+YtDnytvkwbDqc9QAAyzfv5vQ7XuGOL8/nmKmDMScXEUmczubHeO7BKwaKnXOvRZ4vxUv4WtsKjGn1fHTkWv8TyvMea/a1uTwkM8DeqrokBCQiIgPYl4C/RLYwbMA7xysJy1c6EQxDbUXL03DID0BFTX2yIhIREeKY4DnndgBbzGxa5NIi4J12w5YB50e6aS4A9jnnttMfhXK9x5q22wjzMv3sqdJkJiIifepDvHkTMxsOTAM2JjWi9toleDmRBK+8WlsFRUSSKZ5LNAEuAx6OdP7aCHzJzC4BcM7dCTwOnAJsAKrwPrHsn0I53mOHBE8VPBER6R4zW4LXHbPAzIqBHwJ+aJkffwTcZ2arAAO+65wrS1K40QWzoWJHy9NwyPsnhSp4IiLJFdcEzzm3ApjX7vKdre47vFbQ/V96ENIzoizR9LN51/4YLxKRwai+vp7i4mJqamqSHUrKCIVCjB49Gr/fn+xQEsI5d84B7m8DTuhsTNIFc9pU8DIDaaT5jIoaVfBEJDbNkd3Tk/kx3hW8gSUjr0OCl5fhZ89+VfBE5CPFxcWEw2HGjx+P1+VeOuOcY9euXRQXFzNhwoRkhyNd1W6JppkRDqVTrgqeiHRCc2TX9XR+jPdB5wNLKBeqOy7RLK9poKExavdqERmEampqyM/P18TVRWZGfn6+Ps1NNcEw1FWAcy2XwqF0VfBEpFOaI7uup/OjErzuCOVGXaIJUK4JTURa0cTVPfrzSkGBbHBNUF/Vcikc9GsPnogckP7O77qe/FkpweuOKAleXmYAgD1qtCIiIoNJMOw9tu6kmZGuLpoiIkmmBK87oiZ4XgVPnTRFpL/YtWsXhxxyCIcccggjRoxg1KhRLc9LSkrw+/3ceeedbV4zfvx4Tj/99JbnS5cu
5cILL2x5/sQTTzB//nymT5/OIYccwuc//3k+/PBDwNsj8OMf/5gpU6YwdepUFi5cyJo1a1pee++99zJ79mzmzJnDrFmzePTRR+P7ByCJEYx0l253Fp724IlIfzYY5kg1WemOUF6HYxKGRCp4e3UWnoj0E/n5+axYsQKA6667juzsbK666ioA7rjjDhYsWMCSJUu45JJL2rxu+fLlvPPOO8yYMaPN9dWrV3PZZZexbNkyDj74YACWLVvGpk2bGDt2LL/+9a95+eWXefvtt8nMzOSpp55i8eLFrFmzhrKyMm688UbefPNNcnNzqayspLS0NAF/ChJ3LRW88pZL2oMnIv3dYJgjVcHrjuYKXqsN5c0VPB12LiKpYMmSJfziF79g69atFBcXt7l35ZVXcuONN3Z4zc9+9jO+//3vt0xcAIsXL+boo49uuX/bbbeRmZkJwAknnMAnPvEJHn74YUpKSgiHw2RnZwOQnZ2tTpkDRdD7b0ptZculHFXwRCSFDZQ5UhW87gjlehvK6ypbPrnMa6ngaYmmiHR0/f+t4Z1t5Qce2A0zDsrhh5+e2e3Xbdmyhe3btzN//nzOOussHnnkEa688sqW+2eddRa33347GzZsaPO6NWvWtHy62V55eTn79+9n4sSJba7PmzePNWvWcOGFFzJ8+HAmTJjAokWL+NznPsenP/3pbscu/VC0PXihdCprG2hqcvh8aqIgIp3THBmfOVIVvO4I5XqPrfbh5YTSSfOZlmiKSL/3yCOPcNZZZwFw9tlns2TJkjb309LSuPrqq/nJT34S8z2a9y5MnTqVm2666YA/My0tjSeeeIKlS5cydepULr/8cq677rpe/R7ST0RJ8MIhP87B/jot0xSR1DKQ5khV8LojI897rNkHuaMBr3VpboZfXTRFJKqefIoYL0uWLGHHjh08/PDDAGzbto333nuPKVOmtIz54he/yE9+8hNmzZrVcm3mzJm8+eabzJ07t2Xvwk033URlZSU5OTlkZWWxcePGNp9QLl++nGOOOQbw/p6cP38+8+fP5/jjj+dLX/qSkryBIEqTlZwM758V5TUNhEP+ZEQlIilEc2R85khV8LqjuYLX4bBzvyp4ItKvrV+/nsrKSrZu3cqmTZvYtGkT11xzTYdPKP1+P5dffjm/+tWvWq595zvf4cYbb2Tt2rUt16qqPjr77Oqrr+ab3/wm1dXVADzzzDO89NJLnHvuuWzbto0333yzZeyKFSsYN25cvH5NSaRAZA9eXdsKHqCz8EQkpQy0OVIVvO6IskQTIC/Dz95qVfBEpP9asmQJn/3sZ9tcO/300/n85z/Ptdde2+b6RRddxI9//OOW57Nnz+aWW27h/PPPp7y8nIKCAsaOHcv1118PwGWXXcaePXuYPXs2aWlpjBgxgkcffZSMjAxKSkq46qqr2LZtG6FQiMLCwg7tpyVFpQfB52+3RNP7Z4U6aYpIKhloc6S5Vh0hU8G8efNcUVFRcn747o1w66HwmTvhkHNaLl903xts31fD49/6j+TEJSL9ytq1a9t005KuifbnZmbLnXPzkhRSykn4HPmzCTDrc/CpXwDw9pa9nPbrf3PPBfNYdPDwxMUhIilDc2T3dXd+1BLN7gi12oPXSl5mQF00RURk8AmGVcETEelnlOB1R/OG8pooe/Cqtd9AREQGmWC4zTl42oMnIpJ8cU3wzGyTma0ysxVm1mHNiJkda2b7IvdXmNm10d6n30hLh0C4QwVvSKafqrpGahsakxSYiIhIEgTDUPvRGVbNFbxyVfBERJImEU1WFjrnyjq5/6Jz7tQExNE3QrlRl2gC7K2qZ3hOWjKiEhERSbxgGCpLWp6G/GkE0n2Uq4InIpI0WqLZXVETPG9Jis7CExGRQaXdHjyAnFC69uCJiCRRvBM8BzxlZsvN7OIYYz5uZm+b2T/MrP+cdhhLRl6Hc/CGZnkVvN2VSvBERGQQCWRDXWWbSzkhP+Xaly4ikjTxTvCOcs4dBpwMXGpmR7e7/yYwzjk3F/hf4G/R3sTMLjazIjMrKi0tjW/EBxKlgleYHQSgtLI2GRGJiHSQnZ3d4dovf/lLZsyYwZw5c1i0aBGbN29OQmQyoESp4IVVwRORfm6gz5FxTfCcc1sjjyXAX4H57e6XO+cqI98/DvjNrCDK+9zlnJvnnJtXWFgYz5APLEqCVxBJ8MpUwRORfuzQQw+lqKiIlStXcsYZZ/Cd73wn2SFJqgvmQH0VNH6U0IVDfnXRFJGUM5DmyLgleGaWZWbh5u+BE4DV7caMMDOLfD8/Es+ueMXUJ6IkeLkZftJ9RpkqeCLSjy1cuJDMzEwAFixYQHFxcZIjkpQXDHuPdR9V8XIy0tVFU0RSzkCaI+PZRXM48NdI/pYO/N4594SZXQLgnLsTOAP4upk1ANXA2c45F8eYei+U57WEbmoCn5cf+3xGfnaAsgoleCLSzj++BztW9e17jpgNJ/+0V29xzz33cPLJJ/dRQDJoNSd4tZWQMQSAcFAVPBHpIs2RcRG3BM85txGYG+X6na2+vw24LV4xxEUoF3BQu69lMgNvmaYqeCKSCh566CGKiop4/vnnkx2KpLpgZB9Lq3144VA65dWq4IlIahoIc2QizsEbWEK53mNN2wSvMBzUHjwR6aiXnyL2tWeeeYYbb7yR559/nmAwmOxwJNW1VPBaL9H0U13fSH1jE/40ncYkIp3QHBkX+pu3u1oneK2ogici/d1bb73F1772NZYtW8awYcOSHY4MBMEc77FdBQ+gUvvwRCSFDKQ5UhW87srI8x5jJHjOOSL7DkVEkqaqqorRo0e3PL/iiit4/PHHqays5MwzzwRg7NixLFu2LFkhykAQpclKOOQHoLymniGRc2JFRPqTgT5HKsHrruYKXrvDzguyA9Q3OvZV15OXqQlNRJKrqampw7UrrrgiCZFINGZ2L3AqUOKcmxVjzLHAzYAfKHPOHZO4CLso0HEPXk6kgqez8ESkvxroc6SWaHZXjCWaheHms/C0TFNERA7oPuCkWDfNLA+4HVjsnJsJnJmguLonyh68lgpetTppiogkgxK87upkDx5AaYUarYiISOeccy8AuzsZci7wF+fch5HxJQkJrLuiJHh5mV6Ct08JnohIUijB665AGLCYCZ4qeCIC0N+P9Oxv9OfVwVRgiJk9Z2bLzez8ZAcUlS8N/FlRE7y9SvBEJAb9nd91PfmzUoLXXT6fV8Wr6bgHD5TgiQiEQiF27dqlCayLnHPs2rWLUCiU7FD6k3TgcOBTwInA/zOzqdEGmtnFZlZkZkWlpaWJjNETzG6b4GV48+HeKiV4ItKR5siu6+n8qCYrPRHK7VDBG5IZIM1nSvBEhNGjR1NcXExS/rGdokKhUJuOZkIxsMs5tx/Yb2YvAHOB9e0HOufuAu4CmDdvXuL/xRQMt0nwMgJpBNN97K3SlgUR6UhzZPf0ZH5UgtcTURI8n8/IzwpQpj14IoOe3+9nwoQJyQ5DUtujwG1mlg4EgCOAXyU3pBjaJXjgLdNUBU9EotEcGX9K8HoiSoIHOuxcRES
6xsyWAMcCBWZWDPwQ7zgEnHN3OufWmtkTwEqgCbjbObc6WfF2KhiGuso2l/IyAuyt1geeIiLJoASvJzLyoGxDh8sFYSV4IiJyYM65c7ow5n+A/0lAOL0TCMPezW0u5aqCJyKSNGqy0hMxK3gBSiuU4ImIyCASDENteZtLeRl+HZMgIpIkSvB6IpQXNcErzA5SVlmnrkAiIjJ4aA+eiEi/ogSvJ0K5UL8fGttOXgXZQeoamyivaUhSYCIiIgkWDENtJbT6cDMvU3vwRESSRQleT4TyvMeatktSCsI6C09ERAaZYDY01UPDR3NfboafmvomauobkxiYiMjgFNcEz8w2mdkqM1thZkVR7puZ3WpmG8xspZkdFs94+kwo13vscNh5EIAy7cMTEZHBIpjjPbY+7DzTD6B9eCIiSZCICt5C59whzrl5Ue6dDEyJfF0M3JGAeHrvQAlepZaliIjIIBEMe4+tGq0MyfRWtOzRYeciIgmX7CWapwEPOM+rQJ6ZjUxyTAfWkuC1bbTyUYKnCp6IiAwSzQleq7Pw8jK8Cp4arYiIJF68EzwHPGVmy83s4ij3RwFbWj0vjlxrw8wuNrMiMysqLS2NU6jdECPBG5oVwGdK8EREZBAJZHuPrZZo5mYqwRMRSZZ4J3hHOecOw1uKeamZHd2TN3HO3eWcm+ecm1dYWNi3EfZERqTJSnXbJZppPmNolg47FxGRQaRliWbrPXjeEs196qQpIpJwcU3wnHNbI48lwF+B+e2GbAXGtHo+OnKtf4tRwYPmw841oYmIyCARrcmKlmiKiCRN3BI8M8sys3Dz98AJwOp2w5YB50e6aS4A9jnntscrpj7jzwRfevTDzsNBSlXBExGRwSJKBS8zkIY/zdirLpoiIgmXHsf3Hg781cyaf87vnXNPmNklAM65O4HHgVOADUAV8KU4xtN3zLwqXtQKXpCNpfuTEJSIiEgSREnwzIzcjIAqeCIiSRC3BM85txGYG+X6na2+d8Cl8YohrkJ5HY5JAG+JZlllLc45IsmtiIjIwOXPAPO1SfDAOwtPe/BERBIv2cckpK5QbocmK+BV8GobmqisbUhCUCIiIglm5lXxWh2TAN4+PFXwREQSTwleT2XmQ9WuDpd12LmIiAw6wZyoFbw9SvBERBJOCV5PZRVET/DCOuxcREQGmWAYasvbXMrLDLCvSh92iogkmhK8nsrMh/1lHS4XZHtn/5RVKMETEZFBIpDdsYKX4VcXTRGRJFCC11NZBdBQDXVVbS4XqoInIiKDTTAMNe0reH6q6hqpbWhMUlAiIoOTEryeysz3HqvaVvGGZgYwg1LtwRMRkcEilNNhiWZupreiZZ+qeCIiCaUEr6cyC7zHdss009N8DM0MqIInIiKDRyi3YwUvww/APjVaERFJKCV4PZUVSfBidNIsKVeCJyIig0SwYwUvL9NL8LQPT0QksZTg9VTzEs0ojVYOyguxfV91ggMSERFJklAONNRAw0fbE/IyvCWaOgtPRCSxlOD1VMsevI4VvNFDMtmyu6rDdRERkQEpmOs9tqritVTwdFSCiEhCKcHrqVAu+PwdmqwAjB6SQXlNgzaWi4jI4BCKJHg1+1ou5bYkeJoLRUQSSQleT5nFPAtvzNBMALbu0TJNEREZBEI53mOrBC8cTCfNZ+ytVgVPRCSRlOD1RlZBjCWaGQAU79EyTRERGQSCkQSv1RJNM/MOO1cFT0QkoZTg9UZmfsw9eABbVMETEZHBoKWC1/4sPL+6aIqIJJgSvN6IsURzSKafzECaKngiIjI4BDsu0QTvLDydgyciklhxT/DMLM3M3jKzx6Lcu9DMSs1sReTrK/GOp09lFURtsmJmjBmSSbEqeCIiEoWZ3WtmJWa2+gDjPmZmDWZ2RqJi65FQxy6aAHmZAe3BExFJsERU8L4FrO3k/iPOuUMiX3cnIJ6+k1ngfVrZ2PHTydFDMpTgiYhILPcBJ3U2wMzSgJ8BTyUioF4Jhr3Hdks0tQdPRCTx4prgmdlo4FNAaiVuXZXV2Vl4GVqiKSIiUTnnXgB2H2DYZcCfgZL4R9RLvjQIhDtU8HIztURTRCTR4l3Buxn4DtDUyZjTzWylmS01szHRBpjZxWZWZGZFpaWlcQm0RzILvMcYjVYqaho0sYmISLeZ2Sjgs8AdXRjbP+bIUE6UCl6AitoG6hs7+2eAiIj0pbgleGZ2KlDinFveybD/A8Y75+YATwP3RxvknLvLOTfPOTevsLAwDtH2UGakghel0UrzUQlbVMUTEZHuuxn4rnPugJlRv5kjgzlQs7fNpbzIYef71ElTRCRh4lnBOxJYbGabgD8Ax5nZQ60HOOd2OedqI0/vBg6PYzx9L6u5ghf7sHPtwxMRkR6YB/whMoeeAdxuZp9JbkgHEMqN0mTFS/C0D09EJHHiluA5565xzo12zo0Hzgb+5Zz7QusxZjay1dPFdN6Mpf9pXqK5X4edi4hI33HOTXDOjY/MoUuBbzjn/pbksDoXbYlmZgCAfeqkKSKSMOmJ/oFmdgNQ5JxbBnzTzBYDDXibzS9MdDy9kjkUsKh78HIz/GQH01XBExGRDsxsCXAsUGBmxcAPAT+Ac+7OJIbWc8Ec2LWhzaW8DFXwREQSLSEJnnPuOeC5yPfXtrp+DXBNImKIC18aZAyJeRaeOmmKiEg0zrlzujH2wjiG0ndCOR0POtcSTRGRhEvEOXgDW2Z+1CYr4HXSVAVPREQGhWBkiaZzLZfyMrwlmnvVZEVEJGGU4PVWVkHUJZrw0WHnrtVkJyIiMiCFcqGpHhpqWi6FQ+mYwb4q7cETEUkUJXi9lZnfaYJXWdug9tAiIjLwhXK8x1aNVnw+IzfDrwqeiEgCKcHrrayCTpdoAmzZrWWaIiIywAVzvcf2RyVk+LUHT0QkgZTg9VZzBa+p41m0OipBREQGjZYKXttGK7mZAfZoiaaISMIoweutzAJwjVCzt8MtHXYuIiKDRihSwWuX4A3N9LN7vxI8EZFEUYLXW1mRw85jnIUXDqWrgiciIgNfMFLBa7dEsyA7SFllbRICEhEZnJTg9VZmvvcYs9FKJltUwRMRkYEuSpMVgIJwkF2VdTQ1qaO0iEgiKMHrreYKXsxGKzrsXEREBoFg9D14BdlBGpqcOkqLiCSIErzeaqngdZbg6Sw8EREZ4ALZgEVZoukddq5lmiIiiaEEr7cyO6/gjRmSSVVdI3vUIlpERAYyn89bptluiWZhdhCAUiV4IiIJoQSvt/wh71PLqt1Rbzd30ty0a38ioxIREUm8YG7HCl7YS/DKKtVJU0QkEZTg9YXM/JhLNKcOzwZg/Y6KREYkIiKSeFEqeAWRCl5ZhSp4IiKJoASvL2QVdLpEMzOQxjoleCIiMtAFczo0WcnL8JPmM+3BExFJECV4faGTCp7PZ0wdHmbdjvKo90VERAaMUA7Utk3wfD4jPyugBE9EJEHinuCZWZqZvWVmj0W5FzSzR8xsg5m9Zmbj4x1PXGQWwP7o5+ABHDwyzLs7KtRJU0REBrZQboclmtB82Ln24ImIJEIiKnjfAt
bGuHcRsMc5Nxn4FfCzBMTT97LyYx50DjBteJg9VfWUaP+BiIgMZMGcDk1WAArDQVXwREQSJK4JnpmNBj4F3B1jyGnA/ZHvlwKLzMziGVNcZBZAQzXURe+UOW2Ed/ir9uGJiMiA1txkpd2KlYLsoJqsiIgkSLwreDcD3wGaYtwfBWwBcM41APuA/PaDzOxiMysys6LS0tJ4xdpzWZ2fhTd9RBiAddu1D09ERAawYA64xg4feBaEA5RV1mmrgohIAsQtwTOzU4ES59zy3r6Xc+4u59w859y8wsLCPoiuj2VFYqosiXp7SFaA4TlB3lUFT0REBrKQt2Kl/TLNwuwgdY1NlNc0JCEoEZHBJZ4VvCOBxWa2CfgDcJyZPdRuzFZgDICZpQO5QOzNbP3V0Ine4+73Yw6ZPiJHSzRFRGRgC+V6j7HOwtM+PBGRuItbguecu8Y5N9o5Nx44G/iXc+4L7YYtAy6IfH9GZEzqrWNu9a8AACAASURBVN/IGweWBmXvxRwyfUSYDSWV1DfGWq0qIiKS4oKRBK9Wh52LiCRLlxI8M8syM1/k+6lmttjM/D35gWZ2g5ktjjy9B8g3sw3AFcD3evKeSZcegCHjYVfsBG/aiDB1jU1sKoveiEVERFJTX86RKa95iWa7w84LwgEAHZUgIpIAXa3gvQCEzGwU8BTwReC+rv4Q59xzzrlTI99f65xbFvm+xjl3pnNusnNuvnNuY/fC70cKpsCuzpdogjppiogMQL2aIweUYIwET0s0RUQSpqsJnjnnqoDPAbc7584EZsYvrBSUP9lL8JqiL8GcNCyLNJ+xboc6aYqIDDCaI5uFoi/RHJIZwGdK8EREEqHLCZ6ZfRw4D/h75FpafEJKUfmTvbPwyouj3g6mpzGxIEudNEVEBh7Nkc1almi2TfDSfMbQLB12LiKSCF1N8L4NXAP81Tm3xswmAs/GL6wUVDDFe+ys0crIHNZuV4InIjLAaI5s5s/0mo7VdlytUpAdoLRCe/BEROKtSwmec+5559xi59zPIhvJy5xz34xzbKklP5LgdboPL8zWvdVU1NQnKCgREYm3nsyRZnavmZWY2eoY988zs5VmtsrMXjazuXEJvq+ZeVW8dnvwAArDquCJiCRCV7to/t7McswsC1gNvGNmV8c3tBSTPQwC4U47aU4fEQZg/U5V8UREBooezpH3ASd1cv8D4Bjn3GzgR8BdfRJsIgRzOizRBK/RihI8EZH46+oSzRnOuXLgM8A/gAl4XcKkmRkUTO50iea0SIKnZZoiIgNKt+dI59wLwO5O7r/snNsTefoqMLqPYo2/UG7MJZpllbWk4nG3IiKppKsJnj9yps9ngGXOuXpAf0O3lz8Fdm2IeXtUXgbhYLoarYiIDCzxniMvwkscozKzi82syMyKSktL+/DH9lAoN2YFr6a+if11jUkISkRk8OhqgvcbYBOQBbxgZuMA9ftvr2AK7NsCdVVRb5sZMw7KYcWWvQkOTERE4ihuc6SZLcRL8L4ba4xz7i7n3Dzn3LzCwsK++LG9E4y+B6/lLLwKLdMUEYmnrjZZudU5N8o5d4rzbAYWxjm21JM/yXvcHfu89iMm5rNm2z7K1WhFRGRAiNccaWZzgLuB05xzu3odaKKEcqIv0QzrsHMRkUToapOVXDP7ZfMSEDP7Bd4nldJaSyfN2PvwFkwcSpODok0xt16IiEgKicccaWZjgb8AX3TOre+TQBMlZpOVAKAET0Qk3rq6RPNeoAI4K/JVDvwuXkGlrOYKXlnsfXiHjR1CIM3HqxuV4ImIDBDdniPNbAnwCjDNzIrN7CIzu8TMLokMuRbIB243sxVmVhS/8PtYc5OVpqY2lwsjSzRLK3UWnohIPKV3cdwk59zprZ5fb2Yr4hFQSgtkQc7oTit4IX8ah4zJ47WNqbPaRkREOtXtOdI5d84B7n8F+EpfBJdwoRzAQV1l5HvP0KwAZtqDJyISb12t4FWb2VHNT8zsSKA6PiGluPxJnXbSBG+Z5qqt+3TguYjIwKA5srVgJKlr12glPc3HkMyAlmiKiMRZVxO8S4Bfm9kmM9sE3AZ8LW5RpbKCKd4SzU7O+TliYn5kH96emGNERCRlaI5srblq18lZeCIiEj9d7aL5tnNuLjAHmOOcOxQ4Lq6Rpar8KVC7D/bHPovosLFD8KcZr36gZZoiIqlOc2Q7oVzvMcZRCWXagyciElddreAB4Jwrd841fyR3RWdjzSxkZq+b2dtmtsbMro8y5kIzK41sIF9hZqm536C1gsneY1nsfXgZAW8fnhqtiIgMHN2ZIwe0zALvcX9Zh1tegqcKnohIPHUrwWvHDnC/Fjgu8qnmIcBJZrYgyrhHnHOHRL7u7kU8/UN+JMHrpNEKwBET8lm9dR+VtQ0JCEpERBLsQHPkwBUe6T1W7OhwqyA7qCYrIiJx1psEL/YmMyBy2Gtl5Kk/8tXpawaE3DGQFuxCo5V8GpuczsMTERmYBv58F0tmPvjSoWJ7h1sF4QD76xqprmtMQmAiIoNDpwmemVWYWXmUrwrgoAO9uZmlRVpFlwBPO+deizLsdDNbaWZLzWxMjPe5uPkA2dLS2Hvb+gVfmlfF2/lOp8MOG5fn7cPTMk0RkZTU2zlywPL5IHtEzAoe6LBzEZF46jTBc86FnXM5Ub7CzrkDnqHnnGt0zh0CjAbmm9msdkP+DxjvnJsDPA3cH+N97nLOzXPOzSssLOzab5ZM4z4BH74CDbEnsMxAOnNG5/GqzsMTEUlJvZ0jB7TwiKgVvObDzksqahIdkYjIoNGbJZpd5pzbCzwLnNTu+i7nXHMWdDdweCLiibvJi6C+Cj58tdNhzefhaR+eiIgMKOHoFbzxBVkAvF+6P9ERiYgMGnFL8Mys0MzyIt9nAMcD69qNGdnq6WJgbbziSajxR3n7D97/V6fDjp02jMYmx9/e2pqgwERERBIgRgVv7NBMMvxprNtekYSgREQGh3hW8EYCz5rZSuANvD14j5nZDWa2ODLmm5EjFN4GvglcGMd4EicYhjEL4P1/djps3rghHDImjzuff5/6xqYEBSciIhJn4RFQsxfqq9tcTvMZU0eEWbej4yHoIiLSN+KW4DnnVjrnDnXOzXHOzXLO3RC5fq1zblnk+2ucczOdc3Odcwudc+s6f9cUMvk42LEKKktiDjEz/nPhZIr3VPPoim0JDE5ERCSOmo9KqNzZ4dbBI8Ks3V6Oc4O30aiISDwlZA/eoDTpOO/x/Wc7Hbbo4GFMHxHm9uc20NikyU5ERAaA8AjvMco+vOkjwuypqqdU5+GJiMSFErx4GTHXOwvoAMs0zYxLF05mY+l+nljdcSIUERFJOS2HnXfchzd9ZA4Aa3doH56ISDwowYsXnw8mLvQqeE2d7687ZfZIJhZkcduzG7RkRUREUl9Lghe9ggewbrv24YmIxIMSvHiavAj2l8DO1Z0OS/MZXz92Emu3l/Psu7H37ImIiKSEjCGQFohawcvLDDAyN8Q6VfBEROJCCV48TVzoPR7guASAzxw6ilF5Gdz8zHuq4omISGozi3kWH
nhVvLWq4ImIxIUSvHjKGQnDZh5wHx6AP83Htz85hZXF+/iH9uKJiEiqC4+MWsEDbx/e+6WVOiJIRCQOlODF26SF8OGrULf/gEM/d9hopgzL5qYn36VBk56IiKSy7OGdVvDqGx0bSw88N4qISPcowYu3aSdDYx2s+tMBh6b5jKtPnMbGsv0sXV6cgOBERETiJDyykwTP66SpA89FRPqeErx4G3ckHHQovPQraGw44PDjZwzn0LF53PzMe9TUNyYgQBERkTgIj4Da8qgrWCYWZuFPM9ZuV6MVEZG+pgQv3szg6KthzyZY/ecuDDe+e9J0dpTXcP/Lm+IenoiISFx0clSCP83H5GFhVfBEROJACV4iTD3Za7by4k0HPBMPYMHEfI6dVsjtz73P3qq6BAQoIiLSx8IjvMcYyzQPHhFmnSp4IiJ9TgleIvh8cPSVULYe1i7r0ku+d/J0Kmsb+PHf18Y5OBERkThoqeDF6qQZZkd5DXv264NMEZG+pAQvUWZ8BvIne1W8LpxzN31EDpccM5Gly4t5YX1pAgIUERHpQweo4H3UaEVVPBGRvqQEL1F8aXDUFbBjFbz3VJdectlxU5hYmMU1f1nF/toDN2gRERHpN0K5kJ7RaQUP1ElTRKSvKcFLpDlnQd5YePpaqK854PCQP42fnz6Hbfuq+Z8n301AgCIiIn3EzKvixajgFWYHyc8KaB+eiEgfi1uCZ2YhM3vdzN42szVmdn2UMUEze8TMNpjZa2Y2Pl7x9Atpfjj1V1C6Dv55Q5deMm/8UC74+Hjuf2UTRZt2xzc+ERFJCDO718xKzGx1jPtmZrdG5seVZnZYomPsE52chWdmHDwyh3e2q4InItKX4lnBqwWOc87NBQ4BTjKzBe3GXATscc5NBn4F/CyO8fQPkz8JH/sqvPpr2Phcl15y9YnTOCg3g4sfXM7qrfviG5+IiCTCfcBJndw/GZgS+boYuCMBMfW98PCYSzQBZo7KYd2OcmobdO6riEhfiVuC5zyVkaf+yFf77iKnAfdHvl8KLDIzi1dM/cbxN3gNV/72Dajee8DhWcF0HvrKEWT40zjnrld5Q5U8EZGU5px7AejsL/PTgAcic+mrQJ6ZjUxMdH0oPBIqd8a8PWdUHvWNjvU7KmOOERGR7onrHjwzSzOzFUAJ8LRz7rV2Q0YBWwCccw3APiA/yvtcbGZFZlZUWjoAOkoGMuFzd3nLVh6/uksvmVCQxZ8u+TiF4SBfvOc1nldnTRGRgaxlfowojlzroF/PkeERUFcJtdH32c0elQvAKq1OERHpM3FN8Jxzjc65Q4DRwHwzm9XD97nLOTfPOTevsLCwb4NMllGHwzHfhVV/hDfu6dJLDsrL4I+XfJyJBdl85f43WL5ZlTwRkcGuX8+RLWfhRd+HN2ZoBrkZfiV4IiJ9KCFdNJ1ze4Fn6bjfYCswBsDM0oFcYFciYuoXjr4KppzoVfE2/LNLLynIDrLkqws4KC+DSx9+i7LK2jgHKSIiSdAyP0aMjlxLLS1n4UXfh2dmzB6Vy6qtB96uICIiXRPPLpqFZpYX+T4DOB5Y127YMuCCyPdnAP9yrgungA8UvjQ44x4YdjD86UIoWdull+Vm+rn9vMPYU1XHt/7wFo1Ng+ePTERkkFgGnB/pprkA2Oeci92tpL86QAUPYNaoXN7dUaFGKyIifSSeFbyRwLNmthJ4A28P3mNmdoOZLY6MuQfIN7MNwBXA9+IYT/8UDMM5f4D0EPz+LKjs2v6JmQfl8qPTZvHvDbu45Zn1cQ5SRET6kpktAV4BpplZsZldZGaXmNklkSGPAxuBDcBvgW8kKdTeOUAFD2DO6FzqGx3v7tB5eCIifSE9Xm/snFsJHBrl+rWtvq8BzoxXDCkjb4yX5N13Ctz/aTj7YcifdMCXnfWxMRRt3s2t/9rA3up60nxGfWMTuRl+Ll04mcxA3P7ziohILzjnzjnAfQdcmqBw4icYhkB2pxW81o1W5ozOS1RkIiIDljKA/mL04XDuH+FPF8BvF8IZ93pn5h3ADafN4sPdVTzyxhYCaT4C6T52V9Xx7o5KfvPFw0nzDfxTJ0REpB8Lj+i0gjd6SAZ5mX6d8yoi0keU4PUnE4+Bi5+DP5wHD58Ji34IR34LOjkaMORP4w8Xf7zNtQde2cS1j67humVruOG0mQyGowVFRKSfyjkI9myOebu50crKYiV4IiJ9ISFdNKUbhoyHi56CGafBMz+Ex6+CpqZuvcX5Hx/P146eyIOvbuY3L2yMT5wiIiJdMXIu7FwNDXUxh8walcv6nRXU1KvRiohIb6mC1x8FsuCM30HeWPj3LVC9Bz5zJ6QHuvwW3z1pOtv21fDTf6xj9dZ9ZAXS8acb2UE/R0wcyoIJ+WQE0uL4S4iIiACj5kHj/8LOVd4ZsFHMGfVRo5W5Y7QPT0SkN5Tg9VdmcPwNkDHUq+TV7IOzHoRAZpde7vMZN505B+ccb324l4amJhoaHRU1Ddz5/PsE0n0cMWEox04bxicPHsa4/Kw4/0IiIjIoNSd1W9+MmeDNatVoRQmeiEjvKMHr7476NmQMgce+DXcdAyf/HCYt7NJLg+lp3HbuYW2u1dQ38voHu3lhfSnPrS/lR4+9w48ee4cpw7JZdPBwjps+jMPG5pGeptW7IiLSB3JHQ9Yw2Loc+GrUIaOHZDBEjVZERPqEErxUcPgF3gT59yvhwc/AwYvhxBu9JZzdFPKncfTUQo6eWsgPgA93VfHM2p38c91O7n5xI3c+/z65GX6OnlrIsHCQfdX1lFfXMzQrwLWfnqGjF0REpHvMvMrd1uWdDDFmqdGKiEif0L/WU8XkRfCNV+GV/4UXfgHvPQ2L/h8ccQn4er6Xbmx+Jl8+agJfPmoC5TX1vPReGf9aV8Lz60upqm0gJ8NPOJTOU+/sJBxK578+NaMPfykRERkURh0O6//hbTcI5UYdMntULne9sJGa+kZCfu0RFxHpKSV4qcQfgqOvhjlne901n/w+vLMMTvs1FEzu9dvnhPycMnskp8we2eHeNX9ZxT0vfcDiuaOYPTr65CwiIhLV6Fb78GJsM5gzOpeGJseabfs4fNzQBAYnIjKwaKNVKsobA+f8AT57F5SugzuPhGd/AlW74/Yjv3fydAqyg3zvLytpaOzesQ0iIjLIHXSo99jJMs2PTywg3Wc8s7YkQUGJiAxMSvBSlRnM/Txc+hpMPRGe/yncPBue+gFU7OjzH5eb4ef6xTNZs62ce176oM/fX0REBrCMIZA/2avgxZCb6WfBxHyeXNP3c5iIyGCiBC/VhUfAWQ/A11+GaSfDK7+Gm+fA05GjFfrQSbNGcMKM4fzy6fU8vmo7/1i1naXLi/n7yu3Uq6onIiKdGXU4bC0C52IOOXHmcDaW7mdDSUUCAxMRGViU4A0Uw2fC6XfDZcth5mfh3zfDrYfC67+F8u2wrxj2bO5Vdc/MuOG0WQTSfXzj4Tf5+sNvctWf3ubS37/JSTe/wLPrSnCdTNwiIjKIjZoHlTuhfFvMIcfPGAHAk2t2JioqEZEBR01WBpqhE+Fz
[base64-encoded PNG data for the notebook's inline figure output elided]\n"
422 | },
423 | "metadata": {
424 | "needs_background": "light"
425 | }
426 | }
427 | ]
428 | },
429 | {
430 | "cell_type": "code",
431 | "source": [],
432 | "metadata": {
433 | "id": "gbixC9QHSxF1"
434 | },
435 | "execution_count": 7,
436 | "outputs": []
437 | }
438 | ]
439 | }
--------------------------------------------------------------------------------