├── pics
│   ├── pgm_vsgd_2.png
│   └── cifar100_res.png
├── configs
│   ├── train
│   │   ├── optimizer
│   │   │   ├── sgd.yaml
│   │   │   ├── adam.yaml
│   │   │   ├── adamW.yaml
│   │   │   └── vsgd.yaml
│   │   ├── scheduler
│   │   │   ├── stepLR.yaml
│   │   │   ├── cosine.yaml
│   │   │   └── plateau.yaml
│   │   └── defaults.yaml
│   ├── wandb
│   │   └── defaults.yaml
│   ├── defaults.yaml
│   ├── model
│   │   ├── resnext.yaml
│   │   ├── vgg.yaml
│   │   ├── convmixer.yaml
│   │   └── resnet.yaml
│   ├── dataset
│   │   ├── cifar100.yaml
│   │   └── tiny_imagenet.yaml
│   └── experiment
│       ├── cifar100_vgg.yaml
│       ├── tiny_imagenet_vgg.yaml
│       ├── cifar100_convmixer.yaml
│       ├── tiny_imagenet_convmixer.yaml
│       ├── tiny_imagenet_resnext.yaml
│       └── cifar100_resnext.yaml
├── _config.yml
├── src
│   ├── utils
│   │   ├── wandb.py
│   │   ├── tester.py
│   │   └── trainer.py
│   ├── model
│   │   ├── classifier.py
│   │   ├── convmixer.py
│   │   ├── vgg.py
│   │   └── resnext.py
│   ├── dataset
│   │   ├── cifar10.py
│   │   ├── cifar100.py
│   │   ├── data_module.py
│   │   └── tiny_imagenet.py
│   ├── run_experiment.py
│   └── vsgd.py
├── environment.yml
├── LICENSE
├── README.md
├── .gitignore
└── notebooks
    └── vsgd_example.ipynb
/pics/pgm_vsgd_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/generativeai-tue/vsgd/HEAD/pics/pgm_vsgd_2.png
--------------------------------------------------------------------------------
/pics/cifar100_res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/generativeai-tue/vsgd/HEAD/pics/cifar100_res.png
--------------------------------------------------------------------------------
/configs/train/optimizer/sgd.yaml:
--------------------------------------------------------------------------------
1 | _target_: torch.optim.SGD
2 | params: null
3 | lr: 0.1
4 | momentum: 0.
5 | weight_decay: 0.
--------------------------------------------------------------------------------
/configs/train/scheduler/stepLR.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: torch.optim.lr_scheduler.StepLR
3 | step_size: 5000
4 | gamma: 0.5
5 | last_epoch: -1
--------------------------------------------------------------------------------
/configs/train/scheduler/cosine.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: torch.optim.lr_scheduler.CosineAnnealingLR
3 | T_max: ${train.max_iter}
4 | eta_min: 0.
5 |
--------------------------------------------------------------------------------
/configs/wandb/defaults.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | setup:
3 | project: vsgd
4 | mode: online
5 | watch:
6 | log: gradients
7 | log_freq: 1000
8 | group: null
--------------------------------------------------------------------------------
/configs/train/optimizer/adam.yaml:
--------------------------------------------------------------------------------
1 | _target_: torch.optim.Adam
2 | params: null
3 | lr: 0.1
4 | eps: 1e-08
5 | weight_decay: 0.
6 | betas:
7 | - 0.9
8 | - 0.999
--------------------------------------------------------------------------------
/configs/train/optimizer/adamW.yaml:
--------------------------------------------------------------------------------
1 | _target_: torch.optim.AdamW
2 | params: null
3 | lr: 0.1
4 | eps: 1e-08
5 | weight_decay: 0.
6 | betas:
7 | - 0.9
8 | - 0.999
--------------------------------------------------------------------------------
/configs/train/optimizer/vsgd.yaml:
--------------------------------------------------------------------------------
1 | _target_: vsgd.VSGD
2 | params: null
3 | ghattg: 30.0
4 | ps: 1e-8
5 | tau2: 0.9
6 | tau1: 0.81
7 | lr: 0.1
8 | weight_decay: 0.0
--------------------------------------------------------------------------------
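
Aside: these optimizer configs are consumed through Hydra's `instantiate` (see `src/run_experiment.py`). A minimal sketch of that mechanism, assuming the repo root is the working directory and `src/` is on `PYTHONPATH`:

```python
# Hydra resolves _target_ to a class, the remaining keys become constructor
# kwargs, and the `params: null` placeholder is overridden at call time with
# the model parameters (this mirrors the instantiate() call in
# src/run_experiment.py).
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/train/optimizer/vsgd.yaml")
model = torch.nn.Linear(10, 2)  # stand-in model, for illustration only
optimizer = instantiate(cfg, params=model.parameters())
```
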
/configs/train/scheduler/plateau.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
3 | factor: 0.5
4 | patience: 10
5 | threshold: 1e-3
6 | cooldown: 0
7 | min_lr: 1e-5
--------------------------------------------------------------------------------
/configs/defaults.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - _self_
5 | - model: resnet18
6 | - dataset: cifar10
7 | - train: defaults
8 | - wandb: defaults
9 | - experiment: null
--------------------------------------------------------------------------------
/configs/model/resnext.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: model.resnext.ResNeXt
3 | cardinality: 8
4 | depth: 29
5 | widen_factor: 4
6 | dropRate: 0
7 | num_classes: ${dataset.num_classes}
8 | name: resnext
9 |
--------------------------------------------------------------------------------
/configs/model/vgg.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: model.vgg.VGG
3 | cfg_id: A # A, B, D, E for 11, 13, 16, 19 layers of VGG
4 | batch_norm: true
5 | num_classes: ${dataset.num_classes}
6 | name: vgg
7 |
--------------------------------------------------------------------------------
/configs/model/convmixer.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: model.convmixer.ConvMixer
3 | dim: 256
4 | depth: 8
5 | kernel_size: 5
6 | patch_size: 2
7 | num_classes: ${dataset.num_classes}
8 | name: convmixer
9 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
2 |
3 |
4 | title: "Variational Stochastic Gradient Descent"
5 | description: "Code repository of the paper Variational Stochastic Gradient Descent for Deep Neural Networks"
6 |
7 |
--------------------------------------------------------------------------------
/configs/dataset/cifar100.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | data_module:
3 | _target_: dataset.cifar100.Cifar100
4 | batch_size: 64
5 | test_batch_size: 1024
6 | use_augmentations: true
7 | x_dim: 32
8 | num_classes: 100
9 | name: cifar100
--------------------------------------------------------------------------------
/configs/dataset/tiny_imagenet.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | data_module:
3 | _target_: dataset.tiny_imagenet.TinyImagenet
4 | batch_size: 64
5 | test_batch_size: 1024
6 | use_augmentations: true
7 | x_dim: 64
8 | num_classes: 200
9 | name: tiny-imagenet-200
--------------------------------------------------------------------------------
/src/utils/wandb.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import torch
4 | import wandb
5 |
6 |
7 | def get_checkpoint(entity: str, project: str, idx: str, device: str = "cpu"):
8 | # download the checkpoint from wandb to the local machine.
9 | file = wandb.restore(
10 | "last_chpt.pth", run_path=os.path.join(entity, project, idx), replace=True
11 | )
12 | # load the checkpoint
13 | chpt = torch.load(file.name, map_location=device)
14 | return chpt
15 |
--------------------------------------------------------------------------------
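
A usage sketch for `get_checkpoint` (the entity and run id below are hypothetical placeholders; the key names match what `load_from_checkpoint` in `src/run_experiment.py` reads from the returned dict):

```python
from utils.wandb import get_checkpoint

# "my-entity" and "abc123" stand in for a real wandb entity and run id
chpt = get_checkpoint(entity="my-entity", project="vsgd", idx="abc123", device="cpu")
print(chpt["iteration"])          # iteration at which the checkpoint was saved
state = chpt["model_state_dict"]  # weights, to be loaded via model.load_state_dict
```
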
/configs/train/defaults.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | defaults:
3 | - optimizer: adamW
4 | - scheduler: null
5 |
6 | seed: 0
7 | resume_id: null
8 | device: cuda
9 | start_iter: 0
10 | max_iter: 10000
11 | grad_clip: 0
12 | grad_skip_thr: 0 # skip the update step if the maximal grad norm is larger than this value (ignored if 0)
13 | save_freq: 1 # how often to save the checkpoint (in iterations)
14 | eval_test_freq: 10000 # how often to run evaluation on test dataset (in iterations)
15 | experiment_name: null
16 | optimizer_log_freq: -1
17 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: vsgd
2 | channels:
3 | - pytorch
4 | - nvidia
5 | - anaconda
6 | - conda-forge
7 | - defaults
8 | - huggingface
9 | dependencies:
10 | - python=3.10.4
11 | - numpy
12 | - scipy
13 | - matplotlib
14 | - wandb
15 | - tqdm
16 | - imageio
17 | - pip
18 | - wheel
19 | - pytorch=1.12.1
20 | - torchvision=0.13.1
21 | - cudatoolkit=11.3
22 | - torchmetrics
23 | - transformers
24 | - pip:
25 | - hydra-core==1.3
26 | - torch-fidelity==0.3.0
27 | - black
28 |
29 |
--------------------------------------------------------------------------------
/configs/experiment/cifar100_vgg.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: cifar100
4 | - override /model: vgg
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 256
10 | test_batch_size: 256
11 | use_augmentations: true
12 | model:
13 | cfg_id: D
14 | batch_norm: true
15 | train:
16 | experiment_name: test
17 | resume_id: null
18 | seed: 124
19 | device: 'cuda:0'
20 | save_freq: 500
21 | eval_test_freq: 500
22 | grad_clip: 0
23 | grad_skip_thr: 0
24 | max_iter: 30000
25 | optimizer_log_freq: 100
26 | scheduler:
27 | step_size: 10000
28 | gamma: 0.5
29 |
--------------------------------------------------------------------------------
/configs/experiment/tiny_imagenet_vgg.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: tiny_imagenet
4 | - override /model: vgg
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 128
10 | test_batch_size: 128
11 | use_augmentations: true
12 | model:
13 | cfg_id: E
14 | batch_norm: true
15 | train:
16 | experiment_name: test
17 | resume_id: null
18 | seed: 124
19 | device: 'cuda:0'
20 | save_freq: 500
21 | eval_test_freq: 500
22 | grad_clip: 0
23 | grad_skip_thr: 0
24 | max_iter: 60000
25 | optimizer_log_freq: 100
26 | scheduler:
27 | step_size: 20000
28 | gamma: 0.5
--------------------------------------------------------------------------------
/configs/experiment/cifar100_convmixer.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: cifar100
4 | - override /model: convmixer
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 256
10 | test_batch_size: 256
11 | use_augmentations: true
12 | model:
13 | dim: 256
14 | depth: 8
15 | kernel_size: 5
16 | patch_size: 2
17 | train:
18 | experiment_name: test
19 | resume_id: null
20 | seed: 124
21 | device: 'cuda:0'
22 | save_freq: 500
23 | eval_test_freq: 500
24 | grad_clip: 0
25 | grad_skip_thr: 0
26 | max_iter: 30000
27 | optimizer_log_freq: 100
28 | scheduler:
29 | step_size: 10000
30 | gamma: 0.5
31 |
--------------------------------------------------------------------------------
/configs/experiment/tiny_imagenet_convmixer.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: tiny_imagenet
4 | - override /model: convmixer
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 128
10 | test_batch_size: 128
11 | use_augmentations: true
12 | model:
13 | dim: 256
14 | depth: 8
15 | kernel_size: 5
16 | patch_size: 2
17 | train:
18 | experiment_name: test
19 | resume_id: null
20 | seed: 124
21 | device: 'cuda:0'
22 | save_freq: 500
23 | eval_test_freq: 500
24 | grad_clip: 0
25 | grad_skip_thr: 0
26 | max_iter: 60000
27 | optimizer_log_freq: 100
28 | scheduler:
29 | step_size: 20000
30 | gamma: 0.5
--------------------------------------------------------------------------------
/configs/experiment/tiny_imagenet_resnext.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: tiny_imagenet
4 | - override /model: resnext
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 64
10 | test_batch_size: 64
11 | use_augmentations: true
12 | model:
13 | cardinality: 8
14 | depth: 18
15 | widen_factor: 4
16 | dropRate: 0
17 | train:
18 | experiment_name: test
19 | resume_id: null
20 | seed: 124
21 | device: 'cuda:0'
22 | save_freq: 500
23 | eval_test_freq: 500
24 | grad_clip: 0
25 | grad_skip_thr: 0
26 | max_iter: 60000
27 | optimizer_log_freq: 100
28 | scheduler:
29 | step_size: 20000
30 | gamma: 0.5
--------------------------------------------------------------------------------
/configs/experiment/cifar100_resnext.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | defaults:
3 | - override /dataset: cifar100
4 | - override /model: resnext
5 | - override /train/scheduler: stepLR
6 |
7 | dataset:
8 | data_module:
9 | batch_size: 128
10 | test_batch_size: 128
11 | use_augmentations: true
12 | model:
13 | cardinality: 8
14 | depth: 18
15 | widen_factor: 4
16 | dropRate: 0
17 | train:
18 | experiment_name: test
19 | resume_id: null
20 | seed: 124
21 | device: 'cuda:0'
22 | save_freq: 500
23 | eval_test_freq: 500
24 | grad_clip: 0
25 | grad_skip_thr: 0
26 | max_iter: 30000
27 | optimizer_log_freq: 100
28 | scheduler:
29 | step_size: 10000
30 | gamma: 0.5
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/utils/tester.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import wandb
3 | from tqdm import tqdm
4 |
5 |
6 | def test(args, loader, model):
7 | model.eval()
8 | history = {}
9 | N = 0
10 | with torch.no_grad():
11 | for _, batch in tqdm(enumerate(loader)):
12 | if "cuda" in args.device:
13 | for i in range(len(batch)):
14 | batch[i] = batch[i].cuda(non_blocking=True)
15 |
16 | N += batch[0].shape[0]
17 | logs = model.test_step(
18 | batch=batch,
19 | )
20 |
21 | for k in logs.keys():
22 | if f"test/{k}" not in history.keys():
23 | history[f"test/{k}"] = 0.0
24 | history[f"test/{k}"] += logs[k]
25 |
26 | for k in history.keys():
27 | history[k] /= len(loader.dataset)
28 |
29 | wandb.log(history)
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Generativ/e AI group at the TU/e
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/model/classifier.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class ClassifierWrapper(nn.Module):
6 | def __init__(self, backbone, loss_fn=nn.CrossEntropyLoss(), **kwargs):
7 | super().__init__()
8 | self.backbone = backbone
9 | self.loss_fn = loss_fn
10 |
11 | def forward(self, batch):
12 | output = self.backbone(pixel_values=batch[0], return_dict=False)
13 | return output[0]
14 |
15 | def train_step(self, batch, scaler=None, device=None):
16 | if scaler is not None:
17 | with torch.autocast(device_type=device, dtype=torch.float16):
18 | logits = self.forward(batch)
19 | loss = self.loss_fn(logits, batch[1])
20 | else:
21 | logits = self.forward(batch)
22 | loss = self.loss_fn(logits, batch[1])
23 |
24 | logs = {
25 | "loss": loss.data,
26 | "accuracy": (logits.argmax(dim=1) == batch[1]).float().mean(),
27 | }
28 | return loss, logs
29 |
30 | def test_step(self, batch):
31 | logits = self.forward(batch)
32 | loss = self.loss_fn(logits, batch[1])
33 |
34 | logs = {
35 | "loss": loss.data * batch[0].shape[0],
36 | "accuracy": (logits.argmax(dim=1) == batch[1]).float().sum(),
37 | }
38 | return logs
39 |
--------------------------------------------------------------------------------
/configs/model/resnet.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | _target_: model.classifier.ClassifierWrapper
3 | backbone:
4 | _target_: transformers.ResNetForImageClassification
5 | config:
6 | _target_: transformers.ResNetConfig
7 | num_channels: 3
8 | embedding_size: 64 # Dimensionality (hidden size) for the embedding layer.
9 | hidden_sizes: # (List[int], optional, defaults to [256, 512, 1024, 2048]) — Dimensionality (hidden size) at each stage.
10 | - 64
11 | - 128
12 | - 256
13 | - 512
14 | depths: # (List[int], optional, defaults to [3, 4, 6, 3]) — Depth (number of layers) for each stage.
15 | - 2
16 | - 2
17 | - 2
18 | - 2
19 | layer_type: "basic" # (str, optional, defaults to "bottleneck") — The layer to use, it can be either "basic" (used for smaller models, like resnet-18 or resnet-34) or "bottleneck" (used for larger models like resnet-50 and above).
20 | hidden_act: "relu" # (str, optional, defaults to "relu") — The non-linear activation function in each block. If string, "gelu", "relu", "selu" and "gelu_new" are supported.
21 | downsample_in_first_stage: False # (bool, optional, defaults to False) — If True, the first stage will downsample the inputs using a stride of 2.
22 | downsample_in_bottleneck: False # (bool, optional, defaults to False) — If True, the first conv 1x1 in ResNetBottleNeckLayer will downsample the inputs using a stride of 2.
23 | num_labels: ${dataset.num_classes}
24 |
25 | name: resnet
26 | # see https://huggingface.co/docs/transformers/main/en/model_doc/resnet#transformers.ResNetConfig for more details
--------------------------------------------------------------------------------
/src/model/convmixer.py:
--------------------------------------------------------------------------------
1 | """VGG for CIFAR10. FC layers are removed.
2 | (c) YANG, Wei
3 | """
4 |
5 | import torch.nn as nn
6 |
7 | from model.classifier import ClassifierWrapper
8 |
9 | # adapted from https://github.com/locuslab/convmixer-cifar10/blob/main/train.py
10 |
11 |
12 | class Residual(nn.Module):
13 | def __init__(self, fn):
14 | super().__init__()
15 | self.fn = fn
16 |
17 | def forward(self, x):
18 | return self.fn(x) + x
19 |
20 |
21 | class ConvMixer(ClassifierWrapper):
22 | def __init__(
23 | self,
24 | dim,
25 | depth,
26 | kernel_size=5,
27 | patch_size=2,
28 | num_classes=100,
29 | loss_fn=nn.CrossEntropyLoss(),
30 | **kwargs
31 | ):
32 | bb = nn.Sequential(
33 | nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),
34 | nn.GELU(),
35 | nn.BatchNorm2d(dim),
36 | *[
37 | nn.Sequential(
38 | Residual(
39 | nn.Sequential(
40 | nn.Conv2d(
41 | dim, dim, kernel_size, groups=dim, padding="same"
42 | ),
43 | nn.GELU(),
44 | nn.BatchNorm2d(dim),
45 | )
46 | ),
47 | nn.Conv2d(dim, dim, kernel_size=1),
48 | nn.GELU(),
49 | nn.BatchNorm2d(dim),
50 | )
51 | for i in range(depth)
52 | ],
53 | nn.AdaptiveAvgPool2d((1, 1)),
54 | nn.Flatten(),
55 | nn.Linear(dim, num_classes)
56 | )
57 | super().__init__(backbone=bb, loss_fn=loss_fn)
58 |
59 | def forward(self, batch):
60 | x = self.backbone(batch[0])
61 | return x
62 |
--------------------------------------------------------------------------------
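
Note that the models in this repo take the whole `(images, labels)` batch and read the images from `batch[0]`; a minimal sketch (shapes chosen to match the CIFAR configs):

```python
import torch
from model.convmixer import ConvMixer  # assumes src/ is on PYTHONPATH

net = ConvMixer(dim=256, depth=8, kernel_size=5, patch_size=2, num_classes=100)
x = torch.randn(4, 3, 32, 32)             # dummy CIFAR-sized image batch
y = torch.randint(0, 100, (4,))           # dummy labels
logits = net((x, y))                      # forward() uses batch[0] only
loss, logs = net.train_step(batch=(x, y))  # logs holds loss and accuracy
```
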
/src/dataset/cifar10.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from torch.utils.data import random_split
4 | from torchvision import datasets, transforms
5 |
6 | from dataset.data_module import DataModule, ToTensor
7 |
8 |
9 | class Cifar10(DataModule):
10 | def __init__(
11 | self,
12 | batch_size,
13 | test_batch_size,
14 | root,
15 | use_augmentations,
16 | ):
17 | super(Cifar10, self).__init__(
18 | batch_size=batch_size,
19 | test_batch_size=test_batch_size,
20 | root=root,
21 | )
22 | self.__dict__.update(locals())
23 | if use_augmentations:
24 | self.transforms = transforms.Compose(
25 | [
26 | transforms.RandomCrop(32, padding=4),
27 | transforms.RandomHorizontalFlip(),
28 | transforms.ToTensor(),
29 | transforms.Normalize(
30 | (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
31 | ),
32 | ]
33 | )
34 | self.test_transforms = transforms.Compose(
35 | [
36 | transforms.ToTensor(),
37 | transforms.Normalize(
38 | (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
39 | ),
40 | ]
41 | )
42 |
43 | else:
44 | self.transforms = transforms.Compose(
45 | [
46 | transforms.RandomHorizontalFlip(),
47 | ToTensor(),
48 | ]
49 | )
50 | self.test_transforms = transforms.Compose([ToTensor()])
51 | self.prepare_data()
52 |
53 | def prepare_data(self):
54 | datasets.CIFAR10(self.root, train=True, download=True)
55 | datasets.CIFAR10(self.root, train=False, download=True)
56 |
57 | def setup(self):
58 | cifar_full = datasets.CIFAR10(self.root, train=True, transform=self.transforms)
59 | cifar_full.processed_folder = os.path.join(self.root, cifar_full.base_folder)
60 | N = len(cifar_full)
61 |         # hold out 256 training images as a validation set
62 |         self.train, self.val = random_split(cifar_full, [N - 256, 256])
63 | self.test = datasets.CIFAR10(
64 | self.root, train=False, transform=self.test_transforms
65 | )
66 | self.test.processed_folder = os.path.join(self.root, self.test.base_folder)
67 |
--------------------------------------------------------------------------------
/src/dataset/cifar100.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import torch
5 | from torch.utils.data import random_split
6 | from torchvision import datasets, transforms
7 |
8 | from dataset.data_module import DataModule, ToTensor
9 |
10 |
11 | class Cifar100(DataModule):
12 | def __init__(
13 | self,
14 | batch_size,
15 | test_batch_size,
16 | root,
17 | use_augmentations,
18 | ):
19 | super().__init__(
20 | batch_size=batch_size,
21 | test_batch_size=test_batch_size,
22 | root=root,
23 | )
24 | self.__dict__.update(locals())
25 | if use_augmentations:
26 | self.transforms = transforms.Compose(
27 | [
28 | transforms.RandomCrop(32, padding=4),
29 | transforms.RandomHorizontalFlip(),
30 | transforms.ToTensor(),
31 | transforms.Normalize(
32 | (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
33 | ),
34 | ]
35 | )
36 | self.test_transforms = transforms.Compose(
37 | [
38 | transforms.ToTensor(),
39 | transforms.Normalize(
40 | (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
41 | ),
42 | ]
43 | )
44 |
45 | else:
46 | self.transforms = transforms.Compose(
47 | [
48 | transforms.RandomHorizontalFlip(),
49 | ToTensor(),
50 | ]
51 | )
52 | self.test_transforms = transforms.Compose([ToTensor()])
53 | self.prepare_data()
54 |
55 | def prepare_data(self):
56 | datasets.CIFAR100(self.root, train=True, download=True)
57 | datasets.CIFAR100(self.root, train=False, download=True)
58 |
59 | def setup(self):
60 | cifar_full = datasets.CIFAR100(self.root, train=True, transform=self.transforms)
61 | cifar_full.processed_folder = os.path.join(self.root, cifar_full.base_folder)
62 | N = len(cifar_full)
63 |         # hold out 256 training images
64 |         # as a validation set
65 |         self.train, self.val = random_split(cifar_full, [N - 256, 256])
66 | self.test = datasets.CIFAR100(
67 | self.root, train=False, transform=self.test_transforms
68 | )
69 | self.test.processed_folder = os.path.join(self.root, self.test.base_folder)
70 |
--------------------------------------------------------------------------------
/src/dataset/data_module.py:
--------------------------------------------------------------------------------
1 | from itertools import permutations
2 |
3 | import numpy as np
4 | import torch
5 | from PIL import Image
6 | from torch.utils.data import DataLoader
7 | from torchvision import transforms
8 |
9 |
10 | class ToTensor:
11 | def __call__(self, x):
12 | x = torch.FloatTensor(np.asarray(x, dtype=np.float32)).permute(2, 0, 1)
13 | return x
14 |
15 |
16 | class Random90Rotation:
17 | def __call__(self, x):
18 | k = torch.ceil(3.0 * torch.rand(1)).long()
19 | u = torch.rand(1)
20 | if u < 0.5:
21 | x = x.rotate(90 * k)
22 | return x
23 |
24 |
25 | class ChannelSwap:
26 | def __call__(self, x):
27 |         permutation = list(permutations(range(3), 3))[np.random.randint(0, 6)]  # any of the 6 channel orderings
28 | u = torch.rand(1)
29 | if u < 0.5:
30 | x = np.array(x)[..., permutation]
31 | x = Image.fromarray(x)
32 | return x
33 |
34 |
35 | class DataModule:
36 | def __init__(
37 | self,
38 | batch_size,
39 | test_batch_size,
40 | root="data/",
41 | ):
42 | self.__dict__.update(locals())
43 | self.transforms = transforms.Compose(
44 | [
45 | ToTensor(),
46 | ]
47 | )
48 | self.test_transforms = transforms.Compose(
49 | [
50 | ToTensor(),
51 | ]
52 | )
53 | self.prepare_data()
54 |
55 | def prepare_data(self) -> None:
56 | """
57 | Download the data. Do preprocessing if necessary.
58 | :return:
59 | """
60 | raise NotImplementedError
61 |
62 | def setup(self) -> None:
63 | """
64 | Create self.train and self.val, self.test dataset
65 | :return: None
66 | """
67 | raise NotImplementedError
68 |
69 | def train_dataloader(self):
70 | params = {
71 | "pin_memory": True,
72 | "drop_last": True,
73 | "shuffle": True,
74 | "num_workers": 1,
75 | }
76 | train_loader = DataLoader(self.train, self.batch_size, **params)
77 | while True:
78 | yield from train_loader
79 |
80 | def val_dataloader(self):
81 | params = {
82 | "pin_memory": True,
83 | "drop_last": True,
84 | "shuffle": True,
85 | "num_workers": 1,
86 | }
87 | val_loader = DataLoader(self.val, self.test_batch_size, **params)
88 | while True:
89 | yield from val_loader
90 |
91 | def test_dataloader(self):
92 | test_loader = DataLoader(
93 | self.test,
94 | self.test_batch_size,
95 | num_workers=1,
96 | shuffle=False,
97 | pin_memory=True,
98 | drop_last=False,
99 | )
100 | return test_loader
101 |
--------------------------------------------------------------------------------
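
The loader contract is worth spelling out: `train_dataloader()` and `val_dataloader()` are infinite generators (`while True: yield from ...`), so they are consumed with `next()` rather than by iterating epochs, while `test_dataloader()` returns a plain finite `DataLoader`. A minimal sketch using `Cifar100` (downloads the data on first use; assumes `src/` is on `PYTHONPATH`):

```python
from dataset.cifar100 import Cifar100

dm = Cifar100(batch_size=64, test_batch_size=1024, root="data/", use_augmentations=True)
dm.setup()
train_iter = dm.train_dataloader()  # infinite generator
x, y = next(train_iter)             # one (images, labels) training batch
test_loader = dm.test_dataloader()  # finite, ordinary DataLoader
```
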
/src/model/vgg.py:
--------------------------------------------------------------------------------
1 | """VGG for CIFAR10. FC layers are removed.
2 | (c) YANG, Wei
3 | """
4 | import math
5 |
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 |
9 | from model.classifier import ClassifierWrapper
10 |
11 | # adapted from https://github.com/alecwangcq/KFAC-Pytorch/blob/master/models/cifar/vgg.py
12 |
13 | __all__ = [
14 | "VGG",
15 | "vgg11",
16 | "vgg11_bn",
17 | "vgg13",
18 | "vgg13_bn",
19 | "vgg16",
20 | "vgg16_bn",
21 | "vgg19_bn",
22 | "vgg19",
23 | ]
24 |
25 |
26 | cfg = {
27 | # vgg11
28 | "A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
29 | # vgg13:
30 | "B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
31 | # vgg16:
32 | "D": [
33 | 64,
34 | 64,
35 | "M",
36 | 128,
37 | 128,
38 | "M",
39 | 256,
40 | 256,
41 | 256,
42 | "M",
43 | 512,
44 | 512,
45 | 512,
46 | "M",
47 | 512,
48 | 512,
49 | 512,
50 | "M",
51 | ],
52 | # vgg19:
53 | "E": [
54 | 64,
55 | 64,
56 | "M",
57 | 128,
58 | 128,
59 | "M",
60 | 256,
61 | 256,
62 | 256,
63 | 256,
64 | "M",
65 | 512,
66 | 512,
67 | 512,
68 | 512,
69 | "M",
70 | 512,
71 | 512,
72 | 512,
73 | 512,
74 | "M",
75 | ],
76 | }
77 |
78 |
79 | class VGG(ClassifierWrapper):
80 | def __init__(
81 | self,
82 | cfg_id,
83 | batch_norm=False,
84 | num_classes=1000,
85 | loss_fn=nn.CrossEntropyLoss(),
86 | **kwargs
87 | ):
88 | super().__init__(
89 | backbone=VGG.make_layers(cfg[cfg_id], batch_norm=batch_norm),
90 | loss_fn=loss_fn,
91 | )
92 | self.classifier = nn.Linear(512, num_classes)
93 | self._initialize_weights()
94 |
95 | def forward(self, batch):
96 | x = self.backbone(batch[0])
97 | x = F.avg_pool2d(x, kernel_size=x.shape[-1], stride=1)
98 | x = x.view(x.size(0), -1)
99 | x = self.classifier(x)
100 | return x
101 |
102 | def _initialize_weights(self):
103 | for m in self.modules():
104 | if isinstance(m, nn.Conv2d):
105 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
106 | m.weight.data.normal_(0, math.sqrt(2.0 / n))
107 | if m.bias is not None:
108 | m.bias.data.zero_()
109 | elif isinstance(m, nn.BatchNorm2d):
110 | m.weight.data.fill_(1)
111 | m.bias.data.zero_()
112 | elif isinstance(m, nn.Linear):
113 | n = m.weight.size(1)
114 | m.weight.data.normal_(0, 0.01)
115 | m.bias.data.zero_()
116 |
117 | @staticmethod
118 | def make_layers(cfg, batch_norm=False):
119 | layers = []
120 | in_channels = 3
121 | for v in cfg:
122 | if v == "M":
123 | layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
124 | else:
125 | conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
126 | if batch_norm:
127 | layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
128 | else:
129 | layers += [conv2d, nn.ReLU(inplace=True)]
130 | in_channels = v
131 | return nn.Sequential(*layers)
132 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Variational Stochastic Gradient Descent for Deep Neural Networks
2 |
3 | This repository contains the source code accompanying the paper:
4 |
5 | [Variational Stochastic Gradient Descent for Deep Neural Networks](https://openreview.net/forum?id=xu4ATNjcdy)
6 | [[Demos]](https://github.com/generativeai-tue/vsgd/blob/main/notebooks)
7 |
8 | **[Anna Kuzina\*](https://akuzina.github.io/), [Haotian Chen\*](https://www.linkedin.com/in/haotian-chen-359b4520b/), [Babak Esmaeili](https://babak0032.github.io), & [Jakub M. Tomczak](https://jmtomczak.github.io/)**.
9 |
10 |
11 | #### Abstract
12 | *Optimizing deep neural networks is one of the main tasks in successful deep learning. Current state-of-the-art optimizers are adaptive gradient-based optimization methods such as Adam. Recently, there has been an increasing interest in formulating gradient-based optimizers in a probabilistic framework for better estimation of gradients and modeling uncertainties. Here, we propose to combine both approaches, resulting in the Variational Stochastic Gradient Descent (VSGD) optimizer. We model gradient updates as a probabilistic model and utilize stochastic variational inference (SVI) to derive an efficient and effective update rule. Further, we show how our VSGD method relates to other adaptive gradient-based optimizers like Adam.
13 | Lastly,
14 | we carry out experiments on two image classification datasets and four deep neural network architectures, where we show that VSGD outperforms Adam and SGD.*
15 |
16 |
17 |
18 |
19 |
20 | ### Repository structure
21 |
22 | #### Folders
23 |
24 | This repository is organized as follows:
25 |
26 | * `src` contains the main PyTorch library
27 | * `configs` contains the default configuration for `src/run_experiment.py`
28 | * `notebooks` contains a demo of using the VSGD optimizer
29 |
30 |
31 | ----
32 | ### Reproduce
33 |
34 | ###### Install conda *(recommended)*
35 |
36 | ```bash
37 | conda env create -f environment.yml
38 | conda activate vsgd
39 | ```
40 |
41 | ###### Log in to wandb *(recommended)*
42 | ```bash
43 | wandb login
44 | ```
45 |
46 | ###### Download TinyImagenet dataset
47 |
48 | ```bash
49 | cd data/
50 | wget http://cs231n.stanford.edu/tiny-imagenet-200.zip
51 | unzip tiny-imagenet-200.zip
52 | ```
53 |
54 | ###### Starting an experiment
55 | All experiments are run with `src/run_experiment.py`. Experiment configuration is handled by [Hydra](https://hydra.cc); the default configuration can be found in the `configs/` folder.
56 |
57 | `configs/experiment/` contains configs for dataset-architecture pairs. For example, to train the VGG model on the CIFAR-100 dataset with the VSGD optimizer, run:
58 | ```bash
59 | PYTHONPATH=src/ python src/run_experiment.py experiment=cifar100_vgg train/optimizer=vsgd
60 | ```
61 |
62 | Any of the default hyperparameters can also be overridden from the command line:
63 | ```bash
64 | PYTHONPATH=src/ python src/run_experiment.py experiment=cifar100_vgg train/optimizer=vsgd train.optimizer.weight_decay=0.01
65 | ```
66 |
67 |
68 | ----
69 |
70 | ### Cite
71 | If you found this work useful in your research, please consider citing:
72 |
73 | ```
74 | @article{
75 | chen2024variational,
76 | title={Variational Stochastic Gradient Descent for Deep Neural Networks},
77 | author={Chen, Haotian and Kuzina, Anna and Esmaeili, Babak and Tomczak, Jakub},
78 | year={2024},
79 | }
80 | ```
81 |
82 | ### Acknowledgements
83 | *Anna Kuzina is funded by the Hybrid Intelligence Center, a 10-year programme funded by the Dutch Ministry of Education, Culture and Science through the Netherlands Organisation for Scientific Research, https://hybrid-intelligence-centre.nl.
84 | This work was carried out on the Dutch national e-infrastructure with the support of SURF Cooperative.*
85 |
86 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 | scripts/
3 | notebooks/
4 | pics/
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | *.py,cover
54 | .hypothesis/
55 | .pytest_cache/
56 | cover/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 | db.sqlite3-journal
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | .pybuilder/
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # pyenv
90 | # For a library or package, you might want to ignore these files since the code is
91 | # intended to run in multiple environments; otherwise, check them in:
92 | # .python-version
93 |
94 | # pipenv
95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
98 | # install all needed dependencies.
99 | #Pipfile.lock
100 |
101 | # poetry
102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
103 | # This is especially recommended for binary packages to ensure reproducibility, and is more
104 | # commonly ignored for libraries.
105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
106 | #poetry.lock
107 |
108 | # pdm
109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
110 | #pdm.lock
111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
112 | # in version control.
113 | # https://pdm.fming.dev/#use-with-ide
114 | .pdm.toml
115 |
116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117 | __pypackages__/
118 |
119 | # Celery stuff
120 | celerybeat-schedule
121 | celerybeat.pid
122 |
123 | # SageMath parsed files
124 | *.sage.py
125 |
126 | # Environments
127 | .env
128 | .venv
129 | env/
130 | venv/
131 | ENV/
132 | env.bak/
133 | venv.bak/
134 |
135 | # Spyder project settings
136 | .spyderproject
137 | .spyproject
138 |
139 | # Rope project settings
140 | .ropeproject
141 |
142 | # mkdocs documentation
143 | /site
144 |
145 | # mypy
146 | .mypy_cache/
147 | .dmypy.json
148 | dmypy.json
149 |
150 | # Pyre type checker
151 | .pyre/
152 |
153 | # pytype static type analyzer
154 | .pytype/
155 |
156 | # Cython debug symbols
157 | cython_debug/
158 |
159 | # PyCharm
160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
162 | # and can be added to the global gitignore or merged into this file. For a more nuclear
163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
164 | #.idea/
--------------------------------------------------------------------------------
/src/run_experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pprint import pprint
3 |
4 | import hydra.utils
5 | import numpy as np
6 | import omegaconf
7 | import torch
8 | import wandb
9 | from hydra.utils import instantiate
10 |
11 | import utils.tester as tester
12 | import utils.trainer as trainer
13 | from utils.wandb import get_checkpoint
14 |
15 |
16 | def params_to(param, device):
17 | param.data = param.data.to(device)
18 | if param._grad is not None:
19 | param._grad.data = param._grad.data.to(device)
20 |
21 |
22 | def optimizer_to(optim, device):
23 | for param in optim.state.values():
24 | if isinstance(param, torch.Tensor):
25 | params_to(param, device)
26 | elif isinstance(param, dict):
27 | for subparam in param.values():
28 | if isinstance(subparam, torch.Tensor):
29 | params_to(subparam, device)
30 |
31 |
32 | def load_from_checkpoint(args, model, optimizer=None, scheduler=None):
33 | chpt = get_checkpoint(
34 | args.wandb.setup.entity,
35 | args.wandb.setup.project,
36 | args.train.resume_id,
37 | device="cpu",
38 | )
39 | args.train.start_iter = chpt["iteration"]
40 | # Load model and ema model
41 | model.load_state_dict(chpt["model_state_dict"])
42 |
43 | # Load optimizer
44 | if optimizer is not None:
45 | opt_state_dict = chpt["optimizer_state_dict"]
46 | optimizer.load_state_dict(opt_state_dict)
47 | optimizer_to(optimizer, args.train.device)
48 |
49 | # Load scheduler
50 | if scheduler is not None:
51 | scheduler_state_dict = chpt["scheduler_state_dict"]
52 | scheduler.load_state_dict(scheduler_state_dict)
53 |
54 | return args, model, optimizer, scheduler
55 |
56 |
57 | def init_wandb(args):
58 | wandb.require("service")
59 |
60 | tags = [
61 | args.dataset.name,
62 | args.model.name,
63 | args.train.optimizer._target_,
64 | args.train.experiment_name,
65 | ]
66 | if args.train.resume_id is not None:
67 | wandb.init(
68 | **args.wandb.setup,
69 | id=args.train.resume_id,
70 | resume="must",
71 | settings=wandb.Settings(start_method="thread"),
72 | )
73 | else:
74 | wandb_cfg = omegaconf.OmegaConf.to_container(
75 | args, resolve=True, throw_on_missing=True
76 | )
77 | wandb.init(
78 | **args.wandb.setup,
79 | config=wandb_cfg,
80 | group=f"{args.model.name}_{args.dataset.name}"
81 | if args.wandb.group is None
82 | else args.wandb.group,
83 | tags=tags,
84 | dir=hydra.utils.get_original_cwd(),
85 | settings=wandb.Settings(start_method="thread"),
86 | )
87 | pprint(wandb.run.config)
88 | # define our custom x axis metric
89 | wandb.define_metric("iter")
90 | for pref in ["train", "val", "pic"]:
91 | wandb.define_metric(f"{pref}/*", step_metric="iter")
92 | wandb.define_metric("val/loss", summary="min", step_metric="iter")
93 | wandb.define_metric("test/loss", summary="min", step_metric="iter")
94 |
95 |
96 | def compute_params(model, args):
97 | # add network size
98 | num_param = sum(p.numel() for p in model.parameters() if p.requires_grad)
99 | print(num_param)
100 | wandb.run.summary["num_parameters"] = num_param
101 |
102 |
103 | @hydra.main(version_base="1.3", config_path="../configs", config_name="defaults.yaml")
104 | def run(args: omegaconf.DictConfig) -> None:
105 | # set cuda visible devices
106 | if args.train.device[-1] == "0":
107 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
108 | args.train.device = "cuda"
109 | elif args.train.device[-1] == "1":
110 | os.environ["CUDA_VISIBLE_DEVICES"] = "1"
111 | args.train.device = "cuda"
112 |
113 | # Set the seed
114 | torch.manual_seed(args.train.seed)
115 | torch.cuda.manual_seed(args.train.seed)
116 | np.random.seed(args.train.seed)
117 | torch.backends.cudnn.deterministic = True
118 | torch.backends.cudnn.benchmark = False
119 |
120 | # ------------
121 | # data
122 | # ------------
123 | dset_params = {"root": os.path.join(hydra.utils.get_original_cwd(), "data/")}
124 | data_module = instantiate(args.dataset.data_module, **dset_params)
125 | data_module.setup()
126 | train_loader = data_module.train_dataloader()
127 | val_loader = data_module.val_dataloader()
128 | test_loader = data_module.test_dataloader()
129 |
130 | # ------------
131 | # model & optimizer
132 | # ------------
133 | model = instantiate(args.model)
134 | optimizer = instantiate(args.train.optimizer, params=model.parameters())
135 | scheduler = None
136 | if hasattr(args.train, "scheduler"):
137 | scheduler = instantiate(args.train.scheduler, optimizer=optimizer)
138 |
139 | if args.train.resume_id is not None:
140 | print(f"Resume training {args.train.resume_id}")
141 | args, model, optimizer, scheduler = load_from_checkpoint(
142 | args, model, optimizer, scheduler
143 | )
144 |
145 | model.train()
146 | model.to(args.train.device)
147 |
148 | # ------------
149 | # logging
150 | # ------------
151 | init_wandb(args)
152 | wandb.watch(model, **args.wandb.watch)
153 | compute_params(model, args)
154 |
155 | # ------------
156 | # training
157 | # ------------
158 | if args.train.start_iter < args.train.max_iter:
159 | trainer.train(
160 | args.train,
161 | train_loader,
162 | val_loader,
163 | test_loader,
164 | model,
165 | optimizer,
166 | scheduler,
167 | )
168 |
169 | # ------------
170 | # testing
171 | # ------------
172 | model = instantiate(args.model)
173 | with omegaconf.open_dict(args):
174 | args.train.resume_id = wandb.run.id
175 | _, model, _, _ = load_from_checkpoint(args, model)
176 | model.to(args.train.device)
177 |
178 | tester.test(
179 | args.train,
180 | test_loader,
181 | model,
182 | )
183 | print("Test finished")
184 | wandb.finish()
185 |
186 |
187 | if __name__ == "__main__":
188 | run()
189 |
--------------------------------------------------------------------------------
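
Resuming is driven by `train.resume_id`: when set, the script restores the model, optimizer and scheduler states from the corresponding wandb run. A hypothetical invocation (`abc123` stands in for a real run id; note that `load_from_checkpoint` reads `wandb.setup.entity`, which is not part of `configs/wandb/defaults.yaml` and therefore has to be supplied):

```bash
PYTHONPATH=src/ python src/run_experiment.py experiment=cifar100_vgg \
    +wandb.setup.entity=my-entity train.resume_id=abc123
```
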
/src/dataset/tiny_imagenet.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 |
4 | from PIL import Image
5 | from torch.utils.data import Dataset
6 | from torchvision import transforms
7 |
8 | from dataset.data_module import DataModule, ToTensor
9 |
10 |
11 | class TinyImagenet(DataModule):
12 | def __init__(
13 | self,
14 | batch_size,
15 | test_batch_size,
16 | root,
17 | use_augmentations,
18 | ):
19 | super().__init__(
20 | batch_size=batch_size,
21 | test_batch_size=test_batch_size,
22 | root=root,
23 | )
24 | self.__dict__.update(locals())
25 | if use_augmentations:
26 | self.transforms = transforms.Compose(
27 | [
28 | transforms.RandomCrop(size=64, padding=4),
29 | transforms.RandomHorizontalFlip(),
30 | transforms.RandomAffine(
31 | degrees=45, translate=(0.1, 0.1), scale=(0.9, 1.1)
32 | ),
33 | transforms.ColorJitter(
34 | brightness=0.2, contrast=0.2, saturation=0.2
35 | ),
36 | transforms.ToTensor(),
37 | transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
38 | ]
39 | )
40 | self.test_transforms = transforms.Compose(
41 | [
42 | transforms.ToTensor(),
43 | transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
44 | ]
45 | )
46 |
47 | else:
48 | self.transforms = transforms.Compose(
49 | [
50 | transforms.RandomHorizontalFlip(),
51 | ToTensor(),
52 | ]
53 | )
54 | self.test_transforms = transforms.Compose([ToTensor()])
55 |
56 | def prepare_data(self) -> None:
57 | pass
58 |
59 | def setup(self):
60 | self.train = TinyImageNet(self.root, split="train", transform=self.transforms)
61 | self.val = TinyImageNet(self.root, split="val", transform=self.test_transforms)
62 | self.test = TinyImageNet(self.root, split="val", transform=self.test_transforms)
63 |
64 |
65 | EXTENSION = "JPEG"
66 | NUM_IMAGES_PER_CLASS = 500
67 | CLASS_LIST_FILE = "wnids.txt"
68 | VAL_ANNOTATION_FILE = "val_annotations.txt"
69 |
70 |
71 | class TinyImageNet(Dataset):
72 | """Tiny ImageNet data set available from `http://cs231n.stanford.edu/tiny-imagenet-200.zip`.
73 | Dataset code adapted from https://github.com/leemengtw/tiny-imagenet/blob/master/TinyImageNet.py
74 |
75 | Parameters
76 | ----------
77 | root: string
78 | Root directory including `train`, `test` and `val` subdirectories.
79 | split: string
80 | Indicating which split to return as a data set.
81 | Valid option: [`train`, `test`, `val`]
82 | transform: torchvision.transforms
83 | A (series) of valid transformation(s).
84 | in_memory: bool
85 | Set to True if there is enough memory (about 5G) and want to minimize disk IO overhead.
86 | """
87 |
88 | def __init__(
89 | self,
90 | root,
91 | split="train",
92 | transform=None,
93 | target_transform=None,
94 | in_memory=False,
95 | ):
96 | self.root = os.path.join(os.path.expanduser(root), "tiny-imagenet-200")
97 | self.split = split
98 | self.transform = transform
99 | self.target_transform = target_transform
100 | self.in_memory = in_memory
101 | self.split_dir = os.path.join(self.root, self.split)
102 | self.image_paths = sorted(
103 | glob.iglob(
104 | os.path.join(self.split_dir, "**", "*.%s" % EXTENSION), recursive=True
105 | )
106 | )
107 | self.labels = {} # fname - label number mapping
108 | self.images = [] # used for in-memory processing
109 |
110 | # build class label - number mapping
111 | with open(os.path.join(self.root, CLASS_LIST_FILE), "r") as fp:
112 | self.label_texts = sorted([text.strip() for text in fp.readlines()])
113 | self.label_text_to_number = {text: i for i, text in enumerate(self.label_texts)}
114 |
115 | if self.split == "train":
116 | for label_text, i in self.label_text_to_number.items():
117 | for cnt in range(NUM_IMAGES_PER_CLASS):
118 | self.labels["%s_%d.%s" % (label_text, cnt, EXTENSION)] = i
119 | elif self.split == "val":
120 | with open(os.path.join(self.split_dir, VAL_ANNOTATION_FILE), "r") as fp:
121 | for line in fp.readlines():
122 | terms = line.split("\t")
123 | file_name, label_text = terms[0], terms[1]
124 | self.labels[file_name] = self.label_text_to_number[label_text]
125 |
126 | # read all images into torch tensor in memory to minimize disk IO overhead
127 | if self.in_memory:
128 | self.images = [self.read_image(path) for path in self.image_paths]
129 |
130 | def __len__(self):
131 | return len(self.image_paths)
132 |
133 | def __getitem__(self, index):
134 | file_path = self.image_paths[index]
135 |
136 | if self.in_memory:
137 | img = self.images[index]
138 | else:
139 | img = self.read_image(file_path)
140 | if self.split == "test":
141 | return img
142 | else:
143 | # file_name = file_path.split('/')[-1]
144 | return img, self.labels[os.path.basename(file_path)]
145 |
146 | def __repr__(self):
147 | fmt_str = "Dataset " + self.__class__.__name__ + "\n"
148 | fmt_str += " Number of datapoints: {}\n".format(self.__len__())
149 | tmp = self.split
150 | fmt_str += " Split: {}\n".format(tmp)
151 | fmt_str += " Root Location: {}\n".format(self.root)
152 | tmp = " Transforms (if any): "
153 | fmt_str += "{0}{1}\n".format(
154 | tmp, self.transform.__repr__().replace("\n", "\n" + " " * len(tmp))
155 | )
156 | tmp = " Target Transforms (if any): "
157 | fmt_str += "{0}{1}".format(
158 | tmp, self.target_transform.__repr__().replace("\n", "\n" + " " * len(tmp))
159 | )
160 | return fmt_str
161 |
162 | def read_image(self, path):
163 | # img = imageio.imread(path, pilmode='RGB')
164 | img = Image.open(path)
165 | img = img.convert("RGB")
166 | return self.transform(img) if self.transform else img
167 |
--------------------------------------------------------------------------------
/src/vsgd.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | import torch
4 | from torch import Tensor
5 | from torch.optim.optimizer import Optimizer, required
6 |
7 |
8 | class VSGD(Optimizer):
9 | def __init__(
10 | self,
11 | params: required,
12 | ghattg: float = 30.0,
13 | ps: float = 1e-8,
14 | tau1: float = 0.81,
15 | tau2: float = 0.9,
16 | lr: float = 0.1,
17 | weight_decay: float = 0.0,
18 | eps: float = 1e-8,
19 | ):
20 | """
21 | Args:
22 | ghattg: prior variance ratio between ghat and g,
23 | Var(ghat_t-g_t)/Var(g_t-g_{t-1}).
24 |             ps: prior strength.
25 |             tau1: remember rate for the gamma parameters of g.
26 |             tau2: remember rate for the gamma parameters of ghat.
27 | lr: learning rate.
28 | weight_decay (float): weight decay coefficient (default: 0.0)
29 | """
30 |
31 | if not 0.0 <= weight_decay:
32 | raise ValueError(f"Invalid weight_decay value: {weight_decay}")
33 | defaults = dict(
34 | ghattg=ghattg,
35 | ps=ps,
36 | tau1=tau1,
37 | tau2=tau2,
38 | lr=lr,
39 | weight_decay=weight_decay,
40 | eps=eps,
41 | )
42 | super().__init__(params, defaults)
43 |
44 | def __setstate__(self, state):
45 | super(VSGD, self).__setstate__(state)
46 |
47 | def step(self, closure=None):
48 | """Performs a single optimization step.
49 |
50 | Args:
51 | closure (Callable, optional): A closure that reevaluates the model
52 | and returns the loss.
53 | """
54 | # self._cuda_graph_capture_health_check()
55 |
56 | loss = None
57 | if closure is not None:
58 | with torch.enable_grad():
59 | loss = closure()
60 |
61 | for group in self.param_groups:
62 | params_with_grad = []
63 | grads = []
64 | mug_list = []
65 | step_list = []
66 | pa2_list = []
67 | pbg2_list = []
68 | pbhg2_list = []
69 | bg_list = []
70 | bhg_list = []
71 |
72 | self._init_group(
73 | group,
74 | params_with_grad,
75 | grads,
76 | mug_list,
77 | step_list,
78 | pa2_list,
79 | pbg2_list,
80 | pbhg2_list,
81 | bg_list,
82 | bhg_list,
83 | group["ghattg"],
84 | group["ps"],
85 | )
86 |
87 | vsgd(
88 | params_with_grad,
89 | grads,
90 | mug_list,
91 | step_list,
92 | pa2_list,
93 | pbg2_list,
94 | pbhg2_list,
95 | bg_list,
96 | bhg_list,
97 | group["tau1"],
98 | group["tau2"],
99 | group["lr"],
100 | group["weight_decay"],
101 | group["eps"],
102 | )
103 |
104 | return loss
105 |
106 | def _init_group(
107 | self,
108 | group,
109 | params_with_grad: List[Tensor],
110 | grads: List[Tensor],
111 | mug_list: List,
112 | step_list: List,
113 | pa2_list: List,
114 | pbg2_list: List,
115 | pbhg2_list: List,
116 | bg_list: List,
117 | bhg_list: List,
118 | ghattg: float,
119 | ps: float,
120 | ):
121 | for p in group["params"]:
122 | if p.grad is None:
123 | continue
124 | params_with_grad.append(p)
125 |
126 | grads.append(p.grad)
127 | state = self.state[p]
128 |
129 | # State initialization
130 | if len(state) == 0:
131 | for k in ["mug", "bg", "bhg"]:
132 |                     # initialize to zeros, representing prior ignorance
133 | state[k] = torch.zeros_like(p, memory_format=torch.preserve_format)
134 | # initialize 2*a_0 and 2*b_0 as constants
135 | state["pa2"] = torch.tensor(2.0 * ps + 1.0 + 1e-4)
136 | state["pbg2"] = torch.tensor(2.0 * ps)
137 | state["pbhg2"] = torch.tensor(2.0 * ghattg * ps)
138 | state["step"] = torch.tensor(0.0)
139 |
140 | mug_list.append(state["mug"])
141 | bg_list.append(state["bg"])
142 | bhg_list.append(state["bhg"])
143 | step_list.append(state["step"])
144 | pa2_list.append(state["pa2"])
145 | pbg2_list.append(state["pbg2"])
146 | pbhg2_list.append(state["pbhg2"])
147 |
148 |     def get_current_beta1_estimate(self) -> List[Tensor]:
149 | betas = []
150 | for group in self.param_groups:
151 | for p in group["params"]:
152 | state = self.state[p]
153 | bg = state["bg"]
154 | bhg = state["bhg"]
155 | betas.append((bhg / (bg + bhg)).data)
156 | return betas
157 |
158 |
159 | def vsgd(
160 | params_with_grad: List[Tensor],
161 | grads: List[Tensor],
162 | mug_list: List[Tensor],
163 | step_list: List[Tensor],
164 | pa2_list: List[Tensor],
165 | pbg2_list: List[Tensor],
166 | pbhg2_list: List[Tensor],
167 | bg_list: List[Tensor],
168 | bhg_list: List[Tensor],
169 | tau1: float,
170 | tau2: float,
171 | lr: float,
172 | weight_decay: float,
173 | eps: float,
174 | ):
175 | for i, param in enumerate(params_with_grad):
176 | ghat = grads[i]
177 | mug = mug_list[i]
178 | mug1 = torch.clone(mug)
179 | step = step_list[i]
180 | step += 1
181 | pa2 = pa2_list[i]
182 | pbg2 = pbg2_list[i]
183 | pbhg2 = pbhg2_list[i]
184 | bg = bg_list[i]
185 | bhg = bhg_list[i]
186 | # weight decay following AdamW
187 | param.data.mul_(1 - lr * weight_decay)
188 |
189 | # variances of g and ghat
190 | if step == 1.0:
191 | sg = pbg2 / (pa2 - 1.0)
192 | shg = pbhg2 / (pa2 - 1.0)
193 | else:
194 | sg = bg / pa2
195 | shg = bhg / pa2
196 |         # update the posterior mean (mug) and variance (sigg)
197 | mug.copy_((ghat * sg + mug1 * shg) / (sg + shg))
198 | sigg = sg * shg / (sg + shg)
199 |
200 | # update 2*b
201 | mug_sq = sigg + mug**2
202 | bg2 = pbg2 + mug_sq - 2.0 * mug * mug1 + mug1**2
203 | bhg2 = pbhg2 + mug_sq - 2.0 * ghat * mug + ghat**2
204 |
205 | rho1 = step ** (-tau1)
206 | rho2 = step ** (-tau2)
207 | bg.mul_(1.0 - rho1).add_(bg2, alpha=rho1)
208 | bhg.mul_(1.0 - rho2).add_(bhg2, alpha=rho2)
209 |
210 | # update param
211 | param.data.add_(lr / (torch.sqrt(mug_sq) + eps) * mug, alpha=-1.0)
212 |
--------------------------------------------------------------------------------
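
VSGD is a drop-in replacement for the built-in `torch.optim` optimizers. A minimal self-contained sketch with the default hyperparameters from `configs/train/optimizer/vsgd.yaml` (dummy model and data, for illustration only; assumes `src/` is on `PYTHONPATH`):

```python
import torch
from vsgd import VSGD

model = torch.nn.Linear(10, 2)
optimizer = VSGD(model.parameters(), ghattg=30.0, ps=1e-8, tau1=0.81, tau2=0.9, lr=0.1)

x, y = torch.randn(32, 10), torch.randint(0, 2, (32,))
for _ in range(10):
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(x), y)
    loss.backward()
    optimizer.step()
```
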
/src/model/resnext.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 |
3 | """
4 | Creates a ResNeXt Model as defined in:
5 | Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
6 | Aggregated residual transformations for deep neural networks.
7 | arXiv preprint arXiv:1611.05431.
8 | import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
9 | """
10 | import torch.nn as nn
11 | import torch.nn.functional as F
12 | from torch.nn import init
13 |
14 | from model.classifier import ClassifierWrapper
15 |
16 | __all__ = ["resnext"]
17 |
18 |
19 | class ResNeXtBottleneck(nn.Module):
20 | """
21 | RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
22 | """
23 |
24 | def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
25 | """Constructor
26 | Args:
27 | in_channels: input channel dimensionality
28 | out_channels: output channel dimensionality
29 | stride: conv stride. Replaces pooling layer.
30 | cardinality: num of convolution groups.
31 | widen_factor: factor to reduce the input dimensionality before convolution.
32 | """
33 | super(ResNeXtBottleneck, self).__init__()
34 | D = cardinality * out_channels // widen_factor
35 | self.conv_reduce = nn.Conv2d(
36 | in_channels, D, kernel_size=1, stride=1, padding=0, bias=False
37 | )
38 | self.bn_reduce = nn.BatchNorm2d(D)
39 | self.conv_conv = nn.Conv2d(
40 | D,
41 | D,
42 | kernel_size=3,
43 | stride=stride,
44 | padding=1,
45 | groups=cardinality,
46 | bias=False,
47 | )
48 | self.bn = nn.BatchNorm2d(D)
49 | self.conv_expand = nn.Conv2d(
50 | D, out_channels, kernel_size=1, stride=1, padding=0, bias=False
51 | )
52 | self.bn_expand = nn.BatchNorm2d(out_channels)
53 |
54 | self.shortcut = nn.Sequential()
55 | if in_channels != out_channels:
56 | self.shortcut.add_module(
57 | "shortcut_conv",
58 | nn.Conv2d(
59 | in_channels,
60 | out_channels,
61 | kernel_size=1,
62 | stride=stride,
63 | padding=0,
64 | bias=False,
65 | ),
66 | )
67 | self.shortcut.add_module("shortcut_bn", nn.BatchNorm2d(out_channels))
68 |
69 | def forward(self, x):
70 | bottleneck = self.conv_reduce.forward(x)
71 | bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
72 | bottleneck = self.conv_conv.forward(bottleneck)
73 | bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
74 | bottleneck = self.conv_expand.forward(bottleneck)
75 | bottleneck = self.bn_expand.forward(bottleneck)
76 | residual = self.shortcut.forward(x)
77 | return F.relu(residual + bottleneck, inplace=True)
78 |
79 |
80 | class ResNeXt(ClassifierWrapper):
81 | """
82 | ResNext optimized for the Cifar dataset, as specified in
83 | https://arxiv.org/pdf/1611.05431.pdf
84 | """
85 |
86 | def __init__(
87 | self,
88 | cardinality,
89 | depth,
90 | num_classes,
91 | widen_factor=4,
92 | dropRate=0,
93 | loss_fn=nn.CrossEntropyLoss(),
94 | **kwargs
95 | ):
96 | """Constructor
97 | Args:
98 | cardinality: number of convolution groups.
99 | depth: number of layers.
100 | num_classes: number of classes
101 | widen_factor: factor to adjust the channel dimensionality
102 | """
103 | super().__init__(backbone=nn.Identity(), loss_fn=loss_fn)
104 | self.cardinality = cardinality
105 | self.depth = depth
106 | self.block_depth = (self.depth - 2) // 9
107 | self.widen_factor = widen_factor
108 | self.num_classes = num_classes
109 | self.output_size = 64
110 | self.stages = [
111 | 64,
112 | 64 * self.widen_factor,
113 | 128 * self.widen_factor,
114 | 256 * self.widen_factor,
115 | ]
116 |
117 | self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
118 | self.bn_1 = nn.BatchNorm2d(64)
119 | self.stage_1 = self.block("stage_1", self.stages[0], self.stages[1], 1)
120 | self.stage_2 = self.block("stage_2", self.stages[1], self.stages[2], 2)
121 | self.stage_3 = self.block("stage_3", self.stages[2], self.stages[3], 2)
122 | self.classifier = nn.Linear(self.stages[3], num_classes)
123 | init.kaiming_normal_(self.classifier.weight)
124 |
125 | for key in self.state_dict():
126 | if key.split(".")[-1] == "weight":
127 | if "conv" in key:
128 | init.kaiming_normal_(self.state_dict()[key], mode="fan_out")
129 | if "bn" in key:
130 | self.state_dict()[key][...] = 1
131 | elif key.split(".")[-1] == "bias":
132 | self.state_dict()[key][...] = 0
133 |
134 | def block(self, name, in_channels, out_channels, pool_stride=2):
135 | """Stack n bottleneck modules where n is inferred from the depth of the network.
136 | Args:
137 | name: string name of the current block.
138 | in_channels: number of input channels
139 | out_channels: number of output channels
140 | pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
141 | Returns: a Module consisting of n sequential bottlenecks.
142 | """
143 | block = nn.Sequential()
144 | for bottleneck in range(self.block_depth):
145 | name_ = "%s_bottleneck_%d" % (name, bottleneck)
146 | if bottleneck == 0:
147 | block.add_module(
148 | name_,
149 | ResNeXtBottleneck(
150 | in_channels,
151 | out_channels,
152 | pool_stride,
153 | self.cardinality,
154 | self.widen_factor,
155 | ),
156 | )
157 | else:
158 | block.add_module(
159 | name_,
160 | ResNeXtBottleneck(
161 | out_channels,
162 | out_channels,
163 | 1,
164 | self.cardinality,
165 | self.widen_factor,
166 | ),
167 | )
168 | return block
169 |
170 | def forward(self, batch):
171 | x = self.conv_1_3x3.forward(batch[0])
172 | x = F.relu(self.bn_1.forward(x), inplace=True)
173 | x = self.stage_1.forward(x)
174 | x = self.stage_2.forward(x)
175 | x = self.stage_3.forward(x)
176 | x = F.avg_pool2d(x, kernel_size=x.shape[-1], stride=1)
177 | x = x.view(-1, self.stages[3])
178 | return self.classifier(x)
179 |
--------------------------------------------------------------------------------
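
Note: the constructor above ties depth to the stage layout: each of the three
stages stacks block_depth = (depth - 2) // 9 bottlenecks, every bottleneck
holds three convolutions, and the stem conv plus the classifier account for
the remaining 2 layers, so depth must satisfy (depth - 2) % 9 == 0 (e.g. 29).
A hedged shape-check sketch, assuming src/ is on sys.path and that
ClassifierWrapper from model/classifier.py initializes as its usage here
suggests:

import torch
from model.resnext import ResNeXt

net = ResNeXt(cardinality=8, depth=29, num_classes=100, widen_factor=4)
x = torch.randn(2, 3, 32, 32)  # CIFAR-sized input
logits = net([x])              # forward() reads the image from batch[0]
print(logits.shape)            # expected: torch.Size([2, 100])
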
/src/utils/trainer.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | import time
4 |
5 | import torch
6 | import wandb
7 |
8 | from utils.tester import test
9 |
10 |
11 | def save_chpt(args, iteration, model, optimizer, scheduler, loss, name="last_chpt"):
12 | chpt = {
13 | "iteration": iteration,
14 | "model_state_dict": model.state_dict(),
15 | "optimizer_state_dict": optimizer.state_dict(),
16 | "scheduler_state_dict": None if scheduler is None else scheduler.state_dict(),
17 | "loss": loss,
18 | }
19 | torch.save(chpt, os.path.join(wandb.run.dir, f"{name}.pth"))
20 | wandb.save(os.path.join(wandb.run.dir, f"{name}.pth"), base_path=wandb.run.dir)
21 | print("->model saved<-\n")
22 |
23 |
24 | def train(
25 | args,
26 | train_loader,
27 | val_loader,
28 | test_loader,
29 | model,
30 | optimizer,
31 | scheduler,
32 | ):
33 | with torch.no_grad():
34 | if val_loader is not None:
35 | # compute metrics on initialization
36 | batch = next(val_loader)
37 | history_val = run_iter(
38 | args=args,
39 | iteration=args.start_iter,
40 | batch=batch,
41 | model=model,
42 | optimizer=None,
43 | mode="val",
44 | )
45 | wandb.log({**history_val, "iter": args.start_iter})
46 |
47 | for iteration in range(args.start_iter, args.max_iter):
48 | batch = next(train_loader)
49 |
50 | time_start = time.time()
51 | history_train = run_iter(
52 | args,
53 | iteration=iteration,
54 | batch=batch,
55 | model=model,
56 | optimizer=optimizer,
57 | mode="train",
58 | )
59 |
60 | train_elapsed = time.time() - time_start
61 | time_start = time.time()
62 | history_val = {}
63 |
64 | if val_loader is not None:
65 | batch = next(val_loader)
66 | with torch.no_grad():
67 | history_val = run_iter(
68 | args,
69 | iteration=iteration + 1,
70 | batch=batch,
71 | model=model,
72 | optimizer=None,
73 | mode="val",
74 | )
75 |
76 | if scheduler is not None:
77 | if scheduler.__class__.__name__ == "ReduceLROnPlateau":
78 | scheduler.step(history_val["val/loss"])
79 | else:
80 | scheduler.step()
81 |
82 | val_elapsed = time.time() - time_start
83 | hist = {
84 | **history_train,
85 | **history_val,
86 | "train_time": train_elapsed,
87 | "val_time": val_elapsed,
88 | }
89 |
90 | # save metrics to wandb
91 | wandb.log(hist)
92 | # save checkpoint
93 | if iteration % args.save_freq == 0 or iteration == args.max_iter:
94 | loss = hist["train/loss"]
95 | if "val/loss" in hist.keys():
96 | loss = hist["val/loss"]
97 | save_chpt(
98 | args,
99 | iteration,
100 | model,
101 | optimizer,
102 | scheduler,
103 | loss,
104 | )
105 |
106 | if iteration % 100 == 0:
107 | print(
108 | "Iteration: {}/{}, Time elapsed: {:.2f}s\n"
109 | "* Train loss: {:.2f} \n".format(
110 | iteration + 1,
111 | args.max_iter,
112 | val_elapsed + train_elapsed,
113 | hist["train/loss"],
114 | )
115 | )
116 | if "val/loss" in hist.keys():
117 | if math.isnan(hist["val/loss"]):
118 | print("Nan loss, stopping training")
119 | break
120 |
121 | # run test eval to track the performance
122 | if (iteration + 1) % args.eval_test_freq == 0 and (
123 | iteration + 1
124 | ) < args.max_iter:
125 | print("Run test evaluation...")
126 | with torch.no_grad():
127 | test(
128 | args=args,
129 | loader=test_loader,
130 | model=model,
131 | )
132 |
133 | print("Save last checkpoint")
134 | loss = hist["train/loss"]
135 | if "val/loss" in hist.keys():
136 | loss = hist["val/loss"]
137 | save_chpt(args, args.max_iter, model, optimizer, scheduler, loss)
138 |
139 |
140 | def run_iter(args, iteration, batch, model, optimizer, mode="train"):
141 | if mode == "train":
142 | model.train()
143 | try:
144 | lr = optimizer.param_groups[0]["lr"]
145 | except (AttributeError, IndexError, KeyError):
146 | lr = 0.0
147 | history = {"lr": lr, "iter": iteration + 1}
148 | if iteration > 0:
149 | if args.optimizer_log_freq > 0 and iteration % args.optimizer_log_freq == 0:
150 | if hasattr(optimizer, "get_current_beta1_estimate"):
151 | vals = optimizer.get_current_beta1_estimate()
152 | vals = torch.cat([x.reshape(-1) for x in vals]).reshape(1, -1).cpu()
153 | history["beta1"] = wandb.Histogram(vals)
154 | history["beta1_median"] = vals.median()
155 | history["beta1_mean"] = vals.mean()
156 | elif "betas" in optimizer.param_groups[0]:
157 | beta1 = optimizer.param_groups[0]["betas"][0]
158 | bias_correction = 1 - beta1**iteration
159 | history["beta1_median"] = beta1 / bias_correction
160 | history["beta1_mean"] = beta1 / bias_correction
161 | elif "momentum" in optimizer.param_groups[0]:
162 | beta1 = optimizer.param_groups[0]["momentum"]
163 | history["beta1_median"] = beta1
164 | history["beta1_mean"] = beta1
165 |
166 | elif mode == "val":
167 | model.eval()
168 | history = {}
169 |
170 | if "cuda" in args.device:
171 | for i in range(len(batch)):
172 | batch[i] = batch[i].cuda(non_blocking=True)
173 | # Loss
174 | logs = {}
175 | if mode == "train":
176 | loss, logs = model.train_step(batch, device=args.device)
177 | elif mode == "val":
178 | with torch.no_grad():
179 | loss, logs = model.train_step(batch, device=args.device)
180 |
181 | if mode == "train":
182 | optimize(args, loss, model, optimizer)
183 |
184 | # Get the history
185 | for k in logs.keys():
186 | h_key = k
187 | if "/" not in k:
188 | h_key = f"{mode}/{k}"
189 | if "hist" in k:
190 | history[h_key] = wandb.Histogram(logs[k])
191 | else:
192 | history[h_key] = logs[k]
193 |
194 | return history
195 |
196 |
197 | def optim_step(params, optimizer, grad_clip_val, grad_skip_val):
198 | # clip gradient
199 | grad_norm = torch.nn.utils.clip_grad_norm_(params, grad_clip_val).item()
200 |
201 | if grad_skip_val == 0 or grad_norm < grad_skip_val:
202 | optimizer.step()
203 | return grad_norm
204 |
205 |
206 | def optimize(args, loss, model, optimizer):
207 | if args.grad_clip > 0:
208 | clip_to = args.grad_clip
209 | else:
210 | clip_to = 1e6
211 |
212 | logs = {"skipped_steps": 1}
213 | nans = torch.isnan(loss).sum().item()
214 | if nans == 0:
215 | logs["skipped_steps"] = 0
216 | # backprop through the main loss
217 | optimizer.zero_grad()
218 | loss.backward()
219 | params = [p for n, p in model.named_parameters() if p.requires_grad]
220 |
221 | grad_norm = optim_step(
222 | params=params,
223 | optimizer=optimizer,
224 | grad_clip_val=clip_to,
225 | grad_skip_val=args.grad_skip_thr,
226 | )
227 | logs["grad_norm"] = grad_norm
228 |
229 | wandb.log(logs)
230 |
--------------------------------------------------------------------------------
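
Note: the logic in optim_step above is clip-then-maybe-skip:
clip_grad_norm_ rescales the gradients to grad_clip_val but returns the norm
measured before clipping, and the optimizer step is skipped entirely when that
raw norm reaches grad_skip_val (a value of 0 disables skipping). A standalone
sketch of the same rule, with hypothetical values:

import torch

lin = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(lin.parameters(), lr=0.1)
loss = lin(torch.randn(8, 4)).pow(2).mean()
loss.backward()

grad_clip_val, grad_skip_val = 1.0, 5.0
grad_norm = torch.nn.utils.clip_grad_norm_(lin.parameters(), grad_clip_val).item()
if grad_skip_val == 0 or grad_norm < grad_skip_val:
    opt.step()  # taken only when the raw gradient norm is below the threshold
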
/notebooks/vsgd_example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import matplotlib as mpl\n",
10 | "import matplotlib\n",
11 | "import matplotlib.pyplot as plt\n",
12 | "\n",
13 | "import numpy as np\n",
14 | "import os\n",
15 | "import torch\n",
16 | "\n",
17 | "mpl.rcParams['text.usetex'] = True\n",
18 | "mpl.rcParams['text.latex.preamble'] = r'\\usepackage{amsmath}'\n",
19 | "plt.rcParams['figure.figsize'] = [9, 7]\n",
20 | "\n",
21 | "\n",
22 | "# Append ../src to path\n",
23 | "import sys\n",
24 | "source_path = os.path.join(os.getcwd(), '../src')\n",
25 | "if source_path not in sys.path:\n",
26 | " sys.path.append(source_path)\n",
27 | " "
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
34 | "## 1. Get the datasets"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 2,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "from torchvision.datasets import MNIST\n",
44 | "from torch.utils.data import DataLoader\n",
45 | "from torchvision import transforms\n",
46 | "from torchvision.transforms import ToTensor\n",
47 | "\n",
48 | "train_dataset = MNIST(root='../data/', download=True, train=True, transform=ToTensor())\n",
49 | "test_dataset = MNIST(root='../data/', download=True, train=False, transform=ToTensor())\n"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
56 | "## 2. Define NN\n",
57 | "\n",
58 | "In this example, we use the convmixer architecture\n",
59 | "\n",
60 | "\n",
61 | "code source: https://github.com/locuslab/convmixer-cifar10/blob/main/train.py"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": 3,
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "import torch.nn as nn \n",
71 | "\n",
72 | "class Residual(nn.Module):\n",
73 | " def __init__(self, fn):\n",
74 | " super().__init__()\n",
75 | " self.fn = fn\n",
76 | "\n",
77 | " def forward(self, x):\n",
78 | " return self.fn(x) + x\n",
79 | " \n",
80 | "def build_convmixer(dim, patch_size, kernel_size, depth, num_classes):\n",
81 | " return nn.Sequential(\n",
82 | " nn.Conv2d(1, dim, kernel_size=patch_size, stride=patch_size),\n",
83 | " nn.GELU(),\n",
84 | " nn.BatchNorm2d(dim),\n",
85 | " *[\n",
86 | " nn.Sequential(\n",
87 | " Residual(\n",
88 | " nn.Sequential(\n",
89 | " nn.Conv2d(\n",
90 | " dim, dim, kernel_size, groups=dim, padding=\"same\"\n",
91 | " ),\n",
92 | " nn.GELU(),\n",
93 | " nn.BatchNorm2d(dim),\n",
94 | " )\n",
95 | " ),\n",
96 | " nn.Conv2d(dim, dim, kernel_size=1),\n",
97 | " nn.GELU(),\n",
98 | " nn.BatchNorm2d(dim),\n",
99 | " )\n",
100 | " for _ in range(depth)\n",
101 | " ],\n",
102 | " nn.AdaptiveAvgPool2d((1, 1)),\n",
103 | " nn.Flatten(),\n",
104 | " nn.Linear(dim, num_classes)\n",
105 | " )"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 | "## 3. Train and test functions"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 4,
118 | "metadata": {},
119 | "outputs": [],
120 | "source": [
121 | "\n",
122 | "def train(net, train_dataset, optimizer, max_epochs, batch_size):\n",
123 | " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
124 | " net.to(device)\n",
125 | "\n",
126 | " criterion = nn.CrossEntropyLoss()\n",
127 | " trainloader = DataLoader(train_dataset, \n",
128 | " batch_size=batch_size, \n",
129 | " shuffle=True, \n",
130 | " num_workers=2)\n",
131 | "\n",
132 | " logs = {'train_loss': []}\n",
133 | " for epoch in range(max_epochs): # loop over the dataset multiple times\n",
134 | "\n",
135 | " running_loss = 0.0\n",
136 | " for i, data in enumerate(trainloader, 0):\n",
137 | " # get the inputs; data is a list of [inputs, labels]\n",
138 | " inputs, labels = data\n",
139 | " inputs = inputs.to(device)\n",
140 | " labels = labels.to(device)\n",
141 | " \n",
142 | "\n",
143 | " # zero the parameter gradients\n",
144 | " optimizer.zero_grad()\n",
145 | "\n",
146 | " # forward + backward + optimize\n",
147 | " outputs = net(inputs)\n",
148 | " loss = criterion(outputs, labels)\n",
149 | " loss.backward()\n",
150 | " optimizer.step()\n",
151 | "\n",
152 | " running_loss += loss.item()\n",
153 | " logs['train_loss'].append(loss.item())\n",
154 | " \n",
155 | " print(f\"Epoch {epoch+1}, loss: {running_loss / len(trainloader): .2f}\")\n",
156 | " print('Finished Training')\n",
157 | " return net, logs\n",
158 | "\n",
159 | "def test(test_dataset, net, batch_size):\n",
160 | " \n",
161 | " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
162 | " net.to(device)\n",
163 | "\n",
164 | " criterion = nn.CrossEntropyLoss()\n",
165 | " testloader = DataLoader(test_dataset, \n",
166 | " batch_size=batch_size, \n",
167 | " shuffle=False, \n",
168 | " num_workers=2,\n",
169 | " drop_last=False,\n",
170 | " )\n",
171 | "\n",
172 | " # logs = {'test_accuracy': []}\n",
173 | " correct_clfs = 0.\n",
174 | " running_loss = 0.\n",
175 | " for i, data in enumerate(testloader, 0):\n",
176 | " # get the inputs; data is a list of [inputs, labels]\n",
177 | " inputs, labels = data\n",
178 | " inputs = inputs.to(device)\n",
179 | " labels = labels.to(device)\n",
180 | " \n",
181 | " # forward \n",
182 | " logits = net(inputs)\n",
183 | " loss = criterion(logits, labels)\n",
184 | " \n",
185 | " running_loss += loss.item()\n",
186 | "\n",
187 | " correct_clfs += (logits.argmax(dim=1) == labels).float().sum().item()\n",
188 | " N_points = len(test_dataset)\n",
189 | " return running_loss / N_points, correct_clfs / N_points\n"
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "metadata": {},
195 | "source": [
196 | "## 4. Compare to Adam and SGD"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 14,
202 | "metadata": {},
203 | "outputs": [
204 | {
205 | "name": "stdout",
206 | "output_type": "stream",
207 | "text": [
208 | "Epoch 1, loss: 0.27\n",
209 | "Epoch 2, loss: 0.10\n",
210 | "Epoch 3, loss: 0.09\n",
211 | "Finished Training\n",
212 | "Test accuracy: 98.08%\n"
213 | ]
214 | }
215 | ],
216 | "source": [
217 | "from vsgd import VSGD\n",
218 | "\n",
219 | "net = build_convmixer(dim=16, patch_size=2, kernel_size=3, depth=4, num_classes=10)\n",
220 | "vsgd = VSGD(net.parameters(), lr=0.01, ps=1e-7)\n",
221 | "net, logs_vsgd = train(net, train_dataset, vsgd, max_epochs=3, batch_size=32)\n",
222 | "logs_vsgd['test_loss'], logs_vsgd['test_acc'] = test(test_dataset, net, 32)\n",
223 | "\n",
224 | "acc = logs_vsgd['test_acc']*100\n",
225 | "print(f'Test accuracy: {acc:.2f}%')"
226 | ]
227 | },
228 | {
229 | "cell_type": "code",
230 | "execution_count": 17,
231 | "metadata": {},
232 | "outputs": [
233 | {
234 | "name": "stdout",
235 | "output_type": "stream",
236 | "text": [
237 | "Epoch 1, loss: 0.57\n",
238 | "Epoch 2, loss: 0.13\n",
239 | "Epoch 3, loss: 0.10\n",
240 | "Finished Training\n",
241 | "Test accuracy: 97.81%\n"
242 | ]
243 | }
244 | ],
245 | "source": [
246 | "from torch.optim import SGD\n",
247 | "\n",
248 | "sgd_net = build_convmixer(dim=16, patch_size=2, kernel_size=3, depth=4, num_classes=10)\n",
249 | "sgd = SGD(sgd_net.parameters(), lr=0.01, momentum=0.9)\n",
250 | "sgd_net, logs_sgd = train(sgd_net, train_dataset, sgd, max_epochs=3, batch_size=32)\n",
251 | "logs_sgd['test_loss'], logs_sgd['test_acc'] = test(test_dataset, sgd_net, 32)\n",
252 | "\n",
253 | "acc = logs_sgd['test_acc']*100\n",
254 | "print(f'Test accuracy: {acc:.2f}%')"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": 15,
260 | "metadata": {},
261 | "outputs": [
262 | {
263 | "name": "stdout",
264 | "output_type": "stream",
265 | "text": [
266 | "Epoch 1, loss: 0.24\n",
267 | "Epoch 2, loss: 0.10\n",
268 | "Epoch 3, loss: 0.09\n",
269 | "Finished Training\n",
270 | "Test accuracy: 97.68%\n"
271 | ]
272 | }
273 | ],
274 | "source": [
275 | "from torch.optim import Adam\n",
276 | "\n",
277 | "adam_net = build_convmixer(dim=16, patch_size=2, kernel_size=3, depth=4, num_classes=10)\n",
278 | "adam = Adam(adam_net.parameters(), lr=0.01)\n",
279 | "adam_net, logs_adam = train(adam_net, train_dataset, adam, max_epochs=3, batch_size=32)\n",
280 | "logs_adam['test_loss'], logs_adam['test_acc'] = test(test_dataset, adam_net, batch_size=32)\n",
281 | "\n",
282 | "acc = logs_adam['test_acc']*100\n",
283 | "print(f'Test accuracy: {acc:.2f}%')"
284 | ]
285 | },
286 | {
287 | "cell_type": "code",
288 | "execution_count": 22,
289 | "metadata": {},
290 | "outputs": [
291 | {
292 | "data": {
293 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAr8AAAFECAYAAAAupFapAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAABl2UlEQVR4nO3deXxb13kn/N8BQJCUSAkktVmyFRlylqZxk5BS0qaThompt51mOk2mpJxuk7zThHw7bdN00iGb6ZJp0olKZdImTdqUdFY7SS2SibM4Th1SMb3EsS2RlmzZshZCuyguAEECXLDd8/5x7wUvgIuNBAlc4vf9fPgRce/BvecCFPDg4DnPEVJKEBERERGVA1uxO0BEREREtF4Y/BIRERFR2WDwS0RERERlg8EvEREREZUNBr9EREREVDYcxe4ArS0hhBvAyAru2iWl7C10fzYCIUQj1Mf0qJSyq9j9ISJr4uszUXEw+C0PfSbb3ABaAHgADJns9xTq5EKIVgD9AAaklG3FOgYRUQkq6uszUTli8LvBSSk9ADqSt2vBZAuAUSllyn4iIlpbfH0mKg4Gv7TmpJQDAESxj0FERKmEEIMAWqSUfI2lssAJb0RERERUNhj8EhEREVHZYPBLpoQQjUKIMSFEu3a7WwgxI4RoMbRpFUIMatul9ntjmmNJIUS32fGFEC1CiBGtzZgQonMtjmG4X4/W5xnt93at72Pa13+redz0Y0mtP90mbVyGPkjt3/6kxzZrGyIqb9rr8oj2+jCY/PqQ7XVE2yeh5hdDa6P/uPLoR07vBRnaj+jvNfm0M7wv9JjcVwohRpK2Fex9LVv/tGPLNNflMusfrR/m/FImbgD7tf+gjVBnGPuAeLmvfgCjAHoBeAF8DMCIEKJJSjma4/E7tH/7AJwE0A6gWwjh0fJ8C3oM7VrcAI5om/TgtAuAP4fzpSWE6AfQCnV29lHtPJ3aC+s9Ukr9+MehPp5HoT5uB5E6szuXNkRUpgyvywNQXxNaAQwKIQ5JKXN9HemBWmqtC8uvpQAAw+tVtn7k9V6gBartWh/0ShctAHqEEH36eXNttwIFeV/L1j+oj20n1Mc0uSzdYe3fYyu8BlotKSV/yvAH6gulBNCfZn+jtl8CGAPQmLTfZbLNrbUfTHOs7jTHb8zWrwIdQ9/eatjWnrwth8fOrC/6cTqT2rZo23uSHqNuk+O6cm3DH/7wZ+P+5PD63Jnm9WYMwIj2e86vI1ADYLnCvubzXpD2uqBOuMu3nf5a3GPSTuqPhUn7Vb2v5dG/Qa2d2+zx5ut58X6Y9kC5aJNJn96llH6TbR6oo6cH8jj2aNJx9BEL1xoc41DSfuPvB/M4n5luAB4p5VHjRqmOwAwAaBdqQXud8Xe9rT9pUy5tiKj8mL7eaNsbk1IW1vR1JM/3Av2btg+ZHGdoBe1WYzXva7n2T0/HiI+oa89NI9T3Lf8K+06rxOCXskkOLNPS/lP7kF/getJ4w/BiUL/Ox1gxLah1IX06gp5H3Ki9kI4CaNVyz3qEWtMzLpc2RFSeDB+i3Uk5uhLLwdaBYr6OZHgvcAMYyiHoy7XdSq32fS2n/kk17c4P9ZtBnZ7ykJKnTOuHwS9lczLdDiGEW0vqH9NeeGdgMspQQvQg1DgpRP/9xCqOq0+GGEuzX1+NSX9s7oEaKLuhvij2a4+hy3CfXNoQUfnRX0cGALSl+dFft9fldSSX9wJD0J5xdbpc263Sit/XVtC/PgAuw6S6NsN2KhIGv5SN32yjNoIwBjX3qRtAk1QLpJfyspv6yOx9Qq3K0A6176Myt8l16ejXvD/N/oQXS+2rtUMA6qB+Haa/OfXrd8ilDRGVJT1w80kpB9L8+IH1eR3J9b1AG4kGsgyQ5NpulfxmG3O5lhX0Tx/h1YPeFqztqDblgNUeaKXug5pzli7gK0X6LOdRLOds9clVLh8qpRwVQujHN6O/6KXkl0GdBdyrzTxOuX8ubYiofEgp/UIIP9Svz3N67Vrj15F83gs8OZ4713amkuZX5CPXa8m5f9r7wyiAw1pFIIApD0XHkV9aKVfyBu0Fp5TTHg4C8Esp26SUddrPqgJfgy6oOXgJdX21kYQWAANSSo9W3zFj3l0ubYiorHVB/SrdrL5tp/Yaks/riEe770pev10mfUj3XtCl7Tfrd6vh/Lm20yXP71jp67rL5Hxm15Jv/3q0Y3cA8VxgKiKO/NJKDUCbSKH97sJyIn+pOgG17u4YEr+S80AtWbPiWcRSyqNCiENYrus7BDUXWB9t1mcFH4Cad6fX2fRrbfRanLm2IaIyJaXs1V5v2oUQB6C+Trigvk64ob4mu5H768gJqF/19wshTkJ9LW/L8TUx5/cCKeWAEEKvftNi6Gej9u8hqCOvObXD8ut4q1AXNvJr+1Y6apzTteTRP51e97dVa0tFxpFfWqkPIXEihVvbNoCkr/dLkBvqi6P+0w61OPyq8uC03Lou7WYn1NGIo1LK/YYcvCGoaRAeqC+qnVBfYLuklF25tiGi8ialbIM6kuiD+hrRAvW1t0lK6cnzdaRXu6/+gb0XGSaFJcnrvcDQb4/WJ32hCePiHDm1015XO6AGvd1QX389AO7Uzp3rNeR9Lbleh6GfetDLlIcSIKRacJloQ9NmN89AfQEzrramr+pzH9QXrzZ+JUVERIWkDa60SCnrit0X4sgvlQ/9a7AjybNstXqPelrCahe7ICIiStYKljcrGQx+qVzoX1ndm2Z/Ier9EhERJdDKagJMeSgZTHugsqFVYuiEGggfg5qfVQ81L06vyNCW/ghExaPN3O/QcsvT7fcDcEspe/PZRkSFpVV86MLyCm8n0/3fpfXH4JfKijYztwPLs3L9UCdF9DDXl0qdEGLQ7A1UL2mlzUJvh/rBzpXLttVUOSEic9o8k0tQ/88NQZ1P4i9il8iApc6orGhv9Hyzp43mINRvMwA1yG0E0JDjNv5/ICowLdDl5LYSta7B77Zt2+S+ffvyvt/8/Dw2b95c+A6tI15DadgI1wBsjOuw2jWMjIxMSym3F7sfabiSbjfksS2BNiLcDgDV1dVNd9xxR0E6WEoURYHNxikvxcbnoXRs1Ofi/Pnzpq/b6xr87tu3DydP5lt2DxgeHkZzc3PhO7SOeA2lYSNcA7AxrsNq1yCEuFLsPmTgR+oqV7luS6DlAfcCwIEDB+RKXrNLndX+9jYqPg+lY6M+F+let5n2QERkfSewPKrrBjCo3c5lGxFRWdl4Y9xERBuQNlnzgD65Tds2CKgT2AC4tTYuKeVQrtuKcClEREXFkV8iIgvQAtW6pG2HDL8f1X4dyncbEVE54cgvEREREZUNBr9EREREVDYY/BIRERFR2WDwS0RERERlg8EvEREREZUNVnsgyiIUCsHn8yEQCCAWixW7OwCArVu34uzZs8XuxqoU8xrsdjtqa2tRX1+PysrKovSBiIiKo/SDX0VBRdhf7F5QmQqFQrh69Srq6uqwb98+VFRUQAhR7G4hEAigtra22N1YlWJdg5QSkUgEc3N
zuHr1Kvbu3csAmIiojJR+2sPjf4/XnvsCMD9d7J5QGfL5fKirq8O2bdvgdDpLIvCl1RFCwOl0Ytu2bairq4PP5yt2l4iIaB2VfvA7flr9d9Ff1G5QeQoEAtiyZUuxu0FrZMuWLQgEAsXuBhERraOSD35jioSUstjdoDIVi8VQUVFR7G7QGqmoqCiZPG4iIlofJZ/z++KNWTikxHYwAKbiYKrDxsXnloio/JT8yC8ALEQkMHWu2N0gIiIiIosr+eD3cy4fntm0BDz/QLG7QkREREQWV/LBrwSg8JtJIiIiIiqAkg9+BQCl2J0gIiIiog2h5INfmzQEvwusx0m0njo6OiCEgMfjydqurq4uftvv96OjowP79++HEAJNTU3o6uqC3+83vb/H40FbWxvq6uoghMD+/fvR1taGoaGheJvR0VEIIVJ+6urq0NHRkfbYRERERqUf/EIs13n47h8CCssSEa2Xjo4OAEBPT0/Gdr29vTh8+DAANZC98847MTQ0hI6ODvT396OlpQW9vb04cuRIyn0HBgawf/9+jI6Oor29Hf39/ejo6IgHxMm6u7sxMzODmZkZjIyM4GMf+xj6+vrQ1NSUNUgnIiIq+VJnNgCKkJCQEBDA898Amt5f7G4RlYXGxka43W709vaiu7vbtM3AwACA5UC5q6sL9fX1GBsbi7dpbW1Fd3d3SnA6NDSEtrY2tLa2or+/P2FfZ2dnwsivzu12w+VyxfvX2NiI9vZ23HPPPWhqasLMzMyKr5eIiDa+kh/5XRAKXqwOI77OxblHitofonKjpxSMjo6a7j927BjcbjcaGxsBqAFtS0uLaVu3251ybLfbnRL46tIdJ5nL5cJ9990Hv9+Po0eP5nQfIiIqTyUf/OoSFnlj7i/RumltbQWQPvVhYGAgPuoLAPX19Th58mTW4373u9+Fx+NBV1dXQfrZ2NiIlpYW09QKIiIiXcmnPejUJY61mmff/UPgd44VtT9E//bcVVz1LRTl3OFwGE6nM2u7vfWb8Ntv2buqc+mjun19fSkBcG9vL4DlABlQ0x46OjrQ1NSE7u7utKO3+kiynitcCIcOHcLQ0BD8fn88NYKIiMio5Ed+KyvULi6EOdGNqFj01IfkHNz+/v54XrCuvb0d3d3dGB0dxaFDhyCEwKFDh1LSJk6fPg0ABQ1S9X7kMvJMRETlqeRHfuexGXbMYjoYwpbqimJ3hyhutSOqqxEIBFBbW7tu5zt8+DA6OjrQ09MTH8nVg2GzdIjOzk60t7ejr68Pg4ODGBgYQFNTEwYHB3PO410JjvYSEVE2JT/y66tSAwyZpR0RrR2Xy4XW1tZ4ZQdgOeUhXdqCy+WKly4bGxuD2+1OKF32xje+EQBS6vM2NTUl1PHNp3yZPrqcPLGOiIhIV/LBL7i0MVFJuPfeewEslzY7duwYWltbcxptdbvd8UUu9ABVrw7R19eX0La/vx8jIyNpS6tlcuLEifj5iIiIzJR88LupwoEZ4YLTXvJdJdrQ9ED32LFj8Hg8GB0dTajykI0ekOrB8nve8x643e6UIFefYJdvAOvxeDAwMIDOzs687kdEROWl5CPKbTVOTNu2oaaq5NOTiTa8w4cPY2BgAAMDA3C5XKb5u+kC4p6eHrhcroSgtqenJ+1Kbvnw+/1oa2uDy+XCxz72sVUdi4iINrbSjyi1tAfJpF+iouvo6IgvU2yW6+v3+9HX14e+vj4cPnwYTU1NANRUhqGhIQwODia0b2lpQX9/P9ra2tDU1ISOjg7U19fD4/GktNV5PJ54nrDP58PQ0BC6u7vh8/lw/PhxTnojIqKMSj/4ZdIvUclobGyEy+WC3+83HeF1uVy4dOkSent7cezYMfT29sLlcuHAgQMYGRmJ5/katba2YmRkBEeOHInnBeupD4ODgynpD11dXQkLY7hcLhw+fBjd3d0MfImIKKuSD3710Fey3gNRSZiZmcm43+VyobOzM6/c28bGxrRLHBvbSH4FREREq1TyOb+G6DfR0tx694SIiIiILK7kg990sS8WM48+ERERERElK/3gN13K7/jpde0HEREREVlfyQe/+tivb8urEzfffL4IfSEiIiIiKyv54Fcf+H14U1XijsmXgfM/Br51LzA/ve79IiIiIiLrKfngV+fbvDt148kvq//eenF9O0NERERElmSZ4FdCAm/+vWJ3g4iIiIgsrOSDX33C28TCLeDnfqO4nSEiIiIiSyv94Ff795zXk6ERV4Ejoo1NCNEqhGgRQrSn2d+p7e/Msm1GCDFo3EZEVE5KPvjl6sZEVO6EEK0AIKUc0m63JO3vBOA37G8026Y1b5NSHpJSHl23CyAiKiElv7xxTvxXi90DIqK1dBDAMe13D4BGAEOG/Q3adl1Lmm2jAFxCCLeU0vTrNG1kuR0Adu7cieHh4UL0v6QEg8ENeV1Ww+ehdJTbc1Hywa/IZej3lR8Ce38J2Pbq7G2JiKzHlXS7Iel2D4AOIcQQgP0AxtJsA4B6AD4hRI+UsiP5RFLKXgC9AHDgwAHZ3NxcqGsoGcPDw9iI12U1fB5KR7k9F6Wf9pCr+ali94CIaK34oQatpqSUHilll9ZmDIDHbJvWtldK6Qfg19MpiIjKycYJfomINq4TWB79dQMYNO7UcnxbtVSGg1LKgTTb2g25v0REZSlj8CuEcOkvoEKI7jRtMs5AJiLr8/v96OjowP79+yGEQFNTE7q6uuD3+03bezwetLW1oa6uDkII7N+/H21tbRgaWk5TPXXqFIQQKT91dXXo6OhIe+xyJKUcAODWJrq5DJPYBrX9o9rtVgBH0m0D0GfYph+XiKisZMv5PQyoX5MJIQ4KIdq1fDAAiTOQtRGFFv1FmYg2Bo/Hg6amJtTX16OjowNutxsnTpxAb6/6UtDdnfi5eGBgAG1tbXC73Whvb8fBgwfh8Xhw7NgxtLW1YWZmJqF9d3c32tvb4+caGhrCkSNHMDQ0hMHBQbjd7vW50BJnqM4wZNh2yPB7SiCbvE1LdxjVfhj4ElFZyhj8GgNdmHzVhuwzkInI4rq6ulBfX4+xsbH4ttbWVnR3d8PjSSwYMDQ0hLa2NrS2tqK/vz9hX2dnZ8LIr87tdsPlcgEAGhsb0djYiPb2dtxzzz1oampKCZaJiIhWI6dqD0IINwCfyaiuK+l28gzkVZfN8Xq9kIpEOBLG8PAw3uD1mra79vzzmL0UzuvY62kjlBEpx2vYunUrAoHA2nVohWKx2Lr1a2hoCO9973tNz7d9+/aE7R/60Iewb98+fOUrXzFt/9a3vjW+XVEUAMDi4mJKW7vdjs9+9rP4lV/5FXzyk5/ERz7ykQJeUaKlpSXL/10TEVHuci111mpWEgdZZiADqy+bM3B8AJ7ZSTgrnGoZjptfNG237U1vAvb9cl7HXk8boYxIOV7D2bNnUVtbu3YdWqFAILBu/aqvr8fp06eznm9gYACXL19GT09PTn2z2dQpB9XV1abt3/72t6OlpQX/8A//gL/+679eWedzUFVVhTe/+c1rdnwiIiotWYNfbbbwUe33Rn0ShSbjDORCeP
OON+OE59HsDbnEMa23ka8BM5eLcmpnOAw4ndkb1u0Dmj6wqnN1dXWho6MDTU1N6O7uRktLi2m7EydOAAAOHz68qvMZHTp0CENDQ/D7/fHUCCIiotXIVu2hBUC3EGJECDECbZTXMMPYdAZyITlsJb8OB9GG1t7eju7uboyOjuLQoUMQQuDQoUMYHR1NaKffLmSQqk92O3nyZMGOSURE5S3bhDd9ZaDk7cYZxikzkInKwipHVFcjHAigch3TMTo7O9He3o6+vj4MDg5iYGAATU1NGBwcTDsSXAgc7SUiokLbOItcSFnsHhBtaC6XC+3t7ejv78fY2Bjcbjfa2tri+xsb1bUTkuvzNjU1JdTxTa4QkYk+msxyZ0REVCgbJ/glonXjdrvji1zoAerBgwcBAH19fQlt+/v7MTIyklIPOBd6HjGDXyIiKpSSD34FlieyTQdDGRpywhvRetIDUj01obW1FW63OyXIdbvdaGxszDuA9Xg8GBgYQGdnZ0H6S0REBFgh+DUEtYvhWPqG4YV16A1R+enoMKtyCPT09MDlciUEtT09PfGljVfD7/ejra0NLpcLH/vYx1Z1LCIiIqONU0rhxH3Aq9du4g1ROfL7/ejr60NfXx8OHz6MpqYmAGoqg778sFFLSwv6+/vR1taGpqYmdHR0oL6+Hh6PJ6WtzuPxxPOEfT4fhoaG0N3dDZ/Ph+PHj3PSGxERFVTJB7/GtIdQVCliT4jKj8vlwqVLl9Db24tjx46ht7cXLpcLBw4cwMjISHySm1FraytGRkZw5MiReF6wnvowODiYkv7Q1dWFrq6uhHMePnwY3d3dDHyJiKjgSj/4NaQ9HD87gbsOfhA48aUi9oiovLhcLnR2duaVe9vY2Ij+/v6Mbd70pjdBskoLERGts5LP+TWKSQnU7Cx2N4iIiIjIoko++N1Tsyf+u6JIABlGiiJLa98hIiIiIrKskg9+37T9TfHfqyrsgLCnb3zpibXvEBERERFZVskHvwDg0NJ+hRCALUPwG+XILxERERGlV/LBr9PujP9+zbcAIMNiFqe+ufYdIiIiIiLLKvngd1PFpvjv13wLQD2XOSUiIiKilSn54BdA4mCvwwk0/0XRukJERERE1mWN4JeIiIiIqAAsGvxmyPslIiIiIkrDEsEvQ10iIiIiKgRLBL+6GELF7gIRERERWZilgt8oAsXuAhERERFZmKWCXyIiIiKi1bBE8CuTN4gMWcDh+bXsChERERFZmCWC37zMTxW7B0Qb2v79+yGEwOjoaM736e3thRACfr9/7TpGRESUg40X/BLRmhkdHYXH4wEAHDt2rMi9ISIiyh+DXyLK2bFjx+B2u9HS0oLe3t5id4eIiChvFg1+WfmXqBh6e3vR2tqKtrY2+P3+vFIfiIiISoE1gt+UGW+Z2ubTmIhyNTo6Cr/fj3vvvReHDx8GAPT09BS5V0RERPlxFLsDuairsmE2WuxeECUaOD+A64HrRTl3OByG0+nM2u722tvR+prWgpyzp6cHLpcLjY2NAIDGxkb09fWZBsBHjx7FkSNHUF9fj5aWFuzfvz+lzaVLl/DJT34SQ0ND8Pv9aGlpQU9PD9xud8Jxjh07hvvuuw9dXV04efIk3G437rvvPrhcLnR0dGBoaAhutxvd3d1obS3MtRIR0cZliZFfWz5ZDnM316wfROWsr68vPuILAPfeey/8fj+GhoYS2h09ehRdXV3xYLapqQlHjhxJOd5Xv/pV1NfX47777sPIyAh8Ph+ampoS2ni9XoyOjqKtrQ0dHR2477774PF40NbWhkOHDqGjowP9/f0AgLa2tjW4aiIi2mgsMfJbZQcQybHx0/8E7PvltewOEQAUbER1JQKBAGpra9ftfProbEdHR3xbe3s7urq60N/fj5aWlvj2I0eOoKWlJR6UAoDf70dXV1fCMT/xiU8kXEN3dzcOHTqEoaGhhOMB6qizvu3EiRM4evRoykhvW1sbRkdH4yPTREREZiwx8mtPHvrNtMgFERVcf38/XC4X3G43/H5/vF6v2+1GX19fvJ2eF2wMkgHA5XJlPceBAwcAIF5KzWwfgHgKhTFA1lMlfD5fbhdERERlyxLBb4pNDcXuAVFZ6evrg9/vR11dXcKPx+NJSH3QA1dj3m4mAwMDaGtrw/79+1FXV5e2nVnwnOs5iIiIjKwZ/G7ZDfzG54DK9fval6hc6SkPIyMjkFIm/MzMzABYrvqQzwjsb/7mb+JDH/oQDh06hMHBwfixyJwQolUI0SKEaE+zv1Pb35llW8bjEBFtdNYMfgGgdhdY75do7SVXeTDStw8MDABAvE1yBYixsbGE2x6PB4899hi6u7vR3t7OUdwshBCtACClHNJutyTt7wTgN+xvTLMt43GIiMqBdYNfIloXAwMDaG9PP0io5/fqAXBnZycGBgbiZciOHj2Ko0ePJtxHD3Z7enowMDCAoaEh3HPPPWt0BRvCQQB6MrQHQPInkQYAxuH2ljTbsh2HiGjDs0S1h/S4oAXRWtID2uQJbEaHDx9GR0cHjh07htbWVnR3dwNQV4MbGhpCa2srenp64tt1999/Pz784Q+jra0NjY2N6OjoQHd3N+rr69fugqzLlXQ7eeJDD4AOIcQQgP0AxtJsy3YcaOkQ7QCwc+dODA8Pr7LrpScYDG7I67IaPg+lo9yeC4sHv0S0llpbWyGzrJrocrlS2nR3d6cEu8mjx+95z3vw+7//+xnbpDtOcrvGxsas/bQ4P4C0nwqklB4AXUIIN9Qg12O2DWqwm/HThZSyF0AvABw4cEA2NzcXov8lZXh4GBvxuqyGz0PpKLfnwlJpDxIK/Avh5Q2/0gnYGL8T0YZ3Asujtm4Ag8adej6vFvAelFIOmG3LdhwionJgqeAXACYDoeUb218DvO1PUhsFp9avQ0REa0wLXN3aBDWXYcLaoLZ/VLvdCuBIhm2mxyEiKicWGzaVqfUdzL7qfPgjwPu+uQ79ISJaH1JKfdbgkGHbIcPvAyb3MduWchwionJiqZHfBVzLbXE3JbrmfSEiIiIi67FU8DuNnxa7C0RERERkYZYKflXJQ78beoY3ERERERWQJYJfm6GbG7ycEZUg/s1tXHxuiYjKjyWC33dseUf89x+cvpm4M92b1+QrQCi4hr2icmC32xGJRIrdDVojkUgEdru92N0gIqJ1lDX4FUK06uV00uyfEUIMauvIrwk7lt+cpoKhDC0Nhj4ODB9Zox5RuaitrcXc3Fyxu0FrZG5uDrW1tcXuBhERraOswa9ZqZwkbVLKQ4byOQUnUgucGWT42tJ7seB9ofJSX1+PmZkZTE9PIxwO82vyDUBKiXA4jOnpaczMzHA5ZSKiMlOIOr8uIYRbW0Vo/TEYoTVUWVmJvXv3wufz4fLly4jFYsXuEgBgaWkJVVVVxe7GqhTzGux2O2pra7F3715UVlYWpQ9ERFQchQh+6wH4hBA9UsqO5J1CiHYA7QCwc+dODA8P532CxcVFhCPqssaXr93E8LAvvs818yJu93rT3vfMCs63FoLB4IquvZTwGkpHMBhETU1Ns
buxKhvhGoiIyHpWHfxKKXsBQAjh19aRHzDZ3wsABw4ckM3NzXmf49Kjl+CscAIANm1tQHPzQcNOG7D4VNr7ruR8a2F4eLhk+rJSvIbSsRGuYyNcAxERWc+qqj0IIdqFEI2F6kza82TM+SUiIiIiyk0u1R5aABwQQrQatunVH/q0261ATpPjCq/eve6nJCIiIiJrypr2IKUcAlCXtO2Q9q8fwKj2s2aBb8aR3623r9VpiYiIiGiDscQiF9kxLYKIiIiIsrNE8MucXyIiIiIqBEsEv0REREREhWCJ4FeILCO/2fYTEREREcEqwS8E6jY7i90NIiIiIrI4SwS/RERERESFsPGD30tPFLsHRERERFQiLBH8BmKBld/55vOF6wgRERERWZolgl8FSpZiZ5zwRkRERETZWSL4lVJmbrB1T6Y7F7YzRERERGRZ1gh+IaGHsBHMpTZ4119lvDcREREREWCR4FeBApuW2RDAhdQGVVvXt0NEREREZEmWCH4lZHyhi2n8tMi9ISIiIiKrskbwKyUqbCuc1CaVwnaGiIiIiCzLEsHvXVV3YXttZfz2rdml1EbNf7GOPSIiIiIiK7JE8Ftlq4qnPQASf/nQi6mNKjaZ31kxjPx+617gp58reP+IiIiIyBosEfyK1dTxjSwk3r7y9Oo6Q0RERESWZYngd1UmX1b/9Y4Vtx9EREREVHSOYndgXVw/CTzx6WL3goiIiIiKbOOP/AJAYLzYPSAiIiKiEmCJ4NcOe7G7QEREREQbgCWC3+VKDxnU7lr7jhARERGRpVki+M1J1Vbgd46Z74tF1rcvRERERFSSLBn8Ssj87vBCmqCYiIiIiMqKJYPfIFi2jIjKixCiVQjRIoRoT7O/U9vfmek+QogZIcSgsR0RUTmxZPAbw0L2RrlQYuqqb688UpjjERGtASFEKwBIKYe02y1J+zsB+A37G7U2Hm2bRwjRqDVvk1IeklIeXb8rICIqHZYMfgsmFlb/feHB4vaDiCizgwA82u8eAI1J+xsA+Ay3WwCcBNCvBb1uKeWots8lhHCvZWeJiEpZeSxyQURkba6k2w1Jt3sAdAghhgDsBzAmpfQLIXoA9AMYMrStB+ATQvRIKTuST6SlSLQDwM6dOzE8PFyYKyghwWBwQ16X1fB5KB3l9lww+CUiKn1+qEGrKSmlB0CXNqI7BjXNoQXAkJTyqBCiWwjRKqUckFL2AoAQwq9vSzpWL4BeADhw4IBsbm5emysqouHhYWzE67IaPg+lo9yei/JOeyAisoYTWB79dQMYNO7UcnxbtSD4oBbQNhpSHY4AqBdCtBtyf4mIypJFg988S50REVmYFsy6tdFcl2Fi26C2f1S73Qo10AWAXi3YbQFwWBvR7TO0Q/KoLxFRObBU2sOmSjsWQrHCHVAyiCYiazBUZxgybDtk+D05fcEPLX0haduo9sPAl4jKkiVHfvNe5IKIiIiICBYNfgtGiGL3gIiIiIjWkSWDXwEGrURERESUP0sGv8gl+HVUrX03iIiIiMhSLBX85jXie9c9a9cRIiIiIrIkSwW/edl6e/Y2rPZAREREVFYsFfzmVeXB/c48jswcYiIiIqJyYKngd7nGr8CL12czN86rkgNHgImIiIjKgaWCX6PPDp1HOKqs7iAsdUZERERUViy1wptOQRgAMLcUwbaaysSdd7cBSgFXgSMiIiKiDcOSI7/TeAoA8KMzt1J33t0KvPHe3A7ECW9EREREZcWSwW/hMf2BiIiIqBxkDX6FEK1CiMEs+1uEEO2F7doaevQvgYmXi90LIiIiIlpnWYNfKeVAun1CiFatzZB2u6VwXVtD3ovAc72GDUx/ICIiIioHq017OAjAo/3uAdC4yuOtL1Z7ICIiIiorq6324Eq63ZDcQEuHaAeAnTt3Ynh4OO+TBINBeBe8CEeWqzhMz3lx4bwfw0uX0t7vtXNhVEQCpvvCAYmLTzyB13u9UGwVeHl4GLZYGIrdmXf/chEMBld07aWE11A6NsJ1bIRrICIi61lt8OsHUJ+pgZSyF0AvABw4cEA2NzfnfZLh4WE0VDfAFwvEF7rY1tCAV79mB5p/8VXp77gnCpy4z3xf7W3Y/fa3AxNfAQDsuPlFdft/PArUZTjmCg0PD2Ml115KeA2lYyNcx0a4BiIisp7Vpj2cwPLorxtA2olxhVBdYU+4LbOVKrNluDwlar7dfzXPXhERERGRVeRS7aEFwAF9cpu2bRCIT4Zza21c+sS3tbK9tjJ7o1zNT6XZwclvRERERBtV1rQHLaCtS9p2yPD7Ue3XNQ18AcCW7wS1XW9cm44QERERkSVZZpGLX7vz11K2ZV2gbXPK/DsiIiIiKmOWCX4r7akpDwqXJyYiIiKiPFgm+CUiIiIiWi3LBL9mlR3WZOCXo8lEREREG5Zlgt+mnU0Jt5cwgcVYcA3OxOCXiIiIaKOyTPDbUJ04ee0qHsTwdG+RekNUGFJK/PTiNKIxpdhdISIiKguWCX7NhJT51R3g0hOF6QjRCo1cmcFXnrqE75++WeyuEBERlQVLB7/p/PHxP8Z3Lnwne8MbJ9e+M0QZzIfV5brnFiNF7gkREVF52JDBLwD85OpPsjda9K95P4iIiIiodGzY4Ddux8+l3zd7LXUbqz0QERERbViWDn5zilM371jzfsRFw0AosH7nIyIiIqK8WDr4zaks2ZbbVn5MKYEz3wEWZ3K769DHgW9/MM/zEREREdF6sXjwmyplMYyf+03g7R9d2cF8HuCFY8DTn8+9PRERERGVLEexO5APh03kfyebDdh198pOKLXaq9HQyu6/CmfH57BzSxXqNzvX/dxERKux7y9+WOwuZPTRu6P4QIn28fLfv7vYXSDa8Cw18ivECoLfQijCJLj/++g5/M33zqz7eYmIiIg2MksFv8lyD0lXGjSb3y8aU+ANrv1o8KJWA5aIiIiICsPSwe+akNmXmX3gmSvoHHgBC+HoOnSIiIiIiArF0sHv7MLqVsWSkLjpX0QoahhhfeWRrPd78cYsACAUyR4oExEVghCiVQjRIoRoT7O/U9vfmek+2Y5DRLTRWTr4BYA/+NqJFd83FFEwGQjh0vT88sa5GwXoFRFR4QghWgFASjmk3W5J2t8JwG/Y36i18WjbPNq2jMchIioHlgt+X7OztuDHzJo7vOQHBv4AmLkMxKLYEbpS8D4Q5SMmY7j/pfsxtTBV7K7Q+jgIQK+l6AHQmLS/AYDPcLsFwEkA/UKIRgBuKeVoDschItrwLFXqDAAqK1YQr6+0SoR+vwWv+u8rjwAOJ35jYgBfdX14uZ2iANefW9k5iFbgVuQWnrv1HGZCM/jTxj8tdndo7bmSbjck3e4B0CGEGAKwH8CYlNIvhOgB0A9gKMfjQEuHaAeAnTt3Ynh4OO/OfvTu0p4PsbO6dPu4ksfbqoLBYFldbykrt+fCcsFvUV16HLCrdXerlIXl7RceBUa+ltJcSomPf/8l/MYbd69TB4log/IDqE+3U0rpAdAlhHADGIOa5tACYEhKeVQI0a2lPGQ8jnasXgC9AHDgwAHZ3Nyc
d2dLtYau7qN3R/GZF0vz7e/y7zYXuwvrZnh4GCv5+6LCK7fnwnJpD+ta6Tc8n7otFk7dpo8MJ4l4r6ByfARffvJSgTu2NobPTeI7o9dXdQwpJZQcKmYQUV5OYHnU1g1g0LhTz+fVguCDUsoBAI1aqgMAHIEa9GY8DhFRObBc8JuNNM3gzTNkjmlfh72S4+hFmkUw7I924t2z38rv3EX0wM+u4IcvjK/qGF88/UV8+Ccfzt6QiHKmBbNubTTXZZiwNqjtH9Vut0INdAGgVwjRrt3nsJSyN91xiIjKSWl+77MCPxj7AXZs2oGDuw4CAG7NLeHiZBB37ajJ/2A/+QRw6BPIZxkNUr3sfbnYXSDakKSUR7VfhwzbDhl+H0hq74eWvpDtOERE5WRDjPyGowoevfwoHnj5AZyZVpcEnphdwpFHzq7sgFPncmq20nl0oVgI3z7/bURiq6tTnJdYFLh+cv3OR0RERFSCrBf8mgScPY+PIaZIzC1G0PtCykBHTqu25Xwy42GzDQyn2T94ZRCPXXsMw9eHV9SrFXnhQeCJTwO3zqzfOYmIiIhKjOWCX2ESkL50cw7XfAu4ND2fsFrbIm7iytwVwLbW2R2Zo+DkEeKYovZxXSeGBSfUf0OB9TsnlQ1vMIQ/+NoJnL7mL3ZXiIiIMrJc8JtsCZOIxBTMLakpBIohDr2Gfnz6xKcBuwM4fD/gqEp7nEsyhMeUFQaGWYeAi+j6CPCte4EFrf79SnM1iDK47FUrozx1cbrIPSEiIsrM8sHvBIYQQSB7/OmoBCqq0+7+TGwC31Zmsp5PkRI/qZpHGIuZ48irz2Y91rq4qM1pmblc1G6slH8hjMdemSx2N4iIiGiD2BDVHi7hK7k1rNwCLKYGuKYxbJpo+jxCOOlcwrz9WQD36o0TGwUngaf+Yfn4JTXYWlKdyeoLP7mIS9PzqNvshNNuw+t3byl2l1J8/vnP4/aa2/HeV793xcco4e8OiIiINhRLjfy+vuH1JltXH8yZBh6Pfco0UFa01hIy/WhzNLTqPpEqsKTWXP788Qv4zI9zq8Kx3s75zuH41ePF7gYRERHlwFLBb/svtKdsiyJxFbYLEwEoWXIgTl3z49K0yeptRrdeAPxX8u5jTkJzwOx1Neq+MbKcj7uWSjkvmSw2Hk9ERGRdlgp+HSZVG2JJwa+UwEI4ltIu2exiBNdmFgDkF3icv7UcXMfTGfINLM89qlZfWJgCHj8KDH08v/ubiCpR/OVTf4lTk6fStMg/+J2ZL36+rfmKfVTqvIteXJq1xrLeRERUXiwV/OYqGktTQmzrnoSb86HUIHkxHIM3uEZpC0oMGD8NIKl/wdwCzBvBG/jbn/0tFiILyxu/dS/w/DcRCAcwG5rFwPmBtPeXkBg8O4GxqWBO5/vc8Qv4xjNX4F8Imze4PgJMvpLTsXIlpcRSJPuHFyptH3/64/jMyc8UuxtEREQpLBf8ihxmj13xLgeHC+Ho8o63dGS977mJAK7NLObZqyx1fvWx5RcH1FziBW/6xleeBr51LxxKasD5o0s/wtTCFM7PnE/ccfb7OXRRAhI4cWkm55Xv9HxbJd3lPVGYUWujYyeu4Y++OYpwdB1rIBMREVHZsFzwa4MNDnvuiQoJgVtF+jq/hZUmWgzcTGxlli7xQh8AoEaZK3Sn8laMlAO9TmwkafS+IXJr3fuyntb0kZ65oi5vTURERNYLfoUQpqu8rbUZGcUNmebr/xz5Y0s4oaROtJtdjOC5S9qkt3SVIiKLRQtg1urRnluK5LQi2F1LZ/A7vi/gvTM5lrTL4Kc3Ijg7XvwPFutm3gv8qBMY+Wqxe0JERFQSrFnnN49oLJe5aBLAUiQGpyP9Z4G/jqmjtvfAZPTYcJJITMFN3yJeZXKMz/tGMaF48Q5bLYDly7g0PY+ex8fwljvrgUWfti+p4/0fAEQA2OFOcw25jR0WZISxQJUjPvPoOVyfWcQXf68p42PfEFWXZr497Fn1OZ+7FYPn0XP48gcOrvpYhbRmH+fCWn73VGmWiVsNGZ94yloZROns+4sfFrsLaX307ig+UML9u/z37y52F2iNWG7kd0/NHjjtuXfbMxXE7EIk7f4rjgjGZAihqBLPcc3fcjB4fiKAL/zkQtJW1UxsSd2uB4/ei6ZHm5UxjOEfEERisCejSyvsXwYTLycuf5yLlx4C/u19qz/1XPqJhWVf42Heqz4vl39a7J6UrI8//XF89PGPFrsbRERkMZYLfnds2oGqivy6/ZNzE2n3DdTM4VhN7l+Dm44x3Xw+/mskJiERRVgaclZT7qRtuPac6TmuI4yamB9+vJC6UzGvhJAuFWRBiSIqJfRwUiaPkl14VP13yqRqgxaBpgysXVy/BR3UeXplOLLnv6z+e/nJonajZM1PwxccRzi2ulQkIiIqP5YLflfi4ev/gqmFKfXGPX+DM9UF/sp7fjrh5mn7MfyP2PWUZvpoph5MJqcq/PuZcbw8Ppd22FNAAOOn0o4YAwBmrwGXn4rf7Jx+Ev+qTGW7AlPFHH0VCb9bYxz4exe/V+wulI/v/REwcSZrs2uBazg9dXodOkRERFZRFsGvPzKBn1z9iXpj58/jmZp71vR8MaSmT0wuKAhF1VHbdOOY/SevIxxVsod6Pm3xgItDy9t+1KUGvt4x4OnPJzR/Ra4sXSKeU1mOI68rMHhlcN3Pyecms+7nunHfC/cVuxtERFRCyiL4nZwL4dxEAJ9+9BVMZ1nAIpol9NT33h7xoPLpLEX848Ej8M2zYUwHQgnHeEwJZLx7cmCTMqntueU3dbngTVks44XJ04BxQYwCkFIiItemBq9xgQtrjPWukQWfWp4MyDq5cKXl6J44P5VYA5uIiKhMZA1+hRCtQogWIUR7mv0zQohBIURn4buX6m2737ai0a6nLp3HK+MB/OD0zYztTlTltsDFlpgfjvFRSCkxuxjJKQhJbjGfvNJbuvtJCf9iOO8wZ9wzCMSSJ/utbqTwm2e/iT9beHlVx0jnhy+Op8R6OV3zzBVgNjXNpKRICURyHIH/7h8CLxxbs65cmp7H15++jK/+9HLijltnMP7ddvzp8T+BdzHDQixEREQWljH4FUK0AoCUcki73WLSrE1KeUhKeXQN+pfi1XWvXlH8tgg16M1WpSsksq3WlujJC9O4ND0P33zqxJt0R0rX/SplAZ9xefFDOZuw/eSVGVyeXsB8KP1Inekx53MLYM7JJSyYVJKojXrx/01+AmJ+eYGJZ8afyemYKzFyZWZld/xRJ/DDtZv1f+qaP2XRjbyd/QHQ//78qmoAKbMN/+n4BfzB106sqiv6tcwtJX0wGvkang5eRiwyzzxZIiLasLKN/B4E4vW2PAAaTdq4hBDmxWc3OCklrs6qqQaR6Oq/qHdF1Ylz1+KLaaiBT0ALUpS06wyn7WHWFotKFJ+PTaLnyiMp++5aOIUKGYbj6sorDrw49SJO3jq54vsX24WJAD5//AK+PbLKkeWr2oeGRfMA/8kL06bbkyUvCjK1MIWIkr6UX1ymWrijDwA3RnI6vxV550PwZkl
3IiKi8pFtkQtX0u0Gkzb1AHxCiB4pZUfyTi1doh0Adu7cieHh4bw7GQwGE+5nC88jHMl/JG56zouL8MPrCyAcVgNMmZS/GovGEA6HMe1NHDUNV6vtwxEHpFQQi8XwHf84vnj6b/G/YouoCUYwHZ1HOBxDJBJBOBaGb96HcFjB1OQkYtEoorEowuEwgtEgwg71eNOzXoTDMUx7vaiKzUBuURAOhyGlxMLCAoaHh7H5zP0Ih8OYjIUQDsdw+cpl+CPDeIOhj36hIFwVRkjaMD3rxZnhYYzfvBm/zulZLyAl/EuzmA5644/nrrHTCMswXr41Fr9mfd/s7CzC4TDOv3Ie/sAwgsEgvAtebA6FMG0YVT4zPIzJBQWuSgFv0jF6JnsAAB07Uv40cGtyCRLAvz3yGN4UfQHveeX76Hb+EeZFDZ588klUOwRu3Qph93wA4Ug44bhG+uNwJoe/rVg0immvN+e/wzF/DNPeCJ57cQa7FswX2fB6vViMqgXZ0h13//VrqF70YuzZZ7G46Vp8+4tTUUx71RF9/b7G5zUQvowrhmNOe9UR+gXXArxhL7zw4hM/+ATu2Wo+ibNqcQJ3eb0IBW24oB3nRkDBh65/CoGpXRiu/iDe8OLXAQChyu0IKkHM2P04c+YMbJ7cpwRcmFEfp0uxGQyL6yl/B2YemnwI1398Hbc7b8/5PLo3eL0IV4cRSHouFSnx5I0o3rLLgWqHwOT0NK7MKfjo14bxgTdU5n0eIiLaeLIFv36owW1aUspeABBC+IUQrVLKAZP9vQBw4MAB2dzcnHcnh4eHYbzfwPEBTIb8eR9nU8M8Nu3ahQbFAeeUE2q/E9/glQo7vl2/iD9z7cYuUYGYlPjT2DU4obavqHBACBvsdjvOSwmbw4lglQ011bXYtrUK1xf8qLMvwlnhRH1lPcaX5rBj505cvTkBu90Bp9OJ2qoaOLVR3G2bGnB9wY/Xb1nEZsWJOdjgdFZgMRaBo/oqmpubMX3+sxjaLDHtBGKKDZeXqvGR5mYoN/4Fnql57HZVwVYBOGOLqIQd2zY3oLm5GY8e/xGcNy7EzyOlxNbgVmyrakBz80FcnAxg143PwxlzYlNdHZSw+tmmuVktBXfl/Ek4FSdee9c+VL+1GcPDw2iobgAiN7Gtavlz0Nt/5R1ov/8kfrX6LBpss0C9O/58DRwf0I65/PzpHrisfn1/NlKJ99f6cK66Gu7NwLizAW9/+5tRU+nAw1OnUTNRC+e8+vi/xezv5+YX054j2T+O/Du2NTTErzGbrVdn8LT/Iu68w4Xm5lebthk4PhAfkU3bh6VHAV8Y2976VqBh//Lxf/oozgRqEbU5l/ukXQ8AbNu9D3cajqk/Zps2edFQqz4HEWcEzW9Pc96ZK8DMd4Cte7BHO875iQAWXrajocKHV9/8ItCgPZdb92DzjBd1dS684fVvQPPeNMc0UXvFh2dmx7BYYYfcdTsaYvrfknaMxRkgvABs3QMAmA3Noud7PXjG/gy+0PyFnM8DABh/AbjZAGd0Hg0NDQmP+cgVH65eGcOuinp0vGM/Hhzsx/jiLJybKnP6+yAioo0vW/B7Asujv24ACbWctFHdk1LK0cJ3rfCu4yFcvwW8EX+Qts0VRwSzthi+r/jRbt+OSA6pAw9vDuIdytb47a1RL1BRkdAmYvOhIl7fN9FNewS28FH8P/N1OLd8GOyKXENUieJJWxA3HOroYEyRCGgVERYjMQRDUVyfWcS2HdVZ+2k874vXZ/HZofP4m1AESHNXPT/acf6HwFs/kLBvVFlAo20TAHW0DQBee+WbeGLXHE5f8+Obz17B777VbJHn3EVjCoIrXnXPXEwsQMHW7A01a1p1wjuGvS99Ec2Lr8PQ1t9ayzOtCwVRnIn8C6af+VVsvyNp53f/OyAV4HfUiXwxab5YS04e+z/p+6A9YbGk5P5zsS8DaF75OYmIVolLTa9coZeazvi9pjaK69YmurkME9/0ILhPu91qaL/mdm3etar730TuS8YmZ0om394XMlkZLcm8cg3emu8iXQXff988j6uOCIJC/Wp/IbxcD/ipG0/hR7YATlVmrhSQktE5Nw7EUgPHeTELiRi882oO5JJWexihOdRFJ+FUUqtdmGWLfkXJnKP6k7OT+NKLX8rYRpduEmLPE568J5r9w+B5PPbKZNr9U7UP4hYezeuYKxaeV5covvQkTB9FrQxdrTKbuq8ALk4G0TnwAhbDscw5vwarqQUSwRwURDBt9v9rBeXxFiILueUzG0+jr0rI+sdERJRGtpFfGKo4DBm2HdL+9QMY1X7WJfAFoH71vgpekX1lKAB4QVnATSS++a5kJHBz5HzaYzyizKYcUw/4BBQsxVa2QAUe/giEsrxss5QSXhnFqGMIVViEOpBvMDeO3/P+k/p78OeAmu3L+3IMnJKdmjyVU7vpYAg/unoLbsNpLkwEMJqu+oOUwKlvAfvfBWy5LWHXSzdm8dKNWbzzdTsStscUiT/8hjqpK4gMK+QlWVUIFdCqZJz9AeC/krW5lBJihY91soXIAh56+Qm1G6EIqpM+YczYYhisnMefyRo48jinbz6MaEzBji1VaVpICCjI9sjJbGVXAHQ+0Yk7au9A11u6cu5fMobARESUrCwWuTBzsSK1NJnRzEIEnwtN4Psxf0Jw+kKlOmI6n6UkmtHb/A8n3H5cW9xCAng45o9vTxllzjBaJqVMGDFN7k04puCabyHh698gFNgQQ33oaQglQzqB90L6fTk6j8+l3ffc+HN4dvzZtPu3xGbwr0MZagkHJ4Gz3wce//uc+xOKxhBTJISUWMlHmFPX/CtfFMJ/BYqUmJhbWn3JNBNmo5wPvPwAfub9NhbhN73Po5vm8bIjhMtIrIKQ7ZH5n/2n8bHvvJh2/87Idfz8YuGqe1wLXMveiIiIKA9lG/x+b3Pm1dWWIjGEowoCS1HMLUYQiqpBix40++2xrIGCvl9AoEKGsTk2h3BMwWJEvW8oGkPAkNNqOkr11GdNj/2DF8ZxcTKY9tyzCxHEFIlINDHYqo9OYXt0HFsnV1ErNpb5g0M29798Px54+YG0+98//Rn8pv/rGY6gPbLahwPffBjhaG5B5V3KGG4PX1KXg/aO5dplAIB/Ib+v4HUTMoLvLvgwPruEpy+ap4tsi4zn/7hKCSjmubMzIXXUXIG2f/Za9iLXBVCtzANYLs9XaKcmT+FChuW6k0eUc/27ICKi8mHJ4Ne2ym4rwp5236zNPJjQl93NJColrjhS3/QVKbFPuQoAWAyrQXUkqiAaW3kw8tSFqYz7k4+8HIjr/2YPCvRAIhJT4JnSAu18ArT56bR1bbPZHcmeJgCowc1V3wIuTqX/IABoo6NS/aDhkGFg5jLwfPoAHAAQDUMkLf4xtxTJ6W9hNjSLP37u73BSmcc/xibxI3sAYUjTflYr8/ht3z8Dz/xr1uMm8F4Axk/l3l6r5bv7kQ+kbVKoNAHP1HyBjqTmvevfFHzpxS/hc7HlnG5FSnQOnMbZ8bmE+wgBTAVCOD+x/CH3ZjDz6o5ERFQerBn8it
V2W+By5esw4ajJ614yze+6/7l0DQM1cynbzTIkjJPfCjEel2vQEj+XPkKW4eRC23nuVgD/54dn1Y1pRhpN+a8AvuXauN87dQMPv5A+AFkMr7wCQDSHdII/mE5ahNA4Snj+x2qwbvTwR3DnY3+UsOnPHjyFv/pu9pxxPdD6mZxH2PBB4+JkMCF9QpFAhb6oyXRibvhiOJZ5tDmU+duLFEv+/Nob3PAv4q8N133Nt4D7f3Y5p9zd1XrwlQfTflOwFInBGwyj72RieoQA4F9I/KD2qWc/tVZdJCIiC7Fk8FuoSUFfr72Rdt+MyQhw8le5xrd9L6KYjibuD0VieNG5FC9Tlk7yqGw2NsRQN524IleuKRj678/Nn8F5fA4Rs8j8Z/+c/phJAVckpuScx/r9Uzfx0Gj6xzxfZ8cD+NMHn4/fjipRhKGONC9FYin90r+SX6Zd4dIccPLLwGP/B1JKHD87oQaoC+bLQ8/Mh9UqDj9NzGu2IQY82wtEzVcTi2mPdVSrx3VhIogb/uXqGopUEoLJcxMBPH5uIs3VL0vJ+Y1FASX/r/ujUsFjyvLze23uGi7NXgKgPnc3DX393PELePzcFGYMwXm+/yullPDOhzJ+cAlHFZy+7sdiltH2HD7LERERAbBo8FtbUYtXNWxa03N47alvtsmDXLcMQW3M5J1/bGoeP940j9NZypTpHtmU+at73V2hl/AfZ4/l1FaXHBSMLqqjjIvCJPDQJsNJs3BmNnGE7aWbczj67+fy6gsA3JhJLakGAL8c/HHC7Qv4F/zU+bxp26nAUkJO50MXH8Jl3I8IAvijb47i499/Kb5PmIVFUkJKiYgerIaCOD8RxLeevYo/f/Tz+OPo1cwXceXphJuuqBcYOw5cPA5pcr6HNyc+v2NJKRAfXngZXziV54IPZh5qBy6o5dyaFrOX9Xvp5hymAiFcnk3Mge4+0Y3PnPwMFiILGA+9DKeyZFoKDwCemTiOy0gcnY0pMmX01SgYAa77FvGjM7fi2z7adxrfenb5cZ9djAAS8AVXl2dORESks2Twu7Vya8FGf1cqCol5s8BRo+T1dbDa9maWEWL9mHMikhBcxSAxheX7TgVCWAhHMZF0vAkZgV3Le40/ekndDAoF31Hyy9O95ltIu08CUPzXUz45TAfNR0dvM+T6/rfJ/4VqZQY+e/o6uDGEENUu4sKMWqVCgfphY2J2+UNH4OXU2r7zoSh+8+v/gN999M8R0fpX/cp38CcTfwW/9+GU9gAQgg/zSJePbExlUX9fNCS4XDXmgwencPf5z6cc4Zwv6YOE8e989jo2x1LTagDgytwVRJUovvSkB+euqsGkTQljT9h8SWajSExJGIFO9vWXv47nZ7+De71/i44p8wUmTnt/lrLt4mQQH+07DUWRKaPwESUS/3ueD6l/k3/x7RfgXwjj+Nnso93ZOMIzmJ28vurjEBHRxmPJ4LdQs3LmbbWFOZCJQB4rk+UaJse0r8ufqF7AScNo8g8cc/i8YRLQDf8i/AsRnK8IIxRVsBSNwYMQHpC+lLPOLEYQDC33tWfrDH6iBHBh5gKElFAgMVw9jygWcH3BPPDKJBJVEJi+jumZ1IDajxcy3nekagmuWPrFNGKQGMO/oq82sV9RLEJBBIrhA8EjTz6Xcv+nLkxhFi9hci4UX8nP5fkeAKAymibIxAO4ge/Gb5++5kc4w9f2V2XYfJXAi8uLJcZH2LNNJvzhR/HfkvOWAcwtTOHTJz6N7138Hn425jWkCKSO3d/0L6ppGyauSfP84pem1RF0PUXmTyb+ClsjOaRjhGZRrQQxMHodL92cSwiA/+6Zv0t5VKYC6geiIC6h+7luAMi7vJz+Gatp9C/heOQjed2XiIjKgzWD3wKZcWxDwF5XkGN9x5HfKl2KshzMBmy55WcaB0+NecRXsowYhyIKJmViG1ssjO3RcSwIBTEl9Uv6+as/w9bYFDwVEYxULmESw3jgws2EPswbApMtMfPR4oiWezoVSA24JvFYxn4DalUKV9Q8AH6wRp1UNm5PvLYbeAgX8S+4iH+Ob3vjwjNpz7GaPNF/On4BFybSTDyLpI6mXrAN46Z3FpEXH1rFWZNMqBPRjDVx1WtKCn0l8NffPQNvmuB3TJqPxpvZEVo+12wo9W9fQMHOyHX8Z//9OH3ND2D57z25du/MQgT3/+xy/PYtPBpvk295ObN0EwDYFAsA06uvX01ERNaXdYW3UlSopUsl7PA6dqI2TeCWjxu2CJBHsYJCLnYgDSN8SxEFCmTWQmbV8zewORbGt7WCFwvJC2q82I9XL4ZxNl4VTt0/txRBbZUDNiHigYmQCt4//RlApA8k5xZXVvfVkWY08sfKHOZsSUG/9yKqlHks2TYbto0Bj6ujpcl92xLzwSZrE7Yn/2W9hOUR9sfOmS+bnLZkncmqbvPCg6Vv/C5eFkC41oYbjgjqSnyWlmcqmPJNRgQRRDGPi/7zuP+Vf8VCyqi1NpIeTZo0+P0Po79iCXA644/1ycu+xDZmf0UCOHFr5bWpt4evIyYs+XJHREQFVtYjvxtFyFCxIRJT8MjmID7nSg4olk3Yowkrv+mSaxwnxufLYaGSdFcBiSWhxHNvzXzt6Uum2wO4iFmYlw7zVITTfsz5vuJP3ei9iF2RxFHFFx79KhbnzKs2VCvz2B96Ga7YNGZiEYwu3kpp83WbDxISk3gcj5xdXtkssBSNl2azIQq7jBhGv2XaTwFOZQkS6up8X3BM49uG0ni5fEAIQ+I8PoerocTJeOGYgkdeHM985zw+Mz52bhLXZ9RcbrMUnmFxHB58CTfnM+fVpkw0DE7ES7rpe+ZwNiFFJZ2vv5S68IlQIvht7z+jOk2aik4CCIaiqyqnR0REG4Mlg99Cjfzqgvatqz5GLgsfFFLyaKWx6sG5LEs3A8CsyTB1ppJsd4ZewfuXvpF6cs0/b51Bv0mN43Ts2uS7cfwQEzgOAPDaYviMy4tpbUTXm2bBkXRumowSj00Gsi6AURvz42NL1/EVZRrzJmPmEhH4cQrTyv2oVtRjjU0FcW4iACFj2Bu6iDvChkoJwcm0X78DwFXtcVaSP0Xk4NkqNZXihcXEfOmgSYBqUxL/Dp70voLz+BwWMkzU1L10Yw7eYBjjs+YT4ULassiKVjEjk2xzU2/hx/DCuNy1erxc/k/NTI3j6zVn0DD346xtAeDCzMWc2pUiIUSrEKJFCNGeZn+ntr9Tu90ohBgTQoxoP93a9hkhxKDejoio3Fgy+L3LdVdBj7ewhhPfVutZmX2lLBtE9jqoScGY2bKvP8pQam1XJH3ZLyHVdAC9WsW+0CuokGFEYjKeI7w7chmu6DRsMobT1/y4IzyGO8LLgcj26C2cd6rB2itOQ9CWHFjFosBo6oIHPkOgvDd8ATsj6ohkFAoyZfXqo+ZSaqsFp20J7Ih44sfVBWNfSrh97lYA4Wjm52I86UOGWUm5dIGfaV3mNLYEL+knAAB88QU171mvYR1VJM4qSwnBcCSmwPinMTmnBrlmKwLaZQR9Jy7ghevp893jC3iYMF51D
Mt/5xJqfvC5W8u51IqiABMvxRfqkJAIhqLx9KFrmEy4VsC8jN+/Pvr/4ub5R9L2qVQJIVoBQEo5pN1uSdrfCcBv2N8IoF5KuV9K2QTgQwB6tOZtUspDUsrU2ZNERGXAksGv2+Uudhdy8mxl+vJRuboZLUx90zMyt1rDRlFIzBsm4z2/KbUvN+0RXBSpweiesAcL4Wh8klPd0n34fe9n8QfTf28aSD1RNYOnq9Sv2ZUMI4Xy0uP49rXBlO0hyHjpLJuMxUdoX3SMYLhyAZ9xeRMqZJjJ9gFCV2VYLKMidiph32Q0jJuzS2lXwtukzEMCGEn420i94Ff0wE+qZcIuTi4HgnoedFSRmFuKQsrlhTOM16Jv8c6HsRSJoVKq53ywdg43tLJr/67MxUfso1JiIRzDjZnU0nX6OR+vVh9LAQV3hMewO5ha4iyZ0BYLSbfktm7/0kvqxDSTfbFIGIguAX71Q9hN/1L8byvdMX22mLrwiLEvUuJTJz+NWD4rFZaGgwD0unUeAI1J+xsAGHOdWvRAWOOWUur3dwkhrPEiSkS0BjgDBID+limFDSJ54tcqPFWdvv5trhbCMVRV2FO2m43/LUZiCKfp/0sy/0D8OzUBXDPUph2viEIkfV7y2mO4zZBnm27y17NVi3BKgbeEgFeFUvNEnzcEpmZHeHhzAP9pvhZQElch0z1UE0DUJGvjSkUEl0UYkMCT1Qvxg1+TYSwljQwqUuIrwov5mmjGEhAVSJ+b+++b51F/4xVcWviJ6X6HDAOwY7h6+fmYtIfgyhCLPfjcVQyfm8I/ixgE1NFUKSvj9XGjioJx/yJqZDReGi64FEXYrl5fKKrgwmQQOzb5cdGZ+q2J1x5DQMRwXqtOMR+OwZUSj6sbTmpBu037O7tluwQg8Zg+Www1seXn6LeuHUl/cRqbjOHXZ/8NPscOnG6wI/kJMC2L5jLe3zxlZ2/oAi5Xvi5pq8yYllKiXEm3G5Ju9wDoEEIMAdgPIJ6HI4Rol1L2GtrWA/AJIXqklB3JJ9LSKtoBYOfOnRgeHs67sx+9O78SdettZ3Xp9nElj3cmpXqdQGk/DwCfi1JS6OeCwa/BotiETTK3VdbWk2lOpZCYtkUx4Yghqn0VbpbKYJSxZmpqVayEwPdCRTglApldjAC25cBDyszneLJ6AW8JVWfsIwBM26P4cVIKxrmKMP5ThvvMCwWVJl9kKObr1OFTkXFgS+rjdV1EMO+IwmG4l8yjjEcMwJPKOTQs2hBZjKC2uiLl/MnP5kObb6IrvJx6EzE+30LgqraIyIKyXL/X2POFcAyoBF67dBq/P2c+eXBOxtIuoiIB9G71w5ZhyF0COO3MvRSa3TCBbdYWQ6VUjx1TMucIb4nNQMKlprdAItdZervnXwE8wwDeAECd1PlyZeo3FaFoDFUVOV5EafFDDVpNaaO6XdqI7hiWR4kB4BCAXkPbXgAQQviFEK1SyoGkY/Xq7Q8cOCCbm5vz7uwH/uKHed9nPX307ig+82Jpvv1d/t3mgh6vlJ+LUn4eAD4XpaTQz0XpXinFhUyC2kuOCC5tya+2cNqSXEBKRKZkGBmT2k84qgDOvLqQk8uO9COrj52bhJTZJ1EBSPnK2ygYiqbEVTFFxkdTjcbQG0+jAICGyC2kDrwl8i+EsQnqB5fk1QiTH1lHUl7snEm/w5jB/xLjiDpjacPBamUewBbTfQ/W5r9Aia5GmYUCW8Iy3clpFkKJQZj0e1Eo+NIWP1yKHW/EVrx4YxazdamPsR4s6+kV9okXsSNSicmK29XjZ+mjAPCDMw+gL7qECvwpvlFr/n8jFFVQleVYJeoElkd/3QAScn+0HF+3lHJACHFQz+cVQriS2rUDOCmlHF3zHhMRlShL5vwCwPvu+sM1Oe58ASo/lKJsdYWTVymbDKQf5VsIR02DxJWojc2gIZpaYsxMBBLTY8dzzs21G0ak8xVVZMKHDnseo7+1sZnl3FWTc89lWtQkrOYTP1e5iH/fFMQtm3qcENS8WX2CniOamlJz3RHBVUcEg5tSJ0n601XOyOFDxLbIOHZEbiRsq4tOxX/fF3oFm7xn8KpQ6iIS/7J1JuX8oUgs5e/NWC1DDaIlNin5fQvzsPcMMHUOQmZ5rlbyB1Fk2uisW5vo5jJMbBvU9o9qt1sBGPNM6pGYC9xnaIfkUV8ionJg2ZHfN+58DQDAYReZRzTzNOXYhc2x/EZUrWAhS33T5OoCwVAEyJChYDbZaCUaotmXydV9tyaAq44XYJfLEdueiHn9YACwZwuCsliKxLDJmd9/kRtJo9bBUBS1VY6Er/pfzpQ+MPUKhH23mp8MYNF2CTWLjyKGxA9lmxdvYMkkck0uN/dk9QJ+cTF7qgmwstJr9jS5ttmcvTmX9kuDnZEbUFbwjcJyDd/M15GtNFupMlRnGDJsO2T4PSWQ1dIhOgy3/QBGtR8GvkRUliwb/N5etwmvu60WdpvASzdW/pVuosLWD7YqKdXll7OZz6FebLJMC2Fkc1ULLI0xWoWSPpDMVJ4tX1VK4kjrw5sDuD2SW/LofCgWr0SRC5F0azx8CouwYXvOR1h22RHB5drlgFyfEFcKIgogbAoEJHZFbgCohgIJpwwhFMn8eCWn5Rg/jCX/L05eJTA5ZYOIiMqLZdMeAKDSkVoFYSWk9jAowgZAYMaxkjBj45CQCavGpdO3gjzSJwpQAUNKua7fXNsQRU3StwHnKsI4bpJeYCafwBcA/kWZStkmDRUmKk0qd+wLvZLTsV3RxOA3ZFKTuEaZw7v938j5mLk6P6FVgNCi01BMYm94DHtDFyC1nN+HagKmpfCA5VxzYHlBFps2um9csTB5guLtYQ+IiIh0lg5+VasfrV20bYbPsRNex04AwKw982SmcmCWN5rMZ4vBa88vtSCQKd81DxIStnUqV1WlrL5ec64UKTGeMFIpU1JWtkbNl2teiVAk9fkwHn9LLP0y2fmSErhhj+CmfTlVQs/P1VclvOyIQEhpmlYztxjBzHwYipToy7CaoEwpRWdSA9miqQ9ERLR6lg9+7QXKVJiz18VHgCl3I1kWjkh2MYell3OVrrZroTll7iW+ViuQtExxlVz9SPlq1EcnV3S/5BFmnV51wiHDqFfSB9b6aPlmJTXIXYpknoiYaVU5AJDRMK5M+jO2ISKijcvS0d5r6l4DIQTu2llT8GNHbGtQw4ssJxSNpQ3k1ooxJ1UYRigLNcmwWIzLKN8e9qBBZh9V3h65iQoZUkvTmdgcU4PjFyuXP6DUxmayHnfO91LWNkREtDFZOvi1CbX7m/OckZ+LHFJeqYjMKreZ1UNe/XnW/w8huYycPtkuuSKH1fxo88oWkHHICFxZ0i+CIlNl6lSLSv7LfRMR0cZg6eC31lmbvdEqRQVHgEtRxtXqNphdkaspo5kFrO5najrPXO5chLN8onyu0jy3env0Vvz67TKatVrDKUcuFbws/dJHRESrYOl3gHtfey/eccc71ujo6hvsLW2FKaK1dNOeflU7AKhIKtcVXuPo90weSxkDuY2QGye6mXkyTSWQ5JXj
CjJZjVUNiYjKlqWD3ypHFd62+21rcuywqASAspwEl21BDCq8f8tSNq5a5lZWrVjWIuVEJ/IMdl05VMMQ9sKUSSQiIuspv8guR1MVu3GrYi9iwoGwrQqAWhGiHFh9YtVGlGkxD0pUncOyyELwpY+IqFxZ/h1AkeqI044tlQU9roQNS7ZNAIAJxx74HDsQsqnLxOr/EpFFMfglIipbln8HWI9i9TFRgTl7PUJCHQGetdev+TmJaA3ZmPRLRFSuCl8jbJ3F5Prlp0aFE5crX7du5yOitZFvHjEREW0cHPklorLj8J0vdheIiKhILB/8Jo78Wv5yiGg9CFZ7ICIqV5aPFvUJbwDwGvwJAKACrvg24+9ERAAgOeGNiKhsWf4dYN/WfQCArdUV6m38V+zF+7AVdwMAduPX1/T8M44d8Dl2xG8rwhGvEkFEpYnBLxFR+bL8O0ClXS1xtsnpwJc/cBAf//W3wY5KbMev4Ha8F5XYvibnXbJtwnTFbZi11ycshDFecQeUDF+pTlbsif8etLvWpG9ElAWDXyKismX5ag8A8Huv/714EHzXjhq8fvcWvHxzDpuwN6HdbVurMD67VJBz3qpYPnbAvhUOROC3N0DChmiah1URdizYauK3px07URPzF6Q/RJQPljojIipXG2L44xdv+0W8eceb47ffeLsrYX8dDqj/bnYmbG+oceK1u2oL0AOBGfv2+AjwjGM7pituS2l13XknEt90BW469xXg/ESUD0Uw+CUiKlcbIvhN510/p+bibtZGgG1Jb3j1m52oqij8rG8JG4K2rSnbFcOIsL5KnILl89+quKPgfSEiMwx+iYjK1YYOfgHgt9+yF9kucwfeCTc+iFfjwwDU9IhCmqzYjXGxK377cuXrMF7xqoQ2UVGBJdvmlPtec+7H5crXpmwfd74qZVvE5kzZZjRRcXuuXUZUZD4WkZVx5JeIqHxtiJzfZDu3qMHrHlc1ml+7AzWV/wH/9NxN/M0v/Rd849kxfP/aFwEA+vIYm/EqOKAGnpvwKjgdU6h22rEYXt3qcVMVtwEQWLBtQdgWhlk4qb8J6yPBkxV7oMCOiHBCEXbIpBGqa879cCCKkKjGnL0eW2K++L4bFW7cFrmCSmUx5Twx4YifI2B3ISKcqI9Omvbb79iGWXs9XhXiQgC0UW34z/1ERJTGhgx+7759Kz7+Gz+PO+rVYO8X92/DL+7/7wCArRXzqMbtWMR1/NHdnXj9zl34k2++lHgAAbxmZy3GZxcxORcyPUdDjRPeYDhjP+ZNUh+SKXDgpnMfItpI64LNPAf5mnM/pLBBgR0xqGXdfFqZtX2hV+LtQqIalVCD3+mK27AtMg4IgSnHbiiwx5dntsso6rEc/CrCjmvO/bDLGKJCPf5kxW44ZDRtkExkVaEYV4YkIipXG3b4Y2/DJgiTrzbvqNuEPfjP2IPfxO21u7GpYhO+/IGD+PIHDuKDb3djK96Aaoeah7trSxU++AsfgNNhQ1WFPV42bQ/ei9qqCuw0SY/YijfgtjxrC4dFVUK5NDMxUZGQH2x0w3lnPA1ixtGAJdtmTFTcgaBtKy5Xvg6Xna9NqT2sJJV6CthdaqUKLfAFgAXbFszZ6+F17AQAzGNzQnsAuO5053aRWRj7F7apj6s+Uk1UaL7Q6r7VISIi68o68iuEaAXgB+CWUvbmu7/U/NobdmEyEMIT5yvg2lSRsO+X9jfg4L578bLv59H7Qi/+/OCf486td+LS0lPYU7MHz4y+CQDQ+Wuvw2t3/RYOP/TB+H2rcBt+99X/HU9emAYAVOODWHJ9E1JK2O0CnonUUWIHanAb3o1rOLaqa4qIyvjvEvacJs5J2HDVeRcaYpMI2FwZF+YI2OsQsNchHAnjNjGDamUeXscueB1qHvOVytfAJhUAErXKLFzRadyq2Ivt0ZuICQecilpeLmKrRIUSwoKtFpuUgHZsF3yO7RAA9oYuAADGK/bCLqOICUdC6kXA7kKtoTScfrxMJituR0zYoUAdNbdFAtgjJwCoec0CEmFRiWolCADwOnaiITqRcAyfYwcUYVdH0fMQtlXFr51KS6h6W7G7QERERZIx+NUCW0gph4QQ7UKIFinlUK77S5EQAu9/2z68/237TPc77Db8wvZfwOfe+TnYbepI6/9+2/8GALy6chKTc6Hl8mjayPKv3v672Fbhxm+/dS9ur9uEm7OLuDFTg4/9+n0AgPnIPD75w0/CVz0HCQmn3YadsfdibNwOB2pwB+6FDU6867W34zvX/i/sNgGfllIh4IBEFL9wx1bEFOClG7Ooxm4s4ib+/h1/hwc9/4hT17zYVluJCrsNE34FCtSA8NWbmzEeewrBpSjqcQAKYvDj+fi1KsKBKcdu08dhL96Hq3gwZbs+Cvwq/D6u4AEAaiAdEzYtFaQCfrsaWMw4fxWRikuIhsPY7LRhLmKDgISEQKVc1Ea8hXYMNU0jKrbDia0IiSkA6qj25koHRCyMmVg1AnYXdkRuQBF23Kx4Fewyhh3RG/A5diCKCsSEA3s3hXFrXkFYpI7ML4rNuOx8HQRiAGzx8+upIwG7KyH4vVVxR3wiYrByOY3Fhhi2RcbhdeyEgMSO6E3YZQxLtk2Ys21FxOaEhA1OGYKQCpZsm+GQYeyM3kCFEtJG2gW2xGYSzrUrck19PIQNc/Y6bI16E/o/VbEb26Pj8YT16043BCScMoQlsQlOuYQKGcaCrQZ1sWlsjs3F7ztRcTskbNgVuWr6nKcjhQ3CsIx4ssuVr8MmJYCtMZ9pvnkpevudry92F4iIqEiyjfweBOLDkh4AjQCG8thvWXrga9T82h0Jt+/7tX/Ew6fH8VuN7njJtJbX70y53+aKzfg116/hrqa7cCN4A/td+7GtehtGrvgQjUm8Yc+b8b1TN9HadDv+61vvw6nJU9i3dR+2aaNTl2Yv4bbNt6HKUYWJuSW8PD6Hd2p9Obj305gLz2GTYxOuBq7CZd+Hrz59Dr/9i7uwr24nZkNtuOybwT7XHjx65haCof+Cn83ehwtTXvxp0x+iCjvxmtsq8PCZy5it/BEGz51DQ+UefON978b5iXfgwvzjGLz8E9gC78LPOeZwpfYqPnj3f8O+uu246vsPuG1rJZ7xTGEqcgFvv/N1cFW6cH3uFv7ppz/GX73j9/CziccweusM5qITOLT33XjrbW/GWa8HwdASek8+gorFJmzdM4TzN4FdNc34yNveg7HJRZyaewiKUomWPe9F82t3QFEkHj8/heMTX8HTVzdBCKC54X2wb30O04vbUBOO4g2bfguX5k/jPT//Znz1xDMQVVexudKB19W8C2MLT+Dmtbvxy/UueKtrsbNuEb/+2jfiU0//K8Z8N3HDeSeiwoGGmkooSgd27QjgSugMfmfvBzE+u4Ch699BBHOowFYIRwD/45d+D91PHsNevA822DFXcRYKwnjXvrfg6ZmvY3Z2O27Df8SsOINp8TQ2404c3F+Dp69WIRxdDiTvrHwXfMobsWvHFC5PDOK60403Rd6Is84l/Pq+38DxSz/GnpgLC/JnuGFfgGK
7AzHnBzEW/gacTie24+2wYzOqxC748By2iJ9DEBcRxWlMOXZjyrEbdfJVgHISi7bNsNlsuGx7HWpjfuwWf4TK6KPwOfbium0UALBXvhuN+704O/k0FpUYri6o344IKKhSFrCoLdKyJTYDYXsNHLIKr8KvQNpicNZexcTcDxERlaiS83h9aB437ng9XDEfZOAWfJvcsHnPY8begJhtLzaHXsSMc0881aZSWcRtkSuQwoZw/euwafYduGw/jjvFTQhIXJS3Q8gY7tzyVlQtvBke25cgQkEs2GogoGBX5DoqlQXcqrgDAhK3RStxyWmHhA0CFQDCqItJ3Fntwx31uVc+ISKijUVImX7ihxCiB0CPlHJUCNEC4JCUsivX/VqbdgDtALBz586mBx9MHU3MJhgMoqamJnvDEsZrSKX/7ZnlZmcSlVFEZATVK8gJLvQ1KFIdOza7Bikl/CGJuqrcU+v9SwqqHQJCAE67QERRs8HttsTjzwZmUVNTA7vJUtrhmIQigfmIRH2VSOibIhUoUGCDHdcCCipsArtrbJBSIqIA4RhQ40xsbzPkh+vtQjKIzVowbdwXkiEsRRywCTtqKtTa2lLKeB9iikQwEoPDJhBdCmJLzZaE/kkp4QtFUFfpgJTqdiEkYoihQlQkHEu3qCzCIRxwwAEJQJHAUhSodgA2oT43oZiE05b/35rRO9/5zhEp5YEVH8CCDhw4IE+ePJn3/fb9xQ/XoDeF89G7o/jMi6U53/vy37+7oMcr5eeilJ8HgM9FKVnpcyGEMH3dznalfgD1q9gPLQ+4F1BfSJubm7OcMtXw8DBWcr9SwmsoDRvhGoCNcR0b4RqIiMh6sg1JnQDg0n53AxjMcz8RERERUcnIGPxKKQcAuLWUBpc+mU0IMZhpPxERERFRKcqa4CGlPKr9OmTYdijTfiIiIiKiUrRhF7kgIiIiIkrG4JeIiIiIygaDXyIiIiIqGwx+iYiIiKhsMPglIrIAIUSrEKJFWzjIbH+ntr9Tu90ohBgTQoxoP925HIeIaKNj8EtEVOKEEK0AYCg32ZK0vxOA37C/EUC9lHK/lLIJwIcA9GQ7DhFROWDwS0RU+g4C8Gi/ewA0Ju1vAOAz3G5JqrvullJ6cjgOEdGGt64LOY+MjEwLIa6s4K7bAEwXuj/rjNdQGjbCNQAb4zqsdg2vKuK5XUm3G5Ju9wDoEEIMAdgPYEzfIYRo15aZz+U40NIh9JSIoBDi3Ar7XLI+XMJ/e2pySnko5ecB4HNRSlbxXJi+bq9r8Cul3L6S+wkhTkopDxS6P+uJ11AaNsI1ABvjOjbCNawjP4D6dDu1Ud0uIYQbauDrMew+BEAPfjMeRztWr6H9hsS/vdLA56F0lNtzwbQHIqLSdwLLo7ZuAIPGndrktlY9tUFbeh5CCBcSZTwOEVE5YPBLRFTitGDWrU1QcxkmrA1q+0e1260AjhjuWg9DLnC64xARlZN1TXtYhY3wFRyvoTRshGsANsZ1bIRrWDdSyqPar0OGbYcMvw+Y3McDoCPbccoQ//ZKA5+H0lFWz4WQUha7D0RERERE64JpD0RERERUNhj8EhEREVHZKOmcX23yhh9qgfaSzUfR+tlhzL8z63uu29abNiPcrf0clFJ25dPfUrgGrR/6alWHrHoNRkKIbqtehxBiBsBJAIN6jqnVroGsQQjRD+CIPulP29YJYFRKOSSE6IH62uaB+vc4YGjXA3VSoEtrr/9/m4GaA+nS9ndp+dOUBz72xae9nt4npaxLs78dgKfcJr+W7MivlZbhTJ5oYtb3XLetZ78NDgM4YCiP1G61a9CWc23U+tEohHBb7RqMtD64td+teB1tUspDSYGv1a6BrKEHwMeStt2rBb6tAMa0v8UOGCb5aUFzv5SyTRu4MFbJ8Egpu7T7fAhA/xpfw4bDx75k3Augj6+liUo2+IW1l+E063uu29adlLLXMMKmf0q32jWMSimPaqPYngxLuZbsNei0hQqMIx1WvA6Xdh06K14DWYD+gVe/rb3J64GWH8Ahvd6xlNKvtXEjqdSbvs/k+H4APdoIGeXODz72RWWo890DQ9UXIYRLCDGofQhp07a5hRA9Qoh+fRBCqx8+KIToFkKMaANjPdr9LK2Ug19X0u2UZThLmCvpdkMe24pGe1HyaS9KrqTdlrgGAAewvLSrK2mfVa7BnfQ1nytpvxWuox6AT/tqE7DmNZB1DBgCpA6ob/Z6YNwDoF9789aD5OQPmNl4ADQVqrPlgI99STgMoEdLCWo0BMPt2vY2aAvdSCk9UsoObVtyecQuqB8oXdqIvAsWV8rBrx9ZluEsYX6k9j3XbcXUqv1hAxa9Bu0Fd78hb9RS1yCEaDHJvfLDYtehfZvgB+C36nNBlmIc2XIZPzxKKQe0r9bbANynbfZgOa3IpY1mzWQ4vr5sNOWBj33RdQBoE0J0a7cPa//uBzCa3Fgb6W2H9vxo9P9LXsPvfpPVIy2llCe8WXkZTrO+u3LcVhTa0qh6fmYjLHYN2n/uMS19ww81gLLUNWh8+upbUFfisuJz0Q7gpHECEix2DWQtUkqPEMKnvQ7o3zZACGH8FsWX1N74YbNDCHEgwyk6ANyzJp3foPjYF5f2Te5JfUBL+xauH+pkwhGoqUIeaN+uaa/bLm1/h8khN5SSDX6llANCiE5hgWU4tT4e0ALIgXR9z3VbkfrfLYTQJ410We0aoL7hGZdt1SsHWOkajMvU6i9Eaf8vlPB19EF9LvTJa/pESitdA1lPD9RZ7V2GbS6hLgHth/ph6kP6DinlIW3UsQNqcHbScD+3Fki7oH6Q/lC6vFRKi499ccXTf4CEDx1uqK/Rx4UQh7D8TZsHQBfKJNWMK7wRERERUdko5ZxfIiIiIqKCYvBLRERERGWDwS8RERERlQ0Gv0RERERUNhj8EhEREVHZYPBLRERERGWDwS8RERERlY3/H8CY0if1StL5AAAAAElFTkSuQmCC",
294 | "text/plain": [
295 | ""
296 | ]
297 | },
298 | "metadata": {
299 | "needs_background": "light"
300 | },
301 | "output_type": "display_data"
302 | }
303 | ],
304 | "source": [
305 | "fig, ax = plt.subplots(1, 2, figsize=(12, 5))\n",
306 | "ax[0].plot(logs_vsgd['train_loss'], label='VSGD', alpha=0.7)\n",
307 | "ax[0].plot(logs_sgd['train_loss'], label='SGD', alpha=0.7)\n",
308 | "ax[0].plot(logs_adam['train_loss'], label='Adam', alpha=0.7)\n",
309 | "ax[0].legend(fontsize=20)\n",
310 | "ax[0].grid()\n",
311 | "ax[0].set_title('Training loss', fontsize=24)\n",
312 | "\n",
313 | "ax[1].bar(['VSGD', 'SGD', 'Adam'], [logs_vsgd['test_acc'], logs_sgd['test_acc'], logs_adam['test_acc']]);\n",
314 | "ax[1].grid()\n",
315 | "ax[1].set_ylim(0.97, 1.0);\n",
316 | "ax[1].set_title('Test accuracy', fontsize=24);"
317 | ]
318 | },
319 | {
320 | "cell_type": "code",
321 | "execution_count": null,
322 | "metadata": {},
323 | "outputs": [],
324 | "source": []
325 | }
326 | ],
327 | "metadata": {
328 | "kernelspec": {
329 | "display_name": "BASE_ENV",
330 | "language": "python",
331 | "name": "base_env"
332 | },
333 | "language_info": {
334 | "codemirror_mode": {
335 | "name": "ipython",
336 | "version": 3
337 | },
338 | "file_extension": ".py",
339 | "mimetype": "text/x-python",
340 | "name": "python",
341 | "nbconvert_exporter": "python",
342 | "pygments_lexer": "ipython3",
343 | "version": "3.6.12"
344 | }
345 | },
346 | "nbformat": 4,
347 | "nbformat_minor": 4
348 | }
349 |
--------------------------------------------------------------------------------