├── utils.py
├── .gitignore
├── README.md
├── layers.py
├── mnist.py
├── example.svg
└── example.ipynb
/utils.py:
--------------------------------------------------------------------------------
1 | import cmath
2 |
3 |
4 | def solve_quadratic(a, b, c):
5 | delta = (b**2) - (4*a*c)
6 | solution1 = (-b-cmath.sqrt(delta))/(2*a)
7 | solution2 = (-b+cmath.sqrt(delta))/(2*a)
8 |
9 | return max(solution1.real, solution2.real)
10 |
11 |
12 | def get_equivalent_compression(input_dim, output_dim, nhu, nhLayers, compression):
13 | '''
14 | Attempts to find a suitable hidden layer dimension
15 | to match the number of parameters in a model compressed
16 | with HashedNets.
17 | '''
18 | if nhLayers == 1 or compression == 1.0:
19 | return compression
20 |
21 | # Number of compressed parameters for the HashedNet
22 | # Assumes we hash all biases apart from the output
23 | N = input_dim * nhu + (nhLayers - 1) * nhu**2 + nhu * output_dim
24 | biases = nhu + nhu * (nhLayers - 1)
25 | compressed_N = N * compression + biases * compression + output_dim
26 |
27 |     # Solve for the equivalent compression rate x (equivalent hidden size = nhu * x)
28 |     # inp*nhu*x + nhu*x + (layers-1)*(nhu*x)**2 + (layers-1)*nhu*x + oup*nhu*x + oup*x
29 |     # = (inp*nhu + nhu + (layers-1)*nhu + oup*nhu + oup)*x + (layers-1)*(nhu*x)**2
30 | a = (nhLayers - 1) * nhu**2
31 | b = nhu * (input_dim + 1 + (nhLayers - 1) + output_dim) + output_dim
32 | c = -compressed_N
33 |
34 | equiv_compression = solve_quadratic(a, b, c)
35 |
36 | c_nhu = nhu * equiv_compression
37 | equiv_N = (input_dim * c_nhu + c_nhu
38 |                + (nhLayers - 1) * c_nhu**2
39 | + (nhLayers - 1) * c_nhu
40 | + c_nhu * output_dim + output_dim)
41 | assert abs(equiv_N - compressed_N) < 10, 'Equiv: {} vs. compressed {}'.format(equiv_N, compressed_N)
42 |
43 | return equiv_compression
44 |
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HashedNets
2 | This is a PyTorch implementation of [HashedNets](http://www.jmlr.org/proceedings/papers/v37/chenc15.pdf) by [Chen et al. (2015)](#References). The original authors have published a (Lua)Torch implementation [here](https://www.cse.wustl.edu/~ychen/HashedNets/).
3 |
4 | HashedNets implements parameter sharing by tying together weights that a hash function maps to the same bucket. The output dimension of each layer remains the same, while the number of unique values in each weight matrix drops with the compression factor. The aim is to maintain performance as the model is compressed by exploiting redundancy in the parameters. The benefit of using a hash function is that only the hash seed needs to be stored to record which parameters are tied, saving space.
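
To make the weight-sharing scheme concrete, here is a minimal, self-contained sketch of the idea. It is an illustrative toy, not the actual `HashLinear` implementation in `layers.py`, and the helper name `toy_hashed_weight` is made up for this example:

```python
import torch
import xxhash


def toy_hashed_weight(out_features, in_features, compressed, seed=2):
    # Map every (i, j) position of the virtual weight matrix to an index
    # into the small compressed parameter vector via a hash of the position.
    idx = torch.empty(out_features, in_features, dtype=torch.long)
    for i in range(out_features):
        for j in range(in_features):
            key = '{}_{}'.format(i, j)
            idx[i, j] = xxhash.xxh32(key, seed).intdigest() % compressed.numel()
    # Positions that hash to the same bucket share the same underlying value.
    return compressed[idx]


compressed = torch.randn(8)                # only 8 unique trainable values...
W = toy_hashed_weight(5, 10, compressed)   # ...expanded into a virtual 5x10 matrix
print(W.shape, compressed.numel())         # torch.Size([5, 10]) 8
```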
5 |
6 | ## Example results on MNIST
7 | This is an approximate reproduction of a figure from the original paper (produced in `example.ipynb`). Note that the results differ slightly from the original paper because of implementation differences, and because I have neither run the Bayesian hyperparameter optimisation used in the paper nor averaged over multiple runs.
8 |
9 | 
10 |
11 | ## Dependencies
12 | - [PyTorch](https://pytorch.org)
13 | - [xxhash](https://pypi.org/project/xxhash/)
14 | - [TorchVision](https://pytorch.org) (to run the MNIST example)
15 |
16 | Tested with Python 3.6.8, PyTorch 1.0.1.post2, xxhash 1.3.0.
17 |
18 | ## Usage
19 | The main component of interest is probably the `HashLinear` layer in `layers.py`. See `mnist.py` for an example model using the layer.
20 |
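A rough usage sketch follows (the compression value below is arbitrary, chosen only for illustration):

```python
import torch
from layers import HashLinear

# A layer with a virtual 784 -> 1000 weight matrix, but only roughly 1/64 as
# many unique weight values (biases are hashed as well by default).
layer = HashLinear(784, 1000, compression=1 / 64, hash_seed=2)

x = torch.randn(32, 784)   # a batch of flattened 28x28 images
y = layer(x)               # shape: (32, 1000)
```
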
21 | To see possible arguments to the script, run:
22 | ```sh
23 | python3 mnist.py --help
24 | ```
25 |
26 | To run the MNIST example with default hyperparameters, with or without hashing:
27 | ```sh
28 | python3 mnist.py --compress 0.015625 # No hashing -> Test accuracy: 94.09%
29 | python3 mnist.py --compress 0.015625 --hashed # With hashing -> Test accuracy: 96.83%
30 | ```
31 |
32 | ## References
33 | - Chen, W., Wilson, J., Tyree, S., Weinberger, K. and Chen, Y., 2015, June. [Compressing neural networks with the hashing trick](http://www.jmlr.org/proceedings/papers/v37/chenc15.pdf). In International Conference on Machine Learning (pp. 2285-2294).
34 |
--------------------------------------------------------------------------------
/layers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import Parameter
4 | from torch.nn import init
5 | import torch.nn.functional as F
6 | import xxhash
7 | import math
8 |
9 |
10 | class HashLinear(nn.Module):
11 | '''
12 | This layer implements a linear hashed network as in
13 | - Chen, W., Wilson, J., Tyree, S., Weinberger, K. and Chen, Y., 2015,
14 | Compressing neural networks with the hashing trick.
15 | In International Conference on Machine Learning (pp. 2285-2294).
16 |
17 |     It is largely based on the above authors' (Lua)Torch implementation:
18 | https://www.cse.wustl.edu/~ychen/HashedNets
19 |
20 |     Note that some static hashing parameters are wrapped with
21 |     `Parameter(..., requires_grad=False)` so that they are moved
22 |     to the device along with the layer; i.e. check requires_grad
23 |     when counting the total number of trainable parameters.
24 | '''
25 | def __init__(self, in_features, out_features, compression,
26 | xi=True, hash_bias=True, bias=True, hash_seed=2):
27 | super(HashLinear, self).__init__()
28 |
29 | self.hash_seed = hash_seed
30 | self.use_bias = bias
31 | self.hash_bias = hash_bias
32 | self.xi = xi
33 |
34 | self.in_features = in_features
35 | self.out_features = out_features
36 |
37 | # Virtual sizes
38 | self.size_w = in_features * out_features
39 | self.size_b = out_features
40 |
41 | # Compressed sizes
42 | self.hsize_w = math.ceil(self.size_w * compression)
43 |
44 | if self.hash_bias:
45 | self.hsize_b = math.ceil(self.size_b * compression)
46 | else:
47 | self.hsize_b = self.size_b
48 |
49 | self.h_weight = Parameter(torch.Tensor(self.hsize_w))
50 | if bias:
51 | self.h_bias = Parameter(torch.Tensor(self.hsize_b))
52 |
53 | self.xxhash = xxhash
54 |
55 | self.hash_config('W')
56 | if bias and self.hash_bias:
57 | self.hash_config('B')
58 |
59 | self.reset_parameters()
60 |
61 | def hash_config(self, WorB):
62 | '''
63 |         Builds full-size (virtual) index matrices, stored as attributes,
64 |         whose entries point into the compressed parameter vector.
65 | '''
66 | assert WorB == 'W' or WorB == 'B'
67 |
68 | if WorB == 'W':
69 | h_size = self.hsize_w
70 | dim1 = self.out_features
71 | dim2 = self.in_features
72 | self.idxW = self.hash_func(h_size, dim1, dim2, 'idxW')
73 | elif WorB == 'B':
74 | h_size = self.hsize_b
75 | dim1 = self.out_features
76 | dim2 = 1
77 | self.idxB = self.hash_func(h_size, dim1, dim2, 'idxB').squeeze()
78 |
79 | if self.xi:
80 | # Returns 1 and -1
81 | if WorB == 'W':
82 | self.xiW = Parameter(self.hash_func(2, dim1, dim2,
83 | 'xiW').add(1).mul(2).add(-3).float(),
84 | requires_grad=False)
85 | elif WorB == 'B':
86 | self.xiB = Parameter(self.hash_func(2, dim1, dim2,
87 | 'xiB').add(1).mul(2).add(-3).float().squeeze(),
88 | requires_grad=False)
89 |
90 | def hash_func(self, hN, size_out, size_in, extra_str=''):
91 | '''
92 | Hash matrix indices to an index in the compressed vector
93 | representation.
94 |
95 | Returns a matrix of indices with size size_out x size_in,
96 | where the indices are in the range [0,hN).
97 | '''
98 | idx = torch.LongTensor(size_out, size_in)
99 | for i in range(size_out):
100 | for j in range(size_in):
101 | key = '{}_{}{}'.format(i, j, extra_str)
102 |
103 | # Wrap hashed values to the compressed range
104 | idx[i, j] = self.xxhash.xxh32(key, self.hash_seed).intdigest() % hN
105 |
106 | return idx
107 |
108 | def reset_parameters(self):
109 | stdv = 1. / math.sqrt(self.in_features)
110 | init.uniform_(self.h_weight, -stdv, stdv)
111 | if self.use_bias:
112 | init.uniform_(self.h_bias, -stdv, stdv)
113 |
114 | def forward(self, x):
115 | # self.idxW is a matrix of the full size of type LongTensor,
115 |         # which contains indices that select elements from h_weight
117 | if self.use_bias:
118 | if self.hash_bias:
119 | if self.xi:
120 | return F.linear(x, self.h_weight[self.idxW] * self.xiW, self.h_bias[self.idxB] * self.xiB)
121 | else:
122 | return F.linear(x, self.h_weight[self.idxW], self.h_bias[self.idxB])
123 | else:
124 | if self.xi:
125 | return F.linear(x, self.h_weight[self.idxW] * self.xiW, self.h_bias)
126 | else:
127 | return F.linear(x, self.h_weight[self.idxW], self.h_bias)
128 | else:
129 | if self.xi:
130 | return F.linear(x, self.h_weight[self.idxW] * self.xiW, None)
131 | else:
132 | return F.linear(x, self.h_weight[self.idxW], None)
133 |
--------------------------------------------------------------------------------
/mnist.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import random
3 | import math
4 |
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torch.optim as optim
9 | from torchvision import datasets, transforms
10 | from torch.utils.data.sampler import SubsetRandomSampler
11 |
12 | from layers import HashLinear
13 | from utils import get_equivalent_compression
14 |
15 |
16 | def parse_arguments():
17 | parser = argparse.ArgumentParser(description='PyTorch HashedNets',
18 | formatter_class=argparse.ArgumentDefaultsHelpFormatter)
19 | parser.add_argument('--seed', type=int, default=1,
20 | help='random seed')
21 | parser.add_argument('--nhLayers', type=int, default=1,
22 | help='# hidden layers, excluding input/output layers')
23 | parser.add_argument('--nhu', type=int, default=1000,
24 | help='Number of hidden units')
25 | parser.add_argument('--hashed', default=False, action='store_true',
26 | help='Enable hashing')
27 | parser.add_argument('--compress', type=float, default=0.03125,
28 | help='Compression rate')
29 | parser.add_argument('--hash-bias', default=False, action='store_true',
30 | help='Hash bias terms')
31 | parser.add_argument('--lr', type=float, default=0.01,
32 | help='Learning rate at t=0')
33 | parser.add_argument('--decay-factor', type=float, default=0.1,
34 | help='Learning rate decay factor')
35 | parser.add_argument('--batch-size', type=int, default=50,
36 |                         help='Mini-batch size (1 = pure stochastic)')
37 | parser.add_argument('--validation-percent', type=float, default=0.1,
38 |                         help='Fraction of training data used for validation')
39 | parser.add_argument('--momentum', type=float, default=0.9,
40 | help='Momentum (SGD only)')
41 | parser.add_argument('--dropout', type=float, default=0.25,
42 | help='Dropout rate')
43 | parser.add_argument('--l2reg', type=float, default=0.0,
44 | help='l2 regularisation')
45 | parser.add_argument('--epochs', type=int, default=50,
46 | help='Maximum # of epochs')
47 | parser.add_argument('--patience', type=int, default=2,
48 | help='Number of epochs to wait before scaling lr.')
49 | parser.add_argument('--no-xi', default=True, action='store_false',
50 | help='Do not use auxiliary hash (sign factor)')
51 | parser.add_argument('--hash-seed', type=int, default=2,
52 | help='Seed for hash functions')
53 | parser.add_argument('--no-cuda', action='store_true', default=False,
54 | help='disables CUDA training')
55 | parser.add_argument('--save-model', action='store_true', default=False,
56 | help='For Saving the current Model')
57 | args = parser.parse_args()
58 |
59 | print(args)
60 | return args
61 |
62 |
63 | def load_data(batch_size, kwargs):
64 | '''
65 | Load MNIST data. Largely from PyTorch MNIST example.
66 | '''
67 | train_dataset = datasets.MNIST('../data',
68 | train=True, download=True,
69 | transform=transforms.Compose([
70 | transforms.ToTensor(),
71 | transforms.Normalize((0.1307,), (0.3081,))
72 | ]))
73 |
74 | num_train = len(train_dataset)
75 | indices = list(range(num_train))
76 | random.shuffle(indices)
77 | validation_percent = 0.1
78 | split = int(math.floor(validation_percent * num_train))
79 | train_idx, valid_idx = indices[split:], indices[:split]
80 |
81 | train_sampler = SubsetRandomSampler(train_idx)
82 | valid_sampler = SubsetRandomSampler(valid_idx)
83 |
84 | train_loader = torch.utils.data.DataLoader(
85 | train_dataset,
86 | batch_size=batch_size, sampler=train_sampler, **kwargs)
87 |
88 | valid_loader = torch.utils.data.DataLoader(
89 | train_dataset,
90 | batch_size=batch_size, sampler=valid_sampler, **kwargs)
91 |
92 | test_loader = torch.utils.data.DataLoader(
93 | datasets.MNIST('../data', train=False,
94 | transform=transforms.Compose([
95 | transforms.ToTensor(),
96 | transforms.Normalize((0.1307,), (0.3081,))
97 | ])),
98 | batch_size=batch_size, shuffle=True, **kwargs)
99 |
100 | return train_loader, valid_loader, test_loader
101 |
102 |
103 | def train(model, device, train_loader, optimizer, epoch, log_interval=5):
104 | '''
105 | One epoch of training.
106 | '''
107 | model.train()
108 | train_loss = 0.0
109 | for batch_idx, (data, target) in enumerate(train_loader):
110 | data, target = data.to(device), target.to(device)
111 | optimizer.zero_grad()
112 | output = model(data)
113 | loss = F.nll_loss(output, target)
114 | loss.backward()
115 | optimizer.step()
116 | if batch_idx % log_interval == 0:
117 | print('Train Epoch: {} [{}/{} ({:.2f}%)]\tLoss: {:.6f}'.format(
118 | epoch, batch_idx * len(data), len(train_loader.sampler),
119 | 100. * batch_idx / len(train_loader), loss.item()), end='\r')
120 | train_loss += loss.item()
121 |
122 | return train_loss / len(train_loader)
123 |
124 |
125 | def evaluate(model, device, loader):
126 | model.eval()
127 | loss = 0
128 | correct = 0
129 | with torch.no_grad():
130 | for data, target in loader:
131 | data, target = data.to(device), target.to(device)
132 | output = model(data)
133 | loss += F.nll_loss(output, target, reduction='sum').item()
134 | pred = output.argmax(dim=1, keepdim=True)
135 | correct += pred.eq(target.view_as(pred)).sum().item()
136 |
137 | loss /= len(loader.sampler)
138 | accuracy = 100. * correct / len(loader.sampler)
139 |
140 | return loss, accuracy
141 |
142 |
143 | class Net(nn.Module):
144 | '''
145 | Standard feedforward network with ReLU activations
146 | and optional interleaving dropout layers.
147 | '''
148 | def __init__(self, input_dim, output_dim, nhLayers=1, nhu=1000,
149 | compress=1.0, dropout=0.25):
150 | super(Net, self).__init__()
151 | self.nhLayers = nhLayers
152 | self.input_dim = input_dim
153 |
154 | c_nhu = round(nhu * compress)
155 |
156 | self.dropout0 = nn.Dropout(dropout)
157 | self.linear1 = nn.Linear(input_dim, c_nhu)
158 | self.dropout1 = nn.Dropout(dropout)
159 |
160 | for layer in range(2, nhLayers + 1):
161 | setattr(self, 'linear' + str(layer), nn.Linear(c_nhu, c_nhu))
162 | setattr(self, 'dropout' + str(layer), nn.Dropout(dropout))
163 |
164 | self.linear_out = nn.Linear(c_nhu, output_dim)
165 |
166 | def forward(self, x):
167 | x = x.reshape(-1, self.input_dim)
168 | x = self.dropout0(x)
169 | x = F.relu(self.linear1(x))
170 | x = self.dropout1(x)
171 |
172 | for layer in range(2, self.nhLayers + 1):
173 | x = F.relu(getattr(self, 'linear' + str(layer))(x))
174 | x = getattr(self, 'dropout' + str(layer))(x)
175 |
176 | x = self.linear_out(x)
177 | return F.log_softmax(x, dim=1)
178 |
179 |
180 | class HashedNet(nn.Module):
181 | '''
182 | Feedforward network with hashed linear layers,
183 | ReLU activations and optional interleaving dropout layers.
184 | '''
185 | def __init__(self, input_dim, output_dim, nhLayers=1, nhu=1000,
186 | compress=1.0, dropout=0.25, hash_seed=2):
187 | super(HashedNet, self).__init__()
188 | self.nhLayers = nhLayers
189 | self.input_dim = input_dim
190 |
191 | self.dropout0 = nn.Dropout(dropout)
192 |         self.linear1 = HashLinear(input_dim, nhu, compress, hash_seed=hash_seed)
193 | self.dropout1 = nn.Dropout(dropout)
194 |
195 | for layer in range(2, nhLayers + 1):
196 |             setattr(self, 'linear' + str(layer),
197 |                     HashLinear(nhu, nhu, compress, hash_seed=hash_seed))
198 | setattr(self, 'dropout' + str(layer), nn.Dropout(dropout))
199 |
200 | self.linear_out = HashLinear(nhu, output_dim, compress,
201 | hash_bias=False, hash_seed=hash_seed)
202 |
203 | def forward(self, x):
204 | x = x.reshape(-1, self.input_dim)
205 | x = self.dropout0(x)
206 | x = F.relu(self.linear1(x))
207 | x = self.dropout1(x)
208 |
209 | for layer in range(2, self.nhLayers + 1):
210 | x = F.relu(getattr(self, 'linear' + str(layer))(x))
211 | x = getattr(self, 'dropout' + str(layer))(x)
212 |
213 | x = self.linear_out(x)
214 | return F.log_softmax(x, dim=1)
215 |
216 |
217 | def main():
218 | args = parse_arguments()
219 |
220 |     use_cuda = not args.no_cuda and torch.cuda.is_available()
221 |     torch.manual_seed(args.seed)
222 |     random.seed(args.seed)
223 |
224 | device = torch.device("cuda" if use_cuda else "cpu")
225 | kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
226 |
227 | tr_loader, val_loader, test_loader = load_data(args.batch_size, kwargs)
228 | input_dim = 784
229 | output_dim = 10
230 |
231 | if args.hashed:
232 | model = HashedNet(input_dim, output_dim, args.nhLayers, args.nhu,
233 | args.compress, args.dropout, args.hash_seed).to(device)
234 | else:
235 | eq_compress = get_equivalent_compression(input_dim, output_dim,
236 | args.nhu, args.nhLayers, args.compress)
237 | model = Net(input_dim, output_dim, args.nhLayers, args.nhu,
238 | eq_compress, args.dropout).to(device)
239 |
240 | optimizer = optim.SGD(model.parameters(), lr=args.lr,
241 | momentum=args.momentum,
242 | weight_decay=args.l2reg)
243 |
244 | scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
245 | factor=args.decay_factor,
246 | patience=args.patience,
247 | verbose=True)
248 |
249 | print('The number of parameters is: {}'.format(
250 | sum(p.numel() for p in model.parameters() if p.requires_grad)))
251 |
252 | for epoch in range(1, args.epochs + 1):
253 | tr_loss = train(model, device, tr_loader, optimizer, epoch)
254 | val_loss, val_acc = evaluate(model, device, val_loader)
255 | scheduler.step(val_loss)
256 | print('Epoch {} Train loss: {:.3f} Val loss: {:.3f} Val acc: {:.2f}%'.format(
257 | epoch, tr_loss, val_loss, val_acc))
258 |
259 | test_loss, test_acc = evaluate(model, device, test_loader)
260 | print('Test loss: {:.3f} Test acc: {:.2f}%'.format(test_loss, test_acc))
261 |
262 | if (args.save_model):
263 | torch.save(model.state_dict(), "mnist.pt")
264 |
265 |
266 | if __name__ == '__main__':
267 | main()
268 |
--------------------------------------------------------------------------------
/example.svg:
--------------------------------------------------------------------------------
[SVG markup omitted: MNIST example results figure referenced in README.md]
--------------------------------------------------------------------------------
/example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import torch\n",
10 | "import torch.optim as optim\n",
11 | "import random\n",
12 | "from mnist import load_data, train, evaluate, Net, HashedNet\n",
13 | "from utils import get_equivalent_compression\n",
14 | "\n",
15 | "use_cuda = torch.cuda.is_available()\n",
16 | "torch.manual_seed(1)\n",
17 | "random.seed(1)"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {
24 | "scrolled": false
25 | },
26 | "outputs": [
27 | {
28 | "name": "stdout",
29 | "output_type": "stream",
30 | "text": [
31 | "Compression rate: 0.015625\n",
32 | "The number of parameters is: 12730\n",
33 | "Epoch 1 Train loss: 0.698 Val loss: 0.298 Val acc: 91.05%\n",
34 | "Epoch 2 Train loss: 0.540 Val loss: 0.262 Val acc: 92.23%\n",
35 | "Epoch 3 Train loss: 0.526 Val loss: 0.263 Val acc: 91.97%\n",
36 | "Epoch 4 Train loss: 0.515 Val loss: 0.255 Val acc: 91.85%\n",
37 | "Epoch 5 Train loss: 0.505 Val loss: 0.243 Val acc: 92.58%\n",
38 | "Epoch 6 Train loss: 0.498 Val loss: 0.243 Val acc: 92.82%\n",
39 | "Epoch 7 Train loss: 0.501 Val loss: 0.243 Val acc: 92.80%\n",
40 | "Epoch 8 Train loss: 0.497 Val loss: 0.239 Val acc: 92.87%\n",
41 | "Epoch 9 Train loss: 0.493 Val loss: 0.239 Val acc: 92.70%\n",
42 | "Epoch 10 Train loss: 0.490 Val loss: 0.235 Val acc: 93.00%\n",
43 | "Epoch 11 Train loss: 0.493 Val loss: 0.237 Val acc: 93.20%\n",
44 | "Epoch 12 Train loss: 0.496 Val loss: 0.242 Val acc: 92.73%\n",
45 | "Epoch 12: reducing learning rate of group 0 to 1.0000e-03.\n",
46 | "Epoch 13 Train loss: 0.479 Val loss: 0.238 Val acc: 92.75%\n",
47 | "Epoch 14 Train loss: 0.446 Val loss: 0.221 Val acc: 93.62%\n",
48 | "Epoch 15 Train loss: 0.435 Val loss: 0.220 Val acc: 93.50%\n",
49 | "Epoch 16 Train loss: 0.438 Val loss: 0.216 Val acc: 93.70%\n",
50 | "Epoch 17 Train loss: 0.438 Val loss: 0.216 Val acc: 93.62%\n",
51 | "Epoch 18 Train loss: 0.431 Val loss: 0.215 Val acc: 93.78%\n",
52 | "Epoch 19 Train loss: 0.428 Val loss: 0.216 Val acc: 93.73%\n",
53 | "Epoch 20 Train loss: 0.433 Val loss: 0.216 Val acc: 93.62%\n",
54 | "Epoch 20: reducing learning rate of group 0 to 1.0000e-04.\n",
55 | "Epoch 21 Train loss: 0.430 Val loss: 0.216 Val acc: 93.78%\n",
56 | "Epoch 22 Train loss: 0.431 Val loss: 0.215 Val acc: 93.80%\n",
57 | "Epoch 23 Train loss: 0.422 Val loss: 0.214 Val acc: 93.70%\n",
58 | "Epoch 24 Train loss: 0.426 Val loss: 0.213 Val acc: 93.83%\n",
59 | "Epoch 25 Train loss: 0.428 Val loss: 0.213 Val acc: 93.78%\n",
60 | "Epoch 26 Train loss: 0.424 Val loss: 0.214 Val acc: 93.72%\n",
61 | "Epoch 27 Train loss: 0.427 Val loss: 0.214 Val acc: 93.77%\n",
62 | "Epoch 27: reducing learning rate of group 0 to 1.0000e-05.\n",
63 | "Epoch 28 Train loss: 0.427 Val loss: 0.214 Val acc: 93.75%\n",
64 | "Epoch 29 Train loss: 0.424 Val loss: 0.213 Val acc: 93.75%\n",
65 | "Epoch 30 Train loss: 0.427 Val loss: 0.213 Val acc: 93.77%\n",
66 | "Epoch 31 Train loss: 0.427 Val loss: 0.213 Val acc: 93.72%\n",
67 | "Epoch 32 Train loss: 0.428 Val loss: 0.213 Val acc: 93.73%\n",
68 | "Epoch 33 Train loss: 0.427 Val loss: 0.213 Val acc: 93.73%\n",
69 | "Epoch 34 Train loss: 0.430 Val loss: 0.213 Val acc: 93.73%\n",
70 | "Epoch 35 Train loss: 0.427 Val loss: 0.213 Val acc: 93.73%\n",
71 | "Epoch 36 Train loss: 0.423 Val loss: 0.213 Val acc: 93.75%\n",
72 | "Epoch 36: reducing learning rate of group 0 to 1.0000e-06.\n",
73 | "Epoch 37 Train loss: 0.426 Val loss: 0.213 Val acc: 93.72%\n",
74 | "Epoch 38 Train loss: 0.428 Val loss: 0.213 Val acc: 93.72%\n",
75 | "Epoch 39 Train loss: 0.422 Val loss: 0.213 Val acc: 93.72%\n",
76 | "Epoch 40 Train loss: 0.426 Val loss: 0.213 Val acc: 93.72%\n",
77 | "Epoch 41 Train loss: 0.423 Val loss: 0.213 Val acc: 93.72%\n",
78 | "Epoch 41: reducing learning rate of group 0 to 1.0000e-07.\n",
79 | "Epoch 42 Train loss: 0.423 Val loss: 0.213 Val acc: 93.72%\n",
80 | "Epoch 43 Train loss: 0.426 Val loss: 0.213 Val acc: 93.72%\n",
81 | "Epoch 44 Train loss: 0.426 Val loss: 0.213 Val acc: 93.72%\n",
82 | "Epoch 44: reducing learning rate of group 0 to 1.0000e-08.\n",
83 | "Epoch 45 Train loss: 0.425 Val loss: 0.213 Val acc: 93.72%\n",
84 | "Epoch 46 Train loss: 0.430 Val loss: 0.213 Val acc: 93.72%\n",
85 | "Epoch 47 Train loss: 0.425 Val loss: 0.213 Val acc: 93.72%\n",
86 | "Epoch 48 Train loss: 0.422 Val loss: 0.213 Val acc: 93.72%\n",
87 | "Epoch 49 Train loss: 0.424 Val loss: 0.213 Val acc: 93.72%\n",
88 | "Epoch 50 Train loss: 0.426 Val loss: 0.213 Val acc: 93.72%\n",
89 | "Test loss: 0.208 Test acc: 94.09%\n",
90 | "The number of parameters is: 12433\n",
91 | "Epoch 1 Train loss: 0.566 Val loss: 0.233 Val acc: 93.00%\n",
92 | "Epoch 2 Train loss: 0.496 Val loss: 0.223 Val acc: 93.68%\n",
93 | "Epoch 3 Train loss: 0.491 Val loss: 0.237 Val acc: 93.37%\n",
94 | "Epoch 4 Train loss: 0.505 Val loss: 0.226 Val acc: 93.83%\n",
95 | "Epoch 4: reducing learning rate of group 0 to 1.0000e-03.\n",
96 | "Epoch 5 Train loss: 0.532 Val loss: 0.229 Val acc: 93.95%\n",
97 | "Epoch 6 Train loss: 0.302 Val loss: 0.153 Val acc: 95.38%\n",
98 | "Epoch 7 Train loss: 0.269 Val loss: 0.147 Val acc: 95.78%\n",
99 | "Epoch 8 Train loss: 0.260 Val loss: 0.143 Val acc: 95.70%\n",
100 | "Epoch 9 Train loss: 0.248 Val loss: 0.140 Val acc: 95.82%\n",
101 | "Epoch 10 Train loss: 0.244 Val loss: 0.134 Val acc: 95.98%\n",
102 | "Epoch 11 Train loss: 0.244 Val loss: 0.134 Val acc: 96.18%\n",
103 | "Epoch 12 Train loss: 0.235 Val loss: 0.132 Val acc: 96.00%\n",
104 | "Epoch 13 Train loss: 0.237 Val loss: 0.129 Val acc: 96.00%\n",
105 | "Epoch 14 Train loss: 0.229 Val loss: 0.127 Val acc: 96.18%\n",
106 | "Epoch 15 Train loss: 0.227 Val loss: 0.126 Val acc: 96.12%\n",
107 | "Epoch 16 Train loss: 0.227 Val loss: 0.126 Val acc: 96.20%\n",
108 | "Epoch 17 Train loss: 0.226 Val loss: 0.125 Val acc: 96.27%\n",
109 | "Epoch 18 Train loss: 0.225 Val loss: 0.121 Val acc: 96.35%\n",
110 | "Epoch 19 Train loss: 0.220 Val loss: 0.120 Val acc: 96.47%\n",
111 | "Epoch 20 Train loss: 0.219 Val loss: 0.120 Val acc: 96.48%\n",
112 | "Epoch 21 Train loss: 0.217 Val loss: 0.119 Val acc: 96.52%\n",
113 | "Epoch 22 Train loss: 0.214 Val loss: 0.118 Val acc: 96.40%\n",
114 | "Epoch 23 Train loss: 0.219 Val loss: 0.119 Val acc: 96.55%\n",
115 | "Epoch 24 Train loss: 0.213 Val loss: 0.117 Val acc: 96.43%\n",
116 | "Epoch 25 Train loss: 0.211 Val loss: 0.119 Val acc: 96.45%\n",
117 | "Epoch 26 Train loss: 0.211 Val loss: 0.115 Val acc: 96.48%\n",
118 | "Epoch 27 Train loss: 0.215 Val loss: 0.116 Val acc: 96.57%\n",
119 | "Epoch 28 Train loss: 0.209 Val loss: 0.114 Val acc: 96.63%\n",
120 | "Epoch 29 Train loss: 0.207 Val loss: 0.115 Val acc: 96.48%\n",
121 | "Epoch 30 Train loss: 0.204 Val loss: 0.116 Val acc: 96.47%\n",
122 | "Epoch 30: reducing learning rate of group 0 to 1.0000e-04.\n",
123 | "Epoch 31 Train loss: 0.206 Val loss: 0.115 Val acc: 96.50%\n",
124 | "Epoch 32 Train loss: 0.201 Val loss: 0.112 Val acc: 96.65%\n",
125 | "Epoch 33 Train loss: 0.192 Val loss: 0.112 Val acc: 96.57%\n",
126 | "Epoch 34 Train loss: 0.194 Val loss: 0.111 Val acc: 96.58%\n",
127 | "Epoch 35 Train loss: 0.194 Val loss: 0.112 Val acc: 96.67%\n",
128 | "Epoch 36 Train loss: 0.195 Val loss: 0.112 Val acc: 96.77%\n",
129 | "Epoch 36: reducing learning rate of group 0 to 1.0000e-05.\n",
130 | "Epoch 37 Train loss: 0.191 Val loss: 0.111 Val acc: 96.67%\n",
131 | "Epoch 38 Train loss: 0.197 Val loss: 0.111 Val acc: 96.60%\n",
132 | "Epoch 39 Train loss: 0.193 Val loss: 0.111 Val acc: 96.68%\n",
133 | "Epoch 39: reducing learning rate of group 0 to 1.0000e-06.\n",
134 | "Epoch 40 Train loss: 0.192 Val loss: 0.111 Val acc: 96.65%\n",
135 | "Epoch 41 Train loss: 0.190 Val loss: 0.111 Val acc: 96.67%\n",
136 | "Epoch 42 Train loss: 0.195 Val loss: 0.111 Val acc: 96.67%\n",
137 | "Epoch 42: reducing learning rate of group 0 to 1.0000e-07.\n",
138 | "Epoch 43 Train loss: 0.192 Val loss: 0.111 Val acc: 96.67%\n",
139 | "Epoch 44 Train loss: 0.195 Val loss: 0.111 Val acc: 96.67%\n",
140 | "Epoch 45 Train loss: 0.193 Val loss: 0.111 Val acc: 96.67%\n",
141 | "Epoch 45: reducing learning rate of group 0 to 1.0000e-08.\n",
142 | "Epoch 46 Train loss: 0.193 Val loss: 0.111 Val acc: 96.67%\n",
143 | "Epoch 47 Train loss: 0.194 Val loss: 0.111 Val acc: 96.67%\n",
144 | "Epoch 48 Train loss: 0.192 Val loss: 0.111 Val acc: 96.67%\n",
145 | "Epoch 49 Train loss: 0.194 Val loss: 0.111 Val acc: 96.67%\n",
146 | "Epoch 50 Train loss: 0.196 Val loss: 0.111 Val acc: 96.67%\n",
147 | "Test loss: 0.108 Test acc: 96.85%\n",
148 | "Compression rate: 0.03125\n",
149 | "The number of parameters is: 24655\n",
150 | "Epoch 1 Train loss: 0.503 Val loss: 0.213 Val acc: 93.43%\n",
151 | "Epoch 2 Train loss: 0.365 Val loss: 0.185 Val acc: 94.63%\n",
152 | "Epoch 3 Train loss: 0.337 Val loss: 0.182 Val acc: 94.88%\n",
153 | "Epoch 4 Train loss: 0.319 Val loss: 0.162 Val acc: 95.18%\n",
154 | "Epoch 5 Train loss: 0.316 Val loss: 0.161 Val acc: 95.37%\n",
155 | "Epoch 6 Train loss: 0.307 Val loss: 0.152 Val acc: 95.43%\n",
156 | "Epoch 7 Train loss: 0.301 Val loss: 0.147 Val acc: 95.65%\n",
157 | "Epoch 8 Train loss: 0.295 Val loss: 0.154 Val acc: 95.45%\n",
158 | "Epoch 9 Train loss: 0.293 Val loss: 0.149 Val acc: 95.38%\n",
159 | "Epoch 10 Train loss: 0.291 Val loss: 0.141 Val acc: 95.88%\n",
160 | "Epoch 11 Train loss: 0.289 Val loss: 0.151 Val acc: 95.50%\n",
161 | "Epoch 12 Train loss: 0.279 Val loss: 0.142 Val acc: 95.85%\n",
162 | "Epoch 12: reducing learning rate of group 0 to 1.0000e-03.\n",
163 | "Epoch 13 Train loss: 0.282 Val loss: 0.150 Val acc: 95.35%\n",
164 | "Epoch 14 Train loss: 0.255 Val loss: 0.128 Val acc: 96.40%\n",
165 | "Epoch 15 Train loss: 0.246 Val loss: 0.127 Val acc: 96.42%\n",
166 | "Epoch 16 Train loss: 0.244 Val loss: 0.124 Val acc: 96.50%\n",
167 | "Epoch 17 Train loss: 0.244 Val loss: 0.124 Val acc: 96.40%\n",
168 | "Epoch 18 Train loss: 0.241 Val loss: 0.122 Val acc: 96.40%\n",
169 | "Epoch 19 Train loss: 0.243 Val loss: 0.124 Val acc: 96.42%\n",
170 | "Epoch 20 Train loss: 0.238 Val loss: 0.124 Val acc: 96.58%\n",
171 | "Epoch 21 Train loss: 0.239 Val loss: 0.122 Val acc: 96.57%\n"
172 | ]
173 | },
174 | {
175 | "name": "stdout",
176 | "output_type": "stream",
177 | "text": [
178 | "Epoch 22 Train loss: 0.239 Val loss: 0.122 Val acc: 96.45%\n",
179 | "Epoch 23 Train loss: 0.239 Val loss: 0.123 Val acc: 96.47%\n",
180 | "Epoch 23: reducing learning rate of group 0 to 1.0000e-04.\n",
181 | "Epoch 24 Train loss: 0.236 Val loss: 0.122 Val acc: 96.40%\n",
182 | "Epoch 25 Train loss: 0.235 Val loss: 0.122 Val acc: 96.43%\n",
183 | "Epoch 26 Train loss: 0.237 Val loss: 0.122 Val acc: 96.50%\n",
184 | "Epoch 26: reducing learning rate of group 0 to 1.0000e-05.\n",
185 | "Epoch 27 Train loss: 0.233 Val loss: 0.122 Val acc: 96.45%\n",
186 | "Epoch 28 Train loss: 0.234 Val loss: 0.122 Val acc: 96.45%\n",
187 | "Epoch 29 Train loss: 0.233 Val loss: 0.122 Val acc: 96.45%\n",
188 | "Epoch 29: reducing learning rate of group 0 to 1.0000e-06.\n",
189 | "Epoch 30 Train loss: 0.233 Val loss: 0.122 Val acc: 96.47%\n",
190 | "Epoch 31 Train loss: 0.233 Val loss: 0.122 Val acc: 96.47%\n",
191 | "Epoch 32 Train loss: 0.238 Val loss: 0.122 Val acc: 96.47%\n",
192 | "Epoch 32: reducing learning rate of group 0 to 1.0000e-07.\n",
193 | "Epoch 33 Train loss: 0.237 Val loss: 0.122 Val acc: 96.47%\n",
194 | "Epoch 34 Train loss: 0.232 Val loss: 0.122 Val acc: 96.47%\n",
195 | "Epoch 35 Train loss: 0.238 Val loss: 0.122 Val acc: 96.47%\n",
196 | "Epoch 35: reducing learning rate of group 0 to 1.0000e-08.\n",
197 | "Epoch 36 Train loss: 0.231 Val loss: 0.122 Val acc: 96.47%\n",
198 | "Epoch 37 Train loss: 0.232 Val loss: 0.122 Val acc: 96.47%\n",
199 | "Epoch 38 Train loss: 0.238 Val loss: 0.122 Val acc: 96.47%\n",
200 | "Epoch 39 Train loss: 0.236 Val loss: 0.122 Val acc: 96.47%\n",
201 | "Epoch 40 Train loss: 0.236 Val loss: 0.122 Val acc: 96.47%\n",
202 | "Epoch 41 Train loss: 0.232 Val loss: 0.122 Val acc: 96.47%\n",
203 | "Epoch 42 Train loss: 0.238 Val loss: 0.122 Val acc: 96.47%\n",
204 | "Epoch 43 Train loss: 0.232 Val loss: 0.122 Val acc: 96.47%\n",
205 | "Epoch 44 Train loss: 0.237 Val loss: 0.122 Val acc: 96.47%\n",
206 | "Epoch 45 Train loss: 0.232 Val loss: 0.122 Val acc: 96.47%\n",
207 | "Epoch 46 Train loss: 0.235 Val loss: 0.122 Val acc: 96.47%\n",
208 | "Epoch 47 Train loss: 0.236 Val loss: 0.122 Val acc: 96.47%\n",
209 | "Epoch 48 Train loss: 0.233 Val loss: 0.122 Val acc: 96.47%\n",
210 | "Epoch 49 Train loss: 0.237 Val loss: 0.122 Val acc: 96.47%\n",
211 | "Epoch 50 Train loss: 0.233 Val loss: 0.122 Val acc: 96.47%\n",
212 | "Test loss: 0.122 Test acc: 96.47%\n",
213 | "The number of parameters is: 24855\n",
214 | "Epoch 1 Train loss: 0.397 Val loss: 0.158 Val acc: 95.30%\n",
215 | "Epoch 2 Train loss: 0.284 Val loss: 0.135 Val acc: 96.00%\n",
216 | "Epoch 3 Train loss: 0.261 Val loss: 0.125 Val acc: 96.25%\n",
217 | "Epoch 4 Train loss: 0.261 Val loss: 0.126 Val acc: 96.20%\n",
218 | "Epoch 5 Train loss: 0.245 Val loss: 0.124 Val acc: 96.02%\n",
219 | "Epoch 6 Train loss: 0.242 Val loss: 0.116 Val acc: 96.37%\n",
220 | "Epoch 7 Train loss: 0.238 Val loss: 0.122 Val acc: 96.37%\n",
221 | "Epoch 8 Train loss: 0.232 Val loss: 0.118 Val acc: 96.62%\n",
222 | "Epoch 8: reducing learning rate of group 0 to 1.0000e-03.\n",
223 | "Epoch 9 Train loss: 0.233 Val loss: 0.129 Val acc: 96.33%\n",
224 | "Epoch 10 Train loss: 0.173 Val loss: 0.100 Val acc: 97.12%\n",
225 | "Epoch 11 Train loss: 0.150 Val loss: 0.098 Val acc: 97.28%\n",
226 | "Epoch 12 Train loss: 0.148 Val loss: 0.096 Val acc: 97.12%\n",
227 | "Epoch 13 Train loss: 0.145 Val loss: 0.095 Val acc: 97.22%\n",
228 | "Epoch 14 Train loss: 0.142 Val loss: 0.092 Val acc: 97.27%\n",
229 | "Epoch 15 Train loss: 0.140 Val loss: 0.092 Val acc: 97.17%\n",
230 | "Epoch 16 Train loss: 0.137 Val loss: 0.092 Val acc: 97.25%\n",
231 | "Epoch 17 Train loss: 0.139 Val loss: 0.090 Val acc: 97.25%\n",
232 | "Epoch 18 Train loss: 0.137 Val loss: 0.089 Val acc: 97.30%\n",
233 | "Epoch 19 Train loss: 0.133 Val loss: 0.091 Val acc: 97.27%\n",
234 | "Epoch 20 Train loss: 0.130 Val loss: 0.087 Val acc: 97.28%\n",
235 | "Epoch 21 Train loss: 0.130 Val loss: 0.087 Val acc: 97.27%\n",
236 | "Epoch 22 Train loss: 0.130 Val loss: 0.089 Val acc: 97.20%\n",
237 | "Epoch 23 Train loss: 0.129 Val loss: 0.086 Val acc: 97.30%\n",
238 | "Epoch 24 Train loss: 0.131 Val loss: 0.088 Val acc: 97.27%\n",
239 | "Epoch 25 Train loss: 0.126 Val loss: 0.087 Val acc: 97.33%\n",
240 | "Epoch 26 Train loss: 0.123 Val loss: 0.086 Val acc: 97.38%\n",
241 | "Epoch 27 Train loss: 0.126 Val loss: 0.085 Val acc: 97.37%\n",
242 | "Epoch 28 Train loss: 0.127 Val loss: 0.085 Val acc: 97.35%\n",
243 | "Epoch 29 Train loss: 0.125 Val loss: 0.085 Val acc: 97.37%\n",
244 | "Epoch 30 Train loss: 0.125 Val loss: 0.084 Val acc: 97.37%\n",
245 | "Epoch 31 Train loss: 0.125 Val loss: 0.083 Val acc: 97.47%\n",
246 | "Epoch 32 Train loss: 0.121 Val loss: 0.082 Val acc: 97.55%\n",
247 | "Epoch 33 Train loss: 0.124 Val loss: 0.085 Val acc: 97.33%\n",
248 | "Epoch 34 Train loss: 0.122 Val loss: 0.084 Val acc: 97.35%\n",
249 | "Epoch 34: reducing learning rate of group 0 to 1.0000e-04.\n",
250 | "Epoch 35 Train loss: 0.118 Val loss: 0.084 Val acc: 97.45%\n",
251 | "Epoch 36 Train loss: 0.118 Val loss: 0.083 Val acc: 97.40%\n",
252 | "Epoch 37 Train loss: 0.116 Val loss: 0.082 Val acc: 97.38%\n",
253 | "Epoch 38 Train loss: 0.116 Val loss: 0.081 Val acc: 97.45%\n",
254 | "Epoch 39 Train loss: 0.116 Val loss: 0.081 Val acc: 97.42%\n",
255 | "Epoch 40 Train loss: 0.117 Val loss: 0.082 Val acc: 97.40%\n",
256 | "Epoch 41 Train loss: 0.117 Val loss: 0.082 Val acc: 97.42%\n",
257 | "Epoch 41: reducing learning rate of group 0 to 1.0000e-05.\n",
258 | "Epoch 42 Train loss: 0.119 Val loss: 0.081 Val acc: 97.47%\n",
259 | "Epoch 43 Train loss: 0.117 Val loss: 0.081 Val acc: 97.47%\n",
260 | "Epoch 44 Train loss: 0.118 Val loss: 0.081 Val acc: 97.47%\n",
261 | "Epoch 44: reducing learning rate of group 0 to 1.0000e-06.\n",
262 | "Epoch 45 Train loss: 0.116 Val loss: 0.081 Val acc: 97.47%\n",
263 | "Epoch 46 Train loss: 0.117 Val loss: 0.081 Val acc: 97.47%\n",
264 | "Epoch 47 Train loss: 0.117 Val loss: 0.081 Val acc: 97.47%\n",
265 | "Epoch 47: reducing learning rate of group 0 to 1.0000e-07.\n",
266 | "Epoch 48 Train loss: 0.115 Val loss: 0.081 Val acc: 97.47%\n",
267 | "Epoch 49 Train loss: 0.115 Val loss: 0.081 Val acc: 97.47%\n",
268 | "Epoch 50 Train loss: 0.116 Val loss: 0.081 Val acc: 97.47%\n",
269 | "Test loss: 0.077 Test acc: 97.75%\n",
270 | "Compression rate: 0.0625\n",
271 | "The number of parameters is: 49300\n",
272 | "Epoch 1 Train loss: 0.404 Val loss: 0.168 Val acc: 94.87%\n",
273 | "Epoch 2 Train loss: 0.256 Val loss: 0.134 Val acc: 95.82%\n",
274 | "Epoch 3 Train loss: 0.225 Val loss: 0.121 Val acc: 96.32%\n",
275 | "Epoch 4 Train loss: 0.210 Val loss: 0.115 Val acc: 96.35%\n",
276 | "Epoch 5 Train loss: 0.197 Val loss: 0.104 Val acc: 96.83%\n",
277 | "Epoch 6 Train loss: 0.192 Val loss: 0.104 Val acc: 96.87%\n",
278 | "Epoch 7 Train loss: 0.187 Val loss: 0.098 Val acc: 96.88%\n",
279 | "Epoch 8 Train loss: 0.179 Val loss: 0.100 Val acc: 96.77%\n",
280 | "Epoch 9 Train loss: 0.178 Val loss: 0.087 Val acc: 97.15%\n",
281 | "Epoch 10 Train loss: 0.173 Val loss: 0.095 Val acc: 97.00%\n",
282 | "Epoch 11 Train loss: 0.170 Val loss: 0.091 Val acc: 97.08%\n",
283 | "Epoch 11: reducing learning rate of group 0 to 1.0000e-03.\n",
284 | "Epoch 12 Train loss: 0.167 Val loss: 0.090 Val acc: 97.20%\n",
285 | "Epoch 13 Train loss: 0.141 Val loss: 0.083 Val acc: 97.35%\n",
286 | "Epoch 14 Train loss: 0.140 Val loss: 0.081 Val acc: 97.47%\n",
287 | "Epoch 15 Train loss: 0.137 Val loss: 0.080 Val acc: 97.55%\n",
288 | "Epoch 16 Train loss: 0.133 Val loss: 0.080 Val acc: 97.58%\n",
289 | "Epoch 17 Train loss: 0.133 Val loss: 0.079 Val acc: 97.50%\n",
290 | "Epoch 18 Train loss: 0.135 Val loss: 0.078 Val acc: 97.57%\n",
291 | "Epoch 19 Train loss: 0.133 Val loss: 0.079 Val acc: 97.52%\n",
292 | "Epoch 20 Train loss: 0.132 Val loss: 0.078 Val acc: 97.67%\n",
293 | "Epoch 21 Train loss: 0.129 Val loss: 0.079 Val acc: 97.63%\n",
294 | "Epoch 22 Train loss: 0.129 Val loss: 0.078 Val acc: 97.53%\n",
295 | "Epoch 23 Train loss: 0.129 Val loss: 0.078 Val acc: 97.60%\n",
296 | "Epoch 24 Train loss: 0.130 Val loss: 0.078 Val acc: 97.62%\n",
297 | "Epoch 25 Train loss: 0.127 Val loss: 0.078 Val acc: 97.73%\n",
298 | "Epoch 26 Train loss: 0.129 Val loss: 0.077 Val acc: 97.63%\n",
299 | "Epoch 27 Train loss: 0.129 Val loss: 0.076 Val acc: 97.72%\n",
300 | "Epoch 28 Train loss: 0.127 Val loss: 0.077 Val acc: 97.77%\n",
301 | "Epoch 29 Train loss: 0.128 Val loss: 0.076 Val acc: 97.72%\n",
302 | "Epoch 29: reducing learning rate of group 0 to 1.0000e-04.\n",
303 | "Epoch 30 Train loss: 0.127 Val loss: 0.076 Val acc: 97.77%\n",
304 | "Epoch 31 Train loss: 0.128 Val loss: 0.076 Val acc: 97.80%\n",
305 | "Epoch 32 Train loss: 0.126 Val loss: 0.076 Val acc: 97.77%\n",
306 | "Epoch 33 Train loss: 0.124 Val loss: 0.076 Val acc: 97.77%\n",
307 | "Epoch 34 Train loss: 0.121 Val loss: 0.075 Val acc: 97.77%\n",
308 | "Epoch 35 Train loss: 0.125 Val loss: 0.075 Val acc: 97.75%\n",
309 | "Epoch 36 Train loss: 0.122 Val loss: 0.075 Val acc: 97.75%\n",
310 | "Epoch 37 Train loss: 0.127 Val loss: 0.075 Val acc: 97.77%\n",
311 | "Epoch 38 Train loss: 0.123 Val loss: 0.075 Val acc: 97.75%\n",
312 | "Epoch 39 Train loss: 0.123 Val loss: 0.075 Val acc: 97.75%\n",
313 | "Epoch 40 Train loss: 0.121 Val loss: 0.075 Val acc: 97.77%\n",
314 | "Epoch 40: reducing learning rate of group 0 to 1.0000e-05.\n",
315 | "Epoch 41 Train loss: 0.123 Val loss: 0.075 Val acc: 97.77%\n",
316 | "Epoch 42 Train loss: 0.126 Val loss: 0.075 Val acc: 97.75%\n",
317 | "Epoch 43 Train loss: 0.123 Val loss: 0.075 Val acc: 97.75%\n"
318 | ]
319 | },
320 | {
321 | "name": "stdout",
322 | "output_type": "stream",
323 | "text": [
324 | "Epoch 43: reducing learning rate of group 0 to 1.0000e-06.\n",
325 | "Epoch 44 Train loss: 0.125 Val loss: 0.075 Val acc: 97.73%\n",
326 | "Epoch 45 Train loss: 0.123 Val loss: 0.075 Val acc: 97.73%\n",
327 | "Epoch 46 Train loss: 0.125 Val loss: 0.075 Val acc: 97.73%\n",
328 | "Epoch 46: reducing learning rate of group 0 to 1.0000e-07.\n",
329 | "Epoch 47 Train loss: 0.124 Val loss: 0.075 Val acc: 97.73%\n",
330 | "Epoch 48 Train loss: 0.123 Val loss: 0.075 Val acc: 97.73%\n",
331 | "Epoch 49 Train loss: 0.123 Val loss: 0.075 Val acc: 97.73%\n",
332 | "Epoch 49: reducing learning rate of group 0 to 1.0000e-08.\n",
333 | "Epoch 50 Train loss: 0.123 Val loss: 0.075 Val acc: 97.73%\n",
334 | "Test loss: 0.078 Test acc: 97.72%\n",
335 | "The number of parameters is: 49698\n",
336 | "Epoch 1 Train loss: 0.330 Val loss: 0.134 Val acc: 95.75%\n",
337 | "Epoch 2 Train loss: 0.205 Val loss: 0.118 Val acc: 96.47%\n",
338 | "Epoch 3 Train loss: 0.179 Val loss: 0.101 Val acc: 96.68%\n",
339 | "Epoch 4 Train loss: 0.166 Val loss: 0.097 Val acc: 97.25%\n",
340 | "Epoch 5 Train loss: 0.157 Val loss: 0.092 Val acc: 97.15%\n",
341 | "Epoch 6 Train loss: 0.151 Val loss: 0.083 Val acc: 97.60%\n",
342 | "Epoch 7 Train loss: 0.143 Val loss: 0.091 Val acc: 97.33%\n",
343 | "Epoch 8 Train loss: 0.138 Val loss: 0.089 Val acc: 97.40%\n",
344 | "Epoch 9 Train loss: 0.137 Val loss: 0.082 Val acc: 97.60%\n",
345 | "Epoch 10 Train loss: 0.135 Val loss: 0.087 Val acc: 97.33%\n",
346 | "Epoch 11 Train loss: 0.132 Val loss: 0.079 Val acc: 97.73%\n",
347 | "Epoch 12 Train loss: 0.129 Val loss: 0.082 Val acc: 97.65%\n",
348 | "Epoch 13 Train loss: 0.124 Val loss: 0.083 Val acc: 97.50%\n",
349 | "Epoch 13: reducing learning rate of group 0 to 1.0000e-03.\n",
350 | "Epoch 14 Train loss: 0.127 Val loss: 0.079 Val acc: 97.68%\n",
351 | "Epoch 15 Train loss: 0.098 Val loss: 0.072 Val acc: 97.87%\n",
352 | "Epoch 16 Train loss: 0.091 Val loss: 0.068 Val acc: 97.93%\n",
353 | "Epoch 17 Train loss: 0.084 Val loss: 0.068 Val acc: 97.98%\n",
354 | "Epoch 18 Train loss: 0.080 Val loss: 0.067 Val acc: 97.97%\n",
355 | "Epoch 19 Train loss: 0.081 Val loss: 0.068 Val acc: 97.97%\n",
356 | "Epoch 20 Train loss: 0.080 Val loss: 0.067 Val acc: 97.87%\n",
357 | "Epoch 21 Train loss: 0.078 Val loss: 0.066 Val acc: 98.10%\n",
358 | "Epoch 22 Train loss: 0.077 Val loss: 0.066 Val acc: 97.97%\n",
359 | "Epoch 23 Train loss: 0.075 Val loss: 0.065 Val acc: 98.15%\n",
360 | "Epoch 24 Train loss: 0.076 Val loss: 0.065 Val acc: 98.08%\n",
361 | "Epoch 25 Train loss: 0.073 Val loss: 0.064 Val acc: 98.10%\n",
362 | "Epoch 26 Train loss: 0.074 Val loss: 0.064 Val acc: 98.10%\n",
363 | "Epoch 27 Train loss: 0.072 Val loss: 0.063 Val acc: 98.02%\n",
364 | "Epoch 28 Train loss: 0.073 Val loss: 0.062 Val acc: 98.07%\n",
365 | "Epoch 29 Train loss: 0.071 Val loss: 0.063 Val acc: 98.08%\n",
366 | "Epoch 30 Train loss: 0.071 Val loss: 0.062 Val acc: 98.13%\n",
367 | "Epoch 30: reducing learning rate of group 0 to 1.0000e-04.\n",
368 | "Epoch 31 Train loss: 0.071 Val loss: 0.063 Val acc: 97.93%\n",
369 | "Epoch 32 Train loss: 0.069 Val loss: 0.062 Val acc: 98.02%\n",
370 | "Epoch 33 Train loss: 0.067 Val loss: 0.062 Val acc: 98.05%\n",
371 | "Epoch 34 Train loss: 0.070 Val loss: 0.062 Val acc: 98.03%\n",
372 | "Epoch 35 Train loss: 0.069 Val loss: 0.062 Val acc: 98.07%\n",
373 | "Epoch 36 Train loss: 0.069 Val loss: 0.062 Val acc: 98.10%\n",
374 | "Epoch 37 Train loss: 0.070 Val loss: 0.062 Val acc: 98.10%\n",
375 | "Epoch 38 Train loss: 0.068 Val loss: 0.062 Val acc: 98.12%\n",
376 | "Epoch 39 Train loss: 0.070 Val loss: 0.062 Val acc: 98.12%\n",
377 | "Epoch 40 Train loss: 0.066 Val loss: 0.062 Val acc: 98.10%\n",
378 | "Epoch 41 Train loss: 0.069 Val loss: 0.062 Val acc: 98.08%\n",
379 | "Epoch 42 Train loss: 0.070 Val loss: 0.062 Val acc: 98.10%\n",
380 | "Epoch 42: reducing learning rate of group 0 to 1.0000e-05.\n",
381 | "Epoch 43 Train loss: 0.068 Val loss: 0.062 Val acc: 98.13%\n",
382 | "Epoch 44 Train loss: 0.067 Val loss: 0.062 Val acc: 98.13%\n",
383 | "Epoch 45 Train loss: 0.068 Val loss: 0.062 Val acc: 98.13%\n",
384 | "Epoch 45: reducing learning rate of group 0 to 1.0000e-06.\n",
385 | "Epoch 46 Train loss: 0.067 Val loss: 0.062 Val acc: 98.13%\n",
386 | "Epoch 47 Train loss: 0.068 Val loss: 0.062 Val acc: 98.13%\n",
387 | "Epoch 48 Train loss: 0.067 Val loss: 0.062 Val acc: 98.13%\n",
388 | "Epoch 48: reducing learning rate of group 0 to 1.0000e-07.\n",
389 | "Epoch 49 Train loss: 0.070 Val loss: 0.062 Val acc: 98.13%\n",
390 | "Epoch 50 Train loss: 0.066 Val loss: 0.062 Val acc: 98.13%\n",
391 | "Test loss: 0.065 Test acc: 98.16%\n",
392 | "Compression rate: 0.125\n",
393 | "The number of parameters is: 99385\n",
394 | "Epoch 1 Train loss: 0.355 Val loss: 0.145 Val acc: 95.60%\n",
395 | "Epoch 2 Train loss: 0.197 Val loss: 0.108 Val acc: 96.67%\n",
396 | "Epoch 3 Train loss: 0.165 Val loss: 0.098 Val acc: 96.72%\n",
397 | "Epoch 4 Train loss: 0.147 Val loss: 0.090 Val acc: 96.92%\n",
398 | "Epoch 5 Train loss: 0.139 Val loss: 0.084 Val acc: 97.53%\n",
399 | "Epoch 6 Train loss: 0.128 Val loss: 0.075 Val acc: 97.67%\n",
400 | "Epoch 7 Train loss: 0.122 Val loss: 0.076 Val acc: 97.67%\n",
401 | "Epoch 8 Train loss: 0.116 Val loss: 0.078 Val acc: 97.43%\n",
402 | "Epoch 9 Train loss: 0.111 Val loss: 0.074 Val acc: 97.68%\n",
403 | "Epoch 10 Train loss: 0.107 Val loss: 0.070 Val acc: 98.00%\n",
404 | "Epoch 11 Train loss: 0.104 Val loss: 0.070 Val acc: 97.77%\n",
405 | "Epoch 12 Train loss: 0.100 Val loss: 0.065 Val acc: 98.05%\n",
406 | "Epoch 13 Train loss: 0.098 Val loss: 0.071 Val acc: 97.80%\n",
407 | "Epoch 14 Train loss: 0.096 Val loss: 0.065 Val acc: 97.83%\n",
408 | "Epoch 15 Train loss: 0.094 Val loss: 0.067 Val acc: 97.88%\n",
409 | "Epoch 16 Train loss: 0.092 Val loss: 0.064 Val acc: 97.97%\n",
410 | "Epoch 17 Train loss: 0.091 Val loss: 0.067 Val acc: 97.83%\n",
411 | "Epoch 18 Train loss: 0.089 Val loss: 0.068 Val acc: 97.80%\n",
412 | "Epoch 18: reducing learning rate of group 0 to 1.0000e-03.\n",
413 | "Epoch 19 Train loss: 0.090 Val loss: 0.070 Val acc: 97.77%\n",
414 | "Epoch 20 Train loss: 0.077 Val loss: 0.065 Val acc: 97.97%\n",
415 | "Epoch 21 Train loss: 0.071 Val loss: 0.063 Val acc: 98.08%\n",
416 | "Epoch 22 Train loss: 0.069 Val loss: 0.062 Val acc: 98.12%\n",
417 | "Epoch 23 Train loss: 0.066 Val loss: 0.062 Val acc: 98.13%\n",
418 | "Epoch 24 Train loss: 0.065 Val loss: 0.061 Val acc: 98.20%\n",
419 | "Epoch 25 Train loss: 0.065 Val loss: 0.060 Val acc: 98.20%\n",
420 | "Epoch 26 Train loss: 0.064 Val loss: 0.060 Val acc: 98.23%\n",
421 | "Epoch 27 Train loss: 0.064 Val loss: 0.060 Val acc: 98.23%\n",
422 | "Epoch 27: reducing learning rate of group 0 to 1.0000e-04.\n",
423 | "Epoch 28 Train loss: 0.064 Val loss: 0.061 Val acc: 98.15%\n",
424 | "Epoch 29 Train loss: 0.064 Val loss: 0.060 Val acc: 98.12%\n",
425 | "Epoch 30 Train loss: 0.063 Val loss: 0.060 Val acc: 98.13%\n",
426 | "Epoch 30: reducing learning rate of group 0 to 1.0000e-05.\n",
427 | "Epoch 31 Train loss: 0.061 Val loss: 0.060 Val acc: 98.10%\n",
428 | "Epoch 32 Train loss: 0.061 Val loss: 0.060 Val acc: 98.10%\n",
429 | "Epoch 33 Train loss: 0.063 Val loss: 0.060 Val acc: 98.10%\n",
430 | "Epoch 33: reducing learning rate of group 0 to 1.0000e-06.\n",
431 | "Epoch 34 Train loss: 0.061 Val loss: 0.060 Val acc: 98.12%\n",
432 | "Epoch 35 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
433 | "Epoch 36 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
434 | "Epoch 36: reducing learning rate of group 0 to 1.0000e-07.\n",
435 | "Epoch 37 Train loss: 0.063 Val loss: 0.060 Val acc: 98.12%\n",
436 | "Epoch 38 Train loss: 0.063 Val loss: 0.060 Val acc: 98.12%\n",
437 | "Epoch 39 Train loss: 0.063 Val loss: 0.060 Val acc: 98.12%\n",
438 | "Epoch 39: reducing learning rate of group 0 to 1.0000e-08.\n",
439 | "Epoch 40 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
440 | "Epoch 41 Train loss: 0.061 Val loss: 0.060 Val acc: 98.12%\n",
441 | "Epoch 42 Train loss: 0.063 Val loss: 0.060 Val acc: 98.12%\n",
442 | "Epoch 43 Train loss: 0.064 Val loss: 0.060 Val acc: 98.12%\n",
443 | "Epoch 44 Train loss: 0.061 Val loss: 0.060 Val acc: 98.12%\n",
444 | "Epoch 45 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
445 | "Epoch 46 Train loss: 0.060 Val loss: 0.060 Val acc: 98.12%\n",
446 | "Epoch 47 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
447 | "Epoch 48 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
448 | "Epoch 49 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
449 | "Epoch 50 Train loss: 0.062 Val loss: 0.060 Val acc: 98.12%\n",
450 | "Test loss: 0.057 Test acc: 98.25%\n",
451 | "The number of parameters is: 99385\n",
452 | "Epoch 1 Train loss: 0.303 Val loss: 0.125 Val acc: 96.20%\n",
453 | "Epoch 2 Train loss: 0.169 Val loss: 0.097 Val acc: 97.15%\n",
454 | "Epoch 3 Train loss: 0.139 Val loss: 0.086 Val acc: 97.38%\n",
455 | "Epoch 4 Train loss: 0.123 Val loss: 0.092 Val acc: 97.22%\n",
456 | "Epoch 5 Train loss: 0.113 Val loss: 0.084 Val acc: 97.17%\n",
457 | "Epoch 6 Train loss: 0.105 Val loss: 0.083 Val acc: 97.67%\n",
458 | "Epoch 7 Train loss: 0.102 Val loss: 0.077 Val acc: 97.75%\n",
459 | "Epoch 8 Train loss: 0.095 Val loss: 0.078 Val acc: 97.63%\n",
460 | "Epoch 9 Train loss: 0.087 Val loss: 0.070 Val acc: 98.02%\n",
461 | "Epoch 10 Train loss: 0.087 Val loss: 0.068 Val acc: 97.88%\n",
462 | "Epoch 11 Train loss: 0.087 Val loss: 0.074 Val acc: 98.03%\n",
463 | "Epoch 12 Train loss: 0.080 Val loss: 0.069 Val acc: 98.02%\n",
464 | "Epoch 12: reducing learning rate of group 0 to 1.0000e-03.\n",
465 | "Epoch 13 Train loss: 0.077 Val loss: 0.070 Val acc: 97.90%\n"
466 | ]
467 | },
468 | {
469 | "name": "stdout",
470 | "output_type": "stream",
471 | "text": [
472 | "Epoch 14 Train loss: 0.065 Val loss: 0.065 Val acc: 98.20%\n",
473 | "Epoch 15 Train loss: 0.055 Val loss: 0.064 Val acc: 98.27%\n",
474 | "Epoch 16 Train loss: 0.052 Val loss: 0.064 Val acc: 98.22%\n",
475 | "Epoch 17 Train loss: 0.051 Val loss: 0.063 Val acc: 98.27%\n",
476 | "Epoch 18 Train loss: 0.047 Val loss: 0.062 Val acc: 98.25%\n",
477 | "Epoch 19 Train loss: 0.047 Val loss: 0.063 Val acc: 98.25%\n",
478 | "Epoch 20 Train loss: 0.048 Val loss: 0.062 Val acc: 98.25%\n",
479 | "Epoch 21 Train loss: 0.045 Val loss: 0.062 Val acc: 98.32%\n",
480 | "Epoch 22 Train loss: 0.046 Val loss: 0.061 Val acc: 98.22%\n",
481 | "Epoch 23 Train loss: 0.045 Val loss: 0.061 Val acc: 98.32%\n",
482 | "Epoch 24 Train loss: 0.045 Val loss: 0.061 Val acc: 98.27%\n",
483 | "Epoch 25 Train loss: 0.045 Val loss: 0.060 Val acc: 98.35%\n",
484 | "Epoch 26 Train loss: 0.044 Val loss: 0.059 Val acc: 98.28%\n",
485 | "Epoch 27 Train loss: 0.042 Val loss: 0.060 Val acc: 98.22%\n",
486 | "Epoch 28 Train loss: 0.041 Val loss: 0.060 Val acc: 98.30%\n",
487 | "Epoch 28: reducing learning rate of group 0 to 1.0000e-04.\n",
488 | "Epoch 29 Train loss: 0.042 Val loss: 0.061 Val acc: 98.32%\n",
489 | "Epoch 30 Train loss: 0.041 Val loss: 0.060 Val acc: 98.35%\n",
490 | "Epoch 31 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
491 | "Epoch 31: reducing learning rate of group 0 to 1.0000e-05.\n",
492 | "Epoch 32 Train loss: 0.042 Val loss: 0.060 Val acc: 98.33%\n",
493 | "Epoch 33 Train loss: 0.042 Val loss: 0.060 Val acc: 98.33%\n",
494 | "Epoch 34 Train loss: 0.040 Val loss: 0.060 Val acc: 98.33%\n",
495 | "Epoch 34: reducing learning rate of group 0 to 1.0000e-06.\n",
496 | "Epoch 35 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
497 | "Epoch 36 Train loss: 0.038 Val loss: 0.060 Val acc: 98.33%\n",
498 | "Epoch 37 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
499 | "Epoch 37: reducing learning rate of group 0 to 1.0000e-07.\n",
500 | "Epoch 38 Train loss: 0.039 Val loss: 0.060 Val acc: 98.33%\n",
501 | "Epoch 39 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
502 | "Epoch 40 Train loss: 0.040 Val loss: 0.060 Val acc: 98.33%\n",
503 | "Epoch 40: reducing learning rate of group 0 to 1.0000e-08.\n",
504 | "Epoch 41 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
505 | "Epoch 42 Train loss: 0.042 Val loss: 0.060 Val acc: 98.33%\n",
506 | "Epoch 43 Train loss: 0.042 Val loss: 0.060 Val acc: 98.33%\n",
507 | "Epoch 44 Train loss: 0.039 Val loss: 0.060 Val acc: 98.33%\n",
508 | "Epoch 45 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
509 | "Epoch 46 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
510 | "Epoch 47 Train loss: 0.041 Val loss: 0.060 Val acc: 98.33%\n",
511 | "Epoch 48 Train loss: 0.042 Val loss: 0.060 Val acc: 98.33%\n",
512 | "Epoch 49 Train loss: 0.040 Val loss: 0.060 Val acc: 98.33%\n",
513 | "Epoch 50 Train loss: 0.039 Val loss: 0.060 Val acc: 98.33%\n",
514 | "Test loss: 0.053 Test acc: 98.48%\n",
515 | "Compression rate: 1\n",
516 | "The number of parameters is: 795010\n",
517 | "Epoch 1 Train loss: 0.294 Val loss: 0.132 Val acc: 95.90%\n",
518 | "Epoch 2 Train loss: 0.140 Val loss: 0.094 Val acc: 97.07%\n",
519 | "Epoch 3 Train loss: 0.106 Val loss: 0.075 Val acc: 97.63%\n",
520 | "Epoch 4 Train loss: 0.087 Val loss: 0.072 Val acc: 97.95%\n",
521 | "Epoch 5 Train loss: 0.077 Val loss: 0.065 Val acc: 98.02%\n",
522 | "Epoch 6 Train loss: 0.065 Val loss: 0.067 Val acc: 98.00%\n",
523 | "Epoch 7 Train loss: 0.059 Val loss: 0.061 Val acc: 98.12%\n",
524 | "Epoch 8 Train loss: 0.055 Val loss: 0.057 Val acc: 98.12%\n",
525 | "Epoch 9 Train loss: 0.048 Val loss: 0.057 Val acc: 98.25%\n",
526 | "Epoch 10 Train loss: 0.045 Val loss: 0.059 Val acc: 98.18%\n",
527 | "Epoch 11 Train loss: 0.041 Val loss: 0.054 Val acc: 98.32%\n",
528 | "Epoch 12 Train loss: 0.037 Val loss: 0.053 Val acc: 98.40%\n",
529 | "Epoch 13 Train loss: 0.035 Val loss: 0.057 Val acc: 98.35%\n",
530 | "Epoch 14 Train loss: 0.033 Val loss: 0.057 Val acc: 98.20%\n",
531 | "Epoch 15 Train loss: 0.034 Val loss: 0.053 Val acc: 98.53%\n",
532 | "Epoch 16 Train loss: 0.030 Val loss: 0.051 Val acc: 98.38%\n",
533 | "Epoch 17 Train loss: 0.029 Val loss: 0.048 Val acc: 98.48%\n",
534 | "Epoch 18 Train loss: 0.027 Val loss: 0.052 Val acc: 98.50%\n",
535 | "Epoch 19 Train loss: 0.026 Val loss: 0.055 Val acc: 98.33%\n",
536 | "Epoch 19: reducing learning rate of group 0 to 1.0000e-03.\n",
537 | "Epoch 20 Train loss: 0.025 Val loss: 0.057 Val acc: 98.45%\n",
538 | "Epoch 21 Train loss: 0.020 Val loss: 0.052 Val acc: 98.57%\n",
539 | "Epoch 22 Train loss: 0.018 Val loss: 0.050 Val acc: 98.70%\n",
540 | "Epoch 22: reducing learning rate of group 0 to 1.0000e-04.\n",
541 | "Epoch 23 Train loss: 0.018 Val loss: 0.049 Val acc: 98.73%\n",
542 | "Epoch 24 Train loss: 0.016 Val loss: 0.049 Val acc: 98.75%\n",
543 | "Epoch 25 Train loss: 0.016 Val loss: 0.048 Val acc: 98.75%\n",
544 | "Epoch 26 Train loss: 0.016 Val loss: 0.048 Val acc: 98.73%\n",
545 | "Epoch 27 Train loss: 0.016 Val loss: 0.048 Val acc: 98.73%\n",
546 | "Epoch 28 Train loss: 0.016 Val loss: 0.048 Val acc: 98.73%\n",
547 | "Epoch 29 Train loss: 0.016 Val loss: 0.048 Val acc: 98.75%\n",
548 | "Epoch 30 Train loss: 0.015 Val loss: 0.048 Val acc: 98.73%\n",
549 | "Epoch 31 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
550 | "Epoch 31: reducing learning rate of group 0 to 1.0000e-05.\n",
551 | "Epoch 32 Train loss: 0.014 Val loss: 0.048 Val acc: 98.73%\n",
552 | "Epoch 33 Train loss: 0.015 Val loss: 0.048 Val acc: 98.73%\n",
553 | "Epoch 34 Train loss: 0.015 Val loss: 0.048 Val acc: 98.73%\n",
554 | "Epoch 34: reducing learning rate of group 0 to 1.0000e-06.\n",
555 | "Epoch 35 Train loss: 0.015 Val loss: 0.048 Val acc: 98.72%\n",
556 | "Epoch 36 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
557 | "Epoch 37 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
558 | "Epoch 37: reducing learning rate of group 0 to 1.0000e-07.\n",
559 | "Epoch 38 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
560 | "Epoch 39 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
561 | "Epoch 40 Train loss: 0.015 Val loss: 0.048 Val acc: 98.72%\n",
562 | "Epoch 40: reducing learning rate of group 0 to 1.0000e-08.\n",
563 | "Epoch 41 Train loss: 0.017 Val loss: 0.048 Val acc: 98.72%\n",
564 | "Epoch 42 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
565 | "Epoch 43 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
566 | "Epoch 44 Train loss: 0.015 Val loss: 0.048 Val acc: 98.72%\n",
567 | "Epoch 45 Train loss: 0.017 Val loss: 0.048 Val acc: 98.72%\n",
568 | "Epoch 46 Train loss: 0.015 Val loss: 0.048 Val acc: 98.72%\n",
569 | "Epoch 47 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
570 | "Epoch 48 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
571 | "Epoch 49 Train loss: 0.016 Val loss: 0.048 Val acc: 98.72%\n",
572 | "Epoch 50 Train loss: 0.015 Val loss: 0.048 Val acc: 98.72%\n",
573 | "Test loss: 0.045 Test acc: 98.73%\n",
574 | "The number of parameters is: 795010\n",
575 | "Epoch 1 Train loss: 0.289 Val loss: 0.122 Val acc: 96.62%\n",
576 | "Epoch 2 Train loss: 0.142 Val loss: 0.088 Val acc: 97.23%\n",
577 | "Epoch 3 Train loss: 0.110 Val loss: 0.080 Val acc: 97.37%\n",
578 | "Epoch 4 Train loss: 0.090 Val loss: 0.075 Val acc: 97.57%\n",
579 | "Epoch 5 Train loss: 0.078 Val loss: 0.066 Val acc: 97.95%\n",
580 | "Epoch 6 Train loss: 0.069 Val loss: 0.065 Val acc: 97.87%\n",
581 | "Epoch 7 Train loss: 0.062 Val loss: 0.062 Val acc: 98.07%\n",
582 | "Epoch 8 Train loss: 0.055 Val loss: 0.060 Val acc: 98.13%\n",
583 | "Epoch 9 Train loss: 0.055 Val loss: 0.058 Val acc: 98.25%\n",
584 | "Epoch 10 Train loss: 0.048 Val loss: 0.058 Val acc: 98.45%\n",
585 | "Epoch 11 Train loss: 0.045 Val loss: 0.058 Val acc: 98.22%\n",
586 | "Epoch 11: reducing learning rate of group 0 to 1.0000e-03.\n",
587 | "Epoch 12 Train loss: 0.041 Val loss: 0.059 Val acc: 98.23%\n",
588 | "Epoch 13 Train loss: 0.032 Val loss: 0.053 Val acc: 98.50%\n",
589 | "Epoch 14 Train loss: 0.031 Val loss: 0.052 Val acc: 98.47%\n",
590 | "Epoch 15 Train loss: 0.027 Val loss: 0.051 Val acc: 98.55%\n",
591 | "Epoch 16 Train loss: 0.025 Val loss: 0.051 Val acc: 98.52%\n",
592 | "Epoch 17 Train loss: 0.024 Val loss: 0.050 Val acc: 98.45%\n",
593 | "Epoch 18 Train loss: 0.024 Val loss: 0.049 Val acc: 98.53%\n",
594 | "Epoch 19 Train loss: 0.023 Val loss: 0.049 Val acc: 98.58%\n",
595 | "Epoch 20 Train loss: 0.023 Val loss: 0.049 Val acc: 98.50%\n",
596 | "Epoch 21 Train loss: 0.022 Val loss: 0.050 Val acc: 98.52%\n",
597 | "Epoch 21: reducing learning rate of group 0 to 1.0000e-04.\n",
598 | "Epoch 22 Train loss: 0.023 Val loss: 0.049 Val acc: 98.60%\n",
599 | "Epoch 23 Train loss: 0.021 Val loss: 0.049 Val acc: 98.60%\n",
600 | "Epoch 24 Train loss: 0.022 Val loss: 0.049 Val acc: 98.60%\n",
601 | "Epoch 24: reducing learning rate of group 0 to 1.0000e-05.\n",
602 | "Epoch 25 Train loss: 0.021 Val loss: 0.049 Val acc: 98.60%\n",
603 | "Epoch 26 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
604 | "Epoch 27 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
605 | "Epoch 27: reducing learning rate of group 0 to 1.0000e-06.\n",
606 | "Epoch 28 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
607 | "Epoch 29 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
608 | "Epoch 30 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
609 | "Epoch 30: reducing learning rate of group 0 to 1.0000e-07.\n",
610 | "Epoch 31 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
611 | "Epoch 32 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n"
612 | ]
613 | },
614 | {
615 | "name": "stdout",
616 | "output_type": "stream",
617 | "text": [
618 | "Epoch 33 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
619 | "Epoch 33: reducing learning rate of group 0 to 1.0000e-08.\n",
620 | "Epoch 34 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
621 | "Epoch 35 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
622 | "Epoch 36 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
623 | "Epoch 37 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
624 | "Epoch 38 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
625 | "Epoch 39 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
626 | "Epoch 40 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
627 | "Epoch 41 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
628 | "Epoch 42 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
629 | "Epoch 43 Train loss: 0.023 Val loss: 0.049 Val acc: 98.62%\n",
630 | "Epoch 44 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
631 | "Epoch 45 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
632 | "Epoch 46 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
633 | "Epoch 47 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
634 | "Epoch 48 Train loss: 0.022 Val loss: 0.049 Val acc: 98.62%\n",
635 | "Epoch 49 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
636 | "Epoch 50 Train loss: 0.021 Val loss: 0.049 Val acc: 98.62%\n",
637 | "Test loss: 0.048 Test acc: 98.54%\n"
638 | ]
639 | }
640 | ],
641 | "source": [
642 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n",
643 | "kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n",
644 | "train_loader, valid_loader, test_loader = load_data(batch_size=50, kwargs=kwargs)\n",
645 | "\n",
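645 | "# Train a HashedNet at the given compression rate, or a plain Net sized to roughly match its parameter count\n",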
646 | "def train_nn(compress, hashed):\n",
647 | " input_dim = 784\n",
648 | " output_dim = 10\n",
649 | " \n",
650 | " if hashed:\n",
651 | " model = HashedNet(input_dim, output_dim, 1, 1000,\n",
652 | " compress, dropout=0.25).to(device)\n",
653 | " else:\n",
654 | " eq_compress = get_equivalent_compression(input_dim, output_dim,\n",
655 | " 1000, 1, compress)\n",
656 | " model = Net(input_dim, output_dim, 1, 1000,\n",
657 | " eq_compress, 0.25).to(device)\n",
658 | "\n",
659 | " optimizer = optim.SGD(model.parameters(), lr=0.01,\n",
660 | " momentum=0.9,\n",
661 | " weight_decay=0.0)\n",
662 | "\n",
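662 | "    # Drop the learning rate 10x whenever validation loss stops improving for 2 epochs\n",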
663 | " scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n",
664 | " factor=0.1,\n",
665 | " patience=2,\n",
666 | " verbose=True)\n",
667 | "\n",
668 | " print('The number of parameters is: {}'.format(\n",
669 | " sum(p.numel() for p in model.parameters() if p.requires_grad)))\n",
670 | "\n",
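670 | "    # 50 epochs: train, evaluate on the validation split, and step the scheduler on the validation loss\n",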
671 | " for epoch in range(1, 50 + 1):\n",
672 | " tr_loss = train(model, device, train_loader, optimizer, epoch, log_interval=50)\n",
673 | " val_loss, val_acc = evaluate(model, device, valid_loader)\n",
674 | " scheduler.step(val_loss)\n",
675 | " print('Epoch {} Train loss: {:.3f} Val loss: {:.3f} Val acc: {:.2f}%'.format(\n",
676 | " epoch, tr_loss, val_loss, val_acc))\n",
677 | "\n",
678 | " test_loss, test_acc = evaluate(model, device, test_loader)\n",
679 | " print('Test loss: {:.3f} Test acc: {:.2f}%'.format(test_loss, test_acc))\n",
680 | " \n",
681 | " return test_loss, test_acc\n",
682 | "\n",
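682 | "# Sweep compression rates, training an equivalently-sized plain NN and a HashedNet at each rate\n",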
683 | "compression_rates = [1/64, 1/32, 1/16, 1/8, 1]\n",
684 | "nn_records = []\n",
685 | "hashednn_records = []\n",
686 | "for compression in compression_rates:\n",
687 | " print(\"Compression rate: {}\".format(compression))\n",
688 | " nn_records.append(train_nn(compression, hashed=False))\n",
689 | " hashednn_records.append(train_nn(compression, hashed=True))"
690 | ]
691 | },
692 | {
693 | "cell_type": "code",
694 | "execution_count": 43,
695 | "metadata": {},
696 | "outputs": [
697 | {
698 | "data": {
699 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAEWCAYAAABliCz2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xd4VFX6wPHvO5NGCr0TegtFaRFUEAJYUFTsuKK/RXdta9ldC1i2sMVVV111ldW1gausqKirWFBUgkGaCdJD7xI6gRRKSM7vj3OTTEJ6MrmTzPt5nvvMzK3vHMI795577jlijEEppVT953E7AKWUUrVDE75SSgUJTfhKKRUkNOErpVSQ0ISvlFJBQhO+UkoFCU34qlQiMlFEFpSx/AsR+XkpyzqJiBGRkFKWTxGRt2sqVp/9JojIrprer7JEJFNEurgdh6oaTfhBRkTuFpFkETkhItOrsy9jzMXGmDdrKLR6TUTCRGSWiGxzfggTii0XEXlSRA4605MiIj7L+4tIiohkO6/9K7ptTTLGRBtjtjjHnS4if/XHcZR/aMIPPruBvwJvuB1IfVHaVUwJFgA3AntKWHYbcAXQDzgTuAy43dl/GPAx8DbQBHgT+NiZX+a2SvnShB9kjDEfGmP+Bxys6DYi8rSIHBaRrSJysc/8RBH5pfPe66x3QES2AGOL7aOziMwXkQwRmQs0L7b8bBFZKCLpIrLC9wzYOc5fROR7Z/uvRKTI9mXE/pCIbHa2WysiVzrzw0TkkIic4bNuS+cMuoXz+VIRWe7EtFBEzvRZd5uITBaRlUBWeUnfGHPSGPOcMWYBkFvCKj8HnjHG7DLG/AQ8A0x0liUAIcBzxpgTxph/AgKMqsC2xcvjtGo654qjm/N+uohMFZHPnDJbIiJdi68rIrcBE4BJTjXPbGf5ZBH5ydl2vYiMLqtcVO3ShK/KMwRYj03QfwdeL6W64FbgUmAAEA9cU2z5f4EUZz9/wSYpAESkHfAZ9sqjKfAA8EF+4nXcANwMtATCnHUqYjNwHtAI+BPwtoi0McacBGZiz7jz/Qz4xhizX0QGYK+CbgeaAf8GPhGR8GLrjwUaG2NOVTCe0vQBVvh8XuHMy1+20hTtB2VlseWlbVsV12PLqgmwCXis+ArGmFeAGcDfnWqey0SkJ3A3cJYxJga4CNhWjThUDdOEr8qz3RjzqjEmF1uV0AZoVcJ612HPQHcaYw4Bj+cvEJEOwFnA750z1O+A2T7b3gh8boz53BiTZ4yZCyQDl/isM80Ys8EYcwx4D+hPBRhj3jfG7Hb2+y6wERjsLH4T+JnPD9hNwFvO+9uAfxtjlhhjcp17FSeAs312/0/n+x6rSCzliAaO+Hw+AkQ7sRVflr88pgLbVsVHxpilzo/YDCpY1tgrl3Cgt4iEGmO2GWM2VzEG5Qea8FV5CuqbjTHZztvoEtZrC+z0+by92LLDxpisUpZ3BK51qk7SRSQdGIb9cTktDiC7lBhOIyL/51Mtkw70xalOMsYscfaVICJxQDfgE5+Y7i8WU3vnu+Tz/b7VlQk09PncEMh0zuqLL8tfnlGBbauiSmVtjNkE/AaYAuwTkZki0rbsrVRt0oSvakoaNiHm61BsWRMRiSpl+U7gLWNMY58pyhjzRHUCEpGOwKvYaoZmxpjGwGps/Xe+N7FXGDcBs4wxx31ieqxYTJHGmHd8tq3JrmbXYG+65uvnzMtfdmaxM/Yziy0vbdvisoDI/A8i0roaMZ/2/Y0x/zXGDMP+YBrgyWrsX9UwTfhBRkRCRCQC8AJeEYmoRCuTsrwH3CsisSLSBHgof4ExZju2iuZPzs3SYdiWJPneBi4TkYucm78RYtvTx1Yzpihs0tkPICI3Y8/wfb0NXIlN+v/xmf8qcIeIDBErSkTGikgMpXBueE4vY3m4U/YAYc73zE/i/wHuE5F2zlnx/UD+vhKx1SX3Ovu425n/bQW2LW4F0EdsM88I7Nl4Ve0FCtrki0hPERnl3Oc4DhwD8qqxf1XDNOEHn99h/yM+hE1yx5x51fUq8CU2oSwDPiy2/AbsDeBDwB/xSa7GmJ3AOOARbHLeCTxINf8+jTFrsS1WFmGT0xnA98XW2enEa4Akn/nJ2BvRLwKHsTcvJ5ZzyPbF91/Memx5t8OW1THsmTDYm8KzgVXYq5DPnHk4N5ivAP4PSAduAa5w5pe5bXHGmA3An4GvsfczSn2wrgJex9bXp4vI/7D1908AB7DVQi2Bh6uxf1XDRAdAUcFORN4AdhtjqvzDJ7ZN/ArgTGNMTo0Fp1QN0oSvgpqIdAKWAwOMMVvdjUYp/9IqHRW0ROQv2CqQpzTZq2CgZ/hKKRUk9AxfKaWCRE00x6sxzZs3N506darStllZWURFRZW/ogK0vCpLy6tytLwqpzrllZKScsAY06L8NQMs4Xfq1Ink5OQqbZuYmEhCQkLNBlSPaXlVjpZX5Wh5VU51yktEtpe/lqVVOkopFST8mvBFpLHYQR/WiUiqiJzjz+MppZQqnb+rdJ4H5hhjrnEeTIksbwOllFL+4beELyKNgOE4j6M7j4GfLGsbpVTgyMnJYdeuXRw/frz8lYtp1KgRqampfoiqfqpIeUVERBAbG0toaGiVj+PPM/zO2H5RpolIP+zgF78u1kUuzsg5twG0atWKxMTESh8oMmsHA1c9wdKsh8iO6lD+BorMzMwqlXWwCsbyio6OplWrVrRr147Kdq2fm5uL1+v1U2T1T3nlZYzhyJEjrFixgszMzCofx28PXolIPLAYGGqMWSIizwNHjTG/L22b+Ph4U+lWOiezYOoQzJFdSKNYuGsJhGlzsPJoK4rKCcbySk1NJS4urtLJHiAjI4OYmFI7FlXFVKS8jDGsW7eOXr16FZkvIinGmPiKHMefN213AbucQSYAZgEDa/woH98FWfsRDGTth4/vLn8bpVSFVH3QLFXTauLfwm8J3xizB9jpjHMJMBpYW6MHWfY2bPgSTjl1jKeOw4Y5dr5SSqki/N0O/x5ghoisxI6L+bca3fs3UyAnu+i8nGw7XykVFKKji47AOH36dO6+u/JX+omJiVx66aVVjmPbtm307Vt8fB07X0R44YUXCubdfffdTJ8+HYCJEyfSs2dPTpw4AcCBAweoao8D5fFrs0xjzHKgQnVLVTJ6CnzxYNGkH9IAzv+T3w6plKqY+L/O5UDm6Q3zmkeHkfy7C1yIyD0tW7bk+eef5/bbbycsLOy05V6vlzfeeIM777zTr3HU7SdtB94IPS6CkIjCeeHR0O9692JSSgGUmOzLmu8Ps2fPZsiQIQwYMIDzzz+fvXv3AjB//nz69+9P//79GTBgABkZdjz4zMxMrrnmGuLi4pgwYQL5jVpSUlIYMWIEgwYN4qKLLiItLa1gfr9+/ejXrx9Tp04tNY4WLVowevRo3nzzzRKX/+pXv+LZZ5/l1KlTNfn1TxNQfelUybipTiudnYh47I3bpH/AiAfdjkypeqPTQ5+5ur9tT4wtddmxY8fo379
/wedDhw5x+eWXAzBs2DAWL16MiPDaa6/x97//nWeeeYann36aqVOnMnToUDIzM4mIsCeNP/74I2vWrKFt27YMHTqU77//niFDhnDPPffw8ccf06JFC959910effRR3njjDW6++WZefPFFhg8fzoMPlp1zJk+ezMUXX8wtt9xy2rLY2FiGDRvGW2+9xWWXXVbC1jWj7if8sCiY8D7Z068jatR98OlvIfFv0OFs6Hye29EppfysQYMGLF++vODz9OnTCzph3LVrF+PHjyctLY2TJ0/SuXNnAIYOHcp9993HhAkTuOqqq4iNjQVg8ODBBe/79+/Ptm3baNy4MatXr+aCC2w1VG5uLm3atCE9PZ309HSGDx8OwE033cQXX3xRapxdunRhyJAh/Pe//y1x+cMPP8y4ceMYO7b0H7fqqvsJH6BlL34Y/AIJ8QlwZBckPQ1z/wC3fgvarEypaivrDLskGRkZnPHYdzW2v6q65557uO+++7j88stJTExkypQpADz00EOMHTuWzz//nKFDh/Lll18CEB4eXrCt1+vl1KlTGGPo06cPixYtKrLv9PT0SsfzyCOPcM011zBixIjTlnXv3p3+/fvz3nvvVXq/FVW36/BLkvAwnHc/TJilyV6pIHfkyBHatWsHUKT+fPPmzZxxxhlMnjyZs846i3Xr1pW6j549e7J///6ChJ+Tk8OaNWto3LgxjRs3ZsGCBQDMmDGj3Hji4uLo3bs3s2fPLnH5o48+ytNPP13h71dZ9S/he0Ng9B8gqpnbkSgV1JpHn94apaz5/jBlyhSuvfZaBg0aRPPmzQvmP/fcc/Tt25czzzyT0NBQLr744lL3ERYWxqxZs5g8eTL9+vWjf//+LFy4EIBp06Zx11130b9/fyraa8Gjjz7Krl27SlzWp08fBg6s+edT8wXUmLZV6lrBUeKj77k58M2fofsF0Hl49QOsR4Kxq4DqCMbySk1NPe0x/orSrhUqp6LlVdK/SaB0reC+Zf+Bhf+ED34JmfvcjkYppVxVvxP+wJ9Dx2GQudcm/bxctyNSSinX1O+E7w2Bq1+DyOawdT5857+bIUopFejqd8IHaNgGrnoFEEh8HLbMdzsipZRyRf1P+ADdRsPwBwBjq3Yy9rodkVJK1br68eBVRYx4CLYvgrBI8FZ9iDCllKqrguMMH2x9/s/+Cz97FyKbuh2NUsFjXypMPdu++oHX6y3oCK1///488cQTVd7XueeeW+VtO3XqxNVXX13wedasWUycOBGw3T14PB5WrlxZsLxv375s27atyseriuA5wweIaFT4PjcHDm+H5t3ci0ep+u5kFsy41nZ5MuNavwxBWrwvnerIf6CqqlJSUli7di29e/c+bVlsbCyPPfYY7777brWOUR3Bc4bvK/sQTLsEpl+i7fOV8idnCFJcGIJ0zpw5xMXFMXDgQO69996CwU2mTJlSpPsC3zPt/MFUrr/+ej77rLBHz4kTJzJr1qxyj3n//ffz2GOPlbjs0ksvZc2aNaxfv76qX6nagusMP19EIwgJt+3zP7wVbvwQPKWPGK+UAqY0Kn3Zpc9B/M32ffI0Yj79zenrnDoOaz6ErqPtWBYA/x4OaStK3ufAn8Pl/yw3rOLdI+f3Onnrrbfy7bff0q1bN8aPH1/ufnyNHz+e9957j7Fjx3Ly5Em++eYbXnrppXK3u+666/jXv/7Fpk2bTlvm8XiYNGkSf/vb30rtF9/fgvMM3+O17fOjWsCWREh6xu2IlAoeNTwEaX6VTv40fvx41q1bR+fOnenevTsiwo033lipfV588cXMmzePEydO8MUXXzB8+HAaNGhQ7nZer5cHH3yQxx9/vMTlN9xwA4sXL2br1q2ViqemBOcZPkBMa9s+/62rbPv8DmdrfztKlWXKkYqtF38zGT2vIWbjx6cPQRoaWXQI0ttL70LZX0JCQsjLyyv4fPz48dPWiYiIICEhgS+//JJ3332X66+v+Ch6N910E48//niJ49uGhIRw//338+STT1Yt+GoKzjP8fF1HwfAHweTBrF9o+3ylalLxIUhDIqDHGBgwwe+HjouLY9u2bWzevBmAd955p2BZp06dWLZsGQDLli0r9Wx7/PjxTJs2jaSkJMaMGVPhY4eGhvLb3/6WZ599tsTlEydO5Ouvv2b//v0V3mdNCe6ED5DwEHQ6D7L2QfLrbkejVP0ybqqtOkXs67gXa/wQ+XX4+dNDDz1EREQEr7zyCmPHjmXgwIG0bNmyYP2rr76aQ4cO0adPH1588UV69OhR4n4vvPBC5s+fz/nnn18w8Pju3bu55JJLyo3pF7/4Ranj04aFhXHvvfeyb58LDUaMMQEzDRo0yFTVvHnzqrytOZpmzMKpxuTmVn0fdUy1yisIBWN5rV27tsrbHj16tPDD3rXGvDjEvrpk3rx5ZuzYsa4dvzxFyqsMJf2bAMmmgjk2eOvwfcW0hnN+5XYUStVPLXvBXYvdjkKhVTqnS98JMydofb5S9UhCQgKffvqp22G4ThN+cV8+DOs+hQ+1/3ylTACNiBfsauLfQhN+cZc8bW8ubf0OvnvK7WiUck1ERAQHDx7UpB8AjDEcPHiQiIiIau1H6/CLi2ltH8r6zxWQ+AR0OAe6jHA7KqVqXWxsLLt27apS88Hjx49XOzkFk4qUV0REBLGxsdU6jib8knRJgBGTYf4Ttv/8OxZATCu3o1KqVoWGhtK5c+cqbZuYmMiAAQNqOKL6q7bKS6t0SjNiUmH7/A9+ofX5Sqk6TxN+aTxeuPp1iGkL3S8AxO2IlFKqWrRKpywxreCe5Brvv1sppdygZ/jl8U326Tu1fb5Sqs7ShF9R2xfBv8/T+nylVJ2lCb+imnYGTwhsS4L5f3c7GqWUqjS/JnwR2SYiq0RkuYgk+/NYfpffPh+B+U/agVOUUqoOqY0z/JHGmP7GmPhaOJZ/dUmw7fMx8MGtWp+vlKpTtEqnsrR9vlKqjhJ/9pMhIluBw4AB/m2MeaWEdW4DbgNo1arVoJkzZ1bpWJmZmQUjzvtb2InDxCf/htCcoyzv/xhHGveulePWpNosr/pAy6tytLwqpzrlNXLkyJSK1qD4O+G3M8b8JCItgbnAPcaYUgexjI+PN8nJVavqT0xMJCEhoWqBVsW2BSBe6HhO7R2zBtV6edVxWl6Vo+VVOdUpLxGpcML364NXxpifnNd9IvIRMBio/VGL/aHTMLcjUEqpSvFbHb6IRIlITP574EJgtb+O56r1c+Ddm7Q+XykV0Px507YVsEBEVgBLgc+MMXP8eDx3nMyC2fdC6ie2uaZSSgUovyV8Y8wWY0w/Z+pjjHnMX8dyVVgUXPUqtn3+32HzPLcjUkqpEmmzzJrQZQQkPAQY+PBWyNjjdkRKKXUaTfg1ZfiD0Hk4ZO23g6Zofb5SKsBowq8pHi9c9RpEtXT629H6fKVUYNGEX5NiWtn+dhp1gK6j3I5GKaWK0AFQalqXEXbQlJBwtyNRSqki9AzfH3yT/faFkHvKvViUUsqhCd+fkp6BaRdrfb5SKiBowven2LNAPPDdU7DpG7ejUU
oFOU34/tR5OIzIb59/GxxNczsipVQQ04Tvb8MfgM4jIPuAbZ+v9flKKZdowvc3j9c21YxuBdsXwPwn3I5IKRWkNOHXhuiWNumLB1bMhBOZbkeklApC2g6/tnQebjtZ6zISwnUkIKVU7dOEX5vOuKboZ2NAxJ1YlFJBR6t03JCbA1/9HhIfdzsSpVQQ0TN8N6StgEUv2jP8DmdrvztKqVqhZ/huiI2HhIcBAx/cqu3zlVK1QhO+W867H7okaPt8pVSt0YTvFo/XttrR9vlKqVqiCd9Nvu3zv3tax8NVSvmV3rR1W+fhtj5/1w/Q+ky3o1FK1WOa8APBeQ/YV49ecCml/EczTCDweAqTfW4ObPra3XiUUvWSJvxAknsK3rwc3r5Gk75SqsZpwg8k3hDnIaz8/vN3ux2RUqoe0YQfaM67z3awln1Q2+crpWqUJvxAU9A+vzVs/17721FK1RhN+IEougVc87ptn5/0jNbnK6VqhCb8QNVpGIx8BDCwapbb0Sil6gFthx/Iht0PTTpDn6vcjkQpVQ9owg9kHk/RQVN0wBSlVDVolU5dkb4T3hij9flKqSrThF9XrP4Adi7W9vlKqSqrVMIXkSgR8VZyG6+I/Cgin1YuNFXEufcUts+f9Qttn6+UqrQyE76IeETkBhH5TET2AeuANBFZKyJPiUi3Chzj10BqTQQb1Hzb5+9YCIl/czsipVQdU94Z/jygK/Aw0NoY094Y0xIYBiwGnhSRG0vbWERigbHAazUUb3DT9vlKqWooL+Gfb4z5izFmpTEmL3+mMeaQMeYDY8zVwLtlbP8cMAnIK2MdVRkF7fOx9fmZ+9yNRylVZ4gxpuIri0QANwINgP8aYw6Wse6lwCXGmF+JSALwgDHm0hLWuw24DaBVq1aDZs6cWblv4MjMzCQ6OrpK29Y5Jo8zVj1GeuM+7Gx/hT3jr6SgKq8aoOVVOVpelVOd8ho5cmSKMSa+IutWNuH/G/gee8Z+uzHmvDLWfRy4CTgFRAANgQ+NMaVWAcXHx5vk5OQKx+MrMTGRhISEKm1bJ+XlVWvAlKArr2rS8qocLa/KqU55iUiFE355N23fEZGuPrOaAu8DHwBNytrWGPOwMSbWGNMJuB74tqxkryrJN9mn74Tti9yLRSlVJ5T3pO2jwF9FJA34C/A08BH2jH2Kf0NTFXJgE7x+vq3WuT0JGrVzOyKlVIAq8wzfGLPFGHMDNsm/CwwBxhpjEowxFe7RyxiTWFL9vaoBTbtA2wFO//naPl8pVbryqnSaiMhdQG/gWuAw8KWIXFYbwakK8Hjgylcgpg3sWATzHnM7IqVUgCrvrt//gHTAAG8ZY94CLgMGiMhsfwenKii6BVzttM9f8A/YqO3zlVKnKy/hNwNmYW/UtgMwxhwzxvwZpymlChCdhsLIR+37j26DIz+5G49SKuCUl/D/CMzBJv2HfBcYY9L8FZSqomH32UHQj6Xb6h2llPJRZisdY8wH2CaYqi7weGx/Owc3Q4chbkejlAow5d20fVVE+payLEpEbhGRCf4JTVVJVPOiyT4v171YlFIBpbwqnanAH0QkVUTeF5F/icgbIpIELARisNU9KhCtnwNTB2t9vlIKKL9KZzlwnYhEA/FAG+AYkGqMWV8L8amqMgZ+eA0OboJZt8DET8Eb6nZUSikXVagzFmNMpvPw1DvGmP9psq8DRODKl237/J2L4du/uh2RUsplOsRhfRbVHK55w7bP//452PCV2xEppVykCb++63gujPqdff/R7XBkl7vxKKVcU27Cd8akfbo2glF+MvS30HU0HDsEH94Oe9dy1tJ7YJ+OPKlUMCk34RtjcrFDGqq6yuOBq16BdvEw/EH473VEZu+EGdfCySy3o1NK1ZKKVun8KCKfiMhNInJV/uTXyFTNimoOv/walk2HrP0IBrL2w8d3ux2ZUqqWlNcffr4I4CAwymeeAT6s8YiU//w4AzZ8CaeO28+njsOGObDsbRioY9MoVd9VKOEbY272dyCqFnwzBXKyi87LyYa5v9eEr1QQqFCVjojEishHIrLPmT4QkVh/B6dq2OgpEBp5+vyT2bDyPfuwllKq3qpoHf404BOgrTPNduapumTgjdDjIgiJsJ+94fbBrNzj8OGt9oncY4fdjVEp5TcVTfgtjDHTjDGnnGk60MKPcSl/GTcVolpgEIhuCXcnw+UvQGgUrPkQPp/kdoRKKT+paMI/KCI3Om3yvSJyI/YmrqprwqJgwvtkR7aHCe9DeDQM/D+4cwF0vxDOn+J2hEopP6lowr8FuA7YA6QB1wB6I7euatmLHwa/AC17Fc5r2sX+ADRqZz/n5cLsX8OeVe7EqJSqceW20hERL3CVMebyWohHBYqU6Xb6cQaM/j2cczd4vG5HpZSqhoo+afuzWohFBZJ+10P8LZCXA3P/AG9eDuk73I5KKVUNFa3S+V5EXhSR80RkYP7k18iUu8Ki4NJn4Yb3IKoFbF8ALw3V5ptK1WEVfdK2v/P6Z595hqJP3qr6qMdF8KvF8Mm9sP4z23wzJAJ6aw2fUnVNRerwPcBLxpj3aiEeFYiimsP1M+DHt2DtxxA31u2IlFJVUJE6/DxAG2cHOxHbfHPCrMKbtxl74Os/Qc5xd2NTSlVIRevwvxaRB0SkvYg0zZ/8GpkKTCKF72f/Ghb8A15J0OabStUBFa3DH++83uUzzwBdajacyon/61wOZJ4snDHnMwCaR4eR/LsLXIoqiIyYZAdJ358Kr4zU5ptKBbiKDmLeuYTJ1WQPFE32FZivali7QXD7dxD/C22+qVQdUGbCF5FJPu+vLbbsb/4KStUhYVFw6T/ghvchqqVtvvnv4XAs3e3IlFLFlHeGf73P+4eLLRtTw7HUqOM5uW6HEFx6XAi/WgQ9x9oHtho0djsipVQx5SV8KeV9SZ8Dyoin5jH9+62a+GtTfvPNkY8Wzts8z05KKdeVl/BNKe9L+hxQ9h49wZTZa0l4KpH/LNrGiVOa+GuFSOFN2+xD8NHt8NYV8MVDkHPM3diUCnLlJfx+InJURDKAM533+Z/PKGtDEYkQkaUiskJE1ojIn2osakfz6LBS579840DiWsew5+hx/vDxGhKeSuStxds18dem8IZw1q0gXljykm3Jo803lXJNmc0yjTHVaV93AhhljMkUkVBggYh8YYxZXI19FuHb9DIxMZGEhIQiyy/s3Zo5a/bw/NcbWb83g9//bzUvzdvEXaO6ce2g9oSFVPQxBFUl3hAY8SB0GwUf3lbYfHPU7+Dce7T5plK1zG8Zz1iZzsdQZ6rVaiCPR7jkjDZ88evzePGGAXRvGc3uI8d59KPVjHw6kXeW7iAnN682QwpOxZtvfv1HW9WjlKpVYvzY86HTl34K0A2YaoyZXMI6twG3AbRq1WrQzJkzq3SszMxMoqOjy1wnzxiW7snl400nScuy37t5A+GyrqEMbRtCiCeg70PXqIqUlz80PZhMz/Uvktrrt6Q36Vfrx68qt8qrrtLyqpzqlNfIkSNTjDHxFVnXrwm/4CAijYGPgHuMMatLWy8+Pt4kJ
ydX6RglVemUJjfP8OnK3Tz/zUa27M8CoEPTSO4e1Y2rBrQjxFv/q3oqU141LucYhDYo/Lzyfeg2GiIDt7cOV8urDtLyqpzqlJeIVDjh10pmM8akA/MIkLb7Xo8wrn875v52BM+O70fn5lHsOJTNpFkrGf2P+cxK2cUprerxH99kvzXJdrn80lBtvqmUn/kt4YtIC+fMHhFpAFwArPPX8arC6xGuHBDL3N8O55lr+9GpWSTbD2bzwPsruODZ7/hw2S5y8wK69Wnd16gdxJ4FGbu1+aZSfubPM/w2wDwRWQn8AMw1xnzqx+NVWYjXw9WDYvn6vhE8dc2ZdGgaydYDWdz33gou+Md8/vfjT5r4/aVpF7j5Cxj5O/CEOM03EyBtpduRKVXv+LOVzkpjzABjzJnGmL7GmD+Xv5W7Qrwero1vzzf3j+Dv15xJ+6YN2HIgi9+8u5wLn53Px8s18ftFfvPNX3xJyrIeAAAePElEQVQFzbrB/nXw6ihIne12ZErVK/X/7mQVhHo9XBffnm/vT+DJq88gtkkDNu/P4tczlzPmue+YvWI3eZr4a55v880GTaD92W5HpFS9ogm/DKFeD+PP6sC39yfw+FVn0K5xAzbuy+Sed35kzPPf8dnKNE38NS2/9827lkB0CzsvNwfWfa6DpytVTZrwKyAsxMPPBndg3gMJPHZlX9o2imDD3kzu+u8yLvlnEl+s0sRf43ybaH73NMz8Gbw/0fbPo5SqEk34lRAW4mHCkI7MezCBv1zRlzaNIli3J4M7Zyxj7AsLmLN6D7XxXEPQadweQqNg7f/gpXO1+aZSVaQJvwrCQ7zcdHZHEh9M4M/j+tCqYTipaUe54+0Uxv5zAV+t0cRfowbcCHcugNjBkJGmzTeVqiJN+NUQHuLl/87pxPwHRzLlst60jAlnbdpRbnsrhcteXMDXa/dq4q8ppTXfzNzndmRK1Rma8GtARKiXiUM7892kkfzh0t60iAln9U9H+eV/khk39Xu+XaeJv0YUNN+cC826Q0xriGzudlRK1Rlldo+sKici1Mstwzrzs8EdmLFkOy/P38zKXUe4ZXoy/do35jfndyehRwtEgqeTNr9oN9A238zJBo9zzpK+E0weNOnobmxKBTA9w/eDBmFefnleF5ImjeLRS3rRLCqMFTvTuXnaD1z10kLmb9ivZ/zVFRZph1QEyMu1/e2/PAxWzNTmm0qVQhO+HzUI83Lr8C4kTR7JwxfH0TQqjB93pPPzN5ZyzcuLSNqoib9G5GTbZpwnjtp+9rX5plIl0oRfCyLDQrh9RFeSJo1k8pg4mkSGkrL9MDe9vpTr/r2I7zcd0MRfHeExMP5tGDcVwqJ9mm9+63ZkSgUUTfi1KCo8hDsTupI0eRQPXtSTxpGh/LDtMBNeW8L4VxazaPNBt0Osu0Rs8807FkD7IU7zzSvhq9+5HZlSAUMTvguiw0O4a2Q3kiaN5IELe9CoQShLtx7iZ68u5vpXFrFkiyb+KmvaGSZ+bsfN9YTYPnmUUoAmfFfFRIRy96juJE0eyX0X9KBhRAiLtxxi/CuLueHVxfywTeuhq8QbAsMftC15hv6mcP7+DfYGr1JBShN+AGgYEcq9o7uTNHkUvzm/OzERISzcfJBrX17Eja8tIWW7Jv4qadUHPF77/mgavHEhvHkZpO9wNy6lXKIJP4A0ahDKb87vwYLJo7h3dHdiwkNYsOkAV7+0iJteX8KyHYfdDrHuOrILvGGw/Xs7nKI231RBSBN+AGrUIJT7LuhB0uSR3DOqG9HhISRtPMBV/1rIz99YyvKd6W6HWPe0PwvuXAS9LtPmmypoacIPYI0jw7j/wp4kTRrJXSO7EhXmZf6G/Vwx9XtunraUFZr4KyeqGVz31unNN7ctcDsypWqFJvw6oElUGA9eFEfS5FHcmdCVyDAv89bvZ9zU7/nF9B9YteuI2yHWHUWab54NmXttVY9SQUATfh3SNCqMyWPiSJo0kttHdKFBqJdv1u3jshcX8Ms3k1n9kyb+CmvaGW7+HH4+G9oPLpx/dLd7MSnlZ5rw66Bm0eE8fHEvkiaP5LbhXYgI9fB16l4ufWEBt/0nmbW7j7odYt3g8UKnYYWfN3wFz/eDBc9q801VL2nCr8OaR4fzyCW9SJo0il8O60x4iIev1u7lkn8mccdbKaSmaeKvlJ+SIfckfD0Fpl8Kh7fb+ftSOWvpPbAv1dXwlKouTfj1QIuYcH53aW+SJo/klqE28c9Zs4eLn0/iVzNSWL8nw+0Q64aRj8CEDyC6FexYaJtvprwJM64hMnsnzLgWTma5HaVSVab94dcjLWMi+MNlvbljRBf+lbiZ/y7dweer9vDF6j1cckYbFm46wOHsnMIN5nwGQPPoMJJ/d4FLUQeY7ufb5puf/hpSZ8Pse0E8CAay9sPHd8O109yOUqkq0TP8eqhlwwimXN6H7x4cyc/P6Uiox8NnK9OKJnsfBzJP1nKEAS6/+Wb/CfazybOvp47DhjmwaKp7sSlVDZrw67HWjSL407i+zJ+UwE1n60hQlSICG786fX5ONnz5CLw0DL75M+xcqjd4VZ2hCT8ItGnUgL9c0bfMdcb+M4k/fryaT1bsZnf6sVqKLMCNngKhkUXnecNtu/29qyDpGXj9Ani6O3x0B6z5CE6dcCVUpSpC6/AVAGt2H2XN7qO8uci2TGnTKIJBHZsQ37EJ8Z2aEtc6hhBvkJ0fDLwRNn8N67+w1TkhEdDzErjyZft07sav7LL07bDiHZvwJ20t3D5zP0S3cC9+pYrRhK8AeOfWs0nZfojk7YdZtv0waUeO8+nKND5dmQZAZJiX/u0bE9+xCYM6NWVAh8Y0jAh1OepaMG4qTB2CObILiWoB416EkHDoNtpOY56AAxts3f6xw3asXYDcHHhhIES3hB5joMdF0OEc8AZBmamApQk/iDSPDivxBm3z6DDO6dqMc7o2AyAvz7BxXyYp2w+TvP0QKdsPs/1gNgs3H2ShMyqXCPRsFWOvAjo1Ib5jU2KbNEBEavU7+V1YFEx4n+zp1xE14T372ZcItOhpJ18HN4N44OAmWPSincIb2h+JHmOg58UQ0aj2vodSaMIPKr5NLxMTE0lISChxPY9H6Nk6hp6tY7hhSAcA9mUcZ9n29IKrgNU/HWHdngzW7clgxhLbv3yLmHB7BeBUA/Vu05CwkHpQDdSyFz8MfoGElr0qsU0cPLgZdi21Z/8bvoT962y1z5qP4FeLCxN+9iE7Mld9+7FUAUcTvqqQljERjOnbmjF9WwNwPCeXVT8dIXnbYVKcq4D9GSf4YrVt9w8QEerhzNjGzn2AJgzs0ITGkUHUUZk3BDqea6cL/gyHttp6/59SoEVc4XpvXwVZB2y1T48x0Ok8CI1wL25Vb2nCV1USEerlrE5NOatTU6Arxhi2HMgiZZutBkrefpgt+7NYuvUQS7cW9jnfrWV0wVXAoI5N6Nw8qv5VA5WmaWcYcnvReSezbIdtmXvhh9fsFBoJXRLsD0DPsXrjV9UYTfiqRogIXVtE07VFNNed1R6AQ1knWbb9MMnb
7VXAil1H2LQvk037Mpn5w04AmkWFMdBpDTSoYxPOiG1EeIjXza9Su8Ki4L51kPajrfbZMAfSVsD6z+0U3hD6XmXXPX4EwmLAUw+qyZQr/JbwRaQ98B+gFWCAV4wxz/vreCrwNI0K4/zerTi/dysATpzKZc3uowVXASnbD3Mg8yRz1+5l7tq9AIR5PZwR26jIVUCz6HA3v4b/eTzQbpCdRj5iz/g3fmV77+w6qnC9Lx6CTV9D9wvt2X/XkRAe417cqs7x5xn+KeB+Y8wyEYkBUkRkrjFmrR+PqQJYeIiXgR1sXf6tdMEYw45D2SRvK7wK2LDXtg5K2V44fm/n5lEFzwQM6tiEri2i8XjqcTVQw7YwaKKdfO1Phax9sPxtO3lCbffOPcZA3CXQuIMb0ao6xG8J3xiTBqQ57zNEJBVoB2jCV4CtBurYLIqOzaK4elAsAEeyc1i283DBVcDynelsPZDF1gNZzErZBUDjyFAGdii8AugX25gGYUFQDXTrPNi3trDVz86lsGWenY4dslcHACez7dPAXq2xVUWJMcb/BxHpBHwH9DXGHC227DbgNoBWrVoNmjlzZpWOkZmZSXR0dPUCDSJ1pbxO5Rl2ZuSx8XAeG9Nz2Xg4j/QTRf9mvQIdG3ro3thDtyZeujf20DiiZuu5A7G8Qk8epemhFJod/IEdHa4hM6YLAO13fEiHHR9wqOlADjaL51DTgZwKrd2qn0Asr0BWnfIaOXJkijEmviLr+j3hi0g0MB94zBjzYVnrxsfHm+Tk5Codp6x25ep0dbW8jDH8lH7MPhTmVAWt33OUvGJ/xu2bNiC+Y9OCq4AerWLwVqMaqE6V1we3wqr3Cj+Lx47f2+Mi2zVEix5+D6FOlVcAqE55iUiFE75fr/lEJBT4AJhRXrJXqiJEhNgmkcQ2iWRc/3YAZBzPYfnOdOeZgMP8uOMwOw8dY+ehn/jox58AiAkPYYDPfYD+7RsTFV5PqzyufhVGTIaNX9qqn+3f2wFddiyE3cvguv/Y9XJzbNfPIfX8prgq4M9WOgK8DqQaY/7hr+MoFRMRynndW3Bed9te/VRuHuv3ZhRcBaRsP8xP6cf4bsN+vtuwHwCvR+jVJob4jk0LmoW2bdzAza9Rs5p3s9M5d9nmnJvn2eTf46LCdTbPg/cn2tY+PcbY1j8xrVwLWfmfP09xhgI3AatEZLkz7xFjzOd+PKZShHg99GnbiD5tG/F/53QCIO3IsSI/AGvTjrL6JztNX7gNgLaNIhjUqSmDOjQmvlNTJk5bWrTvobo6QlhEI+hzhZ18pS2HnCxY96mdANoOLOzsrW3/2o9V+ZU/W+ksAOpx2zlVl7Rp1IBLz2zApWe2BSDrxClW7Ep3WgMdZtmOw+w+cpzdK3Yze8XuMvdVb0YIGzEJ+t/gPPD1JWydb6t8di+D1R/A3UsL1805BqH16AooSNXTSkylyhYVHsK5XZtzbtfmQGEPocnbDxX8COw4lF3q9n/8eDVxbRoS1zqGHq1i6u79gEaxcNYv7HQyG7Z+Z5t9NulUuM7hbfDiYOg83Onv56Ky2/zvS+WspfdA7/egMh3OKb+ro3+lStUs3x5CJwyxw0F2euizUtfPHygmX8dmkcS1jiGudUN6tbGvHZpG1q0HxMIioecYO/nalQy5J2HTXDt9/gC07F3Y2VvsWeBxnoM4mQUzriUyexfMuBbuWnJ6l9LKNZrwlaqChy+OY92eDFLTjrJ5fybbD2az/WA2X67ZW7BOg1AvPVvHOD8EMQVXBHWux9AzrrFn9xvn2rP/zd/aB8D2rYXFL9lRvvIHfvnoDsjaj2Agaz98fDdcO83d+FUBTfhKVcHtI7oWvD95Ko8tBzJZl5ZB6p6jrN+Twbq0DPYcPc7yneks35leZNs2jSKK/ADEtW5IlxZRhAbyEJLRLWHABDudOmmbem74EnJPFCb7lDch9ZPCbU4dtz8Qy962w0Uq12nCV6oUZY0Q5issxENc64bEtW7IFbQrmH8466QzSMxR1qXZ1/V7M0g7cpy0I8eZt35/4T68Hrq2jKZX6xjinCqhuDYxtIgOD7zuo0PCbFPOriOLzp/7h9PXzcmGz+6zzTfixtqBXpRrNOErVYqKjhBWmiZRRYeOBMjNsx3GrUs7SuqeDNalHWXdngx2HMomNe0oqWlH4cfCfTSNCiu4CohrE0Ov1g3p3iqaiNAA7Dvowsfgiwdsix5fuSfg47tsXX9+wt+/AWJaQ0TD2o8ziGnCV6oWeT1C5+ZRdG4excVntCmYn3niFOv3ZNjqIOeKIHXPUQ5lnSwyljCAR2wPonFtGtKrdQw9W9uqIdfHFB54I2z+GtZ/YatzQiJsb57dL4TdP0Jzny4dPrwV9qyyPwJdEuzVQrtBOsi7n2nCVyoARIeHFPT7k88Yw+4jxwuuAtY5VwRbDmSxeb+dPluZVrB+THiIvUncprC1UI9WMcRE1GISHTcVpg7BHNmFRLWw3TgUb6WTm1PYpn/nYjvNf8IO7tL5PDj7TnuTWNU4TfhKBSgRoV3jBrRr3IDRvQq7PDiek8umfZkFPwD5PwYHMk+Q7Iww5iu2SYMizUXj2sTQqVlUtTqTK1VYFEx4n+zp1xE14b2Sm2R6Q+GWObbLh20LbBcPWxLh4EY7yteZ4wvX3bEEjuyEziN0qMcaoAlfqTomItRL33aN6NuuUZH5+zNOFFQJpTo3iTfuzWTX4WPsOnyMr1MLm4yGh3jscwetYgqqhuLaNKRpVA00GW3Zix8Gv0BCeQ9dRTSyN3LjxtrP6Ttt4u8yonCdlGmw4h37vvUZtvqny0g7MLw++VtpmvCVqidaxITTIiacYd2bF8w7lZvHtoNZBT8AtrVQBj+lH2PlriOs3HWkyD5axoT7/ADE0LNVQ7q2jKqdcYYbt4eBNxWd1+EcO8D79oW2zn/PKlj4AnjD7dPBYx73f1z1iCZ8peqxEK+Hbi1j6NYyhsv6tS2Yf+RYTuENYqdqaP2eDPZlnGBfRmGvogAhHjtAvW9z0V6tG9KqYdEmo/F/nVvznc0N+rmdco7buv7NzghfaSvtAO/59q6B+X+3N3+7jIQmHat2vHpOE75SQahRg1AGd27K4M5NC+bl5Rl2HT5Gqs9zA+v2ZLDtYBbr92awfm8GH7O7yD7iWsfQy3mArLRO5Wqks7nQCKc6JwH4E2QdBHxGvdk4F9b+z04ATbvYxN91JHQ6Dxo0rn4M9YAmfKUUYPsT6tAskg7NIrmoT+uC+dknT7Fxb2aRewPr9mSQnp3Dkq2HWLL1ULn7fvSjVUSEeokI9RAeUvQ1ItRLeIiHcOc1ItRLRIiXcJ9ldp6HkPynkaOaFT1A36vsDeItibA1CQ5tsVPy6xDZDB7YBB5n27zcwr5/gowmfKVUmSLDQujXvjH92heeJRtj2Hv0RJEqof8tL71b6RlLdtRILF6PEOH8OEQ4PwRhIfk/DH2JCO1Hg7b30iN3I32OL6NHVgpZ4S2YM3cjEaEeouUEP/v+Yg427cfBlkM50nYYuc162v35/rgU+2HyR4smv1SBlUMTvlK
q0kSE1o0iaN0ogoSeLQHKTPh/GdeH4zl5nDiVy/GcPI7n5HLiVNHX46fyOOHzetrynFxy8wxZJ3PJOplbZnxzaAKMdiYDuzcBcI5nDRPDjtJ2XxJt9yXBathjmvB9Xl8W5Pblq7x4sji99U+oVwquOsLzrz5Cyr5i8b1yKT4vItTr3yqwUmjCV0r53U3OyGPVlZNbyo+F82NyorwflZzOPHksng7pS+ma8QM9s1NonXuYq71JXO1NYkLMNHblRXI8J5e2OTvYdqoJh3PCyMk15OSeIuNEjXwN12jCV0rViIp2NlcdoV4PoV4PMdXaS29glH1rjO3mefM8OLCeGZdfVbjaPwfCkZ2YroPJ7TSC4+1HkN28LydypZwflcIfneLr+V65fLNuX7W+RVVowldK1YjqdjbnChFo1cdOvk5k2gfDDm1Bti0gZNsConmM6IhGttuHc++F9oOrdeiyBtjxF034SilVXHg03DYPsg/BtqTC9v+Ht0HqbBg0sXDdbQvsYC+dR0Bk09L2GBA04SulVGkim0LvcXYCOLTVJv4O5xaus/glWPcpINC2f2H7//ZDICS81F3XRhVYcZrwlVKqopp2tpOvzsNtR3A7l9huoHf/CAv+AaGRcM7dMOrREndVUAW2L5Ws6dcRNdH/g75rwldKqeoYcrudTmbB9kX2CmDzPNi3xt4HyLcrGZa8XHgF0LBtrQ/6rglfKaVqQlgUdD/fTgAZe4sO6LLhS1j1vp0Amve0YwNk7qm1Qd814SullD/EtCr6ud/19p7AlkR7o/fA+qLLa2HQd49f9qqUUqqoZl3taF43vAuTtkJECR265WTDN1P8FoImfKWUqm0hYXbQ99DIovNDI+H8P/ntsJrwlVLKDQNvhB4X2cHewb72GAMDJvjtkJrwlVLKLeOmQlQLDAJRLWDci349nCZ8pZRyS/6g75HtYcL7fm2SCZrwlVLKXc6g7/5+6Ao04SulVNDQhK+UUkFCE75SSgUJTfhKKRUkxBjjdgwFRGQ/sL2KmzcHDtRgOPWdllflaHlVjpZX5VSnvDoaY1pUZMWASvjVISLJxph4t+OoK7S8KkfLq3K0vCqntspLq3SUUipIaMJXSqkgUZ8S/ituB1DHaHlVjpZX5Wh5VU6tlFe9qcNXSilVtvp0hq+UUqoMmvCVUipIBHzCF5E3RGSfiKwuNv9sEXnVeX+miCwSkTUiskpEIoqt+0nx7eur8spLRAaLyHJnWiEiVzrL24vIPBFZ65Tjr935BrWrAuXVzCmXTBF5sdg6YSLyiohsEJF1InJ17UbvrgqUXaiIvOn8n0wVkYfdijVQlVaG/hLwCR+YDowpYf7FwBwRCQHeBu4wxvQBEoCc/JVE5Cog0/9hBozplFFewGog3hjT31nv304ZngLuN8b0Bs4G7hKR3rUTsqumU3Z5HQd+DzxQwjqPAvuMMT2A3sB8P8UYqKZTdtldC4QbY84ABgG3i0in2gqujphOyWXoFwGf8I0x3wGHSlg0GvgauBBYaYxZ4ax/0BiTCyAi0cB9wF9rKVzXlVdexphsY8wpZ14EYJzt0owxy5z3GUAq0K4WQnZVBcoryxizAJv4i7sFeNzZT54xJqieLK3A/00DRDknFA2Ak8DR2osw8JVRhn4R8Am/JCLSHMgxxhwBegBGRL4UkWUiMsln1b8AzwDZbsQZKIqVFyIyRETWAKuwV0aniq3fCRgALKnlUANC8fIqZZ38Eaj/4vzdvS8irWonwsBVrOxmAVlAGrADeNoYU2vJTZ2uTiZ87Fn9V877EGAYMMF5vVJERotIf6CrMeYjl2IMJL7lhTFmiVP9dRbwsO89D+eq6APgN8aYYD0bK1JepQgBYoGFxpiBwCLgaX8HVgf4lt1gIBdoC3QG7heRLm4Fpupuws+vIwTYBXxnjDlgjMkGPgcGAucA8SKyDVgA9BCRRBdiDQS+5VXAGJOKvb/RF0BEQrHJfoYx5sNajTCwlFhexRzEXjnml9P72L+7YOdbdjcAc4wxOcaYfcD3gPav46I6l/BFRIAzgeXOrC+BM0Qk0qkrHAGsNca8ZIxpa4zphD3z32CMSXAjZjcVLy8R6eyUEyLSEYgDtjnrvQ6kGmP+4Va8bivh76tExj6xOBvbSABsvfVavwYX4Eooux3AKGdZFLYxwDp3olNgL0sDmoi8g/1P1VxEdgEvAD86/+EwxhwWkX8AP2BvEn1ujPnMrXjdVl55YX/8HhKRHCAP+JUx5oCIDANuAlaJSP5/2EeMMZ/X7jeoXRUoL5yrxIZAmIhcAVxojFkLTAbeEpHngP3AzbUcvqsqUHZTgWnO/SIBphljVroSbIAqoQz/aIx53W/Hq2tdK4jI74BNxpiZbsdSF2h5VY6WV9Vp2QW+OpfwlVJKVU2dq8NXSilVNZrwlVIqSGjCV0qpIKEJXymlgoQmfOUKEWktIjNFZLOIpIjI5yLSw+24KkNE7hCR/6vlYz5S7PPC2jy+qtu0lY6qdc4DOguBN40xLzvz+gENjTFJtXD8kOL9BwWK8mITkUxjTHRtxqTqDz3DV24Yie1g6+X8GcaYFcaYJLGeEpHVTj/q4wFEJEFE5ovIxyKyRUSeEJEJIrLUWa+rs950EXlZRJKdfuovdeZPFDsuwrfAN868B0XkBxFZKSJ/cuZFichnYscKWO1z/CfEjhWwUkSeduZNEZEHnPf9RWSxs/wjEWnizE8UkSedODeIyHnFC8P5bkki8gnO07oi8j/nymeNiNyWHwPQQOxYBjOceZnOa4nlppSvgH/SVtVLfYGUUpZdBfQH+gHNgR9E5DtnWT+gF7Y72S3Aa8aYwWIHa7kH+I2zXidsx11dgXki0s2ZPxA40xhzSEQuBLo76wnwiYgMB1oAu40xYwFEpJGINAOuBOKMMUYKe8r09R/gHmPMfBH5M/BHn3hCnDgvceafX8L2A4G+xpitzudbnDgbOGXwgTHmIRG52xnLoELlZoxJK2FdFaT0DF8FmmHAO8aYXGPMXuygImc5y35w+u0/AWymsFfGVdgkn+89p3/6jdgfhjhn/lyf7nkvdKYfgWXOOt2dfV3gnJWf53TzewTbH/7rYgfUKdLdtog0AhobY/IHQHkTGO6zSn4HaynF4vS11CfZA9wrIiuAxUB7J7aylFVuSgGa8JU71mBHQKqsEz7v83w+51H0arX4jan8z1k+8wR43BjT35m6GWNeN8ZswJ5trwL+KiJ/cOrUB2P7d7+U8nvSLC3uXEq/qi6ITUQSsFcB5xhj+mF/lCJK2U6pCtOEr9zwLRCeXzcNBeMSnwckAeNFxCsiLbBnyksruf9rRcTj1Ot3AdaXsM6XwC1i+/9HRNqJSEsRaQtkG2PeBp4CBjrrNHI6kvstttqkgHMVcNinfv4mqjfcYSPgsDEmW0TisL1M5ssR2411cTVRbqqe0zp8VeucevArgedEZDK2umQbts57AXYsgxXYM/NJxpg9TuKrqB3YZNcQO6LXcdswqEgMX4lIL2CRsywTuBHoBjwlInnYsZHvBGKAj8UOFCPYYTOL+znwso
hEYquRqtNz5hzgDhFJxf5YLfZZ9gqwUkSWGWMm+Mz/iBLKrRoxqHpIm2WqekVEpgOfGmNmuR2LUoFGq3SUUipI6Bm+UkoFCT3DV0qpIKEJXymlgoQmfKWUChKa8JVSKkhowldKqSDx/+Hd+r7dSWPiAAAAAElFTkSuQmCC\n",
700 | "text/plain": [
701 | ""
702 | ]
703 | },
704 | "metadata": {
705 | "needs_background": "light"
706 | },
707 | "output_type": "display_data"
708 | }
709 | ],
710 | "source": [
711 | "import matplotlib.pyplot as plt\n",
712 | "\n",
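712 | "# Plot test error (100 - accuracy); each record is a (test_loss, test_acc) tuple\n",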
713 | "fig, ax = plt.subplots(figsize=(6,4))\n",
714 | "ax.plot([100.0-x[1] for x in hashednn_records], 's-', linewidth=2, label='Hashed NN')\n",
715 | "ax.plot([100.0-x[1] for x in nn_records], 'd--', linewidth=2, label='Equiv. NN')\n",
716 | "ax.set_xticklabels(['1/64', '1/32', '1/16', '1/8', '1'])\n",
717 | "ax.set_xticks(range(len(compression_rates)))\n",
718 | "ax.set_xlabel('Compression ratio')\n",
719 | "ax.set_ylabel('Error (%)')\n",
720 | "plt.title(\"1 hidden layer, 1000 units\")\n",
721 | "plt.legend(handlelength=3)\n",
722 | "plt.grid()\n",
723 | "plt.savefig('example.svg')\n",
724 | "plt.show()"
725 | ]
726 | }
727 | ],
728 | "metadata": {
729 | "kernelspec": {
730 | "display_name": "Python [conda env:pytorch3.7] *",
731 | "language": "python",
732 | "name": "conda-env-pytorch3.7-py"
733 | },
734 | "language_info": {
735 | "codemirror_mode": {
736 | "name": "ipython",
737 | "version": 3
738 | },
739 | "file_extension": ".py",
740 | "mimetype": "text/x-python",
741 | "name": "python",
742 | "nbconvert_exporter": "python",
743 | "pygments_lexer": "ipython3",
744 | "version": "3.6.8"
745 | }
746 | },
747 | "nbformat": 4,
748 | "nbformat_minor": 2
749 | }
750 |
--------------------------------------------------------------------------------