├── .gitignore ├── Minimum_example_JAX.ipynb ├── Minimum_example_Keras.ipynb ├── README.rst ├── pytorch ├── Minimum_example_pytorch.ipynb ├── README.rst ├── model_ncp.py ├── model_ncp.yml ├── train_main.py └── utils.py └── tensorflow ├── Minimum_example_tensorflow.ipynb ├── README.rst ├── model_ncp_tf.py ├── model_ncp_tf.yml ├── train_main_tf.py └── utils_tf.py /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | .idea/* 4 | *.egg-info/* 5 | *tmp*.py 6 | *tmp* 7 | *.mat 8 | *.swp 9 | *.*.backup 10 | *Copy*.ipynb 11 | 12 | **/__pycache__/ 13 | **/results/ 14 | **/results_*/ 15 | **/template_icl/ 16 | **/.ipynb_checkpoints/ 17 | .ipynb_checkpoints/* 18 | **/demo/ 19 | .chainerui_commands 20 | *tmp_grigoris 21 | 22 | # personal testout files. 23 | personal.py 24 | 25 | -------------------------------------------------------------------------------- /Minimum_example_JAX.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "mBZQNjLTy6qV" 7 | }, 8 | "source": [ 9 | "This notebook is providing a minimum code for running MNIST classification with a CCP model (i.e. a polynomial expansion without activation functions) in *JAX*. \n", 10 | "\n", 11 | "*Details*: The model implements a third-degree polynomial expansion (and in particular the [CCP model](https://github.com/grigorisg9gr/polynomial_nets) from the $\\Pi$-Nets), using a hidden dimension of 32. The linear operations involved are implemented as convolutions. 
The code is inspired by this tutorial: https://github.com/8bitmp3/JAX-Flax-Tutorial-Image-Classification-with-Linen (and was verified with JAX version 0.3.25).\n", 12 | "\n", 13 | "For implementations that obtain state-of-the-art code with polynomial nets, please visit other respositories, such as the https://github.com/grigorisg9gr/polynomial_nets" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": { 20 | "colab": { 21 | "base_uri": "https://localhost:8080/" 22 | }, 23 | "id": "bymClH6LzLrU", 24 | "outputId": "660c4ffd-9108-4b37-86d6-bf6b44ebffbf" 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "# # Install the dependencies\n", 29 | "!pip install --upgrade -q pip jax jaxlib flax optax tensorflow-datasets" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": { 36 | "id": "IrDe_c03zoiJ" 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "import jax.numpy as jnp # JAX NumPy\n", 41 | "from flax.linen import Dense\n", 42 | "from flax import linen as nn # The Linen API\n", 43 | "from flax.training import train_state\n", 44 | "import optax # The Optax gradient processing and optimization library\n", 45 | "from jax import jit, random, device_get, tree_map, value_and_grad\n", 46 | "\n", 47 | "import numpy as np # Ordinary NumPy\n", 48 | "import tensorflow_datasets as tfds # TFDS for MNIST" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": { 55 | "id": "IzD4O-JIzx6m" 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "def get_datasets():\n", 60 | " ds_builder = tfds.builder('mnist')\n", 61 | " ds_builder.download_and_prepare()\n", 62 | " # Split into training/test sets\n", 63 | " train_ds = tfds.as_numpy(ds_builder.as_dataset(split='train', batch_size=-1))\n", 64 | " test_ds = tfds.as_numpy(ds_builder.as_dataset(split='test', batch_size=-1))\n", 65 | " # Convert to floating-points\n", 66 | " train_ds['image'] = jnp.float32(train_ds['image']) / 
255.0\n", 67 | " test_ds['image'] = jnp.float32(test_ds['image']) / 255.0\n", 68 | " return train_ds, test_ds" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": { 75 | "colab": { 76 | "base_uri": "https://localhost:8080/" 77 | }, 78 | "id": "E_n5C_8bz2hU", 79 | "outputId": "9da22ea5-c9b9-410d-efa4-4c1f9a9bc110" 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "\n", 84 | "class Pi_net_Convs(nn.Module):\n", 85 | "\n", 86 | " @nn.compact\n", 87 | " # Provide a constructor to register a new parameter \n", 88 | " # and return its initial value\n", 89 | " def __call__(self, x):\n", 90 | " x1 = nn.Conv(features=32, kernel_size=(3, 3))(x)\n", 91 | " x2 = nn.Conv(features=32, kernel_size=(3, 3))(x)\n", 92 | " x3 = nn.Conv(features=32, kernel_size=(3, 3))(x)\n", 93 | " \n", 94 | " x = x1 * x2 * x3 + x1 * x2 + x2 * x3 + x1\n", 95 | " x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))\n", 96 | " x = x.reshape((x.shape[0], -1)) # Flatten\n", 97 | " x = nn.Dense(features=32)(x)\n", 98 | "\n", 99 | " x = nn.Dense(features=10)(x) # There are 10 classes in MNIST\n", 100 | " return x\n", 101 | "\n", 102 | "\n", 103 | "def compute_metrics(logits, labels):\n", 104 | " loss = jnp.mean(optax.softmax_cross_entropy(logits, nn.one_hot(labels, num_classes=10)))\n", 105 | " accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)\n", 106 | " metrics = {\n", 107 | " 'loss': loss,\n", 108 | " 'accuracy': accuracy\n", 109 | " }\n", 110 | " return metrics\n", 111 | "\n", 112 | "\n", 113 | "@jit\n", 114 | "def train_step(state, batch):\n", 115 | " def loss_fn(params):\n", 116 | " logits = Pi_net_Convs().apply({'params': params}, batch['image'])\n", 117 | " loss = jnp.mean(optax.softmax_cross_entropy(\n", 118 | " logits=logits, \n", 119 | " labels=nn.one_hot(batch['label'], num_classes=10)))\n", 120 | " return loss, logits\n", 121 | "\n", 122 | " grad_fn = value_and_grad(loss_fn, has_aux=True)\n", 123 | " (_, logits), grads = 
grad_fn(state.params)\n", 124 | " state = state.apply_gradients(grads=grads)\n", 125 | " metrics = compute_metrics(logits, batch['label'])\n", 126 | " return state, metrics\n", 127 | "\n", 128 | "@jit\n", 129 | "def eval_step(params, batch):\n", 130 | " logits = Pi_net_Convs().apply({'params': params}, batch['image'])\n", 131 | " return compute_metrics(logits, batch['label'])\n", 132 | "\n", 133 | "\n", 134 | "def train_epoch(state, train_ds, batch_size, epoch, rng):\n", 135 | " train_ds_size = len(train_ds['image'])\n", 136 | " steps_per_epoch = train_ds_size // batch_size\n", 137 | "\n", 138 | " perms = random.permutation(rng, len(train_ds['image']))\n", 139 | " perms = perms[:steps_per_epoch * batch_size] # Skip an incomplete batch\n", 140 | " perms = perms.reshape((steps_per_epoch, batch_size))\n", 141 | "\n", 142 | " batch_metrics = []\n", 143 | "\n", 144 | " for perm in perms:\n", 145 | " batch = {k: v[perm, ...] for k, v in train_ds.items()}\n", 146 | " state, metrics = train_step(state, batch)\n", 147 | " batch_metrics.append(metrics)\n", 148 | "\n", 149 | " training_batch_metrics = device_get(batch_metrics)\n", 150 | " training_epoch_metrics = {\n", 151 | " k: np.mean([metrics[k] for metrics in training_batch_metrics])\n", 152 | " for k in training_batch_metrics[0]}\n", 153 | "\n", 154 | " print('Training - epoch: %d, loss: %.4f, accuracy: %.2f' % (epoch, training_epoch_metrics['loss'], training_epoch_metrics['accuracy'] * 100))\n", 155 | "\n", 156 | " return state, training_epoch_metrics\n", 157 | "\n", 158 | "\n", 159 | "\n", 160 | "def eval_model(model, test_ds):\n", 161 | " metrics = eval_step(model, test_ds)\n", 162 | " metrics = device_get(metrics)\n", 163 | " eval_summary = tree_map(lambda x: x.item(), metrics)\n", 164 | " return eval_summary['loss'], eval_summary['accuracy']\n", 165 | "\n", 166 | "train_ds, test_ds = get_datasets()\n", 167 | "rng = random.PRNGKey(0)\n", 168 | "rng, init_rng = random.split(rng)\n", 169 | "cnn = Pi_net_Convs()\n", 
170 | "params = cnn.init(init_rng, jnp.ones([1, 28, 28, 1]))['params']\n", 171 | "nesterov_momentum = 0.9\n", 172 | "learning_rate = 0.001\n", 173 | "tx = optax.sgd(learning_rate=learning_rate, nesterov=nesterov_momentum)\n", 174 | "\n", 175 | "state = train_state.TrainState.create(apply_fn=cnn.apply, params=params, tx=tx)\n", 176 | "\n", 177 | "num_epochs = 3\n", 178 | "batch_size = 32\n", 179 | "\n", 180 | "for epoch in range(1, num_epochs + 1):\n", 181 | " # Use a separate PRNG key to permute image data during shuffling\n", 182 | " rng, input_rng = random.split(rng)\n", 183 | " # Run an optimization step over a training batch\n", 184 | " state, train_metrics = train_epoch(state, train_ds, batch_size, epoch, input_rng)\n", 185 | " # Evaluate on the test set after each training epoch\n", 186 | " test_loss, test_accuracy = eval_model(state.params, test_ds)\n", 187 | " print('Testing - epoch: %d, loss: %.2f, accuracy: %.2f' % (epoch, test_loss, test_accuracy * 100))" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "metadata": { 194 | "id": "zpYe6t8K0H8K" 195 | }, 196 | "outputs": [], 197 | "source": [] 198 | } 199 | ], 200 | "metadata": { 201 | "accelerator": "GPU", 202 | "colab": { 203 | "provenance": [] 204 | }, 205 | "gpuClass": "standard", 206 | "kernelspec": { 207 | "display_name": "Python 3", 208 | "language": "python", 209 | "name": "python3" 210 | }, 211 | "language_info": { 212 | "codemirror_mode": { 213 | "name": "ipython", 214 | "version": 3 215 | }, 216 | "file_extension": ".py", 217 | "mimetype": "text/x-python", 218 | "name": "python", 219 | "nbconvert_exporter": "python", 220 | "pygments_lexer": "ipython3", 221 | "version": "3.5.6" 222 | } 223 | }, 224 | "nbformat": 4, 225 | "nbformat_minor": 1 226 | } 227 | -------------------------------------------------------------------------------- /Minimum_example_Keras.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | 
"cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "This notebook is providing a minimum code for running MNIST classification with a CCP model (i.e. a polynomial expansion without activation functions) in *Keras*. \n", 8 | "\n", 9 | "\n", 10 | "*Details*: The model implements a third-degree polynomial expansion (and in particular the [CCP model](https://github.com/grigorisg9gr/polynomial_nets) from the $\\Pi$-Nets), using a hidden dimension of 16. The network is not optimized for performance reasons, but simply to introduce you to the concept of polynomial nets in Keras (verified with Keras v2.9.0). \n", 11 | "\n", 12 | "For implementations that obtain state-of-the-art code with polynomial nets, please visit other respositories, such as the https://github.com/grigorisg9gr/polynomial_nets" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from keras.models import Sequential\n", 22 | "from keras.layers import Dense, Flatten\n", 23 | "from keras.datasets import mnist\n", 24 | "from keras.layers import Input, Dense, Flatten\n", 25 | "from keras.models import Model" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n", 35 | "x_train, x_test = x_train/255., x_test/255.\n", 36 | "\n", 37 | "# Define the input layer\n", 38 | "input_layer = Input(shape=(28, 28, 1))\n", 39 | "flat = Flatten()(input_layer)\n", 40 | "\n", 41 | "# Define the polynomial network (minimum CCP implementation).\n", 42 | "hidden_size = 16\n", 43 | "n_degree = 3\n", 44 | "out = Dense(hidden_size, activation=None)(flat)\n", 45 | "for i in range(2, n_degree + 1):\n", 46 | " U = Dense(hidden_size, activation=None)\n", 47 | " out = U(flat) * out + out\n", 48 | " \n", 49 | "# Define the output layer.\n", 50 | "n_classes = 10\n", 51 | 
"C = Dense(n_classes, activation='softmax')\n", 52 | "out = C(out)\n", 53 | "\n", 54 | "# Create the model.\n", 55 | "model = Model(inputs=input_layer, outputs=out)\n", 56 | "\n", 57 | "model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n", 58 | "model.fit(x_train, y_train, epochs=5)\n", 59 | "model.evaluate(x_test, to_categorical(y_test))" 60 | ] 61 | } 62 | ], 63 | "metadata": { 64 | "kernelspec": { 65 | "display_name": "Python 3", 66 | "language": "python", 67 | "name": "python3" 68 | }, 69 | "language_info": { 70 | "codemirror_mode": { 71 | "name": "ipython", 72 | "version": 3 73 | }, 74 | "file_extension": ".py", 75 | "mimetype": "text/x-python", 76 | "name": "python", 77 | "nbconvert_exporter": "python", 78 | "pygments_lexer": "ipython3", 79 | "version": "3.5.6" 80 | } 81 | }, 82 | "nbformat": 4, 83 | "nbformat_minor": 4 84 | } 85 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | Tutorial on high-degree polynomial networks 3 | =========================================== 4 | 5 | .. image:: https://img.shields.io/badge/PyTorch-1.12.0-red.svg 6 | :target: https://github.com/polynomial-nets/tutorial-2022-intro-polynomial-nets 7 | :alt: PyTorch 8 | 9 | .. image:: https://img.shields.io/badge/TensorFlow-2.4.0-green.svg 10 | :target: https://github.com/polynomial-nets/tutorial-2022-intro-polynomial-nets 11 | :alt: TensorFlow 12 | 13 | .. image:: https://img.shields.io/badge/JAX-0.3.2-blue.svg 14 | :target: https://github.com/polynomial-nets/tutorial-2022-intro-polynomial-nets 15 | :alt: JAX 16 | 17 | 18 | This code implements two polynomial networks for image recognition. 19 | The two codes are based on the paper of `"**Π-nets: Deep Polynomial Neural Networks**" `_ (also available `here `_ ) [1]_. 
20 | 21 | The two networks are implemented in both PyTorch and TensorFlow (in the folder ``tensorflow``). Those networks aim to demonstrate the performance of the polynomial networks with minimal code examples; therefore, they are not really the state-of-the-art results on recognition. For networks that can achieve state-of-the-art results the source code of the papers can be followed, since they have more intricate implementations. For instance, for Π-nets, please check [1]_. 22 | 23 | Please visit the folders of ```pytorch``` or ```tensorflow``` for implementations in PyTorch and TensorFlow respectively. 24 | 25 | 26 | .. image:: https://img.shields.io/badge/-New-brightgreen 27 | :target: https://github.com/polynomial-nets/tutorial-2022-intro-polynomial-nets 28 | :alt: New 29 | 30 | New JAX and Keras implementations for polynomial networks have been added (e.g., ``Minimum_example_JAX.ipynb``). 31 | 32 | 33 | Notebooks with polynomial nets on different frameworks 34 | ====================================================== 35 | 36 | .. image:: https://colab.research.google.com/assets/colab-badge.svg 37 | :target: https://colab.research.google.com/drive/1UJ3l_t387GTWk8nSlr_fX2SNwXuglnNA 38 | :alt: PyTorch 39 | 40 | .. image:: https://colab.research.google.com/assets/colab-badge.svg 41 | :target: https://colab.research.google.com/drive/1i858yL63kRE5qWn_nMe8cktTFecxAMBQ 42 | :alt: TensorFlow 43 | 44 | .. image:: https://colab.research.google.com/assets/colab-badge.svg 45 | :target: https://colab.research.google.com/drive/1R3NVusAxDY6hKue-HMqeZBVY6ABLSn08 46 | :alt: JAX 47 | 48 | .. image:: https://colab.research.google.com/assets/colab-badge.svg 49 | :target: https://colab.research.google.com/drive/1ZyN-tEa6aBYP1QxLU-KVmYnK-5RCY465 50 | :alt: Keras 51 | 52 | The notebooks are the same as the one in the repo and contain minimum examples in PyTorch, TensorFlow, JAX and Keras respectively. 
53 | 54 | 55 | 56 | 57 | Acknowledgements 58 | ================ 59 | 60 | We are thankful to Yongtao for the help of converting the code to TensorFlow. 61 | 62 | 63 | References 64 | ========== 65 | 66 | .. [1] https://github.com/grigorisg9gr/polynomial_nets/ 67 | 68 | .. [2] https://pypi.org/project/pyaml/ 69 | 70 | -------------------------------------------------------------------------------- /pytorch/Minimum_example_pytorch.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "This notebook is providing a minimum code for running MNIST classification with a CCP model (i.e. a polynomial expansion without activation functions) in *PyTorch*. \n", 8 | "\n", 9 | "\n", 10 | "*Details*: The model implements a fourth-degree polynomial expansion (and in particular the [CCP model](https://github.com/grigorisg9gr/polynomial_nets) from the $\\Pi$-Nets), using a hidden dimension of 16. The network is not optimized for performance reasons, but simply to introduce you to the concept of polynomial nets in PyTorch (verified with PyTorch v.1.13). 
\n", 11 | "\n", 12 | "For implementations that obtain state-of-the-art code with polynomial nets, please visit other respositories, such as the https://github.com/grigorisg9gr/polynomial_nets" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import torch\n", 22 | "import torch.nn as nn\n", 23 | "import torch.optim as optim\n", 24 | "import torch.nn.functional as F\n", 25 | "from torch.utils.data import DataLoader\n", 26 | "from torchvision.datasets import MNIST\n", 27 | "from torchvision import transforms\n", 28 | "print(torch.__version__)" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "class Net(nn.Module):\n", 38 | " def __init__(self, hidden_size=16, image_size=28, channels_in=1, n_degree=4, bias=True, n_classes=10):\n", 39 | " \"\"\"\n", 40 | " This is the initialization function of the network, which in this case is a polynomial network.\n", 41 | " The implementation here relies on the CCP model of $\\Pi$-nets. 
\n", 42 | " \"\"\"\n", 43 | " super(Net, self).__init__()\n", 44 | " self.image_size = image_size\n", 45 | " self.channels_in = channels_in\n", 46 | " self.total_image_size = self.image_size * self.image_size * channels_in\n", 47 | " self.hidden_size = hidden_size\n", 48 | " self.n_classes = n_classes\n", 49 | " self.n_degree = n_degree\n", 50 | " for i in range(1, self.n_degree + 1):\n", 51 | " setattr(self, 'U{}'.format(i), nn.Linear(self.total_image_size, self.hidden_size, bias=bias))\n", 52 | " self.C = nn.Linear(self.hidden_size, self.n_classes, bias=True)\n", 53 | "\n", 54 | " def forward(self, z):\n", 55 | " h = z.view(-1, self.total_image_size)\n", 56 | " out = self.U1(h)\n", 57 | " for i in range(2, self.n_degree + 1):\n", 58 | " out = getattr(self, 'U{}'.format(i))(h) * out + out\n", 59 | " out = self.C(out)\n", 60 | " return out" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "# Set device (CPU or GPU)\n", 70 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", 71 | "# Initialize model and move to device\n", 72 | "model = Net().to(device)\n", 73 | "\n", 74 | "# Set loss function and optimizer\n", 75 | "criterion = nn.CrossEntropyLoss()\n", 76 | "optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n", 77 | "\n", 78 | "# Load MNIST dataset and apply transformations\n", 79 | "transform = transforms.Compose([transforms.ToTensor(),\n", 80 | " transforms.Normalize((0.5,), (0.5,))])\n", 81 | "train_dataset = MNIST(root='./data', train=True, download=True, transform=transform)\n", 82 | "test_dataset = MNIST(root='./data', train=False, download=True, transform=transform)\n", 83 | "\n", 84 | "# Create data loaders\n", 85 | "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n", 86 | "test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)\n", 87 | "\n", 88 | "# Train model\n", 89 | "for epoch in 
range(10):\n", 90 | " running_loss = 0.0\n", 91 | " for i, data in enumerate(train_loader, 0):\n", 92 | " # Get inputs and labels\n", 93 | " inputs, labels = data\n", 94 | " inputs, labels = inputs.to(device), labels.to(device)\n", 95 | "\n", 96 | " # Zero the parameter gradients\n", 97 | " optimizer.zero_grad()\n", 98 | "\n", 99 | " # Forward pass\n", 100 | " outputs = model(inputs)\n", 101 | " loss = criterion(outputs, labels)\n", 102 | "\n", 103 | " # Backward pass and optimize\n", 104 | " loss.backward()\n", 105 | " optimizer.step()\n", 106 | "\n", 107 | " # Print statistics\n", 108 | " # Print statistics\n", 109 | " running_loss += loss.item()\n", 110 | " if i % 400 == 399:\n", 111 | " print(f'[{epoch + 1}, {i + 1}] loss: {running_loss / 1000:.3f}')\n", 112 | " running_loss = 0.0\n", 113 | "\n", 114 | "print('Finished Training')\n", 115 | "\n", 116 | "# Test model\n", 117 | "correct = 0\n", 118 | "total = 0\n", 119 | "with torch.no_grad():\n", 120 | " for data in test_loader:\n", 121 | " images, labels = data\n", 122 | " images, labels = images.to(device), labels.to(device)\n", 123 | " outputs = model(images)\n", 124 | " _, predicted = torch.max(outputs.data, 1)\n", 125 | " total += labels.size(0)\n", 126 | " correct += (predicted == labels).sum().item()\n", 127 | "\n", 128 | "print(f'Accuracy: {100 * correct / total:.2f}%')" 129 | ] 130 | } 131 | ], 132 | "metadata": { 133 | "colab": { 134 | "collapsed_sections": [], 135 | "name": "CCP_model_minimum_example.ipynb", 136 | "provenance": [] 137 | }, 138 | "kernelspec": { 139 | "display_name": "Python 3", 140 | "language": "python", 141 | "name": "python3" 142 | }, 143 | "language_info": { 144 | "codemirror_mode": { 145 | "name": "ipython", 146 | "version": 3 147 | }, 148 | "file_extension": ".py", 149 | "mimetype": "text/x-python", 150 | "name": "python", 151 | "nbconvert_exporter": "python", 152 | "pygments_lexer": "ipython3", 153 | "version": "3.5.6" 154 | } 155 | }, 156 | "nbformat": 4, 157 | 
"nbformat_minor": 1 158 | } 159 | -------------------------------------------------------------------------------- /pytorch/README.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | Tutorial on high-degree polynomial networks 3 | =========================================== 4 | 5 | .. image:: https://img.shields.io/badge/PyTorch-1.12.0-red.svg 6 | :target: https://github.com/polynomial-nets/tutorial-2022-intro-polynomial-nets 7 | :alt: PyTorch 8 | 9 | 10 | This code implements two polynomial networks for image recognition **in PyTorch**. 11 | The two codes are based on the paper of `"**Π-nets: Deep Polynomial Neural Networks**" `_ (also available `here `_ ) [1]_. 12 | 13 | Those networks aim to demonstrate the performance of the polynomial networks with minimal code examples; therefore, they are not really the state-of-the-art results on recognition. For networks that can achieve state-of-the-art results the source code of the papers can be followed, since they have more intricate implementations. For instance, for Π-nets, please check [1]_. 14 | 15 | The two networks include the following: 16 | 17 | * The jupyter notebook ``Minimum_example_pytorch.ipynb`` implements a simple CCP model on MNIST classification. This can be opened and executed directly in a Google Colab environment. 18 | 19 | * The python files implement a product of polynomials (each polynomial has an NCP-based second degree polynomial). 20 | 21 | 22 | 23 | Train the network (of the *.py files) 24 | ==================================== 25 | 26 | To train the network, you can execute the following command:: 27 | 28 | python train_main.py 29 | 30 | 31 | 32 | Apart from PyTorch, the code depends on Pyaml [2]_. 33 | 34 | 35 | References 36 | ========== 37 | 38 | .. [1] https://github.com/grigorisg9gr/polynomial_nets/ 39 | 40 | .. 
[2] https://pypi.org/project/pyaml/ 41 | 42 | -------------------------------------------------------------------------------- /pytorch/model_ncp.py: -------------------------------------------------------------------------------- 1 | '''Model for Π-net based 2nd degree blocks without activation functions: 2 | https://ieeexplore.ieee.org/document/9353253 (or https://arxiv.org/abs/2006.13026). 3 | 4 | This file implements an NCP-based product of polynomials. 5 | ''' 6 | from functools import partial 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | def get_norm(norm_local): 12 | """ Define the appropriate function for normalization. """ 13 | if norm_local is None or norm_local == 0: 14 | norm_local = nn.BatchNorm2d 15 | elif norm_local == 1: 16 | norm_local = nn.InstanceNorm2d 17 | elif isinstance(norm_local, int) and norm_local < 0: 18 | norm_local = lambda a: lambda x: x 19 | return norm_local 20 | 21 | 22 | class SinglePoly(nn.Module): 23 | def __init__(self, in_planes, planes, stride=1, use_alpha=False, kernel_sz=3, 24 | norm_S=None, norm_layer=None, kernel_size_S=1, 25 | use_only_first_conv=False, **kwargs): 26 | """ This class implements a single second degree NCP model. 
""" 27 | super(SinglePoly, self).__init__() 28 | self._norm_layer = get_norm(norm_layer) 29 | self._norm_S = get_norm(norm_S) 30 | self.use_only_first_conv = use_only_first_conv 31 | 32 | pad1 = kernel_sz // 2 33 | self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=kernel_sz, stride=stride, padding=pad1, bias=False) 34 | self.bn1 = self._norm_layer(planes) 35 | if not self.use_only_first_conv: 36 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) 37 | self.bn2 = self._norm_layer(planes) 38 | 39 | pad = kernel_size_S // 2 40 | self.conv_S = nn.Conv2d(in_planes, planes, kernel_size=kernel_size_S, stride=stride, padding=pad, bias=False) 41 | self.bnS = self._norm_S(planes) 42 | 43 | self.use_alpha = use_alpha 44 | if self.use_alpha: 45 | self.alpha = nn.Parameter(torch.zeros(1)) 46 | self.monitor_alpha = [] 47 | 48 | def forward(self, x): 49 | out = self.bn1(self.conv1(x)) 50 | if not self.use_only_first_conv: 51 | out = self.bn2(self.conv2(out)) 52 | out1 = self.bnS(self.conv_S(x)) 53 | out_so = out * out1 54 | if self.use_alpha: 55 | out1 = out1 + self.alpha * out_so 56 | self.monitor_alpha.append(self.alpha) 57 | else: 58 | out1 = out1 + out_so 59 | return out1 60 | 61 | 62 | class ModelNCP(nn.Module): 63 | def __init__(self, block, num_blocks, num_classes=10, norm_layer=None, 64 | pool_adapt=False, n_channels=[64, 128, 256, 512], ch_in=3, **kwargs): 65 | super(ModelNCP, self).__init__() 66 | self.in_planes = n_channels[0] 67 | self._norm_layer = nn.BatchNorm2d if norm_layer is None else get_norm(norm_layer) 68 | assert len(n_channels) >= 4 69 | self.n_channels = n_channels 70 | self.pool_adapt = pool_adapt 71 | if pool_adapt: 72 | self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) 73 | else: 74 | self.avg_pool = partial(F.avg_pool2d, kernel_size=4) 75 | 76 | self.conv1 = nn.Conv2d(ch_in, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False) 77 | self.bn1 = self._norm_layer(n_channels[0]) 78 | self.layer1 = 
self._make_layer(block, n_channels[0], num_blocks[0], stride=1, **kwargs) 79 | self.layer2 = self._make_layer(block, n_channels[1], num_blocks[1], stride=2, **kwargs) 80 | self.layer3 = self._make_layer(block, n_channels[2], num_blocks[2], stride=2, **kwargs) 81 | self.layer4 = self._make_layer(block, n_channels[3], num_blocks[3], stride=2, **kwargs) 82 | self.linear = nn.Linear(n_channels[-1], num_classes) 83 | 84 | def _make_layer(self, block, planes, num_blocks, stride, **kwargs): 85 | strides = [stride] + [1]*(num_blocks-1) 86 | layers = [] 87 | for stride in strides: 88 | layers.append(block(self.in_planes, planes, stride, norm_layer=self._norm_layer, **kwargs)) 89 | self.in_planes = planes 90 | return nn.Sequential(*layers) 91 | 92 | def forward(self, x): 93 | out = self.bn1(self.conv1(x)) 94 | out = self.layer1(out) 95 | out = self.layer2(out) 96 | out = self.layer3(out) 97 | out = self.layer4(out) 98 | out = self.avg_pool(out) 99 | out = out.view(out.size(0), -1) 100 | out = self.linear(out) 101 | return out 102 | 103 | 104 | def ModelNCP_wrapper(num_blocks=None, **kwargs): 105 | if num_blocks is None: 106 | num_blocks = [1, 1, 1, 1] 107 | return ModelNCP(SinglePoly, num_blocks, **kwargs) 108 | 109 | 110 | def test(): 111 | net = ModelNCP_wrapper() 112 | y = net(torch.randn(1,3,32,32)) 113 | print(y.size()) 114 | 115 | # test() 116 | -------------------------------------------------------------------------------- /pytorch/model_ncp.yml: -------------------------------------------------------------------------------- 1 | comment: '30 April 2022; No activation functions with NCP model; product of polynomials.' 
2 | learning_rate: 0.01 3 | 4 | dataset: 5 | root: /tmp/ 6 | batch_size: 128 7 | db: cifar 8 | num_workers: 2 9 | 10 | model: 11 | fn: model_ncp.py 12 | name: ModelNCP_wrapper 13 | args: 14 | train: True 15 | use_alpha: True 16 | num_blocks: [2, 2, 2, 1] 17 | use_only_first_conv: 1 18 | kernel_sz: 5 19 | norm_S: 1 20 | 21 | training_info: 22 | total_epochs: 120 23 | display_interval: 200 24 | lr_milestones: [40, 60, 80, 100] 25 | lr_gamma: 0.4 26 | -------------------------------------------------------------------------------- /pytorch/train_main.py: -------------------------------------------------------------------------------- 1 | # Train CIFAR10 with pytorch 2 | from __future__ import print_function 3 | import yaml 4 | import sys 5 | from time import time 6 | import random 7 | from os.path import abspath, dirname, join, isdir 8 | from os import curdir, makedirs 9 | import logging 10 | 11 | import torch 12 | import torch.optim as optim 13 | 14 | from utils import (save_checkpoints, load_model, return_loaders) 15 | 16 | torch.backends.cudnn.benchmark = True 17 | base = dirname(abspath(__file__)) 18 | sys.path.append(base) 19 | 20 | 21 | def train(train_loader, net, optimizer, criterion, train_info, epoch, device): 22 | """ Perform single epoch of the training.""" 23 | net.train() 24 | # # initialize variables that are augmented in every batch. 25 | train_loss, correct, total = 0, 0, 0 26 | start_time = time() 27 | for idx, data_dict in enumerate(train_loader): 28 | img, label = data_dict[0], data_dict[1] 29 | inputs, label = img.to(device), label.to(device) 30 | optimizer.zero_grad() 31 | pred = net(inputs) 32 | loss = criterion(pred, label) 33 | assert not torch.isnan(loss), 'NaN loss.' 
34 | loss.backward() 35 | optimizer.step() 36 | 37 | train_loss += loss.item() 38 | _, predicted = torch.max(pred.data, 1) 39 | total += label.size(0) 40 | correct += predicted.eq(label).cpu().sum() 41 | if idx % train_info['display_interval'] == 0: 42 | m2 = ('Time: {:.04f}, Epoch: {}, Epoch iters: {} / {}\t' 43 | 'Loss: {:.04f}, Acc: {:.06f}') 44 | print(m2.format(time() - start_time, epoch, idx, len(train_loader), 45 | float(train_loss), float(correct) / total)) 46 | start_time = time() 47 | return net 48 | 49 | 50 | def test(net, test_loader, device='cuda'): 51 | """ Perform testing, i.e. run net on test_loader data 52 | and return the accuracy. """ 53 | net.eval() 54 | correct, total = 0, 0 55 | if hasattr(net, 'is_training'): 56 | net.is_training = False 57 | for (idx, data) in enumerate(test_loader): 58 | sys.stdout.write('\r [%d/%d]' % (idx + 1, len(test_loader))) 59 | sys.stdout.flush() 60 | img, label = data[0].to(device), data[1].to(device) 61 | with torch.no_grad(): 62 | pred = net(img) 63 | _, predicted = pred.max(1) 64 | total += label.size(0) 65 | correct += predicted.eq(label).sum().item() 66 | if hasattr(net, 'is_training'): 67 | net.is_training = True 68 | return correct / total 69 | 70 | 71 | def main(seed=None, use_cuda=True): 72 | # # set the seed for all. 73 | if seed is None: 74 | seed = random.randint(1, 10000) 75 | random.seed(seed) 76 | torch.manual_seed(seed) 77 | torch.cuda.manual_seed_all(seed) 78 | # # set the cuda availability. 79 | cuda = torch.cuda.is_available() and use_cuda 80 | device = torch.device('cuda' if cuda else 'cpu') 81 | yml = yaml.safe_load(open('model_ncp.yml')) # # file that includes the configuration. 82 | cur_path = abspath(curdir) 83 | # # define the output path 84 | out = join(cur_path, 'results_poly', '') 85 | if not isdir(out): 86 | makedirs(out) 87 | 88 | # # set the dataset options. 89 | train_loader, test_loader = return_loaders(**yml['dataset']) 90 | m1 = 'Current path: {}. Length of iters per epoch: {}. 
Length of testing batches: {}.' 91 | print(m1.format(cur_path, len(train_loader), len(test_loader))) 92 | # # load the model. 93 | modc = yml['model'] 94 | net = load_model(modc['fn'], modc['name'], modc['args']).to(device) 95 | 96 | # # define the criterion and the optimizer. 97 | criterion = torch.nn.CrossEntropyLoss().to(device) 98 | sub_params = [p for p in list(net.parameters()) if p.requires_grad] 99 | decay = yml['training_info']['weight_dec'] if 'weight_dec' in yml['training_info'].keys() else 5e-4 100 | optimizer = optim.SGD(sub_params, lr=yml['learning_rate'], 101 | momentum=0.9, weight_decay=decay) 102 | 103 | total_params = sum(p.numel() for p in net.parameters() if p.requires_grad) 104 | print('total params: {}'.format(total_params)) 105 | 106 | # # get the milestones/gamma for the optimizer. 107 | tinfo = yml['training_info'] 108 | mil = tinfo['lr_milestones'] if 'lr_milestones' in tinfo.keys() else [40, 60, 80, 100] 109 | gamma = tinfo['lr_gamma'] if 'lr_gamma' in tinfo.keys() else 0.1 110 | scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=mil, gamma=gamma) 111 | best_acc, best_epoch, accuracies = 0, 0, [] 112 | 113 | for epoch in range(1, tinfo['total_epochs'] + 1): 114 | scheduler.step() 115 | net = train(train_loader, net, optimizer, criterion, yml['training_info'], 116 | epoch, device) 117 | save_checkpoints(net, optimizer, epoch, out) 118 | # # testing mode to evaluate accuracy. 119 | acc = test(net, test_loader, device=device) 120 | if acc > best_acc: 121 | out_path = join(out, 'net_best_1.pth') 122 | state = {'net': net.state_dict(), 'acc': acc, 123 | 'epoch': epoch, 'n_params': total_params} 124 | torch.save(state, out_path) 125 | best_acc = acc 126 | best_epoch = epoch 127 | accuracies.append(float(acc)) 128 | msg = 'Epoch:{}.\tAcc: {:.03f}.\t Best_Acc:{:.03f} (epoch: {}).' 
129 | print(msg.format(epoch, acc, best_acc, best_epoch)) 130 | logging.info(msg.format(epoch, acc, best_acc, best_epoch)) 131 | 132 | 133 | if __name__ == '__main__': 134 | main() 135 | 136 | 137 | -------------------------------------------------------------------------------- /pytorch/utils.py: -------------------------------------------------------------------------------- 1 | from os.path import splitext, basename, dirname, join 2 | import sys 3 | import torch 4 | from torch.utils.data import Dataset, DataLoader 5 | from torchvision import datasets, transforms 6 | 7 | 8 | def load_module(fn, name): 9 | mod_name = splitext(basename(fn))[0] 10 | mod_path = dirname(fn) 11 | sys.path.insert(0, mod_path) 12 | return getattr(__import__(mod_name), name) 13 | 14 | 15 | def load_model(model_fn, model_name, args=None): 16 | model = load_module(model_fn, model_name) 17 | model1 = model(**args) if args else model() 18 | return model1 19 | 20 | 21 | def save_checkpoints(net, optimizer, epoch, model_path): 22 | latest = {} 23 | latest['epoch'] = epoch 24 | latest['net'] = net.state_dict() 25 | latest['optim'] = optimizer.state_dict() 26 | if epoch % 40 == 0: 27 | torch.save(latest, join(model_path, 'latest.pth%d.tar' % epoch)) 28 | 29 | 30 | def return_loaders(root, batch_size, **kwargs): 31 | """ 32 | Return the loader for the data. This is used both for training and for 33 | validation. Currently, hardcoded to CIFAR10. 34 | :param root: (str) Path of the root for finding the appropriate pkl/npy. 35 | :param batch_size: (int) The batch size for training. 36 | :param kwargs: 37 | :return: The train and validation time loaders. 
38 | """ 39 | trans = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), 40 | transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] 41 | transform_train = transforms.Compose(trans) 42 | transform_test = transforms.Compose(trans[-2:]) 43 | trainset = datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train) 44 | testset = datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test) 45 | 46 | train_loader = DataLoader(trainset, batch_size, shuffle=True) 47 | test_loader = DataLoader(testset, batch_size, shuffle=False) 48 | return train_loader, test_loader 49 | -------------------------------------------------------------------------------- /tensorflow/Minimum_example_tensorflow.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "CCP_model_minimum_example_TF.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [] 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | }, 14 | "language_info": { 15 | "name": "python" 16 | }, 17 | "accelerator": "GPU" 18 | }, 19 | "cells": [ 20 | { 21 | "cell_type": "code", 22 | "execution_count": 1, 23 | "metadata": { 24 | "colab": { 25 | "base_uri": "https://localhost:8080/" 26 | }, 27 | "id": "rpcr7wK6j9zq", 28 | "outputId": "79aaa578-4c57-4ca4-dd3a-256f7e4eca4d" 29 | }, 30 | "outputs": [ 31 | { 32 | "output_type": "stream", 33 | "name": "stdout", 34 | "text": [ 35 | "2.8.2\n" 36 | ] 37 | } 38 | ], 39 | "source": [ 40 | "import tensorflow as tf\n", 41 | "from tensorflow.keras import Model, layers, datasets\n", 42 | "tf.compat.v1.enable_eager_execution()\n", 43 | "tf.print(tf. 
__version__)\n", 44 | "from sklearn.model_selection import train_test_split\n", 45 | "import numpy as np\n", 46 | "import matplotlib.pyplot as plt" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "source": [ 52 | "def load_db(path='mnist.npz', batch_size=64, shuffle=True, valid_ratio=0.2):\n", 53 | " (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data(path)\n", 54 | " x_train, x_test = np.array(x_train, np.float32) / 255., np.array(x_test, np.float32)/ 255\n", 55 | " x_train_sampler, x_valid_sampler, y_train_sampler, y_valid_sampler = train_test_split(x_train, y_train,\n", 56 | " stratify=y_train, test_size=valid_ratio)\n", 57 | " x_train_sampler = layers.Normalization(mean=0.5, variance=0.5)(x_train_sampler)\n", 58 | " x_valid_sampler = layers.Normalization(mean=0.5, variance=0.5)(x_valid_sampler)\n", 59 | " x_test = layers.Normalization(mean=0.5, variance=0.5)(x_test)\n", 60 | "\n", 61 | " train_loader = tf.data.Dataset.from_tensor_slices((x_train_sampler, y_train_sampler)).batch(batch_size)\n", 62 | " valid_loader = tf.data.Dataset.from_tensor_slices((x_valid_sampler, y_valid_sampler)).batch(batch_size)\n", 63 | " test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)\n", 64 | "\n", 65 | " if shuffle:\n", 66 | " train_loader = train_loader.shuffle(5000)\n", 67 | " valid_loader = valid_loader.shuffle(5000)\n", 68 | " test_loader = test_loader.shuffle(5000)\n", 69 | "\n", 70 | " image_size, n_classes, channels_in = 28, 10, 1\n", 71 | " return train_loader, valid_loader, test_loader, image_size, n_classes, channels_in" 72 | ], 73 | "metadata": { 74 | "id": "xkDUB6-lW9BB" 75 | }, 76 | "execution_count": 2, 77 | "outputs": [] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "source": [ 82 | "class CCP(Model):\n", 83 | " def __init__(self, hidden_size, image_size=28, channels_in=1, n_degree=4, bias=False, n_classes=10):\n", 84 | " super(CCP, self).__init__()\n", 85 | " self.image_size = image_size\n", 86 | " self.channels_in 
= channels_in\n", 87 | " self.hidden_size = hidden_size\n", 88 | " self.n_classes = n_classes\n", 89 | " self.n_degree = n_degree\n", 90 | " self.total_image_size = self.image_size * self.image_size * channels_in\n", 91 | " init_U = tf.keras.initializers.RandomUniform(minval=-1*np.sqrt(1/self.total_image_size),\n", 92 | " maxval=np.sqrt(1/self.total_image_size))\n", 93 | " for i in range(1, self.n_degree + 1):\n", 94 | " setattr(self, 'U{}'.format(i), layers.Dense(self.hidden_size,\n", 95 | " use_bias=bias,\n", 96 | " kernel_initializer=init_U,\n", 97 | " bias_initializer=init_U))\n", 98 | " init_C = tf.keras.initializers.RandomUniform(minval=-1*np.sqrt(1/self.hidden_size),\n", 99 | " maxval=np.sqrt(1/self.hidden_size))\n", 100 | " self.C = layers.Dense(self.n_classes,use_bias=True,kernel_initializer=init_C,bias_initializer=init_C)\n", 101 | "\n", 102 | " def call(self, z):\n", 103 | " h = layers.Flatten(input_shape=(self.image_size, self.image_size,self.channels_in))(z)\n", 104 | " out = self.U1(h)\n", 105 | " for i in range(2, self.n_degree + 1):\n", 106 | " out = getattr(self, 'U{}'.format(i))(h) * out + out\n", 107 | " out = self.C(out)\n", 108 | " return out\n", 109 | "\n", 110 | "train_loader, valid_loader, test_loader, image_size, n_classes, channels_in = load_db(batch_size=64)\n", 111 | "# create the model.\n", 112 | "net = CCP(16, image_size=image_size, n_classes=n_classes)\n", 113 | "# # define the optimizer\n", 114 | "opt = tf.keras.optimizers.SGD(learning_rate=0.001)\n", 115 | "# # aggregate losses and accuracy.\n", 116 | "train_losses, acc_list = [], []\n", 117 | "train_loss = tf.keras.metrics.Mean(name='train_loss')\n", 118 | "train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n", 119 | "test_loss = tf.keras.metrics.Mean(name='test_loss')\n", 120 | "test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')" 121 | ], 122 | "metadata": { 123 | "id": "DTKsC5s9pdfT" 124 | }, 125 | "execution_count": 
3, 126 | "outputs": [] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "source": [ 131 | "criterion = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n", 132 | "\n", 133 | "def train(train_loader, net, optimizer, criterion, epoch,):\n", 134 | " \"\"\" Perform single epoch of the training.\"\"\"\n", 135 | " for idx, data_dict in enumerate(train_loader):\n", 136 | " img = data_dict[0]\n", 137 | " label = data_dict[1]\n", 138 | " with tf.GradientTape() as tape:\n", 139 | " predictions = net(img, training=True)\n", 140 | " loss = criterion(label, predictions)\n", 141 | " gradients = tape.gradient(loss, net.trainable_variables)\n", 142 | " optimizer.apply_gradients(zip(gradients, net.trainable_variables))\n", 143 | " train_loss(loss)\n", 144 | " train_accuracy(label, predictions)\n", 145 | " if idx % 100 == 0 and idx > 0:\n", 146 | " m2 = ('Epoch: {}, Epoch iters: {} / {}\\t'\n", 147 | " 'Loss: {:.04f}, Acc: {:.06f}')\n", 148 | " print(m2.format(epoch, idx, len(train_loader), float(train_loss.result()), train_accuracy.result()))\n", 149 | " return train_loss.result()\n", 150 | "\n", 151 | "def test(test_loader):\n", 152 | " \"\"\" Perform testing, i.e. run net on test_loader data\n", 153 | " and return the accuracy. 
\"\"\"\n", 154 | " for test_images, test_labels in test_loader:\n", 155 | " predictions = net(test_images, training=False)\n", 156 | " t_loss = criterion(test_labels, predictions)\n", 157 | " test_loss(t_loss)\n", 158 | " test_accuracy(test_labels, predictions)\n", 159 | " return test_accuracy.result()\n", 160 | "\n", 161 | "acc = 0.\n", 162 | "for epoch in range(0, 5):\n", 163 | " print('Epoch {} (previous validation accuracy: {:.03f})'.format(epoch, acc))\n", 164 | " train_loss.reset_states()\n", 165 | " train_accuracy.reset_states()\n", 166 | " test_loss.reset_states()\n", 167 | " test_accuracy.reset_states()\n", 168 | " loss_tr = train(train_loader, net, opt, criterion, epoch)\n", 169 | " acc = test(test_loader)\n", 170 | " train_losses.append(loss_tr)\n", 171 | " acc_list.append(acc)" 172 | ], 173 | "metadata": { 174 | "colab": { 175 | "base_uri": "https://localhost:8080/" 176 | }, 177 | "id": "sYjBxB5bXT3X", 178 | "outputId": "d46617ed-00e5-4247-da57-50fccef65d76" 179 | }, 180 | "execution_count": 4, 181 | "outputs": [ 182 | { 183 | "output_type": "stream", 184 | "name": "stdout", 185 | "text": [ 186 | "Epoch 0 (previous validation accuracy: 0.000)\n", 187 | "Epoch: 0, Epoch iters: 100 / 750\tLoss: 2.2229, Acc: 0.169090\n", 188 | "Epoch: 0, Epoch iters: 200 / 750\tLoss: 2.0040, Acc: 0.295787\n", 189 | "Epoch: 0, Epoch iters: 300 / 750\tLoss: 1.7247, Acc: 0.409209\n", 190 | "Epoch: 0, Epoch iters: 400 / 750\tLoss: 1.4857, Acc: 0.499532\n", 191 | "Epoch: 0, Epoch iters: 500 / 750\tLoss: 1.3121, Acc: 0.563529\n", 192 | "Epoch: 0, Epoch iters: 600 / 750\tLoss: 1.1836, Acc: 0.609011\n", 193 | "Epoch: 0, Epoch iters: 700 / 750\tLoss: 1.0909, Acc: 0.641829\n", 194 | "Epoch 1 (previous validation accuracy: 0.865)\n", 195 | "Epoch: 1, Epoch iters: 100 / 750\tLoss: 0.4559, Acc: 0.867420\n", 196 | "Epoch: 1, Epoch iters: 200 / 750\tLoss: 0.4490, Acc: 0.868315\n", 197 | "Epoch: 1, Epoch iters: 300 / 750\tLoss: 0.4403, Acc: 0.869809\n", 198 | "Epoch: 1, Epoch iters: 400 / 
750\tLoss: 0.4321, Acc: 0.871532\n", 199 | "Epoch: 1, Epoch iters: 500 / 750\tLoss: 0.4235, Acc: 0.873721\n", 200 | "Epoch: 1, Epoch iters: 600 / 750\tLoss: 0.4150, Acc: 0.876560\n", 201 | "Epoch: 1, Epoch iters: 700 / 750\tLoss: 0.4075, Acc: 0.878655\n", 202 | "Epoch 2 (previous validation accuracy: 0.902)\n", 203 | "Epoch: 2, Epoch iters: 100 / 750\tLoss: 0.3411, Acc: 0.894338\n", 204 | "Epoch: 2, Epoch iters: 200 / 750\tLoss: 0.3427, Acc: 0.897077\n", 205 | "Epoch: 2, Epoch iters: 300 / 750\tLoss: 0.3465, Acc: 0.896283\n", 206 | "Epoch: 2, Epoch iters: 400 / 750\tLoss: 0.3457, Acc: 0.898067\n", 207 | "Epoch: 2, Epoch iters: 500 / 750\tLoss: 0.3424, Acc: 0.898827\n", 208 | "Epoch: 2, Epoch iters: 600 / 750\tLoss: 0.3389, Acc: 0.899724\n", 209 | "Epoch: 2, Epoch iters: 700 / 750\tLoss: 0.3392, Acc: 0.899875\n", 210 | "Epoch 3 (previous validation accuracy: 0.910)\n", 211 | "Epoch: 3, Epoch iters: 100 / 750\tLoss: 0.3105, Acc: 0.907797\n", 212 | "Epoch: 3, Epoch iters: 200 / 750\tLoss: 0.3090, Acc: 0.908504\n", 213 | "Epoch: 3, Epoch iters: 300 / 750\tLoss: 0.3150, Acc: 0.906302\n", 214 | "Epoch: 3, Epoch iters: 400 / 750\tLoss: 0.3147, Acc: 0.905821\n", 215 | "Epoch: 3, Epoch iters: 500 / 750\tLoss: 0.3126, Acc: 0.906905\n", 216 | "Epoch: 3, Epoch iters: 600 / 750\tLoss: 0.3120, Acc: 0.907368\n", 217 | "Epoch: 3, Epoch iters: 700 / 750\tLoss: 0.3075, Acc: 0.908702\n", 218 | "Epoch 4 (previous validation accuracy: 0.920)\n", 219 | "Epoch: 4, Epoch iters: 100 / 750\tLoss: 0.2633, Acc: 0.920019\n", 220 | "Epoch: 4, Epoch iters: 200 / 750\tLoss: 0.2788, Acc: 0.915190\n", 221 | "Epoch: 4, Epoch iters: 300 / 750\tLoss: 0.2854, Acc: 0.914244\n", 222 | "Epoch: 4, Epoch iters: 400 / 750\tLoss: 0.2806, Acc: 0.915524\n", 223 | "Epoch: 4, Epoch iters: 500 / 750\tLoss: 0.2835, Acc: 0.915450\n", 224 | "Epoch: 4, Epoch iters: 600 / 750\tLoss: 0.2825, Acc: 0.915739\n", 225 | "Epoch: 4, Epoch iters: 700 / 750\tLoss: 0.2825, Acc: 0.915857\n" 226 | ] 227 | } 228 | ] 229 | }, 230 | { 
231 | "cell_type": "code", 232 | "source": [ 233 | "%matplotlib inline\n", 234 | "plt.plot(train_losses)\n", 235 | "plt.title('Train loss')\n", 236 | "plt.figure()\n", 237 | "plt.plot(acc_list)\n", 238 | "plt.title('Validation accuracy')" 239 | ], 240 | "metadata": { 241 | "colab": { 242 | "base_uri": "https://localhost:8080/", 243 | "height": 562 244 | }, 245 | "id": "813GhAL6Xcr-", 246 | "outputId": "c145c0a8-c8e7-4b07-c601-4bc7bdc57b3d" 247 | }, 248 | "execution_count": 5, 249 | "outputs": [ 250 | { 251 | "output_type": "execute_result", 252 | "data": { 253 | "text/plain": [ 254 | "Text(0.5, 1.0, 'Validation accuracy')" 255 | ] 256 | }, 257 | "metadata": {}, 258 | "execution_count": 5 259 | }, 260 | { 261 | "output_type": "display_data", 262 | "data": { 263 | "text/plain": [ 264 | "
" 265 | ], 266 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEICAYAAABPgw/pAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3deXhU933v8fdX+4YWkAChBUkYL3gDLGPH2LLbJDV2enHduCmkTmLXRu5tfLulvXV686St7+3T9N4+venitsZb7Lje4ubmIYkTZ3FiwFuQARtjlgAWQgKMjCUhENq/948ZyWNZQgOM5syMPq/n0cPMnJ90PhyYj86cM+c35u6IiEjySws6gIiIxIYKXUQkRajQRURShApdRCRFqNBFRFKECl1EJEWo0GVaMbMfmNkXzvB7m83sE7HOJBIrGUEHEJmMmR2PuJsH9AFD4ft3uft/RPuz3P2GWGYTSSQqdEl47l4wctvMmoE73f0nY8eZWYa7D8Yzm0gi0SEXSVpmdp2ZtZrZn5vZYeARMysxs++ZWbuZdYRvV0Z8z8/N7M7w7dvMbKOZ/X147DtmFtUevJllm9nXzexg+OvrZpYdXlYaXm+nmb1vZhvMLC287M/NrM3Mus1sl5l9fAo2jUxTKnRJdnOBmcB8oJHQ/+lHwvergZPAv5zi+68AdgGlwP8GHjIzi2K9/wO4ElgMXAosA74SXvYloBUoA+YAfwG4mZ0H3A1c7u4zgOuB5ij/niKTUqFLshsG/tLd+9z9pLsfdff/dPced+8G/ga49hTfv9/dH3D3IeBRoJxQCU/md4B73f2Iu7cDfw18LrxsIPxz5rv7gLtv8NCkSUNANrDIzDLdvdnd957R31pkHCp0SXbt7t47csfM8szsfjPbb2bHgPVAsZmlT/D9h0duuHtP+GbBBGMjzQP2R9zfH34M4P8Ae4Afmdk+M7sn/PP3AH8E/BVwxMyeMrN5iMSICl2S3djpQr8EnAdc4e6FQEP48WgOo5yOg4QO64yoDj+Gu3e7+5fcvQ5YCfzJyLFyd3/C3a8Of68DfxfjXDKNqdAl1cwgdNy808xmAn85Ret5EviKmZWZWSnwVeBxADP7dTM7J3wsvovQoZZhMzvPzH41fPK0N5xzeIryyTSkQpdU83UgF3gPeBX44RSt538BTcCbwDZgc/gxgIXAT4DjwCvAv7r7zwgdP/9aONthYDbw5SnKJ9OQ6QMuRERSg/bQRURShApdRCRFqNBFRFKECl1EJEUENjlXaWmp19TUBLV6EZGk9Prrr7/n7mXjLQus0Gtqamhqagpq9SIiScnM9k+0TIdcRERShApdRCRFqNBFRFKECl1EJEWo0EVEUoQKXUQkRajQRURSRNIV+rbWLv7uhzvRLJEiIh+WdIW+9UAH//bzvTTt7wg6iohIQkm6Qr/lsipK8jK5/8V9QUcREUkoSVfouVnpfO5jNfxkx7vsOXI86DgiIgkj6Qod4Asfm092RhoPbtBeuojIiKQs9FkF2dxyWSXf3tzGke7eoOOIiCSEpCx0gDuvqWNgeJjHXp5w4jERkWklaQu9tjSf6xfN5Zuv7udE32DQcUREApe0hQ7QeG0dXScHeKbpQNBRREQCl9SFvrS6hMtrSnho4zsMDg0HHUdEJFBJXegAa66po7XjJM+9dTjoKCIigZq00M3sYTM7YmZvTbDczOyfzGyPmb1pZktjH3Nin7hgDnVl+axdv1fTAYjItBbNHvo3gBWnWH4DsDD81Qj829nHil5amrHmmjreajvGK3uPxnPVIiIJZdJCd/f1wPunGHIT8JiHvAoUm1l5rAJG4+YlFZQWZHP/el1oJCLTVyyOoVcAkW8zaQ0/Fjc5mencdtV8Xtzdzq7D3fFctYhIwojrSVEzazSzJjNram9vj+nPvvXK+eRlpbNWe+kiMk3FotDbgKqI+5Xhx
z7C3de6e72715eVlcVg1R8ozsviM/VVrHujjcNdmg5ARKafWBT6OuDz4Xe7XAl0ufuhGPzc03bH1bUMDTuPvPROEKsXEQlUNG9bfBJ4BTjPzFrN7A4z+z0z+73wkOeAfcAe4AHg96cs7SSqZuZx48XlPPFaC929A0HFEBEJRMZkA9x99STLHfhizBKdpbsaFvC9Nw/x5C9aaGxYEHQcEZG4SforRce6uLKIj9XN4uGNzfQPajoAEZk+Uq7QITRp1+FjvXz3jYNBRxERiZuULPTrzi3jvDkzeGDDPk0HICLTRkoWupmxpqGOnYe7Wf/L94KOIyISFylZ6AArL53H3MIc1q7fG3QUEZG4SNlCz8pI4/blNby05yhvtXUFHUdEZMqlbKEDrL6imoLsDE0HICLTQkoXemFOJquXVfH9bYdo7egJOo6IyJRK6UIHuH15LQY8tFHTAYhIakv5Qp9XnMvKS+fx9KYDdPVoOgARSV0pX+gAaxrq6Okf4vHX9gcdRURkykyLQr+gvJCGc8v4xsvN9A0OBR1HRGRKTItCB7iroY727j6+s2XcqdpFRJLetCn0qxbM4sJ5haxdv4/hYU0HICKpZ9oUupnR2FDH3vYTvLDzSNBxRERibtoUOsCNF5dTUZyrC41EJCVNq0LPTE/jd6+u5RfN77OlpSPoOCIiMRVVoZvZCjPbZWZ7zOyecZbPN7OfmtmbZvZzM6uMfdTYWHV5FYU5mg5ARFJPNJ8pmg7cB9wALAJWm9miMcP+HnjM3S8B7gX+NtZBYyU/O4Nbr5zPD7cfpvm9E0HHERGJmWj20JcBe9x9n7v3A08BN40Zswh4IXz7Z+MsTyi3XVVDZlqapgMQkZQSTaFXAAci7reGH4v0BvCb4ds3AzPMbNbYH2RmjWbWZGZN7e3tZ5I3JmYX5nDzkgq+9foB3j/RH1gOEZFYitVJ0T8FrjWzLcC1QBvwkUsy3X2tu9e7e31ZWVmMVn1m1jTU0jswzGOvNAeaQ0QkVqIp9DagKuJ+ZfixUe5+0N1/092XAP8j/FhnzFJOgXNmz+ATF8zmsVf2c7Jf0wGISPKLptA3AQvNrNbMsoBVwLrIAWZWamYjP+vLwMOxjTk11lxTx/sn+nl2c2vQUUREztqkhe7ug8DdwPPADuAZd99uZvea2crwsOuAXWa2G5gD/M0U5Y2pZbUzubSqmAc37GNI0wGISJKL6hi6uz/n7ue6+wJ3/5vwY19193Xh28+6+8LwmDvdvW8qQ8eKmXFXQx37j/bwo+2Hg44jInJWptWVouO5/sK5zJ+Vx/3r9+GuvXQRSV7TvtDT04w7r65l64FOmvZrOgARSV7TvtABbrmsipn5Wdz/oqYDEJHkpUIHcrPS+dyV8/nJjnfZc+R40HFERM6ICj3s8x+bT3ZGGg9u0F66iCQnFXrYrIJsbrmskm9vbuNId2/QcURETpsKPcKd19QxMDzMoy83Bx1FROS0qdAj1Jbmc/2iuTz+agsn+gaDjiMiclpU6GM0XltH18kBnt50YPLBIiIJRIU+xtLqEi6vKeGhje8wODQcdBwRkaip0MfR2LCAts6TPPeWpgMQkeShQh/Hx8+fzYKyfNau36vpAEQkaajQx5GWZqy5po632o7xyt6jQccREYmKCn0Cv7GkgtKCbO5frwuNRCQ5qNAnkJOZzm1XzefF3e3sPHws6DgiIpNSoZ/CrVfOJy8rnbXaSxeRJKBCP4XivCw+U1/Fuq0HOdR1Mug4IiKnFFWhm9kKM9tlZnvM7J5xlleb2c/MbIuZvWlmN8Y+ajDuuLoWB77xUnPQUURETmnSQjezdOA+4AZgEbDazBaNGfYVQp81uoTQh0j/a6yDBqVqZh43XlzOE6+10N07EHQcEZEJRbOHvgzY4+773L0feAq4acwYBwrDt4uAg7GLGLy7Guro7hvkyV+0BB1FRGRC0RR6BRA5sUlr+LFIfwXcamatwHPAfxvvB5lZo5k1mVlTe3v7G
cQNxkUVRVy1YBYPb2ymf1DTAYhIYorVSdHVwDfcvRK4EfimmX3kZ7v7Wnevd/f6srKyGK06PtY01HH4WC/ffSOlXnyISAqJptDbgKqI+5XhxyLdATwD4O6vADlAaSwCJorrzi3jvDkzeGDDPk0HICIJKZpC3wQsNLNaM8sidNJz3ZgxLcDHAczsAkKFnjzHVKJgZqxpqGPn4W5e3J1SfzURSRGTFrq7DwJ3A88DOwi9m2W7md1rZivDw74ErDGzN4Angds8BXdjV146j7mFObrQSEQSUkY0g9z9OUInOyMf+2rE7beB5bGNlniyMtK4fXkNf/uDnbzV1sVFFUVBRxIRGaUrRU/T6iuqKcjO0F66iCQcFfppKszJ5LNXVPP9bYdo7egJOo6IyCgV+hm4fXkNBjy08Z2go4iIjFKhn4HyolxWXjqPpzcdoKtH0wGISGJQoZ+hNQ119PQP8fhr+4OOIiICqNDP2AXlhTScW8YjLzXTOzAUdBwRERX62biroY73jvfxnS1jL5wVEYk/FfpZuGrBLC6cV8gDG/YxPJxy11GJSJJRoZ8FM6OxoY697Sd4YeeRoOOIyDSnQj9Ln7q4nIriXF1oJCKBU6GfpYz0NO64upZfNL/PlpaOoOOIyDSmQo+B3768isIcTQcgIsFSocdAfnYGt145nx9uP0zzeyeCjiMi05QKPUZuu6qGzLQ0HtyovXQRCYYKPUZmF+Zw85IKvtXUytHjfUHHEZFpSIUeQ2saaukbHOaxVzQdgIjEX1SFbmYrzGyXme0xs3vGWf5/zWxr+Gu3mXXGPmriO2f2DD5xwWy++ep+TvZrOgARia9JC93M0oH7gBuARcBqM1sUOcbd/9jdF7v7YuCfgW9PRdhk0NiwgPdP9PPs5tago4jINBPNHvoyYI+773P3fuAp4KZTjF9N6HNFp6XLa0pYXFXMgxv2MaTpAEQkjqIp9ArgQMT91vBjH2Fm84Fa4IWzj5acRqYD2H+0hx9tPxx0HBGZRmJ9UnQV8Ky7j3sA2cwazazJzJra29tjvOrEcf2Fc5k/K4/71+/DXXvpIhIf0RR6G1AVcb8y/Nh4VnGKwy3uvtbd6929vqysLPqUSSY9zbjz6lq2HuhkU7OmAxCR+Iim0DcBC82s1syyCJX2urGDzOx8oAR4JbYRk9Mtl1UxMz+Ltev3Bh1FRKaJSQvd3QeBu4HngR3AM+6+3czuNbOVEUNXAU+5jjEAkJuVzueunM9Pdhxhz5HuoOOIyDRgQfVvfX29NzU1BbLueDl6vI+rvvYCNy+p4GufviToOCKSAszsdXevH2+ZrhSdQrMKsvmt+kq+vbmNI929QccRkRSnQp9id15dx8DwMI++3Bx0FBFJcSr0KVZTms/1i+by+KstnOgbDDqOiKQwFXocNF5bR9fJAZ7edGDywSIiZ0iFHgdLq0u4vKaEhza+w+DQcNBxRCRFqdDjpLFhAW2dJ/n+tkNBRxGRFKVCj5OPnz+bBWX5rNV0ACIyRVTocZKWZqy5po7tB4/xyt6jQccRkRSkQo+j31hSQWlBNvev1+eOikjsqdDjKCcznduX1/Di7nZ2Hj4WdBwRSTEq9Dj7nSuqyctKZ6320kUkxlTocVacl8Vn6qtYt/Ugh7pOBh1HRFKICj0Ad1xdiwOPvNQcdBQRSSEq9ABUzczjxovLeeK1Fo71DgQdR0RShAo9IHc11HG8b5AnX2sJOoqIpAgVekAuqijiqgWzeOSlZvoHNR2AiJw9FXqAGhvqOHysl+++cTDoKCKSAqIqdDNbYWa7zGyPmd0zwZjPmNnbZrbdzJ6IbczUdO25ZZw3ZwYPbNB0ACJy9iYtdDNLB+4DbgAWAavNbNGYMQuBLwPL3f1C4I+mIGvKMTPWNNSx83A3L+5uDzqOiCS5aPbQlwF73H2fu/cDTwE3jRmzBrjP3TsA3P1IbGOmrpWXzmNuYY4uNBKRsxZNoVcAkZ/M0Bp+LNK5wLlm9pKZvWpmK
8b7QWbWaGZNZtbU3q49UoCsjDRuX17Dy3uP8lZbV9BxRCSJxeqkaAawELgOWA08YGbFYwe5+1p3r3f3+rKyshitOvmtvqKaguwMTdolImclmkJvA6oi7leGH4vUCqxz9wF3fwfYTajgJQqFOZl89opqntt2iAPv9wQdR0SSVDSFvglYaGa1ZpYFrALWjRnzHUJ755hZKaFDMNrdPA23L6/BgIdfeifoKCKSpCYtdHcfBO4Gngd2AM+4+3Yzu9fMVoaHPQ8cNbO3gZ8Bf+bu+hSH01BelMvKxfN4etMBuno0HYCInD4L6v3P9fX13tTUFMi6E9XOw8dY8fUN/Nn15/HFXzkn6DgikoDM7HV3rx9vma4UTSDnzy2k4dwyHnmpmd6BoaDjiEiSUaEnmLsa6njveB/f2TL2vLOIyKmp0BPMVQtmceG8QtZu2MfwsKYDEJHoqdATjJnR2FDHvvYT/HSnLrgVkeip0BPQpy4up6I4l7Xr9wYdRUSSiAo9AWWkp3HH1bVsau5gc0tH0HFEJEmo0BPUb19eRVFuJg9oOgARiZIKPUHlZ2dw65XV/HD7YZrfOxF0HBFJAir0BPaFq2rITEvjwY3aSxeRyanQE9jsGTncvKSCbzW1cvR4X9BxRCTBqdAT3JqGWvoGh3nslf1BRxGRBKdCT3DnzJ7BJy6YzWOvNHOyX9MBiMjEVOhJoLFhAR09Azz7+oHJB4vItKVCTwKX15SwuKqYBze+w5CmAxCRCajQk4CZcVdDHfuP9vCj7YeDjiMiCUqFniR+7cK51MzK4/71+whqDnsRSWwq9CSRnmbccU0dWw90sqlZ0wGIyEdFVehmtsLMdpnZHjO7Z5zlt5lZu5ltDX/dGfuocsvSSmbmZ2nSLhEZ16SFbmbpwH3ADcAiYLWZLRpn6NPuvjj89WCMcwqQm5XO566cz092HGHPke6g44hIgolmD30ZsMfd97l7P/AUcNPUxpKJfP5j88nOSOOB9e8EHUVEEkw0hV4BRL4BujX82FifNrM3zexZM6sa7weZWaOZNZlZU3t7+xnElVkF2fxWfSX/b0sbR471Bh1HRBJIrE6KfheocfdLgB8Dj443yN3Xunu9u9eXlZXFaNXTz51X1zEwPMw3Xm4OOoqIJJBoCr0NiNzjrgw/Nsrdj7r7yOxRDwKXxSaejKemNJ8VF87l8Vf3c6JvMOg4IpIgoin0TcBCM6s1syxgFbAucoCZlUfcXQnsiF1EGU9jQx3Hegd5epOmAxCRkEkL3d0HgbuB5wkV9TPuvt3M7jWzleFhf2Bm283sDeAPgNumKrCELKkuYVnNTB7a+A6DQ8NBxxGRBGBBXXVYX1/vTU1Ngaw7Vfz47XdZ81gT/7hqMTctHu88tYikGjN73d3rx1umK0WT2MfPn82CsnzWajoAEUGFntTS0ow119Sx/eAxXt57NOg4IhIwFXqS+40lFZQWZHP/en3uqMh0p0JPcjmZ6dy+vIb1u9vZcehY0HFEJEAq9BRw6xXzyctK54EN2ksXmc5U6CmgKC+T3768inVbD3Ko62TQcUQkICr0FPG7y2tx4JGXmoOOIiIBUaGniKqZedx4cTlPvNbCsd6BoOOISABU6CnkroY6jvcN8uRrLUFHEZEAqNBTyEUVRVy1YBaPvNRM/6CmAxCZblToKaaxoY7Dx3pZ98bBoKOISJyp0FPMteeWcf7cGTyg6QBEph0VeooxC00HsOvdbl7crU+FEplOVOgp6L9cOo+5hTms1XQAItOKCj0FZWWk8btX1/Dy3qO81dYVdBwRiRMVeopatayaguwMTdolMo2o0FNUYU4mn72imue2HeLA+z1BxxGROIiq0M1shZntMrM9ZnbPKcZ92szczMb9NA2Jr9uX12DAQxvfCTqKiMRBxmQDzCwduA/4JNAKbDKzde7+9phxM4A/BF6biqBy+sqLclm5eB5P/qKFE32DLKkuYUl1MefOmUF6mgUdT0RibNJCB5YBe9x9H
4CZPQXcBLw9Ztz/BP4O+LOYJpSz8qe/dh7dvYP8dOcRvvV6KwD5WelcWlXMkupillSVsLi6mNKC7ICTisjZiqbQK4ADEfdbgSsiB5jZUqDK3b9vZhMWupk1Ao0A1dXVp59WTtu84lwe+Hw97s7+oz1sOdDBlpZOtrR08u8v7mNoOHTxUfXMPJZWF4/uxZ8/t5CsDJ1iEUkm0RT6KZlZGvAPwG2TjXX3tcBagPr6el3GGEdmRk1pPjWl+dy8pBKAk/1DbGvrYktLqORf3nuU72wNTRmQnZHGxRVFLKkuZml1CUuqS5hblBPkX0FEJhFNobcBVRH3K8OPjZgBXAT83MwA5gLrzGyluzfFKqjEXm5WOstqZ7KsdiYA7s6hrl42t4zsxXfw6Mv7eWBD6KRqeVHO6GGapfOLuXBeETmZ6UH+FUQkQjSFvglYaGa1hIp8FfDZkYXu3gWUjtw3s58Df6oyTz5mxrziXOYV5/Lrl8wDoG9wiB2Hutm8v4MtB0Il/9y2wwBkphuLygtHD9MsrS6hsiSX8C92EYmzSQvd3QfN7G7geSAdeNjdt5vZvUCTu6+b6pASnOyMdBZXFbO4qnj0sSPdvWxt6WRzeC/+6U0H+MbLzQCUFmSxuOqDgr+ksoj87LM+siciUbCgZuSrr6/3pibtxKeCwaFhdr3bzZaWTja3dLC1pZN9750AIM3gvLmF4UM1xSydX0LtrHzS9LZJkTNiZq+7+7jX+qjQZUp0nOhna2vn6LH4rS2ddPcNAlCUm8nikbdNVpewuKqYotzMgBOLJAcVugRueNjZ2348VPAHOti8v5PdR7oZ+e93zuwCllQV6+InkUmo0CUhdfcO8GbrB2+b3NzSQUdP6AOux178tKS6mFm6+EnklIWus1USmBk5mSw/p5Tl54TeJDXZxU/zZ+V9aC/+gvJCMtN18ZPICBW6JIwzufjpksqiUMGHi14XP8l0pkMuklTGu/jprbZj9A8NA6GLn5aG9+CXVOviJ0k9OuQiKSPai5++v+0QEL74aV5ReA9eFz9JatMeuqSkI929o8fht7R08GZrFycHhgAoLcge3YNfUqWLnyS5aA9dpp3ZM3K4/sK5XH/hXOCDi582R7wv/sdvvwt8cPHT0tH3xRdRWZKnQzWSdLSHLtPWqS5+ApiVn0V5cQ7lRbnMK8qhvDiX8qKc0UM+c2Zkk6F32UicaQ9dZBwl+Vn8ynmz+ZXzZgMfXPy0ra2Lg50nOdjVy8HOk7Qc7eHVfUfp7h380PenWeiVQHlxDvOKQmVfXpxLRfiXQHlxDqX52ZrmQOJGhS4SlpZmLJwzg4VzZoy7/HjfIIciin7k9qGuk+w4dIyf7nyX3oHhD31PVnoac4qyR/fy5xXnUl4c3uMvymVecQ5FuZk6SSsxoUIXiVJBdsYpC9/d6egZCJV9uOgPdob+PNTZy6bmDt49dojB4Q8f5szNTKe8OIeK8CGdkaKP/FMnbSUa+l8iEiNmxsz8LGbmZ3FRRdG4Y4aGnfeO942W/sHOD0r/YFcvuw630368j7GntopyM0eP30f+WV6US0VxLnOKssnO0Enc6U6FLhJH6WnGnMIc5hTmsGSCMf2Dw7x7rPeD0g/v4Y/s8W+JmPMmUmlBdniP/oO9+1Dxh27PnpGjCc9SnApdJMFkZaRRNTOPqpl5E4452T80WvSRhd/WeZK97SfY+Mv3ONE/9KHvSU8z5szIHn23zughnuLc0End4hxm5WfpeH4Si6rQzWwF8I+EPrHoQXf/2pjlvwd8ERgCjgON7v52jLOKSFhuVjoLygpYUFYw7nJ351jv4Ojx+4NdJ8MnckO3t7V18aPt745OmTAiKyMtdEgnXPBj/ywvyqUwJ0Oln6AmfR+6maUDu4FPAq2EPmN0dWRhm1mhux8L314J/L67rzjVz9X70EWC5e4cPdH/oeP4I8f1R/5891gvY87hUpCdEbFnHyr5mfmZFOdlUZKXRXFeJiX5WZTkZZKbm
a7yj7GzfR/6MmCPu+8L/7CngJuA0UIfKfOwfCCYq5VEJGpmRmlBNqUF2VxSOf6YwaFhjnT3fegdOwc7Pyj9tw928d7x/gnXkZWRRkle5gdFn5cVLv4PP1YS8QuhKDdTx/rPUDSFXgEciLjfClwxdpCZfRH4EyAL+NXxfpCZNQKNANXV1aebVUTiLCM9bfTK2Mvmjz+mf3CYzp5+OnoG6OjpH3N7gI4TofudPf3sfrebzp4BOk8OjM5zP5YZFOZkUpKXOab8Q7eL88f5hZCXRW6W3uUTs5Oi7n4fcJ+ZfRb4CvCFccasBdZC6JBLrNYtIsHJykhjdmEOswujn4t+5Bj/R34RnBgYfazzZOh2+/E+dr97nM6e/o+c6I2UnZE2wV7/mF8IEWNS7dVANIXeBlRF3K8MPzaRp4B/O5tQIpLazIyi3EyKcjOZPyv67+sbHKKrZyCqVwO7Dk+/VwPRFPomYKGZ1RIq8lXAZyMHmNlCd/9l+O6ngF8iIhJj2RnpzC5MP61XA8PDTnffqV8NjPxCONIdejXQ0dNPT4xeDYw8VhiHVwOTFrq7D5rZ3cDzhN62+LC7bzeze4Emd18H3G1mnwAGgA7GOdwiIhKEtLQzfzXQGVH2sXg1UJQbKvc//uS5rLx0Xoz+hh+I6hi6uz8HPDfmsa9G3P7DGOcSEQlUdkY6cwrTmTMFrwZm5mVNSWZdKSoiEiNn+mogZuuP/ypFRGQqqNBFRFKECl1EJEWo0EVEUoQKXUQkRajQRURShApdRCRFqNBFRFLEpB9wMWUrNmsH9p/ht5cC78UwTqwo1+lRrtOXqNmU6/ScTa757l423oLACv1smFnTRJ/YESTlOj3KdfoSNZtynZ6pyqVDLiIiKUKFLiKSIpK10NcGHWACynV6lOv0JWo25To9U5IrKY+hi4jIRyXrHrqIiIyhQhcRSREJXehmtsLMdpnZHjO7Z5zl2Wb2dHj5a2ZWkyC5bjOzdjPbGv66M065HjazI2b21gTLzcz+KZz7TTNbmiC5rjOzrojt9dXxxsU4U5WZ/czM3jaz7Wb2kU/dCmJ7RZkriO2VY2a/MLM3wrn+epwxcX8+RpkrkOdjeN3pZrbFzL43zrLYby93T8gvQp9fuheoA7KAN4BFY8b8PkergE8AAANBSURBVPDv4durgKcTJNdtwL8EsM0agKXAWxMsvxH4AWDAlcBrCZLrOuB7cd5W5cDS8O0ZwO5x/h3jvr2izBXE9jKgIHw7E3gNuHLMmCCej9HkCuT5GF73nwBPjPfvNRXbK5H30JcBe9x9n7v3A08BN40ZcxPwaPj2s8DHzWxqP1Y7ulyBcPf1wPunGHIT8JiHvAoUm1l5AuSKO3c/5O6bw7e7gR1AxZhhcd9eUeaKu/A2OB6+mxn+GvuOirg/H6PMFQgzqwQ+BTw4wZCYb69ELvQK4EDE/VY++h97dIy7DwJdwFR/kl80uQA+HX6Z/qyZVU1xpmhFmz0IHwu/bP6BmV0YzxWHX+ouIbR3FynQ7XWKXBDA9gofPtgKHAF+7O4Tbq84Ph+jyQXBPB+/Dvx3YHiC5THfXolc6Mnsu0CNu18C/JgPfgvL+DYTmp/iUuCfge/Ea8VmVgD8J/BH7n4sXuudzCS5Atle7j7k7ouBSmCZmV0Uj/VOJopccX8+mtmvA0fc/fWpXlekRC70NiDyN2ll+LFxx5hZBlAEHA06l7sfdfe+8N0HgcumOFO0otmmcefux0ZeNrv7c0CmmZVO9XrNLJNQaf6Hu397nCGBbK/JcgW1vSLW3wn8DFgxZlEQz8dJcwX0fFwOrDSzZkKHZX/VzB4fMybm2yuRC30TsNDMas0si9BJg3VjxqwDvhC+fQvwgofPMASZa8xx1pWEjoMmgnXA58Pv3rgS6HL3Q0GHMrO5I8cOzWwZof+XU1oE4fU9BOxw93+YYFjct1c0uQLaXmVmVhy+nQt8Etg5Zljcn4/R5Ari+ejuX3b3SnevIdQRL7j7r
WOGxXx7ZZzNN08ldx80s7uB5wm9s+Rhd99uZvcCTe6+jtB//G+a2R5CJ91WJUiuPzCzlcBgONdtU50LwMyeJPQOiFIzawX+ktBJItz934HnCL1zYw/QA9yeILluAf6rmQ0CJ4FVcfjFvBz4HLAtfPwV4C+A6ohcQWyvaHIFsb3KgUfNLJ3QL5Bn3P17QT8fo8wVyPNxPFO9vXTpv4hIikjkQy4iInIaVOgiIilChS4ikiJU6CIiKUKFLiKSIlToIiIpQoUuIpIi/j+bhre+jk5sZgAAAABJRU5ErkJggg==\n" 267 | }, 268 | "metadata": { 269 | "needs_background": "light" 270 | } 271 | }, 272 | { 273 | "output_type": "display_data", 274 | "data": { 275 | "text/plain": [ 276 | "
" 277 | ], 278 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAEICAYAAABRSj9aAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3dd5xU9b3/8ddnG0vvfelFigV1BbtYUAQFjf4UO4nRRGPuTaIpXhNBEq/eG1OvUSTGKCYGud5EsWID7LqLiro06bv03pdtn98fcxaHcYEBZvfMzL6fj8c8OHPO98z5zGHnPWe+5ztzzN0REZH0lRF2ASIiUrsU9CIiaU5BLyKS5hT0IiJpTkEvIpLmFPQiImlOQS+hMzM3s97B9EQz+0U8bQ9jO9eY2auHW6dIqjKNo5cjZWavAB+5+90x80cDjwB57l5xgPUd6OPui+LYVlxtzaw7sBTIPtC2ReoDHdFLIjwBXGtmFjP/OuDvCtraZWZZYdcgyU1BL4nwLNAaOKN6hpm1BC4CJpvZYDN738y2mNlqM3vQzHJqeiAze9zMfhV1/8fBOqvM7FsxbUea2Sdmts3Mis1sfNTit4J/t5jZDjM7xczGmtk7UeufamYFZrY1+PfUqGUzzeyXZvaumW03s1fNrM1+am5pZi+Y2Xoz2xxM50Utb2Vmfw2ew2YzezZq2Wgz+zR4DovNbHgwf5mZnRfVbryZ/S2Y7h50Yd1oZiuAN4P5/2tma4Ln85aZDYxav6GZ/cbMlgfL3wnmvWhm3495Pp+Z2aU1PVdJTQp6OWLuvhuYClwfNfsKYL67zwEqgR8CbYBTgHOBWw/2uEHo3QEMA/oA58U02RlsswUwErjFzC4Jlp0Z/NvC3Zu4+/sxj90KeBH4I5E3qd8CL5pZ66hmVwPfBNoBOUEtNckA/gp0A7oCu4EHo5Y/CTQCBgaP9bughsHAZODHwXM4E1i2v/1Rg7OA/sAFwf2XieyndsDHwN+j2j4AnAicCrQCfgJUEXwaq25kZscBnYnsG0kX7q6bbkd8A04HtgC5wf13gR/up+0PgH9F3XegdzD9OPCrYPox4P6odn2j29bwuL8HfhdMdw/aZkUtHwu8E0xfR+S8QvT67wNjg+mZwM+jlt0KvBLnvhgEbA6mOxIJ1JY1tHukut4ali0Dzou6Px74W8xz63mAGloEbZoTeSPaDRxXQ7tcYDOR8x4QeUN4KOy/J90Se9MRvSSEu78DbAAuMbNewGDgKQAz6xt0Z6wxs23AfxI5uj+YTkBx1P3l0QvNbIiZzQi6TLYC343zcasfe3nMvOVEjmarrYma3gU0qemBzKyRmT0SdItsI9Jt1MLMMoEuwCZ331zDql2AxXHWW5O9+8bMMs3s/qD7ZxtffTJoE9xya9qWu5cCTxM5x5IBXEXkE4ikEQW9JNJkIl0p1wLT3X1tMP9hYD6Ro8ZmwH8AsSdua7KaSBhW6xqz/ClgGtDF3ZsDE6Me92DDyVYR6WqJ1hVYGUddsW4HjgKGBM+vutvIiIRxKzNrUcN6xUCv/TzmTiLdPdU61NAm+jleDYwm0r3VnMhRf3UNG4DSA2zrCeAaIl1quzymm0tSn4JeEmkykaC5iUh4VGsKbAN2mFk/4JY4H28qMNbMBphZI2BczPKmRI6WS4P+7qujlq0n0mXScz+P/RLQ18yuNrMsM7sSGAC8EGdtsXXsJnLit1V0ne6+mkjf+UPBSdtsM6t+I/gL8E0zO9fMMsysc7B/AD4FxgTt84HL46hhD7CRyBvEf0bVUEWkG+y3ZtYpOPo/xcwaBMvfJ7KvfoOO5tOSgl4Sxt2XAe8BjYkcaVe7g0gIbwf+TKSrIJ7He5lIv/ubwKLg32i3AhPMbDtwN5E3hup1dwH3Au8Go31OjnnsjURGBd1OJBx/Alzk7hviqS3G74GGRI6cPwBeiVl+HVBO5
FPNOiLnKHD3j4ic7P0dsBWYxVefMn5B5Ah8M3APQTfYAUwm0vW0Epgb1BHtDuBzoADYBPwX+77+JwPHAH87yHYkBekLUyKCmV0P3Ozup4ddiySejuhF6rmgW+xWYFLYtUjtUNCL1GNmdgGR8xlrOXj3kKQodd2IiKQ5HdGLiKS5pPsxpDZt2nj37t3DLkNEJKXMnj17g7u3rWlZ0gV99+7dKSwsDLsMEZGUYmax3/TeS103IiJpTkEvIpLmFPQiImlOQS8ikuYU9CIiaU5BLyKS5hT0IiJpLunG0YuIpJuyiip27qlgR3DbufffyqjpClo3acDVQ2Kvr3PkFPQiIjEqq5ydZZHwjQTxvoFc47yyr+bt27aSssqquLZ7fNcWCnoRkZq4O6XlVTFHy/sP36/NK6tgR+lX83eXV8a13QyDxg2yaNIgi8bBrUmDTFo3bvS1eU32TkfPz6Jx1LLszNrpTY8r6M1sOPAHIBN41N3vj1nejcilytoSuXrNte5eYmaDiFwvtBlQCdzr7nFdXUhE0lt5ZdU+XRj7Hi1XB3Vk/o7SmFCOCepdZZVUVsX3S7wNszP3hm912LZvmkvPNllfmx8d1I1z9g3qJg2yyM3OwCyeyx+H66BBH1zJ/k/AMKAEKDCzae4+N6rZA8Bkd3/CzM4B7iNy+bRdwPXu/qWZdQJmm9l0d9+S8GciIklj084y/vbBchas2b7vUXbZV6FeVhFfd0ZOZgaNY8K3ecNsOrfIrSF8M2OOlved1zgni8yM5A/mRIvniH4wsMjdlwCY2RQiV5uPDvoBwI+C6RnAswDuvrC6gbuvMrN1RI76FfQiaahk8y4efXspUwpWUFpeRc+2jWkahGyXvd0ZQWjnxHZl7Nu9UT2dk6XBgUcqnqDvDBRH3S8BhsS0mQN8g0j3zqVAUzNrHVyAGQAzGwzkAItjN2BmNwM3A3TtmvgTESJSuxas2c4jsxbz3JxVZBhcMqgz3zmrJ73bNQ27NCFxJ2PvAB40s7HAW0SuRL/3bIaZdQSeBG5w9699XnP3SQTXq8zPz9clr0RSROGyTUyctZjX562jUU4m3zy1Ozee0YOOzRuGXZpEiSfoVwJdou7nBfP2cvdVRI7oMbMmwGXV/fBm1gx4EbjL3T9IRNEiEh53Z8aCdTw8czEFyzbTslE2PxrWl+tP6UaLRjlhlyc1iCfoC4A+ZtaDSMCPAa6ObmBmbYBNwdH6nURG4GBmOcC/iJyofSaRhYtI3SqvrOKFz1YxceYSFqzdTucWDRl/8QCuOKkLjXI0UjuZHfR/x90rzOw2YDqR4ZWPuXuRmU0ACt19GjAUuM/MnEjXzfeC1a8AzgRaB906AGPd/dPEPg0RqS27yyqZWljMpLeWsHLLbo5q35TfXXkcFx3bqdbGfUtimXtydYnn5+e7LiUoEr4tu8qY/P5yHn9vGZt2lnFS95bcMrQXZx/VLiXGjtc3Zjbb3fNrWqbPWyKyj9Vbd/OXt5fy1Ecr2FVWybn92vHdob04qXursEuTw6SgFxEAFq3bwSOzFvPspyupchh9XCe+c1YvjuqgIZKpTkEvUs99smIzE2ct5tW5a2mQlcE1Q7rx7TN6kNeyUdilSYIo6EXqIXfnrS838PDMRXywZBPNG2bz/XP6cMMp3WjdpEHY5UmCKehF6pGKyipe+mINE2cuZu7qbXRsnssvLhrAmJO60LiB4iBd6X9WpB4oLa/kmdklTHprCSs27aJX28b8+vJjGT2os35Lph5Q0Iuksa27y/nbB8v567tL2bCjjEFdWnDXyP4M69+ejHr4K471lYJeJA2t3VbKY+8s5e8frmDHngrO6tuWW4b2YkiPVhoDXw8p6EXSyNINO5n01mL+b/ZKKqqquOjYTnznrJ4M7NQ87NIkRAp6kTTweclWJs5azEtfrCY7M4MrTsrj5jN60bW1hkiKgl4kZbk77y7ayMRZi3ln0Qaa5mZx69BejD21B22baoikfEVBL5JiK
quc6UVreHjmYj5fuZV2TRtw54X9uHpIV5rmZoddniQhBb1IithTUcm/Pl7JI28tYemGnfRo05j7v3EMl57QmQZZmWGXJ0lMQS+S5LaXlvPUhyv4yztLWbd9D8d0bs5D15zABQM71MsLXcuhU9CLJKn12/fw13eX8uQHy9leWsHpvdvwuysHcWqv1hoiKYdEQS+SZFZs3MWktxcztbCE8soqRhzdke+e1Ytj8jREUg6Pgl4kSRSt2srEWUt48bNVZGVkcNmJnbnpjJ70bNsk7NIkxSnoRULk7ny4dBMPz1zMrIXradIgi5vO7MmNp/WgXbPcsMuTNKGgFwlBVZXz2ry1PDxzMZ8Wb6FNkxx+fMFRXHtyN5o31BBJSSwFvUgdKquo4tlPV/LIrMUsXr+Trq0a8atLjubyE/PIzdYQSakdCnqROrBzTwX/+CgyRHL11lIGdGzGH686nhFHdyArUz8TLLVLQS9Sizbu2MMT7y3jifeXs3V3OSf3bMX9lx3LmX3aaIik1BkFvUgtKNm8i0ffXsqUghWUlldxwcD2fPesXhzftWXYpUk9pKAXSaAFa7YzcdZips1ZRYbBJYM6852zetK7XdOwS5N6TEEvkgAFyzYxceZi3pi/jkY5mXzz1O7ceEYPOjZvGHZpIgp6kcNVVeXMWLCOh2cupnD5Zlo2yuZHw/py/SndaNEoJ+zyRPaKK+jNbDjwByATeNTd749Z3g14DGgLbAKudfeSYNkrwMnAO+5+UQJrFwlFeWUVL3y2iokzl7Bg7XY6t2jI+IsHcMVJXWiUo2MnST4H/as0s0zgT8AwoAQoMLNp7j43qtkDwGR3f8LMzgHuA64Llv0aaAR8J6GVi9Sx3WWVPF2wgj+/vZSVW3ZzVPum/O7K47jo2E5ka4ikJLF4Dj8GA4vcfQmAmU0BRgPRQT8A+FEwPQN4tnqBu79hZkMTUq1ICOau2sbTBSt49tNVbN1dzkndW/LLSwZy9lHtNERSUkI8Qd8ZKI66XwIMiWkzB/gGke6dS4GmZtba3TfGU4SZ3QzcDNC1a9d4VhGpVdtKy5n26SqeLijm85VbycnKYPjADlx3SjdO6t4q7PJEDkmiOhTvAB40s7HAW8BKoDLeld19EjAJID8/3xNUk8ghcXcKlm1mSsEKXvp8NaXlVfTr0JTxFw/gkuM76wSrpKx4gn4l0CXqfl4wby93X0XkiB4zawJc5u5bElWkSG1at72Uf368kqkFxSzZsJOmDbK47IQ8rjypC8d0bq7uGUl58QR9AdDHzHoQCfgxwNXRDcysDbDJ3auAO4mMwBFJWhWVVcxauJ6nC4p5Y/46Kqucwd1bcevZvRlxTAeNnpG0ctC/ZnevMLPbgOlEhlc+5u5FZjYBKHT3acBQ4D4zcyJdN9+rXt/M3gb6AU3MrAS40d2nJ/6piBzc8o07mVpYzDOzS1i7bQ9tmuTw7TN6cEV+F3rpAh+Spsw9ubrE8/PzvbCwMOwyJI2UllcyvWgNTxcU897ijWQYDD2qHVfkd+Hc/u00NFLSgpnNdvf8mpbp86mkrdhhkV1aNeSO8/ty2Yl5+mkCqVcU9JJWqodFTi0s5rOSreRkZjD86A5ceVIXTunZmowMnViV+kdBLylPwyJFDkxBLykrdlhkkwZZfOOEPMZoWKTIPhT0klIqKqt468v1TPnoq2GRJ3VvqWGRIgegV4WkhBUbdzG1sJj/nV381bDI03vw//K70LudhkWKHIiCXpJWTcMiz+rblntGddWwSJFDoKCXpDN31TamFhbzr09W7h0Wefuwvlyer2GRIodDQS9JoaZhkRcc3YExGhYpcsQU9BKa/Q2LHHfxAC4Z1JmWjTUsUiQRFPRS59Zv38P/fVzytWGRV+Z34dg8DYsUSTQFvdSJ6GGRb85fR4WGRYrUGb26pFZVD4t8ZnYJa7aV0qZJDjdqWKRInVLQS8Ltb1jk+FEDNSxSJAQKekmY2GGReS01LFIkGSjo5YhsK
y3n+TmRi2hrWKRIclLQyyGrHhb5dEExL36+SsMiRZKcgl7itn77Hv75cQlPRw2LvPT4yK9FalikSPJS0MsBVQ+LfLqgmDfmfTUs8pahvRh5bEcNixRJAXqVSo1ih0W2bpzDt06PXERbwyJFUouCXvba/7DIAZzTrz05WRoWKZKKFPTCvNXbeLpg32GRPxrWl8tPzKNTCw2LFEl1Cvp6bNqcVTz69pJ9hkVemd+FU3tpWKRIOlHQ11MFyzbxb//4hD7tmnD3RQO49HgNixRJVwr6eqiyyrn7uSI6Nc/ludtO08gZkTSns2v10FMfLmfe6m3cNXKAQl6kHogr6M1suJktMLNFZvazGpZ3M7M3zOwzM5tpZnlRy24wsy+D2w2JLF4O3aadZTzw6kJO6dmaEcd0CLscEakDBw16M8sE/gRcCAwArjKzATHNHgAmu/uxwATgvmDdVsA4YAgwGBhnZi0TV74cql9PX8COPRXcM3qgvskqUk/Ec0Q/GFjk7kvcvQyYAoyOaTMAeDOYnhG1/ALgNXff5O6bgdeA4UdethyOz0u2MqVgBTec0p2+7ZuGXY6I1JF4gr4zUBx1vySYF20O8I1g+lKgqZm1jnNdqQNVVc7d076gdeMcfjCsT9jliEgdStTJ2DuAs8zsE+AsYCVQGe/KZnazmRWaWeH69esTVJJE++cnK/lkxRZ+OrwfzXKzwy5HROpQPEG/EugSdT8vmLeXu69y92+4+/HAXcG8LfGsG7Sd5O757p7ftm3bQ3wKcjDbSsu5/+X5HN+1BZedkHfwFUQkrcQT9AVAHzPrYWY5wBhgWnQDM2tjZtWPdSfwWDA9HTjfzFoGJ2HPD+ZJHfrD61+yceceJow6Wt94FamHDhr07l4B3EYkoOcBU929yMwmmNmooNlQYIGZLQTaA/cG624CfknkzaIAmBDMkzqycO12Hn9vGWNO6soxec3DLkdEQmDuHnYN+8jPz/fCwsKwy0gL7s41j35I0aptzLhjKK30EwciacvMZrt7fk3L9M3YNPbyF2t4b/FG7ji/r0JepB5T0KepXWUV/OqFufTv2Iyrh3QLuxwRCZGCPk09PHMxq7aWcs+ogWTqBKxIvaagT0PLN+7kkVlLuGRQJwb3aBV2OSISMgV9GvrlC3PJzjTuHNE/7FJEJAko6NPMjPnreH3eOv7t3D60b5YbdjkikgQU9GlkT0Ul9zxfRM+2jfnmaT3CLkdEkoSuOpFGHn17Kcs27mLytwaTk6X3cBGJUBqkidVbd/Pgm4s4f0B7zuyr3wsSka8o6NPEf740nyp3fnFR7DVhRKS+U9CngfcXb+T5Oav47lm96NKqUdjliEiSUdCnuIrKKsZPKyKvZUNuGdor7HJEJAkp6FPckx8sZ8Ha7fx85AByszPDLkdEkpCCPoVt2LGH3762kDP6tOGCge3DLkdEkpSCPoX99yvz2V1WybiLB2Km37MRkZop6FPUp8VbmFpYwrdO70Hvdk3CLkdEkpiCPgVVVTnjnvuCdk0b8P1zeoddjogkOQV9Cvrf2cXMKdnKnSP60TQ3O+xyRCTJKehTzNZd5fzXKws4qXtLLhnUOexyRCQFKOhTzO9eX8iWXWWMH6UTsCISHwV9Cpm3ehuT31/GNUO6MbBT87DLEZEUoaBPEe7OuGlFNG+Yze3n9w27HBFJIQr6FPH8Z6v5aOkmfnxBP1o0ygm7HBFJIQr6FLBzTwX3vjiXozs348qTuoRdjoikGF14JAU8OGMRa7ft4aFrTiQzQydgReTQ6Ig+yS1Zv4NH317CZSfkcWK3lmGXIyIpSEGfxNyde56fS4OsTH564VFhlyMiKSquoDez4Wa2wMwWmdnPalje1cxmmNknZvaZmY0I5ueY2V/N7HMzm2NmQxNcf1p7fd46Zi1czw/O60O7prlhlyMiKeqgQW9mmcCfgAuBAcBVZhZ7vbqfA1Pd/XhgDPBQMP8mAHc/BhgG/MbM9CkiDqXllUx4oYg+7
Zpww6ndwy5HRFJYPKE7GFjk7kvcvQyYAoyOaeNAs2C6ObAqmB4AvAng7uuALUD+kRZdH0x6awnFm3YzftRAsjP13igihy+eBOkMFEfdLwnmRRsPXGtmJcBLwPeD+XOAUWaWZWY9gBOBr40PNLObzazQzArXr19/iE8h/ZRs3sVDMxcx4pgOnNa7TdjliEiKS9Sh4lXA4+6eB4wAngy6aB4j8sZQCPweeA+ojF3Z3Se5e76757dt2zZBJaWue1+cB8BdI2N7yEREDl084+hXsu9ReF4wL9qNwHAAd3/fzHKBNkF3zQ+rG5nZe8DCI6o4zb3z5QZe/mINtw/rS+cWDcMuR0TSQDxH9AVAHzPrYWY5RE62TotpswI4F8DM+gO5wHoza2RmjYP5w4AKd5+bsOrTTHllFeOfL6Jrq0bcdGbPsMsRkTRx0CN6d68ws9uA6UAm8Ji7F5nZBKDQ3acBtwN/NrMfEjkxO9bd3czaAdPNrIrIp4Drau2ZpIEn3lvGonU7ePT6fHKzM8MuR0TSRFw/geDuLxE5yRo97+6o6bnAaTWstwzQN33isG57Kb9//UuGHtWWc/u3C7scEUkjGreXJO5/eT5lFVWMu1gXFBGRxFLQJ4HZyzfxz49XcuMZPejRpnHY5YhImlHQh6yyyrn7uSI6NMvltrN7h12OiKQhBX3IphSsoGjVNv5jZH8aN9CvRotI4inoQ7R5Zxm/nr6AIT1acfGxHcMuR0TSlII+RL95bQHbSyu4Z7ROwIpI7VHQh+SLlVv5+4cruO7kbvTr0OzgK4iIHCYFfQjcnXHTimjVKIcfDusbdjkikuYU9CH41ycrmb18Mz8d3o/mDbPDLkdE0pyCvo5tLy3nvpfnc1yXFlx+Yl7Y5YhIPaDxfHXsj298yfrte/jz9flkZOgErIjUPh3R16FF67bz13eXcWV+FwZ1aRF2OSJSTyjo64i7M37aXBrmZPLj4fqdNxGpOwr6OjK9aA3vLNrA7cP60qZJg7DLEZF6REFfB3aXVfLLF+bRr0NTrj25W9jliEg9o5OxdeDhWYtZuWU3U24+maxMvbeKSN1S6tSyFRt3MXHWYi4+rhMn92wddjkiUg8p6GvZL1+cS1aG8R8j+oVdiojUUwr6WjRzwTpem7uW287pTcfmDcMuR0TqKQV9LSmrqGLC83Pp0aYxN57eI+xyRKQeU9DXksfeXcqSDTu5++IBNMjKDLscEanHFPS1YM3WUv74xpec178dZx/VLuxyRKSeU9DXgvtenkdFlfOLiwaEXYqIiII+0T5cspHnPl3Fd87sSbfWjcMuR0REQZ9IFZVVjJtWROcWDbl1aO+wyxERART0CfXURyuYv2Y7d43sT8McnYAVkeQQV9Cb2XAzW2Bmi8zsZzUs72pmM8zsEzP7zMxGBPOzzewJM/vczOaZ2Z2JfgLJYuOOPTwwfQGn9W7NhUd3CLscEZG9Dhr0ZpYJ/Am4EBgAXGVmsWcZfw5MdffjgTHAQ8H8/wc0cPdjgBOB75hZ98SUnlweeHUBu8oqGX/xQMx0QRERSR7xHNEPBha5+xJ3LwOmAKNj2jjQLJhuDqyKmt/YzLKAhkAZsO2Iq04yn5VsYUpBMWNP7U6f9k3DLkdEZB/xBH1noDjqfkkwL9p44FozKwFeAr4fzH8G2AmsBlYAD7j7ptgNmNnNZlZoZoXr168/tGcQsqoq5+7nimjduAH/fl6fsMsREfmaRJ2MvQp43N3zgBHAk2aWQeTTQCXQCegB3G5mPWNXdvdJ7p7v7vlt27ZNUEl145mPS/i0eAt3XtiPprnZYZcjIvI18QT9SqBL1P28YF60G4GpAO7+PpALtAGuBl5x93J3Xwe8C+QfadHJYuvucv77lfmc0LUFlx4f+yFHRCQ5xBP0BUAfM+thZjlETrZOi2mzAjgXwMz6Ewn69cH8c4L5jYGTgfmJKT18v399IRt3ljFh9NFkZOgErIgkp4MGvbtXALcB04F5REbXF
JnZBDMbFTS7HbjJzOYA/wDGursTGa3TxMyKiLxh/NXdP6uNJ1LXFqzZzuT3l3PV4K4c3bl52OWIiOxXXJcSdPeXiJxkjZ53d9T0XOC0GtbbQWSIZVpxd8ZN+4ImDbL48flHhV2OiMgB6Zuxh+HFz1fzwZJN3HHBUbRsnBN2OSIiB6SgP0S7yiq498V5DOjYjKsHdw27HBGRg4qr60a+8qcZi1i9tZT/uep4MnUCVkRSgI7oD8GyDTv581tLufT4zuR3bxV2OSIicVHQH4IJL8wlO9O488J+YZciIhI3BX2c3pi3ljfnr+Pfz+tDu2a5YZcjIhI3BX0cSssrmfDCXHq1bczYU3uEXY6IyCHRydg4/OWdpSzfuIsnbxxMTpbeG0UktSi1DmLVlt08+OYiLhjYnjP6pNYPromIgIL+oO59aR5V7vx8ZOy1VkREUoOC/gDeW7SBFz9bzS1De9GlVaOwyxEROSwK+v0or6xi/PNF5LVsyHfP6hV2OSIih01Bvx+T31/OwrU7+MVFA8jNzgy7HBGRw6agr8H67Xv4/WsLObNvW84f0D7sckREjoiCvgb/9cp8SisqGXfxAMz0ezYiktoU9DE+XrGZZ2aX8K3Te9CrbZOwyxEROWIK+iiVVc6454po17QB3z+nT9jliIgkhII+ytTCYj5fuZW7RvanSQN9aVhE0oOCPrBlVxn//cp8BndvxajjOoVdjohIwijoA799bSFbd5czftRAnYAVkbSioAfmrtrG3z5YzrUnd2NAp2ZhlyMiklD1PujdnfHTimjeMJsfDesbdjkiIglX74N+2pxVfLRsEz8Z3o8WjXLCLkdEJOHqddDv2FPBvS/O45jOzbkiv0vY5YiI1Ip6PYbwf978knXb9zDxuhPJzNAJWBFJT/X2iH7x+h089s5SLj8xjxO6tgy7HBGRWhNX0JvZcDNbYGaLzOxnNSzvamYzzOwTM/vMzEYE868xs0+jblVmNijRT+JQVZ+Azc3K5KfD+4VdjohIrTpo0JtZJvAn4EJgAHCVmcVebunnwFR3Px4YAzwE4O5/d/dB7j4IuA5Y6u6fJvIJHI5X567l7S838INhfWnbtEHY5YiI1Kp4jugHA4vcfYm7lwFTgNExbRyoHoDeHFhVw+NcFRFqqw0AAAcKSURBVKwbqtLySn75wlz6tm/C9ad0C7scEZFaF8/J2M5AcdT9EmBITJvxwKtm9n2gMXBeDY9zJV9/gwDAzG4Gbgbo2rVrHCUdvkdmLaFk826eumkI2Zn19hSFiNQjiUq6q4DH3T0PGAE8aWZ7H9vMhgC73P2LmlZ290nunu/u+W3btk1QSV9XvGkXD81cxMhjO3Jqrza1th0RkWQST9CvBKIHmecF86LdCEwFcPf3gVwgOknHAP84/DIT494X55Fhxl0j+oddiohInYkn6AuAPmbWw8xyiIT2tJg2K4BzAcysP5GgXx/czwCuIOT++be/XM8rRWv43tm96NSiYZiliIjUqYMGvbtXALcB04F5REbXFJnZBDMbFTS7HbjJzOYQOXIf6+4eLDsTKHb3JYkvPz5lFVWMn1ZEt9aN+PYZPcMqQ0QkFHF9M9bdXwJeipl3d9T0XOC0/aw7Ezj58Es8co+/t5TF63fylxvyyc3ODLMUEZE6l/bDTtZtK+UPr3/JOf3acW7/9mGXIyJS59I+6O97eT7llc7dF8V+x0tEpH5I66AvWLaJf32ykpvO7EH3No3DLkdEJBRpG/SVVc6454ro2DyX753dO+xyRERCk7ZB/9RHK5i7eht3jexPo5x6/WvMIlLPpWXQb9pZxgPTF3BKz9aMPKZj2OWIiIQqLYP+gVcXsGNPBeNHDcRMFxQRkfot7YL+i5Vb+cdHK7j+lG4c1aFp2OWIiIQurYK+qsq5+7kvaNUohx+c1zfsckREkkJaBf2/PlnJxyu28NML+9G8YXbY5YiIJIW0CfptpeXc9/J8BnVpweUn5IVdjohI0kibcYel5
ZWc0LUF3zu7NxkZOgErIlItbYK+XdNcJl2fH3YZIiJJJ226bkREpGYKehGRNKegFxFJcwp6EZE0p6AXEUlzCnoRkTSnoBcRSXMKehGRNGfuHnYN+zCz9cDyI3iINsCGBJWTSKrr0KiuQ6O6Dk061tXN3dvWtCDpgv5ImVmhuyfdV2RV16FRXYdGdR2a+laXum5ERNKcgl5EJM2lY9BPCruA/VBdh0Z1HRrVdWjqVV1p10cvIiL7SscjehERiaKgFxFJcykZ9GY23MwWmNkiM/tZDcsbmNnTwfIPzax7ktQ11szWm9mnwe3bdVTXY2a2zsy+2M9yM7M/BnV/ZmYnJEldQ81sa9T+uruO6upiZjPMbK6ZFZnZv9fQps73WZx11fk+M7NcM/vIzOYEdd1TQ5s6f03GWVcor8lg25lm9omZvVDDssTuL3dPqRuQCSwGegI5wBxgQEybW4GJwfQY4OkkqWss8GAI++xM4ATgi/0sHwG8DBhwMvBhktQ1FHghhP3VETghmG4KLKzh/7LO91mcddX5Pgv2QZNgOhv4EDg5pk0Yr8l46grlNRls+0fAUzX9fyV6f6XiEf1gYJG7L3H3MmAKMDqmzWjgiWD6GeBcM6vtC8nGU1co3P0tYNMBmowGJnvEB0ALM+uYBHWFwt1Xu/vHwfR2YB7QOaZZne+zOOuqc8E+2BHczQ5usaM86vw1GWddoTCzPGAk8Oh+miR0f6Vi0HcGiqPul/D1P/a9bdy9AtgKtE6CugAuCz7qP2NmXWq5pnjFW3sYTgk+er9sZgPreuPBR+bjiRwNRgt1nx2gLghhnwXdEJ8C64DX3H2/+6sOX5Px1AXhvCZ/D/wEqNrP8oTur1QM+lT2PNDd3Y8FXuOrd2yp2cdEfr/jOOB/gGfrcuNm1gT4P+AH7r6tLrd9IAepK5R95u6V7j4IyAMGm9nRdbHdg4mjrjp/TZrZRcA6d59d29uqlopBvxKIftfNC+bV2MbMsoDmwMaw63L3je6+J7j7KHBiLdcUr3j2aZ1z923VH73d/SUg28za1MW2zSybSJj+3d3/WUOTUPbZweoKc58F29wCzACGxywK4zV50LpCek2eBowys2VEunjPMbO/xbRJ6P5KxaAvAPqYWQ8zyyFyomJaTJtpwA3B9OXAmx6c1Qizrpg+3FFE+liTwTTg+mAkycnAVndfHXZRZtahul/SzAYT+Xut9XAItvkXYJ67/3Y/zep8n8VTVxj7zMzamlmLYLohMAyYH9Oszl+T8dQVxmvS3e909zx3704kJ95092tjmiV0f2Ud7ophcfcKM7sNmE5kpMtj7l5kZhOAQnefRuTF8KSZLSJysm9MktT1b2Y2CqgI6hpb23UBmNk/iIzGaGNmJcA4IiemcPeJwEtERpEsAnYB30ySui4HbjGzCmA3MKYO3rAhcsR1HfB50L8L8B9A16jawthn8dQVxj7rCDxhZplE3limuvsLYb8m46wrlNdkTWpzf+knEERE0lwqdt2IiMghUNCLiKQ5Bb2ISJpT0IuIpDkFvYhImlPQi4ikOQW9iEia+/8iWTRSHh0OkgAAAABJRU5ErkJggg==\n" 279 | }, 280 | "metadata": { 281 | "needs_background": "light" 282 | } 283 | } 284 | ] 285 | } 286 | ] 287 | } -------------------------------------------------------------------------------- /tensorflow/README.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | Tutorial on high-degree polynomial networks 3 | 
=========================================== 4 | 5 | 6 | 7 | This code implements two polynomial networks for image recognition **in TensorFlow**. 8 | The two codes are based on the paper of `"**Π-nets: Deep Polynomial Neural Networks**" `_ (also available `here `_ ) [1]_. 9 | 10 | Those networks aim to demonstrate the performance of the polynomial networks with minimal code examples; therefore, they are not really the state-of-the-art results on recognition. For networks that can achieve state-of-the-art results the source code of the papers can be followed, since they have more intricate implementations. For instance, for Π-nets, please check [1]_. 11 | 12 | The two networks include the following: 13 | 14 | * The jupyter notebook ``Minimum_example_tensorflow.ipynb`` implements a simple CCP model on MNIST classification. This can be opened and executed directly in a Google Colab environment. 15 | 16 | * The python files implement a product of polynomials (each polynomial has an NCP-based second degree polynomial). 17 | 18 | 19 | 20 | Train the network (of the *.py files) 21 | ===================================== 22 | 23 | To train the network, you can execute the following command:: 24 | 25 | python train_main_tf.py 26 | 27 | 28 | 29 | Apart from TensorFlow, the code depends on Pyaml [2]_. 30 | 31 | 32 | References 33 | ========== 34 | 35 | .. [1] https://github.com/grigorisg9gr/polynomial_nets/ 36 | 37 | .. [2] https://pypi.org/project/pyaml/ 38 | 39 | -------------------------------------------------------------------------------- /tensorflow/model_ncp_tf.py: -------------------------------------------------------------------------------- 1 | '''Model for Π-net based 2nd degree blocks without activation functions: 2 | https://ieeexplore.ieee.org/document/9353253 (or https://arxiv.org/abs/2006.13026). 3 | 4 | This file implements an NCP-based product of polynomials. 
5 | ''' 6 | from tensorflow.keras import Model, Sequential 7 | from tensorflow.keras.layers import Flatten, Dense , Conv2D, MaxPool2D, Dropout,BatchNormalization,AveragePooling2D 8 | from tensorflow_addons.layers import AdaptiveAveragePooling2D,InstanceNormalization 9 | 10 | def get_norm(norm_local): 11 | """ Define the appropriate function for normalization. """ 12 | if norm_local == 1: 13 | return InstanceNormalization(axis=3) 14 | else: 15 | return BatchNormalization(axis=3) 16 | 17 | class SinglePoly(Model): 18 | def __init__(self, planes, stride=1, use_alpha=False, kernel_sz=3, 19 | norm_S=None, norm_layer=None, kernel_size_S=1, 20 | use_only_first_conv=False, **kwargs): 21 | """ This class implements a single second degree NCP model. """ 22 | super(SinglePoly, self).__init__() 23 | self._norm_layer = get_norm(norm_layer) 24 | self._norm_S = get_norm(norm_S) 25 | self.use_only_first_conv = use_only_first_conv 26 | 27 | self.conv1 = Conv2D(planes, kernel_size=kernel_sz, strides=stride, padding='same', use_bias=False) 28 | self.bn1 = self._norm_layer 29 | if not self.use_only_first_conv: 30 | self.conv2 = Conv2D(planes, kernel_size=3, strides=1, padding='same', use_bias=False) 31 | self.bn2 = self._norm_layer 32 | 33 | self.conv_S = Conv2D(planes, kernel_size=kernel_size_S, strides=stride, padding='same', use_bias=False) 34 | self.bnS = self._norm_S 35 | 36 | self.use_alpha = use_alpha 37 | if self.use_alpha: 38 | # self.alpha = nn.Parameter(torch.zeros(1)) 39 | self.alpha = self.add_weight(name='kernel', shape=1, initializer='zeros',trainable=True) 40 | self.monitor_alpha = [] 41 | 42 | def call(self, x): 43 | out = self.bn1(self.conv1(x)) 44 | if not self.use_only_first_conv: 45 | out = self.bn2(self.conv2(out)) 46 | out1 = self.bnS(self.conv_S(x)) 47 | out_so = out * out1 48 | if self.use_alpha: 49 | out1 = out1 + self.alpha * out_so 50 | self.monitor_alpha.append(self.alpha) 51 | else: 52 | out1 = out1 + out_so 53 | return out1 54 | 55 | 56 | class 
ModelNCP(Model): 57 | def __init__(self, block, num_blocks, num_classes=10, norm_layer=None, 58 | pool_adapt=True, n_channels=[64, 128, 256, 512], **kwargs): 59 | super(ModelNCP, self).__init__() 60 | self._norm_layer = BatchNormalization(axis=3) if norm_layer is None else get_norm(norm_layer) 61 | assert len(n_channels) >= 4 62 | self.n_channels = n_channels 63 | self.pool_adapt = pool_adapt 64 | if pool_adapt: 65 | self.avg_pool = AdaptiveAveragePooling2D((1, 1)) 66 | else: 67 | self.avg_pool =AveragePooling2D(pool_size=4) 68 | 69 | self.conv1 = Conv2D(n_channels[0], kernel_size=3, strides=1, padding='same', use_bias=False) 70 | self.bn1 = self._norm_layer 71 | self.layer1 = self._make_layer(block, n_channels[0], num_blocks[0], stride=1, **kwargs) 72 | self.layer2 = self._make_layer(block, n_channels[1], num_blocks[1], stride=2, **kwargs) 73 | self.layer3 = self._make_layer(block, n_channels[2], num_blocks[2], stride=2, **kwargs) 74 | self.layer4 = self._make_layer(block, n_channels[3], num_blocks[3], stride=2, **kwargs) 75 | self.linear = Dense(num_classes, activation='softmax') #changed add softmax 76 | 77 | def _make_layer(self, block, planes, num_blocks, stride, **kwargs): 78 | strides = [stride] + [1]*(num_blocks-1) 79 | current_layers = [] 80 | for stride in strides: 81 | current_layers.append(block(planes, stride, norm_layer=self._norm_layer, **kwargs)) 82 | return Sequential(current_layers) 83 | 84 | def call(self, x): 85 | out = self.bn1(self.conv1(x)) 86 | out = self.layer1(out) 87 | out = self.layer2(out) 88 | out = self.layer3(out) 89 | out = self.layer4(out) 90 | out = self.avg_pool(out) 91 | out = Flatten(input_shape=out.shape)(out) 92 | out = self.linear(out) 93 | return out 94 | 95 | def ModelNCP_wrapper(num_blocks=None, **kwargs): 96 | if num_blocks is None: 97 | num_blocks = [1, 1, 1, 1] 98 | return ModelNCP(SinglePoly, num_blocks, **kwargs) 99 | 100 | def test(): 101 | from tensorflow.keras.datasets import cifar10 102 | args = { 'num_blocks': 
[2, 2, 2, 1],'train': True, 'use_alpha': True, 'use_only_first_conv': 1, 'kernel_sz': 5,'norm_S': 1} 103 | net = ModelNCP_wrapper(**args) 104 | (x_train, _), (_, _) = cifar10.load_data() 105 | x_train = x_train / 255.0 106 | batchsize = 16 107 | inputs = x_train[:batchsize] 108 | print(inputs.shape) #(16, 32, 32, 3) 109 | y = net(inputs, training=True) 110 | print(y.shape) #(16, 10) 111 | 112 | if __name__ == '__main__': 113 | test() 114 | -------------------------------------------------------------------------------- /tensorflow/model_ncp_tf.yml: -------------------------------------------------------------------------------- 1 | comment: '30 April 2022; No activation functions with NCP model; product of polynomials.' 2 | learning_rate: 0.1 3 | 4 | dataset: 5 | batch_size: 128 6 | 7 | model: 8 | fn: model_ncp_tf.py 9 | name: ModelNCP_wrapper 10 | args: 11 | train: True 12 | use_alpha: True 13 | num_blocks: [2, 2, 2, 1] 14 | use_only_first_conv: 1 15 | kernel_sz: 5 16 | norm_S: 1 17 | 18 | training_info: 19 | total_epochs: 200 20 | -------------------------------------------------------------------------------- /tensorflow/train_main_tf.py: -------------------------------------------------------------------------------- 1 | """Train CIFAR-10 with TensorFlow2.0.""" 2 | from tqdm import tqdm 3 | import tensorflow as tf 4 | from utils_tf import return_loaders,load_model 5 | import logging 6 | import yaml 7 | import sys 8 | from os.path import abspath, dirname, join, isdir 9 | from os import curdir, makedirs 10 | base = dirname(abspath(__file__)) 11 | sys.path.append(base) 12 | 13 | class Model(): 14 | def __init__(self, modc, decay_steps, lr): 15 | self.modc=modc 16 | self.model = load_model(modc['fn'], modc['name'], modc['args']) 17 | self.loss_object = tf.keras.losses.CategoricalCrossentropy() 18 | self.weight_decay = 5e-4 19 | self.optimizer = tf.keras.optimizers.SGD(learning_rate=tf.keras.experimental.CosineDecay(lr,decay_steps),momentum=0.9) 20 | self.train_loss 
= tf.keras.metrics.Mean(name='train_loss') 21 | self.train_acc = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy') 22 | self.test_loss = tf.keras.metrics.Mean(name='test_loss') 23 | self.test_acc = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy') 24 | 25 | @tf.function 26 | def train_step(self, images, labels): 27 | with tf.GradientTape() as tape: 28 | predictions = self.model(images, training=True) 29 | # Cross-entropy loss 30 | ce_loss = self.loss_object(labels, predictions) 31 | # L2 loss(weight decay) 32 | l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.model.trainable_variables]) 33 | loss = ce_loss + l2_loss * self.weight_decay 34 | gradients = tape.gradient(loss, self.model.trainable_variables) 35 | self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables)) 36 | self.train_loss(loss) 37 | self.train_acc(labels, predictions) 38 | 39 | @tf.function 40 | def test_step(self, images, labels): 41 | predictions = self.model(images, training=False) 42 | t_loss = self.loss_object(labels, predictions) 43 | self.test_loss(t_loss) 44 | self.test_acc(labels, predictions) 45 | 46 | def train(self, train_loader, test_loader, epoch): 47 | best_acc = tf.Variable(0.0) 48 | curr_epoch = tf.Variable(0) 49 | cur_path = abspath(curdir) 50 | # # define the output path 51 | out = join(cur_path, 'results_poly_tf', '') 52 | if not isdir(out): 53 | makedirs(out) 54 | logging.basicConfig(format='%(message)s', level=logging.INFO, datefmt='%m-%d %H:%M', 55 | filename="%s/%s" % (out, 'res.log'), filemode='w+') 56 | print('Current path: {}'.format(cur_path)) 57 | ckpt = tf.train.Checkpoint(curr_epoch=curr_epoch, best_acc=best_acc, 58 | optimizer=self.optimizer, model=self.model) 59 | manager = tf.train.CheckpointManager(ckpt, out, max_to_keep=1) 60 | 61 | for e in tqdm(range(int(curr_epoch), epoch)): 62 | # Reset the metrics at the start of the next epoch 63 | self.train_loss.reset_states() 64 | self.train_acc.reset_states() 65 | 
self.test_loss.reset_states() 66 | self.test_acc.reset_states() 67 | for images, labels in train_loader: 68 | self.train_step(images, labels) 69 | for images, labels in test_loader: 70 | self.test_step(images, labels) 71 | msg = 'Epoch:{}.\tTrain_Loss: {:.3f}.\tTrain_Acc: {:.03f}.\tTest_Acc: {:.03f}.\tBest_Test_Acc:{:.03f} (epoch: {}).' 72 | msg = msg.format(int(e + 1),self.train_loss.result(), self.train_acc.result(),self.test_acc.result(), 73 | best_acc.numpy(), curr_epoch.numpy()) 74 | print(msg) 75 | logging.info(msg) 76 | # Save checkpoint 77 | if self.test_acc.result() > best_acc: 78 | print('Saving...') 79 | best_acc.assign(self.test_acc.result()) 80 | curr_epoch.assign(e + 1) 81 | manager.save() 82 | 83 | def main(): 84 | yml = yaml.safe_load(open('model_ncp_tf.yml')) # # file that includes the configuration. 85 | tinfo = yml['training_info'] 86 | train_loader, test_loader = return_loaders(**yml['dataset']) 87 | decay_steps = int(tinfo['total_epochs'] * train_loader.cardinality().numpy()) 88 | modc = yml['model'] 89 | model = Model(modc, decay_steps,yml['learning_rate']) 90 | model.train(train_loader, test_loader,tinfo['total_epochs']) 91 | 92 | if __name__ == "__main__": 93 | main() -------------------------------------------------------------------------------- /tensorflow/utils_tf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dataloader for Cifar 10 3 | Reference: https://github.com/lionelmessi6410/tensorflow2-cifar 4 | """ 5 | from os.path import splitext, basename, dirname 6 | import sys 7 | import tensorflow as tf 8 | from tensorflow.keras import datasets 9 | import numpy as np 10 | 11 | padding = 4 12 | image_size = 32 13 | target_size = 32 + padding * 2 14 | def load_module(fn, name): 15 | mod_name = splitext(basename(fn))[0] 16 | mod_path = dirname(fn) 17 | sys.path.insert(0, mod_path) 18 | return getattr(__import__(mod_name), name) 19 | 20 | def load_model(model_fn, model_name, args=None): 21 | 
model = load_module(model_fn, model_name) 22 | model1 = model(**args) if args else model() 23 | return model1 24 | 25 | def MultiStepLR(initial_learning_rate, lr_steps, lr_rate): 26 | """Multi-steps learning rate scheduler.""" 27 | lr_steps_value = [initial_learning_rate] 28 | for _ in range(len(lr_steps)): 29 | lr_steps_value.append(lr_steps_value[-1] * lr_rate) 30 | return tf.keras.optimizers.schedules.PiecewiseConstantDecay( 31 | boundaries=lr_steps, values=lr_steps_value) 32 | 33 | def return_loaders(batch_size, **kwargs): 34 | """ 35 | Return the loader for the data. This is used both for training and for 36 | validation. Currently, hardcoded to CIFAR10. 37 | :param batch_size: (int) The batch size for training. 38 | :param kwargs: 39 | :return: The train and validation time loaders. 40 | """ 41 | train_images, train_labels, test_images, test_labels = get_dataset() 42 | mean, std = get_mean_and_std(train_images) 43 | train_images = normalize(train_images, mean, std) 44 | test_images = normalize(test_images, mean, std) 45 | train_ds = dataset_generator(train_images, train_labels,batch_size) 46 | test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)). 
\ 47 | batch(batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE) 48 | return train_ds, test_ds 49 | 50 | def get_dataset(): 51 | """Download, parse and process a dataset to unit scale and one-hot labels.""" 52 | (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() 53 | # Normalize pixel values to be between 0 and 1 54 | train_images, test_images = train_images / 255.0, test_images / 255.0 55 | # One-hot labels 56 | train_labels = _one_hot(train_labels, 10) 57 | test_labels = _one_hot(test_labels, 10) 58 | return train_images, train_labels, test_images, test_labels 59 | 60 | def get_mean_and_std(images): 61 | """Compute the mean and std value of dataset.""" 62 | mean = np.mean(images, axis=(0, 1, 2)) 63 | std = np.std(images, axis=(0, 1, 2)) 64 | return mean, std 65 | 66 | def normalize(images, mean, std): 67 | """Normalize data with mean and std.""" 68 | return (images - mean) / std 69 | 70 | def dataset_generator(images, labels, batch_size): 71 | ds = tf.data.Dataset.from_tensor_slices((images, labels)) 72 | ds = ds.map(_augment_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) 73 | ds = ds.shuffle(len(images)).batch(batch_size) 74 | ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) 75 | return ds 76 | 77 | def _one_hot(train_labels, num_classes, dtype=np.float32): 78 | """Create a one-hot encoding of labels of size num_classes.""" 79 | return np.array(train_labels == np.arange(num_classes), dtype) 80 | 81 | def _augment_fn(images, labels): 82 | images = tf.image.pad_to_bounding_box(images, padding, padding, target_size, target_size) 83 | images = tf.image.random_crop(images, (image_size, image_size, 3)) 84 | images = tf.image.random_flip_left_right(images) 85 | return images, labels --------------------------------------------------------------------------------