├── requirements.txt ├── LICENSE ├── run_rnn.py ├── README.md ├── deepnet ├── loss.py ├── im2col.py ├── utils.py ├── nnet.py ├── solver.py ├── layers.py └── Gradient Checking.ipynb ├── .gitignore └── run_cnn.py /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.11.3 2 | scipy==0.16.1 3 | matplotlib==1.5.0 4 | ipykernel==4.2.2 5 | ipython==4.0.1 6 | ipython-genutils==0.1.0 7 | ipywidgets==4.1.1 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Paras Dahal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /run_rnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from deepnet.nnet import RNN 3 | from deepnet.solver import sgd_rnn 4 | 5 | 6 | def text_to_inputs(path): 7 | """ 8 | Converts the given text into X and y vectors 9 | X : contains the index of all the characters in the text vocab 10 | y : y[i] contains the index of next character for X[i] in the text vocab 11 | """ 12 | with open(path) as f: 13 | txt = f.read() 14 | X, y = [], [] 15 | 16 | char_to_idx = {char: i for i, char in enumerate(set(txt))} 17 | idx_to_char = {i: char for i, char in enumerate(set(txt))} 18 | X = np.array([char_to_idx[i] for i in txt]) 19 | y = [char_to_idx[i] for i in txt[1:]] 20 | y.append(char_to_idx['.']) 21 | y = np.array(y) 22 | 23 | vocab_size = len(char_to_idx) 24 | return X, y, vocab_size, char_to_idx, idx_to_char 25 | 26 | 27 | if __name__ == "__main__": 28 | 29 | X, y, vocab_size, char_to_idx, idx_to_char = text_to_inputs('data/Rnn.txt') 30 | rnn = RNN(vocab_size,vocab_size,char_to_idx,idx_to_char) 31 | rnn = sgd_rnn(rnn,X,y,10,10,0.1) 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # deepnet 2 | 3 | Implementations of CNNs, RNNs and cool new techniques in deep learning 4 | 5 | Note: deepnet is a work in progress and things will be added gradually. It is not intended for production, use it to learn and study implementations of latest and greatest in deep learning. 6 | 7 | ## What does it have? 8 | 9 | **Network Architecture** 10 | 1. Convolutional net 11 | 2. Feed forward net 12 | 3. 
Recurrent net (LSTM/GRU coming soon) 13 | 14 | **Optimization Algorithms** 15 | 1. SGD 16 | 2. SGD with momentum 17 | 3. Nesterov Accelerated Gradient 18 | 4. Adagrad 19 | 5. RMSprop 20 | 6. Adam 21 | 22 | **Regularization** 23 | 1. Dropout 24 | 2. L1 and L2 Regularization 25 | 26 | **Cool Techniques** 27 | 28 | 1. BatchNorm 29 | 2. Xavier Weight Initialization 30 | 31 | **Nonlinearities** 32 | 1. ReLU 33 | 2. Sigmoid 34 | 3. tanh 35 | 36 | 37 | ## Usage 38 | 39 | 1. ```virtualenv .env``` ; create a virtual environment 40 | 2. ```source .env/bin/activate``` ; activate the virtual environment 41 | 3. ```pip install -r requirements.txt``` ; Install dependencies 42 | 4. ```python run_cnn.py {mnist|cifar10}``` ; mnist for shallow cnn and cifar10 for deep cnn -------------------------------------------------------------------------------- /deepnet/loss.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from deepnet.utils import softmax 3 | from deepnet.layers import Conv, FullyConnected 4 | 5 | 6 | def l2_regularization(layers, lam=0.001): 7 | reg_loss = 0.0 8 | for layer in layers: 9 | if hasattr(layer, 'W'): 10 | reg_loss += 0.5 * lam * np.sum(layer.W * layer.W) 11 | return reg_loss 12 | 13 | 14 | def delta_l2_regularization(layers, grads, lam=0.001): 15 | for layer, grad in zip(layers, reversed(grads)): 16 | if hasattr(layer, 'W'): 17 | grad[0] += lam * layer.W 18 | return grads 19 | 20 | 21 | def l1_regularization(layers, lam=0.001): 22 | reg_loss = 0.0 23 | for layer in layers: 24 | if hasattr(layer, 'W'): 25 | reg_loss += lam * np.sum(np.abs(layer.W)) 26 | return reg_loss 27 | 28 | 29 | def delta_l1_regularization(layers, grads, lam=0.001): 30 | for layer, grad in zip(layers, reversed(grads)): 31 | if hasattr(layer, 'W'): 32 | grad[0] += lam * layer.W / (np.abs(layer.W) + 1e-8) 33 | return grads 34 | 35 | 36 | def SoftmaxLoss(X, y): 37 | m = y.shape[0] 38 | p = softmax(X) 39 | log_likelihood = -np.log(p[range(m), y]) 40 | loss = np.sum(log_likelihood) / m 41 | 42 | dx = p.copy() 43 | dx[range(m), y] -= 1 44 | dx /= m 45 | return loss, dx 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | *.sublime* 92 | MNIST_data/ -------------------------------------------------------------------------------- /run_cnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from deepnet.utils import load_mnist, load_cifar10 3 | from deepnet.layers import * 4 | from deepnet.solver import sgd, sgd_momentum, adam 5 | from deepnet.nnet import CNN 6 | import sys 7 | 8 | 9 | def make_mnist_cnn(X_dim, num_class): 10 | conv = Conv(X_dim, n_filter=32, h_filter=3, 11 | w_filter=3, stride=1, padding=1) 12 | relu_conv = ReLU() 13 | maxpool = Maxpool(conv.out_dim, size=2, stride=1) 14 | flat = Flatten() 15 | fc = FullyConnected(np.prod(maxpool.out_dim), num_class) 16 | return [conv, relu_conv, maxpool, flat, fc] 17 | 18 | 19 | def make_cifar10_cnn(X_dim, num_class): 20 | conv = Conv(X_dim, n_filter=16, h_filter=5, 21 | w_filter=5, stride=1, padding=2) 22 | relu = ReLU() 23 | maxpool = Maxpool(conv.out_dim, size=2, stride=2) 24 | conv2 = Conv(maxpool.out_dim, n_filter=20, h_filter=5, 25 | w_filter=5, stride=1, padding=2) 26 | relu2 = ReLU() 27 | maxpool2 = Maxpool(conv2.out_dim, size=2, stride=2) 28 | flat = Flatten() 29 | fc = FullyConnected(np.prod(maxpool2.out_dim), num_class) 30 | return [conv, relu, maxpool, conv2, relu2, maxpool2, flat, fc] 31 | 32 | 33 | if __name__ == "__main__": 34 | 35 | if sys.argv[1] == "mnist": 36 | 37 | training_set, test_set = load_mnist( 38 | 'data/mnist.pkl.gz', num_training=1000, num_test=1000) 39 | X, y = training_set 40 | X_test, y_test = test_set 41 | mnist_dims = (1, 28, 28) 42 | cnn = CNN(make_mnist_cnn(mnist_dims, num_class=10)) 43 | cnn = sgd_momentum(cnn, X, y, minibatch_size=35, epoch=20, 44 | learning_rate=0.01, X_test=X_test, y_test=y_test) 45 | 46 | if sys.argv[1] == "cifar10": 47 | training_set, test_set = load_cifar10( 48 | 'data/cifar-10', num_training=1000, num_test=100) 49 | X, y = training_set 50 | X_test, y_test = test_set 51 | cifar10_dims = (3, 32, 32) 52 | cnn = CNN(make_cifar10_cnn(cifar10_dims, num_class=10)) 53 | cnn = sgd_momentum(cnn, X, y, minibatch_size=10, epoch=200, 54 | learning_rate=0.01, X_test=X_test, y_test=y_test) 55 | -------------------------------------------------------------------------------- /deepnet/im2col.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def get_im2col_indices(x_shape, field_height=3, field_width=3, padding=1, stride=1): 5 | # First figure out what the size of the output should be 6 | N, C, H, W = x_shape 7 | assert (H + 2 * padding - field_height) % stride == 0 8 | assert (W 
+ 2 * padding - field_width) % stride == 0
 9 |     out_height = (H + 2 * padding - field_height) // stride + 1
10 |     out_width = (W + 2 * padding - field_width) // stride + 1
11 | 
12 |     i0 = np.repeat(np.arange(field_height,dtype='int32'), field_width)
13 |     i0 = np.tile(i0, C)
14 |     i1 = stride * np.repeat(np.arange(out_height,dtype='int32'), out_width)
15 |     j0 = np.tile(np.arange(field_width), field_height * C)
16 |     j1 = stride * np.tile(np.arange(out_width,dtype='int32'), int(out_height))
17 |     i = i0.reshape(-1, 1) + i1.reshape(1, -1)
18 |     j = j0.reshape(-1, 1) + j1.reshape(1, -1)
19 | 
20 |     k = np.repeat(np.arange(C,dtype='int32'), field_height * field_width).reshape(-1, 1)
21 | 
22 |     return (k, i, j)
23 | 
24 | def im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1):
25 |     """ An implementation of im2col based on some fancy indexing """
26 |     # Zero-pad the input
27 |     p = padding
28 |     x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
29 | 
30 |     k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
31 |                                  stride)
32 | 
33 |     cols = x_padded[:, k, i, j]
34 |     C = x.shape[1]
35 |     cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
36 |     return cols
37 | 
38 | 
39 | def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
40 |                    stride=1):
41 |     """ An implementation of col2im based on fancy indexing and np.add.at """
42 |     N, C, H, W = x_shape
43 |     H_padded, W_padded = H + 2 * padding, W + 2 * padding
44 |     x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
45 |     k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
46 |                                  stride)
47 |     cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
48 |     cols_reshaped = cols_reshaped.transpose(2, 0, 1)
49 |     np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
50 |     if padding == 0:
51 |         return x_padded
52 |     return x_padded[:, :, padding:-padding, padding:-padding]
53 | 
-------------------------------------------------------------------------------- /deepnet/utils.py: --------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import _pickle as cPickle
 3 | import gzip
 4 | import os
 5 | 
 6 | 
 7 | def one_hot_encode(y, num_class):
 8 |     m = y.shape[0]
 9 |     onehot = np.zeros((m, num_class), dtype="int32")
10 |     for i in range(m):
11 |         idx = y[i]
12 |         onehot[i][idx] = 1
13 |     return onehot
14 | 
15 | 
16 | def accuracy(y_true, y_pred):
17 |     return np.mean(y_pred == y_true)  # both are not one hot encoded
18 | 
19 | 
20 | def softmax(x):
21 |     exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
22 |     return exp_x / np.sum(exp_x, axis=1, keepdims=True)
23 | 
24 | 
25 | def load_mnist(path, num_training=50000, num_test=10000, cnn=True, one_hot=False):
26 |     f = gzip.open(path, 'rb')
27 |     training_data, validation_data, test_data = cPickle.load(
28 |         f, encoding='iso-8859-1')
29 |     f.close()
30 |     X_train, y_train = training_data
31 |     X_validation, y_validation = validation_data
32 |     X_test, y_test = test_data
33 |     if cnn:
34 |         shape = (-1, 1, 28, 28)
35 |         X_train = X_train.reshape(shape)
36 |         X_validation = X_validation.reshape(shape)
37 |         X_test = X_test.reshape(shape)
38 |     if one_hot:
39 |         y_train = one_hot_encode(y_train, 10)
40 |         y_validation = one_hot_encode(y_validation, 10)
41 |         y_test = one_hot_encode(y_test, 10)
42 |     X_train, y_train = X_train[range(
43 |         num_training)], y_train[range(num_training)]
44 |     X_test, y_test = X_test[range(num_test)], y_test[range(num_test)]
45 |     return 
(X_train, y_train), (X_test, y_test)
46 | 
47 | 
48 | def load_cifar10(path, num_training=1000, num_test=1000):
49 |     Xs, ys = [], []
50 |     for batch in range(1, 6):
51 |         f = open(os.path.join(path, "data_batch_{0}".format(batch)), 'rb')
52 |         data = cPickle.load(f, encoding='iso-8859-1')
53 |         f.close()
54 |         X = data["data"].reshape(10000, 3, 32, 32).astype("float64")
55 |         y = np.array(data["labels"])
56 |         Xs.append(X)
57 |         ys.append(y)
58 |     f = open(os.path.join(path, "test_batch"), 'rb')
59 |     data = cPickle.load(f, encoding='iso-8859-1')
60 |     f.close()
61 |     X_train, y_train = np.concatenate(Xs), np.concatenate(ys)
62 |     X_test = data["data"].reshape(10000, 3, 32, 32).astype("float64")
63 |     y_test = np.array(data["labels"])
64 |     X_train, y_train = X_train[range(
65 |         num_training)], y_train[range(num_training)]
66 |     X_test, y_test = X_test[range(num_test)], y_test[range(num_test)]
67 |     mean = np.mean(X_train, axis=0)
68 |     std = np.std(X_train)
69 |     X_train /= 255.0
70 |     X_test /= 255.0
71 |     return (X_train, y_train), (X_test, y_test)
72 | 
-------------------------------------------------------------------------------- /deepnet/nnet.py: --------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from deepnet.loss import SoftmaxLoss, l2_regularization, delta_l2_regularization
 3 | from deepnet.utils import accuracy, softmax
 4 | from deepnet.utils import one_hot_encode
 5 | 
 6 | class CNN:
 7 | 
 8 |     def __init__(self, layers, loss_func=SoftmaxLoss):
 9 |         self.layers = layers
10 |         self.params = []
11 |         for layer in self.layers:
12 |             self.params.append(layer.params)
13 |         self.loss_func = loss_func
14 | 
15 |     def forward(self, X):
16 |         for layer in self.layers:
17 |             X = layer.forward(X)
18 |         return X
19 | 
20 |     def backward(self, dout):
21 |         grads = []
22 |         for layer in reversed(self.layers):
23 |             dout, grad = layer.backward(dout)
24 |             grads.append(grad)
25 |         return grads
26 | 
27 |     def train_step(self, X, y):
28 |         out = self.forward(X)
29 |         loss, dout = self.loss_func(out, y)
30 |         loss += l2_regularization(self.layers)
31 |         grads = self.backward(dout)
32 |         grads = delta_l2_regularization(self.layers, grads)
33 |         return loss, grads
34 | 
35 |     def predict(self, X):
36 |         X = self.forward(X)
37 |         return np.argmax(softmax(X), axis=1)
38 | 
39 | 
40 | class RNN:
41 | 
42 |     def __init__(self, vocab_size, h_size, char_to_idx, idx_to_char):
43 |         self.vocab_size = vocab_size
44 |         self.h_size = h_size
45 |         self.char_to_idx = char_to_idx
46 |         self.idx_to_char = idx_to_char
47 |         self.model = dict(
48 |             Wxh=np.random.rand(vocab_size, h_size) / np.sqrt(vocab_size / 2),
49 |             Whh=np.random.rand(h_size, h_size) / np.sqrt(h_size / 2),
50 |             Why=np.random.rand(h_size, vocab_size) / np.sqrt(h_size / 2),
51 |             bh=np.zeros((1, h_size)),
52 |             by=np.zeros((1, vocab_size))
53 |         )
54 |         self.initial_state = np.zeros((1, self.h_size))
55 | 
56 |     def _forward(self, X, h):
57 |         # input character index to one hot
58 |         X_onehot = np.zeros(self.vocab_size)
59 |         X_onehot[X] = 1
60 |         X_onehot = X_onehot.reshape(1, -1)
61 | 
62 |         h_prev = h.copy()
63 |         # calculate hidden state with tanh
64 |         h = np.tanh(np.dot(X_onehot, self.model['Wxh']) + np.dot(h_prev, self.model['Whh']) + self.model['bh'])
65 | 
66 |         # fully connected forward step on the hidden state
67 |         y = np.dot(h, self.model['Why']) + self.model['by']
68 | 
69 |         cache = (X_onehot, h_prev, h)
70 |         return y, h, cache
71 | 
72 |     def _backward(self, out, y, dh_next, cache):
73 | 
74 |         X_onehot, h_prev, h = cache
75 | 
76 |         # gradient of the softmax output from the forward step
77 |         dout = softmax(out)
78 |         dout[0, y] -= 1
79 |         # fully connected backward step
80 |         dWhy = h.T @ dout
81 |         dby = np.sum(dout, axis=0).reshape(1, -1)
82 |         dh = dout @ self.model['Why'].T
83 |         # add up gradient flowing in from the next time step
84 |         dh += dh_next
85 |         # gradient through tanh
86 |         dh_raw = dh * (1 - h ** 2)
87 |         # hidden state gradients
88 |         dbh = dh_raw
89 |         dWhh = h_prev.T @ dh_raw
90 |         dWxh = X_onehot.T @ dh_raw
91 |         dh_next = dh_raw @ self.model['Whh'].T
92 | 
93 |         grads = dict(Wxh=dWxh, Whh=dWhh, Why=dWhy, bh=dbh, by=dby)
94 | 
95 |         return grads, dh_next
96 | 
97 |     def train_step(self, X_train, y_train, h):
98 |         ys, caches = [], []
99 |         total_loss = 0
100 |         grads = {k: np.zeros_like(v) for k, v in self.model.items()}
101 | 
102 |         # forward pass and store values for bptt
103 |         for x, y in zip(X_train, y_train):
104 |             y_pred, h, cache = self._forward(x, h)
105 |             p = softmax(y_pred)
106 |             log_likelihood = -np.log(p[range(y_pred.shape[0]), y])
107 |             total_loss += np.sum(log_likelihood) / y_pred.shape[0]
108 |             ys.append(y_pred)
109 |             caches.append(cache)
110 | 
111 |         total_loss /= X_train.shape[0]
112 | 
113 |         # backprop through time
114 |         dh_next = np.zeros((1, self.h_size))
115 |         for t in reversed(range(len(X_train))):
116 |             grad, dh_next = self._backward(
117 |                 ys[t], y_train[t], dh_next, caches[t])
118 |             # sum up the gradients for each time step
119 |             for k in grads.keys():
120 |                 grads[k] += grad[k]
121 | 
122 |         # clip exploding gradients
123 |         for k, v in grads.items():
124 |             grads[k] = np.clip(v, -5.0, 5.0)
125 | 
126 |         return total_loss, grads, h
127 | 
128 |     def predict(self, X):
129 |         # run the net over a sequence of character indices
130 |         h = self.initial_state
131 |         preds = []
132 |         for x in X:
133 |             out, h, _ = self._forward(x, h)
134 |             preds.append(np.argmax(softmax(out), axis=1)[0])
135 |         return np.array(preds)
136 | 
-------------------------------------------------------------------------------- /deepnet/solver.py: --------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from sklearn.utils import shuffle
 3 | from deepnet.utils import accuracy
 4 | import copy
 5 | from deepnet.loss import SoftmaxLoss
 6 | 
 7 | 
 8 | def get_minibatches(X, y, minibatch_size, shuffleTag=True):
 9 |     m = X.shape[0]
10 |     minibatches = []
11 |     if shuffleTag:
12 |         X, y = shuffle(X, y)
13 |     for i in range(0, m, minibatch_size):
14 |         X_batch = X[i:i + minibatch_size, :, :, :]
15 |         y_batch = y[i:i + minibatch_size, ]
16 |         minibatches.append((X_batch, y_batch))
17 |     return minibatches
18 | 
19 | 
20 | def vanilla_update(params, grads, learning_rate=0.01):
21 |     for param, grad in zip(params, reversed(grads)):
22 |         for i in range(len(grad)):
23 |             param[i] += - learning_rate * grad[i]
24 | 
25 | 
26 | def momentum_update(velocity, params, grads, learning_rate=0.01, mu=0.9):
27 |     for v, param, grad, in zip(velocity, params, reversed(grads)):
28 |         for i in range(len(grad)):
29 |             v[i] = mu * v[i] + learning_rate * grad[i]
30 |             param[i] -= v[i]
31 | 
32 | 
33 | def adagrad_update(cache, params, grads, learning_rate=0.01):
34 |     for c, param, grad, in zip(cache, params, reversed(grads)):
35 |         for i in range(len(grad)):
36 |             c[i] += grad[i]**2
37 |             param[i] += - learning_rate * grad[i] / (np.sqrt(c[i]) + 1e-8)
38 | 
39 | 
40 | def rmsprop_update(cache, params, grads, learning_rate=0.01, decay_rate=0.9):
41 |     for c, param, grad, in zip(cache, params, reversed(grads)):
42 |         for i in range(len(grad)):
43 |             c[i] = decay_rate * c[i] + (1 - decay_rate) * grad[i]**2
44 |             param[i] += - learning_rate * grad[i] / (np.sqrt(c[i]) + 1e-4)
45 | 
46 | 
47 | def sgd(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,
48 |         X_test=None, y_test=None):
49 |     minibatches = 
get_minibatches(X_train, y_train, minibatch_size)
 50 |     for i in range(epoch):
 51 |         loss = 0
 52 |         if verbose:
 53 |             print("Epoch {0}".format(i + 1))
 54 |         for X_mini, y_mini in minibatches:
 55 |             loss, grads = nnet.train_step(X_mini, y_mini)
 56 |             vanilla_update(nnet.params, grads, learning_rate=learning_rate)
 57 |         if verbose:
 58 |             train_acc = accuracy(y_train, nnet.predict(X_train))
 59 |             test_acc = accuracy(y_test, nnet.predict(X_test))
 60 |             print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format(
 61 |                 loss, train_acc, test_acc))
 62 |     return nnet
 63 | 
 64 | def sgd_rnn(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True):
 65 |     for i in range(epoch):
 66 |         loss = 0
 67 |         if verbose:
 68 |             print("Epoch {0}".format(i + 1))
 69 |         hidden_state = nnet.initial_state
 70 |         loss, grads, hidden_state = nnet.train_step(X_train, y_train, hidden_state)
 71 | 
 72 |         for k in grads.keys():
 73 |             nnet.model[k] -= learning_rate * grads[k]
 74 | 
 75 |         if verbose:
 76 |             print("Loss = {0}".format(loss))
 77 |     return nnet
 78 | 
 79 | 
 80 | def sgd_momentum(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, mu=0.9,
 81 |                  verbose=True, X_test=None, y_test=None, nesterov=True):
 82 | 
 83 |     minibatches = get_minibatches(X_train, y_train, minibatch_size)
 84 | 
 85 |     for i in range(epoch):
 86 |         loss = 0
 87 |         velocity = []
 88 |         for param_layer in nnet.params:
 89 |             p = [np.zeros_like(param) for param in list(param_layer)]
 90 |             velocity.append(p)
 91 | 
 92 |         if verbose:
 93 |             print("Epoch {0}".format(i + 1))
 94 | 
 95 |         for X_mini, y_mini in minibatches:
 96 | 
 97 |             if nesterov:
 98 |                 for param, ve in zip(nnet.params, velocity):
 99 |                     for j in range(len(param)):
100 |                         param[j] += mu * ve[j]
101 | 
102 |             loss, grads = nnet.train_step(X_mini, y_mini)
103 |             momentum_update(velocity, nnet.params, grads,
104 |                             learning_rate=learning_rate, mu=mu)
105 | 
106 |         if verbose:
107 |             m_train = X_train.shape[0]
108 |             m_test = X_test.shape[0]
109 |             y_train_pred = np.array([], dtype="int64")
110 |             y_test_pred = np.array([], dtype="int64")
111 |             for j in range(0, m_train, minibatch_size):
112 |                 X_tr = X_train[j:j + minibatch_size, :, :, :]
113 |                 y_tr = y_train[j:j + minibatch_size, ]
114 |                 y_train_pred = np.append(y_train_pred, nnet.predict(X_tr))
115 |             for j in range(0, m_test, minibatch_size):
116 |                 X_te = X_test[j:j + minibatch_size, :, :, :]
117 |                 y_te = y_test[j:j + minibatch_size, ]
118 |                 y_test_pred = np.append(y_test_pred, nnet.predict(X_te))
119 | 
120 |             train_acc = accuracy(y_train, y_train_pred)
121 |             test_acc = accuracy(y_test, y_test_pred)
122 |             print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format(
123 |                 loss, train_acc, test_acc))
124 |     return nnet
125 | 
126 | 
127 | def adam(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,
128 |          X_test=None, y_test=None):
129 |     beta1 = 0.9
130 |     beta2 = 0.999
131 |     minibatches = get_minibatches(X_train, y_train, minibatch_size)
132 |     for i in range(epoch):
133 |         loss = 0
134 |         velocity, cache = [], []
135 |         for param_layer in nnet.params:
136 |             p = [np.zeros_like(param) for param in list(param_layer)]
137 |             velocity.append(p)
138 |             cache.append(copy.deepcopy(p))  # separate buffers: cache holds the first moment, velocity the second
139 |         if verbose:
140 |             print("Epoch {0}".format(i + 1))
141 |         t = 1
142 |         for X_mini, y_mini in minibatches:
143 |             loss, grads = nnet.train_step(X_mini, y_mini)
144 |             for c, v, param, grad, in zip(cache, velocity, nnet.params, reversed(grads)):
145 |                 for i in range(len(grad)):
146 |                     c[i] = beta1 * c[i] + (1. - beta1) * grad[i]
147 |                     v[i] = beta2 * v[i] + (1. 
- beta2) * (grad[i]**2) 148 | mt = c[i] / (1. - beta1**(t)) 149 | vt = v[i] / (1. - beta2**(t)) 150 | param[i] += - learning_rate * mt / (np.sqrt(vt) + 1e-4) 151 | t += 1 152 | 153 | if verbose: 154 | train_acc = accuracy(y_train, nnet.predict(X_train)) 155 | test_acc = accuracy(y_test, nnet.predict(X_test)) 156 | print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format( 157 | loss, train_acc, test_acc)) 158 | return nnet 159 | -------------------------------------------------------------------------------- /deepnet/layers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from deepnet.im2col import * 3 | 4 | 5 | class Conv(): 6 | 7 | def __init__(self, X_dim, n_filter, h_filter, w_filter, stride, padding): 8 | 9 | self.d_X, self.h_X, self.w_X = X_dim 10 | 11 | self.n_filter, self.h_filter, self.w_filter = n_filter, h_filter, w_filter 12 | self.stride, self.padding = stride, padding 13 | 14 | self.W = np.random.randn( 15 | n_filter, self.d_X, h_filter, w_filter) / np.sqrt(n_filter / 2.) 16 | self.b = np.zeros((self.n_filter, 1)) 17 | self.params = [self.W, self.b] 18 | 19 | self.h_out = (self.h_X - h_filter + 2 * padding) / stride + 1 20 | self.w_out = (self.w_X - w_filter + 2 * padding) / stride + 1 21 | 22 | if not self.h_out.is_integer() or not self.w_out.is_integer(): 23 | raise Exception("Invalid dimensions!") 24 | 25 | self.h_out, self.w_out = int(self.h_out), int(self.w_out) 26 | self.out_dim = (self.n_filter, self.h_out, self.w_out) 27 | 28 | def forward(self, X): 29 | 30 | self.n_X = X.shape[0] 31 | 32 | self.X_col = im2col_indices( 33 | X, self.h_filter, self.w_filter, stride=self.stride, padding=self.padding) 34 | W_row = self.W.reshape(self.n_filter, -1) 35 | 36 | out = W_row @ self.X_col + self.b 37 | out = out.reshape(self.n_filter, self.h_out, self.w_out, self.n_X) 38 | out = out.transpose(3, 0, 1, 2) 39 | return out 40 | 41 | def backward(self, dout): 42 | 43 | dout_flat = dout.transpose(1, 2, 3, 0).reshape(self.n_filter, -1) 44 | 45 | dW = dout_flat @ self.X_col.T 46 | dW = dW.reshape(self.W.shape) 47 | 48 | db = np.sum(dout, axis=(0, 2, 3)).reshape(self.n_filter, -1) 49 | 50 | W_flat = self.W.reshape(self.n_filter, -1) 51 | 52 | dX_col = W_flat.T @ dout_flat 53 | shape = (self.n_X, self.d_X, self.h_X, self.w_X) 54 | dX = col2im_indices(dX_col, shape, self.h_filter, 55 | self.w_filter, self.padding, self.stride) 56 | 57 | return dX, [dW, db] 58 | 59 | 60 | class Maxpool(): 61 | 62 | def __init__(self, X_dim, size, stride): 63 | 64 | self.d_X, self.h_X, self.w_X = X_dim 65 | 66 | self.params = [] 67 | 68 | self.size = size 69 | self.stride = stride 70 | 71 | self.h_out = (self.h_X - size) / stride + 1 72 | self.w_out = (self.w_X - size) / stride + 1 73 | 74 | if not self.h_out.is_integer() or not self.w_out.is_integer(): 75 | raise Exception("Invalid dimensions!") 76 | 77 | self.h_out, self.w_out = int(self.h_out), int(self.w_out) 78 | self.out_dim = (self.d_X, self.h_out, self.w_out) 79 | 80 | def forward(self, X): 81 | self.n_X = X.shape[0] 82 | X_reshaped = X.reshape( 83 | X.shape[0] * X.shape[1], 1, X.shape[2], X.shape[3]) 84 | 85 | self.X_col = im2col_indices( 86 | X_reshaped, self.size, self.size, padding=0, stride=self.stride) 87 | 88 | self.max_indexes = np.argmax(self.X_col, axis=0) 89 | out = self.X_col[self.max_indexes, range(self.max_indexes.size)] 90 | 91 | out = out.reshape(self.h_out, self.w_out, self.n_X, 92 | self.d_X).transpose(2, 3, 0, 1) 93 | return out 94 | 95 | def 
backward(self, dout): 96 | 97 | dX_col = np.zeros_like(self.X_col) 98 | # flatten the gradient 99 | dout_flat = dout.transpose(2, 3, 0, 1).ravel() 100 | 101 | dX_col[self.max_indexes, range(self.max_indexes.size)] = dout_flat 102 | 103 | # get the original X_reshaped structure from col2im 104 | shape = (self.n_X * self.d_X, 1, self.h_X, self.w_X) 105 | dX = col2im_indices(dX_col, shape, self.size, 106 | self.size, padding=0, stride=self.stride) 107 | dX = dX.reshape(self.n_X, self.d_X, self.h_X, self.w_X) 108 | return dX, [] 109 | 110 | 111 | class Flatten(): 112 | 113 | def __init__(self): 114 | self.params = [] 115 | 116 | def forward(self, X): 117 | self.X_shape = X.shape 118 | self.out_shape = (self.X_shape[0], -1) 119 | out = X.ravel().reshape(self.out_shape) 120 | self.out_shape = self.out_shape[1] 121 | return out 122 | 123 | def backward(self, dout): 124 | out = dout.reshape(self.X_shape) 125 | return out, () 126 | 127 | 128 | class FullyConnected(): 129 | 130 | def __init__(self, in_size, out_size): 131 | 132 | self.W = np.random.randn(in_size, out_size) / np.sqrt(in_size / 2.) 133 | self.b = np.zeros((1, out_size)) 134 | self.params = [self.W, self.b] 135 | 136 | def forward(self, X): 137 | self.X = X 138 | out = self.X @ self.W + self.b 139 | return out 140 | 141 | def backward(self, dout): 142 | dW = self.X.T @ dout 143 | db = np.sum(dout, axis=0) 144 | dX = dout @ self.W.T 145 | return dX, [dW, db] 146 | 147 | 148 | class Batchnorm(): 149 | 150 | def __init__(self, X_dim): 151 | self.d_X, self.h_X, self.w_X = X_dim 152 | self.gamma = np.ones((1, int(np.prod(X_dim)))) 153 | self.beta = np.zeros((1, int(np.prod(X_dim)))) 154 | self.params = [self.gamma, self.beta] 155 | 156 | def forward(self, X): 157 | self.n_X = X.shape[0] 158 | self.X_shape = X.shape 159 | 160 | self.X_flat = X.ravel().reshape(self.n_X, -1) 161 | self.mu = np.mean(self.X_flat, axis=0) 162 | self.var = np.var(self.X_flat, axis=0) 163 | self.X_norm = (self.X_flat - self.mu) / np.sqrt(self.var + 1e-8) 164 | out = self.gamma * self.X_norm + self.beta 165 | 166 | return out.reshape(self.X_shape) 167 | 168 | def backward(self, dout): 169 | 170 | dout = dout.ravel().reshape(dout.shape[0], -1) 171 | X_mu = self.X_flat - self.mu 172 | var_inv = 1. / np.sqrt(self.var + 1e-8) 173 | 174 | dbeta = np.sum(dout, axis=0) 175 | dgamma = np.sum(dout * self.X_norm, axis=0) 176 | 177 | dX_norm = dout * self.gamma 178 | dvar = np.sum(dX_norm * X_mu, axis=0) * - \ 179 | 0.5 * (self.var + 1e-8)**(-3 / 2) 180 | dmu = np.sum(dX_norm * -var_inv, axis=0) + dvar * \ 181 | 1 / self.n_X * np.sum(-2. 
* X_mu, axis=0)
182 |         dX = (dX_norm * var_inv) + (dmu / self.n_X) + \
183 |             (dvar * 2 / self.n_X * X_mu)
184 | 
185 |         dX = dX.reshape(self.X_shape)
186 |         return dX, [dgamma, dbeta]
187 | 
188 | 
189 | class Dropout():
190 | 
191 |     def __init__(self, prob=0.5):
192 |         self.prob = prob
193 |         self.params = []
194 | 
195 |     def forward(self, X):
196 |         self.mask = np.random.binomial(1, self.prob, size=X.shape) / self.prob  # inverted dropout: prob is the keep probability
197 |         out = X * self.mask
198 |         return out.reshape(X.shape)
199 | 
200 |     def backward(self, dout):
201 |         dX = dout * self.mask
202 |         return dX, []
203 | 
204 | 
205 | class ReLU():
206 |     def __init__(self):
207 |         self.params = []
208 | 
209 |     def forward(self, X):
210 |         self.X = X
211 |         return np.maximum(0, X)
212 | 
213 |     def backward(self, dout):
214 |         dX = dout.copy()
215 |         dX[self.X <= 0] = 0
216 |         return dX, []
217 | 
218 | 
219 | class sigmoid():
220 |     def __init__(self):
221 |         self.params = []
222 | 
223 |     def forward(self, X):
224 |         out = 1.0 / (1.0 + np.exp(-X))
225 |         self.out = out
226 |         return out
227 | 
228 |     def backward(self, dout):
229 |         dX = dout * self.out * (1 - self.out)
230 |         return dX, []
231 | 
232 | 
233 | class tanh():
234 |     def __init__(self):
235 |         self.params = []
236 | 
237 |     def forward(self, X):
238 |         out = np.tanh(X)
239 |         self.out = out
240 |         return out
241 | 
242 |     def backward(self, dout):
243 |         dX = dout * (1 - self.out**2)
244 |         return dX, []
245 | 
-------------------------------------------------------------------------------- /deepnet/Gradient Checking.ipynb: --------------------------------------------------------------------------------
 1 | {
 2 |  "cells": [
 3 |   {
 4 |    "cell_type": "markdown",
 5 |    "metadata": {},
 6 |    "source": [
 7 |     "# Numerical Gradient Checking of Layers\n",
 8 |     "\n",
 9 |     "Verify the correctness of the implementations using the gradient checks provided in the CS231n assignments.\n",
10 |     "\n",
11 |     "1. **Probably wrong**: relative error > 1e-2 \n",
12 |     "2. **Something not right**: 1e-2 > relative error > 1e-4 \n",
13 |     "3. **Usually okay for objectives with kinks**: 1e-4 > relative error (if there are no kinks, 1e-4 is still too high) \n",
14 |     "4. 
**Most likely Right**: relative error < 1e-7 " 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "collapsed": false 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "import numpy as np\n", 26 | "from layers import *\n", 27 | "from loss import SoftmaxLoss\n", 28 | "from nnet import NeuralNet\n", 29 | "from solver import sgd,sgd_momentum,adam\n", 30 | "import sys" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "## Numerical Gradient Functions" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": { 44 | "collapsed": true 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "def rel_error(x, y):\n", 49 | " \"\"\" returns relative error \"\"\"\n", 50 | " return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n", 51 | "\n", 52 | "def numerical_gradient_array(f, x, df, h=1e-5):\n", 53 | " \"\"\"\n", 54 | " Evaluate a numeric gradient for a function that accepts a numpy\n", 55 | " array and returns a numpy array.\n", 56 | " \"\"\"\n", 57 | " grad = np.zeros_like(x)\n", 58 | " it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n", 59 | " while not it.finished:\n", 60 | "\n", 61 | " ix = it.multi_index\n", 62 | " oldval = x[ix]\n", 63 | " x[ix] = oldval + h\n", 64 | " pos = f(x).copy()\n", 65 | " x[ix] = oldval - h\n", 66 | " neg = f(x).copy()\n", 67 | " x[ix] = oldval\n", 68 | "\n", 69 | " grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n", 70 | "\n", 71 | " it.iternext()\n", 72 | " return grad\n", 73 | "\n", 74 | "def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n", 75 | " \"\"\"\n", 76 | " a naive implementation of numerical gradient of f at x\n", 77 | " - f should be a function that takes a single argument\n", 78 | " - x is the point (numpy array) to evaluate the gradient at\n", 79 | " \"\"\"\n", 80 | "\n", 81 | " fx = f(x) # evaluate function value at original point\n", 82 | "\n", 83 | " grad = np.zeros_like(x)\n", 84 | " # iterate over all indexes in x\n", 85 | " it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n", 86 | " while not it.finished:\n", 87 | " # evaluate function at x+h\n", 88 | " ix = it.multi_index\n", 89 | " oldval = x[ix]\n", 90 | " x[ix] = oldval + h # increment by h\n", 91 | " fxph = f(x) # evalute f(x + h)\n", 92 | " x[ix] = oldval - h\n", 93 | " fxmh = f(x) # evaluate f(x - h)\n", 94 | " x[ix] = oldval # restore\n", 95 | "\n", 96 | " # compute the partial derivative with centered formula\n", 97 | " grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n", 98 | " if verbose:\n", 99 | " print(ix, grad[ix])\n", 100 | " it.iternext() # step to next dimension\n", 101 | "\n", 102 | " return grad" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Convolution Layer\n", 110 | "\n", 111 | "Perform numerical grdient checking for verifying the implementation of convolution layer." 
112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": {}, 117 | "source": [ 118 | "### Forward Pass\n", 119 | "\n", 120 | "The difference of correct_out and out should be around 1e-8" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 3, 126 | "metadata": { 127 | "collapsed": false 128 | }, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "Testing forward pass of Conv Layer\n", 135 | "Difference: 2.21214764967e-08\n" 136 | ] 137 | } 138 | ], 139 | "source": [ 140 | "x_shape = (2, 3, 4, 4)\n", 141 | "w_shape = (3, 3, 4, 4)\n", 142 | "x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)\n", 143 | "w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)\n", 144 | "b = np.linspace(-0.1, 0.2, num=3)\n", 145 | "\n", 146 | "c_layer = Conv((3,4,4),n_filter=3,h_filter=4,w_filter=4,stride=2,padding=1)\n", 147 | "c_layer.W = w\n", 148 | "c_layer.b = b.reshape(-1,1)\n", 149 | "\n", 150 | "correct_out = np.array([[[[-0.08759809, -0.10987781],\n", 151 | " [-0.18387192, -0.2109216 ]],\n", 152 | " [[ 0.21027089, 0.21661097],\n", 153 | " [ 0.22847626, 0.23004637]],\n", 154 | " [[ 0.50813986, 0.54309974],\n", 155 | " [ 0.64082444, 0.67101435]]],\n", 156 | " [[[-0.98053589, -1.03143541],\n", 157 | " [-1.19128892, -1.24695841]],\n", 158 | " [[ 0.69108355, 0.66880383],\n", 159 | " [ 0.59480972, 0.56776003]],\n", 160 | " [[ 2.36270298, 2.36904306],\n", 161 | " [ 2.38090835, 2.38247847]]]])\n", 162 | "\n", 163 | "out = c_layer.forward(x)\n", 164 | "\n", 165 | "error = rel_error(out,correct_out)\n", 166 | "print(\"Testing forward pass of Conv Layer\")\n", 167 | "print(\"Difference: \",error)" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "### Backward pass\n", 175 | "\n", 176 | "The errors for gradients should be around 1e-9" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 4, 182 | "metadata": { 183 | "collapsed": false 184 | }, 185 | "outputs": [ 186 | { 187 | "name": "stdout", 188 | "output_type": "stream", 189 | "text": [ 190 | "Testing backward pass of Conv Layer\n", 191 | "dX error: 6.30285589596e-09\n", 192 | "dW error: 3.66468373932e-10\n", 193 | "db error: 6.8390384471e-12\n" 194 | ] 195 | } 196 | ], 197 | "source": [ 198 | "x = np.random.randn(4, 3, 5, 5)\n", 199 | "w = np.random.randn(2, 3, 3, 3)\n", 200 | "b = np.random.randn(2,).reshape(-1,1)\n", 201 | "dout = np.random.randn(4, 2, 5, 5)\n", 202 | "\n", 203 | "c_layer = Conv((3,5,5),n_filter=2,h_filter=3,w_filter=3,stride=1,padding=1)\n", 204 | "c_layer.W = w\n", 205 | "c_layer.b = b\n", 206 | "\n", 207 | "dx_num = numerical_gradient_array(lambda x: c_layer.forward(x), x, dout)\n", 208 | "dw_num = numerical_gradient_array(lambda w: c_layer.forward(x), w, dout)\n", 209 | "db_num = numerical_gradient_array(lambda b: c_layer.forward(x), b, dout)\n", 210 | "\n", 211 | "out = c_layer.forward(x)\n", 212 | "dx,grads = c_layer.backward(dout)\n", 213 | "dw,db = grads\n", 214 | "\n", 215 | "print(\"Testing backward pass of Conv Layer\")\n", 216 | "print(\"dX error: \",rel_error(dx,dx_num))\n", 217 | "print(\"dW error: \",rel_error(dw,dw_num))\n", 218 | "print(\"db error: \",rel_error(db,db_num))" 219 | ] 220 | }, 221 | { 222 | "cell_type": "markdown", 223 | "metadata": {}, 224 | "source": [ 225 | "## Maxpool Layer\n", 226 | "\n", 227 | "Perform gradient check for maxpool layer and verify correctness of its implementation" 228 | ] 229 | }, 230 | { 231 | 
"cell_type": "markdown", 232 | "metadata": {}, 233 | "source": [ 234 | "### Forward Pass\n", 235 | "\n", 236 | "Difference should be around 1e-8" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 5, 242 | "metadata": { 243 | "collapsed": false 244 | }, 245 | "outputs": [ 246 | { 247 | "name": "stdout", 248 | "output_type": "stream", 249 | "text": [ 250 | "Testing max_pool_forward_naive function:\n", 251 | "difference: 4.16666651573e-08\n" 252 | ] 253 | } 254 | ], 255 | "source": [ 256 | "x_shape = (2, 3, 4, 4)\n", 257 | "x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)\n", 258 | "\n", 259 | "pool = Maxpool((3,4,4),size=2,stride=2)\n", 260 | "\n", 261 | "out = pool.forward(x,)\n", 262 | "correct_out = np.array([[[[-0.26315789, -0.24842105],\n", 263 | " [-0.20421053, -0.18947368]],\n", 264 | " [[-0.14526316, -0.13052632],\n", 265 | " [-0.08631579, -0.07157895]],\n", 266 | " [[-0.02736842, -0.01263158],\n", 267 | " [ 0.03157895, 0.04631579]]],\n", 268 | " [[[ 0.09052632, 0.10526316],\n", 269 | " [ 0.14947368, 0.16421053]],\n", 270 | " [[ 0.20842105, 0.22315789],\n", 271 | " [ 0.26736842, 0.28210526]],\n", 272 | " [[ 0.32631579, 0.34105263],\n", 273 | " [ 0.38526316, 0.4 ]]]])\n", 274 | "\n", 275 | "print('Testing max_pool_forward_naive function:')\n", 276 | "print('difference: ', rel_error(out, correct_out))" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "### Backward Pass\n", 284 | "\n", 285 | "Error should be around 1e-12" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": 6, 291 | "metadata": { 292 | "collapsed": false 293 | }, 294 | "outputs": [ 295 | { 296 | "name": "stdout", 297 | "output_type": "stream", 298 | "text": [ 299 | "Testing bacward pass of Maxpool layer\n", 300 | "dX error: 3.27561819731e-12\n" 301 | ] 302 | } 303 | ], 304 | "source": [ 305 | "x = np.random.randn(3, 2, 8, 8)\n", 306 | "dout = np.random.randn(3, 2, 4, 4)\n", 307 | "\n", 308 | "pool = Maxpool((2,8,8),size=2,stride=2)\n", 309 | "\n", 310 | "dx_num = numerical_gradient_array(lambda x: pool.forward(x), x, dout)\n", 311 | "\n", 312 | "out = pool.forward(x)\n", 313 | "dx,_ = pool.backward(dout)\n", 314 | "\n", 315 | "print('Testing bacward pass of Maxpool layer')\n", 316 | "print('dX error: ', rel_error(dx, dx_num))" 317 | ] 318 | }, 319 | { 320 | "cell_type": "markdown", 321 | "metadata": {}, 322 | "source": [ 323 | "## ReLU Layer\n", 324 | "Error should be around 1e-12" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": 7, 330 | "metadata": { 331 | "collapsed": false 332 | }, 333 | "outputs": [ 334 | { 335 | "name": "stdout", 336 | "output_type": "stream", 337 | "text": [ 338 | "Testing backward pass of ReLU layer\n", 339 | "dX error: 3.275621976e-12\n" 340 | ] 341 | } 342 | ], 343 | "source": [ 344 | "x = np.random.randn(3, 2, 8, 8)\n", 345 | "dout = np.random.randn(3, 2, 8, 8)\n", 346 | "\n", 347 | "r = ReLU()\n", 348 | "\n", 349 | "dx_num = numerical_gradient_array(lambda x:r.forward(x), x, dout)\n", 350 | "\n", 351 | "out = r.forward(x)\n", 352 | "dx,_ = r.backward(dout)\n", 353 | "\n", 354 | "print('Testing backward pass of ReLU layer')\n", 355 | "print('dX error: ',rel_error(dx,dx_num))" 356 | ] 357 | }, 358 | { 359 | "cell_type": "markdown", 360 | "metadata": {}, 361 | "source": [ 362 | "## Conv-ReLU-MaxPool" 363 | ] 364 | }, 365 | { 366 | "cell_type": "code", 367 | "execution_count": 8, 368 | "metadata": { 369 | "collapsed": false 370 | }, 371 | "outputs": 
[ 372 | { 373 | "name": "stdout", 374 | "output_type": "stream", 375 | "text": [ 376 | "Testing conv_relu_pool\n", 377 | "dx error: 1.01339343448e-08\n", 378 | "dw error: 7.41563088659e-10\n", 379 | "db error: 7.51304173633e-11\n" 380 | ] 381 | } 382 | ], 383 | "source": [ 384 | "x = np.random.randn(2, 3, 16, 16)\n", 385 | "w = np.random.randn(3, 3, 3, 3)\n", 386 | "b = np.random.randn(3,).reshape(-1,1)\n", 387 | "dout = np.random.randn(2, 3, 8, 8)\n", 388 | "\n", 389 | "c = Conv((3,16,16),n_filter=3,h_filter=3,w_filter=3,stride=1,padding=1)\n", 390 | "c.W, c.b = w, b\n", 391 | "r = ReLU()\n", 392 | "m = Maxpool(c.out_dim,size=2,stride=2)\n", 393 | "\n", 394 | "def conv_relu_pool_forward(c,r,m,x):\n", 395 | " c_out = c.forward(x)\n", 396 | " r_out = r.forward(c_out)\n", 397 | " m_out = m.forward(r_out)\n", 398 | " return m_out\n", 399 | "\n", 400 | "dx_num = numerical_gradient_array(lambda x: conv_relu_pool_forward(c,r,m,x), x, dout)\n", 401 | "dw_num = numerical_gradient_array(lambda w: conv_relu_pool_forward(c,r,m,x), w, dout)\n", 402 | "db_num = numerical_gradient_array(lambda b: conv_relu_pool_forward(c,r,m,x), b, dout)\n", 403 | "\n", 404 | "m_dx,_ = m.backward(dout)\n", 405 | "r_dx,_ = r.backward(m_dx)\n", 406 | "dx,grads = c.backward(r_dx)\n", 407 | "dw,db = grads\n", 408 | "\n", 409 | "\n", 410 | "print('Testing conv_relu_pool')\n", 411 | "print('dx error: ', rel_error(dx_num, dx))\n", 412 | "print('dw error: ', rel_error(dw_num, dw))\n", 413 | "print('db error: ', rel_error(db_num, db))" 414 | ] 415 | }, 416 | { 417 | "cell_type": "markdown", 418 | "metadata": {}, 419 | "source": [ 420 | "## Fully Connected Layer" 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": 9, 426 | "metadata": { 427 | "collapsed": false 428 | }, 429 | "outputs": [ 430 | { 431 | "name": "stdout", 432 | "output_type": "stream", 433 | "text": [ 434 | "[[ 1.49834967 1.70660132 1.91485297]\n", 435 | " [ 3.25553199 3.5141327 3.77273342]]\n", 436 | "Testing fully connected forward pass:\n", 437 | "difference: 9.76985004799e-10\n" 438 | ] 439 | } 440 | ], 441 | "source": [ 442 | "num_inputs = 2\n", 443 | "input_shape = (4, 5, 6)\n", 444 | "output_dim = 3\n", 445 | "\n", 446 | "input_size = num_inputs * np.prod(input_shape)\n", 447 | "weight_size = output_dim * np.prod(input_shape)\n", 448 | "\n", 449 | "x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)\n", 450 | "w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)\n", 451 | "b = np.linspace(-0.3, 0.1, num=output_dim).reshape(1,-1)\n", 452 | "\n", 453 | "flat = Flatten()\n", 454 | "x = flat.forward(x)\n", 455 | "\n", 456 | "f = FullyConnected(120,3)\n", 457 | "f.W,f.b= w,b\n", 458 | "out = f.forward(x)\n", 459 | "\n", 460 | "correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],\n", 461 | " [ 3.25553199, 3.5141327, 3.77273342]])\n", 462 | "\n", 463 | "print(out)\n", 464 | "# Compare your output with ours. 
The error should be around 1e-9.\n", 465 | "print('Testing fully connected forward pass:')\n", 466 | "print('difference: ', rel_error(out, correct_out))\n" 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": 10, 472 | "metadata": { 473 | "collapsed": false 474 | }, 475 | "outputs": [ 476 | { 477 | "name": "stdout", 478 | "output_type": "stream", 479 | "text": [ 480 | "Testing fully connected backward pass:\n", 481 | "dx error: 2.89903091526e-09\n", 482 | "dw error: 1.32127575542e-09\n", 483 | "db error: 1.03150657456e-11\n" 484 | ] 485 | } 486 | ], 487 | "source": [ 488 | "x = np.random.randn(10, 2, 3)\n", 489 | "w = np.random.randn(6, 5)\n", 490 | "b = np.random.randn(5)\n", 491 | "dout = np.random.randn(10, 5)\n", 492 | "\n", 493 | "flat = Flatten()\n", 494 | "x = flat.forward(x)\n", 495 | "\n", 496 | "f = FullyConnected(60,5)\n", 497 | "f.W,f.b= w,b\n", 498 | "\n", 499 | "dx_num = numerical_gradient_array(lambda x: f.forward(x), x, dout)\n", 500 | "dw_num = numerical_gradient_array(lambda w: f.forward(x), w, dout)\n", 501 | "db_num = numerical_gradient_array(lambda b: f.forward(x), b, dout)\n", 502 | "\n", 503 | "dx,grads= f.backward(dout)\n", 504 | "dw, db = grads\n", 505 | "# The error should be around 1e-10\n", 506 | "print('Testing fully connected backward pass:')\n", 507 | "print('dx error: ', rel_error(dx_num, dx))\n", 508 | "print('dw error: ', rel_error(dw_num, dw))\n", 509 | "print('db error: ', rel_error(db_num, db))" 510 | ] 511 | }, 512 | { 513 | "cell_type": "markdown", 514 | "metadata": {}, 515 | "source": [ 516 | "## Softmax Loss\n", 517 | "\n" 518 | ] 519 | }, 520 | { 521 | "cell_type": "code", 522 | "execution_count": 11, 523 | "metadata": { 524 | "collapsed": false 525 | }, 526 | "outputs": [ 527 | { 528 | "name": "stdout", 529 | "output_type": "stream", 530 | "text": [ 531 | "Testing SoftmaxLoss:\n", 532 | "loss: 2.30283790984\n", 533 | "dx error: 1.05396983612e-08\n" 534 | ] 535 | } 536 | ], 537 | "source": [ 538 | "num_classes, num_inputs = 10, 50\n", 539 | "x = 0.001 * np.random.randn(num_inputs, num_classes)\n", 540 | "y = np.random.randint(num_classes, size=num_inputs)\n", 541 | "\n", 542 | "dx_num = eval_numerical_gradient(lambda x: SoftmaxLoss(x,y)[0], x,verbose=False)\n", 543 | "loss,dx = SoftmaxLoss(x,y)\n", 544 | "\n", 545 | "# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8\n", 546 | "print('Testing SoftmaxLoss:')\n", 547 | "print('loss: ', loss)\n", 548 | "print('dx error: ', rel_error(dx_num, dx))" 549 | ] 550 | } 551 | ], 552 | "metadata": { 553 | "kernelspec": { 554 | "display_name": "Python 3", 555 | "language": "python", 556 | "name": "python3" 557 | }, 558 | "language_info": { 559 | "codemirror_mode": { 560 | "name": "ipython", 561 | "version": 3 562 | }, 563 | "file_extension": ".py", 564 | "mimetype": "text/x-python", 565 | "name": "python", 566 | "nbconvert_exporter": "python", 567 | "pygments_lexer": "ipython3", 568 | "version": "3.6.0" 569 | } 570 | }, 571 | "nbformat": 4, 572 | "nbformat_minor": 2 573 | } 574 | --------------------------------------------------------------------------------