├── resnet
│   ├── __init__.py
│   ├── tool.py
│   └── model.py
├── setup.cfg
├── MANIFEST.in
├── images
│   ├── resnet_plain.png
│   ├── resnet_resnet.png
│   ├── resnet_shallow_deep_model.png
│   └── resnet_training_test_error.png
├── examples
│   └── distributed_resnet
│       ├── config.json
│       ├── create_server.py
│       ├── simple.py
│       └── distributed_resnet.py
├── .gitignore
├── setup.py
├── README.md
├── main.py
├── deep-residual-learning-for-image-recognition.ipynb
└── LICENSE

--------------------------------------------------------------------------------
/resnet/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[metadata]
description-file = README.md

--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
# Include the license file
include LICENSE

--------------------------------------------------------------------------------
/images/resnet_plain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndersonJo/residual-network/master/images/resnet_plain.png

--------------------------------------------------------------------------------
/images/resnet_resnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndersonJo/residual-network/master/images/resnet_resnet.png

--------------------------------------------------------------------------------
/examples/distributed_resnet/config.json:
--------------------------------------------------------------------------------
{
  "worker": ["10.10.102.74:2222"],
  "host": ["10.10.100.87:2222"]
}

--------------------------------------------------------------------------------
/images/resnet_shallow_deep_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndersonJo/residual-network/master/images/resnet_shallow_deep_model.png

--------------------------------------------------------------------------------
/images/resnet_training_test_error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndersonJo/residual-network/master/images/resnet_training_test_error.png

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Project
.idea
.ipynb_checkpoints
resnet_tensorflow.egg-info

# Python
*.pyc

# Dataset
cifar-10-python.tar.gz
_dataset

--------------------------------------------------------------------------------
/examples/distributed_resnet/create_server.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('--task', type=int, help='The task number')
parser.add_argument('--job', type=str, default='worker', help='job name ("worker" or "host")')
args = parser.parse_args()

cluster_spec = json.load(open('config.json', 'rt'))
cluster = tf.train.ClusterSpec(cluster_spec)

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)
server = tf.train.Server(cluster, job_name=args.job, task_index=args.task,
                         config=tf.ConfigProto(gpu_options=gpu_options))
server.start()
server.join()

--------------------------------------------------------------------------------
/examples/distributed_resnet/simple.py:
--------------------------------------------------------------------------------
import json

import tensorflow as tf

cluster_spec = json.load(open('config.json', 'rt'))
cluster = tf.train.ClusterSpec(cluster_spec)
server = tf.train.Server(cluster, job_name='host', task_index=0)

with tf.device('/job:worker/task:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)

# NOTE: this block requires a second "worker" entry (task 1) in config.json;
# the sample config defines only one worker.
with tf.device('/job:worker/task:1'):
    d = tf.matmul(a, b) + tf.log(100 + c)

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1, allow_growth=True)
with tf.Session(server.target, config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    result = sess.run(d)
    print(result)

--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

setup(
    name='resnet-tensorflow',
    version='0.0.1',
    packages=['resnet'],
    url='https://github.com/AndersonJo/residual-network',
    license='Apache 2.0',
    classifiers=[
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Information Analysis"
    ],
    install_requires=['tensorflow', 'hdfs', 'keras', 'numpy', 'scipy'],
    python_requires='>=3',
    author='Chang Min Jo (Anderson Jo)',
    author_email='a141890@gmail.com',
    description='Deep Residual Neural Network',
    keywords=['tensorflow', 'resnet', 'residual', 'neural network', 'cifar', 'cifar-10']
)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# residual-network
Deep Residual Network

You can use this library to build your own ResNet.
It is highly customizable and uses TensorFlow.

# Installation

The library requires Python 3.6.
Installation is simple; you can use pip:

```
pip3 install resnet-tensorflow
```

To install ResNet from source:

```
python3.6 setup.py install
```

# Deep Residual Neural Network (ResNet) Example

## Run ResNet

This repository contains a CIFAR-10 image classification example.
All you need to do is run a single command:

```
python3.6 main.py --mode=train
```
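
After training, the same script can evaluate the saved checkpoint on the CIFAR-10 test set (`main.py` restores the model when `--mode=test` is passed):

```
python3.6 main.py --mode=test
```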

## Create Your Own ResNet

You might want to customize the architecture and build your own ResNet.
The following code shows you how:

```
resnet = ResNet(batch=32)
with tf.variable_scope('input_scope'):
    h = resnet.init_block(filter=[7, 7], channel=[3, 32], max_pool=False)

with tf.variable_scope('residual01'):
    h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])

with tf.variable_scope('residual02'):
    h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.residual_block(h, filter=[3, 3], channel=[32, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])

with tf.variable_scope('residual03'):
    h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.residual_block(h, filter=[3, 3], channel=[64, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])

with tf.variable_scope('residual04'):
    h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.residual_block(h, filter=[3, 3], channel=[128, 256])
    h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
    h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
    h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
    h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])

with tf.variable_scope('residual05'):
    h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.residual_block(h, filter=[3, 3], channel=[256, 512])
    h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
    h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
    h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
    h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
    h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])

with tf.variable_scope('fc'):
    h = resnet.avg_pool(h, kernel=[2, 2], stride=[2, 2])
    h = resnet.fc(h)
h  # <- Your Network Created
```

--------------------------------------------------------------------------------
/resnet/tool.py:
--------------------------------------------------------------------------------
import os
import pickle
import tarfile
from urllib.request import urlopen

import numpy as np
from tensorflow.python.client import device_lib

# Constants
CIFAR_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'

def _download(dest_dir):
    """
    Download the CIFAR-10 dataset from the internet if it is not already present.
    """
    tar_path = os.path.abspath(os.path.join(dest_dir, 'cifar-10-python.tar.gz'))

    # Create the destination directory
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)

    # Download the archive if it does not exist yet
    if not os.path.exists(tar_path):
        print('Start downloading CIFAR-10 dataset from the internet')
        print(f'the path to save: {tar_path}')
        raw = urlopen(CIFAR_URL).read()
        with open(tar_path, 'wb') as f:
            f.write(raw)

    return tar_path


def _download_from_hdfs(dest_dir):
    import hdfs
    tar_path = os.path.abspath(os.path.join(dest_dir, 'cifar-10-python.tar.gz'))

    # Create the destination directory
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    # Download the archive if it does not exist yet
    if not os.path.exists(tar_path):
        print('Start downloading CIFAR-10 dataset from HDFS')
        print(f'the path to save: {tar_path}')
        client = hdfs.InsecureClient(url='http://node1.samsung.com:50070/', user='ocrbg1')
        client.download('/user/user001/cifar-10-python.tar.gz', tar_path)


def _uncompress(data_path):
    """
    Extract the compressed cifar-10-python.tar.gz archive into data_path.
    """
    data_path = os.path.abspath(data_path)
    tar_path = os.path.join(data_path, 'cifar-10-python.tar.gz')
    uncompressed_path = os.path.join(data_path, 'cifar-10-batches-py')

    if not os.path.exists(uncompressed_path):
        print(f'Extracting {tar_path}')
        archive = tarfile.open(tar_path, 'r')
        archive.extractall(data_path)


def unpickle(file):
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')
    return data


def _to_onehot(data):
    N = data.shape[0]
    n_label = len(np.unique(data))

    onehot = np.zeros((N, n_label), dtype=np.int64)
    onehot[np.arange(N), data] = 1
    return onehot


def _preprocessing1(data_path, force=False):
    """
    As a first preprocessing step, read the extracted batch files,
    merge them, and save the result as a single new file.
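
    Note on the input format (the standard CIFAR-10 layout): each extracted
    `data_batch_{i}` file is a pickled dict whose b'data' entry is a uint8
    array of shape (10000, 3072) holding row-major R, G, B planes, and whose
    b'labels' entry is a list of 10000 integers in [0, 9]. The
    reshape/transpose below relies on this layout.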
81 | """ 82 | abs_path = os.path.abspath(data_path) 83 | preprocessed_path = os.path.join(abs_path, 'cifar-10-preprocessed.pkl') 84 | uncompressed_dir = os.path.join(abs_path, 'cifar-10-batches-py') 85 | 86 | if os.path.exists(preprocessed_path) and not force: 87 | return preprocessed_path 88 | 89 | data_x = list() 90 | data_y = list() 91 | for i in range(1, 6): 92 | rawdata = unpickle(os.path.join(uncompressed_dir, f'data_batch_{i}')) 93 | data_x.append(rawdata[b'data']) 94 | data_y.append(rawdata[b'labels']) 95 | rawdata = unpickle(os.path.join(uncompressed_dir, 'test_batch')) 96 | 97 | train_x = np.array(data_x).reshape(-1, 3, 32, 32) 98 | train_x = train_x.transpose([0, 2, 3, 1]) 99 | train_y = np.array(data_y).reshape(-1) 100 | 101 | test_x = np.array(rawdata[b'data']).reshape(-1, 3, 32, 32) 102 | test_x = test_x.transpose([0, 2, 3, 1]) 103 | test_y = np.array(rawdata[b'labels']).reshape(-1) 104 | 105 | dataset = dict() 106 | dataset['train_x'] = train_x.astype('float32') 107 | dataset['train_y'] = train_y 108 | dataset['test_x'] = test_x.astype('float32') 109 | dataset['test_y'] = test_y 110 | 111 | f = open(preprocessed_path, 'wb') 112 | pickle.dump(dataset, f) 113 | 114 | return preprocessed_path 115 | 116 | 117 | def load_data(data_path): 118 | _download(data_path) 119 | # _download_from_hdfs(data_path) 120 | _uncompress(data_path) 121 | prep_path = _preprocessing1(data_path) 122 | 123 | dataset = pickle.load(open(prep_path, 'rb')) 124 | return dataset['train_x'], dataset['train_y'], dataset['test_x'], dataset['test_y'] 125 | 126 | 127 | def numpy_to_image(matrix: np.array, path: str): 128 | import scipy.misc 129 | scipy.misc.imsave(path, matrix) 130 | 131 | 132 | if __name__ == '__main__': 133 | train_x, train_y, test_x, test_y = load_data('_dataset') 134 | print('train_x:', train_x.shape) 135 | print('train_y:', train_y.shape) 136 | print('test_x :', test_x.shape) 137 | print('test_y :', test_y.shape) 138 | -------------------------------------------------------------------------------- /examples/distributed_resnet/distributed_resnet.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | from queue import deque 4 | 5 | import numpy as np 6 | import tensorflow as tf 7 | from keras.preprocessing.image import ImageDataGenerator 8 | from resnet.model import ResNet 9 | from resnet.tool import load_data 10 | 11 | parser = argparse.ArgumentParser(description="CIFAR-10 Classification with Deep Residual Neural Network") 12 | parser.add_argument('--mode', default='train', type=str, help='"train" or "test"') 13 | parser.add_argument('--datapath', default='/tmp/cifar10', type=str, help='the directory path to store Iris data set') 14 | parser.add_argument('--epoch', default=30, type=int, ) 15 | parser.add_argument('--batch', default=32, type=int, help='batch size') 16 | parser.add_argument('--save_interval', default=5000, type=int, 17 | help='Automatically save the model after specific time interval') 18 | parser.add_argument('--visualize_interval', default=20, type=int, help='The interval value to print status like loss') 19 | parser = parser.parse_args() 20 | 21 | # Cluster Initialization 22 | cluster_spec = json.load(open('config.json', 'rt')) 23 | cluster = tf.train.ClusterSpec(cluster_spec) 24 | server = tf.train.Server(cluster, job_name='host', task_index=0) 25 | 26 | 27 | def create_model(resnet: ResNet): 28 | with tf.variable_scope('input_scope'): 29 | h = resnet.init_block(filter=[7, 7], channel=[3, 64], 
        h = resnet.init_block(filter=[7, 7], channel=[3, 64], max_pool=False)

    with tf.variable_scope('residual01'), tf.device('/job:worker/task:0/gpu:0'):
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])

    with tf.variable_scope('residual02'), tf.device('/job:worker/task:0/gpu:0'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])

    with tf.variable_scope('residual03'), tf.device('/job:worker/task:0/gpu:0'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])

    with tf.variable_scope('residual04'), tf.device('/job:host/task:0/gpu:0'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])

    with tf.variable_scope('fc'), tf.device('/job:host/task:0/gpu:0'):
        h = resnet.avg_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.fc(h)
    return h


def train(resnet, interval=parser.visualize_interval):
    with tf.variable_scope('fc'), tf.device('/job:host/task:0/gpu:0'):
        loss = resnet.loss()
        adam = tf.train.AdamOptimizer()
        train_op = adam.minimize(loss)
    resnet.compile(target=server.target)

    # Get Data
    train_x, train_y, test_x, test_y = load_data(parser.datapath)

    # Image Augmentation
    datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, featurewise_center=True,
                                 featurewise_std_normalization=True)
    datagen.fit(train_x)

    iter_count = 0
    _losses = deque(maxlen=interval)
    for epoch in range(1, parser.epoch + 1):
        sample_count = 0

        for i, (sample_x, sample_y) in enumerate(datagen.flow(train_x, train_y, batch_size=resnet.batch)):
            feed_dict = {resnet.x_ts: sample_x, resnet.y_ts: sample_y}
            _loss, _ = resnet.sess.run([loss, train_op], feed_dict=feed_dict)

            # Visualize
            _losses.append(_loss)
            iter_count += 1
            sample_count += 1
            if i % interval == 0:
                _loss = np.mean(_losses)
                print(f'[epoch:{epoch:02}] loss:{_loss:<7.4} '
                      f'sample_count:{sample_count:<5} '
                      f'iteration:{iter_count:<5} ')

            # Save
            if iter_count % parser.save_interval == 0:
                resnet.save()
                print(f'Model has been successfully saved at iteration = {iter_count}')


def evaluate(resnet, batch_size=parser.batch):
    correct_prediction = tf.equal(tf.argmax(resnet.last_layer, 1), resnet.y_ts)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    resnet.compile(target=server.target)

    # Get Data
    train_x, train_y, test_x, test_y = load_data(parser.datapath)

    accuracies = list()
    for i in range(0, 10000, batch_size):
        if i + batch_size < 10000:
            _acc = resnet.sess.run(accuracy, feed_dict={
                resnet.x_ts: test_x[i:i + batch_size],
                resnet.y_ts: test_y[i:i + batch_size]})
            accuracies.append(_acc)

    print('Accuracy', np.mean(accuracies))


def main():
    resnet = ResNet(batch=parser.batch)
    create_model(resnet)

    if parser.mode == 'train':
        print('Start Training')
        train(resnet)
    elif parser.mode == 'test':
        print('Start Evaluation')
        evaluate(resnet)


if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import argparse
from datetime import datetime
from collections import deque

import numpy as np
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import logging
from resnet.model import ResNet
from resnet.tool import load_data

# Parse Arguments
parser = argparse.ArgumentParser(description="CIFAR-10 Classification with Deep Residual Neural Network")
parser.add_argument('--mode', default='train', type=str, help='"train" or "test"')
parser.add_argument('--datapath', default='/tmp/cifar10', type=str, help='the directory path to store the CIFAR-10 dataset')
parser.add_argument('--epoch', default=30, type=int, help='the number of training epochs')
parser.add_argument('--batch', default=64, type=int, help='batch size')
parser.add_argument('--save_interval', default=5000, type=int,
                    help='Automatically save the model every N iterations')
parser.add_argument('--visualize_interval', default=100, type=int, help='The interval at which to print status such as loss')
parser = parser.parse_args()

# Logging
logger = logging.getLogger('resnet')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')

stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.propagate = False


def create_model(resnet: ResNet):
    with tf.variable_scope('input_scope'):
        h = resnet.init_block(filter=[7, 7], channel=[3, 32], max_pool=False)

    with tf.variable_scope('residual01'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])

    with tf.variable_scope('residual02'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 64])

    with tf.variable_scope('residual03'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[64, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 128])

    with tf.variable_scope('residual04'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[128, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 256])

    with tf.variable_scope('residual05'):
        h = resnet.max_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.residual_block(h, filter=[3, 3], channel=[256, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])
        h = resnet.residual_block(h, filter=[3, 3], channel=[512, 512])

    with tf.variable_scope('fc'):
        h = resnet.avg_pool(h, kernel=[2, 2], stride=[2, 2])
        h = resnet.fc(h)
    return h


def train(resnet, interval=parser.visualize_interval):
    loss = resnet.loss()
    adam = tf.train.AdamOptimizer()
    train_op = adam.minimize(loss)
    resnet.compile()

    # Get Data
    train_x, train_y, test_x, test_y = load_data(parser.datapath)

    # Image Augmentation
    datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, featurewise_center=True,
                                 featurewise_std_normalization=True)
    datagen.fit(train_x)

    iter_count = 0
    _losses = deque(maxlen=interval)
    time_point = datetime.now()
    for epoch in range(1, parser.epoch + 1):
        sample_count = 0

        for i, (sample_x, sample_y) in enumerate(datagen.flow(train_x, train_y, batch_size=resnet.batch)):
            feed_dict = {resnet.x_ts: sample_x, resnet.y_ts: sample_y}
            _loss, _ = resnet.sess.run([loss, train_op], feed_dict=feed_dict)

            # Visualize
            _losses.append(_loss)
            if i % interval == 0:
                _loss = np.mean(_losses)
                time_diff = round((datetime.now() - time_point).total_seconds(), 2)
                time_point = datetime.now()
                logger.info(f'[epoch:{epoch:02}] loss:{_loss:<7.4} '
                            f'time-taken:{time_diff:<7.4} '
                            f'sample:{sample_count:<5} '
                            f'iter:{iter_count:<5}')

            # Add up count
            iter_count += 1
            sample_count += 1

            # Save
            if iter_count % parser.save_interval == 0:
                resnet.save()
                logger.info(f'Model has been successfully saved at iteration = {iter_count}')


def evaluate(resnet, batch_size=parser.batch):
    correct_prediction = tf.equal(tf.argmax(resnet.last_layer, 1), resnet.y_ts)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Get Data
    train_x, train_y, test_x, test_y = load_data(parser.datapath)

    accuracies = list()
    for i in range(0, 10000, batch_size):
        if i + batch_size < 10000:
            _acc = resnet.sess.run(accuracy, feed_dict={
                resnet.x_ts: test_x[i:i + batch_size],
                resnet.y_ts: test_y[i:i + batch_size]})
            accuracies.append(_acc)

    logger.info(f'Accuracy: {np.mean(accuracies)}')


def main():
    resnet = ResNet(batch=parser.batch)
    create_model(resnet)
    resnet.compile()

    if parser.mode == 'train':
        train(resnet)
    elif parser.mode == 'test':
        resnet.restore()
        evaluate(resnet)


if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
/resnet/model.py:
--------------------------------------------------------------------------------
from typing import List, Tuple

import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.layers import l1_regularizer, l2_regularizer


class ResNet(object):
    """
    Deep Residual Network for CIFAR-10 image classification
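
    A minimal usage sketch (construction mirrors the README example and
    assumes a TensorFlow 1.x environment):

        resnet = ResNet(batch=32)
        h = resnet.init_block(filter=[7, 7], channel=[3, 32], max_pool=False)
        h = resnet.residual_block(h, filter=[3, 3], channel=[32, 32])
        h = resnet.fc(h)
        loss = resnet.loss()
        resnet.compile()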
    """
    regularizers = {
        'l1': l1_regularizer,
        'l2': l2_regularizer
    }
    EPSILON = 1e-12

    def __init__(self, input: tf.Tensor = None, output: tf.Tensor = None, batch: int = 256, n_label: int = 10):
        """
        :param input: Input Tensor. Use tf.placeholder. If not provided, the input layer for CIFAR-10 is used
        :param output: Output Tensor. Use tf.placeholder. If not provided, the output layer for CIFAR-10 is used
        :param batch: Batch Size
        :param n_label: The number of labels for classification
        """

        self.batch = batch
        self.n_label = n_label
        self.x_ts = tf.placeholder('float', [None, 32, 32, 3]) if input is None else input
        self.y_ts = tf.placeholder('int64', [None]) if output is None else output

        self.sess = None
        self._names = dict()
        self.layers = list()
        self.layers.append(self.x_ts)

        self.saver = None

    def create_variable(self, name: str, shape: tuple, dtype=tf.float32,
                        initializer=xavier_initializer(), regularizer: str = None):
        if regularizer is not None:
            regularizer = regularizer.lower()
            regularizer = self.regularizers[regularizer]()

        v = tf.get_variable(self._naming(name), shape=shape, dtype=dtype,
                            initializer=initializer, regularizer=regularizer)
        return v

    def conv(self, input_layer, filter: List[int], channel: List[int],
             stride: int, padding: str = 'SAME') -> Tuple[tf.Tensor, tf.Tensor]:
        """
        :param input_layer: Previous layer or tensor
        :param filter: [filter_height, filter_width]
        :param channel: [in_channels, out_channels]
        :param stride: The stride of the sliding window
        :param padding: Padding algorithm, 'SAME' or 'VALID'
        :return: [conv_layer, filter]
        """

        filter_ts = self.create_variable('filter', shape=(*filter, *channel))
        conv = tf.nn.conv2d(input_layer, filter=filter_ts, strides=[1, stride, stride, 1], padding=padding)
        return conv, filter_ts

    def batch_norm(self, input_layer, dimension):
        mean, variance = tf.nn.moments(input_layer, [0, 1, 2], keep_dims=False)
        beta = self.create_variable('batch_beta', dimension, dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.0, tf.float32))
        gamma = self.create_variable('batch_gamma', dimension, dtype=tf.float32,
                                     initializer=tf.constant_initializer(1.0, tf.float32))
        bn_layer = tf.nn.batch_normalization(input_layer, mean, variance, beta, gamma, self.EPSILON)
        return bn_layer

    def conv_bn(self, input_layer, filter: List[int], channel: List[int], stride: int):
        """
        In ResNet, every convolution is immediately followed by batch normalization:
75 | "We adopt batch normalization (BN) right after each convolution and before activation" 76 | 77 | filter: [filter_height, filter_width] 78 | channel: [in_channels, out_channels] 79 | """ 80 | out_channel = channel[1] 81 | h, _filter = self.conv(input_layer, filter=filter, channel=channel, stride=stride, padding='SAME') 82 | h = self.batch_norm(h, out_channel) 83 | return h 84 | 85 | def init_block(self, filter: List[int] = (7, 7), channel: List[int] = (3, 16), 86 | stride: int = 1, max_pool: bool = True) -> tf.Tensor: 87 | """ 88 | input -> Conv -> ReLU -> output 89 | 90 | :param filter: [filter_height, filter_width] 91 | :param channel: [in_channels, out_channels] 92 | :param stride: 93 | """ 94 | init_conv, _filter = self.conv(self.x_ts, filter=filter, channel=channel, stride=stride) 95 | init_conv = tf.nn.relu(init_conv) 96 | if max_pool: 97 | # MaxPooling 98 | # ksize: The size of the window for each dimension of the input tensor 99 | # strides: The stride of the sliding window for each dimension of the input tensor 100 | output = tf.nn.max_pool(init_conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 101 | else: 102 | output = init_conv 103 | 104 | self.layers.append(output) 105 | return output 106 | 107 | def max_pool(self, input_layer, kernel: List[int], stride: List[int], padding: str = 'SAME') -> tf.Tensor: 108 | """ 109 | :param input_layer: 110 | :param kernel: [width, height] Kernel Size 111 | :param stride: [width, height] Stirde Size 112 | :param padding: 113 | :return: 114 | """ 115 | k_height, k_width = kernel 116 | stride_width, stride_height = stride 117 | output = tf.nn.max_pool(input_layer, 118 | ksize=[1, k_height, k_width, 1], 119 | strides=[1, stride_width, stride_height, 1], padding=padding) 120 | self.layers.append(output) 121 | return output 122 | 123 | def avg_pool(self, input_layer, kernel: List[int], stride: List[int], padding: str = 'SAME') -> tf.Tensor: 124 | """ 125 | :param input_layer: 126 | :param kernel: [width, height] Kernel Size 127 | :param stride: [width, height] Stirde Size 128 | :param padding: 129 | :return: 130 | """ 131 | k_height, k_width = kernel 132 | stride_width, stride_height = stride 133 | output = tf.nn.avg_pool(input_layer, 134 | ksize=[1, k_height, k_width, 1], 135 | strides=[1, stride_width, stride_height, 1], padding=padding) 136 | self.layers.append(output) 137 | return output 138 | 139 | def residual_block(self, input_layer, filter: List[int], channel: List[int], stride: int = 1) -> tf.Tensor: 140 | """ 141 | input -> Conv -> BN -> ReLU -> Conv -> BN -> Addition -> ReLU -> output 142 | 143 | :param input_layer: Usually previous layer 144 | :param filter: (width, height) The size of the filter 145 | :param channel: [in_channels, out_channels] 146 | :param stride: The size of the s 147 | :return: 148 | """ 149 | input_channel, output_channel = channel 150 | 151 | h = self.conv_bn(input_layer, filter=filter, channel=[input_channel, output_channel], stride=stride) 152 | h = tf.nn.relu(h) 153 | h = self.conv_bn(h, filter=filter, channel=[output_channel, output_channel], stride=stride) 154 | 155 | if input_channel != output_channel: 156 | # Input channel 과 output channel이 dimension이 다르기 때문에 projection 을 통해서 dimension을 맞춰준다. 
            inp, _filter = self.conv(input_layer, filter=[1, 1], channel=[input_channel, output_channel], stride=stride)
        else:
            inp = input_layer

        h = tf.add(h, inp)
        h = tf.nn.relu(h)
        self.layers.append(h)
        return h

    def fc(self, input_layer):
        global_pool = tf.reduce_mean(input_layer, axis=[1, 2])
        fc_w = self.create_variable(name='fc_w', shape=[global_pool.shape[-1], self.n_label])
        fc_b = self.create_variable(name='fc_b', shape=[self.n_label])

        output = tf.matmul(global_pool, fc_w) + fc_b
        self.layers.append(output)
        return output

    def loss(self):
        loss_f = tf.nn.sparse_softmax_cross_entropy_with_logits
        cross_entropy = loss_f(logits=self.last_layer, labels=self.y_ts, name='cross_entropy')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
        return cross_entropy_mean

    def compile(self, target=None) -> tf.Session:
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)
        sess = tf.Session(target, config=tf.ConfigProto(gpu_options=gpu_options,
                                                        allow_soft_placement=False,
                                                        log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        self.sess = sess
        return sess

    def save(self, path='/tmp/resnet_anderson.ckpt'):
        if self.saver is None:
            self.saver = tf.train.Saver()
        self.saver.save(self.sess, path)

    def restore(self, path='/tmp/resnet_anderson.ckpt'):
        if self.saver is None:
            self.saver = tf.train.Saver()
        print(f'Restoring "{path}" model')
        self.saver.restore(self.sess, path)

    @property
    def last_layer(self) -> tf.Tensor:
        return self.layers[-1]

    def _naming(self, name=None):
        if name is None or not name:
            name = 'variable'
        name = name.lower()
        self._names.setdefault(name, 0)
        self._names[name] += 1
        count = self._names[name]
        return f'{name}_{count:02}'

--------------------------------------------------------------------------------
/deep-residual-learning-for-image-recognition.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Deep Residual Learning for Image Recognition\n",
    "\n",
    "## Degradation Problem\n",
    "\n",
    "There has been a great deal of progress since deep convolutional neural networks appeared.\n",
\n", 12 | "기본적으로 deep networks는 features들을 스스로 low/mid/high level features로 나누며$ ^{[01]} $, 근래의 모델들의 경우 layers층을 더 깊이 있게 쌓아서 features들의 levels을 더욱 세분화하고자하는 시도가 있으며$ ^{[02]} $, ImageNet 챌린지에서 16개 또는 30개등의 layers를 사용하는 매우 깊은 모델들을 사용하기도 하였습니다.$ ^{[03]} $\n", 13 | "\n", 14 | "단순히 layers를 더 많이 쌓으면 더 좋은 결과를 낼 것인가? 라는 질문에는.. 사실 문제가 있습니다.
\n", 15 | "이미 잘 알려진 vanishing/exploding gradients$ ^{[04]} $ $ ^{[05]} $의 문제는 convergence자체를 못하게 만듭니다.
\n", 16 | "이러한 문제는 normalized initialization$ ^{[06]} $ $ ^{[04]} $ $ ^{[07]} $, 그리고 intermediate normalization layers $ ^{[08]} $에 의해서 다소 해결이 되어 수십층의 layers들이 SGD를 통해 convergence될 수 있도록 도와줍니다. \n", 17 | "\n", 18 | "Deeper networks를 사용할때 **degradation problem**이 발견되었습니다. degradation problem은 network의 depth가 커질수록 accuracy는 saturated (마치 뭔가 가득차서 현상태에서 더 진전이 없어져 버리는 상태)가 되고 degradation이 진행됩니다. 이때 degradation은 overfitting에 의해서 생겨나는 것이 아니며, 더 많은 layers를 넣을수록 training error가 더 높아집니다.$ ^{[09]} $ (만약 overfitting이었다면 training error는 매우 낮아야 합니다.)\n", 19 | "\n", 20 | "![title](images/resnet_training_test_error.png)\n", 21 | "\n", 22 | "
\n", 23 | "CIFAR-10 데이터에 대한 training error(왼쪽) 그리고 test error(오른쪽) 그래프.
\n", 24 | " 20-layers 그리고 56-layers를 사용했으며, 더 깊은 네트워크일수록 training error가 높으며, 따라서 test error또한 높다.
\n", 25 | "
\n", 26 | "\n", 27 | "한가지 실험에서 이를 뒷받침할 근거를 내놓습니다.
\n", 28 | "shallow network에서 학습된 모델위에 다층의 layers를 추가적으로 쌓습니다. 이론적으로는 deeper 모델이 shallower 모델에 추가된 것이기 때문에 더 낮은 training error를 보여야 합니다. 하지만 학습된 shallower 모델에 layers를 더 추가시켜도, 그냥 shallow 모델보다 더 높은 training error를 보여줍니다.\n", 29 | "\n", 30 | "![title](images/resnet_shallow_deep_model.png)\n", 31 | "\n", 32 | "
\n", 33 | "**Constructed Solution**
    "a shallower model (left) and a deeper model (right). \n",
    "The knowledge learned by the shallower model is copied, and extra layers are added as identity mappings. \n",
    "The deeper model should show training error lower than or equal to that of the shallower model, \n",
    "but in reality, because of degradation, training error increases as the layers get deeper. \n",
\n", 34 | "
\n", 35 | "\n", 36 | "위의 그림에서 보듯이, shallower model로 부터 학습된 지식을 복사하고, 추가적으로 identity mapping으로서 layers를 더 추가하였습니다.
\n", 37 | "여기서 identity mapping이라는 뜻은 $ f(x) = x $ 의 의미로 기존 학습된 layers에서 나온 output을 추가된 layers에서 동일한 output을 생성하는 것입니다. 따라서 identity mapping으로서 추가된 layers들은 최소한 shallower model에서 나온 예측치와 동일하거나 또는 더 깊게 들어갔으니 더 잘 학습이 되어야 합니다.\n", 38 | "\n", 39 | "하지만 현실은.. layers가 깊어지면 깊어질수록 training error가 더 높아지며, 따라서 test error또한 동일하게 높아집니다.
\n", 40 | "이러한 현상을 degradation problem이라고 하며, Deep Residual Network$ ^{[10]} $가 해결하려는 부분입니다." 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": {}, 46 | "source": [ 47 | "## Residual \n", 48 | "\n", 49 | "먼저 residual에 대해서 알아야 합니다.
\n", 50 | "간단하게 이야기 하면 residual이란 관측치(observed data)와 예측값(estimated value)사이의 차이입니다.
\n", 51 | "Linear least square (최소회귀직선) residual의 합은 0이 됩니다.
\n", 52 | "예를 들어 다음의 수식에서 true값인 $ x $를 찾고자 합니다.\n", 53 | "\n", 54 | "$$ f(x) = b $$\n", 55 | "\n", 56 | "이때 $ x $의 근사치(approximation)인 $ x_0 $가 주어졌을때, residual값은 다음과 같습니다.\n", 57 | "\n", 58 | "$$ b - f(x_0) $$\n", 59 | "\n", 60 | "반면에 error는 true값에서 근사치(approximation)의 차이이며 다음과 같습니다.
\n", 61 | "(하나의 예로.. 근사값 3.14 - $ \\pi $ 가 바로 오차입니다.)\n", 62 | "\n", 63 | "$$ x - x_0 $$\n", 64 | "\n", 65 | "좀더 잘 설명한 [영상](https://www.youtube.com/watch?v=snG7sa5CcJQ)을 참고 합니다.\n", 66 | "\n", 67 | "\n", 68 | "## ResNet Explained\n", 69 | "\n", 70 | "Degradation에서 언급한 현상을 보면, 직관적으로 보면 deep neural network에 더 많은 layers를 추가시킵으로서 성능을 향상시킬수 있을거 같지만 그와는 정 반대의 결과가 나왔습니다. 이것이 의미하는 바는 multiple nonlinear layers안에서 identity mappings을 시키는데(approximate) 어려움이 있다는 것입니다. 이는 흔히 딥러닝에서 나타나는 vanishing/exploding gradients 이슈 그리고 curse of dimensionality problem등으로 나타나는 현상으로 생각이 됩니다. \n", 71 | "\n", 72 | "ResNets은 이러한 문제를 해결하기 위하여, residual learning을 통해 강제로 Identity mapping (function)을 학습하도록 하였습니다.
\n", 73 | "\n", 74 | "* $ x $: 해당 레이어들의 input\n", 75 | "* $ H(x) $: (전체가 아닌) 소규모의 다층 레이어(a few stacked layers)의 output \n", 76 | "* $ id(x) $: Identity mapping(function)은 단순히 $ id(x) = x $ 으로서, $ x $값을 받으면 동일한 $ x $를 리턴시킵니다\n", 77 | "* $ H(x) $ 그리고 $ x $ 는 동일한 dimension을 갖고 있다고 가정\n", 78 | "\n", 79 | "일반적인 Neural Network는 $ H(x) $ 자체를 학습니다. \n", 80 | "\n", 81 | "![title](images/resnet_plain.png)\n", 82 | "\n", 83 | "\n", 84 | "ResNet의 경우에는 residual function을 학습하도록 강제합니다.\n", 85 | "\n", 86 | "$$ F(x) = H(x) - id(x) $$\n", 87 | "\n", 88 | "우리는 실제 true값을 알고자 하는 것이기 때문에 위의 공식은 다음과 같이 재정립(reformulation)할수 있습니다.\n", 89 | "\n", 90 | "$$ \\begin{align}\n", 91 | "H(x) &= F(x) + id(x) \\\\\n", 92 | "&= F(x) + x\n", 93 | "\\end{align} $$\n", 94 | "\n", 95 | "즉 아래의 그림처럼 그래프가 그려집니다.\n", 96 | "\n", 97 | "![title](images/resnet_resnet.png)\n", 98 | "\n", 99 | "이론적으로 identity mappings이 최적화(optimal)되었다면, 다중 레이어의 weights연산 $ F(x) $ 의 값을 0으로 만들것입니다. $ F(x) $ 가 0이 된후 $ id(x) $ 를 더하기 때문에 해당 subnetwork는 identity function으로서 기능을 하게 됩니다. \n", 100 | "\n", 101 | "실제로는 identity mappings (layers)가 최적화되어 0으로 수렴하는 것은 일어나기 힘듬니다.
\n", 102 | "다만 reformulation 된 공식안에 identity function이 존재하기 때문에 reference가 될 수 있고, 따라서 neural network가 학습하는데 도움을 줄 수 있습니다." 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Shortcut Connection\n", 110 | "\n", 111 | "위에서 이미 언급한 그래프에서 논문에서는 building block을 다음과 같이 정의하고 있습니다.
\n", 112 | "(공식을 간략하게 하기 위해서 bias에 대한 부분은 의도적으로 누락되어 있습니다. 당연히 실제 구현시에는 필요합니다.)\n", 113 | "\n", 114 | "$$ y = F(x\\ |\\ W_i) + x $$\n", 115 | "\n", 116 | "$ F(x\\ |\\ W_i) $ 는 학습되야할 residual mapping을 나타내며, $ x $ 그리고 $ y $는 각각 input 그리고 output을 나타냅니다.
\n", 117 | "위의 공식은 아주 간략하게 표현하기 위해서 나타낸것이고 2개의 레이어를 사용하는 경우에는 $ F $ 함수에대한 정의가 바뀝니다.\n", 118 | "\n", 119 | "$$ F = W_2 \\sigma \\left(W_1 x \\right) $$\n", 120 | "\n", 121 | "여기서 $ \\sigma $는 ReLU를 가르킵니다. $ F + x $ 는 **shortcut connection**을 나타내며 element-wise addition을 연산합니다.
\n", 122 | "해당 addtion 이후! 두번째 nonlinearity를 적용합니다. (즉 ReLU를 addition이후에 적용하면 됨)\n", 123 | "\n", 124 | "$ F + x $ 를 연산할때 중요한점은 **dimension이 서로 동일**해야 합니다.\n", 125 | "만약 서로 dimension이 다르다면 (예를 들어서 input/output의 channels이 서로 다름) linear projection $ W_s $ 를 shorcut connection에 적용시켜서 dimension을 서로 동일하게 만들어줄수 있습니다.\n", 126 | "\n", 127 | "$$ y = F(x\\ |\\ W_i) + W_s x $$\n", 128 | "\n", 129 | "Residual function $ F $는 사실 상당히 유연합니다.즉 $ F $는 2개, 3개 또는 3개 이상의 다층을 사용하는 것이 가능합니다.
\n", 130 | "하지만 만약 $ F $안에 1개의 레이어만 갖고 있다면 linear layer와 동일해지게 됩니다.\n", 131 | "\n", 132 | "$$ y = W_1x + x $$\n", 133 | "\n", 134 | "따라서 1개만 갖고 있는 $ F $는 사실상 의미가 없습니다.
\n", 135 | "또한 $ F $는 fully-connected layer 또는 convolution같은 다양한 방법으로 모델링을 할 수 있습니다.\n" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "## Implementation\n", 143 | "\n", 144 | "논문에서는 ImageNet에 대한 구현을 다음과 같이 하였습니다. \n", 145 | "\n", 146 | "#### Data Augmentation\n", 147 | "\n", 148 | "224 x 224 싸이즈로 random crop 또는 horizontal flip이 되었으며, 픽섹마다 평균값으로 subtract 되었습니다.
\n", 149 | "Standard color augmentation을 실행했습니다.\n", 150 | "\n", 151 | "#### ResNet Layer\n", 152 | "\n", 153 | "Conv -> Batch -> ReLU -> Conv -> Batch -> Addition -> RELU" 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": {}, 159 | "source": [ 160 | "# References\n", 161 | "\n", 162 | "인용한 문서들..\n", 163 | "\n", 164 | "* [01] [Visualizing and understanding convolutional neural networks](https://www.cs.nyu.edu/~fergus/papers/zeilerECCV2014.pdf)\n", 165 | "* [02] [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)\n", 166 | "* [03] [Going Deeper with Convolutions](https://arxiv.org/pdf/1409.4842.pdf)\n", 167 | "* [04] [Understanding the difficulty of training\n", 168 | "deep feedforward neural networks](http://www-prima.imag.fr/jlc/Courses/2016/PRML/XavierInitialisation.pdf)\n", 169 | "* [05] [Learning long-term dependencies\n", 170 | "with gradient descent is difficult](http://www.iro.umontreal.ca/~lisa/pointeurs/ieeetrnn94.pdf)\n", 171 | "* [06] [Efficient backprop. In Neural Networks: Tricks of the Trade](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n", 172 | "* [07] [Exact solutions to the nonlinear dynamics of learning in deep linear neural networks](https://arxiv.org/pdf/1312.6120.pdf)\n", 173 | "* [08] [Batch normalization: Accelerating deep\n", 174 | "network training by reducing internal covariate shift](https://arxiv.org/abs/1502.03167)\n", 175 | "* [09] [Convolutional neural networks at constrained time cost](https://arxiv.org/abs/1412.1710)\n", 176 | "* [10] [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf)\n", 177 | "\n", 178 | "글 쓰면서 참고한 문서들..\n", 179 | "\n", 180 | "* [Deep Residual Learning for Image Recognition - 원래 ResNet Paper](https://arxiv.org/pdf/1512.03385.pdf)\n", 181 | "* [Identity Mappings in Deep Residual Networks - 개선된 Paper](https://arxiv.org/pdf/1603.05027.pdf)\n", 182 | "* [Residual neural networks are an exciting area of deep learning research](https://blog.init.ai/residual-neural-networks-are-an-exciting-area-of-deep-learning-research-acf14f4912e9)\n", 183 | "* [Deep Residual Networks ICML 2016 Tutorial](http://icml.cc/2016/tutorials/icml2016_tutorial_deep_residual_networks_kaiminghe.pdf)" 184 | ] 185 | } 186 | ], 187 | "metadata": { 188 | "kernelspec": { 189 | "display_name": "Python 3", 190 | "language": "python", 191 | "name": "python3" 192 | }, 193 | "language_info": { 194 | "codemirror_mode": { 195 | "name": "ipython", 196 | "version": 3.0 197 | }, 198 | "file_extension": ".py", 199 | "mimetype": "text/x-python", 200 | "name": "python", 201 | "nbconvert_exporter": "python", 202 | "pygments_lexer": "ipython3", 203 | "version": "3.6.1" 204 | } 205 | }, 206 | "nbformat": 4, 207 | "nbformat_minor": 0 208 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 CHANG MIN JO (Anderson Jo) 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------