├── README.md
└── style_transfer.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Research_to_Code
This is the code for "Research to Code" by Siraj Raval on YouTube.

## Overview

This is the neural style transfer code from [this](https://youtu.be/pQyzdwHBbqo) video on YouTube by Siraj Raval on how to turn research papers into code.

## Dependencies

- Keras
- NumPy
- SciPy

Install the dependencies from the command line using

`sudo pip install keras numpy scipy`


## Usage

Run via

`python style_transfer.py path_to_base_image.jpg path_to_style_image.jpg prefix_for_results`


## Credits

The credits for this code go to the Keras team. I've merely made a wrapper to get people started.

--------------------------------------------------------------------------------
/style_transfer.py:
--------------------------------------------------------------------------------
'''Neural style transfer with Keras.
Run the script with:
```
python style_transfer.py path_to_your_base_image.jpg path_to_your_reference.jpg prefix_for_results
```
e.g.:
```
python style_transfer.py img/tuebingen.jpg img/starry_night.jpg results/my_result
```
Optional parameters:
```
--iter, the number of optimization iterations to run (default: 10)
--content_weight, the weight given to the content loss (default: 0.025)
--style_weight, the weight given to the style loss (default: 1.0)
--tv_weight, the weight given to the total variation loss (default: 1.0)
```
It is preferable to run this script on a GPU, for speed.
Example result: https://twitter.com/fchollet/status/686631033085677568

# Details

Style transfer consists of generating an image
with the same "content" as a base image, but with the
"style" of a different picture (typically artistic).
This is achieved through the optimization of a loss function
that has 3 components: "style loss", "content loss",
and "total variation loss":
- The total variation loss imposes local spatial continuity between
the pixels of the combination image, giving it visual coherence.
- The style loss is where the deep learning comes in: it is defined
using a deep convolutional neural network. Precisely, it consists of a sum of
L2 distances between the Gram matrices of the representations of
the style reference image and the generated combination image, extracted from
different layers of a convnet (trained on ImageNet). The general idea
is to capture color/texture information at different spatial
scales (fairly large scales, defined by the depth of the layer considered).
- The content loss is an L2 distance between the features of the base
image (extracted from a deep layer) and the features of the combination image,
keeping the generated image close enough to the original one.
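Schematically, in the notation of Gatys et al. (see the reference below),
the total loss minimized here is:

    L_total(p, a, x) = content_weight * L_content(p, x)
                     + style_weight * L_style(a, x)
                     + tv_weight * L_tv(x)

where p is the base (content) image, a is the style reference image and
x is the generated combination image; L_style is averaged over several
conv layers of the network. The weights map directly to the
--content_weight, --style_weight and --tv_weight flags above.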
# References

- [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576)
'''

from __future__ import print_function

import argparse
import time

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications import vgg19
from keras import backend as K

parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('style_reference_image_path', metavar='ref', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
parser.add_argument('--iter', type=int, default=10, required=False,
                    help='Number of iterations to run.')
parser.add_argument('--content_weight', type=float, default=0.025, required=False,
                    help='Content weight.')
parser.add_argument('--style_weight', type=float, default=1.0, required=False,
                    help='Style weight.')
parser.add_argument('--tv_weight', type=float, default=1.0, required=False,
                    help='Total variation weight.')

args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_path = args.style_reference_image_path
result_prefix = args.result_prefix
iterations = args.iter

# these are the weights of the different loss components
total_variation_weight = args.tv_weight
style_weight = args.style_weight
content_weight = args.content_weight
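# NOTE: this script uses the Keras 2.x graph-mode API (K.placeholder,
# K.gradients, K.function below), which assumes a TensorFlow 1.x-style
# backend. Under TensorFlow 2.x eager execution it would need porting
# (e.g. via tf.compat.v1) -- this is an assumption about the
# environment, not something the script checks for.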
# dimensions of the generated picture
width, height = load_img(base_image_path).size
img_nrows = 400
img_ncols = int(width * img_nrows / height)


# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image
def deprocess_image(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel (the ImageNet BGR means
    # subtracted by vgg19.preprocess_input)
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path))
style_reference_image = K.variable(preprocess_image(style_reference_image_path))

# this will contain our generated image
if K.image_data_format() == 'channels_first':
    combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
else:
    combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)

# build the VGG19 network with our 3 images as input
# the model will be loaded with pre-trained ImageNet weights
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet', include_top=False)
print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names)
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

# compute the neural style loss
# first we need to define 4 util functions


# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram


# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination):
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
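# For reference, this is the per-layer style loss from Gatys et al.:
# with F the flattened feature maps of a layer, the Gram matrix is
# G_ij = sum_k F_ik * F_jk, and the layer's loss is
#     sum_ij (G_ij^style - G_ij^combination)^2 / (4 * N^2 * M^2).
# In the paper N and M are the layer's own channel count and feature
# map size; the code above fixes channels = 3 and size = img_nrows *
# img_ncols instead, which rescales each layer's contribution by a
# constant; in practice this is compensated by tuning style_weight.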
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    return K.sum(K.square(combination - base))


# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(
            x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(
            x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(
            x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(
            x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))


# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict['block5_conv2']
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)

feature_layers = ['block1_conv1', 'block2_conv1',
                  'block3_conv1', 'block4_conv1',
                  'block5_conv1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)

# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)

outputs = [loss]
if isinstance(grads, (list, tuple)):
    outputs += grads
else:
    outputs.append(grads)

f_outputs = K.function([combination_image], outputs)


def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
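# Note: scipy's fmin_l_bfgs_b optimizes a flat float64 vector, which is
# why eval_loss_and_grads reshapes the incoming flat array back to image
# shape before the forward pass, then flattens and casts the gradients
# to float64 before handing them back to the optimizer.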
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values


evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
x = preprocess_image(base_image_path)

for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy())
    fname = result_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
--------------------------------------------------------------------------------