├── GAN image.pdf
├── README.md
├── dependencies.py
├── optimizer_loss.py
├── train.py
└── model.py

/GAN image.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nonzchanon/GAN-Upscale-Image/HEAD/GAN image.pdf
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# GAN-Upscale-Image

This is a TensorFlow 2.0 implementation of the paper "Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network" (https://arxiv.org/pdf/1609.04802.pdf).

The dataset can be downloaded from https://data.vision.ee.ethz.ch/cvl/DIV2K/. Each input is a low-resolution image produced with MATLAB's imresize function at its default settings (bicubic interpolation); the corresponding target is the original high-resolution image.

For more details, please read the GAN image.pdf file.
--------------------------------------------------------------------------------

/dependencies.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow.keras import layers
import tensorflow_datasets as tfds

import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import json

from tensorflow.keras.layers import Dense, Input, GlobalMaxPooling1D
from tensorflow.keras.layers import LSTM, Embedding
from tensorflow.keras.models import Model

import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

import re
import os
import time
from glob import glob

from tensorflow.keras.preprocessing import image
from matplotlib.patches import Rectangle
from skimage.transform import resize
from tensorflow.keras.losses import binary_crossentropy
--------------------------------------------------------------------------------

/optimizer_loss.py:
--------------------------------------------------------------------------------
from dependencies import *
from model import Generator, Discriminator

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

# Shared model instances: _adversarial_loss below needs the discriminator,
# and train.py picks both up through its star import of this module.
generator = Generator()
discriminator = Discriminator()


def _content_loss(y, y_pred):
    # Pixel-wise MSE between the high-resolution target and the generated image.
    return tf.reduce_mean(tf.square(y - y_pred))


def _adversarial_loss(y_pred):
    # Generator's adversarial term: push the discriminator to label generated
    # images as real. BinaryCrossentropy is called as loss(y_true, y_pred).
    loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    y_discrim, y_discrim_logits = discriminator(y_pred)
    return tf.reduce_mean(loss_object(tf.ones_like(y_discrim_logits), y_discrim_logits))


def gen_loss_function(y, y_pred):
    # Perceptual loss from the SRGAN paper: content loss + 1e-3 * adversarial loss.
    return _content_loss(y, y_pred) + 1e-3 * _adversarial_loss(y_pred)


def discriminator_loss_function(y_real_pred, y_fake_pred, y_real_pred_logits, y_fake_pred_logits):
    loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    loss_real = tf.reduce_mean(loss_object(tf.ones_like(y_real_pred_logits), y_real_pred_logits))
    loss_fake = tf.reduce_mean(loss_object(tf.zeros_like(y_fake_pred_logits), y_fake_pred_logits))
    return loss_real + loss_fake
--------------------------------------------------------------------------------
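Before wiring up training, it can help to sanity-check the losses and shapes on random tensors. The sketch below is illustrative only: the 24x24 -> 96x96 (4x) shapes are assumptions chosen to match the generator's two 2x upsampling stages, and the file name is hypothetical.

# loss_sanity_check.py -- illustrative sketch, not part of the repository.
import tensorflow as tf
from optimizer_loss import (generator, discriminator,
                            gen_loss_function, discriminator_loss_function)

lr = tf.random.uniform((1, 24, 24, 3))   # dummy low-resolution batch
hr = tf.random.uniform((1, 96, 96, 3))   # dummy high-resolution target (4x)

sr = generator(lr)                       # two 2x upsamples -> (1, 96, 96, 3)

g_loss = gen_loss_function(hr, sr)
real_pred, real_logits = discriminator(hr)
fake_pred, fake_logits = discriminator(sr)
d_loss = discriminator_loss_function(real_pred, fake_pred, real_logits, fake_logits)

print('generator output:', sr.shape)
print('g_loss:', float(g_loss), 'd_loss:', float(d_loss))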
/train.py:
--------------------------------------------------------------------------------
from model import *
from dependencies import *
# The generator/discriminator instances and both optimizers come from
# optimizer_loss via this star import.
from optimizer_loss import *


@tf.function
def train_step(g_x, g_y, epoch):

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:

        # Generator: super-resolve the low-resolution batch.
        g_y_pred = generator(g_x)
        g_loss = gen_loss_function(g_y, g_y_pred)

        # Discriminator: score the real high-resolution images and the generated ones.
        d_y_real_pred, d_y_real_pred_logits = discriminator(g_y)
        d_y_fake_pred, d_y_fake_pred_logits = discriminator(g_y_pred)
        d_loss = discriminator_loss_function(d_y_real_pred, d_y_fake_pred, d_y_real_pred_logits, d_y_fake_pred_logits)

    generator_gradients = gen_tape.gradient(g_loss, generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(d_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))

    return g_loss, d_loss
--------------------------------------------------------------------------------
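train.py only defines the per-batch step; a driver still has to build the dataset and call it. A minimal sketch follows. The DIV2K directory layout, the 96x96 HR patch size, and the use of tf.image.resize(method='bicubic') to approximate MATLAB's imresize are all assumptions, not part of this repository.

# train_driver.py -- a minimal sketch under the assumptions above.
import tensorflow as tf
from glob import glob
from train import train_step

def load_pair(path):
    # Decode an HR image, take a random 96x96 patch, and bicubic-downscale
    # it 4x to build the LR input (approximating MATLAB imresize defaults).
    hr = tf.image.decode_png(tf.io.read_file(path), channels=3)
    hr = tf.image.random_crop(hr, (96, 96, 3))
    hr = tf.cast(hr, tf.float32) / 255.0
    lr = tf.image.resize(hr, (24, 24), method='bicubic')
    return lr, hr

files = glob('DIV2K_train_HR/*.png')     # assumed dataset location
dataset = (tf.data.Dataset.from_tensor_slices(files)
           .shuffle(len(files))
           .map(load_pair, num_parallel_calls=tf.data.experimental.AUTOTUNE)
           .batch(16)
           .prefetch(tf.data.experimental.AUTOTUNE))

for epoch in range(100):
    for lr_batch, hr_batch in dataset:
        g_loss, d_loss = train_step(lr_batch, hr_batch, epoch)
    print('epoch', epoch, 'g_loss', float(g_loss), 'd_loss', float(d_loss))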
/model.py:
--------------------------------------------------------------------------------
from dependencies import *


# Generator model
class Generator(tf.keras.Model):
    def __init__(self, learning_rate=1e-4, num_blocks=16, num_upsamples=2):
        super(Generator, self).__init__()

        self.learning_rate = learning_rate
        self.num_upsamples = num_upsamples
        self.num_blocks = num_blocks

        # num_blocks identical residual blocks (16 by default, as in the paper).
        self.residual = [
            self.ResidualBlock(filters=64, kernel_size=3, strides=1)
            for _ in range(num_blocks)
        ]

        # Each transposed convolution doubles the spatial resolution (2x upsample).
        self.upsample1 = tf.keras.layers.Conv2DTranspose(256, 4,
                                                         strides=2,
                                                         padding='same',
                                                         activation='linear')
        self.upsample2 = tf.keras.layers.Conv2DTranspose(256, 4,
                                                         strides=2,
                                                         padding='same',
                                                         activation='linear')

        self.convo0 = tf.keras.layers.Conv2D(filters=64, kernel_size=9, strides=1, padding='same')
        self.convo1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')
        self.convo2 = tf.keras.layers.Conv2D(filters=3, kernel_size=9, strides=1, padding='same')

        # self.prelu = tf.keras.layers.PReLU(shared_axes=[1, 2])
        self.relu1 = tf.keras.layers.ReLU()
        self.batchnormal = tf.keras.layers.BatchNormalization()

    def call(self, x):

        x = self.convo0(x)
        # x = self.prelu(x)
        x = self.relu1(x)
        skip = x

        # Residual blocks, each with its own local skip connection.
        for res in self.residual:
            skip_x = x
            x = res(x)
            x += skip_x

        x = self.convo1(x)
        x = self.batchnormal(x)
        x += skip

        # Upsampling blocks
        x = self.upsample1(x)
        x = self.upsample2(x)

        x = self.convo2(x)

        return x

    # Residual block: Conv2D + ReLU + Conv2D + BN. The element-wise skip
    # connection is applied by the caller in call().
    def ResidualBlock(self, filters, kernel_size, strides=1):
        result = tf.keras.Sequential()
        result.add(tf.keras.layers.Conv2D(filters, kernel_size, strides=strides, padding='same', use_bias=False))
        result.add(tf.keras.layers.ReLU())
        # result.add(tf.keras.layers.PReLU(shared_axes=[1, 2]))
        result.add(tf.keras.layers.Conv2D(filters, kernel_size, strides=strides, padding='same', use_bias=False))
        result.add(tf.keras.layers.BatchNormalization())
        return result


# Discriminator model
class Discriminator(tf.keras.Model):
    def __init__(self, learning_rate=1e-4):
        super(Discriminator, self).__init__()

        self.learning_rate = learning_rate

        self.convo0 = tf.keras.layers.Conv2D(kernel_size=3, filters=64, strides=1, padding='same')

        # Alternating stride-2 / stride-1 blocks progressively halve the
        # spatial resolution while increasing the channel count.
        self.convoblock = [
            self.ConvolutionBlock(32, 3, 2),
            self.ConvolutionBlock(64, 3, 1),
            self.ConvolutionBlock(64, 3, 2),
            self.ConvolutionBlock(128, 3, 1),
            self.ConvolutionBlock(128, 3, 2),
            self.ConvolutionBlock(256, 3, 1),
            self.ConvolutionBlock(256, 3, 2),
        ]

        self.flatten = tf.keras.layers.Flatten()
        self.Dense1 = tf.keras.layers.Dense(512)
        self.leaky1 = tf.keras.layers.LeakyReLU(alpha=0.2)
        self.Dense2 = tf.keras.layers.Dense(1)
        self.leaky2 = tf.keras.layers.LeakyReLU(alpha=0.2)

    def ConvolutionBlock(self, filters, kernel_size, strides):
        # Conv2D + LeakyReLU
        result = tf.keras.Sequential()
        result.add(tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same', use_bias=False))
        result.add(tf.keras.layers.LeakyReLU(alpha=0.2))
        return result

    def call(self, x):

        x = self.convo0(x)
        x = self.leaky1(x)

        for convo in self.convoblock:
            x = convo(x)

        x = self.flatten(x)
        x = self.Dense1(x)
        x = self.leaky2(x)
        logits = self.Dense2(x)
        x = tf.keras.activations.sigmoid(logits)
        return x, logits
--------------------------------------------------------------------------------
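At inference time only the trained generator is needed. The repository does not include checkpointing code, so the sketch below assumes the weights were saved during training with generator.save_weights('generator_ckpt'); the weight and image paths are hypothetical.

# upscale.py -- inference sketch; weight and image paths are hypothetical.
import tensorflow as tf
from model import Generator

generator = Generator()
generator(tf.zeros((1, 24, 24, 3)))          # dummy pass to build the variables
generator.load_weights('generator_ckpt')     # assumes save_weights was called in training

lr = tf.image.decode_png(tf.io.read_file('input.png'), channels=3)
lr = tf.cast(lr, tf.float32)[tf.newaxis] / 255.0

sr = generator(lr)                           # 4x super-resolved output
sr = tf.cast(tf.clip_by_value(sr, 0.0, 1.0) * 255.0, tf.uint8)[0]
tf.io.write_file('output.png', tf.image.encode_png(sr))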