├── Code
│   ├── DenseNet.py
│   ├── DenseNet_SimpleTest.py
│   ├── FID.py
│   └── GAN.py
├── Generated_Images
│   ├── 0.png
│   ├── 1.png
│   ├── 10.png
│   ├── 11.png
│   ├── 12.png
│   ├── 13.png
│   ├── 14.png
│   ├── 15.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── 7.png
│   ├── 8.png
│   └── 9.png
├── Models
│   ├── .DS_Store
│   ├── MODEL_DENSE.h5
│   └── gen_1250.h5
├── README.md
└── Readme_Images
    ├── BLOCKS.png
    ├── DENSE_BLOCK.png
    ├── DENSE_NET.png
    ├── DIS.png
    ├── GEN.png
    ├── Overview.png
    └── TUR.png

/Code/DenseNet.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import keras
import cv2
import numpy as np
from keras.layers import Input, Dense, Dropout, Activation, Concatenate, BatchNormalization, Flatten
from keras.models import Model
from keras.layers import Conv2D, GlobalAveragePooling2D, AveragePooling2D, ZeroPadding2D, MaxPooling2D
from keras.regularizers import l2
from PIL import Image, ImageOps
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler
import math

########################################################################################
def step_decay(epoch):
    initial_lrate = 0.01
    drop = 0.1
    epochs_drop = 7.0
    lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
    return lrate


def DenseNet(input_shape=None, dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=None, dropout_rate=None,
             bottleneck=False, compression=1.0, weight_decay=1e-4, depth=40):

    if nb_classes==None:
        raise Exception('Please define the number of classes (e.g. nb_classes=10). This is required for the final softmax.')

    if compression <= 0.0 or compression > 1.0:
        raise Exception('Compression has to be a value between 0.0 and 1.0. Setting compression to 1.0 turns it off.')

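    # When dense_layers == -1, the number of composite layers per block is derived from
    # `depth`: each layer counts as two convolutions when bottleneck=True, so the default
    # depth=40 with 3 blocks gives (40 - (3 + 1)) / 3 // 2 = 6 layers per block
    # (and (40 - 4) // 3 = 12 per block without bottleneck).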
    if type(dense_layers) is list:
        if len(dense_layers) != dense_blocks:
            raise AssertionError('Number of dense blocks has to match the length of the dense_layers list.')
    elif dense_layers == -1:
        if bottleneck:
            dense_layers = (depth - (dense_blocks + 1))/dense_blocks // 2
        else:
            dense_layers = (depth - (dense_blocks + 1))//dense_blocks
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
    else:
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]

    img_input = Input(shape=input_shape)
    nb_channels = growth_rate * 2

    # Initial convolution layer
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = Conv2D(nb_channels, (7,7), strides=2 , use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=((1,1), (1, 1)))(x)
    x = MaxPooling2D(pool_size = (3, 3), strides = 2)(x)

    # Building dense blocks
    for block in range(dense_blocks):

        # Add dense block
        x, nb_channels = dense_block(x, dense_layers[block], nb_channels, growth_rate, dropout_rate, bottleneck, weight_decay)

        if block < dense_blocks - 1:  # if it's not the last dense block
            # Add transition_block
            x = transition_layer(x, nb_channels, dropout_rate, compression, weight_decay)
            nb_channels = int(nb_channels * compression)

    x = AveragePooling2D(pool_size = 7)(x)  # DECIDING LINE
    x = Flatten(data_format = 'channels_last')(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)

    model_name = None
    if growth_rate >= 36:
        model_name = 'widedense'
    else:
        model_name = 'dense'

    if bottleneck:
        model_name = model_name + 'b'

    if compression < 1.0:
        model_name = model_name + 'c'

    return Model(img_input, x, name=model_name), model_name


def dense_block(x, nb_layers, nb_channels, growth_rate, dropout_rate=None, bottleneck=False, weight_decay=1e-4):

    x_list = [x]
    for i in range(nb_layers):
        cb = convolution_block(x, growth_rate, dropout_rate, bottleneck, weight_decay)
        x_list.append(cb)
        x = Concatenate(axis=-1)(x_list)
        nb_channels += growth_rate
    return x, nb_channels


def convolution_block(x, nb_channels, dropout_rate=None, bottleneck=False, weight_decay=1e-4):

    growth_rate = nb_channels/2
    # Bottleneck
    if bottleneck:
        bottleneckWidth = 4
        x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
        x = Activation('relu')(x)
        x = Conv2D(nb_channels * bottleneckWidth, (1, 1), use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        # Dropout
        if dropout_rate:
            x = Dropout(dropout_rate)(x)

    # Standard (BN-ReLU-Conv)
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_channels, (3, 3), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x

def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):

    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels*compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x

if __name__ == '__main__':
    model = DenseNet(input_shape = (64,64,1) , dense_blocks = 2 , dense_layers = 6 , growth_rate = 32 , nb_classes = 6 , bottleneck = True , depth = 27, weight_decay = 1e-5)
    print(model[0].summary())

    # The initial lr is irrelevant here: the LearningRateScheduler below overrides it every epoch.
    opt = SGD(lr = 0.0 , momentum = 0.9)
    model[0].compile(optimizer=opt , loss='categorical_crossentropy' , metrics=['accuracy'])

    train_datagen = ImageDataGenerator(data_format = "channels_last")
    train_generator = train_datagen.flow_from_directory('Training_Set_Path' , target_size = (64,64) , color_mode = 'grayscale' , batch_size = 8)
    STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size

    lrate = LearningRateScheduler(step_decay, verbose=1)
    callbacks_list = [lrate]

    model[0].fit_generator(train_generator , steps_per_epoch=STEP_SIZE_TRAIN , epochs = 20, callbacks=callbacks_list, verbose=1)

    model[0].save("Path")

--------------------------------------------------------------------------------
/Code/DenseNet_SimpleTest.py:
--------------------------------------------------------------------------------
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
import pandas as pd
from keras.models import load_model
from sklearn.metrics import confusion_matrix, accuracy_score
import cv2
import matplotlib.pyplot as plt
############################################################################

model = load_model('Path_of_DenseNet_model')
print(model.summary())
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow_from_directory(
    directory= 'Test_Set_Path',
    target_size=(64,64),
    color_mode="grayscale",
    batch_size=1,
    shuffle = False)
STEP_SIZE_TEST=test_generator.n//test_generator.batch_size
test_generator.reset()
pred=model.predict_generator(test_generator , steps=STEP_SIZE_TEST , verbose=1)
predicted_class_indices=np.argmax(pred, axis=1)


print("Predicted")
print(predicted_class_indices)
print("Test Generator Classes")
print(test_generator.classes)
print('Confusion Matrix')
print(confusion_matrix(test_generator.classes, predicted_class_indices))
print(accuracy_score(test_generator.classes, predicted_class_indices))
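
# Optional extension: per-class accuracy can be read off the confusion matrix, e.g.
#   cm = confusion_matrix(test_generator.classes, predicted_class_indices)
#   print(cm.diagonal() / cm.sum(axis=1))   # correct predictions / samples, per class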
--------------------------------------------------------------------------------
/Code/FID.py:
--------------------------------------------------------------------------------
import numpy
from numpy import cov
from numpy import trace
from numpy import iscomplexobj
from numpy.random import random
from scipy.linalg import sqrtm
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
import pandas as pd
from keras.models import load_model
from sklearn.metrics import confusion_matrix, accuracy_score
import cv2
import matplotlib.pyplot as plt
from numpy import asarray
from numpy import expand_dims
from numpy import log
from numpy import mean,std
from numpy import exp
from PIL import Image

#################################################################################
def calculate_fid(dset, gan):
    # FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1.sigma2)),
    # computed here on the activations of the trained DenseNet classifier
    # rather than the usual Inception-v3 features.
    # calculate activations
    model = load_model('DenseNet_Model_Path')
    act1 = model.predict(dset)
    act2 = model.predict(gan)

    # calculate mean and covariance statistics
    mu1, sigma1 = act1.mean(axis=0), cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), cov(act2, rowvar=False)
    # calculate sum squared difference between means
    ssdiff = numpy.sum((mu1 - mu2)**2.0)
    # calculate sqrt of product between cov
    covmean = sqrtm(sigma1.dot(sigma2))
    # check and correct imaginary numbers from sqrt
    if iscomplexobj(covmean):
        covmean = covmean.real
    # calculate score
    fid = ssdiff + trace(sigma1 + sigma2 - 2.0 * covmean)
    return fid

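
# Build the two activation sets: real dataset images vs. GAN-generated images.
# Both folders are assumed to already contain 64x64 images (nothing is resized here),
# matching the (64, 64, 1) input shape of the DenseNet classifier loaded above.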
dset = []
dset_dir = 'Dataset_Images_Path'
for fname in os.listdir(dset_dir):
    if fname == '.DS_Store':
        continue
    img = Image.open(dset_dir + fname)
    img = np.asarray(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = expand_dims(img , axis=2)
    dset.append(img)
dset = np.asarray(dset)
print(np.shape(dset))

gan = []
gan_dir = 'Gan_Generated_Images_Path'
for fname in os.listdir(gan_dir):
    if fname == '.DS_Store':
        continue
    img = Image.open(gan_dir + fname)
    img = np.asarray(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = expand_dims(img , axis=2)
    gan.append(img)
gan = np.asarray(gan)
print(np.shape(gan))


fid_score = calculate_fid(dset , gan)
print('FID SCORE:')
print(fid_score)

--------------------------------------------------------------------------------
/Code/GAN.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, LeakyReLU, MaxPool2D
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, LayerNormalization
# from tensorflow.keras.layers.advanced_activations import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D, Conv2DTranspose
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
import os
import matplotlib.pyplot as plt
import cv2
import sys
from PIL import Image
import numpy as np
import tensorflow as tf

##########################################################################################
class DCGAN():
    def __init__(self):

        self.img_rows = 64
        self.img_cols = 64
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 32
        self.weight_decay = 1e-4

        optimizer = Adam(0.0001 , 0.5)
        optimizer1 = Adam(0.0001 , 0.5)

        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        self.generator = self.build_generator()

        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        self.discriminator.trainable = False

        valid = self.discriminator(img)

        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer1)

    def build_generator(self):
        model = Sequential()

        model.add(Dense(4*4*128 , activation = 'linear' , input_dim = self.latent_dim))
        model.add(Reshape((4,4,128)))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(UpSampling2D())
        model.add(Conv2DTranspose(filters = 64 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(UpSampling2D())
        model.add(Conv2DTranspose(filters = 32 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(UpSampling2D())
        model.add(Conv2DTranspose(filters = 16 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(UpSampling2D())
        model.add(Conv2DTranspose(filters = 8 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(Conv2D(filters = 8 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(Conv2D(filters = 4 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(Conv2D(filters = 1 , kernel_size = 5 , padding = 'same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)

    def build_discriminator(self):
        model = Sequential()

        model.add(Conv2D(16, kernel_size=5, input_shape=self.img_shape, padding="same"))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(MaxPool2D())
        model.add(Conv2D(32, kernel_size=5, padding="same"))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(MaxPool2D())
        model.add(Conv2D(64, kernel_size=5, padding="same"))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(MaxPool2D())
        model.add(Conv2D(128, kernel_size=5, padding="same"))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha = 0.2))
        model.add(MaxPool2D())
        model.add(Flatten())
        model.add(Dense(1 , activation = 'sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)

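
    # Training procedure (train() below): real images get soft labels of 0.9 and generated
    # images 0.1; each iteration updates the discriminator on one real and one generated
    # batch, then updates the generator through the combined model with the discriminator
    # weights frozen. Sample images are written every `save_interval` epochs and the
    # generator is checkpointed every 20 epochs.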
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        dirname = 'Path'
        X_train = []
        for filename in os.listdir(dirname):
            img = Image.open(dirname + filename)
            img = np.asarray(img)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            X_train.append(img)

        # Scale pixel values to [-1, 1] (undone again in save_imgs)
        X_train = np.asarray(X_train, dtype=np.float32)
        X_train = X_train / 127.5 - 1

        X_train = np.expand_dims(X_train, axis=3)

        valid = [0.9 for i in range(batch_size)]
        valid = np.expand_dims(valid , axis=1)

        fake = [0.1 for i in range(batch_size)]
        fake = np.expand_dims(fake , axis=1)

        for epoch in range(epochs):

            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = np.add(d_loss_real, d_loss_fake)

            g_loss = self.combined.train_on_batch(noise, valid)

            print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 50*d_loss[1], g_loss))

            if epoch % save_interval == 0:
                self.save_imgs(epoch)
            if epoch % 20 == 0:
                self.generator.save('Dirname' + str(epoch) + '.h5')

    def save_imgs(self, epoch):
        noise = np.random.normal(0, 1, (5, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        gen_imgs = 0.5 * gen_imgs + 0.5

        cnt = 0
        for img in gen_imgs:
            img = np.squeeze(img , axis=2)
            plt.imsave("Dirname" + str(epoch) + '_' + str(cnt) + ".png" , img , cmap='gray')
            cnt += 1


if __name__ == '__main__':
    dcgan = DCGAN()
    dcgan.train(epochs=2000, batch_size=8, save_interval=10)

--------------------------------------------------------------------------------
/Generated_Images/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/0.png
--------------------------------------------------------------------------------
/Generated_Images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/1.png
--------------------------------------------------------------------------------
/Generated_Images/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/10.png
--------------------------------------------------------------------------------
/Generated_Images/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/11.png
--------------------------------------------------------------------------------
/Generated_Images/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/12.png
--------------------------------------------------------------------------------
/Generated_Images/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/13.png
--------------------------------------------------------------------------------
/Generated_Images/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/14.png
--------------------------------------------------------------------------------
/Generated_Images/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/15.png
--------------------------------------------------------------------------------
/Generated_Images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/2.png
--------------------------------------------------------------------------------
/Generated_Images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/3.png
--------------------------------------------------------------------------------
/Generated_Images/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/4.png
--------------------------------------------------------------------------------
/Generated_Images/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/5.png
--------------------------------------------------------------------------------
/Generated_Images/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/6.png
--------------------------------------------------------------------------------
/Generated_Images/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/7.png
--------------------------------------------------------------------------------
/Generated_Images/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/8.png
--------------------------------------------------------------------------------
/Generated_Images/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Generated_Images/9.png
--------------------------------------------------------------------------------
/Models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Models/.DS_Store
--------------------------------------------------------------------------------
/Models/MODEL_DENSE.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Models/MODEL_DENSE.h5
--------------------------------------------------------------------------------
/Models/gen_1250.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Models/gen_1250.h5
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Generation-of-SAR-Images-Using-Deep-Learning

## Paper Link: https://link.springer.com/article/10.1007%2Fs42979-020-00364-z
## Dataset Link: https://www.sdms.afrl.af.mil/index.php?collection=mstar&page=targets

## Overview:
This work aims to generate new Synthetic Aperture Radar (SAR) images using Generative Adversarial Networks (GANs). A robust Densely Connected Convolutional Neural Network (DenseNet) model capable of classifying six distinct SAR target classes is also proposed in the paper; this classifier is used to evaluate the GAN-generated images quantitatively and to establish an overall proof of concept. (More details can be found in the paper!)
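
A minimal sketch for sampling the pretrained generator shipped in `Models/gen_1250.h5` (assuming a TensorFlow/Keras setup compatible with the saved `.h5` file, and the 32-dimensional latent and 64x64 grayscale output used in `Code/GAN.py`):

```python
import numpy as np
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt

generator = load_model('Models/gen_1250.h5')   # trained generator checkpoint
noise = np.random.normal(0, 1, (16, 32))       # 16 latent vectors, latent_dim = 32
imgs = generator.predict(noise)                # -> (16, 64, 64, 1), roughly in [-1, 1]
imgs = 0.5 * imgs + 0.5                        # same rescaling as save_imgs() in GAN.py
for i, img in enumerate(imgs):
    plt.imsave('sample_%d.png' % i, img.squeeze(axis=2), cmap='gray')
```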

![alt text](https://github.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/blob/main/Readme_Images/Overview.png?raw=true)

## DenseNet Structure:
The end-to-end DenseNet structure is shown below
![alt text](https://github.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/blob/main/Readme_Images/DENSE_NET.png?raw=true)


## GAN Structure:
The discriminator is modelled as
![alt text](https://github.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/blob/main/Readme_Images/DIS.png?raw=true)
The generator is modelled as
![alt text](https://github.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/blob/main/Readme_Images/GEN.png?raw=true)


## Results:
Some GAN-generated images (right) and dataset images (left) are placed next to each other to demonstrate the GAN's effectiveness

![alt text](https://github.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/blob/main/Readme_Images/TUR.png?raw=true)

--------------------------------------------------------------------------------
/Readme_Images/BLOCKS.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/BLOCKS.png
--------------------------------------------------------------------------------
/Readme_Images/DENSE_BLOCK.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/DENSE_BLOCK.png
--------------------------------------------------------------------------------
/Readme_Images/DENSE_NET.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/DENSE_NET.png
--------------------------------------------------------------------------------
/Readme_Images/DIS.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/DIS.png
--------------------------------------------------------------------------------
/Readme_Images/GEN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/GEN.png
--------------------------------------------------------------------------------
/Readme_Images/Overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/Overview.png
--------------------------------------------------------------------------------
/Readme_Images/TUR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/Generation-of-SAR-Images-Using-Deep-Learning/1d3311d14822f3cfbaca19307d12a05b56ba4973/Readme_Images/TUR.png
--------------------------------------------------------------------------------