├── README.md
├── plot.py
├── evaluate.py
├── Prepare_dataset
├── metrics
└── model

/README.md:
--------------------------------------------------------------------------------
# Road-Network-Segmentation-and-Vectorization

# Prerequisites and Run

This code is implemented in Python using the Keras library with a TensorFlow backend. It has been tested in that environment, though it should be compatible with related setups. The following environment and library are needed to run the code:

Python 3 with Keras (TensorFlow backend)

# Data

Download the Massachusetts road dataset from https://www.cs.toronto.edu/~vmnih/data/ and the Ottawa road dataset from https://github.com/yhlleo/RoadNet to create both the training dataset and the ground-truth dataset.

# Quick Overview
![model](https://user-images.githubusercontent.com/51461267/129298467-d4b0254f-a5cf-46de-bdc3-019b7d311cd7.JPG)
--------------------------------------------------------------------------------
/plot.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np

EPOCHS = 100
# plot the training and validation loss curves from a completed fit() run
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), history.history["loss"], linewidth=2, color='blue', label="train_loss")
plt.plot(np.arange(0, EPOCHS), history.history["val_loss"], linewidth=2, color='red', label="val_loss")
#plt.plot(np.arange(0, EPOCHS), history.history["accuracy"], linewidth=2, color='blue', label="train_acc")
#plt.plot(np.arange(0, EPOCHS), history.history["val_accuracy"], linewidth=2, color='red', label="val_acc")
plt.title("Training and Validation Losses", fontsize=14)
plt.xlabel("Epoch", fontsize=13, color='black')
plt.ylabel("Loss", fontsize=13, color='black')
plt.tick_params(labelsize=11, size=5, rotation=0, color='black', labelcolor='black')
leg = plt.legend(loc="best", fontsize=12)
leg.get_frame().set_edgecolor('black')
plt.show()
--------------------------------------------------------------------------------
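Note that plot.py expects a `history` object produced by a prior training run. A minimal, self-contained sketch (toy model and random data, purely illustrative) of where that object comes from:

import numpy as np
import tensorflow as tf

# fit() returns a History object whose .history dict holds the
# "loss"/"val_loss" curves consumed by plot.py
toy = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
toy.compile(optimizer="adam", loss="mse")
history = toy.fit(np.random.rand(16, 4), np.random.rand(16, 1),
                  validation_split=0.25, epochs=3, verbose=0)
print(sorted(history.history.keys()))  # ['loss', 'val_loss']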
/evaluate.py:
--------------------------------------------------------------------------------
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import jaccard_score  # jaccard_similarity_score was removed from scikit-learn


# `model` is the network-building function defined elsewhere in the repo;
# X_test/y_test are the arrays prepared in Prepare_dataset
model = model(input_size=(512, 512, 3))
model.summary()
model.load_weights('...')
predictions = model.predict(X_test, batch_size=2, verbose=1)

# flatten predictions and ground truth to 1-D vectors of per-pixel scores
y_scores = predictions.reshape(-1)
print(y_scores.shape)
y_true = y_test.reshape(-1)
y_true = np.where(y_true > 0.5, 1, 0)

# Confusion matrix
threshold_confusion = 0.5
print("\nConfusion matrix: Custom threshold (for positive) of " + str(threshold_confusion))
y_pred = np.where(y_scores >= threshold_confusion, 1, 0)
confusion = confusion_matrix(y_true, y_pred)
print(confusion)

sensitivity = 0
if float(confusion[1, 1] + confusion[1, 0]) != 0:
    sensitivity = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0])
print("Sensitivity: " + str(sensitivity))
precision = 0
if float(confusion[1, 1] + confusion[0, 1]) != 0:
    precision = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1])
print("Precision: " + str(precision))
# Jaccard similarity index (IoU of the road class)
jaccard_index = jaccard_score(y_true, y_pred)
print("\nJaccard similarity score: " + str(jaccard_index))
# F1 score
F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)
print("\nF1 score (F-measure): " + str(F1_score))
--------------------------------------------------------------------------------
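The scores printed above can be cross-checked directly from the confusion matrix: F1 is the harmonic mean of precision and recall, and the Jaccard index equals TP / (TP + FP + FN). A quick sketch using the `confusion` array from evaluate.py (assuming both classes occur in the test set):

tn, fp, fn, tp = confusion.ravel()
precision_cm = tp / (tp + fp)
recall_cm = tp / (tp + fn)
f1_cm = 2 * precision_cm * recall_cm / (precision_cm + recall_cm)
iou_cm = tp / (tp + fp + fn)
print("F1 from confusion matrix:", f1_cm)
print("IoU from confusion matrix:", iou_cm)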
/Prepare_dataset:
--------------------------------------------------------------------------------
# load dataset
import os
import warnings
import numpy as np
from tqdm.notebook import tqdm
from skimage.transform import resize
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array

im_width = 512
im_height = 512
border = 5
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
BATCH_SIZE = 2
#
ids = next(os.walk('...'))[2]
print('No. of images = ', len(ids))
ids1 = next(os.walk('...'))[2]
print('No. of masks = ', len(ids1))
#
X = np.zeros((len(ids), im_height, im_width, 3), dtype=np.float32)
y = np.zeros((len(ids1), im_height, im_width, 3), dtype=np.float32)
#
for n, id_ in tqdm(enumerate(ids), total=len(ids)):
    img = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/img/' + id_)
    x_img = img_to_array(img)
    x_img = resize(x_img, (512, 512, 3), mode='constant', preserve_range=True)
    #x_img = x_img.transpose(2, 0, 1)
    #x_img = x_img.reshape(im_height, im_width, 1)
    X[n] = x_img / 255.0

for n, id_ in tqdm(enumerate(ids1), total=len(ids1)):
    mask = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/y/' + id_)
    mask = img_to_array(mask)
    mask = resize(mask, (512, 512, 3), mode='constant', preserve_range=True)
    #mask = mask.transpose(2, 0, 1)
    #mask = mask.reshape(im_height, im_width, 1)
    y[n] = mask / 255.0

############## validation images
ids_val = next(os.walk('/data/ababdoll/Abi/paper6(road)/data/data_original/img1/'))[2]
print('No. of val images = ', len(ids_val))
ids1_val = next(os.walk('/data/ababdoll/Abi/paper6(road)/data/data_original/y1/'))[2]
print('No. of val masks = ', len(ids1_val))

X_val = np.zeros((len(ids_val), im_height, im_width, 3), dtype=np.float32)
y_val = np.zeros((len(ids1_val), im_height, im_width, 3), dtype=np.float32)

for n, id_ in tqdm(enumerate(ids_val), total=len(ids_val)):
    im_val = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/img1/' + id_)
    x_img_val = img_to_array(im_val)
    x_img_val = resize(x_img_val, (512, 512, 3), mode='constant', preserve_range=True)
    X_val[n] = x_img_val / 255.0

for n, id_ in tqdm(enumerate(ids1_val), total=len(ids1_val)):
    mask_val = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/y1/' + id_)
    mask_val = img_to_array(mask_val)
    mask_val = resize(mask_val, (512, 512, 3), mode='constant', preserve_range=True)
    y_val[n] = mask_val / 255.0

####### test images
ids_test = next(os.walk('/data/ababdoll/Abi/paper6(road)/data/data_original/img2/'))[2]
print('No. of test_images = ', len(ids_test))
ids1_test = next(os.walk('/data/ababdoll/Abi/paper6(road)/data/data_original/y2/'))[2]
print('No. of test_masks = ', len(ids1_test))

X_test = np.zeros((len(ids_test), im_height, im_width, 3), dtype=np.float32)
y_test = np.zeros((len(ids1_test), im_height, im_width, 3), dtype=np.float32)

for n, id_ in tqdm(enumerate(ids_test), total=len(ids_test)):
    img_test = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/img2/' + id_)
    x_img_test = img_to_array(img_test)
    x_img_test = resize(x_img_test, (512, 512, 3), mode='constant', preserve_range=True)
    X_test[n] = x_img_test / 255.0

for n, id_ in tqdm(enumerate(ids1_test), total=len(ids1_test)):
    mask_test = load_img('/data/ababdoll/Abi/paper6(road)/data/data_original/y2/' + id_)
    mask_test = img_to_array(mask_test)
    mask_test = resize(mask_test, (512, 512, 3), mode='constant', preserve_range=True)
    y_test[n] = mask_test / 255.0

## construct the training image generator for data augmentation
batch_size = 2

# images and masks use separate generators, so both must apply the same
# transforms and share the same `seed` to keep each pair aligned
img_train = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, rotation_range=..., fill_mode='nearest')
mask_train = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, rotation_range=..., fill_mode='nearest')
img_train.fit(X, augment=True, seed=seed)
mask_train.fit(y, augment=True, seed=seed)
x_train = img_train.flow(X, batch_size=batch_size, seed=seed)
Y_train = mask_train.flow(y, batch_size=batch_size, seed=seed)
##
img_val = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, fill_mode='nearest')
mask_val = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, fill_mode='nearest')
img_val.fit(X_val, augment=True, seed=seed)
mask_val.fit(y_val, augment=True, seed=seed)
x_val = img_val.flow(X_val, batch_size=batch_size, seed=seed)
Y_val = mask_val.flow(y_val, batch_size=batch_size, seed=seed)
###
# paired (image, mask) generators for training and validation
generator = zip(x_train, Y_train)
generator1 = zip(x_val, Y_val)
--------------------------------------------------------------------------------
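Because the images and masks are augmented by two independent generators, it is worth confirming visually that a drawn pair received the same random flip. A quick sanity-check sketch using the `generator` defined above:

import matplotlib.pyplot as plt

# pull one augmented batch and display the first image next to its mask
img_batch, mask_batch = next(generator)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(img_batch[0])
ax[1].imshow(mask_batch[0])
plt.show()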
/metrics:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.losses import binary_crossentropy

beta = 0.25
alpha = 0.25
gamma = 2
epsilon = 1e-5
smooth = 1


class Semantic_loss_functions(object):
    def __init__(self):
        print("semantic loss functions initialized")

    def dice_coef(self, y_true, y_pred):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(y_true_f * y_pred_f)
        return (2. * intersection + K.epsilon()) / (
                K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())

    def sensitivity(self, y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        return true_positives / (possible_positives + K.epsilon())

    def specificity(self, y_true, y_pred):
        true_negatives = K.sum(
            K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
        possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
        return true_negatives / (possible_negatives + K.epsilon())

    def convert_to_logits(self, y_pred):
        y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(),
                                  1 - tf.keras.backend.epsilon())
        return tf.math.log(y_pred / (1 - y_pred))

    def weighted_cross_entropyloss(self, y_true, y_pred):
        y_pred = self.convert_to_logits(y_pred)
        pos_weight = beta / (1 - beta)
        # tf.nn.weighted_cross_entropy_with_logits takes `labels` in TF 2.x
        loss = tf.nn.weighted_cross_entropy_with_logits(logits=y_pred,
                                                        labels=y_true,
                                                        pos_weight=pos_weight)
        return tf.reduce_mean(loss)

    def focal_loss_with_logits(self, logits, targets, alpha, gamma, y_pred):
        weight_a = alpha * (1 - y_pred) ** gamma * targets
        weight_b = (1 - alpha) * y_pred ** gamma * (1 - targets)

        return (tf.math.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(
            -logits)) * (weight_a + weight_b) + logits * weight_b

    def focal_loss(self, y_true, y_pred):
        y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(),
                                  1 - tf.keras.backend.epsilon())
        logits = tf.math.log(y_pred / (1 - y_pred))

        loss = self.focal_loss_with_logits(logits=logits, targets=y_true,
                                           alpha=alpha, gamma=gamma, y_pred=y_pred)

        return tf.reduce_mean(loss)

    def categorical_focal_loss(self, alpha=None, beta=None, gamma_f=2.):
        # factory variant of the focal loss; returns a closure usable as a Keras loss

        def loss_function(y_true, y_pred):
            # clip values to prevent division-by-zero errors
            epsilon = K.epsilon()
            y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
            cross_entropy = -y_true * K.log(y_pred)

            if beta is not None:
                beta_weight = np.array([beta, 1 - beta])
                cross_entropy = beta_weight * cross_entropy

            if alpha is not None:
                alpha_weight = np.array(alpha, dtype=np.float32)
                focal_loss = alpha_weight * K.pow(1 - y_pred, gamma_f) * cross_entropy
            else:
                focal_loss = K.pow(1 - y_pred, gamma_f) * cross_entropy

            focal_loss = K.mean(K.sum(focal_loss, axis=[-1]))
            return focal_loss

        return loss_function

    def depth_softmax(self, matrix):
        sigmoid = lambda x: 1 / (1 + K.exp(-x))
        sigmoided_matrix = sigmoid(matrix)
        softmax_matrix = sigmoided_matrix / K.sum(sigmoided_matrix, axis=0)
        return softmax_matrix

    def generalized_dice_coefficient(self, y_true, y_pred):
        smooth = 1.
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(y_true_f * y_pred_f)
        score = (2. * intersection + smooth) / (
                K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
        return score

    def dice_loss(self, y_true, y_pred):
        loss = 1 - self.generalized_dice_coefficient(y_true, y_pred)
        return loss

    def bce_dice_loss(self, y_true, y_pred):
        loss = binary_crossentropy(y_true, y_pred) + \
               self.dice_loss(y_true, y_pred)
        return loss / 2.0

    def confusion(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.clip(y_pred, 0, 1)
        y_pred_neg = 1 - y_pred_pos
        y_pos = K.clip(y_true, 0, 1)
        y_neg = 1 - y_pos
        tp = K.sum(y_pos * y_pred_pos)
        fp = K.sum(y_neg * y_pred_pos)
        fn = K.sum(y_pos * y_pred_neg)
        prec = (tp + smooth) / (tp + fp + smooth)
        recall = (tp + smooth) / (tp + fn + smooth)
        return prec, recall

    def true_positive(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pos = K.round(K.clip(y_true, 0, 1))
        tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
        return tp

    def true_negative(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pred_neg = 1 - y_pred_pos
        y_pos = K.round(K.clip(y_true, 0, 1))
        y_neg = 1 - y_pos
        tn = (K.sum(y_neg * y_pred_neg) + smooth) / (K.sum(y_neg) + smooth)
        return tn

    def tversky_index(self, y_true, y_pred):
        y_true_pos = K.flatten(y_true)
        y_pred_pos = K.flatten(y_pred)
        true_pos = K.sum(y_true_pos * y_pred_pos)
        false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
        false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
        alpha = 0.7
        return (true_pos + smooth) / (true_pos + alpha * false_neg + (
                1 - alpha) * false_pos + smooth)

    def tversky_loss(self, y_true, y_pred):
        return 1 - self.tversky_index(y_true, y_pred)

    def focal_tversky(self, y_true, y_pred):
        pt_1 = self.tversky_index(y_true, y_pred)
        gamma = 0.75
        return K.pow((1 - pt_1), gamma)

    def log_cosh_dice_loss(self, y_true, y_pred):
        x = self.dice_loss(y_true, y_pred)
        return tf.math.log((tf.exp(x) + tf.exp(-x)) / 2.0)
--------------------------------------------------------------------------------
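One way to wire these losses into training is shown below. This is a sketch only: the repository leaves the compile call commented out, and `model2` (from the model file) and the 1e-4 learning rate are assumptions, not the authors' settings.

from tensorflow.keras.optimizers import Adam

# compile with the hybrid BCE + Dice loss and track Dice/sensitivity/specificity
s = Semantic_loss_functions()
model2.compile(optimizer=Adam(learning_rate=1e-4),
               loss=s.bce_dice_loss,
               metrics=[s.dice_coef, s.sensitivity, s.specificity])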
/model:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.applications import *
import numpy as np
import cv2

layers = tf.keras.layers

def squeeze_excite_block(inputs, ratio=8):
    init = inputs
    channel_axis = -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    x = Multiply()([init, se])
    return x

def conv_block(inputs, filters):
    x = inputs

    x = Conv2D(filters, (3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters, (3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #x = squeeze_excite_block(x)

    return x

def encoder1(inputs):
    skip_connections = []

    model = VGG19(include_top=False, weights='imagenet', input_tensor=inputs)
    names = ["block1_conv2", "block2_conv2", "block3_conv4", "block4_conv4"]
    for name in names:
        skip_connections.append(model.get_layer(name).output)

    output = model.get_layer("block5_conv4").output
    return output, skip_connections

def decoder1(inputs, skip_connections):
    num_filters = [256, 128, 64, 32]
    skip_connections.reverse()
    x = inputs

    for i, f in enumerate(num_filters):
        x = UpSampling2D((2, 2), interpolation='bilinear')(x)
        x = Concatenate()([x, skip_connections[i]])
        x = conv_block(x, f)

    return x
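# Shape sketch (illustrative, assumes a 512x512x3 input): encoder1's VGG19
# skips come out at spatial sizes 512, 256, 128, 64 and the bottleneck
# (block5_conv4) at 32x32; decoder1 then doubles the resolution four times
# back to 512x512. A quick check under those assumptions:
#
#   inp = Input((512, 512, 3))
#   bottleneck, skips = encoder1(inp)
#   print(bottleneck.shape)             # (None, 32, 32, 512)
#   print([s.shape[1] for s in skips])  # [512, 256, 128, 64]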
def encoder2(inputs):
    num_filters = [32, 64, 128, 256]
    skip_connections = []
    x = inputs

    for i, f in enumerate(num_filters):
        x = conv_block(x, f)
        skip_connections.append(x)
        x = MaxPool2D((2, 2))(x)

    return x, skip_connections

def decoder2(inputs, skip_1, skip_2):
    num_filters = [256, 128, 64, 32]
    skip_2.reverse()
    x = inputs

    for i, f in enumerate(num_filters):
        x = UpSampling2D((2, 2), interpolation='bilinear')(x)
        x = Concatenate()([x, skip_1[i], skip_2[i]])
        x = conv_block(x, f)

    return x

def output_block(inputs):
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    x = Activation('sigmoid')(x)
    return x

def output_block1(inputs):
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    return x

def Upsample(tensor, size):
    """Bilinear upsampling"""
    def _upsample(x, size):
        return tf.image.resize(images=x, size=size)
    return Lambda(lambda x: _upsample(x, size), output_shape=size)(tensor)

def DDSPP(x, filter):
    shape = x.shape

    y1 = AveragePooling2D(pool_size=(shape[1], shape[2]))(x)
    y1 = Conv2D(filter, 1, padding="same")(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation("relu")(y1)
    y1 = UpSampling2D((shape[1], shape[2]), interpolation='bilinear')(y1)

    y1 = Concatenate()([x, y1])
    y2 = Conv2D(filter, 1, dilation_rate=2, padding="same", use_bias=False)(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation("relu")(y2)

    y2 = Concatenate()([x, y1, y2])
    y3 = Conv2D(filter, 3, dilation_rate=4, padding="same", use_bias=False)(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation("relu")(y3)

    y3 = Concatenate()([x, y1, y2, y3])
    y4 = Conv2D(filter, 3, dilation_rate=8, padding="same", use_bias=False)(x)
    y4 = BatchNormalization()(y4)
    y4 = Activation("relu")(y4)

    y4 = Concatenate()([x, y1, y2, y3, y4])
    y5 = Conv2D(filter, 3, dilation_rate=12, padding="same", use_bias=False)(x)
    y5 = BatchNormalization()(y5)
    y5 = Activation("relu")(y5)

    y = Concatenate()([x, y1, y2, y3, y4, y5])

    y = Conv2D(filter, 1, dilation_rate=1, padding="same", use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    return y

# vertical edge detection
sobel_x = np.array([[-1, 0, 1],
                    [-2, 0, 2],
                    [-1, 0, 1]])

def build_model1(shape):
    inputs = Input(shape)
    x, skip_1 = encoder1(inputs)
    x = DDSPP(x, 64)
    x = decoder1(x, skip_1)
    outputs1 = output_block(x)

    model = Model(inputs, outputs1)
    #model.compile(optimizer=Adam(learning_rate=learning_rate), loss=[], metrics=['accuracy'])

    return model
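# Sketch: build_model1 above is the single-pass network; build_model2 below
# gates the input with the first network's predicted mask, runs a second
# encoder/decoder that reuses the VGG19 skips, and concatenates both
# predictions. A quick check of the single-pass output shape (assumed input):
#
#   m1 = build_model1((512, 512, 3))
#   print(m1.output_shape)  # (None, 512, 512, 1)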
def build_model2(shape):
    inputs = Input(shape)
    x, skip_1 = encoder1(inputs)
    x = DDSPP(x, 64)
    x = decoder1(x, skip_1)
    outputs1 = output_block1(x)

    # gate the input image with the first network's mask before the second pass
    x = inputs * outputs1

    x, skip_2 = encoder2(x)
    x = DDSPP(x, 64)
    x = decoder2(x, skip_1, skip_2)
    outputs2 = output_block(x)
    #filtered_image1 = tf.image.sobel_edges(outputs2)

    outputs = Concatenate()([outputs1, outputs2])

    model = Model(inputs, outputs)
    #model.compile(optimizer=Adam(learning_rate=learning_rate), loss=[], metrics=['accuracy'])

    return model

model1 = build_model1((512, 512, 3))
model2 = build_model2((512, 512, 3))

model2.summary()

# train_steps = (len(train_x)//batch_size)
# valid_steps = (len(valid_x)//batch_size)

# if len(train_x) % batch_size != 0:
#     train_steps += 1

# if len(valid_x) % batch_size != 0:
#     valid_steps += 1

# model.fit(generator,
#           epochs=epochs,
#           validation_data=generator1,
#           steps_per_epoch=train_steps,
#           validation_steps=valid_steps,
#           shuffle=False)

# steps = len(test_img)//BS
# preds_test = model.predict(test_img, steps=steps, verbose=1)
#next(preds_test)[0].shape
#count = 0
#for img in preds_test:
#    print('image', img.shape)
#    filtered_image1 = tf.image.sobel_edges(img[0])
#    filtered_image2 = cv2.Sobel(src=filtered_image1, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5)
#    plt.imshow(filtered_image1[0])
#    count += 1
#    if count > 1:
#        break
#
--------------------------------------------------------------------------------