├── LICENSE
├── README.md
├── _config.yml
├── data_processing.py
├── main.py
├── models.py
└── training_fn.py

/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 Anki0909
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | The tables below give the accuracy of each model on the BreakHis dataset at each magnification level, reported to three decimal places. The values in brackets are the corresponding F1 scores.
2 | 
3 | **CNN models with a fully connected classification head**
4 | 
5 | | Magnification \ CNN model | VGG-16 | VGG-19 | Xception | ResNet-50 | Inception-v3 | Inception-ResNet-v2 |
6 | | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
7 | | 40X | 0.802 (0.803) | 0.652 (0.685) | 0.831 (0.831) | 0.859 (0.858) | 0.853 (0.858) | 0.818 (0.813) |
8 | | 100X | 0.867 (0.877) | 0.709 (0.708) | 0.786 (0.794) | 0.911 (0.917) | 0.834 (0.827) | 0.845 (0.837) |
9 | | 200X | 0.841 (0.839) | 0.749 (0.756) | 0.812 (0.813) | 0.857 (0.853) | 0.799 (0.806) | 0.854 (0.859) |
10 | | 400X | 0.871 (0.869) | 0.799 (0.799) | 0.761 (0.758) | 0.903 (0.907) | 0.799 (0.796) | 0.842 (0.844) |
11 | 
12 | **Cross-validation scores for a logistic regression model trained on features extracted from the CNN models**
13 | 
14 | | Magnification \ CNN model | VGG-16 | VGG-19 | Xception | ResNet-50 | Inception-v3 | Inception-ResNet-v2 |
15 | | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
16 | | 40X | 0.685 (0.675) | 0.565 (0.547) | 0.858 (0.856) | 0.908 (0.906) | 0.839 (0.836) | 0.854 (0.850) |
17 | | 100X | 0.732 (0.725) | 0.633 (0.623) | 0.840 (0.837) | 0.902 (0.900) | 0.826 (0.822) | 0.863 (0.862) |
18 | | 200X | 0.864 (0.862) | 0.725 (0.718) | 0.940 (0.954) | 0.959 (0.958) | 0.919 (0.917) | 0.961 (0.960) |
19 | | 400X | 0.952 (0.952) | 0.876 (0.874) | 0.982 (0.982) | 0.983 (0.983) | 0.983 (0.983) | 0.982 (0.982) |
20 | 
21 | **Cross-validation scores for a linear support vector machine trained on features extracted from the CNN models**
22 | 
23 | | Magnification \ CNN model | VGG-16 | VGG-19 | Xception | ResNet-50 | Inception-v3 | Inception-ResNet-v2 |
24 | | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
25 | | 40X | 0.644 (0.640) | 0.543 (0.530) | 0.857 (0.856) | 0.905 (0.905) | 0.855 (0.853) | 0.851 (0.849) |
26 | | 100X | 0.711 (0.704) | 0.603 (0.595) | 0.830 (0.829) | 0.895 (0.894) | 0.826 (0.822) | 0.864 (0.863) |
27 | | 200X | 0.848 (0.847) | 0.700 (0.693) | 0.943 (0.942) | 0.961 (0.961) | 0.916 (0.916) | 0.958 (0.958) |
28 | | 400X | 0.950 (0.949) | 0.868 (0.867) | 0.983 (0.983) | 0.983 (0.983) | 0.983 (0.983) | 0.980 (0.980) |
29 | 
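30 | **Quick start**
31 | 
32 | Install the dependencies (`numpy`, `pandas`, `matplotlib`, `scikit-learn`, `scikit-image`, and `keras` with a TensorFlow backend) and place the BreakHis images under `../input/breakhist_dataset/BreakHist_Dataset/<magnification>/<Benign|Malignant>/<subtype>/`, the layout that `data_split` walks. Below is a condensed sketch of the training stage, not a packaged CLI; see `main.py` for the full pipeline, including the feature-extraction and LR/SVM stage:
33 | 
34 | ```python
35 | from training_fn import compile_n_fit
36 | 
37 | # ImageNet weights only on the first magnification; later passes resume from saved weights.
38 | for mag in ['40X', '100X', '200X', '400X']:
39 |     compile_n_fit(validation_percent=0.15, testing_percent=0.15,
40 |                   image_height=115, image_width=175, n_channels=3, dropout=0.3,
41 |                   load_wt="Yes" if mag == '40X' else "No",
42 |                   model_name='resnet_model', magnification=mag)
43 | ```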
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-time-machine
--------------------------------------------------------------------------------
/data_processing.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import numpy as np
4 | import matplotlib.image as mpimg
5 | from skimage.transform import resize
6 | 
7 | # Class names follow the BreakHis directory layout: four benign and four malignant subtypes.
8 | benign_list = ['adenosis', 'fibroadenoma', 'phyllodes_tumor', 'tubular_adenoma']
9 | malignant_list = ['ductal_carcinoma', 'lobular_carcinoma', 'mucinous_carcinoma', 'papillary_carcinoma']
10 | cancer_list = benign_list + malignant_list
11 | 
12 | def dense_to_one_hot(labels_dense, num_classes):
13 |     """Convert integer class labels to one-hot vectors."""
14 |     num_labels = labels_dense.shape[0]
15 |     index_offset = np.arange(num_labels) * num_classes
16 |     labels_one_hot = np.zeros((num_labels, num_classes))
17 |     labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
18 |     return labels_one_hot
19 | 
20 | def data_split(magnification='40X', validation_percent=0.15, testing_percent=0.15, encoding="Yes"):
21 |     """Walk the BreakHis tree for one magnification and return train/validation/test arrays.
22 |     With encoding="Yes" the labels are one-hot encoded; with encoding="No" they stay as the
23 |     integer class indices that the scikit-learn classifiers in main.py expect."""
24 |     training_images, training_labels = [], []
25 |     validation_images, validation_labels = [], []
26 |     testing_images, testing_labels = [], []
27 |     for root, dirnames, filenames in os.walk("../input/breakhist_dataset/BreakHist_Dataset/" + magnification):
28 |         if not filenames:
29 |             continue
30 |         # The leaf directory name is the tumour subtype, e.g. .../40X/Benign/adenosis
31 |         name = os.path.basename(root)
32 |         total_images = len(filenames)
33 |         validation_size = int(total_images * validation_percent)
34 |         testing_size = int(total_images * testing_percent)
35 |         training_size = total_images - (validation_size + testing_size)
36 |         print(name, magnification, training_size, validation_size, testing_size, total_images)
37 |         for num, names in enumerate(filenames):
38 |             filepath = os.path.join(root, names)
39 |             image = mpimg.imread(filepath)
40 |             image_resize = resize(image, (115, 175), mode='constant')
41 |             if num < training_size:
42 |                 training_images.append(image_resize)
43 |                 training_labels.append(cancer_list.index(name))
44 |             elif num < training_size + validation_size:
45 |                 validation_images.append(image_resize)
46 |                 validation_labels.append(cancer_list.index(name))
47 |             else:
48 |                 testing_images.append(image_resize)
49 |                 testing_labels.append(cancer_list.index(name))
50 | 
51 |     training_images = np.asarray(training_images)
52 |     validation_images = np.asarray(validation_images)
53 |     testing_images = np.asarray(testing_images)
54 | 
55 |     training_labels = np.asarray(training_labels)
56 |     validation_labels = np.asarray(validation_labels)
57 |     testing_labels = np.asarray(testing_labels)
58 | 
59 |     if encoding == "Yes":
60 |         labels_count = np.unique(training_labels).shape[0]
61 |         training_labels = dense_to_one_hot(training_labels, labels_count).astype(np.float32)
62 |         validation_labels = dense_to_one_hot(validation_labels, labels_count).astype(np.float32)
63 |         testing_labels = dense_to_one_hot(testing_labels, labels_count).astype(np.float32)
64 |     print(training_images.shape[0], validation_images.shape[0], testing_images.shape[0])
65 | 
66 |     return training_images, training_labels, validation_images, validation_labels, testing_images, testing_labels
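67 | 
68 | # A small usage sketch (an addition, not part of the original pipeline): running this module
69 | # directly prints the array shapes for one magnification, assuming the BreakHis directory
70 | # layout described above is available.
71 | if __name__ == "__main__":
72 |     tr_x, tr_y, va_x, va_y, te_x, te_y = data_split(magnification='40X')
73 |     print('train:', tr_x.shape, tr_y.shape)
74 |     print('validation:', va_x.shape, va_y.shape)
75 |     print('test:', te_x.shape, te_y.shape)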
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import os
4 | import matplotlib.pyplot as plt
5 | import matplotlib.image as mpimg
6 | from skimage.transform import resize
7 | from data_processing import *
8 | from models import *
9 | from training_fn import *
10 | from keras.layers import *
11 | from keras.models import *
12 | from keras import layers
13 | from keras import backend as K
14 | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
15 | from keras.optimizers import Adam
16 | from sklearn.linear_model import LogisticRegression
17 | from sklearn.svm import SVC
18 | from sklearn.ensemble import ExtraTreesClassifier
19 | from sklearn.feature_selection import SelectFromModel
20 | from sklearn.model_selection import cross_validate
21 | from sklearn.metrics import accuracy_score, f1_score
22 | 
23 | magnification_list = ['40X', '100X', '200X', '400X']
24 | 
25 | models = [vgg16_model, vgg19_model, xception_model, resnet_model, inception_model, inception_resnet_model]
26 | 
27 | model_num = 3  # index into models: resnet_model
28 | name = models[model_num].__name__
29 | 
30 | # Train one combined model across all four magnifications. ImageNet weights are loaded only on
31 | # the first pass; later passes resume from the weights that compile_n_fit saves to disk.
32 | for iteration, types in enumerate(magnification_list):
33 |     load_wt = "Yes" if iteration == 0 else "No"
34 |     compile_n_fit(validation_percent=0.15, testing_percent=0.15,
35 |                   image_height=115, image_width=175, n_channels=3, dropout=0.3,
36 |                   load_wt=load_wt, model_name=name, magnification=types)
37 | 
38 | # Rebuild the same architecture and restore the trained weights so the CNN can be used as a
39 | # fixed feature extractor for the classical classifiers below.
40 | dropout = 0.3
41 | base_model = models[model_num](image_height=115, image_width=175, n_channels=3, load_wt='No')
42 | x = base_model.output
43 | x = Dense(2048, activation='relu')(x)
44 | x = Dropout(dropout)(x)
45 | x = Dense(512, activation='relu')(x)
46 | x = Dropout(dropout)(x)
47 | x = Dense(128, activation='relu')(x)
48 | x = Dropout(dropout)(x)
49 | x = Dense(32, activation='relu')(x)
50 | out = Dense(8, activation='softmax')(x)
51 | 
52 | model = Model(base_model.input, out)
53 | model.load_weights(name + '_weight_1.h5')
54 | 
55 | # Features are taken from the base CNN's last Flatten/GlobalPooling layer.
56 | layer_name = None
57 | for layer in model.layers:
58 |     if layer.name.startswith('flatten') or layer.name.startswith('global'):
59 |         layer_name = layer.name
60 |         break
61 | 
62 | model_fe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
63 | 
64 | for types in magnification_list:
65 |     # encoding="No" keeps the labels as integer class indices, which scikit-learn expects.
66 |     training_images, training_labels, validation_images, validation_labels, testing_images, testing_labels = data_split(magnification=types, validation_percent=0.15, testing_percent=0.15, encoding="No")
67 | 
68 |     training_features = model_fe.predict(training_images)
69 |     validation_features = model_fe.predict(validation_images)
70 |     testing_features = model_fe.predict(testing_images)
71 | 
72 |     lr = LogisticRegression()
73 |     svm_l = SVC(kernel='linear')
74 | 
75 |     # Tree-based feature selection. The reduced feature sets are computed here, but the
76 |     # cross-validation scores reported below use the full feature set.
77 |     fs_model = SelectFromModel(ExtraTreesClassifier(n_estimators=50), prefit=False)
78 |     training_features_new = fs_model.fit_transform(training_features, training_labels)
79 |     validation_features_new = fs_model.transform(validation_features)
80 |     testing_features_new = fs_model.transform(testing_features)
81 | 
82 |     classifier_list = [lr, svm_l]
83 |     classifier_label = ['Logistic Regression', 'Linear SVM']
84 | 
85 |     scoring = ['accuracy', 'f1_weighted']
86 |     print('Cross validation:')
87 |     for classifier, label in zip(classifier_list, classifier_label):
88 |         scores = cross_validate(estimator=classifier, X=training_features, y=training_labels, cv=10, scoring=scoring)
89 |         print("[%s]\nAccuracy: %0.3f\tF1 Weighted: %0.3f"
90 |               % (label, scores['test_accuracy'].mean(), scores['test_f1_weighted'].mean()))
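91 | 
92 | # A follow-up sketch (an addition, not in the original script): fit both classifiers on the
93 | # training features of the last magnification processed and score them on the held-out test
94 | # split, using the metrics already imported above.
95 | print('Held-out test scores (%s):' % types)
96 | for classifier, label in zip(classifier_list, classifier_label):
97 |     classifier.fit(training_features, training_labels)
98 |     predictions = classifier.predict(testing_features)
99 |     print("[%s]\nAccuracy: %0.3f\tF1 Weighted: %0.3f"
100 |           % (label, accuracy_score(testing_labels, predictions),
101 |              f1_score(testing_labels, predictions, average='weighted')))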
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | from keras.layers import *
2 | from keras.models import Model, Sequential
3 | from keras import layers, backend as K
4 | from keras.utils.data_utils import get_file
5 | 
6 | def vgg16_model(image_height, image_width, n_channels, load_wt = "Yes"):
7 |     WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
8 |     weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models')
9 |     model = Sequential()
10 |     model.add(Conv2D(64,(3,3),padding='same',activation='relu',input_shape=(image_height, image_width, n_channels)))
11 |     model.add(Conv2D(64,(3,3),padding='same',activation='relu'))
12 |     model.add(MaxPooling2D((2,2)))
13 | 
14 |     model.add(Conv2D(128,(3,3),padding='same',activation='relu'))
15 |     model.add(Conv2D(128,(3,3),padding='same',activation='relu'))
16 |     model.add(MaxPooling2D((2,2)))
17 | 
18 |     model.add(Conv2D(256,(3,3),padding='same',activation='relu'))
19 |     model.add(Conv2D(256,(3,3),padding='same',activation='relu'))
20 |     model.add(Conv2D(256,(3,3),padding='same',activation='relu'))
21 |     model.add(MaxPooling2D((2,2)))
22 | 
23 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
24 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
25 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
26 |     model.add(MaxPooling2D((2,2)))
27 | 
28 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
29 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
30 |     model.add(Conv2D(512,(3,3),padding='same',activation='relu'))
31 |     model.add(MaxPooling2D((2,2)))
32 |     model.add(Flatten())
33 | 
34 |     if load_wt == "Yes":
35 |         model.load_weights(weights_path)
36 |     return model
37 | def vgg19_model(image_height, image_width, n_channels, load_wt = "Yes"):
38 |     WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
39 |     weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models')
40 | 
41 |     img_input = Input(shape=(image_height, image_width, n_channels))
42 | 
43 |     x = Conv2D(64,(3,3),activation='relu',padding='same',name='block1_conv1')(img_input)
44 |     x = Conv2D(64,(3,3),activation='relu',padding='same',name='block1_conv2')(x)
45 |     x = MaxPooling2D((2,2),strides=(2,2),name='block1_pool')(x)
46 | 
47 |     x = 
Conv2D(128,(3,3),activation='relu',padding='same',name='block2_conv1')(x) 48 | x = Conv2D(128,(3,3),activation='relu',padding='same',name='block2_conv2')(x) 49 | x = MaxPooling2D((2,2),strides=(2,2),name='block2_pool')(x) 50 | 51 | x = Conv2D(256,(3,3),activation='relu',padding='same',name='block3_conv1')(x) 52 | x = Conv2D(256,(3,3),activation='relu',padding='same',name='block3_conv2')(x) 53 | x = Conv2D(256,(3,3),activation='relu',padding='same',name='block3_conv3')(x) 54 | x = Conv2D(256,(3,3),activation='relu',padding='same',name='block3_conv4')(x) 55 | x = MaxPooling2D((2,2),strides=(2,2),name='block3_pool')(x) 56 | 57 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block4_conv1')(x) 58 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block4_conv2')(x) 59 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block4_conv3')(x) 60 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block4_conv4')(x) 61 | x = MaxPooling2D((2,2),strides=(2,2),name='block4_pool')(x) 62 | 63 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block5_conv1')(x) 64 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block5_conv2')(x) 65 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block5_conv3')(x) 66 | x = Conv2D(512,(3,3),activation='relu',padding='same',name='block5_conv4')(x) 67 | x = MaxPooling2D((2,2),strides=(2,2),name='block5_pool')(x) 68 | x = Flatten()(x) 69 | 70 | inp = img_input 71 | 72 | model = Model(inp, x, name='vgg19') 73 | 74 | if load_wt == "Yes": 75 | model.load_weights(weights_path) 76 | 77 | return model 78 | 79 | def xception_model(image_height, image_width, n_channels, load_wt = "Yes"): 80 | TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5' 81 | weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models') 82 | 83 | input_layer = Input((image_height, image_width, n_channels)) 84 | x = Conv2D(32,(3,3),strides=(2,2),use_bias=False,name='block1_conv1')(input_layer) 85 | x = BatchNormalization(name='block1_conv1_bn')(x) 86 | x = Activation('relu',name='block1_conv1_act')(x) 87 | x = Conv2D(64,(3,3),use_bias=False,name='block1_conv2')(x) 88 | x = BatchNormalization(name='block1_conv2_bn')(x) 89 | x = Activation('relu',name='block1_conv2_act')(x) 90 | 91 | residual = Conv2D(128,(1,1),strides=(2,2),padding='same',use_bias=False)(x) 92 | residual = BatchNormalization()(residual) 93 | 94 | x = SeparableConv2D(128,(3,3),padding='same',use_bias=False,name='block2_sepconv1')(x) 95 | x = BatchNormalization(name='block2_sepconv1_bn')(x) 96 | x = Activation('relu',name='block_sepconv1_act')(x) 97 | x = SeparableConv2D(128,(3,3),padding='same',use_bias=False,name='block2_sepconv2')(x) 98 | x = BatchNormalization(name='block2_sepconv2_bn')(x) 99 | 100 | x = MaxPooling2D((3,3),strides=(2,2),padding='same',name='block2_pool')(x) 101 | x = layers.add([x, residual]) 102 | 103 | residual = Conv2D(256,(1,1),strides=(2,2),padding='same',use_bias=False)(x) 104 | residual = BatchNormalization()(residual) 105 | 106 | x = Activation('relu',name='block3_sepconv1_act')(x) 107 | x = SeparableConv2D(256,(3,3),padding='same',use_bias=False,name='block3_sepconv1')(x) 108 | x = BatchNormalization(name='block3_sepconv1_bn')(x) 109 | x = Activation('relu',name='block3_sepconv2_act')(x) 110 | x = SeparableConv2D(256,(3,3),padding='same',use_bias=False,name='block3_sepconv2')(x) 
111 | x = BatchNormalization(name='block3_sepconv2_bn')(x) 112 | 113 | x = MaxPooling2D((3,3),strides=(2,2),padding='same',name='block3_pool')(x) 114 | x = layers.add([x,residual]) 115 | 116 | residual = Conv2D(728,(1,1),strides=(2,2),use_bias=False,padding='same')(x) 117 | residual = BatchNormalization()(residual) 118 | 119 | x = Activation('relu', name='block4_sepconv1_act')(x) 120 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False, name='block4_sepconv1')(x) 121 | x = BatchNormalization(name='block4_sepconv1_bn')(x) 122 | x = Activation('relu',name='block4_sepconv2_act')(x) 123 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False,name='block4_sepconv2')(x) 124 | x = BatchNormalization(name='block4_sepconv2_bn')(x) 125 | 126 | x = MaxPooling2D((3,3),strides=(2,2),padding='same',name='block4_pool')(x) 127 | x = layers.add([x,residual]) 128 | 129 | for i in range(8): 130 | residual = x 131 | prefix = 'block' + str(i+5) 132 | 133 | x = Activation('relu', name=prefix+'_sepconv1_act')(x) 134 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False,name=prefix+'_sepconv1')(x) 135 | x = BatchNormalization(name=prefix+'_sepconv1_bn')(x) 136 | x = Activation('relu',name=prefix+'_sepconv2_act')(x) 137 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False,name=prefix+'_sepconv2')(x) 138 | x = BatchNormalization(name=prefix+'_sepconv2_bn')(x) 139 | x = Activation('relu',name=prefix+'_sepconv3_act')(x) 140 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False,name=prefix+'_sepconv3')(x) 141 | x = BatchNormalization(name=prefix+'_sepconv3_bn')(x) 142 | 143 | x = layers.add([x,residual]) 144 | 145 | residual = Conv2D(1024,(1,1),strides=(2,2),padding='same',use_bias=False)(x) 146 | residual = BatchNormalization()(residual) 147 | 148 | x = Activation('relu', name='block13_sepconv1_act')(x) 149 | x = SeparableConv2D(728,(3,3),padding='same',use_bias=False,name='block13_sepconv1')(x) 150 | x = BatchNormalization(name='block13_sepconv1_bn')(x) 151 | x = Activation('relu',name='block13_sepconv2_act')(x) 152 | x = SeparableConv2D(1024,(3,3),padding='same',use_bias=False,name='block13_sepconv2')(x) 153 | x = BatchNormalization(name='block13_sepconv2_bn')(x) 154 | 155 | x = MaxPooling2D((3,3),strides=(2,2),padding='same',name='block13_pool')(x) 156 | x = layers.add([x,residual]) 157 | 158 | x = SeparableConv2D(1536,(3,3),padding='same',use_bias=False,name='block14_sepconv1')(x) 159 | x = BatchNormalization(name='block14_sepconv1_bn')(x) 160 | x = Activation('relu',name='block14_sepconv1_act')(x) 161 | 162 | x = SeparableConv2D(2048,(3,3),padding='same',use_bias=False,name='block14_sepconv2')(x) 163 | x = BatchNormalization(name='block14_sepconv2_bn')(x) 164 | x = Activation('relu',name='block14_sepconv2_act')(x) 165 | 166 | x = GlobalAveragePooling2D()(x) 167 | 168 | model = Model(input_layer,x,name='xception') 169 | 170 | if load_wt == "Yes": 171 | model.load_weights(weights_path) 172 | 173 | return model 174 | 175 | def resnet_model(image_height, image_width, n_channels, load_wt = "Yes"): 176 | WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' 177 | weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', md5_hash='a268eb855778b3df3c7506639542a6af') 178 | 179 | def identity_block(input_tensor,kernel_size,filters,stage,block): 180 | filters1, filters2, filters3 = filters 181 | conv_name_base = 
'res'+str(stage)+block+'_branch' 182 | bn_name_base = 'bn'+str(stage)+block+'_branch' 183 | 184 | x = Conv2D(filters1,(1,1),name=conv_name_base+'2a')(input_tensor) 185 | x = BatchNormalization(axis=3,name=bn_name_base+'2a')(x) 186 | x = Activation('relu')(x) 187 | 188 | x = Conv2D(filters2,kernel_size,padding='same',name=conv_name_base+'2b')(x) 189 | x = BatchNormalization(axis=3,name=bn_name_base+'2b')(x) 190 | x = Activation('relu')(x) 191 | 192 | x = Conv2D(filters3,(1,1),name=conv_name_base+'2c')(x) 193 | x = BatchNormalization(axis=3,name=bn_name_base+'2c')(x) 194 | 195 | x = layers.add([x,input_tensor]) 196 | x = Activation('relu')(x) 197 | 198 | return x 199 | 200 | def conv_block(input_tensor,kernel_size,filters,stage,block,strides=(2,2)): 201 | filters1,filters2,filters3 = filters 202 | conv_name_base = 'res'+str(stage)+block+'_branch' 203 | bn_name_base = 'bn'+str(stage)+block+'_branch' 204 | 205 | x = Conv2D(filters1,(1,1),strides=strides,name=conv_name_base+'2a')(input_tensor) 206 | x = BatchNormalization(axis=3,name=bn_name_base+'2a')(x) 207 | x = Activation('relu')(x) 208 | 209 | x = Conv2D(filters2,kernel_size,padding='same',name=conv_name_base+'2b')(x) 210 | x = BatchNormalization(axis=3,name=bn_name_base+'2b')(x) 211 | x = Activation('relu')(x) 212 | 213 | x = Conv2D(filters3,(1,1),name=conv_name_base+'2c')(x) 214 | x = BatchNormalization(axis=3,name=bn_name_base+'2c')(x) 215 | 216 | shortcut = Conv2D(filters3, (1, 1), strides=strides,name=conv_name_base + '1')(input_tensor) 217 | shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(shortcut) 218 | 219 | x = layers.add([x, shortcut]) 220 | x = Activation('relu')(x) 221 | return x 222 | 223 | img_input = Input(shape=(image_height, image_width, n_channels)) 224 | img_padding = ZeroPadding2D(((41,41),(11,11)))(img_input) 225 | x = ZeroPadding2D((3,3))(img_padding) 226 | x = Conv2D(64,(7,7),strides=(2,2),name='conv1')(x) 227 | x = BatchNormalization(axis=3,name='bn_conv1')(x) 228 | x = Activation('relu')(x) 229 | x = MaxPooling2D((3,3),strides=(2,2))(x) 230 | 231 | x = conv_block(x, 3, [64,64,256],stage=2,block='a',strides=(1,1)) 232 | x = identity_block(x,3,[64,64,256],stage=2,block='b') 233 | x = identity_block(x,3,[64,64,256],stage=2,block='c') 234 | 235 | x = conv_block(x,3,[128,128,512],stage=3,block='a') 236 | x = identity_block(x,3,[128,128,512],stage=3,block='b') 237 | x = identity_block(x,3,[128,128,512],stage=3,block='c') 238 | x = identity_block(x,3,[128,128,512],stage=3,block='d') 239 | 240 | x = conv_block(x,3,[256,256,1024],stage=4,block='a') 241 | x = identity_block(x,3,[256,256,1024],stage=4,block='b') 242 | x = identity_block(x,3,[256,256,1024],stage=4,block='c') 243 | x = identity_block(x,3,[256,256,1024],stage=4,block='d') 244 | x = identity_block(x,3,[256,256,1024],stage=4,block='e') 245 | x = identity_block(x,3,[256,256,1024],stage=4,block='f') 246 | 247 | x = conv_block(x,3,[512,512,2048],stage=5,block='a') 248 | x = identity_block(x,3,[512,512,2048],stage=5,block='b') 249 | x = identity_block(x,3,[512,512,2048],stage=5,block='c') 250 | 251 | x = AveragePooling2D((7,7),name='avg_pool')(x) 252 | x = Flatten()(x) 253 | 254 | inp = img_input 255 | 256 | model = Model(inp,x,name='resnet50') 257 | 258 | if load_wt == "Yes": 259 | model.load_weights(weights_path) 260 | 261 | return model 262 | 263 | def inception_model(image_height, image_width, n_channels, load_wt = "Yes"): 264 | WEIGHTS_PATH_NO_TOP = 
'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' 265 | weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', md5_hash='bcbd6486424b2319ff4ef7d526e38f63') 266 | 267 | def conv2d_bn(x,filters,num_row,num_col,padding='same',strides=(1,1),name=None): 268 | if name is not None: 269 | bn_name = name+'_bn' 270 | conv_name = name+'_conv' 271 | else: 272 | bn_name = None 273 | conv_name = None 274 | x = Conv2D(filters,(num_row,num_col),strides=strides,padding=padding,use_bias=False,name=conv_name)(x) 275 | x = BatchNormalization(axis=3,scale=False,name=bn_name)(x) 276 | x = Activation('relu',name=name)(x) 277 | 278 | return x 279 | 280 | channel_axis = 3 281 | img_input = Input(shape=(image_height,image_width,n_channels)) 282 | zero_pad = ZeroPadding2D(((12,12),(0,0)))(img_input) 283 | 284 | x = conv2d_bn(zero_pad, 32, 3, 3, strides=(2, 2), padding='valid') 285 | x = conv2d_bn(x, 32, 3, 3, padding='valid') 286 | x = conv2d_bn(x, 64, 3, 3) 287 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 288 | 289 | x = conv2d_bn(x, 80, 1, 1, padding='valid') 290 | x = conv2d_bn(x, 192, 3, 3, padding='valid') 291 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 292 | 293 | # mixed 0, 1, 2: 35 x 35 x 256 294 | branch1x1 = conv2d_bn(x, 64, 1, 1) 295 | 296 | branch5x5 = conv2d_bn(x, 48, 1, 1) 297 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 298 | 299 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 300 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 301 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 302 | 303 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 304 | branch_pool = conv2d_bn(branch_pool, 32, 1, 1) 305 | x = layers.concatenate( 306 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 307 | axis=channel_axis, 308 | name='mixed0') 309 | 310 | # mixed 1: 35 x 35 x 256 311 | branch1x1 = conv2d_bn(x, 64, 1, 1) 312 | 313 | branch5x5 = conv2d_bn(x, 48, 1, 1) 314 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 315 | 316 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 317 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 318 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 319 | 320 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 321 | branch_pool = conv2d_bn(branch_pool, 64, 1, 1) 322 | x = layers.concatenate( 323 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 324 | axis=channel_axis, 325 | name='mixed1') 326 | 327 | # mixed 2: 35 x 35 x 256 328 | branch1x1 = conv2d_bn(x, 64, 1, 1) 329 | 330 | branch5x5 = conv2d_bn(x, 48, 1, 1) 331 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 332 | 333 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 334 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 335 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 336 | 337 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 338 | branch_pool = conv2d_bn(branch_pool, 64, 1, 1) 339 | x = layers.concatenate( 340 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 341 | axis=channel_axis, 342 | name='mixed2') 343 | 344 | # mixed 3: 17 x 17 x 768 345 | branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid') 346 | 347 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 348 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 349 | branch3x3dbl = conv2d_bn( 350 | branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid') 351 | 352 | branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x) 353 | x = layers.concatenate( 354 | 
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3') 355 | 356 | # mixed 4: 17 x 17 x 768 357 | branch1x1 = conv2d_bn(x, 192, 1, 1) 358 | 359 | branch7x7 = conv2d_bn(x, 128, 1, 1) 360 | branch7x7 = conv2d_bn(branch7x7, 128, 1, 7) 361 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 362 | 363 | branch7x7dbl = conv2d_bn(x, 128, 1, 1) 364 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) 365 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7) 366 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) 367 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 368 | 369 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 370 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 371 | x = layers.concatenate( 372 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 373 | axis=channel_axis, 374 | name='mixed4') 375 | 376 | # mixed 5, 6: 17 x 17 x 768 377 | for i in range(2): 378 | branch1x1 = conv2d_bn(x, 192, 1, 1) 379 | 380 | branch7x7 = conv2d_bn(x, 160, 1, 1) 381 | branch7x7 = conv2d_bn(branch7x7, 160, 1, 7) 382 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 383 | 384 | branch7x7dbl = conv2d_bn(x, 160, 1, 1) 385 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) 386 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7) 387 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) 388 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 389 | 390 | branch_pool = AveragePooling2D( 391 | (3, 3), strides=(1, 1), padding='same')(x) 392 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 393 | x = layers.concatenate( 394 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 395 | axis=channel_axis, 396 | name='mixed' + str(5 + i)) 397 | 398 | # mixed 7: 17 x 17 x 768 399 | branch1x1 = conv2d_bn(x, 192, 1, 1) 400 | 401 | branch7x7 = conv2d_bn(x, 192, 1, 1) 402 | branch7x7 = conv2d_bn(branch7x7, 192, 1, 7) 403 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 404 | 405 | branch7x7dbl = conv2d_bn(x, 192, 1, 1) 406 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) 407 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 408 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) 409 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 410 | 411 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 412 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 413 | x = layers.concatenate( 414 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 415 | axis=channel_axis, 416 | name='mixed7') 417 | 418 | # mixed 8: 8 x 8 x 1280 419 | branch3x3 = conv2d_bn(x, 192, 1, 1) 420 | branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, 421 | strides=(2, 2), padding='valid') 422 | 423 | branch7x7x3 = conv2d_bn(x, 192, 1, 1) 424 | branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7) 425 | branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1) 426 | branch7x7x3 = conv2d_bn( 427 | branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid') 428 | 429 | branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x) 430 | x = layers.concatenate( 431 | [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8') 432 | 433 | # mixed 9: 8 x 8 x 2048 434 | for i in range(2): 435 | branch1x1 = conv2d_bn(x, 320, 1, 1) 436 | 437 | branch3x3 = conv2d_bn(x, 384, 1, 1) 438 | branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3) 439 | branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1) 440 | branch3x3 = layers.concatenate( 441 | [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i)) 442 | 443 | branch3x3dbl = conv2d_bn(x, 448, 1, 1) 444 | branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3) 445 | branch3x3dbl_1 = 
conv2d_bn(branch3x3dbl, 384, 1, 3) 446 | branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1) 447 | branch3x3dbl = layers.concatenate( 448 | [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis) 449 | 450 | branch_pool = AveragePooling2D( 451 | (3, 3), strides=(1, 1), padding='same')(x) 452 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 453 | x = layers.concatenate( 454 | [branch1x1, branch3x3, branch3x3dbl, branch_pool], 455 | axis=channel_axis, 456 | name='mixed' + str(9 + i)) 457 | 458 | x = GlobalMaxPooling2D()(x) 459 | 460 | inp = img_input 461 | 462 | model = Model(inp,x,name='inception_v3') 463 | 464 | if load_wt == "Yes": 465 | model.load_weights(weights_path) 466 | 467 | return model 468 | 469 | def inception_resnet_model(image_height, image_width, n_channels, load_wt = "Yes"): 470 | BASE_WEIGHT_URL = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.7/' 471 | weights_filename = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5' 472 | weights_path = get_file(weights_filename, BASE_WEIGHT_URL + weights_filename, cache_subdir='models',md5_hash='d19885ff4a710c122648d3b5c3b684e4') 473 | 474 | def conv2d_bn(x,filters,kernel_size,strides=1,padding='same',activation='relu',use_bias=False,name=None): 475 | x = Conv2D(filters,kernel_size,strides=strides,padding=padding,use_bias=use_bias,name=name)(x) 476 | if not use_bias: 477 | bn_axis = 3 478 | bn_name = None if name is None else name + '_bn' 479 | x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) 480 | if activation is not None: 481 | ac_name = None if name is None else name + '_ac' 482 | x = Activation(activation, name=ac_name)(x) 483 | return x 484 | 485 | def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): 486 | 487 | if block_type == 'block35': 488 | branch_0 = conv2d_bn(x, 32, 1) 489 | branch_1 = conv2d_bn(x, 32, 1) 490 | branch_1 = conv2d_bn(branch_1, 32, 3) 491 | branch_2 = conv2d_bn(x, 32, 1) 492 | branch_2 = conv2d_bn(branch_2, 48, 3) 493 | branch_2 = conv2d_bn(branch_2, 64, 3) 494 | branches = [branch_0, branch_1, branch_2] 495 | elif block_type == 'block17': 496 | branch_0 = conv2d_bn(x, 192, 1) 497 | branch_1 = conv2d_bn(x, 128, 1) 498 | branch_1 = conv2d_bn(branch_1, 160, [1, 7]) 499 | branch_1 = conv2d_bn(branch_1, 192, [7, 1]) 500 | branches = [branch_0, branch_1] 501 | elif block_type == 'block8': 502 | branch_0 = conv2d_bn(x, 192, 1) 503 | branch_1 = conv2d_bn(x, 192, 1) 504 | branch_1 = conv2d_bn(branch_1, 224, [1, 3]) 505 | branch_1 = conv2d_bn(branch_1, 256, [3, 1]) 506 | branches = [branch_0, branch_1] 507 | else: 508 | raise ValueError('Unknown Inception-ResNet block type. 
' 509 | 'Expects "block35", "block17" or "block8", ' 510 | 'but got: ' + str(block_type)) 511 | 512 | block_name = block_type + '_' + str(block_idx) 513 | channel_axis = 3 514 | mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches) 515 | up = conv2d_bn(mixed,K.int_shape(x)[channel_axis],1,activation=None,use_bias=True,name=block_name + '_conv') 516 | 517 | x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale, 518 | output_shape=K.int_shape(x)[1:], 519 | arguments={'scale': scale}, 520 | name=block_name)([x, up]) 521 | if activation is not None: 522 | x = Activation(activation, name=block_name + '_ac')(x) 523 | return x 524 | 525 | channel_axis = 3 526 | img_input = Input(shape=(image_height,image_width,n_channels)) 527 | zero_pad = ZeroPadding2D(((12,12),(0,0)))(img_input) 528 | 529 | 530 | # Stem block: 35 x 35 x 192 531 | x = conv2d_bn(zero_pad, 32, 3, strides=2, padding='valid') 532 | x = conv2d_bn(x, 32, 3, padding='valid') 533 | x = conv2d_bn(x, 64, 3) 534 | x = MaxPooling2D(3, strides=2)(x) 535 | x = conv2d_bn(x, 80, 1, padding='valid') 536 | x = conv2d_bn(x, 192, 3, padding='valid') 537 | x = MaxPooling2D(3, strides=2)(x) 538 | 539 | # Mixed 5b (Inception-A block): 35 x 35 x 320 540 | branch_0 = conv2d_bn(x, 96, 1) 541 | branch_1 = conv2d_bn(x, 48, 1) 542 | branch_1 = conv2d_bn(branch_1, 64, 5) 543 | branch_2 = conv2d_bn(x, 64, 1) 544 | branch_2 = conv2d_bn(branch_2, 96, 3) 545 | branch_2 = conv2d_bn(branch_2, 96, 3) 546 | branch_pool = AveragePooling2D(3, strides=1, padding='same')(x) 547 | branch_pool = conv2d_bn(branch_pool, 64, 1) 548 | branches = [branch_0, branch_1, branch_2, branch_pool] 549 | 550 | x = Concatenate(axis=channel_axis, name='mixed_5b')(branches) 551 | 552 | # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320 553 | for block_idx in range(1, 11): 554 | x = inception_resnet_block(x, 555 | scale=0.17, 556 | block_type='block35', 557 | block_idx=block_idx) 558 | 559 | # Mixed 6a (Reduction-A block): 17 x 17 x 1088 560 | branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid') 561 | branch_1 = conv2d_bn(x, 256, 1) 562 | branch_1 = conv2d_bn(branch_1, 256, 3) 563 | branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid') 564 | branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x) 565 | branches = [branch_0, branch_1, branch_pool] 566 | x = Concatenate(axis=channel_axis, name='mixed_6a')(branches) 567 | 568 | # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088 569 | for block_idx in range(1, 21): 570 | x = inception_resnet_block(x, 571 | scale=0.1, 572 | block_type='block17', 573 | block_idx=block_idx) 574 | 575 | # Mixed 7a (Reduction-B block): 8 x 8 x 2080 576 | branch_0 = conv2d_bn(x, 256, 1) 577 | branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid') 578 | branch_1 = conv2d_bn(x, 256, 1) 579 | branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid') 580 | branch_2 = conv2d_bn(x, 256, 1) 581 | branch_2 = conv2d_bn(branch_2, 288, 3) 582 | branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid') 583 | branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x) 584 | branches = [branch_0, branch_1, branch_2, branch_pool] 585 | x = Concatenate(axis=channel_axis, name='mixed_7a')(branches) 586 | 587 | # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080 588 | for block_idx in range(1, 10): 589 | x = inception_resnet_block(x, 590 | scale=0.2, 591 | block_type='block8', 592 | block_idx=block_idx) 593 | x = inception_resnet_block(x, 594 | scale=1., 595 | activation=None, 596 | 
block_type='block8',
597 |                                block_idx=10)
598 | 
599 |     # Final convolution block: 8 x 8 x 1536
600 |     x = conv2d_bn(x, 1536, 1, name='conv_7b')
601 | 
602 |     x = GlobalMaxPooling2D()(x)
603 | 
604 |     inputs = img_input
605 | 
606 |     # Create model
607 |     model = Model(inputs, x, name='inception_resnet_v2')
608 | 
609 |     if load_wt == "Yes":
610 |         model.load_weights(weights_path)
611 | 
612 |     return model
613 | 
--------------------------------------------------------------------------------
/training_fn.py:
--------------------------------------------------------------------------------
1 | from keras.layers import Dense, Dropout
2 | from keras.models import Model
3 | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
4 | from keras.optimizers import Adam
5 | from keras import backend as K
6 | from models import *
7 | from data_processing import data_split
8 | 
9 | models = [vgg16_model, vgg19_model, xception_model, resnet_model, inception_model, inception_resnet_model]
10 | 
11 | def f1(y_true, y_pred):
12 |     """Batch-wise F1 metric for Keras, built from precision and recall."""
13 |     def recall(y_true, y_pred):
14 |         true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
15 |         possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
16 |         return true_positives / (possible_positives + K.epsilon())
17 | 
18 |     def precision(y_true, y_pred):
19 |         true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
20 |         predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
21 |         return true_positives / (predicted_positives + K.epsilon())
22 | 
23 |     precision = precision(y_true, y_pred)
24 |     recall = recall(y_true, y_pred)
25 |     return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
26 | 
27 | def compile_n_fit(validation_percent, testing_percent, image_height, image_width, n_channels, load_wt, dropout=0.3, model_name='vgg16_model', magnification='40X'):
28 |     training_images, training_labels, validation_images, validation_labels, testing_images, testing_labels = data_split(magnification=magnification, validation_percent=validation_percent, testing_percent=testing_percent)
29 | 
30 |     # Look up the base architecture by function name, then build it.
31 |     for i in range(len(models)):
32 |         if models[i].__name__ == model_name:
33 |             base_model = models[i]
34 |     base_model = base_model(image_height=image_height, image_width=image_width, n_channels=n_channels, load_wt=load_wt)
35 | 
36 |     # Fully connected head: 2048 -> 512 -> 128 -> 32 -> 8-way softmax.
37 |     x = base_model.output
38 |     x = Dense(2048, activation='relu')(x)
39 |     x = Dropout(dropout)(x)
40 |     x = Dense(512, activation='relu')(x)
41 |     x = Dropout(dropout)(x)
42 |     x = Dense(128, activation='relu')(x)
43 |     x = Dropout(dropout)(x)
44 |     x = Dense(32, activation='relu')(x)
45 |     out = Dense(8, activation='softmax')(x)
46 | 
47 |     model = Model(base_model.input, out)
48 | 
49 |     try:
50 |         model.load_weights(model_name + '_weight_1.h5')
51 |         print('Weights loaded!')
52 |     except (IOError, OSError):
53 |         print('No saved weights found, starting from the base weights.')
54 | 
55 |     model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=0.0001), metrics=[f1, 'accuracy'])
56 |     early_stopping = EarlyStopping(patience=10, verbose=2)
57 |     model_checkpoint = ModelCheckpoint(model_name + "_combine" + ".model", save_best_only=True, verbose=2)
58 |     reduce_lr = ReduceLROnPlateau(factor=0.1, patience=5, verbose=2)
59 | 
60 |     epochs = 100
61 |     batch_size = 64
62 | 
63 |     history = model.fit(training_images, training_labels,
64 |                         validation_data=(validation_images, validation_labels),
65 |                         epochs=epochs,
66 |                         verbose=0,
67 |                         batch_size=batch_size,
68 |                         callbacks=[early_stopping, model_checkpoint, reduce_lr])
69 | 
70 |     # evaluate() returns the metrics in compile order: loss, f1, accuracy.
71 |     test_loss, test_f1, test_acc = model.evaluate(testing_images, testing_labels)
72 | 
73 |     model.save_weights(model_name + '_weight_1.h5')
74 | 
75 |     print("\nThe test accuracy for " + model_name + " with magnification " + magnification + " is ", test_acc, " with F1 score of ", test_f1, "\n")
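76 | 
77 | # A hedged sanity check (an addition, not in the original file): evaluate the f1 metric on a
78 | # tiny hand-made batch. With the rounded predictions below, precision = recall = 2/3, so the
79 | # printed value should be close to 0.667.
80 | if __name__ == "__main__":
81 |     import numpy as np
82 |     y_true = K.constant(np.array([[0., 1.], [1., 0.], [0., 1.]]))
83 |     y_pred = K.constant(np.array([[0.1, 0.9], [0.8, 0.2], [0.6, 0.4]]))
84 |     print('f1 =', K.eval(f1(y_true, y_pred)))
--------------------------------------------------------------------------------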