├── README.md
├── BinaryTreeAndOrder.py
├── validation_train_split.py
├── padding_op.py
├── convolution_op.py
└── cnn_Visualization.py
/README.md:
--------------------------------------------------------------------------------
# image_processing_op
Useful image-processing operations.
--------------------------------------------------------------------------------
/BinaryTreeAndOrder.py:
--------------------------------------------------------------------------------
class BTree:
    def __init__(self, value):
        self.left = None
        self.data = value
        self.right = None
        self.parent = None

    def insertleft(self, value):
        self.left = BTree(value)
        self.left.parent = self
        return self.left

    def insertright(self, value):
        self.right = BTree(value)
        self.right.parent = self
        return self.right

    def show(self):
        print(self.data)


def preorder(node):
    # root -> left subtree -> right subtree
    if node is not None:
        node.show()
        preorder(node.left)
        preorder(node.right)


def inorder(node):
    # left subtree -> root -> right subtree
    if node is not None:
        inorder(node.left)
        node.show()
        inorder(node.right)


def postorder(node):
    # left subtree -> right subtree -> root
    if node is not None:
        postorder(node.left)
        postorder(node.right)
        node.show()


root = BTree('R')
a = root.insertleft('A')
b = root.insertright('B')
c = a.insertleft('C')
d = a.insertright('D')
e = b.insertleft('E')

preorder(root)
inorder(root)
postorder(root)
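
# With the tree built above, the traversals print (one node per line):
#   preorder : R A C D B E
#   inorder  : C A D R E B
#   postorder: C D A E B R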
--------------------------------------------------------------------------------
/validation_train_split.py:
--------------------------------------------------------------------------------
import os
import shutil
from tqdm import tqdm

TRAIN_VALIDATION_RATE = 0.8
TOTAL_DATASET_PATH = "/home/jiangmingchao/Gan_tensorflow/flower_photos/"
TRAIN_DATA_PATH = "/home/jiangmingchao/Gan_tensorflow/flower_dataset/train_dataset/"
VALIDATION_DATA_PATH = "/home/jiangmingchao/Gan_tensorflow/flower_dataset/validation_dataset/"


def evaluate_images_nums(images_path):
    """Count the jpg/png images in every class folder under images_path."""
    total_images_counts = 0
    image_format = ['jpg', 'png']
    for image_folder in os.listdir(images_path):
        # skip plain files; only names without an extension are class folders
        if len(image_folder.split('.')) == 1:
            folder_images_count = 0
            for images in os.listdir(images_path + image_folder):
                if images.split('.')[-1] in image_format:
                    folder_images_count += 1
            print('{} images count: {}'.format(image_folder, folder_images_count))
            total_images_counts += folder_images_count
    print("total images count: {}".format(total_images_counts))


def make_dirs(path):
    if not os.path.exists(path):
        os.mkdir(path)


def split_train_validation_images(path, train_path, validation_path):
    make_dirs(train_path)
    make_dirs(validation_path)

    for image_folder in os.listdir(path):
        # train_path
        make_dirs(train_path + image_folder)
        # validation_path
        make_dirs(validation_path + image_folder)
        print('process folder: ', image_folder)
        move_number_count = 1
        for images in tqdm(os.listdir(path + image_folder)):
            # with rate = 0.8, copy every fifth image to the validation set
            step = int(1 / (1 - TRAIN_VALIDATION_RATE))
            if move_number_count % step == 0:
                shutil.copy(path + image_folder + '/' + images,
                            validation_path + image_folder + '/' + images)
            move_number_count += 1
        print("validation images have been copied")


def move_remain_train_images(path, train_path):
    for image_folder in os.listdir(path):
        print('process folder: ', image_folder)
        for images in tqdm(os.listdir(path + image_folder)):
            shutil.copy(path + image_folder + '/' + images,
                        train_path + image_folder + '/' + images)
    print("train images have been copied")


if __name__ == "__main__":
    evaluate_images_nums(TRAIN_DATA_PATH)
    evaluate_images_nums(VALIDATION_DATA_PATH)
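
# A full run is sketched below (assuming the flower_photos dataset exists at
# TOTAL_DATASET_PATH). Note that both helpers copy rather than move, so images
# copied to the validation set are also copied into the training set unless
# they are removed from the source folders first:
#
#   split_train_validation_images(TOTAL_DATASET_PATH, TRAIN_DATA_PATH, VALIDATION_DATA_PATH)
#   move_remain_train_images(TOTAL_DATASET_PATH, TRAIN_DATA_PATH)
#   evaluate_images_nums(TRAIN_DATA_PATH)
#   evaluate_images_nums(VALIDATION_DATA_PATH)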
--------------------------------------------------------------------------------
/padding_op.py:
--------------------------------------------------------------------------------
import numpy as np


# padding operation
def padding_op(x, is_padding, mode, padding):
    """
    inputs:
        x: input array
        is_padding: if True, return the array padded according to mode and padding
        mode: "SAME"           -> padding = [value, value, value, value] (all four sides)
              "ROW"            -> padding = [value, 0, 0, value] (top and bottom only)
              "COLUMNS"        -> padding = [0, value, value, 0] (left and right only)
              "SINGLE_ZEROS"   -> exactly one entry is zero, e.g. [0, value, value, value]
              "SINGLE_PADDING" -> exactly one entry is non-zero, e.g. [0, 0, 0, value]
        padding: [top, left, right, bottom] padding widths
    outputs:
        y: output array
    """
    if not is_padding:
        # no padding
        return x

    mode = mode.upper()
    shape = x.shape
    # rows grow by top + bottom, columns by left + right
    y = np.zeros((shape[0] + padding[0] + padding[3], shape[1] + padding[1] + padding[2]))

    # top, bottom, left and right all padded with zeros
    if mode == "SAME":
        if not np.all(np.array(padding) >= 1):
            raise ValueError("SAME mode needs every padding entry >= 1")
        y[padding[0]:-padding[3], padding[1]:-padding[2]] = x
        return y
    # top and bottom
    elif mode == "ROW":
        if padding[0] == 0 or padding[3] == 0:
            raise ValueError("ROW mode needs padding = [value, 0, 0, value]")
        y[padding[0]:-padding[3], :] = x
        return y
    # left and right
    elif mode == "COLUMNS":
        if padding[1] == 0 or padding[2] == 0:
            raise ValueError("COLUMNS mode needs padding = [0, value, value, 0]")
        y[:, padding[1]:-padding[2]] = x
        return y
    # pad three sides; the zero entry marks the side left unpadded
    elif mode == "SINGLE_ZEROS":
        if padding[0] == 0:    # top unpadded
            y[:-padding[3], padding[1]:-padding[2]] = x
        elif padding[3] == 0:  # bottom unpadded
            y[padding[0]:, padding[1]:-padding[2]] = x
        elif padding[1] == 0:  # left unpadded
            y[padding[0]:-padding[3], :-padding[2]] = x
        else:                  # right unpadded
            y[padding[0]:-padding[3], padding[1]:] = x
        return y
    # pad exactly one side; the non-zero entry marks it
    else:
        if padding[0] > 0:
            y[padding[0]:, :] = x
        elif padding[1] > 0:
            y[:, padding[1]:] = x
        elif padding[2] > 0:
            y[:, :-padding[2]] = x
        else:
            y[:-padding[3], :] = x
        return y
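
# Example: wrap a 2x2 array with one ring of zeros on every side.
#   a = np.array([[1, 2],
#                 [3, 4]])
#   padding_op(a, True, "SAME", [1, 1, 1, 1])
#   -> [[0. 0. 0. 0.]
#      [0. 1. 2. 0.]
#      [0. 3. 4. 0.]
#      [0. 0. 0. 0.]]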
--------------------------------------------------------------------------------
/convolution_op.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import cv2
from functools import reduce

from padding_op import padding_op

img = cv2.imread("1.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# image = np.ones((5, 5))
image = np.array([[1, 1, 1, 0, 0],
                  [0, 1, 1, 1, 0],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 0],
                  [0, 1, 1, 0, 0]])
kernel = np.array([[-1, -1, -1],
                   [-1, 8, -1],
                   [-1, -1, -1]])

kernel_size = kernel.shape[0]
strides = 2
not_padding = True
shape = image.shape


# multiply the patch by the kernel element-wise and sum the result
# (cross-correlation, i.e. convolution without flipping the kernel)
def patch_calculate_sum(patch, kernel):
    return np.sum(np.multiply(patch, kernel))


# cut the kernel-sized patch of the image centred on center_points
def get_patch_array(image, center_points, kernel_size):
    center_size = int(kernel_size / 2)
    x, y = center_points
    min_points = [x - center_size, y - center_size]
    max_points = [x + center_size, y + center_size]
    patch = image[min_points[0]: max_points[0] + 1, min_points[1]: max_points[1] + 1]
    return patch


# output size without padding: ceil((input - kernel + 1) / strides)
def generate_new_size_output(image_shape, kernel_size, strides):
    center_slide_width = int(np.ceil((image_shape[0] - kernel_size + 1) / strides))
    center_slide_height = int(np.ceil((image_shape[1] - kernel_size + 1) / strides))

    output_array = np.zeros((center_slide_width, center_slide_height))
    return output_array


def not_padding_no_strides(image, kernel, strides):
    output_array = generate_new_size_output(image.shape, kernel.shape[0], strides)
    for i in range(output_array.shape[0]):
        for j in range(output_array.shape[1]):
            center_points = (i + int(kernel.shape[0] / 2) + int(strides / 2),
                             j + int(kernel.shape[0] / 2) + int(strides / 2))
            patch = get_patch_array(image, center_points, kernel.shape[0])
            convolution_value = patch_calculate_sum(patch, kernel)
            # clamp to the valid 8-bit image range
            if convolution_value >= 255:
                convolution_value = 255
            elif convolution_value <= 0:
                convolution_value = 0
            output_array[i][j] = convolution_value
    return output_array
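
# A quick cross-check for the stride-1 path (a sketch, assuming SciPy is
# available): it should match plain valid-mode 2-D cross-correlation clipped
# to [0, 255].
#   from scipy.signal import correlate2d
#   reference = np.clip(correlate2d(image, kernel, mode='valid'), 0, 255)
#   assert np.array_equal(not_padding_no_strides(image, kernel, 1), reference)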

# strided, valid (no padding) cross-correlation
def no_padding_strides(image, kernel, strides):
    output_array = generate_new_size_output(image.shape, kernel.shape[0], strides)
    k = kernel.shape[0]
    for i in range(image.shape[0] - k + 1):
        if i % strides == 0:
            for j in range(image.shape[1] - k + 1):
                if j % strides == 0:
                    center_points = (i + int(k / 2), j + int(k / 2))
                    patch = get_patch_array(image, center_points, k)
                    convolution_value = patch_calculate_sum(patch, kernel)
                    if convolution_value >= 255:
                        convolution_value = 255
                    elif convolution_value <= 0:
                        convolution_value = 0
                    output_array[int(i / strides)][int(j / strides)] = convolution_value
    return output_array


# pad one ring of zeros, then slide the kernel with the given stride
def padding_no_strides(image, kernel, strides, is_padding):
    image = padding_op(image, is_padding, "same", [1, 1, 1, 1])
    padding_array = not_padding_no_strides(image, kernel, strides)
    return padding_array


def padding_strides(image, kernel, is_padding, padding, strides):
    image = padding_op(image, is_padding, "same", padding)
    padding_strides_array = not_padding_no_strides(image, kernel, strides)
    return padding_strides_array


def generate_conv_weights(kernel_size, input_channels, output_channels):
    kernel_weights = np.random.randn(kernel_size, kernel_size, input_channels, output_channels)
    return kernel_weights


# convolve each input channel with its own 2-D slice of the kernel
def conv3d_for_inputchannel(image, weights_shape, kernel_weights, is_padding, padding, strides):
    input_channels = weights_shape[2]
    # input_channels must match image.shape[2]
    output_image_filter = []
    for i in range(input_channels):
        kernel_2d = kernel_weights[:, :, i]
        padding_strides_filter = padding_strides(image[:, :, i], kernel_2d, is_padding, padding, strides=strides)
        output_image_filter.append(padding_strides_filter)
    return output_image_filter


def conv2d(image, weights_shape, is_padding, padding, strides):
    kernel_size = weights_shape[0]
    input_channels = weights_shape[2]
    output_channels = weights_shape[3]

    kernel_weights = generate_conv_weights(kernel_size, input_channels, output_channels)

    assert image.shape[2] == input_channels
    image_filter = []
    for i in range(output_channels):
        image_feature = conv3d_for_inputchannel(image, weights_shape, kernel_weights[:, :, :, i],
                                                is_padding, padding, strides)
        # sum the per-channel responses into one feature map
        sum_image_feature = reduce(lambda x, y: x + y, image_feature)
        sum_image_feature = np.expand_dims(sum_image_feature, 2)
        image_filter.append(sum_image_feature)
    output_featuremap = np.concatenate(image_filter, axis=2)
    return output_featuremap


# image = cv2.imread("1.jpg")
# image = cv2.resize(image, (360, 360))
#
# out_feature = conv2d(image, [3, 3, 3, 10], False, padding=[1, 1, 1, 1], strides=1)
#
# for i in range(4):
#     plt.figure(i)
#     plt.imshow(out_feature[:, :, i])
#     plt.show()


output_array = not_padding_no_strides(image, kernel, 1)
# output_array2 = no_padding_strides(img_gray, kernel, 2)
# output_image_array = not_padding_no_strides(img_gray, kernel, 1)
# output_image_array_strides = no_padding_strides(img_gray, kernel, 2)
# output_padding_no_strides = padding_no_strides(img_gray, kernel, 1, True)
# output_padding_strides = padding_strides(img_gray, kernel, True, [1, 1, 1, 1], 2)

print(output_array)
# print(output_array2)

# print(output_image_array.shape)
# plt.subplot(221)
# plt.imshow(output_image_array)
# plt.title("no padding no strides, shape is {}".format(output_image_array.shape))
#
# output_image_array2 = cv2.filter2D(img_gray, -1, kernel)
# plt.subplot(222)
# plt.imshow(output_image_array2)
# plt.title("cv2.filter2D, shape is {}".format(output_image_array2.shape))
#
# print(output_padding_no_strides.shape)
# print(np.max(output_padding_no_strides))
# plt.subplot(223)
# plt.imshow(output_padding_no_strides)
# plt.title("padding no strides, shape is {}".format(output_padding_no_strides.shape))
#
# print(output_padding_strides.shape)
# print(np.max(output_padding_strides))
# plt.subplot(224)
# plt.imshow(output_padding_strides)
# plt.title("padding and strides, shape is {}".format(output_padding_strides.shape))
# plt.show()
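
# Shape sanity check for conv2d (a sketch; `rgb` is a stand-in input): an
# 8x8 3-channel array with a [3, 3, 3, 10] kernel, one ring of padding and
# stride 1 keeps the spatial size and yields 10 output channels.
#   rgb = np.random.rand(8, 8, 3)
#   feature_map = conv2d(rgb, [3, 3, 3, 10], True, padding=[1, 1, 1, 1], strides=1)
#   print(feature_map.shape)  # (8, 8, 10)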
--------------------------------------------------------------------------------
/cnn_Visualization.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import codecs
import os
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, Flatten, Input, Dense
from keras.models import Model
from keras.utils import multi_gpu_model
from keras.optimizers import Adam
from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
import json
import matplotlib.pyplot as plt
import keras.backend as K
from PIL import Image
import numpy as np
import time
import cv2

train_path = '/home/jiangmingchao/Gan_tensorflow/flower_dataset/train_dataset/'
validation_path = '/home/jiangmingchao/Gan_tensorflow/flower_dataset/validation_dataset/'
model_path = '/home/jiangmingchao/Gan_tensorflow/flower_tf/model/'
if os.path.exists(model_path):
    print('model save path is: %s' % model_path)
else:
    os.mkdir(model_path)
learning_rate_base = 0.01
epochs = 200
model_save_name = 'flower_inception.h5'


# step ("staircase") decay: drop the learning rate at fixed epochs
def stairs_decay(epoch):
    if epoch < 100:
        return learning_rate_base
    elif epoch < 150:
        return 0.001
    else:
        return 0.0005


# polynomial decay from the base rate down to zero over all epochs
def poly_decay(epoch):
    maxEpochs = epochs
    baselr = learning_rate_base
    power = 1.0

    alpha = baselr * (1 - (epoch / float(maxEpochs))) ** power
    return alpha
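
# Sample values from the two schedules:
#   stairs_decay(50)  -> 0.01      poly_decay(50)  -> 0.0075
#   stairs_decay(120) -> 0.001     poly_decay(100) -> 0.005
#   stairs_decay(180) -> 0.0005    poly_decay(150) -> 0.0025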

learning_schedule = LearningRateScheduler(stairs_decay)
modelckpt = ModelCheckpoint(model_path + model_save_name, 'val_loss', save_best_only=True)

train_datagen = ImageDataGenerator(
    rescale=1./255,
    zoom_range=0.2,
    rotation_range=5,
    horizontal_flip=True
)
validation_datagen = ImageDataGenerator(
    rescale=1./255
)

train_generator = train_datagen.flow_from_directory(
    train_path,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_directory(
    validation_path,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical'
)


# a small VGG-style stack of 3x3 convolutions
def keras_inference(input_shape):
    input_layers = Input(input_shape)
    conv1 = Conv2D(64, (3, 3), activation='relu')(input_layers)
    conv2 = Conv2D(64, (3, 3), activation='relu')(conv1)
    pool1 = MaxPooling2D((2, 2), (2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu')(pool1)
    conv4 = Conv2D(128, (3, 3), activation='relu')(conv3)
    pool2 = MaxPooling2D((2, 2), (2, 2))(conv4)
    conv5 = Conv2D(256, (3, 3), activation='relu')(pool2)
    conv6 = Conv2D(256, (3, 3), activation='relu')(conv5)
    conv7 = Conv2D(256, (3, 3), activation='relu')(conv6)
    pool3 = MaxPooling2D((2, 2), (2, 2))(conv7)
    conv8 = Conv2D(512, (3, 3), activation='relu')(pool3)
    conv9 = Conv2D(512, (3, 3), activation='relu')(conv8)
    conv10 = Conv2D(512, (3, 3), activation='relu')(conv9)

    flatten = Flatten()(conv10)
    dense1 = Dense(512, activation='relu')(flatten)
    dense2 = Dense(256, activation='relu')(dense1)
    output = Dense(5, activation='softmax')(dense2)
    vgg_model = Model(input_layers, output)
    return vgg_model


def inception_v3_model(input_shape):
    model = InceptionV3(include_top=True,
                        weights=None,
                        input_shape=input_shape,
                        classes=5)
    return model


def get_loss_fig(epochs):
    data = []
    with codecs.open("history_logs.json", "r", "utf-8") as f:
        for line in f:
            dic = json.loads(line)
            data.append(dic)
    logs = data[0]
    loss = logs['loss']
    val_loss = logs['val_loss']
    x = [i + 1 for i in range(epochs)]
    plt.figure(0)
    plt.plot(x, loss, 'r-', label='loss')
    plt.plot(x, val_loss, 'y-', label='val_loss')
    plt.legend(loc='best')
    # save before show(), otherwise an empty figure is written to disk
    plt.savefig('flower_loss.png')
    plt.show()


def preprocess_image(img_path):
    image = Image.open(img_path)
    image = image.resize((224, 224))
    image = np.array(image)
    image = image / 255.0
    image = np.expand_dims(image, 0)
    return image


def get_model(model, output_layers):
    # original model inputs as input, the chosen layer's output as output
    show_model = Model(inputs=model.inputs, outputs=output_layers.output)
    return show_model


def get_model_by_layers(model, layer_id, weights_path):
    layers_dict = {}
    for index, layer in enumerate(model.layers):
        layers_dict[index] = layer.name

    def generate_model(model, layer_id, weights_path):
        func_layer = model.get_layer(name=layers_dict[layer_id], index=layer_id)
        new_model = get_model(model, func_layer)
        new_model.load_weights(weights_path, by_name=True)
        return new_model

    # only conv, pooling and activation layers are supported
    if layers_dict[layer_id].split('_')[0] == "conv2d":
        conv_model = generate_model(model, layer_id, weights_path)
        return conv_model
    elif layers_dict[layer_id].split('_')[0] == "max_pooling2d":
        pool_model = generate_model(model, layer_id, weights_path)
        return pool_model
    elif layers_dict[layer_id].split('_')[0] == "activation":
        activation_model = generate_model(model, layer_id, weights_path)
        return activation_model


# show the layer's response to a real image, not the kernel itself
def visualize_model_output(model, image_path, layer_id, weights_path, num_filter=8):
    image = preprocess_image(image_path)
    output_model = get_model_by_layers(model, layer_id, weights_path)
    print("====== output model summary ======")
    output_model.summary()
    result = output_model.predict(image)

    for i in range(num_filter):
        plt.subplot(2, 4, i + 1)
        plt.imshow(result[0, :, :, i])
        plt.title(layer_id)
    plt.show()


# synthesize the input that each conv kernel responds to most strongly
def visualize_kernel_output(model, image_path, layer_id, weights_path):
    image = preprocess_image(image_path)
    img_shape = image.shape
    layers_dict = {}
    for index, layer in enumerate(model.layers):
        layers_dict[index] = layer.name

    def deprocess_image(x):
        # normalize, center on gray, then convert back to a displayable uint8 image
        x -= x.mean()
        x /= (x.std() + K.epsilon())
        x *= 0.1

        x += 0.5
        x = np.clip(x, 0, 1)

        x *= 255
        if K.image_data_format() == 'channels_first':
            x = x.transpose((1, 2, 0))
        x = np.clip(x, 0, 255).astype('uint8')
        return x

    def normalize(x):
        return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())

    conv_model = get_model_by_layers(model, layer_id, weights_path)
    conv_model.summary()

    input_img = conv_model.input
    kept_filters = []
    for filter_index in range(32):
        print('Processing filter %d' % filter_index)
        start_time = time.time()

        layer_output = conv_model.get_layer(name=layers_dict[layer_id]).output
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        grads = K.gradients(loss, input_img)[0]
        grads = normalize(grads)

        iterate = K.function([input_img], [loss, grads])

        step = 1

        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random((img_shape[0], img_shape[3], img_shape[1], img_shape[2]))
        else:
            input_img_data = np.random.random((img_shape[0], img_shape[1], img_shape[2], img_shape[3]))
        input_img_data = (input_img_data - 0.5) * 20 + 128

        # gradient ascent on the input image
        for i in range(20):
            loss_value, grads_value = iterate([input_img_data])
            input_img_data += grads_value * step

            print('current loss value:', loss_value)
            if loss_value <= 0.:
                break

        if loss_value > 0:
            img = deprocess_image(input_img_data[0])
            kept_filters.append((img, loss_value))
        end_time = time.time()
        print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

    print('filters number: ', len(kept_filters))
    n = 3

    # keep the n*n filters with the highest activation
    kept_filters.sort(key=lambda x: x[1], reverse=True)
    kept_filters = kept_filters[: n * n]

    margin = 5

    width = n * image.shape[1] + (n - 1) * margin
    height = n * image.shape[2] + (n - 1) * margin

    stitched_filters = np.zeros((width, height, 3))

    for i in range(n):
        for j in range(n):
            img, loss = kept_filters[i * n + j]
            stitched_filters[(image.shape[1] + margin) * i: (image.shape[1] + margin) * i + image.shape[1],
                             (image.shape[2] + margin) * j: (image.shape[2] + margin) * j + image.shape[2], :] = img
    plt.imshow(stitched_filters)
    plt.show()
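
# visualize_kernel_output above runs gradient ascent in input space: starting
# from gray noise it nudges the input toward a higher mean activation of one
# filter at a time, keeps the inputs whose activation stayed positive, and
# tiles the best n*n of them. A usage sketch (assuming trained weights exist
# on disk):
#   visualize_kernel_output(inception_model, image_path, 155, 'flower_inceptionv3.h5')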

# visualize the Grad-CAM heat map on top of the image
def visualize_heat_map_on_image(model, image_path):
    image = preprocess_image(image_path)
    preds = model.predict(image)
    class_idx = np.argmax(preds[0])
    print(model.output)
    class_output = model.output[:, class_idx]
    # get the feature map of the last convolution layer
    last_conv_layer = model.get_layer(name='conv2d_188')

    # gradient of the winning class score w.r.t. the last conv feature map
    grads = K.gradients(class_output, last_conv_layer.output)[0]
    # global-average-pool the gradients: one weight per feature-map channel
    pooled_grads = K.mean(grads, axis=[0, 1, 2])
    iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([image])
    # weight every channel by its pooled gradient (this layer has 192 channels)
    for i in range(192):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

    # average over channels, then ReLU and normalize to [0, 1]
    heatmap = np.mean(conv_layer_output_value, axis=-1)
    heatmap = np.maximum(heatmap, 0)
    heatmap /= np.max(heatmap)

    original_image = cv2.imread(image_path)
    # original_image = cv2.resize(original_image, (224, 224))
    heatmap_image = cv2.resize(heatmap, (original_image.shape[1], original_image.shape[0]))
    heatmap_image = np.uint8(255 * heatmap_image)

    heatmap_image = cv2.applyColorMap(heatmap_image, cv2.COLORMAP_HSV)
    print(heatmap_image.shape)
    print(original_image.shape)

    superimposed_img = cv2.addWeighted(original_image, 0.6, heatmap_image, 0.4, 0)
    # cv2 delivers BGR; convert so matplotlib shows the right colors
    superimposed_img = cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB)
    plt.imshow(superimposed_img)
    plt.show()


print("======= model summary =======")
with tf.device('/cpu:0'):
    inception_model = inception_v3_model((224, 224, 3))
print(inception_model.summary())

parallel_model = multi_gpu_model(inception_model, gpus=2)

training = False

if training:
    print("======= model.fit =======")
    parallel_model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(0.01),
                           metrics=['accuracy'])
    history_logs = parallel_model.fit_generator(
        train_generator,
        steps_per_epoch=120,
        epochs=epochs,
        validation_data=validation_generator,
        callbacks=[learning_schedule]
    )
    with codecs.open('history_logs.json', 'w', 'utf-8') as outfile:
        json.dump(history_logs.history, outfile, ensure_ascii=False)
        outfile.write('\n')
    get_loss_fig(epochs)

    print("======= save model =======")
    inception_model.save('flower_inceptionv3.h5')

# predict
else:
    print("======= load model =======")
    inception_model = InceptionV3(include_top=True, weights=None, input_shape=(224, 224, 3), classes=5)

    # layer names
    for i, layer in enumerate(inception_model.layers):
        print(i, layer.name)

    # print(inception_model.input_layers)
    image_path = '/home/jiangmingchao/Gan_tensorflow/flower_dataset/train_dataset/daisy/105806915_a9c13e2106_n.jpg'
    weights_path = 'flower_inceptionv3.h5'
    conv_layers_id = [155, 187, 232, 290]
    # for layer_id in conv_layers_id:
    #     visualize_model_output(inception_model, image_path, layer_id, weights_path, num_filter=8)
    # for layer_id in conv_layers_id:
    #     visualize_kernel_output(inception_model, image_path, layer_id, weights_path)

    inception_model.load_weights('flower_inceptionv3.h5')
    print(inception_model.output)

    # Grad-CAM
    visualize_heat_map_on_image(inception_model, image_path)
--------------------------------------------------------------------------------