├── .gitattributes ├── data └── pubfig │ └── wm │ ├── tarlab.npy │ └── truelab.npy ├── keras_vggface ├── __init__.py ├── __pycache__ │ ├── utils.cpython-37.pyc │ ├── __init__.cpython-37.pyc │ ├── models.cpython-37.pyc │ ├── version.cpython-37.pyc │ └── vggface.cpython-37.pyc ├── version.py ├── utils.py ├── vggface.py └── models.py ├── readme.md ├── LICENSE └── Code_demo(PubFig_WM).ipynb /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /data/pubfig/wm/tarlab.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/data/pubfig/wm/tarlab.npy -------------------------------------------------------------------------------- /data/pubfig/wm/truelab.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/data/pubfig/wm/truelab.npy -------------------------------------------------------------------------------- /keras_vggface/__init__.py: -------------------------------------------------------------------------------- 1 | from keras_vggface.vggface import VGGFace 2 | from keras_vggface.version import __version__ -------------------------------------------------------------------------------- /keras_vggface/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/keras_vggface/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /keras_vggface/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/keras_vggface/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /keras_vggface/__pycache__/models.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/keras_vggface/__pycache__/models.cpython-37.pyc -------------------------------------------------------------------------------- /keras_vggface/__pycache__/version.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/keras_vggface/__pycache__/version.cpython-37.pyc -------------------------------------------------------------------------------- /keras_vggface/__pycache__/vggface.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YiZeng623/DeepSweep/HEAD/keras_vggface/__pycache__/vggface.cpython-37.pyc -------------------------------------------------------------------------------- /keras_vggface/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.6' 2 | 3 | def pretty_versions(): 4 | import keras 5 | import tensorflow as tf 6 | k_version = keras.__version__ 7 | t_version = tf.__version__ 8 | return "keras-vggface : {}, keras : {} , tensorflow : {} ".format(__version__,k_version,t_version) -------------------------------------------------------------------------------- /readme.md: 
-------------------------------------------------------------------------------- 1 | Setup: 2 | Please first download the DeepSweep fine-tuned model and the original infected model from this [link](https://drive.google.com/file/d/1CF9HnGrRdSTcshAD1Q15Y69jru4pk1tq/view?usp=sharing) and unpack them under the main folder. 3 | Please download the clean PubFig dataset from this [link](https://drive.google.com/file/d/1AXLS_X5NsNBxU7oetp-e2E7WFm0V3Cwd/view?usp=sharing) and unpack it under the 'data' folder. 4 | Please download the patched (trigger-stamped) PubFig samples from this [link](https://drive.google.com/file/d/1wr4e3hIKmYeiNDC6bjQt-z1CUG8JnHao/view?usp=sharing) and unpack them under the 'data/pubfig/wm' folder. 5 | 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Yi Zeng 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /keras_vggface/utils.py: -------------------------------------------------------------------------------- 1 | '''VGGFace models for Keras. 
2 | 3 | # Notes: 4 | - Utility functions are modified versions of the corresponding [Keras](https://keras.io) utility functions 5 | 6 | ''' 7 | 8 | 9 | 10 | import numpy as np 11 | from keras import backend as K 12 | from keras.utils.data_utils import get_file 13 | 14 | V1_LABELS_PATH = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_labels_v1.npy' 15 | V2_LABELS_PATH = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_labels_v2.npy' 16 | 17 | VGG16_WEIGHTS_PATH = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_vgg16.h5' 18 | VGG16_WEIGHTS_PATH_NO_TOP = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_notop_vgg16.h5' 19 | 20 | 21 | RESNET50_WEIGHTS_PATH = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_resnet50.h5' 22 | RESNET50_WEIGHTS_PATH_NO_TOP = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_notop_resnet50.h5' 23 | 24 | SENET50_WEIGHTS_PATH = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_senet50.h5' 25 | SENET50_WEIGHTS_PATH_NO_TOP = 'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_notop_senet50.h5' 26 | 27 | 28 | VGGFACE_DIR = 'models/vggface' 29 | 30 | 31 | def preprocess_input(x, data_format=None, version=1): 32 | x_temp = np.copy(x) 33 | if data_format is None: 34 | data_format = K.image_data_format() 35 | assert data_format in {'channels_last', 'channels_first'} 36 | 37 | if version == 1: 38 | if data_format == 'channels_first': 39 | x_temp = x_temp[:, ::-1, ...] 40 | x_temp[:, 0, :, :] -= 93.5940 41 | x_temp[:, 1, :, :] -= 104.7624 42 | x_temp[:, 2, :, :] -= 129.1863 43 | else: 44 | x_temp = x_temp[..., ::-1] 45 | x_temp[..., 0] -= 93.5940 46 | x_temp[..., 1] -= 104.7624 47 | x_temp[..., 2] -= 129.1863 48 | 49 | elif version == 2: 50 | if data_format == 'channels_first': 51 | x_temp = x_temp[:, ::-1, ...] 52 | x_temp[:, 0, :, :] -= 91.4953 53 | x_temp[:, 1, :, :] -= 103.8827 54 | x_temp[:, 2, :, :] -= 131.0912 55 | else: 56 | x_temp = x_temp[..., ::-1] 57 | x_temp[..., 0] -= 91.4953 58 | x_temp[..., 1] -= 103.8827 59 | x_temp[..., 2] -= 131.0912 60 | else: 61 | raise NotImplementedError 62 | 63 | return x_temp 64 | 65 | 66 | def decode_predictions(preds, top=5): 67 | LABELS = None 68 | if len(preds.shape) == 2: 69 | if preds.shape[1] == 2622: 70 | fpath = get_file('rcmalli_vggface_labels_v1.npy', 71 | V1_LABELS_PATH, 72 | cache_subdir=VGGFACE_DIR) 73 | LABELS = np.load(fpath) 74 | elif preds.shape[1] == 8631: 75 | fpath = get_file('rcmalli_vggface_labels_v2.npy', 76 | V2_LABELS_PATH, 77 | cache_subdir=VGGFACE_DIR) 78 | LABELS = np.load(fpath) 79 | else: 80 | raise ValueError('`decode_predictions` expects ' 81 | 'a batch of predictions ' 82 | '(i.e. a 2D array of shape (samples, 2622) for V1 or ' 83 | '(samples, 8631) for V2). ' 84 | 'Found array with shape: ' + str(preds.shape)) 85 | else: 86 | raise ValueError('`decode_predictions` expects ' 87 | 'a batch of predictions ' 88 | '(i.e. a 2D array of shape (samples, 2622) for V1 or ' 89 | '(samples, 8631) for V2). '
90 | 'Found array with shape: ' + str(preds.shape)) 91 | results = [] 92 | for pred in preds: 93 | top_indices = pred.argsort()[-top:][::-1] 94 | result = [[str(LABELS[i].encode('utf8')), pred[i]] for i in top_indices] 95 | result.sort(key=lambda x: x[1], reverse=True) 96 | results.append(result) 97 | return results 98 | -------------------------------------------------------------------------------- /keras_vggface/vggface.py: -------------------------------------------------------------------------------- 1 | '''VGGFace models for Keras. 2 | 3 | # Reference: 4 | - [Deep Face Recognition](http://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf) 5 | - [VGGFace2: A dataset for recognising faces across pose and age](http://www.robots.ox.ac.uk/~vgg/data/vgg_face2/vggface2.pdf) 6 | 7 | ''' 8 | from __future__ import print_function 9 | from keras_vggface.models import RESNET50, VGG16, SENET50 10 | 11 | 12 | def VGGFace(include_top=True, model='vgg16', weights='vggface', 13 | input_tensor=None, input_shape=None, 14 | pooling=None, 15 | classes=None): 16 | """Instantiates the VGGFace architectures. 17 | Optionally loads weights pre-trained 18 | on VGGFace datasets. Note that when using TensorFlow, 19 | for best performance you should set 20 | `image_data_format="channels_last"` in your Keras config 21 | at ~/.keras/keras.json. 22 | The model and the weights are compatible with both 23 | TensorFlow and Theano. The data format 24 | convention used by the model is the one 25 | specified in your Keras config file. 26 | # Arguments 27 | include_top: whether to include the 3 fully-connected 28 | layers at the top of the network. 29 | weights: one of `None` (random initialization) 30 | or "vggface" (pre-training on VGGFace datasets). 31 | input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) 32 | to use as image input for the model. 33 | model: selects one of the available architectures: 34 | vgg16, resnet50 or senet50; the default is vgg16. 35 | input_shape: optional shape tuple, only to be specified 36 | if `include_top` is False (otherwise the input shape 37 | has to be `(224, 224, 3)` (with `channels_last` data format) 38 | or `(3, 224, 224)` (with `channels_first` data format). 39 | It should have exactly 3 input channels, 40 | and width and height should be no smaller than 48. 41 | E.g. `(200, 200, 3)` would be one valid value. 42 | pooling: Optional pooling mode for feature extraction 43 | when `include_top` is `False`. 44 | - `None` means that the output of the model will be 45 | the 4D tensor output of the 46 | last convolutional layer. 47 | - `avg` means that global average pooling 48 | will be applied to the output of the 49 | last convolutional layer, and thus 50 | the output of the model will be a 2D tensor. 51 | - `max` means that global max pooling will 52 | be applied. 53 | classes: optional number of classes to classify images 54 | into, only to be specified if `include_top` is True, and 55 | if no `weights` argument is specified. 56 | # Returns 57 | A Keras model instance. 58 | # Raises 59 | ValueError: in case of invalid argument for `weights`, 60 | or invalid input shape.
61 | """ 62 | 63 | if weights not in {'vggface', None}: 64 | raise ValueError('The `weights` argument should be either ' 65 | '`None` (random initialization) or `vggface`' 66 | '(pre-training on VGGFace Datasets).') 67 | 68 | if model == 'vgg16': 69 | 70 | if classes is None: 71 | classes = 2622 72 | 73 | if weights == 'vggface' and include_top and classes != 2622: 74 | raise ValueError( 75 | 'If using `weights` as vggface original with `include_top`' 76 | ' as true, `classes` should be 2622') 77 | 78 | return VGG16(include_top=include_top, input_tensor=input_tensor, 79 | input_shape=input_shape, pooling=pooling, 80 | weights=weights, 81 | classes=classes) 82 | 83 | 84 | if model == 'resnet50': 85 | 86 | if classes is None: 87 | classes = 8631 88 | 89 | if weights == 'vggface' and include_top and classes != 8631: 90 | raise ValueError( 91 | 'If using `weights` as vggface original with `include_top`' 92 | ' as true, `classes` should be 8631') 93 | 94 | return RESNET50(include_top=include_top, input_tensor=input_tensor, 95 | input_shape=input_shape, pooling=pooling, 96 | weights=weights, 97 | classes=classes) 98 | 99 | if model == 'senet50': 100 | 101 | if classes is None: 102 | classes = 8631 103 | 104 | if weights == 'vggface' and include_top and classes != 8631: 105 | raise ValueError( 106 | 'If using `weights` as vggface original with `include_top`' 107 | ' as true, `classes` should be 8631') 108 | 109 | return SENET50(include_top=include_top, input_tensor=input_tensor, 110 | input_shape=input_shape, pooling=pooling, 111 | weights=weights, 112 | classes=classes) -------------------------------------------------------------------------------- /Code_demo(PubFig_WM).ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "Using TensorFlow backend.\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "import numpy as np\n", 18 | "from keras.models import Model\n", 19 | "from keras.layers import Convolution2D, Conv2D, MaxPooling2D, Input, Dense, Activation, Flatten,Dropout\n", 20 | "from keras.models import Sequential\n", 21 | "from keras.optimizers import Adam,Adadelta\n", 22 | "import tensorflow as tf\n", 23 | "config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n", 24 | "sess = tf.Session(config=config)\n", 25 | "\n", 26 | "import matplotlib.pyplot as plt\n", 27 | "import imageio\n", 28 | "import copy\n", 29 | "import cv2\n", 30 | "import albumentations\n", 31 | "from skimage.transform import rescale, resize\n", 32 | "import tqdm" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 2, 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "ori = np.load('./data/pubfig/wm/origdata.npy')\n", 42 | "data = np.load('./data/pubfig/wm/bkdata.npy')\n", 43 | "true_lab = np.load('./data/pubfig/wm/truelab.npy')\n", 44 | "tar_lab = np.load('./data/pubfig/wm/tarlab.npy')" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 3, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "def fastSAT(img):\n", 54 | " aug = albumentations.ShiftScaleRotate(scale_limit=0.16,shift_limit=0.16,rotate_limit=4,p=1,border_mode=0) #$$$$$$200+ \n", 55 | " augmented = aug(image=(img*255).astype(np.uint8))\n", 56 | " auged = augmented['image']/255\n", 57 | " return auged\n", 58 | "\n", 59 | "def defend_OD(img):\n", 60 | " aug = 
albumentations.OpticalDistortion(p=1) # optical distortion\n", 61 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 62 | "    auged = augmented['image']/255\n", 63 | "    return auged\n", 64 | "\n", 65 | "def defend_GAMMA(img):\n", 66 | "    aug = albumentations.RandomGamma(p=1,gamma_limit=(60,60)) # gamma adjustment (gamma = 0.6)\n", 67 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 68 | "    auged = augmented['image']/255\n", 69 | "    return auged\n", 70 | "\n", 71 | "def defend_MED(img):\n", 72 | "    aug = albumentations.MedianBlur(p=1,blur_limit=(5,5)) # 5x5 median filtering\n", 73 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 74 | "    auged = augmented['image']/255\n", 75 | "    return auged\n", 76 | "\n", 77 | "def defend_DOWN(img):\n", 78 | "    aug = albumentations.Downscale(p=1,scale_min=0.25,scale_max=0.25) # downscale to 1/4 then upscale\n", 79 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 80 | "    auged = augmented['image']/255\n", 81 | "    return auged\n", 82 | "\n", 83 | "def defend_MPN(img):\n", 84 | "    aug = albumentations.MultiplicativeNoise(p=1,multiplier=1.53) # multiplicative pixel noise (x1.53)\n", 85 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 86 | "    auged = augmented['image']/255\n", 87 | "    return auged\n", 88 | "\n", 89 | "def defend_GAMMA2(img):\n", 90 | "    aug = albumentations.RandomGamma(p=1,gamma_limit=(260,260)) # gamma adjustment (gamma = 2.6)\n", 91 | "    augmented = aug(image=(img*255).astype(np.uint8))\n", 92 | "    auged = augmented['image']/255\n", 93 | "    return auged\n", 94 | "\n", 95 | "def defend_RAND(img,scalimit=1.3):\n", 96 | "    maxvalue = int(img.shape[0] * scalimit)\n", 97 | "    rnd = np.random.randint(img.shape[0],maxvalue,(1,))[0]\n", 98 | "    rescaled = resize(img,(rnd,rnd))\n", 99 | "    h_rem = maxvalue - rnd\n", 100 | "    w_rem = maxvalue - rnd\n", 101 | "    pad_left = np.random.randint(0,w_rem,(1,))[0]\n", 102 | "    pad_right = w_rem - pad_left\n", 103 | "    pad_top = np.random.randint(0,h_rem,(1,))[0]\n", 104 | "    pad_bottom = h_rem - pad_top\n", 105 | "    padded = np.pad(rescaled,((pad_top,pad_bottom),(pad_left,pad_right),(0,0)),'constant',constant_values = 0)\n", 106 | "    padded = resize(padded,(img.shape[0],img.shape[0]))\n", 107 | "    return padded" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 4, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "def defending(imgset):\n", 117 | "    outset = np.zeros_like(imgset)\n", 118 | "    for i,img in enumerate(imgset):\n", 119 | "        defimg = np.copy(img)\n", 120 | "\n", 121 | "        defimg = defend_OD(defimg)\n", 122 | "        defimg = defend_GAMMA(defimg)\n", 123 | "\n", 124 | "        defimg = defend_MED(defimg)\n", 125 | "        \n", 126 | "        defimg = defend_MPN(defimg)\n", 127 | "        defimg = defend_GAMMA2(defimg)\n", 128 | "\n", 129 | "        defimg = defend_DOWN(defimg) \n", 130 | "        defimg = defend_MED(defimg)\n", 131 | "        \n", 132 | "        defimg = cv2.resize(defimg,(180,180))\n", 133 | "        defimg = defend_MED(defimg)\n", 134 | "        defimg = cv2.resize(defimg,(224,224))\n", 135 | "        \n", 136 | "        defimg = defend_RAND(defimg,scalimit=1.1)\n", 137 | "        defimg = fastSAT(defimg)\n", 138 | "        \n", 139 | "        outset[i] = np.copy(defimg)\n", 140 | "    return outset" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "metadata": {}, 147 | "outputs": [], 148 | "source": [] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 5, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "from keras_vggface.vggface import VGGFace\n", 157 | "def vggface_model():\n", 158 | "    hidden_dim = 4096\n", 159 | "    NUM_CLASSES = 83\n", 160 | "    vgg_model = 
VGGFace(model='vgg16',weights=None,include_top=False, input_shape=(224, 224, 3))\n", 161 | " last_layer = vgg_model.get_layer('pool5').output\n", 162 | " x = Flatten(name='flatten')(last_layer)\n", 163 | " x = Dense(hidden_dim, activation='relu', name='fc6')(x)\n", 164 | " x = Dense(hidden_dim, activation='relu', name='fc7')(x)\n", 165 | " out = Dense(NUM_CLASSES, activation='softmax', name='fc8')(x)\n", 166 | " pubfig_vgg_model = Model(vgg_model.input, out)\n", 167 | " # compiling\n", 168 | " lr_optimizer=Adadelta(lr = 0.03)\n", 169 | " pubfig_vgg_model.compile(loss='categorical_crossentropy', optimizer=lr_optimizer, metrics=['accuracy'])\n", 170 | " \n", 171 | " return pubfig_vgg_model" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 6, 177 | "metadata": {}, 178 | "outputs": [ 179 | { 180 | "name": "stdout", 181 | "output_type": "stream", 182 | "text": [ 183 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", 184 | "\n", 185 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n", 186 | "\n", 187 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n", 188 | "\n", 189 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3976: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n", 190 | "\n", 191 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n", 192 | "\n", 193 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3295: The name tf.log is deprecated. Please use tf.math.log instead.\n", 194 | "\n", 195 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:174: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n", 196 | "\n", 197 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:190: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n", 198 | "\n", 199 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:199: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n", 200 | "\n", 201 | "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:206: The name tf.variables_initializer is deprecated. 
Please use tf.compat.v1.variables_initializer instead.\n", 202 | "\n" 203 | ] 204 | } 205 | ], 206 | "source": [ 207 | "model = vggface_model()\n", 208 | "model.load_weights('./model/backdoor_face_wm.h5')" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 7, 214 | "metadata": {}, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "200/200 [==============================] - 3s 13ms/step\n", 221 | "Baseline ACC over clean samples 0.9599999934434891\n" 222 | ] 223 | } 224 | ], 225 | "source": [ 226 | "#Baseline\n", 227 | "lr_optimizer=Adadelta(lr = 0.03)\n", 228 | "model.compile(loss='categorical_crossentropy', optimizer=lr_optimizer, metrics=['accuracy'])\n", 229 | "_,acc = model.evaluate(ori*255,np.squeeze(np.eye(83)[true_lab]),batch_size=10)\n", 230 | "print('Baseline ACC over clean samples',acc)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 8, 236 | "metadata": {}, 237 | "outputs": [ 238 | { 239 | "name": "stdout", 240 | "output_type": "stream", 241 | "text": [ 242 | "200/200 [==============================] - 1s 4ms/step\n", 243 | "Baseline ASR over patched data 1.0\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "#Baseline\n", 249 | "_,acc = model.evaluate(data*255,np.squeeze(np.eye(83)[tar_lab]),batch_size=10)\n", 250 | "print('Baseline ASR over patched data',acc)" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 9, 256 | "metadata": {}, 257 | "outputs": [], 258 | "source": [ 259 | "#Conduct the intensive preprocessing\n", 260 | "defdata = defending(data)\n", 261 | "defori = defending(ori)" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "execution_count": 10, 267 | "metadata": {}, 268 | "outputs": [ 269 | { 270 | "name": "stdout", 271 | "output_type": "stream", 272 | "text": [ 273 | "200/200 [==============================] - 1s 4ms/step\n", 274 | "ACC over clean samples after the intensive defense 0.3700000047683716\n" 275 | ] 276 | } 277 | ], 278 | "source": [ 279 | "#Inference (I)\n", 280 | "_,acc = model.evaluate(defori*255,np.squeeze(np.eye(83)[true_lab]),batch_size=10)\n", 281 | "print('ACC over clean samples after the intensive defense',acc)" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": 11, 287 | "metadata": {}, 288 | "outputs": [ 289 | { 290 | "name": "stdout", 291 | "output_type": "stream", 292 | "text": [ 293 | "200/200 [==============================] - 1s 4ms/step\n", 294 | "ASR over patched data after the intensive defense 0.36500000655651094\n" 295 | ] 296 | } 297 | ], 298 | "source": [ 299 | "#Inference (I)\n", 300 | "_,acc = model.evaluate(defdata*255,np.squeeze(np.eye(83)[tar_lab]),batch_size=10)\n", 301 | "print('ASR over patched data after the intensive defense',acc)" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 12, 307 | "metadata": {}, 308 | "outputs": [ 309 | { 310 | "name": "stdout", 311 | "output_type": "stream", 312 | "text": [ 313 | "test_img\n", 314 | "test_labels\n", 315 | "train_img\n", 316 | "train_labels\n", 317 | "train_mean\n", 318 | "val_img\n", 319 | "val_labels\n" 320 | ] 321 | } 322 | ], 323 | "source": [ 324 | "import h5py \n", 325 | "f = h5py.File('./data/clean_pubfig_face_dataset.h5','r') # open the h5 file\n", 326 | "for name in f.keys():\n", 327 | "    print(name)" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": 54, 333 | "metadata": {}, 334 | "outputs": [], 335 | "source": [ 336 | "#vary count from 0 to 9 to cover all 10000 clean 
samples\n", 337 | "count = 9\n", 338 | "new_ori = (np.asarray(f['train_img'][count*1000:(count+1)*1000]))/255\n", 339 | "new_true_lab = np.asarray(f['train_labels'][count*1000:(count+1)*1000])\n", 340 | "hot_lab = np.eye(83)[new_true_lab]" 341 | ] 342 | }, 343 | { 344 | "cell_type": "code", 345 | "execution_count": 55, 346 | "metadata": {}, 347 | "outputs": [ 348 | { 349 | "name": "stdout", 350 | "output_type": "stream", 351 | "text": [ 352 | "Epoch 1/1\n", 353 | "1000/1000 [==============================] - 17s 17ms/step - loss: 0.7182 - acc: 0.8020\n", 354 | "Epoch 1/1\n", 355 | "1000/1000 [==============================] - 17s 17ms/step - loss: 0.3728 - acc: 0.8930\n", 356 | "Epoch 1/1\n", 357 | "1000/1000 [==============================] - 17s 17ms/step - loss: 0.1852 - acc: 0.9470\n", 358 | "Epoch 1/1\n", 359 | "1000/1000 [==============================] - 17s 17ms/step - loss: 0.1363 - acc: 0.9590\n", 360 | "Epoch 1/1\n", 361 | "1000/1000 [==============================] - 17s 17ms/step - loss: 0.1466 - acc: 0.9590\n" 362 | ] 363 | } 364 | ], 365 | "source": [ 366 | "#GYM Fine-tuning with intensive preprocessed clean data\n", 367 | "for i in range(5):\n", 368 | " defnewori = defending(new_ori)\n", 369 | " model.fit(defnewori*255,hot_lab,epochs=1, batch_size=10)" 370 | ] 371 | }, 372 | { 373 | "cell_type": "code", 374 | "execution_count": 56, 375 | "metadata": {}, 376 | "outputs": [ 377 | { 378 | "name": "stdout", 379 | "output_type": "stream", 380 | "text": [ 381 | "200/200 [==============================] - 1s 3ms/step\n", 382 | "ACC over cleansamples after the intensive defense 0.80499999076128\n" 383 | ] 384 | } 385 | ], 386 | "source": [ 387 | "#Fine-tuning + Inference (I)\n", 388 | "_,acc = model.evaluate(defori*255,np.squeeze(np.eye(83)[true_lab]),batch_size=10)\n", 389 | "print('ACC over cleansamples after the intensive defense',acc)" 390 | ] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": 57, 395 | "metadata": {}, 396 | "outputs": [ 397 | { 398 | "name": "stdout", 399 | "output_type": "stream", 400 | "text": [ 401 | "200/200 [==============================] - 1s 4ms/step\n", 402 | "ASR over patched data after the intensive defense 0.015000000223517418\n" 403 | ] 404 | } 405 | ], 406 | "source": [ 407 | "#Fine-tuning + Inference (I)\n", 408 | "_,acc = model.evaluate(defdata*255,np.squeeze(np.eye(83)[tar_lab]),batch_size=10)\n", 409 | "print('ASR over patched data after the intensive defense',acc)" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": 58, 415 | "metadata": {}, 416 | "outputs": [], 417 | "source": [ 418 | "def defending2(imgset):\n", 419 | " outset = np.zeros_like(imgset)\n", 420 | " for i,img in enumerate(imgset):\n", 421 | " defimg = np.copy(img)\n", 422 | " \n", 423 | "\n", 424 | "# defimg = defend_OD(defimg)\n", 425 | "# defimg = defend_GAMMA(defimg)\n", 426 | "\n", 427 | " defimg = defend_MED(defimg)\n", 428 | " \n", 429 | "# defimg = defend_MPN(defimg)\n", 430 | "# defimg = defend_GAMMA2(defimg)\n", 431 | "\n", 432 | "# defimg = defend_DOWN(defimg) \n", 433 | "# defimg = defend_MED(defimg)\n", 434 | " \n", 435 | " defimg = cv2.resize(defimg,(180,180))\n", 436 | " defimg = defend_MED(defimg)\n", 437 | " defimg = cv2.resize(defimg,(224,224))\n", 438 | "\n", 439 | "# defimg = defend_RAND(defimg,scalimit=1.1)\n", 440 | " defimg = fastSAT(defimg)\n", 441 | " \n", 442 | " outset[i] = np.copy(defimg)\n", 443 | " return outset" 444 | ] 445 | }, 446 | { 447 | "cell_type": "code", 448 | "execution_count": 59, 449 | 
"metadata": {}, 450 | "outputs": [], 451 | "source": [ 452 | "defdata_L = defending2(data)\n", 453 | "defori_L = defending2(ori)" 454 | ] 455 | }, 456 | { 457 | "cell_type": "code", 458 | "execution_count": 60, 459 | "metadata": {}, 460 | "outputs": [ 461 | { 462 | "name": "stdout", 463 | "output_type": "stream", 464 | "text": [ 465 | "200/200 [==============================] - 1s 4ms/step\n", 466 | "ACC over cleansamples after the lightweight defense 0.9149999916553497\n" 467 | ] 468 | } 469 | ], 470 | "source": [ 471 | "#Fine-tuning + Inference (L)\n", 472 | "_,acc = model.evaluate(defori_L*255,np.squeeze(np.eye(83)[true_lab]),batch_size=10)\n", 473 | "print('ACC over cleansamples after the lightweight defense',acc)" 474 | ] 475 | }, 476 | { 477 | "cell_type": "code", 478 | "execution_count": 62, 479 | "metadata": {}, 480 | "outputs": [ 481 | { 482 | "name": "stdout", 483 | "output_type": "stream", 484 | "text": [ 485 | "200/200 [==============================] - 1s 4ms/step\n", 486 | "ASR over patched data after the intensive defense 0.010000000149011612\n" 487 | ] 488 | } 489 | ], 490 | "source": [ 491 | "#Fine-tuning + Inference (L)\n", 492 | "_,acc = model.evaluate(defdata_L*255,np.squeeze(np.eye(83)[tar_lab]),batch_size=10)\n", 493 | "print('ASR over patched data after the intensive defense',acc)" 494 | ] 495 | }, 496 | { 497 | "cell_type": "code", 498 | "execution_count": 63, 499 | "metadata": {}, 500 | "outputs": [], 501 | "source": [ 502 | "model.save_weights('./model/fine/backdoor_face_wm.h5')" 503 | ] 504 | } 505 | ], 506 | "metadata": { 507 | "kernelspec": { 508 | "display_name": "Python 3", 509 | "language": "python", 510 | "name": "python3" 511 | }, 512 | "language_info": { 513 | "codemirror_mode": { 514 | "name": "ipython", 515 | "version": 3 516 | }, 517 | "file_extension": ".py", 518 | "mimetype": "text/x-python", 519 | "name": "python", 520 | "nbconvert_exporter": "python", 521 | "pygments_lexer": "ipython3", 522 | "version": "3.7.4" 523 | } 524 | }, 525 | "nbformat": 4, 526 | "nbformat_minor": 2 527 | } 528 | -------------------------------------------------------------------------------- /keras_vggface/models.py: -------------------------------------------------------------------------------- 1 | '''VGGFace models for Keras. 2 | 3 | # Notes: 4 | - Resnet50 and VGG16 are modified architectures from Keras Application folder. [Keras](https://keras.io) 5 | 6 | - Squeeze and excitation block is taken from [Squeeze and Excitation Networks in 7 | Keras](https://github.com/titu1994/keras-squeeze-excite-network) and modified. 
8 | 9 | ''' 10 | 11 | 12 | from keras.layers import Flatten, Dense, Input, GlobalAveragePooling2D, \ 13 | GlobalMaxPooling2D, Activation, Conv2D, MaxPooling2D, BatchNormalization, \ 14 | AveragePooling2D, Reshape, Permute, multiply 15 | from keras_applications.imagenet_utils import _obtain_input_shape 16 | from keras.utils import layer_utils 17 | from keras.utils.data_utils import get_file 18 | from keras import backend as K 19 | from keras_vggface import utils 20 | from keras.engine.topology import get_source_inputs 21 | import warnings 22 | from keras.models import Model 23 | from keras import layers 24 | 25 | 26 | def VGG16(include_top=True, weights='vggface', 27 | input_tensor=None, input_shape=None, 28 | pooling=None, 29 | classes=2622): 30 | input_shape = _obtain_input_shape(input_shape, 31 | default_size=224, 32 | min_size=48, 33 | data_format=K.image_data_format(), 34 | require_flatten=include_top) 35 | 36 | if input_tensor is None: 37 | img_input = Input(shape=input_shape) 38 | else: 39 | if not K.is_keras_tensor(input_tensor): 40 | img_input = Input(tensor=input_tensor, shape=input_shape) 41 | else: 42 | img_input = input_tensor 43 | 44 | # Block 1 45 | x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1')( 46 | img_input) 47 | x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x) 48 | x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x) 49 | 50 | # Block 2 51 | x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')( 52 | x) 53 | x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')( 54 | x) 55 | x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x) 56 | 57 | # Block 3 58 | x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')( 59 | x) 60 | x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')( 61 | x) 62 | x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')( 63 | x) 64 | x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x) 65 | 66 | # Block 4 67 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')( 68 | x) 69 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')( 70 | x) 71 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')( 72 | x) 73 | x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x) 74 | 75 | # Block 5 76 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')( 77 | x) 78 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')( 79 | x) 80 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')( 81 | x) 82 | x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5')(x) 83 | 84 | if include_top: 85 | # Classification block 86 | x = Flatten(name='flatten')(x) 87 | x = Dense(4096, name='fc6')(x) 88 | x = Activation('relu', name='fc6/relu')(x) 89 | x = Dense(4096, name='fc7')(x) 90 | x = Activation('relu', name='fc7/relu')(x) 91 | x = Dense(classes, name='fc8')(x) 92 | x = Activation('softmax', name='fc8/softmax')(x) 93 | else: 94 | if pooling == 'avg': 95 | x = GlobalAveragePooling2D()(x) 96 | elif pooling == 'max': 97 | x = GlobalMaxPooling2D()(x) 98 | 99 | # Ensure that the model takes into account 100 | # any potential predecessors of `input_tensor`. 101 | if input_tensor is not None: 102 | inputs = get_source_inputs(input_tensor) 103 | else: 104 | inputs = img_input 105 | # Create model. 
106 | model = Model(inputs, x, name='vggface_vgg16') # load weights 107 | if weights == 'vggface': 108 | if include_top: 109 | weights_path = get_file('rcmalli_vggface_tf_vgg16.h5', 110 | utils. 111 | VGG16_WEIGHTS_PATH, 112 | cache_subdir=utils.VGGFACE_DIR) 113 | else: 114 | weights_path = get_file('rcmalli_vggface_tf_notop_vgg16.h5', 115 | utils.VGG16_WEIGHTS_PATH_NO_TOP, 116 | cache_subdir=utils.VGGFACE_DIR) 117 | model.load_weights(weights_path, by_name=True) 118 | if K.backend() == 'theano': 119 | layer_utils.convert_all_kernels_in_model(model) 120 | 121 | if K.image_data_format() == 'channels_first': 122 | if include_top: 123 | maxpool = model.get_layer(name='pool5') 124 | shape = maxpool.output_shape[1:] 125 | dense = model.get_layer(name='fc6') 126 | layer_utils.convert_dense_weights_data_format(dense, shape, 127 | 'channels_first') 128 | 129 | if K.backend() == 'tensorflow': 130 | warnings.warn('You are using the TensorFlow backend, yet you ' 131 | 'are using the Theano ' 132 | 'image data format convention ' 133 | '(`image_data_format="channels_first"`). ' 134 | 'For best performance, set ' 135 | '`image_data_format="channels_last"` in ' 136 | 'your Keras config ' 137 | 'at ~/.keras/keras.json.') 138 | return model 139 | 140 | 141 | def resnet_identity_block(input_tensor, kernel_size, filters, stage, block, 142 | bias=False): 143 | filters1, filters2, filters3 = filters 144 | if K.image_data_format() == 'channels_last': 145 | bn_axis = 3 146 | else: 147 | bn_axis = 1 148 | conv1_reduce_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_reduce" 149 | conv1_increase_name = 'conv' + str(stage) + "_" + str( 150 | block) + "_1x1_increase" 151 | conv3_name = 'conv' + str(stage) + "_" + str(block) + "_3x3" 152 | 153 | x = Conv2D(filters1, (1, 1), use_bias=bias, name=conv1_reduce_name)( 154 | input_tensor) 155 | x = BatchNormalization(axis=bn_axis, name=conv1_reduce_name + "/bn")(x) 156 | x = Activation('relu')(x) 157 | 158 | x = Conv2D(filters2, kernel_size, use_bias=bias, 159 | padding='same', name=conv3_name)(x) 160 | x = BatchNormalization(axis=bn_axis, name=conv3_name + "/bn")(x) 161 | x = Activation('relu')(x) 162 | 163 | x = Conv2D(filters3, (1, 1), use_bias=bias, name=conv1_increase_name)(x) 164 | x = BatchNormalization(axis=bn_axis, name=conv1_increase_name + "/bn")(x) 165 | 166 | x = layers.add([x, input_tensor]) 167 | x = Activation('relu')(x) 168 | return x 169 | 170 | 171 | def resnet_conv_block(input_tensor, kernel_size, filters, stage, block, 172 | strides=(2, 2), bias=False): 173 | filters1, filters2, filters3 = filters 174 | if K.image_data_format() == 'channels_last': 175 | bn_axis = 3 176 | else: 177 | bn_axis = 1 178 | conv1_reduce_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_reduce" 179 | conv1_increase_name = 'conv' + str(stage) + "_" + str( 180 | block) + "_1x1_increase" 181 | conv1_proj_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_proj" 182 | conv3_name = 'conv' + str(stage) + "_" + str(block) + "_3x3" 183 | 184 | x = Conv2D(filters1, (1, 1), strides=strides, use_bias=bias, 185 | name=conv1_reduce_name)(input_tensor) 186 | x = BatchNormalization(axis=bn_axis, name=conv1_reduce_name + "/bn")(x) 187 | x = Activation('relu')(x) 188 | 189 | x = Conv2D(filters2, kernel_size, padding='same', use_bias=bias, 190 | name=conv3_name)(x) 191 | x = BatchNormalization(axis=bn_axis, name=conv3_name + "/bn")(x) 192 | x = Activation('relu')(x) 193 | 194 | x = Conv2D(filters3, (1, 1), name=conv1_increase_name, use_bias=bias)(x) 195 | x = 
BatchNormalization(axis=bn_axis, name=conv1_increase_name + "/bn")(x) 196 | 197 | shortcut = Conv2D(filters3, (1, 1), strides=strides, use_bias=bias, 198 | name=conv1_proj_name)(input_tensor) 199 | shortcut = BatchNormalization(axis=bn_axis, name=conv1_proj_name + "/bn")( 200 | shortcut) 201 | 202 | x = layers.add([x, shortcut]) 203 | x = Activation('relu')(x) 204 | return x 205 | 206 | 207 | def RESNET50(include_top=True, weights='vggface', 208 | input_tensor=None, input_shape=None, 209 | pooling=None, 210 | classes=8631): 211 | input_shape = _obtain_input_shape(input_shape, 212 | default_size=224, 213 | min_size=32, 214 | data_format=K.image_data_format(), 215 | require_flatten=include_top, 216 | weights=weights) 217 | 218 | if input_tensor is None: 219 | img_input = Input(shape=input_shape) 220 | else: 221 | if not K.is_keras_tensor(input_tensor): 222 | img_input = Input(tensor=input_tensor, shape=input_shape) 223 | else: 224 | img_input = input_tensor 225 | if K.image_data_format() == 'channels_last': 226 | bn_axis = 3 227 | else: 228 | bn_axis = 1 229 | 230 | x = Conv2D( 231 | 64, (7, 7), use_bias=False, strides=(2, 2), padding='same', 232 | name='conv1/7x7_s2')(img_input) 233 | x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn')(x) 234 | x = Activation('relu')(x) 235 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 236 | 237 | x = resnet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1)) 238 | x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=2) 239 | x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=3) 240 | 241 | x = resnet_conv_block(x, 3, [128, 128, 512], stage=3, block=1) 242 | x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=2) 243 | x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=3) 244 | x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=4) 245 | 246 | x = resnet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1) 247 | x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2) 248 | x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3) 249 | x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4) 250 | x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5) 251 | x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6) 252 | 253 | x = resnet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1) 254 | x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2) 255 | x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3) 256 | 257 | x = AveragePooling2D((7, 7), name='avg_pool')(x) 258 | 259 | if include_top: 260 | x = Flatten()(x) 261 | x = Dense(classes, activation='softmax', name='classifier')(x) 262 | else: 263 | if pooling == 'avg': 264 | x = GlobalAveragePooling2D()(x) 265 | elif pooling == 'max': 266 | x = GlobalMaxPooling2D()(x) 267 | 268 | # Ensure that the model takes into account 269 | # any potential predecessors of `input_tensor`. 270 | if input_tensor is not None: 271 | inputs = get_source_inputs(input_tensor) 272 | else: 273 | inputs = img_input 274 | # Create model. 
275 | model = Model(inputs, x, name='vggface_resnet50') 276 | 277 | # load weights 278 | if weights == 'vggface': 279 | if include_top: 280 | weights_path = get_file('rcmalli_vggface_tf_resnet50.h5', 281 | utils.RESNET50_WEIGHTS_PATH, 282 | cache_subdir=utils.VGGFACE_DIR) 283 | else: 284 | weights_path = get_file('rcmalli_vggface_tf_notop_resnet50.h5', 285 | utils.RESNET50_WEIGHTS_PATH_NO_TOP, 286 | cache_subdir=utils.VGGFACE_DIR) 287 | model.load_weights(weights_path) 288 | if K.backend() == 'theano': 289 | layer_utils.convert_all_kernels_in_model(model) 290 | if include_top: 291 | maxpool = model.get_layer(name='avg_pool') 292 | shape = maxpool.output_shape[1:] 293 | dense = model.get_layer(name='classifier') 294 | layer_utils.convert_dense_weights_data_format(dense, shape, 295 | 'channels_first') 296 | 297 | if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow': 298 | warnings.warn('You are using the TensorFlow backend, yet you ' 299 | 'are using the Theano ' 300 | 'image data format convention ' 301 | '(`image_data_format="channels_first"`). ' 302 | 'For best performance, set ' 303 | '`image_data_format="channels_last"` in ' 304 | 'your Keras config ' 305 | 'at ~/.keras/keras.json.') 306 | elif weights is not None: 307 | model.load_weights(weights) 308 | 309 | return model 310 | 311 | 312 | def senet_se_block(input_tensor, stage, block, compress_rate=16, bias=False): 313 | conv1_down_name = 'conv' + str(stage) + "_" + str( 314 | block) + "_1x1_down" 315 | conv1_up_name = 'conv' + str(stage) + "_" + str( 316 | block) + "_1x1_up" 317 | 318 | num_channels = int(input_tensor.shape[-1]) 319 | bottle_neck = int(num_channels // compress_rate) 320 | 321 | se = GlobalAveragePooling2D()(input_tensor) 322 | se = Reshape((1, 1, num_channels))(se) 323 | se = Conv2D(bottle_neck, (1, 1), use_bias=bias, 324 | name=conv1_down_name)(se) 325 | se = Activation('relu')(se) 326 | se = Conv2D(num_channels, (1, 1), use_bias=bias, 327 | name=conv1_up_name)(se) 328 | se = Activation('sigmoid')(se) 329 | 330 | x = input_tensor 331 | x = multiply([x, se]) 332 | return x 333 | 334 | 335 | def senet_conv_block(input_tensor, kernel_size, filters, 336 | stage, block, bias=False, strides=(2, 2)): 337 | filters1, filters2, filters3 = filters 338 | if K.image_data_format() == 'channels_last': 339 | bn_axis = 3 340 | else: 341 | bn_axis = 1 342 | 343 | bn_eps = 0.0001 344 | 345 | conv1_reduce_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_reduce" 346 | conv1_increase_name = 'conv' + str(stage) + "_" + str( 347 | block) + "_1x1_increase" 348 | conv1_proj_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_proj" 349 | conv3_name = 'conv' + str(stage) + "_" + str(block) + "_3x3" 350 | 351 | x = Conv2D(filters1, (1, 1), use_bias=bias, strides=strides, 352 | name=conv1_reduce_name)(input_tensor) 353 | x = BatchNormalization(axis=bn_axis, name=conv1_reduce_name + "/bn",epsilon=bn_eps)(x) 354 | x = Activation('relu')(x) 355 | 356 | x = Conv2D(filters2, kernel_size, padding='same', use_bias=bias, 357 | name=conv3_name)(x) 358 | x = BatchNormalization(axis=bn_axis, name=conv3_name + "/bn",epsilon=bn_eps)(x) 359 | x = Activation('relu')(x) 360 | 361 | x = Conv2D(filters3, (1, 1), name=conv1_increase_name, use_bias=bias)(x) 362 | x = BatchNormalization(axis=bn_axis, name=conv1_increase_name + "/bn" ,epsilon=bn_eps)(x) 363 | 364 | se = senet_se_block(x, stage=stage, block=block, bias=True) 365 | 366 | shortcut = Conv2D(filters3, (1, 1), use_bias=bias, strides=strides, 367 | 
name=conv1_proj_name)(input_tensor) 368 | shortcut = BatchNormalization(axis=bn_axis, 369 | name=conv1_proj_name + "/bn",epsilon=bn_eps)(shortcut) 370 | 371 | m = layers.add([se, shortcut]) 372 | m = Activation('relu')(m) 373 | return m 374 | 375 | 376 | def senet_identity_block(input_tensor, kernel_size, 377 | filters, stage, block, bias=False): 378 | filters1, filters2, filters3 = filters 379 | if K.image_data_format() == 'channels_last': 380 | bn_axis = 3 381 | else: 382 | bn_axis = 1 383 | 384 | bn_eps = 0.0001 385 | 386 | conv1_reduce_name = 'conv' + str(stage) + "_" + str(block) + "_1x1_reduce" 387 | conv1_increase_name = 'conv' + str(stage) + "_" + str( 388 | block) + "_1x1_increase" 389 | conv3_name = 'conv' + str(stage) + "_" + str(block) + "_3x3" 390 | 391 | x = Conv2D(filters1, (1, 1), use_bias=bias, 392 | name=conv1_reduce_name)(input_tensor) 393 | x = BatchNormalization(axis=bn_axis, name=conv1_reduce_name + "/bn",epsilon=bn_eps)(x) 394 | x = Activation('relu')(x) 395 | 396 | x = Conv2D(filters2, kernel_size, padding='same', use_bias=bias, 397 | name=conv3_name)(x) 398 | x = BatchNormalization(axis=bn_axis, name=conv3_name + "/bn",epsilon=bn_eps)(x) 399 | x = Activation('relu')(x) 400 | 401 | x = Conv2D(filters3, (1, 1), name=conv1_increase_name, use_bias=bias)(x) 402 | x = BatchNormalization(axis=bn_axis, name=conv1_increase_name + "/bn",epsilon=bn_eps)(x) 403 | 404 | se = senet_se_block(x, stage=stage, block=block, bias=True) 405 | 406 | m = layers.add([se, input_tensor]) 407 | m = Activation('relu')(m) 408 | 409 | return m 410 | 411 | 412 | def SENET50(include_top=True, weights='vggface', 413 | input_tensor=None, input_shape=None, 414 | pooling=None, 415 | classes=8631): 416 | input_shape = _obtain_input_shape(input_shape, 417 | default_size=224, 418 | min_size=197, 419 | data_format=K.image_data_format(), 420 | require_flatten=include_top, 421 | weights=weights) 422 | 423 | if input_tensor is None: 424 | img_input = Input(shape=input_shape) 425 | else: 426 | if not K.is_keras_tensor(input_tensor): 427 | img_input = Input(tensor=input_tensor, shape=input_shape) 428 | else: 429 | img_input = input_tensor 430 | if K.image_data_format() == 'channels_last': 431 | bn_axis = 3 432 | else: 433 | bn_axis = 1 434 | 435 | bn_eps = 0.0001 436 | 437 | x = Conv2D( 438 | 64, (7, 7), use_bias=False, strides=(2, 2), padding='same', 439 | name='conv1/7x7_s2')(img_input) 440 | x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn',epsilon=bn_eps)(x) 441 | x = Activation('relu')(x) 442 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 443 | 444 | x = senet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1)) 445 | x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=2) 446 | x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=3) 447 | 448 | x = senet_conv_block(x, 3, [128, 128, 512], stage=3, block=1) 449 | x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=2) 450 | x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=3) 451 | x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=4) 452 | 453 | x = senet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1) 454 | x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2) 455 | x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3) 456 | x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4) 457 | x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5) 458 | x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6) 459 | 460 | x = 
senet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1) 461 | x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2) 462 | x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3) 463 | 464 | x = AveragePooling2D((7, 7), name='avg_pool')(x) 465 | 466 | if include_top: 467 | x = Flatten()(x) 468 | x = Dense(classes, activation='softmax', name='classifier')(x) 469 | else: 470 | if pooling == 'avg': 471 | x = GlobalAveragePooling2D()(x) 472 | elif pooling == 'max': 473 | x = GlobalMaxPooling2D()(x) 474 | 475 | # Ensure that the model takes into account 476 | # any potential predecessors of `input_tensor`. 477 | if input_tensor is not None: 478 | inputs = get_source_inputs(input_tensor) 479 | else: 480 | inputs = img_input 481 | # Create model. 482 | model = Model(inputs, x, name='vggface_senet50') 483 | 484 | # load weights 485 | if weights == 'vggface': 486 | if include_top: 487 | weights_path = get_file('rcmalli_vggface_tf_senet50.h5', 488 | utils.SENET50_WEIGHTS_PATH, 489 | cache_subdir=utils.VGGFACE_DIR) 490 | else: 491 | weights_path = get_file('rcmalli_vggface_tf_notop_senet50.h5', 492 | utils.SENET50_WEIGHTS_PATH_NO_TOP, 493 | cache_subdir=utils.VGGFACE_DIR) 494 | model.load_weights(weights_path) 495 | if K.backend() == 'theano': 496 | layer_utils.convert_all_kernels_in_model(model) 497 | if include_top: 498 | maxpool = model.get_layer(name='avg_pool') 499 | shape = maxpool.output_shape[1:] 500 | dense = model.get_layer(name='classifier') 501 | layer_utils.convert_dense_weights_data_format(dense, shape, 502 | 'channels_first') 503 | 504 | if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow': 505 | warnings.warn('You are using the TensorFlow backend, yet you ' 506 | 'are using the Theano ' 507 | 'image data format convention ' 508 | '(`image_data_format="channels_first"`). ' 509 | 'For best performance, set ' 510 | '`image_data_format="channels_last"` in ' 511 | 'your Keras config ' 512 | 'at ~/.keras/keras.json.') 513 | elif weights is not None: 514 | model.load_weights(weights) 515 | 516 | return model 517 | --------------------------------------------------------------------------------
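Editor's note: the demo notebook above implements DeepSweep's evaluation loop end to end: load the infected model, transform inputs with an augmentation pipeline, and compare clean accuracy (ACC) against attack success rate (ASR). The sketch below condenses that flow into a plain script for orientation. It reuses the notebook's own helpers (vggface_model, defending, defending2) and file paths; the acc_asr helper is introduced here purely for illustration, and the script assumes the model and data downloads described in readme.md are in place.

# Condensed sketch of the notebook's evaluation flow (illustrative only;
# assumes vggface_model(), defending() and defending2() are defined as in
# Code_demo(PubFig_WM).ipynb and run under the same Keras / TF 1.x setup).
import numpy as np

ori = np.load('./data/pubfig/wm/origdata.npy')       # clean images, scaled to [0, 1]
data = np.load('./data/pubfig/wm/bkdata.npy')        # trigger-stamped ("patched") images
true_lab = np.load('./data/pubfig/wm/truelab.npy')   # ground-truth identities
tar_lab = np.load('./data/pubfig/wm/tarlab.npy')     # attacker-chosen target identities

model = vggface_model()                              # VGG16 backbone + 83-way softmax head
model.load_weights('./model/backdoor_face_wm.h5')    # the original infected model

def acc_asr(clean, patched):
    # ACC scores clean images against the true labels; ASR scores patched
    # images against the attacker's target labels (83-class one-hot).
    _, acc = model.evaluate(clean * 255, np.squeeze(np.eye(83)[true_lab]), batch_size=10)
    _, asr = model.evaluate(patched * 255, np.squeeze(np.eye(83)[tar_lab]), batch_size=10)
    return acc, asr

print(acc_asr(ori, data))                            # baseline: ACC ~0.96, ASR 1.0
print(acc_asr(defending(ori), defending(data)))      # intensive pipeline alone: ACC and ASR both drop
# After the GYM fine-tuning loop in the notebook, the lightweight pipeline
# recovers ACC (~0.91) while keeping ASR near 0.01:
print(acc_asr(defending2(ori), defending2(data)))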