├── exemple_script.sh
├── README.md
├── test.py
├── data_generator.py
├── VGG16_sequential.py
├── resNetStandard.py
├── resNetStandardWithVal.py
├── resnet50.py
├── VGGStandardWithVal.py
└── VGGStandard.py

/exemple_script.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | 
 4 | echo "Regression experiments"
 5 | JOB_ID=57868 # job id used to tag the saved weights; any number works
 6 | 
 7 | # For Biwi
 8 | pgr="VGGStandard.py"
 9 | #pgr="resNetStandard.py"
10 | pathData="/pathToData/"
11 | TRset="trainingAnnotations.txt" # must be located at /pathToData/trainingAnnotations.txt
12 | Testset="testAnnotationsNomirror.txt" # same location
13 | 
14 | Low_dim=3
15 | PbFlag="biwi"
16 | option="-bn"
17 | 
18 | THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32,exception_verbosity='high' python $pgr $pathData $TRset $Testset $Low_dim $PbFlag $JOB_ID $option
19 | 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # A Comprehensive Analysis of Deep Regression
 2 | This repository contains the code that was used in the experiments of [this paper](https://arxiv.org/abs/1803.08450).
 3 | 
 4 | Tested with Keras 1.1.0 (Theano backend) and Python 2.7.12.
 5 | 
 6 | Requires scikit-learn and OpenCV (imported as cv2).
 7 | 
 8 | ------------------
 9 | ## How to run:
10 | 
11 | We recommend starting from exemple_script.sh. In this file you can specify the dataset and the options you want to use.
12 | 
13 | ### Data
14 | trainingAnnotations.txt must contain the list of the training images followed by the targets:
15 | ```
16 | path_img_name_1.jpg y1 y2 y3
17 | path_img_name_2.jpg y1 y2 y3
18 | ...
19 | ```
20 | validationAnnotations.txt and testAnnotations.txt must contain the lists of the validation and test images in the same format.
21 | 
22 | Importantly, the images and annotation files must be located at /pathToData/. In other words, 'path_img_name_1.jpg' is the path of the first training image relative to /pathToData/.
23 | 
24 | ### Pretrained weights
25 | 
26 | * Download the [VGG16 weights](https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view)
27 | * Download the [resNet weights](https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels.h5)
28 | 
29 | You need to change the weight file paths in VGG16_sequential.py and resnet50.py.
30 | 
31 | ### Others
32 | We provide 4 main files:
33 | * resNetStandard.py: ResNet50, with the validation set automatically extracted from the training set.
34 | * resNetStandardWithVal.py: ResNet50, with a given validation set.
35 | * VGGStandard.py: VGG16, with the validation set automatically extracted from the training set.
36 | * VGGStandardWithVal.py: VGG16, with a given validation set.
37 | 
38 | 
39 | The JOB_ID is an identifier used to name the saved network weights; you can give any number. $pathData is the path to your dataset folder.
40 | 
41 | ------------------
42 | ## Options:
43 | An example command combining several options is given after the list.
44 | 
45 | * BatchNormalization:
46 |   * -bn: with BN
47 |   * -bnba: with BN before the last activation
48 |   * -nbn: no BN
49 | 
50 | * Fine-tuning:
51 |   '-ft x' with x in {0,1,2,3}: fine-tune the last x blocks
52 | 
53 | * Batch size:
54 |   '-bs x': use batches of size x
55 | 
56 | * Optimizer:
57 |   '-opt x' with x in {adam, adadelta, rmsprop, adagrad}; use '-opt sgd x' to train with SGD and learning rate x
58 | 
59 | * Regression layer:
60 |   '-rf x' with x in {conv,fc1}; otherwise the regression is performed from fc2 (default)
61 | 
62 | * Dropout: '-do x' with x in {-1,0,1,2}
63 |   * -1: referred to as 00 in the paper
64 |   * 0: 10
65 |   * 1: 01
66 |   * 2: 11
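For instance, a typical invocation of VGGStandard.py on Biwi, combining several options (the paths and the job id are placeholders to adapt):
```
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python VGGStandard.py /pathToData/ trainingAnnotations.txt testAnnotationsNomirror.txt 3 biwi 57868 -bn -ft 2 -bs 64 -opt adam
```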
67 | 
68 | 
69 | ## Support
70 | 
71 | For any question, please contact [Stéphane Lathuilière](https://team.inria.fr/perception/team-members/stephane-lathuiliere/).
72 | 

--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import cPickle as pickle
 3 | from sklearn.metrics import mean_absolute_error, explained_variance_score, mean_squared_error
 4 | 
 5 | def run_eval(Y_pred, Y_true, l, pbFlag, idOar="", printError=False):
 6 |     print "Evaluating"
 7 | 
 8 |     if (pbFlag == 'landmark'):
 9 |         # Reshape Y_pred and Y_true because the evaluation is different from the Biwi one
10 |         print Y_pred.shape
11 |         Y_pred2 = np.reshape(Y_pred, (5*Y_pred.shape[0],2), order='C')
12 |         Y_true2 = np.reshape(Y_true, (5*Y_true.shape[0],2), order='C')
13 | 
14 |         # per-landmark Euclidean distance between prediction and ground truth
15 |         err = np.sqrt(np.sum((Y_pred2-Y_true2)**2, axis=1))
16 | 
17 |         listErr = np.empty((5,1))
18 |         listFailures = np.empty((5,1))
19 |         for i in range(5):
20 |             temp = 0
21 |             tempFailures = 0
22 |             for j in xrange(i,len(err),5):
23 |                 temp += (err[j]/float(l))
24 |                 # If an error is larger than 5%, it is counted as a failure.
25 |                 if (err[j]/float(l)) > 0.05:
26 |                     tempFailures += 1
27 |             listErr[i,0] = temp/(float(len(err))/5)
28 |             listFailures[i,0] = tempFailures/(float(len(err))/5)
29 | 
30 |         print('Avg Detection Error:', listErr)
31 |         print('Failure Rate:', listFailures)
32 |         for x in listFailures:
33 |             print " $" + str(100*x[0]) + "$ &"
34 |         print " ".join([str(100*x[0]) for x in listFailures])
35 |         print np.mean(np.asarray([x[0] for x in listFailures]))
36 | 
37 |     elif (pbFlag == 'FBP'):
38 |         LOGpred="/services/scratch/perception/slathuil/log/error_Training_"+str(idOar)+pbFlag+".pickle"
39 |         pickle.dump((Y_pred,Y_true), open(LOGpred,"w+"))
40 | 
41 |         listSegments=[(0,1),(1,2),(3,4),(4,5),(6,7),(7,8),(9,10),(10,11),(12,13)]
42 |         errX=np.empty((Y_pred.shape[0],14))
43 |         errY=np.empty((Y_pred.shape[0],14))
44 |         err=np.empty((Y_pred.shape[0],14))
45 |         for j in range(14):
46 |             errX[:,j]=Y_pred[:,2*j]-Y_true[:,2*j]
47 |             errY[:,j]=Y_pred[:,2*j+1]-Y_true[:,2*j+1]
48 | 
49 |         # compute the error for the point in the middle of points 3 and 4 (torso reference)
50 |         XYTorso_true=np.empty((Y_pred.shape[0],2))
51 |         XYTorso_pred=np.empty((Y_pred.shape[0],2))
52 |         XYTorso_true[:,0]=0.5*(Y_true[:,2*2]+Y_true[:,2*3])
53 |         XYTorso_true[:,1]=0.5*(Y_true[:,2*2+1]+Y_true[:,2*3+1])
54 |         XYTorso_pred[:,0]=0.5*(Y_pred[:,2*2]+Y_pred[:,2*3])
55 |         XYTorso_pred[:,1]=0.5*(Y_pred[:,2*2+1]+Y_pred[:,2*3+1])
56 |         errTorso=np.sqrt((XYTorso_pred[:,0]-XYTorso_true[:,0])**2+(XYTorso_pred[:,1]-XYTorso_true[:,1])**2)
57 | 
58 | 
59 | 
60 |         err = np.sqrt((errX)**2+(errY)**2)
61 | 
62 |         lengthSegm=np.empty((Y_pred.shape[0],len(listSegments)))
63 |         lengthTorso=np.empty(Y_pred.shape[0])
64 | 
65 |         for idSegm,seg in enumerate(listSegments):
66 |             lengthSegm[:,idSegm]=np.sqrt((Y_true[:,seg[0]]-Y_true[:,seg[1]])**2)
67 | 
68 |         lengthTorso[:]=np.sqrt((XYTorso_true[:,0]-Y_true[:,2*12])**2+(XYTorso_true[:,1]-Y_true[:,2*12+1])**2)
69 | 
70 |         correct=np.empty((Y_pred.shape[0],len(listSegments)))
71 |         correctTorso=np.empty(Y_pred.shape[0])
72 |         for i in range(Y_pred.shape[0]):
73 |             for idSegm,seg in enumerate(listSegments):
74 |                 if (err[i,seg[0]]/lengthSegm[i,idSegm])<0.5 and (err[i,seg[1]]/lengthSegm[i,idSegm])<0.5:
75 | correct[i,idSegm]=1.0 76 | else: 77 | correct[i,idSegm]=0.0 78 | if errTorso[i]/lengthTorso[i]<0.5 and err[i,12]/lengthTorso[i]<0.5: 79 | correctTorso[i]=1.0 80 | else: 81 | correctTorso[i]=0.0 82 | 83 | PCP=np.sum(correct,axis=0)/Y_pred.shape[0] 84 | PCPTorso=np.sum(correctTorso)/Y_pred.shape[0] 85 | print "head: " + str(PCP[8]) 86 | print "Torso: " + str(PCPTorso) 87 | print "U Legs: " + str((PCP[1]+PCP[2])/2.0) 88 | print "L Legs: " + str((PCP[0]+PCP[3])/2.0) 89 | print "U Arms: " + str((PCP[5]+PCP[6])/2.0) 90 | print "L Arms: " + str((PCP[4]+PCP[7])/2.0) 91 | print "FB: " + str((np.sum(PCP)+PCPTorso)/10.0) 92 | 93 | 94 | 95 | 96 | # mean absolute error 97 | MSE = mean_squared_error(Y_true, Y_pred, multioutput='raw_values') 98 | MAE = mean_absolute_error(Y_true, Y_pred, multioutput='raw_values') 99 | evs = explained_variance_score(Y_true, Y_pred, multioutput='raw_values') 100 | 101 | # Head pose estimation: pitch, yaw, roll 102 | print('Mean square error:', MSE,np.sum(MSE)/MSE.shape[0]) 103 | print('Mean absolute error:', MAE,np.sum(MAE)/MAE.shape[0]) 104 | print('Explained variances score:', evs) 105 | 106 | -------------------------------------------------------------------------------- /data_generator.py: -------------------------------------------------------------------------------- 1 | ''' Create generators from dataset ''' 2 | 3 | import numpy as np 4 | import cv2 5 | import random 6 | 7 | HIGH_DIM = 512 8 | GLLIM_K = 1 9 | 10 | 11 | BATCH_SIZE = 128 12 | 13 | # Mode for the validation set for our mixture model 14 | 15 | def load_data_generator_List(rootpath, imIn, file_test, validation=1.0,subsampling=1.0,processingTarget=None,transform=[],outSize=(224,224),batch_size=BATCH_SIZE,shuffle=False): 16 | ''' create generators from data''' 17 | 18 | 19 | def generator(rootpath, images): 20 | 21 | N=len(images) 22 | nbatches=N/batch_size+1 23 | if N%batch_size==0: 24 | nbatches-=1 25 | if shuffle: 26 | random.shuffle(images) 27 | 28 | i=0 29 | while 1: 30 | X, Y = get_xy_from_file(rootpath, images[i*batch_size:(i+1)*batch_size],processingTarget=processingTarget,transform=transform,outSize=outSize) 31 | yield(X, Y) 32 | i=i+1 33 | if i>=nbatches: # we shuffle the data when the end of the dataset is reached 34 | i=0 35 | random.shuffle(images) 36 | 37 | imTest = open(rootpath+file_test, 'r').readlines() 38 | gen_test = generator(rootpath, imTest) 39 | test_size=len(imTest) 40 | 41 | 42 | 43 | 44 | 45 | # we subsample the data if needed 46 | if subsampling!=1.0: 47 | im=imIn[0:int(subsampling*len(imIn))][:] 48 | else: 49 | im=imIn[:] 50 | 51 | if validation!=1.0: # if we use a validation set 52 | Ntot=len(im) 53 | training_size = int(validation*len(im)) 54 | val_size = Ntot-training_size 55 | 56 | gen_train = generator(rootpath, im[:training_size]) 57 | gen_val = generator(rootpath, im[training_size:]) 58 | 59 | return (gen_train,training_size),(gen_val,val_size), (gen_test,test_size) 60 | else: # without validation set 61 | gen_train = generator(rootpath, im) 62 | training_size = len(im) 63 | 64 | return (gen_train,training_size), (gen_test,test_size) 65 | 66 | def load_data_generator(rootpath, file_train, file_test, validation=1.0,subsampling=1.0,processingTarget=None,transform=[],outSize=(224,224),batch_size=BATCH_SIZE,shuffle=False): 67 | im = open(rootpath+file_train, 'r').readlines() 68 | return load_data_generator_List(rootpath, im[:], file_test, validation,subsampling,processingTarget=processingTarget,transform=transform,outSize=outSize,batch_size=batch_size,shuffle=shuffle) 
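# A minimal usage sketch (hypothetical paths), mirroring how the standard scripts
# call load_data_generator: with validation=0.8, 80% of the training list is kept
# for training and the remaining 20% is used for validation; every generator
# yields (X, Y) batches with X of shape (batch_size, 3, outSize[0], outSize[1]):
#
#   (gen_tr, n_tr), (gen_val, n_val), (gen_te, n_te) = load_data_generator(
#       "/pathToData/", "trainingAnnotations.txt", "testAnnotations.txt",
#       validation=0.8, batch_size=64)
#   X, Y = next(gen_tr)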
69 | 70 | def load_data_generator_List_simple(rootpath, imIn,transform=[],outSize=(224,224),batch_size=BATCH_SIZE,processingTarget=None,sample_weights=None): 71 | ''' create generators from data''' 72 | 73 | 74 | def generator(rootpath, images): 75 | 76 | N=len(images) 77 | nbatches=N/batch_size+1 78 | if N%batch_size==0: 79 | nbatches-=1 80 | i=0 81 | if sample_weights is not None: 82 | rn= sample_weights[:] 83 | while 1: 84 | 85 | X, Y = get_xy_from_file(rootpath, images[i*batch_size:(i+1)*batch_size],processingTarget=processingTarget,transform=transform,outSize=outSize) 86 | if sample_weights is None: 87 | yield(X, Y) 88 | else: 89 | yield(X, Y,rn[i*batch_size:(i+1)*batch_size]) 90 | i=i+1 91 | if i>=nbatches: # we shuffle the data when the end of the dataset is reached 92 | i=0 93 | if sample_weights is None: 94 | random.shuffle(images) 95 | else: 96 | c = zip(images,rn) 97 | np.random.shuffle(c) 98 | images = np.asarray([e[0] for e in c]) 99 | rn = np.asarray([e[1] for e in c]) 100 | 101 | 102 | 103 | gen = generator(rootpath, imIn[:]) 104 | size=len(imIn) 105 | 106 | return (gen,size) 107 | 108 | def load_data_generator_simple(rootpath, fileName, transform=[],outSize=(224,224),batch_size=BATCH_SIZE,processingTarget=None): 109 | im = open(rootpath+fileName, 'r').readlines() 110 | return load_data_generator_List_simple(rootpath, im[:],transform=transform,outSize=outSize,batch_size=batch_size,processingTarget=processingTarget) 111 | 112 | 113 | 114 | def applyTransform(x,transform): 115 | for t in transform: 116 | x=t(x) 117 | return x 118 | 119 | 120 | def get_xy_from_file(rootpath, images, processingTarget=None,transform=[],outSize=(224,224),batch_size=BATCH_SIZE): 121 | '''Extract data arrays from text file''' 122 | 123 | X = np.zeros((len(images),3, outSize[0], outSize[1]), dtype=np.float32) 124 | Y=[] 125 | 126 | 127 | for i,image in enumerate(images): 128 | currentline=image.strip().split(" ") 129 | 130 | imFile=currentline[0] 131 | 132 | X[i]=get_image_for_vgg(rootpath+imFile,transform,outSize) 133 | 134 | Y.append(np.asarray(map(lambda x: float(x),currentline[1:]))) 135 | 136 | 137 | if processingTarget: 138 | Y=processingTarget(Y) 139 | 140 | Y=np.squeeze(np.asarray(Y)).reshape((X.shape[0],len(Y[0]))) 141 | return (X,Y) 142 | 143 | def get_image_for_vgg(imName,transform=[],outSize=(224,224),batch_size=BATCH_SIZE): 144 | '''Preprocess images as VGG inputs''' 145 | im = (cv2.resize(cv2.imread(imName), (outSize[1],outSize[0]))).astype(np.float32) 146 | 147 | 148 | # we substract the mean value of imagenet 149 | if outSize==(224,224): 150 | im[:,:,0] -= 103.939 151 | im[:,:,1] -= 116.779 152 | im[:,:,2] -= 123.68 153 | im = im.transpose(2,0,1) 154 | 155 | 156 | if transform: 157 | im=applyTransform(im,transform) 158 | 159 | im = np.expand_dims(im, axis=0) 160 | 161 | return im 162 | -------------------------------------------------------------------------------- /VGG16_sequential.py: -------------------------------------------------------------------------------- 1 | '''VGG16 model for Keras. 
 2 | # Reference:
 3 | - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
 4 | '''
 5 | 
 6 | # from __future__ import print_function
 7 | # from __future__ import absolute_import
 8 | 
 9 | import warnings
10 | 
11 | import numpy as np
12 | from keras.models import Sequential
13 | from keras.layers import Flatten, Dense, Dropout, activations
14 | from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
15 | from keras.utils.layer_utils import convert_all_kernels_in_model
16 | from keras.utils.data_utils import get_file
17 | from keras.layers.normalization import BatchNormalization
18 | from keras import backend as K
19 | import socket
20 | import os.path
21 | 
22 | 
23 | WEIGHTS_PATH = '/pathTOWeights/vgg16_weights_init.h5'
24 | 
25 | # The paths below are only needed for the corresponding configurations; the deep_gllim ones are placeholders.
26 | TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'  # TensorFlow backend only
27 | TH_WEIGHTS_PATH_DEEP_GLLIM = 'path/to/your_th_weights'
28 | TF_WEIGHTS_PATH_DEEP_GLLIM = 'path/to/your_tf_weights'
29 | TH_WEIGHTS_PATH_DEEP_GLLIM_PCA_BN = '/services/scratch/perception/dataBiwi/Deep_Gllim_pose86407_K2_weights.hdf5'
30 | 
31 | def VGG16(weights='imagenet'):
32 |     '''Instantiate the VGG16 architecture,
33 |     optionally loading weights pre-trained
34 |     on ImageNet. Note that when using TensorFlow,
35 |     for best performance you should set
36 |     `image_dim_ordering="tf"` in your Keras config
37 |     at ~/.keras/keras.json.
38 |     The model and the weights are compatible with both
39 |     TensorFlow and Theano. The dimension ordering
40 |     convention used by the model is the one
41 |     specified in your Keras config file.
42 |     # Arguments
43 |         weights: one of "imagenet" (pre-training on ImageNet),
44 |             `deep_gllim` (fine-tuned weights) or `deep_gllim_PCA_BN`.
45 |             In every case the final softmax and dropout layers
46 |             are removed.
47 |     # Returns
48 |         A Keras model instance.
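    # Example
        A minimal sketch (assuming the Theano weight file configured above exists):
            model = VGG16(weights='imagenet')
            features = model.predict(x)  # x: preprocessed batch of shape (n, 3, 224, 224)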
49 | ''' 50 | # if weights not in {'imagenet', 'deep_gllim'}: 51 | # raise ValueError('The `weights` argument should be either ' 52 | # '`imagenet` (pre-training on ImageNet)' 53 | # 'or `deep_gllim` (fine tunned weights).') 54 | 55 | # Determine proper input shape 56 | if K.image_dim_ordering() == 'th': 57 | INPUT_SHAPE = (3, 224, 224) 58 | else: 59 | INPUT_SHAPE = (224, 224, 3) 60 | 61 | model = Sequential() 62 | model.add(ZeroPadding2D((1,1),input_shape=INPUT_SHAPE)) 63 | model.add(Convolution2D(64, 3, 3, activation='relu', trainable=False)) 64 | model.add(ZeroPadding2D((1,1))) 65 | model.add(Convolution2D(64, 3, 3, activation='relu', trainable=False)) 66 | model.add(MaxPooling2D((2,2), strides=(2,2))) 67 | 68 | model.add(ZeroPadding2D((1,1))) 69 | model.add(Convolution2D(128, 3, 3, activation='relu', trainable=False)) 70 | model.add(ZeroPadding2D((1,1))) 71 | model.add(Convolution2D(128, 3, 3, activation='relu', trainable=False)) 72 | model.add(MaxPooling2D((2,2), strides=(2,2))) 73 | 74 | model.add(ZeroPadding2D((1,1))) 75 | model.add(Convolution2D(256, 3, 3, activation='relu', trainable=False)) 76 | model.add(ZeroPadding2D((1,1))) 77 | model.add(Convolution2D(256, 3, 3, activation='relu', trainable=False)) 78 | model.add(ZeroPadding2D((1,1))) 79 | model.add(Convolution2D(256, 3, 3, activation='relu', trainable=False)) 80 | model.add(MaxPooling2D((2,2), strides=(2,2))) 81 | 82 | model.add(ZeroPadding2D((1,1))) 83 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 84 | model.add(ZeroPadding2D((1,1))) 85 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 86 | model.add(ZeroPadding2D((1,1))) 87 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 88 | model.add(MaxPooling2D((2,2), strides=(2,2))) 89 | 90 | model.add(ZeroPadding2D((1,1))) 91 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 92 | model.add(ZeroPadding2D((1,1))) 93 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 94 | model.add(ZeroPadding2D((1,1))) 95 | model.add(Convolution2D(512, 3, 3, activation='relu', trainable=False)) 96 | model.add(MaxPooling2D((2,2), strides=(2,2))) 97 | 98 | model.add(Flatten()) 99 | model.add(Dense(4096, activation='relu', trainable=True)) 100 | model.add(Dropout(0.5)) 101 | model.add(Dense(4096, activation='relu', trainable=True)) 102 | model.add(Dropout(0.5)) 103 | model.add(Dense(1000, activation='softmax', trainable=True)) 104 | 105 | # load weights 106 | if weights == 'imagenet': 107 | if K.image_dim_ordering() == 'th': 108 | print "LOAD: " + WEIGHTS_PATH 109 | weights_path = WEIGHTS_PATH 110 | model.load_weights(weights_path) 111 | model.pop() # remove softmax layer 112 | model.pop() # remove dropout 113 | if K.backend() == 'tensorflow': 114 | warnings.warn('You are using the TensorFlow backend, yet you ' 115 | 'are using the Theano ' 116 | 'image dimension ordering convention ' 117 | '(`image_dim_ordering="th"`). 
' 118 | 'For best performance, set ' 119 | '`image_dim_ordering="tf"` in ' 120 | 'your Keras config ' 121 | 'at ~/.keras/keras.json.') 122 | convert_all_kernels_in_model(model) 123 | 124 | else: 125 | weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', 126 | TF_WEIGHTS_PATH, 127 | cache_subdir='models') 128 | model.load_weights(weights_path) 129 | model.pop() # remove softmax layer 130 | model.pop() # remove dropout 131 | if K.backend() == 'theano': 132 | convert_all_kernels_in_model(model) 133 | 134 | 135 | elif weights == 'deep_gllim': 136 | if K.image_dim_ordering() == 'th': 137 | weights_path = TH_WEIGHTS_PATH_DEEP_GLLIM 138 | model.load_weights(weights_path) 139 | model.pop() # remove softmax layer 140 | model.pop() # remove dropout 141 | if K.backend() == 'tensorflow': 142 | warnings.warn('You are using the TensorFlow backend, yet you ' 143 | 'are using the Theano ' 144 | 'image dimension ordering convention ' 145 | '(`image_dim_ordering="th"`). ' 146 | 'For best performance, set ' 147 | '`image_dim_ordering="tf"` in ' 148 | 'your Keras config ' 149 | 'at ~/.keras/keras.json.') 150 | convert_all_kernels_in_model(model) 151 | 152 | else: 153 | weights_path = TF_WEIGHTS_PATH_DEEP_GLLIM 154 | model.load_weights(weights_path) 155 | model.pop() # remove softmax layer 156 | model.pop() # remove dropout 157 | if K.backend() == 'theano': 158 | convert_all_kernels_in_model(model) 159 | elif weights == 'deep_gllim_PCA_BN': 160 | model.pop() # remove softmax layer 161 | model.pop() # remove dropout 162 | model.add(Dense(512, activation='linear', trainable=False)) 163 | model.add(BatchNormalization()) 164 | 165 | weights_path = TH_WEIGHTS_PATH_DEEP_GLLIM_PCA_BN 166 | model.load_weights(weights_path) 167 | model.pop() # remove BN 168 | return model 169 | 170 | def extract_features_generator(network, generator, size): 171 | '''Extract VGG features from a generator''' 172 | 173 | print("Extracting features :") 174 | 175 | features = network.predict_generator(generator, val_samples=size) 176 | 177 | return features 178 | 179 | def extract_features(network, x): 180 | '''Extract VGG features from a generator''' 181 | 182 | print("Extracting features :") 183 | 184 | features = network.predict(x, batch_size=64) 185 | 186 | return features 187 | 188 | def extract_XY_generator(network, generator, size): 189 | '''Extract VGG features and data targets from a generator''' 190 | 191 | i=0 192 | X=[] 193 | Y=[] 194 | for x,y in generator: 195 | X.extend(network.predict_on_batch(x)) 196 | Y.extend(y) 197 | i+=len(y) 198 | if i>=size: 199 | break 200 | 201 | return np.asarray(X), np.asarray(Y) 202 | 203 | -------------------------------------------------------------------------------- /resNetStandard.py: -------------------------------------------------------------------------------- 1 | '''Import modules''' 2 | import time 3 | import sys 4 | import numpy as np 5 | import math 6 | from keras.optimizers import SGD 7 | from keras.callbacks import ModelCheckpoint, EarlyStopping,Callback,CSVLogger 8 | from keras.models import Sequential 9 | from keras.layers.normalization import BatchNormalization 10 | from keras.layers import Dense, Dropout 11 | from keras.layers.pooling import GlobalMaxPooling2D,GlobalAveragePooling2D 12 | from resnet50 import ResNet50 13 | from data_generator import load_data_generator 14 | 15 | from test import run_eval 16 | 17 | WIDTH = 224 18 | BATCH_SIZE = 64 19 | NB_EPOCH = 50 20 | LEARNING_RATE = 1e-04 21 | PATIENCE=4 22 | BN=False 23 | trainable=[False]*2+[True]*2 24 | 
optim='adam' 25 | 26 | ROOTPATH=sys.argv[1] 27 | train_txt = sys.argv[2] 28 | test_txt = sys.argv[3] 29 | LOW_DIM = int(sys.argv[4]) 30 | ssRatio = 1.0 # float(sys.argv[3])/100.0 31 | PB_FLAG = sys.argv[5] # to modify according to the task 32 | idOar=sys.argv[6] 33 | nbPop=0 34 | dropoutRate=0 35 | pool="avg" 36 | 37 | print sys.argv 38 | 39 | for idarg,arg in enumerate(sys.argv): 40 | if arg=='-bn': 41 | BN=True 42 | elif arg=='-nbn': 43 | BN=False 44 | 45 | elif arg=='-ft': 46 | nbBlock=int(sys.argv[idarg+1]) 47 | 48 | trainable=[False]*(4-nbBlock)+[True]*nbBlock 49 | 50 | elif arg=='-bs': 51 | BATCH_SIZE= int(sys.argv[idarg+1]) 52 | elif arg=='-opt': 53 | optim=sys.argv[idarg+1] 54 | if optim=="sgd": 55 | LEARNING_RATE=float(sys.argv[idarg+2]) 56 | optim = SGD(lr=LEARNING_RATE) 57 | print "LR " + str(LEARNING_RATE) 58 | 59 | elif arg=='-lr': 60 | LEARNING_RATE=float(sys.argv[idarg+1]) 61 | elif arg=='-rf': 62 | if sys.argv[idarg+1]=="conv": 63 | nbPop=3 64 | elif sys.argv[idarg+1]=="fc1": 65 | nbPop=2 66 | elif arg=='-do': 67 | dropoutRate=float(sys.argv[idarg+1]) 68 | elif arg=='-pool': 69 | pool=sys.argv[idarg+1] 70 | 71 | 72 | 73 | print optim 74 | 75 | class L2Model: 76 | ''' Class of forward model''' 77 | 78 | def __init__(self): 79 | 80 | # if pool is not None: 81 | # changePoolBool=pool 82 | self.networkMod = ResNet50(trainable=trainable,changePool=pool) 83 | # else: 84 | # self.networkMod = ResNet50(trainable=trainable) 85 | self.network=Sequential() 86 | self.network.add(self.networkMod) 87 | 88 | def fit(self, (generator_training, n_train), (generator_val, n_val)): 89 | '''Trains the model for a fixed number of epochs and iterations. 90 | # Arguments 91 | X_train: input data, as a Numpy array or list of Numpy arrays 92 | (if the model has multiple inputs). 93 | Y_train : labels, as a Numpy array. 94 | batch_size: integer. Number of samples per gradient update. 95 | learning_rate: float, learning rate 96 | nb_epoch: integer, the number of epochs to train the model. 97 | validation_split: float (0. < x < 1). 98 | Fraction of the data to use as held-out validation data. 99 | validation_data: tuple (x_val, y_val) or tuple 100 | (x_val, y_val, val_sample_weights) to be used as held-out 101 | validation data. Will override validation_split. 102 | it: integer, number of iterations of the algorithm 103 | 104 | 105 | 106 | # Returns 107 | A `History` object. Its `History.history` attribute is 108 | a record of training loss values and metrics values 109 | at successive epochs, as well as validation loss values 110 | and validation metrics values (if applicable). 
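        # Example
            A minimal sketch, with generators as built in __main__ below:
                model = L2Model()
                model.fit((gen_training, N_train), (gen_val, N_val))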
111 | ''' 112 | 113 | 114 | 115 | if BN: 116 | self.network.add(BatchNormalization()) 117 | self.network.add(Dense(LOW_DIM, activation='linear', trainable=True)) 118 | 119 | 120 | 121 | # train only some layers 122 | # compile the model 123 | 124 | 125 | self.network.compile(optimizer=optim, 126 | loss='mse', 127 | metrics=['mae']) 128 | 129 | self.network.summary() 130 | csv_logger = CSVLogger(ROOTPATH+"ResNet50_"+PB_FLAG+"_"+idOar+'_training.log') 131 | 132 | checkName=ROOTPATH+"ResNet50_"+PB_FLAG+"_"+idOar+"_weights.hdf5" 133 | checkpointer = ModelCheckpoint(filepath=checkName, 134 | monitor='val_loss', 135 | verbose=1, 136 | save_weights_only=True, 137 | save_best_only=True, 138 | mode='min') 139 | 140 | early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE) 141 | 142 | 143 | class CheckNan(Callback): 144 | 145 | def on_batch_end(self, batch, logs={}): 146 | if math.isnan(logs.get('loss')): 147 | print "\nReach a NAN\n" 148 | sys.exit() 149 | 150 | # train the model on the new data for a few epochs 151 | self.network.fit_generator(generator_training, 152 | samples_per_epoch=n_train, 153 | nb_epoch=NB_EPOCH, 154 | verbose=1, 155 | callbacks=[checkpointer,csv_logger, 156 | early_stopping,CheckNan()], 157 | validation_data=generator_val, 158 | nb_val_samples=n_val) 159 | 160 | 161 | self.network.load_weights(checkName) 162 | 163 | 164 | 165 | 166 | 167 | def predict(self, generator, n_predict): 168 | '''Generates output predictions for the input samples, 169 | processing the samples in a batched way. 170 | # Arguments 171 | generator: input a generator object. 172 | batch_size: integer. 173 | # Returns 174 | A Numpy array of predictions and GT. 175 | ''' 176 | '''Extract ResNet50 features and data targets from a generator''' 177 | 178 | i=0 179 | Ypred=[] 180 | Y=[] 181 | for x,y in generator: 182 | if i>=n_predict: 183 | break 184 | Ypred.extend(self.network.predict_on_batch(x)) 185 | Y.extend(y) 186 | i+=len(y) 187 | 188 | return np.asarray(Ypred), np.asarray(Y) 189 | 190 | def evaluate(self, (generator, n_eval),flagFile, l=WIDTH, pbFlag=PB_FLAG): 191 | '''Computes the loss on some input data, batch by batch. 192 | 193 | # Arguments 194 | generator: input a generator object. 195 | batch_size: integer. Number of samples per gradient update. 196 | 197 | # Returns 198 | Scalar test loss (if the model has no metrics) 199 | or list of scalars (if the model computes other metrics). 200 | The attribute `model.metrics_names` will give you 201 | the display labels for the scalar outputs. 
202 | ''' 203 | 204 | Ypred, Y = self.predict(generator, n_eval) 205 | 206 | run_eval(Ypred, Y, l, pbFlag) 207 | file = open(ROOTPATH+"ResNet_output"+pbFlag+ "_"+str(idOar)+"_"+flagFile+".txt", "w") 208 | file.write(" ".join(sys.argv)+"\n") 209 | for y in Ypred-Y: 210 | file.write(np.array_str(y, max_line_width=1000000)+"\n") 211 | 212 | 213 | 214 | 215 | 216 | if __name__ == '__main__': 217 | 218 | l2_Model = L2Model() 219 | 220 | # t=[lambda x:random_rotation(x,2.0,row_index=2,col_index=3,channel_index=1), 221 | # lambda x:random_shift(x,0.03,0.03,row_index=2,col_index=3,channel_index=1), 222 | # lambda x:random_zoom(x,0.05,row_index=2,col_index=3,channel_index=1)] 223 | # t=[lambda x:random_rotation(x,2.0,row_index=1,col_index=2,channel_index=0), 224 | # lambda x:random_shift(x,0.03,0.03,row_index=1,col_index=2,channel_index=0), 225 | # lambda x:random_zoom(x,[0.95,1.05],row_index=1,col_index=2,channel_index=0)] 226 | 227 | (gen_training, N_train), (gen_val, N_val), (gen_test, N_test) = load_data_generator(ROOTPATH, train_txt, test_txt,validation=0.8,subsampling=ssRatio,batch_size=BATCH_SIZE) 228 | 229 | l2_Model.fit((gen_training, N_train),(gen_val, N_val)) 230 | l2_Model.evaluate((gen_training, N_train),"training", 224) 231 | l2_Model.evaluate((gen_val, N_val),"validation", 224) 232 | l2_Model.evaluate((gen_test, N_test),"test", 224) 233 | 234 | -------------------------------------------------------------------------------- /resNetStandardWithVal.py: -------------------------------------------------------------------------------- 1 | '''Import modules''' 2 | import time 3 | import sys 4 | import numpy as np 5 | import math 6 | from keras.optimizers import SGD 7 | from keras.callbacks import ModelCheckpoint, EarlyStopping,Callback,CSVLogger 8 | from keras.models import Sequential 9 | from keras.layers.normalization import BatchNormalization 10 | from keras.layers import Dense, Dropout 11 | from keras.layers.pooling import GlobalMaxPooling2D,GlobalAveragePooling2D 12 | from resnet50 import ResNet50 13 | from data_generator import load_data_generator_simple 14 | 15 | from test import run_eval 16 | 17 | WIDTH = 224 18 | BATCH_SIZE = 64 19 | NB_EPOCH = 50 20 | LEARNING_RATE = 1e-04 21 | PATIENCE=4 22 | BN=False 23 | trainable=[False]*2+[True]*2 24 | optim='adam' 25 | 26 | 27 | ROOTPATH=sys.argv[1] 28 | train_txt = sys.argv[2] 29 | val_txt = sys.argv[3] 30 | test_txt = sys.argv[4] 31 | LOW_DIM = int(sys.argv[5]) 32 | ssRatio = 1.0 # float(sys.argv[3])/100.0 33 | PB_FLAG = sys.argv[6] # to modify according to the task 34 | idOar=sys.argv[7] 35 | 36 | nbPop=0 37 | dropoutRate=0 38 | pool="avg" 39 | 40 | print sys.argv 41 | 42 | for idarg,arg in enumerate(sys.argv): 43 | if arg=='-bn': 44 | BN=True 45 | elif arg=='-nbn': 46 | BN=False 47 | 48 | elif arg=='-ft': 49 | nbBlock=int(sys.argv[idarg+1]) 50 | 51 | trainable=[False]*(4-nbBlock)+[True]*nbBlock 52 | 53 | elif arg=='-bs': 54 | BATCH_SIZE= int(sys.argv[idarg+1]) 55 | elif arg=='-opt': 56 | optim=sys.argv[idarg+1] 57 | if optim=="sgd": 58 | LEARNING_RATE=float(sys.argv[idarg+2]) 59 | optim = SGD(lr=LEARNING_RATE) 60 | print "LR " + str(LEARNING_RATE) 61 | 62 | elif arg=='-lr': 63 | LEARNING_RATE=float(sys.argv[idarg+1]) 64 | elif arg=='-rf': 65 | if sys.argv[idarg+1]=="conv": 66 | nbPop=3 67 | elif sys.argv[idarg+1]=="fc1": 68 | nbPop=2 69 | elif arg=='-do': 70 | dropoutRate=float(sys.argv[idarg+1]) 71 | elif arg=='-pool': 72 | pool=sys.argv[idarg+1] 73 | 74 | 75 | 76 | print optim 77 | 78 | class L2Model: 79 | ''' Class of forward 
model''' 80 | 81 | def __init__(self): 82 | 83 | # if pool is not None: 84 | # changePoolBool=pool 85 | self.networkMod = ResNet50(trainable=trainable,changePool=pool) 86 | # else: 87 | # self.networkMod = ResNet50(trainable=trainable) 88 | self.network=Sequential() 89 | self.network.add(self.networkMod) 90 | 91 | def fit(self, (generator_training, n_train), (generator_val, n_val)): 92 | '''Trains the model for a fixed number of epochs and iterations. 93 | # Arguments 94 | X_train: input data, as a Numpy array or list of Numpy arrays 95 | (if the model has multiple inputs). 96 | Y_train : labels, as a Numpy array. 97 | batch_size: integer. Number of samples per gradient update. 98 | learning_rate: float, learning rate 99 | nb_epoch: integer, the number of epochs to train the model. 100 | validation_split: float (0. < x < 1). 101 | Fraction of the data to use as held-out validation data. 102 | validation_data: tuple (x_val, y_val) or tuple 103 | (x_val, y_val, val_sample_weights) to be used as held-out 104 | validation data. Will override validation_split. 105 | it: integer, number of iterations of the algorithm 106 | 107 | 108 | 109 | # Returns 110 | A `History` object. Its `History.history` attribute is 111 | a record of training loss values and metrics values 112 | at successive epochs, as well as validation loss values 113 | and validation metrics values (if applicable). 114 | ''' 115 | 116 | 117 | 118 | if BN: 119 | self.network.add(BatchNormalization()) 120 | self.network.add(Dense(LOW_DIM, activation='linear', trainable=True)) 121 | 122 | 123 | 124 | # train only some layers 125 | # compile the model 126 | 127 | 128 | self.network.compile(optimizer=optim, 129 | loss='mse', 130 | metrics=['mae']) 131 | 132 | self.network.summary() 133 | csv_logger = CSVLogger(ROOTPATH+"ResNet50_"+PB_FLAG+"_"+idOar+'_training.log') 134 | 135 | checkName=ROOTPATH+"ResNet50_"+PB_FLAG+"_"+idOar+"_weights.hdf5" 136 | checkpointer = ModelCheckpoint(filepath=checkName, 137 | monitor='val_loss', 138 | verbose=1, 139 | save_weights_only=True, 140 | save_best_only=True, 141 | mode='min') 142 | 143 | early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE) 144 | 145 | 146 | class CheckNan(Callback): 147 | 148 | def on_batch_end(self, batch, logs={}): 149 | if math.isnan(logs.get('loss')): 150 | print "\nReach a NAN\n" 151 | sys.exit() 152 | 153 | # train the model on the new data for a few epochs 154 | self.network.fit_generator(generator_training, 155 | samples_per_epoch=n_train, 156 | nb_epoch=NB_EPOCH, 157 | verbose=1, 158 | callbacks=[checkpointer,csv_logger, 159 | early_stopping,CheckNan()], 160 | validation_data=generator_val, 161 | nb_val_samples=n_val) 162 | 163 | 164 | self.network.load_weights(checkName) 165 | 166 | 167 | 168 | 169 | 170 | def predict(self, generator, n_predict): 171 | '''Generates output predictions for the input samples, 172 | processing the samples in a batched way. 173 | # Arguments 174 | generator: input a generator object. 175 | batch_size: integer. 176 | # Returns 177 | A Numpy array of predictions and GT. 
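        # Example
            A minimal sketch:
                Ypred, Y = model.predict(gen_test, N_test)  # whole batches covering the test set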
178 | ''' 179 | '''Extract ResNet50 features and data targets from a generator''' 180 | 181 | i=0 182 | Ypred=[] 183 | Y=[] 184 | for x,y in generator: 185 | if i>=n_predict: 186 | break 187 | Ypred.extend(self.network.predict_on_batch(x)) 188 | Y.extend(y) 189 | i+=len(y) 190 | 191 | return np.asarray(Ypred), np.asarray(Y) 192 | 193 | def evaluate(self, (generator, n_eval),flagFile, l=WIDTH, pbFlag=PB_FLAG): 194 | '''Computes the loss on some input data, batch by batch. 195 | 196 | # Arguments 197 | generator: input a generator object. 198 | batch_size: integer. Number of samples per gradient update. 199 | 200 | # Returns 201 | Scalar test loss (if the model has no metrics) 202 | or list of scalars (if the model computes other metrics). 203 | The attribute `model.metrics_names` will give you 204 | the display labels for the scalar outputs. 205 | ''' 206 | 207 | Ypred, Y = self.predict(generator, n_eval) 208 | 209 | run_eval(Ypred, Y, l, pbFlag) 210 | file = open(ROOTPATH+"ResNet_output"+pbFlag+ "_"+str(idOar)+"_"+flagFile+".txt", "w") 211 | file.write(" ".join(sys.argv)+"\n") 212 | for y in Ypred-Y: 213 | file.write(np.array_str(y, max_line_width=1000000)+"\n") 214 | 215 | 216 | 217 | 218 | 219 | if __name__ == '__main__': 220 | 221 | l2_Model = L2Model() 222 | 223 | # t=[lambda x:random_rotation(x,2.0,row_index=2,col_index=3,channel_index=1), 224 | # lambda x:random_shift(x,0.03,0.03,row_index=2,col_index=3,channel_index=1), 225 | # lambda x:random_zoom(x,0.05,row_index=2,col_index=3,channel_index=1)] 226 | # t=[lambda x:random_rotation(x,2.0,row_index=1,col_index=2,channel_index=0), 227 | # lambda x:random_shift(x,0.03,0.03,row_index=1,col_index=2,channel_index=0), 228 | # lambda x:random_zoom(x,[0.95,1.05],row_index=1,col_index=2,channel_index=0)] 229 | 230 | (gen_training, N_train) = load_data_generator_simple(ROOTPATH, train_txt,batch_size=BATCH_SIZE) 231 | (gen_val, N_val) = load_data_generator_simple(ROOTPATH, val_txt,batch_size=BATCH_SIZE) 232 | (gen_test, N_test) = load_data_generator_simple(ROOTPATH, test_txt,batch_size=BATCH_SIZE) 233 | 234 | 235 | l2_Model.fit((gen_training, N_train),(gen_val, N_val)) 236 | l2_Model.evaluate((gen_training, N_train),"training", 224) 237 | l2_Model.evaluate((gen_val, N_val),"validation", 224) 238 | l2_Model.evaluate((gen_test, N_test),"test", 224) 239 | 240 | -------------------------------------------------------------------------------- /resnet50.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | '''ResNet50 model for Keras. 3 | 4 | # Reference: 5 | 6 | - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) 7 | 8 | Adapted from code contributed by BigMoyan. 
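A minimal usage sketch (as called by the standard scripts): build the regression
backbone with the first two residual stages frozen and average pooling, returning
flattened pooled features:

    model = ResNet50(trainable=[False, False, True, True], changePool="avg")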
9 | ''' 10 | from __future__ import print_function 11 | from __future__ import absolute_import 12 | 13 | import warnings 14 | 15 | from keras.layers import merge, Input 16 | from keras.layers import Dense, Activation, Flatten 17 | from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D 18 | from keras.layers import BatchNormalization 19 | from keras.models import Model 20 | from keras import backend as K 21 | from keras.utils.layer_utils import convert_all_kernels_in_model 22 | from keras.utils.data_utils import get_file 23 | import os.path 24 | 25 | 26 | 27 | WEIGHTS_PATH = '/pathtoweights/resnet50_weights_th_dim_ordering_th_kernels.h5' 28 | 29 | print(WEIGHTS_PATH) 30 | 31 | def identity_block(input_tensor, kernel_size, filters, stage, block,trainable=True): 32 | '''The identity_block is the block that has no conv layer at shortcut 33 | 34 | # Arguments 35 | input_tensor: input tensor 36 | kernel_size: defualt 3, the kernel size of middle conv layer at main path 37 | filters: list of integers, the nb_filters of 3 conv layer at main path 38 | stage: integer, current stage label, used for generating layer names 39 | block: 'a','b'..., current block label, used for generating layer names 40 | ''' 41 | nb_filter1, nb_filter2, nb_filter3 = filters 42 | if K.image_dim_ordering() == 'tf': 43 | bn_axis = 3 44 | else: 45 | bn_axis = 1 46 | conv_name_base = 'res' + str(stage) + block + '_branch' 47 | bn_name_base = 'bn' + str(stage) + block + '_branch' 48 | 49 | x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a',trainable=trainable)(input_tensor) 50 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a',trainable=trainable)(x) 51 | x = Activation('relu')(x) 52 | 53 | x = Convolution2D(nb_filter2, kernel_size, kernel_size, 54 | border_mode='same', name=conv_name_base + '2b',trainable=trainable)(x) 55 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b',trainable=trainable)(x) 56 | x = Activation('relu')(x) 57 | 58 | x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c',trainable=trainable)(x) 59 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c',trainable=trainable)(x) 60 | 61 | x = merge([x, input_tensor], mode='sum') 62 | x = Activation('relu')(x) 63 | return x 64 | 65 | 66 | def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2),trainable=True): 67 | '''conv_block is the block that has a conv layer at shortcut 68 | 69 | # Arguments 70 | input_tensor: input tensor 71 | kernel_size: defualt 3, the kernel size of middle conv layer at main path 72 | filters: list of integers, the nb_filters of 3 conv layer at main path 73 | stage: integer, current stage label, used for generating layer names 74 | block: 'a','b'..., current block label, used for generating layer names 75 | 76 | Note that from stage 3, the first conv layer at main path is with subsample=(2,2) 77 | And the shortcut should have subsample=(2,2) as well 78 | ''' 79 | nb_filter1, nb_filter2, nb_filter3 = filters 80 | if K.image_dim_ordering() == 'tf': 81 | bn_axis = 3 82 | else: 83 | bn_axis = 1 84 | conv_name_base = 'res' + str(stage) + block + '_branch' 85 | bn_name_base = 'bn' + str(stage) + block + '_branch' 86 | 87 | x = Convolution2D(nb_filter1, 1, 1, subsample=strides, 88 | name=conv_name_base + '2a',trainable=trainable)(input_tensor) 89 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a',trainable=trainable)(x) 90 | x = Activation('relu')(x) 91 | 92 | x = Convolution2D(nb_filter2, kernel_size, kernel_size, 
border_mode='same', 93 | name=conv_name_base + '2b',trainable=trainable)(x) 94 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b',trainable=trainable)(x) 95 | x = Activation('relu')(x) 96 | 97 | x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c',trainable=trainable)(x) 98 | x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c',trainable=trainable)(x) 99 | 100 | shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides, 101 | name=conv_name_base + '1',trainable=trainable)(input_tensor) 102 | shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1',trainable=trainable)(shortcut) 103 | 104 | x = merge([x, shortcut], mode='sum') 105 | x = Activation('relu')(x) 106 | return x 107 | 108 | 109 | def ResNet50(include_top=True, weights='imagenet', 110 | input_tensor=None,trainable=[True]*4,changePool=False): 111 | '''Instantiate the ResNet50 architecture, 112 | optionally loading weights pre-trained 113 | on ImageNet. Note that when using TensorFlow, 114 | for best performance you should set 115 | `image_dim_ordering="tf"` in your Keras config 116 | at ~/.keras/keras.json. 117 | 118 | The model and the weights are compatible with both 119 | TensorFlow and Theano. The dimension ordering 120 | convention used by the model is the one 121 | specified in your Keras config file. 122 | 123 | # Arguments 124 | include_top: whether to include the 3 fully-connected 125 | layers at the top of the network. 126 | weights: one of `None` (random initialization) 127 | or "imagenet" (pre-training on ImageNet). 128 | input_tensor: optional Keras tensor (i.e. xput of `layers.Input()`) 129 | to use as image input for the model. 130 | 131 | # Returns 132 | A Keras model instance. 133 | ''' 134 | 135 | t1,t2,t3,t4=trainable 136 | 137 | if weights not in {'imagenet', None}: 138 | raise ValueError('The `weights` argument should be either ' 139 | '`None` (random initialization) or `imagenet` ' 140 | '(pre-training on ImageNet).') 141 | # Determine proper input shape 142 | if K.image_dim_ordering() == 'th': 143 | if include_top: 144 | input_shape = (3, 224, 224) 145 | else: 146 | input_shape = (3, None, None) 147 | else: 148 | if include_top: 149 | input_shape = (224, 224, 3) 150 | else: 151 | input_shape = (None, None, 3) 152 | 153 | if input_tensor is None: 154 | img_input = Input(shape=input_shape) 155 | else: 156 | if not K.is_keras_tensor(input_tensor): 157 | img_input = Input(tensor=input_tensor, shape=input_shape) 158 | else: 159 | img_input = input_tensor 160 | if K.image_dim_ordering() == 'tf': 161 | bn_axis = 3 162 | else: 163 | bn_axis = 1 164 | 165 | x = ZeroPadding2D((3, 3))(img_input) 166 | x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x) 167 | x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) 168 | x = Activation('relu')(x) 169 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 170 | 171 | x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1),trainable=t1) 172 | x = identity_block(x, 3, [64, 64, 256], stage=2, block='b',trainable=t1) 173 | x = identity_block(x, 3, [64, 64, 256], stage=2, block='c',trainable=t1) 174 | 175 | x = conv_block(x, 3, [128, 128, 512], stage=3, block='a',trainable=t2) 176 | x = identity_block(x, 3, [128, 128, 512], stage=3, block='b',trainable=t2) 177 | x = identity_block(x, 3, [128, 128, 512], stage=3, block='c',trainable=t2) 178 | x = identity_block(x, 3, [128, 128, 512], stage=3, block='d',trainable=t2) 179 | 180 | x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a',trainable=t3) 181 | x = 
identity_block(x, 3, [256, 256, 1024], stage=4, block='b',trainable=t3)
182 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c',trainable=t3)
183 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d',trainable=t3)
184 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e',trainable=t3)
185 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f',trainable=t3)
186 | 
187 |     x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a',trainable=t4)
188 |     x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b',trainable=t4)
189 |     xlast = identity_block(x, 3, [512, 512, 2048], stage=5, block='c',trainable=t4)
190 | 
191 |     # the layer is named 'avg_pool' in both cases so that layer names match the weight file
192 |     if changePool=="max":
193 |         x = MaxPooling2D((7, 7), name='avg_pool')(xlast)
194 |     else:
195 |         x = AveragePooling2D((7, 7), name='avg_pool')(xlast)
196 | 
197 |     xflat = Flatten()(x)  # defined unconditionally: the feature model below needs it
198 |     if include_top:
199 |         x = Dense(1000, activation='softmax', name='fc1000')(xflat)
200 | 
201 |     model = Model(img_input, x)
202 | 
203 |     # load weights
204 |     model.load_weights(WEIGHTS_PATH)
205 |     if K.backend() == 'theano':
206 |         convert_all_kernels_in_model(model)
207 |     if changePool=="none":
208 |         xflatNone = Flatten()(xlast)
209 |         modelout = Model(img_input, xflatNone, name='resnet50')
210 |     else:
211 |         modelout = Model(img_input, xflat, name='resnet50')
212 | 
213 |     return modelout

--------------------------------------------------------------------------------
/VGGStandardWithVal.py:
--------------------------------------------------------------------------------
 1 | '''Import modules'''
 2 | import time
 3 | import sys
 4 | import numpy as np
 5 | import math
 6 | from keras.optimizers import SGD
 7 | from keras.callbacks import ModelCheckpoint, EarlyStopping,Callback,CSVLogger
 8 | from keras.layers.core import Activation
 9 | from keras.layers.normalization import BatchNormalization
10 | from keras.layers import Dense, Dropout
11 | from keras.layers.pooling import GlobalMaxPooling2D,GlobalAveragePooling2D
12 | from VGG16_sequential import VGG16
13 | from data_generator import load_data_generator_simple
14 | 
15 | from test import run_eval
16 | 
17 | WIDTH = 224
18 | BATCH_SIZE = 128
19 | NB_EPOCH = 50
20 | LEARNING_RATE = 1e-04
21 | PATIENCE=4
22 | BN=True
23 | layer_nb=24
24 | optim='adadelta'
25 | 
26 | ROOTPATH=sys.argv[1]
27 | train_txt = sys.argv[2]
28 | val_txt = sys.argv[3]
29 | test_txt = sys.argv[4]
30 | LOW_DIM = int(sys.argv[5])
31 | ssRatio = 1.0 # float(sys.argv[3])/100.0
32 | PB_FLAG = sys.argv[6] # to modify according to the task
33 | idOar=sys.argv[7]
34 | nbPop=0
35 | dropoutConf=0
36 | pool=None
37 | BNBA=False  # default: no BN before the last activation; fit() reads this flag unconditionally
38 | print sys.argv
39 | 
40 | for idarg,arg in enumerate(sys.argv):
41 |     if arg=='-bn':
42 |         BN=True
43 |     if arg=='-bnba':
44 |         BN=True
45 |         BNBA=True
46 | 
47 |     if arg=='-nbn':
48 |         BN=False
49 | 
50 |     elif arg=='-ft':
51 |         nbBlock=int(sys.argv[idarg+1])
52 |         if nbBlock==0:
53 |             layer_nb=30
54 |         elif nbBlock==1:
55 |             layer_nb=24
56 |         elif nbBlock==2:
57 |             layer_nb=16
58 |         elif nbBlock==3:
59 |             layer_nb=8
60 | 
61 |     elif arg=='-bs':
62 |         BATCH_SIZE= int(sys.argv[idarg+1])  # BATCH_SIZE (upper case) is the variable consumed by the data generators
63 |     elif arg=='-opt':
64 |         optim=sys.argv[idarg+1]
65 |         if optim=="sgd":
66 |             LEARNING_RATE=float(sys.argv[idarg+2])
67 |             optim = SGD(lr=LEARNING_RATE)
68 |             print "LR " + str(LEARNING_RATE)
69 | 
70 |     elif arg=='-lr':
71 |         LEARNING_RATE=float(sys.argv[idarg+1])
72 |     elif arg=='-rf':
73 |         if sys.argv[idarg+1]=="conv":
74 |             nbPop=3
75 |         elif sys.argv[idarg+1]=="fc1":
76 |             nbPop=2
77 |     elif arg=='-do':
78 |         dropoutConf=float(sys.argv[idarg+1])
79 |     elif arg=='-pool':
80 | 
pool=sys.argv[idarg+1] 81 | 82 | 83 | 84 | print optim 85 | 86 | class L2Model: 87 | ''' Class of forward model''' 88 | 89 | def __init__(self): 90 | 91 | 92 | self.network = VGG16(weights='imagenet') 93 | 94 | 95 | def fit(self, (generator_training, n_train), (generator_val, n_val)): 96 | '''Trains the model for a fixed number of epochs and iterations. 97 | # Arguments 98 | X_train: input data, as a Numpy array or list of Numpy arrays 99 | (if the model has multiple inputs). 100 | Y_train : labels, as a Numpy array. 101 | batch_size: integer. Number of samples per gradient update. 102 | learning_rate: float, learning rate 103 | nb_epoch: integer, the number of epochs to train the model. 104 | validation_split: float (0. < x < 1). 105 | Fraction of the data to use as held-out validation data. 106 | validation_data: tuple (x_val, y_val) or tuple 107 | (x_val, y_val, val_sample_weights) to be used as held-out 108 | validation data. Will override validation_split. 109 | it: integer, number of iterations of the algorithm 110 | 111 | 112 | 113 | # Returns 114 | A `History` object. Its `History.history` attribute is 115 | a record of training loss values and metrics values 116 | at successive epochs, as well as validation loss values 117 | and validation metrics values (if applicable). 118 | ''' 119 | 120 | if pool is None: 121 | for pop in range(nbPop): 122 | self.network.pop() 123 | 124 | 125 | if dropoutConf==-1: 126 | self.network.layers[-2].rate=0.0 127 | elif dropoutConf==1: 128 | self.network.add(Dropout(0.5)) 129 | elif dropoutConf==2: 130 | self.network.layers[-2].rate=0.0 131 | self.network.add(Dropout(0.5)) 132 | 133 | 134 | else: 135 | for pop in range(4): 136 | self.network.pop() 137 | if pool=="max": 138 | self.network.add(GlobalMaxPooling2D()) 139 | elif pool=="avg": 140 | self.network.add(GlobalAveragePooling2D()) 141 | else: 142 | print "ERROR: pooling not valide" 143 | exit(-1) 144 | 145 | 146 | if BNBA: 147 | self.network.layers[-1].activation=Activation('linear') 148 | self.network.add(BatchNormalization()) 149 | self.network.add(Activation('relu')) 150 | 151 | elif BN: 152 | self.network.add(BatchNormalization()) 153 | self.network.add(Dense(LOW_DIM, activation='linear', trainable=True)) 154 | 155 | self.network.summary() 156 | 157 | 158 | # train only some layers 159 | for layer in self.network.layers[:layer_nb]: 160 | layer.trainable = False 161 | for layer in self.network.layers[layer_nb:]: 162 | layer.trainable = True 163 | self.network.layers[-1].trainable = True 164 | 165 | # compile the model 166 | 167 | 168 | self.network.compile(optimizer=optim, 169 | loss='mse', 170 | metrics=['mae']) 171 | 172 | self.network.summary() 173 | csv_logger = CSVLogger(ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+'_training.log') 174 | 175 | 176 | checkpointer = ModelCheckpoint(filepath=ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_weights.hdf5", 177 | monitor='val_loss', 178 | verbose=1, 179 | save_weights_only=True, 180 | save_best_only=True, 181 | mode='min') 182 | 183 | early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE) 184 | 185 | 186 | class CheckNan(Callback): 187 | 188 | def on_batch_end(self, batch, logs={}): 189 | if math.isnan(logs.get('loss')): 190 | print "\nReach a NAN\n" 191 | sys.exit() 192 | 193 | # train the model on the new data for a few epochs 194 | self.network.fit_generator(generator_training, 195 | samples_per_epoch=n_train, 196 | nb_epoch=NB_EPOCH, 197 | verbose=1, 198 | callbacks=[checkpointer,csv_logger, 199 | early_stopping,CheckNan()], 200 | 
validation_data=generator_val, 201 | nb_val_samples=n_val) 202 | 203 | 204 | self.network.load_weights(ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_weights.hdf5") 205 | 206 | 207 | 208 | 209 | 210 | def predict(self, generator, n_predict): 211 | '''Generates output predictions for the input samples, 212 | processing the samples in a batched way. 213 | # Arguments 214 | generator: input a generator object. 215 | batch_size: integer. 216 | # Returns 217 | A Numpy array of predictions and GT. 218 | ''' 219 | '''Extract VGG features and data targets from a generator''' 220 | 221 | i=0 222 | Ypred=[] 223 | Y=[] 224 | for x,y in generator: 225 | if i>=n_predict: 226 | break 227 | Ypred.extend(self.network.predict_on_batch(x)) 228 | Y.extend(y) 229 | i+=len(y) 230 | 231 | return np.asarray(Ypred), np.asarray(Y) 232 | 233 | 234 | def evaluate(self, (generator, n_eval),flagFile, l=WIDTH, pbFlag=PB_FLAG): 235 | '''Computes the loss on some input data, batch by batch. 236 | 237 | # Arguments 238 | generator: input a generator object. 239 | batch_size: integer. Number of samples per gradient update. 240 | 241 | # Returns 242 | Scalar test loss (if the model has no metrics) 243 | or list of scalars (if the model computes other metrics). 244 | The attribute `model.metrics_names` will give you 245 | the display labels for the scalar outputs. 246 | ''' 247 | 248 | Ypred, Y = self.predict(generator, n_eval) 249 | 250 | run_eval(Ypred, Y, l, pbFlag) 251 | file = open(ROOTPATH+"VGG_output"+pbFlag+ "_"+str(idOar)+"_"+flagFile+".txt", "w") 252 | file.write(" ".join(sys.argv)+"\n") 253 | for y in Ypred-Y: 254 | file.write(np.array_str(y, max_line_width=1000000)+"\n") 255 | 256 | 257 | 258 | 259 | if __name__ == '__main__': 260 | 261 | l2_Model = L2Model() 262 | 263 | # t=[lambda x:random_rotation(x,2.0,row_index=2,col_index=3,channel_index=1), 264 | # lambda x:random_shift(x,0.03,0.03,row_index=2,col_index=3,channel_index=1), 265 | # lambda x:random_zoom(x,0.05,row_index=2,col_index=3,channel_index=1)] 266 | # t=[lambda x:random_rotation(x,2.0,row_index=1,col_index=2,channel_index=0), 267 | # lambda x:random_shift(x,0.03,0.03,row_index=1,col_index=2,channel_index=0), 268 | # lambda x:random_zoom(x,[0.95,1.05],row_index=1,col_index=2,channel_index=0)] 269 | 270 | (gen_training, N_train) = load_data_generator_simple(ROOTPATH, train_txt,batch_size=BATCH_SIZE) 271 | (gen_val, N_val) = load_data_generator_simple(ROOTPATH, val_txt,batch_size=BATCH_SIZE) 272 | (gen_test, N_test) = load_data_generator_simple(ROOTPATH, test_txt,batch_size=BATCH_SIZE) 273 | 274 | l2_Model.fit((gen_training, N_train),(gen_val, N_val)) 275 | 276 | l2_Model.evaluate((gen_training, N_train),"training", 224) 277 | l2_Model.evaluate((gen_val, N_val),"validation", 224) 278 | l2_Model.evaluate((gen_test, N_test),"test", 224) 279 | -------------------------------------------------------------------------------- /VGGStandard.py: -------------------------------------------------------------------------------- 1 | '''Import modules''' 2 | import time 3 | import sys 4 | import numpy as np 5 | import math 6 | from keras.optimizers import SGD 7 | from keras.callbacks import ModelCheckpoint, EarlyStopping,Callback,CSVLogger 8 | from keras.layers.core import Activation 9 | from keras.layers.normalization import BatchNormalization 10 | from keras.layers import Dense, Dropout 11 | from keras.layers.pooling import GlobalMaxPooling2D,GlobalAveragePooling2D 12 | from VGG16_sequential import VGG16 13 | from data_generator import load_data_generator 14 | 15 | from 
test import run_eval
 16 | 
 17 | WIDTH = 224
 18 | BATCH_SIZE = 128
 19 | NB_EPOCH = 50
 20 | LEARNING_RATE = 1e-04
 21 | PATIENCE=4
 22 | BN=True
 23 | layer_nb=24
 24 | optim='adadelta'
 25 | 
 26 | ROOTPATH=sys.argv[1]
 27 | train_txt = sys.argv[2]
 28 | test_txt = sys.argv[3]
 29 | LOW_DIM = int(sys.argv[4])
 30 | ssRatio = 1.0 # float(sys.argv[3])/100.0
 31 | PB_FLAG = sys.argv[5] # to modify according to the task
 32 | idOar=sys.argv[6]
 33 | nbPop=0
 34 | dropoutConf=0
 35 | pool=None
 36 | BNBA=False
 37 | epochLength=-1
 38 | 
 39 | print sys.argv
 40 | 
 41 | for idarg,arg in enumerate(sys.argv):
 42 |     if arg=='-bn':
 43 |         BN=True
 44 |     if arg=='-bnba':
 45 |         BN=True
 46 |         BNBA=True
 47 |     if arg=='-nbn':
 48 |         BN=False
 49 | 
 50 |     elif arg=='-ft':
 51 |         nbBlock=int(sys.argv[idarg+1])
 52 |         if nbBlock==0:
 53 |             layer_nb=30
 54 |         elif nbBlock==1:
 55 |             layer_nb=24
 56 |         elif nbBlock==2:
 57 |             layer_nb=16
 58 |         elif nbBlock==3:
 59 |             layer_nb=8
 60 | 
 61 |     elif arg=='-bs':
 62 |         BATCH_SIZE= int(sys.argv[idarg+1])  # BATCH_SIZE (upper case) is the variable consumed by the data generators
 63 |     elif arg=='-opt':
 64 |         optim=sys.argv[idarg+1]
 65 |         if optim=="sgd":
 66 |             LEARNING_RATE=float(sys.argv[idarg+2])
 67 |             optim = SGD(lr=LEARNING_RATE)
 68 |             print "LR " + str(LEARNING_RATE)
 69 | 
 70 |     elif arg=='-lr':
 71 |         LEARNING_RATE=float(sys.argv[idarg+1])
 72 |     elif arg=='-rf':
 73 |         if sys.argv[idarg+1]=="conv":
 74 |             nbPop=3
 75 |         elif sys.argv[idarg+1]=="fc1":
 76 |             nbPop=2
 77 |     elif arg=='-do':
 78 |         dropoutConf=float(sys.argv[idarg+1])
 79 |     elif arg=='-pool':
 80 |         pool=sys.argv[idarg+1]
 81 | 
 82 |     if arg=='-el':
 83 |         epochLength=int(sys.argv[idarg+1])
 84 |     if arg=='-p':
 85 |         PATIENCE=int(sys.argv[idarg+1])
 86 | 
 87 | 
 88 | print optim
 89 | 
 90 | class L2Model:
 91 |     ''' Class of forward model'''
 92 | 
 93 |     def __init__(self):
 94 | 
 95 | 
 96 |         self.network = VGG16(weights='imagenet')
 97 | 
 98 | 
 99 |     def fit(self, (generator_training, n_train), (generator_val, n_val)):
100 |         '''Trains the model for a fixed number of epochs and iterations.
101 |         # Arguments
102 |             X_train: input data, as a Numpy array or list of Numpy arrays
103 |                 (if the model has multiple inputs).
104 |             Y_train : labels, as a Numpy array.
105 |             batch_size: integer. Number of samples per gradient update.
106 |             learning_rate: float, learning rate
107 |             nb_epoch: integer, the number of epochs to train the model.
108 |             validation_split: float (0. < x < 1).
109 |                 Fraction of the data to use as held-out validation data.
110 |             validation_data: tuple (x_val, y_val) or tuple
111 |                 (x_val, y_val, val_sample_weights) to be used as held-out
112 |                 validation data. Will override validation_split.
113 |             it: integer, number of iterations of the algorithm
114 | 
115 | 
116 | 
117 |         # Returns
118 |             A `History` object. Its `History.history` attribute is
119 |             a record of training loss values and metrics values
120 |             at successive epochs, as well as validation loss values
121 |             and validation metrics values (if applicable).
122 | ''' 123 | 124 | if pool is None: 125 | for pop in range(nbPop): 126 | self.network.pop() 127 | 128 | 129 | if dropoutConf==-1: 130 | self.network.layers[-2].rate=0.0 131 | elif dropoutConf==1: 132 | self.network.add(Dropout(0.5)) 133 | elif dropoutConf==2: 134 | self.network.layers[-2].rate=0.0 135 | self.network.add(Dropout(0.5)) 136 | 137 | 138 | else: 139 | for pop in range(4): 140 | self.network.pop() 141 | if pool=="max": 142 | self.network.add(GlobalMaxPooling2D()) 143 | elif pool=="avg": 144 | self.network.add(GlobalAveragePooling2D()) 145 | else: 146 | print "ERROR: pooling not valide" 147 | exit(-1) 148 | 149 | 150 | 151 | if BNBA: 152 | self.network.layers[-1].activation=Activation('linear') 153 | self.network.add(BatchNormalization()) 154 | self.network.add(Activation('relu')) 155 | elif BN: 156 | 157 | self.network.add(BatchNormalization()) 158 | self.network.add(Dense(LOW_DIM, activation='linear', trainable=True)) 159 | 160 | self.network.summary() 161 | 162 | 163 | # train only some layers 164 | for layer in self.network.layers[:layer_nb]: 165 | layer.trainable = False 166 | for layer in self.network.layers[layer_nb:]: 167 | layer.trainable = True 168 | self.network.layers[-1].trainable = True 169 | 170 | # compile the model 171 | 172 | 173 | self.network.compile(optimizer=optim, 174 | loss='mse', 175 | metrics=['mae']) 176 | 177 | self.network.summary() 178 | csv_logger = CSVLogger(ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+'_training.log') 179 | 180 | 181 | checkpointer = ModelCheckpoint(filepath=ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_weights.hdf5", 182 | monitor='val_loss', 183 | verbose=1, 184 | save_weights_only=True, 185 | save_best_only=True, 186 | mode='min') 187 | 188 | early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE) 189 | 190 | 191 | class CheckNan(Callback): 192 | 193 | def on_batch_end(self, batch, logs={}): 194 | if math.isnan(logs.get('loss')): 195 | print "\nReach a NAN\n" 196 | sys.exit() 197 | 198 | # train the model on the new data for a few epochs 199 | if epochLength<0: 200 | spe=n_train 201 | else: 202 | spe=epochLength 203 | 204 | self.network.fit_generator(generator_training, 205 | samples_per_epoch=spe, 206 | nb_epoch=NB_EPOCH*int(n_train/(1.0*spe)), 207 | verbose=1, 208 | callbacks=[checkpointer,csv_logger, 209 | early_stopping,CheckNan()], 210 | validation_data=generator_val, 211 | nb_val_samples=n_val) 212 | 213 | 214 | self.network.load_weights(ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_weights.hdf5") 215 | # self.network.save(ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_network.hdf5") 216 | 217 | 218 | 219 | 220 | def predict(self, generator, n_predict): 221 | '''Generates output predictions for the input samples, 222 | processing the samples in a batched way. 223 | # Arguments 224 | generator: input a generator object. 225 | batch_size: integer. 226 | # Returns 227 | A Numpy array of predictions and GT. 228 | ''' 229 | '''Extract VGG features and data targets from a generator''' 230 | 231 | i=0 232 | Ypred=[] 233 | Y=[] 234 | for x,y in generator: 235 | if i>=n_predict: 236 | break 237 | Ypred.extend(self.network.predict_on_batch(x)) 238 | Y.extend(y) 239 | i+=len(y) 240 | 241 | return np.asarray(Ypred), np.asarray(Y) 242 | 243 | 244 | def evaluate(self, (generator, n_eval),flagFile, l=WIDTH, pbFlag=PB_FLAG): 245 | '''Computes the loss on some input data, batch by batch. 246 | 247 | # Arguments 248 | generator: input a generator object. 249 | batch_size: integer. Number of samples per gradient update. 
250 | 251 | # Returns 252 | Scalar test loss (if the model has no metrics) 253 | or list of scalars (if the model computes other metrics). 254 | The attribute `model.metrics_names` will give you 255 | the display labels for the scalar outputs. 256 | ''' 257 | 258 | Ypred, Y = self.predict(generator, n_eval) 259 | 260 | run_eval(Ypred, Y, l, pbFlag) 261 | file = open(ROOTPATH+"VGG_output"+pbFlag+ "_"+str(idOar)+"_"+flagFile+".txt", "w") 262 | file.write(" ".join(sys.argv)+"\n") 263 | for y in Ypred-Y: 264 | file.write(np.array_str(y, max_line_width=1000000)+"\n") 265 | 266 | 267 | 268 | 269 | if __name__ == '__main__': 270 | 271 | l2_Model = L2Model() 272 | 273 | # t=[lambda x:random_rotation(x,2.0,row_index=2,col_index=3,channel_index=1), 274 | # lambda x:random_shift(x,0.03,0.03,row_index=2,col_index=3,channel_index=1), 275 | # lambda x:random_zoom(x,0.05,row_index=2,col_index=3,channel_index=1)] 276 | # t=[lambda x:random_rotation(x,2.0,row_index=1,col_index=2,channel_index=0), 277 | # lambda x:random_shift(x,0.03,0.03,row_index=1,col_index=2,channel_index=0), 278 | # lambda x:random_zoom(x,[0.95,1.05],row_index=1,col_index=2,channel_index=0)] 279 | 280 | (gen_training, N_train), (gen_val, N_val), (gen_test, N_test) = load_data_generator(ROOTPATH, train_txt, test_txt,validation=0.8,subsampling=ssRatio,batch_size=BATCH_SIZE) 281 | 282 | l2_Model.fit((gen_training, N_train),(gen_val, N_val)) 283 | 284 | l2_Model.evaluate((gen_training, N_train),"training", 224) 285 | l2_Model.evaluate((gen_val, N_val),"validation", 224) 286 | l2_Model.evaluate((gen_test, N_test),"test", 224) 287 | --------------------------------------------------------------------------------