├── Keras_test_MAC.py
├── README.md
├── data
│   ├── oxford5k
│   │   └── example.txt
│   └── paris6k
│       └── example
└── utils.py

/Keras_test_MAC.py:
--------------------------------------------------------------------------------
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.preprocessing import image
from keras.models import Model, load_model
import os
import os.path
import time
import numpy as np
from numpy import linalg as LA
from utils import *

# Starting parameters
layer = 'block5_pool'  # block5_pool, res5a_branch1, activation_43, res5c_relu
network = 'VGG16'  # VGG16, VGG19, ResNet50, ResNet101
L = 3
topResultsQE = 5
nFiles = 100
largeScaleRetrieval = False

base_model = str_to_class(network)(weights='imagenet', include_top=False, input_shape=(None,None,3))
model = Model(inputs=base_model.input, outputs=base_model.get_layer(str(layer)).output)


d = ["oxford5k","paris6k","holidays"]
for dataset in d:
    if (dataset == 'oxford5k'):
        topResultsQE = 8
    elif (dataset == "paris6k"):
        topResultsQE = 6
    elif (dataset == "holidays"):
        topResultsQE = 1

    print("-------------------------------------------------")
    print('Parameters')
    print('Dataset: ' + str(dataset))
    if (dataset=="paris6k" or dataset=="holidays"):
        datasetPCA = 'oxford5k'
    elif (dataset=="oxford5k"):
        datasetPCA = "paris6k"
    print('PCA dataset: ' + str(datasetPCA))
    print('Network: ' + str(network))
    print('Layer: ' + str(layer))
    print('R-MAC descriptors with ' + str(L) + ' scales')

    resolutionLevel = 3
    print('Multi-resolution activated (3 scales: original, +25%, -25% on the largest side)')

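    # topResultsQE = number of top-ranked database items that are averaged with the
    # query descriptor during average query expansion (see retrieveQE* in utils.py).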
    print("Query expansion. Top results used for QE: " + str(topResultsQE))

    if (largeScaleRetrieval):
        print("Activate large scale retrieval of",nFiles,"k files")

    print("------------------------------------------------")

    url = "results/" + dataset + "/" + network + "_L" + str(L)
    savingUrl = datasetPCA + "_"+str(network)


    url += "_multiResolution_pca" + datasetPCA


    PCAImages = readTraining(datasetPCA, False, 0)
    print('PCA with '+str(len(PCAImages))+' images')
    PCAMAC = extractFeatures(PCAImages, model, True, L, resolutionLevel)
    W, Xm = learningPCA(PCAMAC)
    np.save('W'+savingUrl+'.npy', W)
    np.save('Xm'+savingUrl+'.npy', Xm)

    # After the first execution, comment out the snippet above that creates the matrices
    # W and Xm and load them from disk instead; they can be reused for the next PCA.
    #W = np.load('W' + savingUrl + '.npy')
    #Xm = np.load('Xm' + savingUrl + '.npy')

    # ------------------ DB images: reading, description and whitening -----------------------

    DbImages = readTraining(dataset, True)
    print('DB contains ' + str(len(DbImages)) + ' images')

    t1 = time.clock()
    DbMAC = extractFeatures(DbImages, model, True, L, resolutionLevel)
    print("PCA-whitening")
    DbMAC = apply_whitening(DbMAC, Xm, W)
    regions = np.copy(DbMAC)
    nRegions = regions.shape[0]//len(DbImages)
    DbMAC = sumPooling(DbMAC, len(DbImages), False)
    Dbtime = time.clock() - t1
    print("R-MAC and PCA-whitening terminated in",round(Dbtime),"s")

    # ------------------- query images: reading, description and whitening -----------------------
    queryImages, bBox = readTest(dataset, full=True)
    print('QUERY set contains ' + str(len(queryImages)) + ' images')

    queryVersion = (dataset == 'oxford5k' or dataset == 'paris6k')  # assumed: crop query activations only when bounding boxes are available
    queryMAC = extractFeatures(queryImages, model, True, L, resolutionLevel, bBox, queryVersion)
    queryMAC = apply_whitening(queryMAC, Xm, W)
    queryMAC = sumPooling(queryMAC, len(queryImages), False)
    print("Query descriptors saved!")

    retrieval1 = time.clock()
    finalReRank = retrieveRegionsNEW(queryMAC, regions, topResultsQE, url, queryImages, DbImages, dataset)
    retrieval2 = time.clock() - retrieval1
    print("AVG query time:",round(retrieval2/len(queryImages),2),"s")

    retrieval1 = time.clock()
    finalReRank2 = retrieveQERegionsNEW(queryMAC, regions, topResultsQE, url, queryImages, DbImages, finalReRank, dataset)
    retrieval2 = time.clock() - retrieval1
    print("AVG query expansion time:",round(retrieval2/len(queryImages),2),"s")

    if (largeScaleRetrieval):
        queryMAClargeScale = np.copy(queryMAC)


    # ---------- large-scale retrieval -------------------------

    if (largeScaleRetrieval):
        print("LARGE-scale retrieval")
        url += "_"+str(nFiles)+"k"
        distractorImages = readTraining("Flickr1M", False, nFiles)
        limits = nFiles*1000//20
        print("Added",len(distractorImages),"distractors from Flickr with limits",limits)
        t10 = time.clock()
        distractorsMAC = extractAndWhiteningNEW(distractorImages, model, True, L, resolutionLevel, Xm, W, limits, None)
        t11 = time.clock() - t10
        print("Features extracted in",t11,"s")
        DbMAC.extend(distractorsMAC)
        t12 = time.clock()
        finalReRank3 = retrieve(queryMAClargeScale, DbMAC, topResultsQE, url, queryImages, DbImages, dataset, True)
        t13 = time.clock() - t12
        t13 /= len(queryImages)
        print("Avg query time:",t13,"s")
        retrieveQE(queryMAClargeScale, DbMAC, topResultsQE, url, queryImages, DbImages, finalReRank3, dataset, True)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# keras_rmac_plus
Keras implementation of R-MAC+ descriptors.

[[paper](https://arxiv.org/pdf/1806.08565.pdf)] [[project](http://implab.ce.unipr.it/?page_id=858)]

The image below represents the query phase executed with the R-MAC+ descriptors.

![query phase](http://implab.ce.unipr.it/wp-content/uploads/2018/09/queryImage.png)

## Prerequisites for Python3
* Keras (> 2.0.0)
* Tensorflow (> 1.5)
* Scipy
* Sklearn
* OpenCV 3

## Networks
The pipeline was tested with VGG16 and ResNet50. With VGG16 the best performance is reached when the features are extracted from `block5_pool`, while for ResNet50 the best layer is `activation_43` (a minimal extraction sketch is shown after the results table).
It is possible to try other networks as well: before doing so, check that Keras weights are available for the selected network.

## Datasets
* Holidays
* Oxford5k
* Paris6k

Download the datasets and put them into the data folder, then run the script to evaluate the retrieval system.

## Test
` python3 Keras_test_MAC.py `

## Results

| Method | Network | Oxford5k | Paris6k | Holidays |
| :------------- |:-------------:| :-----:| :---:| :---------:|
| R-MAC | VGG16 | 65.56% | 82.80% | 87.65% |
| R-MAC | ResNet50 | 71.77% | 83.31% | 92.55% |
| M-R RMAC+ | ResNet50 | 78.88% | 88.63% | 94.63% / 95.58% |
| M-R RMAC+ with retrieval based on 'db regions' | ResNet50 | 85.39% | 91.90% | 94.37% / 95.87% |

R-MAC is our re-implementation of the Tolias et al. 2016 paper, while M-R RMAC comes from the Gordo et al. 2016 paper.
The last two experiments were also executed on the rotated version of Holidays.
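
For orientation, the extraction step behind these numbers is small: cut the backbone at the layer named in the Networks section and max-pool the activations into an L2-normalized MAC vector, mirroring what `Keras_test_MAC.py` and `calculateMAC` in `utils.py` do. A minimal sketch (the image path below is only a placeholder):

```python
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.models import Model

# Fully-convolutional extractor truncated at block5_pool (use activation_43 for ResNet50).
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)

# MAC descriptor of one image: spatial max-pooling of the activations + L2 normalization.
img = image.load_img('data/oxford5k/jpg/example.jpg')        # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x)                                  # shape (1, h, w, 512)
mac = features.reshape(-1, features.shape[-1]).max(axis=0)   # 512-D MAC vector
mac /= np.linalg.norm(mac, 2)
```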

## References

<pre>
@article{magliani2018accurate,
  title={An accurate retrieval through R-MAC+ descriptors for landmark recognition},
  author={Magliani, Federico and Prati, Andrea},
  journal={arXiv preprint arXiv:1806.08565},
  year={2018}
}

@article{tolias2015particular,
  title={Particular object retrieval with integral max-pooling of CNN activations},
  author={Tolias, Giorgos and Sicre, Ronan and J{\'e}gou, Herv{\'e}},
  journal={arXiv preprint arXiv:1511.05879},
  year={2015}
}

@inproceedings{gordo2016deep,
  title={Deep image retrieval: Learning global representations for image search},
  author={Gordo, Albert and Almaz{\'a}n, Jon and Revaud, Jerome and Larlus, Diane},
  booktitle={European Conference on Computer Vision},
  pages={241--257},
  year={2016},
  organization={Springer}
}
</pre>
--------------------------------------------------------------------------------
/data/oxford5k/example.txt:
--------------------------------------------------------------------------------

Download the dataset and copy the jpg and lab folders here.
--------------------------------------------------------------------------------
/data/paris6k/example:
--------------------------------------------------------------------------------

Download the dataset and copy the jpg and lab folders here.
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.preprocessing import image
from keras.models import Model
import os
import os.path
from PIL import Image, ImageFile
import sys
import numpy as np
from numpy import linalg as LA
import cv2
import operator
from sklearn.decomposition import PCA
import scipy
import math
import time
from sys import getsizeof
from sklearn.preprocessing import normalize
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from tqdm import tqdm
from glob import glob
import shutil

def str_to_class(str):
    return getattr(sys.modules[__name__], str)

def readTraining(dataset, rotated=True, nFiles=0, debug=False):
    if (dataset == 'oxford5k' or dataset == 'paris6k'):
        path = 'data/'+dataset+'/jpg/*.jpg'
    if (dataset == 'holidays' and rotated==True):
        path = 'dataset/holidays/jpg_rotated/Db/*.jpg'
    elif (dataset == 'holidays' and rotated==False):
        path = 'dataset/holidays/jpg_reduced/Db/*.jpg'
    elif (dataset == 'Flickr1M'):
        path = 'data/Flickr1M/im*/*/*.jpg'
    DbImages = np.sort(glob(path))  # TODO: check whether this works with Flickr1M

    if (dataset == 'Flickr1M'):
        DbImages = DbImages[0:int(nFiles*1000)]

    return DbImages

def readTest(dataset, full=False, debug=False):
    bBox = []
    if (dataset == 'holidays'):
        if (not full):  # not rotated
            path = 'dataset/holidays/jpg_reduced/query/*.jpg'
        else:
            path = 'dataset/holidays/jpg_rotated/query/*.jpg'
    elif (dataset == 'oxford5k' or dataset == 'paris6k'):
        path = 'dataset/' + dataset + '/query'
        if (not full):
            path += '_reduced/*.jpg'
        else:
            path += '/*.jpg'

    queryImages = np.sort(glob(path))  # gather the query image paths

    if ((dataset=="oxford5k" or dataset=="paris6k") and full):
        print("Creation of bBox list")
        # insert elements in the bBox list
        url = 'dataset/'+dataset+'/gt_files/'
        lab_filenames = np.sort(os.listdir(url))
        for e in lab_filenames:
            if e.endswith('_query.txt'):
                q_name = e[:-len('_query.txt')]
                q_data = open("{0}/{1}".format(url, e)).readline().split(" ")
                q_filename = q_data[0][5:] if q_data[0].startswith('oxc1_') else q_data[0]
                q_final = [s.rstrip() for s in q_data]
                bBox.append(q_final[1:])
        for i,q in enumerate(queryImages,0):
            img = cv2.imread(q)
            h = img.shape[0]
            w = img.shape[1]
            bBox[i][0] = float(bBox[i][0]) / w
            bBox[i][2] = float(bBox[i][2]) / w
            bBox[i][1] = float(bBox[i][1]) / h
            bBox[i][3] = float(bBox[i][3]) / h

    return queryImages,bBox

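# The bounding-box coordinates returned by readTest are normalized to [0,1] so that
# extractFeatures can map them onto the spatial grid of the convolutional feature maps,
# whatever the input resolution.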
def calculateMAC(featureVector, listData):  # max-pooling and l2-norm
    rows = featureVector.shape[1] * featureVector.shape[2]
    cols = featureVector.shape[3]
    features1 = np.reshape(featureVector, (rows, cols))
    features2 = np.amax(features1, axis=0)
    features2 /= np.linalg.norm(features2, 2)
    listData.append(features2)

    return

def calculateRMAC(features, listData, L):
    W = features.shape[1]
    H = features.shape[2]
    # print("W",W,"H",H)

    for l in range(1,L+1):
        if (l==1):
            heightRegion = widthRegion = min(W,H)
        # NOTE: the per-scale region grid (region size, stride and the loop over the sampled
        # regions, inside which the lines below are executed) is missing from this listing
        # and is not reconstructed here.
        if (finalX > W):
            finalX = W
            initialX = finalX - widthRegion
        if (finalY > H):
            finalY = H
            initialY = finalY - heightRegion

        # print(" X ",initialX,":", finalX," Y ", initialY,":", finalY)

        featureRegion = features[:,initialX:finalX,initialY:finalY,:]  # (old implementation)
        calculateMAC(featureRegion, listData)
    return

def resizeImg(img, i, delta):
    if delta != 0:
        w = img.size[0]
        h = img.size[1]
        newWidth = round(w + w*delta)
        newHeight = round(h + h*delta)
        img = img.resize((newWidth,newHeight))
    return img

def extractFeatures(imgs, model, RMAC, L, resolutionLevel, bBox=[], croppedActivations=False):
    listData = []
    deltas = [0, -0.25, 0.25]
    for j in tqdm(range(0,len(imgs))):
        for i in range(0, resolutionLevel):
            img = image.load_img(imgs[j])
            img = resizeImg(img, i, deltas[i])
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = model.predict(x)
            if (croppedActivations):
                startDim1 = math.floor(bBox[j][1]*features.shape[1])
                endDim1 = math.ceil(bBox[j][3]*features.shape[1])
                startDim2 = math.floor(bBox[j][0]*features.shape[2])
                endDim2 = math.floor(bBox[j][2]*features.shape[2])
                features = np.copy(features[:,startDim1:endDim1,startDim2:endDim2,:])
                # print(features.shape,"->", features2.shape)
            calculateMAC(features, listData)
            if (RMAC):
                calculateRMAC(features, listData, L)

    return listData

def learningPCA(listData):
    fudge = 1E-18
    X = np.matrix(listData)
    mean = X.mean(axis=0)
    # subtract the mean
    X = np.subtract(X, mean)
    # calc covariance matrix
    Xcov = np.dot(X.T,X)
    d,V = np.linalg.eigh(Xcov)
    D = np.diag(1. / np.sqrt(d+fudge))
    W = np.dot(np.dot(V, D), V.T)
    return W, mean

def apply_whitening(listData, Xm, W):
    X = np.matrix(listData)
    X = np.subtract(X, Xm)
    Xnew = np.dot(X,W)
    Xnew /= LA.norm(Xnew,axis=1).reshape(Xnew.shape[0],1)
    return Xnew


def sumPooling(listData, numberImages, largeScaleRetrieval=False):
    newListData = []
    value = 0
    regions = listData.shape[0] // numberImages
    for i, elem in enumerate(listData, 1):
        value = np.add(value,elem)
        if (i%regions==0):
            value /= LA.norm(value, 2)
            newListData.append(value)
            value = 0
    if (not largeScaleRetrieval):
        print("Sum pooling of",regions,"regions. The descriptors are",len(newListData),"of shape",newListData[0].shape)
    return newListData
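
# extractAndWhiteningNEW processes the Flickr1M distractors in chunks of `limits` images:
# each chunk of region descriptors is PCA-whitened and sum-pooled as soon as it is complete,
# so the full set of region descriptors never has to be kept in memory at once.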
def extractAndWhiteningNEW(imgs, model, RMAC, L, resolutionLevel, Xm, W, limits=1000, pca=None):
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    tmpList = []
    finalList = []
    delta = 0.25
    for j in tqdm(range(0,len(imgs))):
        for i in range(0, resolutionLevel):
            img = image.load_img(imgs[j])
            img = resizeImg(img, i, delta)
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = model.predict(x)
            calculateMAC(features, tmpList)
            if (RMAC):
                calculateRMAC(features, tmpList, L)
        if ((j+1)%limits==0):
            tmpList = apply_whitening(tmpList, Xm, W)
            tmpList = sumPooling(tmpList, limits, True)
            finalList.extend(tmpList)
            tmpList = []

    print("Features len",len(finalList))
    return finalList

def write_results(url, queryImages, i, distances, DbImages, dataset, largeScaleRetrieval=False):

    if (dataset=='oxford5k' or dataset=='paris6k'):
        if not os.path.exists(url):
            os.makedirs(url)
        file_query = open(url+"/"+os.path.basename(queryImages[i])[:-4], "w")

        for elem in distances:
            if ((elem[0]>5062 and dataset=='oxford5k') or (elem[0]>6391 and dataset=='paris6k')):
                file_query.write("Flickr1M")
            else:
                file_query.write(os.path.basename(DbImages[elem[0]])[:-4])
            file_query.write("\n")
        file_query.close()
    elif (dataset=='holidays'):
        file_query = open(url, "a")
        queryName = os.path.basename(queryImages[i])
        file_query.write(queryName)
        for i,elem in enumerate(distances,0):
            if (elem[0]<991):
                value = os.path.basename(DbImages[elem[0]])
                if (queryName[:4]==value[:4]):
                    file_query.write(" "+str(i)+" "+str(os.path.basename(DbImages[elem[0]])))
        file_query.write("\n")
        file_query.close()

    return

def calcResults(dataset, url):
    if (dataset=='paris6k' or dataset=='oxford5k'):
        os.system("results/"+dataset+"/compute_ap_all_2 "+url)
    elif (dataset=='holidays'):
        os.system("python2 dataset/Holidays/holidays_map_single.py "+url)
    return

def retrieve(queryMAC, DbMAC, topResultsQE, url, queryImages, DbImages, dataset, largeScaleRetrieval=False):
    if (dataset=='holidays'):
        file_query = open(url, "w")
        file_query.close()
    elif (os.path.exists(url) and (dataset=='paris6k' or dataset=='oxford5k')):
        shutil.rmtree(url)

    reRank = []

    for i,q in enumerate(queryMAC,0):
        distances = {}
        qNP = np.asarray(q)
        for j,dbElem in enumerate(DbMAC,0):
            dbNP = np.asarray(dbElem)
            distances[j] = np.linalg.norm(qNP-dbNP)
        finalDict = sorted(distances.items(), key=operator.itemgetter(1))

        reRank.extend(list(finalDict)[:topResultsQE])

        write_results(url, queryImages, i, finalDict, DbImages, dataset, largeScaleRetrieval)

    calcResults(dataset, url)

    return reRank
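
# retrieveRegionsNEW ranks each database image by its best-matching region: for every image
# only the minimum L2 distance between the query descriptor and the image's whitened region
# descriptors is kept, together with the index of that region, so that query expansion
# (retrieveQERegionsNEW) can later average the best regions instead of the global descriptors.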
def retrieveRegionsNEW(queryMAC, regions, topResultsQE, url, queryImages, DbImages, dataset, largeScaleRetrieval=False):
    if (dataset=='holidays'):
        file_query = open(url, "w")
        file_query.close()
    elif (os.path.exists(url) and (dataset=='paris6k' or dataset=='oxford5k')):
        shutil.rmtree(url)

    reRank = []

    nRegions = regions.shape[0]//len(DbImages)
    for i,q in enumerate(queryMAC,0):
        distances = {}
        bestRegions = []
        qNP = np.asarray(q)
        for j,dbElem in enumerate(regions,0):
            dbNP = np.asarray(dbElem)
            indexDb = j//nRegions
            d = np.linalg.norm(qNP-dbNP)
            if (indexDb in distances):
                if (distances[indexDb][0]>d):
                    distances[indexDb] = [d,j]
            else:
                distances[indexDb] = [d,j]
        finalDict = sorted(distances.items(), key=operator.itemgetter(1))
        reRank.extend(list(finalDict)[:topResultsQE])

        write_results(url, queryImages, i, finalDict, DbImages, dataset, largeScaleRetrieval)

    calcResults(dataset, url)

    return reRank

def retrieveQE(queryMAC, DbMAC, topResultsQE, url, queryImages, DbImages, reRank, dataset, largeScaleRetrieval=False):

    url += '_avgQE'
    if (dataset=='holidays'):
        file_query = open(url, "w")
        file_query.close()
    elif (os.path.exists(url) and (dataset=='paris6k' or dataset=='oxford5k')):
        shutil.rmtree(url)

    finalReRank = []

    for i,q in enumerate(queryMAC,0):
        distances2 = {}
        qNewNP = np.asarray(q)
        for top_results in range(0,int(topResultsQE)):
            index = top_results+(topResultsQE*i)
            dbOLD = np.asarray(DbMAC[reRank[index][0]])
            qNewNP += dbOLD
        qNewNP = np.divide(qNewNP,float(topResultsQE))
        for j,dbElem in enumerate(DbMAC,0):
            dbNP = np.asarray(dbElem)
            distances2[j] = np.linalg.norm(qNewNP-dbNP)
        finalDict = sorted(distances2.items(), key=operator.itemgetter(1))

        finalReRank.extend(list(finalDict))
        write_results(url, queryImages, i, finalDict, DbImages, dataset, largeScaleRetrieval)

    calcResults(dataset, url)
    return finalReRank

def retrieveQERegionsNEW(queryMAC, regions, topResultsQE, url, queryImages, DbImages, reRank, dataset, largeScaleRetrieval=False):

    url += '_avgQE'
    if (dataset=='holidays'):
        file_query = open(url, "w")
        file_query.close()
    elif (os.path.exists(url) and (dataset=='paris6k' or dataset=='oxford5k')):
        shutil.rmtree(url)

    finalReRank = []

    nRegions = regions.shape[0]//len(DbImages)

    for i,q in enumerate(queryMAC,0):
        distances2 = {}
        qNewNP = np.asarray(q)
        for top_results in range(0,int(topResultsQE)):
            index = top_results+(topResultsQE*i)
            dbOLD = np.asarray(regions[reRank[index][1][1]])
            qNewNP += dbOLD
        qNewNP = np.divide(qNewNP,float(topResultsQE))
        for j,dbElem in enumerate(regions,0):
            dbNP = np.asarray(dbElem)
            indexDb = j//nRegions
            d = np.linalg.norm(qNewNP-dbNP)
            if (indexDb in distances2):
                if (distances2[indexDb]>d):
                    distances2[indexDb] = d
            else:
                distances2[indexDb] = d

        finalDict = sorted(distances2.items(), key=operator.itemgetter(1))
        finalReRank.extend(list(finalDict))
        write_results(url, queryImages, i, finalDict, DbImages, dataset, largeScaleRetrieval)

    calcResults(dataset, url)
    return finalReRank
--------------------------------------------------------------------------------