├── Evaluation
│   ├── compute_statistical_deviation.py
│   ├── dataloader.py
│   ├── datasets
│   │   ├── KONIQ.csv
│   │   └── LIVEC.csv
│   ├── zeroshot_hl_model.py
│   └── zeroshot_ll_model.py
├── LICENSE
├── README.md
├── assets
│   ├── framework.png
│   └── zeroshot.png
├── configs.py
├── data_read_utils.py
├── dataloader_contrastive.py
├── get_concatenated_features.py
├── losses.py
├── networks.py
├── test_zeroshot.py
├── train.py
├── train_hlm.py
└── train_llm.py

/Evaluation/compute_statistical_deviation.py:
--------------------------------------------------------------------------------
from torch import nn
import torch
import time


class NIQE(nn.Module):
    """Statistical deviation of test features from a pristine reference, in the
    spirit of NIQE: the Mahalanobis-style distance between the two multivariate
    Gaussian fits under their (regularized) pooled covariance."""

    def __init__(self, p, stability_scale=0.001, args=None):
        super(NIQE, self).__init__()
        self.mu_r = None
        self.sigma_r = None
        self.eye_stability = None
        self.stability_scale = stability_scale
        self.args = args

        # Pristine features are required; their statistics are fixed at construction.
        self.compute_pristine(p.unsqueeze(0))

    def forward(self, x):
        # Gaussian fit of the test features, one per batch item.
        mu_t = torch.mean(x, dim=-2, keepdim=True)
        sigma_t = self.batch_covariance(x, mu_t)

        mean_diff = self.mu_r - mu_t

        # Pooled covariance, ridge-regularized so the inverse is well conditioned.
        cov_sum = ((self.sigma_r + sigma_t) / 2) + self.eye_stability
        cov_sum_inv = torch.linalg.inv(cov_sum)

        fit = torch.matmul(torch.matmul(mean_diff, cov_sum_inv), torch.transpose(mean_diff, -2, -1))

        return torch.sqrt(fit).squeeze()

    def compute_pristine(self, p):
        # Gaussian fit of the pristine features, computed once.
        self.mu_r = torch.mean(p, dim=-2, keepdim=True)
        self.sigma_r = self.batch_covariance(p, self.mu_r)
        self.eye_stability = self.stability_scale * torch.eye(p.size(-1), device=p.device).unsqueeze(0)

    def batch_covariance(self, tensor, mu, bias=False):
        # Batched sample covariance; bias=False gives the unbiased (N - 1) estimator.
        tensor = tensor - mu
        factor = 1 / (tensor.shape[-2] - int(not bool(bias)))
        return factor * tensor.transpose(-1, -2) @ tensor.conj()


if __name__ == '__main__':
    t = time.time()
    p = torch.randn((1, 422, 2048)).cuda()
    niqe_model = NIQE(p).cuda()
    x = torch.randn((64, 48, 2048)).cuda()

    score = niqe_model(x)
    print(score)
    print(time.time() - t)
--------------------------------------------------------------------------------
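The score returned by `forward` is the Mahalanobis distance between the pristine and test Gaussian fits under the regularized pooled covariance. A minimal CPU sketch cross-checking `batch_covariance` and the forward pass against `torch.cov` and the closed form (the shapes here are arbitrary, and the import assumes this file is on `PYTHONPATH`):

import torch

from compute_statistical_deviation import NIQE  # assumed import path

p = torch.randn(422, 64)    # pristine features: 422 vectors of dimension 64
x = torch.randn(1, 48, 64)  # one test item with 48 feature vectors

score = NIQE(p)(x)

# The same quantity in closed form.
mu_r, mu_t = p.mean(dim=0), x[0].mean(dim=0)
# torch.cov expects variables in rows; 0.001 matches the default stability_scale.
pooled = (torch.cov(p.T) + torch.cov(x[0].T)) / 2 + 0.001 * torch.eye(64)
diff = (mu_r - mu_t).unsqueeze(0)
ref = torch.sqrt(diff @ torch.linalg.inv(pooled) @ diff.T).squeeze()

print(torch.allclose(score, ref, atol=1e-4))  # expected: True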
/Evaluation/dataloader.py:
--------------------------------------------------------------------------------
from torch.utils.data import Dataset
from torchvision import transforms
from os.path import join
from PIL import Image
import pandas as pd
import numpy as np
import torch


class TestDataset(Dataset):
    def __init__(self, img_dir, data_loc, clive=False):
        """
        Args:
            img_dir (string): Directory with all the images.
            data_loc (string): Location of the CSV file with image names and corresponding MOS.
            clive (bool): If True, specific images of the CLIVE database are resized to 500x500.
        """

        self.img_dir = img_dir
        self.clive = clive
        self.tensorizer = transforms.ToTensor()

        self.data = pd.read_csv(data_loc)
        self.data = self.data.astype({'im_loc': str, 'mos': np.float32})

        if self.clive:
            self.resizer = transforms.Resize((500, 500))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        im_loc = self.data.iloc[idx]['im_loc']
        x = Image.open(join(self.img_dir, im_loc))
        x = self.tensorizer(x)

        # Two oversized CLIVE images are brought back to the 500x500 resolution
        # used by the rest of the database.
        if self.clive:
            if im_loc == '1024.JPG' or im_loc == '1113.JPG':
                x = self.resizer(x)

        # Replicate single-channel (grayscale) images to three channels.
        if x.shape[0] < 3:
            x = torch.cat([x] * 3, dim=0)

        return x, self.data.iloc[idx]['mos'], im_loc
--------------------------------------------------------------------------------
/Evaluation/datasets/LIVEC.csv:
--------------------------------------------------------------------------------
1 | ,im_loc,mos,std
2 | 0,trainingImages/t1.bmp,63.963406174124174,18.376200073180403
3 | 1,trainingImages/t2.bmp,25.335306406685238,13.651431474621623
4 | 2,trainingImages/t3.bmp,48.93656326246079,18.92459579070641
5 | 3,trainingImages/t4.bmp,35.88633193863319,18.24136649940471
6 | 4,trainingImages/t5.bmp,66.50923666782852,19.25765805423188
7 | 5,trainingImages/t6.bmp,54.579700034879664,19.958247927677647
8 | 6,trainingImages/t7.bmp,77.88214783821478,19.64578344339436
9 | 7,100.bmp,32.56107532210109,19.12472638223644
10 | 8,101.bmp,66.35954917017587,20.219785878159723
11 | 9,102.bmp,44.69498452012384,19.661161627712342
12 | 10,103.bmp,39.2345587325164,19.338946816630564
13 | 11,104.bmp,9.22907379891035,11.901880661402894
14 | 12,105.bmp,41.33135141830794,21.784569587678696
15 | 13,106.bmp,68.92207631318136,21.24046204424292
16 | 14,107.bmp,27.957533737773925,15.527478216097144
17 | 15,108.bmp,75.35151515151514,16.551768773142832
18 | 16,109.bmp,50.63793103448276,19.245627032042574
19 | 17,10.bmp,82.91071428571429,15.124168847215245
20 | 18,110.bmp,32.13690476190476,18.59847575856079
21 | 19,111.bmp,51.84153005464481,21.845159945184314
22 | 20,112.bmp,30.31875,16.300392353575514
23 | 21,113.bmp,68.21714285714286,19.554484533043507
24 | 22,114.bmp,24.871134020618555,16.16427621795479
25 | 23,115.bmp,31.293785310734464,19.075615557183596
26 | 24,116.bmp,26.097701149425287,21.222648372276577
27 | 25,117.bmp,70.4327485380117,20.109695731191483
28 | 26,118.bmp,26.530864197530864,18.75509797014246
29 | 27,119.bmp,51.31707317073171,21.838154289961395
30 | 28,11.bmp,60.9472891566265,20.553521120680568
31 | 29,120.bmp,18.78488372093023,15.525312419316858
32 | 30,121.bmp,19.75,15.989049396346312
33 | 31,122.bmp,79.36708860759494,17.246195714679843
34 | 32,123.bmp,67.29775280898876,20.488003807261155
35 | 33,124.bmp,70.99459459459459,20.18891680915905
36 | 34,125.bmp,81.90502793296089,16.02340028209802
37 | 35,126.bmp,52.843023255813954,20.039837754812833
38 | 36,127.bmp,41.42690058479532,19.909181484831088
39 | 37,128.bmp,48.81366459627329,18.07768132565128
40 | 38,129.bmp,72.88268156424581,20.860227237718796
41 | 39,12.bmp,15.895238095238096,15.255667272011774
42 | 40,130.bmp,86.6086956521739,13.664165988788382
43 | 41,131.bmp,48.52903225806452,21.980011485473565
44 | 42,132.bmp,73.02590673575129,17.456921750758063
45 | 43,133.bmp,27.005714285714287,20.76067711529935
46 | 44,134.bmp,32.30898876404494,22.118582010564445
47 | 45,135.bmp,65.94300518134715,19.21701354716305
48 | 46,136.bmp,67.16315789473684,21.09137867767781
49 | 47,137.bmp,59.8421052631579,21.73269925062844
50 | 
48,138.bmp,67.58695652173913,18.72764367669247 51 | 49,139.bmp,61.92857142857143,18.524161735604764 52 | 50,13.bmp,77.40229885057471,17.792938633204184 53 | 51,140.bmp,77.45833333333333,17.61449704386142 54 | 52,141.bmp,20.581818181818182,20.73793293768008 55 | 53,142.bmp,26.405714285714286,15.273407565226902 56 | 54,143.bmp,61.149484536082475,21.32631556280211 57 | 55,144.bmp,76.92391304347827,18.530868469765885 58 | 56,145.bmp,55.59239130434783,22.068748686034642 59 | 57,146.bmp,34.2111801242236,19.911808662791096 60 | 58,147.bmp,71.32335329341318,18.31658326278743 61 | 59,148.bmp,41.97076023391813,18.8799385905609 62 | 60,149.bmp,72.45744680851064,17.9525011402219 63 | 61,14.bmp,66.39411764705882,21.599719771035463 64 | 62,150.bmp,65.79617834394904,19.650386194907604 65 | 63,151.bmp,55.77922077922078,23.13479871983214 66 | 64,152.bmp,79.43786982248521,15.581123659189442 67 | 65,153.bmp,73.0880829015544,19.15853576502542 68 | 66,154.bmp,11.977142857142857,13.570147377084613 69 | 67,155.bmp,73.63905325443787,18.498549210893845 70 | 68,156.bmp,28.512820512820515,17.656393223725004 71 | 69,157.bmp,17.51923076923077,16.807245424497882 72 | 70,158.bmp,79.41666666666667,15.300352884923715 73 | 71,159.bmp,73.83443708609272,16.814450318531044 74 | 72,15.bmp,56.398963730569946,21.582964901063825 75 | 73,160.bmp,41.34705882352941,20.65875511486419 76 | 74,161.bmp,8.246835443037975,10.488505512396728 77 | 75,162.bmp,70.6375,21.155455605748415 78 | 76,163.bmp,46.36942675159236,21.598696133546568 79 | 77,164.bmp,80.5,17.21883171648046 80 | 78,165.bmp,71.86338797814207,18.612825647075354 81 | 79,166.bmp,49.795811518324605,20.884776204238573 82 | 80,167.bmp,75.17777777777778,18.464870937033144 83 | 81,168.bmp,65.4180790960452,18.844317648904127 84 | 82,169.bmp,30.10919540229885,16.113205959252703 85 | 83,16.bmp,77.60215053763442,16.803886156029012 86 | 84,170.bmp,60.287958115183244,21.320013121313345 87 | 85,171.bmp,25.136363636363637,17.6730185428405 88 | 86,172.bmp,63.04522613065327,20.470549548369316 89 | 87,173.bmp,74.54066985645933,19.514036406054682 90 | 88,174.bmp,72.91,19.756861285112976 91 | 89,175.bmp,74.36477987421384,19.519682174617333 92 | 90,176.bmp,61.345360824742265,22.716073698447005 93 | 91,177.bmp,58.43010752688172,20.905138563682232 94 | 92,178.bmp,53.70238095238095,19.16976146894275 95 | 93,179.bmp,66.10843373493977,24.30563573414687 96 | 94,17.bmp,61.461988304093566,18.740095882103983 97 | 95,180.bmp,39.41340782122905,21.70090453005669 98 | 96,181.bmp,29.128205128205128,16.70573006081812 99 | 97,182.bmp,52.63131313131313,20.507778128046116 100 | 98,183.bmp,23.01212121212121,16.495745156794257 101 | 99,184.bmp,67.0,18.96299269694897 102 | 100,185.bmp,56.46994535519126,22.001946055219253 103 | 101,186.bmp,49.80769230769231,22.792504536081488 104 | 102,187.bmp,28.71195652173913,17.144473483151728 105 | 103,188.bmp,28.959537572254334,16.582899002279454 106 | 104,189.bmp,46.053763440860216,20.33708666127136 107 | 105,18.bmp,64.55191256830601,21.059473192773567 108 | 106,190.bmp,31.729468599033815,18.192184687299783 109 | 107,191.bmp,46.474747474747474,19.015806107436067 110 | 108,192.bmp,35.48351648351648,20.391127220778674 111 | 109,193.bmp,74.6608187134503,18.98983125492355 112 | 110,194.bmp,26.666666666666668,16.829547735335762 113 | 111,195.bmp,69.18407960199005,19.033679236386003 114 | 112,196.bmp,61.78888888888889,19.554247388669953 115 | 113,197.bmp,55.48235294117647,23.60228239124087 116 | 114,198.bmp,78.51724137931035,18.07490008700663 117 | 
115,199.bmp,66.2122905027933,21.091285404285383 118 | 116,19.bmp,30.936046511627907,20.538979702081164 119 | 117,200.bmp,9.139072847682119,10.579249964025076 120 | 118,201.bmp,49.5,20.479121317524545 121 | 119,202.bmp,24.515151515151516,17.78071237173627 122 | 120,203.bmp,50.99441340782123,20.23083007364213 123 | 121,204.bmp,75.48170731707317,19.279498173653838 124 | 122,205.bmp,77.95977011494253,18.268188084771968 125 | 123,206.bmp,72.5909090909091,21.9321740769186 126 | 124,207.bmp,43.66863905325444,21.68627004018549 127 | 125,208.bmp,53.84146341463415,21.968448938546494 128 | 126,209.bmp,52.2906976744186,21.459804002238634 129 | 127,20.bmp,76.38541666666667,20.467504466653857 130 | 128,210.bmp,70.5421052631579,20.951589849878662 131 | 129,211.bmp,42.68604651162791,19.819514057990933 132 | 130,212.bmp,61.71264367816092,21.003664995890215 133 | 131,213.bmp,13.113924050632912,17.114442325010362 134 | 132,214.bmp,73.11111111111111,18.230011885167087 135 | 133,215.bmp,57.19135802469136,19.821731580888958 136 | 134,216.bmp,75.17901234567901,19.679072442221003 137 | 135,217.bmp,68.14723926380368,21.19959035500868 138 | 136,218.bmp,67.98850574712644,19.784091380266656 139 | 137,219.bmp,48.80346820809248,19.06404717992336 140 | 138,21.bmp,53.02061855670103,21.282447647151564 141 | 139,220.bmp,67.28220858895706,20.468135187529604 142 | 140,221.bmp,84.16022099447514,16.038210206178146 143 | 141,222.bmp,59.2565445026178,22.67440352832053 144 | 142,223.bmp,68.3112582781457,19.02811443818981 145 | 143,224.bmp,14.638297872340425,13.50092178578477 146 | 144,225.bmp,64.05294117647058,18.988710992177648 147 | 145,226.bmp,49.475675675675674,20.550328007080328 148 | 146,227.bmp,72.70760233918129,17.958019692500336 149 | 147,228.bmp,41.1,18.67937190890643 150 | 148,229.bmp,57.666666666666664,22.33517066164592 151 | 149,22.bmp,52.0,19.764308917256486 152 | 150,230.bmp,43.247126436781606,19.122721038152406 153 | 151,231.bmp,45.37106918238994,23.68799125848944 154 | 152,232.bmp,81.76878612716763,16.770466747031662 155 | 153,233.bmp,81.25555555555556,16.51548189268889 156 | 154,234.bmp,64.68062827225131,17.733037606712816 157 | 155,235.bmp,53.15243902439025,18.177919091054445 158 | 156,236.bmp,30.983050847457626,17.438683024799932 159 | 157,237.bmp,68.82432432432432,17.725310016657406 160 | 158,238.bmp,59.031847133757964,20.06317598339267 161 | 159,239.bmp,3.497584541062802,6.762887299273335 162 | 160,23.bmp,77.11392405063292,18.279491373829718 163 | 161,240.bmp,53.08,19.492539097300032 164 | 162,241.bmp,59.45,21.3378925945689 165 | 163,242.bmp,43.88205128205128,19.78522399554924 166 | 164,243.bmp,34.123456790123456,17.986456406511497 167 | 165,244.bmp,56.445086705202314,19.361723256553613 168 | 166,245.bmp,10.727272727272727,11.160887518452848 169 | 167,246.bmp,52.03048780487805,19.843381793121363 170 | 168,247.bmp,74.27218934911242,19.24241954794592 171 | 169,248.bmp,74.9493670886076,19.576340024857718 172 | 170,249.bmp,73.18478260869566,18.36064295683973 173 | 171,24.bmp,35.835978835978835,25.02678515578044 174 | 172,250.bmp,50.574585635359114,22.334607313653983 175 | 173,251.bmp,61.77717391304348,17.810456824502225 176 | 174,252.bmp,39.15432098765432,17.22614327901048 177 | 175,253.bmp,46.70792079207921,23.278090325190433 178 | 176,254.bmp,48.78,18.836944279756672 179 | 177,255.bmp,57.24277456647399,22.017937120623305 180 | 178,256.bmp,70.32417582417582,19.027731089700463 181 | 179,257.bmp,71.79640718562874,17.392540612045007 182 | 180,258.bmp,68.36021505376344,21.594652189760893 183 | 
181,259.bmp,61.61309523809524,20.641196408263934 184 | 182,25.bmp,71.90058479532163,21.543022610242375 185 | 183,260.bmp,49.090425531914896,21.30921604817989 186 | 184,261.bmp,85.21637426900585,15.579963405538615 187 | 185,262.bmp,62.285714285714285,20.86873510997521 188 | 186,263.bmp,27.153846153846153,22.45100485607758 189 | 187,264.bmp,39.01863354037267,25.022607790178824 190 | 188,265.bmp,60.15217391304348,21.022202565686552 191 | 189,266.bmp,61.791044776119406,19.95384973890966 192 | 190,267.bmp,70.74172185430463,17.707047025091047 193 | 191,268.bmp,75.64161849710983,17.655584192520816 194 | 192,269.bmp,62.07936507936508,20.795586878885736 195 | 193,26.bmp,78.61176470588235,15.58795933268133 196 | 194,270.bmp,21.574074074074073,13.980379132560433 197 | 195,271.bmp,83.06172839506173,15.285153499098072 198 | 196,272.bmp,44.39890710382514,19.838618097785552 199 | 197,273.bmp,65.32544378698225,19.45157304688517 200 | 198,274.bmp,50.188235294117646,21.863478567973534 201 | 199,275.bmp,47.02732240437158,25.040721054373755 202 | 200,276.bmp,71.6358024691358,20.85702770366733 203 | 201,277.bmp,68.41071428571429,18.869336516279127 204 | 202,278.bmp,28.104166666666668,16.68242274435256 205 | 203,279.bmp,70.64497041420118,19.007313477293934 206 | 204,27.bmp,63.289308176100626,23.575450090861565 207 | 205,280.bmp,34.09497206703911,19.019414308681608 208 | 206,281.bmp,43.57058823529412,20.77089066603105 209 | 207,282.bmp,45.7103825136612,21.853363745559548 210 | 208,283.bmp,20.16842105263158,16.18586007219892 211 | 209,284.bmp,66.91489361702128,26.15775243189077 212 | 210,285.bmp,56.076502732240435,19.186036726857715 213 | 211,286.bmp,40.741379310344826,20.738582354792374 214 | 212,287.bmp,30.91160220994475,18.431040489613522 215 | 213,288.bmp,47.797687861271676,18.894123467355573 216 | 214,289.bmp,43.458333333333336,19.6715101955405 217 | 215,28.bmp,61.013513513513516,21.19747979067339 218 | 216,290.bmp,35.8125,18.37184375225151 219 | 217,291.bmp,30.916666666666668,24.247332507567393 220 | 218,292.bmp,43.546875,23.31680046448319 221 | 219,293.bmp,40.67231638418079,21.04526970580898 222 | 220,294.bmp,12.83435582822086,17.634234312794256 223 | 221,295.bmp,72.46913580246914,20.45178327188308 224 | 222,296.bmp,38.37931034482759,19.896984045028045 225 | 223,297.bmp,27.07182320441989,17.63709258893858 226 | 224,298.bmp,49.63681592039801,24.35513165250694 227 | 225,299.bmp,56.39664804469274,20.721377712865813 228 | 226,29.bmp,38.506024096385545,19.086643588916438 229 | 227,300.bmp,41.25966850828729,24.68566785945401 230 | 228,301.bmp,74.7877094972067,18.06448078612937 231 | 229,302.bmp,63.908496732026144,21.14186401840402 232 | 230,303.bmp,51.137724550898206,21.37894363470092 233 | 231,304.bmp,87.36559139784946,13.57285442403054 234 | 232,305.bmp,67.19886363636364,20.524833707031558 235 | 233,306.bmp,51.17326732673267,19.857569410763876 236 | 234,307.bmp,13.71938775510204,15.063211481488752 237 | 235,308.bmp,44.92934782608695,23.5235124233354 238 | 236,309.bmp,19.376404494382022,15.094388694152183 239 | 237,30.bmp,35.103658536585364,20.943575644242866 240 | 238,310.bmp,69.67142857142858,18.480572282841678 241 | 239,311.bmp,22.655844155844157,17.784247957856813 242 | 240,312.bmp,56.67777777777778,21.429801093860117 243 | 241,313.bmp,49.247191011235955,22.049907899868582 244 | 242,314.bmp,52.79213483146067,21.832090229254916 245 | 243,315.bmp,69.937106918239,16.62797914093234 246 | 244,316.bmp,49.301587301587304,22.46673776892653 247 | 245,317.bmp,34.26704545454545,24.494716498943934 248 | 
246,318.bmp,35.16931216931217,19.066315123712627 249 | 247,319.bmp,76.2909090909091,17.49460480660384 250 | 248,31.bmp,53.55191256830601,22.75493628738307 251 | 249,320.bmp,77.07142857142857,17.90512970640679 252 | 250,321.bmp,44.26285714285714,20.015498100312474 253 | 251,322.bmp,53.47938144329897,21.542152620195093 254 | 252,323.bmp,65.39080459770115,21.212035149580327 255 | 253,324.bmp,77.92682926829268,17.383639786191807 256 | 254,325.bmp,52.353932584269664,20.5660987908659 257 | 255,326.bmp,46.90769230769231,18.990819602723512 258 | 256,327.bmp,58.42045454545455,19.67665569198127 259 | 257,328.bmp,82.53939393939395,16.901615224333636 260 | 258,329.bmp,6.39344262295082,12.48068224465391 261 | 259,32.bmp,68.78888888888889,18.44998944418068 262 | 260,330.bmp,67.72413793103448,18.369483147390195 263 | 261,331.bmp,67.18888888888888,18.98832010564686 264 | 262,332.bmp,80.5906432748538,16.966289245448877 265 | 263,333.bmp,51.09289617486339,20.33799842726965 266 | 264,334.bmp,58.38974358974359,22.535925441412985 267 | 265,335.bmp,43.86390532544379,23.44749324383216 268 | 266,336.bmp,47.379120879120876,23.04725396321601 269 | 267,337.bmp,11.760479041916168,14.271552959139449 270 | 268,338.bmp,86.60215053763442,14.916082222343187 271 | 269,339.bmp,62.31764705882353,22.81451409963635 272 | 270,33.bmp,51.55232558139535,23.427944661264494 273 | 271,340.bmp,27.962264150943398,16.900260949394557 274 | 272,341.bmp,8.932960893854748,10.236307799379166 275 | 273,342.bmp,67.68681318681318,18.793608345658132 276 | 274,343.bmp,66.8625,19.185780734438115 277 | 275,344.bmp,79.80225988700565,16.91692252128518 278 | 276,345.bmp,41.338541666666664,19.265436428850148 279 | 277,346.bmp,58.916666666666664,19.547447101144524 280 | 278,347.bmp,58.65217391304348,21.50966668429721 281 | 279,348.bmp,30.537634408602152,16.208812511173587 282 | 280,349.bmp,69.28333333333333,19.264195230455258 283 | 281,34.bmp,34.255434782608695,20.23023147133274 284 | 282,350.bmp,58.6551724137931,18.794716792501685 285 | 283,351.bmp,71.02857142857142,17.82366648666639 286 | 284,352.bmp,52.03061224489796,19.770454040222933 287 | 285,353.bmp,68.09944751381215,19.10180473509002 288 | 286,354.bmp,52.682464454976305,18.69169985371448 289 | 287,355.bmp,76.27624309392266,17.354824472575643 290 | 288,356.bmp,43.20245398773006,19.774081898495954 291 | 289,357.bmp,64.5524861878453,22.314264419028202 292 | 290,358.bmp,50.66847826086956,20.500136901303915 293 | 291,359.bmp,62.923529411764704,20.233840931234262 294 | 292,35.bmp,34.37988826815643,21.648645752305022 295 | 293,360.bmp,24.97093023255814,17.304776518570737 296 | 294,361.bmp,67.38150289017341,19.488640222657637 297 | 295,362.bmp,44.56204379562044,20.355611061192395 298 | 296,363.bmp,36.52147239263804,20.953015465033587 299 | 297,364.bmp,23.164893617021278,19.289503989469125 300 | 298,365.bmp,43.88268156424581,24.366874769392208 301 | 299,366.bmp,53.86631016042781,22.960889813834918 302 | 300,367.bmp,50.135416666666664,21.075342179737408 303 | 301,368.bmp,52.92168674698795,20.602733259053327 304 | 302,369.bmp,43.32919254658385,19.542830014328633 305 | 303,36.bmp,57.377245508982035,22.564663911242146 306 | 304,370.bmp,28.343949044585987,18.57200587588204 307 | 305,371.bmp,66.57803468208093,21.893531556650373 308 | 306,372.bmp,68.03208556149733,20.40474953758474 309 | 307,373.bmp,59.62814070351759,19.730825292611986 310 | 308,374.bmp,43.18716577540107,19.419595634620094 311 | 309,375.bmp,24.03012048192771,16.34084849946189 312 | 310,376.bmp,23.213483146067414,14.868517462341796 313 | 
311,377.bmp,74.20571428571428,16.61508009909996 314 | 312,378.bmp,71.50625,17.29943354400358 315 | 313,379.bmp,61.31666666666667,18.144816271924594 316 | 314,37.bmp,64.61818181818182,19.907858702971357 317 | 315,380.bmp,79.11363636363636,16.71641234112277 318 | 316,381.bmp,63.76063829787234,20.69103356960769 319 | 317,382.bmp,82.11111111111111,14.572757056471856 320 | 318,383.bmp,69.70285714285714,18.620306022928858 321 | 319,384.bmp,35.037974683544306,17.88102437822402 322 | 320,385.bmp,56.61363636363637,20.66048364428055 323 | 321,386.bmp,40.72727272727273,19.395074041950828 324 | 322,387.bmp,74.01840490797547,20.804045307191565 325 | 323,388.bmp,58.37647058823529,22.224234584213683 326 | 324,389.bmp,64.44827586206897,19.936464758526498 327 | 325,38.bmp,30.150537634408604,19.251084202003533 328 | 326,390.bmp,59.00574712643678,19.413509725013657 329 | 327,391.bmp,82.77540106951872,15.006193496428624 330 | 328,392.bmp,81.84049079754601,17.76073015278651 331 | 329,393.bmp,69.7560975609756,18.654469341027404 332 | 330,394.bmp,48.54736842105263,20.671923766103394 333 | 331,395.bmp,53.833333333333336,20.82849313143239 334 | 332,396.bmp,68.14851485148515,19.325381520624347 335 | 333,397.bmp,51.66470588235294,21.34610370545659 336 | 334,398.bmp,36.895705521472394,18.114387398593617 337 | 335,399.bmp,65.921875,20.370627160601373 338 | 336,39.bmp,73.27932960893855,21.70072663821888 339 | 337,3.bmp,44.4461219097943,20.26681857206691 340 | 338,400.bmp,58.640243902439025,20.334288245910006 341 | 339,401.bmp,54.404761904761905,22.039224177320325 342 | 340,402.bmp,76.84563758389261,16.833334380962103 343 | 341,403.bmp,71.25714285714285,17.394580436774795 344 | 342,404.bmp,76.0965909090909,18.015129824605836 345 | 343,405.bmp,38.123456790123456,23.626957293687827 346 | 344,406.bmp,79.7025641025641,17.03010504746141 347 | 345,407.bmp,55.46153846153846,19.89365007004561 348 | 346,408.bmp,33.14035087719298,18.472018835962707 349 | 347,409.bmp,67.59649122807018,20.19160540404279 350 | 348,40.bmp,34.09770114942529,19.430525278638594 351 | 349,410.bmp,58.304597701149426,21.677997732156225 352 | 350,411.bmp,17.956989247311828,13.891015162245049 353 | 351,412.bmp,56.43421052631579,22.847223968815406 354 | 352,413.bmp,33.01149425287356,19.025382651540525 355 | 353,414.bmp,45.86224489795919,25.125865782990356 356 | 354,415.bmp,16.904191616766468,18.065221040445927 357 | 355,416.bmp,79.76756756756757,18.49133248858205 358 | 356,417.bmp,72.0934065934066,18.79981957241387 359 | 357,418.bmp,66.92090395480226,23.465265996129947 360 | 358,419.bmp,71.89784946236558,19.008256973958105 361 | 359,41.bmp,59.66486486486487,20.6809644703011 362 | 360,420.bmp,55.994923857868024,20.020014847883573 363 | 361,421.bmp,74.16564417177914,16.60777142335426 364 | 362,422.bmp,62.25139664804469,19.928479867671005 365 | 363,423.bmp,71.56441717791411,22.387727240284626 366 | 364,424.bmp,59.481283422459896,19.188185838252807 367 | 365,425.bmp,64.16959064327486,21.0106530222094 368 | 366,426.bmp,67.40119760479043,20.61992833014303 369 | 367,427.bmp,52.68617021276596,20.000866831823775 370 | 368,428.bmp,37.91477272727273,22.585927551832434 371 | 369,429.bmp,13.005681818181818,13.58381270234787 372 | 370,42.bmp,63.33519553072626,20.794052772847817 373 | 371,430.bmp,62.61497326203209,19.064728181056264 374 | 372,431.bmp,46.41573033707865,17.310268654068413 375 | 373,432.bmp,61.947916666666664,21.694388393858787 376 | 374,433.bmp,34.496855345911946,18.211094898174988 377 | 375,434.bmp,58.76219512195122,17.47385875921955 378 | 
376,435.bmp,49.879746835443036,22.4452220937989 379 | 377,436.bmp,57.38125,19.01732803848297 380 | 378,437.bmp,65.1989247311828,21.78390675320068 381 | 379,438.bmp,57.333333333333336,21.496921804268247 382 | 380,439.bmp,18.938271604938272,15.193448799896926 383 | 381,43.bmp,18.477707006369428,15.407950822696382 384 | 382,440.bmp,60.21081081081081,20.53877276857146 385 | 383,441.bmp,65.83832335329342,19.956977028389847 386 | 384,442.bmp,71.05641025641026,19.08369783689186 387 | 385,443.bmp,17.005555555555556,17.4456849756792 388 | 386,444.bmp,83.17708333333333,15.99966936285772 389 | 387,445.bmp,47.24352331606217,20.42970090794909 390 | 388,446.bmp,13.12568306010929,15.601160369921482 391 | 389,447.bmp,25.331460674157302,17.99017996729068 392 | 390,448.bmp,61.53846153846154,19.66215443964787 393 | 391,449.bmp,41.1,15.605586164147345 394 | 392,44.bmp,72.03921568627452,20.017716611246485 395 | 393,450.bmp,63.19631901840491,19.481824038015958 396 | 394,451.bmp,69.38690476190476,18.954065367486404 397 | 395,452.bmp,62.464864864864865,17.27432462997777 398 | 396,453.bmp,38.191489361702125,20.4687196309744 399 | 397,454.bmp,46.85889570552147,24.085720707151 400 | 398,455.bmp,75.5765306122449,19.746320921956688 401 | 399,456.bmp,79.08,17.90745612655865 402 | 400,457.bmp,44.44711538461539,17.77440735597639 403 | 401,458.bmp,7.688118811881188,11.026731762355878 404 | 402,459.bmp,77.20118343195266,15.275478280119104 405 | 403,45.bmp,56.095238095238095,22.823781613234033 406 | 404,460.bmp,6.043269230769231,10.752959502051905 407 | 405,461.bmp,75.77987421383648,21.1341848578158 408 | 406,462.bmp,65.53254437869822,22.56657755740265 409 | 407,463.bmp,72.92307692307692,20.777554171053602 410 | 408,464.bmp,55.7752808988764,22.928901560188354 411 | 409,465.bmp,69.20689655172414,19.660786905869834 412 | 410,466.bmp,76.4090909090909,16.744900367326412 413 | 411,467.bmp,57.644444444444446,22.671967832280416 414 | 412,468.bmp,7.369565217391305,12.298400439156444 415 | 413,469.bmp,58.54666666666667,21.845272449929134 416 | 414,46.bmp,15.220125786163521,12.128347273186664 417 | 415,470.bmp,59.40963855421687,20.083484029860358 418 | 416,471.bmp,50.13953488372093,20.513674110538947 419 | 417,472.bmp,53.77987421383648,23.62300096659232 420 | 418,473.bmp,65.17441860465117,17.948393767724433 421 | 419,474.bmp,22.545454545454547,17.778259724928276 422 | 420,475.bmp,64.08888888888889,21.93598836047088 423 | 421,476.bmp,54.16230366492147,19.90291813917423 424 | 422,477.bmp,75.52222222222223,18.979687924024415 425 | 423,478.bmp,60.01086956521739,19.678283814513225 426 | 424,479.bmp,66.01449275362319,20.848849997590627 427 | 425,47.bmp,49.5,20.09438805083958 428 | 426,480.bmp,52.779761904761905,20.584112702551803 429 | 427,481.bmp,50.09289617486339,20.32016007078556 430 | 428,482.bmp,51.83068783068783,20.6524664966854 431 | 429,483.bmp,45.2027027027027,20.08890425778271 432 | 430,484.bmp,67.78260869565217,17.569396564827546 433 | 431,485.bmp,53.86010362694301,22.212424122681256 434 | 432,486.bmp,70.75568181818181,18.700419295250626 435 | 433,487.bmp,62.345029239766085,21.811578586620378 436 | 434,488.bmp,57.46470588235294,20.3888657739002 437 | 435,489.bmp,12.932960893854748,17.18491557169622 438 | 436,48.bmp,81.34482758620689,19.043502122547963 439 | 437,490.bmp,67.61691542288557,19.45835328176079 440 | 438,491.bmp,56.47727272727273,21.467571436392696 441 | 439,492.bmp,62.80628272251309,19.866903509517762 442 | 440,493.bmp,27.53932584269663,16.75989036260897 443 | 441,494.bmp,85.21739130434783,14.749447299888022 444 | 
442,495.bmp,57.09756097560975,21.800889133721107 445 | 443,496.bmp,65.50314465408805,21.504746080170623 446 | 444,497.bmp,32.56111111111111,17.155927649274414 447 | 445,498.bmp,35.641975308641975,19.913981880447473 448 | 446,499.bmp,80.86338797814207,17.169738376896237 449 | 447,49.bmp,51.899441340782126,19.471989381484963 450 | 448,4.bmp,31.97831416179521,18.27200546542481 451 | 449,500.bmp,68.45270270270271,18.060493382439354 452 | 450,501.bmp,37.86363636363637,20.931552023915238 453 | 451,502.bmp,71.48275862068965,18.247725058282498 454 | 452,503.bmp,27.204678362573098,15.787157151474503 455 | 453,504.bmp,56.18562874251497,18.556206623956747 456 | 454,505.bmp,63.09036144578313,18.542276768168275 457 | 455,506.bmp,73.0752688172043,20.579568224472677 458 | 456,507.bmp,78.8036809815951,15.741687555896778 459 | 457,508.bmp,63.07142857142857,24.222662578215893 460 | 458,509.bmp,40.48571428571429,20.147118998528967 461 | 459,50.bmp,62.13793103448276,22.149347209834552 462 | 460,510.bmp,78.62874251497006,17.6268179504926 463 | 461,511.bmp,27.84153005464481,17.885534146678523 464 | 462,512.bmp,43.2814371257485,18.790181436169146 465 | 463,513.bmp,70.37037037037037,18.29804622122667 466 | 464,514.bmp,75.6875,16.881403546929555 467 | 465,515.bmp,76.23717948717949,17.159258642338635 468 | 466,516.bmp,52.42934782608695,20.97319151712191 469 | 467,517.bmp,38.23529411764706,17.91161219413788 470 | 468,518.bmp,59.421052631578945,20.64057742730343 471 | 469,519.bmp,62.56613756613756,19.163410292659414 472 | 470,51.bmp,82.28292682926829,17.617336359713583 473 | 471,520.bmp,59.005181347150256,21.53400832823475 474 | 472,521.bmp,52.010989010989015,19.7058448325215 475 | 473,522.bmp,64.79041916167665,19.15970682150162 476 | 474,523.bmp,17.975,14.717929960139019 477 | 475,524.bmp,48.27329192546584,23.666956811565285 478 | 476,525.bmp,78.81935483870967,14.561321728818273 479 | 477,526.bmp,67.1283422459893,18.59022170217454 480 | 478,527.bmp,71.88235294117646,21.10324676481577 481 | 479,528.bmp,70.04819277108433,19.41127670661556 482 | 480,529.bmp,50.59235668789809,24.897470629953443 483 | 481,52.bmp,69.57894736842105,18.344577601063754 484 | 482,530.bmp,14.17816091954023,15.84525548717104 485 | 483,531.bmp,59.47540983606557,23.486451477191103 486 | 484,532.bmp,27.698224852071007,16.535965396432033 487 | 485,53.bmp,42.79503105590062,19.233472779383323 488 | 486,54.bmp,20.349397590361445,15.278770167023307 489 | 487,55.bmp,83.28342245989305,15.882291926102615 490 | 488,56.bmp,10.668571428571429,11.60528445398043 491 | 489,57.bmp,61.55769230769231,18.788205079891732 492 | 490,58.bmp,7.5,11.411005883636284 493 | 491,59.bmp,44.73076923076923,26.21624983108988 494 | 492,5.bmp,62.256182744949975,21.48671408517537 495 | 493,60.bmp,74.18333333333334,18.480995825547673 496 | 494,61.bmp,37.75625,16.744001789057172 497 | 495,62.bmp,54.45945945945946,22.021474937021825 498 | 496,63.bmp,72.87005649717514,21.14694412957857 499 | 497,64.bmp,41.860759493670884,18.152737806329796 500 | 498,65.bmp,60.52284263959391,23.476339283710463 501 | 499,66.bmp,47.88333333333333,21.072440897132168 502 | 500,67.bmp,64.79896907216495,20.762056837245876 503 | 501,68.bmp,39.80473372781065,17.20024851236198 504 | 502,69.bmp,40.39204545454545,19.83272171562317 505 | 503,6.bmp,51.87195926833868,21.392789992503673 506 | 504,70.bmp,58.21608040201005,20.946746877765527 507 | 505,71.bmp,48.625698324022345,20.56722627288779 508 | 506,72.bmp,65.11111111111111,23.669567955604272 509 | 507,73.bmp,81.12578616352201,17.179522008338807 510 | 
508,74.bmp,57.40268456375839,20.076852162239533 511 | 509,75.bmp,56.24590163934426,25.46468513756075 512 | 510,76.bmp,76.09467455621302,19.172538211484593 513 | 511,77.bmp,71.52147239263803,19.892717639946238 514 | 512,78.bmp,69.6470588235294,19.58942905115625 515 | 513,79.bmp,54.421875,20.366900069855927 516 | 514,7.bmp,4.937823834196891,9.35393574863743 517 | 515,80.bmp,56.426829268292686,18.652804843644535 518 | 516,81.bmp,30.145539906103288,19.49267983828497 519 | 517,82.bmp,36.153333333333336,18.484071381992283 520 | 518,83.bmp,55.25945945945946,20.25376319648453 521 | 519,84.bmp,56.10650887573964,20.029454923317772 522 | 520,85.bmp,68.60240963855422,18.52398265903937 523 | 521,86.bmp,50.1219512195122,22.067876463980568 524 | 522,87.bmp,40.87096774193548,22.058264765475506 525 | 523,88.bmp,72.31351351351351,18.494684344458946 526 | 524,89.bmp,32.10326086956522,18.486302423328638 527 | 525,8.bmp,42.412121212121214,19.043911990125803 528 | 526,90.bmp,37.048780487804876,20.297724154831467 529 | 527,91.bmp,66.61666666666666,19.688432074238403 530 | 528,92.bmp,64.82035928143712,22.210467897086023 531 | 529,93.bmp,39.8,18.76855603535269 532 | 530,94.bmp,66.0959595959596,17.81990798652185 533 | 531,95.bmp,75.32941176470588,18.223717062039157 534 | 532,96.bmp,63.075268817204304,26.326940906510853 535 | 533,97.bmp,56.355555555555554,19.33434788322384 536 | 534,98.bmp,71.76923076923077,18.998120207761904 537 | 535,99.bmp,11.725806451612904,12.857854078666564 538 | 536,9.bmp,77.90347563279184,19.302300809988783 539 | 537,533.JPG,73.62352941176471,20.401575800948603 540 | 538,534.JPG,52.70967741935484,22.79353056482369 541 | 539,535.JPG,29.502673796791445,15.697878197678456 542 | 540,536.JPG,40.39664804469274,20.1542977808296 543 | 541,537.JPG,63.14754098360656,20.495378250578845 544 | 542,538.JPG,62.53012048192771,21.07322721938768 545 | 543,539.JPG,64.11801242236025,20.475710879596942 546 | 544,540.JPG,35.55172413793103,27.102357306637362 547 | 545,541.JPG,52.55191256830601,24.739439619676318 548 | 546,542.JPG,51.90055248618785,25.076129634990334 549 | 547,543.JPG,68.05555555555556,19.4978052765817 550 | 548,544.JPG,59.98974358974359,21.437511622692835 551 | 549,545.JPG,60.75,22.29836637193727 552 | 550,546.JPG,78.53012048192771,16.893489762302885 553 | 551,547.JPG,21.23560209424084,16.523093552861926 554 | 552,548.JPG,69.47979797979798,21.12849486807928 555 | 553,549.JPG,65.42941176470588,17.997970454479976 556 | 554,550.JPG,25.906432748538013,16.610149789312302 557 | 555,551.JPG,77.28804347826087,17.9929758145996 558 | 556,552.JPG,61.09271523178808,19.191439408714672 559 | 557,553.JPG,78.80813953488372,17.18061409298268 560 | 558,554.JPG,65.06965174129353,20.18105855444926 561 | 559,555.JPG,78.67894736842105,17.521478246549435 562 | 560,556.JPG,70.20555555555555,20.613955576116464 563 | 561,557.JPG,65.26344086021506,21.167809752501682 564 | 562,558.JPG,44.41290322580645,18.14007084021384 565 | 563,559.JPG,72.85093167701864,20.221959345017805 566 | 564,560.JPG,48.69948186528497,21.490861665992018 567 | 565,561.JPG,45.417647058823526,20.4377170558401 568 | 566,562.JPG,50.25945945945946,22.020191567349038 569 | 567,563.JPG,65.88953488372093,18.343522075151835 570 | 568,564.JPG,35.27272727272727,19.721323390823308 571 | 569,565.JPG,21.80110497237569,16.12018055092669 572 | 570,566.JPG,74.33939393939394,18.40085396006716 573 | 571,567.JPG,61.166666666666664,19.08993100664613 574 | 572,568.JPG,67.97814207650273,15.638764279108035 575 | 573,569.JPG,72.10493827160494,18.4396305673949 576 | 
574,570.JPG,43.11891891891892,19.770061877773642 577 | 575,571.JPG,73.48557692307692,20.469366381303185 578 | 576,572.JPG,29.582857142857144,18.783177324683617 579 | 577,573.JPG,47.324607329842934,19.474662938174944 580 | 578,574.JPG,52.73780487804878,20.422489349378967 581 | 579,575.JPG,75.18232044198895,18.061466567962736 582 | 580,576.JPG,80.36180904522612,17.2439346310322 583 | 581,577.JPG,76.11827956989248,17.780857889719258 584 | 582,578.JPG,78.10382513661202,17.801257413669305 585 | 583,579.JPG,68.42105263157895,18.7720823839638 586 | 584,580.JPG,63.58064516129032,20.941784214054042 587 | 585,581.JPG,41.68098159509202,16.929667699433423 588 | 586,582.JPG,65.55113636363636,22.902219452362157 589 | 587,583.JPG,58.395348837209305,21.04563633273013 590 | 588,584.JPG,80.9364161849711,15.54705920334436 591 | 589,585.JPG,60.01604278074866,21.317486369561482 592 | 590,586.JPG,57.01136363636363,19.378629949026298 593 | 591,587.JPG,66.69590643274854,17.584115013022224 594 | 592,588.JPG,67.52352941176471,20.519352372554806 595 | 593,589.JPG,81.47027027027028,17.023654656428498 596 | 594,590.JPG,84.81382978723404,14.257090185181994 597 | 595,591.JPG,78.13612565445027,17.20839157794584 598 | 596,592.JPG,57.854922279792746,21.409199957786136 599 | 597,593.JPG,73.06875,21.54857665045136 600 | 598,594.JPG,68.80864197530865,18.72704688681149 601 | 599,595.JPG,8.203389830508474,11.196762409007324 602 | 600,596.JPG,49.40828402366864,19.87263439086866 603 | 601,597.JPG,63.04761904761905,19.930060202777646 604 | 602,598.JPG,45.3031914893617,24.591389963728417 605 | 603,599.JPG,56.193370165745854,21.384863811713867 606 | 604,600.JPG,48.515723270440255,21.25816458071246 607 | 605,601.JPG,60.946236559139784,19.42684995755789 608 | 606,602.JPG,42.765060240963855,20.803239454818836 609 | 607,603.JPG,66.68156424581005,19.968071306380395 610 | 608,604.JPG,74.62893081761007,18.160868161900122 611 | 609,605.JPG,50.950819672131146,20.04508070109614 612 | 610,606.JPG,31.497409326424872,18.472988262238115 613 | 611,607.JPG,48.28421052631579,23.8488020210459 614 | 612,608.JPG,59.98837209302326,22.296772475663325 615 | 613,609.JPG,65.455,21.553905194783816 616 | 614,610.JPG,65.49342105263158,21.278660986642336 617 | 615,611.JPG,38.48947368421052,21.085800903013524 618 | 616,612.JPG,71.00568181818181,19.500475352181518 619 | 617,613.JPG,53.301886792452834,22.796500596702497 620 | 618,614.JPG,58.15819209039548,20.59313249128944 621 | 619,615.JPG,40.1283422459893,21.33739709943581 622 | 620,616.JPG,81.83815028901734,14.816212876458607 623 | 621,617.JPG,79.83333333333333,15.137877092318103 624 | 622,618.JPG,74.90697674418605,18.86680175202817 625 | 623,619.JPG,42.33529411764706,21.557393909318066 626 | 624,620.JPG,48.8562874251497,24.756713936403937 627 | 625,621.JPG,69.87765957446808,20.15158944252193 628 | 626,622.JPG,78.33333333333333,17.380918822481174 629 | 627,623.JPG,45.80813953488372,21.436519722309814 630 | 628,624.JPG,71.1657458563536,21.040836963748664 631 | 629,625.JPG,61.49473684210526,25.346017578176625 632 | 630,626.JPG,32.32203389830509,21.467025991754483 633 | 631,627.JPG,50.58709677419355,25.39193733798759 634 | 632,628.JPG,31.77027027027027,16.058606353656447 635 | 633,629.JPG,46.46590909090909,18.646147890887043 636 | 634,630.JPG,55.99390243902439,21.813345650798762 637 | 635,631.JPG,70.65803108808291,19.747815023098855 638 | 636,632.JPG,21.142857142857142,15.637871178522143 639 | 637,633.JPG,26.202247191011235,17.07489634864828 640 | 638,634.JPG,16.13450292397661,15.79834437207016 641 | 
639,635.JPG,63.76243093922652,20.49400786820744 642 | 640,636.JPG,64.51807228915662,20.007187463529284 643 | 641,637.JPG,31.45578231292517,17.259990671483138 644 | 642,638.JPG,49.64052287581699,20.710286783919017 645 | 643,639.JPG,43.95973154362416,22.11514070893723 646 | 644,640.JPG,43.53846153846154,21.09248342758285 647 | 645,641.JPG,45.50909090909091,19.545013144090255 648 | 646,642.JPG,49.17142857142857,20.934882228578847 649 | 647,643.JPG,73.94554455445545,19.140278307330977 650 | 648,644.JPG,35.37931034482759,23.851050984131458 651 | 649,645.JPG,43.019354838709674,28.59285874146961 652 | 650,646.JPG,59.3646408839779,19.378099565927148 653 | 651,647.JPG,66.99447513812154,23.350588200435617 654 | 652,648.JPG,27.83888888888889,16.377244019173773 655 | 653,649.JPG,53.775757575757574,21.712183697557514 656 | 654,650.JPG,71.36363636363636,18.563067844002404 657 | 655,651.JPG,30.724719101123597,19.922379210914425 658 | 656,652.JPG,6.520710059171598,11.8874126892775 659 | 657,653.JPG,15.647398843930636,14.843268643011331 660 | 658,654.JPG,71.8013698630137,19.965010725841033 661 | 659,655.JPG,12.562874251497005,16.104906425248785 662 | 660,656.JPG,54.19371727748691,19.486004756549733 663 | 661,657.JPG,16.60126582278481,14.964475927634775 664 | 662,658.JPG,77.07865168539325,17.871623007053387 665 | 663,659.JPG,19.491525423728813,15.523486108323597 666 | 664,660.JPG,63.54320987654321,21.317796130667954 667 | 665,661.JPG,19.803571428571427,21.123038173036043 668 | 666,662.JPG,19.55367231638418,20.09688749547177 669 | 667,663.JPG,23.058479532163744,21.05391052186776 670 | 668,664.JPG,52.13855421686747,21.856253976769278 671 | 669,665.JPG,60.04320987654321,23.50270204247361 672 | 670,666.JPG,15.582822085889571,13.516709344098599 673 | 671,667.JPG,55.03157894736842,21.218666351270492 674 | 672,668.JPG,9.208333333333334,12.986030889182386 675 | 673,669.JPG,9.827586206896552,12.425693031743336 676 | 674,670.JPG,34.944134078212294,18.281728051970145 677 | 675,671.JPG,6.224242424242425,13.595798990304873 678 | 676,672.JPG,9.90967741935484,12.287871616203727 679 | 677,673.JPG,3.42,6.755827710480129 680 | 678,674.JPG,68.11167512690355,19.365646938930453 681 | 679,675.JPG,38.184357541899445,22.566122092606243 682 | 680,676.JPG,14.96236559139785,14.556643613575467 683 | 681,677.JPG,8.477777777777778,11.511206856898063 684 | 682,678.JPG,4.865030674846626,10.029903029591258 685 | 683,679.JPG,7.0785340314136125,11.175118506266442 686 | 684,680.JPG,10.44186046511628,18.16339397604299 687 | 685,681.JPG,18.596153846153847,16.21564773644423 688 | 686,682.JPG,40.07471264367816,19.92239694931225 689 | 687,683.JPG,37.33898305084746,18.568310770477463 690 | 688,684.JPG,27.853403141361255,15.939341478829261 691 | 689,685.JPG,60.09756097560975,18.784283834313158 692 | 690,686.JPG,29.475675675675674,18.198205319502353 693 | 691,687.JPG,49.127272727272725,25.587764710550296 694 | 692,688.JPG,66.79032258064517,20.961073649601726 695 | 693,689.JPG,12.55621301775148,12.412962688909445 696 | 694,690.JPG,77.82758620689656,16.088702991439042 697 | 695,691.JPG,37.25396825396825,16.063340304008722 698 | 696,692.JPG,25.237837837837837,17.999929820988662 699 | 697,693.JPG,43.7027027027027,20.979568765131404 700 | 698,694.JPG,9.738853503184714,13.47702056437193 701 | 699,695.JPG,27.660818713450293,17.313090505602318 702 | 700,696.JPG,30.162650602409638,19.955497219948274 703 | 701,697.JPG,48.02,24.403838564525394 704 | 702,698.JPG,4.846153846153846,8.957515648041737 705 | 703,699.JPG,77.74594594594595,17.98052380195621 706 | 
704,700.JPG,5.277777777777778,7.636979969658524 707 | 705,701.JPG,17.4875,20.307692101530527 708 | 706,702.JPG,4.343915343915344,13.052807129129041 709 | 707,703.JPG,19.173684210526314,18.566806078108254 710 | 708,704.JPG,17.022988505747126,16.27969195678959 711 | 709,705.JPG,48.20454545454545,24.394219029883327 712 | 710,706.JPG,45.56896551724138,21.536992294049526 713 | 711,707.JPG,18.006060606060608,15.623035279284494 714 | 712,708.JPG,72.4438202247191,18.958166584316235 715 | 713,709.JPG,61.93292682926829,21.32803423060486 716 | 714,710.JPG,53.830601092896174,20.80993906580258 717 | 715,711.JPG,57.92613636363637,23.146988668411808 718 | 716,712.JPG,86.34693877551021,15.410021408538569 719 | 717,713.JPG,46.41340782122905,22.335969859342722 720 | 718,714.JPG,31.833333333333332,19.713049005044063 721 | 719,715.JPG,88.34078212290503,13.874682361077758 722 | 720,716.JPG,90.24468085106383,13.322560311988944 723 | 721,717.JPG,66.53703703703704,20.098867088646234 724 | 722,718.JPG,46.424083769633505,21.87195382876625 725 | 723,719.JPG,82.61290322580645,15.566942620678896 726 | 724,720.JPG,62.10880829015544,21.809869113773498 727 | 725,721.JPG,80.69938650306749,19.188195628384396 728 | 726,722.JPG,27.964102564102564,16.495352159806245 729 | 727,723.JPG,23.53846153846154,15.81063535966188 730 | 728,724.JPG,30.737142857142857,20.752497228408608 731 | 729,725.JPG,36.96296296296296,16.99354394630513 732 | 730,726.JPG,45.523809523809526,21.310634488134205 733 | 731,727.JPG,31.5,17.954441322140436 734 | 732,728.JPG,83.47976878612717,15.6601217715482 735 | 733,729.JPG,77.48663101604278,18.648231571370594 736 | 734,730.JPG,46.875,23.349426423056183 737 | 735,731.JPG,45.33519553072626,21.301723339599395 738 | 736,732.JPG,23.64673913043478,18.10353315808525 739 | 737,733.JPG,19.64406779661017,17.48482723274707 740 | 738,734.JPG,4.308139534883721,8.263450352189835 741 | 739,735.JPG,54.425925925925924,24.535132765497025 742 | 740,736.JPG,76.90395480225989,19.41024905459954 743 | 741,737.JPG,43.986486486486484,24.718272864858196 744 | 742,738.JPG,69.42328042328042,21.151698483952796 745 | 743,739.JPG,83.92121212121212,15.057203072797483 746 | 744,740.JPG,69.28651685393258,20.319258021793306 747 | 745,741.JPG,59.82911392405063,23.34133127537253 748 | 746,742.JPG,5.658682634730539,10.849891707354638 749 | 747,743.JPG,72.95212765957447,18.528857266399058 750 | 748,744.JPG,69.4,19.19813932283854 751 | 749,745.JPG,69.53374233128834,20.282961957885956 752 | 750,746.JPG,76.98445595854922,19.045580382974748 753 | 751,747.JPG,64.50279329608938,19.109787568618007 754 | 752,748.JPG,79.50292397660819,17.660648935184494 755 | 753,749.JPG,81.58522727272727,17.14154878665271 756 | 754,750.JPG,57.52542372881356,23.356687721616602 757 | 755,751.JPG,25.773809523809526,21.113239703793667 758 | 756,752.JPG,60.63013698630137,24.759536984871968 759 | 757,753.JPG,83.8452380952381,16.362286158303338 760 | 758,754.JPG,44.142857142857146,23.84031699213989 761 | 759,755.JPG,64.96195652173913,20.543002171727668 762 | 760,756.JPG,87.37106918238993,13.348566252654726 763 | 761,757.JPG,65.60326086956522,21.46646107436571 764 | 762,758.JPG,76.21787709497207,18.939955802615653 765 | 763,759.JPG,75.47093023255815,17.601643771451922 766 | 764,760.JPG,48.16766467065868,21.23978214855798 767 | 765,761.JPG,72.92105263157895,19.42531700346344 768 | 766,762.JPG,74.3975155279503,18.934848660309374 769 | 767,763.JPG,65.4795918367347,21.1307187213001 770 | 768,764.JPG,77.17837837837838,17.433275515006887 771 | 
769,765.JPG,82.07344632768361,16.47071181565728 772 | 770,766.JPG,76.25628140703517,18.615721829505873 773 | 771,767.JPG,85.52409638554217,16.829520170775297 774 | 772,768.JPG,83.41573033707866,15.307255388954948 775 | 773,769.JPG,80.33333333333333,15.357209442360942 776 | 774,770.JPG,87.58201058201058,13.108381847484537 777 | 775,771.JPG,73.54010695187166,19.614078934826846 778 | 776,772.JPG,12.796407185628743,13.716377916873775 779 | 777,773.JPG,82.5632183908046,17.039702804421573 780 | 778,774.JPG,84.78061224489795,14.87651157181383 781 | 779,775.JPG,83.62130177514793,14.394890651433355 782 | 780,776.JPG,91.0780487804878,12.298722877167911 783 | 781,777.JPG,78.02439024390245,18.782236490934093 784 | 782,778.JPG,81.5521472392638,16.572010232276604 785 | 783,779.JPG,80.93406593406593,17.935305592371748 786 | 784,780.JPG,92.4319526627219,12.038002191379219 787 | 785,781.JPG,88.91758241758242,11.579598113300195 788 | 786,782.JPG,49.651162790697676,20.833097509885214 789 | 787,783.JPG,77.06593406593407,16.58415818526406 790 | 788,784.JPG,82.97590361445783,16.493139733520763 791 | 789,785.JPG,90.54970760233918,12.357024076679949 792 | 790,786.JPG,83.40972222222223,18.44341684984442 793 | 791,787.JPG,41.33125,23.1464361805794 794 | 792,788.JPG,36.815642458100555,23.659441923703717 795 | 793,789.JPG,74.51724137931035,18.466568492375497 796 | 794,790.JPG,71.68156424581005,20.710260329910074 797 | 795,791.JPG,60.13142857142857,21.594217130164637 798 | 796,792.JPG,79.39490445859873,19.324741004594586 799 | 797,793.JPG,66.1067415730337,20.634425175347605 800 | 798,794.JPG,77.43781094527363,21.336995177017748 801 | 799,795.JPG,79.61421319796955,16.410343957121967 802 | 800,796.JPG,68.54838709677419,20.062343981527427 803 | 801,797.JPG,24.51412429378531,18.771686653717943 804 | 802,798.JPG,61.05555555555556,24.1741134451153 805 | 803,799.JPG,69.61538461538461,19.050956336922006 806 | 804,800.JPG,9.551546391752577,15.417503096849375 807 | 805,801.JPG,64.28220858895706,22.95016318359747 808 | 806,802.JPG,8.722513089005236,14.20976187691096 809 | 807,803.JPG,21.49462365591398,21.898104894390016 810 | 808,804.JPG,21.19277108433735,19.035506110440387 811 | 809,805.JPG,40.707692307692305,21.057400499138684 812 | 810,806.JPG,85.97714285714285,14.688706384093024 813 | 811,807.JPG,71.49214659685863,22.405560660821784 814 | 812,808.JPG,83.36309523809524,17.760018879541025 815 | 813,809.JPG,74.68789808917198,17.60130149004311 816 | 814,810.JPG,59.93604651162791,20.35680217531333 817 | 815,811.JPG,76.80423280423281,16.866458691633454 818 | 816,812.JPG,64.61025641025641,19.945593457892475 819 | 817,813.JPG,37.02285714285714,19.854052044719154 820 | 818,814.JPG,25.47159090909091,21.027581608727345 821 | 819,815.JPG,32.45637583892618,24.13097236576367 822 | 820,816.JPG,16.026845637583893,18.123803337467557 823 | 821,817.JPG,14.305084745762711,19.10311811526411 824 | 822,818.JPG,21.770114942528735,16.1790567180936 825 | 823,819.JPG,8.275510204081632,10.981364739103643 826 | 824,820.JPG,78.33136094674556,18.334758135162136 827 | 825,821.JPG,50.97575757575758,19.70698511644783 828 | 826,822.JPG,24.936842105263157,18.150639807584554 829 | 827,823.JPG,13.028901734104046,12.635971054274929 830 | 828,824.JPG,12.89937106918239,13.70124034325779 831 | 829,825.JPG,44.46927374301676,20.63930212401129 832 | 830,826.JPG,76.1094527363184,20.1009442613775 833 | 831,827.JPG,82.47849462365592,16.864870570010737 834 | 832,828.JPG,68.98113207547169,21.410831193140996 835 | 833,829.JPG,26.605263157894736,20.26770500901614 836 | 
834,830.JPG,77.35761589403974,17.238849292943314 837 | 835,831.JPG,36.23952095808383,22.156401773035547 838 | 836,832.JPG,34.946107784431135,16.748406680208777 839 | 837,833.JPG,21.722513089005236,16.26495962324778 840 | 838,834.JPG,66.66477272727273,20.683633501036386 841 | 839,835.JPG,59.17613636363637,21.89892884173982 842 | 840,836.JPG,61.68041237113402,19.332137270237006 843 | 841,837.JPG,21.7682119205298,18.20199392323426 844 | 842,838.JPG,26.238888888888887,20.76965015289422 845 | 843,839.JPG,22.707602339181285,23.66623060979989 846 | 844,840.JPG,71.32738095238095,21.506299598974074 847 | 845,841.JPG,63.48022598870057,23.52032254617083 848 | 846,842.JPG,72.78865979381443,17.77702059983551 849 | 847,843.JPG,28.70440251572327,25.570590667355756 850 | 848,844.JPG,91.43617021276596,11.813215214474543 851 | 849,845.JPG,34.78709677419355,19.406447949999208 852 | 850,846.JPG,70.65644171779141,19.96242778478961 853 | 851,847.JPG,60.784946236559136,22.150516783607497 854 | 852,848.JPG,79.5925925925926,17.155320980885552 855 | 853,849.JPG,80.98170731707317,16.472871381326115 856 | 854,850.JPG,84.08108108108108,15.997925100479874 857 | 855,851.JPG,70.60773480662984,21.149093646362616 858 | 856,852.JPG,26.642857142857142,18.338887615487526 859 | 857,853.JPG,83.67955801104972,15.793355558701277 860 | 858,854.JPG,65.00591715976331,21.431645783030454 861 | 859,855.JPG,80.07692307692308,16.371311051204454 862 | 860,856.JPG,20.834285714285713,17.834519396434718 863 | 861,857.JPG,79.08092485549133,19.480889122422898 864 | 862,858.JPG,76.66190476190476,20.32098818827037 865 | 863,859.JPG,49.205128205128204,23.22449150767158 866 | 864,860.JPG,74.9438202247191,19.1463938281564 867 | 865,861.JPG,67.91011235955057,20.63792814100791 868 | 866,862.JPG,27.134715025906736,19.767462929611764 869 | 867,863.JPG,66.2816091954023,19.95880166416587 870 | 868,864.JPG,45.92307692307692,24.556300714112155 871 | 869,865.JPG,80.21052631578948,18.79555971540831 872 | 870,866.JPG,80.6952380952381,17.049954080687883 873 | 871,867.JPG,57.97727272727273,21.120864307383556 874 | 872,868.JPG,23.30054644808743,18.549083971540757 875 | 873,869.JPG,12.044334975369457,14.144690951203494 876 | 874,870.JPG,28.775609756097563,17.977611082652775 877 | 875,871.JPG,34.41860465116279,19.545397816861993 878 | 876,872.JPG,14.216049382716049,13.979896451425429 879 | 877,873.JPG,26.29608938547486,17.927649109649227 880 | 878,874.JPG,6.524861878453039,10.6226011988974 881 | 879,875.JPG,37.12994350282486,19.206416319493307 882 | 880,876.JPG,37.4010989010989,24.022517036159954 883 | 881,877.JPG,87.84782608695652,14.67530901554817 884 | 882,878.JPG,72.25988700564972,22.544406330736262 885 | 883,879.JPG,61.5,20.012929346811337 886 | 884,880.JPG,65.08333333333333,20.272814948795443 887 | 885,881.JPG,43.27363184079602,20.942056996479142 888 | 886,882.JPG,83.54497354497354,15.75626652182372 889 | 887,883.JPG,83.46242774566474,15.742662702208449 890 | 888,884.JPG,29.473372781065088,20.275209202360724 891 | 889,885.JPG,27.88888888888889,18.22008867172917 892 | 890,886.JPG,72.92592592592592,19.171810390069638 893 | 891,887.JPG,50.294117647058826,22.014155494050975 894 | 892,888.JPG,52.10285714285714,26.013717487836015 895 | 893,889.JPG,28.175824175824175,17.40044668671157 896 | 894,890.JPG,62.943502824858754,19.734521412686348 897 | 895,891.JPG,72.04624277456648,18.551253649389135 898 | 896,892.JPG,75.06024096385542,18.005958204179954 899 | 897,893.JPG,61.1656050955414,20.11851014446135 900 | 898,894.JPG,45.46875,19.626359464991378 901 | 
899,895.JPG,43.431693989071036,22.16532488673275 902 | 900,896.JPG,56.13496932515337,19.826256234208344 903 | 901,897.JPG,57.717791411042946,21.22760525169182 904 | 902,898.JPG,51.65895953757225,23.030929483297726 905 | 903,899.JPG,73.9627659574468,19.92482386904112 906 | 904,900.JPG,70.70621468926554,17.95839939653489 907 | 905,901.JPG,22.01298701298701,15.334180289049854 908 | 906,902.JPG,78.7081081081081,16.20323334578054 909 | 907,903.JPG,67.17610062893081,19.399052107599328 910 | 908,904.JPG,58.02209944751381,20.779786064855596 911 | 909,905.JPG,70.36875,18.813089874577514 912 | 910,906.JPG,63.87027027027027,19.88456995283443 913 | 911,907.JPG,13.971098265895954,14.211618136787559 914 | 912,908.JPG,73.2421052631579,18.324364784372907 915 | 913,909.JPG,77.42603550295858,16.270650878884393 916 | 914,910.JPG,61.425,19.962386112298358 917 | 915,911.JPG,70.28658536585365,18.187761422708274 918 | 916,912.JPG,38.398809523809526,22.352949913417806 919 | 917,913.JPG,75.54088050314465,19.964126732262365 920 | 918,914.JPG,67.47252747252747,19.864925451404268 921 | 919,915.JPG,77.24607329842932,18.214429582783485 922 | 920,916.JPG,78.025,19.000479967456368 923 | 921,917.JPG,70.7,20.110838124152494 924 | 922,918.JPG,73.375,18.842041847517656 925 | 923,919.JPG,69.1264367816092,21.34836545174591 926 | 924,920.JPG,75.86904761904762,17.3193185978066 927 | 925,921.JPG,74.64880952380952,19.584651169947644 928 | 926,922.JPG,81.85204081632654,16.707853087337963 929 | 927,923.JPG,75.15458937198068,20.117259789960954 930 | 928,924.JPG,10.176795580110497,11.654837465246516 931 | 929,925.JPG,47.33125,20.78482812506947 932 | 930,926.JPG,49.09947643979058,20.558403320609884 933 | 931,927.JPG,57.72192513368984,20.332919981734353 934 | 932,928.JPG,58.993975903614455,22.46680972563529 935 | 933,929.JPG,38.5632183908046,20.57957972653513 936 | 934,930.JPG,50.35087719298246,22.130222529970325 937 | 935,931.JPG,59.43195266272189,18.63711533977891 938 | 936,932.JPG,60.24590163934426,24.705511066707263 939 | 937,933.JPG,73.18032786885246,16.913329352378305 940 | 938,934.JPG,71.83246073298429,20.680354107659443 941 | 939,935.JPG,70.52147239263803,19.644789282830857 942 | 940,936.JPG,76.47126436781609,16.972156103017586 943 | 941,937.JPG,79.49438202247191,16.4822270967222 944 | 942,938.JPG,49.39204545454545,20.630898998989196 945 | 943,939.JPG,67.615,20.508039237499858 946 | 944,940.JPG,61.883720930232556,20.992295219635498 947 | 945,941.JPG,60.54716981132076,21.513227639253095 948 | 946,942.JPG,53.63742690058479,20.599760202464417 949 | 947,943.JPG,62.25257731958763,23.82542549585962 950 | 948,944.JPG,34.29192546583851,22.68662594777835 951 | 949,945.JPG,70.29545454545455,18.96442328807683 952 | 950,946.JPG,74.0251572327044,18.052647930019795 953 | 951,947.JPG,77.45061728395062,19.025661803732607 954 | 952,948.JPG,24.203821656050955,18.516014846995102 955 | 953,949.JPG,25.437125748502993,16.502480613159545 956 | 954,950.JPG,74.46236559139786,18.278659327373028 957 | 955,951.JPG,75.98255813953489,17.079269550786204 958 | 956,952.JPG,42.42261904761905,19.08992179286231 959 | 957,953.JPG,58.7979274611399,23.176307852680296 960 | 958,954.JPG,62.79558011049724,20.066477914960622 961 | 959,955.JPG,54.61842105263158,21.73773056095658 962 | 960,956.JPG,77.42774566473989,16.64337755385168 963 | 961,957.JPG,32.62777777777778,19.754083251064987 964 | 962,958.JPG,64.98837209302326,21.074079412050615 965 | 963,959.JPG,49.87301587301587,19.816304320431687 966 | 964,960.JPG,44.6875,18.49398593389944 967 | 
965,961.JPG,4.807453416149069,10.61220731513378 968 | 966,962.JPG,61.926829268292686,20.29946385571716 969 | 967,963.JPG,78.84242424242424,18.895073404601064 970 | 968,964.JPG,63.64071856287425,19.264244161010183 971 | 969,965.JPG,71.81547619047619,19.317281481713238 972 | 970,966.JPG,69.37823834196891,21.41667338696023 973 | 971,967.JPG,72.84285714285714,20.16888198215548 974 | 972,968.JPG,65.03947368421052,22.495880882392665 975 | 973,969.JPG,80.55208333333333,17.755093534598423 976 | 974,970.JPG,68.91428571428571,19.93496105208124 977 | 975,971.JPG,80.10055865921788,16.888296250597463 978 | 976,972.JPG,68.85443037974683,20.152231451529474 979 | 977,973.JPG,73.30813953488372,18.800377718715676 980 | 978,974.JPG,69.61349693251533,21.815818576553895 981 | 979,975.JPG,56.69590643274854,23.24855303966792 982 | 980,976.JPG,74.22564102564102,18.990783412296118 983 | 981,977.JPG,66.94285714285714,19.815735731847482 984 | 982,978.JPG,44.954802259887,22.561990162721074 985 | 983,979.JPG,49.45180722891566,24.28880705652875 986 | 984,980.JPG,65.3641975308642,20.35301988148547 987 | 985,981.JPG,56.370558375634516,20.221975977676564 988 | 986,982.JPG,44.68,21.11899509974347 989 | 987,983.JPG,74.81920903954803,20.19735225239166 990 | 988,984.JPG,46.78362573099415,23.604460464826747 991 | 989,985.JPG,34.975903614457835,18.445645558982832 992 | 990,986.JPG,61.101190476190474,17.90013544288649 993 | 991,987.JPG,81.03664921465969,18.386600306157945 994 | 992,988.JPG,54.80346820809248,22.698647372756877 995 | 993,989.JPG,78.5126582278481,19.333063768380224 996 | 994,990.JPG,86.2127659574468,16.03763777691864 997 | 995,991.JPG,37.42512077294686,18.5938435071622 998 | 996,992.JPG,58.94708994708995,21.234940959427636 999 | 997,993.JPG,46.76923076923077,20.18181278678444 1000 | 998,994.JPG,65.46067415730337,20.443144903713897 1001 | 999,995.JPG,83.43169398907104,15.116561068682435 1002 | 1000,996.JPG,61.02030456852792,20.649148668240333 1003 | 1001,997.JPG,65.44970414201184,20.463937863669777 1004 | 1002,998.JPG,50.830601092896174,23.841019761391827 1005 | 1003,999.JPG,49.06707317073171,21.74620229098641 1006 | 1004,1000.JPG,56.51219512195122,20.473681386299482 1007 | 1005,1001.JPG,53.55747126436781,19.62847484605991 1008 | 1006,1002.JPG,40.672222222222224,24.804448725952096 1009 | 1007,1003.JPG,30.48913043478261,21.127246317129025 1010 | 1008,1004.JPG,64.32121212121213,20.016148321886366 1011 | 1009,1005.JPG,62.35028248587571,21.288372104310053 1012 | 1010,1006.JPG,66.21428571428571,21.217974614098914 1013 | 1011,1007.JPG,63.14594594594595,23.77693660784719 1014 | 1012,1008.JPG,57.84276729559748,23.310109338158373 1015 | 1013,1009.JPG,56.157303370786515,27.07006972973995 1016 | 1014,1010.JPG,46.67045454545455,20.10243572500412 1017 | 1015,1011.JPG,54.174358974358974,18.745731032219812 1018 | 1016,1012.JPG,75.87434554973822,23.69976507691628 1019 | 1017,1013.JPG,38.1,22.844895748174604 1020 | 1018,1014.JPG,58.80952380952381,25.612953398774867 1021 | 1019,1015.JPG,63.96045197740113,20.853755270585324 1022 | 1020,1016.JPG,65.63473053892216,21.74838569869596 1023 | 1021,1017.JPG,29.967914438502675,18.075975781842537 1024 | 1022,1018.JPG,42.43975903614458,21.507841782089677 1025 | 1023,1019.JPG,62.96174863387978,24.159353322666462 1026 | 1024,1020.JPG,55.148809523809526,21.27848985033494 1027 | 1025,1021.JPG,73.86702127659575,18.612237296360036 1028 | 1026,1022.JPG,51.308571428571426,23.78052631577991 1029 | 1027,1023.JPG,72.64646464646465,19.035800165620678 1030 | 
1028,1024.JPG,42.770833333333336,23.10140593924929 1031 | 1029,1025.JPG,72.03048780487805,19.097882765923416 1032 | 1030,1026.JPG,72.51552795031056,20.306435430074295 1033 | 1031,1027.JPG,79.93888888888888,17.728607027581816 1034 | 1032,1028.JPG,76.16265060240964,18.225624391138936 1035 | 1033,1029.JPG,81.51612903225806,16.6152732753841 1036 | 1034,1030.JPG,72.75661375661376,20.504903958436465 1037 | 1035,1031.JPG,67.45604395604396,19.159572474677553 1038 | 1036,1032.JPG,77.5548780487805,16.165271056452937 1039 | 1037,1033.JPG,69.78804347826087,19.874570304411225 1040 | 1038,1034.JPG,75.75842696629213,19.07532087379236 1041 | 1039,1035.JPG,67.50526315789473,19.65785311481955 1042 | 1040,1036.JPG,75.58247422680412,17.49303050149617 1043 | 1041,1037.JPG,48.91525423728814,20.113842024489806 1044 | 1042,1038.JPG,21.876288659793815,16.317292859362812 1045 | 1043,1039.JPG,59.660714285714285,23.789201585206506 1046 | 1044,1040.JPG,56.142857142857146,22.004365890127364 1047 | 1045,1041.JPG,75.92771084337349,17.912435587250464 1048 | 1046,1042.JPG,25.928571428571427,18.282025019999463 1049 | 1047,1043.JPG,73.31210191082802,17.35816275253213 1050 | 1048,1044.JPG,82.3021978021978,16.789298604443434 1051 | 1049,1045.JPG,80.48351648351648,17.522747733118557 1052 | 1050,1046.JPG,61.07368421052632,22.048883692129667 1053 | 1051,1047.JPG,37.76020408163265,18.928167802732347 1054 | 1052,1048.JPG,57.8562874251497,20.81799367932827 1055 | 1053,1049.JPG,81.21875,15.31293340021342 1056 | 1054,1050.JPG,72.06024096385542,18.659208075429966 1057 | 1055,1051.JPG,57.32748538011696,20.408426866588783 1058 | 1056,1052.JPG,54.62,20.49183193078151 1059 | 1057,1053.JPG,72.48913043478261,21.423899919583143 1060 | 1058,1054.JPG,77.24752475247524,18.149136701146336 1061 | 1059,1055.JPG,64.23295454545455,20.143123443942898 1062 | 1060,1056.JPG,39.62658227848101,21.85985593778183 1063 | 1061,1057.JPG,49.44444444444444,22.81899194847096 1064 | 1062,1058.JPG,60.21656050955414,19.976370166202578 1065 | 1063,1059.JPG,55.383720930232556,20.15360232460953 1066 | 1064,1060.JPG,61.21857923497268,18.022156209894874 1067 | 1065,1061.JPG,73.0,22.766577985519294 1068 | 1066,1062.JPG,83.11640211640211,17.236070409365112 1069 | 1067,1063.JPG,66.67469879518072,20.578958617663673 1070 | 1068,1064.JPG,76.08108108108108,18.69924780278954 1071 | 1069,1065.JPG,68.56424581005587,20.268147808314595 1072 | 1070,1066.JPG,39.391959798994975,17.840967940357963 1073 | 1071,1067.JPG,49.32748538011696,21.39236543567841 1074 | 1072,1068.JPG,56.73298429319372,19.627234327915637 1075 | 1073,1069.JPG,84.04402515723271,15.756592316141342 1076 | 1074,1070.JPG,58.77070063694268,20.227828194848804 1077 | 1075,1071.JPG,66.0,20.892949466825236 1078 | 1076,1072.JPG,75.25287356321839,21.8048191050413 1079 | 1077,1073.JPG,74.74731182795699,17.781120211559042 1080 | 1078,1074.JPG,82.2046783625731,16.893409132020185 1081 | 1079,1075.JPG,66.61111111111111,19.838563227645945 1082 | 1080,1076.JPG,70.63186813186813,19.91422700922082 1083 | 1081,1077.JPG,71.27544910179641,22.145103261029764 1084 | 1082,1078.JPG,61.93296089385475,19.765648832459945 1085 | 1083,1079.JPG,26.661016949152543,17.649951876213873 1086 | 1084,1080.JPG,59.63636363636363,22.25766267315237 1087 | 1085,1081.JPG,52.194736842105264,20.664080065583278 1088 | 1086,1082.JPG,65.28426395939087,19.118865725346986 1089 | 1087,1083.JPG,70.26704545454545,19.21672617630446 1090 | 1088,1084.JPG,78.18562874251496,17.277544014197805 1091 | 1089,1085.JPG,83.375,15.452402064395956 1092 | 
1090,1086.JPG,82.31288343558282,15.794795780042309 1093 | 1091,1087.JPG,72.83333333333333,18.191255803530918 1094 | 1092,1088.JPG,49.95679012345679,20.60704479596138 1095 | 1093,1089.JPG,80.18343195266272,15.568457429142445 1096 | 1094,1090.JPG,53.878787878787875,18.8933811778202 1097 | 1095,1091.JPG,73.06217616580311,18.506087664774093 1098 | 1096,1092.JPG,77.90285714285714,18.898906580824697 1099 | 1097,1093.JPG,59.98360655737705,20.376261616663786 1100 | 1098,1094.JPG,56.86805555555556,24.663397337363488 1101 | 1099,1095.JPG,76.26424870466322,18.740486936162853 1102 | 1100,1096.JPG,86.06010928961749,13.935159658693006 1103 | 1101,1097.JPG,82.51336898395722,16.543315022154978 1104 | 1102,1098.JPG,10.760736196319018,12.674867888774202 1105 | 1103,1099.JPG,74.615,19.528018139645184 1106 | 1104,1100.JPG,76.28571428571429,19.14008508713733 1107 | 1105,1101.JPG,78.79617834394904,18.15804506784632 1108 | 1106,1102.JPG,64.36945812807882,23.78735110257548 1109 | 1107,1103.JPG,57.70108695652174,21.390534623745733 1110 | 1108,1104.JPG,50.81410256410256,21.80701410901326 1111 | 1109,1105.JPG,75.63440860215054,21.33809431762399 1112 | 1110,1106.JPG,66.52331606217616,19.072797092887544 1113 | 1111,1107.JPG,78.41573033707866,16.213134794328777 1114 | 1112,1108.JPG,73.27932960893855,18.13340599642541 1115 | 1113,1109.JPG,69.58857142857143,20.398667009477528 1116 | 1114,1110.JPG,50.0,21.172145386326353 1117 | 1115,1111.JPG,80.63225806451612,16.98854440415456 1118 | 1116,1112.JPG,43.593333333333334,24.777966144620947 1119 | 1117,1113.JPG,31.640883977900554,17.381928268329517 1120 | 1118,1114.JPG,76.96296296296296,18.46497066006011 1121 | 1119,1115.JPG,64.00537634408602,19.993376555094827 1122 | 1120,1116.JPG,49.98918918918919,21.45191518489392 1123 | 1121,1117.JPG,52.42857142857143,22.550990049091215 1124 | 1122,1118.JPG,53.645348837209305,22.86131161774076 1125 | 1123,1119.JPG,57.430939226519335,21.59042827277498 1126 | 1124,1120.JPG,74.78021978021978,18.653456242361194 1127 | 1125,1121.JPG,76.96703296703296,17.60772717705067 1128 | 1126,1122.JPG,55.01204819277108,23.949185587926276 1129 | 1127,1123.JPG,49.617283950617285,22.788832342482532 1130 | 1128,1124.JPG,77.2,17.48078309967198 1131 | 1129,1125.JPG,61.60621761658031,20.11815004268276 1132 | 1130,1126.JPG,72.09316770186335,18.04813052723052 1133 | 1131,1127.JPG,80.62941176470588,15.798219684953807 1134 | 1132,1128.JPG,70.07361963190183,19.640773375625994 1135 | 1133,1129.JPG,55.913513513513514,23.373923622766338 1136 | 1134,1130.JPG,46.03921568627451,25.78960478895088 1137 | 1135,1131.JPG,72.54651162790698,21.096357589630763 1138 | 1136,1132.JPG,50.4180790960452,20.85216662861068 1139 | 1137,1133.JPG,64.32967032967034,19.907829018863612 1140 | 1138,1134.JPG,65.90058479532163,20.985612710437437 1141 | 1139,1135.JPG,40.389830508474574,18.42060692893153 1142 | 1140,1136.JPG,66.41142857142857,19.552225546201228 1143 | 1141,1137.JPG,77.01714285714286,18.348000700917083 1144 | 1142,1138.JPG,67.60479041916167,20.513574137667824 1145 | 1143,1139.JPG,44.17204301075269,20.434270977897402 1146 | 1144,1140.JPG,51.902061855670105,19.79783041120851 1147 | 1145,1141.JPG,70.67415730337079,18.59668598196568 1148 | 1146,1142.JPG,71.7639751552795,17.853051394632207 1149 | 1147,1143.JPG,61.105555555555554,20.486675659508602 1150 | 1148,1144.JPG,71.27210884353741,17.822779722287088 1151 | 1149,1145.JPG,47.21025641025641,19.62593217789771 1152 | 1150,1146.JPG,73.20481927710843,19.151237976374976 1153 | 1151,1147.JPG,75.83516483516483,16.414024280066176 1154 | 
1152,1148.JPG,48.45303867403315,19.226262540356572 1155 | 1153,1149.JPG,72.12727272727273,18.636171109275388 1156 | 1154,1150.JPG,68.19680851063829,22.03809512681962 1157 | 1155,1151.JPG,58.64321608040201,21.47651622588143 1158 | 1156,1152.JPG,61.851851851851855,19.40904003444429 1159 | 1157,1153.JPG,71.12631578947368,17.64808373083742 1160 | 1158,1154.JPG,28.09090909090909,18.433191020392957 1161 | 1159,1155.JPG,74.36263736263736,20.648340258646172 1162 | 1160,1156.JPG,28.810650887573964,19.122092189215888 1163 | 1161,1157.JPG,51.86046511627907,21.398564082938865 1164 | 1162,1158.JPG,48.14525139664804,20.913441656762927 1165 | 1163,1159.JPG,47.83625730994152,24.360939710054947 1166 | 1164,1160.bmp,21.6858593363051,15.114776667257729 1167 | 1165,1161.bmp,11.9002849002849,12.982504853747923 1168 | 1166,1162.bmp,62.66171187910318,20.50233596139387 1169 | 1167,1163.bmp,87.06559248921593,14.628812950455913 1170 | 1168,1164.bmp,72.75387092778396,19.194236743724115 1171 | -------------------------------------------------------------------------------- /Evaluation/zeroshot_hl_model.py: -------------------------------------------------------------------------------- 1 | # High level model evaluation method: Measuring the cosine similarity between the model's features of test image and 2 | # text prompt features. 3 | 4 | from torch.utils.data import DataLoader 5 | from Evaluation.dataloader import * 6 | import torch.nn.functional as F 7 | import torch.utils.data 8 | from tqdm import tqdm 9 | from PIL import Image 10 | import clip 11 | 12 | 13 | def compute_hlm_scores(model, test_dataset, img_dir, data_loc): 14 | 15 | text_prompt = clip.tokenize(["A good photo.", "A bad photo."]).to("cuda") 16 | local_encoder = model.image_encoder 17 | text_features = model.clip_model.encode_text(text_prompt).unsqueeze(0).detach() 18 | normalizer = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) # Using the same dataloader for LL and HL model. Hence, as only HL model requires normalization, we do it here. 
19 | 20 | with torch.no_grad(): 21 | # print("Computing the scores for the high-level model") 22 | names = [] 23 | moss = [] 24 | scores = [] 25 | 26 | if test_dataset == 'CLIVE': 27 | dataset = TestDataset(img_dir, data_loc, clive = True) 28 | else: 29 | dataset = TestDataset(img_dir, data_loc) 30 | loader = DataLoader(dataset, batch_size= 1, shuffle=False) 31 | 32 | for batch, (img, mos, img_name) in enumerate(tqdm(loader)): 33 | input = normalizer(img) 34 | image_features = local_encoder(input.to("cuda")).unsqueeze(1) 35 | score = F.cosine_similarity(image_features, text_features, dim=-1) 36 | difference = 10.0 * (score[:, 1] - score[:, 0]) 37 | scaled_score = 1 / (1 + torch.exp(difference)) 38 | 39 | if scaled_score.shape == torch.Size([]): 40 | scores.append(scaled_score.item()) 41 | else: 42 | scores.extend(scaled_score.tolist()) 43 | 44 | moss.extend(mos.tolist()) 45 | names.extend(list(img_name)) 46 | 47 | return names, scores, moss 48 | 49 | 50 | def compute_hlm_score_single_image(model, test_image_path): 51 | 52 | text_prompts = clip.tokenize(["A good photo.", "A bad photo."]).to("cuda") 53 | local_encoder = model.image_encoder 54 | text_features = model.clip_model.encode_text(text_prompts).unsqueeze(0).detach() 55 | 56 | with torch.no_grad(): 57 | # print("Computing the high-level model score for a single image") 58 | 59 | scores = [] 60 | 61 | normalizer = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) 62 | x = Image.open(test_image_path) 63 | transform = transforms.ToTensor() 64 | x = transform(x) 65 | if x.shape[0] <3: 66 | x = torch.cat([x]*3, dim=0) 67 | x = normalizer(x) 68 | x = x.unsqueeze(0) 69 | 70 | image_features = local_encoder(x.to("cuda")).unsqueeze(1) 71 | score = F.cosine_similarity(image_features, text_features, dim=-1) 72 | difference = 10.0 * (score[:, 1] - score[:, 0]) 73 | scaled_score = 1 / (1 + torch.exp(difference)) 74 | 75 | if scaled_score.shape == torch.Size([]): 76 | scores.append(scaled_score.item()) 77 | else: 78 | scores.extend(scaled_score.tolist()) 79 | 80 | return scores 81 | -------------------------------------------------------------------------------- /Evaluation/zeroshot_ll_model.py: -------------------------------------------------------------------------------- 1 | # Low level evaluation method: Measuring the NIQE distance between the model's features of test image and pristine 2 | # patches. 
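For intuition, the "NIQE distance" mentioned in the header comment above is the classical multivariate-Gaussian fit distance from the NIQE literature: fit a mean and covariance to the pristine-patch features and to the test-image patch features, then measure a Mahalanobis-style distance between the two fits. A minimal self-contained sketch of that computation (illustrative only; the feature dimensions and the stabilizer value below are made up, not this repository's settings):

```
import torch

def niqe_fit(ref_feats, test_feats):
    # Mean of each feature set (rows are samples, columns are feature dimensions)
    mu_r = ref_feats.mean(dim=0, keepdim=True)
    mu_t = test_feats.mean(dim=0, keepdim=True)
    cov = lambda f, mu: (f - mu).T @ (f - mu) / (f.shape[0] - 1)
    # Average the two covariances and add a small ridge so the inverse is stable
    sigma = (cov(ref_feats, mu_r) + cov(test_feats, mu_t)) / 2
    sigma = sigma + 1e-3 * torch.eye(sigma.shape[0])
    diff = mu_r - mu_t
    return torch.sqrt(diff @ torch.linalg.inv(sigma) @ diff.T).squeeze()

ref = torch.randn(400, 64)   # stand-in for pristine-patch features
test = torch.randn(50, 64)   # stand-in for one test image's patch features
print(niqe_fit(ref, test))   # larger value = larger statistical deviation
```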
3 | 4 | from Evaluation.compute_statistical_deviation import NIQE 5 | from torch.utils.data import DataLoader 6 | from Evaluation.dataloader import * 7 | import torch.nn.functional as F 8 | from PIL import Image 9 | from tqdm import tqdm 10 | import numpy as np 11 | import torch 12 | import h5py 13 | import os 14 | 15 | 16 | def compute_niqe_distance(model, test_dataset, img_dir, data_loc, config): 17 | with torch.no_grad(): 18 | ps = config.patch_size 19 | 20 | print("Computing the scores for the low-level model") 21 | 22 | scores = [] 23 | moss = [] 24 | names = [] 25 | 26 | first_patches = pristine(config) 27 | all_ref_feats = model_features(model, first_patches) 28 | 29 | niqe_model = NIQE(all_ref_feats).to(config.device) 30 | 31 | if test_dataset == 'CLIVE': 32 | dataset = TestDataset(img_dir, data_loc, clive = True) 33 | else: 34 | dataset = TestDataset(img_dir, data_loc) 35 | loader = DataLoader(dataset, batch_size= 1, shuffle=False) 36 | 37 | for batch, (x, y, name) in enumerate(tqdm(loader)): #x= read img, y= mos, name= img_name 38 | 39 | x = x.to(config.device) 40 | x = x.unfold(-3, x.size(-3), x.size(-3)).unfold(-3, ps, int(ps/2)).unfold(-3, ps, int(ps/2)).squeeze(1) 41 | x = x.contiguous().view(x.size(0), x.size(1)*x.size(2), x.size(3), x.size(4), x.size(5)) 42 | patches = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) 43 | 44 | all_rest_feats = model_features(model, patches) 45 | all_rest_feats = all_rest_feats.view(x.size(0), x.size(1), -1) 46 | 47 | score = niqe_model(all_rest_feats) 48 | scaled_score = 1.0 - (1 / (1 + torch.exp(-score / 100.0))) 49 | if scaled_score.shape == torch.Size([]): 50 | scores.append(scaled_score.item()) 51 | else: 52 | scores.extend(scaled_score.cpu().detach().tolist()) 53 | moss.extend(y.tolist()) 54 | names.extend(list(name)) 55 | 56 | torch.cuda.empty_cache() 57 | 58 | return names, scores, moss 59 | 60 | 61 | def compute_niqe_distance_single_image(model, test_image_path, config, tensor_return = False): 62 | with torch.no_grad(): 63 | ps = config.patch_size 64 | 65 | # print("Computing the low-level model's score for a single image") 66 | 67 | first_patches = pristine(config) 68 | all_ref_feats = model_features(model, first_patches) 69 | 70 | niqe_model = NIQE(all_ref_feats).to(config.device) 71 | 72 | scores = [] 73 | transform = transforms.ToTensor() 74 | x = Image.open(test_image_path) 75 | x = transform(x) 76 | 77 | x = x.to(config.device) 78 | x = x.unfold(-3, x.size(-3), x.size(-3)).unfold(-3, ps, int(ps/2)).unfold(-3, ps, int(ps/2)).squeeze(1) 79 | x = x.contiguous().view(x.size(0), x.size(1)*x.size(2), x.size(3), x.size(4), x.size(5)) 80 | patches = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) 81 | 82 | all_rest_feats = model_features(model, patches) 83 | all_rest_feats = all_rest_feats.view(x.size(0), x.size(1), -1) 84 | 85 | score = niqe_model(all_rest_feats) 86 | scaled_score = 1.0 - (1 / (1 + torch.exp(-score / 100.0))) 87 | if scaled_score.shape == torch.Size([]): 88 | scores.append(scaled_score.item()) 89 | else: 90 | scores.extend(scaled_score.cpu().detach().tolist()) 91 | 92 | torch.cuda.empty_cache() 93 | 94 | if tensor_return: 95 | return scaled_score 96 | else: 97 | return scores 98 | 99 | 100 | def model_features(model, frames): 101 | try: 102 | main_output = model(frames).squeeze() 103 | except: 104 | main_output = model(frames) 105 | return main_output 106 | 107 | 108 | def cov(tensor, rowvar=False, bias=False): 109 | """Estimate a covariance matrix (np.cov)""" 110 | tensor = tensor if rowvar else tensor.transpose(-1, 
-2) 111 | tensor = tensor - tensor.mean(dim=-1, keepdim=True) 112 | factor = 1 / (tensor.shape[-1] - int(not bool(bias))) 113 | return factor * tensor @ tensor.transpose(-1, -2).conj() 114 | 115 | 116 | def gaussian_filter(kernel_size: int, sigma: float) -> torch.Tensor: 117 | """Returns 2D Gaussian kernel N(0,`sigma`^2)""" 118 | coords = torch.arange(kernel_size).to(dtype=torch.float32) 119 | coords -= (kernel_size - 1) / 2. 120 | g = coords ** 2 121 | g = (- (g.unsqueeze(0) + g.unsqueeze(1)) / (2 * sigma ** 2)).exp() 122 | g /= g.sum() 123 | return g.unsqueeze(0) 124 | 125 | 126 | def select_patches(all_patches, config): 127 | p = config.sharpness_param 128 | 129 | selected_patches = torch.empty(1, all_patches.size( 130 | 1), all_patches.size(2), all_patches.size(3)) 131 | selected_patches = selected_patches.to(config.device) 132 | 133 | kernel_size = 7 134 | kernel_sigma = float(7 / 6) 135 | deltas = [] 136 | 137 | for ix in range(all_patches.size(0)): 138 | rest = all_patches[ix, :, :, :] 139 | rest = rest.unsqueeze(dim=0) 140 | rest = transforms.Grayscale()(rest) 141 | kernel = gaussian_filter(kernel_size=kernel_size, sigma=kernel_sigma).view( 142 | 1, 1, kernel_size, kernel_size).to(rest) 143 | C = 1 144 | mu = F.conv2d(rest, kernel, padding=kernel_size // 2) 145 | mu_sq = mu ** 2 146 | std = F.conv2d(rest ** 2, kernel, padding=kernel_size // 2) 147 | std = ((std - mu_sq).abs().sqrt()) 148 | delta = torch.sum(std) 149 | deltas.append([delta]) 150 | 151 | peak_sharpness = max(deltas)[0].item() 152 | 153 | for ix in range(all_patches.size(0)): 154 | tempdelta = deltas[ix][0].item() 155 | if tempdelta > p*peak_sharpness: 156 | selected_patches = torch.cat( 157 | (selected_patches, all_patches[ix, :, :, :].unsqueeze(dim=0))) 158 | selected_patches = selected_patches[1:, :, :, :] 159 | return selected_patches 160 | 161 | 162 | def select_colorful_patches(all_patches, config): 163 | pc = config.colorfulness_param 164 | 165 | selected_patches = torch.empty(1, all_patches.size( 166 | 1), all_patches.size(2), all_patches.size(3)) 167 | selected_patches = selected_patches.to(config.device) 168 | deltas = [] 169 | 170 | for ix in range(all_patches.size(0)): 171 | rest = all_patches[ix, :, :, :] 172 | R = rest[0, :, :] 173 | G = rest[1, :, :] 174 | B = rest[2, :, :] 175 | rg = torch.abs(R - G) 176 | yb = torch.abs(0.5 * (R + G) - B) 177 | rbMean = torch.mean(rg) 178 | rbStd = torch.std(rg) 179 | ybMean = torch.mean(yb) 180 | ybStd = torch.std(yb) 181 | stdRoot = torch.sqrt((rbStd ** 2) + (ybStd ** 2)) 182 | meanRoot = torch.sqrt((rbMean ** 2) + (ybMean ** 2)) 183 | 184 | delta = stdRoot + meanRoot 185 | deltas.append([delta]) 186 | 187 | peak_sharpness = max(deltas)[0].item() 188 | 189 | for ix in range(all_patches.size(0)): 190 | tempdelta = deltas[ix][0].item() 191 | if tempdelta > pc*peak_sharpness: 192 | selected_patches = torch.cat( 193 | (selected_patches, all_patches[ix, :, :, :].unsqueeze(dim=0))) 194 | selected_patches = selected_patches[1:, :, :, :] 195 | return selected_patches 196 | 197 | 198 | def pristine(config): 199 | pristine_img_dir = config.pristine_img_dir 200 | ps = config.patch_size 201 | 202 | toten = transforms.ToTensor() 203 | refs = os.listdir(pristine_img_dir) 204 | 205 | if not os.path.isfile('pristine_patches_%03d_%0.2f_%0.2f.hdf5' % (config.patch_size, config.sharpness_param, config.colorfulness_param)): 206 | print('Selecting and saving pristine patches for NIQE distance evaluation (first time evaluation)') 207 | temp = np.array(Image.open(pristine_img_dir + 
refs[0])) 208 | toten = transforms.ToTensor() 209 | temp = toten(temp) 210 | batch = temp.to(config.device) 211 | batch = batch.unsqueeze(dim=0) 212 | patches = batch.unfold(1, 3, 3).unfold(2, ps, ps).unfold(3, ps, ps) 213 | 214 | patches = patches.contiguous().view(1, -1, 3, ps, ps) 215 | 216 | for ix in range(patches.size(0)): 217 | patches[ix, :, :, :, :] = patches[ix, torch.randperm( 218 | patches.size()[1]), :, :, :] 219 | first_patches = patches.squeeze() 220 | first_patches = select_colorful_patches(select_patches(first_patches, config), config) 221 | 222 | refs = refs[1:] 223 | for irx, rs in enumerate(tqdm(refs)): 224 | temp = np.array(Image.open(pristine_img_dir + rs)) 225 | toten = transforms.ToTensor() 226 | temp = toten(temp) 227 | batch = temp.to(config.device) 228 | batch = batch.unsqueeze(dim=0) 229 | patches = batch.unfold(1, 3, 3).unfold(2, ps, ps).unfold(3, ps, ps) 230 | patches = patches.contiguous().view(1, -1, 3, ps, ps) 231 | 232 | for ix in range(patches.size(0)): 233 | patches[ix, :, :, :, :] = patches[ix, torch.randperm( 234 | patches.size()[1]), :, :, :] 235 | second_patches = patches.squeeze() 236 | second_patches = select_colorful_patches(select_patches(second_patches, config), config) 237 | first_patches = torch.cat((first_patches, second_patches)) 238 | 239 | with h5py.File('pristine_patches_%03d_%0.2f_%0.2f.hdf5' % (config.patch_size, config.sharpness_param, config.colorfulness_param), 'w') as f: 240 | dset = f.create_dataset('data', data = np.array(first_patches.detach().cpu(), dtype=np.float32)) 241 | else: 242 | # print('Using pre-selected pristine patches') 243 | with h5py.File('pristine_patches_%03d_%0.2f_%0.2f.hdf5' % (config.patch_size, config.sharpness_param, config.colorfulness_param), 'r') as f: 244 | first_patches = torch.tensor(f['data'][:], device=config.device) 245 | 246 | return first_patches 247 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Suhas Srinath, Shankhanil Mitra, Shika Rao, Rajiv Soundararajan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #
GRepQ
2 | Official repository for our WACV 24 [Oral] paper titled [Learning Generalizable Perceptual Representations for Data-Efficient No-Reference Image Quality Assessment](https://openaccess.thecvf.com/content/WACV2024/html/Srinath_Learning_Generalizable_Perceptual_Representations_for_Data-Efficient_No-Reference_Image_Quality_Assessment_WACV_2024_paper.html) 3 | 4 | ![Framework](./assets/framework.png) 5 | 6 | ## Environment ## 7 | 8 | The code has been implemented with: 9 | 10 | * Python **3.8.18** 11 | * Pytorch **2.0.1** 12 | * Torchvision **0.15.2** 13 | * Cuda **11.8** 14 | * Cudnn **8.7.0** 15 | 16 | ## Getting Started ## 17 | 18 | ## Training ## 19 | 20 | #### Data Preparation #### 21 | 22 | For the low level model, the data is expected to be arranged as 23 | directories. Each directory corresponds to an image and within each 24 | folder, multiple distorted versions of the image are present. For 25 | example, the data is expected to be arranged in the following 26 | format 27 | ``` 28 | - datatset 29 | - image1 30 | - image1_distortion1.png 31 | - image1_distortion2.png 32 | ... 33 | - image1_distortionK.png 34 | - image2 35 | - image2_distortion1.png 36 | - image2_distortion2.png 37 | ... 38 | - image2_distortionK.png 39 | ``` 40 | 41 | The high level model also expects the dataset to be arranged in a 42 | similar manner. The high level model only uses the reference images, 43 | and doesn't require distorted versions. 44 | 45 | Create a directory `Results` and within it two subdirectories 46 | `LLM` and `HLM` for saving the trained models. 47 | The high and low level models can be trained using the command 48 | ``` 49 | python train.py 50 | ``` 51 | 52 | For training either model, the parameters in `configs.py` can be 53 | modified. 54 | 55 | ## Evaluation ## 56 | 57 | ![Zeroshot](./assets/zeroshot.png) 58 | 59 | Install all the required packages and dependencies. 60 | 61 | **NOTE:** This codebase requires CLIP to be installed. 62 | To install CLIP, please follow the instructions given 63 | in [CLIP](https://github.com/openai/CLIP). 64 | 65 | Once CLIP is installed, the positional embedding needs to be disabled in 66 | the forward pass of the image encoder during inference to test with images of all resolutions. This can be done by commenting the 67 | positional embedding step in the forward pass of the image encoder in 68 | CLIP's `model.py`. 69 | ``` 70 | # x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC 71 | ``` 72 | 73 | ### Pretrained Weights ### 74 | 75 | The pretrained weights and pristine patches are available 76 | at this 77 | [Google drive](https://drive.google.com/drive/folders/1wLpdN6TNezur_0_NF7XgmBMyWcSxH5eX?usp=drive_link) link. 78 | 79 | 1. Create a directory named `pretrained_weights` inside the `Evaluation` folder 80 | and copy the low and high level model weights into this. 81 | 2. Copy the pristine 82 | image patches file `pristine_patches_096_0.75_0.80.hdf5` into the main working 83 | directory. 
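Assuming CLIP is installed and the positional-embedding line has been commented out as described in the NOTE above, a quick sanity check (an illustrative sketch, not part of the repository's scripts; it assumes a CUDA device is available) is to push a deliberately non-224x224 tensor through the RN50 image encoder, which should now run without a shape-mismatch error:

```
import torch
import clip

model, _ = clip.load("RN50", device="cuda")
img = torch.randn(1, 3, 384, 512, device="cuda")  # any resolution, not just 224x224
with torch.no_grad():
    feats = model.encode_image(img)
print(feats.shape)  # torch.Size([1, 1024]) once the edit is in place
```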
84 | 85 | ### Testing on a Single Image (Zero Shot) ### 86 | 87 | In order to obtain the zero shot individual high and low level quality scores, run 88 | the following 89 | ``` 90 | python test_zeroshot.py --eval_type zeroshot_single_img --test_img_path PATH_TO_IMAGE 91 | ``` 92 | ### Testing on a Dataset (Zero Shot) ### 93 | 94 | To test on entire datasets, for example on CLIVE or KonIQ-10k, run the following 95 | commands: 96 | ``` 97 | python test_zeroshot.py --eval_type zeroshot --dataset 'CLIVE' 98 | python test_zeroshot.py --eval_type zeroshot --dataset 'KONIQ' 99 | ``` 100 | 101 | ### Data Efficient Evaluation ### 102 | 103 | For the data efficient setting, the features can be computed using 104 | `get_concatenated_features.py`, which will be saved in the 105 | `Evaluation` folder. An SVR (ridge regression) can be 106 | trained using these features. The ridge regression parameter `alpha` 107 | can be tuned to obtain optimal performance. For our experiments, 108 | `alpha` was chosen from the set `{0.1, 1, 10, 100}`. 109 | 110 | ## Citation 111 | If you find this work useful for your research, please cite our paper: 112 | ``` 113 | @InProceedings{Srinath_2024_WACV, 114 | author = {Srinath, Suhas and Mitra, Shankhanil and Rao, Shika and Soundararajan, Rajiv}, 115 | title = {Learning Generalizable Perceptual Representations for Data-Efficient No-Reference Image Quality Assessment}, 116 | booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)}, 117 | month = {January}, 118 | year = {2024}, 119 | pages = {22-31} 120 | } 121 | ``` 122 | 123 | ## License ## 124 | 125 | MIT License 126 | 127 | Copyright (c) 2023 Suhas Srinath, Shankhanil Mitra, Shika Rao, Rajiv Soundararajan 128 | 129 | Permission is hereby granted, free of charge, to any person obtaining a copy 130 | of this software and associated documentation files (the "Software"), to deal 131 | in the Software without restriction, including without limitation the rights 132 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 133 | copies of the Software, and to permit persons to whom the Software is 134 | furnished to do so, subject to the following conditions: 135 | 136 | The above copyright notice and this permission notice shall be included in all 137 | copies or substantial portions of the Software. 138 | 139 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 140 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 141 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 142 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 143 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 144 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 145 | SOFTWARE. 
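To make the Data Efficient Evaluation section above concrete, the saved features can be regressed against MOS with scikit-learn. The following is a sketch under assumptions: it presumes `get_concatenated_features.py` has already written `./Evaluation/CLIVE_features.npy`, it reads MOS values from the CSV shipped in `Evaluation/datasets` (whose row order matches the feature rows), and the 80/20 split is illustrative rather than the paper's protocol:

```
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split

features = np.load('./Evaluation/CLIVE_features.npy')  # (N, D) concatenated HL + LL features
mos = pd.read_csv('./Evaluation/datasets/LIVEC.csv')['mos'].to_numpy()

x_tr, x_te, y_tr, y_te = train_test_split(features, mos, test_size=0.2, random_state=0)

for alpha in [0.1, 1, 10, 100]:  # the alpha grid used in the experiments
    reg = Ridge(alpha=alpha).fit(x_tr, y_tr)
    print(f'alpha={alpha}: SROCC={spearmanr(reg.predict(x_te), y_te)[0]:.4f}')
```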
146 | 147 | -------------------------------------------------------------------------------- /assets/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suhas-srinath/GRepQ/61dd615f7f901603b21291c548e2855245e4b036/assets/framework.png -------------------------------------------------------------------------------- /assets/zeroshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suhas-srinath/GRepQ/61dd615f7f901603b21291c548e2855245e4b036/assets/zeroshot.png -------------------------------------------------------------------------------- /configs.py: -------------------------------------------------------------------------------- 1 | # Configuration file for GRepQ. 2 | 3 | exp_config = { 4 | 5 | 'run_type': 'll_model_train', # 'll_model_train' or 'hl_model_train' 6 | 'database_path': str(r"../Databases"), 7 | 8 | # Training parameters 9 | 'datasets': { 10 | # Train datasets 11 | 'LIVE_FB_synthetic': {'train': True}, 12 | }, 13 | 14 | 'model': None, # Model being trained and tested 15 | 'resume_training': False, # Resume training from existing checkpoint 16 | 'resume_path': str(r"./checkpoint.tar"), # Last checkpoint path if resuming training 17 | 18 | 'epochs': 15, 19 | 'lr_update': 20, # Update learning rate after specified no. of epochs 20 | 'test_epoch': 3, # Validate after these many epochs of training 21 | 'lr_decay': 1.0, 22 | 23 | # Low Level Model arguments 24 | 'batch_size_qacl': 8, # 9 frames in 1 batch 25 | 'lr_llm': 1e-4, 26 | 'pristine_img_dir': str(r"../Databases/Pristine"), 27 | 'patch_size': 96, 28 | 'device': "cuda", 29 | 'sharpness_param': 0.75, 30 | 'colorfulness_param': 0.8, 31 | 'results_path_llm': str(r"./Results/LLM"), 32 | 33 | # High Level Model arguments 34 | 'crop': 'center', 35 | 'crop_size': (224, 224), 36 | 'batch_size_gcl': 128, 37 | 'tau': 32, # temperature parameter 38 | 'lr_hlm': 1e-6, 39 | 'results_path_hlm': str(r"./Results/HLM"), 40 | } -------------------------------------------------------------------------------- /data_read_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | A subset of the LIVE FB dataset is used for training. 3 | LIVE_FB_synthetic has 5k images, 4 distortion types of 2 levels each (8 distortions per image) 4 | 5 | Directory structure for synthetic LIVE_FB: 6 | - image_xyz/image_xyz_distortion_level.bmp 7 | """ 8 | 9 | 10 | from pathlib import Path 11 | import pandas as pd 12 | import os 13 | 14 | def get_dataset_list(base_dataset_path: Path, dataset: str): 15 | """ 16 | Get names, paths, scores and frames for synthetic video datasets. 17 | :param base_dataset_path: Path to the base dataset folder. 18 | :param dataset: Name of the dataset. 19 | :return: A dictionary containing image names and paths. 20 | 21 | Paths indicates- path to the folder containing distorted versions of the same image. 
(See above for directory structure) 22 | """ 23 | 24 | if dataset == 'LIVE_FB_synthetic': 25 | names = [] 26 | paths = [] 27 | 28 | curr_path = os.path.join(base_dataset_path, 'LIVE_FB_synthetic') 29 | images_list = os.path.join(curr_path, r'LIVEFB.csv') 30 | loaded_data = pd.read_csv(images_list) 31 | names_list = list(loaded_data['im_loc']) 32 | 33 | for curr_name in names_list: 34 | folder_name = curr_name.split('/')[-1].split('.')[0] 35 | folder_name = folder_name + '.bmp' 36 | folder_path = os.path.join(curr_path, folder_name) 37 | if os.path.exists(folder_path): 38 | names.append(folder_name) 39 | paths.append(folder_path) 40 | 41 | return {'names': names, 'image_paths': paths} -------------------------------------------------------------------------------- /dataloader_contrastive.py: -------------------------------------------------------------------------------- 1 | from torchvision import transforms 2 | from data_read_utils import * 3 | from skimage.io import imread 4 | import torch.utils.data 5 | from PIL import Image 6 | from piq import fsim 7 | import logging 8 | import numpy 9 | 10 | logging.getLogger('PIL').setLevel(logging.WARNING) 11 | 12 | 13 | # Read training data 14 | def get_train_dataset(base_dataset_path: Path, train_datasets): 15 | train_data = {'images': None} 16 | 17 | for curr_set in train_datasets: 18 | curr_list = get_dataset_list(base_dataset_path, curr_set) 19 | train_data['images'] = curr_list['image_paths'] 20 | 21 | return train_data 22 | 23 | 24 | # Dataloader for the low level model 25 | class FrameLoaderLLModel(torch.utils.data.Dataset): 26 | def __init__(self, learning_data: dict): 27 | 28 | self.learning_data = learning_data 29 | self.transform = transforms.ToTensor() 30 | self.flip = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomVerticalFlip(p=0.5)]) 31 | 32 | def __len__(self): 33 | return len(self.learning_data['images']) 34 | 35 | def __getitem__(self, idx): 36 | annotator_matrix = None # Initially setting to None 37 | image_dir = self.learning_data['images'][idx] # Directory of all distorted versions of the same image 38 | all_dists = sorted(os.listdir(image_dir)) 39 | all_distortions = [os.path.join(image_dir, dist) for dist in all_dists] # Full path to each image 40 | 41 | images = [] 42 | torch_permuted_images = [] 43 | aligned_param = len(all_distortions) # number of spatial fragmentation 44 | 45 | for dist in all_distortions: 46 | curr_image = imread(dist) / 255.0 47 | curr_image_torch = torch.from_numpy(curr_image) 48 | images.append(curr_image_torch) 49 | torch_permuted_images.append(curr_image_torch.permute(2, 0, 1)) 50 | 51 | annotator_matrix = numpy.zeros((9, 9)) # 9 images in a folder including the synthetically undistorted image 52 | 53 | for i in range(len(torch_permuted_images)): 54 | for j in range(0, i): 55 | im1 = torch_permuted_images[i].unsqueeze(0) 56 | im2 = torch_permuted_images[j].unsqueeze(0) 57 | annotator_matrix[i][j] = fsim(im1, im2, data_range=1.0) 58 | 59 | annotator_matrix = annotator_matrix + numpy.transpose(annotator_matrix) # fsim is symmetric 60 | numpy.fill_diagonal(annotator_matrix, float(1)) 61 | 62 | video_for_fragments = torch.stack(images, 0) 63 | video_for_fragments = video_for_fragments.permute(3, 0, 1, 2) # have to change to [C,T,H,W] from [T,H,W,C] for the below function 64 | 65 | fragmented_anchor_video = self.get_spatial_fragments(video_for_fragments, aligned= aligned_param) 66 | fragmented_anchor_video = fragmented_anchor_video.permute(1, 0, 2, 3) 67 | 68 | 
fragmented_augmented_video = self.get_spatial_fragments(video_for_fragments, aligned= aligned_param) 69 | fragmented_augmented_video = fragmented_augmented_video.permute(1, 0, 2, 3) 70 | flipped_frag_aug_video = self.flip(fragmented_augmented_video) 71 | 72 | fragmented_images = list(torch.unbind(fragmented_anchor_video)) 73 | fragmented_augmentations = list(torch.unbind(flipped_frag_aug_video)) 74 | 75 | return_sample = { 76 | "images": (torch.stack(fragmented_images, dim=0)).to(torch.float32), 77 | "augmentations": (torch.stack(fragmented_augmentations, dim=0)).to(torch.float32), 78 | "annotators": (torch.from_numpy(annotator_matrix)).to(torch.float32) 79 | } 80 | 81 | return return_sample 82 | 83 | # Function is from FastVQA (https://github.com/VQAssessment/FAST-VQA-and-FasterVQA) 84 | # @staticmethod 85 | def get_spatial_fragments( 86 | self, 87 | video, 88 | fragments_h=7, 89 | fragments_w=7, 90 | fsize_h=32, 91 | fsize_w=32, 92 | aligned=9, # changed to 9 because this indicates the no. of frames fragmented at same locations 93 | nfrags=1, 94 | random=False, 95 | random_upsample=False, 96 | fallback_type="upsample", 97 | **kwargs, 98 | ): 99 | size_h = fragments_h * fsize_h 100 | size_w = fragments_w * fsize_w 101 | ## video: [C,T,H,W] 102 | ## situation for images 103 | if video.shape[1] == 1: 104 | aligned = 1 105 | 106 | dur_t, res_h, res_w = video.shape[-3:] 107 | ratio = min(res_h / size_h, res_w / size_w) 108 | if fallback_type == "upsample" and ratio < 1: 109 | ovideo = video 110 | video = torch.nn.functional.interpolate( 111 | video / 255.0, scale_factor=1 / ratio, mode="bilinear" 112 | ) 113 | video = (video * 255.0).type_as(ovideo) 114 | 115 | if random_upsample: 116 | randratio = random.random() * 0.5 + 1 117 | video = torch.nn.functional.interpolate( 118 | video / 255.0, scale_factor=randratio, mode="bilinear" 119 | ) 120 | video = (video * 255.0).type_as(ovideo) 121 | 122 | assert dur_t % aligned == 0, "Please provide match vclip and align index" 123 | size = size_h, size_w 124 | 125 | ## make sure that sampling will not run out of the picture 126 | hgrids = torch.LongTensor( 127 | [min(res_h // fragments_h * i, res_h - fsize_h) for i in range(fragments_h)] 128 | ) 129 | wgrids = torch.LongTensor( 130 | [min(res_w // fragments_w * i, res_w - fsize_w) for i in range(fragments_w)] 131 | ) 132 | hlength, wlength = res_h // fragments_h, res_w // fragments_w 133 | 134 | if random: 135 | print("This part is deprecated. 
Please remind that.") 136 | if res_h > fsize_h: 137 | rnd_h = torch.randint( 138 | res_h - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned) 139 | ) 140 | else: 141 | rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int() 142 | if res_w > fsize_w: 143 | rnd_w = torch.randint( 144 | res_w - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned) 145 | ) 146 | else: 147 | rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int() 148 | else: 149 | if hlength > fsize_h: 150 | rnd_h = torch.randint( 151 | hlength - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned) 152 | ) 153 | else: 154 | rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int() 155 | if wlength > fsize_w: 156 | rnd_w = torch.randint( 157 | wlength - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned) 158 | ) 159 | else: 160 | rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int() 161 | 162 | target_video = torch.zeros(video.shape[:-2] + size).to(video.device) 163 | # target_videos = [] 164 | 165 | for i, hs in enumerate(hgrids): 166 | for j, ws in enumerate(wgrids): 167 | for t in range(dur_t // aligned): 168 | t_s, t_e = t * aligned, (t + 1) * aligned 169 | h_s, h_e = i * fsize_h, (i + 1) * fsize_h 170 | w_s, w_e = j * fsize_w, (j + 1) * fsize_w 171 | if random: 172 | h_so, h_eo = rnd_h[i][j][t], rnd_h[i][j][t] + fsize_h 173 | w_so, w_eo = rnd_w[i][j][t], rnd_w[i][j][t] + fsize_w 174 | else: 175 | h_so, h_eo = hs + rnd_h[i][j][t], hs + rnd_h[i][j][t] + fsize_h 176 | w_so, w_eo = ws + rnd_w[i][j][t], ws + rnd_w[i][j][t] + fsize_w 177 | target_video[:, t_s:t_e, h_s:h_e, w_s:w_e] = video[ 178 | :, t_s:t_e, h_so:h_eo, w_so:w_eo 179 | ] 180 | return target_video 181 | 182 | # Dataloader for high-level model 183 | class FrameLoaderHLModel(torch.utils.data.Dataset): 184 | def __init__(self, learning_data: dict, crop_size=(224, 224)): 185 | 186 | self.crop_size = crop_size 187 | self.learning_data = learning_data 188 | 189 | self.flip = transforms.RandomHorizontalFlip(p=1) 190 | self.center_crop = transforms.CenterCrop(crop_size) 191 | self.random_crop = transforms.RandomCrop(crop_size) 192 | 193 | # CLIP preprocessing steps. Same thing applied. 
194 | self.transform = transforms.ToTensor() 195 | self.tensor_to_pil = transforms.ToPILImage() 196 | self.normalizer = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) 197 | 198 | def __len__(self): 199 | return len(self.learning_data['images']) 200 | 201 | def __getitem__(self, idx): 202 | image_dir = self.learning_data['images'][idx] # Directory of all distorted versions of an image 203 | all_dists = sorted(os.listdir(image_dir)) 204 | all_distortions = [os.path.join(image_dir, dist) for dist in all_dists] # Full path to each image 205 | preprocessed_img = None 206 | 207 | # Preprocessing images CenterCrop -> convert_image_to_rgb -> ToTensor -> Normalize 208 | for dist in all_distortions: 209 | if 'REF' in dist: 210 | PIL_image = Image.open(dist) 211 | preprocessed_img = self.convert_image_to_rgb(PIL_image) 212 | preprocessed_img = self.transform(preprocessed_img) 213 | preprocessed_img = self.normalizer(preprocessed_img) 214 | preprocessed_img = self.center_crop(preprocessed_img) 215 | 216 | return_sample = { 217 | "image": preprocessed_img, 218 | } 219 | 220 | return return_sample 221 | 222 | def convert_image_to_rgb(self, image): 223 | return image.convert("RGB") -------------------------------------------------------------------------------- /get_concatenated_features.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import DataLoader 2 | from Evaluation.dataloader import * 3 | from torchvision import transforms 4 | import torch.utils.data 5 | from networks import * 6 | from tqdm import tqdm 7 | import numpy as np 8 | import traceback 9 | import argparse 10 | import datetime 11 | import time 12 | import os 13 | 14 | 15 | def parse_option(): 16 | parser = argparse.ArgumentParser('arguments for evaluation') 17 | 18 | parser.add_argument('--device', type=str, 19 | default='cuda:0', help='Device (cpu/cuda)') 20 | parser.add_argument('--ll_model_weights_path', type=str, 21 | default='./Evaluation/pretrained_weights/low_level_model_weights.tar', 22 | help='Saved weights for the low level model') 23 | parser.add_argument('--hl_model_weights_path', type=str, 24 | default='./Evaluation/pretrained_weights/high_level_model_weights.pth', 25 | help='Saved weights for the high level model') 26 | parser.add_argument('--dataset', default='CLIVE', type=str, 27 | help='Dataset to get concatenated features of LL_model and HL_model.') 28 | parser.add_argument('--img_dir', type=str, 29 | default='../Databases/CLIVE/ChallengeDB_release/Images', help='Image directory for above chosen dataset') 30 | 31 | optn = parser.parse_args() 32 | return optn 33 | 34 | 35 | def compute_features(hl_model, ll_model, dataset, img_dir, data_loc): 36 | 37 | local_encoder = hl_model.image_encoder 38 | normalizer = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) 39 | 40 | with torch.no_grad(): 41 | print("Generating concatenated features") 42 | names = [] 43 | moss = [] 44 | ll_model_features = [] 45 | hl_model_features = [] 46 | 47 | if dataset == 'CLIVE': 48 | dataset = TestDataset(img_dir, data_loc, clive = True) 49 | else: 50 | dataset = TestDataset(img_dir, data_loc) 51 | loader = DataLoader(dataset, batch_size= 1, shuffle=False) 52 | 53 | for batch, (img, mos, img_name) in enumerate(tqdm(loader)): 54 | 55 | # For High level model 56 | input_hl_model = normalizer(img) 57 | hlm_image_features = local_encoder(input_hl_model.to("cuda")) 58 | hlm_features = 
hlm_image_features.squeeze().cpu().numpy().astype(np.float32) 59 | hl_model_features.append(hlm_features) 60 | 61 | # For Low level model 62 | input_ll_model = img.to("cuda") 63 | llm_image_features = ll_model(input_ll_model).squeeze() 64 | llm_features = llm_image_features.cpu().numpy().astype(np.float32) 65 | ll_model_features.append(llm_features) 66 | 67 | moss.extend(mos.tolist()) 68 | names.extend(list(img_name)) 69 | 70 | torch.cuda.empty_cache() 71 | 72 | hl_model_features = np.array(hl_model_features) 73 | ll_model_features = np.array(ll_model_features) 74 | 75 | return names, moss, hl_model_features, ll_model_features 76 | 77 | 78 | # Evaluation mode for testing 79 | def eval_mode(model): 80 | for param in model.parameters(): 81 | param.requires_grad_(False) 82 | model.eval() 83 | 84 | return model 85 | 86 | 87 | # Loads the pretrained model weights 88 | def load_model(model_weights_path, network_type): 89 | model_weights = model_weights_path 90 | model = None 91 | if network_type == 'll': 92 | model = LLModel(encoder='resnet18', head='mlp').to("cuda") 93 | load_dict = torch.load(model_weights) 94 | model.load_state_dict(load_dict['model']['state_dict'], strict=True) 95 | elif network_type == 'hl': 96 | model = HLModel().to("cuda") 97 | load_dict = torch.load(model_weights) 98 | model.clip_model.visual.load_state_dict(load_dict, strict=False) 99 | 100 | return model 101 | 102 | 103 | def main(): 104 | args = parse_option() 105 | ll_model_weights_path = args.ll_model_weights_path 106 | hl_model_weights_path = args.hl_model_weights_path 107 | dataset = args.dataset 108 | img_dir = args.img_dir 109 | 110 | # Low Level model 111 | ll_model = load_model(model_weights_path= ll_model_weights_path, network_type= 'll') 112 | ll_model = eval_mode(model= ll_model) 113 | # High level model 114 | hl_model = load_model(model_weights_path= hl_model_weights_path, network_type= 'hl') 115 | hl_model = eval_mode(model= hl_model) 116 | 117 | data_loc = None 118 | if dataset == 'CLIVE': 119 | data_loc = './Evaluation/datasets/LIVEC.csv' # dataset details path 120 | elif dataset == 'KONIQ': 121 | data_loc = './Evaluation/datasets/KONIQ.csv' 122 | 123 | names, mos, hl_features, ll_features = compute_features(hl_model, ll_model, dataset, img_dir, data_loc) 124 | features = np.concatenate((hl_features, ll_features), axis=1) 125 | 126 | if not os.path.exists(r'./Evaluation'): 127 | os.mkdir(r'./Evaluation') 128 | 129 | np.save(f'./Evaluation/{dataset}_features.npy', features) 130 | 131 | return 132 | 133 | 134 | if __name__ == '__main__': 135 | print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p')) 136 | start_time = time.time() 137 | try: 138 | main() 139 | run_result = 'Program completed successfully!' 
140 | except Exception as e: 141 | print(e) 142 | traceback.print_exc() 143 | run_result = str(e) 144 | end_time = time.time() 145 | print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p')) 146 | print('Execution time: ' + str(datetime.timedelta(seconds=end_time - start_time))) -------------------------------------------------------------------------------- /losses.py: -------------------------------------------------------------------------------- 1 | import torch.nn.functional as F 2 | import torch.nn as nn 3 | import numpy as np 4 | import traceback 5 | import datetime 6 | import torch 7 | import time 8 | 9 | 10 | class GroupContrastiveLoss(nn.Module): 11 | def __init__(self, batch_size, temperature=0.5): 12 | super().__init__() 13 | self.batch_size = batch_size 14 | self.register_buffer("temperature", torch.tensor(temperature)) 15 | self.register_buffer("negatives_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool)).float()) 16 | self.register_buffer("positives_mask", (~torch.eye(batch_size * 1, batch_size * 1, dtype=bool)).float()) 17 | 18 | def forward(self, emb_i, emb_j): 19 | """ 20 | emb_i and emb_j are batches of embeddings, where corresponding indices are pairs 21 | z_i, z_j as per SimCLR paper 22 | """ 23 | 24 | self.negatives_mask[:len(emb_i), :len(emb_j)] = False 25 | self.negatives_mask[len(emb_i):, len(emb_j):] = False 26 | 27 | z_i = F.normalize(emb_i, dim=1) 28 | z_j = F.normalize(emb_j, dim=1) 29 | 30 | representations = torch.cat([z_i, z_j], dim=0) 31 | similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2).cuda() 32 | 33 | pos_similarity_matrix = similarity_matrix[:len(emb_i), :len(emb_j)].cuda() 34 | neg_similarity_matrix = similarity_matrix[len(emb_i):, len(emb_j):].cuda() 35 | 36 | pos_similarity_matrix = pos_similarity_matrix * self.positives_mask 37 | sim_ij=torch.sum(pos_similarity_matrix,dim=1)/(len(neg_similarity_matrix)-1) 38 | 39 | neg_similarity_matrix = neg_similarity_matrix * self.positives_mask 40 | sim_ji = torch.sum(neg_similarity_matrix, dim=1)/(len(neg_similarity_matrix)-1) 41 | 42 | positives = torch.cat([sim_ij, sim_ji], dim=0) 43 | 44 | numerator = torch.exp(positives / self.temperature) 45 | denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature) 46 | 47 | loss_partial = -torch.log(numerator / (numerator + torch.sum(denominator, dim=1))) 48 | loss = torch.sum(loss_partial) / (2 * self.batch_size) 49 | 50 | return loss 51 | 52 | 53 | def weighted_contrastive_loss(features_images, features_augmentations, tau, annotator_matrices, mode_in=True): 54 | """ 55 | 56 | Weighted contrastive loss (one sided). If mode_in is set to True, Lin is invoked as the loss, otherwise Lout. 57 | These losses correspond to the expressions as per supervised contrastive learning 58 | in https://proceedings.neurips.cc/paper/2020/file/d89a66c7c80a29b1bdbab0f2a1a94af8-Paper.pdf. 59 | 60 | Both features of shape (B, D, C), where C is the feature length, D is the number of distortions. A batch of 61 | annotator_matrices of shape (B, D, D). 
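The annotator matrices hold full-reference similarity scores (FSIM values in [0, 1] in this codebase, see dataloader_contrastive.py), and each pair is weighted by alpha = 2 - 2/(1 + a^2) below: the weight grows monotonically from 0 at a = 0 to 1 at a = 1, so pairs judged more similar by the full-reference measure contribute more strongly as positives (e.g., a = 0.5 gives alpha = 0.4).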
62 | 63 | """ 64 | 65 | # Normalizing all features with l2 norm 66 | eps = 1e-8 # for computational stability 67 | norm_images = torch.linalg.norm(features_images, dim=-1) 68 | norm_augmentations = torch.linalg.norm(features_augmentations, dim=-1) 69 | norm_images = torch.max(norm_images, eps * torch.ones_like(norm_images)) 70 | norm_augmentations = torch.max(norm_augmentations, eps * torch.ones_like(norm_augmentations)) 71 | normalized_features_images = features_images/norm_images.unsqueeze(dim=-1) 72 | normalized_features_augmentations = features_augmentations/norm_augmentations.unsqueeze(dim=-1) 73 | 74 | # Computing loss for pairs 75 | feat_distances = torch.bmm(normalized_features_images, torch.transpose(normalized_features_augmentations, dim0=1, dim1=2)) / tau # (B, 1) 76 | alpha = 2.0 - 2.0 / (1 + annotator_matrices ** 2) 77 | term_pos = alpha * torch.exp(feat_distances) 78 | term_neg = torch.exp(feat_distances) 79 | 80 | # Choosing Lin or Lout as the training loss 81 | if mode_in: 82 | loss1 = torch.divide(term_pos.sum(-1), term_neg.sum(-1)) 83 | loss1 = -torch.log(loss1) 84 | loss1 = loss1.mean() 85 | loss2 = torch.divide(term_pos.sum(-2), term_neg.sum(-2)) 86 | loss2 = -torch.log(loss2) 87 | loss2 = loss2.mean() 88 | loss = loss1 + loss2 89 | 90 | else: 91 | loss1 = - alpha * (torch.log(term_neg) - torch.log(term_neg.sum(-1))[:,:,None]) 92 | loss2 = - alpha * (torch.log(term_neg) - torch.log(term_neg.sum(-2))[:,None]) 93 | loss = loss1.mean() + loss2.mean() 94 | 95 | return loss 96 | 97 | 98 | # Testing the quality aware contrastive loss 99 | def test_qacl(): 100 | ssim = torch.tril(torch.rand(5, 9, 9), diagonal=-1) 101 | ssim = ssim + torch.transpose(ssim, dim0=1, dim1=2) # To get full matrix from lower triangular matrix 102 | ssim = torch.exp(-ssim) 103 | 104 | feat1 = torch.rand(5, 9, 128) 105 | feat2 = torch.rand(5, 9, 128) 106 | 107 | losses = weighted_contrastive_loss(feat1, feat2, 0.2, ssim) 108 | print(losses) 109 | return 110 | 111 | 112 | # Testing the group contrastive loss 113 | def test_gcl(): 114 | pseudo_labels=torch.rand(16,1) 115 | f_feat=torch.rand(16,256) 116 | batch_size = 16 117 | 118 | idx = np.argsort(pseudo_labels.cpu(), axis=0) 119 | f_pos_feat = [] 120 | f_neg_feat = [] 121 | 122 | for n in range( batch_size // 4): 123 | try: 124 | f_pos_feat.append(f_feat[idx[n]]) 125 | f_neg_feat.append(f_feat[idx[-n - 1]]) 126 | except: 127 | continue 128 | 129 | f_pos_feat = torch.squeeze(torch.stack(f_pos_feat), dim=1) 130 | f_neg_feat = torch.squeeze(torch.stack(f_neg_feat), dim=1) 131 | 132 | loss_fn = GroupContrastiveLoss(f_pos_feat.shape[0], 1).cuda() 133 | loss = loss_fn(f_neg_feat, f_pos_feat) 134 | print(loss) 135 | 136 | return 137 | 138 | 139 | def main(): 140 | test_qacl() 141 | # test_gcl() 142 | return 143 | 144 | 145 | if __name__ == '__main__': 146 | print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p')) 147 | start_time = time.time() 148 | try: 149 | main() 150 | run_result = 'Program completed successfully!' 
151 |     except Exception as e:
152 |         print(e)
153 |         traceback.print_exc()
154 |         run_result = str(e)
155 |     end_time = time.time()
156 |     print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
157 |     print('Execution time: ' + str(datetime.timedelta(seconds=end_time - start_time)))
158 | 
--------------------------------------------------------------------------------
/networks.py:
--------------------------------------------------------------------------------
1 | from torchvision.models import resnet18
2 | import torch.nn as nn
3 | import clip
4 | 
5 | 
6 | class Resnet18FeatureExtractor(nn.Module):
7 |     def __init__(self):
8 |         super(Resnet18FeatureExtractor, self).__init__()
9 | 
10 |         self.base_model = resnet18()
11 |         modules = list(self.base_model.children())[:-1]
12 |         self.resnet18 = nn.Sequential(*modules)
13 | 
14 |     def forward(self, x):
15 |         return self.resnet18(x).squeeze(-1).squeeze(-1)
16 | 
17 | 
18 | class LLModel(nn.Module):
19 |     def __init__(self, encoder='resnet18', head='linear', feat_out_dim=128):
20 |         super(LLModel, self).__init__()
21 |         network = {'resnet18': 512, 'resnet50': 2048, 'swin': 768}
22 |         if encoder == 'resnet18':
23 |             self.encoder = Resnet18FeatureExtractor()
24 |         if head == 'linear':
25 |             self.head = nn.Linear(network[encoder], feat_out_dim)
26 |             # nn.init.xavier_normal_(self.head.weight)
27 |         elif head == 'mlp':
28 |             self.head = nn.Sequential(
29 |                 nn.Linear(network[encoder], network[encoder]),
30 |                 nn.ReLU(inplace=True),
31 |                 nn.Linear(network[encoder], feat_out_dim)
32 |             )
33 |         else:
34 |             raise NotImplementedError(
35 |                 'head not supported: {}'.format(head))
36 | 
37 |     def forward(self, x):
38 |         feat = self.encoder(x)
39 |         feat = self.head(feat)
40 |         # feat = F.normalize(self.head(feat), dim=1)  # Normalization omitted: the loss uses cosine similarity, which is equivalent to a dot product of normalized features.
41 |         return feat
42 | 
43 | 
44 | class HLModel(nn.Module):
45 |     def __init__(self, head_count=1):
46 |         super(HLModel, self).__init__()
47 |         self.device = "cuda"
48 |         self.clip_model, self.clip_preprocess = clip.load("RN50", device=self.device)
49 |         self.annotator_specific_projections = {}
50 |         self.head_count = head_count
51 | 
52 |         self.image_encoder = self.clip_model.visual
53 |         self.text_encoder = self.clip_model.transformer
54 | 
55 |         self.projection_heads = nn.ModuleList([nn.Linear(1024, 128) for i in range(head_count)])
56 | 
57 |     def forward(self, x):
58 |         clip_image_features = self.image_encoder(x)
59 |         for i in range(self.head_count):
60 |             self.annotator_specific_projections[i] = self.projection_heads[i](clip_image_features)
61 |         return clip_image_features, self.annotator_specific_projections
--------------------------------------------------------------------------------
/test_zeroshot.py:
--------------------------------------------------------------------------------
1 | # Zero-shot evaluation based on low- and high-level features
2 | 
3 | from scipy.stats import spearmanr, pearsonr
4 | from Evaluation.zeroshot_ll_model import *
5 | from Evaluation.zeroshot_hl_model import *
6 | from networks import *
7 | import pandas as pd
8 | import numpy as np
9 | import traceback
10 | import datetime
11 | import argparse
12 | import torch
13 | import time
14 | 
15 | 
16 | def parse_option():
17 |     parser = argparse.ArgumentParser('arguments for evaluation')
18 | 
19 |     parser.add_argument('--device', type=str,
20 |                         default='cuda:0', help='Device (cpu/cuda)')
21 |     parser.add_argument('--ll_model_weights_path', type=str,
22 |                         default='./Evaluation/pretrained_weights/low_level_model_weights.tar',
23 |                         help='Saved weights for the low-level model')
24 |     parser.add_argument('--hl_model_weights_path', type=str,
25 |                         default='./Evaluation/pretrained_weights/high_level_model_weights.pth',
26 |                         help='Saved weights for the high-level model')
27 |     parser.add_argument('--eval_type', type=str,
28 |                         default='zeroshot', help='Evaluation mode (zeroshot/zeroshot_single_img)')
29 | 
30 |     # Arguments for zeroshot/zeroshot_single_img evaluation
31 |     parser.add_argument('--dataset', default='CLIVE', type=str,
32 |                         help='Dataset to use for zeroshot evaluation.')
33 |     parser.add_argument('--img_dir', type=str,
34 |                         default='../Databases/CLIVE/ChallengeDB_release/Images', help='Image directory for the chosen dataset')
35 |     parser.add_argument('--test_img_path', type=str,
36 |                         default='../Databases/CLIVE/ChallengeDB_release/Images/3.bmp', help='Test image path for zeroshot_single_img evaluation')
37 | 
38 |     # Arguments for statistical distance computation in zeroshot evaluation of the LL model
39 |     parser.add_argument('--pristine_img_dir', type=str,
40 |                         default='../Databases/pristine', help='Image directory for pristine images.')
41 |     parser.add_argument('--patch_size', default=96, type=int,
42 |                         help='Patch size for pristine patches')
43 |     parser.add_argument('--sharpness_param', default=0.75, type=float,
44 |                         help='Sharpness parameter for selecting pristine patches')
45 |     parser.add_argument('--colorfulness_param', default=0.8, type=float,
46 |                         help='Colorfulness parameter for selecting pristine patches')
47 |     optn = parser.parse_args()
48 |     return optn
49 | 
50 | 
51 | class ZeroshotEvaluation():
52 |     def __init__(self, args, ll_model, hl_model):
53 |         self.ll_model = ll_model
54 |         self.hl_model = hl_model
55 |         self.args = args
56 | 
57 |     def zeroshot_eval(self):
58 |         test_dataset = self.args.dataset
59 | 
60 |         if test_dataset == 'CLIVE':
61 |             data_loc = './Evaluation/datasets/LIVEC.csv'
62 |         elif test_dataset == 'KONIQ':
63 |             data_loc = './Evaluation/datasets/KONIQ.csv'
64 |         img_dir = self.args.img_dir
65 | 
66 |         names_ll, scores_ll, mos_ll = compute_niqe_distance(self.ll_model, test_dataset, img_dir, data_loc, self.args)
67 |         df_ll = pd.DataFrame()
68 |         df_ll['file_name'] = names_ll
69 |         df_ll['mos'] = mos_ll
70 |         df_ll['score_ll'] = scores_ll
71 | 
72 |         names_hl, scores_hl, mos_hl = compute_hlm_scores(self.hl_model, test_dataset, img_dir, data_loc)
73 |         df_hl = pd.DataFrame()
74 |         df_hl['file_name'] = names_hl
75 |         df_hl['mos'] = mos_hl
76 |         df_hl['score_hl'] = scores_hl
77 | 
78 |         df_scores = pd.merge(df_ll, df_hl, on=['file_name', 'mos'])
79 |         df_scores['combined'] = np.array(df_scores['score_hl']) + np.array(df_scores['score_ll'])
80 | 
81 |         test_correlation_srocc = spearmanr(np.array(df_scores['combined']), np.array(df_scores['mos']))[0]
82 |         polyfit_combined = np.poly1d(np.polyfit(df_scores['combined'], df_scores['mos'], deg=3))
83 |         norm_combined = polyfit_combined(df_scores['combined'])
84 |         test_correlation_plcc = pearsonr(norm_combined, df_scores['mos'])[0]
85 | 
86 |         print(f"SROCC on {test_dataset} is {test_correlation_srocc}")
87 |         print(f"PLCC on {test_dataset} is {test_correlation_plcc}")
88 | 
89 |         return
90 | 
91 |     def zeroshot_eval_single_img(self):
92 |         test_image_path = self.args.test_img_path
93 | 
94 |         score_ll = compute_niqe_distance_single_image(self.ll_model, test_image_path, self.args)
95 |         score_hl = compute_hlm_score_single_image(self.hl_model, test_image_path)
96 | 
97 |         score = score_hl + score_ll
98 |         print(f"Combined quality score (high-level + low-level): {score}")
99 | 
100 |         return
101 | 
102 | 
103 | # Evaluation mode for testing
104 | def eval_mode(model):
105 |     for param in model.parameters():
106 |         param.requires_grad_(False)
107 |     model.eval()
108 | 
109 |     return model
110 | 
111 | 
112 | # Loads a pretrained model
113 | def load_model(model_weights_path, network_type):
114 |     model_weights = model_weights_path
115 |     model = None
116 | 
117 |     if network_type == 'll':
118 |         model = LLModel(encoder='resnet18', head='mlp').to("cuda")
119 |         load_dict = torch.load(model_weights)
120 |         model.load_state_dict(load_dict['model']['state_dict'], strict=True)
121 |     elif network_type == 'hl':
122 |         model = HLModel().to("cuda")
123 |         load_dict = torch.load(model_weights)
124 |         model.clip_model.visual.load_state_dict(load_dict, strict=False)
125 | 
126 |     return model
127 | 
128 | 
129 | def main():
130 |     args = parse_option()
131 |     ll_model_weights_path = args.ll_model_weights_path
132 |     hl_model_weights_path = args.hl_model_weights_path
133 | 
134 |     # Low-level model: ResNet18 backbone trained with the quality-aware contrastive loss
135 |     ll_model = load_model(model_weights_path=ll_model_weights_path, network_type='ll')
136 |     ll_model = eval_mode(model=ll_model)
137 |     # High-level model: pretrained CLIP image encoder fine-tuned with the group contrastive loss
138 |     hl_model = load_model(model_weights_path=hl_model_weights_path, network_type='hl')
139 |     hl_model = eval_mode(model=hl_model)
140 | 
141 |     zeroshot_eval = ZeroshotEvaluation(args, ll_model, hl_model)
142 |     if args.eval_type == 'zeroshot':
143 |         zeroshot_eval.zeroshot_eval()
144 |     elif args.eval_type == 'zeroshot_single_img':
145 |         zeroshot_eval.zeroshot_eval_single_img()
146 | 
147 |     return
148 | 
149 | 
150 | if __name__ == '__main__':
151 |     print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
152 |     start_time = time.time()
153 |     try:
154 |         main()
155 |         run_result = 'Program completed successfully!'
156 |     except Exception as e:
157 |         print(e)
158 |         traceback.print_exc()
159 |         run_result = str(e)
160 |     end_time = time.time()
161 |     print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
162 |     print('Execution time: ' + str(datetime.timedelta(seconds=end_time - start_time)))
163 | 
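A minimal sketch (not from this repository) of driving the zero-shot evaluation above programmatically instead of via the CLI. The argument values mirror the argparse defaults in test_zeroshot.py; it assumes the pretrained weights exist at those paths, a CUDA device is available, and that the Evaluation helpers only read the fields shown:

import torch
from types import SimpleNamespace

from test_zeroshot import ZeroshotEvaluation, load_model, eval_mode

# Values copied from the argparse defaults in parse_option()
args = SimpleNamespace(
    device='cuda:0',
    dataset='CLIVE',
    img_dir='../Databases/CLIVE/ChallengeDB_release/Images',
    test_img_path='../Databases/CLIVE/ChallengeDB_release/Images/3.bmp',
    pristine_img_dir='../Databases/pristine',
    patch_size=96,
    sharpness_param=0.75,
    colorfulness_param=0.8,
)

# Load and freeze both pretrained models, then score a single image
ll_model = eval_mode(load_model('./Evaluation/pretrained_weights/low_level_model_weights.tar', 'll'))
hl_model = eval_mode(load_model('./Evaluation/pretrained_weights/high_level_model_weights.pth', 'hl'))
ZeroshotEvaluation(args, ll_model, hl_model).zeroshot_eval_single_img()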
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | from train_hlm import *
2 | from train_llm import *
3 | from configs import *
4 | import traceback
5 | import datetime
6 | import logging
7 | import time
8 | import json
9 | 
10 | 
11 | logging.getLogger('PIL').setLevel(logging.WARNING)
12 | 
13 | 
14 | def exp1():
15 | 
16 |     # Setting the run directory
17 |     if exp_config['run_type'] == 'll_model_train':
18 |         run_number = len(os.listdir(exp_config['results_path_llm']))
19 |         curr_result_dir = os.path.join(exp_config['results_path_llm'], f'Run{run_number:04}')
20 |     elif exp_config['run_type'] == 'hl_model_train':
21 |         run_number = len(os.listdir(exp_config['results_path_hlm']))
22 |         curr_result_dir = os.path.join(exp_config['results_path_hlm'], f'Run{run_number:04}')
23 |     if exp_config['resume_training']:
24 |         run_number = int(exp_config['resume_path'].split('/')[2][3:])
25 |         curr_result_dir = exp_config['resume_path'].split('Train')[0]
26 | 
27 |     exp_config['results_dir'] = curr_result_dir
28 |     if not os.path.exists(curr_result_dir):
29 |         os.mkdir(curr_result_dir)
30 | 
31 |     # Setting up the log files for easy access to the train and test results; also saving the config file
32 |     # used to run the experiment.
33 |     details_path = os.path.join(exp_config['results_dir'], 'details.txt')
34 |     with open(details_path, 'w'):
35 |         logging.basicConfig(filename=details_path, filemode='a', level=logging.DEBUG, format='')
36 |     config_details_path = os.path.join(exp_config['results_dir'], 'config_details.json')
37 |     json_object = json.dumps(exp_config, indent=4)
38 |     with open(config_details_path, "w") as outfile:
39 |         outfile.write(json_object)
40 |     logging.info(exp_config['run_type'])
41 |     logging.info(f'Run{run_number:04}')
42 | 
43 |     # Setting train and test configurations
44 |     train_datasets = []
45 |     for dataset in exp_config['datasets'].keys():
46 |         if exp_config['datasets'][dataset]['train']:
47 |             train_datasets.append(dataset)
48 |     test_domains = ['CLIVE']
49 | 
50 |     # Training the chosen model
51 |     if exp_config['run_type'] == 'll_model_train':
52 |         model = TrainQCLLLM(exp_config, train_datasets, test_domains)
53 |         model.learn()
54 |     elif exp_config['run_type'] == 'hl_model_train':
55 |         model = TrainGCLHLM(exp_config, train_datasets)
56 |         model.learn()
57 |     return
58 | 
59 | 
60 | def main():
61 |     exp1()
62 |     return
63 | 
64 | 
65 | if __name__ == '__main__':
66 |     print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
67 |     start_time = time.time()
68 |     try:
69 |         main()
70 |         run_result = 'Program completed successfully!'
71 |     except Exception as e:
72 |         print(e)
73 |         traceback.print_exc()
74 |         run_result = str(e)
75 |     end_time = time.time()
76 |     print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
77 |     print('Execution time: ' + str(datetime.timedelta(seconds=end_time - start_time)))
78 | 
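The configs.py module imported above is not reproduced in this dump. As a hypothetical sketch for orientation only, the exp_config dictionary could look as follows; the keys are inferred from how train.py, train_hlm.py, and train_llm.py read them, while every value shown is an illustrative placeholder, not a value from the repository:

# Hypothetical configs.py sketch; keys inferred from usage, values illustrative
exp_config = {
    'run_type': 'll_model_train',        # 'll_model_train' or 'hl_model_train'
    'results_path_llm': './Results/LLM',
    'results_path_hlm': './Results/HLM',
    'resume_training': False,
    'resume_path': '',
    'database_path': '../Databases',
    'datasets': {'KONIQ': {'train': True}},
    'epochs': 10,
    'crop_size': (224, 224),             # used by the HL model dataloader
    'batch_size_gcl': 256,               # group contrastive batch size (HL model)
    'tau': 8,                            # group count divisor for group size
    'batch_size_qacl': 16,               # quality-aware contrastive batch size (LL model)
    'lr_hlm': 1e-5,
    'lr_llm': 1e-4,
    'lr_update': 1,
    'lr_decay': 0.9,
    'test_epoch': 1,
    # NIQE-style evaluation settings for the LL model
    'pristine_img_dir': '../Databases/pristine',
    'patch_size': 96,
    'sharpness_param': 0.75,
    'colorfulness_param': 0.8,
}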
--------------------------------------------------------------------------------
/train_hlm.py:
--------------------------------------------------------------------------------
1 | # Code to train the High-Level Model with the group contrastive loss
2 | 
3 | from torch.utils.tensorboard import SummaryWriter
4 | from Evaluation.zeroshot_hl_model import *
5 | from torch.utils.data import DataLoader
6 | from dataloader_contrastive import *
7 | import torch.optim as optim
8 | import torch.utils.data
9 | from losses import *
10 | import logging
11 | import time
12 | import clip
13 | 
14 | 
15 | logging.getLogger('PIL').setLevel(logging.WARNING)
16 | logging.getLogger('matplotlib.font_manager').setLevel(logging.WARNING)
17 | # torch.cuda.empty_cache()
18 | # torch.autograd.set_detect_anomaly(True)
19 | 
20 | 
21 | class TextCLIP(nn.Module):
22 |     def __init__(self, clip_model):
23 |         super().__init__()
24 |         self.transformer = clip_model.transformer
25 |         self.positional_embedding = clip_model.positional_embedding
26 |         self.ln_final = clip_model.ln_final
27 |         self.text_projection = clip_model.text_projection
28 |         self.dtype = clip_model.dtype
29 |         self.token_embedding = clip_model.token_embedding
30 | 
31 |     def forward(self, text):
32 |         x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
33 | 
34 |         x = x + self.positional_embedding.type(self.dtype)
35 |         x = x.permute(1, 0, 2)  # NLD -> LND
36 |         x = self.transformer(x)
37 |         x = x.permute(1, 0, 2)  # LND -> NLD
38 |         x = self.ln_final(x).type(self.dtype)
39 | 
40 |         # x.shape = [batch_size, n_ctx, transformer.width]
41 |         # take features from the eot embedding (eot_token is the highest number in each sequence)
42 |         x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
43 |         return x
44 | 
45 | 
46 | class ProjectionHead(nn.Module):
47 |     def __init__(self, in_channels=1024, hidden_channels=128):
48 |         super().__init__()
49 | 
50 |         self.in_channels = in_channels
51 |         self.hidden_channels = hidden_channels
52 |         self.fc_hid = nn.Linear(self.in_channels, self.hidden_channels)
53 |         self.relu = nn.ReLU()
54 | 
55 |     def forward(self, x):
56 |         qlt_score = self.relu(self.fc_hid(x))
57 |         return qlt_score
58 | 
59 | 
60 | class TrainGCLHLM(nn.Module):
61 |     def __init__(self, exp_config: dict, train_datasets):
62 |         super(TrainGCLHLM, self).__init__()
63 | 
64 |         self.config = exp_config
65 |         self.train_datasets = train_datasets
66 | 
67 |         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
68 |         self.head = ProjectionHead().to(device=self.device)
69 | 
70 |         self.model, _ = clip.load('RN50', self.device)
71 |         self.model.float()
72 |         # Train only the image encoder; freeze all other CLIP parameters
73 |         for name, param in self.model.named_parameters():
74 |             if name.startswith('visual'):
75 |                 param.requires_grad_(True)
76 |             else:
77 |                 param.requires_grad_(False)
78 | 
79 |         self.model_image = self.model.visual
80 |         self.model_text = TextCLIP(self.model)
81 | 
82 |         self.model_image = self.model_image.to(device=self.device)
83 |         self.model_image.train()
84 |         self.model_text.eval()
85 | 
86 |         self.test_model, _ = clip.load('RN50', self.device)
87 |         self.test_model.float()
88 |         self.test_image = self.test_model.visual
89 |         for p in self.test_image.parameters():
90 |             p.detach_()
91 | 
92 |         classes = ['a Good', 'a Bad']
93 |         text_inputs = torch.cat([clip.tokenize(f"{c} photo.") for c in classes]).to(self.device)
94 |         with torch.no_grad():
95 |             self.text_features = self.model_text(text_inputs).detach()
96 | 
97 |         self.opt = optim.Adam(self.model_image.parameters(), lr=self.config['lr_hlm'])
98 |         self.logger = SummaryWriter((Path(self.config['results_dir']) / 'Logs').as_posix())
99 |         self.save_flag = True
100 | 
101 |     # Makes a model's weights trainable/frozen
102 |     @staticmethod
103 |     def weight_mode(model, trainable=True):
104 |         for param in model.parameters():
105 |             if trainable:
106 |                 param.requires_grad_(True)
107 |             else:
108 |                 param.requires_grad_(False)
109 |         return model
110 | 
111 |     # Initialize dataloaders
112 |     def init_dataloaders(self):
113 | 
114 |         self.train_data = get_train_dataset(base_dataset_path=self.config['database_path'],
115 |                                             train_datasets=self.train_datasets)
116 |         self.pooled_dataset = FrameLoaderHLModel(learning_data=self.train_data, crop_size=self.config['crop_size'])
117 |         self.pooled_loader = torch.utils.data.DataLoader(self.pooled_dataset, batch_size=self.config['batch_size_gcl'],
118 |                                                          pin_memory=True, num_workers=4, drop_last=True,
119 |                                                          shuffle=True)
120 |         return
121 | 
122 |     def save_model(self, model, optimizer):
123 |         model_ckpt_path = Path(self.config['results_dir']) / 'Train'
124 |         if not os.path.exists(model_ckpt_path):
125 |             os.mkdir(model_ckpt_path)
126 | 
127 |         self.test_image.load_state_dict(model.state_dict(), strict=False)
128 |         torch.save(self.test_image.state_dict(), os.path.join(model_ckpt_path, 'image_encoder_%d.pth' % (self.current_epoch)))
129 |         return
130 | 
131 |     def pseudo_labels(self, feat, text_features):
132 |         bs = len(feat)
133 |         all_score = F.normalize(feat) @ F.normalize(text_features).t()
134 |         norm_score = torch.zeros(bs)
135 |         for i in range(bs):
136 |             score = all_score[i]
137 |             tmp = (score[1] - score[0]) / 0.1
138 |             norm_score[i] = 1 / (1 + torch.exp(tmp))  # sigmoid((good - bad) / 0.1)
139 | 
140 |         idx = torch.argsort(norm_score, dim=0, stable=True)  # ascending order of predicted quality
141 |         return idx.detach()
142 | 
143 |     def learn(self):
144 |         train_loss = []
145 |         start_time = time.time()
146 |         self.current_epoch = 1
147 |         start_epoch = 1
148 | 
149 |         self.init_dataloaders()
150 | 
151 |         warmup_iter = int(2.5 * len(self.pooled_loader))
152 |         lr_lambda = (
153 |             lambda cur_iter: cur_iter / warmup_iter
154 |             if cur_iter <= warmup_iter
155 |             else 1)
156 | 
157 |         # Alternative: warmup followed by cosine decay, if needed while training
158 |         # max_iter = int(self.config['epochs'] * len(self.pooled_loader))
159 |         # lr_lambda = (
160 |         #     lambda cur_iter: cur_iter / warmup_iter
161 |         #     if cur_iter <= warmup_iter
162 |         #     else 0.5 * (1 + math.cos(math.pi * (cur_iter - warmup_iter) / max_iter))
163 |         # )
164 | 
165 |         scheduler = torch.optim.lr_scheduler.LambdaLR(
166 |             self.opt, lr_lambda=lr_lambda,
167 |         )
168 | 
169 |         grp_size = self.config['batch_size_gcl'] // self.config['tau']
170 |         contrastive_criterion = GroupContrastiveLoss(grp_size).to(self.device)
171 | 
172 |         ps = self.config['crop_size'][0]
173 |         bs = self.config['batch_size_gcl']
174 |         n_count = None
175 | 
176 |         for epoch in range(start_epoch, self.config['epochs'] + 1):
177 |             epoch_loss = 0
178 | 
179 |             for n_count, sampled_batch in enumerate(self.pooled_loader):
180 |                 frames = sampled_batch['image']
181 |                 frames = frames.view(bs, 3, ps, ps).to(self.device)
182 |                 feat = self.model_image(frames).squeeze()
183 |                 grp_idx = self.pseudo_labels(feat, self.text_features)
184 |                 feat = self.head(feat)
185 |                 f_pos_feat = []
186 |                 f_neg_feat = []
187 | 
188 |                 for n in range(grp_size):
189 |                     try:
190 |                         f_pos_feat.append(feat[grp_idx[n]])
191 |                         f_neg_feat.append(feat[grp_idx[-n - 1]])
192 |                     except IndexError:
193 |                         continue
194 | 
195 |                 f_pos_feat = torch.squeeze(torch.stack(f_pos_feat), dim=1)
196 |                 f_neg_feat = torch.squeeze(torch.stack(f_neg_feat), dim=1)
197 | 
198 |                 loss = contrastive_criterion(f_pos_feat, f_neg_feat)
199 | 
200 |                 train_loss.append(loss.item())
201 |                 epoch_loss += loss.item()
202 | 
203 |                 self.opt.zero_grad()
204 |                 loss.backward()
205 |                 self.opt.step()
206 |                 scheduler.step()
207 | 
208 |             train_loss.append(epoch_loss / (n_count + 1))
209 |             loss_dict = {'loss': train_loss[-1], 'epoch': self.current_epoch}
210 |             self.logger.add_scalar(f'TrainLoss', loss_dict['loss'], loss_dict['epoch'])
211 |             elapsed_time = (time.time() - start_time) / 60
212 |             print('epoch = %4d , loss = %4.4f , time = %4.2f m' % (epoch, epoch_loss / (n_count + 1), elapsed_time))
213 |             self.save_model(self.model_image, self.opt)  # Saving the model after every epoch; the projection head is discarded at inference
214 | 
215 |             # del sampled_batch
216 |             self.current_epoch += 1
217 |             # torch.cuda.empty_cache()
218 | 
219 |         return
--------------------------------------------------------------------------------
/train_llm.py:
--------------------------------------------------------------------------------
1 | # Code to train the Low-Level Model with a quality-aware contrastive loss
2 | 
3 | from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR
4 | from torch.utils.tensorboard import SummaryWriter
5 | from scipy.stats import spearmanr, pearsonr
6 | from Evaluation.zeroshot_ll_model import *
7 | from dataloader_contrastive import *
8 | from torch.optim import Adam, AdamW
9 | from matplotlib import pyplot
10 | import torch.utils.data
11 | from networks import *
12 | from losses import *
13 | import logging
14 | 
15 | logging.getLogger('PIL').setLevel(logging.WARNING)
16 | logging.getLogger('matplotlib.font_manager').setLevel(logging.WARNING)
17 | 
18 | 
19 | class NIQEEvaluationConfig:
20 |     def __init__(self, config):
21 |         self.pristine_img_dir = config['pristine_img_dir']
22 |         self.patch_size = config['patch_size']
23 |         self.sharpness_param = config['sharpness_param']
24 |         self.colorfulness_param = config['colorfulness_param']
25 | 
26 | 
27 | class TrainQCLLLM(nn.Module):
28 | 
29 |     # Class constructor
30 |     def __init__(self, exp_config: dict, train_datasets, test_domains):
31 |         super(TrainQCLLLM, self).__init__()
32 | 
33 |         self.config = exp_config
34 |         self.niqe_config = NIQEEvaluationConfig(self.config)
35 |         self.train_datasets = train_datasets
36 |         self.test_domains = test_domains
37 |         self.test_dict = {}
38 | 
39 |         self.model = LLModel(encoder='resnet18', head='mlp').to("cuda")
40 | 
41 |         self.pooled_loader = None
42 |         self.pooled_dataset = None
43 |         self.train_data = None
44 |         self.test_data = None
45 | 
46 |         self.optimizer = AdamW(self.model.parameters(), weight_decay=0.05, lr=self.config['lr_llm'])
47 | 
48 |         self.logger = SummaryWriter((Path(self.config['results_dir']) / 'Logs').as_posix())
49 |         self.save_flag = True
50 | 
51 |     @staticmethod
52 |     def get_next_train_batch(dataloader, iterator):
53 |         try:
54 |             next_batch = next(iterator)
55 |         except StopIteration:
56 |             print("Stop iteration encountered.")
57 |             iterator = iter(dataloader)
58 |             next_batch = next(iterator)
59 |         return next_batch, iterator
60 | 
61 |     # Initialize dataloaders
62 |     def init_dataloaders(self):
63 | 
64 |         self.train_data = get_train_dataset(base_dataset_path=self.config['database_path'],
65 |                                             train_datasets=self.train_datasets)
66 |         self.pooled_dataset = FrameLoaderLLModel(learning_data=self.train_data)
67 |         self.pooled_loader = torch.utils.data.DataLoader(self.pooled_dataset, batch_size=self.config['batch_size_qacl'],
68 |                                                          pin_memory=True, num_workers=4, drop_last=False,
69 |                                                          shuffle=True)
70 |         return
71 | 
72 |     # Makes a model's weights trainable/frozen
73 |     @staticmethod
74 |     def weight_mode(model, trainable=True):
75 |         for param in model.parameters():
76 |             if trainable:
77 |                 param.requires_grad_(True)
78 |             else:
79 |                 param.requires_grad_(False)
80 |         return model
81 | 
82 |     @staticmethod
83 |     def update_learning_rate(optimizer, factor):
84 |         for group in optimizer.param_groups:
85 |             group['lr'] *= factor
86 | 
87 |         return
88 | 
89 |     def save_model(self, model, optimizer):
90 |         model_ckpt_path = Path(self.config['results_dir']) / 'Train'
91 |         if not os.path.exists(model_ckpt_path):
92 |             os.mkdir(model_ckpt_path)
93 |         model_ckpt_path = os.path.join(model_ckpt_path, 'latest.tar')
94 | 
95 |         save_dict = {'state_dict': model.state_dict()}
96 |         save_opt = {'state_dict': optimizer.state_dict()}
97 |         full_dict = {'model': save_dict, 'current_iteration': self.current_iteration, 'optimizer': save_opt}
98 |         torch.save(full_dict, model_ckpt_path)
99 |         return
100 | 
101 |     def load_model(self, load_path):
102 |         model_dict = torch.load(load_path)
103 |         self.model.load_state_dict(model_dict['model']['state_dict'])
104 |         self.optimizer.load_state_dict(model_dict['optimizer']['state_dict'])
105 |         self.model = self.model.to("cuda")
106 |         self.current_iteration = model_dict['current_iteration']
107 |         return
108 | 
109 |     def learn(self):
110 |         train_loss = []
111 |         self.current_iteration = 1
112 | 
113 |         start_iteration = 1
114 |         if self.config['resume_training']:
115 |             self.load_model(self.config['resume_path'])
116 |             start_iteration = self.current_iteration
117 | 
118 |         self.init_dataloaders()
119 |         iterator_model = iter(self.pooled_loader)
120 | 
121 |         total_iterations = int((self.config['epochs'] * len(self.pooled_loader)))
122 |         test_iteration = int((self.config['test_epoch'] * len(self.pooled_loader)))
123 |         lr_update_iteration = int((self.config['lr_update'] * len(self.pooled_loader)))
124 | 
125 |         scheduler = CosineAnnealingLR(optimizer=self.optimizer,
126 |                                       T_max=total_iterations,
127 |                                       eta_min=1e-6)
128 | 
129 |         # In case testing needs to be done periodically
130 |         self.test_dict['test_srocc'] = {}
131 |         for curr_set in self.test_domains:
132 |             self.test_dict['test_srocc'][curr_set] = []
133 |         self.test_dict['test_srocc']['iter_no'] = []
134 | 
135 |         # Trainable feature extractor
136 |         self.model = self.weight_mode(self.model, trainable=True)
137 |         self.model.train()
138 | 
139 |         for iteration in range(start_iteration, total_iterations + 1):
140 |             sampled_batch, iterator_model = self.get_next_train_batch(self.pooled_loader, iterator_model)
141 |             frames = sampled_batch['images']
142 |             augmentations = sampled_batch['augmentations']
143 |             annotators = sampled_batch['annotators'].to("cuda")
144 | 
145 |             (b, d, c, h, w) = frames.shape
146 |             frames_grouped = (frames.reshape(b * d, c, h, w)).to("cuda")
147 |             augmentations_grouped = (augmentations.reshape(b * d, c, h, w)).to("cuda")
148 | 
149 |             features_frames = self.model(frames_grouped)
150 |             features_frames = torch.stack(torch.split(features_frames, d, dim=0))
151 |             features_augmentations = self.model(augmentations_grouped)
152 |             features_augmentations = torch.stack(torch.split(features_augmentations, d, dim=0))
153 | 
154 |             loss = weighted_contrastive_loss(features_frames, features_augmentations, 0.5, annotators)
155 | 
156 |             self.optimizer.zero_grad()
157 |             loss.backward()
158 |             self.optimizer.step()
159 |             scheduler.step()
160 | 
161 |             # Logging to tensorboard
162 |             train_loss.append(loss.item())
163 |             loss_dict = {'loss': train_loss[-1], 'iteration': self.current_iteration}
164 |             self.logger.add_scalar(f'TrainLoss', loss_dict['loss'], loss_dict['iteration'])
165 | 
166 |             # Updating learning rate after specified number of cycles
167 |             if iteration % lr_update_iteration == 0:
168 |                 self.update_learning_rate(optimizer=self.optimizer, factor=self.config['lr_decay'])
169 | 
170 |             per_sample_loss = train_loss[-1] / self.config["batch_size_qacl"]
171 |             print(f'Iteration {iteration} done with per sample loss {per_sample_loss:0.4f}.')
172 |             self.save_model(self.model, self.optimizer)  # Saving the model after every iteration
173 | 
174 |             # Testing
175 |             # if iteration % test_iteration == 0 or iteration == total_iterations:
176 |             if iteration == total_iterations:
177 |                 self.test_dict['test_srocc']['iter_no'].append(self.current_iteration)
178 |                 self.test()
179 |                 self.model = self.weight_mode(self.model, trainable=True)
180 |                 self.model.train()
181 | 
182 |             self.current_iteration += 1
183 | 
184 |             del sampled_batch
185 |             torch.cuda.empty_cache()
186 | 
187 |         return
188 | 
189 |     def test(self):
190 |         with torch.no_grad():
191 |             self.model = self.weight_mode(self.model, trainable=False)
192 |             self.model.eval()
193 | 
194 |             for curr_set in self.test_domains:
195 | 
196 |                 if curr_set == 'CLIVE':
197 |                     img_dir = self.config['database_path'] + '/CLIVE/ChallengeDB_release/Images'
198 |                     data_loc = './Evaluation/datasets/LIVEC.csv'
199 | 
200 |                 self.test_dict[curr_set] = {'Image_name': [], 'dmos': [], f'pred{self.current_iteration:04d}': []}
201 | 
202 |                 names, scores, moss = compute_niqe_distance(self.model, curr_set, img_dir, data_loc, self.niqe_config)
203 |                 srocc_value = spearmanr(scores, moss)[0]
204 | 
205 |                 self.test_dict[curr_set]['Image_name'] = names
206 |                 self.test_dict[curr_set]['dmos'] = moss
207 |                 self.test_dict[curr_set][f'pred{self.current_iteration:04}'] = scores
208 | 
209 |                 self.test_dict[curr_set][f'pred{self.current_iteration:04}'].append(srocc_value)
210 |                 self.test_dict[curr_set]['Image_name'].append('SRCC')
211 |                 self.test_dict[curr_set]['dmos'].append(-1.0)
212 | 
213 |                 details_path = os.path.join(self.config['results_dir'], 'details.txt')
214 |                 logging.basicConfig(filename=details_path, filemode='a', level=logging.DEBUG, format='')
215 | 
216 |                 print(f"Performance on {curr_set} is {srocc_value}")
217 |                 logging.info(f"SRCC for {self.current_iteration:04}, {curr_set} is {srocc_value}")
218 | 
219 |                 # Saving test performance to disk
220 |                 if not os.path.exists((Path(self.config['results_dir']) / 'Test').as_posix()):
221 |                     os.mkdir((Path(self.config['results_dir']) / 'Test').as_posix())
222 |                 save_dir = (Path(self.config['results_dir']) / f'Test/{curr_set}.csv').as_posix()
223 | 
224 |                 if self.save_flag:
225 |                     df = pd.DataFrame.from_dict(self.test_dict[curr_set])
226 |                     df.to_csv(save_dir, index=False)
227 |                 else:
228 |                     df1 = pd.read_csv(save_dir)
229 |                     df1[f'pred{self.current_iteration:04}'] = self.test_dict[curr_set][
230 |                         f'pred{self.current_iteration:04}']
231 |                     df1.to_csv(save_dir, index=False)
232 | 
233 |                 self.test_dict['test_srocc'][curr_set].append(srocc_value)
234 | 
235 |                 # Saving the test performance plot
236 |                 pyplot.figure(1)
237 |                 pyplot.plot(self.test_dict['test_srocc']['iter_no'], self.test_dict['test_srocc'][curr_set])
238 |                 pyplot.grid()
239 |                 pyplot.xlabel('Training Iteration')
240 |                 pyplot.ylabel('SROCC')
241 |                 pyplot.savefig(Path(self.config['results_dir']) / f'Test/test_{curr_set}.png')
242 | 
243 |             self.save_flag = False
244 | 
245 |         return
--------------------------------------------------------------------------------
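For clarity, a self-contained sketch (not from this repository) of the CLIP-based quality scoring that pseudo_labels() in train_hlm.py uses to rank a batch: sigmoid((sim_good - sim_bad) / 0.1) against the prompts "a Good photo." / "a Bad photo.". It uses CLIP's public encode_image/encode_text instead of the repo's TextCLIP wrapper and raw visual encoder, which compute the same text features and closely related image features:

import clip
import torch
import torch.nn.functional as F

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("RN50", device=device)

# Same prompts as train_hlm.py: classes = ['a Good', 'a Bad'] -> f"{c} photo."
prompts = clip.tokenize(["a Good photo.", "a Bad photo."]).to(device)
with torch.no_grad():
    text_features = model.encode_text(prompts).float()


def clip_quality_score(images: torch.Tensor) -> torch.Tensor:
    """images: preprocessed batch (B, 3, H, W); returns per-image scores in (0, 1)."""
    with torch.no_grad():
        image_features = model.encode_image(images).float()
    # Cosine similarity of each image against both prompts -> (B, 2)
    sims = F.normalize(image_features) @ F.normalize(text_features).t()
    # sims[:, 0] ~ "Good", sims[:, 1] ~ "Bad"; temperature 0.1 as in pseudo_labels()
    return torch.sigmoid((sims[:, 0] - sims[:, 1]) / 0.1)

Sorting these scores in ascending order reproduces the group indices that learn() splits into the lowest- and highest-scoring groups for the group contrastive loss.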