├── .idea
│   ├── encodings.xml
│   ├── hyperIQA.iml
│   ├── misc.xml
│   └── modules.xml
├── HyerIQASolver.py
├── LICENSE
├── README.md
├── __pycache__
│   ├── HyerIQASolver.cpython-36.pyc
│   ├── HyerIQASolver.cpython-37.pyc
│   ├── data_loader.cpython-36.pyc
│   ├── data_loader.cpython-37.pyc
│   ├── folders.cpython-36.pyc
│   ├── folders.cpython-37.pyc
│   ├── models.cpython-36.pyc
│   └── models.cpython-37.pyc
├── csiq_label.txt
├── data
│   ├── D_01.jpg
│   ├── D_02.jpg
│   └── D_03.jpg
├── data_loader.py
├── demo.py
├── folders.py
├── models.py
└── train_test_IQA.py
/HyerIQASolver.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from scipy import stats
3 | import numpy as np
4 | import models
5 | import data_loader
6 |
7 | class HyperIQASolver(object):
8 | """Solver for training and testing hyperIQA"""
9 | def __init__(self, config, path, train_idx, test_idx):
10 |
11 | self.epochs = config.epochs
12 | self.test_patch_num = config.test_patch_num
13 |
14 | self.model_hyper = models.HyperNet(16, 112, 224, 112, 56, 28, 14, 7).cuda()
15 | self.model_hyper.train(True)
16 |
17 | self.l1_loss = torch.nn.L1Loss().cuda()
18 |
19 | backbone_params = list(map(id, self.model_hyper.res.parameters()))
20 | self.hypernet_params = list(filter(lambda p: id(p) not in backbone_params, self.model_hyper.parameters()))  # materialize as a list: a bare filter would be exhausted the first time the optimizer consumes it, leaving the re-created optimizer in train() with no hypernet params
21 | self.lr = config.lr
22 | self.lrratio = config.lr_ratio
23 | self.weight_decay = config.weight_decay
24 | paras = [{'params': self.hypernet_params, 'lr': self.lr * self.lrratio},
25 | {'params': self.model_hyper.res.parameters(), 'lr': self.lr}
26 | ]
27 | self.solver = torch.optim.Adam(paras, weight_decay=self.weight_decay)
28 |
29 | train_loader = data_loader.DataLoader(config.dataset, path, train_idx, config.patch_size, config.train_patch_num, batch_size=config.batch_size, istrain=True)
30 | test_loader = data_loader.DataLoader(config.dataset, path, test_idx, config.patch_size, config.test_patch_num, istrain=False)
31 | self.train_data = train_loader.get_data()
32 | self.test_data = test_loader.get_data()
33 |
34 | def train(self):
35 | """Training"""
36 | best_srcc = 0.0
37 | best_plcc = 0.0
38 | print('Epoch\tTrain_Loss\tTrain_SRCC\tTest_SRCC\tTest_PLCC')
39 | for t in range(self.epochs):
40 | epoch_loss = []
41 | pred_scores = []
42 | gt_scores = []
43 |
44 | for img, label in self.train_data:
45 | img = img.cuda()
46 | label = label.cuda()
47 |
48 | self.solver.zero_grad()
49 |
50 | # Generate weights for target network
51 | paras = self.model_hyper(img) # 'paras' contains the network weights conveyed to target network
52 |
53 | # Building target network
54 | model_target = models.TargetNet(paras).cuda()
55 | for param in model_target.parameters():
56 | param.requires_grad = False
57 |
58 | # Quality prediction
59 | pred = model_target(paras['target_in_vec'])  # 'paras['target_in_vec']' is the input vector to the target net
60 | pred_scores = pred_scores + pred.cpu().tolist()
61 | gt_scores = gt_scores + label.cpu().tolist()
62 |
63 | loss = self.l1_loss(pred.squeeze(), label.float().detach())
64 | epoch_loss.append(loss.item())
65 | loss.backward()
66 | self.solver.step()
67 |
68 | train_srcc, _ = stats.spearmanr(pred_scores, gt_scores)
69 |
70 | test_srcc, test_plcc = self.test(self.test_data)
71 | if test_srcc > best_srcc:
72 | best_srcc = test_srcc
73 | best_plcc = test_plcc
74 | print('%d\t%4.3f\t\t%4.4f\t\t%4.4f\t\t%4.4f' %
75 | (t + 1, sum(epoch_loss) / len(epoch_loss), train_srcc, test_srcc, test_plcc))
76 |
77 | # Update optimizer
78 | lr = self.lr / pow(10, (t // 6))  # decay the hypernet learning rate by 10x every 6 epochs
79 | if t > 8:
80 | self.lrratio = 1  # after the early epochs, drop the hypernet LR multiplier
81 | self.paras = [{'params': self.hypernet_params, 'lr': lr * self.lrratio},
82 | {'params': self.model_hyper.res.parameters(), 'lr': self.lr}
83 | ]
84 | self.solver = torch.optim.Adam(self.paras, weight_decay=self.weight_decay)
85 |
86 | print('Best test SRCC %f, PLCC %f' % (best_srcc, best_plcc))
87 |
88 | return best_srcc, best_plcc
89 |
90 | def test(self, data):
91 | """Testing"""
92 | self.model_hyper.train(False)
93 | pred_scores = []
94 | gt_scores = []
95 |
96 | for img, label in data:
97 | # Move data to the GPU
98 | img = img.cuda()
99 | label = label.cuda()
100 |
101 | paras = self.model_hyper(img)
102 | model_target = models.TargetNet(paras).cuda()
103 | model_target.train(False)
104 | pred = model_target(paras['target_in_vec'])
105 |
106 | pred_scores.append(float(pred.item()))
107 | gt_scores = gt_scores + label.cpu().tolist()
108 |
109 | pred_scores = np.mean(np.reshape(np.array(pred_scores), (-1, self.test_patch_num)), axis=1)
110 | gt_scores = np.mean(np.reshape(np.array(gt_scores), (-1, self.test_patch_num)), axis=1)
111 | test_srcc, _ = stats.spearmanr(pred_scores, gt_scores)
112 | test_plcc, _ = stats.pearsonr(pred_scores, gt_scores)
113 |
114 | self.model_hyper.train(True)
115 | return test_srcc, test_plcc
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 SSL92
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HyperIQA
2 |
3 | This is the source code for the CVPR'20 paper "[Blindly Assess Image Quality in the Wild Guided by A Self-Adaptive Hyper Network](https://openaccess.thecvf.com/content_CVPR_2020/papers/Su_Blindly_Assess_Image_Quality_in_the_Wild_Guided_by_a_CVPR_2020_paper.pdf)".
4 |
5 | ## Dependencies
6 |
7 | - Python 3.6+
8 | - PyTorch 0.4+
9 | - TorchVision
10 | - scipy
11 |
12 | Optional, for loading specific IQA datasets:
13 | - csv (KonIQ-10k dataset; part of the Python standard library)
14 | - openpyxl (BID dataset)
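
For a recent pip environment, an install along these lines should cover the third-party requirements (a sketch; pin versions to match your Python/CUDA setup as needed):

```
pip install torch torchvision scipy openpyxl
```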
15 |
16 | ## Usages
17 |
18 | ### Testing a single image
19 |
20 | Predict image quality with our model trained on the KonIQ-10k dataset.
21 |
22 | To run the demo, download the pre-trained model from [Google drive](https://drive.google.com/file/d/1OOUmnbvpGea0LIGpIWEbOyxfWx6UCiiE/view?usp=sharing) or [Baidu cloud](https://pan.baidu.com/s/1yY3O8DbfTTtUwXn14Mtr8Q) (password: 1ty8), put it in the 'pretrained' folder, then run:
23 |
24 | ```
25 | python demo.py
26 | ```
27 |
28 | You will get a quality score ranging from 0 to 100; a higher value indicates better image quality.
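
To score several images in one process, the demo's crop-and-average loop is easy to wrap in a helper. A minimal sketch follows; `predict_quality` is our name for it, and it assumes `model_hyper` and `transforms` are built exactly as in `demo.py`:

```
import numpy as np
from PIL import Image
import models

def predict_quality(im_path, model_hyper, transforms, n_crops=10):
    # average predictions over several random 224x224 crops of the image
    scores = []
    for _ in range(n_crops):
        img = transforms(Image.open(im_path).convert('RGB')).cuda().unsqueeze(0)
        paras = model_hyper(img)                       # hyper network generates target-net weights
        model_target = models.TargetNet(paras).cuda()  # build the per-image target network
        for p in model_target.parameters():
            p.requires_grad = False
        scores.append(float(model_target(paras['target_in_vec']).item()))
    return np.mean(scores)
```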
29 |
30 | ### Training & Testing on IQA databases
31 |
32 | Train and test our model on the LIVE Challenge dataset:
33 |
34 | ```
35 | python train_test_IQA.py
36 | ```
37 |
38 | Some available options (an example run follows the list):
39 | * `--dataset`: Training and testing dataset; supported datasets: livec | koniq-10k | bid | live | csiq | tid2013.
40 | * `--train_patch_num`: Sampled image patch number per training image.
41 | * `--test_patch_num`: Sampled image patch number per testing image.
42 | * `--batch_size`: Batch size.
43 |
44 | When training or testing on the CSIQ dataset, please put 'csiq_label.txt' in your CSIQ folder.
45 |
46 | ## Citation
47 | If you find this work useful for your research, please cite our paper:
48 | ```
49 | @InProceedings{Su_2020_CVPR,
50 | author = {Su, Shaolin and Yan, Qingsen and Zhu, Yu and Zhang, Cheng and Ge, Xin and Sun, Jinqiu and Zhang, Yanning},
51 | title = {Blindly Assess Image Quality in the Wild Guided by a Self-Adaptive Hyper Network},
52 | booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
53 | month = {June},
54 | year = {2020}
55 | }
56 | ```
57 |
--------------------------------------------------------------------------------
/__pycache__/HyerIQASolver.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/HyerIQASolver.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/HyerIQASolver.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/HyerIQASolver.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/data_loader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/data_loader.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/data_loader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/data_loader.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/folders.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/folders.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/folders.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/folders.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/models.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/models.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/models.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/__pycache__/models.cpython-37.pyc
--------------------------------------------------------------------------------
/csiq_label.txt:
--------------------------------------------------------------------------------
1 | 1600.AWGN.1.png 0.061989
2 | 1600.AWGN.2.png 0.206245
3 | 1600.AWGN.3.png 0.262122
4 | 1600.AWGN.4.png 0.374632
5 | 1600.AWGN.5.png 0.467480
6 | aerial_city.AWGN.1.png 0.031592
7 | aerial_city.AWGN.2.png 0.125135
8 | aerial_city.AWGN.3.png 0.244957
9 | aerial_city.AWGN.4.png 0.372947
10 | aerial_city.AWGN.5.png 0.501263
11 | boston.AWGN.1.png 0.029159
12 | boston.AWGN.2.png 0.081368
13 | boston.AWGN.3.png 0.215496
14 | boston.AWGN.4.png 0.383116
15 | boston.AWGN.5.png 0.471591
16 | bridge.AWGN.1.png 0.027782
17 | bridge.AWGN.2.png 0.087857
18 | bridge.AWGN.3.png 0.251788
19 | bridge.AWGN.4.png 0.408695
20 | bridge.AWGN.5.png 0.517009
21 | butter_flower.AWGN.1.png 0.026574
22 | butter_flower.AWGN.2.png 0.089165
23 | butter_flower.AWGN.3.png 0.252299
24 | butter_flower.AWGN.4.png 0.434892
25 | butter_flower.AWGN.5.png 0.545271
26 | cactus.AWGN.1.png 0.104329
27 | cactus.AWGN.2.png 0.205708
28 | cactus.AWGN.3.png 0.349438
29 | cactus.AWGN.4.png 0.439167
30 | cactus.AWGN.5.png 0.498391
31 | child_swimming.AWGN.1.png 0.028457
32 | child_swimming.AWGN.2.png 0.120207
33 | child_swimming.AWGN.3.png 0.271600
34 | child_swimming.AWGN.4.png 0.412687
35 | child_swimming.AWGN.5.png 0.488561
36 | couple.AWGN.1.png 0.005539
37 | couple.AWGN.2.png 0.109029
38 | couple.AWGN.3.png 0.204356
39 | couple.AWGN.4.png 0.325231
40 | couple.AWGN.5.png 0.469608
41 | elk.AWGN.1.png 0.033320
42 | elk.AWGN.2.png 0.065236
43 | elk.AWGN.3.png 0.171903
44 | elk.AWGN.4.png 0.236539
45 | elk.AWGN.5.png 0.357941
46 | family.AWGN.1.png 0.087792
47 | family.AWGN.2.png 0.149679
48 | family.AWGN.3.png 0.274742
49 | family.AWGN.4.png 0.375346
50 | family.AWGN.5.png 0.479589
51 | fisher.AWGN.1.png 0.052354
52 | fisher.AWGN.2.png 0.130219
53 | fisher.AWGN.3.png 0.216470
54 | fisher.AWGN.4.png 0.312143
55 | fisher.AWGN.5.png 0.440315
56 | foxy.AWGN.1.png 0.013529
57 | foxy.AWGN.2.png 0.063617
58 | foxy.AWGN.3.png 0.136469
59 | foxy.AWGN.4.png 0.250446
60 | foxy.AWGN.5.png 0.381163
61 | geckos.AWGN.1.png 0.028506
62 | geckos.AWGN.2.png 0.060272
63 | geckos.AWGN.3.png 0.116695
64 | geckos.AWGN.4.png 0.201859
65 | geckos.AWGN.5.png 0.336186
66 | lady_liberty.AWGN.1.png 0.032476
67 | lady_liberty.AWGN.2.png 0.151055
68 | lady_liberty.AWGN.3.png 0.254612
69 | lady_liberty.AWGN.4.png 0.384169
70 | lady_liberty.AWGN.5.png 0.487483
71 | lake.AWGN.1.png 0.022140
72 | lake.AWGN.2.png 0.136921
73 | lake.AWGN.3.png 0.295296
74 | lake.AWGN.4.png 0.437237
75 | lake.AWGN.5.png 0.536122
76 | log_seaside.AWGN.1.png 0.000000
77 | log_seaside.AWGN.2.png 0.056561
78 | log_seaside.AWGN.3.png 0.131855
79 | log_seaside.AWGN.4.png 0.267465
80 | log_seaside.AWGN.5.png 0.441762
81 | monument.AWGN.1.png 0.012407
82 | monument.AWGN.2.png 0.081070
83 | monument.AWGN.3.png 0.183266
84 | monument.AWGN.4.png 0.324758
85 | monument.AWGN.5.png 0.467743
86 | native_american.AWGN.1.png 0.040322
87 | native_american.AWGN.2.png 0.069080
88 | native_american.AWGN.3.png 0.223948
89 | native_american.AWGN.4.png 0.401099
90 | native_american.AWGN.5.png 0.476060
91 | redwood.AWGN.1.png 0.050866
92 | redwood.AWGN.2.png 0.187574
93 | redwood.AWGN.3.png 0.322696
94 | redwood.AWGN.4.png 0.431555
95 | redwood.AWGN.5.png 0.544838
96 | roping.AWGN.1.png 0.002501
97 | roping.AWGN.2.png 0.105028
98 | roping.AWGN.3.png 0.184517
99 | roping.AWGN.4.png 0.381884
100 | roping.AWGN.5.png 0.513078
101 | rushmore.AWGN.1.png 0.032498
102 | rushmore.AWGN.2.png 0.130996
103 | rushmore.AWGN.3.png 0.258897
104 | rushmore.AWGN.4.png 0.381888
105 | rushmore.AWGN.5.png 0.512603
106 | shroom.AWGN.1.png 0.031449
107 | shroom.AWGN.2.png 0.135602
108 | shroom.AWGN.3.png 0.236730
109 | shroom.AWGN.4.png 0.355141
110 | shroom.AWGN.5.png 0.463235
111 | snow_leaves.AWGN.1.png 0.018848
112 | snow_leaves.AWGN.2.png 0.063487
113 | snow_leaves.AWGN.3.png 0.211740
114 | snow_leaves.AWGN.4.png 0.293439
115 | snow_leaves.AWGN.5.png 0.434127
116 | sunsetcolor.AWGN.1.png 0.039306
117 | sunsetcolor.AWGN.2.png 0.205564
118 | sunsetcolor.AWGN.3.png 0.302095
119 | sunsetcolor.AWGN.4.png 0.409396
120 | sunsetcolor.AWGN.5.png 0.515154
121 | sunset_sparrow.AWGN.1.png 0.042131
122 | sunset_sparrow.AWGN.2.png 0.179278
123 | sunset_sparrow.AWGN.3.png 0.325552
124 | sunset_sparrow.AWGN.4.png 0.457659
125 | sunset_sparrow.AWGN.5.png 0.548982
126 | swarm.AWGN.1.png 0.035196
127 | swarm.AWGN.2.png 0.165720
128 | swarm.AWGN.3.png 0.321872
129 | swarm.AWGN.4.png 0.419232
130 | swarm.AWGN.5.png 0.577175
131 | trolley.AWGN.1.png 0.028644
132 | trolley.AWGN.2.png 0.090283
133 | trolley.AWGN.3.png 0.219874
134 | trolley.AWGN.4.png 0.341872
135 | trolley.AWGN.5.png 0.450259
136 | turtle.AWGN.1.png 0.019981
137 | turtle.AWGN.2.png 0.119220
138 | turtle.AWGN.3.png 0.198715
139 | turtle.AWGN.4.png 0.329355
140 | turtle.AWGN.5.png 0.435649
141 | veggies.AWGN.1.png 0.017592
142 | veggies.AWGN.2.png 0.098451
143 | veggies.AWGN.3.png 0.213596
144 | veggies.AWGN.4.png 0.346756
145 | veggies.AWGN.5.png 0.472459
146 | woman.AWGN.1.png 0.051841
147 | woman.AWGN.2.png 0.197147
148 | woman.AWGN.3.png 0.288616
149 | woman.AWGN.4.png 0.409103
150 | woman.AWGN.5.png 0.489830
151 | 1600.JPEG.1.png 0.012873
152 | 1600.JPEG.2.png 0.069148
153 | 1600.JPEG.3.png 0.196827
154 | 1600.JPEG.4.png 0.501063
155 | 1600.JPEG.5.png 0.687127
156 | aerial_city.JPEG.1.png 0.030282
157 | aerial_city.JPEG.2.png 0.064545
158 | aerial_city.JPEG.3.png 0.388892
159 | aerial_city.JPEG.4.png 0.607260
160 | aerial_city.JPEG.5.png 0.819082
161 | boston.JPEG.1.png 0.000000
162 | boston.JPEG.2.png 0.049884
163 | boston.JPEG.3.png 0.328492
164 | boston.JPEG.4.png 0.736937
165 | boston.JPEG.5.png 0.832481
166 | bridge.JPEG.1.png 0.037193
167 | bridge.JPEG.2.png 0.075837
168 | bridge.JPEG.3.png 0.272774
169 | bridge.JPEG.4.png 0.743116
170 | bridge.JPEG.5.png 0.816164
171 | butter_flower.JPEG.1.png 0.023259
172 | butter_flower.JPEG.2.png 0.030891
173 | butter_flower.JPEG.3.png 0.409916
174 | butter_flower.JPEG.4.png 0.699413
175 | butter_flower.JPEG.5.png 0.808089
176 | cactus.JPEG.1.png 0.040969
177 | cactus.JPEG.2.png 0.160464
178 | cactus.JPEG.3.png 0.378421
179 | cactus.JPEG.4.png 0.704142
180 | cactus.JPEG.5.png 0.811723
181 | child_swimming.JPEG.1.png 0.028860
182 | child_swimming.JPEG.2.png 0.029868
183 | child_swimming.JPEG.3.png 0.235530
184 | child_swimming.JPEG.4.png 0.514454
185 | child_swimming.JPEG.5.png 0.695730
186 | couple.JPEG.1.png 0.002154
187 | couple.JPEG.2.png 0.033951
188 | couple.JPEG.3.png 0.258088
189 | couple.JPEG.4.png 0.586040
190 | couple.JPEG.5.png 0.723660
191 | elk.JPEG.1.png 0.032371
192 | elk.JPEG.2.png 0.068200
193 | elk.JPEG.3.png 0.219959
194 | elk.JPEG.4.png 0.549672
195 | elk.JPEG.5.png 0.710220
196 | family.JPEG.1.png 0.022005
197 | family.JPEG.2.png 0.134202
198 | family.JPEG.3.png 0.646726
199 | family.JPEG.4.png 0.761771
200 | family.JPEG.5.png 0.846264
201 | fisher.JPEG.1.png 0.008109
202 | fisher.JPEG.2.png 0.180489
203 | fisher.JPEG.3.png 0.517848
204 | fisher.JPEG.4.png 0.724555
205 | fisher.JPEG.5.png 0.790682
206 | foxy.JPEG.1.png 0.016044
207 | foxy.JPEG.2.png 0.074939
208 | foxy.JPEG.3.png 0.154162
209 | foxy.JPEG.4.png 0.322037
210 | foxy.JPEG.5.png 0.551065
211 | geckos.JPEG.1.png 0.021995
212 | geckos.JPEG.2.png 0.046741
213 | geckos.JPEG.3.png 0.134775
214 | geckos.JPEG.4.png 0.462546
215 | geckos.JPEG.5.png 0.734366
216 | lady_liberty.JPEG.1.png 0.016972
217 | lady_liberty.JPEG.2.png 0.273514
218 | lady_liberty.JPEG.3.png 0.531660
219 | lady_liberty.JPEG.4.png 0.654502
220 | lady_liberty.JPEG.5.png 0.770318
221 | lake.JPEG.1.png 0.020702
222 | lake.JPEG.2.png 0.078837
223 | lake.JPEG.3.png 0.155843
224 | lake.JPEG.4.png 0.429371
225 | lake.JPEG.5.png 0.729901
226 | log_seaside.JPEG.1.png 0.000208
227 | log_seaside.JPEG.2.png 0.009289
228 | log_seaside.JPEG.3.png 0.037806
229 | log_seaside.JPEG.4.png 0.269258
230 | log_seaside.JPEG.5.png 0.638936
231 | monument.JPEG.1.png 0.015565
232 | monument.JPEG.2.png 0.063270
233 | monument.JPEG.3.png 0.323963
234 | monument.JPEG.4.png 0.567924
235 | monument.JPEG.5.png 0.765559
236 | native_american.JPEG.1.png 0.022606
237 | native_american.JPEG.2.png 0.091709
238 | native_american.JPEG.3.png 0.362078
239 | native_american.JPEG.4.png 0.699263
240 | native_american.JPEG.5.png 0.827044
241 | redwood.JPEG.1.png 0.027168
242 | redwood.JPEG.2.png 0.098655
243 | redwood.JPEG.3.png 0.425072
244 | redwood.JPEG.4.png 0.744926
245 | redwood.JPEG.5.png 0.845802
246 | roping.JPEG.1.png 0.013677
247 | roping.JPEG.2.png 0.075115
248 | roping.JPEG.3.png 0.300448
249 | roping.JPEG.4.png 0.599293
250 | roping.JPEG.5.png 0.829291
251 | rushmore.JPEG.1.png 0.024131
252 | rushmore.JPEG.2.png 0.042972
253 | rushmore.JPEG.3.png 0.180693
254 | rushmore.JPEG.4.png 0.537272
255 | rushmore.JPEG.5.png 0.666299
256 | shroom.JPEG.1.png 0.019647
257 | shroom.JPEG.2.png 0.124728
258 | shroom.JPEG.3.png 0.307087
259 | shroom.JPEG.4.png 0.663131
260 | shroom.JPEG.5.png 0.780081
261 | snow_leaves.JPEG.1.png 0.021858
262 | snow_leaves.JPEG.2.png 0.059661
263 | snow_leaves.JPEG.3.png 0.260905
264 | snow_leaves.JPEG.4.png 0.555248
265 | snow_leaves.JPEG.5.png 0.710816
266 | sunsetcolor.JPEG.1.png 0.031170
267 | sunsetcolor.JPEG.2.png 0.436003
268 | sunsetcolor.JPEG.3.png 0.693030
269 | sunsetcolor.JPEG.4.png 0.884946
270 | sunsetcolor.JPEG.5.png 0.917643
271 | sunset_sparrow.JPEG.1.png 0.043167
272 | sunset_sparrow.JPEG.2.png 0.117386
273 | sunset_sparrow.JPEG.3.png 0.341719
274 | sunset_sparrow.JPEG.4.png 0.721518
275 | sunset_sparrow.JPEG.5.png 0.826071
276 | swarm.JPEG.1.png 0.046621
277 | swarm.JPEG.2.png 0.369293
278 | swarm.JPEG.3.png 0.713532
279 | swarm.JPEG.4.png 0.775962
280 | swarm.JPEG.5.png 0.891153
281 | trolley.JPEG.1.png 0.026266
282 | trolley.JPEG.2.png 0.057287
283 | trolley.JPEG.3.png 0.168480
284 | trolley.JPEG.4.png 0.578454
285 | trolley.JPEG.5.png 0.726540
286 | turtle.JPEG.1.png 0.015467
287 | turtle.JPEG.2.png 0.128719
288 | turtle.JPEG.3.png 0.488430
289 | turtle.JPEG.4.png 0.689535
290 | turtle.JPEG.5.png 0.775016
291 | veggies.JPEG.1.png 0.037552
292 | veggies.JPEG.2.png 0.107661
293 | veggies.JPEG.3.png 0.229410
294 | veggies.JPEG.4.png 0.589821
295 | veggies.JPEG.5.png 0.730394
296 | woman.JPEG.1.png 0.031199
297 | woman.JPEG.2.png 0.118715
298 | woman.JPEG.3.png 0.348318
299 | woman.JPEG.4.png 0.621443
300 | woman.JPEG.5.png 0.782590
301 | 1600.jpeg2000.1.png 0.011967
302 | 1600.jpeg2000.2.png 0.135311
303 | 1600.jpeg2000.3.png 0.364361
304 | 1600.jpeg2000.4.png 0.575843
305 | 1600.jpeg2000.5.png 0.826916
306 | aerial_city.jpeg2000.1.png 0.027045
307 | aerial_city.jpeg2000.2.png 0.191014
308 | aerial_city.jpeg2000.3.png 0.485551
309 | aerial_city.jpeg2000.4.png 0.754413
310 | aerial_city.jpeg2000.5.png 0.955908
311 | boston.jpeg2000.1.png 0.000294
312 | boston.jpeg2000.2.png 0.118135
313 | boston.jpeg2000.3.png 0.389335
314 | boston.jpeg2000.4.png 0.696563
315 | boston.jpeg2000.5.png 0.904305
316 | bridge.jpeg2000.1.png 0.038118
317 | bridge.jpeg2000.2.png 0.202935
318 | bridge.jpeg2000.3.png 0.409380
319 | bridge.jpeg2000.4.png 0.650820
320 | bridge.jpeg2000.5.png 0.824682
321 | butter_flower.jpeg2000.1.png 0.027567
322 | butter_flower.jpeg2000.2.png 0.143161
323 | butter_flower.jpeg2000.3.png 0.459942
324 | butter_flower.jpeg2000.4.png 0.779221
325 | butter_flower.jpeg2000.5.png 0.925690
326 | cactus.jpeg2000.1.png 0.078557
327 | cactus.jpeg2000.2.png 0.209039
328 | cactus.jpeg2000.3.png 0.405463
329 | cactus.jpeg2000.4.png 0.638977
330 | cactus.jpeg2000.5.png 0.785250
331 | child_swimming.jpeg2000.1.png 0.028659
332 | child_swimming.jpeg2000.2.png 0.042940
333 | child_swimming.jpeg2000.3.png 0.322600
334 | child_swimming.jpeg2000.4.png 0.546622
335 | child_swimming.jpeg2000.5.png 0.765875
336 | couple.jpeg2000.1.png 0.002141
337 | couple.jpeg2000.2.png 0.072520
338 | couple.jpeg2000.3.png 0.385885
339 | couple.jpeg2000.4.png 0.630620
340 | couple.jpeg2000.5.png 0.804854
341 | elk.jpeg2000.1.png 0.026943
342 | elk.jpeg2000.2.png 0.074380
343 | elk.jpeg2000.3.png 0.245089
344 | elk.jpeg2000.4.png 0.545794
345 | elk.jpeg2000.5.png 0.810573
346 | family.jpeg2000.1.png 0.044337
347 | family.jpeg2000.2.png 0.237538
348 | family.jpeg2000.3.png 0.558713
349 | family.jpeg2000.4.png 0.761955
350 | family.jpeg2000.5.png 0.977123
351 | fisher.jpeg2000.1.png 0.008125
352 | fisher.jpeg2000.2.png 0.176319
353 | fisher.jpeg2000.3.png 0.517954
354 | fisher.jpeg2000.4.png 0.808429
355 | fisher.jpeg2000.5.png 0.972080
356 | foxy.jpeg2000.1.png 0.015411
357 | foxy.jpeg2000.2.png 0.056298
358 | foxy.jpeg2000.3.png 0.178762
359 | foxy.jpeg2000.4.png 0.379203
360 | foxy.jpeg2000.5.png 0.609030
361 | geckos.jpeg2000.1.png 0.022350
362 | geckos.jpeg2000.2.png 0.044189
363 | geckos.jpeg2000.3.png 0.170479
364 | geckos.jpeg2000.4.png 0.377391
365 | geckos.jpeg2000.5.png 0.617103
366 | lady_liberty.jpeg2000.1.png 0.031247
367 | lady_liberty.jpeg2000.2.png 0.267004
368 | lady_liberty.jpeg2000.3.png 0.508630
369 | lady_liberty.jpeg2000.4.png 0.763424
370 | lady_liberty.jpeg2000.5.png 0.907292
371 | lake.jpeg2000.1.png 0.065431
372 | lake.jpeg2000.2.png 0.166944
373 | lake.jpeg2000.3.png 0.247797
374 | lake.jpeg2000.4.png 0.408123
375 | lake.jpeg2000.5.png 0.682337
376 | log_seaside.jpeg2000.1.png 0.000208
377 | log_seaside.jpeg2000.2.png 0.044763
378 | log_seaside.jpeg2000.3.png 0.159952
379 | log_seaside.jpeg2000.4.png 0.394424
380 | log_seaside.jpeg2000.5.png 0.693041
381 | monument.jpeg2000.1.png 0.014624
382 | monument.jpeg2000.2.png 0.106412
383 | monument.jpeg2000.3.png 0.380948
384 | monument.jpeg2000.4.png 0.665182
385 | monument.jpeg2000.5.png 0.820598
386 | native_american.jpeg2000.1.png 0.054907
387 | native_american.jpeg2000.2.png 0.150310
388 | native_american.jpeg2000.3.png 0.372920
389 | native_american.jpeg2000.4.png 0.723122
390 | native_american.jpeg2000.5.png 0.887085
391 | redwood.jpeg2000.1.png 0.031707
392 | redwood.jpeg2000.2.png 0.182316
393 | redwood.jpeg2000.3.png 0.434438
394 | redwood.jpeg2000.4.png 0.681236
395 | redwood.jpeg2000.5.png 0.885726
396 | roping.jpeg2000.1.png 0.017065
397 | roping.jpeg2000.2.png 0.171858
398 | roping.jpeg2000.3.png 0.364208
399 | roping.jpeg2000.4.png 0.670977
400 | roping.jpeg2000.5.png 0.910231
401 | rushmore.jpeg2000.1.png 0.018268
402 | rushmore.jpeg2000.2.png 0.090044
403 | rushmore.jpeg2000.3.png 0.306238
404 | rushmore.jpeg2000.4.png 0.501412
405 | rushmore.jpeg2000.5.png 0.753767
406 | shroom.jpeg2000.1.png 0.004010
407 | shroom.jpeg2000.2.png 0.080263
408 | shroom.jpeg2000.3.png 0.391021
409 | shroom.jpeg2000.4.png 0.605435
410 | shroom.jpeg2000.5.png 0.836572
411 | snow_leaves.jpeg2000.1.png 0.004027
412 | snow_leaves.jpeg2000.2.png 0.077840
413 | snow_leaves.jpeg2000.3.png 0.187381
414 | snow_leaves.jpeg2000.4.png 0.555400
415 | snow_leaves.jpeg2000.5.png 0.756591
416 | sunsetcolor.jpeg2000.1.png 0.023928
417 | sunsetcolor.jpeg2000.2.png 0.300653
418 | sunsetcolor.jpeg2000.3.png 0.655226
419 | sunsetcolor.jpeg2000.4.png 0.850891
420 | sunsetcolor.jpeg2000.5.png 1.000000
421 | sunset_sparrow.jpeg2000.1.png 0.058402
422 | sunset_sparrow.jpeg2000.2.png 0.178501
423 | sunset_sparrow.jpeg2000.3.png 0.391255
424 | sunset_sparrow.jpeg2000.4.png 0.606085
425 | sunset_sparrow.jpeg2000.5.png 0.888810
426 | swarm.jpeg2000.1.png 0.032796
427 | swarm.jpeg2000.2.png 0.313328
428 | swarm.jpeg2000.3.png 0.572300
429 | swarm.jpeg2000.4.png 0.837374
430 | swarm.jpeg2000.5.png 0.967152
431 | trolley.jpeg2000.1.png 0.035962
432 | trolley.jpeg2000.2.png 0.164425
433 | trolley.jpeg2000.3.png 0.349715
434 | trolley.jpeg2000.4.png 0.533182
435 | trolley.jpeg2000.5.png 0.763933
436 | turtle.jpeg2000.1.png 0.001972
437 | turtle.jpeg2000.2.png 0.162116
438 | turtle.jpeg2000.3.png 0.531665
439 | turtle.jpeg2000.4.png 0.802320
440 | turtle.jpeg2000.5.png 0.984200
441 | veggies.jpeg2000.1.png 0.045103
442 | veggies.jpeg2000.2.png 0.114308
443 | veggies.jpeg2000.3.png 0.317298
444 | veggies.jpeg2000.4.png 0.547713
445 | veggies.jpeg2000.5.png 0.771946
446 | woman.jpeg2000.1.png 0.042023
447 | woman.jpeg2000.2.png 0.121537
448 | woman.jpeg2000.3.png 0.403444
449 | woman.jpeg2000.4.png 0.641080
450 | woman.jpeg2000.5.png 0.896614
451 | 1600.fnoise.1.png 0.052376
452 | 1600.fnoise.2.png 0.253058
453 | 1600.fnoise.3.png 0.375741
454 | 1600.fnoise.4.png 0.489880
455 | 1600.fnoise.5.png 0.565219
456 | aerial_city.fnoise.1.png 0.125646
457 | aerial_city.fnoise.2.png 0.318731
458 | aerial_city.fnoise.3.png 0.475221
459 | aerial_city.fnoise.4.png 0.600628
460 | aerial_city.fnoise.5.png 0.707734
461 | boston.fnoise.1.png 0.067654
462 | boston.fnoise.2.png 0.243427
463 | boston.fnoise.3.png 0.498129
464 | boston.fnoise.4.png 0.619963
465 | boston.fnoise.5.png 0.724084
466 | bridge.fnoise.1.png 0.050821
467 | bridge.fnoise.2.png 0.292757
468 | bridge.fnoise.3.png 0.428868
469 | bridge.fnoise.4.png 0.595310
470 | bridge.fnoise.5.png 0.696540
471 | butter_flower.fnoise.1.png 0.027877
472 | butter_flower.fnoise.2.png 0.195120
473 | butter_flower.fnoise.3.png 0.381516
474 | butter_flower.fnoise.4.png 0.549040
475 | butter_flower.fnoise.5.png 0.726603
476 | cactus.fnoise.1.png 0.222083
477 | cactus.fnoise.2.png 0.325322
478 | cactus.fnoise.3.png 0.503035
479 | cactus.fnoise.4.png 0.594751
480 | cactus.fnoise.5.png 0.666608
481 | child_swimming.fnoise.1.png 0.031725
482 | child_swimming.fnoise.2.png 0.226116
483 | child_swimming.fnoise.3.png 0.375526
484 | child_swimming.fnoise.4.png 0.570449
485 | child_swimming.fnoise.5.png 0.699115
486 | couple.fnoise.1.png 0.011536
487 | couple.fnoise.2.png 0.088824
488 | couple.fnoise.3.png 0.264691
489 | couple.fnoise.4.png 0.467049
490 | couple.fnoise.5.png 0.638799
491 | elk.fnoise.1.png 0.046471
492 | elk.fnoise.2.png 0.246678
493 | elk.fnoise.3.png 0.370894
494 | elk.fnoise.4.png 0.483852
495 | elk.fnoise.5.png 0.644948
496 | family.fnoise.1.png 0.169833
497 | family.fnoise.2.png 0.271892
498 | family.fnoise.3.png 0.447987
499 | family.fnoise.4.png 0.568643
500 | family.fnoise.5.png 0.680636
501 | fisher.fnoise.1.png 0.066060
502 | fisher.fnoise.2.png 0.270288
503 | fisher.fnoise.3.png 0.402802
504 | fisher.fnoise.4.png 0.487372
505 | fisher.fnoise.5.png 0.606523
506 | foxy.fnoise.1.png 0.022738
507 | foxy.fnoise.2.png 0.092167
508 | foxy.fnoise.3.png 0.332709
509 | foxy.fnoise.4.png 0.400978
510 | foxy.fnoise.5.png 0.575855
511 | geckos.fnoise.1.png 0.004041
512 | geckos.fnoise.2.png 0.190149
513 | geckos.fnoise.3.png 0.337513
514 | geckos.fnoise.4.png 0.451414
515 | geckos.fnoise.5.png 0.609818
516 | lady_liberty.fnoise.1.png 0.031954
517 | lady_liberty.fnoise.2.png 0.193461
518 | lady_liberty.fnoise.3.png 0.352432
519 | lady_liberty.fnoise.4.png 0.505370
520 | lady_liberty.fnoise.5.png 0.641992
521 | lake.fnoise.1.png 0.124647
522 | lake.fnoise.2.png 0.290345
523 | lake.fnoise.3.png 0.439697
524 | lake.fnoise.4.png 0.571498
525 | lake.fnoise.5.png 0.659032
526 | log_seaside.fnoise.1.png 0.017510
527 | log_seaside.fnoise.2.png 0.143125
528 | log_seaside.fnoise.3.png 0.322969
529 | log_seaside.fnoise.4.png 0.518165
530 | log_seaside.fnoise.5.png 0.672643
531 | monument.fnoise.1.png 0.030943
532 | monument.fnoise.2.png 0.208240
533 | monument.fnoise.3.png 0.320127
534 | monument.fnoise.4.png 0.513709
535 | monument.fnoise.5.png 0.598478
536 | native_american.fnoise.1.png 0.086900
537 | native_american.fnoise.2.png 0.254457
538 | native_american.fnoise.3.png 0.490956
539 | native_american.fnoise.4.png 0.617730
540 | native_american.fnoise.5.png 0.735444
541 | redwood.fnoise.1.png 0.094844
542 | redwood.fnoise.2.png 0.309781
543 | redwood.fnoise.3.png 0.524452
544 | redwood.fnoise.4.png 0.653247
545 | redwood.fnoise.5.png 0.752071
546 | roping.fnoise.1.png 0.032260
547 | roping.fnoise.2.png 0.138743
548 | roping.fnoise.3.png 0.335892
549 | roping.fnoise.4.png 0.512508
550 | roping.fnoise.5.png 0.738256
551 | rushmore.fnoise.1.png 0.198400
552 | rushmore.fnoise.2.png 0.296648
553 | rushmore.fnoise.3.png 0.422267
554 | rushmore.fnoise.4.png 0.546638
555 | rushmore.fnoise.5.png 0.635856
556 | shroom.fnoise.1.png 0.021546
557 | shroom.fnoise.2.png 0.179984
558 | shroom.fnoise.3.png 0.355739
559 | shroom.fnoise.4.png 0.574764
560 | shroom.fnoise.5.png 0.701658
561 | snow_leaves.fnoise.1.png 0.047634
562 | snow_leaves.fnoise.2.png 0.224576
563 | snow_leaves.fnoise.3.png 0.411411
564 | snow_leaves.fnoise.4.png 0.536042
565 | snow_leaves.fnoise.5.png 0.679135
566 | sunsetcolor.fnoise.1.png 0.119233
567 | sunsetcolor.fnoise.2.png 0.339214
568 | sunsetcolor.fnoise.3.png 0.531446
569 | sunsetcolor.fnoise.4.png 0.617306
570 | sunsetcolor.fnoise.5.png 0.721442
571 | sunset_sparrow.fnoise.1.png 0.224163
572 | sunset_sparrow.fnoise.2.png 0.369215
573 | sunset_sparrow.fnoise.3.png 0.539079
574 | sunset_sparrow.fnoise.4.png 0.606823
575 | sunset_sparrow.fnoise.5.png 0.734220
576 | swarm.fnoise.1.png 0.072390
577 | swarm.fnoise.2.png 0.365700
578 | swarm.fnoise.3.png 0.586326
579 | swarm.fnoise.4.png 0.696730
580 | swarm.fnoise.5.png 0.816409
581 | trolley.fnoise.1.png 0.067151
582 | trolley.fnoise.2.png 0.293905
583 | trolley.fnoise.3.png 0.432026
584 | trolley.fnoise.4.png 0.586492
585 | trolley.fnoise.5.png 0.700958
586 | turtle.fnoise.1.png 0.038818
587 | turtle.fnoise.2.png 0.172603
588 | turtle.fnoise.3.png 0.344786
589 | turtle.fnoise.4.png 0.538384
590 | turtle.fnoise.5.png 0.653117
591 | veggies.fnoise.1.png 0.064548
592 | veggies.fnoise.2.png 0.202910
593 | veggies.fnoise.3.png 0.371083
594 | veggies.fnoise.4.png 0.600194
595 | veggies.fnoise.5.png 0.683021
596 | woman.fnoise.1.png 0.098706
597 | woman.fnoise.2.png 0.262388
598 | woman.fnoise.3.png 0.490339
599 | woman.fnoise.4.png 0.638011
600 | woman.fnoise.5.png 0.741920
601 | 1600.BLUR.1.png 0.042668
602 | 1600.BLUR.2.png 0.142435
603 | 1600.BLUR.3.png 0.341114
604 | 1600.BLUR.4.png 0.471038
605 | 1600.BLUR.5.png 0.749795
606 | aerial_city.BLUR.1.png 0.024984
607 | aerial_city.BLUR.2.png 0.114349
608 | aerial_city.BLUR.3.png 0.259421
609 | aerial_city.BLUR.4.png 0.463945
610 | aerial_city.BLUR.5.png 0.895963
611 | boston.BLUR.1.png 0.083456
612 | boston.BLUR.2.png 0.215749
613 | boston.BLUR.3.png 0.363120
614 | boston.BLUR.4.png 0.571217
615 | boston.BLUR.5.png 0.913013
616 | bridge.BLUR.1.png 0.060651
617 | bridge.BLUR.2.png 0.182437
618 | bridge.BLUR.3.png 0.348140
619 | bridge.BLUR.4.png 0.635791
620 | bridge.BLUR.5.png 0.777178
621 | butter_flower.BLUR.1.png 0.078072
622 | butter_flower.BLUR.2.png 0.201968
623 | butter_flower.BLUR.3.png 0.481980
624 | butter_flower.BLUR.4.png 0.778540
625 | butter_flower.BLUR.5.png 0.952775
626 | cactus.BLUR.1.png 0.066743
627 | cactus.BLUR.2.png 0.147199
628 | cactus.BLUR.3.png 0.399275
629 | cactus.BLUR.4.png 0.508410
630 | cactus.BLUR.5.png 0.749996
631 | child_swimming.BLUR.1.png 0.031725
632 | child_swimming.BLUR.2.png 0.187156
633 | child_swimming.BLUR.3.png 0.290016
634 | child_swimming.BLUR.4.png 0.484981
635 | child_swimming.BLUR.5.png 0.758275
636 | couple.BLUR.1.png 0.047046
637 | couple.BLUR.2.png 0.249072
638 | couple.BLUR.3.png 0.408100
639 | couple.BLUR.4.png 0.588777
640 | couple.BLUR.5.png 0.785378
641 | elk.BLUR.1.png 0.044255
642 | elk.BLUR.2.png 0.121626
643 | elk.BLUR.3.png 0.247087
644 | elk.BLUR.4.png 0.404821
645 | elk.BLUR.5.png 0.762872
646 | family.BLUR.1.png 0.036967
647 | family.BLUR.2.png 0.149343
648 | family.BLUR.3.png 0.262240
649 | family.BLUR.4.png 0.538365
650 | family.BLUR.5.png 0.856256
651 | fisher.BLUR.1.png 0.055759
652 | fisher.BLUR.2.png 0.361151
653 | fisher.BLUR.3.png 0.583247
654 | fisher.BLUR.4.png 0.800442
655 | fisher.BLUR.5.png 0.912743
656 | foxy.BLUR.1.png 0.015862
657 | foxy.BLUR.2.png 0.109800
658 | foxy.BLUR.3.png 0.179801
659 | foxy.BLUR.4.png 0.290320
660 | foxy.BLUR.5.png 0.441840
661 | geckos.BLUR.1.png 0.012523
662 | geckos.BLUR.2.png 0.067628
663 | geckos.BLUR.3.png 0.230962
664 | geckos.BLUR.4.png 0.340858
665 | geckos.BLUR.5.png 0.569738
666 | lady_liberty.BLUR.1.png 0.058938
667 | lady_liberty.BLUR.2.png 0.226532
668 | lady_liberty.BLUR.3.png 0.461503
669 | lady_liberty.BLUR.4.png 0.673304
670 | lady_liberty.BLUR.5.png 0.965805
671 | lake.BLUR.1.png 0.074177
672 | lake.BLUR.2.png 0.162926
673 | lake.BLUR.3.png 0.295628
674 | lake.BLUR.4.png 0.470207
675 | lake.BLUR.5.png 0.654704
676 | log_seaside.BLUR.1.png 0.013494
677 | log_seaside.BLUR.2.png 0.078074
678 | log_seaside.BLUR.3.png 0.207243
679 | log_seaside.BLUR.4.png 0.350514
680 | log_seaside.BLUR.5.png 0.590394
681 | monument.BLUR.1.png 0.047668
682 | monument.BLUR.2.png 0.177046
683 | monument.BLUR.3.png 0.359401
684 | monument.BLUR.4.png 0.587549
685 | monument.BLUR.5.png 0.790275
686 | native_american.BLUR.1.png 0.035081
687 | native_american.BLUR.2.png 0.172279
688 | native_american.BLUR.3.png 0.364939
689 | native_american.BLUR.4.png 0.606542
690 | native_american.BLUR.5.png 0.868067
691 | redwood.BLUR.1.png 0.087942
692 | redwood.BLUR.2.png 0.229960
693 | redwood.BLUR.3.png 0.380931
694 | redwood.BLUR.4.png 0.569354
695 | redwood.BLUR.5.png 0.845567
696 | roping.BLUR.1.png 0.034108
697 | roping.BLUR.2.png 0.188499
698 | roping.BLUR.3.png 0.470544
699 | roping.BLUR.4.png 0.670046
700 | roping.BLUR.5.png 0.879711
701 | rushmore.BLUR.1.png 0.011442
702 | rushmore.BLUR.2.png 0.083287
703 | rushmore.BLUR.3.png 0.241583
704 | rushmore.BLUR.4.png 0.415576
705 | rushmore.BLUR.5.png 0.657591
706 | shroom.BLUR.1.png 0.046042
707 | shroom.BLUR.2.png 0.161031
708 | shroom.BLUR.3.png 0.394708
709 | shroom.BLUR.4.png 0.549160
710 | shroom.BLUR.5.png 0.769242
711 | snow_leaves.BLUR.1.png 0.019751
712 | snow_leaves.BLUR.2.png 0.139744
713 | snow_leaves.BLUR.3.png 0.300325
714 | snow_leaves.BLUR.4.png 0.459809
715 | snow_leaves.BLUR.5.png 0.696410
716 | sunsetcolor.BLUR.1.png 0.073076
717 | sunsetcolor.BLUR.2.png 0.308526
718 | sunsetcolor.BLUR.3.png 0.725918
719 | sunsetcolor.BLUR.4.png 0.962783
720 | sunsetcolor.BLUR.5.png 0.999409
721 | sunset_sparrow.BLUR.1.png 0.086547
722 | sunset_sparrow.BLUR.2.png 0.190692
723 | sunset_sparrow.BLUR.3.png 0.306891
724 | sunset_sparrow.BLUR.4.png 0.547604
725 | sunset_sparrow.BLUR.5.png 0.845918
726 | swarm.BLUR.1.png 0.075406
727 | swarm.BLUR.2.png 0.292512
728 | swarm.BLUR.3.png 0.558793
729 | swarm.BLUR.4.png 0.817972
730 | swarm.BLUR.5.png 0.962827
731 | trolley.BLUR.1.png 0.036354
732 | trolley.BLUR.2.png 0.115158
733 | trolley.BLUR.3.png 0.229825
734 | trolley.BLUR.4.png 0.406963
735 | trolley.BLUR.5.png 0.605860
736 | turtle.BLUR.1.png 0.060455
737 | turtle.BLUR.2.png 0.234857
738 | turtle.BLUR.3.png 0.518088
739 | turtle.BLUR.4.png 0.747465
740 | turtle.BLUR.5.png 0.932888
741 | veggies.BLUR.1.png 0.056444
742 | veggies.BLUR.2.png 0.206285
743 | veggies.BLUR.3.png 0.348899
744 | veggies.BLUR.4.png 0.521398
745 | veggies.BLUR.5.png 0.718836
746 | woman.BLUR.1.png 0.086440
747 | woman.BLUR.2.png 0.198053
748 | woman.BLUR.3.png 0.419918
749 | woman.BLUR.4.png 0.616310
750 | woman.BLUR.5.png 0.833920
751 | 1600.contrast.1.png 0.056181
752 | 1600.contrast.2.png 0.200969
753 | 1600.contrast.3.png 0.310325
754 | 1600.contrast.4.png 0.370599
755 | aerial_city.contrast.1.png 0.037807
756 | aerial_city.contrast.2.png 0.163089
757 | aerial_city.contrast.3.png 0.366567
758 | aerial_city.contrast.4.png 0.452587
759 | boston.contrast.1.png 0.116568
760 | boston.contrast.2.png 0.339159
761 | boston.contrast.3.png 0.605352
762 | boston.contrast.4.png 0.689729
763 | bridge.contrast.1.png 0.036407
764 | bridge.contrast.2.png 0.139471
765 | bridge.contrast.3.png 0.344892
766 | bridge.contrast.4.png 0.457522
767 | butter_flower.contrast.1.png 0.069831
768 | butter_flower.contrast.2.png 0.256400
769 | butter_flower.contrast.3.png 0.514702
770 | butter_flower.contrast.4.png 0.614887
771 | cactus.contrast.1.png 0.111255
772 | cactus.contrast.2.png 0.285157
773 | cactus.contrast.3.png 0.407726
774 | cactus.contrast.4.png 0.516734
775 | child_swimming.contrast.1.png 0.143308
776 | child_swimming.contrast.2.png 0.289023
777 | child_swimming.contrast.3.png 0.553583
778 | child_swimming.contrast.4.png 0.565118
779 | couple.contrast.1.png 0.097268
780 | couple.contrast.2.png 0.209819
781 | couple.contrast.3.png 0.352458
782 | couple.contrast.4.png 0.416757
783 | elk.contrast.1.png 0.075542
784 | elk.contrast.2.png 0.200369
785 | elk.contrast.3.png 0.399110
786 | family.contrast.1.png 0.026858
787 | family.contrast.2.png 0.144776
788 | family.contrast.3.png 0.279523
789 | family.contrast.4.png 0.378507
790 | fisher.contrast.1.png 0.090437
791 | fisher.contrast.2.png 0.212385
792 | fisher.contrast.3.png 0.341556
793 | fisher.contrast.4.png 0.389413
794 | foxy.contrast.1.png 0.046767
795 | foxy.contrast.2.png 0.183250
796 | foxy.contrast.3.png 0.356077
797 | foxy.contrast.4.png 0.469925
798 | geckos.contrast.1.png 0.105398
799 | geckos.contrast.2.png 0.238840
800 | geckos.contrast.3.png 0.452383
801 | lady_liberty.contrast.1.png 0.091671
802 | lady_liberty.contrast.2.png 0.305203
803 | lady_liberty.contrast.3.png 0.640590
804 | lake.contrast.1.png 0.094336
805 | lake.contrast.2.png 0.265063
806 | lake.contrast.3.png 0.409732
807 | lake.contrast.4.png 0.506456
808 | log_seaside.contrast.1.png 0.023420
809 | log_seaside.contrast.2.png 0.156705
810 | log_seaside.contrast.3.png 0.369062
811 | log_seaside.contrast.4.png 0.458282
812 | monument.contrast.1.png 0.013177
813 | monument.contrast.2.png 0.142370
814 | monument.contrast.3.png 0.258125
815 | monument.contrast.4.png 0.323351
816 | native_american.contrast.1.png 0.061938
817 | native_american.contrast.2.png 0.223733
818 | native_american.contrast.3.png 0.369587
819 | native_american.contrast.4.png 0.458626
820 | redwood.contrast.1.png 0.073889
821 | redwood.contrast.2.png 0.198001
822 | redwood.contrast.3.png 0.389842
823 | redwood.contrast.4.png 0.470886
824 | roping.contrast.1.png 0.056443
825 | roping.contrast.2.png 0.192126
826 | roping.contrast.3.png 0.370885
827 | roping.contrast.4.png 0.450367
828 | rushmore.contrast.1.png 0.124708
829 | rushmore.contrast.2.png 0.289588
830 | rushmore.contrast.3.png 0.392930
831 | rushmore.contrast.4.png 0.505777
832 | shroom.contrast.1.png 0.080630
833 | shroom.contrast.2.png 0.184374
834 | shroom.contrast.3.png 0.332786
835 | shroom.contrast.4.png 0.410194
836 | snow_leaves.contrast.1.png 0.040660
837 | snow_leaves.contrast.2.png 0.220943
838 | snow_leaves.contrast.3.png 0.319527
839 | snow_leaves.contrast.4.png 0.378758
840 | sunsetcolor.contrast.1.png 0.052038
841 | sunsetcolor.contrast.2.png 0.130973
842 | sunsetcolor.contrast.3.png 0.272601
843 | sunsetcolor.contrast.4.png 0.413102
844 | sunset_sparrow.contrast.1.png 0.093578
845 | sunset_sparrow.contrast.2.png 0.260493
846 | sunset_sparrow.contrast.3.png 0.401872
847 | sunset_sparrow.contrast.4.png 0.465398
848 | swarm.contrast.1.png 0.090613
849 | swarm.contrast.2.png 0.314752
850 | swarm.contrast.3.png 0.625270
851 | trolley.contrast.1.png 0.056003
852 | trolley.contrast.2.png 0.214995
853 | trolley.contrast.3.png 0.433002
854 | trolley.contrast.4.png 0.497138
855 | turtle.contrast.1.png 0.079246
856 | turtle.contrast.2.png 0.198892
857 | turtle.contrast.3.png 0.406588
858 | turtle.contrast.4.png 0.554236
859 | veggies.contrast.1.png 0.055548
860 | veggies.contrast.2.png 0.202823
861 | veggies.contrast.3.png 0.331716
862 | veggies.contrast.4.png 0.385333
863 | woman.contrast.1.png 0.068355
864 | woman.contrast.2.png 0.172780
865 | woman.contrast.3.png 0.409342
866 | woman.contrast.4.png 0.468556
867 |
--------------------------------------------------------------------------------
/data/D_01.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/data/D_01.jpg
--------------------------------------------------------------------------------
/data/D_02.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/data/D_02.jpg
--------------------------------------------------------------------------------
/data/D_03.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSL92/hyperIQA/c42e7279717e7dcb693a24b891fc14a4189a45ee/data/D_03.jpg
--------------------------------------------------------------------------------
/data_loader.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | import folders
4 |
5 | class DataLoader(object):
6 | """Dataset class for IQA databases"""
7 |
8 | def __init__(self, dataset, path, img_indx, patch_size, patch_num, batch_size=1, istrain=True):
9 |
10 | self.batch_size = batch_size
11 | self.istrain = istrain
12 |
13 | if dataset in ('live', 'csiq', 'tid2013', 'livec'):
14 | # Train transforms
15 | if istrain:
16 | transforms = torchvision.transforms.Compose([
17 | torchvision.transforms.RandomHorizontalFlip(),
18 | torchvision.transforms.RandomCrop(size=patch_size),
19 | torchvision.transforms.ToTensor(),
20 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
21 | std=(0.229, 0.224, 0.225))
22 | ])
23 | # Test transforms
24 | else:
25 | transforms = torchvision.transforms.Compose([
26 | torchvision.transforms.RandomCrop(size=patch_size),
27 | torchvision.transforms.ToTensor(),
28 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
29 | std=(0.229, 0.224, 0.225))
30 | ])
31 | elif dataset == 'koniq-10k':
32 | if istrain:
33 | transforms = torchvision.transforms.Compose([
34 | torchvision.transforms.RandomHorizontalFlip(),
35 | torchvision.transforms.Resize((512, 384)),
36 | torchvision.transforms.RandomCrop(size=patch_size),
37 | torchvision.transforms.ToTensor(),
38 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
39 | std=(0.229, 0.224, 0.225))])
40 | else:
41 | transforms = torchvision.transforms.Compose([
42 | torchvision.transforms.Resize((512, 384)),
43 | torchvision.transforms.RandomCrop(size=patch_size),
44 | torchvision.transforms.ToTensor(),
45 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
46 | std=(0.229, 0.224, 0.225))])
47 | elif dataset == 'bid':
48 | if istrain:
49 | transforms = torchvision.transforms.Compose([
50 | torchvision.transforms.RandomHorizontalFlip(),
51 | torchvision.transforms.Resize((512, 512)),
52 | torchvision.transforms.RandomCrop(size=patch_size),
53 | torchvision.transforms.ToTensor(),
54 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
55 | std=(0.229, 0.224, 0.225))])
56 | else:
57 | transforms = torchvision.transforms.Compose([
58 | torchvision.transforms.Resize((512, 512)),
59 | torchvision.transforms.RandomCrop(size=patch_size),
60 | torchvision.transforms.ToTensor(),
61 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
62 | std=(0.229, 0.224, 0.225))])
63 |
64 | if dataset == 'live':
65 | self.data = folders.LIVEFolder(
66 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
67 | elif dataset == 'livec':
68 | self.data = folders.LIVEChallengeFolder(
69 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
70 | elif dataset == 'csiq':
71 | self.data = folders.CSIQFolder(
72 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
73 | elif dataset == 'koniq-10k':
74 | self.data = folders.Koniq_10kFolder(
75 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
76 | elif dataset == 'bid':
77 | self.data = folders.BIDFolder(
78 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
79 | elif dataset == 'tid2013':
80 | self.data = folders.TID2013Folder(
81 | root=path, index=img_indx, transform=transforms, patch_num=patch_num)
82 |
83 | def get_data(self):
84 | if self.istrain:
85 | dataloader = torch.utils.data.DataLoader(
86 | self.data, batch_size=self.batch_size, shuffle=True)
87 | else:
88 | dataloader = torch.utils.data.DataLoader(
89 | self.data, batch_size=1, shuffle=False)
90 | return dataloader
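91 |
92 | # Usage sketch (the dataset path and index split below are assumptions, not repo defaults):
93 | # train_idx = list(range(0, 930))
94 | # loader = DataLoader('livec', '/path/to/ChallengeDB_release', train_idx, 224, 25, batch_size=96, istrain=True)
95 | # train_data = loader.get_data()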
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | import models
4 | from PIL import Image
5 | import numpy as np
6 |
7 |
8 | def pil_loader(path):
9 | with open(path, 'rb') as f:
10 | img = Image.open(f)
11 | return img.convert('RGB')
12 |
13 |
14 | im_path = './data/D_01.jpg'
15 | model_hyper = models.HyperNet(16, 112, 224, 112, 56, 28, 14, 7).cuda()
16 | model_hyper.train(False)
17 | # load our pre-trained model on the koniq-10k dataset
18 | model_hyper.load_state_dict(torch.load('./pretrained/koniq_pretrained.pkl'))
19 |
20 | transforms = torchvision.transforms.Compose([
21 | torchvision.transforms.Resize((512, 384)),
22 | torchvision.transforms.RandomCrop(size=224),
23 | torchvision.transforms.ToTensor(),
24 | torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
25 | std=(0.229, 0.224, 0.225))])
26 |
27 | # random crop 10 patches and calculate mean quality score
28 | pred_scores = []
29 | for i in range(10):
30 | img = pil_loader(im_path)
31 | img = transforms(img)
32 | img = img.cuda().unsqueeze(0)  # add a batch dimension
33 | paras = model_hyper(img) # 'paras' contains the network weights conveyed to target network
34 |
35 | # Building target network
36 | model_target = models.TargetNet(paras).cuda()
37 | for param in model_target.parameters():
38 | param.requires_grad = False
39 |
40 | # Quality prediction
41 | pred = model_target(paras['target_in_vec'])  # 'paras['target_in_vec']' is the input to the target net
42 | pred_scores.append(float(pred.item()))
43 | score = np.mean(pred_scores)
44 | # quality score ranges from 0 to 100; a higher score indicates better quality
45 | print('Predicted quality score: %.2f' % score)
46 |
47 |
--------------------------------------------------------------------------------
/folders.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as data
2 | from PIL import Image
3 | import os
4 | import os.path
5 | import scipy.io
6 | import numpy as np
7 | import csv
8 | from openpyxl import load_workbook
9 |
10 |
11 | class LIVEFolder(data.Dataset):
12 |
13 | def __init__(self, root, index, transform, patch_num):
14 |
15 | refpath = os.path.join(root, 'refimgs')
16 | refname = getFileName(refpath, '.bmp')
17 |
18 | jp2kroot = os.path.join(root, 'jp2k')
19 | jp2kname = self.getDistortionTypeFileName(jp2kroot, 227)
20 |
21 | jpegroot = os.path.join(root, 'jpeg')
22 | jpegname = self.getDistortionTypeFileName(jpegroot, 233)
23 |
24 | wnroot = os.path.join(root, 'wn')
25 | wnname = self.getDistortionTypeFileName(wnroot, 174)
26 |
27 | gblurroot = os.path.join(root, 'gblur')
28 | gblurname = self.getDistortionTypeFileName(gblurroot, 174)
29 |
30 | fastfadingroot = os.path.join(root, 'fastfading')
31 | fastfadingname = self.getDistortionTypeFileName(fastfadingroot, 174)
32 |
33 | imgpath = jp2kname + jpegname + wnname + gblurname + fastfadingname
34 |
35 | dmos = scipy.io.loadmat(os.path.join(root, 'dmos_realigned.mat'))
36 | labels = dmos['dmos_new'].astype(np.float32)
37 |
38 | orgs = dmos['orgs']
39 | refnames_all = scipy.io.loadmat(os.path.join(root, 'refnames_all.mat'))
40 | refnames_all = refnames_all['refnames_all']
41 |
42 | sample = []
43 |
44 | for i in range(0, len(index)):
45 | train_sel = (refname[index[i]] == refnames_all)
46 | train_sel = train_sel * ~orgs.astype(np.bool_)
47 | train_sel = np.where(train_sel)
48 | train_sel = train_sel[1].tolist()
49 | for j, item in enumerate(train_sel):
50 | for aug in range(patch_num):
51 | sample.append((imgpath[item], labels[0][item]))
52 | # print(self.imgpath[item])
53 | self.samples = sample
54 | self.transform = transform
55 |
56 | def __getitem__(self, index):
57 | """
58 | Args:
59 | index (int): Index
60 |
61 | Returns:
62 | tuple: (sample, target) where target is the subjective quality score of the image.
63 | """
64 | path, target = self.samples[index]
65 | sample = pil_loader(path)
66 | if self.transform is not None:
67 | sample = self.transform(sample)
68 |
69 | return sample, target
70 |
71 | def __len__(self):
72 | length = len(self.samples)
73 | return length
74 |
75 | def getDistortionTypeFileName(self, path, num):
76 | filename = []
77 | index = 1
78 | for i in range(0, num):
79 | name = '%s%s%s' % ('img', str(index), '.bmp')
80 | filename.append(os.path.join(path, name))
81 | index = index + 1
82 | return filename
83 |
84 |
85 | class LIVEChallengeFolder(data.Dataset):
86 |
87 | def __init__(self, root, index, transform, patch_num):
88 |
89 | imgpath = scipy.io.loadmat(os.path.join(root, 'Data', 'AllImages_release.mat'))
90 | imgpath = imgpath['AllImages_release']
91 | imgpath = imgpath[7:1169]
92 | mos = scipy.io.loadmat(os.path.join(root, 'Data', 'AllMOS_release.mat'))
93 | labels = mos['AllMOS_release'].astype(np.float32)
94 | labels = labels[0][7:1169]
95 |
96 | sample = []
97 | for i, item in enumerate(index):
98 | for aug in range(patch_num):
99 | sample.append((os.path.join(root, 'Images', imgpath[item][0][0]), labels[item]))
100 |
101 | self.samples = sample
102 | self.transform = transform
103 |
104 | def __getitem__(self, index):
105 | """
106 | Args:
107 | index (int): Index
108 |
109 | Returns:
110 | tuple: (sample, target) where target is the subjective quality score of the image.
111 | """
112 | path, target = self.samples[index]
113 | sample = pil_loader(path)
114 | sample = self.transform(sample)
115 | return sample, target
116 |
117 | def __len__(self):
118 | length = len(self.samples)
119 | return length
120 |
121 |
122 | class CSIQFolder(data.Dataset):
123 |
124 | def __init__(self, root, index, transform, patch_num):
125 |
126 | refpath = os.path.join(root, 'src_imgs')
127 | refname = getFileName(refpath,'.png')
128 | txtpath = os.path.join(root, 'csiq_label.txt')
129 | fh = open(txtpath, 'r')
130 | imgnames = []
131 | target = []
132 | refnames_all = []
133 | for line in fh:
134 | line = line.split('\n')
135 | words = line[0].split()
136 | imgnames.append((words[0]))
137 | target.append(words[1])
138 | ref_temp = words[0].split(".")
139 | refnames_all.append(ref_temp[0] + '.' + ref_temp[-1])
140 |
141 | labels = np.array(target).astype(np.float32)
142 | refnames_all = np.array(refnames_all)
143 |
144 | sample = []
145 |
146 | for i, item in enumerate(index):
147 | train_sel = (refname[index[i]] == refnames_all)
148 | train_sel = np.where(train_sel)
149 | train_sel = train_sel[0].tolist()
150 | for j, item in enumerate(train_sel):
151 | for aug in range(patch_num):
152 | sample.append((os.path.join(root, 'dst_imgs_all', imgnames[item]), labels[item]))
153 | self.samples = sample
154 | self.transform = transform
155 |
156 | def __getitem__(self, index):
157 | """
158 | Args:
159 | index (int): Index
160 |
161 | Returns:
162 | tuple: (sample, target) where target is the subjective quality score of the image.
163 | """
164 | path, target = self.samples[index]
165 | sample = pil_loader(path)
166 | sample = self.transform(sample)
167 |
168 | return sample, target
169 |
170 | def __len__(self):
171 | length = len(self.samples)
172 | return length
173 |
174 |
175 | class Koniq_10kFolder(data.Dataset):
176 |
177 | def __init__(self, root, index, transform, patch_num):
178 | imgname = []
179 | mos_all = []
180 | csv_file = os.path.join(root, 'koniq10k_scores_and_distributions.csv')
181 | with open(csv_file) as f:
182 | reader = csv.DictReader(f)
183 | for row in reader:
184 | imgname.append(row['image_name'])
185 | mos = np.array(float(row['MOS_zscore'])).astype(np.float32)
186 | mos_all.append(mos)
187 |
188 | sample = []
189 | for i, item in enumerate(index):
190 | for aug in range(patch_num):
191 | sample.append((os.path.join(root, '1024x768', imgname[item]), mos_all[item]))
192 |
193 | self.samples = sample
194 | self.transform = transform
195 |
196 | def __getitem__(self, index):
197 | """
198 | Args:
199 | index (int): Index
200 |
201 | Returns:
202 | tuple: (sample, target) where target is the subjective quality score of the image.
203 | """
204 | path, target = self.samples[index]
205 | sample = pil_loader(path)
206 | sample = self.transform(sample)
207 | return sample, target
208 |
209 | def __len__(self):
210 | length = len(self.samples)
211 | return length
212 |
213 |
214 | class BIDFolder(data.Dataset):
215 |
216 | def __init__(self, root, index, transform, patch_num):
217 |
218 | imgname = []
219 | mos_all = []
220 |
221 | xls_file = os.path.join(root, 'DatabaseGrades.xlsx')
222 | workbook = load_workbook(xls_file)
223 | booksheet = workbook.active
224 | rows = booksheet.rows
225 |         count = 1
226 |         for row in rows:
227 |             count += 1  # cells are read from row `count`, so the header row is skipped
228 |             img_num = booksheet.cell(row=count, column=1).value
229 |             img_name = "DatabaseImage%04d.JPG" % img_num
230 |             imgname.append(img_name)
231 |             mos = booksheet.cell(row=count, column=2).value
232 |             mos = np.array(mos)
233 |             mos = mos.astype(np.float32)
234 | mos_all.append(mos)
235 |             if count == 587:  # stop after the 586 rated images in the BID database
236 |                 break
237 |
238 | sample = []
239 | for i, item in enumerate(index):
240 | for aug in range(patch_num):
241 | sample.append((os.path.join(root, imgname[item]), mos_all[item]))
242 |
243 | self.samples = sample
244 | self.transform = transform
245 |
246 | def __getitem__(self, index):
247 | """
248 | Args:
249 | index (int): Index
250 |
251 | Returns:
252 |             tuple: (sample, target) where target is the subjective quality score of the image.
253 | """
254 | path, target = self.samples[index]
255 | sample = pil_loader(path)
256 | sample = self.transform(sample)
257 | return sample, target
258 |
259 | def __len__(self):
260 | length = len(self.samples)
261 | return length
262 |
263 |
264 | class TID2013Folder(data.Dataset):
265 |
266 | def __init__(self, root, index, transform, patch_num):
267 | refpath = os.path.join(root, 'reference_images')
268 |         refname = getTIDFileName(refpath, '.bmp.BMP')
269 | txtpath = os.path.join(root, 'mos_with_names.txt')
270 | fh = open(txtpath, 'r')
271 | imgnames = []
272 | target = []
273 | refnames_all = []
274 | for line in fh:
275 |             # each line reads "<mos> <dst_name>"
276 |             words = line.strip().split()
277 |             imgnames.append(words[1])
278 |             target.append(words[0])
279 |             ref_temp = words[1].split("_")
280 |             refnames_all.append(ref_temp[0][1:])  # e.g. 'i01_05_4.bmp' -> '01'
281 | labels = np.array(target).astype(np.float32)
282 | refnames_all = np.array(refnames_all)
283 |
284 | sample = []
285 |         for i, item in enumerate(index):
286 |             # pick every distorted version of the i-th selected reference image
287 |             train_sel = (refname[index[i]] == refnames_all)
288 |             train_sel = np.where(train_sel)[0].tolist()
289 |             for j, item in enumerate(train_sel):
290 | for aug in range(patch_num):
291 | sample.append((os.path.join(root, 'distorted_images', imgnames[item]), labels[item]))
292 | self.samples = sample
293 | self.transform = transform
294 |
295 | def __getitem__(self, index):
296 | """
297 | Args:
298 | index (int): Index
299 |
300 | Returns:
301 |             tuple: (sample, target) where target is the subjective quality score of the image.
302 | """
303 | path, target = self.samples[index]
304 | sample = pil_loader(path)
305 | sample = self.transform(sample)
306 | return sample, target
307 |
308 | def __len__(self):
309 | length = len(self.samples)
310 | return length
311 |
312 |
313 | def getFileName(path, suffix):
314 | filename = []
315 | f_list = os.listdir(path)
316 | for i in f_list:
317 | if os.path.splitext(i)[1] == suffix:
318 | filename.append(i)
319 | return filename
320 |
321 |
322 | def getTIDFileName(path, suffix):
323 | filename = []
324 | f_list = os.listdir(path)
325 | for i in f_list:
326 |         if suffix.find(os.path.splitext(i)[1]) != -1:
327 |             filename.append(i[1:3])  # keep the two-digit reference index, e.g. 'I01.BMP' -> '01'
328 | return filename
329 |
330 |
331 | def pil_loader(path):
332 | with open(path, 'rb') as f:
333 | img = Image.open(f)
334 | return img.convert('RGB')
--------------------------------------------------------------------------------
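
All of the folder classes in folders.py follow the same contract: the constructor expands a list of image indices into patch_num repeated (path, score) pairs, and __getitem__ decodes and transforms the image lazily. A minimal usage sketch, assuming a local KonIQ-10k copy laid out as Koniq_10kFolder expects (the CSV and the 1024x768 folder under the root); the transform here is an illustrative ImageNet-style pipeline, not necessarily the one data_loader.py builds:

import torchvision.transforms as T
from folders import Koniq_10kFolder

# random 224x224 crops with ImageNet normalization, a common IQA patch pipeline
transform = T.Compose([
    T.RandomCrop(224),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

# first 100 images, 25 patches each -> 2500 (patch, MOS) samples
dataset = Koniq_10kFolder(root='./koniq-10k', index=list(range(100)),
                          transform=transform, patch_num=25)
patch, mos = dataset[0]
print(patch.shape, mos)  # torch.Size([3, 224, 224]) and a float32 MOS value
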
/models.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import functional as F
4 | from torch.nn import init
5 | import math
6 | import torch.utils.model_zoo as model_zoo
7 |
8 | model_urls = {
9 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
10 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
11 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
12 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
13 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
14 | }
15 |
16 |
17 | class HyperNet(nn.Module):
18 | """
19 | Hyper network for learning perceptual rules.
20 |
21 | Args:
22 | lda_out_channels: local distortion aware module output size.
23 | hyper_in_channels: input feature channels for hyper network.
24 | target_in_size: input vector size for target network.
25 |         target_fc(i)_size: fully-connected layer sizes of the target network.
26 | feature_size: input feature map width/height for hyper network.
27 |
28 | Note:
29 |         For the sizes to match, the arguments must satisfy: 'target_fc(i)_size * target_fc(i+1)_size' is divisible by 'feature_size ^ 2' (for instance, 224 * 112 / 7 ** 2 = 512).
30 |
31 | """
32 | def __init__(self, lda_out_channels, hyper_in_channels, target_in_size, target_fc1_size, target_fc2_size, target_fc3_size, target_fc4_size, feature_size):
33 | super(HyperNet, self).__init__()
34 |
35 | self.hyperInChn = hyper_in_channels
36 | self.target_in_size = target_in_size
37 | self.f1 = target_fc1_size
38 | self.f2 = target_fc2_size
39 | self.f3 = target_fc3_size
40 | self.f4 = target_fc4_size
41 | self.feature_size = feature_size
42 |
43 | self.res = resnet50_backbone(lda_out_channels, target_in_size, pretrained=True)
44 |
45 | self.pool = nn.AdaptiveAvgPool2d((1, 1))
46 |
47 | # Conv layers for resnet output features
48 | self.conv1 = nn.Sequential(
49 | nn.Conv2d(2048, 1024, 1, padding=(0, 0)),
50 | nn.ReLU(inplace=True),
51 | nn.Conv2d(1024, 512, 1, padding=(0, 0)),
52 | nn.ReLU(inplace=True),
53 | nn.Conv2d(512, self.hyperInChn, 1, padding=(0, 0)),
54 | nn.ReLU(inplace=True)
55 | )
56 |
57 | # Hyper network part, conv for generating target fc weights, fc for generating target fc biases
58 | self.fc1w_conv = nn.Conv2d(self.hyperInChn, int(self.target_in_size * self.f1 / feature_size ** 2), 3, padding=(1, 1))
59 | self.fc1b_fc = nn.Linear(self.hyperInChn, self.f1)
60 |
61 | self.fc2w_conv = nn.Conv2d(self.hyperInChn, int(self.f1 * self.f2 / feature_size ** 2), 3, padding=(1, 1))
62 | self.fc2b_fc = nn.Linear(self.hyperInChn, self.f2)
63 |
64 | self.fc3w_conv = nn.Conv2d(self.hyperInChn, int(self.f2 * self.f3 / feature_size ** 2), 3, padding=(1, 1))
65 | self.fc3b_fc = nn.Linear(self.hyperInChn, self.f3)
66 |
67 | self.fc4w_conv = nn.Conv2d(self.hyperInChn, int(self.f3 * self.f4 / feature_size ** 2), 3, padding=(1, 1))
68 | self.fc4b_fc = nn.Linear(self.hyperInChn, self.f4)
69 |
70 | self.fc5w_fc = nn.Linear(self.hyperInChn, self.f4)
71 | self.fc5b_fc = nn.Linear(self.hyperInChn, 1)
72 |
73 |         # initialize the weight- and bias-generating layers, skipping the first three
74 |         for i, m_name in enumerate(self._modules):
75 |             if i > 2:  # modules 0-2 are res, pool and conv1
76 | nn.init.kaiming_normal_(self._modules[m_name].weight.data)
77 |
78 | def forward(self, img):
79 | feature_size = self.feature_size
80 |
81 | res_out = self.res(img)
82 |
83 | # input vector for target net
84 | target_in_vec = res_out['target_in_vec'].view(-1, self.target_in_size, 1, 1)
85 |
86 | # input features for hyper net
87 | hyper_in_feat = self.conv1(res_out['hyper_in_feat']).view(-1, self.hyperInChn, feature_size, feature_size)
88 |
89 | # generating target net weights & biases
90 | target_fc1w = self.fc1w_conv(hyper_in_feat).view(-1, self.f1, self.target_in_size, 1, 1)
91 | target_fc1b = self.fc1b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f1)
92 |
93 | target_fc2w = self.fc2w_conv(hyper_in_feat).view(-1, self.f2, self.f1, 1, 1)
94 | target_fc2b = self.fc2b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f2)
95 |
96 | target_fc3w = self.fc3w_conv(hyper_in_feat).view(-1, self.f3, self.f2, 1, 1)
97 | target_fc3b = self.fc3b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f3)
98 |
99 | target_fc4w = self.fc4w_conv(hyper_in_feat).view(-1, self.f4, self.f3, 1, 1)
100 | target_fc4b = self.fc4b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f4)
101 |
102 | target_fc5w = self.fc5w_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1, self.f4, 1, 1)
103 | target_fc5b = self.fc5b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1)
104 |
105 | out = {}
106 | out['target_in_vec'] = target_in_vec
107 | out['target_fc1w'] = target_fc1w
108 | out['target_fc1b'] = target_fc1b
109 | out['target_fc2w'] = target_fc2w
110 | out['target_fc2b'] = target_fc2b
111 | out['target_fc3w'] = target_fc3w
112 | out['target_fc3b'] = target_fc3b
113 | out['target_fc4w'] = target_fc4w
114 | out['target_fc4b'] = target_fc4b
115 | out['target_fc5w'] = target_fc5w
116 | out['target_fc5b'] = target_fc5b
117 |
118 | return out
119 |
120 |
121 | class TargetNet(nn.Module):
122 | """
123 | Target network for quality prediction.
124 | """
125 | def __init__(self, paras):
126 | super(TargetNet, self).__init__()
127 | self.l1 = nn.Sequential(
128 | TargetFC(paras['target_fc1w'], paras['target_fc1b']),
129 | nn.Sigmoid(),
130 | )
131 | self.l2 = nn.Sequential(
132 | TargetFC(paras['target_fc2w'], paras['target_fc2b']),
133 | nn.Sigmoid(),
134 | )
135 |
136 | self.l3 = nn.Sequential(
137 | TargetFC(paras['target_fc3w'], paras['target_fc3b']),
138 | nn.Sigmoid(),
139 | )
140 |
141 | self.l4 = nn.Sequential(
142 | TargetFC(paras['target_fc4w'], paras['target_fc4b']),
143 | nn.Sigmoid(),
144 | TargetFC(paras['target_fc5w'], paras['target_fc5b']),
145 | )
146 |
147 | def forward(self, x):
148 | q = self.l1(x)
149 | # q = F.dropout(q)
150 | q = self.l2(q)
151 | q = self.l3(q)
152 | q = self.l4(q).squeeze()
153 | return q
154 |
155 |
156 | class TargetFC(nn.Module):
157 | """
158 |     Fully-connected operations for the target net.
159 |
160 |     Note:
161 |         Weights & biases differ from image to image within a batch,
162 |         so a grouped convolution is used to apply each image's own weights & biases in a single call.
163 | """
164 | def __init__(self, weight, bias):
165 | super(TargetFC, self).__init__()
166 | self.weight = weight
167 | self.bias = bias
168 |
169 | def forward(self, input_):
170 |         # fold the batch into the channel dim: (B, C_in, H, W) -> (1, B*C_in, H, W)
171 |         input_re = input_.view(-1, input_.shape[0] * input_.shape[1], input_.shape[2], input_.shape[3])
172 |         weight_re = self.weight.view(self.weight.shape[0] * self.weight.shape[1], self.weight.shape[2], self.weight.shape[3], self.weight.shape[4])
173 |         bias_re = self.bias.view(self.bias.shape[0] * self.bias.shape[1])
174 |         out = F.conv2d(input=input_re, weight=weight_re, bias=bias_re, groups=self.weight.shape[0])  # one group per image
175 |
176 | return out.view(input_.shape[0], self.weight.shape[1], input_.shape[2], input_.shape[3])
177 |
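# A shape sketch of the grouped-conv trick in TargetFC.forward, for a batch of
# B images with C_in inputs and C_out outputs per image (illustrative only):
#
#   input_:  (B, C_in, 1, 1)        -> input_re:  (1, B*C_in, 1, 1)
#   weight:  (B, C_out, C_in, 1, 1) -> weight_re: (B*C_out, C_in, 1, 1)
#   bias:    (B, C_out)             -> bias_re:   (B*C_out,)
#   F.conv2d(..., groups=B) yields (1, B*C_out, 1, 1), viewed back to (B, C_out, 1, 1)
#
# With groups=B, image i's channels are convolved only with image i's weights,
# i.e. a per-image fully-connected layer computed in a single batched call.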
178 |
179 | class Bottleneck(nn.Module):
180 | expansion = 4
181 |
182 | def __init__(self, inplanes, planes, stride=1, downsample=None):
183 | super(Bottleneck, self).__init__()
184 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
185 | self.bn1 = nn.BatchNorm2d(planes)
186 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
187 | padding=1, bias=False)
188 | self.bn2 = nn.BatchNorm2d(planes)
189 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
190 | self.bn3 = nn.BatchNorm2d(planes * 4)
191 | self.relu = nn.ReLU(inplace=True)
192 | self.downsample = downsample
193 | self.stride = stride
194 |
195 | def forward(self, x):
196 | residual = x
197 |
198 | out = self.conv1(x)
199 | out = self.bn1(out)
200 | out = self.relu(out)
201 |
202 | out = self.conv2(out)
203 | out = self.bn2(out)
204 | out = self.relu(out)
205 |
206 | out = self.conv3(out)
207 | out = self.bn3(out)
208 |
209 | if self.downsample is not None:
210 | residual = self.downsample(x)
211 |
212 | out += residual
213 | out = self.relu(out)
214 |
215 | return out
216 |
217 |
218 | class ResNetBackbone(nn.Module):
219 |
220 | def __init__(self, lda_out_channels, in_chn, block, layers, num_classes=1000):
221 | super(ResNetBackbone, self).__init__()
222 | self.inplanes = 64
223 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
224 | self.bn1 = nn.BatchNorm2d(64)
225 | self.relu = nn.ReLU(inplace=True)
226 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
227 | self.layer1 = self._make_layer(block, 64, layers[0])
228 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
229 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
230 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
231 |
232 | # local distortion aware module
233 | self.lda1_pool = nn.Sequential(
234 | nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
235 | nn.AvgPool2d(7, stride=7),
236 | )
237 | self.lda1_fc = nn.Linear(16 * 64, lda_out_channels)
238 |
239 | self.lda2_pool = nn.Sequential(
240 | nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
241 | nn.AvgPool2d(7, stride=7),
242 | )
243 | self.lda2_fc = nn.Linear(32 * 16, lda_out_channels)
244 |
245 | self.lda3_pool = nn.Sequential(
246 | nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
247 | nn.AvgPool2d(7, stride=7),
248 | )
249 | self.lda3_fc = nn.Linear(64 * 4, lda_out_channels)
250 |
251 | self.lda4_pool = nn.AvgPool2d(7, stride=7)
252 | self.lda4_fc = nn.Linear(2048, in_chn - lda_out_channels * 3)
253 |
254 | for m in self.modules():
255 | if isinstance(m, nn.Conv2d):
256 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
257 | m.weight.data.normal_(0, math.sqrt(2. / n))
258 | elif isinstance(m, nn.BatchNorm2d):
259 | m.weight.data.fill_(1)
260 | m.bias.data.zero_()
261 |
262 | # initialize
263 | nn.init.kaiming_normal_(self.lda1_pool._modules['0'].weight.data)
264 | nn.init.kaiming_normal_(self.lda2_pool._modules['0'].weight.data)
265 | nn.init.kaiming_normal_(self.lda3_pool._modules['0'].weight.data)
266 | nn.init.kaiming_normal_(self.lda1_fc.weight.data)
267 | nn.init.kaiming_normal_(self.lda2_fc.weight.data)
268 | nn.init.kaiming_normal_(self.lda3_fc.weight.data)
269 | nn.init.kaiming_normal_(self.lda4_fc.weight.data)
270 |
271 | def _make_layer(self, block, planes, blocks, stride=1):
272 | downsample = None
273 | if stride != 1 or self.inplanes != planes * block.expansion:
274 | downsample = nn.Sequential(
275 | nn.Conv2d(self.inplanes, planes * block.expansion,
276 | kernel_size=1, stride=stride, bias=False),
277 | nn.BatchNorm2d(planes * block.expansion),
278 | )
279 |
280 | layers = []
281 | layers.append(block(self.inplanes, planes, stride, downsample))
282 | self.inplanes = planes * block.expansion
283 | for i in range(1, blocks):
284 | layers.append(block(self.inplanes, planes))
285 |
286 | return nn.Sequential(*layers)
287 |
288 | def forward(self, x):
289 | x = self.conv1(x)
290 | x = self.bn1(x)
291 | x = self.relu(x)
292 | x = self.maxpool(x)
293 | x = self.layer1(x)
294 |
295 |         # the same effect as the LDA operation in the paper, but saves much more memory
296 | lda_1 = self.lda1_fc(self.lda1_pool(x).view(x.size(0), -1))
297 | x = self.layer2(x)
298 | lda_2 = self.lda2_fc(self.lda2_pool(x).view(x.size(0), -1))
299 | x = self.layer3(x)
300 | lda_3 = self.lda3_fc(self.lda3_pool(x).view(x.size(0), -1))
301 | x = self.layer4(x)
302 | lda_4 = self.lda4_fc(self.lda4_pool(x).view(x.size(0), -1))
303 |
304 | vec = torch.cat((lda_1, lda_2, lda_3, lda_4), 1)
305 |
306 | out = {}
307 | out['hyper_in_feat'] = x
308 | out['target_in_vec'] = vec
309 |
310 | return out
311 |
312 |
313 | def resnet50_backbone(lda_out_channels, in_chn, pretrained=False, **kwargs):
314 |     """Constructs a ResNet-50 backbone for HyperNet.
315 |
316 | Args:
317 |         pretrained (bool): If True, loads weights pre-trained on ImageNet
318 | """
319 | model = ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], **kwargs)
320 | if pretrained:
321 | save_model = model_zoo.load_url(model_urls['resnet50'])
322 | model_dict = model.state_dict()
323 |         state_dict = {k: v for k, v in save_model.items() if k in model_dict}  # keep only matching keys
324 | model_dict.update(state_dict)
325 | model.load_state_dict(model_dict)
326 | else:
327 | model.apply(weights_init_xavier)
328 | return model
329 |
330 |
331 | def weights_init_xavier(m):
332 | classname = m.__class__.__name__
333 |     # despite its name, this function applies Kaiming (He) initialization;
334 |     # it is only used when the backbone is built with pretrained=False
335 | if classname.find('Conv') != -1:
336 | init.kaiming_normal_(m.weight.data)
337 | elif classname.find('Linear') != -1:
338 | init.kaiming_normal_(m.weight.data)
339 | elif classname.find('BatchNorm2d') != -1:
340 |         init.uniform_(m.weight.data, 0.02, 1.0)  # uniform_ expects (low, high)
341 | init.constant_(m.bias.data, 0.0)
342 |
--------------------------------------------------------------------------------
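
To see the two halves of models.py working together: HyperNet consumes an image batch and emits one set of fully-connected weights and biases per image, and TargetNet is then instantiated from those parameters and evaluated on the pooled target_in_vec feature. A minimal forward-pass sketch; the sizes below are one configuration that satisfies the divisibility note in the HyperNet docstring (building with pretrained=True downloads the ImageNet ResNet-50 weights on first use):

import torch
import models

# every target_fc(i)_size * target_fc(i+1)_size is divisible by 7 ** 2 = 49,
# and 224x224 inputs give the 7x7 feature maps the hyper network expects
hyper = models.HyperNet(16, 112, 224, 112, 56, 28, 14, 7)
hyper.eval()

img = torch.randn(2, 3, 224, 224)  # dummy batch of two "images"
with torch.no_grad():
    paras = hyper(img)                       # per-image weights & biases
    target = models.TargetNet(paras)         # a fresh target net per batch
    scores = target(paras['target_in_vec'])  # one quality score per image
print(scores.shape)  # torch.Size([2])
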
/train_test_IQA.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import random
4 | import numpy as np
5 | from HyerIQASolver import HyperIQASolver
6 |
7 |
8 | os.environ['CUDA_VISIBLE_DEVICES'] = '0'
9 |
10 |
11 | def main(config):
12 |
13 | folder_path = {
14 | 'live': '/home/ssl/Database/databaserelease2/',
15 | 'csiq': '/home/ssl/Database/CSIQ/',
16 | 'tid2013': '/home/ssl/Database/TID2013/',
17 | 'livec': '/home/ssl/Database/ChallengeDB_release/ChallengeDB_release/',
18 | 'koniq-10k': '/home/ssl/Database/koniq-10k/',
19 | 'bid': '/home/ssl/Database/BID/',
20 | }
21 |
22 | img_num = {
23 | 'live': list(range(0, 29)),
24 | 'csiq': list(range(0, 30)),
25 | 'tid2013': list(range(0, 25)),
26 | 'livec': list(range(0, 1162)),
27 | 'koniq-10k': list(range(0, 10073)),
28 | 'bid': list(range(0, 586)),
29 | }
30 | sel_num = img_num[config.dataset]
31 |
32 |     srcc_all = np.zeros(config.train_test_num, dtype=float)  # np.float was removed in NumPy 1.24
33 |     plcc_all = np.zeros(config.train_test_num, dtype=float)
34 |
35 | print('Training and testing on %s dataset for %d rounds...' % (config.dataset, config.train_test_num))
36 | for i in range(config.train_test_num):
37 | print('Round %d' % (i+1))
38 |         # Randomly select 80% of the images for training and the rest for testing
39 | random.shuffle(sel_num)
40 | train_index = sel_num[0:int(round(0.8 * len(sel_num)))]
41 | test_index = sel_num[int(round(0.8 * len(sel_num))):len(sel_num)]
42 |
43 | solver = HyperIQASolver(config, folder_path[config.dataset], train_index, test_index)
44 | srcc_all[i], plcc_all[i] = solver.train()
45 |
46 | # print(srcc_all)
47 | # print(plcc_all)
48 | srcc_med = np.median(srcc_all)
49 | plcc_med = np.median(plcc_all)
50 |
51 | print('Testing median SRCC %4.4f,\tmedian PLCC %4.4f' % (srcc_med, plcc_med))
52 |
53 | # return srcc_med, plcc_med
54 |
55 |
56 | if __name__ == '__main__':
57 | parser = argparse.ArgumentParser()
58 | parser.add_argument('--dataset', dest='dataset', type=str, default='livec', help='Support datasets: livec|koniq-10k|bid|live|csiq|tid2013')
59 |     parser.add_argument('--train_patch_num', dest='train_patch_num', type=int, default=25, help='Number of patches sampled from each training image')
60 |     parser.add_argument('--test_patch_num', dest='test_patch_num', type=int, default=25, help='Number of patches sampled from each testing image')
61 | parser.add_argument('--lr', dest='lr', type=float, default=2e-5, help='Learning rate')
62 | parser.add_argument('--weight_decay', dest='weight_decay', type=float, default=5e-4, help='Weight decay')
63 | parser.add_argument('--lr_ratio', dest='lr_ratio', type=int, default=10, help='Learning rate ratio for hyper network')
64 | parser.add_argument('--batch_size', dest='batch_size', type=int, default=96, help='Batch size')
65 | parser.add_argument('--epochs', dest='epochs', type=int, default=16, help='Epochs for training')
66 | parser.add_argument('--patch_size', dest='patch_size', type=int, default=224, help='Crop size for training & testing image patches')
67 |     parser.add_argument('--train_test_num', dest='train_test_num', type=int, default=10, help='Number of random train-test splits to run')
68 |
69 | config = parser.parse_args()
70 | main(config)
71 |
72 |
--------------------------------------------------------------------------------
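
For reference, the SRCC/PLCC numbers that solver.train() returns and this script summarizes are essentially scipy's rank and linear correlations between predicted and ground-truth scores. A toy sketch with made-up values:

import numpy as np
from scipy import stats

# hypothetical per-image predictions and ground-truth MOS for one test round
pred_scores = np.array([42.1, 55.3, 61.0, 38.7, 70.2])
gt_scores = np.array([40.0, 57.5, 59.9, 41.2, 72.3])

srcc, _ = stats.spearmanr(pred_scores, gt_scores)  # rank (monotonicity) agreement
plcc, _ = stats.pearsonr(pred_scores, gt_scores)   # linear agreement
print('SRCC %.4f, PLCC %.4f' % (srcc, plcc))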