├── src
│   ├── __init__.py
│   └── models
│       ├── __init__.py
│       └── archs
│           ├── __init__.py
│           ├── arch_util.py
│           ├── sem_hyperiqa_arch.py
│           └── sem_hyperiqa_util.py
├── NTIRE24
│   └── Submission Kit
│       ├── logs
│       │   └── log_23_PIQ.log
│       ├── weights
│       │   └── weights_23_PIQ.pth
│       ├── assets
│       │   └── assets_23_PIQ
│       │       ├── __init__.py
│       │       └── asset.py
│       ├── images.csv
│       ├── results
│       │   └── result_23_PIQ.csv
│       ├── images
│       │   └── 0
│       │       └── image1.jpg
│       ├── submission_23_PIQ.zip
│       ├── models
│       │   └── model_23_PIQ.py
│       └── Instructions.ipynb
├── Imgs
│   ├── pdf.avif
│   ├── poster.png
│   ├── Thumbnail.png
│   ├── download.png
│   ├── youtube.avif
│   ├── NTIRE2020_logo.png
│   ├── CVPR_Poster_PIQ23.png
│   └── CVRP Logo_2023 Vancouvar_Color.png
├── Test split
│   ├── Scene Split
│   │   └── Scene examples
│   │       ├── Test
│   │       │   ├── Indoor_Scene_5_nbImages_50_11153.jpg
│   │       │   ├── Lowlight_Scene_20_nbImages_125_0.jpg
│   │       │   ├── Night_Scene_28_nbImages_112_1032.jpg
│   │       │   ├── Night_Scene_30_nbImages_115_100.jpg
│   │       │   ├── Night_Scene_31_nbImages_113_1019.jpg
│   │       │   ├── Indoor_Scene_10_nbImages_126_1069.JPG
│   │       │   ├── Indoor_Scene_11_nbImages_95_10172.jpg
│   │       │   ├── Indoor_Scene_1_nbImages_124_10371.jpg
│   │       │   ├── Outdoor_Scene_34_nbImages_46_11074.jpg
│   │       │   ├── Outdoor_Scene_43_nbImages_125_1009.jpg
│   │       │   ├── Outdoor_Scene_45_nbImages_79_10621.JPG
│   │       │   ├── Outdoor_Scene_46_nbImages_75_10487.jpg
│   │       │   ├── Lowlight_Scene_16_nbImages_88_10087.jpg
│   │       │   ├── Lowlight_Scene_17_nbImages_102_1004.JPG
│   │       │   └── Outdoor_Scene_33_nbImages_120_27199.jpeg
│   │       └── Train
│   │           ├── Night_Scene_27_nbImages_115_101.jpg
│   │           ├── Outdoor_Scene_42_nbImages_125_1.jpg
│   │           ├── Indoor_Scene_0_nbImages_73_10370.jpg
│   │           ├── Indoor_Scene_13_nbImages_91_10209.jpg
│   │           ├── Indoor_Scene_2_nbImages_75_10279.jpg
│   │           ├── Indoor_Scene_3_nbImages_90_11117.jpg
│   │           ├── Indoor_Scene_4_nbImages_92_10162.jpg
│   │           ├── Indoor_Scene_6_nbImages_126_1005.jpg
│   │           ├── Indoor_Scene_7_nbImages_111_1000.jpg
│   │           ├── Indoor_Scene_8_nbImages_126_1021.jpg
│   │           ├── Indoor_Scene_9_nbImages_125_1011.jpg
│   │           ├── Night_Scene_22_nbImages_116_1001.jpg
│   │           ├── Night_Scene_23_nbImages_117_1012.JPG
│   │           ├── Night_Scene_24_nbImages_120_1043.jpg
│   │           ├── Night_Scene_25_nbImages_116_1036.jpg
│   │           ├── Night_Scene_26_nbImages_116_1020.jpg
│   │           ├── Night_Scene_29_nbImages_113_1058.jpg
│   │           ├── Night_Scene_32_nbImages_101_10342.jpg
│   │           ├── Outdoor_Scene_44_nbImages_125_10.jpg
│   │           ├── Indoor_Scene_12_nbImages_104_10157.jpg
│   │           ├── Lowlight_Scene_14_nbImages_97_10240.jpg
│   │           ├── Lowlight_Scene_15_nbImages_71_10515.jpg
│   │           ├── Lowlight_Scene_18_nbImages_125_1017.jpg
│   │           ├── Lowlight_Scene_19_nbImages_125_1010.jpg
│   │           ├── Lowlight_Scene_21_nbImages_125_1008.jpg
│   │           ├── Outdoor_Scene_35_nbImages_50_11101.jpg
│   │           ├── Outdoor_Scene_36_nbImages_74_10445.jpg
│   │           ├── Outdoor_Scene_37_nbImages_96_10048.jpg
│   │           ├── Outdoor_Scene_38_nbImages_79_10579.jpg
│   │           ├── Outdoor_Scene_39_nbImages_93_10005.jpg
│   │           ├── Outdoor_Scene_40_nbImages_125_1051.jpg
│   │           ├── Outdoor_Scene_41_nbImages_125_1031.jpg
│   │           ├── Outdoor_Scene_47_nbImages_90_11096.jpg
│   │           ├── Outdoor_Scene_48_nbImages_75_10009.jpg
│   │           └── Outdoor_Scene_49_nbImages_125_27205.JPG
│   └── Scene Split.csv
├── README.md
└── Test split example.ipynb
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/models/archs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/logs/log_23_PIQ.log:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/weights/weights_23_PIQ.pth:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/assets/assets_23_PIQ/__init__.py:
--------------------------------------------------------------------------------
1 | from .asset import *
--------------------------------------------------------------------------------
/Imgs/pdf.avif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/pdf.avif
--------------------------------------------------------------------------------
/Imgs/poster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/poster.png
--------------------------------------------------------------------------------
/Imgs/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/Thumbnail.png
--------------------------------------------------------------------------------
/Imgs/download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/download.png
--------------------------------------------------------------------------------
/Imgs/youtube.avif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/youtube.avif
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/images.csv:
--------------------------------------------------------------------------------
1 | IMAGE,CLASS,CONDITION
2 | images/0/image1.jpg,0,INDOOR
3 |
--------------------------------------------------------------------------------
/Imgs/NTIRE2020_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/NTIRE2020_logo.png
--------------------------------------------------------------------------------
/Imgs/CVPR_Poster_PIQ23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/CVPR_Poster_PIQ23.png
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/assets/assets_23_PIQ/asset.py:
--------------------------------------------------------------------------------
1 | def print_hello():
2 | print('Hello, I am an asset.')
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/results/result_23_PIQ.csv:
--------------------------------------------------------------------------------
1 | IMAGE,CLASS,CONDITION,SCORE
2 | images/0/image1.jpg,0,INDOOR,0.0
3 |
--------------------------------------------------------------------------------
/Imgs/CVRP Logo_2023 Vancouvar_Color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/CVRP Logo_2023 Vancouvar_Color.png
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/images/0/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/NTIRE24/Submission Kit/images/0/image1.jpg
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/submission_23_PIQ.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/NTIRE24/Submission Kit/submission_23_PIQ.zip
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Indoor_Scene_5_nbImages_50_11153.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_5_nbImages_50_11153.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_20_nbImages_125_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_20_nbImages_125_0.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Night_Scene_28_nbImages_112_1032.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_28_nbImages_112_1032.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Night_Scene_30_nbImages_115_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_30_nbImages_115_100.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Night_Scene_31_nbImages_113_1019.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_31_nbImages_113_1019.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_27_nbImages_115_101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_27_nbImages_115_101.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_42_nbImages_125_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_42_nbImages_125_1.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Indoor_Scene_10_nbImages_126_1069.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_10_nbImages_126_1069.JPG
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Indoor_Scene_11_nbImages_95_10172.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_11_nbImages_95_10172.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Indoor_Scene_1_nbImages_124_10371.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_1_nbImages_124_10371.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_34_nbImages_46_11074.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_34_nbImages_46_11074.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_43_nbImages_125_1009.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_43_nbImages_125_1009.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_45_nbImages_79_10621.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_45_nbImages_79_10621.JPG
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_46_nbImages_75_10487.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_46_nbImages_75_10487.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_0_nbImages_73_10370.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_0_nbImages_73_10370.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_13_nbImages_91_10209.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_13_nbImages_91_10209.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_2_nbImages_75_10279.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_2_nbImages_75_10279.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_3_nbImages_90_11117.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_3_nbImages_90_11117.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_4_nbImages_92_10162.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_4_nbImages_92_10162.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_6_nbImages_126_1005.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_6_nbImages_126_1005.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_7_nbImages_111_1000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_7_nbImages_111_1000.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_8_nbImages_126_1021.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_8_nbImages_126_1021.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_9_nbImages_125_1011.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_9_nbImages_125_1011.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_22_nbImages_116_1001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_22_nbImages_116_1001.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_23_nbImages_117_1012.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_23_nbImages_117_1012.JPG
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_24_nbImages_120_1043.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_24_nbImages_120_1043.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_25_nbImages_116_1036.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_25_nbImages_116_1036.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_26_nbImages_116_1020.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_26_nbImages_116_1020.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_29_nbImages_113_1058.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_29_nbImages_113_1058.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Night_Scene_32_nbImages_101_10342.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_32_nbImages_101_10342.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_44_nbImages_125_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_44_nbImages_125_10.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_16_nbImages_88_10087.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_16_nbImages_88_10087.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_17_nbImages_102_1004.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_17_nbImages_102_1004.JPG
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_33_nbImages_120_27199.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_33_nbImages_120_27199.jpeg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Indoor_Scene_12_nbImages_104_10157.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_12_nbImages_104_10157.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_14_nbImages_97_10240.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_14_nbImages_97_10240.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_15_nbImages_71_10515.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_15_nbImages_71_10515.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_18_nbImages_125_1017.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_18_nbImages_125_1017.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_19_nbImages_125_1010.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_19_nbImages_125_1010.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_21_nbImages_125_1008.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_21_nbImages_125_1008.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_35_nbImages_50_11101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_35_nbImages_50_11101.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_36_nbImages_74_10445.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_36_nbImages_74_10445.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_37_nbImages_96_10048.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_37_nbImages_96_10048.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_38_nbImages_79_10579.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_38_nbImages_79_10579.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_39_nbImages_93_10005.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_39_nbImages_93_10005.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_40_nbImages_125_1051.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_40_nbImages_125_1051.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_41_nbImages_125_1031.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_41_nbImages_125_1031.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_47_nbImages_90_11096.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_47_nbImages_90_11096.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_48_nbImages_75_10009.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_48_nbImages_75_10009.jpg
--------------------------------------------------------------------------------
/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_49_nbImages_125_27205.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_49_nbImages_125_27205.JPG
--------------------------------------------------------------------------------
/NTIRE24/Submission Kit/models/model_23_PIQ.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pandas as pd
4 |
5 | sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
6 |
7 | import assets.assets_23_PIQ as my_asset
8 |
9 | def main():
10 |
11 | my_asset.print_hello()
12 |
13 | # Load the images.csv file
14 | images_df = pd.read_csv(r'./images.csv')
15 |
16 | # Add a 'SCORE' column with a default value of 0.0
17 | images_df['SCORE'] = 0.0
18 |
19 | # Save the modified DataFrame to result_23_PIQ.csv in the results folder
20 | images_df.to_csv('./results/result_23_PIQ.csv', index=False, sep=',')
21 |
22 | if __name__ == "__main__":
23 | main()
--------------------------------------------------------------------------------
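Note: the example script above only reads `images.csv`, adds a `SCORE` column, and writes the result CSV. The submission instructions (see `Instructions.ipynb` further below) also ask for a log file under `./logs/`. The following is a minimal, hypothetical sketch of that extra step using Python's standard `logging` module; it is not part of the official kit, and the `log_23_PIQ.log` name simply mirrors the example naming convention.

```python
# Sketch only, not part of the kit: writing the log file requested by the
# submission instructions. Assumes it runs from the Submission Kit root
# (like model_23_PIQ.py) and that the ./logs folder exists.
import logging

import pandas as pd

logging.basicConfig(
    filename='./logs/log_23_PIQ.log',  # log_[LAST_NAME]_[FIRST_NAME].log
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
)

images_df = pd.read_csv('./images.csv')
logging.info('Loaded %d images from images.csv', len(images_df))
```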
/Test split/Scene Split.csv:
--------------------------------------------------------------------------------
1 | SCENE,SPLIT,NB IMAGES
2 | Indoor_Scene_0,Train,72
3 | Indoor_Scene_12,Train,103
4 | Indoor_Scene_13,Train,90
5 | Indoor_Scene_2,Train,74
6 | Indoor_Scene_3,Train,89
7 | Indoor_Scene_4,Train,91
8 | Indoor_Scene_6,Train,125
9 | Indoor_Scene_7,Train,110
10 | Indoor_Scene_8,Train,125
11 | Indoor_Scene_9,Train,125
12 | Lowlight_Scene_14,Train,96
13 | Lowlight_Scene_15,Train,70
14 | Lowlight_Scene_18,Train,125
15 | Lowlight_Scene_19,Train,125
16 | Lowlight_Scene_21,Train,125
17 | Night_Scene_22,Train,116
18 | Night_Scene_23,Train,117
19 | Night_Scene_24,Train,119
20 | Night_Scene_25,Train,116
21 | Night_Scene_26,Train,115
22 | Night_Scene_27,Train,115
23 | Night_Scene_29,Train,112
24 | Night_Scene_32,Train,100
25 | Outdoor_Scene_35,Train,49
26 | Outdoor_Scene_36,Train,73
27 | Outdoor_Scene_37,Train,95
28 | Outdoor_Scene_38,Train,78
29 | Outdoor_Scene_39,Train,92
30 | Outdoor_Scene_40,Train,125
31 | Outdoor_Scene_41,Train,125
32 | Outdoor_Scene_42,Train,125
33 | Outdoor_Scene_44,Train,125
34 | Outdoor_Scene_47,Train,89
35 | Outdoor_Scene_48,Train,74
36 | Outdoor_Scene_49,Train,125
37 | Indoor_Scene_10,Test,125
38 | Indoor_Scene_11,Test,94
39 | Indoor_Scene_1,Test,124
40 | Indoor_Scene_5,Test,50
41 | Lowlight_Scene_16,Test,87
42 | Lowlight_Scene_17,Test,102
43 | Lowlight_Scene_20,Test,125
44 | Night_Scene_28,Test,112
45 | Night_Scene_30,Test,114
46 | Night_Scene_31,Test,112
47 | Outdoor_Scene_33,Test,119
48 | Outdoor_Scene_34,Test,45
49 | Outdoor_Scene_43,Test,125
50 | Outdoor_Scene_45,Test,78
51 | Outdoor_Scene_46,Test,74
52 |
--------------------------------------------------------------------------------
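`Scene Split.csv` above assigns each PIQ23 scene to the Train or Test split and records its image count. The snippet below is a minimal pandas sketch (not the repository's own `Test split example.ipynb`) for recovering the two scene lists, assuming only the columns shown above and the `Test split/Scene Split.csv` path from the directory tree.

```python
# Sketch: recover the Train/Test scene lists from 'Test split/Scene Split.csv'.
# Assumes only the columns shown above: SCENE, SPLIT, NB IMAGES.
import pandas as pd

split_df = pd.read_csv('Test split/Scene Split.csv')
train_scenes = split_df.loc[split_df['SPLIT'] == 'Train', 'SCENE'].tolist()
test_scenes = split_df.loc[split_df['SPLIT'] == 'Test', 'SCENE'].tolist()

print(f'{len(train_scenes)} train scenes, {len(test_scenes)} test scenes')
```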
/src/models/archs/arch_util.py:
--------------------------------------------------------------------------------
1 | import math
2 | import collections.abc
3 | import numpy as np
4 | import torch
5 | import os
6 | from typing import Tuple
7 | from urllib.parse import urlparse
8 | from itertools import repeat
9 | from torch import nn as nn
10 | from torch.nn import functional as F
11 | from torch.nn import init as init
12 | from torch.nn.modules.batchnorm import _BatchNorm
13 | from torch.hub import download_url_to_file, get_dir
14 |
15 |
16 | # --------------------------------------------
17 | # IQA utils
18 | # Code taken from: https://github.com/chaofengc/IQA-PyTorch
19 | # --------------------------------------------
20 |
21 | def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
22 |     """Load file from http url, will download models if necessary.
23 |
24 | Ref: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
25 |
26 | Args:
27 | url (str): URL to be downloaded.
28 | model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir.
29 | Default: None.
30 | progress (bool): Whether to show the download progress. Default: True.
31 | file_name (str): The downloaded file name. If None, use the file name in the url. Default: None.
32 |
33 | Returns:
34 | str: The path to the downloaded file.
35 | """
36 | if model_dir is None: # use the pytorch hub_dir
37 | hub_dir = get_dir()
38 | model_dir = os.path.join(hub_dir, 'checkpoints')
39 |
40 | os.makedirs(model_dir, exist_ok=True)
41 |
42 | parts = urlparse(url)
43 | filename = os.path.basename(parts.path)
44 | if file_name is not None:
45 | filename = file_name
46 | cached_file = os.path.abspath(os.path.join(model_dir, filename))
47 | if not os.path.exists(cached_file):
48 | print(f'Downloading: "{url}" to {cached_file}\n')
49 | download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
50 | return cached_file
51 |
52 | def dist_to_mos(dist_score: torch.Tensor) -> torch.Tensor:
53 | """Convert distribution prediction to mos score.
54 | For datasets with detailed score labels, such as AVA
55 |
56 | Args:
57 | dist_score (tensor): (*, C), C is the class number
58 |
59 | Output:
60 | mos_score (tensor): (*, 1)
61 | """
62 | num_classes = dist_score.shape[-1]
63 | mos_score = dist_score * torch.arange(1, num_classes + 1).to(dist_score)
64 | mos_score = mos_score.sum(dim=-1, keepdim=True)
65 | return mos_score
66 |
67 |
68 | # --------------------------------------------
69 | # Common utils
70 | # --------------------------------------------
71 |
72 |
73 | def load_pretrained_network(net, model_path, strict=True, weight_keys=None):
74 | if model_path.startswith('https://') or model_path.startswith('http://'):
75 | model_path = load_file_from_url(model_path)
76 | print(f'Loading pretrained model {net.__class__.__name__} from {model_path}')
77 | state_dict = torch.load(model_path, map_location=torch.device('cpu'))
78 | if weight_keys:
79 | state_dict = state_dict[weight_keys]
80 | net.load_state_dict(state_dict, strict=strict)
81 |
82 |
83 | def _ntuple(n):
84 |
85 | def parse(x):
86 | if isinstance(x, collections.abc.Iterable):
87 | return x
88 | return tuple(repeat(x, n))
89 |
90 | return parse
91 |
92 |
93 | to_1tuple = _ntuple(1)
94 | to_2tuple = _ntuple(2)
95 | to_3tuple = _ntuple(3)
96 | to_4tuple = _ntuple(4)
97 | to_ntuple = _ntuple
98 |
99 |
100 | @torch.no_grad()
101 | def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
102 | r"""Initialize network weights.
103 |
104 | Args:
105 | module_list (list[nn.Module] | nn.Module): Modules to be initialized.
106 | scale (float): Scale initialized weights, especially for residual
107 | blocks. Default: 1.
108 | bias_fill (float): The value to fill bias. Default: 0.
109 | kwargs (dict): Other arguments for initialization function.
110 |
111 | """
112 | if not isinstance(module_list, list):
113 | module_list = [module_list]
114 | for module in module_list:
115 | for m in module.modules():
116 | if isinstance(m, nn.Conv2d):
117 | init.kaiming_normal_(m.weight, **kwargs)
118 | m.weight.data *= scale
119 | if m.bias is not None:
120 | m.bias.data.fill_(bias_fill)
121 | elif isinstance(m, nn.Linear):
122 | init.kaiming_normal_(m.weight, **kwargs)
123 | m.weight.data *= scale
124 | if m.bias is not None:
125 | m.bias.data.fill_(bias_fill)
126 | elif isinstance(m, _BatchNorm):
127 | init.constant_(m.weight, 1)
128 | if m.bias is not None:
129 | m.bias.data.fill_(bias_fill)
130 |
131 |
132 | def symm_pad(im: torch.Tensor, padding: Tuple[int, int, int, int]):
133 | """Symmetric padding same as tensorflow.
134 | Ref: https://discuss.pytorch.org/t/symmetric-padding/19866/3
135 | """
136 | h, w = im.shape[-2:]
137 | left, right, top, bottom = padding
138 |
139 | x_idx = np.arange(-left, w+right)
140 | y_idx = np.arange(-top, h+bottom)
141 |
142 | def reflect(x, minx, maxx):
143 | """ Reflects an array around two points making a triangular waveform that ramps up
144 | and down, allowing for pad lengths greater than the input length """
145 | rng = maxx - minx
146 | double_rng = 2*rng
147 | mod = np.fmod(x - minx, double_rng)
148 | normed_mod = np.where(mod < 0, mod+double_rng, mod)
149 | out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
150 | return np.array(out, dtype=x.dtype)
151 |
152 | x_pad = reflect(x_idx, -0.5, w-0.5)
153 | y_pad = reflect(y_idx, -0.5, h-0.5)
154 | xx, yy = np.meshgrid(x_pad, y_pad)
155 | return im[..., yy, xx]
156 |
157 |
158 | def exact_padding_2d(x, kernel, stride=1, dilation=1, mode='same'):
159 | assert len(x.shape) == 4, f'Only support 4D tensor input, but got {x.shape}'
160 | kernel = to_2tuple(kernel)
161 | stride = to_2tuple(stride)
162 | dilation = to_2tuple(dilation)
163 | b, c, h, w = x.shape
164 | h2 = math.ceil(h / stride[0])
165 | w2 = math.ceil(w / stride[1])
166 | pad_row = (h2 - 1) * stride[0] + (kernel[0] - 1) * dilation[0] + 1 - h
167 | pad_col = (w2 - 1) * stride[1] + (kernel[1] - 1) * dilation[1] + 1 - w
168 | pad_l, pad_r, pad_t, pad_b = (pad_col // 2, pad_col - pad_col // 2, pad_row // 2, pad_row - pad_row // 2)
169 |
170 | mode = mode if mode != 'same' else 'constant'
171 | if mode != 'symmetric':
172 | x = F.pad(x, (pad_l, pad_r, pad_t, pad_b), mode=mode)
173 | elif mode == 'symmetric':
174 | x = symm_pad(x, (pad_l, pad_r, pad_t, pad_b))
175 |
176 | return x
177 |
178 |
179 | class ExactPadding2d(nn.Module):
180 |     r"""This function calculates exact padding values for 4D tensor inputs,
181 |     and supports the same padding mode as tensorflow.
182 |
183 | Args:
184 | kernel (int or tuple): kernel size.
185 | stride (int or tuple): stride size.
186 | dilation (int or tuple): dilation size, default with 1.
187 |         mode (str): padding mode can be ('same', 'symmetric', 'replicate', 'circular')
188 |
189 | """
190 |
191 | def __init__(self, kernel, stride=1, dilation=1, mode='same'):
192 | super().__init__()
193 | self.kernel = to_2tuple(kernel)
194 | self.stride = to_2tuple(stride)
195 | self.dilation = to_2tuple(dilation)
196 | self.mode = mode
197 |
198 | def forward(self, x):
199 | return exact_padding_2d(x, self.kernel, self.stride, self.dilation, self.mode)
200 |
--------------------------------------------------------------------------------
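As a rough illustration of two utilities defined in `arch_util.py`, the sketch below converts a score distribution to a MOS with `dist_to_mos` and applies TensorFlow-style 'same' padding with `ExactPadding2d`. The import path assumes the `src/models/archs` package layout from the directory tree, and the tensor shapes are arbitrary examples.

```python
# Sketch: exercising dist_to_mos and ExactPadding2d from arch_util.py.
# Assumes the repository root is on the Python path so src/ imports resolve.
import torch

from src.models.archs.arch_util import ExactPadding2d, dist_to_mos

# A softmax distribution over 10 quality classes becomes a scalar MOS in [1, 10].
dist = torch.softmax(torch.randn(4, 10), dim=-1)
mos = dist_to_mos(dist)  # shape (4, 1)

# TensorFlow-style 'same' padding: a 3x3 stride-2 conv on the padded tensor
# produces ceil(224 / 2) = 112 spatial positions.
pad = ExactPadding2d(kernel=3, stride=2, mode='same')
x = torch.randn(1, 3, 224, 224)
y = pad(x)  # (1, 3, 225, 225) for this input

print(mos.shape, y.shape)
```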
/NTIRE24/Submission Kit/Instructions.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "6b1557dc-dfe3-4157-8aa5-f6e3cee6714e",
6 | "metadata": {},
7 | "source": [
8 | "# NTIRE 2024 - Portrait Quality Assessment Challenge ft. DXOMARK\n"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "5dc9c177-e6f5-4bc1-84f1-0803b294608b",
14 | "metadata": {},
15 | "source": [
16 | "# -- INSTRUCTIONS --\n",
17 | "# **[READ CAREFULLY]**\n",
18 | " "
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "id": "09a1d630-73c8-4231-a1bc-0caa04e8ce3f",
24 | "metadata": {
25 | "heading_collapsed": "true",
26 | "tags": []
27 | },
28 | "source": [
29 | "# 1 - DESCRIPTION\n",
30 | "## What is this?\n",
31 | "This is a simple instruction notebook to help you submit your model.\n",
32 | "\n",
33 | "## Links\n",
34 | "- **NTIRE 24 challenge**: https://codalab.lisn.upsaclay.fr/competitions/17311#participate\n",
35 | "- **PIQ23 github**: https://github.com/DXOMARK-Research/PIQ2023\n",
36 | "- **PIQ23 Download link**: https://corp.dxomark.com/data-base-piq23/\n",
37 | "\n",
38 | "## Test set\n",
39 | "- The evaluation process consists of testing your model on an internal portrait dataset of around ***200 scenes of 7 images each, each shot with a different device of comparable quality***.\n",
40 | "- Images are either jpeg or TIFF with extensions ***'.jpg' or '.tiff'***.\n",
41 | "- Images are either landscape or portrait with a size of: ***1280x960*** or ***960x1280***.\n",
42 | "- Lighting conditions are: ***Indoor, Outdoor, Lowlight and Night***.\n",
43 | "- Different skin tones, genders, and ages are represented. Prepare for something slightly different from PIQ23.\n",
44 | "- Do not expect the same people to be present in the internal dataset.\n",
45 | "- The test csv will include ***image names and the categories of each class alongside the lighting conditions***. *Please refer to the images.csv*\n",
46 | "\n",
47 | "## Hardware requirements\n",
48 | "- You are free to do inference on one or multiple images.\n",
49 | "- Please make sure that your model is able to run on a ***single 8GB VRAM GPU***.\n",
50 | "- Maximum Teraflops: ***5TFLOPS*** *(Refer below to calculate teraflops on your model)*.\n",
51 | "- Maximum inference time: ***5 seconds/image***. *Model Loading does not count*.\n",
52 | "- Maximum model.pth size: ***2GB***.\n",
53 | "- Maximum RAM: **16GB**.\n",
54 | "- **NOTE: If your model complies with the 16GB RAM and 5s/image limits on CPU, you do not need to use a GPU.**\n",
55 | "\n",
56 | "## Submission\n",
57 | "- You need to submit a zip file with the following naming: ***submission_[LAST_NAME]_[FIRST_NAME].zip*** (refer to *./submission_23_PIQ.zip* for an example), including:\n",
58 | " - A main script with the following naming: ***model_[LAST_NAME]_[FIRST_NAME].py***;\n",
59 | " - Model weights with the following naming: ***weights_[LAST_NAME]_[FIRST_NAME].(pth,ckpt)***;\n",
60 | " - Scripts asset folder with the following naming: ***assets_[LAST_NAME]_[FIRST_NAME]*** including all of your scripts.\n",
61 | "- Your main script will be put in the ***./models***.\n",
62 | "- Your model weights will be saved in ***./weights***.\n",
63 | "- Your assets will be saved in ***./assets***.\n",
64 | "- You will get access to one level above the models/ folder.\n",
65 | "- Save your logs in ***./logs/log_[LAST_NAME]_[FIRST_NAME].log***.\n",
66 | "- Your script needs to load the ***./images.csv***.\n",
67 | "- Image paths are structured as follows: ***images/[class]/[imagename.(jpg,tiff)]***.\n",
68 | "- You need to save your results as follows: ***./results/result_[LAST_NAME]_[FIRST_NAME].csv***.\n",
69 | "- You need to add a ***column 'SCORE' to the images.csv***. *KEEP ALL OTHER METADATA*.\n",
70 | "- You can use a ***comma or semi-colon separator for the results***. Any other separator will not be considered.\n",
71 | "- Refer to *./models/model_23_PIQ.py* for an example.\n",
72 | "- Please follow the naming and the structure of the zip file carefully since the extraction of the zip data is automatic."
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "id": "f95a456b-49c9-448c-9583-92918450e0a4",
78 | "metadata": {
79 | "tags": []
80 | },
81 | "source": [
82 | "# 2 - ASSETS"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "id": "1fac33bf-779f-4c11-a891-430a32129e3c",
88 | "metadata": {
89 | "tags": []
90 | },
91 | "source": [
92 | "## CONSTANTS"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": 2,
98 | "id": "f0ed13e7-49cf-42e8-b20c-86298665eeab",
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "EXTENSIONS = ('.tiff', '.tif', '.TIFF', '.TIF', '.jpg', '.JPG', '.jpeg')\n",
103 | "CONDITIONS = ('OUTDOOR', 'INDOOR', 'LOWLIGHT', 'NIGHT')\n",
104 | "IMAGE_SIZE = ((1280, 960), (960, 1280))"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "id": "865a1ae0-04b3-475b-8937-e71ee935ae61",
110 | "metadata": {
111 | "tags": []
112 | },
113 | "source": [
114 | "## Hardware Check"
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": 1,
120 | "id": "74d8dd3a-b854-421d-9f01-92aefa04d13c",
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "import torch\n",
125 | "from torchvision.models import resnet50, ResNet50_Weights\n",
126 | "from thop import profile # pip install thop\n",
127 | "\n",
128 | "def torch_cuda_memory_usage():\n",
129 | " \"\"\"Returns CUDA memory usage if available\"\"\"\n",
130 | " if torch.cuda.is_available():\n",
131 | " torch.cuda.synchronize() # Wait for all CUDA kernels to finish\n",
132 | " allocated_memory = torch.cuda.memory_allocated() # Total allocated memory\n",
133 | " cached_memory = torch.cuda.memory_reserved() # Total cached memory\n",
134 | " return allocated_memory / (1024**3), cached_memory / (1024**3) # Convert bytes to GB\n",
135 | " else:\n",
136 | " return 0, 0\n",
137 | "\n",
138 | "def test_model_resources(model, batch):\n",
139 | " \n",
140 | " macs, params = profile(model, inputs=(batch, ), verbose=False)\n",
141 | " flops = macs * 2 # Convert MACs to FLOPs\n",
142 | " tflops = flops / (10**12) # Convert FLOPs to TFLOPs \n",
143 | " \n",
144 | " torch.cuda.reset_peak_memory_stats() # Reset peak memory stats for accurate peak measurement\n",
145 | "\n",
146 | " # Measure memory before inference\n",
147 | " allocated_before, cached_before = torch_cuda_memory_usage()\n",
148 | " \n",
149 | " model = model.cuda() # Move model to GPU\n",
150 | " batch = batch.cuda() # Move data to GPU\n",
151 | " \n",
152 | " # Dummy forward pass to measure VRAM usage\n",
153 | " with torch.no_grad():\n",
154 | " _ = model(batch)\n",
155 | " \n",
156 | " # Measure memory after inference\n",
157 | " allocated_after, cached_after = torch_cuda_memory_usage()\n",
158 | " peak_allocated = torch.cuda.max_memory_allocated() / (1024**3) # Peak allocated memory during inference\n",
159 | " \n",
160 | " vram_usage_allocated = allocated_after - allocated_before # Approximation of additional VRAM used during inference\n",
161 | " vram_usage_cached = cached_after - cached_before # Approximation based on cached memory\n",
162 | "\n",
163 | " print(f\"MACs: {macs}\")\n",
164 | " print(f\"FLOPs: {flops}\")\n",
165 | " print(f\"TFLOPs: {tflops}\")\n",
166 | " print(f\"Approx. Additional VRAM Usage (Allocated) during Inference: {vram_usage_allocated} GB\")\n",
167 | " print(f\"Approx. Additional VRAM Usage (Cached) during Inference: {vram_usage_cached} GB\")\n",
168 | " print(f\"Peak VRAM Usage during Inference: {peak_allocated} GB\")\n",
169 | " \n",
170 | " del model, batch # Free up memory\n",
171 | " torch.cuda.empty_cache() # Clear cache"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": 2,
177 | "id": "5f28821c-e496-4e31-bf44-b7ceeb0d6641",
178 | "metadata": {},
179 | "outputs": [
180 | {
181 | "name": "stdout",
182 | "output_type": "stream",
183 | "text": [
184 | "MACs: 1821354430464.0\n",
185 | "FLOPs: 3642708860928.0\n",
186 | "TFLOPs: 3.642708860928\n",
187 | "Approx. Additional VRAM Usage (Allocated) during Inference: 0.3515634536743164 GB\n",
188 | "Approx. Additional VRAM Usage (Cached) during Inference: 5.64453125 GB\n",
189 | "Peak VRAM Usage during Inference: 5.28789758682251 GB\n"
190 | ]
191 | }
192 | ],
193 | "source": [
194 | "\n",
195 | "model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1) # change to your model\n",
196 | "batch_size = 18 # Test the batch size you want\n",
197 | "batch = torch.stack([torch.randn(3, 1280, 960)]*batch_size)\n",
198 | "\n",
199 | "test_model_resources(model, batch)"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "id": "aee76f21-6440-4eff-8294-b17711ff4e40",
205 | "metadata": {
206 | "tags": []
207 | },
208 | "source": [
209 | "# 3 - SUBMISSION"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": 1,
215 | "id": "032f8823-38b7-4201-b5d5-1aaa7aaab549",
216 | "metadata": {},
217 | "outputs": [
218 | {
219 | "name": "stdout",
220 | "output_type": "stream",
221 | "text": [
222 | "Hello, I am an asset.\n"
223 | ]
224 | }
225 | ],
226 | "source": [
227 | "%run ./models/model_23_PIQ.py"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": null,
233 | "id": "293c6276-95f6-4e08-8589-0217107d3486",
234 | "metadata": {},
235 | "outputs": [],
236 | "source": []
237 | }
238 | ],
239 | "metadata": {
240 | "kernelspec": {
241 | "display_name": "py311",
242 | "language": "python",
243 | "name": "py311"
244 | },
245 | "language_info": {
246 | "codemirror_mode": {
247 | "name": "ipython",
248 | "version": 3
249 | },
250 | "file_extension": ".py",
251 | "mimetype": "text/x-python",
252 | "name": "python",
253 | "nbconvert_exporter": "python",
254 | "pygments_lexer": "ipython3",
255 | "version": "3.11.4"
256 | }
257 | },
258 | "nbformat": 4,
259 | "nbformat_minor": 5
260 | }
261 |
--------------------------------------------------------------------------------
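The Submission section of `Instructions.ipynb` above specifies the zip naming and contents (main script, weights, assets folder). The sketch below is a hypothetical convenience for packaging such an archive with Python's standard `zipfile` module; it is not part of the kit, it reuses the `23`/`PIQ` example name parts, and the flat internal layout of the archive is an assumption rather than a documented requirement.

```python
# Sketch: package a submission archive following the naming from Instructions.ipynb.
# Run from the Submission Kit root; '23'/'PIQ' stand in for LAST_NAME/FIRST_NAME,
# and placing everything at the archive root is an assumption, not a documented rule.
import zipfile
from pathlib import Path

last, first = '23', 'PIQ'  # replace with your LAST_NAME, FIRST_NAME

with zipfile.ZipFile(f'submission_{last}_{first}.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    zf.write(f'models/model_{last}_{first}.py', arcname=f'model_{last}_{first}.py')
    zf.write(f'weights/weights_{last}_{first}.pth', arcname=f'weights_{last}_{first}.pth')
    for path in Path(f'assets/assets_{last}_{first}').rglob('*'):
        if path.is_file():
            zf.write(path, arcname=str(path.relative_to('assets')))
```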
/src/models/archs/sem_hyperiqa_arch.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn.functional import one_hot
4 |
5 | from .sem_hyperiqa_util import HyperNet, RescaleNet, TargetNet, SceneClassNet
6 | from .arch_util import load_pretrained_network
7 |
8 |
9 | defaultHyperNetWeights = {
10 | # Only for original hyperIQA model with input size 224x224
11 | 'koniq': None #link: https://drive.google.com/file/d/1OOUmnbvpGea0LIGpIWEbOyxfWx6UCiiE/view
12 | }
13 |
14 | defaultWeights = {}
15 |
16 |
17 | class SemHyperIQA(nn.Module):
18 | def __init__(self, patchSize,
19 | hyperNetPretrained=None,
20 | pretrained=None,
21 | classify=None,
22 | rescale=None,
23 | **kwargs):
24 |
25 | super().__init__()
26 | patchRate = patchSize // 224
27 | self.classify = classify
28 | self.rescale = rescale
29 | self.classFeaturesOut = None
30 | self.nbPatchesIn = None
31 | self.classKey = kwargs.get('classKey', 'class')
32 | self.qualityKey = kwargs.get('qualityKey', 'quality')
33 | self.preQualityKey = kwargs.get('preQualityKey', 'preQuality')
34 | if self.classify is not None:
35 | self.classFeaturesOut = self.classify.get('numClasses', None)
36 | self.nbPatchesIn = self.classify.get('nbPatchesIn', 1) # Assume one patch if we do not want to concatenate patches
37 | self.hyperNet = HyperNet(16,
38 | 112 * patchRate,
39 | 224 * patchRate,
40 | 112 * patchRate,
41 | 56 * patchRate,
42 | 28 * patchRate,
43 | 14 * patchRate,
44 | 7 * patchRate,
45 | patchRate,
46 | classFeaturesOut=self.classFeaturesOut)#.cuda()
47 |
48 | if hyperNetPretrained is not None:
49 | load_pretrained_network(self.hyperNet, defaultHyperNetWeights.get(hyperNetPretrained, hyperNetPretrained))
50 | if pretrained is not None:
51 | load_pretrained_network(self, defaultWeights.get(pretrained, pretrained))
52 |
53 | if self.classify is not None:
54 | self.sceneClassNet = SceneClassNet(featureInSize=self.nbPatchesIn * 112 * patchRate * self.classFeaturesOut,
55 | **self.classify)
56 | self.sceneclassnet_params = self.sceneClassNet.parameters()
57 | if self.rescale is not None:
58 | if 'featureInSize' in self.rescale:
59 | self.classFeedback = False
60 | else:
61 | self.classFeedback = True
62 | self.rescale.update({'featureInSize': self.classify.get('numClasses')}) # intentionally throw error if rescale is defined with infeatures and no classification parameter
63 |
64 | self.rescaleNet = RescaleNet(**self.rescale)
65 | self.rescalenet_params = self.rescaleNet.parameters()
66 |
67 | backbone_params = list(map(id, self.hyperNet.res.parameters()))
68 | self.hypernet_params = filter(lambda p: id(p) not in backbone_params, self.hyperNet.parameters())
69 | self.resnet_params = filter(lambda p: id(p) in backbone_params, self.hyperNet.parameters())
70 |
71 | def forward(self, x, index=None, *args):
72 | # Generate weights for target network
73 | output = self.hyperNet(x)
74 |
75 | # Check if hyperNet returns hnFeatures
76 | if isinstance(output, tuple):
77 | paras, hnFeatures = output
78 | hnFeatures = self._consolidate_patches(hnFeatures, self.nbPatchesIn)
79 | else:
80 | paras = output
81 |
82 | if isinstance(paras, list):
83 | paras = self._stack_dicts(paras)
84 |
85 | # Building target network
86 | modelTarget = TargetNet(paras)
87 | for param in modelTarget.parameters():
88 | param.requires_grad = False
89 |
90 | # Quality score prediction
91 | inputTargetNet = paras['target_in_vec']
92 | predictionsQuality = modelTarget(inputTargetNet)
93 |
94 | predScene = None
95 | outputDict = {}
96 |
97 | if hasattr(self, 'sceneClassNet') and isinstance(output, tuple):
98 | predScene = self.sceneClassNet(hnFeatures)
99 | predictionsQuality = predictionsQuality.reshape(self.nbPatchesIn, -1).mean(dim=0)
100 |
101 | outputDict[self.qualityKey] = predictionsQuality
102 |
103 | if hasattr(self, 'rescaleNet') and hasattr(self, 'classFeedback'):
104 | outputDict[self.preQualityKey] = predictionsQuality
105 |
106 | if not self.classFeedback and index is not None:
107 | index_ = one_hot(index, num_classes=self.rescale['featureInSize']).to(torch.float32)
108 | scoreWeights = self.rescaleNet(index_)
109 |
110 | elif hasattr(self, 'sceneClassNet') and isinstance(output, tuple):
111 | scoreWeights = self.rescaleNet(predScene.softmax(dim=1))
112 |
113 | else:
114 | raise ValueError("Class feedback needs class prediction, which is not defined in this configuration")
115 |
116 | # FIXME: Fit with any polynomial degree instead of only manual linear fit.
117 | # Re-scale the score prediction with alpha/beta
118 | outputDict[self.qualityKey] = scoreWeights[:,0] * predictionsQuality + scoreWeights[:,1]
119 |
120 | if predScene is not None:
121 | return {self.classKey: predScene,
122 | **{keyOut: valueOut.unsqueeze(1) for keyOut, valueOut in outputDict.items()}}
123 |
124 | return outputDict
125 |
126 | @staticmethod
127 | def _consolidate_patches(hnFeatures, patches_per_image):
128 | # Check if hnFeatures is a list
129 | if isinstance(hnFeatures, list):
130 | # If first element of the list is a tensor and is 2D, stack along 1st dimension
131 | if hnFeatures[0].dim() >= 2:
132 | hnFeatures = torch.cat(hnFeatures, dim=0)
133 | # If first element of the list is a tensor and is 1D, convert to 2D and stack along 1st dimension
134 | elif hnFeatures[0].dim() == 1:
135 | hnFeatures = torch.stack(hnFeatures, dim=0)
136 |
137 | # Ensure that the total number of features is a multiple of patches_per_image
138 | if hnFeatures.shape[0] % patches_per_image != 0:
139 | raise ValueError("Total number of features is not a multiple of patches_per_image")
140 |
141 | # Reshape the tensor
142 | consolidated_features = hnFeatures.reshape(-1, hnFeatures.shape[1] * patches_per_image)
143 |
144 | return consolidated_features
145 |
146 | @staticmethod
147 | def _stack_dicts(dict_list):
148 | # Ensure dict_list is not empty
149 | if not dict_list:
150 | return {}
151 |
152 | # Create a new dictionary where each key is a stack of the corresponding values from the dictionaries in dict_list
153 | stacked_dict = {key: torch.stack([d[key] for d in dict_list], dim=0) for key in dict_list[0].keys()}
154 |
155 | return stacked_dict
156 |
157 | class FullHyperIQA(SemHyperIQA):
158 | def __init__(self, patchSize, hyperNetPretrained=None, pretrained=None, classify=None, rescale=None, **kwargs):
159 | super().__init__(patchSize, hyperNetPretrained, pretrained, classify, rescale, **kwargs)
160 | self.weightQualityByClass = kwargs.get('weightQualityByClass', 0)
161 |
162 | def forward(self, x, index=None, *args):
163 | if self.weightQualityByClass <= 0:
164 | return super().forward(x, index, *args)
165 |
166 | # Generate weights for target network
167 | output = self.hyperNet(x)
168 |
169 | # Check if hyperNet returns hnFeatures
170 | if isinstance(output, tuple):
171 | paras, hnFeatures = output
172 | hnFeatures = self._consolidate_patches(hnFeatures, self.nbPatchesIn)
173 | else:
174 | paras = output
175 |
176 | if isinstance(paras, list):
177 | paras = self._stack_dicts(paras)
178 |
179 | # Building target network
180 | modelTarget = TargetNet(paras)
181 | for param in modelTarget.parameters():
182 | param.requires_grad = False
183 |
184 | # Quality score prediction
185 | inputTargetNet = paras['target_in_vec']
186 | predictionsQuality = modelTarget(inputTargetNet)
187 |
188 | predScene = None
189 | outputDict = {}
190 |
191 | if hasattr(self, 'sceneClassNet') and isinstance(output, tuple):
192 | predScene = self.sceneClassNet(hnFeatures)
193 | predictionsQuality = predictionsQuality.reshape(self.nbPatchesIn, -1).mean(dim=0)
194 |
195 | outputDict[self.qualityKey] = predictionsQuality
196 |
197 | if hasattr(self, 'rescaleNet') and hasattr(self, 'classFeedback'):
198 |
199 | outputDict[self.preQualityKey] = predictionsQuality
200 |
201 | if not self.classFeedback and index is not None:
202 | index_ = one_hot(index, num_classes=self.rescale['featureInSize']).to(torch.float32)
203 | scoreWeights = self.rescaleNet(index_)
204 | # FIXME: Fit with any polynomial degree instead of only manual linear fit.
205 | # Re-scale the score prediction with alpha/beta
206 | outputDict[self.qualityKey] = scoreWeights[:,0] * predictionsQuality + scoreWeights[:,1]
207 |
208 | elif hasattr(self, 'sceneClassNet') and isinstance(output, tuple):
209 | # Extracting top-k class probabilities and indices
210 | topk_probs, topk_indices = torch.topk(predScene.softmax(dim=1), self.weightQualityByClass, dim=1)
211 |
212 | # Normalizing the top-k probabilities
213 | topk_probs /= topk_probs.sum(dim=1, keepdim=True)
214 |
215 | # Rescale quality prediction for each of the top-k classes separately
216 | weighted_rescaled_qualities = []
217 | for k in range(self.weightQualityByClass):
218 | one_hot_class = one_hot(topk_indices[:, k], num_classes=self.rescale['featureInSize']).to(torch.float32)
219 | scoreWeights_for_class = self.rescaleNet(one_hot_class)
220 | rescaled_quality = scoreWeights_for_class[:, 0] * predictionsQuality + scoreWeights_for_class[:, 1]
221 | weighted_rescaled_qualities.append(rescaled_quality)
222 |
223 | # Weighted aggregation of the rescaled qualities
224 | weighted_rescaled_qualities = torch.stack(weighted_rescaled_qualities, dim=1)
225 | outputDict[self.qualityKey] = (weighted_rescaled_qualities * topk_probs).sum(dim=-1)
226 |
227 | else:
228 | # This branch should never be reached: class feedback requires a scene-class prediction head.
229 | raise ValueError("Class feedback needs class prediction, which is not defined in this configuration")
230 |
231 | if predScene is not None:
232 | return {self.classKey: predScene,
233 | **{keyOut: valueOut.unsqueeze(1) for keyOut, valueOut in outputDict.items()}}
234 |
235 | return outputDict
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PIQ23: An Image Quality Assessment Dataset for Portraits
2 |
3 | 
4 |
5 | This is the official repository for PIQ23, accepted at CVPR 2023.
6 |
7 |
8 |
9 |
10 | [PIQ23](https://corp.dxomark.com/data-base-piq23/)
11 | [CVPR2023](https://openaccess.thecvf.com/content/CVPR2023/html/Chahine_An_Image_Quality_Assessment_Dataset_for_Portraits_CVPR_2023_paper.html) / [FHIQA](https://arxiv.org/abs/2402.09178)
12 | [NTIRE24](https://codalab.lisn.upsaclay.fr/competitions/17311#learn_the_details)
13 | [Video](https://youtu.be/cvWjOWq5wnk) [Poster](Imgs/CVPR_Poster_PIQ23.png)
14 |
15 | ## Introduction
16 | We present PIQ23, a portrait-specific image quality assessment dataset of 5116 images of predefined scenes acquired by more than 100 smartphones, covering a high variety of brands, models, and use cases. The dataset features individuals from a wide range of ages, genders, and ethnicities who have given explicit and informed consent for their photographs to be used in public research. It is annotated by pairwise comparisons (PWC) collected from over 30 image quality experts for three image attributes: face detail preservation, face target exposure, and overall image quality.
17 |
18 | ## PIQ23
19 |
20 | 
21 |
22 | **Important Notes**
23 | - By downloading this dataset you agree to the terms and conditions.
24 | - All files in the PIQ23 dataset are available for non-commercial research purposes only.
25 | - You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purposes, any portion of the images and any portion of derived data.
26 | - You agree to remove, throughout the life cycle of the dataset, any set of images following the request of the authors.
27 |
28 | **Dataset Access**
29 | - The PIQ23 dataset (5GB) can be downloaded from the DXOMARK CORP [**website**](https://corp.dxomark.com/data-base-piq23/).
30 | - You need to fill out the form and agree to the terms and conditions in order to request access to the dataset. We guarantee open access to any individual or institution following these instructions.
31 | - Once your request is validated, you will receive an automatic email with a temporary link to download the dataset.
32 |
33 | **Overview**
34 |
35 | The dataset structure is as follows:
36 | ```
37 | ├── Details
38 | ├── Overall
39 | ├── Exposure
40 | ├── Scores_Details.csv
41 | ├── Scores_Overall.csv
42 | └── Scores_Exposure.csv
43 | ```
44 | Each folder is associated with an attribute (Details, Overall, and Exposure) and contains the images of the corresponding regions of interest, named as follows: {img_nb}\_{scene_name}\_{scene_idx}.{ext}.
45 |
46 | The CSV files include the following entries (a minimal loading sketch is given after the list):
47 | - **IMAGE PATH**: relative path to the image ({Attribute}\\{Image name})
48 | - **IMAGE**: image name
49 | - **JOD**: JOD (just objectionable differences) score of the image
50 | - **JOD STD**: JOD standard deviation
51 | - **CI LOW**: lower bound of image's confidence interval
52 | - **CI HIGH**: upper bound of image's confidence interval
53 | - **CI RANGE**: CI HIGH - CI LOW
54 | - **QUALITY LEVEL**: preliminary quality level (result of the clustering over CIs)
55 | - **CLUSTER**: final quality level (result of the variance analysis and community detection)
56 | - **TOTAL COMPARISONS**: total number of comparisons for this image
57 | - **SCENE**: scene name
58 | - **ATTRIBUTE**: attribute (Exposure, Details or Overall)
59 | - **SCENE IDX**: scene index (from 0 to 49)
60 | - **CONDITION**: lighting condition (Outdoor, Indoor, Lowlight or Night)
61 |
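For convenience, here is a minimal loading sketch (pandas). It assumes the dataset was extracted to a local `PIQ23/` folder; the folder name and the added `FULL PATH` column are only illustrative:

```python
import os
import pandas as pd

# Assumed local layout: adjust "PIQ23" to wherever you extracted the dataset.
datasetRoot = "PIQ23"
scoreDf = pd.read_csv(os.path.join(datasetRoot, "Scores_Exposure.csv"))

# IMAGE PATH is stored with Windows-style separators ({Attribute}\{Image name});
# normalize it before joining with the dataset root.
scoreDf["FULL PATH"] = [
    os.path.join(datasetRoot, p.replace("\\", "/")) for p in scoreDf["IMAGE PATH"]
]

# JOD is the regression target; SCENE / SCENE IDX / CONDITION are useful for grouping.
print(scoreDf[["FULL PATH", "JOD", "SCENE", "SCENE IDX", "CONDITION"]].head())
```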
62 | ## Test Splits
63 | We provide two **official** test splits for PIQ23:
64 | - **Device split**:
65 | - We split PIQ23 by device in order to test the general performance of the trained models on the given scenes.
66 | - The test set contains around 30% of images from each scene, thus 30% of the whole dataset.
67 | - To avoid device bias, we have carefully selected devices from different quality levels and price ranges for the test set. This split can still include some images of the test devices in the training set, and vice versa, since the distribution of devices per scene is not completely uniform. We can guarantee that more than 90% of the training and testing devices do not overlap.
68 | - We first sort the devices by their median percentage of images across scenes, split them into five groups from the most common device to the least common, and sample from these five groups until we reach around 30% of the dataset.
69 | - The device split CSV can be found in "Test split\Device Split.csv".
70 | - The train and test CSVs for the different attributes can be found in "Test split\Device Split\".
71 | - **Scene split**:
72 | - We split PIQ23 by scene in order to test the generalization power of the trained models.
73 | - We have carefully chosen 15 of the 50 scenes for the test set, covering around 30% of the images from each condition and thus around 30% of the whole dataset (1486/5116 images).
74 | - To select the test set, we first sort the scenes by the percentage of images in the corresponding condition (Outdoor, Indoor, Lowlight, Night), then select a group of scenes covering a variety of content (framing, lighting, skin tones, etc.) until we reach around 30% of the images for each condition.
75 | - The scene split CSV can be found in "Test split\Scene Split.csv".
76 | - The train and test CSVs for the different attributes can be found in "Test split\Scene Split\".
77 | - Examples of the test and train scenes can be found in "Test split\Scene Split\Scene examples".
78 |
79 | An example of how to use the splits can be found in the "Test split example.ipynb" notebook; a condensed version is sketched below.
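The following is a minimal sketch of the scene split, mirroring the notebook (it assumes the score CSVs and the "Test split" folder are reachable from the working directory):

```python
import pandas as pd

# Assumed paths: adjust to your local layout.
scoreDf = pd.read_csv("Scores_Exposure.csv")              # per-image scores for one attribute
sceneSplitDf = pd.read_csv("Test split/Scene Split.csv")  # columns: SCENE, SPLIT, NB IMAGES

# Attach the Train/Test label to each image by merging on the scene name.
splitLabels = sceneSplitDf.drop("NB IMAGES", axis=1)
trainDf = pd.merge(scoreDf, splitLabels[splitLabels["SPLIT"] == "Train"], on="SCENE", how="inner")
testDf = pd.merge(scoreDf, splitLabels[splitLabels["SPLIT"] == "Test"], on="SCENE", how="inner")

print(len(trainDf), "train images,", len(testDf), "test images")
```

The device split works the same way, except that "Test split/Device Split.csv" is keyed by **IMAGE** instead of **SCENE**.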
80 |
81 | ***NB:***
82 | - Please make sure to report results on both splits in your papers.
83 | - The paper's main results cannot be reproduced with these splits. We will publish official performance figures on these splits soon.
84 |
85 | ## Benchmarks
86 |
87 | **Note on the experiments**:
88 | - The reported results represent the **median of the metrics across all scenes**. Please note that the median is used to limit the influence of outlier scenes, in case they exist (a small evaluation sketch is given after these notes).
89 | - The models reported as *optimal* in these experiments are the ones that scored the **maximum SROCC** on the test sets. Please take into consideration that a maximum SROCC does not imply a maximum in the other metrics.
90 | - A better approach would be to choose the optimal model based on a combination of metrics.
91 | - A margin of error should be taken into account for these metrics: a difference of a fraction of a percentage point in correlation can be due to multiple factors and might not be repeatable.
92 | - The base resolution for the models is 1200; however, for the HyperIQA variants we had to redefine the architecture, since the original model only accepts 224x224 inputs. The new architecture accepts resolutions that are a multiple of 224 (1344 in our case).
93 | - For the HyperIQA variants, only the ResNet50 backbone is pretrained on ImageNet; there was no IQA pretraining.
94 |
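For concreteness, the per-scene evaluation described above can be sketched as follows. This is only a sketch, not the exact evaluation code; it assumes a dataframe with the ground-truth **JOD** column and a hypothetical **PRED** column holding the model outputs:

```python
import numpy as np
import pandas as pd
from scipy import stats

def per_scene_medians(df):
    """Per-scene SROCC/PLCC/KROCC/MAE, then the median of each metric across scenes."""
    rows = []
    for scene, g in df.groupby("SCENE"):
        rows.append({
            "SCENE": scene,
            "SROCC": stats.spearmanr(g["JOD"], g["PRED"])[0],
            "PLCC": stats.pearsonr(g["JOD"], g["PRED"])[0],
            "KROCC": stats.kendalltau(g["JOD"], g["PRED"])[0],
            "MAE": np.mean(np.abs(g["JOD"] - g["PRED"])),
        })
    perScene = pd.DataFrame(rows)
    # The tables below report the median of each metric across all scenes.
    return perScene[["SROCC", "PLCC", "KROCC", "MAE"]].median()
```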
95 |
96 |
97 |
98 | **Device Split**
99 | 
100 | | Model \ Attribute | Details SROCC | Details PLCC | Details KROCC | Details MAE | Exposure SROCC | Exposure PLCC | Exposure KROCC | Exposure MAE | Overall SROCC | Overall PLCC | Overall KROCC | Overall MAE |
101 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
102 | | DBCNN (1200 x LIVEC) | 0.787 | 0.783 | 0.59 | 0.777 | 0.807 | 0.804 | 0.611 | 0.704 | 0.83 | 0.824 | 0.653 | 0.656 |
103 | | MUSIQ (1200 x PAQ2PIQ) | 0.824 | 0.831 | 0.65 | 0.627 | 0.848 | 0.859 | 0.671 | 0.585 | 0.848 | 0.837 | 0.65 | 0.626 |
104 | | HyperIQA (1344 (224*6) x No IQA pretraining) | 0.793 | 0.766 | 0.618 | 0.751 | 0.8 | 0.828 | 0.636 | 0.721 | 0.818 | 0.825 | 0.66 | 0.612 |
105 | | SEM-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.854 | 0.847 | 0.676 | 0.645 | 0.826 | 0.858 | 0.65 | 0.635 | 0.845 | 0.856 | 0.674 | 0.641 |
106 | | SEM-HyperIQA-CO (1344 (224*6) x No IQA pretraining) | 0.829 | 0.821 | 0.641 | 0.697 | 0.816 | 0.843 | 0.633 | 0.668 | 0.829 | 0.843 | 0.64 | 0.624 |
107 | | SEM-HyperIQA-SO (1344 (224*6) x No IQA pretraining) | 0.874 | 0.871 | 0.709 | 0.583 | 0.826 | 0.846 | 0.651 | 0.678 | 0.84 | 0.849 | 0.661 | 0.639 |
108 | 
210 |
211 |
212 |
213 |
214 | **Scene Split**
215 | 
216 | | Model \ Attribute | Details SROCC | Details PLCC | Details KROCC | Details MAE | Exposure SROCC | Exposure PLCC | Exposure KROCC | Exposure MAE | Overall SROCC | Overall PLCC | Overall KROCC | Overall MAE |
217 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
218 | | DBCNN (1200 x LIVEC) | 0.59 | 0.51 | 0.45 | 0.99 | 0.69 | 0.69 | 0.51 | 0.91 | 0.59 | 0.64 | 0.43 | 1.04 |
219 | | MUSIQ (1200 x PAQ2PIQ) | 0.72 | 0.77 | 0.53 | 0.90 | 0.79 | 0.772 | 0.59 | 0.87 | 0.736 | 0.74 | 0.54 | 0.95 |
220 | | HyperIQA (1344 (224*6) x No IQA pretraining) | 0.701 | 0.668 | 0.504 | 0.936 | 0.692 | 0.684 | 0.498 | 0.863 | 0.74 | 0.736 | 0.55 | 0.989 |
221 | | SEM-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.732 | 0.649 | 0.547 | 0.879 | 0.716 | 0.697 | 0.53 | 0.967 | 0.749 | 0.752 | 0.558 | 1.033 |
222 | | SEM-HyperIQA-CO (1344 (224*6) x No IQA pretraining) | 0.746 | 0.714 | 0.549 | 0.849 | 0.698 | 0.698 | 0.517 | 0.945 | 0.739 | 0.736 | 0.55 | 1.038 |
223 | | FULL-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.74 | 0.72 | 0.55 | 0.8 | 0.76 | 0.71 | 0.57 | 0.85 | 0.78 | 0.78 | 0.59 | 1.12 |
326 |
327 |
328 |
329 | ## TO DO
330 | - Add SemHyperIQA Code
331 | - Add Stat analysis code
332 | - Add other benchmarks code
333 | - Add pretrained weights
334 |
335 | ## Citation
336 | Please cite the paper/dataset as follows:
337 | ```bibtex
338 | @InProceedings{Chahine_2023_CVPR,
339 | author = {Chahine, Nicolas and Calarasanu, Stefania and Garcia-Civiero, Davide and Cayla, Th\'eo and Ferradans, Sira and Ponce, Jean},
340 | title = {An Image Quality Assessment Dataset for Portraits},
341 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
342 | month = {June},
343 | year = {2023},
344 | pages = {9968-9978}
345 | }
346 |
347 | ```
348 | ## License
349 | Provided that the user complies with the Terms of Use, the provider grants a limited, non-exclusive, personal, non-transferable, non-sublicensable, and revocable license to access, download and use the Database for internal and research purposes only, during the specified term. The User is required to comply with the Provider's reasonable instructions, as well as all applicable statutes, laws, and regulations.
350 |
351 | ## About
352 | For any questions please contact: piq2023@dxomark.com
353 |
354 |
355 |
--------------------------------------------------------------------------------
/src/models/archs/sem_hyperiqa_util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import functional as F
4 | from torch.nn import init
5 | import math
6 |
7 | from .arch_util import load_file_from_url
8 |
9 | model_urls = {
10 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
11 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
12 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
13 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
14 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
15 | }
16 |
17 |
18 | class HyperNet(nn.Module):
19 | """
20 | Hyper network for learning perceptual rules.
21 | # Code adapted from: https://github.com/SSL92/hyperIQA
22 |
23 | Args:
24 | lda_out_channels: local distortion aware module output size.
25 | hyper_in_channels: input feature channels for hyper network.
26 | target_in_size: input vector size for target network.
27 | target_fc(i)_size: fully-connected layer sizes of the target network.
28 | feature_size: input feature map width/height for hyper network.
29 |
30 | Note:
31 | For size match, input args must satisfy: 'target_fc(i)_size * target_fc(i+1)_size' is divisible by 'feature_size ^ 2'.
32 | """
33 |
34 | def __init__(self, lda_out_channels, hyper_in_channels, target_in_size, target_fc1_size, target_fc2_size, target_fc3_size, target_fc4_size, feature_size, patch_rate, **kwargs):
35 | super(HyperNet, self).__init__()
36 | self.hyperInChn = hyper_in_channels
37 | self.target_in_size = target_in_size
38 | self.f1 = target_fc1_size
39 | self.f2 = target_fc2_size
40 | self.f3 = target_fc3_size
41 | self.f4 = target_fc4_size
42 | self.feature_size = feature_size
43 | self.patch_rate = patch_rate
44 |
45 | self.res = resnet50_backbone(lda_out_channels, target_in_size, patch_rate=self.patch_rate, pretrained=True)
46 |
47 | self.pool = nn.AdaptiveAvgPool2d((1, 1))
48 | self.poolClass = None
49 | if kwargs.get('classFeaturesOut', None) is not None:
50 | self.poolClass = nn.AdaptiveAvgPool2d((1, kwargs.get('classFeaturesOut')))
51 |
52 | # Conv layers for resnet output features
53 | self.conv1 = nn.Sequential(
54 | nn.Conv2d(2048, 1024, 1, padding=(0, 0)),
55 | nn.ReLU(inplace=True),
56 | nn.Conv2d(1024, 512, 1, padding=(0, 0)),
57 | nn.ReLU(inplace=True),
58 | nn.Conv2d(512, self.hyperInChn, 1, padding=(0, 0)),
59 | nn.ReLU(inplace=True)
60 | )
61 |
62 | # Hyper network part, conv for generating target fc weights, fc for generating target fc biases
63 | self.fc1w_conv = nn.Conv2d(self.hyperInChn, int(self.target_in_size * self.f1 / feature_size ** 2), 3, padding=(1, 1))
64 | self.fc1b_fc = nn.Linear(self.hyperInChn, self.f1)
65 |
66 | self.fc2w_conv = nn.Conv2d(self.hyperInChn, int(self.f1 * self.f2 / feature_size ** 2), 3, padding=(1, 1))
67 | self.fc2b_fc = nn.Linear(self.hyperInChn, self.f2)
68 |
69 | self.fc3w_conv = nn.Conv2d(self.hyperInChn, int(self.f2 * self.f3 / feature_size ** 2), 3, padding=(1, 1))
70 | self.fc3b_fc = nn.Linear(self.hyperInChn, self.f3)
71 |
72 | self.fc4w_conv = nn.Conv2d(self.hyperInChn, int(self.f3 * self.f4 / feature_size ** 2), 3, padding=(1, 1))
73 | self.fc4b_fc = nn.Linear(self.hyperInChn, self.f4)
74 |
75 | self.fc5w_fc = nn.Linear(self.hyperInChn, self.f4)
76 | self.fc5b_fc = nn.Linear(self.hyperInChn, 1)
77 |
78 | # initialize
79 | for i, m_name in enumerate(self._modules):
80 | if i > 3:
81 | nn.init.kaiming_normal_(self._modules[m_name].weight.data)
82 |
83 | def _forward(self, imgTensorIn):
84 | feature_size = self.feature_size
85 | res_out = self.res(imgTensorIn)
86 |
87 | # input vector for target net
88 | target_in_vec = res_out['target_in_vec'].view(-1, self.target_in_size, 1, 1)
89 |
90 | # input features for hyper net
91 | hyper_in_feat = self.conv1(res_out['hyper_in_feat']).view(-1, self.hyperInChn, feature_size, feature_size)
92 |
93 | # generating target net weights & biases
94 | target_fc1w = self.fc1w_conv(hyper_in_feat).view(-1, self.f1, self.target_in_size, 1, 1)
95 | target_fc1b = self.fc1b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f1)
96 |
97 | target_fc2w = self.fc2w_conv(hyper_in_feat).view(-1, self.f2, self.f1, 1, 1)
98 | target_fc2b = self.fc2b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f2)
99 |
100 | target_fc3w = self.fc3w_conv(hyper_in_feat).view(-1, self.f3, self.f2, 1, 1)
101 | target_fc3b = self.fc3b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f3)
102 |
103 | target_fc4w = self.fc4w_conv(hyper_in_feat).view(-1, self.f4, self.f3, 1, 1)
104 | target_fc4b = self.fc4b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f4)
105 |
106 | target_fc5w = self.fc5w_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1, self.f4, 1, 1)
107 | target_fc5b = self.fc5b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1)
108 |
109 | out = {}
110 | out['target_in_vec'] = target_in_vec
111 | out['target_fc1w'] = target_fc1w
112 | out['target_fc1b'] = target_fc1b
113 | out['target_fc2w'] = target_fc2w
114 | out['target_fc2b'] = target_fc2b
115 | out['target_fc3w'] = target_fc3w
116 | out['target_fc3b'] = target_fc3b
117 | out['target_fc4w'] = target_fc4w
118 | out['target_fc4b'] = target_fc4b
119 | out['target_fc5w'] = target_fc5w
120 | out['target_fc5b'] = target_fc5b
121 |
122 | if self.poolClass:
123 | return out, torch.flatten(self.poolClass(hyper_in_feat), 1)
124 | return out
125 |
126 |
127 | def forward(self, input):
128 |
129 | if isinstance(input, list):
130 | # Ideally this branch should not be used: the input should be a single tensor of concatenated patches, not a list of inputs.
131 | out_all = []
132 | hyper_in_all = []
133 | for elt in input:
134 | # Here the input is treated as a list of patch tensors, each of shape torch.Size([batch_size, 3, patch_size, patch_size]).
135 | out = self._forward(elt)
136 | if self.poolClass:
137 | out_all.append(out[0])
138 | hyper_in_all.append(out[1])
139 | else:
140 | out_all.append(out)
141 | if self.poolClass:
142 | return out_all, hyper_in_all
143 | return out_all
144 |
145 | return self._forward(input)
146 |
147 |
148 |
149 | class TargetNet(nn.Module):
150 | """
151 | Target network for quality prediction.
152 | """
153 | def __init__(self, paras):
154 | super(TargetNet, self).__init__()
155 | self.l1 = nn.Sequential(
156 | TargetFC(paras['target_fc1w'], paras['target_fc1b']),
157 | nn.Sigmoid(),
158 | )
159 | self.l2 = nn.Sequential(
160 | TargetFC(paras['target_fc2w'], paras['target_fc2b']),
161 | nn.Sigmoid(),
162 | )
163 |
164 | self.l3 = nn.Sequential(
165 | TargetFC(paras['target_fc3w'], paras['target_fc3b']),
166 | nn.Sigmoid(),
167 | )
168 |
169 | self.l4 = nn.Sequential(
170 | TargetFC(paras['target_fc4w'], paras['target_fc4b']),
171 | nn.Sigmoid(),
172 | TargetFC(paras['target_fc5w'], paras['target_fc5b']),
173 | )
174 |
175 | def forward(self, x):
176 | q = self.l1(x)
177 |
178 | q = self.l2(q)
179 | q = self.l3(q)
180 | q = self.l4(q).squeeze()
181 |
182 | return q
183 |
184 |
185 | class TargetFC(nn.Module):
186 | """
187 | Fully-connected operations for the target net, implemented as a grouped 1x1 convolution so that each sample in the batch uses its own generated weights and biases.
188 | """
189 | def __init__(self, weight, bias):
190 | super(TargetFC, self).__init__()
191 | self.weight = weight
192 | self.bias = bias
193 |
194 | def forward(self, input_):
195 | input_re = input_.view(-1, input_.shape[0] * input_.shape[1], input_.shape[2], input_.shape[3])
196 | weight_re = self.weight.view(self.weight.shape[0] * self.weight.shape[1], self.weight.shape[2], self.weight.shape[3], self.weight.shape[4])
197 | bias_re = self.bias.view(self.bias.shape[0] * self.bias.shape[1])
198 |
199 | out = F.conv2d(input=input_re, weight=weight_re, bias=bias_re, groups=self.weight.shape[0])
200 |
201 | return out.view(input_.shape[0], self.weight.shape[1], input_.shape[2], input_.shape[3])
202 |
203 |
204 | class Bottleneck(nn.Module):
205 | expansion = 4
206 |
207 | def __init__(self, inplanes, planes, stride=1, downsample=None):
208 | super(Bottleneck, self).__init__()
209 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
210 | self.bn1 = nn.BatchNorm2d(planes)
211 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
212 | padding=1, bias=False)
213 | self.bn2 = nn.BatchNorm2d(planes)
214 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
215 | self.bn3 = nn.BatchNorm2d(planes * 4)
216 | self.relu = nn.ReLU(inplace=True)
217 | self.downsample = downsample
218 | self.stride = stride
219 |
220 | def forward(self, x):
221 | residual = x
222 |
223 | out = self.conv1(x)
224 | out = self.bn1(out)
225 | out = self.relu(out)
226 |
227 | out = self.conv2(out)
228 | out = self.bn2(out)
229 | out = self.relu(out)
230 |
231 | out = self.conv3(out)
232 | out = self.bn3(out)
233 |
234 | if self.downsample is not None:
235 | residual = self.downsample(x)
236 |
237 | out += residual
238 | out = self.relu(out)
239 |
240 | return out
241 |
242 |
243 | class ResNetBackbone(nn.Module):
244 | """
245 | ResNet50 backbone model for feature extraction
246 | Outputs:
247 | out : dictionary containing the input features of the hyper network and the target features for FC-quality
248 | """
249 |
250 | def __init__(self, lda_out_channels, in_chn, block, layers, patch_rate, num_classes=1000):
251 | super(ResNetBackbone, self).__init__()
252 | self.inplanes = 64
253 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
254 | self.bn1 = nn.BatchNorm2d(64)
255 | self.relu = nn.ReLU(inplace=True)
256 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
257 | self.layer1 = self._make_layer(block, 64, layers[0])
258 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
259 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
260 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
261 |
262 | # local distortion aware module
263 | self.lda1_pool = nn.Sequential(
264 | nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
265 | nn.AvgPool2d(7, stride=7),
266 |
267 | )
268 | self.lda1_fc = nn.Linear(16 * 64 * patch_rate * patch_rate , lda_out_channels)
269 |
270 | self.lda2_pool = nn.Sequential(
271 | nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
272 | nn.AvgPool2d(7, stride=7),
273 | )
274 | self.lda2_fc = nn.Linear(32 * 16 * patch_rate * patch_rate, lda_out_channels)
275 |
276 | self.lda3_pool = nn.Sequential(
277 | nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
278 | nn.AvgPool2d(7, stride=7),
279 | )
280 | self.lda3_fc = nn.Linear(64 * 4 * patch_rate * patch_rate, lda_out_channels)
281 |
282 | self.lda4_pool = nn.AvgPool2d(7, stride=7)
283 | self.lda4_fc = nn.Linear(2048 * patch_rate * patch_rate, in_chn - lda_out_channels * 3)
284 |
285 | for m in self.modules():
286 | if isinstance(m, nn.Conv2d):
287 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
288 | m.weight.data.normal_(0, math.sqrt(2. / n))
289 | elif isinstance(m, nn.BatchNorm2d):
290 | m.weight.data.fill_(1)
291 | m.bias.data.zero_()
292 |
293 | # initialize
294 | nn.init.kaiming_normal_(self.lda1_pool._modules['0'].weight.data)
295 | nn.init.kaiming_normal_(self.lda2_pool._modules['0'].weight.data)
296 | nn.init.kaiming_normal_(self.lda3_pool._modules['0'].weight.data)
297 | nn.init.kaiming_normal_(self.lda1_fc.weight.data)
298 | nn.init.kaiming_normal_(self.lda2_fc.weight.data)
299 | nn.init.kaiming_normal_(self.lda3_fc.weight.data)
300 | nn.init.kaiming_normal_(self.lda4_fc.weight.data)
301 |
302 | def _make_layer(self, block, planes, blocks, stride=1):
303 | downsample = None
304 | if stride != 1 or self.inplanes != planes * block.expansion:
305 | downsample = nn.Sequential(
306 | nn.Conv2d(self.inplanes, planes * block.expansion,
307 | kernel_size=1, stride=stride, bias=False),
308 | nn.BatchNorm2d(planes * block.expansion),
309 | )
310 |
311 | layers = []
312 | layers.append(block(self.inplanes, planes, stride, downsample))
313 | self.inplanes = planes * block.expansion
314 | for i in range(1, blocks):
315 | layers.append(block(self.inplanes, planes))
316 |
317 | return nn.Sequential(*layers)
318 |
319 | def forward(self, x):
320 | x = self.conv1(x)
321 | x = self.bn1(x)
322 | x = self.relu(x)
323 | x = self.maxpool(x)
324 | x = self.layer1(x)
325 |
326 | # same effect as the LDA operation in the paper, but saves much more memory
327 | lda_1 = self.lda1_fc(self.lda1_pool(x).view(x.size(0), -1))
328 | x = self.layer2(x)
329 | lda_2 = self.lda2_fc(self.lda2_pool(x).view(x.size(0), -1))
330 | x = self.layer3(x)
331 | lda_3 = self.lda3_fc(self.lda3_pool(x).view(x.size(0), -1))
332 | x = self.layer4(x)
333 | lda_4 = self.lda4_fc(self.lda4_pool(x).view(x.size(0), -1))
334 |
335 | vec = torch.cat((lda_1, lda_2, lda_3, lda_4), 1)
336 |
337 | out = {}
338 | out['hyper_in_feat'] = x
339 | out['target_in_vec'] = vec
340 |
341 | return out
342 |
343 |
344 | def resnet50_backbone(lda_out_channels, in_chn, patch_rate, pretrained=False, **kwargs):
345 | """Constructs a ResNet-50 model_hyper.
346 |
347 | Args:
348 | pretrained (bool): If True, returns a model_hyper pre-trained on ImageNet
349 | """
350 | model = ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], patch_rate, **kwargs)
351 | if pretrained:
352 | save_model = torch.load(load_file_from_url(model_urls['resnet50']))
353 | model_dict = model.state_dict()
354 | state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}
355 | model_dict.update(state_dict)
356 | model.load_state_dict(model_dict)
357 | else:
358 | model.apply(weights_init_xavier)
359 |
360 | return model
361 |
362 | def weights_init_xavier(m):
363 | classname = m.__class__.__name__
364 | if classname.find('Conv') != -1:
365 | init.kaiming_normal_(m.weight.data)
366 | init.constant_(m.bias.data, 0.0)
367 | elif classname.find('Linear') != -1:
368 | init.kaiming_normal_(m.weight.data)
369 | init.constant_(m.bias.data, 0.0)
370 | elif classname.find('BatchNorm2d') != -1:
371 | init.uniform_(m.weight.data, 1.0, 0.02)
372 | init.constant_(m.bias.data, 0.0)
373 |
374 |
375 | ### FULLY-CONNECTED PART FOR SCENE CLASSIFICATION ###
376 | class SceneClassNet(nn.Module):
377 | """
378 | Fully-connected network for scene type classification.
379 | Input :
380 | hyperInFeat (feature vector output by the hyper network). size=torch.Size([batchSize, patchSize // 2, 7, 7])
381 | Output :
382 | scene type vector. type=torch.tensor([batchSize, numClasses])
383 | """
384 | def __init__(self, featureInSize, numClasses, numLayers=3, numIntermediateNodes=500, **kwargs):
385 | super(SceneClassNet, self).__init__()
386 |
387 | self.layers = nn.ModuleList()
388 |
389 | # Input layer
390 | self.layers.append(nn.Linear(featureInSize, numIntermediateNodes))
391 | self.layers.append(nn.ReLU())
392 |
393 | # Intermediate layers
394 | for _ in range(numLayers - 2): # subtract 2 because we already have the input layer and will add the output layer
395 | self.layers.append(nn.Linear(numIntermediateNodes, numIntermediateNodes))
396 | self.layers.append(nn.ReLU())
397 |
398 | # Output layer
399 | self.layers.append(nn.Linear(numIntermediateNodes, numClasses))
400 |
401 | # Apply custom weights initialization
402 | self.apply(weights_init_xavier)
403 |
404 | def forward(self, x):
405 | for layer in self.layers:
406 | x = layer(x)
407 | return x
408 |
409 |
410 | ### FULLY-CONNECTED PART FOR SCORE RESCALING ###
412 | class RescaleNet(nn.Module):
413 | """
414 | Fully-connected network predicting per-scene score-rescaling coefficients (used to re-scale the predicted quality score).
415 | Input : one-hot scene vector (or scene feature vector). size=torch.Size([batchSize, featureInSize])
416 | Output : rescaling coefficients, e.g. slope and offset for a linear fit. type=torch.tensor([batchSize, polyDegree])
417 | """
418 | def __init__(self, featureInSize, numLayers=3, numIntermediateNodes=100, polyDegree=2, **kwargs):
419 | super(RescaleNet, self).__init__()
420 | self.numLayers = numLayers
421 | self.numIntermediateNodes = numIntermediateNodes
422 | self.polyDegree = polyDegree
423 | self.layers = nn.ModuleList()
424 |
425 | # Input layer
426 | self.layers.append(nn.Linear(featureInSize, self.numIntermediateNodes))
427 | self.layers.append(nn.ReLU())
428 |
429 | # Intermediate layers
430 | for _ in range(self.numLayers - 1):
431 | self.layers.append(nn.Linear(self.numIntermediateNodes, self.numIntermediateNodes))
432 | self.layers.append(nn.ReLU())
433 |
434 | # Output layer
435 | self.layers.append(nn.Linear(self.numIntermediateNodes, self.polyDegree))
436 |
437 | # Apply custom weights initialization
438 | self.apply(weights_init_xavier)
439 |
440 | def forward(self, x):
441 | for layer in self.layers:
442 | x = layer(x)
443 | return x
444 |
--------------------------------------------------------------------------------
/Test split example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "id": "c29fde2e-9497-4677-bd9a-888acfc987cb",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import os\n",
11 | "import pandas as pd"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "id": "535632cd-c9eb-4380-99a0-643fb7d26b60",
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "scorePath = r\"./Scores/Scores_Exposure.csv\"\n",
22 | "\n",
23 | "scoreDf = pd.read_csv(scorePath)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 3,
29 | "id": "24eb6320-c0d8-4031-b021-3c6fe117599b",
30 | "metadata": {
31 | "scrolled": true,
32 | "tags": []
33 | },
34 | "outputs": [
35 | {
36 | "data": {
37 | "text/html": [
38 | "\n",
39 | "\n",
52 | "
\n",
53 | " \n",
54 | " \n",
55 | " | \n",
56 | " IMAGE PATH | \n",
57 | " JOD | \n",
58 | " JOD STD | \n",
59 | " CI LOW | \n",
60 | " CI HIGH | \n",
61 | " CI RANGE | \n",
62 | " QUALITY LEVEL | \n",
63 | " CLUSTER | \n",
64 | " TOTAL COMPARISONS | \n",
65 | " IMAGE | \n",
66 | " SCENE | \n",
67 | " ATTRIBUTE | \n",
68 | " SCENE IDX | \n",
69 | " CONDITION | \n",
70 | "
\n",
71 | " \n",
72 | " \n",
73 | " \n",
74 | " | 0 | \n",
75 | " Exposure\\29_Indoor_Scene_0.jpg | \n",
76 | " -2.983846 | \n",
77 | " 0.440599 | \n",
78 | " -3.868096 | \n",
79 | " -2.158977 | \n",
80 | " 1.709119 | \n",
81 | " 0 | \n",
82 | " -1.0 | \n",
83 | " 97.0 | \n",
84 | " 29_Indoor_Scene_0.jpg | \n",
85 | " Indoor_Scene_0 | \n",
86 | " Exposure | \n",
87 | " 0 | \n",
88 | " Indoor | \n",
89 | "
\n",
90 | " \n",
91 | " | 1 | \n",
92 | " Exposure\\0_Indoor_Scene_0.jpg | \n",
93 | " -2.494759 | \n",
94 | " 0.458133 | \n",
95 | " -3.485925 | \n",
96 | " -1.619305 | \n",
97 | " 1.866620 | \n",
98 | " 0 | \n",
99 | " -1.0 | \n",
100 | " 117.0 | \n",
101 | " 0_Indoor_Scene_0.jpg | \n",
102 | " Indoor_Scene_0 | \n",
103 | " Exposure | \n",
104 | " 0 | \n",
105 | " Indoor | \n",
106 | "
\n",
107 | " \n",
108 | " | 2 | \n",
109 | " Exposure\\56_Indoor_Scene_0.jpg | \n",
110 | " -2.644800 | \n",
111 | " 0.784488 | \n",
112 | " -4.347027 | \n",
113 | " -1.332417 | \n",
114 | " 3.014610 | \n",
115 | " 0 | \n",
116 | " -1.0 | \n",
117 | " 109.0 | \n",
118 | " 56_Indoor_Scene_0.jpg | \n",
119 | " Indoor_Scene_0 | \n",
120 | " Exposure | \n",
121 | " 0 | \n",
122 | " Indoor | \n",
123 | "
\n",
124 | " \n",
125 | " | 3 | \n",
126 | " Exposure\\66_Indoor_Scene_0.jpg | \n",
127 | " -2.083214 | \n",
128 | " 0.485894 | \n",
129 | " -2.960870 | \n",
130 | " -1.144054 | \n",
131 | " 1.816816 | \n",
132 | " 0 | \n",
133 | " 0.0 | \n",
134 | " 106.0 | \n",
135 | " 66_Indoor_Scene_0.jpg | \n",
136 | " Indoor_Scene_0 | \n",
137 | " Exposure | \n",
138 | " 0 | \n",
139 | " Indoor | \n",
140 | "
\n",
141 | " \n",
142 | " | 4 | \n",
143 | " Exposure\\23_Indoor_Scene_0.jpg | \n",
144 | " -2.107013 | \n",
145 | " 0.592761 | \n",
146 | " -3.349612 | \n",
147 | " -1.049969 | \n",
148 | " 2.299643 | \n",
149 | " 0 | \n",
150 | " 0.0 | \n",
151 | " 120.0 | \n",
152 | " 23_Indoor_Scene_0.jpg | \n",
153 | " Indoor_Scene_0 | \n",
154 | " Exposure | \n",
155 | " 0 | \n",
156 | " Indoor | \n",
157 | "
\n",
158 | " \n",
159 | " | ... | \n",
160 | " ... | \n",
161 | " ... | \n",
162 | " ... | \n",
163 | " ... | \n",
164 | " ... | \n",
165 | " ... | \n",
166 | " ... | \n",
167 | " ... | \n",
168 | " ... | \n",
169 | " ... | \n",
170 | " ... | \n",
171 | " ... | \n",
172 | " ... | \n",
173 | " ... | \n",
174 | "
\n",
175 | " \n",
176 | " | 5111 | \n",
177 | " Exposure\\5066_Outdoor_Scene_49.jpg | \n",
178 | " 2.918451 | \n",
179 | " 0.343595 | \n",
180 | " 2.321701 | \n",
181 | " 3.608192 | \n",
182 | " 1.286491 | \n",
183 | " 5 | \n",
184 | " 20.0 | \n",
185 | " 107.0 | \n",
186 | " 5066_Outdoor_Scene_49.jpg | \n",
187 | " Outdoor_Scene_49 | \n",
188 | " Exposure | \n",
189 | " 49 | \n",
190 | " Outdoor | \n",
191 | "
\n",
192 | " \n",
193 | " | 5112 | \n",
194 | " Exposure\\5000_Outdoor_Scene_49.jpg | \n",
195 | " 2.876833 | \n",
196 | " 0.806029 | \n",
197 | " 1.261553 | \n",
198 | " 4.300412 | \n",
199 | " 3.038859 | \n",
200 | " 5 | \n",
201 | " 20.0 | \n",
202 | " 97.0 | \n",
203 | " 5000_Outdoor_Scene_49.jpg | \n",
204 | " Outdoor_Scene_49 | \n",
205 | " Exposure | \n",
206 | " 49 | \n",
207 | " Outdoor | \n",
208 | "
\n",
209 | " \n",
210 | " | 5113 | \n",
211 | " Exposure\\4999_Outdoor_Scene_49.jpg | \n",
212 | " 3.813626 | \n",
213 | " 0.535796 | \n",
214 | " 2.532832 | \n",
215 | " 4.691582 | \n",
216 | " 2.158750 | \n",
217 | " 5 | \n",
218 | " -1.0 | \n",
219 | " 105.0 | \n",
220 | " 4999_Outdoor_Scene_49.jpg | \n",
221 | " Outdoor_Scene_49 | \n",
222 | " Exposure | \n",
223 | " 49 | \n",
224 | " Outdoor | \n",
225 | "
\n",
226 | " \n",
227 | " | 5114 | \n",
228 | " Exposure\\5114_Outdoor_Scene_49.jpg | \n",
229 | " 3.108067 | \n",
230 | " 1.312512 | \n",
231 | " 0.030111 | \n",
232 | " 4.254205 | \n",
233 | " 4.224094 | \n",
234 | " 5 | \n",
235 | " -1.0 | \n",
236 | " 110.0 | \n",
237 | " 5114_Outdoor_Scene_49.jpg | \n",
238 | " Outdoor_Scene_49 | \n",
239 | " Exposure | \n",
240 | " 49 | \n",
241 | " Outdoor | \n",
242 | "
\n",
243 | " \n",
244 | " | 5115 | \n",
245 | " Exposure\\5004_Outdoor_Scene_49.jpg | \n",
246 | " 4.129699 | \n",
247 | " 1.994617 | \n",
248 | " 0.030111 | \n",
249 | " 6.127363 | \n",
250 | " 6.097252 | \n",
251 | " 5 | \n",
252 | " -1.0 | \n",
253 | " 87.0 | \n",
254 | " 5004_Outdoor_Scene_49.jpg | \n",
255 | " Outdoor_Scene_49 | \n",
256 | " Exposure | \n",
257 | " 49 | \n",
258 | " Outdoor | \n",
259 | "
\n",
260 | " \n",
261 | "
\n",
262 | "
5116 rows × 14 columns
\n",
263 | "
"
264 | ],
265 | "text/plain": [
266 | " IMAGE PATH JOD JOD STD CI LOW \\\n",
267 | "0 Exposure\\29_Indoor_Scene_0.jpg -2.983846 0.440599 -3.868096 \n",
268 | "1 Exposure\\0_Indoor_Scene_0.jpg -2.494759 0.458133 -3.485925 \n",
269 | "2 Exposure\\56_Indoor_Scene_0.jpg -2.644800 0.784488 -4.347027 \n",
270 | "3 Exposure\\66_Indoor_Scene_0.jpg -2.083214 0.485894 -2.960870 \n",
271 | "4 Exposure\\23_Indoor_Scene_0.jpg -2.107013 0.592761 -3.349612 \n",
272 | "... ... ... ... ... \n",
273 | "5111 Exposure\\5066_Outdoor_Scene_49.jpg 2.918451 0.343595 2.321701 \n",
274 | "5112 Exposure\\5000_Outdoor_Scene_49.jpg 2.876833 0.806029 1.261553 \n",
275 | "5113 Exposure\\4999_Outdoor_Scene_49.jpg 3.813626 0.535796 2.532832 \n",
276 | "5114 Exposure\\5114_Outdoor_Scene_49.jpg 3.108067 1.312512 0.030111 \n",
277 | "5115 Exposure\\5004_Outdoor_Scene_49.jpg 4.129699 1.994617 0.030111 \n",
278 | "\n",
279 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n",
280 | "0 -2.158977 1.709119 0 -1.0 97.0 \n",
281 | "1 -1.619305 1.866620 0 -1.0 117.0 \n",
282 | "2 -1.332417 3.014610 0 -1.0 109.0 \n",
283 | "3 -1.144054 1.816816 0 0.0 106.0 \n",
284 | "4 -1.049969 2.299643 0 0.0 120.0 \n",
285 | "... ... ... ... ... ... \n",
286 | "5111 3.608192 1.286491 5 20.0 107.0 \n",
287 | "5112 4.300412 3.038859 5 20.0 97.0 \n",
288 | "5113 4.691582 2.158750 5 -1.0 105.0 \n",
289 | "5114 4.254205 4.224094 5 -1.0 110.0 \n",
290 | "5115 6.127363 6.097252 5 -1.0 87.0 \n",
291 | "\n",
292 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n",
293 | "0 29_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
294 | "1 0_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
295 | "2 56_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
296 | "3 66_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
297 | "4 23_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
298 | "... ... ... ... ... \n",
299 | "5111 5066_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
300 | "5112 5000_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
301 | "5113 4999_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
302 | "5114 5114_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
303 | "5115 5004_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
304 | "\n",
305 | " CONDITION \n",
306 | "0 Indoor \n",
307 | "1 Indoor \n",
308 | "2 Indoor \n",
309 | "3 Indoor \n",
310 | "4 Indoor \n",
311 | "... ... \n",
312 | "5111 Outdoor \n",
313 | "5112 Outdoor \n",
314 | "5113 Outdoor \n",
315 | "5114 Outdoor \n",
316 | "5115 Outdoor \n",
317 | "\n",
318 | "[5116 rows x 14 columns]"
319 | ]
320 | },
321 | "execution_count": 3,
322 | "metadata": {},
323 | "output_type": "execute_result"
324 | }
325 | ],
326 | "source": [
327 | "scoreDf"
328 | ]
329 | },
330 | {
331 | "cell_type": "markdown",
332 | "id": "ec858eef-e67e-4c67-a0fe-18b0c4c36897",
333 | "metadata": {
334 | "tags": []
335 | },
336 | "source": [
337 | "### Scene Split (Generalization Split)"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": 4,
343 | "id": "450ac043-8cf7-4c17-972b-1b2f830e39be",
344 | "metadata": {},
345 | "outputs": [],
346 | "source": [
347 | "sceneSplitPath = r\"./Test split/Scene Split.csv\"\n",
348 | "sceneSplitDf = pd.read_csv(sceneSplitPath)"
349 | ]
350 | },
351 | {
352 | "cell_type": "code",
353 | "execution_count": 5,
354 | "id": "ff6f8bf9-f69b-4350-9211-23a6e9916d1e",
355 | "metadata": {
356 | "scrolled": true,
357 | "tags": []
358 | },
359 | "outputs": [
360 | {
361 | "data": {
362 | "text/html": [
363 | "\n",
364 | "\n",
377 | "
\n",
378 | " \n",
379 | " \n",
380 | " | \n",
381 | " SCENE | \n",
382 | " SPLIT | \n",
383 | " NB IMAGES | \n",
384 | "
\n",
385 | " \n",
386 | " \n",
387 | " \n",
388 | " | 0 | \n",
389 | " Indoor_Scene_0 | \n",
390 | " Train | \n",
391 | " 72 | \n",
392 | "
\n",
393 | " \n",
394 | " | 1 | \n",
395 | " Indoor_Scene_12 | \n",
396 | " Train | \n",
397 | " 103 | \n",
398 | "
\n",
399 | " \n",
400 | " | 2 | \n",
401 | " Indoor_Scene_13 | \n",
402 | " Train | \n",
403 | " 90 | \n",
404 | "
\n",
405 | " \n",
406 | " | 3 | \n",
407 | " Indoor_Scene_2 | \n",
408 | " Train | \n",
409 | " 74 | \n",
410 | "
\n",
411 | " \n",
412 | " | 4 | \n",
413 | " Indoor_Scene_3 | \n",
414 | " Train | \n",
415 | " 89 | \n",
416 | "
\n",
417 | " \n",
418 | " | 5 | \n",
419 | " Indoor_Scene_4 | \n",
420 | " Train | \n",
421 | " 91 | \n",
422 | "
\n",
423 | " \n",
424 | " | 6 | \n",
425 | " Indoor_Scene_6 | \n",
426 | " Train | \n",
427 | " 125 | \n",
428 | "
\n",
429 | " \n",
430 | " | 7 | \n",
431 | " Indoor_Scene_7 | \n",
432 | " Train | \n",
433 | " 110 | \n",
434 | "
\n",
435 | " \n",
436 | " | 8 | \n",
437 | " Indoor_Scene_8 | \n",
438 | " Train | \n",
439 | " 125 | \n",
440 | "
\n",
441 | " \n",
442 | " | 9 | \n",
443 | " Indoor_Scene_9 | \n",
444 | " Train | \n",
445 | " 125 | \n",
446 | "
\n",
447 | " \n",
448 | " | 10 | \n",
449 | " Lowlight_Scene_14 | \n",
450 | " Train | \n",
451 | " 96 | \n",
452 | "
\n",
453 | " \n",
454 | " | 11 | \n",
455 | " Lowlight_Scene_15 | \n",
456 | " Train | \n",
457 | " 70 | \n",
458 | "
\n",
459 | " \n",
460 | " | 12 | \n",
461 | " Lowlight_Scene_18 | \n",
462 | " Train | \n",
463 | " 125 | \n",
464 | "
\n",
465 | " \n",
466 | " | 13 | \n",
467 | " Lowlight_Scene_19 | \n",
468 | " Train | \n",
469 | " 125 | \n",
470 | "
\n",
471 | " \n",
472 | " | 14 | \n",
473 | " Lowlight_Scene_21 | \n",
474 | " Train | \n",
475 | " 125 | \n",
476 | "
\n",
477 | " \n",
478 | " | 15 | \n",
479 | " Night_Scene_22 | \n",
480 | " Train | \n",
481 | " 116 | \n",
482 | "
\n",
483 | " \n",
484 | " | 16 | \n",
485 | " Night_Scene_23 | \n",
486 | " Train | \n",
487 | " 117 | \n",
488 | "
\n",
489 | " \n",
490 | " | 17 | \n",
491 | " Night_Scene_24 | \n",
492 | " Train | \n",
493 | " 119 | \n",
494 | "
\n",
495 | " \n",
496 | " | 18 | \n",
497 | " Night_Scene_25 | \n",
498 | " Train | \n",
499 | " 116 | \n",
500 | "
\n",
501 | " \n",
502 | " | 19 | \n",
503 | " Night_Scene_26 | \n",
504 | " Train | \n",
505 | " 115 | \n",
506 | "
\n",
507 | " \n",
508 | " | 20 | \n",
509 | " Night_Scene_27 | \n",
510 | " Train | \n",
511 | " 115 | \n",
512 | "
\n",
513 | " \n",
514 | " | 21 | \n",
515 | " Night_Scene_29 | \n",
516 | " Train | \n",
517 | " 112 | \n",
518 | "
\n",
519 | " \n",
520 | " | 22 | \n",
521 | " Night_Scene_32 | \n",
522 | " Train | \n",
523 | " 100 | \n",
524 | "
\n",
525 | " \n",
526 | " | 23 | \n",
527 | " Outdoor_Scene_35 | \n",
528 | " Train | \n",
529 | " 49 | \n",
530 | "
\n",
531 | " \n",
532 | " | 24 | \n",
533 | " Outdoor_Scene_36 | \n",
534 | " Train | \n",
535 | " 73 | \n",
536 | "
\n",
537 | " \n",
538 | " | 25 | \n",
539 | " Outdoor_Scene_37 | \n",
540 | " Train | \n",
541 | " 95 | \n",
542 | "
\n",
543 | " \n",
544 | " | 26 | \n",
545 | " Outdoor_Scene_38 | \n",
546 | " Train | \n",
547 | " 78 | \n",
548 | "
\n",
549 | " \n",
550 | " | 27 | \n",
551 | " Outdoor_Scene_39 | \n",
552 | " Train | \n",
553 | " 92 | \n",
554 | "
\n",
555 | " \n",
556 | " | 28 | \n",
557 | " Outdoor_Scene_40 | \n",
558 | " Train | \n",
559 | " 125 | \n",
560 | "
\n",
561 | " \n",
562 | " | 29 | \n",
563 | " Outdoor_Scene_41 | \n",
564 | " Train | \n",
565 | " 125 | \n",
566 | "
\n",
567 | " \n",
568 | " | 30 | \n",
569 | " Outdoor_Scene_42 | \n",
570 | " Train | \n",
571 | " 125 | \n",
572 | "
\n",
573 | " \n",
574 | " | 31 | \n",
575 | " Outdoor_Scene_44 | \n",
576 | " Train | \n",
577 | " 125 | \n",
578 | "
\n",
579 | " \n",
580 | " | 32 | \n",
581 | " Outdoor_Scene_47 | \n",
582 | " Train | \n",
583 | " 89 | \n",
584 | "
\n",
585 | " \n",
586 | " | 33 | \n",
587 | " Outdoor_Scene_48 | \n",
588 | " Train | \n",
589 | " 74 | \n",
590 | "
\n",
591 | " \n",
592 | " | 34 | \n",
593 | " Outdoor_Scene_49 | \n",
594 | " Train | \n",
595 | " 125 | \n",
596 | "
\n",
597 | " \n",
598 | " | 35 | \n",
599 | " Indoor_Scene_10 | \n",
600 | " Test | \n",
601 | " 125 | \n",
602 | "
\n",
603 | " \n",
604 | " | 36 | \n",
605 | " Indoor_Scene_11 | \n",
606 | " Test | \n",
607 | " 94 | \n",
608 | "
\n",
609 | " \n",
610 | " | 37 | \n",
611 | " Indoor_Scene_1 | \n",
612 | " Test | \n",
613 | " 124 | \n",
614 | "
\n",
615 | " \n",
616 | " | 38 | \n",
617 | " Indoor_Scene_5 | \n",
618 | " Test | \n",
619 | " 50 | \n",
620 | "
\n",
621 | " \n",
622 | " | 39 | \n",
623 | " Lowlight_Scene_16 | \n",
624 | " Test | \n",
625 | " 87 | \n",
626 | "
\n",
627 | " \n",
628 | " | 40 | \n",
629 | " Lowlight_Scene_17 | \n",
630 | " Test | \n",
631 | " 102 | \n",
632 | "
\n",
633 | " \n",
634 | " | 41 | \n",
635 | " Lowlight_Scene_20 | \n",
636 | " Test | \n",
637 | " 125 | \n",
638 | "
\n",
639 | " \n",
640 | " | 42 | \n",
641 | " Night_Scene_28 | \n",
642 | " Test | \n",
643 | " 112 | \n",
644 | "
\n",
645 | " \n",
646 | " | 43 | \n",
647 | " Night_Scene_30 | \n",
648 | " Test | \n",
649 | " 114 | \n",
650 | "
\n",
651 | " \n",
652 | " | 44 | \n",
653 | " Night_Scene_31 | \n",
654 | " Test | \n",
655 | " 112 | \n",
656 | "
\n",
657 | " \n",
658 | " | 45 | \n",
659 | " Outdoor_Scene_33 | \n",
660 | " Test | \n",
661 | " 119 | \n",
662 | "
\n",
663 | " \n",
664 | " | 46 | \n",
665 | " Outdoor_Scene_34 | \n",
666 | " Test | \n",
667 | " 45 | \n",
668 | "
\n",
669 | " \n",
670 | " | 47 | \n",
671 | " Outdoor_Scene_43 | \n",
672 | " Test | \n",
673 | " 125 | \n",
674 | "
\n",
675 | " \n",
676 | " | 48 | \n",
677 | " Outdoor_Scene_45 | \n",
678 | " Test | \n",
679 | " 78 | \n",
680 | "
\n",
681 | " \n",
682 | " | 49 | \n",
683 | " Outdoor_Scene_46 | \n",
684 | " Test | \n",
685 | " 74 | \n",
686 | "
\n",
687 | " \n",
688 | "
\n",
689 | "
"
690 | ],
691 | "text/plain": [
692 | " SCENE SPLIT NB IMAGES\n",
693 | "0 Indoor_Scene_0 Train 72\n",
694 | "1 Indoor_Scene_12 Train 103\n",
695 | "2 Indoor_Scene_13 Train 90\n",
696 | "3 Indoor_Scene_2 Train 74\n",
697 | "4 Indoor_Scene_3 Train 89\n",
698 | "5 Indoor_Scene_4 Train 91\n",
699 | "6 Indoor_Scene_6 Train 125\n",
700 | "7 Indoor_Scene_7 Train 110\n",
701 | "8 Indoor_Scene_8 Train 125\n",
702 | "9 Indoor_Scene_9 Train 125\n",
703 | "10 Lowlight_Scene_14 Train 96\n",
704 | "11 Lowlight_Scene_15 Train 70\n",
705 | "12 Lowlight_Scene_18 Train 125\n",
706 | "13 Lowlight_Scene_19 Train 125\n",
707 | "14 Lowlight_Scene_21 Train 125\n",
708 | "15 Night_Scene_22 Train 116\n",
709 | "16 Night_Scene_23 Train 117\n",
710 | "17 Night_Scene_24 Train 119\n",
711 | "18 Night_Scene_25 Train 116\n",
712 | "19 Night_Scene_26 Train 115\n",
713 | "20 Night_Scene_27 Train 115\n",
714 | "21 Night_Scene_29 Train 112\n",
715 | "22 Night_Scene_32 Train 100\n",
716 | "23 Outdoor_Scene_35 Train 49\n",
717 | "24 Outdoor_Scene_36 Train 73\n",
718 | "25 Outdoor_Scene_37 Train 95\n",
719 | "26 Outdoor_Scene_38 Train 78\n",
720 | "27 Outdoor_Scene_39 Train 92\n",
721 | "28 Outdoor_Scene_40 Train 125\n",
722 | "29 Outdoor_Scene_41 Train 125\n",
723 | "30 Outdoor_Scene_42 Train 125\n",
724 | "31 Outdoor_Scene_44 Train 125\n",
725 | "32 Outdoor_Scene_47 Train 89\n",
726 | "33 Outdoor_Scene_48 Train 74\n",
727 | "34 Outdoor_Scene_49 Train 125\n",
728 | "35 Indoor_Scene_10 Test 125\n",
729 | "36 Indoor_Scene_11 Test 94\n",
730 | "37 Indoor_Scene_1 Test 124\n",
731 | "38 Indoor_Scene_5 Test 50\n",
732 | "39 Lowlight_Scene_16 Test 87\n",
733 | "40 Lowlight_Scene_17 Test 102\n",
734 | "41 Lowlight_Scene_20 Test 125\n",
735 | "42 Night_Scene_28 Test 112\n",
736 | "43 Night_Scene_30 Test 114\n",
737 | "44 Night_Scene_31 Test 112\n",
738 | "45 Outdoor_Scene_33 Test 119\n",
739 | "46 Outdoor_Scene_34 Test 45\n",
740 | "47 Outdoor_Scene_43 Test 125\n",
741 | "48 Outdoor_Scene_45 Test 78\n",
742 | "49 Outdoor_Scene_46 Test 74"
743 | ]
744 | },
745 | "execution_count": 5,
746 | "metadata": {},
747 | "output_type": "execute_result"
748 | }
749 | ],
750 | "source": [
751 | "sceneSplitDf"
752 | ]
753 | },
754 | {
755 | "cell_type": "code",
756 | "execution_count": 6,
757 | "id": "7e5cc5ef-3689-4599-b6e1-46982f9fbde3",
758 | "metadata": {},
759 | "outputs": [],
760 | "source": [
761 | "trainDf = pd.merge(scoreDf, sceneSplitDf[sceneSplitDf['SPLIT']=='Train'].drop('NB IMAGES', axis=1), on='SCENE', how='inner')\n",
762 | "testDf = pd.merge(scoreDf, sceneSplitDf[sceneSplitDf['SPLIT']=='Test'].drop('NB IMAGES', axis=1), on='SCENE', how='inner')\n",
763 | "\n",
764 | "trainDf.to_csv(rf'./Test split/Scene Split/SceneSplit_Train_{os.path.basename(scorePath)}', index=False)\n",
765 | "testDf.to_csv(rf'./Test split/Scene Split/SceneSplit_Test_{os.path.basename(scorePath)}', index=False)"
766 | ]
767 | },
768 | {
769 | "cell_type": "code",
770 | "execution_count": 7,
771 | "id": "b293ec7a-5e4c-414c-9ea0-33b5b56940cf",
772 | "metadata": {},
773 | "outputs": [
774 | {
775 | "data": {
776 | "text/html": [
777 | "\n",
778 | "\n",
791 | "
\n",
792 | " \n",
793 | " \n",
794 | " | \n",
795 | " IMAGE PATH | \n",
796 | " JOD | \n",
797 | " JOD STD | \n",
798 | " CI LOW | \n",
799 | " CI HIGH | \n",
800 | " CI RANGE | \n",
801 | " QUALITY LEVEL | \n",
802 | " CLUSTER | \n",
803 | " TOTAL COMPARISONS | \n",
804 | " IMAGE | \n",
805 | " SCENE | \n",
806 | " ATTRIBUTE | \n",
807 | " SCENE IDX | \n",
808 | " CONDITION | \n",
809 | " SPLIT | \n",
810 | "
\n",
811 | " \n",
812 | " \n",
813 | " \n",
814 | " | 0 | \n",
815 | " Exposure\\122_Indoor_Scene_1.tiff | \n",
816 | " -4.034503 | \n",
817 | " 0.656547 | \n",
818 | " -5.226753 | \n",
819 | " -2.784718 | \n",
820 | " 2.442034 | \n",
821 | " 0 | \n",
822 | " -1.0 | \n",
823 | " 46.0 | \n",
824 | " 122_Indoor_Scene_1.tiff | \n",
825 | " Indoor_Scene_1 | \n",
826 | " Exposure | \n",
827 | " 1 | \n",
828 | " Indoor | \n",
829 | " Test | \n",
830 | "
\n",
831 | " \n",
832 | " | 1 | \n",
833 | " Exposure\\124_Indoor_Scene_1.jpg | \n",
834 | " -3.434371 | \n",
835 | " 0.467843 | \n",
836 | " -4.135082 | \n",
837 | " -2.299317 | \n",
838 | " 1.835765 | \n",
839 | " 0 | \n",
840 | " -1.0 | \n",
841 | " 81.0 | \n",
842 | " 124_Indoor_Scene_1.jpg | \n",
843 | " Indoor_Scene_1 | \n",
844 | " Exposure | \n",
845 | " 1 | \n",
846 | " Indoor | \n",
847 | " Test | \n",
848 | "
\n",
849 | " \n",
850 | " | 2 | \n",
851 | " Exposure\\173_Indoor_Scene_1.jpg | \n",
852 | " -3.076148 | \n",
853 | " 0.831440 | \n",
854 | " -4.195738 | \n",
855 | " -1.080512 | \n",
856 | " 3.115226 | \n",
857 | " 0 | \n",
858 | " 0.0 | \n",
859 | " 94.0 | \n",
860 | " 173_Indoor_Scene_1.jpg | \n",
861 | " Indoor_Scene_1 | \n",
862 | " Exposure | \n",
863 | " 1 | \n",
864 | " Indoor | \n",
865 | " Test | \n",
866 | "
\n",
867 | " \n",
868 | " | 3 | \n",
869 | " Exposure\\154_Indoor_Scene_1.jpg | \n",
870 | " -3.138977 | \n",
871 | " 0.810391 | \n",
872 | " -4.670127 | \n",
873 | " -1.546138 | \n",
874 | " 3.123989 | \n",
875 | " 0 | \n",
876 | " 0.0 | \n",
877 | " 147.0 | \n",
878 | " 154_Indoor_Scene_1.jpg | \n",
879 | " Indoor_Scene_1 | \n",
880 | " Exposure | \n",
881 | " 1 | \n",
882 | " Indoor | \n",
883 | " Test | \n",
884 | "
\n",
885 | " \n",
886 | " | 4 | \n",
887 | " Exposure\\101_Indoor_Scene_1.jpg | \n",
888 | " -2.746519 | \n",
889 | " 0.785913 | \n",
890 | " -4.537797 | \n",
891 | " -1.477081 | \n",
892 | " 3.060717 | \n",
893 | " 0 | \n",
894 | " -1.0 | \n",
895 | " 72.0 | \n",
896 | " 101_Indoor_Scene_1.jpg | \n",
897 | " Indoor_Scene_1 | \n",
898 | " Exposure | \n",
899 | " 1 | \n",
900 | " Indoor | \n",
901 | " Test | \n",
902 | "
\n",
903 | " \n",
904 | " | ... | \n",
905 | " ... | \n",
906 | " ... | \n",
907 | " ... | \n",
908 | " ... | \n",
909 | " ... | \n",
910 | " ... | \n",
911 | " ... | \n",
912 | " ... | \n",
913 | " ... | \n",
914 | " ... | \n",
915 | " ... | \n",
916 | " ... | \n",
917 | " ... | \n",
918 | " ... | \n",
919 | " ... | \n",
920 | "
\n",
921 | " \n",
922 | " | 1481 | \n",
923 | " Exposure\\4805_Outdoor_Scene_46.jpg | \n",
924 | " 2.319056 | \n",
925 | " 0.680509 | \n",
926 | " 0.957290 | \n",
927 | " 3.689532 | \n",
928 | " 2.732242 | \n",
929 | " 3 | \n",
930 | " -1.0 | \n",
931 | " 90.0 | \n",
932 | " 4805_Outdoor_Scene_46.jpg | \n",
933 | " Outdoor_Scene_46 | \n",
934 | " Exposure | \n",
935 | " 46 | \n",
936 | " Outdoor | \n",
937 | " Test | \n",
938 | "
\n",
939 | " \n",
940 | " | 1482 | \n",
941 | " Exposure\\4814_Outdoor_Scene_46.jpeg | \n",
942 | " 2.458551 | \n",
943 | " 0.678625 | \n",
944 | " 0.897238 | \n",
945 | " 3.646342 | \n",
946 | " 2.749105 | \n",
947 | " 3 | \n",
948 | " -1.0 | \n",
949 | " 100.0 | \n",
950 | " 4814_Outdoor_Scene_46.jpeg | \n",
951 | " Outdoor_Scene_46 | \n",
952 | " Exposure | \n",
953 | " 46 | \n",
954 | " Outdoor | \n",
955 | " Test | \n",
956 | "
\n",
957 | " \n",
958 | " | 1483 | \n",
959 | " Exposure\\4804_Outdoor_Scene_46.jpg | \n",
960 | " 2.582052 | \n",
961 | " 0.657648 | \n",
962 | " 1.498082 | \n",
963 | " 3.910505 | \n",
964 | " 2.412423 | \n",
965 | " 3 | \n",
966 | " -1.0 | \n",
967 | " 121.0 | \n",
968 | " 4804_Outdoor_Scene_46.jpg | \n",
969 | " Outdoor_Scene_46 | \n",
970 | " Exposure | \n",
971 | " 46 | \n",
972 | " Outdoor | \n",
973 | " Test | \n",
974 | "
\n",
975 | " \n",
976 | " | 1484 | \n",
977 | " Exposure\\4774_Outdoor_Scene_46.jpg | \n",
978 | " 4.008446 | \n",
979 | " 0.562061 | \n",
980 | " 2.953470 | \n",
981 | " 5.157853 | \n",
982 | " 2.204383 | \n",
983 | " 4 | \n",
984 | " -1.0 | \n",
985 | " 35.0 | \n",
986 | " 4774_Outdoor_Scene_46.jpg | \n",
987 | " Outdoor_Scene_46 | \n",
988 | " Exposure | \n",
989 | " 46 | \n",
990 | " Outdoor | \n",
991 | " Test | \n",
992 | "
\n",
993 | " \n",
994 | " | 1485 | \n",
995 | " Exposure\\4759_Outdoor_Scene_46.jpg | \n",
996 | " 4.195345 | \n",
997 | " 0.659281 | \n",
998 | " 2.925962 | \n",
999 | " 5.517392 | \n",
1000 | " 2.591430 | \n",
1001 | " 4 | \n",
1002 | " -1.0 | \n",
1003 | " 34.0 | \n",
1004 | " 4759_Outdoor_Scene_46.jpg | \n",
1005 | " Outdoor_Scene_46 | \n",
1006 | " Exposure | \n",
1007 | " 46 | \n",
1008 | " Outdoor | \n",
1009 | " Test | \n",
1010 | "
\n",
1011 | " \n",
1012 | "
\n",
1013 | "
1486 rows × 15 columns
\n",
1014 | "
"
1015 | ],
1016 | "text/plain": [
1017 | " IMAGE PATH JOD JOD STD CI LOW \\\n",
1018 | "0 Exposure\\122_Indoor_Scene_1.tiff -4.034503 0.656547 -5.226753 \n",
1019 | "1 Exposure\\124_Indoor_Scene_1.jpg -3.434371 0.467843 -4.135082 \n",
1020 | "2 Exposure\\173_Indoor_Scene_1.jpg -3.076148 0.831440 -4.195738 \n",
1021 | "3 Exposure\\154_Indoor_Scene_1.jpg -3.138977 0.810391 -4.670127 \n",
1022 | "4 Exposure\\101_Indoor_Scene_1.jpg -2.746519 0.785913 -4.537797 \n",
1023 | "... ... ... ... ... \n",
1024 | "1481 Exposure\\4805_Outdoor_Scene_46.jpg 2.319056 0.680509 0.957290 \n",
1025 | "1482 Exposure\\4814_Outdoor_Scene_46.jpeg 2.458551 0.678625 0.897238 \n",
1026 | "1483 Exposure\\4804_Outdoor_Scene_46.jpg 2.582052 0.657648 1.498082 \n",
1027 | "1484 Exposure\\4774_Outdoor_Scene_46.jpg 4.008446 0.562061 2.953470 \n",
1028 | "1485 Exposure\\4759_Outdoor_Scene_46.jpg 4.195345 0.659281 2.925962 \n",
1029 | "\n",
1030 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n",
1031 | "0 -2.784718 2.442034 0 -1.0 46.0 \n",
1032 | "1 -2.299317 1.835765 0 -1.0 81.0 \n",
1033 | "2 -1.080512 3.115226 0 0.0 94.0 \n",
1034 | "3 -1.546138 3.123989 0 0.0 147.0 \n",
1035 | "4 -1.477081 3.060717 0 -1.0 72.0 \n",
1036 | "... ... ... ... ... ... \n",
1037 | "1481 3.689532 2.732242 3 -1.0 90.0 \n",
1038 | "1482 3.646342 2.749105 3 -1.0 100.0 \n",
1039 | "1483 3.910505 2.412423 3 -1.0 121.0 \n",
1040 | "1484 5.157853 2.204383 4 -1.0 35.0 \n",
1041 | "1485 5.517392 2.591430 4 -1.0 34.0 \n",
1042 | "\n",
1043 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n",
1044 | "0 122_Indoor_Scene_1.tiff Indoor_Scene_1 Exposure 1 \n",
1045 | "1 124_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n",
1046 | "2 173_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n",
1047 | "3 154_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n",
1048 | "4 101_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n",
1049 | "... ... ... ... ... \n",
1050 | "1481 4805_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n",
1051 | "1482 4814_Outdoor_Scene_46.jpeg Outdoor_Scene_46 Exposure 46 \n",
1052 | "1483 4804_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n",
1053 | "1484 4774_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n",
1054 | "1485 4759_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n",
1055 | "\n",
1056 | " CONDITION SPLIT \n",
1057 | "0 Indoor Test \n",
1058 | "1 Indoor Test \n",
1059 | "2 Indoor Test \n",
1060 | "3 Indoor Test \n",
1061 | "4 Indoor Test \n",
1062 | "... ... ... \n",
1063 | "1481 Outdoor Test \n",
1064 | "1482 Outdoor Test \n",
1065 | "1483 Outdoor Test \n",
1066 | "1484 Outdoor Test \n",
1067 | "1485 Outdoor Test \n",
1068 | "\n",
1069 | "[1486 rows x 15 columns]"
1070 | ]
1071 | },
1072 | "execution_count": 7,
1073 | "metadata": {},
1074 | "output_type": "execute_result"
1075 | }
1076 | ],
1077 | "source": [
1078 | "testDf"
1079 | ]
1080 | },
1081 | {
1082 | "cell_type": "markdown",
1083 | "id": "75abbef3-8d52-48b0-ae24-9789e39ffd9e",
1084 | "metadata": {},
1085 | "source": [
1086 | "### Device split"
1087 | ]
1088 | },
1089 | {
1090 | "cell_type": "code",
1091 | "execution_count": 8,
1092 | "id": "08851a87-e441-4aa4-b214-838c5463609d",
1093 | "metadata": {},
1094 | "outputs": [],
1095 | "source": [
1096 | "deviceSplitPath = r\"./Test split/Device Split.csv\"\n",
1097 | "deviceSplitDf = pd.read_csv(deviceSplitPath)"
1098 | ]
1099 | },
1100 | {
1101 | "cell_type": "code",
1102 | "execution_count": 9,
1103 | "id": "e26117fc-e171-488b-961c-fe3f966cdb46",
1104 | "metadata": {},
1105 | "outputs": [
1106 | {
1107 | "data": {
1108 | "text/html": [
1109 | "\n",
1110 | "\n",
1123 | "
\n",
1124 | " \n",
1125 | " \n",
1126 | " | \n",
1127 | " IMAGE | \n",
1128 | " SPLIT | \n",
1129 | "
\n",
1130 | " \n",
1131 | " \n",
1132 | " \n",
1133 | " | 0 | \n",
1134 | " 0_Indoor_Scene_0.jpg | \n",
1135 | " Train | \n",
1136 | "
\n",
1137 | " \n",
1138 | " | 1 | \n",
1139 | " 1_Indoor_Scene_0.jpg | \n",
1140 | " Test | \n",
1141 | "
\n",
1142 | " \n",
1143 | " | 2 | \n",
1144 | " 2_Indoor_Scene_0.JPG | \n",
1145 | " Train | \n",
1146 | "
\n",
1147 | " \n",
1148 | " | 3 | \n",
1149 | " 3_Indoor_Scene_0.JPG | \n",
1150 | " Train | \n",
1151 | "
\n",
1152 | " \n",
1153 | " | 4 | \n",
1154 | " 4_Indoor_Scene_0.JPG | \n",
1155 | " Train | \n",
1156 | "
\n",
1157 | " \n",
1158 | " | ... | \n",
1159 | " ... | \n",
1160 | " ... | \n",
1161 | "
\n",
1162 | " \n",
1163 | " | 5111 | \n",
1164 | " 5111_Outdoor_Scene_49.jpg | \n",
1165 | " Train | \n",
1166 | "
\n",
1167 | " \n",
1168 | " | 5112 | \n",
1169 | " 5112_Outdoor_Scene_49.jpg | \n",
1170 | " Train | \n",
1171 | "
\n",
1172 | " \n",
1173 | " | 5113 | \n",
1174 | " 5113_Outdoor_Scene_49.jpg | \n",
1175 | " Train | \n",
1176 | "
\n",
1177 | " \n",
1178 | " | 5114 | \n",
1179 | " 5114_Outdoor_Scene_49.jpg | \n",
1180 | " Train | \n",
1181 | "
\n",
1182 | " \n",
1183 | " | 5115 | \n",
1184 | " 5115_Outdoor_Scene_49.jpg | \n",
1185 | " Train | \n",
1186 | "
\n",
1187 | " \n",
1188 | "
\n",
1189 | "
5116 rows × 2 columns
\n",
1190 | "
"
1191 | ],
1192 | "text/plain": [
1193 | " IMAGE SPLIT\n",
1194 | "0 0_Indoor_Scene_0.jpg Train\n",
1195 | "1 1_Indoor_Scene_0.jpg Test\n",
1196 | "2 2_Indoor_Scene_0.JPG Train\n",
1197 | "3 3_Indoor_Scene_0.JPG Train\n",
1198 | "4 4_Indoor_Scene_0.JPG Train\n",
1199 | "... ... ...\n",
1200 | "5111 5111_Outdoor_Scene_49.jpg Train\n",
1201 | "5112 5112_Outdoor_Scene_49.jpg Train\n",
1202 | "5113 5113_Outdoor_Scene_49.jpg Train\n",
1203 | "5114 5114_Outdoor_Scene_49.jpg Train\n",
1204 | "5115 5115_Outdoor_Scene_49.jpg Train\n",
1205 | "\n",
1206 | "[5116 rows x 2 columns]"
1207 | ]
1208 | },
1209 | "execution_count": 9,
1210 | "metadata": {},
1211 | "output_type": "execute_result"
1212 | }
1213 | ],
1214 | "source": [
1215 | "deviceSplitDf"
1216 | ]
1217 | },
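{
"cell_type": "markdown",
"id": "device-split-note",
"metadata": {},
"source": [
"Optional sanity check (added here for illustration; it is not part of the original notebook): count how many images fall on each side of the device split, using only the `SPLIT` column of `deviceSplitDf` shown above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "device-split-counts",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check (assumes deviceSplitDf loaded above): images per split side.\n",
"deviceSplitDf['SPLIT'].value_counts()"
]
},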
1218 | {
1219 | "cell_type": "code",
1220 | "execution_count": 10,
1221 | "id": "8d4c57a5-eab8-44bd-867f-65beb573dc23",
1222 | "metadata": {},
1223 | "outputs": [],
1224 | "source": [
1225 | "trainDf = pd.merge(scoreDf, deviceSplitDf[imageSplitDf['SPLIT']=='Train'], on='IMAGE', how='inner')\n",
1226 | "testDf = pd.merge(scoreDf, deviceSplitDf[imageSplitDf['SPLIT']=='Test'], on='IMAGE', how='inner')\n",
1227 | "\n",
1228 | "trainDf.to_csv(rf'./Test split/Device Split/DeviceSplit_Train_{os.path.basename(scorePath)}', index=False)\n",
1229 | "testDf.to_csv(rf'./Test split/Device Split/DeviceSplit_Test_{os.path.basename(scorePath)}', index=False)"
1230 | ]
1231 | },
1232 | {
1233 | "cell_type": "code",
1234 | "execution_count": 11,
1235 | "id": "5d4a79fb-a312-4eff-a8fb-2f6f51a9d307",
1236 | "metadata": {},
1237 | "outputs": [
1238 | {
1239 | "data": {
1240 | "text/html": [
1241 | "\n",
1242 | "\n",
1255 | "
\n",
1256 | " \n",
1257 | " \n",
1258 | " | \n",
1259 | " IMAGE PATH | \n",
1260 | " JOD | \n",
1261 | " JOD STD | \n",
1262 | " CI LOW | \n",
1263 | " CI HIGH | \n",
1264 | " CI RANGE | \n",
1265 | " QUALITY LEVEL | \n",
1266 | " CLUSTER | \n",
1267 | " TOTAL COMPARISONS | \n",
1268 | " IMAGE | \n",
1269 | " SCENE | \n",
1270 | " ATTRIBUTE | \n",
1271 | " SCENE IDX | \n",
1272 | " CONDITION | \n",
1273 | " SPLIT | \n",
1274 | "
\n",
1275 | " \n",
1276 | " \n",
1277 | " \n",
1278 | " | 0 | \n",
1279 | " Exposure\\56_Indoor_Scene_0.jpg | \n",
1280 | " -2.644800 | \n",
1281 | " 0.784488 | \n",
1282 | " -4.347027 | \n",
1283 | " -1.332417 | \n",
1284 | " 3.014610 | \n",
1285 | " 0 | \n",
1286 | " -1.0 | \n",
1287 | " 109.0 | \n",
1288 | " 56_Indoor_Scene_0.jpg | \n",
1289 | " Indoor_Scene_0 | \n",
1290 | " Exposure | \n",
1291 | " 0 | \n",
1292 | " Indoor | \n",
1293 | " Test | \n",
1294 | "
\n",
1295 | " \n",
1296 | " | 1 | \n",
1297 | " Exposure\\66_Indoor_Scene_0.jpg | \n",
1298 | " -2.083214 | \n",
1299 | " 0.485894 | \n",
1300 | " -2.960870 | \n",
1301 | " -1.144054 | \n",
1302 | " 1.816816 | \n",
1303 | " 0 | \n",
1304 | " 0.0 | \n",
1305 | " 106.0 | \n",
1306 | " 66_Indoor_Scene_0.jpg | \n",
1307 | " Indoor_Scene_0 | \n",
1308 | " Exposure | \n",
1309 | " 0 | \n",
1310 | " Indoor | \n",
1311 | " Test | \n",
1312 | "
\n",
1313 | " \n",
1314 | " | 2 | \n",
1315 | " Exposure\\31_Indoor_Scene_0.tiff | \n",
1316 | " -1.850620 | \n",
1317 | " 0.411542 | \n",
1318 | " -2.626136 | \n",
1319 | " -0.975510 | \n",
1320 | " 1.650626 | \n",
1321 | " 1 | \n",
1322 | " -1.0 | \n",
1323 | " 118.0 | \n",
1324 | " 31_Indoor_Scene_0.tiff | \n",
1325 | " Indoor_Scene_0 | \n",
1326 | " Exposure | \n",
1327 | " 0 | \n",
1328 | " Indoor | \n",
1329 | " Test | \n",
1330 | "
\n",
1331 | " \n",
1332 | " | 3 | \n",
1333 | " Exposure\\33_Indoor_Scene_0.jpg | \n",
1334 | " -1.641410 | \n",
1335 | " 0.518849 | \n",
1336 | " -2.717875 | \n",
1337 | " -0.625668 | \n",
1338 | " 2.092207 | \n",
1339 | " 1 | \n",
1340 | " 1.0 | \n",
1341 | " 120.0 | \n",
1342 | " 33_Indoor_Scene_0.jpg | \n",
1343 | " Indoor_Scene_0 | \n",
1344 | " Exposure | \n",
1345 | " 0 | \n",
1346 | " Indoor | \n",
1347 | " Test | \n",
1348 | "
\n",
1349 | " \n",
1350 | " | 4 | \n",
1351 | " Exposure\\36_Indoor_Scene_0.jpeg | \n",
1352 | " -0.580968 | \n",
1353 | " 0.450048 | \n",
1354 | " -1.539121 | \n",
1355 | " 0.255611 | \n",
1356 | " 1.794732 | \n",
1357 | " 2 | \n",
1358 | " 5.0 | \n",
1359 | " 106.0 | \n",
1360 | " 36_Indoor_Scene_0.jpeg | \n",
1361 | " Indoor_Scene_0 | \n",
1362 | " Exposure | \n",
1363 | " 0 | \n",
1364 | " Indoor | \n",
1365 | " Test | \n",
1366 | "
\n",
1367 | " \n",
1368 | " | ... | \n",
1369 | " ... | \n",
1370 | " ... | \n",
1371 | " ... | \n",
1372 | " ... | \n",
1373 | " ... | \n",
1374 | " ... | \n",
1375 | " ... | \n",
1376 | " ... | \n",
1377 | " ... | \n",
1378 | " ... | \n",
1379 | " ... | \n",
1380 | " ... | \n",
1381 | " ... | \n",
1382 | " ... | \n",
1383 | " ... | \n",
1384 | "
\n",
1385 | " \n",
1386 | " | 1507 | \n",
1387 | " Exposure\\5100_Outdoor_Scene_49.jpg | \n",
1388 | " 1.661656 | \n",
1389 | " 0.387337 | \n",
1390 | " 0.954093 | \n",
1391 | " 2.548217 | \n",
1392 | " 1.594124 | \n",
1393 | " 4 | \n",
1394 | " -1.0 | \n",
1395 | " 109.0 | \n",
1396 | " 5100_Outdoor_Scene_49.jpg | \n",
1397 | " Outdoor_Scene_49 | \n",
1398 | " Exposure | \n",
1399 | " 49 | \n",
1400 | " Outdoor | \n",
1401 | " Test | \n",
1402 | "
\n",
1403 | " \n",
1404 | " | 1508 | \n",
1405 | " Exposure\\5069_Outdoor_Scene_49.jpg | \n",
1406 | " 2.000229 | \n",
1407 | " 0.402352 | \n",
1408 | " 1.194427 | \n",
1409 | " 2.729801 | \n",
1410 | " 1.535374 | \n",
1411 | " 4 | \n",
1412 | " -1.0 | \n",
1413 | " 109.0 | \n",
1414 | " 5069_Outdoor_Scene_49.jpg | \n",
1415 | " Outdoor_Scene_49 | \n",
1416 | " Exposure | \n",
1417 | " 49 | \n",
1418 | " Outdoor | \n",
1419 | " Test | \n",
1420 | "
\n",
1421 | " \n",
1422 | " | 1509 | \n",
1423 | " Exposure\\5066_Outdoor_Scene_49.jpg | \n",
1424 | " 2.918451 | \n",
1425 | " 0.343595 | \n",
1426 | " 2.321701 | \n",
1427 | " 3.608192 | \n",
1428 | " 1.286491 | \n",
1429 | " 5 | \n",
1430 | " 20.0 | \n",
1431 | " 107.0 | \n",
1432 | " 5066_Outdoor_Scene_49.jpg | \n",
1433 | " Outdoor_Scene_49 | \n",
1434 | " Exposure | \n",
1435 | " 49 | \n",
1436 | " Outdoor | \n",
1437 | " Test | \n",
1438 | "
\n",
1439 | " \n",
1440 | " | 1510 | \n",
1441 | " Exposure\\4999_Outdoor_Scene_49.jpg | \n",
1442 | " 3.813626 | \n",
1443 | " 0.535796 | \n",
1444 | " 2.532832 | \n",
1445 | " 4.691582 | \n",
1446 | " 2.158750 | \n",
1447 | " 5 | \n",
1448 | " -1.0 | \n",
1449 | " 105.0 | \n",
1450 | " 4999_Outdoor_Scene_49.jpg | \n",
1451 | " Outdoor_Scene_49 | \n",
1452 | " Exposure | \n",
1453 | " 49 | \n",
1454 | " Outdoor | \n",
1455 | " Test | \n",
1456 | "
\n",
1457 | " \n",
1458 | " | 1511 | \n",
1459 | " Exposure\\5004_Outdoor_Scene_49.jpg | \n",
1460 | " 4.129699 | \n",
1461 | " 1.994617 | \n",
1462 | " 0.030111 | \n",
1463 | " 6.127363 | \n",
1464 | " 6.097252 | \n",
1465 | " 5 | \n",
1466 | " -1.0 | \n",
1467 | " 87.0 | \n",
1468 | " 5004_Outdoor_Scene_49.jpg | \n",
1469 | " Outdoor_Scene_49 | \n",
1470 | " Exposure | \n",
1471 | " 49 | \n",
1472 | " Outdoor | \n",
1473 | " Test | \n",
1474 | "
\n",
1475 | " \n",
1476 | "
\n",
1477 | "
1512 rows × 15 columns
\n",
1478 | "
"
1479 | ],
1480 | "text/plain": [
1481 | " IMAGE PATH JOD JOD STD CI LOW \\\n",
1482 | "0 Exposure\\56_Indoor_Scene_0.jpg -2.644800 0.784488 -4.347027 \n",
1483 | "1 Exposure\\66_Indoor_Scene_0.jpg -2.083214 0.485894 -2.960870 \n",
1484 | "2 Exposure\\31_Indoor_Scene_0.tiff -1.850620 0.411542 -2.626136 \n",
1485 | "3 Exposure\\33_Indoor_Scene_0.jpg -1.641410 0.518849 -2.717875 \n",
1486 | "4 Exposure\\36_Indoor_Scene_0.jpeg -0.580968 0.450048 -1.539121 \n",
1487 | "... ... ... ... ... \n",
1488 | "1507 Exposure\\5100_Outdoor_Scene_49.jpg 1.661656 0.387337 0.954093 \n",
1489 | "1508 Exposure\\5069_Outdoor_Scene_49.jpg 2.000229 0.402352 1.194427 \n",
1490 | "1509 Exposure\\5066_Outdoor_Scene_49.jpg 2.918451 0.343595 2.321701 \n",
1491 | "1510 Exposure\\4999_Outdoor_Scene_49.jpg 3.813626 0.535796 2.532832 \n",
1492 | "1511 Exposure\\5004_Outdoor_Scene_49.jpg 4.129699 1.994617 0.030111 \n",
1493 | "\n",
1494 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n",
1495 | "0 -1.332417 3.014610 0 -1.0 109.0 \n",
1496 | "1 -1.144054 1.816816 0 0.0 106.0 \n",
1497 | "2 -0.975510 1.650626 1 -1.0 118.0 \n",
1498 | "3 -0.625668 2.092207 1 1.0 120.0 \n",
1499 | "4 0.255611 1.794732 2 5.0 106.0 \n",
1500 | "... ... ... ... ... ... \n",
1501 | "1507 2.548217 1.594124 4 -1.0 109.0 \n",
1502 | "1508 2.729801 1.535374 4 -1.0 109.0 \n",
1503 | "1509 3.608192 1.286491 5 20.0 107.0 \n",
1504 | "1510 4.691582 2.158750 5 -1.0 105.0 \n",
1505 | "1511 6.127363 6.097252 5 -1.0 87.0 \n",
1506 | "\n",
1507 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n",
1508 | "0 56_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
1509 | "1 66_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
1510 | "2 31_Indoor_Scene_0.tiff Indoor_Scene_0 Exposure 0 \n",
1511 | "3 33_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n",
1512 | "4 36_Indoor_Scene_0.jpeg Indoor_Scene_0 Exposure 0 \n",
1513 | "... ... ... ... ... \n",
1514 | "1507 5100_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
1515 | "1508 5069_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
1516 | "1509 5066_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
1517 | "1510 4999_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
1518 | "1511 5004_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n",
1519 | "\n",
1520 | " CONDITION SPLIT \n",
1521 | "0 Indoor Test \n",
1522 | "1 Indoor Test \n",
1523 | "2 Indoor Test \n",
1524 | "3 Indoor Test \n",
1525 | "4 Indoor Test \n",
1526 | "... ... ... \n",
1527 | "1507 Outdoor Test \n",
1528 | "1508 Outdoor Test \n",
1529 | "1509 Outdoor Test \n",
1530 | "1510 Outdoor Test \n",
1531 | "1511 Outdoor Test \n",
1532 | "\n",
1533 | "[1512 rows x 15 columns]"
1534 | ]
1535 | },
1536 | "execution_count": 11,
1537 | "metadata": {},
1538 | "output_type": "execute_result"
1539 | }
1540 | ],
1541 | "source": [
1542 | "testDf"
1543 | ]
1544 | },
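{
"cell_type": "markdown",
"id": "device-split-sanity",
"metadata": {},
"source": [
"Optional sanity check (added here for illustration; it is not part of the original notebook): report the sizes of the two device-split sides and confirm that no image appears in both `trainDf` and `testDf` produced above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "device-split-overlap",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check (assumes trainDf/testDf from the merge cell above).\n",
"print(len(trainDf), len(testDf))\n",
"assert set(trainDf['IMAGE']).isdisjoint(set(testDf['IMAGE']))"
]
},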
1545 | {
1546 | "cell_type": "code",
1547 | "execution_count": null,
1548 | "id": "aa0804cd-feec-49e8-a949-fa776389b8d4",
1549 | "metadata": {},
1550 | "outputs": [],
1551 | "source": []
1552 | }
1553 | ],
1554 | "metadata": {
1555 | "kernelspec": {
1556 | "display_name": "Python 3",
1557 | "language": "python",
1558 | "name": "python3"
1559 | },
1560 | "language_info": {
1561 | "codemirror_mode": {
1562 | "name": "ipython",
1563 | "version": 3
1564 | },
1565 | "file_extension": ".py",
1566 | "mimetype": "text/x-python",
1567 | "name": "python",
1568 | "nbconvert_exporter": "python",
1569 | "pygments_lexer": "ipython3",
1570 | "version": "3.8.8"
1571 | }
1572 | },
1573 | "nbformat": 4,
1574 | "nbformat_minor": 5
1575 | }
1576 |
--------------------------------------------------------------------------------