├── src ├── __init__.py └── models │ ├── __init__.py │ └── archs │ ├── __init__.py │ ├── arch_util.py │ ├── sem_hyperiqa_arch.py │ └── sem_hyperiqa_util.py ├── NTIRE24 └── Submission Kit │ ├── logs │ └── log_23_PIQ.log │ ├── weights │ └── weights_23_PIQ.pth │ ├── assets │ └── assets_23_PIQ │ │ ├── __init__.py │ │ └── asset.py │ ├── images.csv │ ├── results │ └── result_23_PIQ.csv │ ├── images │ └── 0 │ │ └── image1.jpg │ ├── submission_23_PIQ.zip │ ├── models │ └── model_23_PIQ.py │ └── Instructions.ipynb ├── Imgs ├── pdf.avif ├── poster.png ├── Thumbnail.png ├── download.png ├── youtube.avif ├── NTIRE2020_logo.png ├── CVPR_Poster_PIQ23.png └── CVRP Logo_2023 Vancouvar_Color.png ├── Test split ├── Scene Split │ └── Scene examples │ │ ├── Test │ │ ├── Indoor_Scene_5_nbImages_50_11153.jpg │ │ ├── Lowlight_Scene_20_nbImages_125_0.jpg │ │ ├── Night_Scene_28_nbImages_112_1032.jpg │ │ ├── Night_Scene_30_nbImages_115_100.jpg │ │ ├── Night_Scene_31_nbImages_113_1019.jpg │ │ ├── Indoor_Scene_10_nbImages_126_1069.JPG │ │ ├── Indoor_Scene_11_nbImages_95_10172.jpg │ │ ├── Indoor_Scene_1_nbImages_124_10371.jpg │ │ ├── Outdoor_Scene_34_nbImages_46_11074.jpg │ │ ├── Outdoor_Scene_43_nbImages_125_1009.jpg │ │ ├── Outdoor_Scene_45_nbImages_79_10621.JPG │ │ ├── Outdoor_Scene_46_nbImages_75_10487.jpg │ │ ├── Lowlight_Scene_16_nbImages_88_10087.jpg │ │ ├── Lowlight_Scene_17_nbImages_102_1004.JPG │ │ └── Outdoor_Scene_33_nbImages_120_27199.jpeg │ │ └── Train │ │ ├── Night_Scene_27_nbImages_115_101.jpg │ │ ├── Outdoor_Scene_42_nbImages_125_1.jpg │ │ ├── Indoor_Scene_0_nbImages_73_10370.jpg │ │ ├── Indoor_Scene_13_nbImages_91_10209.jpg │ │ ├── Indoor_Scene_2_nbImages_75_10279.jpg │ │ ├── Indoor_Scene_3_nbImages_90_11117.jpg │ │ ├── Indoor_Scene_4_nbImages_92_10162.jpg │ │ ├── Indoor_Scene_6_nbImages_126_1005.jpg │ │ ├── Indoor_Scene_7_nbImages_111_1000.jpg │ │ ├── Indoor_Scene_8_nbImages_126_1021.jpg │ │ ├── Indoor_Scene_9_nbImages_125_1011.jpg │ │ ├── Night_Scene_22_nbImages_116_1001.jpg │ │ ├── Night_Scene_23_nbImages_117_1012.JPG │ │ ├── Night_Scene_24_nbImages_120_1043.jpg │ │ ├── Night_Scene_25_nbImages_116_1036.jpg │ │ ├── Night_Scene_26_nbImages_116_1020.jpg │ │ ├── Night_Scene_29_nbImages_113_1058.jpg │ │ ├── Night_Scene_32_nbImages_101_10342.jpg │ │ ├── Outdoor_Scene_44_nbImages_125_10.jpg │ │ ├── Indoor_Scene_12_nbImages_104_10157.jpg │ │ ├── Lowlight_Scene_14_nbImages_97_10240.jpg │ │ ├── Lowlight_Scene_15_nbImages_71_10515.jpg │ │ ├── Lowlight_Scene_18_nbImages_125_1017.jpg │ │ ├── Lowlight_Scene_19_nbImages_125_1010.jpg │ │ ├── Lowlight_Scene_21_nbImages_125_1008.jpg │ │ ├── Outdoor_Scene_35_nbImages_50_11101.jpg │ │ ├── Outdoor_Scene_36_nbImages_74_10445.jpg │ │ ├── Outdoor_Scene_37_nbImages_96_10048.jpg │ │ ├── Outdoor_Scene_38_nbImages_79_10579.jpg │ │ ├── Outdoor_Scene_39_nbImages_93_10005.jpg │ │ ├── Outdoor_Scene_40_nbImages_125_1051.jpg │ │ ├── Outdoor_Scene_41_nbImages_125_1031.jpg │ │ ├── Outdoor_Scene_47_nbImages_90_11096.jpg │ │ ├── Outdoor_Scene_48_nbImages_75_10009.jpg │ │ └── Outdoor_Scene_49_nbImages_125_27205.JPG └── Scene Split.csv ├── README.md └── Test split example.ipynb /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/models/archs/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/logs/log_23_PIQ.log: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/weights/weights_23_PIQ.pth: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/assets/assets_23_PIQ/__init__.py: -------------------------------------------------------------------------------- 1 | from .asset import * -------------------------------------------------------------------------------- /Imgs/pdf.avif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/pdf.avif -------------------------------------------------------------------------------- /Imgs/poster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/poster.png -------------------------------------------------------------------------------- /Imgs/Thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/Thumbnail.png -------------------------------------------------------------------------------- /Imgs/download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/download.png -------------------------------------------------------------------------------- /Imgs/youtube.avif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/youtube.avif -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/images.csv: -------------------------------------------------------------------------------- 1 | IMAGE,CLASS,CONDITION 2 | images/0/image1.jpg,0,INDOOR 3 | -------------------------------------------------------------------------------- /Imgs/NTIRE2020_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/NTIRE2020_logo.png -------------------------------------------------------------------------------- /Imgs/CVPR_Poster_PIQ23.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/CVPR_Poster_PIQ23.png -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/assets/assets_23_PIQ/asset.py: -------------------------------------------------------------------------------- 1 | def print_hello(): 2 | print('Hello, I am an asset.') -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/results/result_23_PIQ.csv: -------------------------------------------------------------------------------- 1 | IMAGE,CLASS,CONDITION,SCORE 2 | images/0/image1.jpg,0,INDOOR,0.0 3 | -------------------------------------------------------------------------------- /Imgs/CVRP Logo_2023 
Vancouvar_Color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Imgs/CVRP Logo_2023 Vancouvar_Color.png -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/images/0/image1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/NTIRE24/Submission Kit/images/0/image1.jpg -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/submission_23_PIQ.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/NTIRE24/Submission Kit/submission_23_PIQ.zip -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Indoor_Scene_5_nbImages_50_11153.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_5_nbImages_50_11153.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Lowlight_Scene_20_nbImages_125_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_20_nbImages_125_0.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Night_Scene_28_nbImages_112_1032.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_28_nbImages_112_1032.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Night_Scene_30_nbImages_115_100.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_30_nbImages_115_100.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Night_Scene_31_nbImages_113_1019.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Night_Scene_31_nbImages_113_1019.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_27_nbImages_115_101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_27_nbImages_115_101.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_42_nbImages_125_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_42_nbImages_125_1.jpg 
-------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Indoor_Scene_10_nbImages_126_1069.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_10_nbImages_126_1069.JPG -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Indoor_Scene_11_nbImages_95_10172.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_11_nbImages_95_10172.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Indoor_Scene_1_nbImages_124_10371.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Indoor_Scene_1_nbImages_124_10371.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Outdoor_Scene_34_nbImages_46_11074.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_34_nbImages_46_11074.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Outdoor_Scene_43_nbImages_125_1009.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_43_nbImages_125_1009.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Outdoor_Scene_45_nbImages_79_10621.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_45_nbImages_79_10621.JPG -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Outdoor_Scene_46_nbImages_75_10487.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_46_nbImages_75_10487.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_0_nbImages_73_10370.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_0_nbImages_73_10370.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_13_nbImages_91_10209.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_13_nbImages_91_10209.jpg 
-------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_2_nbImages_75_10279.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_2_nbImages_75_10279.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_3_nbImages_90_11117.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_3_nbImages_90_11117.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_4_nbImages_92_10162.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_4_nbImages_92_10162.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_6_nbImages_126_1005.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_6_nbImages_126_1005.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_7_nbImages_111_1000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_7_nbImages_111_1000.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_8_nbImages_126_1021.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_8_nbImages_126_1021.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_9_nbImages_125_1011.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_9_nbImages_125_1011.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_22_nbImages_116_1001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_22_nbImages_116_1001.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_23_nbImages_117_1012.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_23_nbImages_117_1012.JPG 
-------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_24_nbImages_120_1043.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_24_nbImages_120_1043.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_25_nbImages_116_1036.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_25_nbImages_116_1036.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_26_nbImages_116_1020.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_26_nbImages_116_1020.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_29_nbImages_113_1058.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_29_nbImages_113_1058.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Night_Scene_32_nbImages_101_10342.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Night_Scene_32_nbImages_101_10342.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_44_nbImages_125_10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_44_nbImages_125_10.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Lowlight_Scene_16_nbImages_88_10087.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_16_nbImages_88_10087.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Lowlight_Scene_17_nbImages_102_1004.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Lowlight_Scene_17_nbImages_102_1004.JPG -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Test/Outdoor_Scene_33_nbImages_120_27199.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Test/Outdoor_Scene_33_nbImages_120_27199.jpeg 
-------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Indoor_Scene_12_nbImages_104_10157.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Indoor_Scene_12_nbImages_104_10157.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Lowlight_Scene_14_nbImages_97_10240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_14_nbImages_97_10240.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Lowlight_Scene_15_nbImages_71_10515.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_15_nbImages_71_10515.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Lowlight_Scene_18_nbImages_125_1017.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_18_nbImages_125_1017.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Lowlight_Scene_19_nbImages_125_1010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_19_nbImages_125_1010.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Lowlight_Scene_21_nbImages_125_1008.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Lowlight_Scene_21_nbImages_125_1008.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_35_nbImages_50_11101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_35_nbImages_50_11101.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_36_nbImages_74_10445.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_36_nbImages_74_10445.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_37_nbImages_96_10048.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene 
examples/Train/Outdoor_Scene_37_nbImages_96_10048.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_38_nbImages_79_10579.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_38_nbImages_79_10579.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_39_nbImages_93_10005.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_39_nbImages_93_10005.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_40_nbImages_125_1051.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_40_nbImages_125_1051.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_41_nbImages_125_1031.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_41_nbImages_125_1031.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_47_nbImages_90_11096.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_47_nbImages_90_11096.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_48_nbImages_75_10009.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_48_nbImages_75_10009.jpg -------------------------------------------------------------------------------- /Test split/Scene Split/Scene examples/Train/Outdoor_Scene_49_nbImages_125_27205.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DXOMARK-Research/PIQ2023/HEAD/Test split/Scene Split/Scene examples/Train/Outdoor_Scene_49_nbImages_125_27205.JPG -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/models/model_23_PIQ.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pandas as pd 4 | 5 | sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir)) 6 | 7 | import assets.assets_23_PIQ as my_asset 8 | 9 | def main(): 10 | 11 | my_asset.print_hello() 12 | 13 | # Load the images.csv file 14 | images_df = pd.read_csv(r'./images.csv') 15 | 16 | # Add a 'SCORE' column with a default value of 0.0 17 | images_df['SCORE'] = 0.0 18 | 19 | # Save the modified DataFrame to result_23_PIQ.csv in the results folder 20 | 
images_df.to_csv('./results/result_23_PIQ.csv', index=False, sep=',') 21 | 22 | if __name__ == "__main__": 23 | main() -------------------------------------------------------------------------------- /Test split/Scene Split.csv: -------------------------------------------------------------------------------- 1 | SCENE,SPLIT,NB IMAGES 2 | Indoor_Scene_0,Train,72 3 | Indoor_Scene_12,Train,103 4 | Indoor_Scene_13,Train,90 5 | Indoor_Scene_2,Train,74 6 | Indoor_Scene_3,Train,89 7 | Indoor_Scene_4,Train,91 8 | Indoor_Scene_6,Train,125 9 | Indoor_Scene_7,Train,110 10 | Indoor_Scene_8,Train,125 11 | Indoor_Scene_9,Train,125 12 | Lowlight_Scene_14,Train,96 13 | Lowlight_Scene_15,Train,70 14 | Lowlight_Scene_18,Train,125 15 | Lowlight_Scene_19,Train,125 16 | Lowlight_Scene_21,Train,125 17 | Night_Scene_22,Train,116 18 | Night_Scene_23,Train,117 19 | Night_Scene_24,Train,119 20 | Night_Scene_25,Train,116 21 | Night_Scene_26,Train,115 22 | Night_Scene_27,Train,115 23 | Night_Scene_29,Train,112 24 | Night_Scene_32,Train,100 25 | Outdoor_Scene_35,Train,49 26 | Outdoor_Scene_36,Train,73 27 | Outdoor_Scene_37,Train,95 28 | Outdoor_Scene_38,Train,78 29 | Outdoor_Scene_39,Train,92 30 | Outdoor_Scene_40,Train,125 31 | Outdoor_Scene_41,Train,125 32 | Outdoor_Scene_42,Train,125 33 | Outdoor_Scene_44,Train,125 34 | Outdoor_Scene_47,Train,89 35 | Outdoor_Scene_48,Train,74 36 | Outdoor_Scene_49,Train,125 37 | Indoor_Scene_10,Test,125 38 | Indoor_Scene_11,Test,94 39 | Indoor_Scene_1,Test,124 40 | Indoor_Scene_5,Test,50 41 | Lowlight_Scene_16,Test,87 42 | Lowlight_Scene_17,Test,102 43 | Lowlight_Scene_20,Test,125 44 | Night_Scene_28,Test,112 45 | Night_Scene_30,Test,114 46 | Night_Scene_31,Test,112 47 | Outdoor_Scene_33,Test,119 48 | Outdoor_Scene_34,Test,45 49 | Outdoor_Scene_43,Test,125 50 | Outdoor_Scene_45,Test,78 51 | Outdoor_Scene_46,Test,74 52 | -------------------------------------------------------------------------------- /src/models/archs/arch_util.py: -------------------------------------------------------------------------------- 1 | import math 2 | import collections.abc 3 | import numpy as np 4 | import torch 5 | import os 6 | from typing import Tuple 7 | from urllib.parse import urlparse 8 | from itertools import repeat 9 | from torch import nn as nn 10 | from torch.nn import functional as F 11 | from torch.nn import init as init 12 | from torch.nn.modules.batchnorm import _BatchNorm 13 | from torch.hub import download_url_to_file, get_dir 14 | 15 | 16 | # -------------------------------------------- 17 | # IQA utils 18 | # Code taken from: https://github.com/chaofengc/IQA-PyTorch 19 | # -------------------------------------------- 20 | 21 | def load_file_from_url(url, model_dir=None, progress=True, file_name=None): 22 | """Load file form http url, will download models if necessary. 23 | 24 | Ref: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py 25 | 26 | Args: 27 | url (str): URL to be downloaded. 28 | model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir. 29 | Default: None. 30 | progress (bool): Whether to show the download progress. Default: True. 31 | file_name (str): The downloaded file name. If None, use the file name in the url. Default: None. 32 | 33 | Returns: 34 | str: The path to the downloaded file. 
35 | """ 36 | if model_dir is None: # use the pytorch hub_dir 37 | hub_dir = get_dir() 38 | model_dir = os.path.join(hub_dir, 'checkpoints') 39 | 40 | os.makedirs(model_dir, exist_ok=True) 41 | 42 | parts = urlparse(url) 43 | filename = os.path.basename(parts.path) 44 | if file_name is not None: 45 | filename = file_name 46 | cached_file = os.path.abspath(os.path.join(model_dir, filename)) 47 | if not os.path.exists(cached_file): 48 | print(f'Downloading: "{url}" to {cached_file}\n') 49 | download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) 50 | return cached_file 51 | 52 | def dist_to_mos(dist_score: torch.Tensor) -> torch.Tensor: 53 | """Convert distribution prediction to mos score. 54 | For datasets with detailed score labels, such as AVA 55 | 56 | Args: 57 | dist_score (tensor): (*, C), C is the class number 58 | 59 | Output: 60 | mos_score (tensor): (*, 1) 61 | """ 62 | num_classes = dist_score.shape[-1] 63 | mos_score = dist_score * torch.arange(1, num_classes + 1).to(dist_score) 64 | mos_score = mos_score.sum(dim=-1, keepdim=True) 65 | return mos_score 66 | 67 | 68 | # -------------------------------------------- 69 | # Common utils 70 | # -------------------------------------------- 71 | 72 | 73 | def load_pretrained_network(net, model_path, strict=True, weight_keys=None): 74 | if model_path.startswith('https://') or model_path.startswith('http://'): 75 | model_path = load_file_from_url(model_path) 76 | print(f'Loading pretrained model {net.__class__.__name__} from {model_path}') 77 | state_dict = torch.load(model_path, map_location=torch.device('cpu')) 78 | if weight_keys: 79 | state_dict = state_dict[weight_keys] 80 | net.load_state_dict(state_dict, strict=strict) 81 | 82 | 83 | def _ntuple(n): 84 | 85 | def parse(x): 86 | if isinstance(x, collections.abc.Iterable): 87 | return x 88 | return tuple(repeat(x, n)) 89 | 90 | return parse 91 | 92 | 93 | to_1tuple = _ntuple(1) 94 | to_2tuple = _ntuple(2) 95 | to_3tuple = _ntuple(3) 96 | to_4tuple = _ntuple(4) 97 | to_ntuple = _ntuple 98 | 99 | 100 | @torch.no_grad() 101 | def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs): 102 | r"""Initialize network weights. 103 | 104 | Args: 105 | module_list (list[nn.Module] | nn.Module): Modules to be initialized. 106 | scale (float): Scale initialized weights, especially for residual 107 | blocks. Default: 1. 108 | bias_fill (float): The value to fill bias. Default: 0. 109 | kwargs (dict): Other arguments for initialization function. 110 | 111 | """ 112 | if not isinstance(module_list, list): 113 | module_list = [module_list] 114 | for module in module_list: 115 | for m in module.modules(): 116 | if isinstance(m, nn.Conv2d): 117 | init.kaiming_normal_(m.weight, **kwargs) 118 | m.weight.data *= scale 119 | if m.bias is not None: 120 | m.bias.data.fill_(bias_fill) 121 | elif isinstance(m, nn.Linear): 122 | init.kaiming_normal_(m.weight, **kwargs) 123 | m.weight.data *= scale 124 | if m.bias is not None: 125 | m.bias.data.fill_(bias_fill) 126 | elif isinstance(m, _BatchNorm): 127 | init.constant_(m.weight, 1) 128 | if m.bias is not None: 129 | m.bias.data.fill_(bias_fill) 130 | 131 | 132 | def symm_pad(im: torch.Tensor, padding: Tuple[int, int, int, int]): 133 | """Symmetric padding same as tensorflow. 
134 | Ref: https://discuss.pytorch.org/t/symmetric-padding/19866/3 135 | """ 136 | h, w = im.shape[-2:] 137 | left, right, top, bottom = padding 138 | 139 | x_idx = np.arange(-left, w+right) 140 | y_idx = np.arange(-top, h+bottom) 141 | 142 | def reflect(x, minx, maxx): 143 | """ Reflects an array around two points making a triangular waveform that ramps up 144 | and down, allowing for pad lengths greater than the input length """ 145 | rng = maxx - minx 146 | double_rng = 2*rng 147 | mod = np.fmod(x - minx, double_rng) 148 | normed_mod = np.where(mod < 0, mod+double_rng, mod) 149 | out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx 150 | return np.array(out, dtype=x.dtype) 151 | 152 | x_pad = reflect(x_idx, -0.5, w-0.5) 153 | y_pad = reflect(y_idx, -0.5, h-0.5) 154 | xx, yy = np.meshgrid(x_pad, y_pad) 155 | return im[..., yy, xx] 156 | 157 | 158 | def exact_padding_2d(x, kernel, stride=1, dilation=1, mode='same'): 159 | assert len(x.shape) == 4, f'Only support 4D tensor input, but got {x.shape}' 160 | kernel = to_2tuple(kernel) 161 | stride = to_2tuple(stride) 162 | dilation = to_2tuple(dilation) 163 | b, c, h, w = x.shape 164 | h2 = math.ceil(h / stride[0]) 165 | w2 = math.ceil(w / stride[1]) 166 | pad_row = (h2 - 1) * stride[0] + (kernel[0] - 1) * dilation[0] + 1 - h 167 | pad_col = (w2 - 1) * stride[1] + (kernel[1] - 1) * dilation[1] + 1 - w 168 | pad_l, pad_r, pad_t, pad_b = (pad_col // 2, pad_col - pad_col // 2, pad_row // 2, pad_row - pad_row // 2) 169 | 170 | mode = mode if mode != 'same' else 'constant' 171 | if mode != 'symmetric': 172 | x = F.pad(x, (pad_l, pad_r, pad_t, pad_b), mode=mode) 173 | elif mode == 'symmetric': 174 | x = symm_pad(x, (pad_l, pad_r, pad_t, pad_b)) 175 | 176 | return x 177 | 178 | 179 | class ExactPadding2d(nn.Module): 180 | r"""This function calculate exact padding values for 4D tensor inputs, 181 | and support the same padding mode as tensorflow. 182 | 183 | Args: 184 | kernel (int or tuple): kernel size. 185 | stride (int or tuple): stride size. 186 | dilation (int or tuple): dilation size, default with 1. 187 | mode (srt): padding mode can be ('same', 'symmetric', 'replicate', 'circular') 188 | 189 | """ 190 | 191 | def __init__(self, kernel, stride=1, dilation=1, mode='same'): 192 | super().__init__() 193 | self.kernel = to_2tuple(kernel) 194 | self.stride = to_2tuple(stride) 195 | self.dilation = to_2tuple(dilation) 196 | self.mode = mode 197 | 198 | def forward(self, x): 199 | return exact_padding_2d(x, self.kernel, self.stride, self.dilation, self.mode) 200 | -------------------------------------------------------------------------------- /NTIRE24/Submission Kit/Instructions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "6b1557dc-dfe3-4157-8aa5-f6e3cee6714e", 6 | "metadata": {}, 7 | "source": [ 8 | "# NTIRE 2024 - Portrait Quality Assessment Challenge ft. 
DXOMARK\n" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "5dc9c177-e6f5-4bc1-84f1-0803b294608b", 14 | "metadata": {}, 15 | "source": [ 16 | "# -- INSTRUCTIONS --\n", 17 | "# **[READ CAREFULLY]**\n", 18 | " " 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "id": "09a1d630-73c8-4231-a1bc-0caa04e8ce3f", 24 | "metadata": { 25 | "heading_collapsed": "true", 26 | "tags": [] 27 | }, 28 | "source": [ 29 | "# 1 - DESCRIPTION\n", 30 | "## What is this?\n", 31 | "This is a simple instruction notebook to help you submit your model.\n", 32 | "\n", 33 | "## Links\n", 34 | "- **NTIRE 24 challenge**: https://codalab.lisn.upsaclay.fr/competitions/17311#participate\n", 35 | "- **PIQ23 github**: https://github.com/DXOMARK-Research/PIQ2023\n", 36 | "- **PIQ23 Download link**: https://corp.dxomark.com/data-base-piq23/\n", 37 | "\n", 38 | "## Test set\n", 39 | "- The evaluation process consists of testing your model on an internal portrait dataset of around ***200 scenes of 7 images each, each shot with a different device of similar quality***.\n", 40 | "- Images are either jpeg or TIFF with extensions ***'.jpg' or '.tiff'***.\n", 41 | "- Images are either landscape or portrait with a size of: ***1280x960*** or ***960x1280***.\n", 42 | "- Lighting conditions are: ***Indoor, Outdoor, Lowlight and Night***.\n", 43 | "- Different skin tones, genders and ages are used. Prepare for something slightly different from PIQ23.\n", 44 | "- Do not expect the same people to be present in the internal dataset.\n", 45 | "- The test csv will include ***image names and the class of each image alongside the lighting conditions***. *Please refer to the images.csv*\n", 46 | "\n", 47 | "## Hardware requirements\n", 48 | "- You are free to do inference on one or multiple images.\n", 49 | "- Please make sure that your model is able to run on a ***single 8GB VRAM GPU***.\n", 50 | "- Maximum Teraflops: ***5TFLOPS*** *(Refer below to calculate teraflops on your model)*.\n", 51 | "- Maximum inference time: ***5 seconds/image***. *Model Loading does not count*.\n", 52 | "- Maximum model.pth size: ***2GB***.\n", 53 | "- Maximum RAM: **16GB**.\n", 54 | "- **NOTE: If your model complies with the 16GB RAM and 5s/image limits on CPU, you don't need to use a GPU**\n", 55 | "\n", 56 | "## Submission\n", 57 | "- You need to submit a zip file with the following naming: ***submission_[LAST_NAME]_[FIRST_NAME].zip*** (refer to *./submission_23_PIQ.zip* for an example), including:\n", 58 | " - A main script with the following naming: ***model_[LAST_NAME]_[FIRST_NAME].py***;\n", 59 | " - Model weights with the following naming: ***weights_[LAST_NAME]_[FIRST_NAME].(pth,ckpt)***;\n", 60 | " - Scripts asset folder with the following naming: ***assets_[LAST_NAME]_[FIRST_NAME]*** including all of your scripts.\n", 61 | "- Your main script will be put in the ***./models*** folder.\n", 62 | "- Your model weights will be saved in ***./weights***.\n", 63 | "- Your assets will be saved in ***./assets***.\n", 64 | "- You will get access to one level above the models/ folder.\n", 65 | "- Save your logs in ***./logs/log_[LAST_NAME]_[FIRST_NAME].log***.\n", 66 | "- Your script needs to load the ***./images.csv***.\n", 67 | "- Image paths are structured as follows: ***images/[class]/[imagename.(jpg,tiff)]***.\n", 68 | "- You need to save your results as follows: ***./results/result_[LAST_NAME]_[FIRST_NAME].csv***.\n", 69 | "- You need to add a ***column 'SCORE' to the images.csv***. 
*KEEP ALL OTHER METADATA*.\n", 70 | "- You can use a ***comma or semi-colon separator for the results***. Any other separator will not be considered.\n", 71 | "- Refer to *./models/model_23_PIQ.py* for an example.\n", 72 | "- Please follow the naming and the structure of the zip file carefully since the extraction of the zip data is automatic." 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "id": "f95a456b-49c9-448c-9583-92918450e0a4", 78 | "metadata": { 79 | "tags": [] 80 | }, 81 | "source": [ 82 | "# 2 - ASSETS" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "id": "1fac33bf-779f-4c11-a891-430a32129e3c", 88 | "metadata": { 89 | "tags": [] 90 | }, 91 | "source": [ 92 | "## CONSTANTS" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 2, 98 | "id": "f0ed13e7-49cf-42e8-b20c-86298665eeab", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "EXTENSIONS = ('.tiff', '.tif', '.TIFF', '.TIF', '.jpg', '.JPG', '.jpeg')\n", 103 | "CONDITIONS = ('OUTDOOR', 'INDOOR', 'LOWLIGHT', 'NIGHT')\n", 104 | "IMAGE_SIZE = ((1280, 960), (960, 1280))" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "id": "865a1ae0-04b3-475b-8937-e71ee935ae61", 110 | "metadata": { 111 | "tags": [] 112 | }, 113 | "source": [ 114 | "## Hardware Check" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 1, 120 | "id": "74d8dd3a-b854-421d-9f01-92aefa04d13c", 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "import torch\n", 125 | "from torchvision.models import resnet50, ResNet50_Weights\n", 126 | "from thop import profile # pip install thop\n", 127 | "\n", 128 | "def torch_cuda_memory_usage():\n", 129 | " \"\"\"Returns CUDA memory usage if available\"\"\"\n", 130 | " if torch.cuda.is_available():\n", 131 | " torch.cuda.synchronize() # Wait for all CUDA kernels to finish\n", 132 | " allocated_memory = torch.cuda.memory_allocated() # Total allocated memory\n", 133 | " cached_memory = torch.cuda.memory_reserved() # Total cached memory\n", 134 | " return allocated_memory / (1024**3), cached_memory / (1024**3) # Convert bytes to GB\n", 135 | " else:\n", 136 | " return 0, 0\n", 137 | "\n", 138 | "def test_model_resources(model, batch):\n", 139 | " \n", 140 | " macs, params = profile(model, inputs=(batch, ), verbose=False)\n", 141 | " flops = macs * 2 # Convert MACs to FLOPs\n", 142 | " tflops = flops / (10**12) # Convert FLOPs to TFLOPs \n", 143 | " \n", 144 | " torch.cuda.reset_peak_memory_stats() # Reset peak memory stats for accurate peak measurement\n", 145 | "\n", 146 | " # Measure memory before inference\n", 147 | " allocated_before, cached_before = torch_cuda_memory_usage()\n", 148 | " \n", 149 | " model = model.cuda() # Move model to GPU\n", 150 | " batch = batch.cuda() # Move data to GPU\n", 151 | " \n", 152 | " # Dummy forward pass to measure VRAM usage\n", 153 | " with torch.no_grad():\n", 154 | " _ = model(batch)\n", 155 | " \n", 156 | " # Measure memory after inference\n", 157 | " allocated_after, cached_after = torch_cuda_memory_usage()\n", 158 | " peak_allocated = torch.cuda.max_memory_allocated() / (1024**3) # Peak allocated memory during inference\n", 159 | " \n", 160 | " vram_usage_allocated = allocated_after - allocated_before # Approximation of additional VRAM used during inference\n", 161 | " vram_usage_cached = cached_after - cached_before # Approximation based on cached memory\n", 162 | "\n", 163 | " print(f\"MACs: {macs}\")\n", 164 | " print(f\"FLOPs: {flops}\")\n", 165 | " print(f\"TFLOPs: {tflops}\")\n", 
166 | " print(f\"Approx. Additional VRAM Usage (Allocated) during Inference: {vram_usage_allocated} GB\")\n", 167 | " print(f\"Approx. Additional VRAM Usage (Cached) during Inference: {vram_usage_cached} GB\")\n", 168 | " print(f\"Peak VRAM Usage during Inference: {peak_allocated} GB\")\n", 169 | " \n", 170 | " del model, batch # Free up memory\n", 171 | " torch.cuda.empty_cache() # Clear cache" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 2, 177 | "id": "5f28821c-e496-4e31-bf44-b7ceeb0d6641", 178 | "metadata": {}, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "MACs: 1821354430464.0\n", 185 | "FLOPs: 3642708860928.0\n", 186 | "TFLOPs: 3.642708860928\n", 187 | "Approx. Additional VRAM Usage (Allocated) during Inference: 0.3515634536743164 GB\n", 188 | "Approx. Additional VRAM Usage (Cached) during Inference: 5.64453125 GB\n", 189 | "Peak VRAM Usage during Inference: 5.28789758682251 GB\n" 190 | ] 191 | } 192 | ], 193 | "source": [ 194 | "\n", 195 | "model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1) # change to your model\n", 196 | "batch_size = 18 # Test the batch size you want\n", 197 | "batch = torch.stack([torch.randn(3, 1280, 960)]*batch_size)\n", 198 | "\n", 199 | "test_model_resources(model, batch)" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "id": "aee76f21-6440-4eff-8294-b17711ff4e40", 205 | "metadata": { 206 | "tags": [] 207 | }, 208 | "source": [ 209 | "# 3 - SUBMISSION" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 1, 215 | "id": "032f8823-38b7-4201-b5d5-1aaa7aaab549", 216 | "metadata": {}, 217 | "outputs": [ 218 | { 219 | "name": "stdout", 220 | "output_type": "stream", 221 | "text": [ 222 | "Hello, I am an asset.\n" 223 | ] 224 | } 225 | ], 226 | "source": [ 227 | "%run ./models/model_23_PIQ.py" 228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": null, 233 | "id": "293c6276-95f6-4e08-8589-0217107d3486", 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [] 237 | } 238 | ], 239 | "metadata": { 240 | "kernelspec": { 241 | "display_name": "py311", 242 | "language": "python", 243 | "name": "py311" 244 | }, 245 | "language_info": { 246 | "codemirror_mode": { 247 | "name": "ipython", 248 | "version": 3 249 | }, 250 | "file_extension": ".py", 251 | "mimetype": "text/x-python", 252 | "name": "python", 253 | "nbconvert_exporter": "python", 254 | "pygments_lexer": "ipython3", 255 | "version": "3.11.4" 256 | } 257 | }, 258 | "nbformat": 4, 259 | "nbformat_minor": 5 260 | } 261 | -------------------------------------------------------------------------------- /src/models/archs/sem_hyperiqa_arch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn.functional import one_hot 4 | 5 | from .sem_hyperiqa_util import HyperNet, RescaleNet, TargetNet, SceneClassNet 6 | from .arch_util import load_pretrained_network 7 | 8 | 9 | defaultHyperNetWeights = { 10 | # Only for original hyperIQA model with input size 224x224 11 | 'koniq': None #link: https://drive.google.com/file/d/1OOUmnbvpGea0LIGpIWEbOyxfWx6UCiiE/view 12 | } 13 | 14 | defaultWeights = {} 15 | 16 | 17 | class SemHyperIQA(nn.Module): 18 | def __init__(self, patchSize, 19 | hyperNetPretrained=None, 20 | pretrained=None, 21 | classify=None, 22 | rescale=None, 23 | **kwargs): 24 | 25 | super().__init__() 26 | patchRate = patchSize // 224 27 | self.classify = classify 28 | 
self.rescale = rescale 29 | self.classFeaturesOut = None 30 | self.nbPatchesIn = None 31 | self.classKey = kwargs.get('classKey', 'class') 32 | self.qualityKey = kwargs.get('qualityKey', 'quality') 33 | self.preQualityKey = kwargs.get('preQualityKey', 'preQuality') 34 | if self.classify is not None: 35 | self.classFeaturesOut = self.classify.get('numClasses', None) 36 | self.nbPatchesIn = self.classify.get('nbPatchesIn', 1) # Assume one patch if we do not want to concatenate patches 37 | self.hyperNet = HyperNet(16, 38 | 112 * patchRate, 39 | 224 * patchRate, 40 | 112 * patchRate, 41 | 56 * patchRate, 42 | 28 * patchRate, 43 | 14 * patchRate, 44 | 7 * patchRate, 45 | patchRate, 46 | classFeaturesOut=self.classFeaturesOut)#.cuda() 47 | 48 | if hyperNetPretrained is not None: 49 | load_pretrained_network(self.hyperNet, defaultHyperNetWeights.get(hyperNetPretrained, hyperNetPretrained)) 50 | if pretrained is not None: 51 | load_pretrained_network(self, defaultWeights.get(pretrained, pretrained)) 52 | 53 | if self.classify is not None: 54 | self.sceneClassNet = SceneClassNet(featureInSize=self.nbPatchesIn * 112 * patchRate * self.classFeaturesOut, 55 | **self.classify) 56 | self.sceneclassnet_params = self.sceneClassNet.parameters() 57 | if self.rescale is not None: 58 | if 'featureInSize' in self.rescale: 59 | self.classFeedback = False 60 | else: 61 | self.classFeedback = True 62 | self.rescale.update({'featureInSize': self.classify.get('numClasses')}) # intentionally throw error if rescale is defined with infeatures and no classification parameter 63 | 64 | self.rescaleNet = RescaleNet(**self.rescale) 65 | self.rescalenet_params = self.rescaleNet.parameters() 66 | 67 | backbone_params = list(map(id, self.hyperNet.res.parameters())) 68 | self.hypernet_params = filter(lambda p: id(p) not in backbone_params, self.hyperNet.parameters()) 69 | self.resnet_params = filter(lambda p: id(p) in backbone_params, self.hyperNet.parameters()) 70 | 71 | def forward(self, x, index=None, *args): 72 | # Generate weights for target network 73 | output = self.hyperNet(x) 74 | 75 | # Check if hyperNet returns hnFeatures 76 | if isinstance(output, tuple): 77 | paras, hnFeatures = output 78 | hnFeatures = self._consolidate_patches(hnFeatures, self.nbPatchesIn) 79 | else: 80 | paras = output 81 | 82 | if isinstance(paras, list): 83 | paras = self._stack_dicts(paras) 84 | 85 | # Building target network 86 | modelTarget = TargetNet(paras) 87 | for param in modelTarget.parameters(): 88 | param.requires_grad = False 89 | 90 | # Quality score prediction 91 | inputTargetNet = paras['target_in_vec'] 92 | predictionsQuality = modelTarget(inputTargetNet) 93 | 94 | predScene = None 95 | outputDict = {} 96 | 97 | if hasattr(self, 'sceneClassNet') and isinstance(output, tuple): 98 | predScene = self.sceneClassNet(hnFeatures) 99 | predictionsQuality = predictionsQuality.reshape(self.nbPatchesIn, -1).mean(dim=0) 100 | 101 | outputDict[self.qualityKey] = predictionsQuality 102 | 103 | if hasattr(self, 'rescaleNet') and hasattr(self, 'classFeedback'): 104 | outputDict[self.preQualityKey] = predictionsQuality 105 | 106 | if not self.classFeedback and index is not None: 107 | index_ = one_hot(index, num_classes=self.rescale['featureInSize']).to(torch.float32) 108 | scoreWeights = self.rescaleNet(index_) 109 | 110 | elif hasattr(self, 'sceneClassNet') and isinstance(output, tuple): 111 | scoreWeights = self.rescaleNet(predScene.softmax(dim=1)) 112 | 113 | else: 114 | raise ValueError("Class feedback needs class prediction, which is 
not defined in this configuration") 115 | 116 | # FIXME: Fit with any polynomial degree instead of only manual linear fit. 117 | # Re-scale the score prediction with alpha/beta 118 | outputDict[self.qualityKey] = scoreWeights[:,0] * predictionsQuality + scoreWeights[:,1] 119 | 120 | if predScene is not None: 121 | return {self.classKey: predScene, 122 | **{keyOut: valueOut.unsqueeze(1) for keyOut, valueOut in outputDict.items()}} 123 | 124 | return outputDict 125 | 126 | @staticmethod 127 | def _consolidate_patches(hnFeatures, patches_per_image): 128 | # Check if hnFeatures is a list 129 | if isinstance(hnFeatures, list): 130 | # If first element of the list is a tensor and is 2D, stack along 1st dimension 131 | if hnFeatures[0].dim() >= 2: 132 | hnFeatures = torch.cat(hnFeatures, dim=0) 133 | # If first element of the list is a tensor and is 1D, convert to 2D and stack along 1st dimension 134 | elif hnFeatures[0].dim() == 1: 135 | hnFeatures = torch.stack(hnFeatures, dim=0) 136 | 137 | # Ensure that the total number of features is a multiple of patches_per_image 138 | if hnFeatures.shape[0] % patches_per_image != 0: 139 | raise ValueError("Total number of features is not a multiple of patches_per_image") 140 | 141 | # Reshape the tensor 142 | consolidated_features = hnFeatures.reshape(-1, hnFeatures.shape[1] * patches_per_image) 143 | 144 | return consolidated_features 145 | 146 | @staticmethod 147 | def _stack_dicts(dict_list): 148 | # Ensure dict_list is not empty 149 | if not dict_list: 150 | return {} 151 | 152 | # Create a new dictionary where each key is a stack of the corresponding values from the dictionaries in dict_list 153 | stacked_dict = {key: torch.stack([d[key] for d in dict_list], dim=0) for key in dict_list[0].keys()} 154 | 155 | return stacked_dict 156 | 157 | class FullHyperIQA(SemHyperIQA): 158 | def __init__(self, patchSize, hyperNetPretrained=None, pretrained=None, classify=None, rescale=None, **kwargs): 159 | super().__init__(patchSize, hyperNetPretrained, pretrained, classify, rescale, **kwargs) 160 | self.weightQualityByClass = kwargs.get('weightQualityByClass', 0) 161 | 162 | def forward(self, x, index=None, *args): 163 | if self.weightQualityByClass <= 0: 164 | return super().forward(x, index, *args) 165 | 166 | # Generate weights for target network 167 | output = self.hyperNet(x) 168 | 169 | # Check if hyperNet returns hnFeatures 170 | if isinstance(output, tuple): 171 | paras, hnFeatures = output 172 | hnFeatures = self._consolidate_patches(hnFeatures, self.nbPatchesIn) 173 | else: 174 | paras = output 175 | 176 | if isinstance(paras, list): 177 | paras = self._stack_dicts(paras) 178 | 179 | # Building target network 180 | modelTarget = TargetNet(paras) 181 | for param in modelTarget.parameters(): 182 | param.requires_grad = False 183 | 184 | # Quality score prediction 185 | inputTargetNet = paras['target_in_vec'] 186 | predictionsQuality = modelTarget(inputTargetNet) 187 | 188 | predScene = None 189 | outputDict = {} 190 | 191 | if hasattr(self, 'sceneClassNet') and isinstance(output, tuple): 192 | predScene = self.sceneClassNet(hnFeatures) 193 | predictionsQuality = predictionsQuality.reshape(self.nbPatchesIn, -1).mean(dim=0) 194 | 195 | outputDict[self.qualityKey] = predictionsQuality 196 | 197 | if hasattr(self, 'rescaleNet') and hasattr(self, 'classFeedback'): 198 | 199 | outputDict[self.preQualityKey] = predictionsQuality 200 | 201 | if not self.classFeedback and index is not None: 202 | index_ = one_hot(index, 
num_classes=self.rescale['featureInSize']).to(torch.float32) 203 | scoreWeights = self.rescaleNet(index_) 204 | # FIXME: Fit with any polynomial degree instead of only manual linear fit. 205 | # Re-scale the score prediction with alpha/beta 206 | outputDict[self.qualityKey] = scoreWeights[:,0] * predictionsQuality + scoreWeights[:,1] 207 | 208 | elif hasattr(self, 'sceneClassNet') and isinstance(output, tuple): 209 | # Extracting top-k class probabilities and indices 210 | topk_probs, topk_indices = torch.topk(predScene.softmax(dim=1), self.weightQualityByClass, dim=1) 211 | 212 | # Normalizing the top-k probabilities 213 | topk_probs /= topk_probs.sum(dim=1, keepdim=True) 214 | 215 | # Rescale quality prediction for each of the top-k classes separately 216 | weighted_rescaled_qualities = [] 217 | for k in range(self.weightQualityByClass): 218 | one_hot_class = one_hot(topk_indices[:, k], num_classes=self.rescale['featureInSize']).to(torch.float32) 219 | scoreWeights_for_class = self.rescaleNet(one_hot_class) 220 | rescaled_quality = scoreWeights_for_class[:, 0] * predictionsQuality + scoreWeights_for_class[:, 1] 221 | weighted_rescaled_qualities.append(rescaled_quality) 222 | 223 | # Weighted aggregation of the rescaled qualities 224 | weighted_rescaled_qualities = torch.stack(weighted_rescaled_qualities, dim=1) 225 | outputDict[self.qualityKey] = (weighted_rescaled_qualities * topk_probs).sum(dim=-1) 226 | 227 | else: 228 | # This should never be raised since 229 | raise ValueError("Class feedback needs class prediction, which is not defined in this configuration") 230 | 231 | if predScene is not None: 232 | return {self.classKey: predScene, 233 | **{keyOut: valueOut.unsqueeze(1) for keyOut, valueOut in outputDict.items()}} 234 | 235 | return outputDict -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PIQ23: An Image Quality Assessment Dataset for Portraits 2 | 3 | ![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgithub.com%2FDXOMARK-Research%2FPIQ2023&label=VISITORS&countColor=%23f47373&labelStyle=upper) 4 | 5 | This is the official repo for PIQ23, accepted in CVPR2023. 6 | 7 |            8 |
[PIQ23](https://corp.dxomark.com/data-base-piq23/)    [CVPR2023](https://openaccess.thecvf.com/content/CVPR2023/html/Chahine_An_Image_Quality_Assessment_Dataset_for_Portraits_CVPR_2023_paper.html) [/ FHIQA](https://arxiv.org/abs/2402.09178)    [NTIRE24](https://codalab.lisn.upsaclay.fr/competitions/17311#learn_the_details)    [Video](https://youtu.be/cvWjOWq5wnk)    [Poster](Imgs/CVPR_Poster_PIQ23.png)

## Introduction
We present PIQ23, a portrait-specific image quality assessment dataset of 5116 images of predefined scenes acquired by more than 100 smartphones, covering a high variety of brands, models, and use cases. The dataset features individuals from a wide range of ages, genders, and ethnicities who have given explicit and informed consent for their photographs to be used in public research. It is annotated by pairwise comparisons (PWC) collected from over 30 image quality experts for three image attributes: face detail preservation, face target exposure, and overall image quality.

## PIQ23

![thumb](Imgs/Thumbnail.png)

**Important Notes**
- By downloading this dataset you agree to the terms and conditions.
- All files in the PIQ23 dataset are available for non-commercial research purposes only.
- You agree not to reproduce, duplicate, copy, sell, trade, resell, or exploit any portion of the images or any portion of derived data for any commercial purpose.
- You agree to remove, throughout the life cycle of the dataset, any set of images following the request of the authors.

**Dataset Access**
- The PIQ23 dataset (5GB) can be downloaded from the DXOMARK CORP [**website**](https://corp.dxomark.com/data-base-piq23/).
- You need to fill out the form and agree to the terms and conditions in order to request access to the dataset. We guarantee open access to any individual or institution following these instructions.
- Your request will be validated shortly, and you will receive an automatic email with a temporary link to download the dataset.

**Overview**

The dataset structure is as follows:
```
├── Details
├── Overall
├── Exposure
├── Scores_Details.csv
├── Scores_Overall.csv
└── Scores_Exposure.csv
```
Each folder is associated with an attribute (Details, Overall and Exposure). It contains the images of the corresponding regions of interest with the following naming: {img_nb}\_{scene_name}\_{scene_idx}.{ext}.
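As a quick reference, here is a minimal sketch of how an image name following this convention can be split back into its parts. The helper name and the example call are illustrative only and are not part of the released tooling:

```python
import re

def parse_piq23_name(image_name):
    """Split '{img_nb}_{scene_name}_{scene_idx}.{ext}' into its components."""
    stem, _, ext = image_name.rpartition(".")
    match = re.fullmatch(r"(\d+)_(.+)_(\d+)", stem)
    if match is None:
        raise ValueError(f"Unexpected PIQ23 image name: {image_name}")
    img_nb, scene_name, scene_idx = match.groups()
    return int(img_nb), scene_name, int(scene_idx), ext

print(parse_piq23_name("29_Indoor_Scene_0.jpg"))  # -> (29, 'Indoor_Scene', 0, 'jpg')
```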
The CSV files include the following entries:
- **IMAGE PATH**: relative path to the image ({Attribute}\\{Image name})
- **IMAGE**: image name
- **JOD**: JOD score of the image
- **JOD STD**: JOD standard deviation
- **CI LOW**: lower bound of the image's confidence interval
- **CI HIGH**: upper bound of the image's confidence interval
- **CI RANGE**: CI HIGH - CI LOW
- **QUALITY LEVEL**: preliminary quality level (result of the clustering over CIs)
- **CLUSTER**: final quality level (result of the variance analysis and community detection)
- **TOTAL COMPARISONS**: total number of comparisons for this image
- **SCENE**: scene name
- **ATTRIBUTE**: attribute (Exposure, Details or Overall)
- **SCENE IDX**: scene index (from 0 to 49)
- **CONDITION**: lighting condition (Outdoor, Indoor, Lowlight or Night)

## Test Splits
We provide two **official** test splits for PIQ23:
- **Device split**:
  - We split PIQ23 by device in order to test the general performance of the trained models on the given scenes.
  - The test set contains around 30% of the images from each scene, thus 30% of the whole dataset.
  - To avoid device bias, we have carefully selected devices from different quality levels and price ranges for the test set. This split can still include some images of the test devices in the training set and vice versa, since the distribution of devices per scene is not completely uniform. We can guarantee that more than 90% of the training and testing devices do not overlap.
  - We first sort the devices by their median percentage of images across scenes, then split them into five groups from the most common device to the least, and sample from these five groups until we reach around 30% of the dataset.
  - The device split CSV can be found in "Test split\Device Split.csv".
  - The test and train CSVs for the different attributes can be found in "Test split\Device Split\".
- **Scene split**:
  - We split PIQ23 by scene in order to test the generalization power of the trained models.
  - We have carefully chosen 15 of the 50 scenes for the test set, covering around 30% of the images from each condition, thus 30% of the whole dataset (around 1486/5116 images).
  - To select the test set, we first sort the scenes by the percentage of images in the corresponding condition (Outdoor, Indoor, Lowlight, Night); we then select a group of scenes covering a variety of conditions (framing, lighting, skin tones, etc.) until we reach around 30% of the images for each condition.
  - The scene split CSV can be found in "Test split\Scene Split.csv".
  - The test and train CSVs for the different attributes can be found in "Test split\Scene Split\".
  - Examples of the test and train scenes can be found in "Test split\Scene Split\Scene examples".

An example of how to use the splits can be found in the "Test split example.ipynb" notebook.

***NB:***
- Please make sure to publish results on both splits in your papers.
- The paper's main results cannot be reproduced with these splits. We will be publishing official performances on these splits soon.

## Benchmarks

**Note on the experiments**:
- The reported results represent the **median of the metrics across all scenes** (see the sketch after this list). The median was used to account for outlier scenes, in case they exist.
- The models chosen as *optimal* in these experiments are the ones that scored the **maximum SROCC** on the test sets. Please take into consideration that a maximum SROCC does not imply a maximum in the other metrics.
- A better approach would be to choose the optimal model based on a combination of metrics.
- A margin of error should be taken into account for these metrics: a minimal difference in correlation can be due to multiple factors and might not be repeatable.
- The base resolution for the models is 1200; however, for the HyperIQA variants we needed to redefine the architecture of the model, since it only accepts 224x224 inputs. The new architecture accepts resolutions that are a multiple of 224 (1344 in our case).
- For the HyperIQA variants, only the ResNet50 backbone is pretrained on ImageNet. There was no IQA pretraining.
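To make the evaluation protocol concrete, here is a minimal sketch of the per-scene aggregation described above. It is an illustration rather than the exact benchmark script: it assumes a dataframe that already holds the SCENE column and the ground-truth JOD scores, plus a hypothetical PRED column with model predictions on the same scale.

```python
import numpy as np
import pandas as pd
from scipy import stats

def per_scene_median_metrics(df, gt_col="JOD", pred_col="PRED"):
    """Compute SROCC/PLCC/KROCC/MAE per scene, then take the median across scenes."""
    rows = []
    for scene, group in df.groupby("SCENE"):
        srocc, _ = stats.spearmanr(group[gt_col], group[pred_col])
        plcc, _ = stats.pearsonr(group[gt_col], group[pred_col])
        krocc, _ = stats.kendalltau(group[gt_col], group[pred_col])
        mae = np.mean(np.abs(group[gt_col] - group[pred_col]))
        rows.append({"SROCC": srocc, "PLCC": plcc, "KROCC": krocc, "MAE": mae})
    # Median over scenes limits the influence of outlier scenes on the reported numbers.
    return pd.DataFrame(rows).median()

# e.g. per_scene_median_metrics(testDf.assign(PRED=predicted_scores))
```

Any per-scene metric can be plugged into the same loop; only the final median aggregation matters for the tables below.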
**Device Split**

| Model \ Attribute | Details SROCC | Details PLCC | Details KROCC | Details MAE | Exposure SROCC | Exposure PLCC | Exposure KROCC | Exposure MAE | Overall SROCC | Overall PLCC | Overall KROCC | Overall MAE |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| DBCNN (1200 x LIVEC) | 0.787 | 0.783 | 0.59 | 0.777 | 0.807 | 0.804 | 0.611 | 0.704 | 0.83 | 0.824 | 0.653 | 0.656 |
| MUSIQ (1200 x PAQ2PIQ) | 0.824 | 0.831 | 0.65 | 0.627 | 0.848 | 0.859 | 0.671 | 0.585 | 0.848 | 0.837 | 0.65 | 0.626 |
| HyperIQA (1344 (224*6) x No IQA pretraining) | 0.793 | 0.766 | 0.618 | 0.751 | 0.8 | 0.828 | 0.636 | 0.721 | 0.818 | 0.825 | 0.66 | 0.612 |
| SEM-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.854 | 0.847 | 0.676 | 0.645 | 0.826 | 0.858 | 0.65 | 0.635 | 0.845 | 0.856 | 0.674 | 0.641 |
| SEM-HyperIQA-CO (1344 (224*6) x No IQA pretraining) | 0.829 | 0.821 | 0.641 | 0.697 | 0.816 | 0.843 | 0.633 | 0.668 | 0.829 | 0.843 | 0.64 | 0.624 |
| SEM-HyperIQA-SO (1344 (224*6) x No IQA pretraining) | 0.874 | 0.871 | 0.709 | 0.583 | 0.826 | 0.846 | 0.651 | 0.678 | 0.84 | 0.849 | 0.661 | 0.639 |
**Scene Split**

| Model \ Attribute | Details SROCC | Details PLCC | Details KROCC | Details MAE | Exposure SROCC | Exposure PLCC | Exposure KROCC | Exposure MAE | Overall SROCC | Overall PLCC | Overall KROCC | Overall MAE |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| DBCNN (1200 x LIVEC) | 0.59 | 0.51 | 0.45 | 0.99 | 0.69 | 0.69 | 0.51 | 0.91 | 0.59 | 0.64 | 0.43 | 1.04 |
| MUSIQ (1200 x PAQ2PIQ) | 0.72 | 0.77 | 0.53 | 0.90 | 0.79 | 0.772 | 0.59 | 0.87 | 0.736 | 0.74 | 0.54 | 0.95 |
| HyperIQA (1344 (224*6) x No IQA pretraining) | 0.701 | 0.668 | 0.504 | 0.936 | 0.692 | 0.684 | 0.498 | 0.863 | 0.74 | 0.736 | 0.55 | 0.989 |
| SEM-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.732 | 0.649 | 0.547 | 0.879 | 0.716 | 0.697 | 0.53 | 0.967 | 0.749 | 0.752 | 0.558 | 1.033 |
| SEM-HyperIQA-CO (1344 (224*6) x No IQA pretraining) | 0.746 | 0.714 | 0.549 | 0.849 | 0.698 | 0.698 | 0.517 | 0.945 | 0.739 | 0.736 | 0.55 | 1.038 |
| FULL-HyperIQA (1344 (224*6) x No IQA pretraining) | 0.74 | 0.72 | 0.55 | 0.8 | 0.76 | 0.71 | 0.57 | 0.85 | 0.78 | 0.78 | 0.59 | 1.12 |
328 | 329 | ## TO DO 330 | - Add SemHyperIQA Code 331 | - Add Stat analysis code 332 | - Add other benchmarks code 333 | - Add pretrained weights 334 | 335 | ## Citation 336 | Please cite the paper/dataset as follows: 337 | ```bibtex 338 | @InProceedings{Chahine_2023_CVPR, 339 | author = {Chahine, Nicolas and Calarasanu, Stefania and Garcia-Civiero, Davide and Cayla, Th\'eo and Ferradans, Sira and Ponce, Jean}, 340 | title = {An Image Quality Assessment Dataset for Portraits}, 341 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, 342 | month = {June}, 343 | year = {2023}, 344 | pages = {9968-9978} 345 | } 346 | 347 | ``` 348 | ## License 349 | Provided that the user complies with the Terms of Use, the provider grants a limited, non-exclusive, personal, non-transferable, non-sublicensable, and revocable license to access, download and use the Database for internal and research purposes only, during the specified term. The User is required to comply with the Provider's reasonable instructions, as well as all applicable statutes, laws, and regulations. 350 | 351 | ## About 352 | For any questions please contact: piq2023@dxomark.com 353 | 354 | 355 | -------------------------------------------------------------------------------- /src/models/archs/sem_hyperiqa_util.py: -------------------------------------------------------------------------------- 1 | import torch as torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | from torch.nn import init 5 | import math 6 | 7 | from .arch_util import load_file_from_url 8 | 9 | model_urls = { 10 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 11 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 12 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 13 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 14 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 15 | } 16 | 17 | 18 | class HyperNet(nn.Module): 19 | """ 20 | Hyper network for learning perceptual rules. 21 | # Code adapted from: https://github.com/SSL92/hyperIQA 22 | 23 | Args: 24 | lda_out_channels: local distortion aware module output size. 25 | hyper_in_channels: input feature channels for hyper network. 26 | target_in_size: input vector size for target network. 27 | target_fc(i)_size: fully connection layer size of target network. 28 | feature_size: input feature map width/height for hyper network. 29 | 30 | Note: 31 | For size match, input args must satisfy: 'target_fc(i)_size * target_fc(i+1)_size' is divisible by 'feature_size ^ 2'. 
32 | """ 33 | 34 | def __init__(self, lda_out_channels, hyper_in_channels, target_in_size, target_fc1_size, target_fc2_size, target_fc3_size, target_fc4_size, feature_size, patch_rate, **kwargs): 35 | super(HyperNet, self).__init__() 36 | self.hyperInChn = hyper_in_channels 37 | self.target_in_size = target_in_size 38 | self.f1 = target_fc1_size 39 | self.f2 = target_fc2_size 40 | self.f3 = target_fc3_size 41 | self.f4 = target_fc4_size 42 | self.feature_size = feature_size 43 | self.patch_rate = patch_rate 44 | 45 | self.res = resnet50_backbone(lda_out_channels, target_in_size, patch_rate=self.patch_rate, pretrained=True) 46 | 47 | self.pool = nn.AdaptiveAvgPool2d((1, 1)) 48 | self.poolClass = None 49 | if kwargs.get('classFeaturesOut', None) is not None: 50 | self.poolClass = nn.AdaptiveAvgPool2d((1, kwargs.get('classFeaturesOut'))) 51 | 52 | # Conv layers for resnet output features 53 | self.conv1 = nn.Sequential( 54 | nn.Conv2d(2048, 1024, 1, padding=(0, 0)), 55 | nn.ReLU(inplace=True), 56 | nn.Conv2d(1024, 512, 1, padding=(0, 0)), 57 | nn.ReLU(inplace=True), 58 | nn.Conv2d(512, self.hyperInChn, 1, padding=(0, 0)), 59 | nn.ReLU(inplace=True) 60 | ) 61 | 62 | # Hyper network part, conv for generating target fc weights, fc for generating target fc biases 63 | self.fc1w_conv = nn.Conv2d(self.hyperInChn, int(self.target_in_size * self.f1 / feature_size ** 2), 3, padding=(1, 1)) 64 | self.fc1b_fc = nn.Linear(self.hyperInChn, self.f1) 65 | 66 | self.fc2w_conv = nn.Conv2d(self.hyperInChn, int(self.f1 * self.f2 / feature_size ** 2), 3, padding=(1, 1)) 67 | self.fc2b_fc = nn.Linear(self.hyperInChn, self.f2) 68 | 69 | self.fc3w_conv = nn.Conv2d(self.hyperInChn, int(self.f2 * self.f3 / feature_size ** 2), 3, padding=(1, 1)) 70 | self.fc3b_fc = nn.Linear(self.hyperInChn, self.f3) 71 | 72 | self.fc4w_conv = nn.Conv2d(self.hyperInChn, int(self.f3 * self.f4 / feature_size ** 2), 3, padding=(1, 1)) 73 | self.fc4b_fc = nn.Linear(self.hyperInChn, self.f4) 74 | 75 | self.fc5w_fc = nn.Linear(self.hyperInChn, self.f4) 76 | self.fc5b_fc = nn.Linear(self.hyperInChn, 1) 77 | 78 | # initialize 79 | for i, m_name in enumerate(self._modules): 80 | if i > 3: 81 | nn.init.kaiming_normal_(self._modules[m_name].weight.data) 82 | 83 | def _forward(self, imgTensorIn): 84 | feature_size = self.feature_size 85 | res_out = self.res(imgTensorIn) 86 | 87 | # input vector for target net 88 | target_in_vec = res_out['target_in_vec'].view(-1, self.target_in_size, 1, 1) 89 | 90 | # input features for hyper net 91 | hyper_in_feat = self.conv1(res_out['hyper_in_feat']).view(-1, self.hyperInChn, feature_size, feature_size) 92 | 93 | # generating target net weights & biases 94 | target_fc1w = self.fc1w_conv(hyper_in_feat).view(-1, self.f1, self.target_in_size, 1, 1) 95 | target_fc1b = self.fc1b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f1) 96 | 97 | target_fc2w = self.fc2w_conv(hyper_in_feat).view(-1, self.f2, self.f1, 1, 1) 98 | target_fc2b = self.fc2b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f2) 99 | 100 | target_fc3w = self.fc3w_conv(hyper_in_feat).view(-1, self.f3, self.f2, 1, 1) 101 | target_fc3b = self.fc3b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f3) 102 | 103 | target_fc4w = self.fc4w_conv(hyper_in_feat).view(-1, self.f4, self.f3, 1, 1) 104 | target_fc4b = self.fc4b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f4) 105 | 106 | target_fc5w = self.fc5w_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1, self.f4, 1, 1) 107 | target_fc5b = 
self.fc5b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1) 108 | 109 | out = {} 110 | out['target_in_vec'] = target_in_vec 111 | out['target_fc1w'] = target_fc1w 112 | out['target_fc1b'] = target_fc1b 113 | out['target_fc2w'] = target_fc2w 114 | out['target_fc2b'] = target_fc2b 115 | out['target_fc3w'] = target_fc3w 116 | out['target_fc3b'] = target_fc3b 117 | out['target_fc4w'] = target_fc4w 118 | out['target_fc4b'] = target_fc4b 119 | out['target_fc5w'] = target_fc5w 120 | out['target_fc5b'] = target_fc5b 121 | 122 | if self.poolClass: 123 | return out, torch.flatten(self.poolClass(hyper_in_feat), 1) 124 | return out 125 | 126 | 127 | def forward(self, input): 128 | 129 | if isinstance(input, list): 130 | # Ideally this should not be used, since the input should be a tensor of concatenated inputs not a list of inputs. 131 | out_all = [] 132 | hyper_in_all = [] 133 | for elt in input: 134 | # this elt in img is considering img to be a list of patches so a list of [torch.Size([batch_size, 3, patch_size, patch_size])] 135 | out = self._forward(elt) 136 | if self.poolClass: 137 | out_all.append(out[0]) 138 | hyper_in_all.append(out[1]) 139 | else: 140 | out_all.append(out) 141 | if self.poolClass: 142 | return out_all, hyper_in_all 143 | return out_all 144 | 145 | return self._forward(input) 146 | 147 | 148 | 149 | class TargetNet(nn.Module): 150 | """ 151 | Target network for quality prediction. 152 | """ 153 | def __init__(self, paras): 154 | super(TargetNet, self).__init__() 155 | self.l1 = nn.Sequential( 156 | TargetFC(paras['target_fc1w'], paras['target_fc1b']), 157 | nn.Sigmoid(), 158 | ) 159 | self.l2 = nn.Sequential( 160 | TargetFC(paras['target_fc2w'], paras['target_fc2b']), 161 | nn.Sigmoid(), 162 | ) 163 | 164 | self.l3 = nn.Sequential( 165 | TargetFC(paras['target_fc3w'], paras['target_fc3b']), 166 | nn.Sigmoid(), 167 | ) 168 | 169 | self.l4 = nn.Sequential( 170 | TargetFC(paras['target_fc4w'], paras['target_fc4b']), 171 | nn.Sigmoid(), 172 | TargetFC(paras['target_fc5w'], paras['target_fc5b']), 173 | ) 174 | 175 | def forward(self, x): 176 | q = self.l1(x) 177 | 178 | q = self.l2(q) 179 | q = self.l3(q) 180 | q = self.l4(q).squeeze() 181 | 182 | return q 183 | 184 | 185 | class TargetFC(nn.Module): 186 | """ 187 | Fully connection operations for target net 188 | """ 189 | def __init__(self, weight, bias): 190 | super(TargetFC, self).__init__() 191 | self.weight = weight 192 | self.bias = bias 193 | 194 | def forward(self, input_): 195 | input_re = input_.view(-1, input_.shape[0] * input_.shape[1], input_.shape[2], input_.shape[3]) 196 | weight_re = self.weight.view(self.weight.shape[0] * self.weight.shape[1], self.weight.shape[2], self.weight.shape[3], self.weight.shape[4]) 197 | bias_re = self.bias.view(self.bias.shape[0] * self.bias.shape[1]) 198 | 199 | out = F.conv2d(input=input_re, weight=weight_re, bias=bias_re, groups=self.weight.shape[0]) 200 | 201 | return out.view(input_.shape[0], self.weight.shape[1], input_.shape[2], input_.shape[3]) 202 | 203 | 204 | class Bottleneck(nn.Module): 205 | expansion = 4 206 | 207 | def __init__(self, inplanes, planes, stride=1, downsample=None): 208 | super(Bottleneck, self).__init__() 209 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 210 | self.bn1 = nn.BatchNorm2d(planes) 211 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 212 | padding=1, bias=False) 213 | self.bn2 = nn.BatchNorm2d(planes) 214 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 215 | self.bn3 = 
nn.BatchNorm2d(planes * 4) 216 | self.relu = nn.ReLU(inplace=True) 217 | self.downsample = downsample 218 | self.stride = stride 219 | 220 | def forward(self, x): 221 | residual = x 222 | 223 | out = self.conv1(x) 224 | out = self.bn1(out) 225 | out = self.relu(out) 226 | 227 | out = self.conv2(out) 228 | out = self.bn2(out) 229 | out = self.relu(out) 230 | 231 | out = self.conv3(out) 232 | out = self.bn3(out) 233 | 234 | if self.downsample is not None: 235 | residual = self.downsample(x) 236 | 237 | out += residual 238 | out = self.relu(out) 239 | 240 | return out 241 | 242 | 243 | class ResNetBackbone(nn.Module): 244 | """ 245 | ResNet50 backbone model for feature extraction 246 | Outputs: 247 | out : dictionnary containing the input features of the hypernetwork and target features for FC-quality 248 | """ 249 | 250 | def __init__(self, lda_out_channels, in_chn, block, layers, patch_rate, num_classes=1000): 251 | super(ResNetBackbone, self).__init__() 252 | self.inplanes = 64 253 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) 254 | self.bn1 = nn.BatchNorm2d(64) 255 | self.relu = nn.ReLU(inplace=True) 256 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 257 | self.layer1 = self._make_layer(block, 64, layers[0]) 258 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 259 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 260 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 261 | 262 | # local distortion aware module 263 | self.lda1_pool = nn.Sequential( 264 | nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False), 265 | nn.AvgPool2d(7, stride=7), 266 | 267 | ) 268 | self.lda1_fc = nn.Linear(16 * 64 * patch_rate * patch_rate , lda_out_channels) 269 | 270 | self.lda2_pool = nn.Sequential( 271 | nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False), 272 | nn.AvgPool2d(7, stride=7), 273 | ) 274 | self.lda2_fc = nn.Linear(32 * 16 * patch_rate * patch_rate, lda_out_channels) 275 | 276 | self.lda3_pool = nn.Sequential( 277 | nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False), 278 | nn.AvgPool2d(7, stride=7), 279 | ) 280 | self.lda3_fc = nn.Linear(64 * 4 * patch_rate * patch_rate, lda_out_channels) 281 | 282 | self.lda4_pool = nn.AvgPool2d(7, stride=7) 283 | self.lda4_fc = nn.Linear(2048 * patch_rate * patch_rate, in_chn - lda_out_channels * 3) 284 | 285 | for m in self.modules(): 286 | if isinstance(m, nn.Conv2d): 287 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 288 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 289 | elif isinstance(m, nn.BatchNorm2d): 290 | m.weight.data.fill_(1) 291 | m.bias.data.zero_() 292 | 293 | # initialize 294 | nn.init.kaiming_normal_(self.lda1_pool._modules['0'].weight.data) 295 | nn.init.kaiming_normal_(self.lda2_pool._modules['0'].weight.data) 296 | nn.init.kaiming_normal_(self.lda3_pool._modules['0'].weight.data) 297 | nn.init.kaiming_normal_(self.lda1_fc.weight.data) 298 | nn.init.kaiming_normal_(self.lda2_fc.weight.data) 299 | nn.init.kaiming_normal_(self.lda3_fc.weight.data) 300 | nn.init.kaiming_normal_(self.lda4_fc.weight.data) 301 | 302 | def _make_layer(self, block, planes, blocks, stride=1): 303 | downsample = None 304 | if stride != 1 or self.inplanes != planes * block.expansion: 305 | downsample = nn.Sequential( 306 | nn.Conv2d(self.inplanes, planes * block.expansion, 307 | kernel_size=1, stride=stride, bias=False), 308 | nn.BatchNorm2d(planes * block.expansion), 309 | ) 310 | 311 | layers = [] 312 | layers.append(block(self.inplanes, planes, stride, downsample)) 313 | self.inplanes = planes * block.expansion 314 | for i in range(1, blocks): 315 | layers.append(block(self.inplanes, planes)) 316 | 317 | return nn.Sequential(*layers) 318 | 319 | def forward(self, x): 320 | x = self.conv1(x) 321 | x = self.bn1(x) 322 | x = self.relu(x) 323 | x = self.maxpool(x) 324 | x = self.layer1(x) 325 | 326 | # the same effect as lda operation in the paper, but save much more memory 327 | lda_1 = self.lda1_fc(self.lda1_pool(x).view(x.size(0), -1)) 328 | x = self.layer2(x) 329 | lda_2 = self.lda2_fc(self.lda2_pool(x).view(x.size(0), -1)) 330 | x = self.layer3(x) 331 | lda_3 = self.lda3_fc(self.lda3_pool(x).view(x.size(0), -1)) 332 | x = self.layer4(x) 333 | lda_4 = self.lda4_fc(self.lda4_pool(x).view(x.size(0), -1)) 334 | 335 | vec = torch.cat((lda_1, lda_2, lda_3, lda_4), 1) 336 | 337 | out = {} 338 | out['hyper_in_feat'] = x 339 | out['target_in_vec'] = vec 340 | 341 | return out 342 | 343 | 344 | def resnet50_backbone(lda_out_channels, in_chn, patch_rate, pretrained=False, **kwargs): 345 | """Constructs a ResNet-50 model_hyper. 346 | 347 | Args: 348 | pretrained (bool): If True, returns a model_hyper pre-trained on ImageNet 349 | """ 350 | model = ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], patch_rate, **kwargs) 351 | if pretrained: 352 | save_model = torch.load(load_file_from_url(model_urls['resnet50'])) 353 | model_dict = model.state_dict() 354 | state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} 355 | model_dict.update(state_dict) 356 | model.load_state_dict(model_dict) 357 | else: 358 | model.apply(weights_init_xavier) 359 | 360 | return model 361 | 362 | def weights_init_xavier(m): 363 | classname = m.__class__.__name__ 364 | if classname.find('Conv') != -1: 365 | init.kaiming_normal_(m.weight.data) 366 | init.constant_(m.bias.data, 0.0) 367 | elif classname.find('Linear') != -1: 368 | init.kaiming_normal_(m.weight.data) 369 | init.constant_(m.bias.data, 0.0) 370 | elif classname.find('BatchNorm2d') != -1: 371 | init.uniform_(m.weight.data, 1.0, 0.02) 372 | init.constant_(m.bias.data, 0.0) 373 | 374 | 375 | ### FULLY-CONNECTED PART FOR SCENE CLASSIFICATION ### 376 | class SceneClassNet(nn.Module): 377 | """ 378 | Fully-connected network for scene type classification. 379 | Input : 380 | hyperInFeat (feature vector outputed by Hypernetwork). size=torch.Size([batchSize, patchSize // 2, 7, 7]) 381 | Output : 382 | scene type vector. 
type=torch.tensor([batchSize, numClasses]) 383 | """ 384 | def __init__(self, featureInSize, numClasses, numLayers=3, numIntermediateNodes=500, **kwargs): 385 | super(SceneClassNet, self).__init__() 386 | 387 | self.layers = nn.ModuleList() 388 | 389 | # Input layer 390 | self.layers.append(nn.Linear(featureInSize, numIntermediateNodes)) 391 | self.layers.append(nn.ReLU()) 392 | 393 | # Intermediate layers 394 | for _ in range(numLayers - 2): # subtract 2 because we already have the input layer and will add the output layer 395 | self.layers.append(nn.Linear(numIntermediateNodes, numIntermediateNodes)) 396 | self.layers.append(nn.ReLU()) 397 | 398 | # Output layer 399 | self.layers.append(nn.Linear(numIntermediateNodes, numClasses)) 400 | 401 | # Apply custom weights initialization 402 | self.apply(weights_init_xavier) 403 | 404 | def forward(self, x): 405 | for layer in self.layers: 406 | x = layer(x) 407 | return x 408 | 409 | 410 | ### FULLY-CONNECTED PART FOR SCENE CLASSIFICATION ### 411 | #class SceneClassNet(nn.Module): 412 | class RescaleNet(nn.Module): 413 | """ 414 | Target network for scene type classification. 415 | Input : hyperInFeat (feature vector outputted by Hypernetwork). size=torch.Size([batchSize, patchSize // 2, 7, 7]) 416 | Output : scene type vector. type=torch.tensor([batchSize, 20]) 417 | """ 418 | def __init__(self, featureInSize, numLayers=3, numIntermediateNodes=100, polyDegree=2, **kwargs): 419 | super(RescaleNet, self).__init__() 420 | self.numLayers = numLayers 421 | self.numIntermediateNodes = numIntermediateNodes 422 | self.polyDegree = polyDegree 423 | self.layers = nn.ModuleList() 424 | 425 | # Input layer 426 | self.layers.append(nn.Linear(featureInSize, self.numIntermediateNodes)) 427 | self.layers.append(nn.ReLU()) 428 | 429 | # Intermediate layers 430 | for _ in range(self.numLayers - 1): 431 | self.layers.append(nn.Linear(self.numIntermediateNodes, self.numIntermediateNodes)) 432 | self.layers.append(nn.ReLU()) 433 | 434 | # Output layer 435 | self.layers.append(nn.Linear(self.numIntermediateNodes, self.polyDegree)) 436 | 437 | # Apply custom weights initialization 438 | self.apply(weights_init_xavier) 439 | 440 | def forward(self, x): 441 | for layer in self.layers: 442 | x = layer(x) 443 | return x 444 | -------------------------------------------------------------------------------- /Test split example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "c29fde2e-9497-4677-bd9a-888acfc987cb", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import os\n", 11 | "import pandas as pd" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "id": "535632cd-c9eb-4380-99a0-643fb7d26b60", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "scorePath = r\"./Scores/Scores_Exposure.csv\"\n", 22 | "\n", 23 | "scoreDf = pd.read_csv(scorePath)" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 3, 29 | "id": "24eb6320-c0d8-4031-b021-3c6fe117599b", 30 | "metadata": { 31 | "scrolled": true, 32 | "tags": [] 33 | }, 34 | "outputs": [ 35 | { 36 | "data": { 37 | "text/html": [ 38 | "
\n", 39 | "\n", 52 | "\n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | " \n", 57 | " \n", 58 | " \n", 59 | " \n", 60 | " \n", 61 | " \n", 62 | " \n", 63 | " \n", 64 | " \n", 65 | " \n", 66 | " \n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | " \n", 167 | " \n", 168 | " \n", 169 | " \n", 170 | " \n", 171 | " \n", 172 | " \n", 173 | " \n", 174 | " \n", 175 | " \n", 176 | " \n", 177 | " \n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | " \n", 183 | " \n", 184 | " \n", 185 | " \n", 186 | " \n", 187 | " \n", 188 | " \n", 189 | " \n", 190 | " \n", 191 | " \n", 192 | " \n", 193 | " \n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | " \n", 221 | " \n", 222 | " \n", 223 | " \n", 224 | " \n", 225 | " \n", 226 | " \n", 227 | " \n", 228 | " \n", 229 | " \n", 230 | " \n", 231 | " \n", 232 | " \n", 233 | " \n", 234 | " \n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | " \n", 249 | " \n", 250 | " \n", 251 | " \n", 252 | " \n", 253 | " \n", 254 | " \n", 255 | " \n", 256 | " \n", 257 | " \n", 258 | " \n", 259 | " \n", 260 | " \n", 261 | "
IMAGE PATHJODJOD STDCI LOWCI HIGHCI RANGEQUALITY LEVELCLUSTERTOTAL COMPARISONSIMAGESCENEATTRIBUTESCENE IDXCONDITION
0Exposure\\29_Indoor_Scene_0.jpg-2.9838460.440599-3.868096-2.1589771.7091190-1.097.029_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0Indoor
1Exposure\\0_Indoor_Scene_0.jpg-2.4947590.458133-3.485925-1.6193051.8666200-1.0117.00_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0Indoor
2Exposure\\56_Indoor_Scene_0.jpg-2.6448000.784488-4.347027-1.3324173.0146100-1.0109.056_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0Indoor
3Exposure\\66_Indoor_Scene_0.jpg-2.0832140.485894-2.960870-1.1440541.81681600.0106.066_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0Indoor
4Exposure\\23_Indoor_Scene_0.jpg-2.1070130.592761-3.349612-1.0499692.29964300.0120.023_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0Indoor
.............................................
5111Exposure\\5066_Outdoor_Scene_49.jpg2.9184510.3435952.3217013.6081921.286491520.0107.05066_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49Outdoor
5112Exposure\\5000_Outdoor_Scene_49.jpg2.8768330.8060291.2615534.3004123.038859520.097.05000_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49Outdoor
5113Exposure\\4999_Outdoor_Scene_49.jpg3.8136260.5357962.5328324.6915822.1587505-1.0105.04999_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49Outdoor
5114Exposure\\5114_Outdoor_Scene_49.jpg3.1080671.3125120.0301114.2542054.2240945-1.0110.05114_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49Outdoor
5115Exposure\\5004_Outdoor_Scene_49.jpg4.1296991.9946170.0301116.1273636.0972525-1.087.05004_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49Outdoor
\n", 262 | "

5116 rows × 14 columns

\n", 263 | "
" 264 | ], 265 | "text/plain": [ 266 | " IMAGE PATH JOD JOD STD CI LOW \\\n", 267 | "0 Exposure\\29_Indoor_Scene_0.jpg -2.983846 0.440599 -3.868096 \n", 268 | "1 Exposure\\0_Indoor_Scene_0.jpg -2.494759 0.458133 -3.485925 \n", 269 | "2 Exposure\\56_Indoor_Scene_0.jpg -2.644800 0.784488 -4.347027 \n", 270 | "3 Exposure\\66_Indoor_Scene_0.jpg -2.083214 0.485894 -2.960870 \n", 271 | "4 Exposure\\23_Indoor_Scene_0.jpg -2.107013 0.592761 -3.349612 \n", 272 | "... ... ... ... ... \n", 273 | "5111 Exposure\\5066_Outdoor_Scene_49.jpg 2.918451 0.343595 2.321701 \n", 274 | "5112 Exposure\\5000_Outdoor_Scene_49.jpg 2.876833 0.806029 1.261553 \n", 275 | "5113 Exposure\\4999_Outdoor_Scene_49.jpg 3.813626 0.535796 2.532832 \n", 276 | "5114 Exposure\\5114_Outdoor_Scene_49.jpg 3.108067 1.312512 0.030111 \n", 277 | "5115 Exposure\\5004_Outdoor_Scene_49.jpg 4.129699 1.994617 0.030111 \n", 278 | "\n", 279 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n", 280 | "0 -2.158977 1.709119 0 -1.0 97.0 \n", 281 | "1 -1.619305 1.866620 0 -1.0 117.0 \n", 282 | "2 -1.332417 3.014610 0 -1.0 109.0 \n", 283 | "3 -1.144054 1.816816 0 0.0 106.0 \n", 284 | "4 -1.049969 2.299643 0 0.0 120.0 \n", 285 | "... ... ... ... ... ... \n", 286 | "5111 3.608192 1.286491 5 20.0 107.0 \n", 287 | "5112 4.300412 3.038859 5 20.0 97.0 \n", 288 | "5113 4.691582 2.158750 5 -1.0 105.0 \n", 289 | "5114 4.254205 4.224094 5 -1.0 110.0 \n", 290 | "5115 6.127363 6.097252 5 -1.0 87.0 \n", 291 | "\n", 292 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n", 293 | "0 29_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 294 | "1 0_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 295 | "2 56_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 296 | "3 66_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 297 | "4 23_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 298 | "... ... ... ... ... \n", 299 | "5111 5066_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 300 | "5112 5000_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 301 | "5113 4999_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 302 | "5114 5114_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 303 | "5115 5004_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 304 | "\n", 305 | " CONDITION \n", 306 | "0 Indoor \n", 307 | "1 Indoor \n", 308 | "2 Indoor \n", 309 | "3 Indoor \n", 310 | "4 Indoor \n", 311 | "... ... \n", 312 | "5111 Outdoor \n", 313 | "5112 Outdoor \n", 314 | "5113 Outdoor \n", 315 | "5114 Outdoor \n", 316 | "5115 Outdoor \n", 317 | "\n", 318 | "[5116 rows x 14 columns]" 319 | ] 320 | }, 321 | "execution_count": 3, 322 | "metadata": {}, 323 | "output_type": "execute_result" 324 | } 325 | ], 326 | "source": [ 327 | "scoreDf" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "id": "ec858eef-e67e-4c67-a0fe-18b0c4c36897", 333 | "metadata": { 334 | "tags": [] 335 | }, 336 | "source": [ 337 | "### Scene Split (Generalization Split)" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 4, 343 | "id": "450ac043-8cf7-4c17-972b-1b2f830e39be", 344 | "metadata": {}, 345 | "outputs": [], 346 | "source": [ 347 | "sceneSplitPath = r\"./Test split/Scene Split.csv\"\n", 348 | "sceneSplitDf = pd.read_csv(sceneSplitPath)" 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": 5, 354 | "id": "ff6f8bf9-f69b-4350-9211-23a6e9916d1e", 355 | "metadata": { 356 | "scrolled": true, 357 | "tags": [] 358 | }, 359 | "outputs": [ 360 | { 361 | "data": { 362 | "text/html": [ 363 | "
\n", 364 | "\n", 377 | "\n", 378 | " \n", 379 | " \n", 380 | " \n", 381 | " \n", 382 | " \n", 383 | " \n", 384 | " \n", 385 | " \n", 386 | " \n", 387 | " \n", 388 | " \n", 389 | " \n", 390 | " \n", 391 | " \n", 392 | " \n", 393 | " \n", 394 | " \n", 395 | " \n", 396 | " \n", 397 | " \n", 398 | " \n", 399 | " \n", 400 | " \n", 401 | " \n", 402 | " \n", 403 | " \n", 404 | " \n", 405 | " \n", 406 | " \n", 407 | " \n", 408 | " \n", 409 | " \n", 410 | " \n", 411 | " \n", 412 | " \n", 413 | " \n", 414 | " \n", 415 | " \n", 416 | " \n", 417 | " \n", 418 | " \n", 419 | " \n", 420 | " \n", 421 | " \n", 422 | " \n", 423 | " \n", 424 | " \n", 425 | " \n", 426 | " \n", 427 | " \n", 428 | " \n", 429 | " \n", 430 | " \n", 431 | " \n", 432 | " \n", 433 | " \n", 434 | " \n", 435 | " \n", 436 | " \n", 437 | " \n", 438 | " \n", 439 | " \n", 440 | " \n", 441 | " \n", 442 | " \n", 443 | " \n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | " \n", 471 | " \n", 472 | " \n", 473 | " \n", 474 | " \n", 475 | " \n", 476 | " \n", 477 | " \n", 478 | " \n", 479 | " \n", 480 | " \n", 481 | " \n", 482 | " \n", 483 | " \n", 484 | " \n", 485 | " \n", 486 | " \n", 487 | " \n", 488 | " \n", 489 | " \n", 490 | " \n", 491 | " \n", 492 | " \n", 493 | " \n", 494 | " \n", 495 | " \n", 496 | " \n", 497 | " \n", 498 | " \n", 499 | " \n", 500 | " \n", 501 | " \n", 502 | " \n", 503 | " \n", 504 | " \n", 505 | " \n", 506 | " \n", 507 | " \n", 508 | " \n", 509 | " \n", 510 | " \n", 511 | " \n", 512 | " \n", 513 | " \n", 514 | " \n", 515 | " \n", 516 | " \n", 517 | " \n", 518 | " \n", 519 | " \n", 520 | " \n", 521 | " \n", 522 | " \n", 523 | " \n", 524 | " \n", 525 | " \n", 526 | " \n", 527 | " \n", 528 | " \n", 529 | " \n", 530 | " \n", 531 | " \n", 532 | " \n", 533 | " \n", 534 | " \n", 535 | " \n", 536 | " \n", 537 | " \n", 538 | " \n", 539 | " \n", 540 | " \n", 541 | " \n", 542 | " \n", 543 | " \n", 544 | " \n", 545 | " \n", 546 | " \n", 547 | " \n", 548 | " \n", 549 | " \n", 550 | " \n", 551 | " \n", 552 | " \n", 553 | " \n", 554 | " \n", 555 | " \n", 556 | " \n", 557 | " \n", 558 | " \n", 559 | " \n", 560 | " \n", 561 | " \n", 562 | " \n", 563 | " \n", 564 | " \n", 565 | " \n", 566 | " \n", 567 | " \n", 568 | " \n", 569 | " \n", 570 | " \n", 571 | " \n", 572 | " \n", 573 | " \n", 574 | " \n", 575 | " \n", 576 | " \n", 577 | " \n", 578 | " \n", 579 | " \n", 580 | " \n", 581 | " \n", 582 | " \n", 583 | " \n", 584 | " \n", 585 | " \n", 586 | " \n", 587 | " \n", 588 | " \n", 589 | " \n", 590 | " \n", 591 | " \n", 592 | " \n", 593 | " \n", 594 | " \n", 595 | " \n", 596 | " \n", 597 | " \n", 598 | " \n", 599 | " \n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | " \n", 616 | " \n", 617 | " \n", 618 | " \n", 619 | " \n", 620 | " \n", 621 | " \n", 622 | " \n", 623 | " \n", 624 | " \n", 625 | " \n", 626 | " \n", 627 | " \n", 628 | " \n", 629 | " \n", 630 | " \n", 631 | " \n", 632 | " \n", 633 | " \n", 634 | " \n", 635 | " \n", 636 | " \n", 637 | " \n", 638 | " \n", 639 | " \n", 640 | " \n", 641 | " \n", 642 | " \n", 643 | " \n", 644 | " \n", 645 | " \n", 646 | " \n", 647 | " \n", 648 | " \n", 
649 | " \n", 650 | " \n", 651 | " \n", 652 | " \n", 653 | " \n", 654 | " \n", 655 | " \n", 656 | " \n", 657 | " \n", 658 | " \n", 659 | " \n", 660 | " \n", 661 | " \n", 662 | " \n", 663 | " \n", 664 | " \n", 665 | " \n", 666 | " \n", 667 | " \n", 668 | " \n", 669 | " \n", 670 | " \n", 671 | " \n", 672 | " \n", 673 | " \n", 674 | " \n", 675 | " \n", 676 | " \n", 677 | " \n", 678 | " \n", 679 | " \n", 680 | " \n", 681 | " \n", 682 | " \n", 683 | " \n", 684 | " \n", 685 | " \n", 686 | " \n", 687 | " \n", 688 | "
SCENESPLITNB IMAGES
0Indoor_Scene_0Train72
1Indoor_Scene_12Train103
2Indoor_Scene_13Train90
3Indoor_Scene_2Train74
4Indoor_Scene_3Train89
5Indoor_Scene_4Train91
6Indoor_Scene_6Train125
7Indoor_Scene_7Train110
8Indoor_Scene_8Train125
9Indoor_Scene_9Train125
10Lowlight_Scene_14Train96
11Lowlight_Scene_15Train70
12Lowlight_Scene_18Train125
13Lowlight_Scene_19Train125
14Lowlight_Scene_21Train125
15Night_Scene_22Train116
16Night_Scene_23Train117
17Night_Scene_24Train119
18Night_Scene_25Train116
19Night_Scene_26Train115
20Night_Scene_27Train115
21Night_Scene_29Train112
22Night_Scene_32Train100
23Outdoor_Scene_35Train49
24Outdoor_Scene_36Train73
25Outdoor_Scene_37Train95
26Outdoor_Scene_38Train78
27Outdoor_Scene_39Train92
28Outdoor_Scene_40Train125
29Outdoor_Scene_41Train125
30Outdoor_Scene_42Train125
31Outdoor_Scene_44Train125
32Outdoor_Scene_47Train89
33Outdoor_Scene_48Train74
34Outdoor_Scene_49Train125
35Indoor_Scene_10Test125
36Indoor_Scene_11Test94
37Indoor_Scene_1Test124
38Indoor_Scene_5Test50
39Lowlight_Scene_16Test87
40Lowlight_Scene_17Test102
41Lowlight_Scene_20Test125
42Night_Scene_28Test112
43Night_Scene_30Test114
44Night_Scene_31Test112
45Outdoor_Scene_33Test119
46Outdoor_Scene_34Test45
47Outdoor_Scene_43Test125
48Outdoor_Scene_45Test78
49Outdoor_Scene_46Test74
\n", 689 | "
" 690 | ], 691 | "text/plain": [ 692 | " SCENE SPLIT NB IMAGES\n", 693 | "0 Indoor_Scene_0 Train 72\n", 694 | "1 Indoor_Scene_12 Train 103\n", 695 | "2 Indoor_Scene_13 Train 90\n", 696 | "3 Indoor_Scene_2 Train 74\n", 697 | "4 Indoor_Scene_3 Train 89\n", 698 | "5 Indoor_Scene_4 Train 91\n", 699 | "6 Indoor_Scene_6 Train 125\n", 700 | "7 Indoor_Scene_7 Train 110\n", 701 | "8 Indoor_Scene_8 Train 125\n", 702 | "9 Indoor_Scene_9 Train 125\n", 703 | "10 Lowlight_Scene_14 Train 96\n", 704 | "11 Lowlight_Scene_15 Train 70\n", 705 | "12 Lowlight_Scene_18 Train 125\n", 706 | "13 Lowlight_Scene_19 Train 125\n", 707 | "14 Lowlight_Scene_21 Train 125\n", 708 | "15 Night_Scene_22 Train 116\n", 709 | "16 Night_Scene_23 Train 117\n", 710 | "17 Night_Scene_24 Train 119\n", 711 | "18 Night_Scene_25 Train 116\n", 712 | "19 Night_Scene_26 Train 115\n", 713 | "20 Night_Scene_27 Train 115\n", 714 | "21 Night_Scene_29 Train 112\n", 715 | "22 Night_Scene_32 Train 100\n", 716 | "23 Outdoor_Scene_35 Train 49\n", 717 | "24 Outdoor_Scene_36 Train 73\n", 718 | "25 Outdoor_Scene_37 Train 95\n", 719 | "26 Outdoor_Scene_38 Train 78\n", 720 | "27 Outdoor_Scene_39 Train 92\n", 721 | "28 Outdoor_Scene_40 Train 125\n", 722 | "29 Outdoor_Scene_41 Train 125\n", 723 | "30 Outdoor_Scene_42 Train 125\n", 724 | "31 Outdoor_Scene_44 Train 125\n", 725 | "32 Outdoor_Scene_47 Train 89\n", 726 | "33 Outdoor_Scene_48 Train 74\n", 727 | "34 Outdoor_Scene_49 Train 125\n", 728 | "35 Indoor_Scene_10 Test 125\n", 729 | "36 Indoor_Scene_11 Test 94\n", 730 | "37 Indoor_Scene_1 Test 124\n", 731 | "38 Indoor_Scene_5 Test 50\n", 732 | "39 Lowlight_Scene_16 Test 87\n", 733 | "40 Lowlight_Scene_17 Test 102\n", 734 | "41 Lowlight_Scene_20 Test 125\n", 735 | "42 Night_Scene_28 Test 112\n", 736 | "43 Night_Scene_30 Test 114\n", 737 | "44 Night_Scene_31 Test 112\n", 738 | "45 Outdoor_Scene_33 Test 119\n", 739 | "46 Outdoor_Scene_34 Test 45\n", 740 | "47 Outdoor_Scene_43 Test 125\n", 741 | "48 Outdoor_Scene_45 Test 78\n", 742 | "49 Outdoor_Scene_46 Test 74" 743 | ] 744 | }, 745 | "execution_count": 5, 746 | "metadata": {}, 747 | "output_type": "execute_result" 748 | } 749 | ], 750 | "source": [ 751 | "sceneSplitDf" 752 | ] 753 | }, 754 | { 755 | "cell_type": "code", 756 | "execution_count": 6, 757 | "id": "7e5cc5ef-3689-4599-b6e1-46982f9fbde3", 758 | "metadata": {}, 759 | "outputs": [], 760 | "source": [ 761 | "trainDf = pd.merge(scoreDf, sceneSplitDf[sceneSplitDf['SPLIT']=='Train'].drop('NB IMAGES', axis=1), on='SCENE', how='inner')\n", 762 | "testDf = pd.merge(scoreDf, sceneSplitDf[sceneSplitDf['SPLIT']=='Test'].drop('NB IMAGES', axis=1), on='SCENE', how='inner')\n", 763 | "\n", 764 | "trainDf.to_csv(rf'./Test split/Scene Split/SceneSplit_Train_{os.path.basename(scorePath)}', index=False)\n", 765 | "testDf.to_csv(rf'./Test split/Scene Split/SceneSplit_Test_{os.path.basename(scorePath)}', index=False)" 766 | ] 767 | }, 768 | { 769 | "cell_type": "code", 770 | "execution_count": 7, 771 | "id": "b293ec7a-5e4c-414c-9ea0-33b5b56940cf", 772 | "metadata": {}, 773 | "outputs": [ 774 | { 775 | "data": { 776 | "text/html": [ 777 | "
\n", 778 | "\n", 791 | "\n", 792 | " \n", 793 | " \n", 794 | " \n", 795 | " \n", 796 | " \n", 797 | " \n", 798 | " \n", 799 | " \n", 800 | " \n", 801 | " \n", 802 | " \n", 803 | " \n", 804 | " \n", 805 | " \n", 806 | " \n", 807 | " \n", 808 | " \n", 809 | " \n", 810 | " \n", 811 | " \n", 812 | " \n", 813 | " \n", 814 | " \n", 815 | " \n", 816 | " \n", 817 | " \n", 818 | " \n", 819 | " \n", 820 | " \n", 821 | " \n", 822 | " \n", 823 | " \n", 824 | " \n", 825 | " \n", 826 | " \n", 827 | " \n", 828 | " \n", 829 | " \n", 830 | " \n", 831 | " \n", 832 | " \n", 833 | " \n", 834 | " \n", 835 | " \n", 836 | " \n", 837 | " \n", 838 | " \n", 839 | " \n", 840 | " \n", 841 | " \n", 842 | " \n", 843 | " \n", 844 | " \n", 845 | " \n", 846 | " \n", 847 | " \n", 848 | " \n", 849 | " \n", 850 | " \n", 851 | " \n", 852 | " \n", 853 | " \n", 854 | " \n", 855 | " \n", 856 | " \n", 857 | " \n", 858 | " \n", 859 | " \n", 860 | " \n", 861 | " \n", 862 | " \n", 863 | " \n", 864 | " \n", 865 | " \n", 866 | " \n", 867 | " \n", 868 | " \n", 869 | " \n", 870 | " \n", 871 | " \n", 872 | " \n", 873 | " \n", 874 | " \n", 875 | " \n", 876 | " \n", 877 | " \n", 878 | " \n", 879 | " \n", 880 | " \n", 881 | " \n", 882 | " \n", 883 | " \n", 884 | " \n", 885 | " \n", 886 | " \n", 887 | " \n", 888 | " \n", 889 | " \n", 890 | " \n", 891 | " \n", 892 | " \n", 893 | " \n", 894 | " \n", 895 | " \n", 896 | " \n", 897 | " \n", 898 | " \n", 899 | " \n", 900 | " \n", 901 | " \n", 902 | " \n", 903 | " \n", 904 | " \n", 905 | " \n", 906 | " \n", 907 | " \n", 908 | " \n", 909 | " \n", 910 | " \n", 911 | " \n", 912 | " \n", 913 | " \n", 914 | " \n", 915 | " \n", 916 | " \n", 917 | " \n", 918 | " \n", 919 | " \n", 920 | " \n", 921 | " \n", 922 | " \n", 923 | " \n", 924 | " \n", 925 | " \n", 926 | " \n", 927 | " \n", 928 | " \n", 929 | " \n", 930 | " \n", 931 | " \n", 932 | " \n", 933 | " \n", 934 | " \n", 935 | " \n", 936 | " \n", 937 | " \n", 938 | " \n", 939 | " \n", 940 | " \n", 941 | " \n", 942 | " \n", 943 | " \n", 944 | " \n", 945 | " \n", 946 | " \n", 947 | " \n", 948 | " \n", 949 | " \n", 950 | " \n", 951 | " \n", 952 | " \n", 953 | " \n", 954 | " \n", 955 | " \n", 956 | " \n", 957 | " \n", 958 | " \n", 959 | " \n", 960 | " \n", 961 | " \n", 962 | " \n", 963 | " \n", 964 | " \n", 965 | " \n", 966 | " \n", 967 | " \n", 968 | " \n", 969 | " \n", 970 | " \n", 971 | " \n", 972 | " \n", 973 | " \n", 974 | " \n", 975 | " \n", 976 | " \n", 977 | " \n", 978 | " \n", 979 | " \n", 980 | " \n", 981 | " \n", 982 | " \n", 983 | " \n", 984 | " \n", 985 | " \n", 986 | " \n", 987 | " \n", 988 | " \n", 989 | " \n", 990 | " \n", 991 | " \n", 992 | " \n", 993 | " \n", 994 | " \n", 995 | " \n", 996 | " \n", 997 | " \n", 998 | " \n", 999 | " \n", 1000 | " \n", 1001 | " \n", 1002 | " \n", 1003 | " \n", 1004 | " \n", 1005 | " \n", 1006 | " \n", 1007 | " \n", 1008 | " \n", 1009 | " \n", 1010 | " \n", 1011 | " \n", 1012 | "
IMAGE PATHJODJOD STDCI LOWCI HIGHCI RANGEQUALITY LEVELCLUSTERTOTAL COMPARISONSIMAGESCENEATTRIBUTESCENE IDXCONDITIONSPLIT
0Exposure\\122_Indoor_Scene_1.tiff-4.0345030.656547-5.226753-2.7847182.4420340-1.046.0122_Indoor_Scene_1.tiffIndoor_Scene_1Exposure1IndoorTest
1Exposure\\124_Indoor_Scene_1.jpg-3.4343710.467843-4.135082-2.2993171.8357650-1.081.0124_Indoor_Scene_1.jpgIndoor_Scene_1Exposure1IndoorTest
2Exposure\\173_Indoor_Scene_1.jpg-3.0761480.831440-4.195738-1.0805123.11522600.094.0173_Indoor_Scene_1.jpgIndoor_Scene_1Exposure1IndoorTest
3Exposure\\154_Indoor_Scene_1.jpg-3.1389770.810391-4.670127-1.5461383.12398900.0147.0154_Indoor_Scene_1.jpgIndoor_Scene_1Exposure1IndoorTest
4Exposure\\101_Indoor_Scene_1.jpg-2.7465190.785913-4.537797-1.4770813.0607170-1.072.0101_Indoor_Scene_1.jpgIndoor_Scene_1Exposure1IndoorTest
................................................
1481Exposure\\4805_Outdoor_Scene_46.jpg2.3190560.6805090.9572903.6895322.7322423-1.090.04805_Outdoor_Scene_46.jpgOutdoor_Scene_46Exposure46OutdoorTest
1482Exposure\\4814_Outdoor_Scene_46.jpeg2.4585510.6786250.8972383.6463422.7491053-1.0100.04814_Outdoor_Scene_46.jpegOutdoor_Scene_46Exposure46OutdoorTest
1483Exposure\\4804_Outdoor_Scene_46.jpg2.5820520.6576481.4980823.9105052.4124233-1.0121.04804_Outdoor_Scene_46.jpgOutdoor_Scene_46Exposure46OutdoorTest
1484Exposure\\4774_Outdoor_Scene_46.jpg4.0084460.5620612.9534705.1578532.2043834-1.035.04774_Outdoor_Scene_46.jpgOutdoor_Scene_46Exposure46OutdoorTest
1485Exposure\\4759_Outdoor_Scene_46.jpg4.1953450.6592812.9259625.5173922.5914304-1.034.04759_Outdoor_Scene_46.jpgOutdoor_Scene_46Exposure46OutdoorTest
\n", 1013 | "

1486 rows × 15 columns

\n", 1014 | "
" 1015 | ], 1016 | "text/plain": [ 1017 | " IMAGE PATH JOD JOD STD CI LOW \\\n", 1018 | "0 Exposure\\122_Indoor_Scene_1.tiff -4.034503 0.656547 -5.226753 \n", 1019 | "1 Exposure\\124_Indoor_Scene_1.jpg -3.434371 0.467843 -4.135082 \n", 1020 | "2 Exposure\\173_Indoor_Scene_1.jpg -3.076148 0.831440 -4.195738 \n", 1021 | "3 Exposure\\154_Indoor_Scene_1.jpg -3.138977 0.810391 -4.670127 \n", 1022 | "4 Exposure\\101_Indoor_Scene_1.jpg -2.746519 0.785913 -4.537797 \n", 1023 | "... ... ... ... ... \n", 1024 | "1481 Exposure\\4805_Outdoor_Scene_46.jpg 2.319056 0.680509 0.957290 \n", 1025 | "1482 Exposure\\4814_Outdoor_Scene_46.jpeg 2.458551 0.678625 0.897238 \n", 1026 | "1483 Exposure\\4804_Outdoor_Scene_46.jpg 2.582052 0.657648 1.498082 \n", 1027 | "1484 Exposure\\4774_Outdoor_Scene_46.jpg 4.008446 0.562061 2.953470 \n", 1028 | "1485 Exposure\\4759_Outdoor_Scene_46.jpg 4.195345 0.659281 2.925962 \n", 1029 | "\n", 1030 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n", 1031 | "0 -2.784718 2.442034 0 -1.0 46.0 \n", 1032 | "1 -2.299317 1.835765 0 -1.0 81.0 \n", 1033 | "2 -1.080512 3.115226 0 0.0 94.0 \n", 1034 | "3 -1.546138 3.123989 0 0.0 147.0 \n", 1035 | "4 -1.477081 3.060717 0 -1.0 72.0 \n", 1036 | "... ... ... ... ... ... \n", 1037 | "1481 3.689532 2.732242 3 -1.0 90.0 \n", 1038 | "1482 3.646342 2.749105 3 -1.0 100.0 \n", 1039 | "1483 3.910505 2.412423 3 -1.0 121.0 \n", 1040 | "1484 5.157853 2.204383 4 -1.0 35.0 \n", 1041 | "1485 5.517392 2.591430 4 -1.0 34.0 \n", 1042 | "\n", 1043 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n", 1044 | "0 122_Indoor_Scene_1.tiff Indoor_Scene_1 Exposure 1 \n", 1045 | "1 124_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n", 1046 | "2 173_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n", 1047 | "3 154_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n", 1048 | "4 101_Indoor_Scene_1.jpg Indoor_Scene_1 Exposure 1 \n", 1049 | "... ... ... ... ... \n", 1050 | "1481 4805_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n", 1051 | "1482 4814_Outdoor_Scene_46.jpeg Outdoor_Scene_46 Exposure 46 \n", 1052 | "1483 4804_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n", 1053 | "1484 4774_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n", 1054 | "1485 4759_Outdoor_Scene_46.jpg Outdoor_Scene_46 Exposure 46 \n", 1055 | "\n", 1056 | " CONDITION SPLIT \n", 1057 | "0 Indoor Test \n", 1058 | "1 Indoor Test \n", 1059 | "2 Indoor Test \n", 1060 | "3 Indoor Test \n", 1061 | "4 Indoor Test \n", 1062 | "... ... ... 
\n", 1063 | "1481 Outdoor Test \n", 1064 | "1482 Outdoor Test \n", 1065 | "1483 Outdoor Test \n", 1066 | "1484 Outdoor Test \n", 1067 | "1485 Outdoor Test \n", 1068 | "\n", 1069 | "[1486 rows x 15 columns]" 1070 | ] 1071 | }, 1072 | "execution_count": 7, 1073 | "metadata": {}, 1074 | "output_type": "execute_result" 1075 | } 1076 | ], 1077 | "source": [ 1078 | "testDf" 1079 | ] 1080 | }, 1081 | { 1082 | "cell_type": "markdown", 1083 | "id": "75abbef3-8d52-48b0-ae24-9789e39ffd9e", 1084 | "metadata": {}, 1085 | "source": [ 1086 | "### Device split" 1087 | ] 1088 | }, 1089 | { 1090 | "cell_type": "code", 1091 | "execution_count": 8, 1092 | "id": "08851a87-e441-4aa4-b214-838c5463609d", 1093 | "metadata": {}, 1094 | "outputs": [], 1095 | "source": [ 1096 | "deviceSplitPath = r\"./Test split/Device Split.csv\"\n", 1097 | "deviceSplitDf = pd.read_csv(deviceSplitPath)" 1098 | ] 1099 | }, 1100 | { 1101 | "cell_type": "code", 1102 | "execution_count": 9, 1103 | "id": "e26117fc-e171-488b-961c-fe3f966cdb46", 1104 | "metadata": {}, 1105 | "outputs": [ 1106 | { 1107 | "data": { 1108 | "text/html": [ 1109 | "
\n", 1110 | "\n", 1123 | "\n", 1124 | " \n", 1125 | " \n", 1126 | " \n", 1127 | " \n", 1128 | " \n", 1129 | " \n", 1130 | " \n", 1131 | " \n", 1132 | " \n", 1133 | " \n", 1134 | " \n", 1135 | " \n", 1136 | " \n", 1137 | " \n", 1138 | " \n", 1139 | " \n", 1140 | " \n", 1141 | " \n", 1142 | " \n", 1143 | " \n", 1144 | " \n", 1145 | " \n", 1146 | " \n", 1147 | " \n", 1148 | " \n", 1149 | " \n", 1150 | " \n", 1151 | " \n", 1152 | " \n", 1153 | " \n", 1154 | " \n", 1155 | " \n", 1156 | " \n", 1157 | " \n", 1158 | " \n", 1159 | " \n", 1160 | " \n", 1161 | " \n", 1162 | " \n", 1163 | " \n", 1164 | " \n", 1165 | " \n", 1166 | " \n", 1167 | " \n", 1168 | " \n", 1169 | " \n", 1170 | " \n", 1171 | " \n", 1172 | " \n", 1173 | " \n", 1174 | " \n", 1175 | " \n", 1176 | " \n", 1177 | " \n", 1178 | " \n", 1179 | " \n", 1180 | " \n", 1181 | " \n", 1182 | " \n", 1183 | " \n", 1184 | " \n", 1185 | " \n", 1186 | " \n", 1187 | " \n", 1188 | "
IMAGESPLIT
00_Indoor_Scene_0.jpgTrain
11_Indoor_Scene_0.jpgTest
22_Indoor_Scene_0.JPGTrain
33_Indoor_Scene_0.JPGTrain
44_Indoor_Scene_0.JPGTrain
.........
51115111_Outdoor_Scene_49.jpgTrain
51125112_Outdoor_Scene_49.jpgTrain
51135113_Outdoor_Scene_49.jpgTrain
51145114_Outdoor_Scene_49.jpgTrain
51155115_Outdoor_Scene_49.jpgTrain
\n", 1189 | "

5116 rows × 2 columns

\n", 1190 | "
" 1191 | ], 1192 | "text/plain": [ 1193 | " IMAGE SPLIT\n", 1194 | "0 0_Indoor_Scene_0.jpg Train\n", 1195 | "1 1_Indoor_Scene_0.jpg Test\n", 1196 | "2 2_Indoor_Scene_0.JPG Train\n", 1197 | "3 3_Indoor_Scene_0.JPG Train\n", 1198 | "4 4_Indoor_Scene_0.JPG Train\n", 1199 | "... ... ...\n", 1200 | "5111 5111_Outdoor_Scene_49.jpg Train\n", 1201 | "5112 5112_Outdoor_Scene_49.jpg Train\n", 1202 | "5113 5113_Outdoor_Scene_49.jpg Train\n", 1203 | "5114 5114_Outdoor_Scene_49.jpg Train\n", 1204 | "5115 5115_Outdoor_Scene_49.jpg Train\n", 1205 | "\n", 1206 | "[5116 rows x 2 columns]" 1207 | ] 1208 | }, 1209 | "execution_count": 9, 1210 | "metadata": {}, 1211 | "output_type": "execute_result" 1212 | } 1213 | ], 1214 | "source": [ 1215 | "deviceSplitDf" 1216 | ] 1217 | }, 1218 | { 1219 | "cell_type": "code", 1220 | "execution_count": 10, 1221 | "id": "8d4c57a5-eab8-44bd-867f-65beb573dc23", 1222 | "metadata": {}, 1223 | "outputs": [], 1224 | "source": [ 1225 | "trainDf = pd.merge(scoreDf, deviceSplitDf[imageSplitDf['SPLIT']=='Train'], on='IMAGE', how='inner')\n", 1226 | "testDf = pd.merge(scoreDf, deviceSplitDf[imageSplitDf['SPLIT']=='Test'], on='IMAGE', how='inner')\n", 1227 | "\n", 1228 | "trainDf.to_csv(rf'./Test split/Device Split/DeviceSplit_Train_{os.path.basename(scorePath)}', index=False)\n", 1229 | "testDf.to_csv(rf'./Test split/Device Split/DeviceSplit_Test_{os.path.basename(scorePath)}', index=False)" 1230 | ] 1231 | }, 1232 | { 1233 | "cell_type": "code", 1234 | "execution_count": 11, 1235 | "id": "5d4a79fb-a312-4eff-a8fb-2f6f51a9d307", 1236 | "metadata": {}, 1237 | "outputs": [ 1238 | { 1239 | "data": { 1240 | "text/html": [ 1241 | "
\n", 1242 | "\n", 1255 | "\n", 1256 | " \n", 1257 | " \n", 1258 | " \n", 1259 | " \n", 1260 | " \n", 1261 | " \n", 1262 | " \n", 1263 | " \n", 1264 | " \n", 1265 | " \n", 1266 | " \n", 1267 | " \n", 1268 | " \n", 1269 | " \n", 1270 | " \n", 1271 | " \n", 1272 | " \n", 1273 | " \n", 1274 | " \n", 1275 | " \n", 1276 | " \n", 1277 | " \n", 1278 | " \n", 1279 | " \n", 1280 | " \n", 1281 | " \n", 1282 | " \n", 1283 | " \n", 1284 | " \n", 1285 | " \n", 1286 | " \n", 1287 | " \n", 1288 | " \n", 1289 | " \n", 1290 | " \n", 1291 | " \n", 1292 | " \n", 1293 | " \n", 1294 | " \n", 1295 | " \n", 1296 | " \n", 1297 | " \n", 1298 | " \n", 1299 | " \n", 1300 | " \n", 1301 | " \n", 1302 | " \n", 1303 | " \n", 1304 | " \n", 1305 | " \n", 1306 | " \n", 1307 | " \n", 1308 | " \n", 1309 | " \n", 1310 | " \n", 1311 | " \n", 1312 | " \n", 1313 | " \n", 1314 | " \n", 1315 | " \n", 1316 | " \n", 1317 | " \n", 1318 | " \n", 1319 | " \n", 1320 | " \n", 1321 | " \n", 1322 | " \n", 1323 | " \n", 1324 | " \n", 1325 | " \n", 1326 | " \n", 1327 | " \n", 1328 | " \n", 1329 | " \n", 1330 | " \n", 1331 | " \n", 1332 | " \n", 1333 | " \n", 1334 | " \n", 1335 | " \n", 1336 | " \n", 1337 | " \n", 1338 | " \n", 1339 | " \n", 1340 | " \n", 1341 | " \n", 1342 | " \n", 1343 | " \n", 1344 | " \n", 1345 | " \n", 1346 | " \n", 1347 | " \n", 1348 | " \n", 1349 | " \n", 1350 | " \n", 1351 | " \n", 1352 | " \n", 1353 | " \n", 1354 | " \n", 1355 | " \n", 1356 | " \n", 1357 | " \n", 1358 | " \n", 1359 | " \n", 1360 | " \n", 1361 | " \n", 1362 | " \n", 1363 | " \n", 1364 | " \n", 1365 | " \n", 1366 | " \n", 1367 | " \n", 1368 | " \n", 1369 | " \n", 1370 | " \n", 1371 | " \n", 1372 | " \n", 1373 | " \n", 1374 | " \n", 1375 | " \n", 1376 | " \n", 1377 | " \n", 1378 | " \n", 1379 | " \n", 1380 | " \n", 1381 | " \n", 1382 | " \n", 1383 | " \n", 1384 | " \n", 1385 | " \n", 1386 | " \n", 1387 | " \n", 1388 | " \n", 1389 | " \n", 1390 | " \n", 1391 | " \n", 1392 | " \n", 1393 | " \n", 1394 | " \n", 1395 | " \n", 1396 | " \n", 1397 | " \n", 1398 | " \n", 1399 | " \n", 1400 | " \n", 1401 | " \n", 1402 | " \n", 1403 | " \n", 1404 | " \n", 1405 | " \n", 1406 | " \n", 1407 | " \n", 1408 | " \n", 1409 | " \n", 1410 | " \n", 1411 | " \n", 1412 | " \n", 1413 | " \n", 1414 | " \n", 1415 | " \n", 1416 | " \n", 1417 | " \n", 1418 | " \n", 1419 | " \n", 1420 | " \n", 1421 | " \n", 1422 | " \n", 1423 | " \n", 1424 | " \n", 1425 | " \n", 1426 | " \n", 1427 | " \n", 1428 | " \n", 1429 | " \n", 1430 | " \n", 1431 | " \n", 1432 | " \n", 1433 | " \n", 1434 | " \n", 1435 | " \n", 1436 | " \n", 1437 | " \n", 1438 | " \n", 1439 | " \n", 1440 | " \n", 1441 | " \n", 1442 | " \n", 1443 | " \n", 1444 | " \n", 1445 | " \n", 1446 | " \n", 1447 | " \n", 1448 | " \n", 1449 | " \n", 1450 | " \n", 1451 | " \n", 1452 | " \n", 1453 | " \n", 1454 | " \n", 1455 | " \n", 1456 | " \n", 1457 | " \n", 1458 | " \n", 1459 | " \n", 1460 | " \n", 1461 | " \n", 1462 | " \n", 1463 | " \n", 1464 | " \n", 1465 | " \n", 1466 | " \n", 1467 | " \n", 1468 | " \n", 1469 | " \n", 1470 | " \n", 1471 | " \n", 1472 | " \n", 1473 | " \n", 1474 | " \n", 1475 | " \n", 1476 | "
IMAGE PATHJODJOD STDCI LOWCI HIGHCI RANGEQUALITY LEVELCLUSTERTOTAL COMPARISONSIMAGESCENEATTRIBUTESCENE IDXCONDITIONSPLIT
0Exposure\\56_Indoor_Scene_0.jpg-2.6448000.784488-4.347027-1.3324173.0146100-1.0109.056_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0IndoorTest
1Exposure\\66_Indoor_Scene_0.jpg-2.0832140.485894-2.960870-1.1440541.81681600.0106.066_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0IndoorTest
2Exposure\\31_Indoor_Scene_0.tiff-1.8506200.411542-2.626136-0.9755101.6506261-1.0118.031_Indoor_Scene_0.tiffIndoor_Scene_0Exposure0IndoorTest
3Exposure\\33_Indoor_Scene_0.jpg-1.6414100.518849-2.717875-0.6256682.09220711.0120.033_Indoor_Scene_0.jpgIndoor_Scene_0Exposure0IndoorTest
4Exposure\\36_Indoor_Scene_0.jpeg-0.5809680.450048-1.5391210.2556111.79473225.0106.036_Indoor_Scene_0.jpegIndoor_Scene_0Exposure0IndoorTest
................................................
1507Exposure\\5100_Outdoor_Scene_49.jpg1.6616560.3873370.9540932.5482171.5941244-1.0109.05100_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49OutdoorTest
1508Exposure\\5069_Outdoor_Scene_49.jpg2.0002290.4023521.1944272.7298011.5353744-1.0109.05069_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49OutdoorTest
1509Exposure\\5066_Outdoor_Scene_49.jpg2.9184510.3435952.3217013.6081921.286491520.0107.05066_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49OutdoorTest
1510Exposure\\4999_Outdoor_Scene_49.jpg3.8136260.5357962.5328324.6915822.1587505-1.0105.04999_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49OutdoorTest
1511Exposure\\5004_Outdoor_Scene_49.jpg4.1296991.9946170.0301116.1273636.0972525-1.087.05004_Outdoor_Scene_49.jpgOutdoor_Scene_49Exposure49OutdoorTest
\n", 1477 | "

1512 rows × 15 columns

\n", 1478 | "
" 1479 | ], 1480 | "text/plain": [ 1481 | " IMAGE PATH JOD JOD STD CI LOW \\\n", 1482 | "0 Exposure\\56_Indoor_Scene_0.jpg -2.644800 0.784488 -4.347027 \n", 1483 | "1 Exposure\\66_Indoor_Scene_0.jpg -2.083214 0.485894 -2.960870 \n", 1484 | "2 Exposure\\31_Indoor_Scene_0.tiff -1.850620 0.411542 -2.626136 \n", 1485 | "3 Exposure\\33_Indoor_Scene_0.jpg -1.641410 0.518849 -2.717875 \n", 1486 | "4 Exposure\\36_Indoor_Scene_0.jpeg -0.580968 0.450048 -1.539121 \n", 1487 | "... ... ... ... ... \n", 1488 | "1507 Exposure\\5100_Outdoor_Scene_49.jpg 1.661656 0.387337 0.954093 \n", 1489 | "1508 Exposure\\5069_Outdoor_Scene_49.jpg 2.000229 0.402352 1.194427 \n", 1490 | "1509 Exposure\\5066_Outdoor_Scene_49.jpg 2.918451 0.343595 2.321701 \n", 1491 | "1510 Exposure\\4999_Outdoor_Scene_49.jpg 3.813626 0.535796 2.532832 \n", 1492 | "1511 Exposure\\5004_Outdoor_Scene_49.jpg 4.129699 1.994617 0.030111 \n", 1493 | "\n", 1494 | " CI HIGH CI RANGE QUALITY LEVEL CLUSTER TOTAL COMPARISONS \\\n", 1495 | "0 -1.332417 3.014610 0 -1.0 109.0 \n", 1496 | "1 -1.144054 1.816816 0 0.0 106.0 \n", 1497 | "2 -0.975510 1.650626 1 -1.0 118.0 \n", 1498 | "3 -0.625668 2.092207 1 1.0 120.0 \n", 1499 | "4 0.255611 1.794732 2 5.0 106.0 \n", 1500 | "... ... ... ... ... ... \n", 1501 | "1507 2.548217 1.594124 4 -1.0 109.0 \n", 1502 | "1508 2.729801 1.535374 4 -1.0 109.0 \n", 1503 | "1509 3.608192 1.286491 5 20.0 107.0 \n", 1504 | "1510 4.691582 2.158750 5 -1.0 105.0 \n", 1505 | "1511 6.127363 6.097252 5 -1.0 87.0 \n", 1506 | "\n", 1507 | " IMAGE SCENE ATTRIBUTE SCENE IDX \\\n", 1508 | "0 56_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 1509 | "1 66_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 1510 | "2 31_Indoor_Scene_0.tiff Indoor_Scene_0 Exposure 0 \n", 1511 | "3 33_Indoor_Scene_0.jpg Indoor_Scene_0 Exposure 0 \n", 1512 | "4 36_Indoor_Scene_0.jpeg Indoor_Scene_0 Exposure 0 \n", 1513 | "... ... ... ... ... \n", 1514 | "1507 5100_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 1515 | "1508 5069_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 1516 | "1509 5066_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 1517 | "1510 4999_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 1518 | "1511 5004_Outdoor_Scene_49.jpg Outdoor_Scene_49 Exposure 49 \n", 1519 | "\n", 1520 | " CONDITION SPLIT \n", 1521 | "0 Indoor Test \n", 1522 | "1 Indoor Test \n", 1523 | "2 Indoor Test \n", 1524 | "3 Indoor Test \n", 1525 | "4 Indoor Test \n", 1526 | "... ... ... 
\n", 1527 | "1507 Outdoor Test \n", 1528 | "1508 Outdoor Test \n", 1529 | "1509 Outdoor Test \n", 1530 | "1510 Outdoor Test \n", 1531 | "1511 Outdoor Test \n", 1532 | "\n", 1533 | "[1512 rows x 15 columns]" 1534 | ] 1535 | }, 1536 | "execution_count": 11, 1537 | "metadata": {}, 1538 | "output_type": "execute_result" 1539 | } 1540 | ], 1541 | "source": [ 1542 | "testDf" 1543 | ] 1544 | }, 1545 | { 1546 | "cell_type": "code", 1547 | "execution_count": null, 1548 | "id": "aa0804cd-feec-49e8-a949-fa776389b8d4", 1549 | "metadata": {}, 1550 | "outputs": [], 1551 | "source": [] 1552 | } 1553 | ], 1554 | "metadata": { 1555 | "kernelspec": { 1556 | "display_name": "Python 3", 1557 | "language": "python", 1558 | "name": "python3" 1559 | }, 1560 | "language_info": { 1561 | "codemirror_mode": { 1562 | "name": "ipython", 1563 | "version": 3 1564 | }, 1565 | "file_extension": ".py", 1566 | "mimetype": "text/x-python", 1567 | "name": "python", 1568 | "nbconvert_exporter": "python", 1569 | "pygments_lexer": "ipython3", 1570 | "version": "3.8.8" 1571 | } 1572 | }, 1573 | "nbformat": 4, 1574 | "nbformat_minor": 5 1575 | } 1576 | --------------------------------------------------------------------------------