├── utils
│   ├── __init__.py
│   ├── torch_utils.py
│   └── general_utils.py
├── pipeline
│   ├── __init__.py
│   ├── tone_curve.mat
│   ├── LICENSE
│   ├── exif_data_formats.py
│   ├── raw_utils.py
│   ├── cct_utils.py
│   ├── dng_opcode.py
│   ├── exif_utils.py
│   └── pipeline.py
├── docs
│   ├── illum_est_nus.pptx
│   ├── figures
│   │   └── graphics2raw_pipeline.png
│   ├── illum_est.md
│   ├── upi.md
│   ├── raw_denoising.md
│   └── neural_isp.md
├── .gitignore
├── data_generation
│   ├── night_dict_v2.mat
│   ├── unprocess.py
│   ├── invert_synthia_upi.py
│   ├── package_clean_nighttime_raw_to_dng.py
│   ├── mask_color_chart_resize_save.m
│   ├── package_exr_to_dng_upi.py
│   ├── invert_synthia_graphics2raw.py
│   └── package_exr_to_dng_graphics2raw.py
├── assets
│   ├── container_dngs
│   │   ├── NUS_CST_mats.p
│   │   ├── NUS_S20FE_CST_mats.p
│   │   ├── container_dng_S20_FE_main_rectilinear_OFF_noise_OFF.dng
│   │   └── container_dng_S20_FE_main_rectilinear_OFF_gain_OFF_noise_OFF_cam_calib_OFF.dng
│   └── split_files
│       ├── day2night_k_fold_indices.p
│       ├── graphics2raw_train_val_list.p
│       └── illum_est
│           ├── synthia_train_val_list.p
│           ├── NikonD40_train_valid_test_split_fns.p
│           ├── NikonD40_train_valid_test_split_idx.p
│           ├── SonyA57_train_valid_test_split_fns.p
│           ├── SonyA57_train_valid_test_split_idx.p
│           ├── Canon600D_train_valid_test_split_fns.p
│           ├── Canon600D_train_valid_test_split_idx.p
│           ├── FujifilmXM1_train_valid_test_split_fns.p
│           ├── FujifilmXM1_train_valid_test_split_idx.p
│           ├── NikonD5200_train_valid_test_split_fns.p
│           ├── NikonD5200_train_valid_test_split_idx.p
│           ├── OlympusEPL6_train_valid_test_split_fns.p
│           ├── OlympusEPL6_train_valid_test_split_idx.p
│           ├── Canon1DsMkIII_train_valid_test_split_fns.p
│           ├── Canon1DsMkIII_train_valid_test_split_idx.p
│           ├── PanasonicGX1_train_valid_test_split_fns.p
│           ├── PanasonicGX1_train_valid_test_split_idx.p
│           ├── SamsungNX2000_train_valid_test_split_fns.p
│           └── SamsungNX2000_train_valid_test_split_idx.p
├── noise_profiler
│   ├── h-gauss-s20-v1
│   │   ├── model_params.npy
│   │   ├── iso2b1_interp_splines.npy
│   │   └── iso2b2_interp_splines.npy
│   ├── spatial_correlation
│   │   └── utils.py
│   └── img_utils.py
├── model_archs
│   ├── restormer.py
│   ├── cnn.py
│   └── unet.py
├── requirements.txt
├── jobs
│   ├── job_utils.py
│   ├── generate_dataset_isp_denoise_upi.py
│   ├── generate_dataset_isp_denoise_graphics2raw.py
│   ├── denoise_upi_iso1600.py
│   ├── denoise_upi_iso3200.py
│   ├── generate_dataset_illum_est_upi.py
│   ├── denoise_graphics2raw_iso1600.py
│   ├── denoise_real_iso1600.py
│   ├── neural_isp_upi.py
│   ├── illum_est_upi.py
│   ├── generate_dataset_illum_est_graphics2raw.py
│   ├── neural_isp_real.py
│   ├── neural_isp_graphics2raw.py
│   ├── illum_est.py
│   ├── denoise_graphics2raw_iso3200.py
│   └── denoise_real_iso3200.py
├── data_preparation
│   ├── initial_data_prep_tif2png.py
│   ├── initial_data_prep_denoise.py
│   ├── dataset_denoise.py
│   ├── initial_data_prep_neural_isp.py
│   ├── dataset_illum_est.py
│   ├── dataset_neural_isp.py
│   ├── data_generator_illum_est.py
│   ├── k_fold_split_data.py
│   ├── data_generator_neural_isp.py
│   └── data_generator_denoise.py
├── README.md
├── test_illum_est.py
└── test_denoise.py

/utils/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/pipeline/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/docs/illum_est_nus.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/docs/illum_est_nus.pptx
-------------------------------------------------------------------------------- /pipeline/tone_curve.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/pipeline/tone_curve.mat -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | real_dataset* 2 | graphics_dataset 3 | models 4 | results 5 | tensorboard 6 | __pycache__/ 7 | *.pyc 8 | .idea 9 | -------------------------------------------------------------------------------- /data_generation/night_dict_v2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/data_generation/night_dict_v2.mat -------------------------------------------------------------------------------- /assets/container_dngs/NUS_CST_mats.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/container_dngs/NUS_CST_mats.p -------------------------------------------------------------------------------- /docs/figures/graphics2raw_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/docs/figures/graphics2raw_pipeline.png -------------------------------------------------------------------------------- /assets/container_dngs/NUS_S20FE_CST_mats.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/container_dngs/NUS_S20FE_CST_mats.p -------------------------------------------------------------------------------- /assets/split_files/day2night_k_fold_indices.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/day2night_k_fold_indices.p -------------------------------------------------------------------------------- /noise_profiler/h-gauss-s20-v1/model_params.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/noise_profiler/h-gauss-s20-v1/model_params.npy -------------------------------------------------------------------------------- /assets/split_files/graphics2raw_train_val_list.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/graphics2raw_train_val_list.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/synthia_train_val_list.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/synthia_train_val_list.p -------------------------------------------------------------------------------- /noise_profiler/h-gauss-s20-v1/iso2b1_interp_splines.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/noise_profiler/h-gauss-s20-v1/iso2b1_interp_splines.npy -------------------------------------------------------------------------------- 
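The `.p` assets above are Python pickles and the `.npy` assets are NumPy archives. A minimal sketch for inspecting them locally — the paths come from this listing, but the layout of the loaded objects is not documented here, so treat the printed structure as exploratory, not as a specified API:

```python
import pickle
import numpy as np

# CST matrices; docs/upi.md loads this same pickle in its modified random_ccm()
with open('assets/container_dngs/NUS_S20FE_CST_mats.p', 'rb') as f:
    xyz2cams = pickle.load(f)

# Noise model parameters for the h-gauss-s20-v1 profile (object layout assumed)
model_params = np.load('noise_profiler/h-gauss-s20-v1/model_params.npy',
                       allow_pickle=True)

print(type(xyz2cams))
print(type(model_params))
```
--------------------------------------------------------------------------------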
/noise_profiler/h-gauss-s20-v1/iso2b2_interp_splines.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/noise_profiler/h-gauss-s20-v1/iso2b2_interp_splines.npy -------------------------------------------------------------------------------- /assets/split_files/illum_est/NikonD40_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/NikonD40_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/NikonD40_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/NikonD40_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/SonyA57_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/SonyA57_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/SonyA57_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/SonyA57_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/Canon600D_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/Canon600D_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/Canon600D_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/Canon600D_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/FujifilmXM1_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/FujifilmXM1_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/FujifilmXM1_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/FujifilmXM1_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/NikonD5200_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/NikonD5200_train_valid_test_split_fns.p 
-------------------------------------------------------------------------------- /assets/split_files/illum_est/NikonD5200_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/NikonD5200_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/OlympusEPL6_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/OlympusEPL6_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/OlympusEPL6_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/OlympusEPL6_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/Canon1DsMkIII_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/Canon1DsMkIII_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/Canon1DsMkIII_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/Canon1DsMkIII_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/PanasonicGX1_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/PanasonicGX1_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/PanasonicGX1_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/PanasonicGX1_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/SamsungNX2000_train_valid_test_split_fns.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/SamsungNX2000_train_valid_test_split_fns.p -------------------------------------------------------------------------------- /assets/split_files/illum_est/SamsungNX2000_train_valid_test_split_idx.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/split_files/illum_est/SamsungNX2000_train_valid_test_split_idx.p -------------------------------------------------------------------------------- /assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_noise_OFF.dng: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_noise_OFF.dng -------------------------------------------------------------------------------- /assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_gain_OFF_noise_OFF_cam_calib_OFF.dng: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamsungLabs/graphics2raw/HEAD/assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_gain_OFF_noise_OFF_cam_calib_OFF.dng -------------------------------------------------------------------------------- /data_generation/unprocess.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copy over the code from https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py 3 | and make modifications according to docs/upi.md 4 | """ 5 | 6 | import tensorflow as tf 7 | import pickle 8 | 9 | 10 | def unprocess(image): 11 | pass -------------------------------------------------------------------------------- /model_archs/restormer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copy over the code from https://github.com/swz30/Restormer/blob/main/basicsr/models/archs/restormer_arch.py as is 3 | """ 4 | 5 | import torch.nn as nn 6 | 7 | 8 | class Restormer(nn.Module): 9 | def __init__(self): 10 | super(Restormer, self).__init__() 11 | pass 12 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.21.6 2 | torch==1.10.0+cu113 3 | torchvision==0.11.1+cu113 4 | opencv-python==4.5.2.52 5 | scipy==1.7.3 6 | exifread==3.0.0 7 | rawpy==0.18.1 8 | colour_demosaicing==0.1.6 9 | matplotlib==3.5.3 10 | tensorboard==2.11.2 11 | scikit-image==0.15.0 12 | setuptools==59.5.0 13 | scikit-learn==1.0.2 14 | 15 | # for Restormer 16 | einops==0.6.1 17 | # for UPI; other versions may work 18 | tensorflow==2.11.0 -------------------------------------------------------------------------------- /pipeline/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [2019] [Abdelrahman Abdelhamed] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
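A note on the `data_generation/unprocess.py` stub above: it is meant to be filled in with the official UPI code and then modified per `docs/upi.md` (later in this listing). As a minimal sketch of what the modified `random_gains()` looks like under those instructions — the red/blue gain ranges are copied verbatim from upi.md, while the `rgb_gain` line is an assumption carried over from the upstream UPI code, not something this repo distributes:

```python
import tensorflow as tf


def random_gains():
    # Overall exposure gain, assumed unchanged from upstream UPI.
    rgb_gain = 1.0 / tf.random.normal((), mean=0.8, stddev=0.1)
    # White-balance gain ranges replaced per docs/upi.md, step 3.
    red_gain = tf.random.uniform((), 1.0, 3.3)
    blue_gain = tf.random.uniform((), 1.3, 4.4)
    return rgb_gain, red_gain, blue_gain
```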
-------------------------------------------------------------------------------- /jobs/job_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import os 18 | 19 | 20 | def prep_environment(): 21 | os.system('pip install -r requirements.txt') 22 | 23 | 24 | def get_day2night_data(): 25 | # copy subdirectories in night_real into ./real_dataset 26 | assert os.path.exists('real_dataset') 27 | assert os.path.exists('real_dataset/clean') 28 | assert os.path.exists('real_dataset/clean_raw') 29 | assert os.path.exists('real_dataset/dng') 30 | -------------------------------------------------------------------------------- /utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import torchvision 18 | import torch 19 | 20 | 21 | def torchvision_visualize_raw(images) -> torch.Tensor: 22 | """ 23 | :param images: a list of torch tensors. Each item is a 4 channel stacked RAW image 24 | with bayer pattern [1, 0, 2, 1]. Item shape: (b, 4, h, w) 25 | :return: a stacked grid 26 | """ 27 | # stacked in order of [1,0,2,1] -> want to swap 1 and 0 -> [0,1,2] 28 | images = [im[:, 0:3, :, :][:, [1, 0, 2], ...] for im in images] 29 | img_grid = torchvision.utils.make_grid(torch.cat(images, 2), normalize=False) 30 | return img_grid 31 | -------------------------------------------------------------------------------- /jobs/generate_dataset_isp_denoise_upi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import os 18 | from jobs.job_utils import prep_environment 19 | 20 | 21 | def main(): 22 | save_path = 'neural_isp_expts/data/graphics_dngs_upi' 23 | os.makedirs(save_path, exist_ok=True) 24 | 25 | exr_folder_path = 'path/to/graphics2RAW-dataset/' 26 | 27 | args_str = f"--save_path {save_path} \ 28 | --exr_folder_path {exr_folder_path}" 29 | 30 | os.system('python3 -m data_generation.package_exr_to_dng_upi {}'.format(args_str)) 31 | 32 | 33 | if __name__ == '__main__': 34 | prep_environment() 35 | main() 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /jobs/generate_dataset_isp_denoise_graphics2raw.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | 18 | import os 19 | from jobs.job_utils import prep_environment 20 | 21 | 22 | def main(): 23 | save_path = 'neural_isp_expts/data/graphics_dngs_graphics2raw' 24 | os.makedirs(save_path, exist_ok=True) 25 | 26 | exr_folder_path = 'path/to/graphics2RAW-dataset/' 27 | 28 | args_str = f"--save_path {save_path} \ 29 | --exr_folder_path {exr_folder_path} \ 30 | --rgb_gain \ 31 | --rgb_gain_mean 0.8" 32 | # do apply safe invert 33 | 34 | os.system('python3 -m data_generation.package_exr_to_dng_graphics2raw {} '.format(args_str)) 35 | 36 | 37 | if __name__ == '__main__': 38 | prep_environment() 39 | main() 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /jobs/denoise_upi_iso1600.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import time 18 | import shutil 19 | from jobs.denoise_graphics2raw_iso3200 import train, test, get_parser, prep_environment, movedata 20 | 21 | 22 | if __name__ == '__main__': 23 | prep_environment() 24 | parser = get_parser() 25 | args = parser.parse_args() 26 | 27 | iso = 1600 28 | exp_name = f'denoise_upi_iso{iso}' 29 | input_dir = 'graphics_dngs_upi' 30 | movedata(input_dir) 31 | 32 | num_runs = args.num_runs 33 | for i in range(num_runs): 34 | timestamp = int(time.time()) 35 | train(exp_name, timestamp, iso) 36 | test(exp_name, timestamp, iso) 37 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 38 | 39 | shutil.rmtree('results') 40 | -------------------------------------------------------------------------------- /jobs/denoise_upi_iso3200.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import time 18 | import shutil 19 | from jobs.denoise_graphics2raw_iso3200 import train, test, get_parser, prep_environment, movedata 20 | 21 | 22 | if __name__ == '__main__': 23 | prep_environment() 24 | parser = get_parser() 25 | args = parser.parse_args() 26 | 27 | iso = 3200 28 | exp_name = f'denoise_upi_iso{iso}' 29 | input_dir = 'graphics_dngs_upi' 30 | movedata(input_dir) 31 | 32 | num_runs = args.num_runs 33 | for i in range(num_runs): 34 | timestamp = int(time.time()) 35 | train(exp_name, timestamp, iso) 36 | test(exp_name, timestamp, iso) 37 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 38 | 39 | shutil.rmtree('results') 40 | -------------------------------------------------------------------------------- /jobs/generate_dataset_illum_est_upi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | 18 | import os 19 | from jobs.job_utils import prep_environment 20 | 21 | 22 | def main(): 23 | cameras = ['SamsungNX2000'] 24 | 25 | graphics_path = 'illum_est_expts/synthia/SYNTHIA_RAND_CVPR16/RGB' 26 | 27 | method = 'upi' 28 | 29 | for camera in cameras: 30 | save_path = os.path.join('illum_est_expts/data', camera, method) 31 | os.makedirs(save_path, exist_ok=True) 32 | 33 | args_str = f"--save_path {save_path} \ 34 | --graphics_path {graphics_path}" 35 | 36 | os.system('python3 -m data_generation.invert_synthia_upi {} '.format(args_str)) 37 | 38 | 39 | if __name__ == '__main__': 40 | prep_environment() 41 | main() 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /jobs/denoise_graphics2raw_iso1600.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import time 18 | import shutil 19 | from jobs.denoise_graphics2raw_iso3200 import prep_environment, movedata, get_parser, train, test 20 | 21 | if __name__ == '__main__': 22 | prep_environment() 23 | parser = get_parser() 24 | args = parser.parse_args() 25 | 26 | iso = 1600 27 | exp_name = f'denoise_graphics2raw_iso{iso}' 28 | 29 | input_dir = 'graphics_dngs_graphics2raw' 30 | movedata(input_dir) 31 | 32 | num_runs = args.num_runs 33 | for i in range(num_runs): 34 | timestamp = int(time.time()) 35 | train(exp_name, timestamp, iso) 36 | test(exp_name, timestamp, iso) 37 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 38 | 39 | shutil.rmtree('results') 40 | -------------------------------------------------------------------------------- /jobs/denoise_real_iso1600.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import time 18 | from jobs.denoise_real_iso3200 import get_parser, movedata, train, test 19 | from jobs.job_utils import prep_environment 20 | import shutil 21 | 22 | 23 | if __name__ == '__main__': 24 | prep_environment() 25 | parser = get_parser() 26 | args = parser.parse_args() 27 | 28 | fold = args.fold 29 | iso = 1600 30 | exp_name = f'denoise_real_iso{iso}_f{fold}' 31 | 32 | movedata(fold) 33 | 34 | num_runs = args.num_runs 35 | for i in range(num_runs): 36 | timestamp = int(time.time()) 37 | train(exp_name, timestamp, args.restormer_dim, args.epochs) 38 | test(exp_name, timestamp, args.restormer_dim) 39 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 40 | 41 | shutil.rmtree('results') 42 | -------------------------------------------------------------------------------- /jobs/neural_isp_upi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | import time 18 | from jobs.job_utils import prep_environment 19 | from jobs.neural_isp_graphics2raw import movedata, train, test 20 | import argparse 21 | 22 | 23 | if __name__ == '__main__': 24 | prep_environment() 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('--timestamp', '-t', default=None, type=str, help='Time stamp to distinguish between runs.') 27 | parser.add_argument('--eval', '-e', action='store_true', help='Evaluation only, no training.') 28 | args = parser.parse_args() 29 | timestamp = args.timestamp if args.timestamp else int(time.time()) 30 | 31 | input_type = 'clean_raw' 32 | exp_name = 'neural_isp_upi' 33 | 34 | input_dir = 'graphics_dngs_upi' 35 | target_dir = 'graphics_srgb_upi' 36 | movedata(input_dir, target_dir) 37 | if not args.eval: 38 | train(input_type, exp_name, timestamp) 39 | test(input_type, exp_name, timestamp) 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /pipeline/exif_data_formats.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author(s): 3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com) 4 | 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 
 6 | 
 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
 8 | you may not use this file except in compliance with the License.
 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and limitations under the License.
13 | For conditions of distribution and use, see the accompanying LICENSE.md file.
14 | 
15 | """
16 | 
17 | 
18 | class ExifFormat:
19 |     def __init__(self, id, name, size, short_name):
20 |         self.id = id
21 |         self.name = name
22 |         self.size = size
23 |         self.short_name = short_name  # used with struct.unpack()
24 | 
25 | 
26 | exif_formats = {
27 |     1: ExifFormat(1, 'unsigned byte', 1, 'B'),
28 |     2: ExifFormat(2, 'ascii string', 1, 's'),
29 |     3: ExifFormat(3, 'unsigned short', 2, 'H'),
30 |     4: ExifFormat(4, 'unsigned long', 4, 'L'),
31 |     5: ExifFormat(5, 'unsigned rational', 8, ''),
32 |     6: ExifFormat(6, 'signed byte', 1, 'b'),
33 |     7: ExifFormat(7, 'undefined', 1, 'B'),  # consider `undefined` as `unsigned byte`
34 |     8: ExifFormat(8, 'signed short', 2, 'h'),
35 |     9: ExifFormat(9, 'signed long', 4, 'l'),
36 |     10: ExifFormat(10, 'signed rational', 8, ''),
37 |     11: ExifFormat(11, 'single float', 4, 'f'),
38 |     12: ExifFormat(12, 'double float', 8, 'd'),
39 | }
--------------------------------------------------------------------------------
/model_archs/cnn.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Implementation of
 3 | Convolutional Mean: A Simple Convolutional Neural Network for Illuminant Estimation
 4 | 
 5 | @INPROCEEDINGS{convmean,
 6 | title={Convolutional Mean: {A} Simple Convolutional Neural Network for Illuminant Estimation},
 7 | author={Han Gong},
 8 | booktitle={BMVC},
 9 | year={2019},
10 | }
11 | """
12 | 
13 | import torch.nn as nn
14 | 
15 | 
16 | class IllumEstNet(nn.Module):
17 | 
18 |     def __init__(self, in_channels=3, out_channels=1, num_filters=7):
19 |         super(IllumEstNet, self).__init__()
20 | 
21 |         # conv1
22 |         self.conv1_1 = nn.Conv2d(in_channels, num_filters, (3, 3), padding=1)
23 |         self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
24 |         self.relu1_1 = nn.ReLU(inplace=True)
25 | 
26 |         # conv2
27 |         self.conv2_1 = nn.Conv2d(num_filters, num_filters * 2, (3, 3), padding=1)
28 |         self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
29 |         self.relu2_1 = nn.ReLU(inplace=True)
30 | 
31 |         # conv3
32 |         self.conv3_1 = nn.Conv2d(num_filters * 2, out_channels, (1, 1), padding=0)  # 1x1 conv
33 |         self.relu3_1 = nn.ReLU(inplace=True)
34 | 
35 |         # per channel global average pooling
36 |         self.aap = nn.AdaptiveAvgPool2d((1, 1))  # b, c, 1, 1
37 | 
38 |     def forward(self, x):
39 |         # x: b, 3, p, p
40 |         x = self.conv1_1(x)  # b, 7, p, p
41 |         x = self.pool1(x)  # b, 7, p/2, p/2
42 |         x = self.relu1_1(x)
43 | 
44 |         x = self.conv2_1(x)  # b, 14, p/2, p/2
45 |         x = self.pool2(x)  # b, 14, p/4, p/4 (e.g. 16, 16 for p = 64)
46 |         x = self.relu2_1(x)
47 | 
48 |         x = self.conv3_1(x)  # b, 3, p/4, p/4
49 |         x = self.relu3_1(x)
50 |         x = self.aap(x)  # b, 3, 1, 1
51 |         x = x.squeeze(dim=-1).squeeze(dim=-1)  # b, 3
52 |         return x
--------------------------------------------------------------------------------
/data_preparation/initial_data_prep_tif2png.py:
-------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | Convert PS tif to png 16 | """ 17 | 18 | import argparse 19 | import cv2 20 | from utils.general_utils import check_dir 21 | import os 22 | from glob import glob 23 | from shutil import rmtree 24 | 25 | if __name__ == '__main__': 26 | 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument('--tif_dir', default='neural_isp_expts/data/clean_srgb_tiffs/', type=str, help='tif dir') 29 | parser.add_argument('--save_dir', default='real_dataset/clean', type=str, help='save dir') 30 | 31 | args = parser.parse_args() 32 | 33 | # remove png files already inside 34 | if os.path.isdir(args.save_dir): rmtree(args.save_dir) 35 | 36 | # create directory again 37 | check_dir(args.save_dir) 38 | 39 | allfiles = sorted(glob(os.path.join(args.tif_dir, '*.tif'))) 40 | 41 | for fil in allfiles: 42 | cleanimg = cv2.imread(os.path.join(fil), cv2.IMREAD_UNCHANGED) 43 | destination = os.path.join(args.save_dir, os.path.basename(fil)[:-4] + '.png') 44 | cv2.imwrite(destination, cleanimg) 45 | 46 | -------------------------------------------------------------------------------- /jobs/illum_est_upi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 
15 | """
16 | 
17 | import time
18 | from jobs.job_utils import prep_environment
19 | from jobs.illum_est import train, test, ROOT_DIR
20 | import argparse
21 | 
22 | 
23 | if __name__ == '__main__':
24 |     prep_environment()
25 |     parser = argparse.ArgumentParser()
26 |     parser.add_argument('--num_runs', '-n', default=5, type=int, help='Number of repeated runs.')
27 |     args = parser.parse_args()
28 | 
29 |     method = 'upi'
30 |     cameras = [
31 |         'Canon1DsMkIII',
32 |         'Canon600D',
33 |         'FujifilmXM1',
34 |         'NikonD5200',
35 |         'OlympusEPL6',
36 |         'PanasonicGX1',
37 |         'SamsungNX2000',
38 |         'SonyA57',
39 |         'NikonD40'
40 |     ]
41 |     exp_id = 'illum_est'
42 |     dataset_dir = f'{ROOT_DIR}/data'
43 | 
44 |     for i in range(args.num_runs):
45 |         timestamp = int(time.time())
46 |         exp_name = f'{exp_id}_{method}_{timestamp}'
47 | 
48 |         train('SamsungNX2000', method, exp_name, dataset_dir)
49 | 
50 |         for camera in cameras:
51 |             test(camera, exp_name, dataset_dir)
52 |             print(f'UPI testing done: {camera}')
--------------------------------------------------------------------------------
/docs/illum_est.md:
--------------------------------------------------------------------------------
 1 | # Illumination Estimation
 2 | ## Overview
 3 | The following scripts assume or create the following directory structure
 4 | 
 5 | ```
 6 | |-- illum_est_expts
 7 | |   |-- data
 8 | |   |   |-- SamsungNX2000
 9 | |   |   |   |-- ours
10 | |   |   |   |-- real
11 | |   |   |   `-- upi
12 | |   |   |-- Canon1DsMkIII
13 | |   |   |-- Canon600D
14 | |   |   |-- FujifilmXM1
15 | |   |   |-- NikonD40
16 | |   |   |-- NikonD5200
17 | |   |   |-- OlympusEPL6
18 | |   |   |-- PanasonicGX1
19 | |   |   `-- SonyA57
20 | |   |-- expts
21 | |   |   `-- Canon1DsMkIII_illum_est_ours
22 | |   |       |-- models
23 | |   |       |-- tensorboard
24 | |   |       `-- results
25 | |   |-- nus_metadata
26 | |   |   `-- nus_outdoor_gt_illum_mats
27 | |   `-- synthia
28 | |       `-- SYNTHIA_RAND_CVPR16
29 | |           `-- RGB
30 | ```
31 | ## Prepare real data
32 | For all methods, we use a subset of the SYNTHIA dataset for training and the NUS dataset for testing.
33 | - Prepare the [NUS dataset](https://cvil.eecs.yorku.ca/projects/public_html/illuminant/illuminant.html)
34 |   - Follow the instructions in [illum_est_nus.pptx](illum_est_nus.pptx) for each camera and put the images under `data/<camera>/real`
35 |   - From the [NUS dataset webpage](https://cvil.eecs.yorku.ca/projects/public_html/illuminant/illuminant.html), download the groundtruth illuminant (`MAT`) files for each camera and put them under both `data/nus_metadata/nus_outdoor_gt_illum_mats` and `data/<camera>/real` for each camera
36 | - Download the SYNTHIA-RAND (CVPR16) dataset from [link](http://synthia-dataset.net/downloads/)
37 |   - We used [200 images](assets/split_files/illum_est/synthia_train_val_list.p) from `SYNTHIA_RAND_CVPR16/RGB` for training and validation
38 | 
39 | ## Our method
40 | ### Data generation
41 | ```
42 | python3 -m jobs.generate_dataset_illum_est_graphics2raw
43 | ```
44 | ### Training & Testing
45 | ```
46 | python3 -m jobs.illum_est -c <camera> -m ours
47 | ```
48 | 
49 | ## UPI
50 | ### Data generation
51 | Due to copyright issues, we cannot re-distribute third-party code. Please refer to [upi.md](upi.md) before proceeding to the following steps.
52 | ```
53 | python3 -m jobs.generate_dataset_illum_est_upi
54 | ```
55 | ### Training & Testing
56 | ```
57 | python3 -m jobs.illum_est_upi
58 | ```
59 | ## Real
60 | ### Data generation
61 | Already completed in [Prepare real data](#prepare-real-data).
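To sanity-check the layout before training, here is a minimal sketch — the `data/<camera>/real` convention comes from the Overview tree above, and the `<camera>_gt.mat` naming is taken from `jobs/generate_dataset_illum_est_graphics2raw.py`:

```python
import os

camera = 'SonyA57'  # substitute any NUS camera listed above
root = os.path.join('illum_est_expts', 'data', camera, 'real')
print(root, 'exists:', os.path.isdir(root))
print('ground-truth illuminants present:',
      os.path.isfile(os.path.join(root, f'{camera}_gt.mat')))
```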
62 | ### Training & Testing
63 | ```
64 | python3 -m jobs.illum_est -c <camera> -m real
65 | ```
66 | 
67 | 
68 | 
69 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | ### Official repository for the paper
 2 | # Graphics2RAW: Mapping Computer Graphics Images to Sensor RAW Images @ICCV'23
 3 | 
 4 | Donghwan Seo, Abhijith Punnappurath, Luxi Zhao, Abdelrahman Abdelhamed, Sai Kiran Tedla, Sanguk Park, Jihwan Choe, and Michael S. Brown
 5 | 
 6 | #### Samsung Electronics, Samsung AI Center Toronto
 7 | 
 8 | 
 9 | [![paper](https://img.shields.io/badge/ICCV23-Paper-.svg)](https://openaccess.thecvf.com/content/ICCV2023/papers/Seo_Graphics2RAW_Mapping_Computer_Graphics_Images_to_Sensor_RAW_Images_ICCV_2023_paper.pdf)
10 | [![supplement](https://img.shields.io/badge/Supplementary-Material-red)](https://openaccess.thecvf.com/content/ICCV2023/supplemental/Seo_Graphics2RAW_Mapping_Computer_ICCV_2023_supplemental.pdf)
11 | 
12 | 
13 | ![alt text](docs/figures/graphics2raw_pipeline.png)
14 | 
15 | ## Getting Started
16 | - The code was tested on Ubuntu 18.04, Python3.7, CUDA 11.3, cuDNN 8.2, Pytorch 1.10
17 | - The code may work in other environments
18 | - Install requirements
19 | ```
20 | pip install -r requirements.txt
21 | ```
22 | 
23 | ### Dataset
24 | Our graphics dataset can be downloaded from [here](https://sites.google.com/view/tedlasai/home/graphics2raw).
25 | 
26 | ### Convert graphics images to RAW images
27 | ```
28 | python3 -m data_generation.package_exr_to_dng_graphics2raw --exr_folder_path path/to/graphics2RAW-dataset
29 | ```
30 | 
31 | ## Downstream Tasks
32 | Data generation, training and testing instructions for the downstream tasks:
33 | - [Neural ISP](docs/neural_isp.md)
34 | - [RAW Denoising](docs/raw_denoising.md)
35 | - [Illumination Estimation](docs/illum_est.md)
36 | 
37 | ## Citation
38 | 
39 | If you use this code or the associated data, please cite the paper:
40 | 
41 | ```
42 | @InProceedings{Seo_2023_ICCV,
43 |     author    = {Seo, Donghwan and Punnappurath, Abhijith and Zhao, Luxi and Abdelhamed, Abdelrahman and Tedla, Sai Kiran and Park, Sanguk and Choe, Jihwan and Brown, Michael S.},
44 |     title     = {Graphics2RAW: Mapping Computer Graphics Images to Sensor RAW Images},
45 |     booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
46 |     month     = {October},
47 |     year      = {2023},
48 |     pages     = {12622-12631}
49 | }
50 | ```
51 | 
52 | ## Contact
53 | - [Luxi Zhao](https://github.com/Luxi-Zhao) - [(lucy.zhao@samsung.com; lucyzhao.zlx@gmail.com)](mailto:lucy.zhao@samsung.com;lucyzhao.zlx@gmail.com)
54 | - [Abhijith Punnappurath](https://abhijithpunnappurath.github.io/) - [(abhijith.p@samsung.com; jithuthatswho@gmail.com)](mailto:abhijith.p@samsung.com;jithuthatswho@gmail.com)
--------------------------------------------------------------------------------
/noise_profiler/spatial_correlation/utils.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Author(s):
 3 | Abdelrahman Abdelhamed
 4 | 
 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
 6 | 
 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
 8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | """ 15 | import cv2 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | 19 | from noise_profiler.img_utils import normalize 20 | 21 | 22 | def sample_norm(h_, w_): 23 | return np.random.normal(0, 1, (h_, w_)) 24 | 25 | 26 | def sample_cov(cov_mat_, h_, w_): 27 | n_var = 4 28 | sample_cov_ = np.random.multivariate_normal([0] * n_var, cov_mat_, h_ * w_ // n_var) 29 | sample_cov_image_ = np.zeros((h_, w_)) 30 | sample_cov_image_[0::2, 0::2] = sample_cov_[:, 0].reshape((h_ // 2, w_ // 2)) 31 | sample_cov_image_[0::2, 1::2] = sample_cov_[:, 1].reshape((h_ // 2, w_ // 2)) 32 | sample_cov_image_[1::2, 0::2] = sample_cov_[:, 2].reshape((h_ // 2, w_ // 2)) 33 | sample_cov_image_[1::2, 1::2] = sample_cov_[:, 3].reshape((h_ // 2, w_ // 2)) 34 | return sample_cov_image_, sample_cov_ 35 | 36 | 37 | # save images 38 | def save_image(im, save_fn, sc=4): 39 | cv2.imwrite(save_fn, cv2.resize((normalize(im) * 255).astype(np.uint8), dsize=(im.shape[1] * sc, im.shape[0] * sc), 40 | interpolation=cv2.INTER_NEAREST)) 41 | 42 | 43 | # plot covariance matrix as a heat map 44 | def plot_cov_mat(cov_mat_, save_fn): 45 | fig = plt.figure() 46 | plt.imshow(cov_mat_) 47 | ax = plt.gca() 48 | plt.colorbar() 49 | plt.clim(0, 1) 50 | for i in range(4): 51 | for j in range(4): 52 | ax.annotate("{:.4f}".format(cov_mat_[i, j]), xy=(i - .25, j)) 53 | plt.tick_params(axis='both', which='both', bottom=False, top=False, left=False, right=False, labelbottom=True) 54 | plt.xticks(np.arange(0, 4), labels=['Gr', 'R', 'B', 'Gb']) 55 | plt.yticks(np.arange(0, 4), labels=['Gr', 'R', 'B', 'Gb']) 56 | plt.tight_layout() 57 | # plt.show() 58 | plt.savefig(save_fn) 59 | plt.close(fig) 60 | -------------------------------------------------------------------------------- /noise_profiler/img_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author(s): 3 | Abdelrahman Abdelhamed 4 | 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import numpy as np 18 | 19 | 20 | def brightness_transfer(image, dst_image, white_level=1023): 21 | subsample = 16 22 | src_image_sample = image[::subsample, ::subsample].flatten() 23 | dst_image_sample = dst_image[::subsample, ::subsample].flatten() 24 | bright_func = np.polyfit(src_image_sample, dst_image_sample, 1) 25 | image_adjusted = image * bright_func[0] + bright_func[1] 26 | image_adjusted = np.clip(image_adjusted, 0, white_level) 27 | return image_adjusted 28 | 29 | 30 | def brightness_transfer_v1(image, dst_image, white_level=1023): 31 | mean1 = np.mean(image) 32 | mean2 = np.mean(dst_image) 33 | std1 = np.std(image) 34 | std2 = np.std(dst_image) 35 | std1 = max(std1, 1e-8) 36 | std2 = max(std2, 1e-8) 37 | image_adjusted = (image - mean1) / std1 * std2 + mean2 38 | image_adjusted = np.clip(image_adjusted, 0, white_level) 39 | return image_adjusted 40 | 41 | 42 | def brightness_transfer_v11(image, dst_image, white_level=1023): 43 | mean1 = np.mean(image) 44 | mean2 = np.mean(dst_image) 45 | image_adjusted = image - mean1 + mean2 46 | image_adjusted = np.clip(image_adjusted, 0, white_level) 47 | return image_adjusted 48 | 49 | 50 | def brightness_transfer_v2(image, dst_image, white_level=1023): 51 | subsample = 4 52 | src_image_sample = image[::subsample, ::subsample].flatten() 53 | dst_image_sample = dst_image[::subsample, ::subsample].flatten() 54 | bright_func = np.polyfit(src_image_sample, dst_image_sample, 1) 55 | image_adjusted = image * bright_func[0] + bright_func[1] 56 | image_adjusted = np.clip(image_adjusted, 0, white_level) 57 | return image_adjusted 58 | 59 | 60 | def standardize(x): 61 | mean = np.mean(x) 62 | std = np.std(x) 63 | return (x - mean) / std 64 | 65 | 66 | def normalize(x): 67 | min_ = np.min(x) 68 | max_ = np.max(x) 69 | return (x - min_) / (max_ - min_) 70 | -------------------------------------------------------------------------------- /jobs/generate_dataset_illum_est_graphics2raw.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 
15 | """
16 | 
17 | 
18 | import os
19 | from jobs.job_utils import prep_environment
20 | """
21 | Requirements:
22 | illum_est_expts/data/<camera>/real
23 | illum_est_expts/data/<camera>/real/<camera>_gt.mat
24 | illum_est_expts/nus_metadata/
25 | illum_est_expts/nus_metadata/nus_outdoor_gt_illum_mats
26 | illum_est_expts/synthia/SYNTHIA_RAND_CVPR16/RGB
27 | """
28 | 
29 | 
30 | def main():
31 |     graphics_path = 'illum_est_expts/synthia/SYNTHIA_RAND_CVPR16/RGB'
32 | 
33 |     cameras = [
34 |         'Canon1DsMkIII', 'Canon600D', 'FujifilmXM1', 'NikonD5200', 'OlympusEPL6',
35 |         'PanasonicGX1',
36 |         'SamsungNX2000',
37 |         'SonyA57',
38 |         'NikonD40'
39 |     ]
40 | 
41 |     method = 'ours'
42 | 
43 |     for camera in cameras:
44 |         camera_dir = os.path.join('illum_est_expts/data', camera)
45 |         save_path = os.path.join(camera_dir, method)
46 | 
47 |         os.makedirs(save_path, exist_ok=True)
48 | 
49 |         mat_file = os.path.join('illum_est_expts/nus_metadata', 'nus_outdoor_gt_illum_mats',
50 |                                 f'{camera}_gt.mat')
51 |         split_file = f'assets/split_files/illum_est/{camera}_train_valid_test_split_fns.p'
52 | 
53 |         assert os.path.exists(mat_file)
54 |         assert os.path.exists(split_file)
55 | 
56 |         args_str = f"--save_path {save_path} \
57 |             --target_camera {camera} \
58 |             --mat_file {mat_file} \
59 |             --use_train_val_illums_only \
60 |             --split_file {split_file} \
61 |             --graphics_path {graphics_path} \
62 |             --rgb_gain \
63 |             --rgb_gain_mean 0.8"
64 |         # do safe invert
65 |         # max illums None
66 | 
67 |         os.system('python3 -m data_generation.invert_synthia_graphics2raw {} '.format(args_str))
68 | 
69 | 
70 | if __name__ == '__main__':
71 |     prep_environment()
72 |     main()
73 | 
74 | 
75 | 
76 | 
77 | 
78 | 
--------------------------------------------------------------------------------
/utils/general_utils.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
 3 | 
 4 | Author(s):
 5 | Abhijith Punnappurath (abhijith.p@samsung.com)
 6 | Luxi Zhao (lucy.zhao@samsung.com)
 7 | 
 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
 9 | you may not use this file except in compliance with the License.
10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and limitations under the License.
14 | For conditions of distribution and use, see the accompanying LICENSE.md file.
15 | 16 | """ 17 | 18 | import os 19 | import errno 20 | import subprocess 21 | import sys 22 | 23 | 24 | def check_dir(_path): 25 | if not os.path.exists(_path): 26 | try: 27 | os.makedirs(_path) 28 | except OSError as exc: # Guard against race condition 29 | if exc.errno != errno.EEXIST: 30 | raise 31 | 32 | 33 | def save_args(args, save_dir): 34 | """ 35 | Source: https://github.com/VITA-Group/EnlightenGAN/blob/master/options/base_options.py 36 | EnlightenGAN base_options.py 37 | """ 38 | args = vars(args) 39 | file_name = os.path.join(save_dir, 'args.txt') 40 | with open(file_name, 'wt') as opt_file: 41 | opt_file.write('------------ Options -------------\n') 42 | for k, v in sorted(args.items()): 43 | opt_file.write('%s: %s\n' % (str(k), str(v))) 44 | opt_file.write('-------------- End ----------------\n') 45 | 46 | opt_file.write('\n------------------------------\n') 47 | opt_file.write('Shell command:\n') 48 | opt_file.write(get_command()) 49 | 50 | 51 | def get_git_revision_hash() -> str: 52 | """ 53 | Source: https://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script 54 | :return: 55 | """ 56 | return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip() 57 | 58 | 59 | def get_git_revision_branch() -> str: 60 | return subprocess.check_output(['git', 'branch']).decode('ascii').strip() 61 | 62 | 63 | def get_git_info() -> str: 64 | current_hash = get_git_revision_hash() 65 | current_branch = get_git_revision_branch() 66 | git_info = f'Git Info:\nCurrent commit: {current_hash}\nBranches:\n {current_branch}' 67 | return git_info 68 | 69 | 70 | def str2int_arr(arr): 71 | # Parse comma-splited integer array 72 | return [int(e) for e in arr.split(',')] 73 | 74 | 75 | def get_command() -> str: 76 | return " ".join(sys.argv[:]) 77 | -------------------------------------------------------------------------------- /docs/upi.md: -------------------------------------------------------------------------------- 1 | ## UPI Modifications 2 | Since we used different cameras from those used in the 3 | [Unprocessing](https://www.timothybrooks.com/tech/unprocessing/) paper, 4 | we followed the authors' suggestion to modify `random_ccm()` and `random_gains()` in unprocess.py 5 | to best match the distribution of image metadata from the cameras we used. 6 | 7 | Due to copyright issues, we cannot re-distribute third-party code. To reproduce our procedure for UPI, 8 | please copy over the [official code](https://raw.githubusercontent.com/timothybrooks/unprocessing/master/unprocess.py) 9 | to [data_generation/unprocess.py](../data_generation/unprocess.py) 10 | and refer to the following modifications. 11 | 12 | ### CCM and Gains 13 | We used camera sensors from the NUS dataset and the nighttime dataset of [Day-to-Night](https://openaccess.thecvf.com/content/CVPR2022/papers/Punnappurath_Day-to-Night_Image_Synthesis_for_Training_Nighttime_Neural_ISPs_CVPR_2022_paper.pdf) as our target sensors. 14 | Therefore, the code was modified to use those sensors' CST matrices and gain ranges. 15 | 16 | 1. Replace `xyz2cams` in `random_ccm()` ([line 32](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L32)) with 17 | ``` 18 | xyz2cams = pickle.load(open('assets/container_dngs/NUS_S20FE_CST_mats.p', 'rb')) 19 | ``` 20 | 2. Return the sampled `xyz2cam`; this matrix is needed in [package_exr_to_dng_upi.py](../data_generation/package_exr_to_dng_upi.py) for building the DNG. 
21 | - Return both `rgb2cam, xyz2cam` in `random_ccm()` ([line 58](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L58), [line 124](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L124)) 22 | - [line 141-146](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L141-L146), 23 | change `metadata` to 24 | ``` 25 | metadata = { 26 | 'cam2rgb': cam2rgb, 27 | 'rgb_gain': rgb_gain, 28 | 'red_gain': red_gain, 29 | 'blue_gain': blue_gain, 30 | 'xyz2cam': xyz2cam, 31 | } 32 | ``` 33 | 3. Replace `red_gain` and `blue_gain` in `random_gains()` ([line 67-68](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L67-L68)) with 34 | ``` 35 | red_gain = tf.random.uniform((), 1.0, 3.3) 36 | blue_gain = tf.random.uniform((), 1.3, 4.4) 37 | ``` 38 | 4. Comment out `image = mosaic(image)` ([line 139](https://github.com/timothybrooks/unprocessing/blob/master/unprocess.py#L139)) because the illumination estimation experiments need demosaiced images instead of bayer images. 39 | 40 | ### Syntax modifications 41 | Some small syntax modifications were needed to adjust the code to work with TensorFlow 2.0. Other fixes may work, too. 42 | 1. Define `tf.to_float = lambda x: tf.cast(x, tf.float32)` (`tf.to_float` is deprecated in tf v2.0) 43 | 2. Change `tf.random_uniform` to `tf.random.uniform` 44 | 3. Change `tf.random_normal` to `tf.random.normal` 45 | 4. Remove the `None` in `tf.name_scope(None, 'unprocess')` 46 | 5. Change `tf.matrix_inverse` to `tf.linalg.inv` -------------------------------------------------------------------------------- /data_preparation/initial_data_prep_denoise.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
15 | 16 | Input folder: contains DNGs 17 | Output folders: 18 | 1) train : clean_raw, metadata_raw 19 | 2) val : clean_raw, metadata_raw 20 | """ 21 | 22 | 23 | import argparse 24 | import cv2 25 | import pickle 26 | from utils.general_utils import check_dir 27 | import os 28 | from glob import glob 29 | from shutil import rmtree 30 | from pipeline.pipeline_utils import get_metadata, get_visible_raw_image 31 | 32 | 33 | if __name__ == '__main__': 34 | 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('--dng_dir', type=str, help='dng dir') 37 | parser.add_argument('--val_names', default='Living,Playground,Rustic', type=str, help='comma-separated scene names held out for validation') 38 | parser.add_argument('--save_dir', default='graphics_dataset', type=str, help='save dir') 39 | args = parser.parse_args() 40 | 41 | if os.path.isdir(args.save_dir): rmtree(args.save_dir) 42 | 43 | print(args.dng_dir) 44 | 45 | # create directories 46 | check_dir(args.save_dir) 47 | for fol in ['train', 'val']: check_dir(os.path.join(args.save_dir, fol)) 48 | for fol in ['train', 'val']: 49 | subfols = ['clean_raw', 'metadata_raw'] 50 | for subfol in subfols: check_dir(os.path.join(args.save_dir, fol, subfol)) 51 | 52 | allfiles = [os.path.basename(x) for x in sorted(glob(os.path.join(args.dng_dir, '*.dng')))] 53 | 54 | valnames = [item for item in args.val_names.split(',')] 55 | 56 | index = {'train': [i for i in range(len(allfiles)) if allfiles[i].split('_')[0] not in valnames], 57 | 'val': [i for i in range(len(allfiles)) if allfiles[i].split('_')[0] in valnames]} 58 | 59 | input_dir_dng = args.dng_dir 60 | 61 | for fol in ['train', 'val']: 62 | for ind in index[fol]: 63 | for subfol in ['clean_raw']: 64 | cleanrawimg = get_visible_raw_image(os.path.join(input_dir_dng, allfiles[ind])) 65 | destination = os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.png') 66 | cv2.imwrite(destination, cleanrawimg) 67 | 68 | for subfol in ['metadata_raw']: 69 | metadata = get_metadata(os.path.join(input_dir_dng, allfiles[ind])) 70 | pickle.dump(metadata, open(os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.p'), "wb")) 71 | -------------------------------------------------------------------------------- /jobs/neural_isp_real.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import os 18 | import time 19 | import argparse 20 | import shutil 21 | from jobs.job_utils import prep_environment, get_day2night_data 22 | from jobs.neural_isp_graphics2raw import ROOT_DIR 23 | 24 | 25 | def movedata(fold): 26 | print('Preparing data...') 27 | get_day2night_data() 28 | 29 | tif_dir = f'{ROOT_DIR}/data/clean_srgb_tiffs' 30 | os.system(f'python3 -m data_preparation.initial_data_prep_tif2png --tif_dir {tif_dir};') 31 | os.system(f'python3 -m data_preparation.k_fold_split_data --which_fold {fold} --with_noise 0;') 32 | 33 | 34 | def train(input_type, folder_name): 35 | args_str = f"--which-input {input_type} \ 36 | --savefoldername {folder_name} \ 37 | --exp_dir {ROOT_DIR}/expts/ \ 38 | --on-cuda \ 39 | --data-dir real_dataset_k_fold \ 40 | --model_save_freq 600 \ 41 | --num-epochs 250" 42 | 43 | print('Start training...') 44 | print('args_str = {}'.format(args_str)) 45 | os.system('python3 train_neural_isp.py {} '.format(args_str)) 46 | 47 | 48 | def test(folder_name): 49 | args_str = f"--exp_dir {ROOT_DIR}/expts/ \ 50 | --model_dir {folder_name} \ 51 | --set_dir real_dataset_k_fold/test" 52 | 53 | print('Start testing...') 54 | print('args_str = {}'.format(args_str)) 55 | os.system('python3 test_neural_isp.py {} '.format(args_str)) 56 | 57 | target_dir = f'{ROOT_DIR}/expts/{folder_name}/results/' 58 | os.makedirs(target_dir, exist_ok=True) 59 | os.system(f'cp results/* {target_dir} -r -v;') 60 | 61 | 62 | if __name__ == '__main__': 63 | prep_environment() 64 | 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument('--fold', '-f', default=0, type=int, help='Which fold of the real data to train and test.') 67 | args = parser.parse_args() 68 | fold = args.fold 69 | 70 | input_type = 'clean_raw' 71 | exp_name = 'neural_isp_real' 72 | num_runs = 2 73 | for i in range(num_runs): 74 | timestamp = int(time.time()) 75 | folder_name = f'{exp_name}_{input_type}_{fold}_{timestamp}' 76 | movedata(fold) 77 | train(input_type, folder_name) 78 | test(folder_name) 79 | print(f'-----------Experiment: {folder_name}-----------') 80 | 81 | shutil.rmtree('real_dataset') 82 | shutil.rmtree('real_dataset_k_fold') 83 | shutil.rmtree('results') 84 | -------------------------------------------------------------------------------- /jobs/neural_isp_graphics2raw.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import os 18 | import time 19 | import argparse 20 | from jobs.job_utils import prep_environment, get_day2night_data 21 | 22 | 23 | ROOT_DIR = 'neural_isp_expts' 24 | 25 | def movedata(input_dir, target_dir): 26 | print('Preparing data...') 27 | get_day2night_data() 28 | 29 | tif_dir = f'{ROOT_DIR}/data/clean_srgb_tiffs' 30 | os.system(f'python3 -m data_preparation.initial_data_prep_tif2png --tif_dir {tif_dir};') 31 | os.system(f'python3 -m data_preparation.initial_data_prep_neural_isp --dng_dir {ROOT_DIR}/data/{input_dir} --tif_dir {ROOT_DIR}/data/{target_dir};') 32 | 33 | 34 | def train(input_type, exp_name, timestamp): 35 | args_str = f"--which-input {input_type} \ 36 | --savefoldername {exp_name}_{input_type}_{timestamp} \ 37 | --exp_dir {ROOT_DIR}/expts/ \ 38 | --on-cuda \ 39 | --model_save_freq 600 \ 40 | --num-epochs 250" 41 | 42 | print('Start training...') 43 | print('args_str = {}'.format(args_str)) 44 | os.system('python3 train_neural_isp.py {} '.format(args_str)) 45 | 46 | 47 | def test(input_type, exp_name, timestamp): 48 | print('Start testing...') 49 | folder_name = f'{exp_name}_{input_type}_{timestamp}' 50 | args_str = f"--exp_dir {ROOT_DIR}/expts/ \ 51 | --model_dir {folder_name}" 52 | 53 | print('Start testing...') 54 | print('args_str = {}'.format(args_str)) 55 | os.system('python3 test_neural_isp.py {} '.format(args_str)) 56 | 57 | target_dir = f'{ROOT_DIR}/expts/{folder_name}/results/' 58 | os.makedirs(target_dir, exist_ok=True) 59 | os.system(f'cp results/* {target_dir} -r -v;') 60 | 61 | 62 | if __name__ == '__main__': 63 | prep_environment() 64 | parser = argparse.ArgumentParser() 65 | parser.add_argument('--timestamp', '-t', default=None, type=str, help='Time stamp to distinguish between runs.') 66 | parser.add_argument('--eval', '-e', action='store_true', help='Evaluation only, no training.') 67 | args = parser.parse_args() 68 | timestamp = args.timestamp if args.timestamp else int(time.time()) 69 | 70 | input_type = 'clean_raw' 71 | exp_name = 'neural_isp_graphics2raw' 72 | 73 | input_dir = 'graphics_dngs_graphics2raw' 74 | target_dir = 'graphics_srgb_graphics2raw' 75 | movedata(input_dir, target_dir) 76 | if not args.eval: 77 | train(input_type, exp_name, timestamp) 78 | test(input_type, exp_name, timestamp) 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /pipeline/raw_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author(s): 3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com) 4 | 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import numpy as np 18 | 19 | def RGGB2Bayer(im, _cfa_pattern=[0,1,1,2]): 20 | # add 1 to the second-green and the blue channel (e.g., [0, 1, 1, 2] will be [0, 1, 2, 3]) 21 | _cfa_pattern_arr=np.asarray(_cfa_pattern) 22 | _cfa_pattern_arr[_cfa_pattern_arr == 2] += 1 23 | _cfa_pattern_arr[2:][_cfa_pattern_arr[2:] == 1] += 1 24 | # convert RGGB stacked image to one channel Bayer 25 | bayer = np.zeros((im.shape[0] * 2, im.shape[1] * 2)) 26 | bayer[0::2, 0::2] = im[:, :, _cfa_pattern_arr[0]] 27 | bayer[0::2, 1::2] = im[:, :, _cfa_pattern_arr[1]] 28 | bayer[1::2, 0::2] = im[:, :, _cfa_pattern_arr[2]] 29 | bayer[1::2, 1::2] = im[:, :, _cfa_pattern_arr[3]] 30 | return bayer 31 | 32 | def stack_rggb_channels(raw_image, bayer_pattern=None): 33 | """ 34 | Stack the four channels of a CFA/Bayer raw image along a third dimension. 35 | """ 36 | if bayer_pattern is None: 37 | bayer_pattern = [0, 1, 1, 2] 38 | height, width = raw_image.shape 39 | channels = [] 40 | pattern = np.array(bayer_pattern) 41 | # add 1 to the second-green and the blue channel (e.g., [0, 1, 1, 2] will be [0, 1, 2, 3]) 42 | pattern[pattern == 2] += 1 43 | pattern[2:][pattern[2:] == 1] += 1 44 | idx = [[0, 0], [0, 1], [1, 0], [1, 1]] 45 | for c in pattern: 46 | raw_image_c = raw_image[idx[c][0]:height:2, idx[c][1]:width:2].copy() 47 | channels.append(raw_image_c) 48 | 49 | # special case: channels re-ordered to [B G2 G1 R] instead of [R G1 G2 B] when 50 | # bayer_pattern==[G B R G]; need to flip it back. 51 | if bayer_pattern == [1, 2, 0, 1]: 52 | channels.reverse() 53 | channels = np.stack(channels, axis=-1) 54 | return channels 55 | 56 | 57 | def stack_rgb_channels(raw_image, bayer_pattern): 58 | """ 59 | Stack the four channels in a CFA/Bayer image into 3 RGB channels, averaging the two G channels. 60 | """ 61 | rggb = stack_rggb_channels(raw_image, bayer_pattern) 62 | rgb = np.zeros((rggb.shape[0], rggb.shape[1], 3), dtype=np.float32) 63 | rgb[:, :, 0] = rggb[:, :, 0] 64 | rgb[:, :, 1] = (rggb[:, :, 1] + rggb[:, :, 2]) / 2.0 65 | rgb[:, :, 2] = rggb[:, :, 3] 66 | return rgb 67 | 68 | 69 | def rggb_to_rgb(image_4ch, bayer_pattern): 70 | bayer_pattern = list(bayer_pattern) 71 | g1_idx = bayer_pattern.index(1) 72 | g2_idx = 3 if g1_idx == 0 else 2 73 | r_idx = bayer_pattern.index(0) 74 | b_idx = bayer_pattern.index(2) 75 | g = np.mean([image_4ch[:, :, g1_idx], image_4ch[:, :, g2_idx]], axis=0) 76 | rgb = np.stack([image_4ch[:, :, r_idx], g, image_4ch[:, :, b_idx]], axis=-1) 77 | return rgb 78 | -------------------------------------------------------------------------------- /jobs/illum_est.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import os 18 | import time 19 | from jobs.job_utils import prep_environment 20 | import argparse 21 | 22 | 23 | ROOT_DIR = 'illum_est_expts' 24 | 25 | def train(camera, method, exp_name, dataset_dir): 26 | illum_fname = f'{camera}_gt.mat' if method == 'real' else 'gt_illum.p' 27 | 28 | args_str = f"--dataset-dir {os.path.join(dataset_dir, camera, method)} \ 29 | --savefoldername {exp_name} \ 30 | --illum_file {os.path.join(dataset_dir, camera, method, illum_fname)} \ 31 | --split_file assets/split_files/illum_est/{camera}_train_valid_test_split_idx.p \ 32 | --exp_dir {ROOT_DIR}/expts \ 33 | --on-cuda \ 34 | --model_save_freq 3000 \ 35 | --num-epochs 2000" 36 | # not saving intermediate models 37 | 38 | print('Start training...') 39 | print('args_str = {}'.format(args_str)) 40 | os.system('python3 train_illum_est.py {} '.format(args_str)) 41 | 42 | 43 | def test(camera, exp_name, dataset_dir): 44 | print('Start testing...') 45 | args_str = f"--dataset-dir {os.path.join(dataset_dir, camera, 'real')} \ 46 | --exp_name {exp_name} \ 47 | --illum_file {os.path.join(dataset_dir, camera, 'real', f'{camera}_gt.mat')} \ 48 | --split_file assets/split_files/illum_est/{camera}_train_valid_test_split_idx.p \ 49 | --exp_dir {ROOT_DIR}/expts" 50 | 51 | print('Start testing...') 52 | print('args_str = {}'.format(args_str)) 53 | os.system('python3 test_illum_est.py {} '.format(args_str)) 54 | 55 | 56 | if __name__ == '__main__': 57 | prep_environment() 58 | parser = argparse.ArgumentParser() 59 | parser.add_argument('--cameras', '-c', default='SamsungNX2000,SonyA57', type=str, help='Which cameras to run on. Comma-separated string.') 60 | parser.add_argument('--methods', '-m', default='ours,real', type=str, help='Which methods to run on. Comma-separated string.') 61 | parser.add_argument('--num_runs', '-n', default=5, type=int, help='Number of repeated runs.') 62 | args = parser.parse_args() 63 | 64 | exp_id = 'illum_est' 65 | cameras = [] if args.cameras == '' else args.cameras.split(',') 66 | methods = [] if args.methods == '' else args.methods.split(',') 67 | 68 | for method in methods: 69 | for camera in cameras: 70 | for i in range(args.num_runs): 71 | timestamp = int(time.time()) 72 | exp_name = f'{camera}_{exp_id}_{method}_{timestamp}' 73 | 74 | dataset_dir = f'{ROOT_DIR}/data' 75 | train(camera, method, exp_name, dataset_dir) 76 | test(camera, exp_name, dataset_dir) 77 | -------------------------------------------------------------------------------- /jobs/denoise_graphics2raw_iso3200.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import os 18 | import time 19 | import argparse 20 | import shutil 21 | from jobs.job_utils import prep_environment, get_day2night_data 22 | 23 | DATA_ROOT_DIR = 'neural_isp_expts' # uses the same RAW data as neural ISP 24 | EXPT_ROOT_DIR = 'denoising_expts' 25 | 26 | 27 | def movedata(input_dir): 28 | print('Preparing data...') 29 | get_day2night_data() 30 | os.system(f'python3 -m data_preparation.initial_data_prep_denoise --dng_dir {DATA_ROOT_DIR}/data/{input_dir};') 31 | 32 | 33 | def train(exp_name, timestamp, iso): 34 | folder_name = f'{exp_name}_{timestamp}' 35 | args_str = f"--savefoldername {folder_name} \ 36 | --exp_dir {EXPT_ROOT_DIR}/expts/ \ 37 | --lr 0.0001 \ 38 | --num_epochs 100 \ 39 | --milestones 90 \ 40 | --patch_size 128 \ 41 | --batch_size 32 \ 42 | --loss l1 \ 43 | --restormer_dim 8 \ 44 | --data_type graphics_raw \ 45 | --iso {iso} \ 46 | --model_save_freq 400" # not saving intermediate models 47 | 48 | print('Start training...') 49 | print('args_str = {}'.format(args_str)) 50 | os.system('python3 train_denoise.py {} '.format(args_str)) 51 | 52 | 53 | def test(exp_name, timestamp, iso): 54 | folder_name = f'{exp_name}_{timestamp}' 55 | print('Start testing...') 56 | args_str = f"--model_dir {folder_name} \ 57 | --exp_dir {EXPT_ROOT_DIR}/expts/ \ 58 | --set_name iso_{iso} \ 59 | --restormer_dim 8" 60 | 61 | print('Start testing...') 62 | print('args_str = {}'.format(args_str)) 63 | os.system('python3 test_denoise.py {} '.format(args_str)) 64 | 65 | target_dir = f'{EXPT_ROOT_DIR}/expts/{folder_name}/results/' 66 | os.makedirs(target_dir, exist_ok=True) 67 | os.system(f'cp results/* {target_dir} -r -v;') 68 | 69 | 70 | def get_parser(): 71 | parser = argparse.ArgumentParser() 72 | parser.add_argument('--num_runs', '-n', default=1, type=int, help='Number of runs.') 73 | return parser 74 | 75 | 76 | if __name__ == '__main__': 77 | prep_environment() 78 | parser = get_parser() 79 | args = parser.parse_args() 80 | 81 | iso = 3200 82 | exp_name = f'denoise_graphics2raw_iso{iso}' 83 | 84 | input_dir = 'graphics_dngs_graphics2raw' 85 | movedata(input_dir) 86 | 87 | num_runs = args.num_runs 88 | for i in range(num_runs): 89 | timestamp = int(time.time()) 90 | train(exp_name, timestamp, iso) 91 | test(exp_name, timestamp, iso) 92 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 93 | 94 | shutil.rmtree('results') 95 | -------------------------------------------------------------------------------- /data_generation/invert_synthia_upi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
15 | """ 16 | 17 | import os 18 | import pickle 19 | import argparse 20 | from utils.img_utils import * 21 | from data_generation.unprocess import unprocess 22 | import tensorflow as tf 23 | 24 | 25 | def parse_args(): 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('--graphics_path', type=str, 28 | help='path to graphics images', 29 | default='illum_est_expts/synthia/SYNTHIA_RAND_CVPR16/RGB' 30 | ) 31 | parser.add_argument('--train_val_set', type=str, 32 | help='only use these images for training and validation', 33 | default='assets/split_files/illum_est/synthia_train_val_list.p' 34 | ) 35 | parser.add_argument('--save_path', type=str, 36 | help='path to exr images', 37 | default='illum_est_expts/data/SamsungNX2000/upi' 38 | ) 39 | 40 | args = parser.parse_args() 41 | 42 | print(args) 43 | 44 | return args 45 | 46 | 47 | if __name__ == '__main__': 48 | RAND_SEED = 101 49 | np.random.seed(RAND_SEED) 50 | tf.random.set_seed(RAND_SEED) 51 | 52 | args = parse_args() 53 | 54 | train_val_set = pickle.load(open(args.train_val_set, 'rb')) 55 | 56 | gt_illum_array = [] 57 | file_name_array = [] 58 | 59 | for i, filename in enumerate(train_val_set): 60 | savename = filename 61 | print(i, filename) 62 | 63 | graphics_img = cv2.imread(os.path.join(args.graphics_path, filename), -1) 64 | graphics_img = graphics_img[:, :, ::-1] 65 | graphics_img = np.array(graphics_img).astype(np.float32) / 255.0 # SYNTHIA images are 8 bit 66 | graphics_img = np.clip(graphics_img, 0, 1) 67 | 68 | graphics_img = tf.convert_to_tensor(graphics_img, dtype=tf.float32) 69 | 70 | raw_est, metadata = unprocess(graphics_img) 71 | raw_est = raw_est.numpy().astype(np.float32) 72 | for k, v in metadata.items(): 73 | metadata[k] = v.numpy().astype(np.float32) 74 | 75 | wb_vec = np.array([1 / metadata['red_gain'], 1, 1 / metadata['blue_gain']]) 76 | 77 | gt_illum_array.append(wb_vec) 78 | print(wb_vec) 79 | 80 | cv2.imwrite( 81 | os.path.join(args.save_path, savename[:-4] + '.png'), 82 | (raw_est[:, :, [2, 1, 0]] * 65535).astype(np.uint16)) 83 | file_name_array.append(savename[:-4]) 84 | 85 | gt_values = {'gt_illum': gt_illum_array, 'filenames': file_name_array} 86 | 87 | pickle.dump(gt_values, open(os.path.join(args.save_path, 'gt_illum.p'), "wb")) 88 | 89 | print('Done') 90 | -------------------------------------------------------------------------------- /jobs/denoise_real_iso3200.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Luxi Zhao (lucy.zhao@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | """ 16 | 17 | import os 18 | import time 19 | import argparse 20 | from jobs.job_utils import prep_environment, get_day2night_data 21 | from jobs.denoise_graphics2raw_iso3200 import EXPT_ROOT_DIR 22 | import shutil 23 | 24 | 25 | def movedata(fold, iso): 26 | print('Preparing data...') 27 | get_day2night_data() 28 | os.system(f'python3 -m data_preparation.k_fold_split_data --which_fold {fold} --with_noise 1 --only_iso_{iso};') 29 | 30 | 31 | def train(exp_name, timestamp, iso): 32 | folder_name = f'{exp_name}_{timestamp}' 33 | args_str = f"--savefoldername {folder_name} \ 34 | --exp_dir {EXPT_ROOT_DIR}/expts/ \ 35 | --data_dir real_dataset_k_fold \ 36 | --lr 0.0001 \ 37 | --num_epochs 100 \ 38 | --milestones 90 \ 39 | --patch_size 128 \ 40 | --batch_size 32 \ 41 | --loss l1 \ 42 | --restormer_dim 8 \ 43 | --data_type real \ 44 | --iso {iso} \ 45 | --model_save_freq 400" # not saving intermediate models 46 | 47 | print('Start training...') 48 | print('args_str = {}'.format(args_str)) 49 | os.system('python3 train_denoise.py {} '.format(args_str)) 50 | 51 | 52 | def test(exp_name, timestamp, iso): 53 | folder_name = f'{exp_name}_{timestamp}' 54 | print('Start testing...') 55 | args_str = f"--model_dir {folder_name} \ 56 | --exp_dir {EXPT_ROOT_DIR}/expts/ \ 57 | --set_dir real_dataset_k_fold/test \ 58 | --set_name iso_{iso} \ 59 | --restormer_dim 8" 60 | 61 | print('Start testing...') 62 | print('args_str = {}'.format(args_str)) 63 | os.system('python3 test_denoise.py {} '.format(args_str)) 64 | 65 | target_dir = f'{EXPT_ROOT_DIR}/expts/{folder_name}/results/' 66 | os.makedirs(target_dir, exist_ok=True) 67 | os.system(f'cp results/* {target_dir} -r -v;') 68 | 69 | 70 | def get_parser(): 71 | parser = argparse.ArgumentParser() 72 | parser.add_argument('--fold', '-f', default=0, type=int, help='Which fold of the real data to train and test.') 73 | parser.add_argument('--num_runs', '-n', default=1, type=int, help='Number of runs.') 74 | return parser 75 | 76 | 77 | if __name__ == '__main__': 78 | prep_environment() 79 | parser = get_parser() 80 | args = parser.parse_args() 81 | fold = args.fold 82 | 83 | iso = 3200 84 | exp_name = f'denoise_real_iso{iso}_f{fold}' 85 | 86 | movedata(fold, iso) 87 | 88 | num_runs = args.num_runs 89 | for i in range(num_runs): 90 | timestamp = int(time.time()) 91 | train(exp_name, timestamp, iso) 92 | test(exp_name, timestamp, iso) 93 | print(f'-----------Experiment: {exp_name}_{timestamp}-----------') 94 | 95 | shutil.rmtree('results') 96 | -------------------------------------------------------------------------------- /data_generation/package_clean_nighttime_raw_to_dng.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 
13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | """ 15 | 16 | import cv2 17 | from utils.img_utils import update_hex_image,update_wb_values 18 | from binascii import hexlify, unhexlify 19 | import argparse 20 | from glob import glob 21 | import os 22 | from pipeline.pipeline_utils import get_metadata 23 | import copy 24 | 25 | """ 26 | Package clean RAW image (averaged from 30 ISO 50 frames) into a DNG. 27 | """ 28 | 29 | 30 | def parse_args(): 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--dng_folder_path', type=str, 33 | help='path to day-to-night nighttime dataset iso50 dngs', 34 | ) 35 | parser.add_argument('--raw_folder_path', type=str, 36 | help='path to day-to-night nighttime averaged clean raw images', 37 | ) 38 | parser.add_argument('--save_path', type=str, 39 | help='path to save to', 40 | default='neural_isp_expts/data/clean_raw_dngs' 41 | ) 42 | parser.add_argument('--container_dng_path', type=str, 43 | help='container dng', 44 | default='assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_noise_OFF.dng' 45 | ) 46 | parser.add_argument('--wb_start', type=int, 47 | help='magic value for S20 FE main camera', 48 | default=50816 49 | ) 50 | parser.add_argument('--image_start', type=int, 51 | help='magic value for S20 FE main camera', 52 | default=59288 53 | ) 54 | 55 | args = parser.parse_args() 56 | 57 | print(args) 58 | 59 | return args 60 | 61 | 62 | if __name__ == '__main__': 63 | 64 | args = parse_args() 65 | assert os.path.exists(args.save_path), f'{args.save_path} does not exist!' 66 | 67 | dnglist = sorted(glob(os.path.join(args.dng_folder_path,'*.dng'))) 68 | rawlist = sorted(glob(os.path.join(args.raw_folder_path,'*.png'))) 69 | 70 | with open(args.container_dng_path, "rb") as fn: 71 | myhexc = hexlify(fn.read()) 72 | myhexc = bytearray(myhexc) 73 | 74 | for i in range(len(dnglist)): 75 | print(os.path.basename(dnglist[i]),os.path.basename(rawlist[i])) 76 | bayer = cv2.imread(rawlist[i],-1) 77 | metadata = get_metadata(dnglist[i]) 78 | 79 | myhex = copy.deepcopy(myhexc) 80 | 81 | wb_dng = metadata['as_shot_neutral'] 82 | 83 | myhex = update_wb_values(myhex, wb_dng, args.wb_start) 84 | myhex = update_hex_image(myhex,bayer,args.image_start) 85 | 86 | db = unhexlify(myhex) 87 | 88 | savepath = os.path.join(args.save_path,os.path.basename(dnglist[i])) 89 | with open(savepath,"wb") as fb: 90 | fb.write(db) 91 | 92 | print('Done') -------------------------------------------------------------------------------- /docs/raw_denoising.md: -------------------------------------------------------------------------------- 1 | # RAW Denoising 2 | ## Overview 3 | ### Directory structure 4 | The following scripts assume or create the following directory structure 5 | 6 | ``` 7 | |-- denoising_expts 8 | | |-- data 9 | | | |-- clean_raw_dngs 10 | | | |-- graphics_dngs_graphics2raw 11 | | | |-- graphics_dngs_upi 12 | | `-- expts 13 | | |-- denoise_graphics2raw_iso1600 14 | | | |-- models 15 | | | |-- tensorboard 16 | | | `-- results 17 | ``` 18 | ### Code 19 | Due to copyright issues, we cannot re-distribute third-party code. 
20 | To run our RAW denoising experiments, please 21 | copy over the [Restormer](https://arxiv.org/abs/2111.09881) architecture from the authors' official repository ([restormer_arch.py](https://github.com/swz30/Restormer/blob/main/basicsr/models/archs/restormer_arch.py)) 22 | and place the code in [model_archs/restormer.py](../model_archs/restormer.py). 23 | 24 | ## Prepare real data 25 | For all methods, we use the [Day-to-Night](https://openaccess.thecvf.com/content/CVPR2022/papers/Punnappurath_Day-to-Night_Image_Synthesis_for_Training_Nighttime_Neural_ISPs_CVPR_2022_paper.pdf) Nighttime dataset for testing. 26 | #### Download Day-to-Night dataset 27 | - Download the day-to-night dataset from [here](https://github.com/SamsungLabs/day-to-night#get-started). 28 | - We only need the contents of the `night_real` folder; copy its subdirectories into `./real_dataset` 29 | #### Package clean RAW images into DNGs 30 | Package each clean RAW image (averaged from 30 ISO 50 frames) into a DNG, so that it can be processed by Photoshop to create the sRGB target. 31 | ``` 32 | python3 -m data_generation.package_clean_nighttime_raw_to_dng \ 33 | --dng_folder_path /path/to/real_dataset/dng/iso_50 \ 34 | --raw_folder_path /path/to/real_dataset/clean_raw \ 35 | --save_path /path/to/neural_isp_expts/data/clean_raw_dngs 36 | ``` 37 | > Same as [Neural ISP -> Prepare real data](neural_isp.md#prepare-real-data) 38 | 39 | ## Graphics2RAW (Our Method) 40 | ### Data generation 41 | #### Invert graphics data to RAW space 42 | Use Graphics2RAW to invert graphics data to RAW space: 43 | ``` 44 | python3 -m jobs.generate_dataset_isp_denoise_graphics2raw 45 | ``` 46 | This will generate `graphics_dngs_graphics2raw`. 47 | 48 | > Same as [Neural ISP -> Graphics2RAW -> Invert graphics data to RAW space](neural_isp.md#invert-graphics-data-to-raw-space) 49 | 50 | ### Training & Testing 51 | ISO3200 52 | ``` 53 | python3 -m jobs.denoise_graphics2raw_iso3200 54 | ``` 55 | ISO1600 56 | ``` 57 | python3 -m jobs.denoise_graphics2raw_iso1600 58 | ``` 59 | 60 | ## UPI 61 | ### Data generation 62 | Due to copyright issues, we cannot re-distribute third-party code. Please refer to [upi.md](upi.md) before proceeding to the following steps. 63 | 64 | #### Invert graphics data to RAW space 65 | Use UPI to invert graphics data to RAW space: 66 | ``` 67 | python3 -m jobs.generate_dataset_isp_denoise_upi 68 | ``` 69 | This will generate `graphics_dngs_upi`. 70 | 71 | > Same as [Neural ISP -> UPI -> Invert graphics data to RAW space](neural_isp.md#invert-graphics-data-to-raw-space-1) 72 | ### Training & Testing 73 | ISO3200 74 | ``` 75 | python3 -m jobs.denoise_upi_iso3200 76 | ``` 77 | ISO1600 78 | ``` 79 | python3 -m jobs.denoise_upi_iso1600 80 | ``` 81 | 82 | ## Real 83 | ### Data generation 84 | Already done in [Prepare real data](#prepare-real-data). 85 | 86 | ### Training & Testing 87 | ISO3200 88 | ``` 89 | python3 -m jobs.denoise_real_iso3200 --fold 0 90 | python3 -m jobs.denoise_real_iso3200 --fold 1 91 | python3 -m jobs.denoise_real_iso3200 --fold 2 92 | ``` 93 | ISO1600 94 | ``` 95 | python3 -m jobs.denoise_real_iso1600 --fold 0 96 | python3 -m jobs.denoise_real_iso1600 --fold 1 97 | python3 -m jobs.denoise_real_iso1600 --fold 2 98 | ``` 99 | Results reported in the paper are averaged over 3 folds. 
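100 | 101 | ## How noisy training pairs are synthesized 102 | For the graphics-based methods above, noisy/clean training pairs are generated on the fly: clean RAW patches are cropped from the prepared data, and noise is synthesized with the bundled noise profiler (`noise_profiler/h-gauss-s20-v1`). The snippet below is a minimal sketch mirroring [data_preparation/dataset_denoise.py](../data_preparation/dataset_denoise.py); the dataset path and hyperparameters are illustrative, not prescriptive. 103 | ``` 104 | import torch 105 | from data_preparation.dataset_denoise import DatasetRAW 106 | 107 | # 'graphics_dataset/train' is produced by data_preparation.initial_data_prep_denoise 108 | train_set = DatasetRAW(root='graphics_dataset/train', batch_size=32, patch_size=128, 109 | stride=128, preload_on_cuda=False, iso=3200, data_type='graphics_raw') 110 | loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True) 111 | noisy, clean = next(iter(loader))  # noisy input / clean target patches, (B, C, H, W) 112 | ``` 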
-------------------------------------------------------------------------------- /data_generation/mask_color_chart_resize_save.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2023 Samsung Electronics Co., Ltd. 2 | 3 | % Author(s): 4 | % Abhijith Punnappurath (abhijith.p@samsung.com) 5 | 6 | % Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 7 | % you may not use this file except in compliance with the License. 8 | % You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 9 | % Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 10 | % "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | % See the License for the specific language governing permissions and limitations under the License. 12 | % For conditions of distribution and use, see the accompanying LICENSE.md file. 13 | 14 | % Code to mask out the color chart, resize and save as PNG files 15 | 16 | % CHANGE mainpath AND gtmatpath TO RUN THIS CODE 17 | 18 | clc; 19 | clear; 20 | close all; 21 | 22 | % Below are the nine cameras in the NUS dataset 23 | % Code assumes that 24 | % (1) the demosaiced linear-RGB .tif files are in 25 | % mainpath//RAW/stage4 folders 26 | % (2) the rendered sRGB images are in 27 | % mainpath//RAW/stage11 folders 28 | % (3) ground truth mat files for all cameras are in gtmatpath 29 | % Note: Renamed NikonD40_RAW to NikonD40_RAW.zip 30 | 31 | mainpath='.\'; 32 | gtmatpath='.\all_gt_illum_mat\'; 33 | 34 | namecell{1,1}='Canon1DsMkIII_RAW.zip'; 35 | namecell{1,2}='Canon600D_RAW.zip'; 36 | namecell{1,3}='FujifilmXM1_RAW.zip'; 37 | namecell{1,4}='NikonD40_RAW.zip'; 38 | namecell{1,5}='NikonD5200_RAW.zip'; 39 | namecell{1,6}='OlympusEPL6_RAW.zip'; 40 | namecell{1,7}='PanasonicGX1_RAW.zip'; 41 | namecell{1,8}='SamsungNX2000_RAW.zip'; 42 | namecell{1,9}='SonyA57_RAW.zip'; 43 | 44 | % There is a problem with the DNG orientation tag for the Canon1DsMkIII_RAW 45 | % The following images need to be manually rotated so that the mask can be 46 | % correctly applied 47 | rotated_imgs=[5,12,48,49,52,53,54,58,60,61,62,67,75,84,87, ... 48 | 105,112,115,116,119,122, ... 49 | 124,126, ... 50 | 131,133,137,138,144,148,149, ... 51 | 151,152,153,155,160,161,163,168,169, ... 52 | 172,181,187,189,190,191,196,200, ... 53 | 204,207,208,215,216,217,218,220,238, ... 
54 | 242,245,256]; 55 | 56 | for jj=1:length(namecell) 57 | 58 | campath=[namecell{1,jj} '\RAW\stage4']; 59 | matpath=[gtmatpath namecell{1,jj}(1:end-8) '_gt.mat']; 60 | 61 | savepath = fullfile(mainpath,campath,'downsampled'); 62 | mkdir(savepath) 63 | load(matpath) 64 | b=100; % extra border around the masks 65 | 66 | allraw=dir(fullfile(mainpath,campath,'*.tif')); 67 | 68 | for i=1:length(allraw) 69 | if(strcmp(allraw(i).name(1:end-8),all_image_names{i})~=1) 70 | fprintf('error \n') 71 | break; 72 | end 73 | 74 | fprintf('%s \n',allraw(i).name) 75 | img = imread(fullfile(mainpath,campath,allraw(i).name)); 76 | if jj>1 % for all cameras other than Canon1DsMkIII_RAW 77 | imgsrgb=imread(fullfile(mainpath,[campath(1:end-1) '11'],[allraw(i).name(1:end-8) '_st11.tif'])); 78 | if(size(imgsrgb,1)>size(imgsrgb,2)) 79 | img=imrotate(img,-90); 80 | end 81 | else 82 | if(ismember(i,rotated_imgs)) 83 | img=imrotate(img,-90); 84 | end 85 | end 86 | 87 | 88 | if jj == 3 % for fuji, mult by 3 89 | img(max(CC_coords(i,1)*3-b,1):min(CC_coords(i,2)*3+b,size(img,1)),max(CC_coords(i,3)*3-b,1):min(CC_coords(i,4)*3+b,size(img,2)),:)=0; 90 | else % mult by 2 91 | img(max(CC_coords(i,1)*2-b,1):min(CC_coords(i,2)*2+b,size(img,1)),max(CC_coords(i,3)*2-b,1):min(CC_coords(i,4)*2+b,size(img,2)),:)=0; 92 | end 93 | img = imresize(img,0.2,'bicubic'); 94 | imwrite(img,fullfile(savepath,[allraw(i).name(1:end-4) '.png'])) 95 | end 96 | 97 | end -------------------------------------------------------------------------------- /data_preparation/dataset_denoise.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
15 | 16 | """ 17 | 18 | # no need to run this code separately 19 | 20 | import os 21 | import torch 22 | from torchvision import transforms 23 | import data_preparation.data_generator_denoise as dg 24 | from noise_profiler.image_synthesizer import load_noise_model 25 | 26 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 27 | 28 | 29 | def numpy2tensor(ims, preload_on_cuda): 30 | ims = torch.from_numpy(ims) 31 | ims = ims.permute(0, 3, 1, 2) 32 | if preload_on_cuda: 33 | ims = ims.to(device) 34 | return ims 35 | 36 | 37 | class DatasetRAW(object): 38 | def __init__(self, root, batch_size, patch_size, stride, preload_on_cuda, iso=3200, data_type='graphics_raw'): 39 | print('Inside raw data generator') 40 | 41 | noise_model_path = './noise_profiler/h-gauss-s20-v1' 42 | noise_model, iso2b1_interp_splines, iso2b2_interp_splines = load_noise_model(path=noise_model_path) 43 | noise_model_obj = { 44 | 'noise_model': noise_model, 45 | 'iso2b1_interp_splines': iso2b1_interp_splines, 46 | 'iso2b2_interp_splines': iso2b2_interp_splines, 47 | } 48 | 49 | input_ims, target_ims = dg.datagenerator_raw(data_dir=os.path.join(root), 50 | batch_size=batch_size, 51 | patch_size=patch_size, stride=stride, 52 | data_type=data_type, 53 | noise_model=noise_model_obj, iso=iso) 54 | print('Number of patches ' + str(input_ims.shape[0])) 55 | print('Outside raw data generator \n') 56 | 57 | self.input_ims = numpy2tensor(input_ims, preload_on_cuda) 58 | self.target_ims = numpy2tensor(target_ims, preload_on_cuda) 59 | 60 | assert patch_size % 2 == 0 # crop stride must be even to preserve the bayer pattern 61 | 62 | def __getitem__(self, idx): 63 | # load images 64 | img = self.input_ims[idx] # c, h, w 65 | target = self.target_ims[idx] 66 | return img, target 67 | 68 | def __len__(self): 69 | return len(self.input_ims) 70 | 71 | 72 | if __name__ == '__main__': 73 | from utils.torch_utils import torchvision_visualize_raw 74 | data_dir = 'graphics_dataset' 75 | batch_size, patch_size, stride = 16, 64, 64 76 | preload_on_cuda = False 77 | 78 | image_datasets = { 79 | x: DatasetRAW(os.path.join(data_dir, x), batch_size, patch_size, stride, preload_on_cuda) 80 | for x in ['val']} 81 | 82 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, 83 | shuffle=True, num_workers=0) 84 | for x in ['val']} 85 | 86 | dataset_sizes = {x: len(image_datasets[x]) for x in ['val']} 87 | 88 | # Get a batch of training data 89 | inputs, targets = next(iter(dataloaders['val'])) # b, c, h, w; 90 | 91 | img_grid = torchvision_visualize_raw([inputs, targets]) 92 | img_grid = transforms.ToPILImage()(img_grid.cpu()) 93 | img_grid.save("debug_inputs_targets_grid.png") 94 | print('Done!') 95 | -------------------------------------------------------------------------------- /data_preparation/initial_data_prep_neural_isp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | Input is two folders: one containing DNGs and the other containing TIFFs 16 | Output folders: (to match current day-to-night code) 17 | 1) train : clean, clean_raw, metadata_raw, noisy_raw 18 | 2) val : clean, clean_raw, metadata_raw, noisy_raw 19 | """ 20 | 21 | import argparse 22 | import cv2 23 | import pickle 24 | from utils.general_utils import check_dir 25 | import os 26 | from glob import glob 27 | from shutil import copyfile, rmtree 28 | from pipeline.pipeline_utils import get_metadata, get_visible_raw_image 29 | 30 | if __name__ == '__main__': 31 | 32 | parser = argparse.ArgumentParser() 33 | parser.add_argument('--dng_dir', type=str, help='dng dir') 34 | parser.add_argument('--tif_dir', type=str, help='tif dir') 35 | parser.add_argument('--val_names', default='Living,Playground,Rustic', type=str, help='comma-separated scene names held out for validation') 36 | parser.add_argument('--save_dir', default='graphics_dataset', type=str, help='save dir') 37 | parser.add_argument('--noisy', action='store_true', help='also prepare the noisy_raw subfolder') 38 | 39 | args = parser.parse_args() 40 | 41 | if os.path.isdir(args.save_dir): rmtree(args.save_dir) 42 | 43 | print(args.dng_dir) 44 | print(args.tif_dir) 45 | 46 | # create directories 47 | check_dir(args.save_dir) 48 | for fol in ['train', 'val']: check_dir(os.path.join(args.save_dir, fol)) 49 | for fol in ['train', 'val']: 50 | subfols = ['clean_raw', 'clean', 'metadata_raw'] 51 | if args.noisy: 52 | subfols.append('noisy_raw') 53 | for subfol in subfols: check_dir(os.path.join(args.save_dir, fol, subfol)) 54 | 55 | allfiles = [os.path.basename(x) for x in sorted(glob(os.path.join(args.dng_dir, '*.dng')))] 56 | 57 | valnames = [item for item in args.val_names.split(',')] 58 | 59 | index = {'train': [i for i in range(len(allfiles)) if allfiles[i].split('_')[0] not in valnames], 60 | 'val': [i for i in range(len(allfiles)) if allfiles[i].split('_')[0] in valnames]} 61 | 62 | input_dir_dng = args.dng_dir 63 | input_dir_tif = args.tif_dir 64 | 65 | for fol in ['train', 'val']: 66 | for ind in index[fol]: 67 | for subfol in ['clean']: 68 | cleanimg = cv2.imread(os.path.join(input_dir_tif, allfiles[ind][:-4] + '.tif'), cv2.IMREAD_UNCHANGED) 69 | destination = os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.png') 70 | cv2.imwrite(destination, cleanimg) 71 | 72 | for subfol in ['clean_raw']: 73 | cleanrawimg = get_visible_raw_image(os.path.join(input_dir_dng, allfiles[ind])) 74 | destination = os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.png') 75 | cv2.imwrite(destination, cleanrawimg) 76 | 77 | if args.noisy: 78 | for subfol in ['noisy_raw']: 79 | source = os.path.join(input_dir_dng, 'noisy_raw', allfiles[ind][:-4] + '.png') 80 | destination = os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.png') 81 | copyfile(source, destination) 82 | 83 | for subfol in ['metadata_raw']: 84 | metadata = get_metadata(os.path.join(input_dir_dng, allfiles[ind])) 85 | pickle.dump(metadata, open(os.path.join(args.save_dir, fol, subfol, allfiles[ind][:-4] + '.p'), "wb")) 
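# Example invocation (this is how jobs/neural_isp_graphics2raw.py calls this script):
#   python3 -m data_preparation.initial_data_prep_neural_isp \
#       --dng_dir neural_isp_expts/data/graphics_dngs_graphics2raw \
#       --tif_dir neural_isp_expts/data/graphics_srgb_graphics2raw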
86 | -------------------------------------------------------------------------------- /data_preparation/dataset_illum_est.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 15 | 16 | """ 17 | 18 | import torch 19 | import torchvision 20 | from torchvision import transforms 21 | import data_preparation.data_generator_illum_est as dg 22 | 23 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 24 | 25 | 26 | class DatasetIllumEst(object): 27 | def __init__(self, dataset_dir, illum_file, split_file, split, batch_size, patch_size, stride, on_cuda): 28 | 29 | print('Inside illum est data generator') 30 | in_patches, gt_illums = dg.datagenerator_illum_est(dataset_dir=dataset_dir, 31 | illum_file=illum_file, 32 | split_file=split_file, 33 | split=split, 34 | batch_size=batch_size, 35 | patch_size=patch_size, 36 | stride=stride) 37 | 38 | in_patches = torch.from_numpy(in_patches) 39 | in_patches = in_patches.permute(0, 3, 1, 2) # b, c, h, w 40 | gt_illums = torch.from_numpy(gt_illums) # b, 3 41 | if on_cuda: 42 | in_patches = in_patches.to(device) 43 | gt_illums = gt_illums.to(device) 44 | 45 | print('Number of patches ' + str(in_patches.shape[0])) 46 | print('Number of illums ' + str(gt_illums.shape[0])) 47 | print('Outside illum est data generator \n') 48 | self.in_patches = in_patches 49 | self.gt_illums = gt_illums 50 | 51 | def __getitem__(self, idx): 52 | # load images 53 | img = self.in_patches[idx] 54 | gt_illum = self.gt_illums[idx] 55 | 56 | return img, gt_illum 57 | 58 | def __len__(self): 59 | return len(self.in_patches) 60 | 61 | 62 | if __name__ == '__main__': 63 | image_datasets = { 64 | x: DatasetIllumEst(dataset_dir='illum_est_expts/data/ours', 65 | illum_file='illum_est_expts/data/ours/gt_illum.p', 66 | split_file='illum_est_expts/data/SamsungNX2000_train_valid_test_split_idx.p', 67 | split=x, 68 | batch_size=32, 69 | patch_size=48, 70 | stride=48, 71 | on_cuda=True) 72 | for x in ['val']} 73 | 74 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32, 75 | shuffle=True, num_workers=0) 76 | for x in ['val']} 77 | 78 | dataset_sizes = {x: len(image_datasets[x]) for x in ['val']} 79 | 80 | # Get a batch of training data 81 | inputs, targets = next(iter(dataloaders['val'])) 82 | 83 | inputs = inputs ** (1 / 2.2) 84 | target_ims = targets[..., None, None] * torch.ones((32, 3, 48, 48)) # 32, 3, 1, 1 * 32, 3, 48, 48 85 | # Make a grid from batch 86 | targets_grid = torchvision.utils.make_grid(target_ims) 87 | inputs_grid = torchvision.utils.make_grid(inputs) 88 | 89 | targets_grid = transforms.ToPILImage()(targets_grid.cpu()) 90 | inputs_grid = 
transforms.ToPILImage()(inputs_grid.cpu()) 91 | 92 | targets_grid.save("debug_targets_grid.png") 93 | inputs_grid.save("debug_inputs_grid.png") 94 | 95 | print('Done!') 96 | -------------------------------------------------------------------------------- /data_preparation/dataset_neural_isp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | 18 | import os 19 | import torch 20 | import torchvision 21 | from torchvision import transforms 22 | import data_preparation.data_generator_neural_isp as dg 23 | 24 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 25 | 26 | 27 | class DatasetRAW(object): 28 | def __init__(self, root, batch_size, patch_size, stride, wb_illum, on_cuda, which_input, is_PS_sRGB, 29 | data_mode='train'): 30 | 31 | print('Inside raw data generator') 32 | rawimgs = dg.datagenerator_raw(data_dir=os.path.join(root, which_input), 33 | meta_dir=os.path.join(root, 'metadata_raw'), 34 | wb_illum=wb_illum, batch_size=batch_size, 35 | patch_size=patch_size, stride=stride, 36 | is_PS_sRGB=is_PS_sRGB, data_mode=data_mode) 37 | 38 | rawimgs = torch.from_numpy(rawimgs) 39 | rawimgs = rawimgs.permute(0, 3, 1, 2) 40 | if on_cuda: 41 | rawimgs = rawimgs.to(device) 42 | 43 | print('Number of patches ' + str(rawimgs.shape[0])) 44 | print('Outside raw data generator \n') 45 | self.rawimgs = rawimgs 46 | 47 | print('Inside sRGB data generator') 48 | srgbimgs = dg.datagenerator_sRGB(data_dir=os.path.join(root, 'clean'), batch_size=batch_size, 49 | patch_size=patch_size, stride=stride, data_mode=data_mode) 50 | print('Number of patches ' + str(srgbimgs.shape[0])) 51 | print('Outside sRGB data generator \n') 52 | srgbimgs = torch.from_numpy(srgbimgs) 53 | srgbimgs = srgbimgs.permute(0, 3, 1, 2) 54 | srgbimgs = srgbimgs / 255.0 55 | if on_cuda: 56 | srgbimgs = srgbimgs.to(device) 57 | self.srgbimgs = srgbimgs 58 | 59 | def __getitem__(self, idx): 60 | # load images 61 | img = self.rawimgs[idx] # c, h, w 62 | target = self.srgbimgs[idx] 63 | return img, target 64 | 65 | def __len__(self): 66 | return len(self.rawimgs) 67 | 68 | 69 | if __name__ == '__main__': 70 | data_dir = 'graphics_dataset' 71 | batch_size, patch_size, stride = 16, 64, 64 72 | wb_illum = 'asn' 73 | on_cuda = False 74 | which_input = 'clean_raw' 75 | 76 | image_datasets = { 77 | x: DatasetRAW(os.path.join(data_dir, x), batch_size, patch_size, stride, wb_illum, on_cuda, which_input, 78 | is_PS_sRGB=True, data_mode=x) 79 | for x in ['val']} 80 | 81 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, 82 | shuffle=True, num_workers=0) 83 | for x in ['val']} 84 | 85 | dataset_sizes = {x: 
len(image_datasets[x]) for x in ['val']} 86 | 87 | # Get a batch of training data 88 | inputs, targets = next(iter(dataloaders['val'])) 89 | 90 | # Make a grid from batch 91 | targets_grid = torchvision.utils.make_grid(targets) 92 | inputs_grid = torchvision.utils.make_grid(inputs) 93 | 94 | targets_grid = transforms.ToPILImage()(targets_grid.cpu()) 95 | inputs_grid = transforms.ToPILImage()(inputs_grid.cpu()) 96 | 97 | targets_grid.save("debug_targets_grid.png") 98 | inputs_grid.save("debug_inputs_grid.png") 99 | 100 | print('Done!') 101 | -------------------------------------------------------------------------------- /model_archs/unet.py: -------------------------------------------------------------------------------- 1 | # From: https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/unet.py 2 | # MIT License: https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/LICENSE 3 | 4 | from collections import OrderedDict 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | 10 | class UNet(nn.Module): 11 | 12 | def __init__(self, in_channels=3, out_channels=1, init_features=32): 13 | super(UNet, self).__init__() 14 | 15 | features = init_features 16 | self.encoder1 = UNet._block(in_channels, features, name="enc1") 17 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) 18 | self.encoder2 = UNet._block(features, features * 2, name="enc2") 19 | self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) 20 | self.encoder3 = UNet._block(features * 2, features * 4, name="enc3") 21 | self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) 22 | self.encoder4 = UNet._block(features * 4, features * 8, name="enc4") 23 | self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) 24 | 25 | self.bottleneck = UNet._block(features * 8, features * 16, name="bottleneck") 26 | 27 | self.upconv4 = nn.ConvTranspose2d( 28 | features * 16, features * 8, kernel_size=2, stride=2 29 | ) 30 | self.decoder4 = UNet._block((features * 8) * 2, features * 8, name="dec4") 31 | self.upconv3 = nn.ConvTranspose2d( 32 | features * 8, features * 4, kernel_size=2, stride=2 33 | ) 34 | self.decoder3 = UNet._block((features * 4) * 2, features * 4, name="dec3") 35 | self.upconv2 = nn.ConvTranspose2d( 36 | features * 4, features * 2, kernel_size=2, stride=2 37 | ) 38 | self.decoder2 = UNet._block((features * 2) * 2, features * 2, name="dec2") 39 | self.upconv1 = nn.ConvTranspose2d( 40 | features * 2, features, kernel_size=2, stride=2 41 | ) 42 | self.decoder1 = UNet._block(features * 2, features, name="dec1") 43 | 44 | self.conv = nn.Conv2d( 45 | in_channels=features, out_channels=out_channels, kernel_size=1 46 | ) 47 | 48 | def forward(self, x): 49 | enc1 = self.encoder1(x) 50 | enc2 = self.encoder2(self.pool1(enc1)) 51 | enc3 = self.encoder3(self.pool2(enc2)) 52 | enc4 = self.encoder4(self.pool3(enc3)) 53 | 54 | bottleneck = self.bottleneck(self.pool4(enc4)) 55 | 56 | dec4 = self.upconv4(bottleneck) 57 | dec4 = torch.cat((dec4, enc4), dim=1) 58 | dec4 = self.decoder4(dec4) 59 | dec3 = self.upconv3(dec4) 60 | dec3 = torch.cat((dec3, enc3), dim=1) 61 | dec3 = self.decoder3(dec3) 62 | dec2 = self.upconv2(dec3) 63 | dec2 = torch.cat((dec2, enc2), dim=1) 64 | dec2 = self.decoder2(dec2) 65 | dec1 = self.upconv1(dec2) 66 | dec1 = torch.cat((dec1, enc1), dim=1) 67 | dec1 = self.decoder1(dec1) 68 | return torch.sigmoid(self.conv(dec1)) 69 | 70 | @staticmethod 71 | def _block(in_channels, features, name): 72 | return nn.Sequential( 73 | OrderedDict( 74 | [ 75 | ( 76 | name + "conv1", 77 | nn.Conv2d( 78 | in_channels=in_channels, 79 
| out_channels=features,
80 |                         kernel_size=3,
81 |                         padding=1,
82 |                         bias=False,
83 |                     ),
84 |                 ),
85 |                 (name + "norm1", nn.BatchNorm2d(num_features=features)),
86 |                 (name + "relu1", nn.ReLU(inplace=True)),
87 |                 (
88 |                     name + "conv2",
89 |                     nn.Conv2d(
90 |                         in_channels=features,
91 |                         out_channels=features,
92 |                         kernel_size=3,
93 |                         padding=1,
94 |                         bias=False,
95 |                     ),
96 |                 ),
97 |                 (name + "norm2", nn.BatchNorm2d(num_features=features)),
98 |                 (name + "relu2", nn.ReLU(inplace=True)),
99 |             ]
100 |         )
101 |     )
--------------------------------------------------------------------------------
/data_preparation/data_generator_illum_est.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
3 | 
4 | Author(s):
5 | Abhijith Punnappurath (abhijith.p@samsung.com)
6 | Luxi Zhao (lucy.zhao@samsung.com)
7 | 
8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
9 | you may not use this file except in compliance with the License.
10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and limitations under the License.
14 | For conditions of distribution and use, see the accompanying LICENSE.md file.
15 | 
16 | """
17 | 
18 | import glob
19 | import cv2
20 | import numpy as np
21 | import pickle
22 | import os
23 | import scipy.io
24 | from utils.img_utils import get_illum_normalized_by_g
25 | 
26 | 
27 | def shuffle_files(fnames):
28 |     """
29 |     Deterministically shuffle file names so that patches are sampled from all scenes
30 |     fnames: sorted file names
31 |     """
32 |     np.random.seed(101)
33 |     fnames = np.array(fnames)
34 |     indices = np.arange(len(fnames))
35 |     np.random.shuffle(indices)
36 |     fnames = fnames[indices]
37 |     return fnames
38 | 
39 | 
40 | def get_all_patches(img, patch_size=48, stride=48):
41 |     h, w = img.shape[:2]
42 |     patches = []
43 |     # extract patches
44 |     for i in range(0, h - patch_size + 1, stride):
45 |         for j in range(0, w - patch_size + 1, stride):
46 |             x = img[i:i + patch_size, j:j + patch_size, ...]
47 | patches.append(x) 48 | return patches 49 | 50 | 51 | def gen_patches_illum_est(file_name, patch_size=48, stride=48): 52 | """ 53 | Crop one image into patches 54 | :return: 55 | """ 56 | img = np.array(cv2.imread(file_name, cv2.IMREAD_UNCHANGED))[:, :, ::-1].astype(np.float32) # img is now in rgb 57 | img = img / 65535.0 58 | img = np.clip(img, 0, 1) 59 | patches = get_all_patches(img, patch_size, stride) 60 | return patches 61 | 62 | 63 | def get_gt_illum_by_fname(illum_file): 64 | gt_illum_by_fname = {} 65 | if illum_file.endswith('.p'): 66 | content = pickle.load(open(illum_file, 'rb')) 67 | gt_illums = content['gt_illum'] # already normalized by g 68 | filenames = content['filenames'] 69 | 70 | elif illum_file.endswith('.mat'): 71 | content = scipy.io.loadmat(illum_file) 72 | gt_illums = content['groundtruth_illuminants'] 73 | gt_illums[:, 0], gt_illums[:, 1], gt_illums[:, 2] = get_illum_normalized_by_g(gt_illums) 74 | filenames = [name[0][0] + '_st4' for name in content['all_image_names']] # array([array(['SamsungNX2000_0001'])) -> 'SamsungNX2000_0001_st4' 75 | else: 76 | raise Exception('Unsupported gt illum file type.') 77 | 78 | for illum, fname in zip(gt_illums, filenames): 79 | gt_illum_by_fname[fname] = illum 80 | return gt_illum_by_fname 81 | 82 | 83 | def datagenerator_illum_est(dataset_dir='', illum_file='', split_file='', split='train', 84 | batch_size=128, patch_size=48, stride=48, debug=False): 85 | file_list = sorted(glob.glob(os.path.join(dataset_dir, '*.png'))) # get name list of all .png files 86 | file_list = shuffle_files(file_list) # shuffle to sample from all graphics scenes 87 | gt_illum_by_fname = get_gt_illum_by_fname(illum_file) 88 | 89 | split_indices = pickle.load(open(split_file, 'rb'))[split] 90 | file_list = file_list[split_indices] 91 | 92 | if debug: 93 | # For debugging only, check if images in the split are expected 94 | mysplit = 'valid' if split == 'val' else split 95 | split_fns_fp = split_file[:-5] + 'fns.p' 96 | split_type = 'graphics_split' if dataset_dir.split('/')[-1] != 'real' else 'real_split' 97 | split_fns = pickle.load(open(split_fns_fp, 'rb'))[split_type][mysplit] 98 | for myf, gtf in zip(file_list, split_fns): 99 | assert os.path.basename(myf) == gtf, f'Panic! 
{os.path.basename(myf)}, {gtf}'
100 |             print('Loaded images are correct.')
101 |         # end of debugging
102 | 
103 |     in_patches = []
104 |     gt_illums = []
105 | 
106 |     # generate patches
107 |     for file in file_list:
108 |         patches = gen_patches_illum_est(file, patch_size, stride)
109 |         fname = os.path.basename(file)[:-4]
110 |         gt_illum = gt_illum_by_fname[fname]
111 |         in_patches.append(patches)
112 |         gt_illums.append([gt_illum] * len(patches))
113 | 
114 |     in_patches = np.concatenate(in_patches)
115 |     gt_illums = np.concatenate(gt_illums)
116 |     discard_n = len(in_patches) - len(in_patches) // batch_size * batch_size
117 |     in_patches = np.delete(in_patches, range(discard_n), axis=0)
118 |     gt_illums = np.delete(gt_illums, range(discard_n), axis=0)
119 | 
120 |     print(f'^_^-{split} data finished-^_^')
121 |     return in_patches, gt_illums
122 | 
123 | 
--------------------------------------------------------------------------------
/docs/neural_isp.md:
--------------------------------------------------------------------------------
1 | # Neural ISP
2 | ## Overview
3 | The following scripts assume or create the following directory structure:
4 | 
5 | ```
6 | |-- neural_isp_expts
7 | |   |-- data
8 | |   |   |-- clean_raw_dngs
9 | |   |   |-- clean_srgb_tiffs
10 | |   |   |-- graphics_dngs_graphics2raw
11 | |   |   |-- graphics_dngs_upi
12 | |   |   |-- graphics_srgb_graphics2raw
13 | |   |   `-- graphics_srgb_upi
14 | |   `-- expts
15 | |       |-- neural_isp_graphics2raw_clean_raw
16 | |       |   |-- models
17 | |       |   |-- tensorboard
18 | |       |   `-- results
19 | |       |-- neural_isp_upi_clean_raw
20 | |       `-- neural_isp_real_clean_raw
21 | ```
22 | 
23 | ## Prepare real data
24 | For all methods, we use the [Day-to-Night](https://openaccess.thecvf.com/content/CVPR2022/papers/Punnappurath_Day-to-Night_Image_Synthesis_for_Training_Nighttime_Neural_ISPs_CVPR_2022_paper.pdf) nighttime dataset for testing.
25 | #### Download Day-to-Night dataset
26 | - Download the day-to-night dataset from [here](https://github.com/SamsungLabs/day-to-night#get-started).
27 | - We only need the contents of the `night_real` folder; copy its subdirectories into `./real_dataset`.
28 | #### Package clean RAW images into DNGs
29 | Package each clean RAW image (averaged from 30 ISO 50 frames) into a DNG so that it can be processed by Photoshop to create the sRGB target.
30 | ```
31 | python3 -m data_generation.package_clean_nighttime_raw_to_dng \
32 |     --dng_folder_path /path/to/real_dataset/dng/iso_50 \
33 |     --raw_folder_path /path/to/real_dataset/clean_raw \
34 |     --save_path /path/to/neural_isp_expts/data/clean_raw_dngs
35 | ```
36 | #### Use Photoshop to process the RAW DNGs into sRGB space
37 | > Photoshop version: 24.5\
38 | > Camera RAW version: 15.3.1
39 | 
40 | 1. Go to: File -> Scripts -> Image Processor
41 | 2. Input folder: `clean_raw_dngs`
42 |    - Check "Open first image to apply settings"
43 | 3. Target folder: `clean_srgb_tiffs`
44 | 4. File type: "Save as TIFF", uncheck "LZW Compression"
45 | 5. Preferences: uncheck everything
46 | 6. Click "Run"
47 | 7. In the Camera RAW dialog that opens, under "Detail", set sharpening = 40; under "manual noise reduction" set luminance = 0, color = 25
48 | 8. Camera RAW preferences: Color Space: sRGB IEC61966-2.1 - 8 bit (12.1MP) - 300 ppi
49 | 9. Click "Open"
50 |    - Results will be saved into the target folder
51 | 
52 | ## Graphics2RAW (Our Method)
53 | ### Data generation
54 | #### Invert graphics data to RAW space
55 | Use Graphics2RAW to invert graphics data to RAW space:
56 | ```
57 | python3 -m jobs.generate_dataset_isp_denoise_graphics2raw
58 | ```
59 | This will generate `graphics_dngs_graphics2raw`.
60 | 
61 | #### Use Photoshop to process the RAW DNGs into sRGB space
62 | > Photoshop version: 24.5\
63 | > Camera RAW version: 15.3.1
64 | 
65 | 1. Go to: File -> Scripts -> Image Processor
66 | 2. Input folder: `graphics_dngs_graphics2raw`
67 |    - Check "Open first image to apply settings"
68 | 3. Target folder: `graphics_srgb_graphics2raw`
69 | 4. File type: "Save as TIFF", uncheck "LZW Compression"
70 | 5. Preferences: uncheck everything
71 | 6. Click "Run"
72 | 7. In the Camera RAW dialog that opens, under "Detail", set sharpening = 40; under "manual noise reduction" set luminance = 0, color = 25
73 | 8. Camera RAW preferences: Color Space: sRGB IEC61966-2.1 - 8 bit (12.1MP) - 300 ppi
74 | 9. Click "Open"
75 |    - Results will be saved into the target folder
76 | 
77 | ### Training & Testing
78 | ```
79 | python3 -m jobs.neural_isp_graphics2raw
80 | ```
81 | 
82 | #### Dataset structure
83 | - real_dataset
84 |   - clean_raw/*.png (30-frame-averaged clean RAW images)
85 |   - clean/*.png (originally simple-ISP-processed PNGs, now replaced with Photoshop-processed PNGs)
86 |   - dng/ (not used by Neural ISP)
87 |     - iso_50/*.dng
88 |     - iso_1600/*.dng
89 |     - iso_3200/*.dng
90 | - graphics_dataset
91 |   - train (60 images)
92 |     - clean/*.png (converted from PS-rendered .tifs)
93 |     - clean_raw/*.png (converted from graphics .dngs)
94 |     - metadata_raw/*.p
95 |   - val (10 images)
96 |     - clean/*.png
97 |     - clean_raw/*.png
98 |     - metadata_raw/*.p
99 | 
100 | ## UPI
101 | ### Data generation
102 | Due to copyright issues, we cannot redistribute third-party code. Please refer to [upi.md](upi.md) before proceeding with the following steps.
103 | 
104 | #### Invert graphics data to RAW space
105 | Use UPI to invert graphics data to RAW space:
106 | ```
107 | python3 -m jobs.generate_dataset_isp_denoise_upi
108 | ```
109 | This will generate `graphics_dngs_upi`.
110 | 
111 | #### Use Photoshop to process the RAW DNGs into sRGB space
112 | Same process as for Graphics2RAW:
113 | > Photoshop version: 24.5\
114 | > Camera RAW version: 15.3.1
115 | 
116 | 1. Go to: File -> Scripts -> Image Processor
117 | 2. Input folder: `graphics_dngs_upi`
118 |    - Check "Open first image to apply settings"
119 | 3. Target folder: `graphics_srgb_upi`
120 | 4. File type: "Save as TIFF", uncheck "LZW Compression"
121 | 5. Preferences: uncheck everything
122 | 6. Click "Run"
123 | 7. In the Camera RAW dialog that opens, under "Detail", set sharpening = 40; under "manual noise reduction" set luminance = 0, color = 25
124 | 8. Camera RAW preferences: Color Space: sRGB IEC61966-2.1 - 8 bit (12.1MP) - 300 ppi
125 | 9. Click "Open"
126 |    - Results will be saved into the target folder
127 | 
128 | ### Training & Testing
129 | ```
130 | python3 -m jobs.neural_isp_upi
131 | ```
132 | 
133 | ## Real
134 | ### Data generation
135 | Already done in [Prepare real data](#prepare-real-data).
136 | 
137 | ### Training & Testing
138 | 
139 | ```
140 | python3 -m jobs.neural_isp_real --fold 0
141 | python3 -m jobs.neural_isp_real --fold 1
142 | python3 -m jobs.neural_isp_real --fold 2
143 | ```
144 | Results reported in the paper are averaged over the 3 folds; a sketch of the averaging is shown below.
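Since each fold trains and tests on a different split, the headline numbers are simple means of the per-fold test metrics. A minimal sketch of that averaging (assuming, hypothetically, that each fold's test run wrote a `results_fold{k}.txt` with one `name psnr ssim` row per test image; adapt the parsing to however your test script actually logs results):

```
# Hypothetical 3-fold averaging helper; the results_fold{k}.txt layout is an assumption.
import numpy as np

def average_over_folds(result_files):
    fold_means = []
    for path in result_files:
        # Read only the numeric columns (psnr, ssim); column 0 holds image names.
        rows = np.loadtxt(path, usecols=(1, 2), ndmin=2)
        fold_means.append(rows.mean(axis=0))  # per-fold mean over its test images
    return np.mean(fold_means, axis=0)  # mean over the folds

psnr, ssim = average_over_folds([f'results_fold{k}.txt' for k in range(3)])
print(f'3-fold average: PSNR = {psnr:.2f} dB, SSIM = {ssim:.4f}')
```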
145 | 
146 | #### Dataset structure
147 | - real_dataset
148 |   - clean_raw/*.png (30-frame-averaged clean RAW images)
149 |   - clean/*.png (originally simple-ISP-processed PNGs, now replaced with Photoshop-processed PNGs)
150 |   - dng/ (not used by Neural ISP)
151 |     - iso_50/*.dng
152 |     - iso_1600/*.dng
153 |     - iso_3200/*.dng
154 | - real_dataset_k_fold (converted from real_dataset)
155 |   - test (35 images)
156 |     - clean/*.png
157 |     - clean_raw/*.png
158 |     - dng/iso_50
159 |   - train (60 images)
160 |     - clean/*.png
161 |     - clean_raw/*.png
162 |     - metadata_raw/*.p
163 |     - noisy_raw/*.png
164 |   - val (10 images)
165 |     - clean/*.png
166 |     - clean_raw/*.png
167 |     - metadata_raw/*.p
168 |     - noisy_raw/*.png
--------------------------------------------------------------------------------
/data_generation/package_exr_to_dng_upi.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
3 | 
4 | Author(s):
5 | Abhijith Punnappurath (abhijith.p@samsung.com)
6 | 
7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and limitations under the License.
13 | For conditions of distribution and use, see the accompanying LICENSE.md file.
14 | 
15 | Package a graphics EXR image into a DNG file
16 | """
17 | 
18 | import os
19 | import argparse
20 | from glob import glob
21 | from binascii import hexlify, unhexlify
22 | import cv2
23 | import tensorflow as tf
24 | import numpy as np
25 | from utils.img_utils import *
26 | import copy
27 | from data_generation.unprocess import unprocess
28 | import pickle
29 | 
30 | 
31 | def parse_args():
32 |     parser = argparse.ArgumentParser()
33 |     parser.add_argument('--exr_folder_path', type=str,
34 |                         help='path to exr images')  # expected structure: exr_folder_path/<subfolder>/<subsubfolder>/xxx.exr
35 |     parser.add_argument('--save_path', type=str,
36 |                         help='path to save to',
37 |                         default='./neural_isp_expts/data/graphics_dngs_upi')
38 |     parser.add_argument('--container_dng_path', type=str,
39 |                         help='path to the container DNG used as a template',
40 |                         default='assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_gain_OFF_noise_OFF_cam_calib_OFF.dng'
41 |                         )
42 |     parser.add_argument('--wb_start', type=int,
43 |                         help='magic value (byte offset) for S20 FE main camera',
44 |                         default=50816
45 |                         )
46 |     parser.add_argument('--image_start', type=int,
47 |                         help='magic value (byte offset) for S20 FE main camera',
48 |                         default=59288
49 |                         )
50 |     parser.add_argument('--colormatrix1_start', type=int,
51 |                         help='magic value (byte offset) for S20 FE main camera',
52 |                         default=50240
53 |                         )
54 |     parser.add_argument('--colormatrix2_start', type=int,
55 |                         help='magic value (byte offset) for S20 FE main camera',
56 |                         default=50384
57 |                         )
58 |     parser.add_argument('--train_val_set', type=str,
59 |                         help='create DNGs only for these images',
60 |                         default='assets/split_files/graphics2raw_train_val_list.p'
61 |                         )
62 | 
63 |     args = parser.parse_args()
64 | 
65 |     print(args)
66 | 
67 |     return args
68 | 
69 | 
70 | if __name__ == '__main__':
71 |     RAND_SEED = 101
72 |     np.random.seed(RAND_SEED)
73 |     tf.random.set_seed(RAND_SEED)
74 | 
75 |     args = parse_args()
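    # Clarifying note (added; not in the original script): rather than writing DNG
    # metadata from scratch, this script hex-edits a container (template) DNG in memory.
    # The --wb_start / --image_start / --colormatrix1_start / --colormatrix2_start
    # arguments are byte offsets into that specific S20 FE container file, which is why
    # they are "magic values"; they must be re-derived for any other container DNG.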
76 |     assert os.path.exists(args.save_path), f'{args.save_path} does not exist!'
77 | 
78 |     exrlist = []
79 | 
80 |     allsubfol = [f.path for f in os.scandir(args.exr_folder_path) if f.is_dir()]
81 |     for subfol in allsubfol:
82 |         allsubsubfol = [f.path for f in os.scandir(subfol) if f.is_dir()]  # subfol already includes exr_folder_path
83 |         for subsubfol in allsubsubfol:
84 |             exrlistfol = sorted(glob(os.path.join(subsubfol, '*.exr')))  # subsubfol already includes the parent path
85 |             exrlist.append(exrlistfol)
86 | 
87 |     train_val_set = pickle.load(open(args.train_val_set, "rb"))
88 | 
89 |     # for S20 FE main camera
90 |     white_level = 1023
91 |     black_level = 64
92 | 
93 |     w = 4032
94 |     h = 3024
95 | 
96 |     with open(args.container_dng_path, "rb") as fn:
97 |         myhexc = hexlify(fn.read())
98 |         myhexc = bytearray(myhexc)
99 | 
100 |     for i in range(len(exrlist)):
101 |         pathnames = exrlist[i][0].split('/')
102 |         savename = pathnames[-3] + '_' + pathnames[-1]
103 | 
104 | 
105 |         if savename[:-4] in train_val_set:
106 |             print(exrlist[i])
107 | 
108 |             exr_img = cv2.imread(exrlist[i][0], cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)  # .astype('float32')
109 |             exr_img = exr_img[:, :, ::-1]  # .exr image is BGR, change it to RGB for processing
110 | 
111 |             # resize to 3024 x 4032
112 |             if exr_img.shape[0] < h and exr_img.shape[1] < w:
113 |                 exr_img = cv2.resize(exr_img, dsize=(w, h))
114 |             elif exr_img.shape[0] < h and exr_img.shape[1] > w:
115 |                 exr_img = exr_img[:, 0:w]
116 |                 exr_img = cv2.resize(exr_img, dsize=(w, h))
117 |             elif exr_img.shape[0] > h and exr_img.shape[1] < w:
118 |                 exr_img = exr_img[0:h, :]
119 |                 exr_img = cv2.resize(exr_img, dsize=(w, h))
120 |             else:
121 |                 exr_img = exr_img[0:h, 0:w]
122 | 
123 |             exr_img = np.clip(exr_img, 0, 1)
124 |             exr_img = exr_img ** (1/2.2)
125 |             exr_img = tf.convert_to_tensor(exr_img, dtype=tf.float32)
126 | 
127 |             raw_est, metadata = unprocess(exr_img)
128 |             raw_est = raw_est.numpy().astype(np.float32)
129 |             for k, v in metadata.items():
130 |                 metadata[k] = v.numpy().astype(np.float32)
131 | 
132 |             wb_dng = np.array([1 / metadata['red_gain'], 1, 1 / metadata['blue_gain']])
133 | 
134 |             # Bayer for S20 FE
135 |             # GR
136 |             # BG
137 |             raw_est = RGB2bayer(raw_est)
138 | 
139 |             # denormalize
140 |             raw_est = raw_est * (white_level - black_level) + black_level
141 |             raw_est[raw_est < 0] = 0
142 |             raw_est[raw_est > white_level] = white_level
143 | 
144 |             colormatrix1 = metadata['xyz2cam'].flatten().astype(np.float64)
145 | 
146 |             myhex = copy.deepcopy(myhexc)
147 |             myhex = update_wb_values(myhex, wb_dng, args.wb_start)
148 |             myhex = update_hex_image(myhex, raw_est, args.image_start)
149 |             myhex = update_colormatrix1_values(myhex, colormatrix1, args.colormatrix1_start)
150 |             myhex = update_colormatrix1_values(myhex, colormatrix1, args.colormatrix2_start)  # repeat for cm2
151 | 
152 |             db = unhexlify(myhex)
153 | 
154 |             savepath = os.path.join(args.save_path, savename[:-4] + '.dng')
155 |             with open(savepath, "wb") as fb:
156 |                 fb.write(db)
157 | 
158 |     print('Done')
159 | 
--------------------------------------------------------------------------------
/data_preparation/k_fold_split_data.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
3 | 
4 | Author(s):
5 | Abhijith Punnappurath (abhijith.p@samsung.com)
6 | 
7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | from shutil import copyfile, rmtree 18 | import os 19 | from glob import glob 20 | import argparse 21 | import cv2 22 | import pickle 23 | 24 | from pipeline.pipeline_utils import get_metadata, get_visible_raw_image 25 | from utils.general_utils import check_dir 26 | 27 | 28 | def parse_args(): 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('--base_path', default='real_dataset', type=str, 31 | help='base address') 32 | parser.add_argument('--save_path', default='real_dataset_k_fold', type=str, 33 | help='output address') 34 | parser.add_argument('--which_fold', default=0, type=int, help='which fold for testing [0,1,2]') 35 | parser.add_argument('--kfold_indices', default='assets/split_files/day2night_k_fold_indices.p', type=str, 36 | help='load saved k-fold indices') 37 | parser.add_argument('--with_noise', default=0, type=int, help='for noisy case') 38 | parser.add_argument('--only_iso_3200', action='store_true', 39 | help='True: only run on iso_3200.') 40 | parser.add_argument('--only_iso_1600', action='store_true', 41 | help='True: only run on iso_1600.') 42 | parser.add_argument('--test_on_val', default=0, type=int, help='for testing on validation set') 43 | 44 | args = parser.parse_args() 45 | 46 | if args.only_iso_3200: 47 | assert not args.only_iso_1600 48 | elif args.only_iso_1600: 49 | assert not args.only_iso_3200 50 | # if they are both false, mix iso 1600 and 3200 equally 51 | print(args) 52 | 53 | return args 54 | 55 | 56 | def split_func(args, folder_path, k_fold_indices): 57 | save_path = args.save_path 58 | 59 | train_index = k_fold_indices['train_index_all'][args.which_fold] 60 | val_index = k_fold_indices['val_index_all'][args.which_fold] 61 | test_index = k_fold_indices['test_index_all'][args.which_fold] 62 | 63 | if not args.only_iso_3200 and not args.only_iso_1600: 64 | if folder_path == 'iso_1600': 65 | index = {'train': train_index[0::2], 'val': val_index[0::2]} 66 | elif folder_path == 'iso_3200': 67 | index = {'train': train_index[1::2], 'val': val_index[1::2]} 68 | else: 69 | index = {'train': train_index, 'val': val_index} 70 | else: 71 | index = {'train': train_index, 'val': val_index} 72 | 73 | input_dir = args.base_path 74 | 75 | print(index['train']) 76 | print('...') 77 | print(index['val']) 78 | print('...') 79 | print(test_index) 80 | print('...') 81 | print(args.which_fold, len(index['train']), len(index['val']), len(test_index)) 82 | 83 | # create directories if they don't exist 84 | check_dir(save_path) 85 | for fol in ['train', 'val', 'test']: check_dir(os.path.join(save_path, fol)) 86 | for fol in ['train', 'val']: 87 | for subfol in ['clean_raw', 'noisy_raw', 'clean', 'metadata_raw']: check_dir(os.path.join(save_path, fol, subfol)) 88 | for fol in ['test']: 89 | for subfol in ['clean_raw', 'clean', 'dng']: check_dir(os.path.join(save_path, fol, subfol)) 90 | check_dir(os.path.join(save_path, 'test', 'dng', folder_path)) 91 | if args.test_on_val: 92 | check_dir(os.path.join(save_path, 'val', 'dng', folder_path)) 93 | 94 | 
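    # Clarifying note (added; not in the original script): the train/val/test indices
    # loaded from day2night_k_fold_indices.p index into this sorted filename list, so the
    # clean sRGB folder defines the canonical scene ordering shared by every fold.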
allfiles = [os.path.basename(x) for x in sorted(glob(os.path.join(input_dir, 'clean', '*.png')))] 95 | 96 | for fol in ['train', 'val']: 97 | for ind in index[fol]: 98 | for subfol in ['clean_raw', 'clean']: 99 | source = os.path.join(input_dir, subfol, allfiles[ind]) 100 | destination = os.path.join(save_path, fol, subfol, allfiles[ind]) 101 | copyfile(source, destination) 102 | 103 | for subfol in ['noisy_raw']: 104 | rawimg = get_visible_raw_image(os.path.join(input_dir, 'dng', folder_path, allfiles[ind][:-4] + '.dng')) 105 | destination = os.path.join(save_path, fol, subfol, allfiles[ind]) 106 | cv2.imwrite(destination, rawimg) 107 | 108 | for subfol in ['metadata_raw']: 109 | metadata = get_metadata(os.path.join(input_dir, 'dng', folder_path, allfiles[ind][:-4] + '.dng')) 110 | pickle.dump(metadata, open(os.path.join(save_path, fol, subfol, allfiles[ind][:-4] + '.p'), "wb")) 111 | 112 | if fol == 'val' and args.test_on_val: 113 | print('saving val dngs') 114 | source = os.path.join(input_dir, 'dng', folder_path, allfiles[ind][:-4] + '.dng') 115 | destination = os.path.join(save_path, fol, 'dng', folder_path, allfiles[ind][:-4] + '.dng') 116 | copyfile(source, destination) 117 | 118 | for fol in ['test']: 119 | clean_raw_dir = os.listdir(os.path.join(save_path, fol, 'clean_raw')) 120 | clean_dirs_empty = len(clean_raw_dir) == 0 121 | for ind in test_index: 122 | if clean_dirs_empty: 123 | for subfol in ['clean_raw', 'clean']: 124 | source = os.path.join(input_dir, subfol, allfiles[ind]) 125 | destination = os.path.join(save_path, fol, subfol, allfiles[ind]) 126 | copyfile(source, destination) 127 | 128 | for subfol in ['dng']: 129 | source = os.path.join(input_dir, subfol, folder_path, allfiles[ind][:-4] + '.dng') 130 | destination = os.path.join(save_path, fol, subfol, folder_path, allfiles[ind][:-4] + '.dng') 131 | copyfile(source, destination) 132 | 133 | 134 | if __name__ == "__main__": 135 | args = parse_args() 136 | k_fold_indices = pickle.load(open(args.kfold_indices, "rb")) 137 | 138 | if os.path.isdir(args.save_path): rmtree(args.save_path) 139 | 140 | if args.with_noise: 141 | if args.only_iso_3200: 142 | split_func(args, 'iso_3200', k_fold_indices) 143 | elif args.only_iso_1600: 144 | split_func(args, 'iso_1600', k_fold_indices) 145 | else: 146 | split_func(args, 'iso_1600', k_fold_indices) 147 | split_func(args, 'iso_3200', k_fold_indices) 148 | else: 149 | split_func(args, 'iso_50', k_fold_indices) 150 | 151 | print('Done!') 152 | -------------------------------------------------------------------------------- /pipeline/cct_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author(s): 3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com) 4 | 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
14 | 15 | 16 | Utility function for working with correlated color temperatures. 17 | """ 18 | 19 | import numpy as np 20 | import sys 21 | 22 | 23 | def dot(x, y): 24 | return np.sum(x * y, axis=-1) 25 | 26 | 27 | def norm(x): 28 | return np.sqrt(dot(x, x)) 29 | 30 | 31 | def raw_rgb_to_cct(raw_rgb, xyz2cam1, xyz2cam2): 32 | """Convert raw-RGB triplet to corresponding correlated color temperature (CCT)""" 33 | # pxyz = [.5, 1, .5] 34 | pxyz = [.3, .3, .3] 35 | # cct = xyz2cct(pxyz) 36 | loss = 1e10 37 | k = 1 38 | cct = 6500 # default 39 | max_iter = 10000 40 | iter = 0 41 | while loss > 1e-4 and iter < max_iter: 42 | cct = xyz2cct(pxyz) 43 | xyz = raw_rgb_to_xyz(raw_rgb, cct, xyz2cam1, xyz2cam2) 44 | loss = norm(xyz - pxyz) 45 | # loss = ang_err(xyz, pxyz) 46 | pxyz = xyz 47 | k = k + 1 48 | iter += 1 49 | if(iter == max_iter): 50 | print('Max iter reached') 51 | return cct 52 | 53 | 54 | def raw_rgb_to_xyz(raw_rgb, temp, xyz2cam1, xyz2cam2): 55 | # TODO: what if temperature > 6500? 56 | # RawRgbToXyz Convert raw-RGB triplet to corresponding XYZ 57 | cct1 = 6500 # D65, DNG code = 21 58 | cct2 = 2500 # A, DNG code = 17 59 | cct1inv = 1 / cct1 60 | cct2inv = 1 / cct2 61 | tempinv = 1 / temp 62 | g = (tempinv - cct2inv) / (cct1inv - cct2inv) 63 | h = 1 - g 64 | # if g < 0: 65 | # g = 0 66 | # if h < 0: 67 | # h = 0 68 | # if h > 1: 69 | # h = 1 70 | xyz2cam = g * xyz2cam1 + h * xyz2cam2 71 | xyz = np.matmul(np.linalg.inv(xyz2cam), np.transpose(raw_rgb)) 72 | 73 | # xyz = xyz / xyz(2) 74 | # xyz_ = [xyz(1) / sum(xyz), xyz(2) / sum(xyz), 1] 75 | # xyz = xyz_ 76 | 77 | return xyz 78 | 79 | 80 | # /* LERP(a,b,c) = linear interpolation macro, is 'a' when c == 0.0 and 'b' when c == 1.0 */ 81 | def lerp(a, b, c): 82 | return (b - a) * c + a 83 | 84 | 85 | def xyz2cct(xyz): 86 | # % /* 87 | # % * Name: XYZtoCorColorTemp.c 88 | # % * 89 | # % * Author: Bruce Justin Lindbloom 90 | # % * 91 | # % * Copyright (c) 2003 Bruce Justin Lindbloom. All rights reserved. 92 | # % * 93 | # % * Input: xyz = pointer to the input array of X, Y and Z color components (in that order). 94 | # % * temp = pointer to where the computed correlated color temperature should be placed. 95 | # % * 96 | # % * Output: *temp = correlated color temperature, if successful. 97 | # % * = unchanged if unsuccessful. 98 | # % * 99 | # % * Return: 0 if successful, else -1. 100 | # % * 101 | # % * Description: 102 | # % * This is an implementation of Robertson's method of computing the correlated color 103 | # % * temperature of an XYZ color. It can compute correlated color temperatures in the 104 | # % * range [1666.7K, infinity]. 105 | # % * 106 | # % * Reference: 107 | # % * "Color Science: Concepts and Methods, Quantitative Data and Formulae", Second Edition, 108 | # % * Gunter Wyszecki and W. S. Stiles, John Wiley & Sons, 1982, pp. 227, 228. 
109 | # % */ 110 | # 111 | 112 | rt = [ # /* reciprocal temperature (K) */ 113 | sys.float_info.min, 10.0e-6, 20.0e-6, 30.0e-6, 40.0e-6, 50.0e-6, 114 | 60.0e-6, 70.0e-6, 80.0e-6, 90.0e-6, 100.0e-6, 125.0e-6, 115 | 150.0e-6, 175.0e-6, 200.0e-6, 225.0e-6, 250.0e-6, 275.0e-6, 116 | 300.0e-6, 325.0e-6, 350.0e-6, 375.0e-6, 400.0e-6, 425.0e-6, 117 | 450.0e-6, 475.0e-6, 500.0e-6, 525.0e-6, 550.0e-6, 575.0e-6, 118 | 600.0e-6 119 | ] 120 | 121 | uvt = [ 122 | [0.18006, 0.26352, -0.24341], 123 | [0.18066, 0.26589, -0.25479], 124 | [0.18133, 0.26846, -0.26876], 125 | [0.18208, 0.27119, -0.28539], 126 | [0.18293, 0.27407, -0.30470], 127 | [0.18388, 0.27709, -0.32675], 128 | [0.18494, 0.28021, -0.35156], 129 | [0.18611, 0.28342, -0.37915], 130 | [0.18740, 0.28668, -0.40955], 131 | [0.18880, 0.28997, -0.44278], 132 | [0.19032, 0.29326, -0.47888], 133 | [0.19462, 0.30141, -0.58204], 134 | [0.19962, 0.30921, -0.70471], 135 | [0.20525, 0.31647, -0.84901], 136 | [0.21142, 0.32312, -1.0182], 137 | [0.21807, 0.32909, -1.2168], 138 | [0.22511, 0.33439, -1.4512], 139 | [0.23247, 0.33904, -1.7298], 140 | [0.24010, 0.34308, -2.0637], 141 | [0.24792, 0.34655, -2.4681], 142 | # /* Note: 0.24792 is a corrected value for the error found in W&S as 0.24702 */ 143 | [0.25591, 0.34951, -2.9641], 144 | [0.26400, 0.35200, -3.5814], 145 | [0.27218, 0.35407, -4.3633], 146 | [0.28039, 0.35577, -5.3762], 147 | [0.28863, 0.35714, -6.7262], 148 | [0.29685, 0.35823, -8.5955], 149 | [0.30505, 0.35907, -11.324], 150 | [0.31320, 0.35968, -15.628], 151 | [0.32129, 0.36011, -23.325], 152 | [0.32931, 0.36038, -40.770], 153 | [0.33724, 0.36051, -116.45] 154 | ] 155 | 156 | # us = 0 157 | # vs = 0 158 | # p = 0 159 | di = 0 160 | # dm = 0 161 | i = 0 162 | if (xyz[0] < 1.0e-20) and (xyz[1] < 1.0e-20) and (xyz[2] < 1.0e-20): 163 | return -1 # /* protect against possible divide-by-zero failure */ 164 | us = (4.0 * xyz[0]) / (xyz[0] + 15.0 * xyz[1] + 3.0 * xyz[2]) 165 | vs = (6.0 * xyz[1]) / (xyz[0] + 15.0 * xyz[1] + 3.0 * xyz[2]) 166 | dm = 0.0 167 | for i in range(31): 168 | di = (vs - uvt[i][1]) - uvt[i][2] * (us - uvt[i][0]) 169 | if (i > 0) and (((di < 0.0) and (dm >= 0.0)) or ((di >= 0.0) and (dm < 0.0))): 170 | break # /* found lines bounding (us, vs) : i-1 and i */ 171 | dm = di 172 | 173 | if i == 31: 174 | # /* bad XYZ input, color temp would be less than minimum of 1666.7 degrees, or too far towards blue */ 175 | return -1 176 | di = di / np.sqrt(1.0 + uvt[i][2] * uvt[i][2]) 177 | dm = dm / np.sqrt(1.0 + uvt[i - 1][2] * uvt[i - 1][2]) 178 | p = dm / (dm - di) # /* p = interpolation parameter, 0.0 : i-1, 1.0 : i */ 179 | p = 1.0 / (lerp(rt[i - 1], rt[i], p)) 180 | cct = p 181 | return cct # /* success */ 182 | 183 | 184 | def interpolate_cst(xyz2cam1, xyz2cam2, temp): 185 | # RawRgbToXyz Convert raw-RGB triplet to corresponding XYZ 186 | cct1 = 6500 # D65, DNG code = 21 187 | cct2 = 2500 # A, DNG code = 17 188 | cct1inv = 1 / cct1 189 | cct2inv = 1 / cct2 190 | tempinv = 1 / temp 191 | g = (tempinv - cct2inv) / (cct1inv - cct2inv) 192 | h = 1 - g 193 | if g < 0: 194 | g = 0 195 | if h < 0: 196 | h = 0 197 | if h > 1: 198 | h = 1 199 | xyz2cam = g * xyz2cam1 + h * xyz2cam2 200 | return xyz2cam 201 | -------------------------------------------------------------------------------- /data_preparation/data_generator_neural_isp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 
3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 13 | For conditions of distribution and use, see the accompanying LICENSE.md file. 14 | 15 | """ 16 | 17 | 18 | import glob 19 | import cv2 20 | import numpy as np 21 | import pickle 22 | 23 | from pipeline.pipeline import run_pipeline 24 | 25 | 26 | aug_times = 1 27 | 28 | 29 | def data_aug(img, mode=0, is_batch=False): 30 | """ 31 | :param is_batch: 32 | When is_batch == True: img (b, h, w, c) 33 | When is_batch == False: img (h, w, c) 34 | """ 35 | flipud_axis = 1 if is_batch else 0 36 | rot_axes = (1, 2) if is_batch else (0, 1) 37 | 38 | if mode == 0: 39 | return img 40 | elif mode == 1: 41 | return np.flip(img, axis=flipud_axis) 42 | elif mode == 2: 43 | return np.rot90(img, axes=rot_axes) 44 | elif mode == 3: 45 | return np.flip(np.rot90(img, axes=rot_axes), axis=flipud_axis) 46 | elif mode == 4: 47 | return np.rot90(img, k=2, axes=rot_axes) 48 | elif mode == 5: 49 | return np.flip(np.rot90(img, k=2, axes=rot_axes), axis=flipud_axis) 50 | elif mode == 6: 51 | return np.rot90(img, k=3, axes=rot_axes) 52 | elif mode == 7: 53 | return np.flip(np.rot90(img, k=3, axes=rot_axes), axis=flipud_axis) 54 | 55 | 56 | def get_all_patches(img, patch_size=48, stride=48): 57 | h, w = img.shape[:2] 58 | patches = [] 59 | # extract patches 60 | for i in range(0, h - patch_size + 1, stride): 61 | for j in range(0, w - patch_size + 1, stride): 62 | x = img[i:i + patch_size, j:j + patch_size, ...] 63 | # data aug 64 | for k in range(0, aug_times): 65 | x_aug = data_aug(x, mode=0) # np.random.randint(0,8)) 66 | patches.append(x_aug) 67 | return patches 68 | 69 | 70 | def gen_patches_sRGB(file_name, patch_size=48, stride=48): 71 | # read image 72 | img = cv2.imread(file_name, cv2.IMREAD_UNCHANGED) 73 | img = np.array(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), dtype=np.float32) 74 | patches = get_all_patches(img, patch_size, stride) 75 | return patches 76 | 77 | 78 | def gen_patches_raw(file_name, meta_name, wb_illum='avg', patch_size=48, stride=48, is_PS_sRGB=True): 79 | """ 80 | :param is_PS_sRGB: whether target sRGB is produced by PS 81 | :return: 82 | """ 83 | 84 | meta_data_org = pickle.load(open(meta_name, "rb")) 85 | 86 | if wb_illum == 'avg': 87 | meta_data_org['as_shot_neutral'] = meta_data_org['avg_night_illuminant'] # modify as_shot_neutral 88 | 89 | img = cv2.imread(file_name, cv2.IMREAD_UNCHANGED) 90 | 91 | if is_PS_sRGB: 92 | params = { 93 | 'save_as': 'png', # options: 'jpg', 'png', 'tif', etc. 
94 | 'white_balancer': 'default', # options: default, or self-defined module 95 | 'demosaicer': '', # options: '' for simple interpolation, 96 | # 'EA' for edge-aware, 97 | # 'VNG' for variable number of gradients, 98 | # 'menon2007' for Menon's algorithm 99 | 'tone_curve': 'simple-s-curve', # options: 'simple-s-curve', 'default', or self-defined module 100 | 'output_stage': 'default_cropping', 101 | } 102 | 103 | stages = ['raw', 'active_area_cropping', 'linearization', 'normal', 'lens_shading_correction', 104 | 'white_balance', 'demosaic', 'default_cropping'] 105 | 106 | else: 107 | params = { 108 | 'save_as': 'png', # options: 'jpg', 'png', 'tif', etc. 109 | 'white_balancer': 'default', # options: default, or self-defined module 110 | 'demosaicer': '', # options: '' for simple interpolation, 111 | # 'EA' for edge-aware, 112 | # 'VNG' for variable number of gradients, 113 | # 'menon2007' for Menon's algorithm 114 | 'tone_curve': 'simple-s-curve', # options: 'simple-s-curve', 'default', or self-defined module 115 | 'output_stage': 'demosaic', 116 | } 117 | 118 | stages = ['raw', 'active_area_cropping', 'linearization', 'normal', 'white_balance', 119 | 'demosaic'] 120 | 121 | img = run_pipeline(img, params=params, metadata=meta_data_org, stages=stages) 122 | img = img ** (1 / 2.2) 123 | 124 | patches = get_all_patches(img, patch_size, stride) 125 | return patches 126 | 127 | 128 | def datagenerator_sRGB(data_dir='dummy_dataset/train/clean', batch_size=128, patch_size=48, stride=48, verbose=False, 129 | data_mode='train'): 130 | file_list = sorted(glob.glob(data_dir + '/*.png')) # get name list of all .png files 131 | # initialize 132 | all_patches = [] 133 | # generate patches 134 | for i in range(len(file_list)): 135 | patches = gen_patches_sRGB(file_list[i], patch_size=patch_size, stride=stride) 136 | all_patches.append(patches) 137 | if verbose: 138 | print(str(i + 1) + '/' + str(len(file_list)) + ' is done') 139 | all_patches = np.concatenate(all_patches) 140 | discard_n = len(all_patches) - len(all_patches) // batch_size * batch_size 141 | all_patches = np.delete(all_patches, range(discard_n), axis=0) 142 | 143 | print(f'^_^-sRGB {data_mode} data finished-^_^') 144 | return all_patches 145 | 146 | 147 | def datagenerator_raw(data_dir='dummy_dataset/train/clean_raw', meta_dir='dummy_dataset/train/metadata_raw', 148 | wb_illum='avg', batch_size=128, patch_size=48, stride=48, is_PS_sRGB=False, verbose=True, 149 | data_mode='train'): 150 | 151 | file_list = sorted(glob.glob(data_dir + '/*.png')) # get name list of all .png files 152 | 153 | meta_list = sorted(glob.glob(meta_dir + '/*.p')) # get name list of all .p files 154 | 155 | # initialize 156 | all_patches = [] 157 | # generate patches 158 | for i in range(len(file_list)): 159 | patches = gen_patches_raw(file_list[i], meta_list[i], wb_illum=wb_illum, patch_size=patch_size, stride=stride, 160 | is_PS_sRGB=is_PS_sRGB) 161 | all_patches.append(patches) 162 | if verbose: 163 | print(str(i + 1) + '/' + str(len(file_list)) + ' is done') 164 | all_patches = np.concatenate(all_patches) 165 | discard_n = len(all_patches) - len(all_patches) // batch_size * batch_size 166 | all_patches = np.delete(all_patches, range(discard_n), axis=0) 167 | 168 | print(f'^_^-raw {data_mode} data finished-^_^') 169 | return all_patches 170 | 171 | 172 | if __name__ == '__main__': 173 | batch_size, patch_size, stride = 128, 48, 48 174 | data_sRGB = datagenerator_sRGB(data_dir='real_night/train/clean', batch_size=batch_size, patch_size=patch_size, 175 | 
stride=stride) 176 | data_raw = datagenerator_raw(data_dir='real_night/train/clean_raw', meta_dir='dummy_dataset/train/metadata_raw', 177 | wb_illum='avg', batch_size=batch_size, patch_size=patch_size, stride=stride) 178 | print('Done!') 179 | -------------------------------------------------------------------------------- /test_illum_est.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 15 | 16 | """ 17 | 18 | from model_archs.cnn import IllumEstNet 19 | from utils.general_utils import save_args 20 | import argparse 21 | import os, time, datetime 22 | import numpy as np 23 | import cv2 24 | import torch 25 | from utils.general_utils import check_dir 26 | from data_preparation.data_generator_illum_est import shuffle_files, get_gt_illum_by_fname 27 | import glob 28 | import pickle 29 | from utils.img_utils import compute_ang_error 30 | 31 | 32 | def to_tensor(img): 33 | img = torch.from_numpy(img.astype(np.float32)) 34 | img = img.unsqueeze(0).permute(0, 3, 1, 2) 35 | return img 36 | 37 | 38 | def from_tensor(img): 39 | img = img.permute(0, 2, 3, 1) 40 | img = img.cpu().detach().numpy() 41 | return np.squeeze(img) 42 | 43 | 44 | def log(*args, **kwargs): 45 | print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S:"), *args, **kwargs) 46 | 47 | 48 | def save_results(ang_err_array, out_fn=None, method=''): 49 | ang_err_array = np.array(ang_err_array) 50 | err_arr_sorted = np.sort(ang_err_array) 51 | fourth = int(np.round(ang_err_array.shape[0] / 4.0)) 52 | best25_mean_err = np.round(np.mean(err_arr_sorted[:fourth]), 4) 53 | worst25_mean_err = np.round(np.mean(err_arr_sorted[-fourth:]), 4) 54 | arr_len = ang_err_array.shape[0] 55 | mean_err = np.round(ang_err_array.mean(), 4) 56 | median_err = np.round(np.median(ang_err_array), 4) 57 | 58 | print(arr_len, mean_err, median_err, best25_mean_err, worst25_mean_err) 59 | 60 | if out_fn: 61 | if not os.path.exists(out_fn): 62 | f = open(out_fn, 'w') 63 | f.write('exp / angular_errs, num_test, mean, median, best 25%, worst 25%\n') 64 | else: 65 | f = open(out_fn, 'a') 66 | f.write(f'{method}, {arr_len}, {mean_err}, {median_err}, {best25_mean_err}, {worst25_mean_err}\n') 67 | f.close() 68 | 69 | 70 | if __name__ == '__main__': 71 | 72 | """ 73 | Assumed dataset directory structure: 74 | dataset_root_dir 75 | camera 76 | method 77 | 78 | Assumed experiment directory structure: 79 | exp_dir 80 | exp_name 81 | models 82 | results 83 | bestmodel 84 | """ 85 | 86 | parser = argparse.ArgumentParser() 87 | parser.add_argument( 88 | '--dataset-dir', default='illum_est_expts/data/ours', type=str, help='folder of png images') 89 | parser.add_argument( 90 | '--illum_file', 
default='illum_est_expts/data/ours/gt_illum.p', type=str,
91 |         help='path to the ground-truth illuminant file (.p or .mat)')
92 |     parser.add_argument(
93 |         '--split_file', default='illum_est_expts/data/SamsungNX2000_train_valid_test_split_idx.p', type=str,
94 |         help='path to split indices')
95 |     parser.add_argument('--exp_name', default='illum_est_expt', type=str, help='experiment name, relative to exp_dir')
96 |     parser.add_argument('--model_name', default='bestmodel.pt', type=str, help='name of the model')
97 |     parser.add_argument('--num_filters', type=int, default=7, help='number of filters for CNN layers')
98 |     parser.add_argument('--exp_dir', default='./', type=str, help='directory to save experiment data to')
99 |     parser.add_argument('--debug', action='store_true', help='debug mode')
100 |     args = parser.parse_args()
101 | 
102 |     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
103 |     exp_name = args.exp_name
104 | 
105 |     model = IllumEstNet(in_channels=3, out_channels=3, num_filters=args.num_filters)
106 | 
107 |     model.load_state_dict(torch.load(os.path.join(args.exp_dir, exp_name, 'models', args.model_name)))
108 |     model = model.to(device)
109 |     model.eval()  # Set model to evaluate mode
110 |     print('model loaded')
111 | 
112 |     full_save_path = os.path.join(args.exp_dir, exp_name, 'results', args.model_name[:-3])
113 |     check_dir(full_save_path)
114 |     save_args(args, full_save_path)
115 | 
116 |     ang_errs = []
117 | 
118 | 
119 |     camera, method = args.dataset_dir.split('/')[-2:]  # assume dataset_dir points to .../camera/method
120 |     fname_result = f'results_{camera}.txt'
121 |     f_result = open(os.path.join(full_save_path, fname_result), "w")
122 | 
123 |     # Data loading
124 |     file_list = sorted(glob.glob(os.path.join(args.dataset_dir, '*.png')))  # get name list of all .png files
125 |     file_list = shuffle_files(file_list)  # shuffle to sample from all scenes
126 |     gt_illum_by_fname = get_gt_illum_by_fname(args.illum_file)
127 | 
128 |     split_indices = pickle.load(open(args.split_file, 'rb'))['test']
129 |     file_list = file_list[split_indices]
130 | 
131 |     if args.debug:
132 |         # For debugging only, check if images in the split are expected
133 |         split_fns_fp = args.split_file[:-5] + 'fns.p'
134 |         split_type = 'graphics_split' if args.dataset_dir.split('/')[-1] != 'real' else 'real_split'
135 |         split_fns = pickle.load(open(split_fns_fp, 'rb'))[split_type]['test']
136 |         for myf, gtf in zip(file_list, split_fns):
137 |             assert os.path.basename(myf) == gtf, f'Panic!
{os.path.basename(myf)}, {gtf}' 138 | print(f'Loaded images are correct.') 139 | # end of debugging 140 | 141 | for file in file_list: 142 | img = np.array(cv2.imread(file, cv2.IMREAD_UNCHANGED))[:, :, ::-1].astype(np.float32) # img is now rgb 143 | img = img / 65535.0 144 | img = np.clip(img, 0, 1) 145 | 146 | fname = os.path.basename(file)[:-4] 147 | gt_illum = gt_illum_by_fname[fname] 148 | 149 | x = to_tensor(img) 150 | x = x.to(device) 151 | start_time = time.time() 152 | with torch.no_grad(): 153 | y_ = model(x) # inference 154 | elapsed_time = time.time() - start_time 155 | y_ = y_.cpu().numpy().squeeze() 156 | ang_err = compute_ang_error(y_, gt_illum) 157 | ang_errs.append(ang_err) 158 | 159 | log('{0:10s} \n ang_err = {1:2.2f} deg, Time = {2:2.4f} seconds, Pred: {3}, GT: {4}'.format(fname, ang_err, 160 | elapsed_time, 161 | y_, gt_illum)) 162 | f_result.write('{0:10s} : ang_err = {1:2.2f} deg, ' 163 | 'Time = {2:2.4f} seconds, Pred: {3}, GT: {4} \n'.format(fname, 164 | ang_err, elapsed_time, y_, gt_illum)) 165 | 166 | ang_err_avg = np.mean(ang_errs) 167 | 168 | print() 169 | log('Dataset: {0:10s} \n Avg. Ang Err = {1:2.4f} deg'.format(f'{camera}_{method}', ang_err_avg)) 170 | 171 | f_result.write('\nDataset: {0:10s} \n Avg. Ang Err = {1:2.4f} deg'.format(f'{camera}_{method}', ang_err_avg)) 172 | f_result.close() 173 | 174 | aggr_result_fp = os.path.join(args.exp_dir, 'results.csv') 175 | index = exp_name.find(method) 176 | result_header = f'{camera}_{exp_name}' if camera not in exp_name else exp_name 177 | save_results(np.array(ang_errs), aggr_result_fp, method=result_header) 178 | -------------------------------------------------------------------------------- /data_generation/invert_synthia_graphics2raw.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
15 | """ 16 | 17 | import os 18 | import pickle 19 | import argparse 20 | from utils.img_utils import * 21 | import scipy.io 22 | from data_preparation.data_generator_illum_est import get_gt_illum_by_fname 23 | 24 | 25 | def parse_args(): 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('--graphics_path', type=str, 28 | help='path to graphics images', 29 | default='illum_est_expts/synthia/SYNTHIA_RAND_CVPR16/RGB' 30 | ) 31 | parser.add_argument('--train_val_set', type=str, 32 | help='only use these images for training and validation', 33 | default='assets/split_files/illum_est/synthia_train_val_list.p' 34 | ) 35 | parser.add_argument('--save_path', type=str, 36 | help='path to exr images', 37 | default='illum_est_expts/data/SamsungNX2000/ours/' 38 | ) 39 | parser.add_argument('--target_camera', type=str, 40 | help='use camera name to select CM1 and CM2', 41 | default='SamsungNX2000' 42 | ) 43 | parser.add_argument('--cm_file', type=str, 44 | help='path to NUS CM1 and CM2', 45 | default='assets/container_dngs/NUS_CST_mats.p' 46 | ) 47 | parser.add_argument('--mat_file', type=str, 48 | help='mat file with gt illuminations', 49 | default='illum_est_expts/nus_metadata/nus_outdoor_gt_illum_mats/SamsungNX2000_gt.mat' 50 | ) 51 | parser.add_argument('--use_train_val_illums_only', action='store_true', 52 | help='sample illuminants from the convex hull built from training and validation illuminants only' 53 | ) 54 | parser.add_argument('--split_file', type=str, 55 | help='path to file specifying which filenames belong to which split, example:path/to/SamsungNX2000_train_valid_test_split_fns.p') 56 | parser.add_argument('--max_illums', type=int, 57 | help='Maximum number of illuminants used to build the convex hull. None for no max limit', 58 | default=None) 59 | parser.add_argument('--no_safe_invert', action='store_true', 60 | help='Do not use safe invert for highlight regions, setting this flag will make the saturated regions have a color cast', 61 | ) 62 | parser.add_argument('--rgb_gain', action='store_true', 63 | help='Apply random global gain to images', 64 | ) 65 | parser.add_argument('--rgb_gain_mean', type=float, default=1.0, 66 | help='Mean value for rgb gain, used only when rgb_gain is set to True.', 67 | ) 68 | 69 | args = parser.parse_args() 70 | 71 | print(args) 72 | 73 | return args 74 | 75 | 76 | if __name__ == '__main__': 77 | 78 | RAND_SEED = 101 79 | np.random.seed(seed=RAND_SEED) 80 | 81 | args = parse_args() 82 | 83 | train_val_set = pickle.load(open(args.train_val_set, 'rb')) 84 | 85 | metadata = pickle.load(open(args.cm_file, 'rb'))[args.target_camera] 86 | cmD65 = metadata['cm_D65'] 87 | cmA = metadata['cm_A'] 88 | 89 | # load illuminants 90 | if args.use_train_val_illums_only: 91 | # Use only training and validation illuminants to build the convex hull 92 | gt_illum_by_fname = get_gt_illum_by_fname(args.mat_file) 93 | splits = pickle.load(open(args.split_file, 'rb'))['real_split'] 94 | split_fns_train = splits['train'] 95 | split_fns_valid = splits['valid'] 96 | split_fns = np.append(split_fns_train, split_fns_valid) 97 | print('Number of training and validation illums: ', len(split_fns)) 98 | 99 | fnames = [os.path.basename(f)[:-4] for f in split_fns] 100 | gt_illum = [gt_illum_by_fname[fname] for fname in fnames] 101 | gt_illum = np.concatenate([gt_illum]) # all 150 train and validation illums 102 | 103 | # sample within the training and validation illums 104 | if args.max_illums is not None: 105 | seed = 0 106 | np.random.seed(seed) 107 | rand_idx = 
np.random.choice(len(gt_illum), args.max_illums, replace=False) 108 | gt_illum = gt_illum[rand_idx, :] 109 | np.random.seed(RAND_SEED) # reset the seed 110 | print('Number of illums used to build the convex hull: ', len(gt_illum)) 111 | 112 | else: 113 | gt_illum = scipy.io.loadmat(args.mat_file) 114 | gt_illum = gt_illum['groundtruth_illuminants'] 115 | gt_illum[:, 0], gt_illum[:, 1], gt_illum[:, 2] = get_illum_normalized_by_g(gt_illum) 116 | illum_mean = np.mean(gt_illum, 0) 117 | illum_cov = np.cov(np.transpose(gt_illum)) 118 | 119 | xyz2srgb_mat = get_xyz_to_srgb_mat() 120 | 121 | gt_illum_array = [] 122 | file_name_array = [] 123 | 124 | for i, filename in enumerate(train_val_set): 125 | savename = filename 126 | print(i, filename) 127 | 128 | graphics_img = cv2.imread(os.path.join(args.graphics_path, filename), -1) 129 | graphics_img = graphics_img[:, :, ::-1] 130 | graphics_img = np.array(graphics_img).astype(np.float32) / 255.0 # SYNTHIA images are 8 bit 131 | graphics_img = np.clip(graphics_img, 0, 1) 132 | 133 | # De-gamma 134 | graphics_img = graphics_img ** 2.2 135 | 136 | # Sample an illuminant 137 | while True: 138 | wb_vec = np.random.multivariate_normal(illum_mean, illum_cov, 1).squeeze() 139 | if in_hull(np.expand_dims(wb_vec[[0, 2]], axis=0), gt_illum[:, [0, 2]]): 140 | break 141 | gt_illum_array.append(wb_vec) 142 | print(wb_vec) 143 | 144 | # Compute CST matrix 145 | cst_mat = get_cst_matrix(cmD65, cmA, wb_vec) 146 | 147 | # sRGB to CIE XYZ (Invert XYZ2sRGB) 148 | raw_est = apply_combined_mat(graphics_img, np.linalg.inv(xyz2srgb_mat)) 149 | 150 | # CIE XYZ to device RGB (Invert CST) 151 | raw_est = apply_combined_mat(raw_est, np.linalg.inv(cst_mat)) 152 | 153 | # Inverse digital gain (optional) 154 | rgb_gain = np.random.normal(loc=args.rgb_gain_mean, scale=0.1) if args.rgb_gain else 1.0 155 | 156 | # Invert WB 157 | if not args.no_safe_invert: 158 | raw_est = safe_invert_gains(raw_est, wb_vec, rgb_gain) 159 | else: 160 | wb_mat = get_wb_as_matrix(wb_vec) 161 | raw_est *= rgb_gain 162 | raw_est = apply_combined_mat(raw_est, np.linalg.inv(wb_mat)) 163 | cv2.imwrite( 164 | os.path.join(args.save_path, savename[:-4] + '.png'), 165 | (raw_est[:, :, [2, 1, 0]] * 65535).astype(np.uint16)) 166 | file_name_array.append(savename[:-4]) 167 | 168 | gt_values = {'gt_illum': gt_illum_array, 'filenames': file_name_array} 169 | 170 | pickle.dump(gt_values, open(os.path.join(args.save_path, 'gt_illum.p'), "wb")) 171 | 172 | print('Done') 173 | -------------------------------------------------------------------------------- /data_preparation/data_generator_denoise.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 
14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 15 | 16 | """ 17 | 18 | import glob 19 | import cv2 20 | import numpy as np 21 | 22 | from pipeline.pipeline_utils import demosaic 23 | from pipeline.pipeline import run_pipeline 24 | from pipeline.raw_utils import stack_rggb_channels, RGGB2Bayer 25 | from os.path import join 26 | from noise_profiler.image_synthesizer import synthesize_noisy_image_v2 27 | 28 | 29 | np.random.seed(101) 30 | 31 | 32 | def get_all_patches(img, patch_size=48, stride=48): 33 | h, w, = img.shape[:2] 34 | patches = [] 35 | # extract patches 36 | for i in range(0, h - patch_size + 1, stride): 37 | for j in range(0, w - patch_size + 1, stride): 38 | x = img[i:i + patch_size, j:j + patch_size, ...] 39 | patches.append(x) 40 | return patches 41 | 42 | 43 | def normalize_s20fe(img): 44 | white_level = 1023.0 45 | black_level = 64.0 46 | img = (img - black_level) / (white_level - black_level) 47 | return img 48 | 49 | 50 | def denormalize_s20fe(img): 51 | white_level = 1023.0 52 | black_level = 64.0 53 | img = img * (white_level - black_level) + black_level 54 | return img 55 | 56 | 57 | def post_process_stacked_images_s20fe(img, cfa_pattern): 58 | """ 59 | Process an image that went through stack_bayer_norm_gamma_s20fe 60 | to demosaiced, gamma-ed RAW RGB 61 | :param img: 62 | :param cfa_pattern: 63 | :return: 64 | """ 65 | img = (img ** 2.2) * 1023.0 # [0, 1023] 66 | img = np.clip(img, 64, 1023) # [64, 1023] 67 | img = (img - 64) / (1023 - 64) # [0, 1] normalization with black level subtraction 68 | img = RGGB2Bayer(img) 69 | img = demosaic(img, cfa_pattern, alg_type='EA') 70 | img = img ** (1 / 2.2) 71 | return img 72 | 73 | 74 | def post_process_stacked_images_s20fe_to_srgb(img, metadata): 75 | """ 76 | Process an image that went through stack_bayer_norm_gamma_s20fe 77 | to sRGB 78 | :param img: 79 | :param metadata: 80 | :return: 81 | """ 82 | img = (img ** 2.2) * 1023.0 # [0, 1023] 83 | img = np.clip(img, 64, 1023) # [64, 1023] 84 | img = RGGB2Bayer(img) 85 | 86 | stages = [ 87 | 'raw', 88 | 'normal', 89 | 'lens_shading_correction', 90 | 'white_balance', 91 | 'demosaic', 92 | 'xyz', 93 | 'srgb', 94 | 'fix_orient', 95 | 'gamma', 96 | ] 97 | 98 | params = { 99 | 'input_stage': 'raw', 100 | 'output_stage': 'gamma', 101 | 'demosaicer': 'EA', 102 | } 103 | 104 | img = run_pipeline(img, params=params, metadata=metadata, stages=stages) 105 | return img 106 | 107 | 108 | def linearize_stacked_images_s20fe(img): 109 | """ 110 | Process an image that went through stack_bayer_norm_gamma_s20fe 111 | to stacked, normalized, linear RAW RGB in the range of [0, 1] 112 | with black level subtraction 113 | :param img: a RAW image that went through stack_bayer_norm_gamma_s20fe 114 | :return: 115 | """ 116 | img = img ** 2.2 117 | img = img * 1023.0 # [0, 1023], no black level subtraction 118 | img = normalize_s20fe(img) # [0, 1], with black level subtraction 119 | return img 120 | 121 | 122 | def stack_bayer_norm_gamma_s20fe(img, clip_bot=False): 123 | """ 124 | 125 | :param img: not normalized bayer image, [black_level, white_level] 126 | :param clip_bot: clip minimum value; maximum value is always clipped to 1023 127 | :return: 128 | """ 129 | img = img.astype(np.float32) 130 | img = np.clip(img, 0.0, 1023.0) 131 | if clip_bot: 132 | img = np.clip(img, 64.0, 1023.0) 133 | img = img / 1023.0 134 | img = stack_rggb_channels(img) # do not pass in bayer_pattern to not change the stacking order 135 | img = img ** (1 / 2.2) 136 | return img 137 | 138 | 
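# Clarifying note (added; not in the original script): stack_bayer_norm_gamma_s20fe converts a
# [black_level, white_level] Bayer frame into the 4-channel RGGB, gamma-(1/2.2) representation
# the denoiser is trained on; linearize_stacked_images_s20fe maps that representation back to
# normalized linear RAW, while post_process_stacked_images_s20fe and
# post_process_stacked_images_s20fe_to_srgb take it to demosaiced RAW RGB and sRGB, respectively.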
139 | def process_image(file_name, data_type='input', noise_model=None, iso=100):
140 |     """
141 |     Assumptions:
142 |     Camera: S20FE
143 |     h == 3024 and w == 4032
144 | 
145 |     :param iso: 1600 or 3200
146 |     :param noise_model: hg noise model
147 |     :param data_type:
148 |         input_graphics_raw: clean_raw -> noise -> stacked -> clip to 0~1023 -> gamma
149 |         target_graphics_raw: clean_raw -> stacked -> clip to 64~1023 -> gamma
150 |         input_real: noisy_raw -> stacked -> clip to 0~1023 -> gamma
151 |         target_real: clean_raw -> stacked -> clip to 64~1023 -> gamma
152 |     :return: stacked, gamma-encoded float32 image in [0, 1]
153 |     """
154 |     img = cv2.imread(file_name, cv2.IMREAD_UNCHANGED)  # bayer image
155 |     h, w = img.shape[:2]
156 |     assert h == 3024 and w == 4032, 'Wrong height and width!'
157 | 
158 |     # Generate input patches
159 |     if data_type == 'input_graphics_raw':
160 |         # Graphics data: synthesize noise on the clean raw first
161 |         img = synthesize_noisy_image_v2(img, model=noise_model['noise_model'],
162 |                                         dst_iso=iso, min_val=0,
163 |                                         max_val=1023,
164 |                                         iso2b1_interp_splines=noise_model['iso2b1_interp_splines'],
165 |                                         iso2b2_interp_splines=noise_model['iso2b2_interp_splines'])
166 | 
167 |         img_ = stack_bayer_norm_gamma_s20fe(img, clip_bot=False)
168 | 
169 |     # Real data: the input is already noisy, stack as-is
170 |     elif data_type == 'input_real':
171 |         img_ = stack_bayer_norm_gamma_s20fe(img, clip_bot=False)
172 |     else:
173 |         # Generate target patches; covered cases: target_graphics_raw, target_real
174 |         # img: linear, [black_level, white_level], bayer
175 |         img_ = stack_bayer_norm_gamma_s20fe(img, clip_bot=True)
176 | 
177 |     img_ = img_.astype(np.float32)
178 |     return img_
179 | 
180 | 
181 | def datagenerator_raw(data_dir='dummy_dataset/train', batch_size=128, patch_size=48, stride=48, verbose=True,
182 |                       data_type='graphics_raw', noise_model=None, iso=3200):
183 |     if 'graphics' in data_type:
184 |         input_list = sorted(glob.glob(join(data_dir, 'clean_raw', '*.png')))
185 |         target_list = input_list
186 |     elif 'real' in data_type:
187 |         input_list = sorted(glob.glob(join(data_dir, 'noisy_raw', '*.png')))
188 |         target_list = sorted(glob.glob(join(data_dir, 'clean_raw', '*.png')))
189 |     else:
190 |         raise Exception('Unexpected data type')
191 | 
192 |     # initialize
193 |     patches_input = []
194 |     patches_target = []
195 |     # generate patches
196 |     for i in range(len(input_list)):
197 |         img_input = process_image(input_list[i], data_type=f'input_{data_type}', noise_model=noise_model, iso=iso)
198 |         img_target = process_image(target_list[i], data_type=f'target_{data_type}')
199 |         img_pair = np.dstack([img_input, img_target])
200 |         patches = get_all_patches(img_pair, patch_size, stride)
201 |         patches = np.array(patches)
202 |         assert patches[0].shape[-1] == 8  # both input and target are stacked
203 |         split = 4
204 |         patch_input = patches[..., :split]
205 |         patch_target = patches[..., split:]
206 |         patches_input.append(patch_input)
207 |         patches_target.append(patch_target)
208 |         if verbose:
209 |             print(str(i + 1) + '/' + str(len(input_list)) + ' is done')
210 | 
211 |     patches_input = np.concatenate(patches_input)
212 |     patches_target = np.concatenate(patches_target)
213 | 
214 |     discard_n = len(patches_input) - len(patches_input) // batch_size * batch_size  # drop the remainder so the patch count is a multiple of batch_size
215 | 
216 |     patches_input = np.delete(patches_input, range(discard_n), axis=0)
217 |     patches_target = np.delete(patches_target, range(discard_n), axis=0)
218 | 
219 |     print('^_^-raw training data finished-^_^')
220 |     return patches_input, patches_target
221 | 
222 | 
--------------------------------------------------------------------------------
/pipeline/dng_opcode.py:
--------------------------------------------------------------------------------
1 | """
2 | Author(s):
3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com)
4 | 
5 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
6 | 
7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and limitations under the License.
13 | For conditions of distribution and use, see the accompanying LICENSE.md file.
14 | 
15 | Utility functions for handling DNG opcode lists.
16 | """
17 | import struct
18 | import numpy as np
19 | from .exif_utils import get_tag_values_from_ifds
20 | 
21 | 
22 | class Opcode:
23 |     def __init__(self, id_, dng_spec_ver, option_bits, size_bytes, data):
24 |         self.id = id_
25 |         self.dng_spec_ver = dng_spec_ver
26 |         self.size_bytes = size_bytes
27 |         self.option_bits = option_bits
28 |         self.data = data
29 | 
30 | 
31 | def parse_opcode_lists(ifds):
32 |     # OpcodeList1, 51008, 0xC740
33 |     # Applied to raw image as read directly from file
34 | 
35 |     # OpcodeList2, 51009, 0xC741
36 |     # Applied to raw image after being mapped to linear reference values
37 |     # That is, after linearization, black level subtraction, normalization, and clipping
38 | 
39 |     # OpcodeList3, 51022, 0xC74E
40 |     # Applied to raw image after being demosaiced
41 | 
42 |     opcode_list_tag_nums = [51008, 51009, 51022]
43 |     opcode_lists = {}
44 |     for i, tag_num in enumerate(opcode_list_tag_nums):
45 |         opcode_list_ = get_tag_values_from_ifds(tag_num, ifds)
46 |         if opcode_list_ is not None:
47 |             opcode_list_ = bytearray(opcode_list_)
48 |             opcodes = parse_opcodes(opcode_list_)
49 |             opcode_lists.update({tag_num: opcodes})
50 |         else:
51 |             pass
52 | 
53 |     return opcode_lists
54 | 
55 | 
56 | def parse_opcodes(opcode_list):
57 |     """
58 |     Parse a byte array representing an opcode list.
59 |     :param opcode_list: An opcode list as a byte array.
60 |     :return: Opcodes as a dictionary keyed by opcode ID.
61 |     """
62 |     # opcode lists are always stored in big endian
63 |     endian_sign = ">"
64 | 
65 |     # opcode IDs
66 |     # 9: GainMap
67 |     # 1: Rectilinear Warp
68 | 
69 |     # clip to
70 |     # [0, 2^32 - 1] for OpcodeList1
71 |     # [0, 2^16 - 1] for OpcodeList2
72 |     # [0, 1] for OpcodeList3
73 | 
74 |     i = 0
75 |     num_opcodes = struct.unpack(endian_sign + "I", opcode_list[i:i + 4])[0]
76 |     i += 4
77 | 
78 |     opcodes = {}
79 |     for j in range(num_opcodes):
80 |         opcode_id_ = struct.unpack(endian_sign + "I", opcode_list[i:i + 4])[0]
81 |         i += 4
82 |         dng_spec_ver = [struct.unpack(endian_sign + "B", opcode_list[i + k:i + k + 1])[0] for k in range(4)]
83 |         i += 4
84 |         option_bits = struct.unpack(endian_sign + "I", opcode_list[i:i + 4])[0]
85 |         i += 4
86 | 
87 |         # option bits
88 |         if option_bits & 1 == 1:  # optional/unknown
89 |             pass
90 |         elif option_bits & 2 == 2:  # can be skipped for "preview quality", needed for "full quality"
91 |             pass
92 |         else:
93 |             pass
94 | 
95 |         opcode_size_bytes = struct.unpack(endian_sign + "I", opcode_list[i:i + 4])[0]
96 |         i += 4
97 | 
98 |         opcode_data = opcode_list[i:i + opcode_size_bytes]
99 |         i += opcode_size_bytes
100 | 
101 |         # GainMap (lens shading correction map)
102 |         if opcode_id_ == 9:
103 |             opcode_gain_map_data = parse_opcode_gain_map(opcode_data)
104 |             opcode_data = opcode_gain_map_data
105 |             # change opcode_id from 9 to 9.0, 9.1, 9.2, ... to handle different lsc maps for different channels
106 |             opcode_id_ = opcode_id_ + j / 10.
107 |         # WarpRectilinear
108 |         elif opcode_id_ == 1:
109 |             opcode_rect_warp_data = parse_opcode_rect_warp(opcode_data)
110 |             opcode_data = opcode_rect_warp_data
111 |         # FixBadPixelsList
112 |         elif opcode_id_ == 5:
113 |             bad_pixels_list = parse_bad_pixels_list(opcode_data)
114 |             opcode_data = bad_pixels_list
115 | 
116 |         # set opcode object
117 |         opcode = Opcode(id_=opcode_id_, dng_spec_ver=dng_spec_ver, option_bits=option_bits,
118 |                         size_bytes=opcode_size_bytes,
119 |                         data=opcode_data)
120 |         opcodes.update({opcode_id_: opcode})
121 | 
122 |     return opcodes
123 | 
124 | 
125 | def parse_bad_pixels_list(opcode_data):
126 |     endian_sign = ">"  # big
127 |     opcode_dict = {'bad_points': [], 'bad_rects': []}
128 | 
129 |     i = 0
130 |     opcode_dict['bayer_phase'] = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
131 |     i += 4
132 | 
133 |     opcode_dict['bad_point_count'] = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
134 |     i += 4
135 | 
136 |     opcode_dict['bad_rect_count'] = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
137 |     i += 4
138 | 
139 |     for j in range(opcode_dict['bad_point_count']):
140 |         bad_point_row = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
141 |         i += 4
142 |         bad_point_col = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
143 |         i += 4
144 |         opcode_dict['bad_points'].append((bad_point_row, bad_point_col))
145 | 
146 |     for j in range(opcode_dict['bad_rect_count']):
147 |         bad_rect_top = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
148 |         i += 4
149 |         bad_rect_left = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
150 |         i += 4
151 |         bad_rect_bot = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
152 |         i += 4
153 |         bad_rect_right = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0]
154 |         i += 4
155 |         opcode_dict['bad_rects'].append((bad_rect_top, bad_rect_left, bad_rect_bot, bad_rect_right))
156 | 
157 |     return opcode_dict
158 | 
159 | 
160 | def parse_opcode_rect_warp(opcode_data):
161 |     endian_sign = ">"  # big
162 |     opcode_dict = {}
163 |     '''
164 |     opcode_dict = {
165 | 'N': 3, 166 | 'coefficient_set': [ 167 | { 168 | 'k_r0': 1, 169 | 'k_r1': 0, 170 | 'k_r2': 0, 171 | 'k_r3': 0, 172 | 'k_t0': 0, 173 | 'k_t1': 0, 174 | }, 175 | ... 176 | ], 177 | 'cx': 0.5, 178 | 'cy': 0.5 179 | } 180 | ''' 181 | i = 0 182 | num_planes = struct.unpack(endian_sign + "L", opcode_data[i:i + 4])[0] 183 | i += 4 184 | 185 | opcode_dict['N'] = num_planes 186 | opcode_dict['coefficient_set'] = [] 187 | 188 | for j in range(num_planes): 189 | keys = ['k_r0', 'k_r1', 'k_r2', 'k_r3', 'k_t0', 'k_t1'] 190 | coefficient_set = {} 191 | for key in keys: 192 | coefficient_set[key] = struct.unpack(endian_sign + "d", opcode_data[i:i + 8])[0] 193 | i += 8 194 | opcode_dict['coefficient_set'].append(coefficient_set) 195 | 196 | opcode_dict['cx'] = struct.unpack(endian_sign + "d", opcode_data[i:i + 8])[0] 197 | i += 8 198 | opcode_dict['cy'] = struct.unpack(endian_sign + "d", opcode_data[i:i + 8])[0] 199 | 200 | return opcode_dict 201 | 202 | 203 | def parse_opcode_gain_map(opcode_data): 204 | endian_sign = ">" # big 205 | opcode_dict = {} 206 | keys = ['top', 'left', 'bottom', 'right', 'plane', 'planes', 'row_pitch', 'col_pitch', 'map_points_v', 207 | 'map_points_h', 'map_spacing_v', 'map_spacing_h', 'map_origin_v', 'map_origin_h', 'map_planes', 'map_gain'] 208 | dtypes = ['L'] * 10 + ['d'] * 4 + ['L'] + ['f'] 209 | dtype_sizes = [4] * 10 + [8] * 4 + [4] * 2 # data type size in bytes 210 | counts = [1] * 15 + [0] # 0 count means variable count, depending on map_points_v and map_points_h 211 | # values = [] 212 | 213 | i = 0 214 | for k in range(len(keys)): 215 | if counts[k] == 0: # map_gain 216 | counts[k] = opcode_dict['map_points_v'] * opcode_dict['map_points_h'] 217 | 218 | if counts[k] == 1: 219 | vals = struct.unpack(endian_sign + dtypes[k], opcode_data[i:i + dtype_sizes[k]])[0] 220 | i += dtype_sizes[k] 221 | else: 222 | vals = [] 223 | for j in range(counts[k]): 224 | vals.append(struct.unpack(endian_sign + dtypes[k], opcode_data[i:i + dtype_sizes[k]])[0]) 225 | i += dtype_sizes[k] 226 | 227 | opcode_dict[keys[k]] = vals 228 | 229 | opcode_dict['map_gain_2d'] = np.reshape(opcode_dict['map_gain'], 230 | (opcode_dict['map_points_v'], opcode_dict['map_points_h'])) 231 | 232 | return opcode_dict 233 | -------------------------------------------------------------------------------- /data_generation/package_exr_to_dng_graphics2raw.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 3 | 4 | Author(s): 5 | Abhijith Punnappurath (abhijith.p@samsung.com) 6 | Luxi Zhao (lucy.zhao@samsung.com) 7 | 8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and limitations under the License. 14 | For conditions of distribution and use, see the accompanying LICENSE.md file. 
15 | 
16 | Package graphics EXR images into DNG files
17 | """
18 | 
19 | import os
20 | import argparse
21 | from glob import glob
22 | from binascii import hexlify, unhexlify
23 | import scipy.io
24 | 
25 | from pipeline.pipeline_utils import get_metadata
26 | from utils.img_utils import *
27 | import copy
28 | import pickle
29 | 
30 | RAND_SEED = 101
31 | np.random.seed(RAND_SEED)
32 | 
33 | 
34 | def parse_args():
35 |     parser = argparse.ArgumentParser()
36 |     parser.add_argument('--exr_folder_path', type=str,
37 |                         help='path to exr images')  # expected directory structure: exr_folder_path/<subfol>/<subsubfol>/xxx.exr
38 |     parser.add_argument('--save_path', type=str,
39 |                         help='path to save to',
40 |                         default='neural_isp_expts/data/graphics_dngs_graphics2raw')
41 |     parser.add_argument('--container_dng_path', type=str,
42 |                         help='path to container dng',
43 |                         default='assets/container_dngs/container_dng_S20_FE_main_rectilinear_OFF_gain_OFF_noise_OFF_cam_calib_OFF.dng'
44 |                         )
45 |     parser.add_argument('--wb_start', type=int,
46 |                         help='magic value for S20 FE main camera',
47 |                         default=50816
48 |                         )
49 |     parser.add_argument('--image_start', type=int,
50 |                         help='magic value for S20 FE main camera',
51 |                         default=59288
52 |                         )
53 |     parser.add_argument('--train_val_set', type=str,
54 |                         help='create dngs only for these images',
55 |                         default='assets/split_files/graphics2raw_train_val_list.p'
56 |                         )
57 |     parser.add_argument('--no_safe_invert', action='store_true',
58 |                         help='Do not use safe invert for highlight regions; setting this flag will give the saturated regions a color cast',
59 |                         )
60 |     parser.add_argument('--rgb_gain', action='store_true',
61 |                         help='Apply random global gain to images',
62 |                         )
63 |     parser.add_argument('--rgb_gain_mean', type=float, default=0.8,
64 |                         help='Mean value for rgb gain, used only when rgb_gain is set to True.',
65 |                         )
66 |     parser.add_argument('--max_illums', type=int, default=None,
67 |                         help='Maximum number of illuminants used to build the convex hull. None for no max limit',
68 |                         )
69 |     parser.add_argument('--illum_seed', type=int, default=None,
70 |                         help='Random seed for sampling illuminants used to build the convex hull.',
71 |                         )
72 |     args = parser.parse_args()
73 |     print(args)
74 | 
75 |     return args
76 | 
77 | 
78 | if __name__ == '__main__':
79 | 
80 |     args = parse_args()
81 |     assert os.path.exists(args.save_path), f'{args.save_path} does not exist!'
82 | 
83 |     exrlist = []
84 | 
85 |     allsubfol = [f.path for f in os.scandir(args.exr_folder_path) if f.is_dir()]
86 |     for subfol in allsubfol:
87 |         allsubsubfol = [f.path for f in os.scandir(subfol) if f.is_dir()]  # f.path is already a full path
88 |         for subsubfol in allsubsubfol:
89 |             exrlistfol = sorted(glob(os.path.join(subsubfol, '*.exr')))
90 |             exrlist.append(exrlistfol)
91 | 
92 |     train_val_set = pickle.load(open(args.train_val_set, "rb"))
93 | 
94 |     metadata = get_metadata(args.container_dng_path)
95 |     metadata = get_extra_tags(args.container_dng_path, metadata)
96 | 
97 |     # IMPORTANT: for S20FE, color_matrix_1 corresponds to D65,
98 |     # color_matrix_2 corresponds to Standard Light A
99 |     # The order may be different for different cameras, need to check beforehand!
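    # A quick way to verify the ordering for a new container DNG (sketch, assuming
    # the DNG records CalibrationIlluminant1/2 in tags 50778/50779; per the DNG
    # spec, EXIF LightSource value 17 = Standard Light A and 21 = D65):
    #
    #   from pipeline.exif_utils import parse_exif, get_tag_values_from_ifds
    #   ifds = parse_exif(args.container_dng_path, verbose=False)
    #   print(get_tag_values_from_ifds(50778, ifds))  # illuminant for color_matrix_1
    #   print(get_tag_values_from_ifds(50779, ifds))  # illuminant for color_matrix_2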
100 | cmD65 = metadata['color_matrix_1'] 101 | cmA = metadata['color_matrix_2'] 102 | 103 | # Illumination dictionary 104 | # load nighttime illuminants 105 | gt_illum = scipy.io.loadmat('./data_generation/night_dict_v2.mat') 106 | gt_illum = gt_illum['gt_illum'] 107 | gt_illum = gt_illum[0:45, :] 108 | # discard outliers 109 | indd = gt_illum[:, 0] < 1 110 | gt_illum = gt_illum[indd, :] 111 | 112 | if args.max_illums is not None: 113 | # Sample N illums from 39 illums 114 | np.random.seed(args.illum_seed) 115 | rand_idx = np.random.choice(len(gt_illum), args.max_illums, replace=False) 116 | gt_illum = gt_illum[rand_idx, :] 117 | np.random.seed(RAND_SEED) # reset the seed 118 | 119 | print(gt_illum.shape[0], 'num illums') 120 | 121 | gt_illum[:, 0], gt_illum[:, 1], gt_illum[:, 2] = get_illum_normalized_by_g(gt_illum) 122 | 123 | illum_mean = np.mean(gt_illum, 0) 124 | illum_cov = np.cov(np.transpose(gt_illum)) 125 | 126 | # for S20FE main camera 127 | white_level = 1023 128 | black_level = 64 129 | 130 | w = 4032 131 | h = 3024 132 | 133 | with open(args.container_dng_path, "rb") as fn: 134 | myhexc = hexlify(fn.read()) 135 | myhexc = bytearray(myhexc) 136 | 137 | xyz2srgb_mat = get_xyz_to_srgb_mat() 138 | 139 | for i in range(len(exrlist)): 140 | pathnames = exrlist[i][0].split('/') 141 | savename = pathnames[-3] + '_' + pathnames[-1] 142 | 143 | if savename[:-4] in train_val_set: 144 | print(exrlist[i]) 145 | 146 | exr_img = cv2.imread(exrlist[i][0], cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) 147 | exr_img = exr_img[:, :, ::-1] 148 | 149 | # Resize to the shape of S20 FE RAW image: 3024 x 4032 150 | if exr_img.shape[0] < h and exr_img.shape[1] < w: 151 | exr_img = cv2.resize(exr_img, dsize=(w, h)) 152 | elif exr_img.shape[0] < h and exr_img.shape[1] > w: 153 | exr_img = exr_img[:, 0:w] 154 | exr_img = cv2.resize(exr_img, dsize=(w, h)) 155 | elif exr_img.shape[0] > h and exr_img.shape[1] < w: 156 | exr_img = exr_img[0:h, :] 157 | exr_img = cv2.resize(exr_img, dsize=(w, h)) 158 | else: 159 | exr_img = exr_img[0:h, 0:w] 160 | 161 | exr_img = np.clip(exr_img, 0, 1) 162 | 163 | # Sample an illuminant 164 | while True: 165 | wb_vec = np.random.multivariate_normal(illum_mean, illum_cov, 1).squeeze() 166 | if in_hull(np.expand_dims(wb_vec[[0, 2]], axis=0), gt_illum[:, [0, 2]]): 167 | break 168 | print(wb_vec) 169 | 170 | # Compute CST matrix 171 | cst_mat = get_cst_matrix(cmD65, cmA, wb_vec) 172 | 173 | # sRGB to CIE XYZ (Invert XYZ2sRGB) 174 | raw_est = apply_combined_mat(exr_img, np.linalg.inv(xyz2srgb_mat)) 175 | 176 | # CIE XYZ to device RGB (Invert CST) 177 | raw_est = apply_combined_mat(raw_est, np.linalg.inv(cst_mat)) 178 | 179 | # Inverse digital gain (optional) 180 | rgb_gain = np.random.normal(loc=args.rgb_gain_mean, scale=0.1) if args.rgb_gain else 1.0 181 | 182 | # Invert WB 183 | if not args.no_safe_invert: 184 | raw_est = safe_invert_gains(raw_est, wb_vec, rgb_gain) 185 | else: 186 | wb_mat = get_wb_as_matrix(wb_vec) 187 | raw_est *= rgb_gain 188 | raw_est = apply_combined_mat(raw_est, np.linalg.inv(wb_mat)) 189 | 190 | wb_dng = wb_vec 191 | 192 | # Mosaic 193 | raw_est = RGB2bayer(raw_est) # Bayer for S20 FE: G R B G 194 | 195 | # Denormalize 196 | raw_est = raw_est * (white_level - black_level) + black_level 197 | raw_est[raw_est < 0] = 0 198 | raw_est[raw_est > white_level] = white_level 199 | 200 | # Save to DNG 201 | myhex = copy.deepcopy(myhexc) 202 | myhex = update_wb_values(myhex, wb_dng, args.wb_start) 203 | myhex = update_hex_image(myhex, raw_est, args.image_start) 204 | 
205 |             db = unhexlify(myhex)
206 | 
207 |             savepath = os.path.join(args.save_path, savename[:-4] + '.dng')
208 |             with open(savepath, "wb") as fb:
209 |                 fb.write(db)
210 | 
211 |     print('Done')
212 | 
--------------------------------------------------------------------------------
/pipeline/exif_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Author(s):
3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com)
4 | 
5 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
6 | 
7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and limitations under the License.
13 | For conditions of distribution and use, see the accompanying LICENSE.md file.
14 | 
15 | """
16 | 
17 | 
18 | import struct
19 | from fractions import Fraction
20 | from .exif_data_formats import exif_formats
21 | 
22 | 
23 | class Ifd:
24 |     def __init__(self):
25 |         self.offset = -1
26 |         self.tags = {}  # dict; tag number will be key.
27 | 
28 | 
29 | class Tag:
30 |     def __init__(self):
31 |         self.offset = -1
32 |         self.tag_num = -1
33 |         self.data_format = -1
34 |         self.num_values = -1
35 |         self.values = []
36 | 
37 | 
38 | def parse_exif(image_path, verbose=True):
39 |     """
40 |     Parse EXIF tags from a binary file and return IFDs.
41 |     Returned IFDs include EXIF SubIFDs, if any.
42 |     """
43 | 
44 |     def print_(str_):
45 |         if verbose:
46 |             print(str_)
47 | 
48 |     ifds = {}  # dict of pairs; using offset to IFD as key.
49 | 
50 |     with open(image_path, 'rb') as fid:
51 |         fid.seek(0)
52 |         b0 = fid.read(1)
53 |         _ = fid.read(1)
54 |         # byte storage direction (endian):
55 |         # +1: b'M' (big-endian/Motorola)
56 |         # -1: b'I' (little-endian/Intel)
57 |         endian = 1 if b0 == b'M' else -1
58 |         print_("Endian = {}".format(b0))
59 |         endian_sign = "<" if endian == -1 else ">"  # used in struct.unpack
60 |         print_("Endian sign = {}".format(endian_sign))
61 |         _ = fid.read(2)  # 0x002A
62 |         b4_7 = fid.read(4)  # offset to first IFD
63 |         offset_ = struct.unpack(endian_sign + "I", b4_7)[0]
64 |         i = 0
65 |         ifd_offsets = [offset_]
66 |         while len(ifd_offsets) > 0:
67 |             offset_ = ifd_offsets.pop(0)
68 |             # check if IFD at this offset was already parsed before
69 |             if offset_ in ifds:
70 |                 continue
71 |             print_("=========== Parsing IFD # {} ===========".format(i))
72 |             ifd_ = parse_exif_ifd(fid, offset_, endian_sign, verbose)
73 |             ifds.update({ifd_.offset: ifd_})
74 |             print_("=========== Finished parsing IFD # {} ===========".format(i))
75 |             i += 1
76 |             # check SubIFDs; zero or more offsets at tag 0x014a
77 |             sub_ifds_tag_num = int('0x014a', 16)
78 |             if sub_ifds_tag_num in ifd_.tags:
79 |                 ifd_offsets.extend(ifd_.tags[sub_ifds_tag_num].values)
80 |             # check Exif SubIFD; usually one offset at tag 0x8769
81 |             exif_sub_ifd_tag_num = int('0x8769', 16)
82 |             if exif_sub_ifd_tag_num in ifd_.tags:
83 |                 ifd_offsets.extend(ifd_.tags[exif_sub_ifd_tag_num].values)
84 |     return ifds
85 | 
86 | 
87 | def parse_exif_ifd(binary_file, offset_, endian_sign, verbose=True):
88 |     """
89 |     Parse an EXIF IFD.
90 |     """
91 | 
92 |     def print_(str_):
93 |         if verbose:
94 |             print(str_)
95 | 
96 |     ifd = Ifd()
97 |     ifd.offset = offset_
98 |     print_("IFD offset = {}".format(ifd.offset))
99 |     binary_file.seek(offset_)
100 |     num_entries = struct.unpack(endian_sign + "H", binary_file.read(2))[0]  # format H = unsigned short
101 |     print_("Number of entries = {}".format(num_entries))
102 |     for t in range(num_entries):
103 |         print_("---------- Tag {} / {} ----------".format(t + 1, num_entries))
104 |         # parse the next 12-byte tag entry; parse_exif_tag() follows any
105 |         # data offset and seeks back before returning
106 |         tag_ = parse_exif_tag(binary_file, endian_sign, verbose)
107 |         ifd.tags.update({tag_.tag_num: tag_})  # supposedly, EXIF tag numbers won't repeat in the same IFD
108 |     # TODO: check for subsequent IFDs by parsing the next 4 bytes immediately after the IFD
109 |     return ifd
110 | 
111 | 
112 | def parse_exif_tag(binary_file, endian_sign, verbose=True):
113 |     """
114 |     Parse an EXIF tag from a binary file starting from the current file pointer and return the tag values.
115 |     """
116 | 
117 |     def print_(str_):
118 |         if verbose:
119 |             print(str_)
120 | 
121 |     tag = Tag()
122 | 
123 |     # tag offset
124 |     tag.offset = binary_file.tell()
125 |     print_("Tag offset = {}".format(tag.offset))
126 | 
127 |     # tag number
128 |     bytes_ = binary_file.read(2)
129 |     tag.tag_num = struct.unpack(endian_sign + "H", bytes_)[0]  # H: unsigned 2-byte short
130 |     print_("Tag number = {} = 0x{:04x}".format(tag.tag_num, tag.tag_num))
131 | 
132 |     # data format (some value between [1, 12])
133 |     tag.data_format = struct.unpack(endian_sign + "H", binary_file.read(2))[0]  # H: unsigned 2-byte short
134 |     exif_format = exif_formats[tag.data_format]
135 |     print_("Data format = {} = {}".format(tag.data_format, exif_format.name))
136 | 
137 |     # number of components/values
138 |     tag.num_values = struct.unpack(endian_sign + "I", binary_file.read(4))[0]  # I: unsigned 4-byte integer
139 |     print_("Number of values = {}".format(tag.num_values))
140 | 
141 |     # total number of data bytes
142 |     total_bytes = tag.num_values * exif_format.size
143 |     print_("Total bytes = {}".format(total_bytes))
144 | 
145 |     # seek to data offset (if needed)
146 |     data_is_offset = False
147 |     current_offset = binary_file.tell()
148 |     if total_bytes > 4:
149 |         print_("Total bytes > 4; The next 4 bytes are an offset.")
150 |         data_is_offset = True
151 |         data_offset = struct.unpack(endian_sign + "I", binary_file.read(4))[0]
152 |         current_offset = binary_file.tell()
153 |         print_("Current offset = {}".format(current_offset))
154 |         print_("Seeking to data offset = {}".format(data_offset))
155 |         binary_file.seek(data_offset)
156 | 
157 |     # read values
158 |     # TODO: need to distinguish between numeric and text values?
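    # Layout recap for the branches below: when total_bytes <= 4, the value(s)
    # are packed directly into the tag entry's 4-byte value field; when
    # total_bytes > 4, that field held an offset and the file pointer has
    # already been moved to the actual data above (data_is_offset == True).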
159 | if tag.num_values == 1 and total_bytes < 4: 160 | # special case: data is a single value that is less than 4 bytes inside 4 bytes, take care of endian 161 | val_bytes = binary_file.read(4) 162 | # if endian_sign == ">": 163 | # val_bytes = val_bytes[4 - total_bytes:] 164 | # else: 165 | # val_bytes = val_bytes[:total_bytes][::-1] 166 | val_bytes = val_bytes[:total_bytes] 167 | tag.values.append(struct.unpack(endian_sign + exif_format.short_name, val_bytes)[0]) 168 | else: 169 | # read data values one by one 170 | for k in range(tag.num_values): 171 | val_bytes = binary_file.read(exif_format.size) 172 | if exif_format.name == 'unsigned rational': 173 | tag.values.append(eight_bytes_to_fraction(val_bytes, endian_sign, signed=False)) 174 | elif exif_format.name == 'signed rational': 175 | tag.values.append(eight_bytes_to_fraction(val_bytes, endian_sign, signed=True)) 176 | else: 177 | tag.values.append(struct.unpack(endian_sign + exif_format.short_name, val_bytes)[0]) 178 | if total_bytes < 4: 179 | # special case: multiple values less than 4 bytes in total, inside the 4 bytes; skip the extra bytes 180 | binary_file.seek(4 - total_bytes, 1) 181 | 182 | if verbose: 183 | if len(tag.values) > 100: 184 | print_("Got more than 100 values; printing first 100 only:") 185 | print_("Tag values = {}".format(tag.values[:100])) 186 | else: 187 | print_("Tag values = {}".format(tag.values)) 188 | if tag.data_format == 2: 189 | print_("Tag values (string) = {}".format(b''.join(tag.values).decode())) 190 | 191 | if data_is_offset: 192 | # seek back to current position to read the next tag 193 | print_("Seeking back to current offset = {}".format(current_offset)) 194 | binary_file.seek(current_offset) 195 | 196 | return tag 197 | 198 | 199 | def get_tag_values_from_ifds(tag_num, ifds): 200 | """ 201 | Return values of a tag, if found in ifds. Return None otherwise. 202 | Assuming any tag exists only once in all ifds. 203 | """ 204 | for key, ifd in ifds.items(): 205 | if tag_num in ifd.tags: 206 | return ifd.tags[tag_num].values 207 | return None 208 | 209 | 210 | def eight_bytes_to_fraction(eight_bytes, endian_sign, signed): 211 | """ 212 | Convert 8-byte array into a Fraction. Take care of endian and sign. 213 | """ 214 | if signed: 215 | num = struct.unpack(endian_sign + "l", eight_bytes[:4])[0] 216 | den = struct.unpack(endian_sign + "l", eight_bytes[4:])[0] 217 | else: 218 | num = struct.unpack(endian_sign + "L", eight_bytes[:4])[0] 219 | den = struct.unpack(endian_sign + "L", eight_bytes[4:])[0] 220 | den = den if den != 0 else 1 221 | return Fraction(num, den) 222 | -------------------------------------------------------------------------------- /pipeline/pipeline.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author(s): 3 | Abdelrahman Abdelhamed (a.abdelhamed@samsung.com) 4 | 5 | Copyright (c) 2023 Samsung Electronics Co., Ltd. 6 | 7 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0 10 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 11 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and limitations under the License. 
13 | For conditions of distribution and use, see the accompanying LICENSE.md file.
14 | 
15 | """
16 | 
17 | from .pipeline_utils import get_visible_raw_image, get_metadata, normalize, white_balance, demosaic, \
18 |     denoise, apply_color_space_transform, transform_xyz_to_srgb, apply_gamma, apply_tone_map, fix_orientation, \
19 |     fix_missing_params, lens_shading_correction, active_area_cropping, lens_distortion_correction, default_cropping, \
20 |     resize, apply_local_tone_map, transform_xyz_to_prophoto, transform_prophoto_to_srgb
21 | 
22 | 
23 | def run_module(image, module, built_in_function, built_in_args):
24 |     if type(module) is list and len(module) == 2:
25 |         image = module[0](image, **module[1])
26 |     elif type(module) is str:
27 |         image = built_in_function(image, **built_in_args)
28 |     else:
29 |         raise ValueError('Invalid input module.')
30 |     return image
31 | 
32 | 
33 | def linearization_stage(current_image, metadata):
34 |     linearization_table = metadata['linearization_table']
35 |     if linearization_table is not None:
36 |         print('Linearization table found. Not handled.')
37 |         # TODO
38 |     return current_image
39 | 
40 | 
41 | def lens_shading_correction_stage(current_image, metadata, clip):
42 |     gain_map_opcode = None
43 |     if 'opcode_lists' in metadata:
44 |         if 51009 in metadata['opcode_lists']:
45 |             opcode_list_2 = metadata['opcode_lists'][51009]
46 |             gain_map_opcode = [opcode_list_2[opcode_id] for opcode_id in opcode_list_2.keys() if 9 <= opcode_id <= 9.3]
47 | 
48 |     lsc_map = None
49 |     if 'lsc_map' in metadata:
50 |         lsc_map = metadata['lsc_map']
51 | 
52 |     if gain_map_opcode:  # a non-empty list of GainMap opcodes
53 |         current_image = lens_shading_correction(current_image, gain_map_opcode=gain_map_opcode,
54 |                                                 bayer_pattern=metadata['cfa_pattern'], clip=clip)
55 |     elif lsc_map is not None:
56 |         current_image = lens_shading_correction(current_image, gain_map_opcode=None,
57 |                                                 bayer_pattern=metadata['cfa_pattern'], gain_map=metadata['lsc_map'],
58 |                                                 clip=clip)
59 |     return current_image
60 | 
61 | 
62 | def lens_distortion_correction_stage(current_image, metadata, clip):
63 |     if 'opcode_lists' in metadata:
64 |         if 51022 in metadata['opcode_lists']:
65 |             opcode_list_3 = metadata['opcode_lists'][51022]
66 |             rect_warp_opcode = opcode_list_3[1]
67 |             current_image = lens_distortion_correction(current_image, rect_warp_opcode=rect_warp_opcode,
68 |                                                        clip=clip)
69 |     return current_image
70 | 
71 | 
72 | def run_pipeline(image_or_path, params=None, metadata=None, stages=None, clip=True):
73 |     if type(image_or_path) == str:
74 |         image_path = image_or_path
75 |         # raw image data
76 |         raw_image = get_visible_raw_image(image_path)
77 |         # metadata
78 |         metadata = get_metadata(image_path)
79 |     else:
80 |         raw_image = image_or_path.copy()
81 |         # must provide metadata
82 |         if metadata is None:
83 |             raise ValueError("Must provide metadata when providing image data in first argument.")
84 | 
85 |     # take a copy of params as it will be modified below (a shallow copy suffices; only top-level keys change)
86 |     params = params.copy()
87 | 
88 |     # fill any missing parameters with default values
89 |     params = fix_missing_params(params)
90 | 
91 |     '''
92 |     Function performed at each stage. Follows this format:
93 |     * {'stage_name': [function_name, function_params]}
94 |     * Assumes the function takes in `current_image` as the first parameter.
95 | ''' 96 | operation_by_stage = { 97 | 'active_area_cropping': [active_area_cropping, {'active_area': metadata['active_area']}], 98 | 'default_cropping': [default_cropping, {'default_crop_origin': metadata['default_crop_origin'], 99 | 'default_crop_size': metadata['default_crop_size']}], 100 | 'linearization': [linearization_stage, {'metadata': metadata}], 101 | 'normal': [normalize, { 102 | 'black_level': metadata['black_level'], 103 | 'white_level': metadata['white_level'], 104 | 'black_level_delta_h': metadata['black_level_delta_h'], 105 | 'black_level_delta_v': metadata['black_level_delta_v'], 106 | 'clip': clip}], 107 | 'lens_shading_correction': [lens_shading_correction_stage, {'metadata': metadata, 'clip': clip}], 108 | 'white_balance': [run_module, { 109 | 'module': params['white_balancer'], 110 | 'built_in_function': white_balance, 111 | 'built_in_args': { 112 | 'alg_type': params['white_balancer'], 113 | 'as_shot_neutral': metadata['as_shot_neutral'], 114 | 'cfa_pattern': metadata['cfa_pattern'], 115 | 'clip': clip 116 | } 117 | }], 118 | 'demosaic': [run_module, { 119 | 'module': params['demosaicer'], 120 | 'built_in_function': demosaic, 121 | 'built_in_args': { 122 | 'cfa_pattern': metadata['cfa_pattern'], 123 | 'output_channel_order': 'RGB', 124 | 'alg_type': params['demosaicer'], 125 | } 126 | }], 127 | 'lens_distortion_correction': [lens_distortion_correction_stage, {'metadata': metadata, 'clip': clip}], 128 | 'denoise': [run_module, { 129 | 'module': params['denoiser'], 130 | 'built_in_function': denoise, 131 | 'built_in_args': { 132 | 'alg_type': params['denoiser'], 133 | 'cfa_pattern': metadata['cfa_pattern'] 134 | } 135 | }], 136 | 'xyz': [apply_color_space_transform, { 137 | 'color_matrix_1': metadata['color_matrix_1'], 138 | 'color_matrix_2': metadata['color_matrix_2'], 139 | 'illuminant': metadata['as_shot_neutral'] 140 | }], 141 | 'prophoto': [transform_xyz_to_prophoto, {}], 142 | 'srgb': [transform_prophoto_to_srgb, {}], 143 | 'xyz2srgb': [transform_xyz_to_srgb, {}], 144 | 'fix_orient': [fix_orientation, {'orientation': metadata['orientation']}], 145 | 'gamma': [apply_gamma, {}], 146 | 'tone': [run_module, { 147 | 'module': params['tone_curve'], 148 | 'built_in_function': apply_tone_map, 149 | 'built_in_args': { 150 | 'tone_curve': params['tone_curve'] 151 | } 152 | }], 153 | 'resize': [resize, {'target_size': (raw_image.shape[1], raw_image.shape[0])}], 154 | 'local_tone_mapping': [run_module, { 155 | 'module': params['local_tone_mapping'], 156 | 'built_in_function': apply_local_tone_map, 157 | 'built_in_args': { 158 | 'alg_type': params['local_tone_mapping'], 159 | 'channel_order': 'RGB', 160 | 'clahe_clip_limit': params['clahe_clip_limit'], 161 | 'clahe_grid_size': params['clahe_grid_size'] 162 | } 163 | }], 164 | } 165 | 166 | if not stages: 167 | stages = ['raw', 'active_area_cropping', 'linearization', 'normal', 'lens_shading_correction', 'white_balance', 168 | 'demosaic', 'lens_distortion_correction', 'denoise', 'xyz', 'prophoto', 'srgb', 'fix_orient', 'gamma', 169 | 'tone', 'local_tone_mapping', 'default_cropping', 'resize'] 170 | 171 | input_stage = params['input_stage'] 172 | output_stage = params['output_stage'] 173 | if input_stage not in stages \ 174 | or output_stage not in stages \ 175 | or stages.index(input_stage) > stages.index(output_stage): 176 | raise ValueError('Invalid input/output stage: input_stage = {}, output_stage = {}'.format(input_stage, 177 | output_stage)) 178 | # Handle transforming directly from xyz to srgb 179 | srgb_idx = 
stages.index('srgb') if 'srgb' in stages else -1
180 |     if srgb_idx > 0 and stages[srgb_idx - 1] == 'xyz':
181 |         stages = stages[:srgb_idx] + ['xyz2srgb'] + stages[srgb_idx + 1:]  # avoid mutating the caller's list
182 | 
183 |     input_idx = stages.index(input_stage)
184 |     output_idx = stages.index(output_stage)
185 |     current_image = raw_image
186 | 
187 |     for stage in stages[input_idx + 1:output_idx + 1]:
188 |         operation = operation_by_stage[stage]
189 |         current_image = operation[0](current_image, **operation[1])
190 |     return current_image
191 | 
--------------------------------------------------------------------------------
/test_denoise.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Samsung Electronics Co., Ltd.
3 | 
4 | Author(s):
5 | Abhijith Punnappurath (abhijith.p@samsung.com)
6 | Luxi Zhao (lucy.zhao@samsung.com)
7 | 
8 | Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
9 | you may not use this file except in compliance with the License.
10 | You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0
11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
12 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and limitations under the License.
14 | For conditions of distribution and use, see the accompanying LICENSE.md file.
15 | 
16 | """
17 | 
18 | from model_archs.restormer import Restormer
19 | from utils.general_utils import save_args, get_git_info, str2int_arr
20 | import argparse
21 | import os, time, datetime
22 | import numpy as np
23 | from skimage.measure import compare_psnr, compare_ssim
24 | import cv2
25 | import torch
26 | from pipeline.pipeline_utils import get_metadata, get_visible_raw_image
27 | from utils.general_utils import check_dir
28 | from data_preparation.data_generator_denoise import post_process_stacked_images_s20fe, stack_bayer_norm_gamma_s20fe, \
29 |     linearize_stacked_images_s20fe, post_process_stacked_images_s20fe_to_srgb
30 | import glob
31 | 
32 | 
33 | def to_tensor(img):
34 |     img = torch.from_numpy(img.astype(np.float32))
35 |     img = img.unsqueeze(0).permute(0, 3, 1, 2)
36 |     return img
37 | 
38 | 
39 | def from_tensor(img):
40 |     img = img.permute(0, 2, 3, 1)
41 |     img = img.cpu().detach().numpy()
42 |     return np.squeeze(img)
43 | 
44 | 
45 | def log(*args, **kwargs):
46 |     print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S:"), *args, **kwargs)
47 | 
48 | 
49 | def save_result(result, path):
50 |     path = path if path.find('.') != -1 else path + '.png'
51 |     ext = os.path.splitext(path)[-1]
52 |     if ext in ('.txt', '.dlm'):
53 |         np.savetxt(path, result, fmt='%2.4f')
54 |     else:
55 |         cv2.imwrite(path, result)
56 | 
57 | 
58 | if __name__ == '__main__':
59 | 
60 |     parser = argparse.ArgumentParser()
61 |     parser.add_argument('--set_dir', default='real_dataset', type=str, help='directory of test dataset')
62 |     parser.add_argument('--set_name', default='iso_50', type=str, help='name of test dataset')
63 |     parser.add_argument('--model_dir', default='no_wb_model', type=str, help='directory of the model')
64 |     parser.add_argument('--model_name', default='bestmodel.pt', type=str, help='name of the model')
65 |     parser.add_argument('--restormer_dim', default=8, type=int, help='Restormer dim.')
66 |     parser.add_argument('--result_dir', default='results', type=str, help='directory of results')
parser.add_argument('--save_num', default=0, type=int, help='number of images to save') 68 | parser.add_argument('--save_visual', action='store_true', help='save save_num number of images') 69 | parser.add_argument('--save_visual_only', action='store_true', help='only save visual results') 70 | parser.add_argument('--save_fns', default='', type=str, 71 | help='path to a file specifying which images to generate visual results for. ' 72 | 'Used only when save_visual_only is True.') 73 | 74 | parser.add_argument('--exp_dir', default='./', type=str, help='directory to save experiment data to') 75 | args = parser.parse_args() 76 | 77 | to_save = [] 78 | if args.save_visual_only: 79 | with open(args.save_fns, 'r') as f: 80 | lines = f.readlines() 81 | lines = [l.strip() for l in lines] 82 | to_save = lines 83 | 84 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 85 | 86 | model = Restormer(inp_channels=4, 87 | out_channels=4, 88 | dim=args.restormer_dim, 89 | num_blocks=[4, 6, 6, 8], 90 | num_refinement_blocks=4, 91 | heads=[1, 2, 4, 8], 92 | ffn_expansion_factor=2.66, 93 | bias=False, 94 | LayerNorm_type='BiasFree', 95 | dual_pixel_task=False) 96 | 97 | model.load_state_dict(torch.load(os.path.join(args.exp_dir, args.model_dir, 'models', args.model_name))) 98 | model = model.to(device) 99 | model.eval() # Set model to evaluate mode 100 | print('model loaded') 101 | 102 | ### File IO 103 | set_name = args.set_name 104 | fullsavepath = os.path.join(args.result_dir, 105 | os.path.basename(args.set_dir) + '_' + set_name + '_' + os.path.basename( 106 | args.model_dir) + '_' + args.model_name[:-3]) 107 | check_dir(args.result_dir) 108 | check_dir(fullsavepath) 109 | save_args(args, fullsavepath) 110 | 111 | with open(os.path.join(fullsavepath, 'test_info.txt'), 'w') as f: 112 | f.write(get_git_info()) 113 | 114 | psnrs = [] 115 | ssims = [] 116 | 117 | if not args.save_visual_only: 118 | fsrgb = open(os.path.join(fullsavepath, "results.txt"), "w") 119 | 120 | ### Inference 121 | test_dir = args.set_dir 122 | for c, dng_path in enumerate(sorted(glob.glob(os.path.join(args.set_dir, 'dng', set_name, '*.dng')))): 123 | im = os.path.basename(dng_path) 124 | if args.save_visual_only: 125 | tag = im[:-4] 126 | if tag not in to_save: 127 | print('Skipping ', tag) 128 | continue 129 | 130 | print('Processing ' + str(c + 1)) 131 | if set_name == 'iso_50': 132 | x = cv2.imread(os.path.join(args.set_dir, 'clean_raw', im[:-4] + '.png'), cv2.IMREAD_UNCHANGED) 133 | else: 134 | x = get_visible_raw_image(os.path.join(args.set_dir, 'dng', set_name, im)) 135 | 136 | meta_path = os.path.join(args.set_dir, 'dng', set_name, im) 137 | meta_data = get_metadata(meta_path) 138 | 139 | x_gamma = stack_bayer_norm_gamma_s20fe(x, clip_bot=False) 140 | x_gamma = to_tensor(x_gamma) 141 | 142 | y = np.array(cv2.imread(os.path.join(args.set_dir, 'clean_raw', im[:-4] + '.png'), cv2.IMREAD_UNCHANGED)) 143 | y_gamma = stack_bayer_norm_gamma_s20fe(y, clip_bot=True) 144 | y_gamma = np.clip(y_gamma, 0, 1) 145 | 146 | x_gamma = x_gamma.to(device) 147 | start_time = time.time() 148 | with torch.no_grad(): 149 | pred_gamma = model(x_gamma) # inference 150 | elapsed_time = time.time() - start_time 151 | pred_gamma = pred_gamma.cpu() 152 | 153 | pred_gamma = from_tensor(pred_gamma) 154 | pred_gamma = np.clip(np.squeeze(pred_gamma), 0, 1) 155 | 156 | y_lin = linearize_stacked_images_s20fe(y_gamma) 157 | pred_lin = linearize_stacked_images_s20fe(pred_gamma) 158 | psnr_x = compare_psnr(y_lin, pred_lin, data_range=1) 159 | 
ssim_x = compare_ssim(y_lin, pred_lin, multichannel=True, data_range=1) 160 | psnrs.append(psnr_x) 161 | ssims.append(ssim_x) 162 | 163 | log('{0:10s} \n PSNR = {1:2.2f}dB, SSIM = {2:1.4f}, Time = {3:2.4f} seconds'.format(im, psnr_x, 164 | ssim_x, elapsed_time)) 165 | if not args.save_visual_only: 166 | fsrgb.write('{0:10s} : PSNR = {1:2.2f}dB, SSIM = {2:1.4f}, Time = {3:2.4f} seconds \n'.format(im, psnr_x, 167 | ssim_x, 168 | elapsed_time)) 169 | 170 | if (args.save_visual and c < args.save_num) or args.save_visual_only: 171 | x_gamma = from_tensor(x_gamma).squeeze() # drop the batch dimension, assume bs = 1 172 | # 4-channel images 173 | y_gamma_ = (255 * post_process_stacked_images_s20fe(y_gamma, meta_data['cfa_pattern'])).astype('uint8') 174 | pred_gamma_ = (255 * post_process_stacked_images_s20fe(pred_gamma, meta_data['cfa_pattern'])).astype('uint8') 175 | x_gamma_ = (255 * post_process_stacked_images_s20fe(x_gamma, meta_data['cfa_pattern'])).astype('uint8') 176 | 177 | name, ext = os.path.splitext(im) 178 | save_result(x_gamma_[:, :, [2, 1, 0]], 179 | path=os.path.join(fullsavepath, name + '_input.png')) 180 | save_result(pred_gamma_[:, :, [2, 1, 0]], 181 | path=os.path.join(fullsavepath, name + f'_output_psnr{np.round(psnr_x, 4)}_ssim{np.round(ssim_x, 4)}.png')) 182 | save_result(y_gamma_[:, :, [2, 1, 0]], 183 | path=os.path.join(fullsavepath, name + '_target.png')) 184 | 185 | y_srgb = (255 * post_process_stacked_images_s20fe_to_srgb(y_gamma, meta_data)).astype('uint8') 186 | pred_srgb = (255 * post_process_stacked_images_s20fe_to_srgb(pred_gamma, meta_data)).astype('uint8') 187 | x_srgb = (255 * post_process_stacked_images_s20fe_to_srgb(x_gamma, meta_data)).astype('uint8') 188 | 189 | save_result(x_srgb[:, :, [2, 1, 0]], 190 | path=os.path.join(fullsavepath, name + '_input_srgb.png')) 191 | save_result(pred_srgb[:, :, [2, 1, 0]], 192 | path=os.path.join(fullsavepath, name + f'_output_srgb_psnr{np.round(psnr_x, 4)}_ssim{np.round(ssim_x, 4)}.png')) 193 | save_result(y_srgb[:, :, [2, 1, 0]], 194 | path=os.path.join(fullsavepath, name + '_target_srgb.png')) 195 | 196 | psnr_avg = np.mean(psnrs) 197 | ssim_avg = np.mean(ssims) 198 | psnrs.append(psnr_avg) 199 | ssims.append(ssim_avg) 200 | 201 | print() 202 | log('Dataset: {0:10s} \n Avg. PSNR = {1:2.4f}dB, Avg. SSIM = {2:1.4f}'.format(set_name, psnr_avg, ssim_avg)) 203 | 204 | if not args.save_visual_only: 205 | fsrgb.write( 206 | '\nDataset: {0:10s} \n Avg. PSNR = {1:2.4f}dB, Avg. SSIM = {2:1.4f}'.format(set_name, psnr_avg, ssim_avg)) 207 | fsrgb.close() 208 | --------------------------------------------------------------------------------