├── DCDP-LDM ├── README.md ├── __init__.py ├── __pycache__ │ └── model_loader.cpython-38.pyc ├── bkse │ ├── LICENSE │ ├── README.md │ ├── data │ │ ├── GOPRO_dataset.py │ │ ├── REDS_dataset.py │ │ ├── __init__.py │ │ ├── data_sampler.py │ │ ├── mix_dataset.py │ │ └── util.py │ ├── data_augmentation.py │ ├── domain_specific_deblur.py │ ├── experiments │ │ └── pretrained │ │ │ └── kernel.pth │ ├── generate_blur.py │ ├── generic_deblur.py │ ├── imgs │ │ ├── blur_faces │ │ │ └── face01.png │ │ ├── blur_imgs │ │ │ ├── blur1.png │ │ │ └── blur2.png │ │ ├── results │ │ │ ├── augmentation.jpg │ │ │ ├── domain_specific_deblur.jpg │ │ │ ├── general_deblurring.jpg │ │ │ ├── generate_blur.jpg │ │ │ └── kernel_encoding_wGT.png │ │ ├── sharp_imgs │ │ │ └── mushishi.png │ │ └── teaser.jpg │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-38.pyc │ │ │ └── arch_util.cpython-38.pyc │ │ ├── arch_util.py │ │ ├── backbones │ │ │ ├── __pycache__ │ │ │ │ ├── resnet.cpython-38.pyc │ │ │ │ └── unet_parts.cpython-38.pyc │ │ │ ├── resnet.py │ │ │ ├── skip │ │ │ │ ├── concat.py │ │ │ │ ├── downsampler.py │ │ │ │ ├── non_local_dot_product.py │ │ │ │ ├── skip.py │ │ │ │ └── util.py │ │ │ └── unet_parts.py │ │ ├── deblurring │ │ │ ├── image_deblur.py │ │ │ └── joint_deblur.py │ │ ├── dips.py │ │ ├── dsd │ │ │ ├── bicubic.py │ │ │ ├── dsd.py │ │ │ ├── dsd_stylegan.py │ │ │ ├── dsd_stylegan2.py │ │ │ ├── op │ │ │ │ ├── __init__.py │ │ │ │ ├── fused_act.py │ │ │ │ ├── fused_bias_act.cpp │ │ │ │ ├── fused_bias_act_kernel.cu │ │ │ │ ├── upfirdn2d.cpp │ │ │ │ ├── upfirdn2d.py │ │ │ │ └── upfirdn2d_kernel.cu │ │ │ ├── spherical_optimizer.py │ │ │ ├── stylegan.py │ │ │ └── stylegan2.py │ │ ├── kernel_encoding │ │ │ ├── __pycache__ │ │ │ │ └── kernel_wizard.cpython-38.pyc │ │ │ ├── base_model.py │ │ │ ├── image_base_model.py │ │ │ └── kernel_wizard.py │ │ ├── losses │ │ │ ├── charbonnier_loss.py │ │ │ ├── dsd_loss.py │ │ │ ├── gan_loss.py │ │ │ ├── hyper_laplacian_penalty.py │ │ │ ├── perceptual_loss.py │ │ │ └── ssim_loss.py │ │ └── lr_scheduler.py │ ├── options │ │ ├── __init__.py │ │ ├── data_augmentation │ │ │ └── default.yml │ │ ├── domain_specific_deblur │ │ │ ├── stylegan.yml │ │ │ └── stylegan2.yml │ │ ├── generate_blur │ │ │ └── default.yml │ │ ├── generic_deblur │ │ │ └── default.yml │ │ ├── kernel_encoding │ │ │ ├── GOPRO │ │ │ │ ├── wVAE.yml │ │ │ │ └── woVAE.yml │ │ │ ├── REDS │ │ │ │ └── woVAE.yml │ │ │ └── mix │ │ │ │ └── woVAE.yml │ │ └── options.py │ ├── requirements.txt │ ├── scripts │ │ ├── create_lmdb.py │ │ └── download_dataset.py │ ├── train.py │ ├── train_script.sh │ └── utils │ │ ├── __init__.py │ │ └── util.py ├── configs │ ├── autoencoder │ │ ├── autoencoder_kl_16x16x16.yaml │ │ ├── autoencoder_kl_32x32x4.yaml │ │ ├── autoencoder_kl_64x64x3.yaml │ │ └── autoencoder_kl_8x8x64.yaml │ ├── latent-diffusion │ │ ├── celebahq-ldm-vq-4.yaml │ │ ├── cin-ldm-vq-f8.yaml │ │ ├── cin256-v2.yaml │ │ ├── ffhq-ldm-vq-4.yaml │ │ ├── lsun_churches-ldm-kl-8.yaml │ │ └── txt2img-1p4B-eval.yaml │ ├── retrieval-augmented-diffusion │ │ └── 768x768.yaml │ ├── stable-diffusion │ │ └── v1-inference.yaml │ └── tasks │ │ ├── gaussian_deblur_config.yaml │ │ ├── inpainting_config.yaml │ │ ├── motion_deblur_config.yaml │ │ ├── nonlinear_deblur_ImageNet_config.yaml │ │ ├── nonlinear_deblur_config.yaml │ │ └── super_resolution_config.yaml ├── data │ ├── ImageNet_samples │ │ ├── ILSVRC2012_val_00000001.JPEG │ │ ├── ILSVRC2012_val_00000002.JPEG │ │ ├── ILSVRC2012_val_00000003.JPEG │ │ ├── 
ILSVRC2012_val_00000004.JPEG │ │ ├── ILSVRC2012_val_00000005.JPEG │ │ ├── ILSVRC2012_val_00000006.JPEG │ │ ├── ILSVRC2012_val_00000007.JPEG │ │ ├── ILSVRC2012_val_00000008.JPEG │ │ ├── ILSVRC2012_val_00000009.JPEG │ │ └── ILSVRC2012_val_00000010.JPEG │ ├── __pycache__ │ │ └── dataloader.cpython-38.pyc │ ├── dataloader.py │ └── samples │ │ ├── 00000.png │ │ ├── 00001.png │ │ ├── 00002.png │ │ ├── 00003.png │ │ ├── 00004.png │ │ ├── 00005.png │ │ ├── 00006.png │ │ ├── 00007.png │ │ ├── 00008.png │ │ └── 00009.png ├── dcdp.py ├── dcdp_imagenet.py ├── environment.yaml ├── ldm │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ └── util.cpython-38.pyc │ ├── data │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-38.pyc │ │ │ └── lsun.cpython-38.pyc │ │ ├── base.py │ │ ├── imagenet.py │ │ └── lsun.py │ ├── lr_scheduler.py │ ├── models │ │ ├── __pycache__ │ │ │ └── autoencoder.cpython-38.pyc │ │ ├── autoencoder.py │ │ └── diffusion │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── ddim.cpython-38.pyc │ │ │ ├── ddpm.cpython-38.pyc │ │ │ └── plms.cpython-38.pyc │ │ │ ├── classifier.py │ │ │ ├── ddim.py │ │ │ ├── ddim_new.py │ │ │ ├── ddpm.py │ │ │ ├── dpm_solver │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── dpm_solver.cpython-38.pyc │ │ │ │ └── sampler.cpython-38.pyc │ │ │ ├── dpm_solver.py │ │ │ └── sampler.py │ │ │ ├── plms.py │ │ │ └── utils.py │ ├── modules │ │ ├── __pycache__ │ │ │ ├── attention.cpython-38.pyc │ │ │ └── ema.cpython-38.pyc │ │ ├── attention.py │ │ ├── diffusionmodules │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── model.cpython-38.pyc │ │ │ │ ├── openaimodel.cpython-38.pyc │ │ │ │ └── util.cpython-38.pyc │ │ │ ├── model.py │ │ │ ├── openaimodel.py │ │ │ └── util.py │ │ ├── distributions │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ └── distributions.cpython-38.pyc │ │ │ └── distributions.py │ │ ├── ema.py │ │ ├── encoders │ │ │ ├── __init__.py │ │ │ └── modules.py │ │ ├── image_degradation │ │ │ ├── __init__.py │ │ │ ├── bsrgan.py │ │ │ ├── bsrgan_light.py │ │ │ ├── utils │ │ │ │ └── test.png │ │ │ └── utils_image.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── contperceptual.py │ │ │ └── vqperceptual.py │ │ └── x_transformer.py │ └── util.py ├── ldm_inverse │ ├── __pycache__ │ │ ├── condition_methods.cpython-38.pyc │ │ └── measurements.cpython-38.pyc │ ├── condition_methods.py │ └── measurements.py ├── model_loader.py ├── motionblur │ ├── README.md │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ └── motionblur.cpython-38.pyc │ ├── environment.yaml │ ├── example_kernel │ │ ├── kernel0.png │ │ ├── kernel100.png │ │ ├── kernel25.png │ │ ├── kernel50.png │ │ └── kernel75.png │ ├── images │ │ ├── flag.png │ │ ├── flagBLURRED.png │ │ └── moon.png │ ├── intensity.png │ └── motionblur.py ├── run_latent_sapce_dcdp.sh ├── scripts │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ └── utils.cpython-38.pyc │ ├── download_first_stages.sh │ ├── download_models.sh │ ├── img2img.py │ ├── inpaint.py │ ├── inv.py │ ├── knn2img.py │ ├── medical_finetuning.py │ ├── sample_diffusion.py │ ├── tests │ │ └── test_watermark.py │ ├── train_searcher.py │ ├── txt2img.py │ └── utils.py ├── src │ ├── clip │ │ ├── clip.egg-info │ │ │ └── PKG-INFO │ │ ├── clip │ │ │ ├── __init__.py │ │ │ ├── clip.py │ │ │ ├── model.py │ │ │ └── simple_tokenizer.py │ │ ├── 
hubconf.py │ │ ├── setup.py │ │ └── tests │ │ │ └── test_consistency.py │ └── taming-transformers │ │ ├── main.py │ │ ├── scripts │ │ ├── extract_depth.py │ │ ├── extract_segmentation.py │ │ ├── extract_submodel.py │ │ ├── make_samples.py │ │ ├── make_scene_samples.py │ │ ├── sample_conditional.py │ │ └── sample_fast.py │ │ ├── setup.py │ │ ├── taming │ │ ├── data │ │ │ ├── ade20k.py │ │ │ ├── annotated_objects_coco.py │ │ │ ├── annotated_objects_dataset.py │ │ │ ├── annotated_objects_open_images.py │ │ │ ├── base.py │ │ │ ├── coco.py │ │ │ ├── conditional_builder │ │ │ │ ├── objects_bbox.py │ │ │ │ ├── objects_center_points.py │ │ │ │ └── utils.py │ │ │ ├── custom.py │ │ │ ├── faceshq.py │ │ │ ├── helper_types.py │ │ │ ├── image_transforms.py │ │ │ ├── imagenet.py │ │ │ ├── open_images_helper.py │ │ │ ├── sflckr.py │ │ │ └── utils.py │ │ ├── lr_scheduler.py │ │ ├── models │ │ │ ├── cond_transformer.py │ │ │ ├── dummy_cond_stage.py │ │ │ └── vqgan.py │ │ ├── modules │ │ │ ├── diffusionmodules │ │ │ │ └── model.py │ │ │ ├── discriminator │ │ │ │ └── model.py │ │ │ ├── losses │ │ │ │ ├── __init__.py │ │ │ │ ├── lpips.py │ │ │ │ ├── segmentation.py │ │ │ │ └── vqperceptual.py │ │ │ ├── misc │ │ │ │ └── coord.py │ │ │ ├── transformer │ │ │ │ ├── mingpt.py │ │ │ │ └── permuter.py │ │ │ ├── util.py │ │ │ └── vqvae │ │ │ │ ├── __pycache__ │ │ │ │ └── quantize.cpython-38.pyc │ │ │ │ └── quantize.py │ │ └── util.py │ │ └── taming_transformers.egg-info │ │ └── PKG-INFO └── util │ ├── __pycache__ │ ├── fastmri_utils.cpython-38.pyc │ ├── img_utils.cpython-38.pyc │ └── resizer.cpython-38.pyc │ ├── compute_metric.py │ ├── fastmri_utils.py │ ├── img_utils.py │ ├── logger.py │ ├── resizer.py │ └── tools.py ├── README.md ├── bkse ├── LICENSE ├── README.md ├── data │ ├── GOPRO_dataset.py │ ├── REDS_dataset.py │ ├── __init__.py │ ├── data_sampler.py │ ├── mix_dataset.py │ └── util.py ├── data_augmentation.py ├── domain_specific_deblur.py ├── experiments │ └── pretrained │ │ └── kernel.pth ├── generate_blur.py ├── generic_deblur.py ├── imgs │ ├── blur_faces │ │ └── face01.png │ ├── blur_imgs │ │ ├── blur1.png │ │ └── blur2.png │ ├── results │ │ ├── augmentation.jpg │ │ ├── domain_specific_deblur.jpg │ │ ├── general_deblurring.jpg │ │ ├── generate_blur.jpg │ │ └── kernel_encoding_wGT.png │ ├── sharp_imgs │ │ └── mushishi.png │ └── teaser.jpg ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ └── arch_util.cpython-38.pyc │ ├── arch_util.py │ ├── backbones │ │ ├── __pycache__ │ │ │ ├── resnet.cpython-38.pyc │ │ │ └── unet_parts.cpython-38.pyc │ │ ├── resnet.py │ │ ├── skip │ │ │ ├── concat.py │ │ │ ├── downsampler.py │ │ │ ├── non_local_dot_product.py │ │ │ ├── skip.py │ │ │ └── util.py │ │ └── unet_parts.py │ ├── deblurring │ │ ├── image_deblur.py │ │ └── joint_deblur.py │ ├── dips.py │ ├── dsd │ │ ├── bicubic.py │ │ ├── dsd.py │ │ ├── dsd_stylegan.py │ │ ├── dsd_stylegan2.py │ │ ├── op │ │ │ ├── __init__.py │ │ │ ├── fused_act.py │ │ │ ├── fused_bias_act.cpp │ │ │ ├── fused_bias_act_kernel.cu │ │ │ ├── upfirdn2d.cpp │ │ │ ├── upfirdn2d.py │ │ │ └── upfirdn2d_kernel.cu │ │ ├── spherical_optimizer.py │ │ ├── stylegan.py │ │ └── stylegan2.py │ ├── kernel_encoding │ │ ├── __pycache__ │ │ │ └── kernel_wizard.cpython-38.pyc │ │ ├── base_model.py │ │ ├── image_base_model.py │ │ └── kernel_wizard.py │ ├── losses │ │ ├── charbonnier_loss.py │ │ ├── dsd_loss.py │ │ ├── gan_loss.py │ │ ├── hyper_laplacian_penalty.py │ │ ├── perceptual_loss.py │ │ └── ssim_loss.py │ └── lr_scheduler.py ├── 
options │ ├── __init__.py │ ├── data_augmentation │ │ └── default.yml │ ├── domain_specific_deblur │ │ ├── stylegan.yml │ │ └── stylegan2.yml │ ├── generate_blur │ │ └── default.yml │ ├── generic_deblur │ │ └── default.yml │ ├── kernel_encoding │ │ ├── GOPRO │ │ │ ├── wVAE.yml │ │ │ └── woVAE.yml │ │ ├── REDS │ │ │ └── woVAE.yml │ │ └── mix │ │ │ └── woVAE.yml │ └── options.py ├── requirements.txt ├── scripts │ ├── create_lmdb.py │ └── download_dataset.py ├── train.py ├── train_script.sh └── utils │ ├── __init__.py │ └── util.py ├── data ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── __init__.cpython-39.pyc │ ├── dataloader.cpython-38.pyc │ └── dataloader.cpython-39.pyc ├── dataloader.py └── ffhq │ ├── 00000.png │ ├── 00001.png │ ├── 00002.png │ ├── 00003.png │ └── 00004.png ├── dcdp.py ├── docs ├── DPUR_algorithm.jpg ├── IMSI_Poster.jpg ├── Results.pdf ├── better_consistency.jpg └── data_fidelity_reconstruction.jpg ├── guided_diffusion ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── condition_methods.cpython-38.pyc │ ├── fp16_util.cpython-38.pyc │ ├── gaussian_diffusion.cpython-38.pyc │ ├── measurements.cpython-38.pyc │ ├── nn.cpython-38.pyc │ ├── posterior_mean_variance.cpython-38.pyc │ └── unet.cpython-38.pyc ├── condition_methods.py ├── fp16_util.py ├── gaussian_diffusion.py ├── measurements.py ├── nn.py ├── posterior_mean_variance.py └── unet.py ├── model_configurations ├── model_config_ImageNet.yaml └── model_config_ffhq.yaml ├── purification_configurations ├── purification_config_gaussian_deblur.yaml ├── purification_config_inpainting.yaml ├── purification_config_motion_deblur.yaml ├── purification_config_nonlinear_deblur.yaml ├── purification_config_phase_retrieval.yaml └── purification_config_super_resolution.yaml ├── requirements.txt ├── run ├── gaussian_deblur.sh ├── motion_deblur.sh ├── nonlinear_deblur.sh ├── phase_retrieval.sh └── super_resolution.sh ├── task_configurations ├── gaussian_deblur_config.yaml ├── inpainting_config.yaml ├── motion_deblur_config.yaml ├── nonlinear_deblur_config.yaml ├── phase_retrieval_config.yaml └── super_resolution_config.yaml └── util ├── __pycache__ ├── fastmri_utils.cpython-38.pyc ├── img_utils.cpython-38.pyc ├── logger.cpython-38.pyc └── resizer.cpython-38.pyc ├── compute_metric.py ├── fastmri_utils.py ├── img_utils.py ├── logger.py ├── resizer.py └── tools.py /DCDP-LDM/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/__pycache__/model_loader.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/__pycache__/model_loader.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021, VinAI. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | 1. 
Redistributions of source code must retain the above copyright notice, 7 | this list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 21 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 | POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/data/__init__.py: -------------------------------------------------------------------------------- 1 | """create dataset and dataloader""" 2 | import logging 3 | 4 | import torch 5 | import torch.utils.data 6 | 7 | 8 | def create_dataloader(dataset, dataset_opt, opt=None, sampler=None): 9 | phase = dataset_opt["phase"] 10 | if phase == "train": 11 | if opt["dist"]: 12 | world_size = torch.distributed.get_world_size() 13 | num_workers = dataset_opt["n_workers"] 14 | assert dataset_opt["batch_size"] % world_size == 0 15 | batch_size = dataset_opt["batch_size"] // world_size 16 | shuffle = False 17 | else: 18 | num_workers = dataset_opt["n_workers"] * len(opt["gpu_ids"]) 19 | batch_size = dataset_opt["batch_size"] 20 | shuffle = True 21 | return torch.utils.data.DataLoader( 22 | dataset, 23 | batch_size=batch_size, 24 | shuffle=shuffle, 25 | num_workers=num_workers, 26 | sampler=sampler, 27 | drop_last=True, 28 | pin_memory=False, 29 | ) 30 | else: 31 | return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, pin_memory=False) 32 | 33 | 34 | def create_dataset(dataset_opt): 35 | mode = dataset_opt["mode"] 36 | # datasets for image restoration 37 | if mode == "REDS": 38 | from data.REDS_dataset import REDSDataset as D 39 | elif mode == "GOPRO": 40 | from data.GOPRO_dataset import GOPRODataset as D 41 | elif mode == "fewshot": 42 | from data.fewshot_dataset import FewShotDataset as D 43 | elif mode == "levin": 44 | from data.levin_dataset import LevinDataset as D 45 | elif mode == "mix": 46 | from data.mix_dataset import MixDataset as D 47 | else: 48 | raise NotImplementedError(f"Dataset {mode} is not recognized.") 49 | dataset = D(dataset_opt) 50 | 51 | logger = logging.getLogger("base") 52 | logger.info("Dataset [{:s} - {:s}] is created.".format(dataset.__class__.__name__, dataset_opt["name"])) 53 | return dataset 54 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/data/data_sampler.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Modified from torch.utils.data.distributed.DistributedSampler 3 | Supports enlarging the dataset for *iteration-oriented* training, 4 | saving time when restarting the dataloader after each epoch 5 | """ 6 | import math 7 | 8 | import torch 9 | import torch.distributed as dist 10 | from torch.utils.data.sampler import Sampler 11 | 12 | 13 | class DistIterSampler(Sampler): 14 | """Sampler that restricts data loading to a subset of the dataset. 15 | 16 | It is especially useful in conjunction with 17 | :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each 18 | process can pass a DistributedSampler instance as a DataLoader sampler, 19 | and load a subset of the original dataset that is exclusive to it. 20 | 21 | .. note:: 22 | Dataset is assumed to be of constant size. 23 | 24 | Arguments: 25 | dataset: Dataset used for sampling. 26 | num_replicas (optional): Number of processes participating in 27 | distributed training. 28 | rank (optional): Rank of the current process within num_replicas. 29 | """ 30 | 31 | def __init__(self, dataset, num_replicas=None, rank=None, ratio=100): 32 | if num_replicas is None: 33 | if not dist.is_available(): 34 | raise RuntimeError( 35 | "Requires distributed \ 36 | package to be available" 37 | ) 38 | num_replicas = dist.get_world_size() 39 | if rank is None: 40 | if not dist.is_available(): 41 | raise RuntimeError( 42 | "Requires distributed \ 43 | package to be available" 44 | ) 45 | rank = dist.get_rank() 46 | self.dataset = dataset 47 | self.num_replicas = num_replicas 48 | self.rank = rank 49 | self.epoch = 0 50 | self.num_samples = int(math.ceil(len(self.dataset) * ratio / self.num_replicas)) 51 | self.total_size = self.num_samples * self.num_replicas 52 | 53 | def __iter__(self): 54 | # deterministically shuffle based on epoch 55 | g = torch.Generator() 56 | g.manual_seed(self.epoch) 57 | indices = torch.randperm(self.total_size, generator=g).tolist() 58 | 59 | dsize = len(self.dataset) 60 | indices = [v % dsize for v in indices] 61 | 62 | # subsample 63 | indices = indices[self.rank : self.total_size : self.num_replicas] 64 | assert len(indices) == self.num_samples 65 | 66 | return iter(indices) 67 | 68 | def __len__(self): 69 | return self.num_samples 70 | 71 | def set_epoch(self, epoch): 72 | self.epoch = epoch 73 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/experiments/pretrained/kernel.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/experiments/pretrained/kernel.pth -------------------------------------------------------------------------------- /DCDP-LDM/bkse/generate_blur.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import cv2 4 | import numpy as np 5 | import os.path as osp 6 | import torch 7 | import utils.util as util 8 | import yaml 9 | from models.kernel_encoding.kernel_wizard import KernelWizard 10 | 11 | 12 | def main(): 13 | device = torch.device("cuda") 14 | 15 | parser = argparse.ArgumentParser(description="Generate blurred images with a pretrained kernel encoder") 16 | 17 | parser.add_argument("--image_path", action="store", help="image path", type=str, required=True) 18 | parser.add_argument("--yml_path", action="store", help="yml 
path", type=str, required=True) 19 | parser.add_argument("--save_path", action="store", help="save path", type=str, default=".") 20 | parser.add_argument("--num_samples", action="store", help="number of samples", type=int, default=1) 21 | 22 | args = parser.parse_args() 23 | 24 | image_path = args.image_path 25 | yml_path = args.yml_path 26 | num_samples = args.num_samples 27 | 28 | # Initializing mode 29 | with open(yml_path, "r") as f: 30 | opt = yaml.load(f)["KernelWizard"] 31 | model_path = opt["pretrained"] 32 | model = KernelWizard(opt) 33 | model.eval() 34 | model.load_state_dict(torch.load(model_path)) 35 | model = model.to(device) 36 | 37 | HQ = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB) / 255.0 38 | HQ = np.transpose(HQ, (2, 0, 1)) 39 | HQ_tensor = torch.Tensor(HQ).unsqueeze(0).to(device).cuda() 40 | 41 | for i in range(num_samples): 42 | print(f"Sample #{i}/{num_samples}") 43 | with torch.no_grad(): 44 | kernel = torch.randn((1, 512, 2, 2)).cuda() * 1.2 45 | LQ_tensor = model.adaptKernel(HQ_tensor, kernel) 46 | 47 | dst = osp.join(args.save_path, f"blur{i:03d}.png") 48 | LQ_img = util.tensor2img(LQ_tensor) 49 | 50 | cv2.imwrite(dst, LQ_img) 51 | 52 | 53 | main() 54 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/generic_deblur.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import cv2 4 | import yaml 5 | from models.deblurring.joint_deblur import JointDeblur 6 | 7 | 8 | def main(): 9 | parser = argparse.ArgumentParser(description="Kernel extractor testing") 10 | 11 | parser.add_argument("--image_path", action="store", help="image path", type=str, required=True) 12 | parser.add_argument("--save_path", action="store", help="save path", type=str, default="res.png") 13 | parser.add_argument("--yml_path", action="store", help="yml path", type=str, required=True) 14 | 15 | args = parser.parse_args() 16 | 17 | # Initializing mode 18 | with open(args.yml_path, "rb") as f: 19 | opt = yaml.safe_load(f) 20 | model = JointDeblur(opt) 21 | 22 | blur_img = cv2.cvtColor(cv2.imread(args.image_path), cv2.COLOR_BGR2RGB) 23 | sharp_img = model.deblur(blur_img) 24 | 25 | cv2.imwrite(args.save_path, sharp_img) 26 | 27 | 28 | main() 29 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/blur_faces/face01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/blur_faces/face01.png -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/blur_imgs/blur1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/blur_imgs/blur1.png -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/blur_imgs/blur2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/blur_imgs/blur2.png 
-------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/results/augmentation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/results/augmentation.jpg -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/results/domain_specific_deblur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/results/domain_specific_deblur.jpg -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/results/general_deblurring.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/results/general_deblurring.jpg -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/results/generate_blur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/results/generate_blur.jpg -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/results/kernel_encoding_wGT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/results/kernel_encoding_wGT.png -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/sharp_imgs/mushishi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/sharp_imgs/mushishi.png -------------------------------------------------------------------------------- /DCDP-LDM/bkse/imgs/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/imgs/teaser.jpg -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | logger = logging.getLogger("base") 5 | 6 | 7 | def create_model(opt): 8 | model = opt["model"] 9 | if model == "image_base": 10 | from models.kernel_encoding.image_base_model import ImageBaseModel as M 11 | else: 12 | raise NotImplementedError("Model [{:s}] not recognized.".format(model)) 13 | m = M(opt) 14 | logger.info("Model [{:s}] is created.".format(m.__class__.__name__)) 15 | return m 16 | 
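For orientation, a minimal sketch of how this model factory and the dataset factories from bkse/data/__init__.py above are wired together. train.py normally does this plumbing through options/options.py; the hand-rolled dict handling here (the "phase" and "dist" keys in particular) is only illustrative.

import yaml
from data import create_dataloader, create_dataset
from models import create_model

with open("options/kernel_encoding/GOPRO/woVAE.yml", "r") as f:
    opt = dict(yaml.safe_load(f), dist=False)  # "dist" is normally set by the training launcher

# The dataset factory dispatches on dataset_opt["mode"] ("GOPRO" -> GOPRODataset).
dataset_opt = dict(opt["datasets"]["train"], phase="train")
train_set = create_dataset(dataset_opt)
train_loader = create_dataloader(train_set, dataset_opt, opt=opt)

# The model factory dispatches on opt["model"] ("image_base" -> ImageBaseModel).
model = create_model(opt)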
-------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/__pycache__/arch_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/__pycache__/arch_util.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/arch_util.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import torch.nn as nn 4 | import torch.nn.init as init 5 | 6 | 7 | class Identity(nn.Module): 8 | def forward(self, x): 9 | return x 10 | 11 | 12 | def get_norm_layer(norm_type="instance"): 13 | """Return a normalization layer 14 | Parameters: 15 | norm_type (str) -- the name of the normalization 16 | layer: batch | instance | none 17 | 18 | For BatchNorm, we use learnable affine parameters and 19 | track running statistics (mean/stddev). 20 | 21 | For InstanceNorm, we do not use learnable affine 22 | parameters. We do not track running statistics. 23 | """ 24 | if norm_type == "batch": 25 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) 26 | elif norm_type == "instance": 27 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) 28 | elif norm_type == "none": 29 | 30 | def norm_layer(x): 31 | return Identity() 32 | 33 | else: 34 | raise NotImplementedError( 35 | f"normalization layer {norm_type}\ 36 | is not found" 37 | ) 38 | return norm_layer 39 | 40 | 41 | def initialize_weights(net_l, scale=1): 42 | if not isinstance(net_l, list): 43 | net_l = [net_l] 44 | for net in net_l: 45 | for m in net.modules(): 46 | if isinstance(m, nn.Conv2d): 47 | init.kaiming_normal_(m.weight, a=0, mode="fan_in") 48 | m.weight.data *= scale # for residual block 49 | if m.bias is not None: 50 | m.bias.data.zero_() 51 | elif isinstance(m, nn.Linear): 52 | init.kaiming_normal_(m.weight, a=0, mode="fan_in") 53 | m.weight.data *= scale 54 | if m.bias is not None: 55 | m.bias.data.zero_() 56 | elif isinstance(m, nn.BatchNorm2d): 57 | init.constant_(m.weight, 1) 58 | init.constant_(m.bias.data, 0.0) 59 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/backbones/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/backbones/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/backbones/__pycache__/unet_parts.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/backbones/__pycache__/unet_parts.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/backbones/skip/concat.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class Concat(nn.Module): 7 | def __init__(self, dim, *args): 8 | super(Concat, self).__init__() 9 | self.dim = dim 10 | 11 | for idx, module in enumerate(args): 12 | self.add_module(str(idx), module) 13 | 14 | def forward(self, input): 15 | inputs = [] 16 | for module in self._modules.values(): 17 | inputs.append(module(input)) 18 | 19 | inputs_shapes2 = [x.shape[2] for x in inputs] 20 | inputs_shapes3 = [x.shape[3] for x in inputs] 21 | 22 | if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all( 23 | np.array(inputs_shapes3) == min(inputs_shapes3) 24 | ): 25 | inputs_ = inputs 26 | else: 27 | target_shape2 = min(inputs_shapes2) 28 | target_shape3 = min(inputs_shapes3) 29 | 30 | inputs_ = [] 31 | for inp in inputs: 32 | diff2 = (inp.size(2) - target_shape2) // 2 33 | diff3 = (inp.size(3) - target_shape3) // 2 34 | inputs_.append(inp[:, :, diff2 : diff2 + target_shape2, diff3 : diff3 + target_shape3]) 35 | 36 | return torch.cat(inputs_, dim=self.dim) 37 | 38 | def __len__(self): 39 | return len(self._modules) 40 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/backbones/skip/util.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .downsampler import Downsampler 4 | 5 | 6 | class Swish(nn.Module): 7 | """ 8 | https://arxiv.org/abs/1710.05941 9 | The hype was so huge that I could not help but try it 10 | """ 11 | 12 | def __init__(self): 13 | super(Swish, self).__init__() 14 | self.s = nn.Sigmoid() 15 | 16 | def forward(self, x): 17 | return x * self.s(x) 18 | 19 | 20 | def get_conv(in_f, out_f, kernel_size, stride=1, bias=True, pad="zero", downsample_mode="stride"): 21 | downsampler = None 22 | if stride != 1 and downsample_mode != "stride": 23 | 24 | if downsample_mode == "avg": 25 | downsampler = nn.AvgPool2d(stride, stride) 26 | elif downsample_mode == "max": 27 | downsampler = nn.MaxPool2d(stride, stride) 28 | elif downsample_mode in ["lanczos2", "lanczos3"]: 29 | downsampler = Downsampler( 30 | n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True 31 | ) 32 | else: 33 | assert False 34 | 35 | stride = 1 36 | 37 | padder = None 38 | to_pad = int((kernel_size - 1) / 2) 39 | if pad == "reflection": 40 | padder = nn.ReflectionPad2d(to_pad) 41 | to_pad = 0 42 | 43 | convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias) 44 | 45 | layers = filter(lambda x: x is not None, [padder, convolver, downsampler]) 46 | return nn.Sequential(*layers) 47 | 48 | 49 | def get_activation(act_fun="LeakyReLU"): 50 | """ 51 | Either string defining an activation function or module (e.g. 
nn.ReLU) 52 | """ 53 | if isinstance(act_fun, str): 54 | if act_fun == "LeakyReLU": 55 | return nn.LeakyReLU(0.2, inplace=True) 56 | elif act_fun == "Swish": 57 | return Swish() 58 | elif act_fun == "ELU": 59 | return nn.ELU() 60 | elif act_fun == "none": 61 | return nn.Sequential() 62 | else: 63 | assert False 64 | else: 65 | return act_fun() 66 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/deblurring/joint_deblur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import utils.util as util 3 | from models.deblurring.image_deblur import ImageDeblur 4 | from tqdm import tqdm 5 | 6 | 7 | class JointDeblur(ImageDeblur): 8 | def __init__(self, opt): 9 | super(JointDeblur, self).__init__(opt) 10 | 11 | def deblur(self, y): 12 | """Deblur image 13 | Args: 14 | y: Blur image 15 | """ 16 | y = util.img2tensor(y).unsqueeze(0).cuda() 17 | 18 | self.prepare_DIPs() 19 | self.reset_optimizers() 20 | 21 | warmup_k = torch.load(self.opt["warmup_k_path"]).cuda() 22 | self.warmup(y, warmup_k) 23 | 24 | # Input vector of DIPs is sampled from N(z, I) 25 | 26 | print("Deblurring") 27 | reg_noise_std = self.opt["reg_noise_std"] 28 | for step in tqdm(range(self.opt["num_iters"])): 29 | dip_zx_rand = self.dip_zx + reg_noise_std * torch.randn_like(self.dip_zx).cuda() 30 | dip_zk_rand = self.dip_zk + reg_noise_std * torch.randn_like(self.dip_zk).cuda() 31 | 32 | self.x_optimizer.zero_grad() 33 | self.k_optimizer.zero_grad() 34 | 35 | self.x_scheduler.step() 36 | self.k_scheduler.step() 37 | 38 | x = self.x_dip(dip_zx_rand) 39 | k = self.k_dip(dip_zk_rand) 40 | 41 | fake_y = self.kernel_wizard.adaptKernel(x, k) 42 | 43 | if step < self.opt["num_iters"] // 2: 44 | total_loss = 6e-1 * self.perceptual_loss(fake_y, y) 45 | total_loss += 1 - self.ssim_loss(fake_y, y) 46 | total_loss += 5e-5 * torch.norm(k) 47 | total_loss += 2e-2 * self.laplace_penalty(x) 48 | else: 49 | total_loss = self.perceptual_loss(fake_y, y) 50 | total_loss += 5e-2 * self.laplace_penalty(x) 51 | total_loss += 5e-4 * torch.norm(k) 52 | 53 | total_loss.backward() 54 | 55 | self.x_optimizer.step() 56 | self.k_optimizer.step() 57 | 58 | # debugging 59 | # if step % 100 == 0: 60 | # print(torch.norm(k)) 61 | # print(f"{self.k_optimizer.param_groups[0]['lr']:.3e}") 62 | 63 | return util.tensor2img(x.detach()) 64 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/dips.py: -------------------------------------------------------------------------------- 1 | import models.arch_util as arch_util 2 | import torch.nn as nn 3 | from models.backbones.resnet import ResnetBlock 4 | from models.backbones.skip.skip import skip 5 | 6 | 7 | class KernelDIP(nn.Module): 8 | """ 9 | DIP (Deep Image Prior) for blur kernel 10 | """ 11 | 12 | def __init__(self, opt): 13 | super(KernelDIP, self).__init__() 14 | 15 | norm_layer = arch_util.get_norm_layer("none") 16 | n_blocks = opt["n_blocks"] 17 | nf = opt["nf"] 18 | padding_type = opt["padding_type"] 19 | use_dropout = opt["use_dropout"] 20 | kernel_dim = opt["kernel_dim"] 21 | 22 | input_nc = 64 23 | model = [ 24 | nn.ReflectionPad2d(3), 25 | nn.Conv2d(input_nc, nf, kernel_size=7, padding=0, bias=True), 26 | norm_layer(nf), 27 | nn.ReLU(True), 28 | ] 29 | 30 | n_downsampling = 5 31 | for i in range(n_downsampling): # add downsampling layers 32 | mult = 2 ** i 33 | input_nc = min(nf * mult, kernel_dim) 34 | output_nc = min(nf * mult * 2, kernel_dim) 35 | 
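# Editor's note on the arithmetic here (assuming the defaults used by the
# configs in this repo: nf = 64, kernel_dim = 512, and the hardcoded
# n_downsampling = 5): the channel widths walk 64 -> 128 -> 256 -> 512 ->
# 512 -> 512, capped at kernel_dim, while each stride-2 conv halves the
# spatial size, so a 64x64 noise input would be squeezed into the 512x2x2
# kernel code that KernelWizard.adaptKernel consumes
# (cf. torch.randn((1, 512, 2, 2)) in generate_blur.py).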
model += [ 36 | nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=2, padding=1, bias=True), 37 | norm_layer(nf * mult * 2), 38 | nn.ReLU(True), 39 | ] 40 | 41 | for i in range(n_blocks): # add ResNet blocks 42 | model += [ 43 | ResnetBlock( 44 | kernel_dim, 45 | padding_type=padding_type, 46 | norm_layer=norm_layer, 47 | use_dropout=use_dropout, 48 | use_bias=True, 49 | ) 50 | ] 51 | 52 | self.model = nn.Sequential(*model) 53 | 54 | def forward(self, noise): 55 | return self.model(noise) 56 | 57 | 58 | class ImageDIP(nn.Module): 59 | """ 60 | DIP (Deep Image Prior) for sharp image 61 | """ 62 | 63 | def __init__(self, opt): 64 | super(ImageDIP, self).__init__() 65 | 66 | input_nc = opt["input_nc"] 67 | output_nc = opt["output_nc"] 68 | 69 | self.model = skip( 70 | input_nc, 71 | output_nc, 72 | num_channels_down=[128, 128, 128, 128, 128], 73 | num_channels_up=[128, 128, 128, 128, 128], 74 | num_channels_skip=[16, 16, 16, 16, 16], 75 | upsample_mode="bilinear", 76 | need_sigmoid=True, 77 | need_bias=True, 78 | pad=opt["padding_type"], 79 | act_fun="LeakyReLU", 80 | ) 81 | 82 | def forward(self, img): 83 | return self.model(img) 84 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/dsd/op/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/dsd/op/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/dsd/op/fused_bias_act.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | 4 | torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 5 | int act, int grad, float alpha, float scale); 6 | 7 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 8 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 9 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 10 | 11 | torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 12 | int act, int grad, float alpha, float scale) { 13 | CHECK_CUDA(input); 14 | CHECK_CUDA(bias); 15 | 16 | return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); 17 | } 18 | 19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); 21 | } -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/dsd/op/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | 4 | torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, 5 | int up_x, int up_y, int down_x, int down_y, 6 | int pad_x0, int pad_x1, int pad_y0, int pad_y1); 7 | 8 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 9 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 10 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 11 | 12 | torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, 13 | int up_x, int up_y, int down_x, int down_y, 14 | int pad_x0, int pad_x1, int pad_y0, int pad_y1) { 15 | CHECK_CUDA(input); 16 | CHECK_CUDA(kernel); 17 | 18 | 
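// Editor's note: upfirdn2d is StyleGAN2's resampling primitive -- upsample by
// zero-stuffing with factors (up_x, up_y), pad, convolve with the FIR
// `kernel`, then downsample by (down_x, down_y). The output height works out
// to (in_h * up_y + pad_y0 + pad_y1 - kernel_h) / down_y + 1 (integer
// division), and analogously for the width.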
return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); 19 | } 20 | 21 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 22 | m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); 23 | } -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/dsd/spherical_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.optim import Optimizer 3 | 4 | 5 | # Spherical Optimizer Class 6 | # Uses the first two dimensions as batch information 7 | # Optimizes over the surface of a sphere using the initial radius throughout 8 | # 9 | # Example Usage: 10 | # opt = SphericalOptimizer(torch.optim.SGD, [x], lr=0.01) 11 | 12 | 13 | class SphericalOptimizer(Optimizer): 14 | def __init__(self, optimizer, params, **kwargs): 15 | self.opt = optimizer(params, **kwargs) 16 | self.params = params 17 | with torch.no_grad(): 18 | self.radii = { 19 | param: (param.pow(2).sum(tuple(range(2, param.ndim)), keepdim=True) + 1e-9).sqrt() for param in params 20 | } 21 | 22 | @torch.no_grad() 23 | def step(self, closure=None): 24 | loss = self.opt.step(closure) 25 | for param in self.params: 26 | param.data.div_((param.pow(2).sum(tuple(range(2, param.ndim)), keepdim=True) + 1e-9).sqrt()) 27 | param.mul_(self.radii[param]) 28 | 29 | return loss 30 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/kernel_encoding/__pycache__/kernel_wizard.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/models/kernel_encoding/__pycache__/kernel_wizard.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/losses/charbonnier_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class CharbonnierLoss(nn.Module): 6 | """Charbonnier Loss (L1)""" 7 | 8 | def __init__(self, eps=1e-6): 9 | super(CharbonnierLoss, self).__init__() 10 | self.eps = eps 11 | 12 | def forward(self, x, y): 13 | diff = x - y 14 | loss = torch.sum(torch.sqrt(diff * diff + self.eps)) 15 | return loss 16 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/losses/gan_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | # Define GAN loss: [vanilla | lsgan | wgan-gp] 6 | class GANLoss(nn.Module): 7 | def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0): 8 | super(GANLoss, self).__init__() 9 | self.gan_type = gan_type.lower() 10 | self.real_label_val = real_label_val 11 | self.fake_label_val = fake_label_val 12 | 13 | if self.gan_type == "gan" or self.gan_type == "ragan": 14 | self.loss = nn.BCEWithLogitsLoss() 15 | elif self.gan_type == "lsgan": 16 | self.loss = nn.MSELoss() 17 | elif self.gan_type == "wgan-gp": 18 | 19 | def wgan_loss(input, target): 20 | # target is boolean 21 | return -1 * input.mean() if target else input.mean() 22 | 23 | self.loss = wgan_loss 24 | else: 25 | raise NotImplementedError("GAN type [{:s}] is not found".format(self.gan_type)) 26 | 27 | def get_target_label(self, input, target_is_real): 28 | if self.gan_type == "wgan-gp": 
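# For wgan-gp the "label" is the boolean itself: wgan_loss above uses it
# as a sign, yielding -mean(D(x)) for real targets and +mean(D(x)) for
# fake ones, i.e. the Wasserstein critic objective, so no BCE/MSE target
# tensor is needed.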
29 | return target_is_real 30 | if target_is_real: 31 | return torch.empty_like(input).fill_(self.real_label_val) 32 | else: 33 | return torch.empty_like(input).fill_(self.fake_label_val) 34 | 35 | def forward(self, input, target_is_real): 36 | target_label = self.get_target_label(input, target_is_real) 37 | loss = self.loss(input, target_label) 38 | return loss 39 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/losses/hyper_laplacian_penalty.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class HyperLaplacianPenalty(nn.Module): 7 | def __init__(self, num_channels, alpha, eps=1e-6): 8 | super(HyperLaplacianPenalty, self).__init__() 9 | 10 | self.alpha = alpha 11 | self.eps = eps 12 | 13 | self.Kx = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).cuda() 14 | self.Kx = self.Kx.expand(1, num_channels, 3, 3) 15 | self.Kx.requires_grad = False 16 | self.Ky = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).cuda() 17 | self.Ky = self.Ky.expand(1, num_channels, 3, 3) 18 | self.Ky.requires_grad = False 19 | 20 | def forward(self, x): 21 | gradX = F.conv2d(x, self.Kx, stride=1, padding=1) 22 | gradY = F.conv2d(x, self.Ky, stride=1, padding=1) 23 | grad = torch.sqrt(gradX ** 2 + gradY ** 2 + self.eps) 24 | 25 | loss = (grad ** self.alpha).mean() 26 | 27 | return loss 28 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/models/losses/ssim_loss.py: -------------------------------------------------------------------------------- 1 | from math import exp 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | from torch.autograd import Variable 6 | 7 | 8 | class SSIM(torch.nn.Module): 9 | @staticmethod 10 | def gaussian(window_size, sigma): 11 | gauss = torch.Tensor([exp(-((x - window_size // 2) ** 2) / float(2 * sigma ** 2)) for x in range(window_size)]) 12 | return gauss / gauss.sum() 13 | 14 | @staticmethod 15 | def create_window(window_size, channel): 16 | _1D_window = SSIM.gaussian(window_size, 1.5).unsqueeze(1) 17 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) 18 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) 19 | return window 20 | 21 | @staticmethod 22 | def _ssim(img1, img2, window, window_size, channel, size_average=True): 23 | mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) 24 | mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) 25 | 26 | mu1_sq = mu1.pow(2) 27 | mu2_sq = mu2.pow(2) 28 | mu1_mu2 = mu1 * mu2 29 | 30 | sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq 31 | sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq 32 | sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 33 | 34 | C1 = 0.01 ** 2 35 | C2 = 0.03 ** 2 36 | 37 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) 38 | 39 | if size_average: 40 | return ssim_map.mean() 41 | else: 42 | return ssim_map.mean(1).mean(1).mean(1) 43 | 44 | def __init__(self, window_size=11, size_average=True): 45 | super(SSIM, self).__init__() 46 | self.window_size = window_size 47 | self.size_average = size_average 48 | self.channel = 1 49 | self.window = self.create_window(window_size, self.channel) 50 | 51 | 
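# Editor's note: _ssim above computes the standard SSIM index,
#   SSIM(x, y) = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2))
#                / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2)),
# with C1 = (0.01*L)^2 and C2 = (0.03*L)^2 for dynamic range L (here L = 1,
# i.e. inputs in [0, 1]); the means and (co)variances are local statistics
# taken under an 11x11 Gaussian window with sigma = 1.5.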
def forward(self, img1, img2): 52 | (_, channel, _, _) = img1.size() 53 | 54 | if channel == self.channel and self.window.data.type() == img1.data.type(): 55 | window = self.window 56 | else: 57 | window = self.create_window(self.window_size, channel) 58 | 59 | if img1.is_cuda: 60 | window = window.cuda(img1.get_device()) 61 | window = window.type_as(img1) 62 | 63 | self.window = window 64 | self.channel = channel 65 | 66 | return self._ssim(img1, img2, window, self.window_size, channel, self.size_average) 67 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/options/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/data_augmentation/default.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | gpu_ids: [0] 3 | 4 | #### network structures 5 | KernelWizard: 6 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 7 | input_nc: 3 8 | nf: 64 9 | front_RBs: 10 10 | back_RBs: 20 11 | N_frames: 1 12 | kernel_dim: 512 13 | use_vae: false 14 | KernelExtractor: 15 | norm: none 16 | use_sharp: true 17 | n_blocks: 4 18 | padding_type: reflect 19 | use_dropout: false 20 | Adapter: 21 | norm: none 22 | use_dropout: false 23 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/domain_specific_deblur/stylegan.yml: -------------------------------------------------------------------------------- 1 | stylegan_ver: 1 2 | img_size: &HQ_SIZE [256, 256] 3 | output_size: 1024 4 | verbose: true 5 | num_epochs: 25 6 | num_warmup_iters: 150 7 | num_x_iters: 300 8 | num_k_iters: 200 9 | x_lr: !!float 0.2 10 | k_lr: !!float 1e-4 11 | warmup_k_path: experiments/pretrained/kernel.pth 12 | reg_noise_std: !!float 0.001 13 | duplicates: 1 14 | batch_size: 1 15 | loss_str: '100*L2+0.1*GEOCROSS' 16 | eps: !!float 1e-15 17 | noise_type: trainable 18 | num_trainable_noise_layers: 5 19 | bad_noise_layers: '17' 20 | optimizer_name: adam 21 | lr_schedule: linear1cycledrop 22 | save_intermediate: true 23 | tile_latent: ~ 24 | seed: ~ 25 | 26 | KernelDIP: 27 | nf: 64 28 | n_blocks: 6 29 | padding_type: reflect 30 | use_dropout: false 31 | kernel_dim: 512 32 | norm: none 33 | 34 | KernelWizard: 35 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 36 | input_nc: 3 37 | nf: 64 38 | front_RBs: 10 39 | back_RBs: 20 40 | N_frames: 1 41 | kernel_dim: 512 42 | img_size: *HQ_SIZE 43 | use_vae: false 44 | KernelExtractor: 45 | norm: none 46 | use_sharp: true 47 | n_blocks: 4 48 | padding_type: reflect 49 | use_dropout: false 50 | Adapter: 51 | norm: none 52 | use_dropout: false 53 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/domain_specific_deblur/stylegan2.yml: -------------------------------------------------------------------------------- 1 | stylegan_ver: 2 2 | img_size: &HQ_SIZE [256, 256] 3 | output_size: 256 4 | verbose: true 5 | num_epochs: 25 6 | num_warmup_iters: 150 7 | num_x_iters: 300 8 | num_k_iters: 200 9 | x_lr: !!float 0.2 10 | k_lr: !!float 5e-4 11 | warmup_k_path: experiments/pretrained/kernel.pth 12 | reg_noise_std: !!float 0.001 13 | duplicates: 1 14 | batch_size: 
1 15 | loss_str: '100*L2+0.1*GEOCROSS' 16 | eps: !!float 1e-15 17 | noise_type: trainable 18 | num_trainable_noise_layers: 5 19 | bad_noise_layers: '17' 20 | optimizer_name: adam 21 | lr_schedule: linear1cycledrop 22 | save_intermediate: true 23 | tile_latent: ~ 24 | seed: ~ 25 | 26 | ImageDIP: 27 | input_nc: 8 28 | output_nc: 3 29 | nf: 64 30 | norm: none 31 | padding_type: reflect 32 | 33 | KernelDIP: 34 | nf: 64 35 | n_blocks: 6 36 | padding_type: reflect 37 | use_dropout: false 38 | kernel_dim: 512 39 | norm: none 40 | 41 | KernelWizard: 42 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 43 | input_nc: 3 44 | nf: 64 45 | front_RBs: 10 46 | back_RBs: 20 47 | N_frames: 1 48 | kernel_dim: 512 49 | img_size: *HQ_SIZE 50 | use_vae: false 51 | KernelExtractor: 52 | norm: none 53 | use_sharp: true 54 | n_blocks: 4 55 | padding_type: reflect 56 | use_dropout: false 57 | Adapter: 58 | norm: none 59 | use_dropout: false 60 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/generate_blur/default.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | gpu_ids: [0] 3 | 4 | #### network structures 5 | KernelWizard: 6 | pretrained: bkse/experiments/pretrained/GOPRO_wVAE.pth 7 | input_nc: 3 8 | nf: 64 9 | front_RBs: 10 10 | back_RBs: 20 11 | N_frames: 1 12 | kernel_dim: 512 13 | use_vae: true 14 | KernelExtractor: 15 | norm: none 16 | use_sharp: true 17 | n_blocks: 4 18 | padding_type: reflect 19 | use_dropout: false 20 | Adapter: 21 | norm: none 22 | use_dropout: false 23 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/generic_deblur/default.yml: -------------------------------------------------------------------------------- 1 | num_iters: 5000 2 | num_warmup_iters: 300 3 | x_lr: !!float 5e-4 4 | k_lr: !!float 5e-4 5 | img_size: &HQ_SIZE [256, 256] 6 | warmup_k_path: experiments/pretrained/kernel.pth 7 | reg_noise_std: !!float 0.001 8 | 9 | ImageDIP: 10 | input_nc: 8 11 | output_nc: 3 12 | nf: 64 13 | norm: none 14 | padding_type: reflect 15 | 16 | KernelDIP: 17 | nf: 64 18 | n_blocks: 6 19 | padding_type: reflect 20 | use_dropout: false 21 | kernel_dim: 512 22 | norm: none 23 | 24 | KernelWizard: 25 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 26 | input_nc: 3 27 | nf: 64 28 | front_RBs: 10 29 | back_RBs: 20 30 | N_frames: 1 31 | kernel_dim: 512 32 | img_size: *HQ_SIZE 33 | use_vae: false 34 | KernelExtractor: 35 | norm: none 36 | use_sharp: true 37 | n_blocks: 4 38 | padding_type: reflect 39 | use_dropout: false 40 | Adapter: 41 | norm: none 42 | use_dropout: false 43 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/kernel_encoding/GOPRO/wVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: GOPRO_VAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: GOPRO 13 | mode: GOPRO 14 | interval_list: [1] 15 | dataroot_HQ: datasets/GOPRO/train_sharp.lmdb 16 | dataroot_LQ: datasets/GOPRO/train_blur_linear.lmdb 17 | cache_keys: ~ 18 | 19 | use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 8 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | 
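# Editor's note (not part of the original config): by naming convention,
# front_RBs / back_RBs below are presumably the residual-block counts at the
# front (feature extraction) and back (reconstruction) of the KernelWizard
# encoder, and kernel_dim: 512 matches the 512-channel kernel code sampled
# in generate_blur.py (torch.randn((1, 512, 2, 2))).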
front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: true 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: experiments/pretrained/GOPRO_wsharp_woVAE.pth 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-8 66 | 67 | pixel_criterion: cb 68 | pixel_weight: !!float 1.0 69 | kl_weight: !!float 10.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/kernel_encoding/GOPRO/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: GOPRO_woVAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: GOPRO 13 | mode: GOPRO 14 | interval_list: [1] 15 | dataroot_HQ: datasets/GOPRO/train_sharp.lmdb 16 | dataroot_LQ: datasets/GOPRO/train_blur_linear.lmdb 17 | cache_keys: ~ 18 | 19 | use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 16 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: false 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: ~ 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-8 66 | 67 | pixel_criterion: cb 68 | pixel_weight: 1.0 69 | kl_weight: 0.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/kernel_encoding/REDS/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: REDS_woVAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [3] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: REDS 13 | mode: REDS 14 | interval_list: [1] 15 | dataroot_HQ: datasets/REDS/train_sharp_wval.lmdb 16 | dataroot_LQ: datasets/REDS/train_blur_wval.lmdb 17 | cache_keys: ~ 18 | 19 | 
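# Editor's note (not part of the original config): the dataroot_* entries
# above are LMDB databases, not raw image folders -- scripts/download_dataset.py
# fetches the REDS zips, and scripts/create_lmdb.py (see the tree above)
# presumably packs them into the .lmdb layout expected here.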
use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 13 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: false 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: ~ 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-6 66 | 67 | pixel_criterion: cb 68 | pixel_weight: 1.0 69 | kl_weight: 0.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/options/kernel_encoding/mix/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: mix_wsharp 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: mix 13 | mode: mix 14 | interval_list: [1] 15 | dataroots_HQ: ['datasets/REDS/train_sharp_wval.lmdb', 'datasets/GOPRO/train_sharp.lmdb'] 16 | dataroots_LQ: ['datasets/REDS/train_blur_wval.lmdb', 'datasets/GOPRO/train_blur_linear.lmdb'] 17 | dataset_weights: [1, 10] 18 | cache_keys: ~ 19 | 20 | N_frames: 1 21 | use_shuffle: true 22 | n_workers: 3 # per GPU 23 | batch_size: 16 24 | HQ_size: 256 25 | LQ_size: 256 26 | use_flip: true 27 | use_rot: true 28 | color: RGB 29 | 30 | #### network structures 31 | KernelWizard: 32 | input_nc: 3 33 | nf: 64 34 | front_RBs: 10 35 | back_RBs: 20 36 | N_frames: 1 37 | kernel_dim: 512 38 | use_vae: false 39 | KernelExtractor: 40 | norm: none 41 | use_sharp: true 42 | n_blocks: 4 43 | padding_type: reflect 44 | use_dropout: false 45 | Adapter: 46 | norm: none 47 | use_dropout: false 48 | 49 | #### path 50 | path: 51 | pretrain_model_G: ~ 52 | strict_load: false 53 | resume_state: ~ 54 | 55 | #### training settings: learning rate scheme, loss 56 | train: 57 | lr_G: !!float 1e-4 58 | lr_scheme: CosineAnnealingLR_Restart 59 | beta1: 0.9 60 | beta2: 0.99 61 | niter: 600000 62 | warmup_iter: -1 # -1: no warm up 63 | T_period: [50000, 100000, 150000, 150000, 150000] 64 | restarts: [50000, 150000, 300000, 450000] 65 | restart_weights: [1, 1, 1, 1] 66 | eta_min: !!float 1e-8 67 | 68 | pixel_criterion: cb 69 | pixel_weight: 1.0 70 | kernel_weight: 0.1 71 | gradient_loss_weight: 0.3 72 | val_freq: !!float 5e3 73 | 74 | manual_seed: 0 75 | 76 | #### logger 77 | logger: 78 | print_freq: 10 79 | save_checkpoint_freq: !!float 5000 80 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/requirements.txt: -------------------------------------------------------------------------------- 1 | torch >= 1.4.0 2 | torchvision >= 0.5.0 3 | pyyaml 4 | opencv-python 5 | numpy 6 | 
lmdb 7 | tqdm 8 | tensorboard >= 1.15.0 9 | ninja 10 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/scripts/download_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import os.path as osp 4 | 5 | import requests 6 | 7 | 8 | def download_file_from_google_drive(file_id, destination): 9 | os.makedirs(osp.dirname(destination), exist_ok=True) 10 | URL = "https://docs.google.com/uc?export=download" 11 | 12 | session = requests.Session() 13 | 14 | response = session.get(URL, params={"id": file_id}, stream=True) 15 | token = get_confirm_token(response) 16 | 17 | if token: 18 | params = {"id": file_id, "confirm": token} 19 | response = session.get(URL, params=params, stream=True) 20 | 21 | save_response_content(response, destination) 22 | 23 | 24 | def get_confirm_token(response): 25 | for key, value in response.cookies.items(): 26 | if key.startswith("download_warning"): 27 | return value 28 | 29 | return None 30 | 31 | 32 | def save_response_content(response, destination): 33 | CHUNK_SIZE = 32768 34 | 35 | with open(destination, "wb") as f: 36 | for chunk in response.iter_content(CHUNK_SIZE): 37 | if chunk: # filter out keep-alive new chunks 38 | f.write(chunk) 39 | 40 | 41 | if __name__ == "__main__": 42 | dataset_ids = { 43 | "GOPRO_Large": "1H0PIXvJH4c40pk7ou6nAwoxuR4Qh_Sa2", 44 | "train_sharp": "1YLksKtMhd2mWyVSkvhDaDLWSc1qYNCz-", 45 | "train_blur": "1Be2cgzuuXibcqAuJekDgvHq4MLYkCgR8", 46 | "val_sharp": "1MGeObVQ1-Z29f-myDP7-8c3u0_xECKXq", 47 | "val_blur": "1N8z2yD0GDWmh6U4d4EADERtcUgDzGrHx", 48 | "test_blur": "1dr0--ZBKqr4P1M8lek6JKD1Vd6bhhrZT", 49 | } 50 | 51 | parser = argparse.ArgumentParser( 52 | description="Download REDS dataset from google drive to current folder", allow_abbrev=False 53 | ) 54 | 55 | parser.add_argument("--REDS_train_sharp", action="store_true", help="download REDS train_sharp.zip") 56 | parser.add_argument("--REDS_train_blur", action="store_true", help="download REDS train_blur.zip") 57 | parser.add_argument("--REDS_val_sharp", action="store_true", help="download REDS val_sharp.zip") 58 | parser.add_argument("--REDS_val_blur", action="store_true", help="download REDS val_blur.zip") 59 | parser.add_argument("--GOPRO", action="store_true", help="download GOPRO_Large.zip") 60 | 61 | args = parser.parse_args() 62 | 63 | if args.REDS_train_sharp: 64 | download_file_from_google_drive(dataset_ids["train_sharp"], "REDS/train_sharp.zip") 65 | if args.REDS_train_blur: 66 | download_file_from_google_drive(dataset_ids["train_blur"], "REDS/train_blur.zip") 67 | if args.REDS_val_sharp: 68 | download_file_from_google_drive(dataset_ids["val_sharp"], "REDS/val_sharp.zip") 69 | if args.REDS_val_blur: 70 | download_file_from_google_drive(dataset_ids["val_blur"], "REDS/val_blur.zip") 71 | if args.GOPRO: 72 | download_file_from_google_drive(dataset_ids["GOPRO_Large"], "GOPRO/GOPRO.zip") 73 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/train_script.sh: -------------------------------------------------------------------------------- 1 | python3.7 train.py -opt options/kernel_encoding/REDS/woVAE.yml 2 | -------------------------------------------------------------------------------- /DCDP-LDM/bkse/utils/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/bkse/utils/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/configs/autoencoder/autoencoder_kl_16x16x16.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 4.5e-6 3 | target: ldm.models.autoencoder.AutoencoderKL 4 | params: 5 | monitor: "val/rec_loss" 6 | embed_dim: 16 7 | lossconfig: 8 | target: ldm.modules.losses.LPIPSWithDiscriminator 9 | params: 10 | disc_start: 50001 11 | kl_weight: 0.000001 12 | disc_weight: 0.5 13 | 14 | ddconfig: 15 | double_z: True 16 | z_channels: 16 17 | resolution: 256 18 | in_channels: 3 19 | out_ch: 3 20 | ch: 128 21 | ch_mult: [ 1,1,2,2,4] # num_down = len(ch_mult)-1 22 | num_res_blocks: 2 23 | attn_resolutions: [16] 24 | dropout: 0.0 25 | 26 | 27 | data: 28 | target: main.DataModuleFromConfig 29 | params: 30 | batch_size: 12 31 | wrap: True 32 | train: 33 | target: ldm.data.imagenet.ImageNetSRTrain 34 | params: 35 | size: 256 36 | degradation: pil_nearest 37 | validation: 38 | target: ldm.data.imagenet.ImageNetSRValidation 39 | params: 40 | size: 256 41 | degradation: pil_nearest 42 | 43 | lightning: 44 | callbacks: 45 | image_logger: 46 | target: main.ImageLogger 47 | params: 48 | batch_frequency: 1000 49 | max_images: 8 50 | increase_log_steps: True 51 | 52 | trainer: 53 | benchmark: True 54 | accumulate_grad_batches: 2 55 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/autoencoder/autoencoder_kl_32x32x4.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 4.5e-6 3 | target: ldm.models.autoencoder.AutoencoderKL 4 | params: 5 | monitor: "val/rec_loss" 6 | embed_dim: 4 7 | lossconfig: 8 | target: ldm.modules.losses.LPIPSWithDiscriminator 9 | params: 10 | disc_start: 50001 11 | kl_weight: 0.000001 12 | disc_weight: 0.5 13 | 14 | ddconfig: 15 | double_z: True 16 | z_channels: 4 17 | resolution: 256 18 | in_channels: 3 19 | out_ch: 3 20 | ch: 128 21 | ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1 22 | num_res_blocks: 2 23 | attn_resolutions: [ ] 24 | dropout: 0.0 25 | 26 | data: 27 | target: main.DataModuleFromConfig 28 | params: 29 | batch_size: 12 30 | wrap: True 31 | train: 32 | target: ldm.data.imagenet.ImageNetSRTrain 33 | params: 34 | size: 256 35 | degradation: pil_nearest 36 | validation: 37 | target: ldm.data.imagenet.ImageNetSRValidation 38 | params: 39 | size: 256 40 | degradation: pil_nearest 41 | 42 | lightning: 43 | callbacks: 44 | image_logger: 45 | target: main.ImageLogger 46 | params: 47 | batch_frequency: 1000 48 | max_images: 8 49 | increase_log_steps: True 50 | 51 | trainer: 52 | benchmark: True 53 | accumulate_grad_batches: 2 54 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/autoencoder/autoencoder_kl_64x64x3.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 4.5e-6 3 | target: ldm.models.autoencoder.AutoencoderKL 4 | params: 5 | monitor: "val/rec_loss" 6 | embed_dim: 3 7 | lossconfig: 8 | target: ldm.modules.losses.LPIPSWithDiscriminator 9 | params: 10 | disc_start: 50001 11 | kl_weight: 0.000001 12 | disc_weight: 0.5 13 | 14 | ddconfig: 15 | double_z: True 16 | z_channels: 3 17 | resolution: 256 18 | 
in_channels: 3 19 | out_ch: 3 20 | ch: 128 21 | ch_mult: [ 1,2,4 ] # num_down = len(ch_mult)-1 22 | num_res_blocks: 2 23 | attn_resolutions: [ ] 24 | dropout: 0.0 25 | 26 | 27 | data: 28 | target: main.DataModuleFromConfig 29 | params: 30 | batch_size: 12 31 | wrap: True 32 | train: 33 | target: ldm.data.imagenet.ImageNetSRTrain 34 | params: 35 | size: 256 36 | degradation: pil_nearest 37 | validation: 38 | target: ldm.data.imagenet.ImageNetSRValidation 39 | params: 40 | size: 256 41 | degradation: pil_nearest 42 | 43 | lightning: 44 | callbacks: 45 | image_logger: 46 | target: main.ImageLogger 47 | params: 48 | batch_frequency: 1000 49 | max_images: 8 50 | increase_log_steps: True 51 | 52 | trainer: 53 | benchmark: True 54 | accumulate_grad_batches: 2 55 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/autoencoder/autoencoder_kl_8x8x64.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 4.5e-6 3 | target: ldm.models.autoencoder.AutoencoderKL 4 | params: 5 | monitor: "val/rec_loss" 6 | embed_dim: 64 7 | lossconfig: 8 | target: ldm.modules.losses.LPIPSWithDiscriminator 9 | params: 10 | disc_start: 50001 11 | kl_weight: 0.000001 12 | disc_weight: 0.5 13 | 14 | ddconfig: 15 | double_z: True 16 | z_channels: 64 17 | resolution: 256 18 | in_channels: 3 19 | out_ch: 3 20 | ch: 128 21 | ch_mult: [ 1,1,2,2,4,4] # num_down = len(ch_mult)-1 22 | num_res_blocks: 2 23 | attn_resolutions: [16,8] 24 | dropout: 0.0 25 | 26 | data: 27 | target: main.DataModuleFromConfig 28 | params: 29 | batch_size: 12 30 | wrap: True 31 | train: 32 | target: ldm.data.imagenet.ImageNetSRTrain 33 | params: 34 | size: 256 35 | degradation: pil_nearest 36 | validation: 37 | target: ldm.data.imagenet.ImageNetSRValidation 38 | params: 39 | size: 256 40 | degradation: pil_nearest 41 | 42 | lightning: 43 | callbacks: 44 | image_logger: 45 | target: main.ImageLogger 46 | params: 47 | batch_frequency: 1000 48 | max_images: 8 49 | increase_log_steps: True 50 | 51 | trainer: 52 | benchmark: True 53 | accumulate_grad_batches: 2 54 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/celebahq-ldm-vq-4.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 2.0e-06 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 0.0195 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: image 11 | image_size: 64 12 | channels: 3 13 | monitor: val/loss_simple_ema 14 | 15 | unet_config: 16 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 17 | params: 18 | image_size: 64 19 | in_channels: 3 20 | out_channels: 3 21 | model_channels: 224 22 | attention_resolutions: 23 | # note: this isn't actually the resolution but 24 | # the downsampling factor, i.e.
this corresponds to 25 | # attention on spatial resolution 8,16,32, as the 26 | # spatial resolution of the latents is 64 for f4 27 | - 8 28 | - 4 29 | - 2 30 | num_res_blocks: 2 31 | channel_mult: 32 | - 1 33 | - 2 34 | - 3 35 | - 4 36 | num_head_channels: 32 37 | first_stage_config: 38 | target: ldm.models.autoencoder.VQModelInterface 39 | params: 40 | embed_dim: 3 41 | n_embed: 8192 42 | ckpt_path: models/first_stage_models/vq-f4/model.ckpt 43 | ddconfig: 44 | double_z: false 45 | z_channels: 3 46 | resolution: 256 47 | in_channels: 3 48 | out_ch: 3 49 | ch: 128 50 | ch_mult: 51 | - 1 52 | - 2 53 | - 4 54 | num_res_blocks: 2 55 | attn_resolutions: [] 56 | dropout: 0.0 57 | lossconfig: 58 | target: torch.nn.Identity 59 | cond_stage_config: __is_unconditional__ 60 | data: 61 | target: main.DataModuleFromConfig 62 | params: 63 | batch_size: 48 64 | num_workers: 5 65 | wrap: false 66 | train: 67 | target: taming.data.faceshq.CelebAHQTrain 68 | params: 69 | size: 256 70 | validation: 71 | target: taming.data.faceshq.CelebAHQValidation 72 | params: 73 | size: 256 74 | 75 | 76 | lightning: 77 | callbacks: 78 | image_logger: 79 | target: main.ImageLogger 80 | params: 81 | batch_frequency: 5000 82 | max_images: 8 83 | increase_log_steps: False 84 | 85 | trainer: 86 | benchmark: True -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/cin-ldm-vq-f8.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 1.0e-06 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 0.0195 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: image 11 | cond_stage_key: class_label 12 | image_size: 32 13 | channels: 4 14 | cond_stage_trainable: true 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | unet_config: 18 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 19 | params: 20 | image_size: 32 21 | in_channels: 4 22 | out_channels: 4 23 | model_channels: 256 24 | attention_resolutions: 25 | #note: this isn't actually the resolution but 26 | # the downsampling factor, i.e.
this corresponds to 27 | # attention on spatial resolution 8,16,32, as the 28 | # spatial resolution of the latents is 32 for f8 29 | - 4 30 | - 2 31 | - 1 32 | num_res_blocks: 2 33 | channel_mult: 34 | - 1 35 | - 2 36 | - 4 37 | num_head_channels: 32 38 | use_spatial_transformer: true 39 | transformer_depth: 1 40 | context_dim: 512 41 | first_stage_config: 42 | target: ldm.models.autoencoder.VQModelInterface 43 | params: 44 | embed_dim: 4 45 | n_embed: 16384 46 | ckpt_path: configs/first_stage_models/vq-f8/model.yaml 47 | ddconfig: 48 | double_z: false 49 | z_channels: 4 50 | resolution: 256 51 | in_channels: 3 52 | out_ch: 3 53 | ch: 128 54 | ch_mult: 55 | - 1 56 | - 2 57 | - 2 58 | - 4 59 | num_res_blocks: 2 60 | attn_resolutions: 61 | - 32 62 | dropout: 0.0 63 | lossconfig: 64 | target: torch.nn.Identity 65 | cond_stage_config: 66 | target: ldm.modules.encoders.modules.ClassEmbedder 67 | params: 68 | embed_dim: 512 69 | key: class_label 70 | data: 71 | target: main.DataModuleFromConfig 72 | params: 73 | batch_size: 64 74 | num_workers: 12 75 | wrap: false 76 | train: 77 | target: ldm.data.imagenet.ImageNetTrain 78 | params: 79 | config: 80 | size: 256 81 | validation: 82 | target: ldm.data.imagenet.ImageNetValidation 83 | params: 84 | config: 85 | size: 256 86 | 87 | 88 | lightning: 89 | callbacks: 90 | image_logger: 91 | target: main.ImageLogger 92 | params: 93 | batch_frequency: 5000 94 | max_images: 8 95 | increase_log_steps: False 96 | 97 | trainer: 98 | benchmark: True -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/cin256-v2.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 0.0001 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 0.0195 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: image 11 | cond_stage_key: class_label 12 | image_size: 64 13 | channels: 3 14 | cond_stage_trainable: true 15 | conditioning_key: crossattn 16 | monitor: val/loss 17 | use_ema: False 18 | 19 | unet_config: 20 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 21 | params: 22 | image_size: 64 23 | in_channels: 3 24 | out_channels: 3 25 | model_channels: 192 26 | attention_resolutions: 27 | - 8 28 | - 4 29 | - 2 30 | num_res_blocks: 2 31 | channel_mult: 32 | - 1 33 | - 2 34 | - 3 35 | - 5 36 | num_heads: 1 37 | use_spatial_transformer: true 38 | transformer_depth: 1 39 | context_dim: 512 40 | 41 | first_stage_config: 42 | target: ldm.models.autoencoder.VQModelInterface 43 | params: 44 | embed_dim: 3 45 | n_embed: 8192 46 | ddconfig: 47 | double_z: false 48 | z_channels: 3 49 | resolution: 256 50 | in_channels: 3 51 | out_ch: 3 52 | ch: 128 53 | ch_mult: 54 | - 1 55 | - 2 56 | - 4 57 | num_res_blocks: 2 58 | attn_resolutions: [] 59 | dropout: 0.0 60 | lossconfig: 61 | target: torch.nn.Identity 62 | 63 | cond_stage_config: 64 | target: ldm.modules.encoders.modules.ClassEmbedder 65 | params: 66 | n_classes: 1001 67 | embed_dim: 512 68 | key: class_label 69 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/ffhq-ldm-vq-4.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 2.0e-06 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 0.0195 7 | num_timesteps_cond: 1 8 |
log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: image 11 | image_size: 64 12 | channels: 3 13 | monitor: val/loss_simple_ema 14 | unet_config: 15 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 16 | params: 17 | image_size: 64 18 | in_channels: 3 19 | out_channels: 3 20 | model_channels: 224 21 | attention_resolutions: 22 | # note: this isn't actually the resolution but 23 | # the downsampling factor, i.e. this corresponds to 24 | # attention on spatial resolution 8,16,32, as the 25 | # spatial resolution of the latents is 64 for f4 26 | - 8 27 | - 4 28 | - 2 29 | num_res_blocks: 2 30 | channel_mult: 31 | - 1 32 | - 2 33 | - 3 34 | - 4 35 | num_head_channels: 32 36 | first_stage_config: 37 | target: ldm.models.autoencoder.VQModelInterface 38 | params: 39 | embed_dim: 3 40 | n_embed: 8192 41 | ckpt_path: models/first_stage_models/vq-f4/model.ckpt 42 | ddconfig: 43 | double_z: false 44 | z_channels: 3 45 | resolution: 256 46 | in_channels: 3 47 | out_ch: 3 48 | ch: 128 49 | ch_mult: 50 | - 1 51 | - 2 52 | - 4 53 | num_res_blocks: 2 54 | attn_resolutions: [] 55 | dropout: 0.0 56 | lossconfig: 57 | target: torch.nn.Identity 58 | cond_stage_config: __is_unconditional__ 59 | data: 60 | target: main.DataModuleFromConfig 61 | params: 62 | batch_size: 42 63 | num_workers: 5 64 | wrap: false 65 | train: 66 | target: taming.data.faceshq.FFHQTrain 67 | params: 68 | size: 256 69 | validation: 70 | target: taming.data.faceshq.FFHQValidation 71 | params: 72 | size: 256 73 | 74 | 75 | lightning: 76 | callbacks: 77 | image_logger: 78 | target: main.ImageLogger 79 | params: 80 | batch_frequency: 5000 81 | max_images: 8 82 | increase_log_steps: False 83 | 84 | trainer: 85 | benchmark: True -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 5.0e-5 # set to target_lr by starting main.py with '--scale_lr False' 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 0.0155 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | loss_type: l1 11 | first_stage_key: "image" 12 | cond_stage_key: "image" 13 | image_size: 32 14 | channels: 4 15 | cond_stage_trainable: False 16 | concat_mode: False 17 | scale_by_std: True 18 | monitor: 'val/loss_simple_ema' 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [10000] 24 | cycle_lengths: [10000000000000] 25 | f_start: [1.e-6] 26 | f_max: [1.] 27 | f_min: [ 1.]
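      # Worked example of this warmup schedule (a sketch, assuming the linear
      # ramp implemented by LambdaLinearScheduler in ldm/lr_scheduler.py):
      # for n < warm_up_steps the lr multiplier is
      #   f(n) = f_start + (f_max - f_start) * n / warm_up_steps,
      # so with f_start=1e-6, f_max=1.0 and 10000 warmup steps, step 5000 runs
      # at roughly 0.5 * base_learning_rate; because f_min = f_max = 1.0 here,
      # the multiplier then stays at 1.0 for the rest of the cycle.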
28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 33 | in_channels: 4 34 | out_channels: 4 35 | model_channels: 192 36 | attention_resolutions: [ 1, 2, 4, 8 ] # 32, 16, 8, 4 37 | num_res_blocks: 2 38 | channel_mult: [ 1,2,2,4,4 ] # 32, 16, 8, 4, 2 39 | num_heads: 8 40 | use_scale_shift_norm: True 41 | resblock_updown: True 42 | 43 | first_stage_config: 44 | target: ldm.models.autoencoder.AutoencoderKL 45 | params: 46 | embed_dim: 4 47 | monitor: "val/rec_loss" 48 | ckpt_path: "models/first_stage_models/kl-f8/model.ckpt" 49 | ddconfig: 50 | double_z: True 51 | z_channels: 4 52 | resolution: 256 53 | in_channels: 3 54 | out_ch: 3 55 | ch: 128 56 | ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1 57 | num_res_blocks: 2 58 | attn_resolutions: [ ] 59 | dropout: 0.0 60 | lossconfig: 61 | target: torch.nn.Identity 62 | 63 | cond_stage_config: "__is_unconditional__" 64 | 65 | data: 66 | target: main.DataModuleFromConfig 67 | params: 68 | batch_size: 96 69 | num_workers: 5 70 | wrap: False 71 | train: 72 | target: ldm.data.lsun.LSUNChurchesTrain 73 | params: 74 | size: 256 75 | validation: 76 | target: ldm.data.lsun.LSUNChurchesValidation 77 | params: 78 | size: 256 79 | 80 | lightning: 81 | callbacks: 82 | image_logger: 83 | target: main.ImageLogger 84 | params: 85 | batch_frequency: 5000 86 | max_images: 8 87 | increase_log_steps: False 88 | 89 | 90 | trainer: 91 | benchmark: True -------------------------------------------------------------------------------- /DCDP-LDM/configs/latent-diffusion/txt2img-1p4B-eval.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 5.0e-05 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.012 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: image 11 | cond_stage_key: caption 12 | image_size: 32 13 | channels: 4 14 | cond_stage_trainable: true 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | use_ema: False 19 | 20 | unet_config: 21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 22 | params: 23 | image_size: 32 24 | in_channels: 4 25 | out_channels: 4 26 | model_channels: 320 27 | attention_resolutions: 28 | - 4 29 | - 2 30 | - 1 31 | num_res_blocks: 2 32 | channel_mult: 33 | - 1 34 | - 2 35 | - 4 36 | - 4 37 | num_heads: 8 38 | use_spatial_transformer: true 39 | transformer_depth: 1 40 | context_dim: 1280 41 | use_checkpoint: true 42 | legacy: False 43 | 44 | first_stage_config: 45 | target: ldm.models.autoencoder.AutoencoderKL 46 | params: 47 | embed_dim: 4 48 | monitor: val/rec_loss 49 | ddconfig: 50 | double_z: true 51 | z_channels: 4 52 | resolution: 256 53 | in_channels: 3 54 | out_ch: 3 55 | ch: 128 56 | ch_mult: 57 | - 1 58 | - 2 59 | - 4 60 | - 4 61 | num_res_blocks: 2 62 | attn_resolutions: [] 63 | dropout: 0.0 64 | lossconfig: 65 | target: torch.nn.Identity 66 | 67 | cond_stage_config: 68 | target: ldm.modules.encoders.modules.BERTEmbedder 69 | params: 70 | n_embed: 1280 71 | n_layer: 32 72 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/retrieval-augmented-diffusion/768x768.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 0.0001 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.0015 6 | linear_end: 
0.015 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: jpg 11 | cond_stage_key: nix 12 | image_size: 48 13 | channels: 16 14 | cond_stage_trainable: false 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_by_std: false 18 | scale_factor: 0.22765929 19 | unet_config: 20 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 21 | params: 22 | image_size: 48 23 | in_channels: 16 24 | out_channels: 16 25 | model_channels: 448 26 | attention_resolutions: 27 | - 4 28 | - 2 29 | - 1 30 | num_res_blocks: 2 31 | channel_mult: 32 | - 1 33 | - 2 34 | - 3 35 | - 4 36 | use_scale_shift_norm: false 37 | resblock_updown: false 38 | num_head_channels: 32 39 | use_spatial_transformer: true 40 | transformer_depth: 1 41 | context_dim: 768 42 | use_checkpoint: true 43 | first_stage_config: 44 | target: ldm.models.autoencoder.AutoencoderKL 45 | params: 46 | monitor: val/rec_loss 47 | embed_dim: 16 48 | ddconfig: 49 | double_z: true 50 | z_channels: 16 51 | resolution: 256 52 | in_channels: 3 53 | out_ch: 3 54 | ch: 128 55 | ch_mult: 56 | - 1 57 | - 1 58 | - 2 59 | - 2 60 | - 4 61 | num_res_blocks: 2 62 | attn_resolutions: 63 | - 16 64 | dropout: 0.0 65 | lossconfig: 66 | target: torch.nn.Identity 67 | cond_stage_config: 68 | target: torch.nn.Identity -------------------------------------------------------------------------------- /DCDP-LDM/configs/stable-diffusion/v1-inference.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 1.0e-04 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.0120 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: "jpg" 11 | cond_stage_key: "txt" 12 | image_size: 64 13 | channels: 4 14 | cond_stage_trainable: false # Note: different from the one we trained before 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | use_ema: False 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [ 10000 ] 24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 25 | f_start: [ 1.e-6 ] 26 | f_max: [ 1. ] 27 | f_min: [ 1. 
] 28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 # unused 33 | in_channels: 4 34 | out_channels: 4 35 | model_channels: 320 36 | attention_resolutions: [ 4, 2, 1 ] 37 | num_res_blocks: 2 38 | channel_mult: [ 1, 2, 4, 4 ] 39 | num_heads: 8 40 | use_spatial_transformer: True 41 | transformer_depth: 1 42 | context_dim: 768 43 | use_checkpoint: True 44 | legacy: False 45 | 46 | first_stage_config: 47 | target: ldm.models.autoencoder.AutoencoderKL 48 | params: 49 | embed_dim: 4 50 | monitor: val/rec_loss 51 | ddconfig: 52 | double_z: true 53 | z_channels: 4 54 | resolution: 256 55 | in_channels: 3 56 | out_ch: 3 57 | ch: 128 58 | ch_mult: 59 | - 1 60 | - 2 61 | - 4 62 | - 4 63 | num_res_blocks: 2 64 | attn_resolutions: [] 65 | dropout: 0.0 66 | lossconfig: 67 | target: torch.nn.Identity 68 | 69 | cond_stage_config: 70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder 71 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/gaussian_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: resample 3 | method: ps # Do not touch 4 | params: 5 | scale: 0.5 6 | 7 | data: 8 | name: celeb 9 | root: ./data/samples/ 10 | 11 | measurement: 12 | operator: 13 | name: gaussian_blur 14 | kernel_size: 61 15 | intensity: 3.0 16 | 17 | noise: 18 | name: gaussian 19 | sigma: 0.0 20 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/inpainting_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: resample 3 | method: ps # Do not touch 4 | params: 5 | scale: 0.5 6 | 7 | data: 8 | name: celeb 9 | root: ./data/samples/ 10 | 11 | measurement: 12 | operator: 13 | name: inpainting 14 | mask_opt: 15 | mask_type: random 16 | mask_prob_range: !!python/tuple [0.7, 0.71] 17 | image_size: 256 18 | 19 | 20 | noise: 21 | name: gaussian 22 | sigma: 0 23 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/motion_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: resample 3 | method: ps # Do not touch 4 | params: 5 | scale: 1.0 6 | 7 | data: 8 | name: celeb 9 | root: ./data/samples/ 10 | 11 | measurement: 12 | operator: 13 | name: motion_blur 14 | kernel_size: 61 15 | intensity: 0.5 16 | 17 | noise: 18 | name: gaussian 19 | sigma: 0.0 20 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/nonlinear_deblur_ImageNet_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: dps 3 | method: ps # Do not touch 4 | params: 5 | scale: 0.3 6 | 7 | data: 8 | name: ImageNet 9 | root: ./data/ImageNet_samples/ 10 | 11 | measurement: 12 | operator: 13 | name: nonlinear_blur 14 | opt_yml_path: ./bkse/options/generate_blur/default.yml 15 | 16 | noise: 17 | name: gaussian 18 | sigma: 0.05 19 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/nonlinear_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: dps 3 | method: ps # Do not touch 4 | params: 5 | scale: 0.3 6 | 7 | data: 8 | name: celeb 9 | root: 
./data/samples/ 10 | 11 | measurement: 12 | operator: 13 | name: nonlinear_blur 14 | opt_yml_path: ./bkse/options/generate_blur/default.yml 15 | 16 | noise: 17 | name: gaussian 18 | sigma: 0.05 19 | -------------------------------------------------------------------------------- /DCDP-LDM/configs/tasks/super_resolution_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | main_sampler: resample 3 | method: ps # Do not touch 4 | params: 5 | scale: 0.1 # Try changing this 6 | 7 | data: 8 | name: ffhq 9 | root: ./data/samples/ 10 | 11 | measurement: 12 | operator: 13 | name: super_resolution 14 | in_shape: !!python/tuple [1, 3, 256, 256] 15 | scale_factor: 4 16 | 17 | noise: 18 | name: gaussian 19 | sigma: 0 20 | -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000001.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000001.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000002.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000002.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000003.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000003.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000004.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000004.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000005.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000005.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000006.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000006.JPEG -------------------------------------------------------------------------------- 
/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000007.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000007.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000008.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000008.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000009.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000009.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000010.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/ImageNet_samples/ILSVRC2012_val_00000010.JPEG -------------------------------------------------------------------------------- /DCDP-LDM/data/__pycache__/dataloader.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/__pycache__/dataloader.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00000.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00001.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00002.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00003.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00003.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00004.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00005.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00006.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00007.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00008.png -------------------------------------------------------------------------------- /DCDP-LDM/data/samples/00009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/data/samples/00009.png -------------------------------------------------------------------------------- /DCDP-LDM/ldm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/__pycache__/util.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/__pycache__/util.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/data/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/data/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/data/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/data/__pycache__/lsun.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/data/__pycache__/lsun.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/data/base.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset 3 | 4 | 5 | class Txt2ImgIterableBaseDataset(IterableDataset): 6 | ''' 7 | Define an interface to make the IterableDatasets for text2img data chainable 8 | ''' 9 | def __init__(self, num_records=0, valid_ids=None, size=256): 10 | super().__init__() 11 | self.num_records = num_records 12 | self.valid_ids = valid_ids 13 | self.sample_ids = valid_ids 14 | self.size = size 15 | 16 | print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') 17 | 18 | def __len__(self): 19 | return self.num_records 20 | 21 | @abstractmethod 22 | def __iter__(self): 23 | pass -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/__pycache__/autoencoder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/__pycache__/autoencoder.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/__pycache__/ddim.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/__pycache__/ddim.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/__pycache__/ddpm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/__pycache__/ddpm.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/__pycache__/plms.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/__pycache__/plms.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/dpm_solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .sampler import DPMSolverSampler -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/__pycache__/attention.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/__pycache__/attention.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/__pycache__/ema.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/__pycache__/ema.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/diffusionmodules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/diffusionmodules/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/model.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/openaimodel.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/openaimodel.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/diffusionmodules/__pycache__/util.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/distributions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/distributions/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/distributions/__pycache__/__init__.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/distributions/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/distributions/__pycache__/distributions.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/distributions/__pycache__/distributions.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/encoders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/encoders/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/image_degradation/__init__.py: -------------------------------------------------------------------------------- 1 | from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr 2 | from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light 3 | -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/image_degradation/utils/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm/modules/image_degradation/utils/test.png -------------------------------------------------------------------------------- /DCDP-LDM/ldm/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator -------------------------------------------------------------------------------- /DCDP-LDM/ldm_inverse/__pycache__/condition_methods.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm_inverse/__pycache__/condition_methods.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm_inverse/__pycache__/measurements.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/ldm_inverse/__pycache__/measurements.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/ldm_inverse/condition_methods.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import torch 3 | 4 | __CONDITIONING_METHOD__ = {} 5 | 6 | def 
register_conditioning_method(name: str): 7 | def wrapper(cls): 8 | if __CONDITIONING_METHOD__.get(name, None): 9 | raise NameError(f"Name {name} is already registered!") 10 | __CONDITIONING_METHOD__[name] = cls 11 | return cls 12 | return wrapper 13 | 14 | def get_conditioning_method(name: str, model, operator, noiser, **kwargs): 15 | if __CONDITIONING_METHOD__.get(name, None) is None: 16 | raise NameError(f"Name {name} is not defined!") 17 | return __CONDITIONING_METHOD__[name](model=model, operator=operator, noiser=noiser, **kwargs) 18 | 19 | 20 | class ConditioningMethod(ABC): 21 | def __init__(self, model, operator, noiser, **kwargs): 22 | self.model = model 23 | self.operator = operator 24 | self.noiser = noiser 25 | 26 | def project(self, data, noisy_measurement, **kwargs): 27 | return self.operator.project(data=data, measurement=noisy_measurement, **kwargs) 28 | 29 | def grad_and_value(self, x_prev, x_0_hat, measurement, **kwargs): 30 | if self.noiser.__name__ == 'gaussian': 31 | difference = measurement - self.operator.forward(self.model.differentiable_decode_first_stage( x_0_hat ), **kwargs) 32 | norm = torch.linalg.norm(difference) 33 | norm_grad = torch.autograd.grad(outputs=norm, inputs=x_prev)[0] 34 | elif self.noiser.__name__ == 'poisson': 35 | Ax = self.operator.forward(self.model.differentiable_decode_first_stage(x_0_hat), **kwargs) 36 | difference = measurement-Ax 37 | norm = torch.linalg.norm(difference) / measurement.abs() 38 | norm = norm.mean() 39 | norm_grad = torch.autograd.grad(outputs=norm, inputs=x_prev)[0] 40 | 41 | else: 42 | raise NotImplementedError 43 | 44 | return norm_grad, norm 45 | 46 | 47 | @abstractmethod 48 | def conditioning(self, x_t, measurement, noisy_measurement=None, **kwargs): 49 | pass 50 | 51 | 52 | @register_conditioning_method(name='ps') 53 | class PosteriorSampling(ConditioningMethod): 54 | def __init__(self, model, operator, noiser, **kwargs): 55 | super().__init__(model, operator, noiser) 56 | self.operator = operator 57 | 58 | def conditioning(self, x_prev, x_t, x_0_hat, measurement, scale=None, **kwargs): 59 | if scale is None: 60 | scale = 0.3 61 | 62 | norm_grad, norm = self.grad_and_value(x_prev=x_prev, x_0_hat=x_0_hat, measurement=measurement, **kwargs) 63 | x_t -= norm_grad * scale 64 | return x_t, norm 65 | 66 | -------------------------------------------------------------------------------- /DCDP-LDM/model_loader.py: -------------------------------------------------------------------------------- 1 | from ldm.util import instantiate_from_config 2 | import yaml 3 | import torch 4 | 5 | def load_yaml(file_path: str) -> dict: 6 | with open(file_path) as f: 7 | config = yaml.load(f, Loader=yaml.FullLoader) 8 | return config 9 | 10 | 11 | def load_model_from_config(config, ckpt, train=False): 12 | print(f"Loading model from {ckpt}") 13 | pl_sd = torch.load(ckpt)#, map_location="cpu") 14 | sd = pl_sd["state_dict"] 15 | model = instantiate_from_config(config.model) 16 | _, _ = model.load_state_dict(sd, strict=False) 17 | 18 | model.cuda() 19 | 20 | if train: 21 | model.train() 22 | else: 23 | model.eval() 24 | 25 | return model -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/README.md: -------------------------------------------------------------------------------- 1 | # MotionBlur 2 | 3 | Generate authentic motion blur kernels (point spread functions) and apply them to images en masse. 
4 | 
5 | Very efficient thanks to numpy's FFT-based convolution and the optimised procedural generation of kernels. Intuitive API. (A short convolution sketch appears at the end of this README.)
6 | 
7 | # Description
8 | 
9 | After installation, import the `Kernel` class from `motionblur.py` and use it to your liking.
10 | 
11 | Here is how:
12 | 
13 | Initialise a `Kernel` instance with the parameters `size` (size of the kernel matrix in pixels, as a tuple of integers) and `intensity`.
14 | 
15 | Intensity determines how non-linear and shaken the motion blur is. It must have a value between 0 and 1.
16 | Zero gives a linear motion; 1 gives a highly non-linear and often self-intersecting motion.
17 | 
18 | ![Effect of intensity](./intensity.png)
19 | 
20 | Once a kernel is initialised, you can utilise a range of properties to make use of it.
21 | 
22 | ```python
23 | from motionblur import Kernel
24 | kernel = Kernel(size=(100, 100), intensity=0.2)  # initialise kernel
25 | 
26 | # Display kernel
27 | kernel.displayKernel()
28 | 
29 | # Get kernel as numpy array
30 | kernel.kernelMatrix
31 | 
32 | # Save kernel as image. (Do not show kernel, just save.)
33 | kernel.displayKernel(save_to="./my_file.png", show=False)
34 | 
35 | import PIL.Image  # load an image, or simply use an image path
36 | image1_path = "./image1.png"
37 | image2 = PIL.Image.open("./image2.png")
38 | 
39 | # apply motion blur (returns PIL.Image instance of blurred image)
40 | blurred1 = kernel.applyTo(image1_path)
41 | 
42 | blurred2 = kernel.applyTo(image2)
43 | 
44 | # if you need the dimension of the blurred image to be the same
45 | # as the original image, pass `keep_image_dim=True`
46 | blurred_same = kernel.applyTo(image2, keep_image_dim=True)
47 | 
48 | # show result
49 | blurred1.show()
50 | 
51 | # or save to file
52 | blurred2.save("./output2.png", "PNG")
53 | ```
54 | 
55 | 
56 | # Installation
57 | 
58 | In order to set up the necessary environment:
59 | 
60 | 1. create an environment `MotionBlur` with the help of conda,
61 | ```
62 | conda env create -f environment.yaml
63 | ```
64 | 2. activate the new environment with
65 | ```
66 | conda activate MotionBlur
67 | ```
68 | 
69 | Or simply install numpy, pillow and scipy manually.
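# Convolution sketch

The snippet below is not part of the library; it is a minimal sketch of the FFT-based convolution that `applyTo` relies on, written against `kernel.kernelMatrix` (the 2-D numpy array exposed above) and scipy, which is already listed in `environment.yaml`. Paths and the intensity value are placeholders.

```python
import numpy as np
import PIL.Image
from scipy.signal import fftconvolve
from motionblur import Kernel

kernel = Kernel(size=(100, 100), intensity=0.2)
img = np.asarray(PIL.Image.open("./image1.png").convert("RGB"), dtype=np.float32)

# convolve each colour channel with the same point spread function
blurred = np.stack(
    [fftconvolve(img[..., c], kernel.kernelMatrix, mode="same") for c in range(3)],
    axis=-1,
)
PIL.Image.fromarray(np.clip(blurred, 0, 255).astype(np.uint8)).show()
```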
70 | -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/__pycache__/motionblur.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/__pycache__/motionblur.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/environment.yaml: -------------------------------------------------------------------------------- 1 | name: MotionBlur 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - python>=3.6 7 | - pip 8 | - numpy 9 | - scipy 10 | - Pillow 11 | 12 | # for development only (could also be kept in a separate environment file) 13 | - pytest 14 | - pytest-cov 15 | - tox 16 | - pre_commit 17 | - nbdime 18 | - nbstripout 19 | - sphinx 20 | - recommonmark 21 | -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/example_kernel/kernel0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/example_kernel/kernel0.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/example_kernel/kernel100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/example_kernel/kernel100.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/example_kernel/kernel25.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/example_kernel/kernel25.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/example_kernel/kernel50.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/example_kernel/kernel50.png 
-------------------------------------------------------------------------------- /DCDP-LDM/motionblur/example_kernel/kernel75.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/example_kernel/kernel75.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/images/flag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/images/flag.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/images/flagBLURRED.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/images/flagBLURRED.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/images/moon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/images/moon.png -------------------------------------------------------------------------------- /DCDP-LDM/motionblur/intensity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/motionblur/intensity.png -------------------------------------------------------------------------------- /DCDP-LDM/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/scripts/__init__.py -------------------------------------------------------------------------------- /DCDP-LDM/scripts/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/scripts/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/scripts/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/scripts/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/scripts/download_first_stages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: 4 | # ./scripts/download_first_stages.sh kl-f4 kl-f8 5 | 6 | 
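# Each name below maps to a zip archive at $DOWNLOAD_PATH/<name>.zip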
MODELS=("kl-f4" "kl-f8" "kl-f16" "kl-f32" "vq-f4" "vq-f4-noattn" "vq-f8" "vq-f8-n256" "vq-f16") 7 | DOWNLOAD_PATH="https://ommer-lab.com/files/latent-diffusion" 8 | 9 | function download_first_stages() { 10 | local list=("$@") 11 | 12 | for arg in "${list[@]}"; do 13 | for model in "${MODELS[@]}"; do 14 | if [[ "$model" == "$arg" ]]; then 15 | echo "Downloading $model" 16 | model_dir="./models/first_stage_models/$arg" 17 | if [ ! -d "$model_dir" ]; then 18 | mkdir -p "$model_dir" 19 | echo "Directory created: $model_dir" 20 | else 21 | echo "Directory already exists: $model_dir" 22 | fi 23 | wget -O "$model_dir/model.zip" "$DOWNLOAD_PATH/$arg.zip" 24 | unzip -o "$model_dir/model.zip" -d "$model_dir" 25 | rm -rf "$model_dir/model.zip" 26 | fi 27 | done 28 | done 29 | } 30 | 31 | download_first_stages "$@" 32 | -------------------------------------------------------------------------------- /DCDP-LDM/scripts/download_models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: 4 | # ./scripts/download_models.sh celeba ffhq 5 | 6 | MODELS=("celeba" "ffhq" "lsun_churches" "lsun_bedrooms" "text2img" "cin" "semantic_synthesis" "semantic_synthesis256" "sr_bsr" "layout2img_model" "inpainting_big") 7 | DOWNLOAD_PATH="https://ommer-lab.com/files/latent-diffusion" 8 | 9 | function download_models() { 10 | local list=("$@") 11 | 12 | for arg in "${list[@]}"; do 13 | for model in "${MODELS[@]}"; do 14 | if [[ "$model" == "$arg" ]]; then 15 | echo "Downloading $model" 16 | model_dir="./models/ldm/$arg" 17 | if [ ! -d "$model_dir" ]; then 18 | mkdir -p "$model_dir" 19 | echo "Directory created: $model_dir" 20 | else 21 | echo "Directory already exists: $model_dir" 22 | fi 23 | wget -O "$model_dir/model.zip" "$DOWNLOAD_PATH/$arg.zip" 24 | unzip -o "$model_dir/model.zip" -d "$model_dir" 25 | rm -rf "$model_dir/model.zip" 26 | fi 27 | done 28 | done 29 | } 30 | 31 | download_models "$@" 32 | -------------------------------------------------------------------------------- /DCDP-LDM/scripts/tests/test_watermark.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import fire 3 | from imwatermark import WatermarkDecoder 4 | 5 | 6 | def testit(img_path): 7 | bgr = cv2.imread(img_path) 8 | decoder = WatermarkDecoder('bytes', 136) 9 | watermark = decoder.decode(bgr, 'dwtDct') 10 | try: 11 | dec = watermark.decode('utf-8') 12 | except: 13 | dec = "null" 14 | print(dec) 15 | 16 | 17 | if __name__ == "__main__": 18 | fire.Fire(testit) -------------------------------------------------------------------------------- /DCDP-LDM/src/clip/clip.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: clip 3 | Version: 1.0 4 | Author: OpenAI 5 | Provides-Extra: dev 6 | License-File: LICENSE 7 | -------------------------------------------------------------------------------- /DCDP-LDM/src/clip/clip/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip import * 2 | -------------------------------------------------------------------------------- /DCDP-LDM/src/clip/hubconf.py: -------------------------------------------------------------------------------- 1 | from clip.clip import tokenize as _tokenize, load as _load, available_models as _available_models 2 | import re 3 | import string 4 | 5 | dependencies = ["torch", "torchvision", "ftfy", "regex", "tqdm"] 6 | 7 | # For 
compatibility (cannot include special characters in function name) 8 | model_functions = { model: re.sub(f'[{string.punctuation}]', '_', model) for model in _available_models()} 9 | 10 | def _create_hub_entrypoint(model): 11 | def entrypoint(**kwargs): 12 | return _load(model, **kwargs) 13 | 14 | entrypoint.__doc__ = f"""Loads the {model} CLIP model 15 | 16 | Parameters 17 | ---------- 18 | device : Union[str, torch.device] 19 | The device to put the loaded model 20 | 21 | jit : bool 22 | Whether to load the optimized JIT model or more hackable non-JIT model (default). 23 | 24 | download_root: str 25 | path to download the model files; by default, it uses "~/.cache/clip" 26 | 27 | Returns 28 | ------- 29 | model : torch.nn.Module 30 | The {model} CLIP model 31 | 32 | preprocess : Callable[[PIL.Image], torch.Tensor] 33 | A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input 34 | """ 35 | return entrypoint 36 | 37 | def tokenize(): 38 | return _tokenize 39 | 40 | _entrypoints = {model_functions[model]: _create_hub_entrypoint(model) for model in _available_models()} 41 | 42 | globals().update(_entrypoints) -------------------------------------------------------------------------------- /DCDP-LDM/src/clip/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pkg_resources 4 | from setuptools import setup, find_packages 5 | 6 | setup( 7 | name="clip", 8 | py_modules=["clip"], 9 | version="1.0", 10 | description="", 11 | author="OpenAI", 12 | packages=find_packages(exclude=["tests*"]), 13 | install_requires=[ 14 | str(r) 15 | for r in pkg_resources.parse_requirements( 16 | open(os.path.join(os.path.dirname(__file__), "requirements.txt")) 17 | ) 18 | ], 19 | include_package_data=True, 20 | extras_require={'dev': ['pytest']}, 21 | ) 22 | -------------------------------------------------------------------------------- /DCDP-LDM/src/clip/tests/test_consistency.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import torch 4 | from PIL import Image 5 | 6 | import clip 7 | 8 | 9 | @pytest.mark.parametrize('model_name', clip.available_models()) 10 | def test_consistency(model_name): 11 | device = "cpu" 12 | jit_model, transform = clip.load(model_name, device=device, jit=True) 13 | py_model, _ = clip.load(model_name, device=device, jit=False) 14 | 15 | image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device) 16 | text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device) 17 | 18 | with torch.no_grad(): 19 | logits_per_image, _ = jit_model(image, text) 20 | jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy() 21 | 22 | logits_per_image, _ = py_model(image, text) 23 | py_probs = logits_per_image.softmax(dim=-1).cpu().numpy() 24 | 25 | assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1) 26 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/scripts/extract_submodel.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys 3 | 4 | if __name__ == "__main__": 5 | inpath = sys.argv[1] 6 | outpath = sys.argv[2] 7 | submodel = "cond_stage_model" 8 | if len(sys.argv) > 3: 9 | submodel = sys.argv[3] 10 | 11 | print("Extracting {} from {} to {}.".format(submodel, inpath, outpath)) 12 | 13 | sd = torch.load(inpath, map_location="cpu") 14 | new_sd = {"state_dict": 
dict((k.split(".", 1)[-1],v) 15 | for k,v in sd["state_dict"].items() 16 | if k.startswith("cond_stage_model"))} 17 | torch.save(new_sd, outpath) 18 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='taming-transformers', 5 | version='0.0.1', 6 | description='Taming Transformers for High-Resolution Image Synthesis', 7 | packages=find_packages(), 8 | install_requires=[ 9 | 'torch', 10 | 'numpy', 11 | 'tqdm', 12 | ], 13 | ) 14 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/data/base.py: -------------------------------------------------------------------------------- 1 | import bisect 2 | import numpy as np 3 | import albumentations 4 | from PIL import Image 5 | from torch.utils.data import Dataset, ConcatDataset 6 | 7 | 8 | class ConcatDatasetWithIndex(ConcatDataset): 9 | """Modified from original pytorch code to return dataset idx""" 10 | def __getitem__(self, idx): 11 | if idx < 0: 12 | if -idx > len(self): 13 | raise ValueError("absolute value of index should not exceed dataset length") 14 | idx = len(self) + idx 15 | dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) 16 | if dataset_idx == 0: 17 | sample_idx = idx 18 | else: 19 | sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] 20 | return self.datasets[dataset_idx][sample_idx], dataset_idx 21 | 22 | 23 | class ImagePaths(Dataset): 24 | def __init__(self, paths, size=None, random_crop=False, labels=None): 25 | self.size = size 26 | self.random_crop = random_crop 27 | 28 | self.labels = dict() if labels is None else labels 29 | self.labels["file_path_"] = paths 30 | self._length = len(paths) 31 | 32 | if self.size is not None and self.size > 0: 33 | self.rescaler = albumentations.SmallestMaxSize(max_size = self.size) 34 | if not self.random_crop: 35 | self.cropper = albumentations.CenterCrop(height=self.size,width=self.size) 36 | else: 37 | self.cropper = albumentations.RandomCrop(height=self.size,width=self.size) 38 | self.preprocessor = albumentations.Compose([self.rescaler, self.cropper]) 39 | else: 40 | self.preprocessor = lambda **kwargs: kwargs 41 | 42 | def __len__(self): 43 | return self._length 44 | 45 | def preprocess_image(self, image_path): 46 | image = Image.open(image_path) 47 | if not image.mode == "RGB": 48 | image = image.convert("RGB") 49 | image = np.array(image).astype(np.uint8) 50 | image = self.preprocessor(image=image)["image"] 51 | image = (image/127.5 - 1.0).astype(np.float32) 52 | return image 53 | 54 | def __getitem__(self, i): 55 | example = dict() 56 | example["image"] = self.preprocess_image(self.labels["file_path_"][i]) 57 | for k in self.labels: 58 | example[k] = self.labels[k][i] 59 | return example 60 | 61 | 62 | class NumpyPaths(ImagePaths): 63 | def preprocess_image(self, image_path): 64 | image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024 65 | image = np.transpose(image, (1,2,0)) 66 | image = Image.fromarray(image, mode="RGB") 67 | image = np.array(image).astype(np.uint8) 68 | image = self.preprocessor(image=image)["image"] 69 | image = (image/127.5 - 1.0).astype(np.float32) 70 | return image 71 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/data/custom.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import albumentations 4 | from torch.utils.data import Dataset 5 | 6 | from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex 7 | 8 | 9 | class CustomBase(Dataset): 10 | def __init__(self, *args, **kwargs): 11 | super().__init__() 12 | self.data = None 13 | 14 | def __len__(self): 15 | return len(self.data) 16 | 17 | def __getitem__(self, i): 18 | example = self.data[i] 19 | return example 20 | 21 | 22 | 23 | class CustomTrain(CustomBase): 24 | def __init__(self, size, training_images_list_file): 25 | super().__init__() 26 | with open(training_images_list_file, "r") as f: 27 | paths = f.read().splitlines() 28 | self.data = ImagePaths(paths=paths, size=size, random_crop=False) 29 | 30 | 31 | class CustomTest(CustomBase): 32 | def __init__(self, size, test_images_list_file): 33 | super().__init__() 34 | with open(test_images_list_file, "r") as f: 35 | paths = f.read().splitlines() 36 | self.data = ImagePaths(paths=paths, size=size, random_crop=False) 37 | 38 | 39 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/data/helper_types.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Tuple, Optional, NamedTuple, Union 2 | from PIL.Image import Image as pil_image 3 | from torch import Tensor 4 | 5 | try: 6 | from typing import Literal 7 | except ImportError: 8 | from typing_extensions import Literal 9 | 10 | Image = Union[Tensor, pil_image] 11 | BoundingBox = Tuple[float, float, float, float] # x0, y0, w, h 12 | CropMethodType = Literal['none', 'random', 'center', 'random-2d'] 13 | SplitType = Literal['train', 'validation', 'test'] 14 | 15 | 16 | class ImageDescription(NamedTuple): 17 | id: int 18 | file_name: str 19 | original_size: Tuple[int, int] # w, h 20 | url: Optional[str] = None 21 | license: Optional[int] = None 22 | coco_url: Optional[str] = None 23 | date_captured: Optional[str] = None 24 | flickr_url: Optional[str] = None 25 | flickr_id: Optional[str] = None 26 | coco_id: Optional[str] = None 27 | 28 | 29 | class Category(NamedTuple): 30 | id: str 31 | super_category: Optional[str] 32 | name: str 33 | 34 | 35 | class Annotation(NamedTuple): 36 | area: float 37 | image_id: str 38 | bbox: BoundingBox 39 | category_no: int 40 | category_id: str 41 | id: Optional[int] = None 42 | source: Optional[str] = None 43 | confidence: Optional[float] = None 44 | is_group_of: Optional[bool] = None 45 | is_truncated: Optional[bool] = None 46 | is_occluded: Optional[bool] = None 47 | is_depiction: Optional[bool] = None 48 | is_inside: Optional[bool] = None 49 | segmentation: Optional[Dict] = None 50 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class LambdaWarmUpCosineScheduler: 5 | """ 6 | note: use with a base_lr of 1.0 7 | """ 8 | def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): 9 | self.lr_warm_up_steps = warm_up_steps 10 | self.lr_start = lr_start 11 | self.lr_min = lr_min 12 | self.lr_max = lr_max 13 | self.lr_max_decay_steps = max_decay_steps 14 | self.last_lr = 0. 
15 | self.verbosity_interval = verbosity_interval 16 | 17 | def schedule(self, n): 18 | if self.verbosity_interval > 0: 19 | if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") 20 | if n < self.lr_warm_up_steps: 21 | lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start 22 | self.last_lr = lr 23 | return lr 24 | else: 25 | t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) 26 | t = min(t, 1.0) 27 | lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( 28 | 1 + np.cos(t * np.pi)) 29 | self.last_lr = lr 30 | return lr 31 | 32 | def __call__(self, n): 33 | return self.schedule(n) 34 | 35 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/models/dummy_cond_stage.py: -------------------------------------------------------------------------------- 1 | from torch import Tensor 2 | 3 | 4 | class DummyCondStage: 5 | def __init__(self, conditional_key): 6 | self.conditional_key = conditional_key 7 | self.train = None 8 | 9 | def eval(self): 10 | return self 11 | 12 | @staticmethod 13 | def encode(c: Tensor): 14 | return c, None, (None, None, c) 15 | 16 | @staticmethod 17 | def decode(c: Tensor): 18 | return c 19 | 20 | @staticmethod 21 | def to_rgb(c: Tensor): 22 | return c 23 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/modules/discriminator/model.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import torch.nn as nn 3 | 4 | 5 | from taming.modules.util import ActNorm 6 | 7 | 8 | def weights_init(m): 9 | classname = m.__class__.__name__ 10 | if classname.find('Conv') != -1: 11 | nn.init.normal_(m.weight.data, 0.0, 0.02) 12 | elif classname.find('BatchNorm') != -1: 13 | nn.init.normal_(m.weight.data, 1.0, 0.02) 14 | nn.init.constant_(m.bias.data, 0) 15 | 16 | 17 | class NLayerDiscriminator(nn.Module): 18 | """Defines a PatchGAN discriminator as in Pix2Pix 19 | --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py 20 | """ 21 | def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False): 22 | """Construct a PatchGAN discriminator 23 | Parameters: 24 | input_nc (int) -- the number of channels in input images 25 | ndf (int) -- the number of filters in the last conv layer 26 | n_layers (int) -- the number of conv layers in the discriminator 27 | norm_layer -- normalization layer 28 | """ 29 | super(NLayerDiscriminator, self).__init__() 30 | if not use_actnorm: 31 | norm_layer = nn.BatchNorm2d 32 | else: 33 | norm_layer = ActNorm 34 | if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters 35 | use_bias = norm_layer.func != nn.BatchNorm2d 36 | else: 37 | use_bias = norm_layer != nn.BatchNorm2d 38 | 39 | kw = 4 40 | padw = 1 41 | sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] 42 | nf_mult = 1 43 | nf_mult_prev = 1 44 | for n in range(1, n_layers): # gradually increase the number of filters 45 | nf_mult_prev = nf_mult 46 | nf_mult = min(2 ** n, 8) 47 | sequence += [ 48 | nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), 49 | norm_layer(ndf * nf_mult), 50 | nn.LeakyReLU(0.2, True) 51 | ] 52 | 53 | nf_mult_prev = nf_mult 54 | nf_mult = min(2 ** n_layers, 8) 55 | sequence += [ 56 | 
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), 57 | norm_layer(ndf * nf_mult), 58 | nn.LeakyReLU(0.2, True) 59 | ] 60 | 61 | sequence += [ 62 | nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map 63 | self.main = nn.Sequential(*sequence) 64 | 65 | def forward(self, input): 66 | """Standard forward.""" 67 | return self.main(input) 68 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from taming.modules.losses.vqperceptual import DummyLoss 2 | 3 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/modules/losses/segmentation.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class BCELoss(nn.Module): 6 | def forward(self, prediction, target): 7 | loss = F.binary_cross_entropy_with_logits(prediction,target) 8 | return loss, {} 9 | 10 | 11 | class BCELossWithQuant(nn.Module): 12 | def __init__(self, codebook_weight=1.): 13 | super().__init__() 14 | self.codebook_weight = codebook_weight 15 | 16 | def forward(self, qloss, target, prediction, split): 17 | bce_loss = F.binary_cross_entropy_with_logits(prediction,target) 18 | loss = bce_loss + self.codebook_weight*qloss 19 | return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(), 20 | "{}/bce_loss".format(split): bce_loss.detach().mean(), 21 | "{}/quant_loss".format(split): qloss.detach().mean() 22 | } 23 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/modules/misc/coord.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class CoordStage(object): 4 | def __init__(self, n_embed, down_factor): 5 | self.n_embed = n_embed 6 | self.down_factor = down_factor 7 | 8 | def eval(self): 9 | return self 10 | 11 | def encode(self, c): 12 | """fake vqmodel interface""" 13 | assert 0.0 <= c.min() and c.max() <= 1.0 14 | b,ch,h,w = c.shape 15 | assert ch == 1 16 | 17 | c = torch.nn.functional.interpolate(c, scale_factor=1/self.down_factor, 18 | mode="area") 19 | c = c.clamp(0.0, 1.0) 20 | c = self.n_embed*c 21 | c_quant = c.round() 22 | c_ind = c_quant.to(dtype=torch.long) 23 | 24 | info = None, None, c_ind 25 | return c_quant, None, info 26 | 27 | def decode(self, c): 28 | c = c/self.n_embed 29 | c = torch.nn.functional.interpolate(c, scale_factor=self.down_factor, 30 | mode="nearest") 31 | return c 32 | -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming/modules/vqvae/__pycache__/quantize.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/src/taming-transformers/taming/modules/vqvae/__pycache__/quantize.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/src/taming-transformers/taming_transformers.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | 
Metadata-Version: 2.1 2 | Name: taming-transformers 3 | Version: 0.0.1 4 | Summary: Taming Transformers for High-Resolution Image Synthesis 5 | -------------------------------------------------------------------------------- /DCDP-LDM/util/__pycache__/fastmri_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/util/__pycache__/fastmri_utils.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/util/__pycache__/img_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/util/__pycache__/img_utils.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/util/__pycache__/resizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/DCDP-LDM/util/__pycache__/resizer.cpython-38.pyc -------------------------------------------------------------------------------- /DCDP-LDM/util/compute_metric.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from skimage.metrics import peak_signal_noise_ratio 3 | from tqdm import tqdm 4 | 5 | import matplotlib.pyplot as plt 6 | import lpips 7 | import numpy as np 8 | import torch 9 | 10 | 11 | device = 'cuda:0' 12 | loss_fn_vgg = lpips.LPIPS(net='vgg').to(device) 13 | 14 | task = 'SR' 15 | factor = 4 16 | sigma = 0.1 17 | scale = 1.0 18 | 19 | 20 | label_root = Path(f'/media/harry/tomo/FFHQ/256_1000') 21 | 22 | delta_recon_root = Path(f'./results/{task}/ffhq/{factor}/{sigma}/ps/{scale}/recon') 23 | normal_recon_root = Path(f'./results/{task}/ffhq/{factor}/{sigma}/ps+/{scale}/recon') 24 | 25 | psnr_delta_list = [] 26 | psnr_normal_list = [] 27 | 28 | lpips_delta_list = [] 29 | lpips_normal_list = [] 30 | for idx in tqdm(range(150)): 31 | fname = str(idx).zfill(5) 32 | 33 | label = plt.imread(label_root / f'{fname}.png')[:, :, :3] 34 | delta_recon = plt.imread(delta_recon_root / f'{fname}.png')[:, :, :3] 35 | normal_recon = plt.imread(normal_recon_root / f'{fname}.png')[:, :, :3] 36 | 37 | psnr_delta = peak_signal_noise_ratio(label, delta_recon) 38 | psnr_normal = peak_signal_noise_ratio(label, normal_recon) 39 | 40 | psnr_delta_list.append(psnr_delta) 41 | psnr_normal_list.append(psnr_normal) 42 | 43 | delta_recon = torch.from_numpy(delta_recon).permute(2, 0, 1).to(device) 44 | normal_recon = torch.from_numpy(normal_recon).permute(2, 0, 1).to(device) 45 | label = torch.from_numpy(label).permute(2, 0, 1).to(device) 46 | 47 | delta_recon = delta_recon.view(1, 3, 256, 256) * 2. - 1. 48 | normal_recon = normal_recon.view(1, 3, 256, 256) * 2. - 1. 49 | label = label.view(1, 3, 256, 256) * 2. - 1. 
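# LPIPS expects inputs scaled to [-1, 1]; plt.imread returns floats in [0, 1], hence the rescaling above.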
50 | 51 | delta_d = loss_fn_vgg(delta_recon, label) 52 | normal_d = loss_fn_vgg(normal_recon, label) 53 | 54 | lpips_delta_list.append(delta_d) 55 | lpips_normal_list.append(normal_d) 56 | 57 | psnr_delta_avg = sum(psnr_delta_list) / len(psnr_delta_list) 58 | lpips_delta_avg = sum(lpips_delta_list) / len(lpips_delta_list) 59 | 60 | psnr_normal_avg = sum(psnr_normal_list) / len(psnr_normal_list) 61 | lpips_normal_avg = sum(lpips_normal_list) / len(lpips_normal_list) 62 | 63 | print(f'Delta PSNR: {psnr_delta_avg}') 64 | print(f'Delta LPIPS: {lpips_delta_avg}') 65 | 66 | print(f'Normal PSNR: {psnr_normal_avg}') 67 | print(f'Normal LPIPS: {lpips_normal_avg}') -------------------------------------------------------------------------------- /DCDP-LDM/util/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | def get_logger(): 4 | logger = logging.getLogger(name='DPS') 5 | logger.setLevel(logging.INFO) 6 | 7 | formatter = logging.Formatter("%(asctime)s [%(name)s] >> %(message)s") 8 | stream_handler = logging.StreamHandler() 9 | stream_handler.setFormatter(formatter) 10 | logger.addHandler(stream_handler) 11 | 12 | return logger -------------------------------------------------------------------------------- /bkse/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021, VinAI. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, 7 | this list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 21 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 | POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /bkse/data/__init__.py: -------------------------------------------------------------------------------- 1 | """create dataset and dataloader""" 2 | import logging 3 | 4 | import torch 5 | import torch.utils.data 6 | 7 | 8 | def create_dataloader(dataset, dataset_opt, opt=None, sampler=None): 9 | phase = dataset_opt["phase"] 10 | if phase == "train": 11 | if opt["dist"]: 12 | world_size = torch.distributed.get_world_size() 13 | num_workers = dataset_opt["n_workers"] 14 | assert dataset_opt["batch_size"] % world_size == 0 15 | batch_size = dataset_opt["batch_size"] // world_size 16 | shuffle = False 17 | else: 18 | num_workers = dataset_opt["n_workers"] * len(opt["gpu_ids"]) 19 | batch_size = dataset_opt["batch_size"] 20 | shuffle = True 21 | return torch.utils.data.DataLoader( 22 | dataset, 23 | batch_size=batch_size, 24 | shuffle=shuffle, 25 | num_workers=num_workers, 26 | sampler=sampler, 27 | drop_last=True, 28 | pin_memory=False, 29 | ) 30 | else: 31 | return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, pin_memory=False) 32 | 33 | 34 | def create_dataset(dataset_opt): 35 | mode = dataset_opt["mode"] 36 | # datasets for image restoration 37 | if mode == "REDS": 38 | from data.REDS_dataset import REDSDataset as D 39 | elif mode == "GOPRO": 40 | from data.GOPRO_dataset import GOPRODataset as D 41 | elif mode == "fewshot": 42 | from data.fewshot_dataset import FewShotDataset as D 43 | elif mode == "levin": 44 | from data.levin_dataset import LevinDataset as D 45 | elif mode == "mix": 46 | from data.mix_dataset import MixDataset as D 47 | else: 48 | raise NotImplementedError(f"Dataset {mode} is not recognized.") 49 | dataset = D(dataset_opt) 50 | 51 | logger = logging.getLogger("base") 52 | logger.info("Dataset [{:s} - {:s}] is created.".format(dataset.__class__.__name__, dataset_opt["name"])) 53 | return dataset 54 | -------------------------------------------------------------------------------- /bkse/data/data_sampler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Modified from torch.utils.data.distributed.DistributedSampler 3 | Support enlarging the dataset for *iteration-oriented* training, 4 | for saving time when restart the dataloader after each epoch 5 | """ 6 | import math 7 | 8 | import torch 9 | import torch.distributed as dist 10 | from torch.utils.data.sampler import Sampler 11 | 12 | 13 | class DistIterSampler(Sampler): 14 | """Sampler that restricts data loading to a subset of the dataset. 15 | 16 | It is especially useful in conjunction with 17 | :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each 18 | process can pass a DistributedSampler instance as a DataLoader sampler, 19 | and load a subset of the original dataset that is exclusive to it. 20 | 21 | .. note:: 22 | Dataset is assumed to be of constant size. 23 | 24 | Arguments: 25 | dataset: Dataset used for sampling. 26 | num_replicas (optional): Number of processes participating in 27 | distributed training. 28 | rank (optional): Rank of the current process within num_replicas. 
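ratio (optional): Enlarges the dataset by this factor, so one sampler
    "epoch" spans many passes over the underlying data.

Example (illustrative sketch, not from the original file; assumes an
initialised torch.distributed process group):

    sampler = DistIterSampler(dataset, num_replicas, rank, ratio=100)
    loader = torch.utils.data.DataLoader(dataset, batch_size=16, sampler=sampler)
    for epoch in range(num_epochs):
        sampler.set_epoch(epoch)  # deterministic reshuffle per epoch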
29 |     """
30 | 
31 |     def __init__(self, dataset, num_replicas=None, rank=None, ratio=100):
32 |         if num_replicas is None:
33 |             if not dist.is_available():
34 |                 raise RuntimeError(
35 |                     "Requires distributed \
36 |                     package to be available"
37 |                 )
38 |             num_replicas = dist.get_world_size()
39 |         if rank is None:
40 |             if not dist.is_available():
41 |                 raise RuntimeError(
42 |                     "Requires distributed \
43 |                     package to be available"
44 |                 )
45 |             rank = dist.get_rank()
46 |         self.dataset = dataset
47 |         self.num_replicas = num_replicas
48 |         self.rank = rank
49 |         self.epoch = 0
50 |         self.num_samples = int(math.ceil(len(self.dataset) * ratio / self.num_replicas))
51 |         self.total_size = self.num_samples * self.num_replicas
52 | 
53 |     def __iter__(self):
54 |         # deterministically shuffle based on epoch
55 |         g = torch.Generator()
56 |         g.manual_seed(self.epoch)
57 |         indices = torch.randperm(self.total_size, generator=g).tolist()
58 | 
59 |         dsize = len(self.dataset)
60 |         indices = [v % dsize for v in indices]
61 | 
62 |         # subsample
63 |         indices = indices[self.rank : self.total_size : self.num_replicas]
64 |         assert len(indices) == self.num_samples
65 | 
66 |         return iter(indices)
67 | 
68 |     def __len__(self):
69 |         return self.num_samples
70 | 
71 |     def set_epoch(self, epoch):
72 |         self.epoch = epoch
73 | 
--------------------------------------------------------------------------------
/bkse/experiments/pretrained/kernel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/experiments/pretrained/kernel.pth
--------------------------------------------------------------------------------
/bkse/generate_blur.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | import cv2
4 | import numpy as np
5 | import os.path as osp
6 | import torch
7 | import utils.util as util
8 | import yaml
9 | from models.kernel_encoding.kernel_wizard import KernelWizard
10 | 
11 | 
12 | def main():
13 |     device = torch.device("cuda")
14 | 
15 |     parser = argparse.ArgumentParser(description="Kernel extractor testing")
16 | 
17 |     parser.add_argument("--image_path", action="store", help="image path", type=str, required=True)
18 |     parser.add_argument("--yml_path", action="store", help="yml path", type=str, required=True)
19 |     parser.add_argument("--save_path", action="store", help="save path", type=str, default=".")
20 |     parser.add_argument("--num_samples", action="store", help="number of samples", type=int, default=1)
21 | 
22 |     args = parser.parse_args()
23 | 
24 |     image_path = args.image_path
25 |     yml_path = args.yml_path
26 |     num_samples = args.num_samples
27 | 
28 |     # Initializing model
29 |     with open(yml_path, "r") as f:
30 |         opt = yaml.safe_load(f)["KernelWizard"]  # safe_load; bare yaml.load requires a Loader in recent PyYAML
31 |     model_path = opt["pretrained"]
32 |     model = KernelWizard(opt)
33 |     model.eval()
34 |     model.load_state_dict(torch.load(model_path))
35 |     model = model.to(device)
36 | 
37 |     HQ = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB) / 255.0
38 |     HQ = np.transpose(HQ, (2, 0, 1))
39 |     HQ_tensor = torch.Tensor(HQ).unsqueeze(0).to(device)  # already on `device`; the trailing .cuda() was redundant
40 | 
41 |     for i in range(num_samples):
42 |         print(f"Sample #{i}/{num_samples}")
43 |         with torch.no_grad():
44 |             kernel = torch.randn((1, 512, 2, 2)).cuda() * 1.2
45 |             LQ_tensor = model.adaptKernel(HQ_tensor, kernel)
46 | 
47 |         dst = osp.join(args.save_path, f"blur{i:03d}.png")
48 |         LQ_img = util.tensor2img(LQ_tensor)
49 | 
50 |         cv2.imwrite(dst, LQ_img)
51 | 
52 | 
53 | main()
54 | 
--------------------------------------------------------------------------------
/bkse/generic_deblur.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | import cv2
4 | import yaml
5 | from models.deblurring.joint_deblur import JointDeblur
6 | 
7 | 
8 | def main():
9 |     parser = argparse.ArgumentParser(description="Generic deblurring")
10 | 
11 |     parser.add_argument("--image_path", action="store", help="image path", type=str, required=True)
12 |     parser.add_argument("--save_path", action="store", help="save path", type=str, default="res.png")
13 |     parser.add_argument("--yml_path", action="store", help="yml path", type=str, required=True)
14 | 
15 |     args = parser.parse_args()
16 | 
17 |     # Initializing model
18 |     with open(args.yml_path, "rb") as f:
19 |         opt = yaml.safe_load(f)
20 |     model = JointDeblur(opt)
21 | 
22 |     blur_img = cv2.cvtColor(cv2.imread(args.image_path), cv2.COLOR_BGR2RGB)
23 |     sharp_img = model.deblur(blur_img)
24 | 
25 |     cv2.imwrite(args.save_path, sharp_img)
26 | 
27 | 
28 | main()
29 | 
--------------------------------------------------------------------------------
/bkse/imgs/blur_faces/face01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/blur_faces/face01.png
--------------------------------------------------------------------------------
/bkse/imgs/blur_imgs/blur1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/blur_imgs/blur1.png
--------------------------------------------------------------------------------
/bkse/imgs/blur_imgs/blur2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/blur_imgs/blur2.png
--------------------------------------------------------------------------------
/bkse/imgs/results/augmentation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/results/augmentation.jpg
--------------------------------------------------------------------------------
/bkse/imgs/results/domain_specific_deblur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/results/domain_specific_deblur.jpg
--------------------------------------------------------------------------------
/bkse/imgs/results/general_deblurring.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/results/general_deblurring.jpg
-------------------------------------------------------------------------------- /bkse/imgs/results/generate_blur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/results/generate_blur.jpg -------------------------------------------------------------------------------- /bkse/imgs/results/kernel_encoding_wGT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/results/kernel_encoding_wGT.png -------------------------------------------------------------------------------- /bkse/imgs/sharp_imgs/mushishi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/sharp_imgs/mushishi.png -------------------------------------------------------------------------------- /bkse/imgs/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/imgs/teaser.jpg -------------------------------------------------------------------------------- /bkse/models/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | logger = logging.getLogger("base") 5 | 6 | 7 | def create_model(opt): 8 | model = opt["model"] 9 | if model == "image_base": 10 | from models.kernel_encoding.image_base_model import ImageBaseModel as M 11 | else: 12 | raise NotImplementedError("Model [{:s}] not recognized.".format(model)) 13 | m = M(opt) 14 | logger.info("Model [{:s}] is created.".format(m.__class__.__name__)) 15 | return m 16 | -------------------------------------------------------------------------------- /bkse/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /bkse/models/__pycache__/arch_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/__pycache__/arch_util.cpython-38.pyc -------------------------------------------------------------------------------- /bkse/models/arch_util.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import torch.nn as nn 4 | import torch.nn.init as init 5 | 6 | 7 | class Identity(nn.Module): 8 | def forward(self, x): 9 | return x 10 | 11 | 12 | def get_norm_layer(norm_type="instance"): 13 | """Return a normalization layer 14 | Parameters: 15 | norm_type (str) -- the name of the normalization 16 | layer: 
batch | instance | none 17 | 18 | For BatchNorm, we use learnable affine parameters and 19 | track running statistics (mean/stddev). 20 | 21 | For InstanceNorm, we do not use learnable affine 22 | parameters. We do not track running statistics. 23 | """ 24 | if norm_type == "batch": 25 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) 26 | elif norm_type == "instance": 27 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) 28 | elif norm_type == "none": 29 | 30 | def norm_layer(x): 31 | return Identity() 32 | 33 | else: 34 | raise NotImplementedError( 35 | f"normalization layer {norm_type}\ 36 | is not found" 37 | ) 38 | return norm_layer 39 | 40 | 41 | def initialize_weights(net_l, scale=1): 42 | if not isinstance(net_l, list): 43 | net_l = [net_l] 44 | for net in net_l: 45 | for m in net.modules(): 46 | if isinstance(m, nn.Conv2d): 47 | init.kaiming_normal_(m.weight, a=0, mode="fan_in") 48 | m.weight.data *= scale # for residual block 49 | if m.bias is not None: 50 | m.bias.data.zero_() 51 | elif isinstance(m, nn.Linear): 52 | init.kaiming_normal_(m.weight, a=0, mode="fan_in") 53 | m.weight.data *= scale 54 | if m.bias is not None: 55 | m.bias.data.zero_() 56 | elif isinstance(m, nn.BatchNorm2d): 57 | init.constant_(m.weight, 1) 58 | init.constant_(m.bias.data, 0.0) 59 | -------------------------------------------------------------------------------- /bkse/models/backbones/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/backbones/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /bkse/models/backbones/__pycache__/unet_parts.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/backbones/__pycache__/unet_parts.cpython-38.pyc -------------------------------------------------------------------------------- /bkse/models/backbones/skip/concat.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class Concat(nn.Module): 7 | def __init__(self, dim, *args): 8 | super(Concat, self).__init__() 9 | self.dim = dim 10 | 11 | for idx, module in enumerate(args): 12 | self.add_module(str(idx), module) 13 | 14 | def forward(self, input): 15 | inputs = [] 16 | for module in self._modules.values(): 17 | inputs.append(module(input)) 18 | 19 | inputs_shapes2 = [x.shape[2] for x in inputs] 20 | inputs_shapes3 = [x.shape[3] for x in inputs] 21 | 22 | if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all( 23 | np.array(inputs_shapes3) == min(inputs_shapes3) 24 | ): 25 | inputs_ = inputs 26 | else: 27 | target_shape2 = min(inputs_shapes2) 28 | target_shape3 = min(inputs_shapes3) 29 | 30 | inputs_ = [] 31 | for inp in inputs: 32 | diff2 = (inp.size(2) - target_shape2) // 2 33 | diff3 = (inp.size(3) - target_shape3) // 2 34 | inputs_.append(inp[:, :, diff2 : diff2 + target_shape2, diff3 : diff3 + target_shape3]) 35 | 36 | return torch.cat(inputs_, dim=self.dim) 37 | 38 | def __len__(self): 39 | return 
len(self._modules) 40 | -------------------------------------------------------------------------------- /bkse/models/backbones/skip/util.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .downsampler import Downsampler 4 | 5 | 6 | class Swish(nn.Module): 7 | """ 8 | https://arxiv.org/abs/1710.05941 9 | The hype was so huge that I could not help but try it 10 | """ 11 | 12 | def __init__(self): 13 | super(Swish, self).__init__() 14 | self.s = nn.Sigmoid() 15 | 16 | def forward(self, x): 17 | return x * self.s(x) 18 | 19 | 20 | def get_conv(in_f, out_f, kernel_size, stride=1, bias=True, pad="zero", downsample_mode="stride"): 21 | downsampler = None 22 | if stride != 1 and downsample_mode != "stride": 23 | 24 | if downsample_mode == "avg": 25 | downsampler = nn.AvgPool2d(stride, stride) 26 | elif downsample_mode == "max": 27 | downsampler = nn.MaxPool2d(stride, stride) 28 | elif downsample_mode in ["lanczos2", "lanczos3"]: 29 | downsampler = Downsampler( 30 | n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True 31 | ) 32 | else: 33 | assert False 34 | 35 | stride = 1 36 | 37 | padder = None 38 | to_pad = int((kernel_size - 1) / 2) 39 | if pad == "reflection": 40 | padder = nn.ReflectionPad2d(to_pad) 41 | to_pad = 0 42 | 43 | convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias) 44 | 45 | layers = filter(lambda x: x is not None, [padder, convolver, downsampler]) 46 | return nn.Sequential(*layers) 47 | 48 | 49 | def get_activation(act_fun="LeakyReLU"): 50 | """ 51 | Either string defining an activation function or module (e.g. nn.ReLU) 52 | """ 53 | if isinstance(act_fun, str): 54 | if act_fun == "LeakyReLU": 55 | return nn.LeakyReLU(0.2, inplace=True) 56 | elif act_fun == "Swish": 57 | return Swish() 58 | elif act_fun == "ELU": 59 | return nn.ELU() 60 | elif act_fun == "none": 61 | return nn.Sequential() 62 | else: 63 | assert False 64 | else: 65 | return act_fun() 66 | -------------------------------------------------------------------------------- /bkse/models/deblurring/image_deblur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import utils.util as util 4 | from models.dips import ImageDIP, KernelDIP 5 | from models.kernel_encoding.kernel_wizard import KernelWizard 6 | from models.losses.hyper_laplacian_penalty import HyperLaplacianPenalty 7 | from models.losses.perceptual_loss import PerceptualLoss 8 | from models.losses.ssim_loss import SSIM 9 | from torch.optim.lr_scheduler import StepLR 10 | from tqdm import tqdm 11 | 12 | 13 | class ImageDeblur: 14 | def __init__(self, opt): 15 | self.opt = opt 16 | 17 | # losses 18 | self.ssim_loss = SSIM().cuda() 19 | self.mse = nn.MSELoss().cuda() 20 | self.perceptual_loss = PerceptualLoss().cuda() 21 | self.laplace_penalty = HyperLaplacianPenalty(3, 0.66).cuda() 22 | 23 | self.kernel_wizard = KernelWizard(opt["KernelWizard"]).cuda() 24 | self.kernel_wizard.load_state_dict(torch.load(opt["KernelWizard"]["pretrained"])) 25 | 26 | for k, v in self.kernel_wizard.named_parameters(): 27 | v.requires_grad = False 28 | 29 | def reset_optimizers(self): 30 | self.x_optimizer = torch.optim.Adam(self.x_dip.parameters(), lr=self.opt["x_lr"]) 31 | self.k_optimizer = torch.optim.Adam(self.k_dip.parameters(), lr=self.opt["k_lr"]) 32 | 33 | self.x_scheduler = StepLR(self.x_optimizer, step_size=self.opt["num_iters"] // 5, gamma=0.7) 34 | 35 | 
self.k_scheduler = StepLR(self.k_optimizer, step_size=self.opt["num_iters"] // 5, gamma=0.7) 36 | 37 | def prepare_DIPs(self): 38 | # x stands for the sharp image, k stands for the kernel 39 | self.x_dip = ImageDIP(self.opt["ImageDIP"]).cuda() 40 | self.k_dip = KernelDIP(self.opt["KernelDIP"]).cuda() 41 | 42 | # fixed input vectors of DIPs 43 | # dip_zk is 64-channel noise of spatial size 64x64; dip_zx is 8-channel noise at the image size 44 | self.dip_zk = util.get_noise(64, "noise", (64, 64)).cuda() 45 | self.dip_zx = util.get_noise(8, "noise", self.opt["img_size"]).cuda() 46 | 47 | def warmup(self, warmup_x, warmup_k): 48 | # DIP inputs are perturbed around the fixed vectors: z + reg_noise_std * N(0, I) 49 | reg_noise_std = self.opt["reg_noise_std"] 50 | print("Warming up x DIP") 51 | for step in tqdm(range(self.opt["num_warmup_iters"])): 52 | self.x_optimizer.zero_grad() 53 | dip_zx_rand = self.dip_zx + reg_noise_std * torch.randn_like(self.dip_zx).cuda() 54 | x = self.x_dip(dip_zx_rand) 55 | 56 | loss = self.mse(x, warmup_x) 57 | loss.backward() 58 | self.x_optimizer.step() 59 | 60 | print("Warming up k DIP") 61 | for step in tqdm(range(self.opt["num_warmup_iters"])): 62 | self.k_optimizer.zero_grad() 63 | dip_zk_rand = self.dip_zk + reg_noise_std * torch.randn_like(self.dip_zk).cuda() 64 | k = self.k_dip(dip_zk_rand) 65 | 66 | loss = self.mse(k, warmup_k) 67 | loss.backward() 68 | self.k_optimizer.step() 69 | 70 | def deblur(self, img): 71 | pass 72 | -------------------------------------------------------------------------------- /bkse/models/deblurring/joint_deblur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import utils.util as util 3 | from models.deblurring.image_deblur import ImageDeblur 4 | from tqdm import tqdm 5 | 6 | 7 | class JointDeblur(ImageDeblur): 8 | def __init__(self, opt): 9 | super(JointDeblur, self).__init__(opt) 10 | 11 | def deblur(self, y): 12 | """Deblur image 13 | Args: 14 | y: Blurry input image 15 | """ 16 | y = util.img2tensor(y).unsqueeze(0).cuda() 17 | 18 | self.prepare_DIPs() 19 | self.reset_optimizers() 20 | 21 | warmup_k = torch.load(self.opt["warmup_k_path"]).cuda() 22 | self.warmup(y, warmup_k) 23 | 24 | # DIP inputs are perturbed around the fixed vectors: z + reg_noise_std * N(0, I) 25 | 26 | print("Deblurring") 27 | reg_noise_std = self.opt["reg_noise_std"] 28 | for step in tqdm(range(self.opt["num_iters"])): 29 | dip_zx_rand = self.dip_zx + reg_noise_std * torch.randn_like(self.dip_zx).cuda() 30 | dip_zk_rand = self.dip_zk + reg_noise_std * torch.randn_like(self.dip_zk).cuda() 31 | 32 | self.x_optimizer.zero_grad() 33 | self.k_optimizer.zero_grad() 34 | 35 | x = self.x_dip(dip_zx_rand) 36 | k = self.k_dip(dip_zk_rand) 37 | 38 | fake_y = self.kernel_wizard.adaptKernel(x, k) 39 | 40 | if step < self.opt["num_iters"] // 2: 41 | total_loss = 6e-1 * self.perceptual_loss(fake_y, y) 42 | total_loss += 1 - self.ssim_loss(fake_y, y) 43 | total_loss += 5e-5 * torch.norm(k) 44 | total_loss += 2e-2 * self.laplace_penalty(x) 45 | else: 46 | total_loss = self.perceptual_loss(fake_y, y) 47 | total_loss += 5e-2 * self.laplace_penalty(x) 48 | total_loss += 5e-4 * torch.norm(k) 49 | 50 | total_loss.backward() 51 | 52 | self.x_optimizer.step() 53 | self.k_optimizer.step() 54 | 55 | # LR schedulers step after the optimizers, per the PyTorch >= 1.1 convention 56 | self.x_scheduler.step() 57 | self.k_scheduler.step() 58 | # debugging 59 | # if step % 100 == 0: 60 | # print(torch.norm(k)) 61 | # print(f"{self.k_optimizer.param_groups[0]['lr']:.3e}") 62 | 63 | return util.tensor2img(x.detach()) 64 | -------------------------------------------------------------------------------- /bkse/models/dips.py:
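dips.py below defines the two DIP networks that JointDeblur above optimizes (ImageDIP for the sharp image, KernelDIP for the kernel latent) against the frozen KernelWizard. A minimal end-to-end driver sketch, assuming it is run from inside bkse/ with an options file shaped like options/generic_deblur/default.yml; the cv2 I/O here is illustrative, not taken from this repo:

import cv2
import yaml

from models.deblurring.joint_deblur import JointDeblur

with open("options/generic_deblur/default.yml") as f:
    opt = yaml.safe_load(f)  # this options file uses only standard YAML tags

blurry = cv2.imread("imgs/blur_imgs/blur1.png")  # HWC uint8 image
deblurrer = JointDeblur(opt)      # builds the losses and loads the frozen KernelWizard weights
sharp = deblurrer.deblur(blurry)  # warmup, then joint DIP optimization; returns a uint8 image
cv2.imwrite("sharp.png", sharp)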
-------------------------------------------------------------------------------- 1 | import models.arch_util as arch_util 2 | import torch.nn as nn 3 | from models.backbones.resnet import ResnetBlock 4 | from models.backbones.skip.skip import skip 5 | 6 | 7 | class KernelDIP(nn.Module): 8 | """ 9 | DIP (Deep Image Prior) for blur kernel 10 | """ 11 | 12 | def __init__(self, opt): 13 | super(KernelDIP, self).__init__() 14 | 15 | norm_layer = arch_util.get_norm_layer("none") 16 | n_blocks = opt["n_blocks"] 17 | nf = opt["nf"] 18 | padding_type = opt["padding_type"] 19 | use_dropout = opt["use_dropout"] 20 | kernel_dim = opt["kernel_dim"] 21 | 22 | input_nc = 64 23 | model = [ 24 | nn.ReflectionPad2d(3), 25 | nn.Conv2d(input_nc, nf, kernel_size=7, padding=0, bias=True), 26 | norm_layer(nf), 27 | nn.ReLU(True), 28 | ] 29 | 30 | n_downsampling = 5 31 | for i in range(n_downsampling): # add downsampling layers 32 | mult = 2 ** i 33 | input_nc = min(nf * mult, kernel_dim) 34 | output_nc = min(nf * mult * 2, kernel_dim) 35 | model += [ 36 | nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=2, padding=1, bias=True), 37 | norm_layer(nf * mult * 2), 38 | nn.ReLU(True), 39 | ] 40 | 41 | for i in range(n_blocks): # add ResNet blocks 42 | model += [ 43 | ResnetBlock( 44 | kernel_dim, 45 | padding_type=padding_type, 46 | norm_layer=norm_layer, 47 | use_dropout=use_dropout, 48 | use_bias=True, 49 | ) 50 | ] 51 | 52 | self.model = nn.Sequential(*model) 53 | 54 | def forward(self, noise): 55 | return self.model(noise) 56 | 57 | 58 | class ImageDIP(nn.Module): 59 | """ 60 | DIP (Deep Image Prior) for sharp image 61 | """ 62 | 63 | def __init__(self, opt): 64 | super(ImageDIP, self).__init__() 65 | 66 | input_nc = opt["input_nc"] 67 | output_nc = opt["output_nc"] 68 | 69 | self.model = skip( 70 | input_nc, 71 | output_nc, 72 | num_channels_down=[128, 128, 128, 128, 128], 73 | num_channels_up=[128, 128, 128, 128, 128], 74 | num_channels_skip=[16, 16, 16, 16, 16], 75 | upsample_mode="bilinear", 76 | need_sigmoid=True, 77 | need_bias=True, 78 | pad=opt["padding_type"], 79 | act_fun="LeakyReLU", 80 | ) 81 | 82 | def forward(self, img): 83 | return self.model(img) 84 | -------------------------------------------------------------------------------- /bkse/models/dsd/op/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/dsd/op/__init__.py -------------------------------------------------------------------------------- /bkse/models/dsd/op/fused_bias_act.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | 4 | torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 5 | int act, int grad, float alpha, float scale); 6 | 7 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 8 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 9 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 10 | 11 | torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 12 | int act, int grad, float alpha, float scale) { 13 | CHECK_CUDA(input); 14 | CHECK_CUDA(bias); 15 | 16 | return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); 17 | } 18 | 19 |
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); 21 | } -------------------------------------------------------------------------------- /bkse/models/dsd/op/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | 4 | torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, 5 | int up_x, int up_y, int down_x, int down_y, 6 | int pad_x0, int pad_x1, int pad_y0, int pad_y1); 7 | 8 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 9 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 10 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 11 | 12 | torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, 13 | int up_x, int up_y, int down_x, int down_y, 14 | int pad_x0, int pad_x1, int pad_y0, int pad_y1) { 15 | CHECK_CUDA(input); 16 | CHECK_CUDA(kernel); 17 | 18 | return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); 19 | } 20 | 21 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 22 | m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); 23 | } -------------------------------------------------------------------------------- /bkse/models/dsd/spherical_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.optim import Optimizer 3 | 4 | 5 | # Spherical Optimizer Class 6 | # Uses the first two dimensions as batch information 7 | # Optimizes over the surface of a sphere using the initial radius throughout 8 | # 9 | # Example Usage: 10 | # opt = SphericalOptimizer(torch.optim.SGD, [x], lr=0.01) 11 | 12 | 13 | class SphericalOptimizer(Optimizer): 14 | def __init__(self, optimizer, params, **kwargs): 15 | self.opt = optimizer(params, **kwargs) 16 | self.params = params 17 | with torch.no_grad(): 18 | self.radii = { 19 | param: (param.pow(2).sum(tuple(range(2, param.ndim)), keepdim=True) + 1e-9).sqrt() for param in params 20 | } 21 | 22 | @torch.no_grad() 23 | def step(self, closure=None): 24 | loss = self.opt.step(closure) 25 | for param in self.params: 26 | param.data.div_((param.pow(2).sum(tuple(range(2, param.ndim)), keepdim=True) + 1e-9).sqrt()) 27 | param.mul_(self.radii[param]) 28 | 29 | return loss 30 | -------------------------------------------------------------------------------- /bkse/models/kernel_encoding/__pycache__/kernel_wizard.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/models/kernel_encoding/__pycache__/kernel_wizard.cpython-38.pyc -------------------------------------------------------------------------------- /bkse/models/losses/charbonnier_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class CharbonnierLoss(nn.Module): 6 | """Charbonnier Loss (L1)""" 7 | 8 | def __init__(self, eps=1e-6): 9 | super(CharbonnierLoss, self).__init__() 10 | self.eps = eps 11 | 12 | def forward(self, x, y): 13 | diff = x - y 14 | loss = torch.sum(torch.sqrt(diff * diff + self.eps)) 15 | return loss 16 | -------------------------------------------------------------------------------- /bkse/models/losses/gan_loss.py:
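gan_loss.py below exposes three GAN objectives behind a single forward(input, target_is_real) call. A minimal usage sketch; the discriminator logits are random stand-ins rather than anything from this repo:

import torch
from models.losses.gan_loss import GANLoss

criterion = GANLoss("lsgan")  # "gan" / "ragan" use BCE-with-logits, "wgan-gp" a mean-based loss

d_real = torch.randn(4, 1)  # stand-in discriminator outputs
d_fake = torch.randn(4, 1)

loss_d = criterion(d_real, True) + criterion(d_fake, False)  # discriminator side
loss_g = criterion(d_fake, True)  # generator side: push fakes toward the real label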
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | # Define GAN loss: [vanilla | lsgan | wgan-gp] 6 | class GANLoss(nn.Module): 7 | def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0): 8 | super(GANLoss, self).__init__() 9 | self.gan_type = gan_type.lower() 10 | self.real_label_val = real_label_val 11 | self.fake_label_val = fake_label_val 12 | 13 | if self.gan_type == "gan" or self.gan_type == "ragan": 14 | self.loss = nn.BCEWithLogitsLoss() 15 | elif self.gan_type == "lsgan": 16 | self.loss = nn.MSELoss() 17 | elif self.gan_type == "wgan-gp": 18 | 19 | def wgan_loss(input, target): 20 | # target is boolean 21 | return -1 * input.mean() if target else input.mean() 22 | 23 | self.loss = wgan_loss 24 | else: 25 | raise NotImplementedError("GAN type [{:s}] is not found".format(self.gan_type)) 26 | 27 | def get_target_label(self, input, target_is_real): 28 | if self.gan_type == "wgan-gp": 29 | return target_is_real 30 | if target_is_real: 31 | return torch.empty_like(input).fill_(self.real_label_val) 32 | else: 33 | return torch.empty_like(input).fill_(self.fake_label_val) 34 | 35 | def forward(self, input, target_is_real): 36 | target_label = self.get_target_label(input, target_is_real) 37 | loss = self.loss(input, target_label) 38 | return loss 39 | -------------------------------------------------------------------------------- /bkse/models/losses/hyper_laplacian_penalty.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class HyperLaplacianPenalty(nn.Module): 7 | def __init__(self, num_channels, alpha, eps=1e-6): 8 | super(HyperLaplacianPenalty, self).__init__() 9 | 10 | self.alpha = alpha 11 | self.eps = eps 12 | 13 | self.Kx = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).cuda() 14 | self.Kx = self.Kx.expand(1, num_channels, 3, 3) 15 | self.Kx.requires_grad = False 16 | self.Ky = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).cuda() 17 | self.Ky = self.Ky.expand(1, num_channels, 3, 3) 18 | self.Ky.requires_grad = False 19 | 20 | def forward(self, x): 21 | gradX = F.conv2d(x, self.Kx, stride=1, padding=1) 22 | gradY = F.conv2d(x, self.Ky, stride=1, padding=1) 23 | grad = torch.sqrt(gradX ** 2 + gradY ** 2 + self.eps) 24 | 25 | loss = (grad ** self.alpha).mean() 26 | 27 | return loss 28 | -------------------------------------------------------------------------------- /bkse/models/losses/ssim_loss.py: -------------------------------------------------------------------------------- 1 | from math import exp 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | from torch.autograd import Variable 6 | 7 | 8 | class SSIM(torch.nn.Module): 9 | @staticmethod 10 | def gaussian(window_size, sigma): 11 | gauss = torch.Tensor([exp(-((x - window_size // 2) ** 2) / float(2 * sigma ** 2)) for x in range(window_size)]) 12 | return gauss / gauss.sum() 13 | 14 | @staticmethod 15 | def create_window(window_size, channel): 16 | _1D_window = SSIM.gaussian(window_size, 1.5).unsqueeze(1) 17 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) 18 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) 19 | return window 20 | 21 | @staticmethod 22 | def _ssim(img1, img2, window, window_size, channel, size_average=True): 23 | mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) 24 | mu2 = 
F.conv2d(img2, window, padding=window_size // 2, groups=channel) 25 | 26 | mu1_sq = mu1.pow(2) 27 | mu2_sq = mu2.pow(2) 28 | mu1_mu2 = mu1 * mu2 29 | 30 | sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq 31 | sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq 32 | sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 33 | 34 | C1 = 0.01 ** 2 35 | C2 = 0.03 ** 2 36 | 37 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) 38 | 39 | if size_average: 40 | return ssim_map.mean() 41 | else: 42 | return ssim_map.mean(1).mean(1).mean(1) 43 | 44 | def __init__(self, window_size=11, size_average=True): 45 | super(SSIM, self).__init__() 46 | self.window_size = window_size 47 | self.size_average = size_average 48 | self.channel = 1 49 | self.window = self.create_window(window_size, self.channel) 50 | 51 | def forward(self, img1, img2): 52 | (_, channel, _, _) = img1.size() 53 | 54 | if channel == self.channel and self.window.data.type() == img1.data.type(): 55 | window = self.window 56 | else: 57 | window = self.create_window(self.window_size, channel) 58 | 59 | if img1.is_cuda: 60 | window = window.cuda(img1.get_device()) 61 | window = window.type_as(img1) 62 | 63 | self.window = window 64 | self.channel = channel 65 | 66 | return self._ssim(img1, img2, window, self.window_size, channel, self.size_average) 67 | -------------------------------------------------------------------------------- /bkse/options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/options/__init__.py -------------------------------------------------------------------------------- /bkse/options/data_augmentation/default.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | gpu_ids: [0] 3 | 4 | #### network structures 5 | KernelWizard: 6 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 7 | input_nc: 3 8 | nf: 64 9 | front_RBs: 10 10 | back_RBs: 20 11 | N_frames: 1 12 | kernel_dim: 512 13 | use_vae: false 14 | KernelExtractor: 15 | norm: none 16 | use_sharp: true 17 | n_blocks: 4 18 | padding_type: reflect 19 | use_dropout: false 20 | Adapter: 21 | norm: none 22 | use_dropout: false 23 | -------------------------------------------------------------------------------- /bkse/options/domain_specific_deblur/stylegan.yml: -------------------------------------------------------------------------------- 1 | stylegan_ver: 1 2 | img_size: &HQ_SIZE [256, 256] 3 | output_size: 1024 4 | verbose: true 5 | num_epochs: 25 6 | num_warmup_iters: 150 7 | num_x_iters: 300 8 | num_k_iters: 200 9 | x_lr: !!float 0.2 10 | k_lr: !!float 1e-4 11 | warmup_k_path: experiments/pretrained/kernel.pth 12 | reg_noise_std: !!float 0.001 13 | duplicates: 1 14 | batch_size: 1 15 | loss_str: '100*L2+0.1*GEOCROSS' 16 | eps: !!float 1e-15 17 | noise_type: trainable 18 | num_trainable_noise_layers: 5 19 | bad_noise_layers: '17' 20 | optimizer_name: adam 21 | lr_schedule: linear1cycledrop 22 | save_intermediate: true 23 | tile_latent: ~ 24 | seed: ~ 25 | 26 | KernelDIP: 27 | nf: 64 28 | n_blocks: 6 29 | padding_type: reflect 30 | use_dropout: false 31 | kernel_dim: 512 32 | norm: none 33 | 34 | KernelWizard: 35 | 
pretrained: experiments/pretrained/GOPRO_woVAE.pth 36 | input_nc: 3 37 | nf: 64 38 | front_RBs: 10 39 | back_RBs: 20 40 | N_frames: 1 41 | kernel_dim: 512 42 | img_size: *HQ_SIZE 43 | use_vae: false 44 | KernelExtractor: 45 | norm: none 46 | use_sharp: true 47 | n_blocks: 4 48 | padding_type: reflect 49 | use_dropout: false 50 | Adapter: 51 | norm: none 52 | use_dropout: false 53 | -------------------------------------------------------------------------------- /bkse/options/domain_specific_deblur/stylegan2.yml: -------------------------------------------------------------------------------- 1 | stylegan_ver: 2 2 | img_size: &HQ_SIZE [256, 256] 3 | output_size: 256 4 | verbose: true 5 | num_epochs: 25 6 | num_warmup_iters: 150 7 | num_x_iters: 300 8 | num_k_iters: 200 9 | x_lr: !!float 0.2 10 | k_lr: !!float 5e-4 11 | warmup_k_path: experiments/pretrained/kernel.pth 12 | reg_noise_std: !!float 0.001 13 | duplicates: 1 14 | batch_size: 1 15 | loss_str: '100*L2+0.1*GEOCROSS' 16 | eps: !!float 1e-15 17 | noise_type: trainable 18 | num_trainable_noise_layers: 5 19 | bad_noise_layers: '17' 20 | optimizer_name: adam 21 | lr_schedule: linear1cycledrop 22 | save_intermediate: true 23 | tile_latent: ~ 24 | seed: ~ 25 | 26 | ImageDIP: 27 | input_nc: 8 28 | output_nc: 3 29 | nf: 64 30 | norm: none 31 | padding_type: reflect 32 | 33 | KernelDIP: 34 | nf: 64 35 | n_blocks: 6 36 | padding_type: reflect 37 | use_dropout: false 38 | kernel_dim: 512 39 | norm: none 40 | 41 | KernelWizard: 42 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 43 | input_nc: 3 44 | nf: 64 45 | front_RBs: 10 46 | back_RBs: 20 47 | N_frames: 1 48 | kernel_dim: 512 49 | img_size: *HQ_SIZE 50 | use_vae: false 51 | KernelExtractor: 52 | norm: none 53 | use_sharp: true 54 | n_blocks: 4 55 | padding_type: reflect 56 | use_dropout: false 57 | Adapter: 58 | norm: none 59 | use_dropout: false 60 | -------------------------------------------------------------------------------- /bkse/options/generate_blur/default.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | gpu_ids: [0] 3 | 4 | #### network structures 5 | KernelWizard: 6 | pretrained: bkse/experiments/pretrained/GOPRO_wVAE.pth 7 | input_nc: 3 8 | nf: 64 9 | front_RBs: 10 10 | back_RBs: 20 11 | N_frames: 1 12 | kernel_dim: 512 13 | use_vae: true 14 | KernelExtractor: 15 | norm: none 16 | use_sharp: true 17 | n_blocks: 4 18 | padding_type: reflect 19 | use_dropout: false 20 | Adapter: 21 | norm: none 22 | use_dropout: false 23 | -------------------------------------------------------------------------------- /bkse/options/generic_deblur/default.yml: -------------------------------------------------------------------------------- 1 | num_iters: 5000 2 | num_warmup_iters: 300 3 | x_lr: !!float 5e-4 4 | k_lr: !!float 5e-4 5 | img_size: &HQ_SIZE [256, 256] 6 | warmup_k_path: experiments/pretrained/kernel.pth 7 | reg_noise_std: !!float 0.001 8 | 9 | ImageDIP: 10 | input_nc: 8 11 | output_nc: 3 12 | nf: 64 13 | norm: none 14 | padding_type: reflect 15 | 16 | KernelDIP: 17 | nf: 64 18 | n_blocks: 6 19 | padding_type: reflect 20 | use_dropout: false 21 | kernel_dim: 512 22 | norm: none 23 | 24 | KernelWizard: 25 | pretrained: experiments/pretrained/GOPRO_woVAE.pth 26 | input_nc: 3 27 | nf: 64 28 | front_RBs: 10 29 | back_RBs: 20 30 | N_frames: 1 31 | kernel_dim: 512 32 | img_size: *HQ_SIZE 33 | use_vae: false 34 | KernelExtractor: 35 | norm: none 36 | use_sharp: true 37 | n_blocks: 4 38 | padding_type: reflect 39 | 
use_dropout: false 40 | Adapter: 41 | norm: none 42 | use_dropout: false 43 | -------------------------------------------------------------------------------- /bkse/options/kernel_encoding/GOPRO/wVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: GOPRO_VAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: GOPRO 13 | mode: GOPRO 14 | interval_list: [1] 15 | dataroot_HQ: datasets/GOPRO/train_sharp.lmdb 16 | dataroot_LQ: datasets/GOPRO/train_blur_linear.lmdb 17 | cache_keys: ~ 18 | 19 | use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 8 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: true 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: experiments/pretrained/GOPRO_wsharp_woVAE.pth 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-8 66 | 67 | pixel_criterion: cb 68 | pixel_weight: !!float 1.0 69 | kl_weight: !!float 10.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /bkse/options/kernel_encoding/GOPRO/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: GOPRO_woVAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: GOPRO 13 | mode: GOPRO 14 | interval_list: [1] 15 | dataroot_HQ: datasets/GOPRO/train_sharp.lmdb 16 | dataroot_LQ: datasets/GOPRO/train_blur_linear.lmdb 17 | cache_keys: ~ 18 | 19 | use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 16 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: false 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: ~ 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 
150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-8 66 | 67 | pixel_criterion: cb 68 | pixel_weight: 1.0 69 | kl_weight: 0.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /bkse/options/kernel_encoding/REDS/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: REDS_woVAE 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [3] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: REDS 13 | mode: REDS 14 | interval_list: [1] 15 | dataroot_HQ: datasets/REDS/train_sharp_wval.lmdb 16 | dataroot_LQ: datasets/REDS/train_blur_wval.lmdb 17 | cache_keys: ~ 18 | 19 | use_shuffle: true 20 | n_workers: 4 # per GPU 21 | batch_size: 13 22 | HQ_size: &HQ_SIZE 256 23 | LQ_size: 256 24 | use_flip: true 25 | use_rot: true 26 | color: RGB 27 | 28 | #### network structures 29 | KernelWizard: 30 | input_nc: 3 31 | nf: 64 32 | front_RBs: 10 33 | back_RBs: 20 34 | N_frames: 1 35 | kernel_dim: 512 36 | img_size: *HQ_SIZE 37 | use_vae: false 38 | KernelExtractor: 39 | norm: none 40 | use_sharp: true 41 | n_blocks: 4 42 | padding_type: reflect 43 | use_dropout: false 44 | Adapter: 45 | norm: none 46 | use_dropout: false 47 | 48 | #### path 49 | path: 50 | pretrain_model_G: ~ 51 | strict_load: false 52 | resume_state: ~ 53 | 54 | #### training settings: learning rate scheme, loss 55 | train: 56 | lr_G: !!float 1e-4 57 | lr_scheme: CosineAnnealingLR_Restart 58 | beta1: 0.9 59 | beta2: 0.99 60 | niter: 600000 61 | warmup_iter: -1 # -1: no warm up 62 | T_period: [50000, 100000, 150000, 150000, 150000] 63 | restarts: [50000, 150000, 300000, 450000] 64 | restart_weights: [1, 1, 1, 1] 65 | eta_min: !!float 1e-6 66 | 67 | pixel_criterion: cb 68 | pixel_weight: 1.0 69 | kl_weight: 0.0 70 | val_freq: !!float 5e3 71 | 72 | manual_seed: 0 73 | 74 | #### logger 75 | logger: 76 | print_freq: 10 77 | save_checkpoint_freq: !!float 5e3 78 | -------------------------------------------------------------------------------- /bkse/options/kernel_encoding/mix/woVAE.yml: -------------------------------------------------------------------------------- 1 | #### general settings 2 | name: mix_wsharp 3 | use_tb_logger: true 4 | model: image_base 5 | distortion: deblur 6 | scale: 1 7 | gpu_ids: [0] 8 | 9 | #### datasets 10 | datasets: 11 | train: 12 | name: mix 13 | mode: mix 14 | interval_list: [1] 15 | dataroots_HQ: ['datasets/REDS/train_sharp_wval.lmdb', 'datasets/GOPRO/train_sharp.lmdb'] 16 | dataroots_LQ: ['datasets/REDS/train_blur_wval.lmdb', 'datasets/GOPRO/train_blur_linear.lmdb'] 17 | dataset_weights: [1, 10] 18 | cache_keys: ~ 19 | 20 | N_frames: 1 21 | use_shuffle: true 22 | n_workers: 3 # per GPU 23 | batch_size: 16 24 | HQ_size: 256 25 | LQ_size: 256 26 | use_flip: true 27 | use_rot: true 28 | color: RGB 29 | 30 | #### network structures 31 | KernelWizard: 32 | input_nc: 3 33 | nf: 64 34 | front_RBs: 10 35 | back_RBs: 20 36 | N_frames: 1 37 | kernel_dim: 512 38 | use_vae: false 39 | KernelExtractor: 40 | norm: none 41 | use_sharp: true 42 | n_blocks: 4 43 | padding_type: reflect 44 | use_dropout: false 45 | Adapter: 46 | norm: none 47 | use_dropout: false 48 | 49 | #### path 50 | path: 51 | pretrain_model_G: ~ 52 | strict_load: false 53 | resume_state: ~ 54 | 55 | #### training settings: learning 
rate scheme, loss 56 | train: 57 | lr_G: !!float 1e-4 58 | lr_scheme: CosineAnnealingLR_Restart 59 | beta1: 0.9 60 | beta2: 0.99 61 | niter: 600000 62 | warmup_iter: -1 # -1: no warm up 63 | T_period: [50000, 100000, 150000, 150000, 150000] 64 | restarts: [50000, 150000, 300000, 450000] 65 | restart_weights: [1, 1, 1, 1] 66 | eta_min: !!float 1e-8 67 | 68 | pixel_criterion: cb 69 | pixel_weight: 1.0 70 | kernel_weight: 0.1 71 | gradient_loss_weight: 0.3 72 | val_freq: !!float 5e3 73 | 74 | manual_seed: 0 75 | 76 | #### logger 77 | logger: 78 | print_freq: 10 79 | save_checkpoint_freq: !!float 5000 80 | -------------------------------------------------------------------------------- /bkse/requirements.txt: -------------------------------------------------------------------------------- 1 | torch >= 1.4.0 2 | torchvision >= 0.5.0 3 | pyyaml 4 | opencv-python 5 | numpy 6 | lmdb 7 | tqdm 8 | tensorboard >= 1.15.0 9 | ninja 10 | -------------------------------------------------------------------------------- /bkse/scripts/download_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import os.path as osp 4 | 5 | import requests 6 | 7 | 8 | def download_file_from_google_drive(file_id, destination): 9 | os.makedirs(osp.dirname(destination), exist_ok=True) 10 | URL = "https://docs.google.com/uc?export=download" 11 | 12 | session = requests.Session() 13 | 14 | response = session.get(URL, params={"id": file_id}, stream=True) 15 | token = get_confirm_token(response) 16 | 17 | if token: 18 | params = {"id": file_id, "confirm": token} 19 | response = session.get(URL, params=params, stream=True) 20 | 21 | save_response_content(response, destination) 22 | 23 | 24 | def get_confirm_token(response): 25 | for key, value in response.cookies.items(): 26 | if key.startswith("download_warning"): 27 | return value 28 | 29 | return None 30 | 31 | 32 | def save_response_content(response, destination): 33 | CHUNK_SIZE = 32768 34 | 35 | with open(destination, "wb") as f: 36 | for chunk in response.iter_content(CHUNK_SIZE): 37 | if chunk: # filter out keep-alive new chunks 38 | f.write(chunk) 39 | 40 | 41 | if __name__ == "__main__": 42 | dataset_ids = { 43 | "GOPRO_Large": "1H0PIXvJH4c40pk7ou6nAwoxuR4Qh_Sa2", 44 | "train_sharp": "1YLksKtMhd2mWyVSkvhDaDLWSc1qYNCz-", 45 | "train_blur": "1Be2cgzuuXibcqAuJekDgvHq4MLYkCgR8", 46 | "val_sharp": "1MGeObVQ1-Z29f-myDP7-8c3u0_xECKXq", 47 | "val_blur": "1N8z2yD0GDWmh6U4d4EADERtcUgDzGrHx", 48 | "test_blur": "1dr0--ZBKqr4P1M8lek6JKD1Vd6bhhrZT", 49 | } 50 | 51 | parser = argparse.ArgumentParser( 52 | description="Download REDS dataset from google drive to current folder", allow_abbrev=False 53 | ) 54 | 55 | parser.add_argument("--REDS_train_sharp", action="store_true", help="download REDS train_sharp.zip") 56 | parser.add_argument("--REDS_train_blur", action="store_true", help="download REDS train_blur.zip") 57 | parser.add_argument("--REDS_val_sharp", action="store_true", help="download REDS val_sharp.zip") 58 | parser.add_argument("--REDS_val_blur", action="store_true", help="download REDS val_blur.zip") 59 | parser.add_argument("--GOPRO", action="store_true", help="download GOPRO_Large.zip") 60 | 61 | args = parser.parse_args() 62 | 63 | if args.REDS_train_sharp: 64 | download_file_from_google_drive(dataset_ids["train_sharp"], "REDS/train_sharp.zip") 65 | if args.REDS_train_blur: 66 | download_file_from_google_drive(dataset_ids["train_blur"], "REDS/train_blur.zip") 67 | if args.REDS_val_sharp: 68 | 
download_file_from_google_drive(dataset_ids["val_sharp"], "REDS/val_sharp.zip") 69 | if args.REDS_val_blur: 70 | download_file_from_google_drive(dataset_ids["val_blur"], "REDS/val_blur.zip") 71 | if args.GOPRO: 72 | download_file_from_google_drive(dataset_ids["GOPRO_Large"], "GOPRO/GOPRO.zip") 73 | -------------------------------------------------------------------------------- /bkse/train_script.sh: -------------------------------------------------------------------------------- 1 | python3.7 train.py -opt options/kernel_encoding/REDS/woVAE.yml 2 | -------------------------------------------------------------------------------- /bkse/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/bkse/utils/__init__.py -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/__init__.py -------------------------------------------------------------------------------- /data/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /data/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /data/__pycache__/dataloader.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/__pycache__/dataloader.cpython-38.pyc -------------------------------------------------------------------------------- /data/__pycache__/dataloader.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/__pycache__/dataloader.cpython-39.pyc -------------------------------------------------------------------------------- /data/ffhq/00000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/ffhq/00000.png -------------------------------------------------------------------------------- /data/ffhq/00001.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/ffhq/00001.png -------------------------------------------------------------------------------- /data/ffhq/00002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/ffhq/00002.png -------------------------------------------------------------------------------- /data/ffhq/00003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/ffhq/00003.png -------------------------------------------------------------------------------- /data/ffhq/00004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/data/ffhq/00004.png -------------------------------------------------------------------------------- /docs/DPUR_algorithm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/docs/DPUR_algorithm.jpg -------------------------------------------------------------------------------- /docs/IMSI_Poster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/docs/IMSI_Poster.jpg -------------------------------------------------------------------------------- /docs/Results.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/docs/Results.pdf -------------------------------------------------------------------------------- /docs/better_consistency.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/docs/better_consistency.jpg -------------------------------------------------------------------------------- /docs/data_fidelity_reconstruction.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/docs/data_fidelity_reconstruction.jpg -------------------------------------------------------------------------------- /guided_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Codebase for "Improved Denoising Diffusion Probabilistic Models". 
3 | """ 4 | -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/condition_methods.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/condition_methods.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/fp16_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/fp16_util.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/gaussian_diffusion.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/gaussian_diffusion.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/measurements.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/measurements.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/nn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/nn.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/posterior_mean_variance.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/posterior_mean_variance.cpython-38.pyc -------------------------------------------------------------------------------- /guided_diffusion/__pycache__/unet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/guided_diffusion/__pycache__/unet.cpython-38.pyc 
-------------------------------------------------------------------------------- /model_configurations/model_config_ImageNet.yaml: -------------------------------------------------------------------------------- 1 | image_size: 256 2 | num_channels: 256 3 | num_res_blocks: 2 4 | channel_mult: "" 5 | learn_sigma: True 6 | class_cond: False 7 | use_checkpoint: False 8 | attention_resolutions: "32,16,8" 9 | num_heads: 4 10 | num_head_channels: 64 11 | num_heads_upsample: -1 12 | use_scale_shift_norm: True 13 | dropout: 0.0 14 | resblock_updown: True 15 | use_fp16: False 16 | use_new_attention_order: False 17 | 18 | model_path: models/imagenet256.pt 19 | -------------------------------------------------------------------------------- /model_configurations/model_config_ffhq.yaml: -------------------------------------------------------------------------------- 1 | image_size: 256 2 | num_channels: 128 3 | num_res_blocks: 1 4 | channel_mult: "" 5 | learn_sigma: True 6 | class_cond: False 7 | use_checkpoint: False 8 | attention_resolutions: 16 9 | num_heads: 4 10 | num_head_channels: 64 11 | num_heads_upsample: -1 12 | use_scale_shift_norm: True 13 | dropout: 0.0 14 | resblock_updown: True 15 | use_fp16: False 16 | use_new_attention_order: False 17 | 18 | model_path: models/ffhq_10m.pt -------------------------------------------------------------------------------- /purification_configurations/purification_config_gaussian_deblur.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 10 3 | csgm_num_iterations: 100 4 | purification_schedule: linear 5 | save_every_main: 1000000 6 | save_every_sub: 10000000 7 | optimizer: SGD 8 | lr: 100000 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 400 13 | ddim_end_timestep: 0 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | dataset: 18 | name: ffhq 19 | root: ./data/ffhq 20 | 21 | others: 22 | img_size: !!python/tuple [1,3,256,256] 23 | -------------------------------------------------------------------------------- /purification_configurations/purification_config_inpainting.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 20 3 | csgm_num_iterations: 50 4 | purification_schedule: linear 5 | save_every_main: 1000000 6 | save_every_sub: 10000000 7 | optimizer: SGD 8 | lr: 1000 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 700 13 | ddim_end_timestep: 0 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | dataset: 18 | name: ffhq 19 | root: ./data/ffhq 20 | 21 | others: 22 | img_size: !!python/tuple [1,3,256,256] -------------------------------------------------------------------------------- /purification_configurations/purification_config_motion_deblur.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 10 3 | csgm_num_iterations: 100 4 | purification_schedule: linear 5 | save_every_main: 1000000 6 | save_every_sub: 1000000 7 | optimizer: SGD 8 | lr: 100000 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 400 13 | ddim_end_timestep: 0 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | dataset: 18 | name: ffhq 19 | root: ./data/ffhq 20 | 21 | others: 22 | img_size: !!python/tuple [1,3,256,256] 
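A loading note that applies to every purification config in this block: the img_size: !!python/tuple [1,3,256,256] entries use a Python-specific YAML tag, which yaml.safe_load rejects. A minimal sketch of one way to read them; yaml.unsafe_load constructs python/* tags, so reserve it for config files you trust:

import yaml

with open("purification_configurations/purification_config_motion_deblur.yaml") as f:
    cfg = yaml.unsafe_load(f)  # safe_load would raise a ConstructorError on !!python/tuple

img_size = cfg["others"]["img_size"]  # a real Python tuple: (1, 3, 256, 256)
assert isinstance(img_size, tuple)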
-------------------------------------------------------------------------------- /purification_configurations/purification_config_nonlinear_deblur.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 200 3 | csgm_num_iterations: 100 4 | purification_schedule: linear 5 | save_every_main: 1000000 6 | save_every_sub: 1000000 7 | optimizer: SGD 8 | lr: 300 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 1000 13 | ddim_end_timestep: 50 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | dataset: 18 | name: ffhq 19 | root: ./data/ffhq 20 | 21 | others: 22 | img_size: !!python/tuple [1,3,256,256] -------------------------------------------------------------------------------- /purification_configurations/purification_config_phase_retrieval.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 200 3 | csgm_num_iterations: 100 4 | purification_schedule: linear 5 | save_every_main: 100000 6 | save_every_sub: 100000 7 | optimizer: SGD 8 | lr: 500 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 1000 13 | ddim_end_timestep: 20 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | dataset: 18 | name: ffhq 19 | #img_size: !!python/tuple [1,3,256,256] 20 | root: ./data/ffhq 21 | 22 | others: 23 | img_size: !!python/tuple [1,3,256,256] -------------------------------------------------------------------------------- /purification_configurations/purification_config_super_resolution.yaml: -------------------------------------------------------------------------------- 1 | purification: 2 | total_num_iterations: 10 3 | csgm_num_iterations: 100 4 | purification_schedule: linear 5 | save_every_main: 100000 6 | save_every_sub: 100000 7 | optimizer: SGD 8 | lr: 1000 9 | momentum: 0.9 10 | ddim_num_iterations: 20 11 | full_ddim: True 12 | ddim_init_timestep: 400 13 | ddim_end_timestep: 0 14 | use_weight_decay: False 15 | weight_decay_lambda: 0 16 | 17 | 18 | dataset: 19 | name: ffhq 20 | #img_size: !!python/tuple [1,3,256,256] 21 | root: ./data/ffhq 22 | 23 | others: 24 | img_size: !!python/tuple [1,3,256,256] 25 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | certifi==2022.9.14 2 | charset-normalizer==2.1.1 3 | contourpy==1.0.5 4 | cycler==0.11.0 5 | fonttools==4.37.2 6 | idna==3.4 7 | kiwisolver==1.4.4 8 | matplotlib==3.6.0 9 | numpy==1.23.3 10 | packaging==21.3 11 | Pillow==9.2.0 12 | pyparsing==3.0.9 13 | python-dateutil==2.8.2 14 | PyYAML==6.0 15 | requests==2.28.1 16 | scipy==1.9.1 17 | six==1.16.0 18 | tqdm==4.64.1 19 | typing-extensions==4.3.0 20 | urllib3==1.26.12 21 | -------------------------------------------------------------------------------- /run/gaussian_deblur.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=qingqu1 3 | #SBATCH --job-name=gaussian_deblur 4 | #SBATCH --nodes=1 5 | #SBATCH --time=48:00:00 6 | #SBATCH --mem=47GB 7 | #SBATCH --partition=spgpu 8 | #SBATCH --gres=gpu:1 9 | #SBATCH --cpus-per-task=4 10 | #SBATCH --output=gaussian_deblur.log 11 | #SBATCH --mail-user=forkobe@umich.edu 12 | #SBATCH --mail-type=END 13 | 14 | module purge 15 | # module load cuda/10.2.89 16 | module load python3.9-anaconda/2021.11 17 
| eval "$(conda shell.bash hook)" 18 | conda activate stable_diffusion 19 | 20 | python dcdp.py --task_config=./task_configurations/gaussian_deblur_config.yaml --purification_config=./purification_configurations/purification_config_gaussian_deblur.yaml \ 21 | --model_config=./model_configurations/model_config_ffhq.yaml -------------------------------------------------------------------------------- /run/motion_deblur.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=qingqu1 3 | #SBATCH --job-name=motion_deblur 4 | #SBATCH --nodes=1 5 | #SBATCH --time=48:00:00 6 | #SBATCH --mem=47GB 7 | #SBATCH --partition=spgpu 8 | #SBATCH --gres=gpu:1 9 | #SBATCH --cpus-per-task=4 10 | #SBATCH --output=motion_deblur.log 11 | #SBATCH --mail-user=forkobe@umich.edu 12 | #SBATCH --mail-type=END 13 | 14 | module purge 15 | # module load cuda/10.2.89 16 | module load python3.9-anaconda/2021.11 17 | eval "$(conda shell.bash hook)" 18 | conda activate stable_diffusion 19 | 20 | python dcdp.py --task_config=./task_configurations/motion_deblur_config.yaml --purification_config=./purification_configurations/purification_config_motion_deblur.yaml \ 21 | --model_config=./model_configurations/model_config_ffhq.yaml -------------------------------------------------------------------------------- /run/nonlinear_deblur.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=qingqu1 3 | #SBATCH --job-name=nonlinear_deblur 4 | #SBATCH --nodes=1 5 | #SBATCH --time=48:00:00 6 | #SBATCH --mem=47GB 7 | #SBATCH --partition=spgpu 8 | #SBATCH --gres=gpu:1 9 | #SBATCH --cpus-per-task=4 10 | #SBATCH --output=nonlinear_deblur_adaptive_300_lr_100_Endstep_100_cosine.log 11 | #SBATCH --mail-user=forkobe@umich.edu 12 | #SBATCH --mail-type=END 13 | 14 | module purge 15 | # module load cuda/10.2.89 16 | module load python3.9-anaconda/2021.11 17 | eval "$(conda shell.bash hook)" 18 | conda activate stable_diffusion 19 | 20 | python dcdp.py --task_config=./task_configurations/nonlinear_deblur_config.yaml --purification_config=./purification_configurations/purification_config_nonlinear_deblur.yaml \ 21 | --model_config=./model_configurations/model_config_ffhq.yaml -------------------------------------------------------------------------------- /run/phase_retrieval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=qingqu1 3 | #SBATCH --job-name=FR 4 | #SBATCH --nodes=1 5 | #SBATCH --time=48:00:00 6 | #SBATCH --mem=47GB 7 | #SBATCH --partition=spgpu 8 | #SBATCH --gres=gpu:1 9 | #SBATCH --cpus-per-task=4 10 | #SBATCH --output=phase_retrieval_version2.log 11 | #SBATCH --mail-user=forkobe@umich.edu 12 | #SBATCH --mail-type=END 13 | 14 | module purge 15 | # module load cuda/10.2.89 16 | module load python3.9-anaconda/2021.11 17 | eval "$(conda shell.bash hook)" 18 | conda activate stable_diffusion 19 | 20 | python dcdp.py --task_config=./task_configurations/phase_retrieval_config.yaml --purification_config=./purification_configurations/purification_config_phase_retrieval.yaml \ 21 | --model_config=./model_configurations/model_config_ffhq.yaml -------------------------------------------------------------------------------- /run/super_resolution.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=qingqu1 3 | #SBATCH --job-name=FR 4 | #SBATCH --nodes=1 5 | #SBATCH 
--time=48:00:00 6 | #SBATCH --mem=47GB 7 | #SBATCH --partition=spgpu 8 | #SBATCH --gres=gpu:1 9 | #SBATCH --cpus-per-task=4 10 | #SBATCH --output=super_resolution.log 11 | #SBATCH --mail-user=forkobe@umich.edu 12 | #SBATCH --mail-type=END 13 | 14 | module purge 15 | # module load cuda/10.2.89 16 | module load python3.9-anaconda/2021.11 17 | eval "$(conda shell.bash hook)" 18 | conda activate stable_diffusion 19 | 20 | python dcdp.py --task_config=./task_configurations/super_resolution_config.yaml --purification_config=./purification_configurations/purification_config_super_resolution.yaml \ 21 | --model_config=./model_configurations/model_config_ffhq.yaml -------------------------------------------------------------------------------- /task_configurations/gaussian_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 0.3 5 | 6 | measurement: 7 | operator: 8 | name: gaussian_blur 9 | kernel_size: 61 10 | intensity: 3.0 11 | 12 | noise: 13 | name: gaussian 14 | sigma: 0 -------------------------------------------------------------------------------- /task_configurations/inpainting_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 1 5 | 6 | measurement: 7 | operator: 8 | name: inpainting 9 | mask_opt: 10 | mask_type: random 11 | # mask_len_range: !!python/tuple [128, 129] # for box 12 | mask_prob_range: !!python/tuple [0.3, 0.7] # for random 13 | image_size: 256 14 | 15 | noise: 16 | name: gaussian 17 | sigma: 0 -------------------------------------------------------------------------------- /task_configurations/motion_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 0.3 5 | 6 | measurement: 7 | operator: 8 | name: motion_blur 9 | kernel_size: 61 10 | intensity: 0.5 11 | 12 | noise: 13 | name: gaussian 14 | sigma: 0 -------------------------------------------------------------------------------- /task_configurations/nonlinear_deblur_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 0.3 5 | 6 | measurement: 7 | operator: 8 | name: nonlinear_blur 9 | opt_yml_path: ./bkse/options/generate_blur/default.yml 10 | 11 | noise: 12 | name: gaussian 13 | sigma: 0.05 -------------------------------------------------------------------------------- /task_configurations/phase_retrieval_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 1 5 | 6 | measurement: 7 | operator: 8 | name: phase_retrieval 9 | oversample: 2.0 10 | 11 | noise: 12 | name: gaussian 13 | sigma: 0.05 14 | -------------------------------------------------------------------------------- /task_configurations/super_resolution_config.yaml: -------------------------------------------------------------------------------- 1 | conditioning: 2 | method: ps 3 | params: 4 | scale: 0.3 5 | 6 | measurement: 7 | operator: 8 | name: super_resolution 9 | in_shape: !!python/tuple [1, 3, 256, 256] 10 | scale_factor: 4 11 | 12 | noise: 13 | name: gaussian 14 | sigma: 0 -------------------------------------------------------------------------------- /util/__pycache__/fastmri_utils.cpython-38.pyc:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/util/__pycache__/fastmri_utils.cpython-38.pyc -------------------------------------------------------------------------------- /util/__pycache__/img_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/util/__pycache__/img_utils.cpython-38.pyc -------------------------------------------------------------------------------- /util/__pycache__/logger.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/util/__pycache__/logger.cpython-38.pyc -------------------------------------------------------------------------------- /util/__pycache__/resizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/util/__pycache__/resizer.cpython-38.pyc -------------------------------------------------------------------------------- /util/compute_metric.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from skimage.metrics import peak_signal_noise_ratio 3 | from tqdm import tqdm 4 | 5 | import matplotlib.pyplot as plt 6 | import lpips 7 | import numpy as np 8 | import torch 9 | 10 | 11 | device = 'cuda:0' 12 | loss_fn_vgg = lpips.LPIPS(net='vgg').to(device) 13 | 14 | task = 'SR' 15 | factor = 4 16 | sigma = 0.1 17 | scale = 1.0 18 | 19 | 20 | label_root = Path(f'/media/harry/tomo/FFHQ/256_1000') 21 | 22 | delta_recon_root = Path(f'./results/{task}/ffhq/{factor}/{sigma}/ps/{scale}/recon') 23 | normal_recon_root = Path(f'./results/{task}/ffhq/{factor}/{sigma}/ps+/{scale}/recon') 24 | 25 | psnr_delta_list = [] 26 | psnr_normal_list = [] 27 | 28 | lpips_delta_list = [] 29 | lpips_normal_list = [] 30 | for idx in tqdm(range(150)): 31 | fname = str(idx).zfill(5) 32 | 33 | label = plt.imread(label_root / f'{fname}.png')[:, :, :3] 34 | delta_recon = plt.imread(delta_recon_root / f'{fname}.png')[:, :, :3] 35 | normal_recon = plt.imread(normal_recon_root / f'{fname}.png')[:, :, :3] 36 | 37 | psnr_delta = peak_signal_noise_ratio(label, delta_recon) 38 | psnr_normal = peak_signal_noise_ratio(label, normal_recon) 39 | 40 | psnr_delta_list.append(psnr_delta) 41 | psnr_normal_list.append(psnr_normal) 42 | 43 | delta_recon = torch.from_numpy(delta_recon).permute(2, 0, 1).to(device) 44 | normal_recon = torch.from_numpy(normal_recon).permute(2, 0, 1).to(device) 45 | label = torch.from_numpy(label).permute(2, 0, 1).to(device) 46 | 47 | delta_recon = delta_recon.view(1, 3, 256, 256) * 2. - 1. 48 | normal_recon = normal_recon.view(1, 3, 256, 256) * 2. - 1. 49 | label = label.view(1, 3, 256, 256) * 2. - 1. 
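# lpips' VGG metric expects inputs scaled to [-1, 1]; plt.imread returned [0, 1] floats, hence the rescale above.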
50 | 51 | delta_d = loss_fn_vgg(delta_recon, label).item() # .item() detaches the LPIPS score and frees GPU memory each iteration 52 | normal_d = loss_fn_vgg(normal_recon, label).item() 53 | 54 | lpips_delta_list.append(delta_d) 55 | lpips_normal_list.append(normal_d) 56 | 57 | psnr_delta_avg = sum(psnr_delta_list) / len(psnr_delta_list) 58 | lpips_delta_avg = sum(lpips_delta_list) / len(lpips_delta_list) 59 | 60 | psnr_normal_avg = sum(psnr_normal_list) / len(psnr_normal_list) 61 | lpips_normal_avg = sum(lpips_normal_list) / len(lpips_normal_list) 62 | 63 | print(f'Delta PSNR: {psnr_delta_avg}') 64 | print(f'Delta LPIPS: {lpips_delta_avg}') 65 | 66 | print(f'Normal PSNR: {psnr_normal_avg}') 67 | print(f'Normal LPIPS: {lpips_normal_avg}') -------------------------------------------------------------------------------- /util/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | def get_logger(): 4 | logger = logging.getLogger(name='DPS') 5 | logger.setLevel(logging.INFO) 6 | 7 | formatter = logging.Formatter("%(asctime)s [%(name)s] >> %(message)s") 8 | stream_handler = logging.StreamHandler() 9 | stream_handler.setFormatter(formatter) 10 | logger.addHandler(stream_handler) 11 | 12 | return logger -------------------------------------------------------------------------------- /util/tools.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Morefre/Decoupled-Data-Consistency-with-Diffusion-Purification-for-Image-Restoration/9d635a98b4e478e9733d1667bd0242695634f8c4/util/tools.py --------------------------------------------------------------------------------