├── .gitattributes
├── .gitignore
├── KPN.py
├── README.md
├── data_generation
├── __init__.py
├── ahd_demosaicking.py
├── constants.py
├── data_utils.py
├── denoise_wavelet.py
├── generate_dataset.py
├── image_io.py
├── image_processing.py
├── kernel.py
└── pipeline.py
├── data_provider.py
├── dataset_specs
├── data_configspec.conf
└── full_dataset.conf
├── eval_images
├── 10_gt.png
├── 10_kpn_5x5_38.68dB_0.9798.png
├── 10_noisy_34.50dB_0.8800.png
├── 11_gt.png
├── 11_kpn_5x5_38.84dB_0.9909.png
├── 11_noisy_34.61dB_0.8585.png
├── 12_gt.png
├── 12_kpn_5x5_38.04dB_0.9860.png
├── 12_noisy_34.56dB_0.8722.png
├── 13_gt.png
├── 13_kpn_5x5_38.88dB_0.9890.png
├── 13_noisy_34.48dB_0.8867.png
├── 14_gt.png
├── 14_kpn_5x5_37.04dB_0.9497.png
├── 14_noisy_34.69dB_0.8965.png
├── 15_gt.png
├── 15_kpn_5x5_38.31dB_0.9893.png
├── 15_noisy_34.50dB_0.9219.png
├── 16_gt.png
├── 16_kpn_5x5_37.81dB_0.9653.png
├── 16_noisy_34.38dB_0.8607.png
├── 17_gt.png
├── 17_kpn_5x5_34.16dB_0.9625.png
├── 17_noisy_28.32dB_0.7489.png
├── 18_gt.png
├── 18_kpn_5x5_36.83dB_0.9698.png
├── 18_noisy_28.09dB_0.6567.png
├── 19_gt.png
├── 19_kpn_5x5_39.11dB_0.9738.png
├── 19_noisy_28.13dB_0.5510.png
├── 1_gt.png
├── 1_kpn.png
├── 1_kpn_5x5_39.35dB_0.9866.png
├── 1_noisy.png
├── 1_noisy_36.73dB_0.9366.png
├── 20_gt.png
├── 20_kpn_5x5_33.79dB_0.9656.png
├── 20_noisy_28.72dB_0.7307.png
├── 21_gt.png
├── 21_kpn.png
├── 21_kpn_5x5_37.36dB_0.9316.png
├── 21_noisy.png
├── 21_noisy_27.61dB_0.5016.png
├── 22_gt.png
├── 22_kpn.png
├── 22_kpn_5x5_33.98dB_0.9473.png
├── 22_noisy.png
├── 22_noisy_28.10dB_0.7991.png
├── 23_gt.png
├── 23_kpn.png
├── 23_kpn_5x5_35.85dB_0.9759.png
├── 23_noisy.png
├── 23_noisy_28.39dB_0.6966.png
├── 24_gt.png
├── 24_kpn.png
├── 24_kpn_5x5_34.84dB_0.9692.png
├── 24_noisy.png
├── 24_noisy_28.60dB_0.6402.png
├── 25_gt.png
├── 25_kpn_5x5_40.11dB_0.9853.png
├── 25_noisy_29.38dB_0.5669.png
├── 26_gt.png
├── 26_kpn_5x5_32.85dB_0.9452.png
├── 26_noisy_26.66dB_0.7940.png
├── 27_gt.png
├── 27_kpn_5x5_35.52dB_0.9546.png
├── 27_noisy_27.42dB_0.7241.png
├── 28_gt.png
├── 28_kpn_5x5_36.13dB_0.9708.png
├── 28_noisy_28.70dB_0.6872.png
├── 29_gt.png
├── 29_kpn_5x5_35.68dB_0.9565.png
├── 29_noisy_27.39dB_0.7316.png
├── 2_gt.png
├── 2_kpn.png
├── 2_kpn_5x5_39.48dB_0.9919.png
├── 2_noisy.png
├── 2_noisy_37.73dB_0.9347.png
├── 30_gt.png
├── 30_kpn_5x5_37.83dB_0.9713.png
├── 30_noisy_26.21dB_0.6279.png
├── 31_gt.png
├── 31_kpn_5x5_37.32dB_0.9682.png
├── 31_noisy_28.10dB_0.6873.png
├── 32_gt.png
├── 32_kpn_5x5_37.64dB_0.9700.png
├── 32_noisy_32.31dB_0.7877.png
├── 33_gt.png
├── 33_kpn_5x5_39.45dB_0.9817.png
├── 33_noisy_32.41dB_0.7184.png
├── 34_gt.png
├── 34_kpn_5x5_38.20dB_0.9803.png
├── 34_noisy_32.51dB_0.7680.png
├── 35_gt.png
├── 35_kpn_5x5_39.29dB_0.9842.png
├── 35_noisy_32.38dB_0.8153.png
├── 36_gt.png
├── 36_kpn_5x5_37.80dB_0.9754.png
├── 36_noisy_32.38dB_0.8101.png
├── 37_gt.png
├── 37_kpn_5x5_37.77dB_0.9826.png
├── 37_noisy_32.21dB_0.8376.png
├── 38_gt.png
├── 38_kpn_5x5_36.79dB_0.9648.png
├── 38_noisy_32.33dB_0.8203.png
├── 39_gt.png
├── 39_kpn_5x5_36.01dB_0.9710.png
├── 39_noisy_27.63dB_0.7201.png
├── 3_gt.png
├── 3_kpn.png
├── 3_kpn_5x5_39.14dB_0.9938.png
├── 3_noisy.png
├── 3_noisy_37.67dB_0.9583.png
├── 40_gt.png
├── 40_kpn_5x5_38.57dB_0.9918.png
├── 40_noisy_36.00dB_0.9021.png
├── 41_gt.png
├── 41_kpn_5x5_38.56dB_0.9867.png
├── 41_noisy_36.12dB_0.9290.png
├── 42_gt.png
├── 42_kpn_5x5_39.17dB_0.9878.png
├── 42_noisy_36.09dB_0.9311.png
├── 43_gt.png
├── 43_kpn_5x5_39.89dB_0.9728.png
├── 43_noisy_36.18dB_0.8960.png
├── 44_gt.png
├── 44_kpn_5x5_37.81dB_0.9755.png
├── 44_noisy_36.06dB_0.9542.png
├── 45_gt.png
├── 45_kpn_5x5_37.07dB_0.9824.png
├── 45_noisy_36.21dB_0.9775.png
├── 46_gt.png
├── 46_kpn_5x5_41.12dB_0.9823.png
├── 46_noisy_31.75dB_0.7773.png
├── 47_gt.png
├── 47_kpn_5x5_38.98dB_0.9849.png
├── 47_noisy_35.49dB_0.9071.png
├── 48_gt.png
├── 48_kpn_5x5_37.68dB_0.9244.png
├── 48_noisy_30.25dB_0.7162.png
├── 49_gt.png
├── 49_kpn_5x5_30.79dB_0.7580.png
├── 49_noisy_26.10dB_0.5892.png
├── 4_gt.png
├── 4_kpn.png
├── 4_kpn_5x5_39.25dB_0.9824.png
├── 4_noisy.png
├── 4_noisy_37.99dB_0.9598.png
├── 50_gt.png
├── 50_kpn_5x5_39.02dB_0.9887.png
├── 50_noisy_36.48dB_0.9212.png
├── 51_gt.png
├── 51_kpn_5x5_39.68dB_0.9903.png
├── 51_noisy_36.14dB_0.9540.png
├── 52_gt.png
├── 52_kpn_5x5_39.06dB_0.9819.png
├── 52_noisy_36.34dB_0.9172.png
├── 53_gt.png
├── 53_kpn_5x5_39.40dB_0.9871.png
├── 53_noisy_34.24dB_0.9062.png
├── 54_gt.png
├── 54_kpn_5x5_39.98dB_0.9886.png
├── 54_noisy_36.25dB_0.9036.png
├── 55_gt.png
├── 55_kpn_5x5_38.73dB_0.9822.png
├── 55_noisy_37.04dB_0.9654.png
├── 56_gt.png
├── 56_kpn_5x5_39.51dB_0.9936.png
├── 56_noisy_38.16dB_0.9714.png
├── 57_gt.png
├── 57_kpn_5x5_37.89dB_0.9887.png
├── 57_noisy_35.77dB_0.9747.png
├── 58_gt.png
├── 58_kpn_5x5_39.52dB_0.9840.png
├── 58_noisy_38.25dB_0.9738.png
├── 59_gt.png
├── 59_kpn_5x5_38.02dB_0.9827.png
├── 59_noisy_33.73dB_0.9389.png
├── 5_gt.png
├── 5_kpn.png
├── 5_kpn_5x5_39.33dB_0.9888.png
├── 5_noisy.png
├── 5_noisy_37.77dB_0.9749.png
├── 60_gt.png
├── 60_kpn_5x5_38.65dB_0.9853.png
├── 60_noisy_37.21dB_0.9718.png
├── 61_gt.png
├── 61_kpn_5x5_40.22dB_0.9922.png
├── 61_noisy_37.93dB_0.9604.png
├── 62_gt.png
├── 62_kpn_5x5_36.89dB_0.9901.png
├── 62_noisy_34.35dB_0.9645.png
├── 63_gt.png
├── 63_kpn_5x5_34.38dB_0.9104.png
├── 63_noisy_25.35dB_0.4702.png
├── 64_gt.png
├── 64_kpn_5x5_35.10dB_0.9525.png
├── 64_noisy_25.87dB_0.5550.png
├── 65_gt.png
├── 65_kpn_5x5_34.76dB_0.9458.png
├── 65_noisy_25.34dB_0.5893.png
├── 66_gt.png
├── 66_kpn_5x5_34.08dB_0.8902.png
├── 66_noisy_25.16dB_0.5120.png
├── 67_gt.png
├── 67_kpn_5x5_39.37dB_0.9908.png
├── 67_noisy_38.30dB_0.9847.png
├── 68_gt.png
├── 68_kpn_5x5_35.75dB_0.9324.png
├── 68_noisy_25.69dB_0.4059.png
├── 69_gt.png
├── 69_kpn_5x5_32.32dB_0.9361.png
├── 69_noisy_25.68dB_0.7298.png
├── 6_gt.png
├── 6_kpn_5x5_38.36dB_0.9825.png
├── 6_noisy_36.65dB_0.9624.png
├── 70_gt.png
├── 70_kpn_5x5_39.91dB_0.9910.png
├── 70_noisy_38.50dB_0.9572.png
├── 71_gt.png
├── 71_kpn_5x5_35.55dB_0.9774.png
├── 71_noisy_32.97dB_0.8975.png
├── 72_gt.png
├── 72_kpn_5x5_37.08dB_0.9755.png
├── 72_noisy_32.98dB_0.8527.png
├── 73_gt.png
├── 73_kpn_5x5_39.25dB_0.9713.png
├── 73_noisy_31.69dB_0.7565.png
├── 74_gt.png
├── 74_kpn_5x5_37.40dB_0.9598.png
├── 74_noisy_31.40dB_0.7913.png
├── 75_gt.png
├── 75_kpn_5x5_34.89dB_0.9287.png
├── 75_noisy_25.27dB_0.6376.png
├── 76_gt.png
├── 76_kpn_5x5_38.18dB_0.9756.png
├── 76_noisy_32.34dB_0.8855.png
├── 77_gt.png
├── 77_kpn_5x5_36.31dB_0.9730.png
├── 77_noisy_32.73dB_0.9155.png
├── 78_gt.png
├── 78_kpn_5x5_38.33dB_0.9751.png
├── 78_noisy_25.95dB_0.4840.png
├── 79_gt.png
├── 79_kpn_5x5_37.68dB_0.9554.png
├── 79_noisy_32.21dB_0.7821.png
├── 7_gt.png
├── 7_kpn_5x5_40.65dB_0.9868.png
├── 7_noisy_37.47dB_0.9308.png
├── 80_gt.png
├── 80_kpn_5x5_36.39dB_0.9837.png
├── 80_noisy_32.55dB_0.9169.png
├── 81_gt.png
├── 81_kpn_5x5_37.33dB_0.9816.png
├── 81_noisy_32.67dB_0.8759.png
├── 82_gt.png
├── 82_kpn_5x5_36.23dB_0.9788.png
├── 82_noisy_32.49dB_0.8655.png
├── 83_gt.png
├── 83_kpn_5x5_36.86dB_0.9658.png
├── 83_noisy_31.89dB_0.8717.png
├── 84_gt.png
├── 84_kpn_5x5_37.53dB_0.9908.png
├── 84_noisy_32.70dB_0.8561.png
├── 85_gt.png
├── 85_kpn_5x5_37.93dB_0.9557.png
├── 85_noisy_32.45dB_0.7962.png
├── 86_gt.png
├── 86_kpn_5x5_34.20dB_0.9634.png
├── 86_noisy_30.81dB_0.9148.png
├── 87_gt.png
├── 87_kpn_5x5_39.10dB_0.9747.png
├── 87_noisy_30.31dB_0.5805.png
├── 88_gt.png
├── 88_kpn_5x5_37.34dB_0.9650.png
├── 88_noisy_29.50dB_0.6900.png
├── 89_gt.png
├── 89_kpn_5x5_36.06dB_0.9816.png
├── 89_noisy_30.16dB_0.7799.png
├── 8_gt.png
├── 8_kpn_5x5_38.48dB_0.9800.png
├── 8_noisy_35.23dB_0.9459.png
├── 90_gt.png
├── 90_kpn_5x5_34.73dB_0.9713.png
├── 90_noisy_29.64dB_0.8340.png
├── 91_gt.png
├── 91_kpn_5x5_39.03dB_0.9762.png
├── 91_noisy_32.42dB_0.7615.png
├── 92_gt.png
├── 92_kpn_5x5_38.21dB_0.9746.png
├── 92_noisy_30.11dB_0.7182.png
├── 93_gt.png
├── 93_kpn_5x5_36.61dB_0.9652.png
├── 93_noisy_29.85dB_0.6761.png
├── 94_gt.png
├── 94_kpn_5x5_37.38dB_0.9859.png
├── 94_noisy_32.82dB_0.8374.png
├── 95_gt.png
├── 95_kpn_5x5_37.35dB_0.9740.png
├── 95_noisy_32.39dB_0.8629.png
├── 96_gt.png
├── 96_kpn_5x5_35.64dB_0.9681.png
├── 96_noisy_29.94dB_0.7946.png
├── 97_gt.png
├── 97_kpn_5x5_36.06dB_0.9525.png
├── 97_noisy_32.48dB_0.8883.png
├── 98_gt.png
├── 98_kpn_5x5_36.41dB_0.9654.png
├── 98_noisy_31.56dB_0.8672.png
├── 99_gt.png
├── 99_kpn_5x5_38.40dB_0.9825.png
├── 99_noisy_32.58dB_0.8636.png
├── 9_gt.png
├── 9_kpn_5x5_37.53dB_0.9877.png
├── 9_noisy_34.32dB_0.8747.png
└── thumbs.db
├── eval_images_RGB
├── 0_gt.png
├── 0_noisy_29.17dB_0.911.png
├── 0_pred_31.86dB_0.957.png
├── 10_gt.png
├── 10_noisy_31.95dB_0.776.png
├── 10_pred_39.27dB_0.965.png
├── 11_gt.png
├── 11_noisy_23.93dB_0.549.png
├── 11_pred_29.44dB_0.801.png
├── 12_gt.png
├── 12_noisy_18.70dB_0.308.png
├── 12_pred_34.02dB_0.954.png
├── 13_gt.png
├── 13_noisy_32.41dB_0.916.png
├── 13_pred_37.87dB_0.987.png
├── 1_gt.png
├── 1_noisy_22.25dB_0.544.png
├── 1_pred_32.77dB_0.912.png
├── 2_gt.png
├── 2_noisy_19.42dB_0.496.png
├── 2_pred_29.73dB_0.900.png
├── 3_gt.png
├── 3_noisy_25.74dB_0.701.png
├── 3_pred_35.85dB_0.960.png
├── 4_gt.png
├── 4_noisy_19.34dB_0.595.png
├── 4_pred_29.69dB_0.937.png
├── 5_gt.png
├── 5_noisy_27.50dB_0.641.png
├── 5_pred_33.03dB_0.840.png
├── 6_gt.png
├── 6_noisy_16.37dB_0.283.png
├── 6_pred_29.25dB_0.844.png
├── 7_gt.png
├── 7_noisy_27.71dB_0.634.png
├── 7_pred_38.33dB_0.961.png
├── 8_gt.png
├── 8_noisy_30.11dB_0.870.png
├── 8_pred_32.60dB_0.942.png
├── 9_gt.png
├── 9_noisy_24.02dB_0.597.png
├── 9_pred_33.24dB_0.914.png
└── thumbs.db
├── kpn_data_provider.py
├── kpn_specs
├── configspec.conf
└── kpn_config.conf
├── models
└── kpn
│ └── checkpoint
│ └── README.md
├── train_eval_syn.py
└── utils
├── __init__.py
├── image_utils.py
└── training_util.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | models/kpn/checkpoint/model_best.pth.tar filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | .idea
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # poetry
99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100 | # This is especially recommended for binary packages to ensure reproducibility, and is more
101 | # commonly ignored for libraries.
102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103 | #poetry.lock
104 |
105 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
106 | __pypackages__/
107 |
108 | # Celery stuff
109 | celerybeat-schedule
110 | celerybeat.pid
111 |
112 | # SageMath parsed files
113 | *.sage.py
114 |
115 | # Environments
116 | .env
117 | .venv
118 | env/
119 | venv/
120 | ENV/
121 | env.bak/
122 | venv.bak/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 | .dmypy.json
137 | dmypy.json
138 |
139 | # Pyre type checker
140 | .pyre/
141 |
142 | # pytype static type analyzer
143 | .pytype/
144 |
145 | # Cython debug symbols
146 | cython_debug/
147 |
148 | # PyCharm
149 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
150 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
151 | # and can be added to the global gitignore or merged into this file. For a more nuclear
152 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
153 | #.idea/
154 |
--------------------------------------------------------------------------------
/KPN.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | import torch.nn.functional as F
5 | from torchsummary import summary
6 | import torchvision.models as models
7 |
8 |
# Basic KPN building block: three 3x3 conv + ReLU layers with optional
# channel/spatial attention gates.
class Basic(nn.Module):
    def __init__(self, in_ch, out_ch, g=16, channel_att=False,
                 spatial_att=False):
        """Three-layer convolution stack with optional attention.

        :param in_ch: number of input channels
        :param out_ch: number of output channels
        :param g: channel-reduction ratio for the channel-attention MLP
        :param channel_att: enable channel attention
        :param spatial_att: enable spatial attention
        """
        super(Basic, self).__init__()
        self.channel_att = channel_att
        self.spatial_att = spatial_att

        # Conv stack: in_ch -> out_ch -> out_ch -> out_ch, ReLU after each.
        stack = []
        for src in (in_ch, out_ch, out_ch):
            stack.append(nn.Conv2d(in_channels=src, out_channels=out_ch,
                                   kernel_size=3, stride=1, padding=1))
            stack.append(nn.ReLU())
        self.conv1 = nn.Sequential(*stack)

        if channel_att:
            # Squeeze-and-excite style gate fed by concatenated avg+max pools.
            self.att_c = nn.Sequential(
                nn.Conv2d(2 * out_ch, out_ch // g, 1, 1, 0),
                nn.ReLU(),
                nn.Conv2d(out_ch // g, out_ch, 1, 1, 0),
                nn.Sigmoid()
            )
        if spatial_att:
            # Per-pixel gate fed by channel-wise mean and max maps.
            self.att_s = nn.Sequential(
                nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7,
                          stride=1, padding=3),
                nn.Sigmoid()
            )

    def forward(self, data):
        """Run the conv stack, then apply any enabled attention gates.

        :param data: input tensor (batch, in_ch, H, W)
        :return: tensor (batch, out_ch, H, W)
        """
        features = self.conv1(data)
        if self.channel_att:
            # Channel descriptor: concatenated global average and max pooling.
            descriptor = torch.cat([F.adaptive_avg_pool2d(features, (1, 1)),
                                    F.adaptive_max_pool2d(features, (1, 1))],
                                   dim=1)
            features = features * self.att_c(descriptor)
        if self.spatial_att:
            # Spatial descriptor: per-pixel mean and max over channels.
            descriptor = torch.cat([torch.mean(features, dim=1, keepdim=True),
                                    torch.max(features, dim=1, keepdim=True)[0]],
                                   dim=1)
            features = features * self.att_s(descriptor)
        return features
64 |
65 |
class KPN(nn.Module):
    """Kernel Prediction Network: a U-Net style encoder/decoder that
    predicts per-pixel denoising kernels for every frame of a burst
    (Mildenhall et al., "Burst Denoising with Kernel Prediction
    Networks", CVPR 2018)."""

    def __init__(self, color=True, burst_length=8, blind_est=False,
                 kernel_size=(5,), sep_conv=False,
                 channel_att=False, spatial_att=False, upMode='bilinear',
                 core_bias=False):
        """
        :param color: operate on RGB (3-channel) frames instead of grayscale
        :param burst_length: number N of frames in the input burst
        :param blind_est: if True, no noise-estimate channel is appended to
            the network input
        :param kernel_size: iterable of predicted kernel sizes K
            (default changed from the mutable ``[5]`` to the equivalent
            tuple ``(5,)`` to avoid the shared-mutable-default pitfall)
        :param sep_conv: predict separable (2K) instead of full (K^2) kernels
        :param channel_att: enable channel attention in the decoder blocks
        :param spatial_att: enable spatial attention in the decoder blocks
        :param upMode: interpolation mode used for decoder upsampling
        :param core_bias: additionally predict a per-pixel bias term
        """
        super(KPN, self).__init__()
        self.upMode = upMode
        self.burst_length = burst_length
        self.core_bias = core_bias
        self.color_channel = 3 if color else 1
        in_channel = self.color_channel * (
            burst_length if blind_est else burst_length + 1)
        # Channels needed to hold every predicted kernel coefficient.
        out_channel = self.color_channel * (
            2 * sum(kernel_size) if sep_conv
            else int(np.sum(np.array(kernel_size) ** 2))) * burst_length
        if core_bias:
            out_channel += self.color_channel * burst_length

        # Encoder: stages 2-5 are 2x average pooling (applied in forward)
        # followed by a three-conv Basic block.
        self.conv1 = Basic(in_channel, 64, channel_att=False, spatial_att=False)
        self.conv2 = Basic(64, 128, channel_att=False, spatial_att=False)
        self.conv3 = Basic(128, 256, channel_att=False, spatial_att=False)
        self.conv4 = Basic(256, 512, channel_att=False, spatial_att=False)
        self.conv5 = Basic(512, 512, channel_att=False, spatial_att=False)
        # Decoder: stages 6-8 upsample, then convolve the skip-concatenated
        # feature maps.
        self.conv6 = Basic(512 + 512, 512, channel_att=channel_att,
                           spatial_att=spatial_att)
        self.conv7 = Basic(256 + 512, 256, channel_att=channel_att,
                           spatial_att=spatial_att)
        self.conv8 = Basic(256 + 128, out_channel, channel_att=channel_att,
                           spatial_att=spatial_att)
        self.outc = nn.Conv2d(out_channel, out_channel, 1, 1, 0)

        self.kernel_pred = KernelConv(kernel_size, sep_conv, self.core_bias)

        self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        """Xavier-initialize conv/linear weights and zero their biases.

        Guards against ``bias is None`` so modules constructed with
        ``bias=False`` no longer crash the initializer.
        """
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)

    @staticmethod
    def pad_before_cat(x1, x2):
        """Pad x2 so its spatial size matches x1.

        Prevent the failure of skip concatenation between the corresponding
        layers of the encoder and decoder due to the automatic edge trimming
        of nn.Conv2d when the input image size is odd."""
        diffY = x1.size()[-2] - x2.size()[-2]
        diffX = x1.size()[-1] - x2.size()[-1]
        x2 = F.pad(x2, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        return x2

    def forward(self, data_with_est, data, white_level=1.0):
        """
        Forward pass; returns the predicted images directly.

        :param data_with_est: network input; when not blind estimation this
            is the burst with the noise estimate appended, otherwise it is
            the same as ``data``
        :param data: burst frames the predicted kernels are applied to
        :param white_level: scalar or tensor used to undo white-level scaling
        :return: (pred_img_i, pred_img) — per-frame and fused predictions
        """
        # Encoder with 2x average pooling between stages.
        conv1 = self.conv1(data_with_est)
        conv2 = self.conv2(F.avg_pool2d(conv1, kernel_size=2, stride=2))
        conv3 = self.conv3(F.avg_pool2d(conv2, kernel_size=2, stride=2))
        conv4 = self.conv4(F.avg_pool2d(conv3, kernel_size=2, stride=2))
        conv5 = self.conv5(F.avg_pool2d(conv4, kernel_size=2, stride=2))
        # Decoder with skip connections; pad_before_cat keeps spatial sizes
        # aligned when the input resolution is odd.
        up5 = F.interpolate(conv5, scale_factor=2, mode=self.upMode)
        conv6 = self.conv6(
            torch.cat([conv4, self.pad_before_cat(conv4, up5)], dim=1))
        up6 = F.interpolate(conv6, scale_factor=2, mode=self.upMode)
        conv7 = self.conv7(
            torch.cat([conv3, self.pad_before_cat(conv3, up6)], dim=1))
        up7 = F.interpolate(conv7, scale_factor=2, mode=self.upMode)
        conv8 = self.conv8(
            torch.cat([conv2, self.pad_before_cat(conv2, up7)], dim=1))
        # Final 1x1 conv yields the kernel coefficients (K*K*N channels).
        core = self.outc(F.interpolate(conv8, scale_factor=2, mode=self.upMode))

        return self.kernel_pred(data, core, white_level)
162 |
163 |
class KernelConv(nn.Module):
    """
    Applies the per-pixel kernels predicted by KPN to the burst frames
    and averages the per-frame results into the final prediction.
    """

    def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
        # kernel_size is sorted so that forward() can build the patch stack
        # for the largest K once and crop it down for smaller kernels.
        super(KernelConv, self).__init__()
        self.kernel_size = sorted(kernel_size)
        self.sep_conv = sep_conv
        self.core_bias = core_bias

    def _sep_conv_core(self, core, batch_size, N, color, height, width):
        """
        convert the sep_conv core to conv2d core
        2p --> p^2
        :param core: shape: batch*(N*2*K)*height*width
        :return: (core_out dict keyed by K, bias or None)
        """
        kernel_total = sum(self.kernel_size)
        core = core.view(batch_size, N, -1, color, height, width)
        if not self.core_bias:
            core_1, core_2 = torch.split(core, kernel_total, dim=2)
        else:
            # the trailing channels hold the per-pixel bias term
            core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
        # output core
        core_out = {}
        cur = 0
        for K in self.kernel_size:
            # outer product of the two 1-D kernels reconstructs the full
            # K x K kernel for every pixel
            t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, color,
                                                     height, width)
            t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, color,
                                                     height, width)
            core_out[K] = torch.einsum('ijklnou,ijlmnou->ijkmnou',
                                       [t1, t2]).view(
                batch_size, N, K * K, color, height, width)
            cur += K
        # it is a dict
        return core_out, None if not self.core_bias else core_3.squeeze()

    def _convert_dict(self, core, batch_size, N, color, height, width):
        """
        make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
        :param core: shape: batch_size*(N*K*K)*height*width
        :return: core_out, a dict
        """
        core_out = {}
        core = core.view(batch_size, N, -1, color, height, width)
        core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] ** 2,
                                        ...]
        # when core_bias is set, the last channel holds the per-pixel bias
        bias = None if not self.core_bias else core[:, :, -1, ...]
        return core_out, bias

    def forward(self, frames, core, white_level=1.0):
        """
        compute the pred image according to core and frames
        :param frames: [batch_size, N, 3, height, width]
        :param core: [batch_size, N, dict(kernel), 3, height, width]
        :param white_level: scalar or tensor used to undo white-level scaling
        :return: (pred_img_i per-frame predictions, pred_img fused prediction)
        """
        if len(frames.size()) == 5:
            batch_size, N, color, height, width = frames.size()
        else:
            # grayscale input: insert a singleton color dimension
            batch_size, N, height, width = frames.size()
            color = 1
            frames = frames.view(batch_size, N, color, height, width)
        if self.sep_conv:
            core, bias = self._sep_conv_core(core, batch_size, N, color, height,
                                             width)
        else:
            core, bias = self._convert_dict(core, batch_size, N, color, height,
                                            width)
        img_stack = []
        pred_img = []
        # iterate kernel sizes from largest to smallest so the patch stack
        # built for the largest K can be cropped for the smaller ones
        kernel = self.kernel_size[::-1]
        for index, K in enumerate(kernel):
            if len(img_stack) == 0:
                # build the stack of K*K shifted copies of the frames
                frame_pad = F.pad(frames, [K // 2, K // 2, K // 2, K // 2])
                for i in range(K):
                    for j in range(K):
                        img_stack.append(
                            frame_pad[..., i:i + height, j:j + width])
                img_stack = torch.stack(img_stack, dim=2)
            else:
                # crop the existing stack down to the smaller kernel support
                k_diff = (kernel[index - 1] - kernel[index]) // 2
                img_stack = img_stack.view(batch_size, N, kernel[index - 1],
                                           kernel[index - 1], color, height,
                                           width)
                img_stack = img_stack[:, :, k_diff:-k_diff, k_diff:-k_diff, ...]
                img_stack = img_stack.reshape(batch_size, N, K ** 2, color,
                                              height, width)
            # print('img_stack:', img_stack.size())
            # per-pixel weighted sum over the K*K neighborhood
            pred_img.append(torch.sum(
                core[K].mul(img_stack), dim=2, keepdim=False
            ))
        pred_img = torch.stack(pred_img, dim=0)
        # print('pred_stack:', pred_img.size())
        # average the predictions produced by the different kernel sizes
        # pred_img_i = torch.mean(pred_img, dim=0, keepdim=False).squeeze()
        pred_img_i = torch.mean(pred_img, dim=0, keepdim=False)
        # if bias is permitted
        if self.core_bias:
            if bias is None:
                raise ValueError('The bias should not be None.')
            pred_img_i += bias
        # print('white_level', white_level.size())
        pred_img_i = pred_img_i / white_level
        # fuse the burst: average the per-frame predictions over N
        pred_img = torch.mean(pred_img_i, dim=1, keepdim=False)
        # print('pred_img:', pred_img.size())
        # print('pred_img_i:', pred_img_i.size())
        return pred_img_i, pred_img
273 |
274 |
class LossFunc(nn.Module):
    """
    Combined KPN loss: a basic reconstruction term on the fused prediction
    plus an annealed per-frame term.
    """

    def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True,
                 alpha=0.9998, beta=100):
        super(LossFunc, self).__init__()
        self.coeff_basic = coeff_basic
        self.coeff_anneal = coeff_anneal
        self.loss_basic = LossBasic(gradient_L1)
        self.loss_anneal = LossAnneal(alpha, beta)

    def forward(self, pred_img_i, pred_img, ground_truth, global_step):
        """
        Compute both loss terms.

        :param pred_img_i: per-frame predictions [batch, N, 3, height, width]
        :param pred_img: fused prediction [batch, 3, height, width]
        :param ground_truth: shape [batch, 3, height, width]
        :param global_step: int, current training step (drives annealing)
        :return: (basic_loss, anneal_loss) tuple of weighted scalar losses
        """
        basic = self.coeff_basic * self.loss_basic(pred_img, ground_truth)
        anneal = self.coeff_anneal * self.loss_anneal(global_step, pred_img_i,
                                                      ground_truth)
        return basic, anneal
300 |
301 |
class LossBasic(nn.Module):
    """
    Basic reconstruction loss: L2 on intensities plus L1 on image gradients.
    """

    def __init__(self, gradient_L1=True):
        super(LossBasic, self).__init__()
        self.l1_loss = nn.L1Loss()
        self.l2_loss = nn.MSELoss()
        self.gradient = TensorGradient(gradient_L1)

    def forward(self, pred, ground_truth):
        # Intensity term keeps the prediction close to the target;
        # the gradient term sharpens edges.
        value_term = self.l2_loss(pred, ground_truth)
        gradient_term = self.l1_loss(self.gradient(pred),
                                     self.gradient(ground_truth))
        return value_term + gradient_term
316 |
317 |
class LossAnneal(nn.Module):
    """
    Annealed per-frame loss: beta * alpha^step * mean(per-frame basic loss).
    """

    def __init__(self, alpha=0.9998, beta=100):
        super(LossAnneal, self).__init__()
        self.global_step = 0  # kept for interface compatibility; unused here
        self.loss_func = LossBasic(gradient_L1=True)
        self.alpha = alpha
        self.beta = beta

    def forward(self, global_step, pred_i, ground_truth):
        """
        :param global_step: int, current training step
        :param pred_i: [batch_size, N, 3, height, width]
        :param ground_truth: [batch_size, 3, height, width]
        :return: annealed scalar loss
        """
        num_frames = pred_i.size(1)
        # Average the basic loss over each frame of the burst.
        total = sum(self.loss_func(pred_i[:, i, ...], ground_truth)
                    for i in range(num_frames))
        mean_loss = total / num_frames
        # Exponentially decay the weight as training progresses.
        return self.beta * self.alpha ** global_step * mean_loss
342 |
343 |
class TensorGradient(nn.Module):
    """
    Finite-difference image gradient magnitude, as the L1 or L2 norm of
    the horizontal and vertical differences.
    """

    def __init__(self, L1=True):
        super(TensorGradient, self).__init__()
        self.L1 = L1

    def forward(self, img):
        height, width = img.size(-2), img.size(-1)
        # Zero-pad the image by one pixel on opposite sides; subtracting the
        # two shifted copies and cropping back yields the difference along
        # each axis.
        dx = (F.pad(img, [1, 0, 0, 0])
              - F.pad(img, [0, 1, 0, 0]))[..., 0:height, 0:width]
        dy = (F.pad(img, [0, 0, 1, 0])
              - F.pad(img, [0, 0, 0, 1]))[..., 0:height, 0:width]
        if self.L1:
            return torch.abs(dx) + torch.abs(dy)
        return torch.sqrt(torch.pow(dx, 2) + torch.pow(dy, 2))
367 |
368 |
if __name__ == '__main__':
    # Smoke test: build a KPN and print its layer summary.
    # The original call KPN(6, 5 * 5 * 6, True, True) passed positional
    # arguments that do not match KPN.__init__'s signature
    # (color, burst_length, blind_est, kernel_size, ...); use keyword
    # arguments instead, and only move to CUDA when it is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    kpn = KPN(color=False, burst_length=8, blind_est=False,
              kernel_size=[5], channel_att=True, spatial_att=True).to(device)
    # forward() takes (data_with_est, data): 9 = 1 * (burst_length + 1)
    # input channels, plus the 8-frame grayscale burst.
    print(summary(kpn, [(9, 224, 224), (8, 224, 224)], batch_size=4))
372 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Kernel Prediction Networks and Multi-Kernel Prediction Networks
2 | Reimplement of [Burst Denoising with Kernel Prediction Networks](https://arxiv.org/pdf/1712.02327.pdf) and [Multi-Kernel Prediction Networks for Denoising of Image Burst](https://arxiv.org/pdf/1902.05392.pdf) by using PyTorch.
3 |
4 | Part of this work follows [https://github.com/12dmodel/camera_sim](https://github.com/12dmodel/camera_sim).
5 |
6 | ## TODO
7 | Write the documents.
8 |
9 | ## Requirements
10 | - Python3
11 | - PyTorch >= 1.0.0
12 | - Scikit-image
13 | - Numpy
14 | - TensorboardX (requires TensorFlow support)
15 |
16 | ## How to use this repo?
17 |
18 | Firstly, you can clone this repo. including train and test codes. Download pretrained model for grayscale images at [https://drive.google.com/open?id=1Xnpllr1dinAU7BIN21L3LkEP5AqMNWso](https://drive.google.com/open?id=1Xnpllr1dinAU7BIN21L3LkEP5AqMNWso), and for color images at [https://drive.google.com/file/d/1Il-n7un_u8wWizjQ5ZKQ5hns7S27b0HW/view?usp=sharing](https://drive.google.com/file/d/1Il-n7un_u8wWizjQ5ZKQ5hns7S27b0HW/view?usp=sharing).
19 |
20 | The repo. supports multiple GPUs to train and validate, and the default setting is multi-GPUs. In other words, the pretrained model is obtained by training on multi-GPUs.
21 |
22 | - If you want to restart the train process by yourself, the command you should type is that
23 | ```angular2html
24 | CUDA_VISIBLE_DEVICES=x,y train_eval_syn.py --cuda --mGPU -nw 4 --config_file ./kpn_specs/kpn_config.conf --restart
25 | ```
26 | If the `--restart` option is omitted, the training process will resume from where it was interrupted.
27 |
28 | - If you want to evaluate the network by pre-trained model directly, you could use
29 | ```angular2html
30 | CUDA_VISIBLE_DEVICES=x,y train_eval_syn.py --cuda --mGPU -nw 4 --eval
31 | ```
32 | If the `-ckpt` option is chosen instead, you can select other models you have trained.
33 |
34 | - Anything else.
35 | - The code for single-image denoising has not been released yet; I will add it in a few weeks.
36 |
37 | ## Results
38 | ### on grayscale images:
39 | The following images and more examples can be found at [here](https://github.com/z-bingo/kernel-prediction-networks-PyTorch/blob/master/eval_images/).
40 |
41 |
42 |
43 | |
44 |
45 | |
46 |
47 | |
48 |
49 |
50 |
51 | Ground Truth |
52 | Noisy |
53 | Denoised |
54 |
55 |
56 |
57 | |
58 |
59 | |
60 |
61 | |
62 |
63 |
64 |
65 | Ground Truth |
66 | Noisy |
67 | Denoised |
68 |
69 |
70 |
71 | ### on color images:
72 | The following images and more examples can be found at [here](https://github.com/LujiaJin/kernel-prediction-networks-PyTorch/tree/master/eval_images_RGB/).
73 |
74 |
75 |
76 | |
77 |
78 | |
79 |
80 | |
81 |
82 |
83 |
84 | Ground Truth |
85 | Noisy (PSNR: 19.34dB, SSIM: 0.595) |
86 | Denoised (PSNR: 29.69dB, SSIM: 0.937) |
87 |
88 |
89 |
90 | |
91 |
92 | |
93 |
94 | |
95 |
96 |
97 |
98 | Ground Truth |
99 | Noisy (PSNR: 18.70dB, SSIM: 0.308) |
100 | Denoised (PSNR: 34.02dB, SSIM: 0.954) |
101 |
102 |
103 |
104 | ##### If you like this repo, Star or Fork to support my work. Thank you.
105 |
--------------------------------------------------------------------------------
/data_generation/__init__.py:
--------------------------------------------------------------------------------
import sys
import inspect, os

# Make both this package's directory and its parent importable, so sibling
# packages (e.g. the halide bindings) can be found. See:
# https://stackoverflow.com/questions/6323860/sibling-package-imports
_this_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.insert(0, os.path.join(_this_dir, '..'))
sys.path.insert(0, os.path.join(_this_dir))
11 |
--------------------------------------------------------------------------------
/data_generation/ahd_demosaicking.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy
3 | from scipy.io import savemat
4 | from .constants import RGB2YUV
5 | from scipy.interpolate import interp2d
6 |
7 |
8 | _RGB2YUV = RGB2YUV.cpu().data.numpy()
9 |
10 |
def ahd_demosaicking(mosaic, delta=1, sobel_sz=3, avg_sz=3):
    """Demosaicking using the AHD algorithm.

    No median filtering; assumes GRBG Bayer layout.
    Args:
        mosaic: 2-D Bayer mosaic image.
        delta: neighborhood size for calculating homogeneity.
        sobel_sz: size of sobel kernels.
        avg_sz: size of averaging kernel for homogeneity.
    Returns:
        HxWx3 RGB image clipped to [0, 1].
    """
    # Interpolate along each axis independently.
    interp_x = _demosaickX(mosaic)
    interp_y = _demosaickY(mosaic)

    yuv_x = _rgb2YUV(interp_x)
    yuv_y = _rgb2YUV(interp_y)

    # Adaptive thresholds for comparing luminance/chrominance homogeneity.
    epsL, epsCsq = _adaptive_param(yuv_x, yuv_y, sobel_sz)

    homo_x = _homogeniety(yuv_x, delta, epsL, epsCsq)
    homo_y = _homogeniety(yuv_y, delta, epsL, epsCsq)

    # Smooth the homogeneity maps with a box filter before comparing them.
    box = np.ones((avg_sz, avg_sz)) / float(avg_sz ** 2)
    homo_x = _conv2(homo_x, box)
    homo_y = _conv2(homo_y, box)

    # Per pixel, keep the interpolation direction with higher homogeneity.
    mask = np.expand_dims((homo_x > homo_y).astype('float'), -1)
    output = mask * interp_x + (1.0 - mask) * interp_y
    return np.clip(output, 0.0, 1.0)
38 |
39 |
40 | # https://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size/41065243#41065243
41 | def _sobel_kernel(sz):
42 | if (sz % 2) == 0:
43 | raise ValueError("Kernel size must be odd ({} received)".format(sz))
44 | kernel = np.zeros((sz, sz))
45 | for i in range(sz):
46 | for j in range(sz):
47 | ii = i - (sz // 2)
48 | jj = j - (sz // 2)
49 | kernel[i, j] = ii / (ii**2 + jj**2) if ii != 0 else 0
50 | return kernel
51 |
52 |
53 | def _interp2d(arr, new_sz):
54 | f = interp2d(x=np.linspace(0, 1, arr.shape[1]),
55 | y=np.linspace(0, 1, arr.shape[0]),
56 | z=arr)
57 | return f(np.linspace(0, 1, new_sz[1]), np.linspace(0, 1, new_sz[0]))
58 |
59 |
60 | def _interp_kernel(m=5, n=3):
61 | # Duplicate row so it works with bilinear interpolation
62 | Hg = np.array([[-0.25, 0.5, 0.5, 0.5, -0.25],[-0.25, 0.5, 0.5, 0.5, -0.25]])
63 | Hr = np.array([[0.25, 0.5, 0.25], [0.5, 1.0, 0.5], [0.25, 0.5, 0.25]])
64 | if m != 5:
65 | Hg = _interp2d(Hg, (2, m))
66 | if n != 3:
67 | Hr = _interp2d(Hr, (n, n))
68 | Hg = Hg[0:1, :]
69 | Hg = Hg / np.sum(Hg[:])
70 | Hr = Hr / np.sum(Hr[:]) * 4
71 | return Hg, Hr
72 |
73 |
74 | def _conv2(x, k):
75 | return scipy.ndimage.filters.convolve(x, k, mode='reflect')
76 |
77 |
def _demosaickX(X, transposed=False):
    """Demosaick a GRBG Bayer mosaic with green interpolated along rows.

    Args:
        X: 2D mosaic array.
        transposed: set when X is the transpose of the original mosaic,
            which exchanges the red and blue sample sites.

    Returns:
        H x W x 3 RGB array.
    """
    mask_r = np.zeros(X.shape)
    mask_b = np.zeros(X.shape)
    mask_r[0::2, 1::2] = 1.0
    mask_b[1::2, 0::2] = 1.0
    mask_g = np.ones(X.shape) - mask_r - mask_b
    # Switch R and B (which got swapped when we transpose X).
    if transposed:
        mask_r, mask_b = mask_b, mask_r

    Hg, Hr = _interp_kernel(5, 3)
    # Keep green where it was sampled; interpolate it at red/blue sites.
    G = mask_g * X + (mask_r + mask_b) * _conv2(X, Hg)
    # Interpolate the chroma differences, then add green back.
    R = G + _conv2(mask_r * (X - G), Hr)
    B = G + _conv2(mask_b * (X - G), Hr)
    return np.stack((R, G, B), axis=-1)
99 |
100 |
def _demosaickY(X):
    """Demosaick with green interpolated along columns.

    Implemented by transposing the mosaic, reusing the row-wise
    interpolation, and swapping the spatial axes back.
    """
    out = _demosaickX(X.T, transposed=True)
    return np.swapaxes(out, 0, 1)
106 |
107 |
def _adaptive_param(X, Y, sz):
    """Adaptive thresholds (epsL, epsC^2) from directional gradients.

    X and Y are the YUV candidates interpolated along x and y; each
    threshold is the pointwise minimum of the two directional responses.
    """
    sobel_y = _sobel_kernel(sz)
    sobel_x = sobel_y.T
    luma_x = np.abs(_conv2(X[:, :, 0], sobel_x))
    luma_y = np.abs(_conv2(Y[:, :, 0], sobel_y))
    eL = np.minimum(luma_x, luma_y)
    chroma_x = _conv2(X[:, :, 1], sobel_x) ** 2 + _conv2(X[:, :, 2], sobel_x) ** 2
    chroma_y = _conv2(Y[:, :, 1], sobel_y) ** 2 + _conv2(Y[:, :, 2], sobel_y) ** 2
    eCsq = np.minimum(chroma_x, chroma_y)
    return eL, eCsq
116 |
117 |
def _rgb2YUV(X):
    """Convert an H x W x 3 RGB image to YUV via the module matrix."""
    # Per-pixel matrix multiply; equivalent to einsum("ijk,lk->ijl").
    return X @ _RGB2YUV.T
120 |
121 |
122 | def _ballset(delta):
123 | index = int(np.ceil(delta))
124 | # initialize
125 | H = np.zeros((index*2+1, index*2+1, (index*2+1)**2))
126 | k = 0;
127 | for i in range(-index, index):
128 | for j in range(-index,index):
129 | if np.sqrt(i**2 + j**2) <= delta:
130 | # included
131 | H[index+i, index+j, k] = 1
132 | k = k + 1
133 | H = H[:,:,:k];
134 | return H
135 |
136 |
def _homogeniety(X, delta, epsL, epsC_sq):
    """Count, per pixel, neighbors whose luma and chroma are close.

    X is a YUV image.  A neighbor is in the metric neighborhood when its
    luma differs by at most epsL and its squared chroma distance is at
    most epsC_sq; the returned map counts such neighbors.
    """
    H = _ballset(delta)
    count = np.zeros(X.shape[:2])
    for idx in range(H.shape[-1]):
        shift = H[:, :, idx]
        # Convolving with a one-hot kernel fetches a shifted copy of X.
        luma_ok = np.abs(_conv2(X[:, :, 0], shift) - X[:, :, 0]) <= epsL
        chroma_dist = ((_conv2(X[:, :, 1], shift) - X[:, :, 1]) ** 2 +
                       (_conv2(X[:, :, 2], shift) - X[:, :, 2]) ** 2)
        count = count + luma_ok * (chroma_dist <= epsC_sq)
    return count
153 |
--------------------------------------------------------------------------------
/data_generation/constants.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | from torch import FloatTensor
4 |
5 |
# CIE XYZ -> linear sRGB conversion matrix.
XYZ2sRGB = FloatTensor([[ 3.2406, -1.5372, -0.4986],
                        [-0.9689, 1.8758, 0.0415],
                        [ 0.0557, -0.2040, 1.0570]])

# http://brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
# Linear ProPhoto RGB -> CIE XYZ.
ProPhotoRGB2XYZ = FloatTensor([[0.7976749, 0.1351917, 0.0313534],
                               [0.2880402, 0.7118741, 0.0000857],
                               [0.0000000, 0.0000000, 0.8252100]])

# RGB -> luma/chroma opponent space used by the AHD demosaicker.
# NOTE(review): the luma row matches BT.601 weights, but the chroma rows use
# a non-standard scaling — presumably from the original AHD reference code;
# confirm before reusing elsewhere.
RGB2YUV = FloatTensor([[0.29900, 0.5870, 0.1140],
                       [-.33750, -.6625, 1.0000],
                       [1.00000, -.8374, -.1626]])

# Inverse-direction transform paired with RGB2YUV above.
YUV2RGB = FloatTensor([[1.0, 0.0000, 0.7010],
                       [1.0, -.1721, -.3571],
                       [1.0, 0.8860, 0.0]])
22 |
23 | xyz_color_matching = {
24 | "lambda": FloatTensor([390,395,400,405,410,415,420,425,430,435,440,445,450,455,460,465,470,475,480,485,490,495,500,505,510,515,520,525,530,535,540,545,550,555,560,565,570,575,580,585,590,595,600,605,610,615,620,625,630,635,640,645,650,655,660,665,670,675,680,685,690,695,700,705,710,715,720,725,730,735,740,745,750,755,760,765,770,775,780,785,790,795,800,805,810,815,820,825,830]),
25 | "xyz": FloatTensor([[0.003769647,0.009382967,0.02214302,0.04742986,0.08953803,0.1446214,0.2035729,0.2488523,0.2918246,0.3227087,0.3482554,0.3418483,0.3224637,0.2826646,0.2485254,0.2219781,0.1806905,0.129192,0.08182895,0.04600865,0.02083981,0.007097731,0.002461588,0.003649178,0.01556989,0.04315171,0.07962917,0.1268468,0.1818026,0.2405015,0.3098117,0.3804244,0.4494206,0.5280233,0.6133784,0.7016774,0.796775,0.8853376,0.9638388,1.051011,1.109767,1.14362,1.151033,1.134757,1.083928,1.007344,0.9142877,0.8135565,0.6924717,0.575541,0.4731224,0.3844986,0.2997374,0.2277792,0.1707914,0.1263808,0.09224597,0.0663996,0.04710606,0.03292138,0.02262306,0.01575417,0.01096778,0.00760875,0.005214608,0.003569452,0.002464821,0.001703876,0.001186238,0.000826954,0.00057583,0.00040583,0.000285658,0.000202185,0.000143827,0.000102469,7.34755E-05,5.25987E-05,3.80611E-05,2.75822E-05,2.00412E-05,1.45879E-05,1.06814E-05,7.85752E-06,5.76828E-06,4.25917E-06,3.16777E-06,2.35872E-06,1.76247E-06],
26 | [0.000414616,0.001059646,0.002452194,0.004971717,0.00907986,0.01429377,0.02027369,0.02612106,0.03319038,0.0415794,0.05033657,0.05743393,0.06472352,0.07238339,0.08514816,0.1060145,0.1298957,0.1535066,0.1788048,0.2064828,0.237916,0.285068,0.3483536,0.4277595,0.5204972,0.6206256,0.718089,0.7946448,0.8575799,0.9071347,0.9544675,0.9814106,0.9890228,0.9994608,0.9967737,0.9902549,0.9732611,0.9424569,0.8963613,0.8587203,0.8115868,0.7544785,0.6918553,0.6270066,0.5583746,0.489595,0.4229897,0.3609245,0.2980865,0.2416902,0.1943124,0.1547397,0.119312,0.08979594,0.06671045,0.04899699,0.03559982,0.02554223,0.01807939,0.01261573,0.008661284,0.006027677,0.004195941,0.002910864,0.001995557,0.001367022,0.000944727,0.000653705,0.000455597,0.000317974,0.000221745,0.000156557,0.000110393,7.82744E-05,5.57886E-05,3.98188E-05,2.86018E-05,2.05126E-05,1.48724E-05,1.08E-05,7.86392E-06,5.73694E-06,4.2116E-06,3.10656E-06,2.28679E-06,1.69315E-06,1.26256E-06,9.42251E-07,7.05386E-07],
27 | [0.0184726,0.04609784,0.109609,0.2369246,0.4508369,0.7378822,1.051821,1.305008,1.552826,1.74828,1.917479,1.918437,1.848545,1.664439,1.522157,1.42844,1.25061,0.9991789,0.7552379,0.5617313,0.4099313,0.3105939,0.2376753,0.1720018,0.1176796,0.08283548,0.05650407,0.03751912,0.02438164,0.01566174,0.00984647,0.006131421,0.003790291,0.002327186,0.001432128,0.000882253,0.000545242,0.000338674,0.000211777,0.000133503,8.49447E-05,5.46071E-05,3.54966E-05,2.33474E-05,1.55463E-05,1.04839E-05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
28 | }
29 |
30 |
31 | # default 50% quality
32 | default_jpeg_quantization_matrix = \
33 | FloatTensor([[16, 11, 10, 16, 24, 40, 51, 61],
34 | [12, 12, 14, 19, 26, 58, 60, 55],
35 | [14, 13, 16, 24, 40, 57, 69, 56],
36 | [14, 17, 22, 29, 51, 87, 80, 62],
37 | [18, 22, 37, 56, 68, 109, 103, 77],
38 | [24, 35, 55, 64, 81, 104, 113, 92],
39 | [49, 64, 78, 87, 103, 121, 120, 101],
40 | [72, 92, 95, 98, 112, 100, 103, 99]])
41 |
42 | # Photoshop quantization.
43 | # https://www.impulseadventure.com/photo/jpeg-quantization.html
44 | photoshop_jpeg_quantization_lum = \
45 | [
46 | # Luminance Level 0
47 | FloatTensor([
48 | [32, 33, 51, 81, 66, 39, 34, 17],
49 | [33, 36, 48, 47, 28, 23, 12, 12],
50 | [51, 48, 47, 28, 23, 12, 12, 12],
51 | [81, 47, 28, 23, 12, 12, 12, 12],
52 | [66, 28, 23, 12, 12, 12, 12, 12],
53 | [39, 23, 12, 12, 12, 12, 12, 12],
54 | [34, 12, 12, 12, 12, 12, 12, 12],
55 | [17, 12, 12, 12, 12, 12, 12, 12],
56 | ]),
57 | # Luminance Level 1
58 | FloatTensor([
59 | [27, 26, 41, 65, 66, 39, 34, 17],
60 | [26, 29, 38, 47, 28, 23, 12, 12],
61 | [41, 38, 47, 28, 23, 12, 12, 12],
62 | [65, 47, 28, 23, 12, 12, 12, 12],
63 | [66, 28, 23, 12, 12, 12, 12, 12],
64 | [39, 23, 12, 12, 12, 12, 12, 12],
65 | [34, 12, 12, 12, 12, 12, 12, 12],
66 | [17, 12, 12, 12, 12, 12, 12, 12],
67 | ]),
68 | # Luminance Level 2
69 | FloatTensor([
70 | [20, 17, 26, 41, 51, 39, 34, 17],
71 | [17, 18, 24, 39, 28, 23, 12, 12],
72 | [26, 24, 32, 28, 23, 12, 12, 12],
73 | [41, 39, 28, 23, 12, 12, 12, 12],
74 | [51, 28, 23, 12, 12, 12, 12, 12],
75 | [39, 23, 12, 12, 12, 12, 12, 12],
76 | [34, 12, 12, 12, 12, 12, 12, 12],
77 | [17, 12, 12, 12, 12, 12, 12, 12],
78 | ]),
79 | # Luminance Level 3
80 | FloatTensor([
81 | [18, 14, 22, 35, 44, 39, 34, 17],
82 | [14, 16, 21, 34, 28, 23, 12, 12],
83 | [22, 21, 27, 28, 23, 12, 12, 12],
84 | [35, 34, 28, 23, 12, 12, 12, 12],
85 | [44, 28, 23, 12, 12, 12, 12, 12],
86 | [39, 23, 12, 12, 12, 12, 12, 12],
87 | [34, 12, 12, 12, 12, 12, 12, 12],
88 | [17, 12, 12, 12, 12, 12, 12, 12],
89 | ]),
90 | # Luminance Level 4
91 | FloatTensor([
92 | [16, 11, 17, 27, 34, 39, 34, 17],
93 | [11, 12, 16, 26, 28, 23, 12, 12],
94 | [17, 16, 21, 28, 23, 12, 12, 12],
95 | [27, 26, 28, 23, 12, 12, 12, 12],
96 | [34, 28, 23, 12, 12, 12, 12, 12],
97 | [39, 23, 12, 12, 12, 12, 12, 12],
98 | [34, 12, 12, 12, 12, 12, 12, 12],
99 | [17, 12, 12, 12, 12, 12, 12, 12],
100 | ]),
101 | # Luminance Level 5
102 | FloatTensor([
103 | [12, 8, 13, 21, 26, 32, 34, 17],
104 | [8, 9, 12, 20, 27, 23, 12, 12],
105 | [13, 12, 16, 26, 23, 12, 12, 12],
106 | [21, 20, 26, 23, 12, 12, 12, 12],
107 | [26, 27, 23, 12, 12, 12, 12, 12],
108 | [32, 23, 12, 12, 12, 12, 12, 12],
109 | [34, 12, 12, 12, 12, 12, 12, 12],
110 | [17, 12, 12, 12, 12, 12, 12, 12],
111 | ]),
112 | # Luminance Level 6
113 | FloatTensor([
114 | [8, 6, 9, 14, 17, 21, 28, 17],
115 | [6, 6, 8, 13, 18, 23, 12, 12],
116 | [9, 8, 11, 17, 23, 12, 12, 12],
117 | [14, 13, 17, 23, 12, 12, 12, 12],
118 | [17, 18, 23, 12, 12, 12, 12, 12],
119 | [21, 23, 12, 12, 12, 12, 12, 12],
120 | [28, 12, 12, 12, 12, 12, 12, 12],
121 | [17, 12, 12, 12, 12, 12, 12, 12],
122 | ]),
123 | # Luminance Level 7
124 | FloatTensor([
125 | [10, 7, 11, 18, 22, 27, 34, 17],
126 | [7, 8, 10, 17, 23, 23, 12, 12],
127 | [11, 10, 14, 22, 23, 12, 12, 12],
128 | [18, 17, 22, 23, 12, 12, 12, 12],
129 | [22, 23, 23, 12, 12, 12, 12, 12],
130 | [27, 23, 12, 12, 12, 12, 12, 12],
131 | [34, 12, 12, 12, 12, 12, 12, 12],
132 | [17, 12, 12, 12, 12, 12, 12, 12],
133 | ]),
134 | # Luminance Level 8
135 | FloatTensor([
136 | [6, 4, 7, 11, 14, 17, 22, 17],
137 | [4, 5, 6, 10, 14, 19, 12, 12],
138 | [7, 6, 8, 14, 19, 12, 12, 12],
139 | [11, 10, 14, 19, 12, 12, 12, 12],
140 | [14, 14, 19, 12, 12, 12, 12, 12],
141 | [17, 19, 12, 12, 12, 12, 12, 12],
142 | [22, 12, 12, 12, 12, 12, 12, 12],
143 | [17, 12, 12, 12, 12, 12, 12, 12],
144 | ]),
145 | # Luminance Level 9
146 | FloatTensor([
147 | [4, 3, 4, 7, 9, 11, 14, 17],
148 | [3, 3, 4, 7, 9, 12, 12, 12],
149 | [4, 4, 5, 9, 12, 12, 12, 12],
150 | [7, 7, 9, 12, 12, 12, 12, 12],
151 | [9, 9, 12, 12, 12, 12, 12, 12],
152 | [11, 12, 12, 12, 12, 12, 12, 12],
153 | [14, 12, 12, 12, 12, 12, 12, 12],
154 | [17, 12, 12, 12, 12, 12, 12, 12],
155 | ]),
156 | # Luminance Level 10
157 | FloatTensor([
158 | [2, 2, 3, 4, 5, 6, 8, 11],
159 | [2, 2, 2, 4, 5, 7, 9, 11],
160 | [3, 2, 3, 5, 7, 9, 11, 12],
161 | [4, 4, 5, 7, 9, 11, 12, 12],
162 | [5, 5, 7, 9, 11, 12, 12, 12],
163 | [6, 7, 9, 11, 12, 12, 12, 12],
164 | [8, 9, 11, 12, 12, 12, 12, 12],
165 | [11, 11, 12, 12, 12, 12, 12, 12],
166 | ]),
167 | # Luminance Level 11
168 | FloatTensor([
169 | [1, 1, 1, 2, 3, 3, 4, 5],
170 | [1, 1, 1, 2, 3, 4, 4, 6],
171 | [1, 1, 2, 3, 4, 4, 5, 7],
172 | [2, 2, 3, 4, 4, 5, 7, 8],
173 | [3, 3, 4, 4, 5, 7, 8, 8],
174 | [3, 4, 4, 5, 7, 8, 8, 8],
175 | [4, 4, 5, 7, 8, 8, 8, 8],
176 | [5, 6, 7, 8, 8, 8, 8, 8],
177 | ]),
178 | # Luminance Level 12
179 | FloatTensor([
180 | [1, 1, 1, 1, 1, 1, 1, 2],
181 | [1, 1, 1, 1, 1, 1, 1, 2],
182 | [1, 1, 1, 1, 1, 1, 2, 2],
183 | [1, 1, 1, 1, 1, 2, 2, 3],
184 | [1, 1, 1, 1, 2, 2, 3, 3],
185 | [1, 1, 1, 2, 2, 3, 3, 3],
186 | [1, 1, 2, 2, 3, 3, 3, 3],
187 | [2, 2, 2, 3, 3, 3, 3, 3],
188 | ]),
189 | ]
190 |
191 | photoshop_jpeg_quantization_chrom = \
192 | [
193 | # Chrominance Level 0
194 | FloatTensor([
195 | [34, 51, 52, 34, 20, 20, 17, 17],
196 | [51, 38, 24, 14, 14, 12, 12, 12],
197 | [52, 24, 14, 14, 12, 12, 12, 12],
198 | [34, 14, 14, 12, 12, 12, 12, 12],
199 | [20, 14, 12, 12, 12, 12, 12, 12],
200 | [20, 12, 12, 12, 12, 12, 12, 12],
201 | [17, 12, 12, 12, 12, 12, 12, 12],
202 | [17, 12, 12, 12, 12, 12, 12, 12],
203 | ]),
204 | # Chrominance Level 1
205 | FloatTensor([
206 | [29, 41, 52, 34, 20, 20, 17, 17],
207 | [41, 38, 24, 14, 14, 12, 12, 12],
208 | [52, 24, 14, 14, 12, 12, 12, 12],
209 | [34, 14, 14, 12, 12, 12, 12, 12],
210 | [20, 14, 12, 12, 12, 12, 12, 12],
211 | [20, 12, 12, 12, 12, 12, 12, 12],
212 | [17, 12, 12, 12, 12, 12, 12, 12],
213 | [17, 12, 12, 12, 12, 12, 12, 12],
214 | ]),
215 | # Chrominance Level 2
216 | FloatTensor([
217 | [21, 26, 33, 34, 20, 20, 17, 17],
218 | [26, 29, 24, 14, 14, 12, 12, 12],
219 | [33, 24, 14, 14, 12, 12, 12, 12],
220 | [34, 14, 14, 12, 12, 12, 12, 12],
221 | [20, 14, 12, 12, 12, 12, 12, 12],
222 | [20, 12, 12, 12, 12, 12, 12, 12],
223 | [17, 12, 12, 12, 12, 12, 12, 12],
224 | [17, 12, 12, 12, 12, 12, 12, 12],
225 | ]),
226 | # Chrominance Level 3
227 | FloatTensor([
228 | [20, 22, 29, 34, 20, 20, 17, 17],
229 | [22, 25, 24, 14, 14, 12, 12, 12],
230 | [29, 24, 14, 14, 12, 12, 12, 12],
231 | [34, 14, 14, 12, 12, 12, 12, 12],
232 | [20, 14, 12, 12, 12, 12, 12, 12],
233 | [20, 12, 12, 12, 12, 12, 12, 12],
234 | [17, 12, 12, 12, 12, 12, 12, 12],
235 | [17, 12, 12, 12, 12, 12, 12, 12],
236 | ]),
237 | # Chrominance Level 4
238 | FloatTensor([
239 | [17, 17, 22, 34, 20, 20, 17, 17],
240 | [17, 19, 22, 14, 14, 12, 12, 12],
241 | [22, 22, 14, 14, 12, 12, 12, 12],
242 | [34, 14, 14, 12, 12, 12, 12, 12],
243 | [20, 14, 12, 12, 12, 12, 12, 12],
244 | [20, 12, 12, 12, 12, 12, 12, 12],
245 | [17, 12, 12, 12, 12, 12, 12, 12],
246 | [17, 12, 12, 12, 12, 12, 12, 12],
247 | ]),
248 | # Chrominance Level 5
249 | FloatTensor([
250 | [13, 13, 17, 27, 20, 20, 17, 17],
251 | [13, 14, 17, 14, 14, 12, 12, 12],
252 | [17, 17, 14, 14, 12, 12, 12, 12],
253 | [27, 14, 14, 12, 12, 12, 12, 12],
254 | [20, 14, 12, 12, 12, 12, 12, 12],
255 | [20, 12, 12, 12, 12, 12, 12, 12],
256 | [17, 12, 12, 12, 12, 12, 12, 12],
257 | [17, 12, 12, 12, 12, 12, 12, 12],
258 | ]),
259 | # Chrominance Level 6
260 | FloatTensor([
261 | [9, 9, 11, 18, 20, 20, 17, 17],
262 | [9, 10, 11, 14, 14, 12, 12, 12],
263 | [11, 11, 14, 14, 12, 12, 12, 12],
264 | [18, 14, 14, 12, 12, 12, 12, 12],
265 | [20, 14, 12, 12, 12, 12, 12, 12],
266 | [20, 12, 12, 12, 12, 12, 12, 12],
267 | [17, 12, 12, 12, 12, 12, 12, 12],
268 | [17, 12, 12, 12, 12, 12, 12, 12],
269 | ]),
270 | # Chrominance Level 7
271 | FloatTensor([
272 | [11, 14, 31, 34, 20, 20, 17, 17],
273 | [14, 19, 24, 14, 14, 12, 12, 12],
274 | [31, 24, 14, 14, 12, 12, 12, 12],
275 | [34, 14, 14, 12, 12, 12, 12, 12],
276 | [20, 14, 12, 12, 12, 12, 12, 12],
277 | [20, 12, 12, 12, 12, 12, 12, 12],
278 | [17, 12, 12, 12, 12, 12, 12, 12],
279 | [17, 12, 12, 12, 12, 12, 12, 12],
280 | ]),
281 | # Chrominance Level 8
282 | FloatTensor([
283 | [7, 9, 19, 34, 20, 20, 17, 17],
284 | [9, 12, 19, 14, 14, 12, 12, 12],
285 | [19, 19, 14, 14, 12, 12, 12, 12],
286 | [34, 14, 14, 12, 12, 12, 12, 12],
287 | [20, 14, 12, 12, 12, 12, 12, 12],
288 | [20, 12, 12, 12, 12, 12, 12, 12],
289 | [17, 12, 12, 12, 12, 12, 12, 12],
290 | [17, 12, 12, 12, 12, 12, 12, 12],
291 | ]),
292 | # Chrominance Level 9
293 | FloatTensor([
294 | [4, 6, 12, 22, 20, 20, 17, 17],
295 | [6, 8, 12, 14, 14, 12, 12, 12],
296 | [12, 12, 14, 14, 12, 12, 12, 12],
297 | [22, 14, 14, 12, 12, 12, 12, 12],
298 | [20, 14, 12, 12, 12, 12, 12, 12],
299 | [20, 12, 12, 12, 12, 12, 12, 12],
300 | [17, 12, 12, 12, 12, 12, 12, 12],
301 | [17, 12, 12, 12, 12, 12, 12, 12],
302 | ]),
303 | # Chrominance Level 10
304 | FloatTensor([
305 | [3, 3, 7, 13, 15, 15, 15, 15],
306 | [3, 4, 7, 13, 14, 12, 12, 12],
307 | [7, 7, 13, 14, 12, 12, 12, 12],
308 | [13, 13, 14, 12, 12, 12, 12, 12],
309 | [15, 14, 12, 12, 12, 12, 12, 12],
310 | [15, 12, 12, 12, 12, 12, 12, 12],
311 | [15, 12, 12, 12, 12, 12, 12, 12],
312 | [15, 12, 12, 12, 12, 12, 12, 12],
313 | ]),
314 | # Chrominance Level 11
315 | FloatTensor([
316 | [1, 2, 4, 7, 8, 8, 8, 8],
317 | [2, 2, 4, 7, 8, 8, 8, 8],
318 | [4, 4, 7, 8, 8, 8, 8, 8],
319 | [7, 7, 8, 8, 8, 8, 8, 8],
320 | [8, 8, 8, 8, 8, 8, 8, 8],
321 | [8, 8, 8, 8, 8, 8, 8, 8],
322 | [8, 8, 8, 8, 8, 8, 8, 8],
323 | [8, 8, 8, 8, 8, 8, 8, 8],
324 | ]),
325 | # Chrominance Level 12
326 | FloatTensor([
327 | [1, 1, 1, 2, 3, 3, 3, 3],
328 | [1, 1, 1, 2, 3, 3, 3, 3],
329 | [1, 1, 2, 3, 3, 3, 3, 3],
330 | [2, 2, 3, 3, 3, 3, 3, 3],
331 | [3, 3, 3, 3, 3, 3, 3, 3],
332 | [3, 3, 3, 3, 3, 3, 3, 3],
333 | [3, 3, 3, 3, 3, 3, 3, 3],
334 | [3, 3, 3, 3, 3, 3, 3, 3],
335 | ]),
336 | ]
337 |
# 0-6 have subsampling, 7-12 don't.
# One flag per Photoshop quality level: True means chroma is subsampled at
# that level (matches the table split above).
photoshop_chroma_subsampling = [True] * 7 + [False] * 6
340 |
341 |
342 |
343 | # DCT Coefficients
344 | # The inverse coefficient is the same.
345 | def _DCT_coeff():
346 | v = torch.arange(8).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
347 | u = torch.arange(8).unsqueeze( 0).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
348 | y = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze(-1).expand((8, 8, 8, 8)).float()
349 | x = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze( 0).expand((8, 8, 8, 8)).float()
350 | au = torch.ones((8, 8, 8, 8)).float()
351 | av = torch.ones((8, 8, 8, 8)).float()
352 | av[0, :, ...] = 0.707 # 1 / sqrt(2)
353 | au[:, 0, ...] = 0.707 # 1 / sqrt(2)
354 |
355 | coeff = au * av * torch.cos((2*x + 1)*u*math.pi/16.0) \
356 | * torch.cos((2*y + 1)*v*math.pi/16.0)
357 | return coeff * 0.25
358 |
# Precomputed once at import; consumers contract against this shared tensor.
DCT_coeff = _DCT_coeff()
360 |
--------------------------------------------------------------------------------
/data_generation/data_utils.py:
--------------------------------------------------------------------------------
1 | """ Utilities functions.
2 | """
3 | import numbers
4 | import numpy as np
5 | import torch
6 | from torch import FloatTensor
7 |
8 |
def random_crop(im, num_patches, w, h=None):
    """Extract random w x h crops from the last two dimensions of im.

    Args:
        im: tensor of shape (..., H, W).
        num_patches: number of crops to draw (positions sampled uniformly,
            with replacement).
        w: crop width.
        h: crop height; defaults to w (square crop).

    Returns:
        List of num_patches tensors of shape (..., h, w).

    Raises:
        RuntimeError: if the image is smaller than the requested crop.
    """
    h = w if h is None else h
    max_x = im.size(-1) - w
    max_y = im.size(-2) - h
    if max_x < 0 or max_y < 0:
        # "too small" (was misspelled "to small").
        raise RuntimeError("Image is too small {} for the desired size {}". \
            format((im.size(-1), im.size(-2)), (w, h))
        )

    idx_w = np.random.choice(max_x + 1, size=num_patches)
    idx_h = np.random.choice(max_y + 1, size=num_patches)

    return [im[..., y:(y + h), x:(x + w)]
            for x, y in zip(idx_w, idx_h)]
27 |
28 |
def expand_to_4d_channel(arr):
    """ Expand Scalar or 1D dimension to 4D

    Assumes that a 1D list represent the channel dimension (2nd dim).

    Args:
        arr: A scalar or 1D tensor to be expanded to 4D
    """
    # Prepend batch dims until we have at least (batch, channel)...
    while arr.dim() < 2:
        arr = arr.unsqueeze(0)
    # ...then append singleton spatial dims.
    while arr.dim() < 4:
        arr = arr.unsqueeze(-1)
    return arr
44 |
45 |
def expand_to_4d_batch(arr):
    """ Expand Scalar or 1D dimension to 4D

    Assumes that a 1D list represent the batch dimension (1st dim).

    Args:
        arr: A scalar or 1D tensor to be expanded to 4D
    """
    # Append channel and spatial singleton dims after the batch dim.
    for _ in range(4 - arr.dim()):
        arr = arr.unsqueeze(-1)
    return arr
58 |
59 |
def is_number(a):
    """Return True when `a` is any numeric scalar (int, float, complex)."""
    return isinstance(a, numbers.Number)
62 |
63 |
def python_to_tensor(a):
    """Wrap a bare Python number in a 1-element FloatTensor; pass anything
    else through unchanged."""
    return FloatTensor([a]) if isinstance(a, numbers.Number) else a
68 |
69 |
def number_to_list(a):
    """Wrap a bare number in a single-element list; pass anything else
    through unchanged."""
    return [a] if isinstance(a, numbers.Number) else a
74 |
75 |
def cuda_like(arr, src):
    """ Move arr on to GPU/CPU like src
    """
    return arr.cuda() if src.is_cuda else arr.cpu()
83 |
84 |
def mosaick_multiply(mult, im, mosaick_pattern):
    """ mosaick pattern-aware multiply.

    Args:
        mult: n-list of multiplier, where n is number of image channel.
            A batch dimension is optional.
        im: tensor of size n_batch x n_channel x width x height.
        mosaick_pattern: None or string indicating the mosaick pattern.

    Returns:
        Tensor shaped like im with the per-channel (or per-Bayer-site)
        multipliers applied.

    Raises:
        ValueError: if mosaick_pattern is neither None nor "bayer".
    """
    if mosaick_pattern is None:
        # Plain per-channel multiply; mult is broadcast over batch/space.
        return im * expand_to_4d_channel(mult)
    elif mosaick_pattern == "bayer":
        # GRBG layout: the masks below put R at (even row, odd col) and
        # B at (odd row, even col), i.e. row 0 = G R G R..., row 1 = B G...
        # (The original comment said "GRGB", which looks like a typo.)
        mult = expand_to_4d_channel(mult)

        # Pixel-coordinate grids shaped (1, 1, h, w).
        h, w = im.size(2), im.size(3)
        x = torch.arange(w).unsqueeze(0).expand(h, -1)
        y = torch.arange(h).unsqueeze(-1).expand(-1, w)
        x = x.unsqueeze(0).unsqueeze(0)
        y = y.unsqueeze(0).unsqueeze(0)

        if im.is_cuda:
            x = x.cuda()
            y = y.cuda()

        odd_x = torch.fmod(x, 2)
        odd_y = torch.fmod(y, 2)

        # Green sits where row and column parity agree (both diagonals).
        is_green = odd_x == odd_y
        is_red = odd_x * (1.0 - odd_y)
        is_blue = (1.0 - odd_x) * odd_y

        # One multiplier per color plane, selected by the site masks.
        mult = mult.expand(-1, 3, -1, -1)

        return im * mult[:, 0:1, ...] * is_red.float() + \
               im * mult[:, 1:2, ...] * is_green.float() + \
               im * mult[:, 2:3, ...] * is_blue.float()
    else:
        raise ValueError("Mosaick pattern, {}, not supported." \
                         .format(mosaick_pattern))
125 |
--------------------------------------------------------------------------------
/data_generation/generate_dataset.py:
--------------------------------------------------------------------------------
1 | import tifffile
2 | import skimage
3 | import numpy as np
4 | import os
5 | import argparse
6 | import glob
7 | import json
8 | from tqdm import tqdm
9 | from sklearn.feature_extraction.image import extract_patches_2d
10 |
11 | import torch
12 | from torch.autograd import Variable
13 | from torch import FloatTensor
14 |
15 | from data_generation.pipeline import ImageDegradationPipeline
16 | from data_generation.constants import XYZ2sRGB, ProPhotoRGB2XYZ
17 |
18 |
def numpy2tensor(arr):
    """HWC (or HW) array in 0-255 range -> 1 x C x H x W tensor in [0, 1]."""
    if arr.ndim < 3:
        arr = arr[..., None]
    chw = FloatTensor(arr).permute(2, 0, 1)
    return chw.unsqueeze(0).float() / 255.0
23 |
24 |
def tensor2numpy(t, idx=None):
    """Clamp to [0, 1], select one batch item (default: first), and return
    it as a squeezed H x W x C numpy array."""
    clamped = torch.clamp(t, 0, 1)
    sel = clamped[0 if idx is None else idx, ...]
    return sel.permute(1, 2, 0).cpu().squeeze().numpy()
32 |
33 |
# Command-line interface for the dataset generator.
parser = argparse.ArgumentParser(description='')
# Required I/O and sizing options.
parser.add_argument('--im_folder', required=True, help='path to input images')
parser.add_argument('--out_dir', required=True, help='path to place output')
parser.add_argument('--total_patch', type=int, required=True, help='total number of patches to generate')

# Patch extraction parameters.
parser.add_argument('--patch_per_image', type=int, default=5, help='Number of patch to generate from a single degradation of an image')
parser.add_argument('--patch_sz', type=int, default=256, help='Patch size (square patch for now)')
parser.add_argument('--fraction_train', type=float, default=0.8, help='Fraction of images to use as training')

# Degradation parameters: exposure/noise are sampled uniformly per image
# between the corresponding min and max.
parser.add_argument('--input_ext', default='tif', help='path to place output')
parser.add_argument('--max_exposure', type=float, default=0.0, help='maximum exposure adjustment in stops')
parser.add_argument('--min_exposure', type=float, default=0.0, help='minimum exposure adjustment in stops')
parser.add_argument('--max_gaussian_noise', type=float, default=0.0, help='maximum gaussian noise std (on range 0 - 1)')
parser.add_argument('--min_gaussian_noise', type=float, default=0.0, help='minimum gaussian noise std (on range 0 - 1)')
parser.add_argument('--max_poisson_noise', type=float, default=0.0, help='maximum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--min_poisson_noise', type=float, default=0.0, help='minimum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--skip_degraded', action="store_true", help='Whether to skip degraded images.')
parser.add_argument('--dwn_factor', type=float, default=4, help='Factor to downsample.')
args = parser.parse_args()
53 |
54 |
# Collect input image basenames; sorted so train/test split is deterministic.
im_names = glob.glob(os.path.join(args.im_folder, '*.' + args.input_ext))
im_names = sorted([os.path.basename(i) for i in im_names])

# Create output folder
os.makedirs(args.out_dir, exist_ok=True)
train_dir = os.path.join(args.out_dir, 'train')
test_dir = os.path.join(args.out_dir, 'test')
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
# Each split gets target/degraded image dirs plus a metadata dir.
for base_dir in [train_dir, test_dir]:
    target_dir = os.path.join(base_dir, 'images', 'target')
    degraded_dir = os.path.join(base_dir, 'images', 'degraded')
    meta_dir = os.path.join(base_dir, 'meta')
    os.makedirs(target_dir, exist_ok=True)
    os.makedirs(degraded_dir, exist_ok=True)
    os.makedirs(meta_dir, exist_ok=True)
71 |
# Main generation loop: cycle over the input images (wrapping around if
# needed) until total_patch patches have been written.
n_count = 0
img_idx = 0

progress_bar = tqdm(total=args.total_patch)
while n_count < args.total_patch:
    # The first fraction_train of the (sorted) images go to train,
    # the rest to test.
    if img_idx < args.fraction_train * len(im_names):
        base_dir = train_dir
    else:
        base_dir = test_dir

    target_dir = os.path.join(base_dir, 'images', 'target')
    degraded_dir = os.path.join(base_dir, 'images', 'degraded')
    meta_dir = os.path.join(base_dir, 'meta')

    name = im_names[img_idx]
    path = os.path.join(args.im_folder, name)
    # We know 5k dataset is 16-bits.
    # NOTE(review): dividing by 65536 (not 65535) maps white to 65535/65536,
    # slightly below 1.0 — confirm this is intended.
    raw_im = tifffile.imread(path).astype('float32') / 65536.0
    raw_im = FloatTensor(raw_im).permute(2, 0, 1).unsqueeze(0)

    # Define pipeline: degradation parameters are sampled per source image.
    poisson_k = np.random.uniform(args.min_poisson_noise, args.max_poisson_noise)
    read_noise_sigma = np.random.uniform(args.min_gaussian_noise, args.max_gaussian_noise)
    dwn_factor = args.dwn_factor
    exp_adjustment = np.random.uniform(args.min_exposure, args.max_exposure)
    # Stage 1: linearize ProPhoto input and convert to sRGB primaries.
    configs_prepreprocess = [
        ('UndoProPhotoRGBGamma', {}),
        # Convert to sRGB
        ('ColorSpaceConversionMatrix', {'matrix': torch.matmul(XYZ2sRGB, ProPhotoRGB2XYZ)}),
    ]

    # Stage 2: blur + downsample so the clean target has low noise.
    configs_preprocess = [
        # Blur and downsample to reduce noise
        ('GaussianBlur', {'sigma_x': dwn_factor}),
        ('PytorchResizing', {'resizing_factor': 1.0/dwn_factor, 'mode': 'nearest'})
    ]
    # Stage 3: simulate the noisy capture (exposure shift, Bayer mosaick,
    # shot/read noise, then naive demosaick back to RGB).
    configs_degrade = [
        ('ExposureAdjustment', {'nstops': exp_adjustment}),
        # ('MotionBlur', {'amt': [3, 2], 'direction': [0, 45,]}),
        ('BayerMosaicking', {}),
        # Add artificial noise.
        ('PoissonNoise',{'sigma': FloatTensor([poisson_k] * 3), 'mosaick_pattern': 'bayer'}),
        ('GaussianNoise',{'sigma': FloatTensor([read_noise_sigma] * 3), 'mosaick_pattern': 'bayer'}),
        ('PixelClip', {}),
        ('ExposureAdjustment', {'nstops': -exp_adjustment}),
        ('PixelClip', {}),
        ('NaiveDemosaicking', {}),
        ('PixelClip', {}),
    ]
    # Stage 4: baseline denoise + display gamma.
    configs_denoise = [
        ('DenoisingBilateral',{'sigma_s': 1.0, 'sigma_r': 0.1}),
        ('PixelClip', {}),
        ('sRGBGamma', {}),
    ]

    pipeline_prepreprocess = ImageDegradationPipeline(configs_prepreprocess)
    pipeline_preprocess = ImageDegradationPipeline(configs_preprocess)
    pipeline_degrade = ImageDegradationPipeline(configs_degrade)
    pipeline_denoise = ImageDegradationPipeline(configs_denoise)


    demosaicked = pipeline_prepreprocess(raw_im)
    preprocessed = pipeline_preprocess(demosaicked)
    degraded = pipeline_degrade(preprocessed)
    denoised = pipeline_denoise(degraded)

    # Stack degraded and clean images channel-wise so one call to
    # extract_patches_2d crops both at identical locations.
    denoised_numpy = tensor2numpy(denoised)
    preprocessed_numpy = tensor2numpy(preprocessed)
    stacked = np.concatenate((denoised_numpy, preprocessed_numpy), axis=-1)
    patches = extract_patches_2d(stacked,
                                 (args.patch_sz, args.patch_sz),
                                 args.patch_per_image)
    degraded_patches, target_patches = np.split(patches, 2, axis=-1)

    target_patches = np.split(target_patches, target_patches.shape[0])
    degraded_patches = np.split(degraded_patches, degraded_patches.shape[0])

    # Record the degradation parameters alongside the patches.
    meta = dict(orig=name,
                poisson_k=poisson_k,
                read_noise_sigma=read_noise_sigma,
                exp_adjustment=exp_adjustment,
                dwn_factor=dwn_factor)
    n_patches = len(degraded_patches)
    for i in range(n_patches):
        # Patch files are numbered globally, starting at 1.
        patch_idx = n_count + i + 1

        # Degraded patch saved as 8-bit PNG, target kept as float .npy.
        degraded = np.clip(degraded_patches[i] * 255.0, 0, 255).astype('uint8')
        if not args.skip_degraded:
            skimage.io.imsave(os.path.join(degraded_dir,
                                           "{:06d}.png".format(patch_idx)
                                          ),
                              np.squeeze(degraded))
        np.save(os.path.join(target_dir,
                             "{:06d}.npy".format(patch_idx)
                            ),
                np.squeeze(target_patches[i]))
        with open(os.path.join(meta_dir,
                               '{:06d}.json'.format(patch_idx)),
                  'w') as f:
            json.dump(meta, f)
    n_count += n_patches
    img_idx = (img_idx + 1) % len(im_names)
    progress_bar.update(n_patches)
progress_bar.close()
176 |
--------------------------------------------------------------------------------
/data_generation/image_io.py:
--------------------------------------------------------------------------------
1 | """ I/O module
2 |
3 | This unit deals with the nitty gritty of reading in DSLR raw camera and
4 | various other formats.
5 | """
6 | import numpy as np
7 | import rawpy
8 |
9 |
def read_raw(path, n_bits=None):
    """Read a camera RAW file and return a normalized Bayer image.

    Args:
        path: path to a RAW file readable by rawpy/LibRaw.
        n_bits: if given, normalize by 2**n_bits; otherwise scale so the
            maximum value after black-level subtraction becomes 1.0.

    Returns:
        Tuple of (im, rgb_xyz_matrix, camera_whitebalance): the cropped
        Bayer image plus the camera color metadata from rawpy.
    """
    with rawpy.imread(path) as raw:
        im_ = raw.raw_image_visible.copy()

        # subtract black level, per Bayer color plane (each plane can have
        # its own black level).
        im = np.zeros(im_.shape, dtype='float32')
        for i in range(len(raw.black_level_per_channel)):
            im += (im_ - raw.black_level_per_channel[i]) * (raw.raw_colors_visible == i).astype('float32')
        if n_bits is None:
            im /= np.amax(im)
        else:
            im /= np.power(2, n_bits)

        # shift bayer pattern: crop the image so the remaining mosaic starts
        # on the layout the rest of the code expects.
        red_idx = raw.color_desc.find(b'R')
        if red_idx == -1:
            print("Warning: Red is not in color description.")
            red_idx = 0

        # Locate a red sample in the top-left 8x8 tile of the color map.
        raw_pattern = raw.raw_colors_visible[:8, :8].copy()
        red_pos = np.asarray(np.where(raw_pattern == red_idx))[:,0]
        row_offset = red_pos[0]
        # So that we start with GR
        # (offsetting one column past red lands on the green of a G-R row,
        # giving the GRBG start assumed by the demosaicking code — confirm
        # for sensors with unusual CFA layouts).
        col_offset = red_pos[1] + 1
        im = im[row_offset:, col_offset:]
        return im, \
               raw.rgb_xyz_matrix, \
               raw.camera_whitebalance
38 |
--------------------------------------------------------------------------------
/data_generation/kernel.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
def gausskern1d(sig, sz=None):
    """ 1D Gaussian kernel.

    Args:
        sig: stdev of the kernel.
        sz: kernel size; defaults to 2*int(sig) + 1, and is never
            smaller than 3.
    """
    if sz is None:
        sz = int(2 * int(sig) + 1)
    sz = max(sz, 3)
    # Symmetric sample positions around zero, expressed in units of sigma.
    hi = float(int(sz / 2))
    lo = hi - float(sz) + 1.0
    positions = torch.linspace(lo, hi, int(sz)) / sig
    weights = torch.exp(-(positions ** 2) / 2.0)
    # Normalize so the kernel sums to one.
    return weights / weights.sum()
23 |
def gausskern2d(sz_x, sig_x, sz_y=None, sig_y=None):
    """Returns a 2D Gaussian kernel array.

    Modified from https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy

    Args:
        sz_{x,y}: kernel size.
        sig_{x,y}: stdev of kernel in each direction; default to the x
            values when omitted.
    """
    if sz_y is None:
        sz_y = sz_x
    if sig_y is None:
        sig_y = sig_x

    # gausskern1d's signature is (sig, sz); the previous code passed
    # (sz, sig), silently building kernels of the wrong size and width.
    kern1d_x = gausskern1d(sig_x, sz_x)
    kern1d_y = gausskern1d(sig_y, sz_y)
    # Outer product of the two 1D kernels (separable Gaussian); einsum with
    # no repeated indices is exactly the outer product.
    kernel_raw = torch.einsum('i,j->ij', kern1d_x, kern1d_y)
    # Renormalize so the 2D kernel sums to one.
    return kernel_raw / kernel_raw.sum()
46 |
--------------------------------------------------------------------------------
/data_generation/pipeline.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | from . import image_processing
4 |
5 |
class ImageDegradationPipeline(nn.Module):
    """Sequential image degradation pipeline.

    Instantiates a list of degradation modules by name from
    image_processing.py and applies them in order.
    """

    def __init__(self, configs):
        """Build the pipeline.

        Args:
            configs: list of (str, dict) tuples. The str is a module
                class name defined in image_processing.py; the dict holds
                the keyword arguments for that module's constructor.
        """
        super().__init__()
        self.initialize_pipeline(configs)

    def initialize_pipeline(self, configs):
        # Look up each configured class by name and instantiate it with
        # its keyword arguments, then chain everything sequentially.
        stages = [getattr(image_processing, c[0])(**c[1]) for c in configs]
        self._pipeline = nn.Sequential(*stages)

    def forward(self, image):
        # Apply every degradation stage to the image, in order.
        return self._pipeline(image)
40 |
41 |
--------------------------------------------------------------------------------
/dataset_specs/data_configspec.conf:
--------------------------------------------------------------------------------
1 | [dataset_configs]
2 | dataset_dir = string(default=None)
3 | burst_length = integer(default=8)
4 | patch_size = integer(default=200)
5 | big_jitter = integer(default=16)
6 | small_jitter = integer(default=2)
7 | down_sample = integer(default=4)
8 | color = boolean(default=False)
9 |
10 | [pipeline_configs]
11 | denoise = boolean(default=None)
12 | demosaick = boolean(default=None)
13 | jpeg_compression = boolean(default=None)
14 | use_unsharp_mask = boolean(default=None)
15 | use_motion_blur = boolean(default=None)
16 | use_chromatic_aberration = boolean(default=None)
17 | denoise_color_only = boolean(default=None)
18 | exposure_correction = boolean(default=None)
19 | quantize = boolean(default=None)
20 | denoise_guide_transform = float(default=None)
21 | denoise_n_iter = integer(default=1)
22 | demosaick_use_median = boolean(default=False)
23 | demosaick_n_iter = integer(default=0)
24 | use_median_denoise = boolean(default=False)
25 | use_wavelet = boolean(default=False)
26 | wavelet_family = string(default="db2")
27 | wavelet_th_method = string(default="BayesShrink")
28 | wavelet_levels = integer(default=None)
29 | bayer_crop_phase = integer(default=None)
30 | use_log_uniform = boolean(default=False)
31 | median_before_bilateral = boolean(default=False)
32 | repair_hotdead_pixel = boolean(default=False)
33 | hot_px_th = float(default=0.2)
34 | [pipeline_param_ranges]
35 | min_exposure_adjustment = float(default=0.0)
36 | max_exposure_adjustment = float(default=0.0)
37 | min_gaussian_noise = float(default=0.0)
38 | max_gaussian_noise = float(default=0.0)
39 | min_poisson_noise = float(default=0.0)
40 | max_poisson_noise = float(default=0.0)
41 | min_motion_blur = float(default=0)
42 | max_motion_blur = float(default=0)
43 | min_chromatic_aberration = float(default=1)
44 | max_chromatic_aberration = float(default=1)
45 | min_jpeg_quality = integer(default=1)
46 | max_jpeg_quality = integer(default=2)
47 | min_denoise_sigma_s = float(default=1)
48 | max_denoise_sigma_s = float(default=1)
49 | min_denoise_sigma_r = float(default=1)
50 | max_denoise_sigma_r = float(default=1)
51 | min_denoise_color_sigma_ratio = float(default=1)
52 | max_denoise_color_sigma_ratio = float(default=1)
53 | min_denoise_color_range_ratio = float(default=1)
54 | max_denoise_color_range_ratio = float(default=1)
55 | min_unsharp_amount = float(default=0)
56 | max_unsharp_amount = float(default=0)
57 | min_denoise_median_sz = integer(default=0)
58 | max_denoise_median_sz = integer(default=1)
59 | min_quantize_bits = integer(default=14)
60 | max_quantize_bits = integer(default=15)
61 | min_wavelet_sigma = float(default=0.003)
62 | max_wavelet_sigma = float(default=0.015)
63 | min_motionblur_th = float(default=0.99)
64 | max_motionblur_th = float(default=0.997)
65 | min_motionblur_boost = float(default=5)
66 | max_motionblur_boost = float(default=100)
67 | min_motionblur_segment = integer(default=1)
68 | max_motionblur_segment = integer(default=4)
69 |
--------------------------------------------------------------------------------
/dataset_specs/full_dataset.conf:
--------------------------------------------------------------------------------
1 | [dataset_configs]
2 | dataset_dir = ../dataset/Adobe5K/
3 | burst_length = 8
4 | patch_size = 128
5 | big_jitter = 16
6 | small_jitter = 2
7 | down_sample = 4
8 | color = True
9 |
10 | # From 6.3, use log-uniform randomization.
11 | [pipeline_configs]
12 | denoise = True
13 | demosaick = True
14 | jpeg_compression = True
15 | use_unsharp_mask = True
16 | use_motion_blur = False
17 | use_chromatic_aberration = False
18 | denoise_color_only = False
19 | exposure_correction = False
20 | quantize = True
21 | denoise_guide_transform = 0.5
22 | use_median_denoise = True
23 | use_wavelet = False
24 | use_log_uniform = True
25 | median_before_bilateral = True
26 |
27 | [pipeline_param_ranges]
28 | min_gaussian_noise = 0.002
29 | max_gaussian_noise = 0.1
30 | min_poisson_noise = 0.02
31 | max_poisson_noise = 0.2
32 | min_jpeg_quality = 4
33 | max_jpeg_quality = 8
34 | min_denoise_sigma_s = 0.25
35 | max_denoise_sigma_s = 1.0
36 | min_denoise_sigma_r = 0.1
37 | max_denoise_sigma_r = 1.0
38 | min_denoise_color_sigma_ratio = 4
39 | max_denoise_color_sigma_ratio = 32
40 | min_denoise_color_range_ratio = 0.1
41 | max_denoise_color_range_ratio = 0.5
42 | min_unsharp_amount = 0.0
43 | max_unsharp_amount = 0.25
44 | min_denoise_median_sz = -1
45 | max_denoise_median_sz = 0
46 |
--------------------------------------------------------------------------------
/eval_images/10_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/10_gt.png
--------------------------------------------------------------------------------
/eval_images/10_kpn_5x5_38.68dB_0.9798.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/10_kpn_5x5_38.68dB_0.9798.png
--------------------------------------------------------------------------------
/eval_images/10_noisy_34.50dB_0.8800.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/10_noisy_34.50dB_0.8800.png
--------------------------------------------------------------------------------
/eval_images/11_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/11_gt.png
--------------------------------------------------------------------------------
/eval_images/11_kpn_5x5_38.84dB_0.9909.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/11_kpn_5x5_38.84dB_0.9909.png
--------------------------------------------------------------------------------
/eval_images/11_noisy_34.61dB_0.8585.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/11_noisy_34.61dB_0.8585.png
--------------------------------------------------------------------------------
/eval_images/12_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/12_gt.png
--------------------------------------------------------------------------------
/eval_images/12_kpn_5x5_38.04dB_0.9860.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/12_kpn_5x5_38.04dB_0.9860.png
--------------------------------------------------------------------------------
/eval_images/12_noisy_34.56dB_0.8722.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/12_noisy_34.56dB_0.8722.png
--------------------------------------------------------------------------------
/eval_images/13_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/13_gt.png
--------------------------------------------------------------------------------
/eval_images/13_kpn_5x5_38.88dB_0.9890.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/13_kpn_5x5_38.88dB_0.9890.png
--------------------------------------------------------------------------------
/eval_images/13_noisy_34.48dB_0.8867.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/13_noisy_34.48dB_0.8867.png
--------------------------------------------------------------------------------
/eval_images/14_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/14_gt.png
--------------------------------------------------------------------------------
/eval_images/14_kpn_5x5_37.04dB_0.9497.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/14_kpn_5x5_37.04dB_0.9497.png
--------------------------------------------------------------------------------
/eval_images/14_noisy_34.69dB_0.8965.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/14_noisy_34.69dB_0.8965.png
--------------------------------------------------------------------------------
/eval_images/15_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/15_gt.png
--------------------------------------------------------------------------------
/eval_images/15_kpn_5x5_38.31dB_0.9893.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/15_kpn_5x5_38.31dB_0.9893.png
--------------------------------------------------------------------------------
/eval_images/15_noisy_34.50dB_0.9219.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/15_noisy_34.50dB_0.9219.png
--------------------------------------------------------------------------------
/eval_images/16_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/16_gt.png
--------------------------------------------------------------------------------
/eval_images/16_kpn_5x5_37.81dB_0.9653.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/16_kpn_5x5_37.81dB_0.9653.png
--------------------------------------------------------------------------------
/eval_images/16_noisy_34.38dB_0.8607.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/16_noisy_34.38dB_0.8607.png
--------------------------------------------------------------------------------
/eval_images/17_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/17_gt.png
--------------------------------------------------------------------------------
/eval_images/17_kpn_5x5_34.16dB_0.9625.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/17_kpn_5x5_34.16dB_0.9625.png
--------------------------------------------------------------------------------
/eval_images/17_noisy_28.32dB_0.7489.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/17_noisy_28.32dB_0.7489.png
--------------------------------------------------------------------------------
/eval_images/18_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/18_gt.png
--------------------------------------------------------------------------------
/eval_images/18_kpn_5x5_36.83dB_0.9698.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/18_kpn_5x5_36.83dB_0.9698.png
--------------------------------------------------------------------------------
/eval_images/18_noisy_28.09dB_0.6567.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/18_noisy_28.09dB_0.6567.png
--------------------------------------------------------------------------------
/eval_images/19_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/19_gt.png
--------------------------------------------------------------------------------
/eval_images/19_kpn_5x5_39.11dB_0.9738.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/19_kpn_5x5_39.11dB_0.9738.png
--------------------------------------------------------------------------------
/eval_images/19_noisy_28.13dB_0.5510.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/19_noisy_28.13dB_0.5510.png
--------------------------------------------------------------------------------
/eval_images/1_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/1_gt.png
--------------------------------------------------------------------------------
/eval_images/1_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/1_kpn.png
--------------------------------------------------------------------------------
/eval_images/1_kpn_5x5_39.35dB_0.9866.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/1_kpn_5x5_39.35dB_0.9866.png
--------------------------------------------------------------------------------
/eval_images/1_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/1_noisy.png
--------------------------------------------------------------------------------
/eval_images/1_noisy_36.73dB_0.9366.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/1_noisy_36.73dB_0.9366.png
--------------------------------------------------------------------------------
/eval_images/20_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/20_gt.png
--------------------------------------------------------------------------------
/eval_images/20_kpn_5x5_33.79dB_0.9656.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/20_kpn_5x5_33.79dB_0.9656.png
--------------------------------------------------------------------------------
/eval_images/20_noisy_28.72dB_0.7307.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/20_noisy_28.72dB_0.7307.png
--------------------------------------------------------------------------------
/eval_images/21_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/21_gt.png
--------------------------------------------------------------------------------
/eval_images/21_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/21_kpn.png
--------------------------------------------------------------------------------
/eval_images/21_kpn_5x5_37.36dB_0.9316.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/21_kpn_5x5_37.36dB_0.9316.png
--------------------------------------------------------------------------------
/eval_images/21_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/21_noisy.png
--------------------------------------------------------------------------------
/eval_images/21_noisy_27.61dB_0.5016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/21_noisy_27.61dB_0.5016.png
--------------------------------------------------------------------------------
/eval_images/22_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/22_gt.png
--------------------------------------------------------------------------------
/eval_images/22_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/22_kpn.png
--------------------------------------------------------------------------------
/eval_images/22_kpn_5x5_33.98dB_0.9473.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/22_kpn_5x5_33.98dB_0.9473.png
--------------------------------------------------------------------------------
/eval_images/22_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/22_noisy.png
--------------------------------------------------------------------------------
/eval_images/22_noisy_28.10dB_0.7991.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/22_noisy_28.10dB_0.7991.png
--------------------------------------------------------------------------------
/eval_images/23_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/23_gt.png
--------------------------------------------------------------------------------
/eval_images/23_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/23_kpn.png
--------------------------------------------------------------------------------
/eval_images/23_kpn_5x5_35.85dB_0.9759.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/23_kpn_5x5_35.85dB_0.9759.png
--------------------------------------------------------------------------------
/eval_images/23_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/23_noisy.png
--------------------------------------------------------------------------------
/eval_images/23_noisy_28.39dB_0.6966.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/23_noisy_28.39dB_0.6966.png
--------------------------------------------------------------------------------
/eval_images/24_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/24_gt.png
--------------------------------------------------------------------------------
/eval_images/24_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/24_kpn.png
--------------------------------------------------------------------------------
/eval_images/24_kpn_5x5_34.84dB_0.9692.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/24_kpn_5x5_34.84dB_0.9692.png
--------------------------------------------------------------------------------
/eval_images/24_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/24_noisy.png
--------------------------------------------------------------------------------
/eval_images/24_noisy_28.60dB_0.6402.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/24_noisy_28.60dB_0.6402.png
--------------------------------------------------------------------------------
/eval_images/25_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/25_gt.png
--------------------------------------------------------------------------------
/eval_images/25_kpn_5x5_40.11dB_0.9853.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/25_kpn_5x5_40.11dB_0.9853.png
--------------------------------------------------------------------------------
/eval_images/25_noisy_29.38dB_0.5669.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/25_noisy_29.38dB_0.5669.png
--------------------------------------------------------------------------------
/eval_images/26_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/26_gt.png
--------------------------------------------------------------------------------
/eval_images/26_kpn_5x5_32.85dB_0.9452.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/26_kpn_5x5_32.85dB_0.9452.png
--------------------------------------------------------------------------------
/eval_images/26_noisy_26.66dB_0.7940.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/26_noisy_26.66dB_0.7940.png
--------------------------------------------------------------------------------
/eval_images/27_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/27_gt.png
--------------------------------------------------------------------------------
/eval_images/27_kpn_5x5_35.52dB_0.9546.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/27_kpn_5x5_35.52dB_0.9546.png
--------------------------------------------------------------------------------
/eval_images/27_noisy_27.42dB_0.7241.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/27_noisy_27.42dB_0.7241.png
--------------------------------------------------------------------------------
/eval_images/28_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/28_gt.png
--------------------------------------------------------------------------------
/eval_images/28_kpn_5x5_36.13dB_0.9708.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/28_kpn_5x5_36.13dB_0.9708.png
--------------------------------------------------------------------------------
/eval_images/28_noisy_28.70dB_0.6872.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/28_noisy_28.70dB_0.6872.png
--------------------------------------------------------------------------------
/eval_images/29_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/29_gt.png
--------------------------------------------------------------------------------
/eval_images/29_kpn_5x5_35.68dB_0.9565.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/29_kpn_5x5_35.68dB_0.9565.png
--------------------------------------------------------------------------------
/eval_images/29_noisy_27.39dB_0.7316.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/29_noisy_27.39dB_0.7316.png
--------------------------------------------------------------------------------
/eval_images/2_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/2_gt.png
--------------------------------------------------------------------------------
/eval_images/2_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/2_kpn.png
--------------------------------------------------------------------------------
/eval_images/2_kpn_5x5_39.48dB_0.9919.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/2_kpn_5x5_39.48dB_0.9919.png
--------------------------------------------------------------------------------
/eval_images/2_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/2_noisy.png
--------------------------------------------------------------------------------
/eval_images/2_noisy_37.73dB_0.9347.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/2_noisy_37.73dB_0.9347.png
--------------------------------------------------------------------------------
/eval_images/30_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/30_gt.png
--------------------------------------------------------------------------------
/eval_images/30_kpn_5x5_37.83dB_0.9713.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/30_kpn_5x5_37.83dB_0.9713.png
--------------------------------------------------------------------------------
/eval_images/30_noisy_26.21dB_0.6279.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/30_noisy_26.21dB_0.6279.png
--------------------------------------------------------------------------------
/eval_images/31_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/31_gt.png
--------------------------------------------------------------------------------
/eval_images/31_kpn_5x5_37.32dB_0.9682.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/31_kpn_5x5_37.32dB_0.9682.png
--------------------------------------------------------------------------------
/eval_images/31_noisy_28.10dB_0.6873.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/31_noisy_28.10dB_0.6873.png
--------------------------------------------------------------------------------
/eval_images/32_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/32_gt.png
--------------------------------------------------------------------------------
/eval_images/32_kpn_5x5_37.64dB_0.9700.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/32_kpn_5x5_37.64dB_0.9700.png
--------------------------------------------------------------------------------
/eval_images/32_noisy_32.31dB_0.7877.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/32_noisy_32.31dB_0.7877.png
--------------------------------------------------------------------------------
/eval_images/33_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/33_gt.png
--------------------------------------------------------------------------------
/eval_images/33_kpn_5x5_39.45dB_0.9817.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/33_kpn_5x5_39.45dB_0.9817.png
--------------------------------------------------------------------------------
/eval_images/33_noisy_32.41dB_0.7184.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/33_noisy_32.41dB_0.7184.png
--------------------------------------------------------------------------------
/eval_images/34_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/34_gt.png
--------------------------------------------------------------------------------
/eval_images/34_kpn_5x5_38.20dB_0.9803.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/34_kpn_5x5_38.20dB_0.9803.png
--------------------------------------------------------------------------------
/eval_images/34_noisy_32.51dB_0.7680.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/34_noisy_32.51dB_0.7680.png
--------------------------------------------------------------------------------
/eval_images/35_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/35_gt.png
--------------------------------------------------------------------------------
/eval_images/35_kpn_5x5_39.29dB_0.9842.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/35_kpn_5x5_39.29dB_0.9842.png
--------------------------------------------------------------------------------
/eval_images/35_noisy_32.38dB_0.8153.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/35_noisy_32.38dB_0.8153.png
--------------------------------------------------------------------------------
/eval_images/36_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/36_gt.png
--------------------------------------------------------------------------------
/eval_images/36_kpn_5x5_37.80dB_0.9754.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/36_kpn_5x5_37.80dB_0.9754.png
--------------------------------------------------------------------------------
/eval_images/36_noisy_32.38dB_0.8101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/36_noisy_32.38dB_0.8101.png
--------------------------------------------------------------------------------
/eval_images/37_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/37_gt.png
--------------------------------------------------------------------------------
/eval_images/37_kpn_5x5_37.77dB_0.9826.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/37_kpn_5x5_37.77dB_0.9826.png
--------------------------------------------------------------------------------
/eval_images/37_noisy_32.21dB_0.8376.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/37_noisy_32.21dB_0.8376.png
--------------------------------------------------------------------------------
/eval_images/38_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/38_gt.png
--------------------------------------------------------------------------------
/eval_images/38_kpn_5x5_36.79dB_0.9648.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/38_kpn_5x5_36.79dB_0.9648.png
--------------------------------------------------------------------------------
/eval_images/38_noisy_32.33dB_0.8203.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/38_noisy_32.33dB_0.8203.png
--------------------------------------------------------------------------------
/eval_images/39_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/39_gt.png
--------------------------------------------------------------------------------
/eval_images/39_kpn_5x5_36.01dB_0.9710.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/39_kpn_5x5_36.01dB_0.9710.png
--------------------------------------------------------------------------------
/eval_images/39_noisy_27.63dB_0.7201.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/39_noisy_27.63dB_0.7201.png
--------------------------------------------------------------------------------
/eval_images/3_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/3_gt.png
--------------------------------------------------------------------------------
/eval_images/3_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/3_kpn.png
--------------------------------------------------------------------------------
/eval_images/3_kpn_5x5_39.14dB_0.9938.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/3_kpn_5x5_39.14dB_0.9938.png
--------------------------------------------------------------------------------
/eval_images/3_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/3_noisy.png
--------------------------------------------------------------------------------
/eval_images/3_noisy_37.67dB_0.9583.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/3_noisy_37.67dB_0.9583.png
--------------------------------------------------------------------------------
/eval_images/40_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/40_gt.png
--------------------------------------------------------------------------------
/eval_images/40_kpn_5x5_38.57dB_0.9918.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/40_kpn_5x5_38.57dB_0.9918.png
--------------------------------------------------------------------------------
/eval_images/40_noisy_36.00dB_0.9021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/40_noisy_36.00dB_0.9021.png
--------------------------------------------------------------------------------
/eval_images/41_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/41_gt.png
--------------------------------------------------------------------------------
/eval_images/41_kpn_5x5_38.56dB_0.9867.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/41_kpn_5x5_38.56dB_0.9867.png
--------------------------------------------------------------------------------
/eval_images/41_noisy_36.12dB_0.9290.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/41_noisy_36.12dB_0.9290.png
--------------------------------------------------------------------------------
/eval_images/42_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/42_gt.png
--------------------------------------------------------------------------------
/eval_images/42_kpn_5x5_39.17dB_0.9878.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/42_kpn_5x5_39.17dB_0.9878.png
--------------------------------------------------------------------------------
/eval_images/42_noisy_36.09dB_0.9311.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/42_noisy_36.09dB_0.9311.png
--------------------------------------------------------------------------------
/eval_images/43_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/43_gt.png
--------------------------------------------------------------------------------
/eval_images/43_kpn_5x5_39.89dB_0.9728.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/43_kpn_5x5_39.89dB_0.9728.png
--------------------------------------------------------------------------------
/eval_images/43_noisy_36.18dB_0.8960.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/43_noisy_36.18dB_0.8960.png
--------------------------------------------------------------------------------
/eval_images/44_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/44_gt.png
--------------------------------------------------------------------------------
/eval_images/44_kpn_5x5_37.81dB_0.9755.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/44_kpn_5x5_37.81dB_0.9755.png
--------------------------------------------------------------------------------
/eval_images/44_noisy_36.06dB_0.9542.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/44_noisy_36.06dB_0.9542.png
--------------------------------------------------------------------------------
/eval_images/45_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/45_gt.png
--------------------------------------------------------------------------------
/eval_images/45_kpn_5x5_37.07dB_0.9824.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/45_kpn_5x5_37.07dB_0.9824.png
--------------------------------------------------------------------------------
/eval_images/45_noisy_36.21dB_0.9775.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/45_noisy_36.21dB_0.9775.png
--------------------------------------------------------------------------------
/eval_images/46_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/46_gt.png
--------------------------------------------------------------------------------
/eval_images/46_kpn_5x5_41.12dB_0.9823.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/46_kpn_5x5_41.12dB_0.9823.png
--------------------------------------------------------------------------------
/eval_images/46_noisy_31.75dB_0.7773.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/46_noisy_31.75dB_0.7773.png
--------------------------------------------------------------------------------
/eval_images/47_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/47_gt.png
--------------------------------------------------------------------------------
/eval_images/47_kpn_5x5_38.98dB_0.9849.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/47_kpn_5x5_38.98dB_0.9849.png
--------------------------------------------------------------------------------
/eval_images/47_noisy_35.49dB_0.9071.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/47_noisy_35.49dB_0.9071.png
--------------------------------------------------------------------------------
/eval_images/48_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/48_gt.png
--------------------------------------------------------------------------------
/eval_images/48_kpn_5x5_37.68dB_0.9244.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/48_kpn_5x5_37.68dB_0.9244.png
--------------------------------------------------------------------------------
/eval_images/48_noisy_30.25dB_0.7162.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/48_noisy_30.25dB_0.7162.png
--------------------------------------------------------------------------------
/eval_images/49_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/49_gt.png
--------------------------------------------------------------------------------
/eval_images/49_kpn_5x5_30.79dB_0.7580.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/49_kpn_5x5_30.79dB_0.7580.png
--------------------------------------------------------------------------------
/eval_images/49_noisy_26.10dB_0.5892.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/49_noisy_26.10dB_0.5892.png
--------------------------------------------------------------------------------
/eval_images/4_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/4_gt.png
--------------------------------------------------------------------------------
/eval_images/4_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/4_kpn.png
--------------------------------------------------------------------------------
/eval_images/4_kpn_5x5_39.25dB_0.9824.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/4_kpn_5x5_39.25dB_0.9824.png
--------------------------------------------------------------------------------
/eval_images/4_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/4_noisy.png
--------------------------------------------------------------------------------
/eval_images/4_noisy_37.99dB_0.9598.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/4_noisy_37.99dB_0.9598.png
--------------------------------------------------------------------------------
/eval_images/50_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/50_gt.png
--------------------------------------------------------------------------------
/eval_images/50_kpn_5x5_39.02dB_0.9887.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/50_kpn_5x5_39.02dB_0.9887.png
--------------------------------------------------------------------------------
/eval_images/50_noisy_36.48dB_0.9212.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/50_noisy_36.48dB_0.9212.png
--------------------------------------------------------------------------------
/eval_images/51_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/51_gt.png
--------------------------------------------------------------------------------
/eval_images/51_kpn_5x5_39.68dB_0.9903.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/51_kpn_5x5_39.68dB_0.9903.png
--------------------------------------------------------------------------------
/eval_images/51_noisy_36.14dB_0.9540.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/51_noisy_36.14dB_0.9540.png
--------------------------------------------------------------------------------
/eval_images/52_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/52_gt.png
--------------------------------------------------------------------------------
/eval_images/52_kpn_5x5_39.06dB_0.9819.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/52_kpn_5x5_39.06dB_0.9819.png
--------------------------------------------------------------------------------
/eval_images/52_noisy_36.34dB_0.9172.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/52_noisy_36.34dB_0.9172.png
--------------------------------------------------------------------------------
/eval_images/53_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/53_gt.png
--------------------------------------------------------------------------------
/eval_images/53_kpn_5x5_39.40dB_0.9871.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/53_kpn_5x5_39.40dB_0.9871.png
--------------------------------------------------------------------------------
/eval_images/53_noisy_34.24dB_0.9062.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/53_noisy_34.24dB_0.9062.png
--------------------------------------------------------------------------------
/eval_images/54_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/54_gt.png
--------------------------------------------------------------------------------
/eval_images/54_kpn_5x5_39.98dB_0.9886.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/54_kpn_5x5_39.98dB_0.9886.png
--------------------------------------------------------------------------------
/eval_images/54_noisy_36.25dB_0.9036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/54_noisy_36.25dB_0.9036.png
--------------------------------------------------------------------------------
/eval_images/55_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/55_gt.png
--------------------------------------------------------------------------------
/eval_images/55_kpn_5x5_38.73dB_0.9822.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/55_kpn_5x5_38.73dB_0.9822.png
--------------------------------------------------------------------------------
/eval_images/55_noisy_37.04dB_0.9654.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/55_noisy_37.04dB_0.9654.png
--------------------------------------------------------------------------------
/eval_images/56_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/56_gt.png
--------------------------------------------------------------------------------
/eval_images/56_kpn_5x5_39.51dB_0.9936.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/56_kpn_5x5_39.51dB_0.9936.png
--------------------------------------------------------------------------------
/eval_images/56_noisy_38.16dB_0.9714.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/56_noisy_38.16dB_0.9714.png
--------------------------------------------------------------------------------
/eval_images/57_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/57_gt.png
--------------------------------------------------------------------------------
/eval_images/57_kpn_5x5_37.89dB_0.9887.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/57_kpn_5x5_37.89dB_0.9887.png
--------------------------------------------------------------------------------
/eval_images/57_noisy_35.77dB_0.9747.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/57_noisy_35.77dB_0.9747.png
--------------------------------------------------------------------------------
/eval_images/58_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/58_gt.png
--------------------------------------------------------------------------------
/eval_images/58_kpn_5x5_39.52dB_0.9840.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/58_kpn_5x5_39.52dB_0.9840.png
--------------------------------------------------------------------------------
/eval_images/58_noisy_38.25dB_0.9738.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/58_noisy_38.25dB_0.9738.png
--------------------------------------------------------------------------------
/eval_images/59_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/59_gt.png
--------------------------------------------------------------------------------
/eval_images/59_kpn_5x5_38.02dB_0.9827.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/59_kpn_5x5_38.02dB_0.9827.png
--------------------------------------------------------------------------------
/eval_images/59_noisy_33.73dB_0.9389.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/59_noisy_33.73dB_0.9389.png
--------------------------------------------------------------------------------
/eval_images/5_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/5_gt.png
--------------------------------------------------------------------------------
/eval_images/5_kpn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/5_kpn.png
--------------------------------------------------------------------------------
/eval_images/5_kpn_5x5_39.33dB_0.9888.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/5_kpn_5x5_39.33dB_0.9888.png
--------------------------------------------------------------------------------
/eval_images/5_noisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/5_noisy.png
--------------------------------------------------------------------------------
/eval_images/5_noisy_37.77dB_0.9749.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/5_noisy_37.77dB_0.9749.png
--------------------------------------------------------------------------------
/eval_images/60_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/60_gt.png
--------------------------------------------------------------------------------
/eval_images/60_kpn_5x5_38.65dB_0.9853.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/60_kpn_5x5_38.65dB_0.9853.png
--------------------------------------------------------------------------------
/eval_images/60_noisy_37.21dB_0.9718.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/60_noisy_37.21dB_0.9718.png
--------------------------------------------------------------------------------
/eval_images/61_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/61_gt.png
--------------------------------------------------------------------------------
/eval_images/61_kpn_5x5_40.22dB_0.9922.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/61_kpn_5x5_40.22dB_0.9922.png
--------------------------------------------------------------------------------
/eval_images/61_noisy_37.93dB_0.9604.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/61_noisy_37.93dB_0.9604.png
--------------------------------------------------------------------------------
/eval_images/62_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/62_gt.png
--------------------------------------------------------------------------------
/eval_images/62_kpn_5x5_36.89dB_0.9901.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/62_kpn_5x5_36.89dB_0.9901.png
--------------------------------------------------------------------------------
/eval_images/62_noisy_34.35dB_0.9645.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/62_noisy_34.35dB_0.9645.png
--------------------------------------------------------------------------------
/eval_images/63_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/63_gt.png
--------------------------------------------------------------------------------
/eval_images/63_kpn_5x5_34.38dB_0.9104.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/63_kpn_5x5_34.38dB_0.9104.png
--------------------------------------------------------------------------------
/eval_images/63_noisy_25.35dB_0.4702.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/63_noisy_25.35dB_0.4702.png
--------------------------------------------------------------------------------
/eval_images/64_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/64_gt.png
--------------------------------------------------------------------------------
/eval_images/64_kpn_5x5_35.10dB_0.9525.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/64_kpn_5x5_35.10dB_0.9525.png
--------------------------------------------------------------------------------
/eval_images/64_noisy_25.87dB_0.5550.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/64_noisy_25.87dB_0.5550.png
--------------------------------------------------------------------------------
/eval_images/65_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/65_gt.png
--------------------------------------------------------------------------------
/eval_images/65_kpn_5x5_34.76dB_0.9458.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/65_kpn_5x5_34.76dB_0.9458.png
--------------------------------------------------------------------------------
/eval_images/65_noisy_25.34dB_0.5893.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/65_noisy_25.34dB_0.5893.png
--------------------------------------------------------------------------------
/eval_images/66_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/66_gt.png
--------------------------------------------------------------------------------
/eval_images/66_kpn_5x5_34.08dB_0.8902.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/66_kpn_5x5_34.08dB_0.8902.png
--------------------------------------------------------------------------------
/eval_images/66_noisy_25.16dB_0.5120.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/66_noisy_25.16dB_0.5120.png
--------------------------------------------------------------------------------
/eval_images/67_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/67_gt.png
--------------------------------------------------------------------------------
/eval_images/67_kpn_5x5_39.37dB_0.9908.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/67_kpn_5x5_39.37dB_0.9908.png
--------------------------------------------------------------------------------
/eval_images/67_noisy_38.30dB_0.9847.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/67_noisy_38.30dB_0.9847.png
--------------------------------------------------------------------------------
/eval_images/68_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/68_gt.png
--------------------------------------------------------------------------------
/eval_images/68_kpn_5x5_35.75dB_0.9324.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/68_kpn_5x5_35.75dB_0.9324.png
--------------------------------------------------------------------------------
/eval_images/68_noisy_25.69dB_0.4059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/68_noisy_25.69dB_0.4059.png
--------------------------------------------------------------------------------
/eval_images/69_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/69_gt.png
--------------------------------------------------------------------------------
/eval_images/69_kpn_5x5_32.32dB_0.9361.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/69_kpn_5x5_32.32dB_0.9361.png
--------------------------------------------------------------------------------
/eval_images/69_noisy_25.68dB_0.7298.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/69_noisy_25.68dB_0.7298.png
--------------------------------------------------------------------------------
/eval_images/6_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/6_gt.png
--------------------------------------------------------------------------------
/eval_images/6_kpn_5x5_38.36dB_0.9825.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/6_kpn_5x5_38.36dB_0.9825.png
--------------------------------------------------------------------------------
/eval_images/6_noisy_36.65dB_0.9624.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/6_noisy_36.65dB_0.9624.png
--------------------------------------------------------------------------------
/eval_images/70_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/70_gt.png
--------------------------------------------------------------------------------
/eval_images/70_kpn_5x5_39.91dB_0.9910.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/70_kpn_5x5_39.91dB_0.9910.png
--------------------------------------------------------------------------------
/eval_images/70_noisy_38.50dB_0.9572.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/70_noisy_38.50dB_0.9572.png
--------------------------------------------------------------------------------
/eval_images/71_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/71_gt.png
--------------------------------------------------------------------------------
/eval_images/71_kpn_5x5_35.55dB_0.9774.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/71_kpn_5x5_35.55dB_0.9774.png
--------------------------------------------------------------------------------
/eval_images/71_noisy_32.97dB_0.8975.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/71_noisy_32.97dB_0.8975.png
--------------------------------------------------------------------------------
/eval_images/72_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/72_gt.png
--------------------------------------------------------------------------------
/eval_images/72_kpn_5x5_37.08dB_0.9755.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/72_kpn_5x5_37.08dB_0.9755.png
--------------------------------------------------------------------------------
/eval_images/72_noisy_32.98dB_0.8527.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/72_noisy_32.98dB_0.8527.png
--------------------------------------------------------------------------------
/eval_images/73_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/73_gt.png
--------------------------------------------------------------------------------
/eval_images/73_kpn_5x5_39.25dB_0.9713.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/73_kpn_5x5_39.25dB_0.9713.png
--------------------------------------------------------------------------------
/eval_images/73_noisy_31.69dB_0.7565.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/73_noisy_31.69dB_0.7565.png
--------------------------------------------------------------------------------
/eval_images/74_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/74_gt.png
--------------------------------------------------------------------------------
/eval_images/74_kpn_5x5_37.40dB_0.9598.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/74_kpn_5x5_37.40dB_0.9598.png
--------------------------------------------------------------------------------
/eval_images/74_noisy_31.40dB_0.7913.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/74_noisy_31.40dB_0.7913.png
--------------------------------------------------------------------------------
/eval_images/75_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/75_gt.png
--------------------------------------------------------------------------------
/eval_images/75_kpn_5x5_34.89dB_0.9287.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/75_kpn_5x5_34.89dB_0.9287.png
--------------------------------------------------------------------------------
/eval_images/75_noisy_25.27dB_0.6376.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/75_noisy_25.27dB_0.6376.png
--------------------------------------------------------------------------------
/eval_images/76_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/76_gt.png
--------------------------------------------------------------------------------
/eval_images/76_kpn_5x5_38.18dB_0.9756.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/76_kpn_5x5_38.18dB_0.9756.png
--------------------------------------------------------------------------------
/eval_images/76_noisy_32.34dB_0.8855.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/76_noisy_32.34dB_0.8855.png
--------------------------------------------------------------------------------
/eval_images/77_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/77_gt.png
--------------------------------------------------------------------------------
/eval_images/77_kpn_5x5_36.31dB_0.9730.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/77_kpn_5x5_36.31dB_0.9730.png
--------------------------------------------------------------------------------
/eval_images/77_noisy_32.73dB_0.9155.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/77_noisy_32.73dB_0.9155.png
--------------------------------------------------------------------------------
/eval_images/78_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/78_gt.png
--------------------------------------------------------------------------------
/eval_images/78_kpn_5x5_38.33dB_0.9751.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/78_kpn_5x5_38.33dB_0.9751.png
--------------------------------------------------------------------------------
/eval_images/78_noisy_25.95dB_0.4840.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/78_noisy_25.95dB_0.4840.png
--------------------------------------------------------------------------------
/eval_images/79_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/79_gt.png
--------------------------------------------------------------------------------
/eval_images/79_kpn_5x5_37.68dB_0.9554.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/79_kpn_5x5_37.68dB_0.9554.png
--------------------------------------------------------------------------------
/eval_images/79_noisy_32.21dB_0.7821.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/79_noisy_32.21dB_0.7821.png
--------------------------------------------------------------------------------
/eval_images/7_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/7_gt.png
--------------------------------------------------------------------------------
/eval_images/7_kpn_5x5_40.65dB_0.9868.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/7_kpn_5x5_40.65dB_0.9868.png
--------------------------------------------------------------------------------
/eval_images/7_noisy_37.47dB_0.9308.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/7_noisy_37.47dB_0.9308.png
--------------------------------------------------------------------------------
/eval_images/80_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/80_gt.png
--------------------------------------------------------------------------------
/eval_images/80_kpn_5x5_36.39dB_0.9837.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/80_kpn_5x5_36.39dB_0.9837.png
--------------------------------------------------------------------------------
/eval_images/80_noisy_32.55dB_0.9169.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/80_noisy_32.55dB_0.9169.png
--------------------------------------------------------------------------------
/eval_images/81_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/81_gt.png
--------------------------------------------------------------------------------
/eval_images/81_kpn_5x5_37.33dB_0.9816.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/81_kpn_5x5_37.33dB_0.9816.png
--------------------------------------------------------------------------------
/eval_images/81_noisy_32.67dB_0.8759.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/81_noisy_32.67dB_0.8759.png
--------------------------------------------------------------------------------
/eval_images/82_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/82_gt.png
--------------------------------------------------------------------------------
/eval_images/82_kpn_5x5_36.23dB_0.9788.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/82_kpn_5x5_36.23dB_0.9788.png
--------------------------------------------------------------------------------
/eval_images/82_noisy_32.49dB_0.8655.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/82_noisy_32.49dB_0.8655.png
--------------------------------------------------------------------------------
/eval_images/83_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/83_gt.png
--------------------------------------------------------------------------------
/eval_images/83_kpn_5x5_36.86dB_0.9658.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/83_kpn_5x5_36.86dB_0.9658.png
--------------------------------------------------------------------------------
/eval_images/83_noisy_31.89dB_0.8717.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/83_noisy_31.89dB_0.8717.png
--------------------------------------------------------------------------------
/eval_images/84_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/84_gt.png
--------------------------------------------------------------------------------
/eval_images/84_kpn_5x5_37.53dB_0.9908.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/84_kpn_5x5_37.53dB_0.9908.png
--------------------------------------------------------------------------------
/eval_images/84_noisy_32.70dB_0.8561.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/84_noisy_32.70dB_0.8561.png
--------------------------------------------------------------------------------
/eval_images/85_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/85_gt.png
--------------------------------------------------------------------------------
/eval_images/85_kpn_5x5_37.93dB_0.9557.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/85_kpn_5x5_37.93dB_0.9557.png
--------------------------------------------------------------------------------
/eval_images/85_noisy_32.45dB_0.7962.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/85_noisy_32.45dB_0.7962.png
--------------------------------------------------------------------------------
/eval_images/86_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/86_gt.png
--------------------------------------------------------------------------------
/eval_images/86_kpn_5x5_34.20dB_0.9634.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/86_kpn_5x5_34.20dB_0.9634.png
--------------------------------------------------------------------------------
/eval_images/86_noisy_30.81dB_0.9148.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/86_noisy_30.81dB_0.9148.png
--------------------------------------------------------------------------------
/eval_images/87_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/87_gt.png
--------------------------------------------------------------------------------
/eval_images/87_kpn_5x5_39.10dB_0.9747.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/87_kpn_5x5_39.10dB_0.9747.png
--------------------------------------------------------------------------------
/eval_images/87_noisy_30.31dB_0.5805.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/87_noisy_30.31dB_0.5805.png
--------------------------------------------------------------------------------
/eval_images/88_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/88_gt.png
--------------------------------------------------------------------------------
/eval_images/88_kpn_5x5_37.34dB_0.9650.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/88_kpn_5x5_37.34dB_0.9650.png
--------------------------------------------------------------------------------
/eval_images/88_noisy_29.50dB_0.6900.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/88_noisy_29.50dB_0.6900.png
--------------------------------------------------------------------------------
/eval_images/89_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/89_gt.png
--------------------------------------------------------------------------------
/eval_images/89_kpn_5x5_36.06dB_0.9816.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/89_kpn_5x5_36.06dB_0.9816.png
--------------------------------------------------------------------------------
/eval_images/89_noisy_30.16dB_0.7799.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/89_noisy_30.16dB_0.7799.png
--------------------------------------------------------------------------------
/eval_images/8_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/8_gt.png
--------------------------------------------------------------------------------
/eval_images/8_kpn_5x5_38.48dB_0.9800.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/8_kpn_5x5_38.48dB_0.9800.png
--------------------------------------------------------------------------------
/eval_images/8_noisy_35.23dB_0.9459.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/8_noisy_35.23dB_0.9459.png
--------------------------------------------------------------------------------
/eval_images/90_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/90_gt.png
--------------------------------------------------------------------------------
/eval_images/90_kpn_5x5_34.73dB_0.9713.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/90_kpn_5x5_34.73dB_0.9713.png
--------------------------------------------------------------------------------
/eval_images/90_noisy_29.64dB_0.8340.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/90_noisy_29.64dB_0.8340.png
--------------------------------------------------------------------------------
/eval_images/91_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/91_gt.png
--------------------------------------------------------------------------------
/eval_images/91_kpn_5x5_39.03dB_0.9762.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/91_kpn_5x5_39.03dB_0.9762.png
--------------------------------------------------------------------------------
/eval_images/91_noisy_32.42dB_0.7615.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/91_noisy_32.42dB_0.7615.png
--------------------------------------------------------------------------------
/eval_images/92_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/92_gt.png
--------------------------------------------------------------------------------
/eval_images/92_kpn_5x5_38.21dB_0.9746.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/92_kpn_5x5_38.21dB_0.9746.png
--------------------------------------------------------------------------------
/eval_images/92_noisy_30.11dB_0.7182.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/92_noisy_30.11dB_0.7182.png
--------------------------------------------------------------------------------
/eval_images/93_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/93_gt.png
--------------------------------------------------------------------------------
/eval_images/93_kpn_5x5_36.61dB_0.9652.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/93_kpn_5x5_36.61dB_0.9652.png
--------------------------------------------------------------------------------
/eval_images/93_noisy_29.85dB_0.6761.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/93_noisy_29.85dB_0.6761.png
--------------------------------------------------------------------------------
/eval_images/94_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/94_gt.png
--------------------------------------------------------------------------------
/eval_images/94_kpn_5x5_37.38dB_0.9859.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/94_kpn_5x5_37.38dB_0.9859.png
--------------------------------------------------------------------------------
/eval_images/94_noisy_32.82dB_0.8374.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/94_noisy_32.82dB_0.8374.png
--------------------------------------------------------------------------------
/eval_images/95_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/95_gt.png
--------------------------------------------------------------------------------
/eval_images/95_kpn_5x5_37.35dB_0.9740.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/95_kpn_5x5_37.35dB_0.9740.png
--------------------------------------------------------------------------------
/eval_images/95_noisy_32.39dB_0.8629.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/95_noisy_32.39dB_0.8629.png
--------------------------------------------------------------------------------
/eval_images/96_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/96_gt.png
--------------------------------------------------------------------------------
/eval_images/96_kpn_5x5_35.64dB_0.9681.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/96_kpn_5x5_35.64dB_0.9681.png
--------------------------------------------------------------------------------
/eval_images/96_noisy_29.94dB_0.7946.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/96_noisy_29.94dB_0.7946.png
--------------------------------------------------------------------------------
/eval_images/97_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/97_gt.png
--------------------------------------------------------------------------------
/eval_images/97_kpn_5x5_36.06dB_0.9525.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/97_kpn_5x5_36.06dB_0.9525.png
--------------------------------------------------------------------------------
/eval_images/97_noisy_32.48dB_0.8883.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/97_noisy_32.48dB_0.8883.png
--------------------------------------------------------------------------------
/eval_images/98_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/98_gt.png
--------------------------------------------------------------------------------
/eval_images/98_kpn_5x5_36.41dB_0.9654.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/98_kpn_5x5_36.41dB_0.9654.png
--------------------------------------------------------------------------------
/eval_images/98_noisy_31.56dB_0.8672.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/98_noisy_31.56dB_0.8672.png
--------------------------------------------------------------------------------
/eval_images/99_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/99_gt.png
--------------------------------------------------------------------------------
/eval_images/99_kpn_5x5_38.40dB_0.9825.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/99_kpn_5x5_38.40dB_0.9825.png
--------------------------------------------------------------------------------
/eval_images/99_noisy_32.58dB_0.8636.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/99_noisy_32.58dB_0.8636.png
--------------------------------------------------------------------------------
/eval_images/9_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/9_gt.png
--------------------------------------------------------------------------------
/eval_images/9_kpn_5x5_37.53dB_0.9877.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/9_kpn_5x5_37.53dB_0.9877.png
--------------------------------------------------------------------------------
/eval_images/9_noisy_34.32dB_0.8747.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images/9_noisy_34.32dB_0.8747.png
--------------------------------------------------------------------------------
/eval_images_RGB/0_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/0_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/0_noisy_29.17dB_0.911.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/0_noisy_29.17dB_0.911.png
--------------------------------------------------------------------------------
/eval_images_RGB/0_pred_31.86dB_0.957.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/0_pred_31.86dB_0.957.png
--------------------------------------------------------------------------------
/eval_images_RGB/10_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/10_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/10_noisy_31.95dB_0.776.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/10_noisy_31.95dB_0.776.png
--------------------------------------------------------------------------------
/eval_images_RGB/10_pred_39.27dB_0.965.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/10_pred_39.27dB_0.965.png
--------------------------------------------------------------------------------
/eval_images_RGB/11_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/11_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/11_noisy_23.93dB_0.549.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/11_noisy_23.93dB_0.549.png
--------------------------------------------------------------------------------
/eval_images_RGB/11_pred_29.44dB_0.801.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/11_pred_29.44dB_0.801.png
--------------------------------------------------------------------------------
/eval_images_RGB/12_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/12_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/12_noisy_18.70dB_0.308.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/12_noisy_18.70dB_0.308.png
--------------------------------------------------------------------------------
/eval_images_RGB/12_pred_34.02dB_0.954.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/12_pred_34.02dB_0.954.png
--------------------------------------------------------------------------------
/eval_images_RGB/13_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/13_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/13_noisy_32.41dB_0.916.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/13_noisy_32.41dB_0.916.png
--------------------------------------------------------------------------------
/eval_images_RGB/13_pred_37.87dB_0.987.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/13_pred_37.87dB_0.987.png
--------------------------------------------------------------------------------
/eval_images_RGB/1_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/1_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/1_noisy_22.25dB_0.544.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/1_noisy_22.25dB_0.544.png
--------------------------------------------------------------------------------
/eval_images_RGB/1_pred_32.77dB_0.912.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/1_pred_32.77dB_0.912.png
--------------------------------------------------------------------------------
/eval_images_RGB/2_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/2_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/2_noisy_19.42dB_0.496.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/2_noisy_19.42dB_0.496.png
--------------------------------------------------------------------------------
/eval_images_RGB/2_pred_29.73dB_0.900.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/2_pred_29.73dB_0.900.png
--------------------------------------------------------------------------------
/eval_images_RGB/3_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/3_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/3_noisy_25.74dB_0.701.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/3_noisy_25.74dB_0.701.png
--------------------------------------------------------------------------------
/eval_images_RGB/3_pred_35.85dB_0.960.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/3_pred_35.85dB_0.960.png
--------------------------------------------------------------------------------
/eval_images_RGB/4_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/4_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/4_noisy_19.34dB_0.595.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/4_noisy_19.34dB_0.595.png
--------------------------------------------------------------------------------
/eval_images_RGB/4_pred_29.69dB_0.937.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/4_pred_29.69dB_0.937.png
--------------------------------------------------------------------------------
/eval_images_RGB/5_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/5_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/5_noisy_27.50dB_0.641.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/5_noisy_27.50dB_0.641.png
--------------------------------------------------------------------------------
/eval_images_RGB/5_pred_33.03dB_0.840.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/5_pred_33.03dB_0.840.png
--------------------------------------------------------------------------------
/eval_images_RGB/6_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/6_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/6_noisy_16.37dB_0.283.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/6_noisy_16.37dB_0.283.png
--------------------------------------------------------------------------------
/eval_images_RGB/6_pred_29.25dB_0.844.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/6_pred_29.25dB_0.844.png
--------------------------------------------------------------------------------
/eval_images_RGB/7_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/7_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/7_noisy_27.71dB_0.634.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/7_noisy_27.71dB_0.634.png
--------------------------------------------------------------------------------
/eval_images_RGB/7_pred_38.33dB_0.961.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/7_pred_38.33dB_0.961.png
--------------------------------------------------------------------------------
/eval_images_RGB/8_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/8_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/8_noisy_30.11dB_0.870.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/8_noisy_30.11dB_0.870.png
--------------------------------------------------------------------------------
/eval_images_RGB/8_pred_32.60dB_0.942.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/8_pred_32.60dB_0.942.png
--------------------------------------------------------------------------------
/eval_images_RGB/9_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/9_gt.png
--------------------------------------------------------------------------------
/eval_images_RGB/9_noisy_24.02dB_0.597.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/9_noisy_24.02dB_0.597.png
--------------------------------------------------------------------------------
/eval_images_RGB/9_pred_33.24dB_0.914.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LujiaJin/kernel-prediction-networks-PyTorch/209f45ff04a282267ccbb5323daa33717379daad/eval_images_RGB/9_pred_33.24dB_0.914.png
--------------------------------------------------------------------------------
/kpn_data_provider.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torchvision.transforms as transforms
4 | import torch.nn.functional as F
5 | from torch.utils.data import Dataset, DataLoader
6 | import os
7 | from PIL import Image
8 | import numpy as np
9 | from skimage.color import rgb2xyz
10 | import inspect
11 | from utils.training_util import read_config
12 | from data_generation.data_utils import *
13 | import torch.nn.functional as F
14 |
15 |
def sRGBGamma(tensor):
    """Apply the forward sRGB transfer curve (linear -> gamma-encoded).

    Values above the linear threshold take the power-law segment,
    values at or below it take the linear segment.
    """
    threshold = 0.0031308
    a = 0.055
    mult = 12.92
    gamma = 2.4
    # Compute both segments and select elementwise. Inputs are image data
    # in [0, 1], so the pow branch is well defined everywhere.
    linear_part = tensor * mult
    power_part = (1 + a) * torch.pow(tensor, 1.0 / gamma) - a
    return torch.where(tensor > threshold, power_part, linear_part)
32 |
33 |
def UndosRGBGamma(tensor):
    """Invert the sRGB transfer curve (gamma-encoded -> linear).

    BUG FIX: the decode threshold must be applied in the *encoded* domain.
    The previous code compared encoded values against 0.0031308 (the
    linear-domain breakpoint), so encoded values in (0.0031308, 0.04045]
    wrongly took the power branch and did not round-trip with sRGBGamma.
    The correct encoded breakpoint is 12.92 * 0.0031308 ~= 0.04045.
    """
    threshold = 0.04045  # encoded-domain breakpoint (= 12.92 * 0.0031308)
    a = 0.055
    mult = 12.92
    gamma = 2.4
    res = torch.zeros_like(tensor)
    mask = tensor > threshold
    # Linear segment for small encoded values.
    res[~mask] = tensor[~mask] / mult
    # Power-law segment for the rest.
    res[mask] = torch.pow(tensor[mask] + a, gamma) / (1 + a)
    return res
48 |
49 |
class Random_Horizontal_Flip(object):
    """Flip a tensor along its last (width) axis with probability p."""

    def __init__(self, p=0.5):
        # Probability of actually performing the flip.
        self.p = p

    def __call__(self, tensor):
        do_flip = np.random.rand() < self.p
        return torch.flip(tensor, dims=[-1]) if do_flip else tensor
58 |
59 |
class Random_Vertical_Flip(object):
    """Flip a tensor along its second-to-last (height) axis with probability p."""

    def __init__(self, p=0.5):
        # Probability of actually performing the flip.
        self.p = p

    def __call__(self, tensor):
        do_flip = np.random.rand() < self.p
        return torch.flip(tensor, dims=[-2]) if do_flip else tensor
68 |
69 |
class TrainDataSet(torch.utils.data.Dataset):
    """Synthetic burst dataset for KPN training and evaluation.

    Each item is built from a single still image: a stack of randomly
    jittered crops simulates a burst, the stack is downsampled, optionally
    converted to grayscale and linearized (degamma), scaled by a random
    white level, and corrupted with read + shot noise following the KPN
    paper.

    ``__getitem__`` returns ``(burst_noise, gt, white_level)``.
    """

    def __init__(self, config_file,
                 config_spec=None, img_format='.bmp', degamma=True, color=True,
                 blind=False, train=True):
        """
        Args:
            config_file: path to the dataset config file.
            config_spec: path to the configspec; defaults to the one shipped
                in ``dataset_specs/``.
            img_format: substring (extension) used to filter files in the
                dataset directory.
            degamma: undo the sRGB gamma so processing happens in linear
                space.
            color: keep RGB if True, otherwise convert to grayscale.
            blind: blind estimation — if True, the per-pixel noise-level map
                is NOT concatenated to the network input.
            train: training mode enables random flips; eval mode crops odd
                image sizes down to even ones.
        """
        super(TrainDataSet, self).__init__()
        if config_spec is None:
            config_spec = self._configspec_path()
        config = read_config(config_file, config_spec)
        self.dataset_config = config['dataset_configs']
        self.dataset_dir = self.dataset_config['dataset_dir']
        # Keep only files whose name contains the requested extension.
        self.images = [name for name in os.listdir(self.dataset_dir)
                       if img_format in name]
        self.burst_size = self.dataset_config['burst_length']
        self.patch_size = self.dataset_config['patch_size']

        self.upscale = self.dataset_config['down_sample']
        self.big_jitter = self.dataset_config['big_jitter']
        self.small_jitter = self.dataset_config['small_jitter']
        # Maximum frame offset, measured on the image before downsampling.
        self.jitter_upscale = self.big_jitter * self.upscale
        # Crop size before downsampling: patch plus jitter margin per side.
        self.size_upscale = self.patch_size * self.upscale + 2 * self.jitter_upscale
        # Margin between the big-jitter and small-jitter crop regions,
        # on the pre-downsample scale.
        self.delta_upscale = (
            self.big_jitter - self.small_jitter) * self.upscale
        # Patch size on the original (pre-downsample) image.
        self.patch_size_upscale = self.patch_size * self.upscale

        # Whether to linearize images by undoing the sRGB gamma.
        self.degamma = degamma
        # Whether to keep color (RGB) or convert to grayscale.
        self.color = color
        # Blind estimation: noise level is not given to the network.
        self.blind = blind
        self.train = train

        self.vertical_flip = Random_Vertical_Flip(p=0.5)
        self.horizontal_flip = Random_Horizontal_Flip(p=0.5)

    @staticmethod
    def _configspec_path():
        # Directory containing this source file.
        current_dir = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe()))
        )
        return os.path.join(current_dir,
                            'dataset_specs/data_configspec.conf')

    @staticmethod
    def crop_random(tensor, patch_size):
        """Take a single random square crop of side ``patch_size``."""
        return random_crop(tensor, 1, patch_size)[0]

    def __getitem__(self, index):
        """Build one noisy burst sample from the index-th image."""
        image = Image.open(
            os.path.join(self.dataset_dir, self.images[index])).convert('RGB')

        # Convert to a CHW float tensor in [0, 1].
        image = transforms.ToTensor()(image)

        # At testing time, if the image size is odd, the edge is chopped off
        # to make it even; otherwise the skip-connection concatenations in
        # the U-Net see mismatched sizes and raise an error.
        if not self.train:
            height = image.size()[-2]
            width = image.size()[-1]
            if height % 2 == 1:
                image = image[..., :-1, :]
            if width % 2 == 1:
                image = image[..., :, :-1]

        image_crop = self.crop_random(image, self.size_upscale)
        # 3*H*W inner region corresponding to the smaller jitter range.
        # NOTE(review): assumes big_jitter > small_jitter; if delta_upscale
        # were 0, this [0:-0] slice would be empty — confirm configs.
        image_crop_small = image_crop[:, self.delta_upscale:-self.delta_upscale,
                                      self.delta_upscale:-self.delta_upscale]

        # The first burst frame gets no offset and later serves as the
        # target. Output shape: N*3*H*W.
        img_burst = []
        for i in range(self.burst_size):
            if i == 0:
                img_burst.append(
                    image_crop[:, self.jitter_upscale:-self.jitter_upscale,
                               self.jitter_upscale:-self.jitter_upscale]
                )
            elif np.random.binomial(1, min(1.0, np.random.poisson(
                    lam=1.5) / self.burst_size)) == 0:
                # Small jitter.
                img_burst.append(
                    self.crop_random(
                        image_crop_small, self.patch_size_upscale
                    )
                )
            else:
                # Big jitter.
                img_burst.append(
                    self.crop_random(image_crop, self.patch_size_upscale)
                )
        image_burst = torch.stack(img_burst, dim=0)
        # Downsample each frame to the final patch size.
        image_burst = F.adaptive_avg_pool2d(image_burst,
                                            (self.patch_size, self.patch_size))

        if not self.color:
            # ITU-R BT.601 luma weights for grayscale conversion.
            image_burst = (0.2989 * image_burst[:, 0, ...]
                           + 0.5870 * image_burst[:, 1, ...]
                           + 0.1140 * image_burst[:, 2, ...])
            image_burst = torch.clamp(image_burst, 0.0, 1.0)

        if self.degamma:
            # BUG FIX: the return value was previously discarded, making
            # degamma a silent no-op — UndosRGBGamma does not modify its
            # input in place. Assign the result back.
            image_burst = UndosRGBGamma(image_burst)

        if self.train:
            # Data augmentation: random horizontal/vertical flips.
            image_burst = self.horizontal_flip(image_burst)
            image_burst = self.vertical_flip(image_burst)

        # Target: the first (non-shifted) frame of the burst.
        gt = image_burst[0, ...]

        # Random white level 10**u with u ~ Uniform[-1, 0], i.e. a
        # brightness scale in [0.1, 1] per the paper; predictions are later
        # divided by it to restore brightness.
        white_level = torch.from_numpy(
            np.power(10, -np.random.rand(1, 1, 1))).type_as(image_burst)
        image_burst = white_level * image_burst

        # gray image
        if not self.color:
            # Random read and shot noise levels (log-uniform).
            sigma_read = torch.from_numpy(
                np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1)))).type_as(
                image_burst)
            sigma_shot = torch.from_numpy(
                np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1)))).type_as(
                image_burst)

            sigma_read_com = sigma_read.expand_as(image_burst)
            sigma_shot_com = sigma_shot.expand_as(image_burst)

            # Noise model from the paper: var = read^2 + shot * signal.
            burst_noise = torch.normal(image_burst, torch.sqrt(
                sigma_read_com ** 2 + image_burst * sigma_shot_com)).type_as(
                image_burst)

            # Clamp the noisy burst back to [0, 1].
            burst_noise = torch.clamp(burst_noise, 0.0, 1.0)

            if not self.blind:
                # Non-blind: estimate the per-pixel noise std from the first
                # noisy frame and append it as an extra channel (shape H*W).
                sigma_read_est = sigma_read.view(1, 1).expand_as(gt)
                sigma_shot_est = sigma_shot.view(1, 1).expand_as(gt)
                sigma_estimate = torch.sqrt(
                    sigma_read_est ** 2 + sigma_shot_est.mul(
                        torch.max(torch.stack([burst_noise[0, ...],
                                               torch.zeros_like(
                                                   burst_noise[0, ...])],
                                              dim=0), dim=0)[0]))

                # Concatenate the noise estimate to the burst.
                burst_noise = torch.cat(
                    [burst_noise, sigma_estimate.unsqueeze(0)], dim=0)

            # The clean first frame is used as the target.
            return burst_noise, gt, white_level
        # color image
        else:
            # Random read and shot noise levels (log-uniform), broadcastable
            # over the 4D (burst, C, H, W) tensor.
            sigma_read = torch.from_numpy(
                np.power(10,
                         np.random.uniform(-3.0, -1.5, (1, 1, 1, 1)))).type_as(
                image_burst)
            sigma_shot = torch.from_numpy(
                np.power(10,
                         np.random.uniform(-4.0, -2.0, (1, 1, 1, 1)))).type_as(
                image_burst)

            sigma_read_com = sigma_read.expand_as(image_burst)
            sigma_shot_com = sigma_shot.expand_as(image_burst)

            # Noise model from the paper: var = read^2 + shot * signal.
            burst_noise = torch.normal(image_burst,
                                       torch.sqrt(
                                           sigma_read_com ** 2 + image_burst * sigma_shot_com)).type_as(
                image_burst)

            # Clamp the noisy burst back to [0, 1].
            burst_noise = torch.clamp(burst_noise, 0.0, 1.0)

            if not self.blind:
                # Non-blind: estimate the per-pixel noise std from the first
                # noisy frame and append it as an extra frame (C*H*W).
                sigma_read_est = sigma_read.view(1, 1, 1).expand_as(gt)
                sigma_shot_est = sigma_shot.view(1, 1, 1).expand_as(gt)
                sigma_estimate = torch.sqrt(
                    sigma_read_est ** 2 + sigma_shot_est.mul(
                        torch.max(torch.stack([burst_noise[0, ...],
                                               torch.zeros_like(
                                                   burst_noise[0, ...])],
                                              dim=0), dim=0)[0]))

                # Concatenate the noise estimate to the burst.
                burst_noise = torch.cat(
                    [burst_noise, sigma_estimate.unsqueeze(0)], dim=0)

            white_level = white_level.unsqueeze(0)
            return burst_noise, gt, white_level

    def __len__(self):
        return len(self.images)
304 |
305 |
if __name__ == '__main__':
    # Quick manual smoke test of the horizontal flip transform.
    flipper = Random_Horizontal_Flip(0.5)
    sample = torch.randint(0, 10, (2, 2))
    print(sample, flipper(sample))
320 |
--------------------------------------------------------------------------------
/kpn_specs/configspec.conf:
--------------------------------------------------------------------------------
1 | exp_name = string
2 |
3 | [training]
4 | image_width = integer
5 | image_height = integer
6 | continue_train = boolean
7 | num_epochs = integer(default=100)
8 |
9 | use_cache = boolean(default=False)
10 | cache_dir = string(default=None)
11 |
12 | # IO
13 | dataset_configs = string(default=None)
14 | checkpoint_dir = string(default=None)
15 | logs_dir = string(default=None)
16 | eval_dir = string(default=None)
17 | restore_dir = string(default=None)
18 | save_freq = integer(default=6250)
19 | vis_freq = integer(default=100)
20 | ckpt_to_keep = integer(default=5)
21 |
22 | # Learning
23 | decay_steps = integer(default=3000)
24 | batch_size = integer(default=8)
25 | learning_rate = float(default=0.0002)
26 | lr_decay = float(default=0.97)
27 | beta1 = float(default=0.9)
28 | beta2 = float(default=0.999)
29 | n_loss_average = integer(default=1000)
30 | weight_decay = float(default=5e-4)
31 | optimizer = string(default="adam")
32 |
33 | [architecture]
34 | arch = string
35 | #
36 | kernel_size = string
37 | sep_conv = boolean(default=False)
38 | blind_est = boolean(default=False)
39 | # Spatial attention
40 | spatial_att = boolean(default=False)
41 | # Channel-wise attention
42 | channel_att = boolean(default=False)
43 | #
44 | alpha = float(default=0.9998)
45 | beta = float(default=100.0)
46 | #
47 | upMode = string(default=bilinear)
48 | #
49 | core_bias = boolean(default=False)
50 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/kpn_specs/kpn_config.conf:
--------------------------------------------------------------------------------
1 | exp_name = kpn
2 | root_exp_dir = ./models/
3 | exp_dir = %(root_exp_dir)s/%(exp_name)s
4 |
5 | [training]
6 | image_width = 128
7 | image_height = 128
8 | continue_train = False
9 | num_epochs = 500
10 |
11 | use_cache = True
12 | cache_dir = ../dataset/synthetic
13 |
14 | # IO
15 | dataset_configs = dataset_specs/full_dataset.conf
16 | checkpoint_dir = %(exp_dir)s/checkpoint
17 | logs_dir = %(exp_dir)s/logs
18 | eval_dir = %(exp_dir)s/eval_images
19 | save_freq = 200
20 | ckpt_to_keep = 20
21 |
22 | # Learning
23 | decay_steps = 1
24 | batch_size = 16
25 | lr_decay = 0.89125093813 # (10^-0.05)
26 | learning_rate = 2e-4
27 | weight_decay = 1e-4
28 | optimizer = adam
29 |
30 | [architecture]
31 | arch = "kpn"
32 |
33 | kernel_size = 5
34 | sep_conv = False
35 | blind_est = False
36 | alpha = 0.9998
37 | beta = 100.0
38 |
39 | upMode = bilinear
40 | core_bias = False
41 |
42 |
--------------------------------------------------------------------------------
/models/kpn/checkpoint/README.md:
--------------------------------------------------------------------------------
1 | Due to GitHub's bandwidth limitations, the pretrained model has been uploaded to Google Drive at [https://drive.google.com/open?id=1Xnpllr1dinAU7BIN21L3LkEP5AqMNWso](https://drive.google.com/open?id=1Xnpllr1dinAU7BIN21L3LkEP5AqMNWso).
2 |
--------------------------------------------------------------------------------
/train_eval_syn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.optim as optim
3 | from torch.optim import lr_scheduler
4 | import torch.nn as nn
5 | from torch.utils.data import DataLoader
6 |
7 | import numpy as np
8 | import argparse
9 |
10 | import os, sys, time, shutil
11 |
12 | from data_provider import OnTheFlyDataset, _configspec_path
13 | from kpn_data_provider import TrainDataSet, UndosRGBGamma, sRGBGamma
14 | from KPN import KPN, LossFunc
15 | from utils.training_util import MovingAverage, save_checkpoint, load_checkpoint, read_config
16 | from utils.training_util import calculate_psnr, calculate_ssim
17 |
18 | from tensorboardX import SummaryWriter
19 | from PIL import Image
20 | from torchvision.transforms import transforms
21 |
def train(config, num_workers, num_threads, cuda, restart_train, mGPU):
    """Train the KPN model on the synthetic burst dataset.

    Args:
        config: parsed config with 'training' and 'architecture' sections.
        num_workers: number of DataLoader workers.
        num_threads: intended torch thread count (currently unused; the
            set_num_threads call is kept commented out, as in the original).
        cuda: move model and batches to the GPU when True.
        restart_train: start from scratch instead of resuming 'best'.
        mGPU: wrap the model in nn.DataParallel.
    """
    # torch.set_num_threads(num_threads)

    train_config = config['training']
    arch_config = config['architecture']

    batch_size = train_config['batch_size']
    lr = train_config['learning_rate']
    weight_decay = train_config['weight_decay']
    lr_decay = train_config['lr_decay']
    n_epoch = train_config['num_epochs']
    # NOTE(review): config keys 'decay_steps' and 'use_cache' were read but
    # never used by the original code; the scheduler below hard-codes
    # step_size=10. Confirm whether 'decay_steps' should drive it.

    print('Configs:', config)
    # Checkpoint directory.
    checkpoint_dir = train_config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Logs directory.
    logs_dir = train_config['logs_dir']
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    # Usually kept commented so resuming from a checkpoint does not delete
    # the previous logs.
    # shutil.rmtree(logs_dir)
    log_writer = SummaryWriter(logs_dir)

    dataset_config = \
        read_config(train_config['dataset_configs'], _configspec_path())[
            'dataset_configs']
    # Dataset and dataloader.
    data_set = TrainDataSet(
        train_config['dataset_configs'],
        img_format='.bmp',
        degamma=True,
        color=dataset_config['color'],
        blind=arch_config['blind_est']
    )
    data_loader = DataLoader(
        data_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers
    )

    # Model.
    model = KPN(
        color=dataset_config['color'],
        burst_length=dataset_config['burst_length'],
        blind_est=arch_config['blind_est'],
        kernel_size=list(map(int, arch_config['kernel_size'].split())),
        sep_conv=arch_config['sep_conv'],
        channel_att=arch_config['channel_att'],
        spatial_att=arch_config['spatial_att'],
        upMode=arch_config['upMode'],
        core_bias=arch_config['core_bias']
    )
    if cuda:
        model = model.cuda()

    if mGPU:
        model = nn.DataParallel(model)
    model.train()

    # Loss: basic + annealed per-frame loss from the KPN paper.
    loss_func = LossFunc(
        coeff_basic=1.0,
        coeff_anneal=1.0,
        gradient_L1=True,
        alpha=arch_config['alpha'],
        beta=arch_config['beta']
    )

    # Optimizer.
    if train_config['optimizer'] == 'adam':
        # NOTE(review): weight_decay from the config is only applied to SGD;
        # Adam is created without it. Confirm this is intentional before
        # changing training behavior.
        optimizer = optim.Adam(
            model.parameters(),
            lr=lr
        )
    elif train_config['optimizer'] == 'sgd':
        optimizer = optim.SGD(
            model.parameters(),
            lr=lr,
            momentum=0.9,
            weight_decay=weight_decay
        )
    else:
        raise ValueError("Optimizer must be 'sgd' or 'adam', but received {}.".format(train_config['optimizer']))
    optimizer.zero_grad()

    # Learning rate scheduler.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=lr_decay)

    average_loss = MovingAverage(train_config['save_freq'])
    if not restart_train:
        try:
            checkpoint = load_checkpoint(checkpoint_dir, 'best')
            start_epoch = checkpoint['epoch']
            global_step = checkpoint['global_iter']
            best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['lr_scheduler'])
            print('=> loaded checkpoint (epoch {}, global_step {})'.format(start_epoch, global_step))
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Any failure to load a checkpoint
            # means training starts from scratch.
            start_epoch = 0
            global_step = 0
            best_loss = np.inf
            print('=> no checkpoint file to be loaded.')
    else:
        start_epoch = 0
        global_step = 0
        best_loss = np.inf
        if not os.path.exists(checkpoint_dir):
            os.mkdir(checkpoint_dir)
        print('=> training')

    burst_length = dataset_config['burst_length']

    for epoch in range(start_epoch, n_epoch):
        epoch_start_time = time.time()
        # Decay the learning rate, with a floor of 5e-6.
        lr_cur = [param['lr'] for param in optimizer.param_groups]
        if lr_cur[0] > 5e-6:
            scheduler.step()
        else:
            for param in optimizer.param_groups:
                param['lr'] = 5e-6
        print('='*20, 'lr={}'.format([param['lr'] for param in optimizer.param_groups]), '='*20)
        t1 = time.time()
        for step, (burst_noise, gt, white_level) in enumerate(data_loader):
            if cuda:
                burst_noise = burst_noise.cuda()
                gt = gt.cuda()
                white_level = white_level.cuda()

            # Forward pass. For color data the (burst, C) axes are flattened
            # into one channel axis before entering the network.
            pred_i, pred = model(torch.flatten(burst_noise, 1, 2) if dataset_config['color'] else burst_noise, burst_noise[:, 0:burst_length, ...], white_level)

            # Losses are computed in gamma (display) space.
            loss_basic, loss_anneal = loss_func(sRGBGamma(pred_i), sRGBGamma(pred), sRGBGamma(gt), global_step)
            loss = loss_basic + loss_anneal
            # Backward pass.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Update the running average of the loss.
            average_loss.update(loss)
            # Metrics; grayscale tensors get a channel axis first.
            psnr = calculate_psnr(pred if dataset_config['color'] else pred.unsqueeze(1), gt if dataset_config['color'] else gt.unsqueeze(1))
            ssim = calculate_ssim(pred if dataset_config['color'] else pred.unsqueeze(1), gt if dataset_config['color'] else gt.unsqueeze(1))

            # TensorBoard scalars.
            log_writer.add_scalar('loss_basic', loss_basic, global_step)
            log_writer.add_scalar('loss_anneal', loss_anneal, global_step)
            log_writer.add_scalar('loss_total', loss, global_step)
            log_writer.add_scalar('psnr', psnr, global_step)
            log_writer.add_scalar('ssim', ssim, global_step)

            # Console progress line.
            print('{:-4d}\t| epoch {:2d}\t| step {:4d}\t| loss_basic: {:.4f}\t| loss_anneal: {:.4f}\t|'
                  ' loss: {:.4f}\t| PSNR: {:.2f}dB\t| SSIM: {:.4f}\t| time:{:.2f} seconds.'
                  .format(global_step, epoch, step, loss_basic, loss_anneal, loss, psnr, ssim, time.time()-t1))
            t1 = time.time()
            global_step += 1

            if global_step % train_config['save_freq'] == 0:
                # Track the best (lowest) running-average loss.
                is_best = average_loss.get_value() < best_loss
                if is_best:
                    best_loss = average_loss.get_value()

                save_dict = {
                    'epoch': epoch,
                    'global_iter': global_step,
                    'state_dict': model.state_dict(),
                    'best_loss': best_loss,
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': scheduler.state_dict()
                }
                save_checkpoint(
                    save_dict, is_best, checkpoint_dir, global_step, max_keep=train_config['ckpt_to_keep']
                )

        print('Epoch {} is finished, time elapsed {:.2f} seconds.'.format(epoch, time.time()-epoch_start_time))
219 |
220 |
def eval(config, args):
    """Evaluate a trained KPN checkpoint; save PNGs and report PSNR/SSIM.

    Note: the function name shadows the builtin `eval`; it is kept for
    backward compatibility with existing callers.

    Args:
        config: parsed config with 'training' and 'architecture' sections.
        args: parsed CLI namespace (cuda, mGPU, num_workers, checkpoint).
    """
    train_config = config['training']
    arch_config = config['architecture']

    print('Eval Process......')

    checkpoint_dir = train_config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir) or len(os.listdir(checkpoint_dir)) == 0:
        print('There is no any checkpoint file in path:{}'.format(checkpoint_dir))
        # BUG FIX: the original only printed and continued, then crashed
        # later inside load_checkpoint; bail out early instead.
        return
    # Directory for the rendered eval images; cleared on every run.
    eval_dir = train_config['eval_dir']
    if not os.path.exists(eval_dir):
        os.mkdir(eval_dir)
    files = os.listdir(eval_dir)
    for f in files:
        os.remove(os.path.join(eval_dir, f))

    dataset_config = \
        read_config(train_config['dataset_configs'], _configspec_path())[
            'dataset_configs']

    # Fix the random seeds so crops and synthetic noise are reproducible.
    # BUG FIX: the original called `setup_seed(42)`, which is neither
    # defined nor imported in this module (NameError); seed explicitly.
    torch.manual_seed(42)
    np.random.seed(42)

    # Dataset and dataloader (eval mode: no flips, deterministic order).
    data_set = TrainDataSet(
        train_config['dataset_configs'],
        img_format='.bmp',
        degamma=True,
        color=dataset_config['color'],
        blind=arch_config['blind_est'],
        train=False
    )
    data_loader = DataLoader(
        data_set,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers
    )

    # Model.
    model = KPN(
        color=dataset_config['color'],
        burst_length=dataset_config['burst_length'],
        blind_est=arch_config['blind_est'],
        kernel_size=list(map(int, arch_config['kernel_size'].split())),
        sep_conv=arch_config['sep_conv'],
        channel_att=arch_config['channel_att'],
        spatial_att=arch_config['spatial_att'],
        upMode=arch_config['upMode'],
        core_bias=arch_config['core_bias']
    )
    if args.cuda:
        model = model.cuda()

    if args.mGPU:
        model = nn.DataParallel(model)
    # Load the trained weights.
    ckpt = load_checkpoint(checkpoint_dir, args.checkpoint)
    model.load_state_dict(ckpt['state_dict'])
    print('The model has been loaded from epoch {}, n_iter {}.'.format(ckpt['epoch'], ckpt['global_iter']))
    # Switch to eval mode.
    model.eval()

    burst_length = dataset_config['burst_length']

    trans = transforms.ToPILImage()

    with torch.no_grad():
        psnr = []
        ssim = []
        test_num = 0
        for i, (burst_noise, gt, white_level) in enumerate(data_loader):
            if args.cuda:
                burst_noise = burst_noise.cuda()
                gt = gt.cuda()
                white_level = white_level.cuda()

            pred_i, pred = model(torch.flatten(burst_noise, 1, 2) if dataset_config['color'] else burst_noise, burst_noise[:, 0:burst_length, ...], white_level)

            # Compare in gamma (display) space. pred_i is not visualized,
            # so no gamma is applied to it.
            pred = sRGBGamma(pred)
            gt = sRGBGamma(gt)
            burst_noise = sRGBGamma(burst_noise / white_level)

            psnr_t = calculate_psnr(pred, gt if dataset_config['color'] else gt.unsqueeze(1))
            ssim_t = calculate_ssim(pred, gt if dataset_config['color'] else gt.unsqueeze(1))
            psnr_noisy = calculate_psnr(burst_noise[:, 0, ...] if dataset_config['color'] else burst_noise[:, 0, ...].unsqueeze(1), gt if dataset_config['color'] else gt.unsqueeze(1))
            ssim_noisy = calculate_ssim(burst_noise[:, 0, ...] if dataset_config['color'] else burst_noise[:, 0, ...].unsqueeze(1), gt if dataset_config['color'] else gt.unsqueeze(1))
            psnr.append(psnr_t)
            ssim.append(ssim_t)

            pred = torch.clamp(pred, 0.0, 1.0)

            if args.cuda:
                pred = pred.cpu()
                gt = gt.cpu()
                burst_noise = burst_noise.cpu()

            # BUG FIX: slice the reference noisy frame AFTER the tensors are
            # moved to the CPU; previously it stayed on the GPU and
            # ToPILImage failed for CUDA tensors.
            noise = torch.clamp(burst_noise[0, 0, ...], 0.0, 1.0)

            trans(noise.squeeze()).save(os.path.join(eval_dir, '{}_noisy_{:.2f}dB_{:.3f}.png'.format(i, psnr_noisy, ssim_noisy)), quality=100)
            trans(pred.squeeze()).save(os.path.join(eval_dir, '{}_pred_{:.2f}dB_{:.3f}.png'.format(i, psnr_t, ssim_t)), quality=100)
            trans(gt.squeeze()).save(os.path.join(eval_dir, '{}_gt.png'.format(i)), quality=100)

            print('{}-th image is OK, with PSNR: {:.2f}dB, SSIM: {:.3f}'.format(i, psnr_t, ssim_t))
            test_num += 1

        psnr_mean, psnr_std = np.mean(psnr), np.std(psnr)
        ssim_mean, ssim_std = np.mean(ssim), np.std(ssim)
        print('All {} images are OK, average PSNR: {:.2f} ± {:.2f}dB, SSIM: {:.3f} ± {:.3f}'.format(test_num, psnr_mean, psnr_std, ssim_mean, ssim_std))
        print('({:.2f}±{:.2f}dB)-({:.3f}±{:.3f}))'.format(psnr_mean, psnr_std, ssim_mean, ssim_std))
339 |
340 |
if __name__ == '__main__':
    # Command-line interface for training/evaluating the KPN model.
    parser = argparse.ArgumentParser(description='parameters for training')
    parser.add_argument('--config_file', dest='config_file', default='kpn_specs/kpn_config.conf', help='path to config file')
    parser.add_argument('--config_spec', dest='config_spec', default='kpn_specs/configspec.conf', help='path to config spec file')
    parser.add_argument('--restart', action='store_true', help='Whether to remove all old files and restart the training process')
    parser.add_argument('--num_workers', '-nw', default=4, type=int, help='number of workers in data loader')
    parser.add_argument('--num_threads', '-nt', default=8, type=int, help='number of threads in data loader')
    parser.add_argument('--cuda', '-c', action='store_true', help='whether to train on the GPU')
    parser.add_argument('--mGPU', '-m', action='store_true', help='whether to train on multiple GPUs')
    parser.add_argument('--eval', action='store_true', help='whether to work on the evaluation mode')
    parser.add_argument('--checkpoint', '-ckpt', dest='checkpoint', type=str, default='best',
                        help='the checkpoint to eval')
    args = parser.parse_args()

    # Echo the parsed arguments.
    for key, value in vars(args).items():
        print(key + ": " + str(value))

    config = read_config(args.config_file, args.config_spec)
    if args.eval:
        eval(config, args)
    else:
        train(config, args.num_workers, args.num_threads, args.cuda,
              args.restart, args.mGPU)
364 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import inspect, os
3 |
# Make both the repository root and this package's own directory importable,
# regardless of where the interpreter was started from.
_pkg_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.insert(0, os.path.join(_pkg_dir, '..'))
sys.path.insert(0, os.path.join(_pkg_dir))
9 |
--------------------------------------------------------------------------------
/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
def center_crop_tensor(tensor, w, h):
    """Crop the spatial center of *tensor* to width *w* and height *h*.

    Operates on the last two dimensions, so any number of leading
    (batch/channel) dimensions is supported.

    Raises:
        RuntimeError: if the requested crop exceeds the tensor size.
    """
    width = tensor.size(-1)
    height = tensor.size(-2)
    if width < w or height < h:
        raise RuntimeError("Crop size is larger than image size.")
    top = (height - h) // 2
    left = (width - w) // 2
    return tensor[..., top:top + h, left:left + w]
15 |
16 |
def bayer_crop_tensor(tensor, w, h, mode="random"):
    """Crop to (h, w) while preserving the Bayer phase.

    The crop origin is snapped to even coordinates so the 2x2 Bayer
    mosaic pattern stays aligned after cropping.

    Args:
        tensor: image tensor; crop acts on the last two dimensions.
        w, h: crop width and height.
        mode: 'random' picks a random origin, 'center' a centered one.
    Raises:
        RuntimeError: if the crop is larger than the image.
        ValueError: on an unrecognized mode.
    """
    width = tensor.size(-1)
    height = tensor.size(-2)
    if width < w or height < h:
        raise RuntimeError("Crop size ({}) is larger than image size ({})." \
            .format((w, h), (width, height)))
    if mode == "random":
        top = np.random.choice(height + 1 - h)
        left = np.random.choice(width + 1 - w)
    elif mode == "center":
        top = (height - h) // 2
        left = (width - w) // 2
    else:
        raise ValueError("Bayer crop: unrecognized mode ({}). Must be 'random' or 'center'.".format(mode))
    # Snap the origin down to even indices to keep the Bayer phase.
    top -= top % 2
    left -= left % 2
    return tensor[..., top:top + h, left:left + w]
38 |
39 |
def random_crop_tensor(tensor, w, h):
    """Crop a random (h, w) window from the last two dimensions.

    Bug fix: the old upper bound ``np.random.randint(th - h)`` is
    exclusive, so requesting a crop exactly the image size raised
    ``ValueError`` (empty range) instead of returning the full image.
    Use an inclusive bound (``+ 1``), matching the behavior of
    ``np.random.choice(th + 1 - h)`` in ``bayer_crop_tensor``.

    Raises:
        RuntimeError: if the requested crop exceeds the tensor size.
    """
    tw = tensor.size(-1)
    th = tensor.size(-2)
    if tw < w or th < h:
        raise RuntimeError("Crop size is larger than image size.")
    h0 = np.random.randint(th - h + 1)
    w0 = np.random.randint(tw - w + 1)
    h1 = h0 + h
    w1 = w0 + w
    return tensor[..., h0:h1, w0:w1]
50 |
51 |
def check_nan_tensor(x):
    """Return a 0-dim bool tensor that is True iff *x* contains any NaN."""
    return torch.any(torch.isnan(x))
54 |
--------------------------------------------------------------------------------
/utils/training_util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import glob
3 | import torch
4 | import shutil
5 | import os
6 | import cv2
7 | import numbers
8 | import skimage
9 | import random
10 | from collections import OrderedDict
11 | from configobj import ConfigObj
12 | from validate import Validator
13 | from data_generation.pipeline import ImageDegradationPipeline
14 |
15 |
class MovingAverage(object):
    """Running mean over a sliding window of the last *n* values."""

    def __init__(self, n):
        # Window size: only the most recent ``n`` updates contribute.
        self.n = n
        self._cache = []
        self.mean = 0

    def update(self, val):
        """Push a new value and recompute the windowed mean."""
        self._cache.append(val)
        if len(self._cache) > self.n:
            # Drop the oldest sample to keep the window at size n.
            self._cache.pop(0)
        self.mean = sum(self._cache) / len(self._cache)

    def get_value(self):
        """Return the current mean of the window."""
        return self.mean
30 |
31 |
def save_checkpoint(state, is_best, checkpoint_dir, n_iter, max_keep=10):
    """Save *state* as an iteration-numbered checkpoint and prune old ones.

    Writes ``{n_iter:07d}.pth.tar``; when *is_best*, also copies it to
    ``model_best.pth.tar``.  Keeps at most *max_keep* numbered
    checkpoints, deleting the oldest.

    Bug fix: the old pruning logic listed the whole directory, so
    ``model_best.pth.tar`` (and any stray file) counted toward
    *max_keep*, silently keeping one fewer numbered checkpoint than
    requested.  Prune only files that match the numbered-checkpoint
    naming scheme.
    """
    filename = os.path.join(checkpoint_dir, "{:07d}.pth.tar".format(n_iter))
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename,
                        os.path.join(checkpoint_dir,
                                     'model_best.pth.tar'))
    # Only iteration-numbered checkpoints participate in pruning;
    # 'model_best.pth.tar' is never counted nor deleted.
    numbered = sorted(f for f in os.listdir(checkpoint_dir)
                      if f.endswith('.pth.tar') and f.split('.')[0].isdigit())
    for f in numbered[:max(0, len(numbered) - max_keep)]:
        os.remove(os.path.join(checkpoint_dir, f))
43 |
44 | def _represent_int(s):
45 | try:
46 | int(s)
47 | return True
48 | except ValueError:
49 | return False
50 |
51 |
def load_checkpoint(checkpoint_dir, best_or_latest='best'):
    """Load a checkpoint dict saved by ``save_checkpoint``.

    Args:
        checkpoint_dir: directory containing ``*.pth.tar`` files.
        best_or_latest: 'best' loads ``model_best.pth.tar``; a digit
            string loads that exact iteration (raising ValueError
            listing the available iterations when absent); anything
            else loads the highest-numbered checkpoint.
    Returns:
        The object stored in the checkpoint (via ``torch.load``).
    """
    def _numbered_iters():
        # Iteration numbers present in the directory; skips names like
        # 'model_best' that don't parse as integers.
        found = []
        for f in glob.glob(os.path.join(checkpoint_dir, '*.pth.tar')):
            stem = os.path.basename(f).split('.')[0]
            try:
                found.append(int(stem))
            except ValueError:
                pass
        return sorted(found)

    if best_or_latest == 'best':
        checkpoint_file = os.path.join(checkpoint_dir, 'model_best.pth.tar')
    elif best_or_latest.isdigit():
        requested = int(best_or_latest)
        checkpoint_file = os.path.join(checkpoint_dir,
                                       '{:07d}.pth.tar'.format(requested))
        if not os.path.exists(checkpoint_file):
            raise ValueError('Available iterations are ({} requested): {}'.format(requested, _numbered_iters()))
    else:
        checkpoint_file = os.path.join(checkpoint_dir,
                                       '{:07d}.pth.tar'.format(_numbered_iters()[-1]))
    return torch.load(checkpoint_file)
71 |
72 |
def load_statedict_runtime(checkpoint_dir, best_or_latest='best'):
    """Fetch a state_dict usable outside ``nn.DataParallel``.

    Training wraps the model in ``nn.DataParallel``, which prefixes
    every weight name with 'module.'.  That prefix does not exist on a
    bare model at test time, so strip the first 7 characters of every
    key before returning.

    Returns:
        (state_dict, global_iter) tuple from the loaded checkpoint.
    """
    ckpt = load_checkpoint(checkpoint_dir, best_or_latest)
    stripped = OrderedDict(
        (key[7:], weight) for key, weight in ckpt['state_dict'].items())
    return stripped, ckpt['global_iter']
87 |
88 |
def prep_and_vis_flow(flow, flow_visualizer, max_flow=None):
    """Visualize the first flow field of a batch as a numpy array.

    Args:
        flow: batched flow tensor; only element 0 is visualized.
        flow_visualizer: callable mapping a flow tensor (and max_flow)
            to a visualization tensor.
        max_flow: optional normalization cap forwarded to the visualizer.
    """
    vis = flow_visualizer(flow[0, :, :, :], max_flow=max_flow)
    return vis.cpu().data.numpy()
93 |
94 |
def put_text_on_img(image, text, loc=(20, 100), color=(1, 0, 0)):
    """Render *text* onto a CHW image and return a new CHW array.

    Args:
        image: numpy array of dimension (3, h, w).
        text: text to put on.
        loc: bottom-left location of text in (x, y) from top-left of image.
        color: color of the text.
    Returns:
        Copy of the image with the text drawn on it.
    """
    # OpenCV expects HWC layout; copy so the caller's array is untouched.
    canvas = np.array(np.moveaxis(image, 0, -1)).copy()
    cv2.putText(canvas, text, loc, cv2.FONT_HERSHEY_SIMPLEX, 1, color)
    return np.moveaxis(canvas, -1, 0)
109 |
110 |
def read_config(config_file, config_spec):
    """Parse *config_file* against *config_spec* using configobj.

    A missing config file raises immediately (file_error=True).

    NOTE(review): the return value of ``validate`` is discarded, so
    spec violations pass silently — confirm whether failures should
    raise instead.
    """
    spec = ConfigObj(config_spec, raise_errors=True)
    parsed = ConfigObj(config_file,
                       configspec=spec,
                       raise_errors=True,
                       file_error=True)
    parsed.validate(Validator())
    return parsed
119 |
120 |
def torch2numpy(tensor, gamma=None):
    """Convert an NCHW tensor in [0, 1] to an NHWC numpy array in [0, 255].

    Args:
        tensor: batched image tensor; values are clamped to [0, 1] first.
        gamma: optional exponent applied before scaling (e.g. 1/2.2 for
            display gamma encoding).
    Returns:
        Float numpy array of shape (N, H, W, C) with values in [0, 255].
    """
    clamped = torch.clamp(tensor, 0.0, 1.0)
    if gamma is not None:
        clamped = torch.pow(clamped, gamma)
    # Scale to the 8-bit range and move channels last for image libraries.
    scaled = clamped * 255.0
    return scaled.permute(0, 2, 3, 1).cpu().data.numpy()
128 |
129 |
def prep_for_vis(degraded_img, target_img, output_img, exposure=None):
    """Prepare a (degraded, target, output) triplet for visualization.

    Optionally applies an exposure adjustment (clipped before and
    after), then gamma-encodes each image with 1/2.2 and quantizes to
    uint8 numpy arrays.

    Returns:
        Tuple of three uint8 NHWC numpy arrays.
    """
    if exposure is not None:
        configs = [
            ('PixelClip', {}),
            ('ExposureAdjustment', {'nstops': exposure}),
            ('PixelClip', {}),
        ]
        # Build a fresh pipeline per image, mirroring the original code.
        degraded_img = ImageDegradationPipeline(configs)(degraded_img)
        target_img = ImageDegradationPipeline(configs)(target_img)
        output_img = ImageDegradationPipeline(configs)(output_img)
    # Gamma encode for illustration purposes and convert to uint8.
    degraded_tf = torch2numpy(degraded_img, 1.0 / 2.2).astype('uint8')
    target_tf = torch2numpy(target_img, 1.0 / 2.2).astype('uint8')
    output_tf = torch2numpy(output_img, 1.0 / 2.2).astype('uint8')
    return degraded_tf, target_tf, output_tf
147 |
148 |
def prep_for_vis_arr(img_arr, exposure=None):
    """Prepare a list of images for visualization.

    Same processing as ``prep_for_vis`` but over an arbitrary-length
    list: optional exposure adjustment, then 1/2.2 gamma encoding and
    uint8 quantization.
    """
    if exposure is not None:
        pipeline = ImageDegradationPipeline([
            ('PixelClip', {}),
            ('ExposureAdjustment', {'nstops': exposure}),
            ('PixelClip', {}),
        ])
        img_arr = [pipeline(im) for im in img_arr]
    return [torch2numpy(im, 1.0 / 2.2).astype('uint8') for im in img_arr]
160 |
161 |
def create_vis_arr(img_arr, exposure=None):
    """Concatenate prepared images along the width axis into one strip."""
    prepared = prep_for_vis_arr(img_arr, exposure)
    return np.concatenate(prepared, axis=-2)
165 |
166 |
def create_vis(degraded_img, target_img, output_img, exposure=None):
    """Build a side-by-side [degraded | target | output] uint8 strip.

    Bug fix: ``exposure`` was accepted but never forwarded to
    ``prep_for_vis``, so the exposure adjustment was silently skipped.
    It is now passed through (default None keeps old behavior).
    """
    degraded_tf, target_tf, output_tf = prep_for_vis(degraded_img,
                                                     target_img,
                                                     output_img,
                                                     exposure=exposure)
    # Stack along the width axis for a single comparison image.
    img = np.concatenate((degraded_tf,
                          target_tf,
                          output_tf),
                         axis=-2)
    return img
176 |
177 |
def calculate_psnr(output_img, target_img):
    """Mean PSNR (dB) over a batch of image tensors.

    ``torch2numpy`` returns arrays scaled to [0, 255]; rescale to
    [0, 1] so the values actually match ``data_range=1``.

    Bug fix: the old code called ``normalize(...)``, a name that is
    neither defined nor imported in this module, so this function
    raised NameError at runtime.  The explicit /255.0 rescale replaces
    it; if ``normalize`` was meant to do per-image min-max scaling
    instead, adjust accordingly.
    """
    target_tf = torch2numpy(target_img) / 255.0
    output_tf = torch2numpy(output_img) / 255.0
    psnr = 0.0
    n = 0.0
    for im_idx in range(output_tf.shape[0]):
        psnr += skimage.metrics.peak_signal_noise_ratio(
            target_tf[im_idx, ...],
            output_tf[im_idx, ...],
            data_range=1.)
        n += 1.0
    return psnr / n
192 |
193 |
def calculate_ssim(output_img, target_img):
    """Mean SSIM over a batch of image tensors.

    Same [0, 1] rescaling rationale as ``calculate_psnr``: the
    ``torch2numpy`` output is in [0, 255] while ``data_range=1``.

    Bug fix: the old code called an undefined ``normalize`` helper,
    raising NameError at runtime; replaced with an explicit /255.0
    rescale (verify against the intended normalization).
    """
    target_tf = torch2numpy(target_img) / 255.0
    output_tf = torch2numpy(output_img) / 255.0
    ssim = 0.0
    n = 0.0
    for im_idx in range(output_tf.shape[0]):
        ssim += skimage.metrics.structural_similarity(
            target_tf[im_idx, ...],
            output_tf[im_idx, ...],
            channel_axis=2,
            K1=0.01, K2=0.03, sigma=1.5,
            data_range=1.)
        n += 1.0
    return ssim / n
207 |
--------------------------------------------------------------------------------