├── Figs
├── 23_table.PNG
├── 3x.PNG
├── 48_table.PNG
├── 4x.PNG
├── 8x.PNG
├── BD_table.PNG
├── BSDNoisy.PNG
├── Figures
├── LapAtt.PNG
├── Net.PNG
├── front.PNG
├── lamaNoisy.PNG
├── noiseplot.PNG
└── real.PNG
├── LICENSE
├── README.md
└── TestCode
├── HR
└── Set5
│ └── x2
│ ├── baby_HR_x2.png
│ ├── bird_HR_x2.png
│ ├── butterfly_HR_x2.png
│ ├── head_HR_x2.png
│ └── woman_HR_x2.png
├── LR
├── LRBD
│ └── Set5
│ │ └── x3
│ │ ├── Del.txt
│ │ ├── baby_LRBD_x3.png
│ │ ├── bird_LRBD_x3.png
│ │ ├── butterfly_LRBD_x3.png
│ │ ├── head_LRBD_x3.png
│ │ └── woman_LRBD_x3.png
└── LRBI
│ └── Set5
│ └── x2
│ ├── Del.txt
│ ├── baby_LRBI_x2.png
│ ├── bird_LRBI_x2.png
│ ├── butterfly_LRBI_x2.png
│ ├── head_LRBI_x2.png
│ └── woman_LRBI_x2.png
├── ReadMe.txt
├── TrainedModels
└── ReadMe.txt
└── code
├── TestDRLN_2x.sh
├── TestDRLN_3x.sh
├── TestDRLN_3x_BD.sh
├── TestDRLN_4x.sh
├── TestDRLN_8x.sh
├── TestDRLN_All.sh
├── __init__.py
├── data
├── __init__.py
├── benchmark.py
├── common.py
├── demo.py
├── div2k.py
├── myimage.py
└── srdata.py
├── dataloader.py
├── loss
├── __init__.py
├── adversarial.py
├── discriminator.py
└── vgg.py
├── main.py
├── model
├── __init__.py
├── common.py
├── drln.py
└── ops.py
├── option.py
├── template.py
├── trainer.py
└── utility.py
/Figs/23_table.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/23_table.PNG
--------------------------------------------------------------------------------
/Figs/3x.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/3x.PNG
--------------------------------------------------------------------------------
/Figs/48_table.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/48_table.PNG
--------------------------------------------------------------------------------
/Figs/4x.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/4x.PNG
--------------------------------------------------------------------------------
/Figs/8x.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/8x.PNG
--------------------------------------------------------------------------------
/Figs/BD_table.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/BD_table.PNG
--------------------------------------------------------------------------------
/Figs/BSDNoisy.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/BSDNoisy.PNG
--------------------------------------------------------------------------------
/Figs/Figures:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Figs/LapAtt.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/LapAtt.PNG
--------------------------------------------------------------------------------
/Figs/Net.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/Net.PNG
--------------------------------------------------------------------------------
/Figs/front.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/front.PNG
--------------------------------------------------------------------------------
/Figs/lamaNoisy.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/lamaNoisy.PNG
--------------------------------------------------------------------------------
/Figs/noiseplot.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/noiseplot.PNG
--------------------------------------------------------------------------------
/Figs/real.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/Figs/real.PNG
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Saeed
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Densely Residual Laplacian Super-resolution
2 | This repository is for Densely Residual Laplacian Network (DRLN) introduced in the following paper
3 |
4 | [Saeed Anwar](https://saeed-anwar.github.io/), Nick Barnes, "[Densely Residual Laplacian Super-resolution](https://www.computer.org/csdl/journal/tp/5555/01/09185010/1mNmSufHH8c)", IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. [arXiv version](https://arxiv.org/abs/1906.12021), and [Supplementary Materials](https://s3.amazonaws.com/ieeecs.cdn.csdl.public/trans/tp/5555/01/extras/ttp555501-09185010s1-supp1-3021088.pdf)
5 |
6 | The model is built in PyTorch 1.1.0 and tested on Ubuntu 14.04/16.04 environment (Python3.6, CUDA9.0, cuDNN5.1).
7 |
8 | Our DRLN is also available in PyTorch 0.4.0 and 0.4.1. You can download this version from [Google Drive](https://drive.google.com/open?id=1I91VGposSoFq6UrWBuxCKbst6sMi1yhR) or [here](https://icedrive.net/0/cb986Jh8rh).
9 |
10 | ## Contents
11 | 1. [Introduction](#introduction)
12 | 2. [Network](#network)
13 | 3. [Test](#test)
14 | 4. [Results](#results)
15 | 5. [Citation](#citation)
16 | 6. [Acknowledgements](#acknowledgements)
17 |
18 | ## Introduction
19 | Super-Resolution convolutional neural networks have recently demonstrated high-quality restoration for single images.
20 | However, existing algorithms often require very deep architectures and long training times. Furthermore, current convolutional neural
21 | networks for super-resolution are unable to exploit features at multiple scales and weigh them equally, limiting their learning capability. In this exposition, we present a compact and accurate super-resolution algorithm namely, Densely Residual Laplacian Network
22 | (DRLN). The proposed network employs cascading residual on the residual structure to allow the flow of low-frequency information to
23 | focus on learning high and mid-level features. In addition, deep supervision is achieved via the densely concatenated residual blocks
24 | settings, which also helps in learning from high-level complex features. Moreover, we propose Laplacian attention to model the crucial
25 | features to learn the inter and intra-level dependencies between the feature maps. Furthermore, comprehensive quantitative and
26 | qualitative evaluations on low-resolution, noisy low-resolution, and real historical image benchmark datasets illustrate that our DRLN
27 | algorithm performs favorably against the state-of-the-art methods visually and accurately.
28 |
29 | 
30 | Sample results on URBAN100 with Bicubic (BI) degradation for 4x on “img 074” and for 8x on “img 040”.
31 |
32 | ## Network
33 |
34 | 
35 | The architecture of our proposed densely residual Laplacian attention network (DRLN) with densely residual laplacian modules (DRLM).
36 | 
37 | Laplacian attention architecture.
38 |
39 |
40 | ## Test
41 | ### Quick start
42 | 1. Download the trained models for our paper and place them in '/TestCode/TrainedModels'.
43 |
44 | All the models (BIX2/3/4/8, BDX3) can be downloaded from [Google Drive](https://drive.google.com/open?id=1MwRNAcUOBcS0w6Q7gGNZWYO_AP_svi7i) or [Baidu](https://pan.baidu.com/s/1lAWlfQJHBJc3u9okpOL3lA) or [here](https://icedrive.net/0/a81sqSW91R). The total size for all models is 737MB.
45 |
46 | 2. Cd to '/TestCode/code', run the following scripts.
47 |
48 | **You can use the scripts in the file 'TestDRLN_All' to reproduce the results of our paper, or you can use individual scripts such as TestDRLN_2x.sh**
49 |
50 | ```bash
51 | # No self-ensemble: DRLN
52 | # BI degradation model, x2, x3
53 | # x2
54 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Set5' --testpath ../LR/LRBI --testset Set5
55 |
56 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Set14' --testpath ../LR/LRBI --testset Set14
57 | # x3
58 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Set5' --testpath ../LR/LRBI --testset Set5
59 |
60 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Set14' --testpath ../LR/LRBI --testset Set14
61 |
62 | # x3 Blur-downgrade
63 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_Set5' --testpath ../LR/LRBD --testset Set5
64 | ```
65 |
66 |
67 | ## Results
68 | **All the results for DRLN can be downloaded from [GoogleDrive](https://drive.google.com/open?id=1NJ20pHYolkzTBDB2UUy7pvY9F9sIyqO2) or [here](https://icedrive.net/0/bcATKQGntn). The size of the results is 2.41GB**
69 |
70 | ### Quantitative Results
71 | 
72 | 
73 | The performance of state-of-the-art algorithms on widely used publicly available five datasets (SET5, SET14, BSD100, URBAN100, MANGA109), in terms of PSNR (in dB) and SSIM. The best results are highlighted with red color while the blue color represents the second best super-resolution method.
74 |
75 |
76 | 
77 | Quantitative results on blur-down degradations for 3x. The best results are highlighted with red color while the blue color represents the second best.
78 |
79 |
80 |
81 |
82 |
83 |
84 | The plot shows the average PSNR as functions of noise sigma. Our method consistently improves over specific
85 | noisy super-resolution methods and CNN for all noise levels.
86 |
87 | ### Visual Results
88 | 
89 | Visual results with Bicubic (BI) degradation (4x) on "img_076" and "img_044" from URBAN100 as well as YumeiroCooking from MANGA109.
90 |
91 |
92 | 
93 | Comparisons on images with fine details for a high upsampling factor of 8x on URBAN100 and MANGA109. The best results are in bold.
94 |
95 | 
96 | Comparison on Blur-Downscale (BD) degraded images with sharp edges and texture, taken from URBAN100 and SET14
97 | datasets for the scale of 3x. The sharpness of the edges on the objects and textures restored by our method is the best.
98 |
99 | 
100 | Noisy SR visual Comparison on BSD100. Textures on the birds are much better reconstructed, and the noise removed by our method as
101 | compared to the IRCNN and RCAN for sigma = 10.
102 |
103 | 
104 | Noisy visual comparison on Llama. Textures on the fur, and on rocks in the background are much better reconstructed in our result as
105 | compared to the conventional BM3D-SR and BM3D-SRNI.
106 |
107 |
108 |
109 |
110 | Comparison on real-world images. In these cases, neither the downsampling blur kernels nor the ground-truth images are available.
111 |
112 | For more information, please refer to our [paper](https://arxiv.org/pdf/1906.12021.pdf)
113 |
114 | ## Citation
115 | If you find the code helpful in your research or work, please cite the following papers.
116 | ```
117 | @article{anwar2019drln,
118 | title={Densely Residual Laplacian Super-Resolution},
119 | author={Anwar, Saeed and Barnes, Nick},
120 | journal={IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
121 | year={2020}
122 | }
123 |
124 | @article{anwar2020deepSR,
125 | author = {Anwar, Saeed and Khan, Salman and Barnes, Nick},
126 | title = {A Deep Journey into Super-Resolution: A Survey},
127 | year = {2020},
128 | publisher = {Association for Computing Machinery},
129 | address = {New York, NY, USA},
130 | volume = {53},
131 | number = {3},
132 | issn = {0360-0300},
133 | journal = {ACM Computing Surveys},
134 | month = may,
135 | articleno = {60},
136 | numpages = {34},
137 | }
138 |
139 | ```
140 | ## Acknowledgements
141 | This code is built on [RCAN (PyTorch)](https://github.com/yulunzhang/RCAN) and [EDSR (PyTorch)](https://github.com/thstkdgus35/EDSR-PyTorch). We thank the authors for sharing their codes.
142 |
--------------------------------------------------------------------------------
/TestCode/HR/Set5/x2/baby_HR_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/HR/Set5/x2/baby_HR_x2.png
--------------------------------------------------------------------------------
/TestCode/HR/Set5/x2/bird_HR_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/HR/Set5/x2/bird_HR_x2.png
--------------------------------------------------------------------------------
/TestCode/HR/Set5/x2/butterfly_HR_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/HR/Set5/x2/butterfly_HR_x2.png
--------------------------------------------------------------------------------
/TestCode/HR/Set5/x2/head_HR_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/HR/Set5/x2/head_HR_x2.png
--------------------------------------------------------------------------------
/TestCode/HR/Set5/x2/woman_HR_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/HR/Set5/x2/woman_HR_x2.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/Del.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/baby_LRBD_x3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBD/Set5/x3/baby_LRBD_x3.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/bird_LRBD_x3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBD/Set5/x3/bird_LRBD_x3.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/butterfly_LRBD_x3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBD/Set5/x3/butterfly_LRBD_x3.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/head_LRBD_x3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBD/Set5/x3/head_LRBD_x3.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBD/Set5/x3/woman_LRBD_x3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBD/Set5/x3/woman_LRBD_x3.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/Del.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/baby_LRBI_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBI/Set5/x2/baby_LRBI_x2.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/bird_LRBI_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBI/Set5/x2/bird_LRBI_x2.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/butterfly_LRBI_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBI/Set5/x2/butterfly_LRBI_x2.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/head_LRBI_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBI/Set5/x2/head_LRBI_x2.png
--------------------------------------------------------------------------------
/TestCode/LR/LRBI/Set5/x2/woman_LRBI_x2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saeed-anwar/DRLN/e43027910551fe989630a36c84f090efd6e08ad0/TestCode/LR/LRBI/Set5/x2/woman_LRBI_x2.png
--------------------------------------------------------------------------------
/TestCode/ReadMe.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/TestCode/TrainedModels/ReadMe.txt:
--------------------------------------------------------------------------------
1 | Download the TrainedModels from here and replace this folder. All the models (BIX2/3/4/8, BDX3) can be downloaded from Google Drive (https://drive.google.com/open?id=1MwRNAcUOBcS0w6Q7gGNZWYO_AP_svi7i) or (https://icedrive.net/0/a81sqSW91R). The total size for all models is 737 MB.
2 |
3 |
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_2x.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # For Testing
3 | # 2x
4 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Set5' --testpath ../LR/LRBI --testset Set5
5 |
6 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set5' --testpath ../LR/LRBI --testset Set5
7 |
8 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Set14' --testpath ../LR/LRBI --testset Set14
9 |
10 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set14' --testpath ../LR/LRBI --testset Set14
11 |
12 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_B100' --testpath ../LR/LRBI --testset BSD100
13 |
14 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_B100' --testpath ../LR/LRBI --testset BSD100
15 |
16 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Urban100' --testpath ../LR/LRBI --testset Urban100
17 |
18 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Urban100' --testpath ../LR/LRBI --testset Urban100
19 |
20 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --save 'DRLN_Manga109' --testpath ../LR/LRBI --testset Manga109
21 |
22 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 2 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX2/DRLN_BIX2.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Manga109' --testpath ../LR/LRBI --testset Manga109
23 |
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_3x.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # For Testing
3 | # 3x
4 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Set5' --testpath ../LR/LRBI --testset Set5
5 |
6 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set5' --testpath ../LR/LRBI --testset Set5
7 |
8 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Set14' --testpath ../LR/LRBI --testset Set14
9 |
10 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set14' --testpath ../LR/LRBI --testset Set14
11 |
12 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_B100' --testpath ../LR/LRBI --testset BSD100
13 |
14 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_B100' --testpath ../LR/LRBI --testset BSD100
15 |
16 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Urban100' --testpath ../LR/LRBI --testset Urban100
17 |
18 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Urban100' --testpath ../LR/LRBI --testset Urban100
19 |
20 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --save 'DRLN_Manga109' --testpath ../LR/LRBI --testset Manga109
21 |
22 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX3/DRLN_BIX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Manga109' --testpath ../LR/LRBI --testset Manga109
23 |
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_3x_BD.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # For Testing
3 | # 3x
4 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_Set5' --testpath ../LR/LRBD --testset Set5
5 |
6 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_BD_Set5' --testpath ../LR/LRBD --testset Set5
7 |
8 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_Set14' --testpath ../LR/LRBD --testset Set14
9 |
10 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_BD_Set14' --testpath ../LR/LRBD --testset Set14
11 |
12 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_B100' --testpath ../LR/LRBD --testset B100
13 |
14 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_BD_B100' --testpath ../LR/LRBD --testset B100
15 |
16 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_Urban100' --testpath ../LR/LRBD --testset Urban100
17 |
18 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_BD_Urban100' --testpath ../LR/LRBD --testset Urban100
19 |
20 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --save 'DRLN_BD_Manga109' --testpath ../LR/LRBD --testset Manga109
21 |
22 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_BD_Manga109' --testpath ../LR/LRBD --testset Manga109
23 |
24 |
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_4x.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # For Testing
3 | # 4x
4 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --save 'DRLN_Set5' --testpath ../LR/LRBI --testset Set5
5 |
6 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set5' --testpath ../LR/LRBI --testset Set5
7 |
8 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --save 'DRLN_Set14' --testpath ../LR/LRBI --testset Set14
9 |
10 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Set14' --testpath ../LR/LRBI --testset Set14
11 |
12 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --save 'DRLN_B100' --testpath ../LR/LRBI --testset BSD100
13 |
14 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_B100' --testpath ../LR/LRBI --testset BSD100
15 |
16 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --save 'DRLN_Urban100' --testpath ../LR/LRBI --testset Urban100
17 |
18 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Urban100' --testpath ../LR/LRBI --testset Urban100
19 |
20 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --save 'DRLN_Manga109' --testpath ../LR/LRBI --testset Manga109
21 |
22 | CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 4 --model DRLN --n_feats 64 --pre_train ../TrainedModels/DRLN_BIX4/DRLN_BIX4.pt --test_only --save_results --chop --self_ensemble --save 'DRLNplus_Manga109' --testpath ../LR/LRBI --testset Manga109
23 |
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_8x.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Test DRLN and DRLN+ (self-ensemble) at 8x on the standard benchmarks.
# BUG FIX: the original shebang was the invalid "#!/bin/bash/" (trailing slash).

MODEL=../TrainedModels/DRLN_BIX8/DRLN_BIX8.pt
# Save-directory names and the matching --testset values; the B100 results
# directory is named B100 but the dataset on disk is called BSD100.
NAMES=(Set5 Set14 B100 Urban100 Manga109)
TESTSETS=(Set5 Set14 BSD100 Urban100 Manga109)

for i in "${!NAMES[@]}"; do
    # Plain DRLN
    CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 8 --model DRLN --n_feats 64 --pre_train ${MODEL} --test_only --save_results --chop --save "DRLN_${NAMES[$i]}" --testpath ../LR/LRBI --testset ${TESTSETS[$i]}
    # DRLN+ (self-ensemble)
    CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 8 --model DRLN --n_feats 64 --pre_train ${MODEL} --test_only --save_results --chop --self_ensemble --save "DRLNplus_${NAMES[$i]}" --testpath ../LR/LRBI --testset ${TESTSETS[$i]}
done
--------------------------------------------------------------------------------
/TestCode/code/TestDRLN_All.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Test DRLN / DRLN+ (self-ensemble) on all benchmarks: bicubic (BI)
# degradation at 2x/3x/4x/8x, then blur-down (BD) degradation at 3x.
# BUG FIX: the original shebang was the invalid "#!/bin/bash/" (trailing slash).

# Save-directory names and matching --testset values.  The B100 dataset is
# stored as BSD100 for the BI data but as plain B100 for the BD data.
NAMES=(Set5 Set14 B100 Urban100 Manga109)
BI_TESTSETS=(Set5 Set14 BSD100 Urban100 Manga109)
BD_TESTSETS=(Set5 Set14 B100 Urban100 Manga109)

# run_bi SCALE: evaluate the bicubic model for one scale on every benchmark.
run_bi () {
    local scale=$1
    local model=../TrainedModels/DRLN_BIX${scale}/DRLN_BIX${scale}.pt
    local i
    for i in "${!NAMES[@]}"; do
        CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale ${scale} --model DRLN --n_feats 64 --pre_train ${model} --test_only --save_results --chop --save "DRLN_${NAMES[$i]}" --testpath ../LR/LRBI --testset ${BI_TESTSETS[$i]}
        CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale ${scale} --model DRLN --n_feats 64 --pre_train ${model} --test_only --save_results --chop --self_ensemble --save "DRLNplus_${NAMES[$i]}" --testpath ../LR/LRBI --testset ${BI_TESTSETS[$i]}
    done
}

# run_bd: evaluate the blur-down 3x model on every benchmark.
run_bd () {
    local model=../TrainedModels/DRLN_BDX3/DRLN_BDX3.pt
    local i
    for i in "${!NAMES[@]}"; do
        CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ${model} --test_only --save_results --chop --save "DRLN_BD_${NAMES[$i]}" --testpath ../LR/LRBD --testset ${BD_TESTSETS[$i]}
        CUDA_VISIBLE_DEVICES=0 python main.py --data_test MyImage --scale 3 --model DRLN --n_feats 64 --pre_train ${model} --test_only --save_results --chop --self_ensemble --save "DRLNplus_BD_${NAMES[$i]}" --testpath ../LR/LRBD --testset ${BD_TESTSETS[$i]}
    done
}

for s in 2 3 4 8; do
    run_bi ${s}
done
run_bd
--------------------------------------------------------------------------------
/TestCode/code/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/TestCode/code/data/__init__.py:
--------------------------------------------------------------------------------
1 | from importlib import import_module
2 |
3 | from dataloader import MSDataLoader
4 | from torch.utils.data.dataloader import default_collate
5 |
class Data:
    """Builds the training and test data loaders from the parsed CLI args.

    Exposes ``loader_train`` (None when ``--test_only``) and ``loader_test``.
    """

    def __init__(self, args):
        # pin_memory only pays off when batches are copied to a CUDA device.
        kwargs = {
            'collate_fn': default_collate,
            'pin_memory': not args.cpu,
        }

        self.loader_train = None
        if not args.test_only:
            train_module = import_module('data.' + args.data_train.lower())
            train_set = getattr(train_module, args.data_train)(args)
            self.loader_train = MSDataLoader(
                args,
                train_set,
                batch_size=args.batch_size,
                shuffle=True,
                **kwargs
            )

        if args.data_test in ('Set5', 'Set14', 'B100', 'Urban100'):
            # The standard benchmarks share one dataset class; their noisy
            # variants live in a separate module.
            if args.benchmark_noise:
                test_module = import_module('data.benchmark_noise')
                test_set = getattr(test_module, 'BenchmarkNoise')(
                    args,
                    train=False
                )
            else:
                test_module = import_module('data.benchmark')
                test_set = getattr(test_module, 'Benchmark')(args, train=False)
        else:
            # Any other name maps to a module/class of the same name
            # (e.g. MyImage -> data/myimage.py).
            test_module = import_module('data.' + args.data_test.lower())
            test_set = getattr(test_module, args.data_test)(args, train=False)

        self.loader_test = MSDataLoader(
            args,
            test_set,
            batch_size=1,
            shuffle=False,
            **kwargs
        )
--------------------------------------------------------------------------------
/TestCode/code/data/benchmark.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from data import common
4 | from data import srdata
5 |
6 | import numpy as np
7 | import scipy.misc as misc
8 |
9 | import torch
10 | import torch.utils.data as data
11 |
class Benchmark(srdata.SRData):
    """Benchmark test set (Set5/Set14/B100/Urban100) laid out as
    <dir_data>/benchmark/<name>/{HR, LR_bicubic/X<scale>}."""

    def __init__(self, args, train=True):
        super(Benchmark, self).__init__(args, train, benchmark=True)

    def _scan(self):
        """Return sorted HR paths and the matching per-scale LR paths."""
        hr_paths = []
        lr_paths = [[] for _ in self.scale]
        for entry in os.scandir(self.dir_hr):
            stem = os.path.splitext(entry.name)[0]
            hr_paths.append(os.path.join(self.dir_hr, stem + self.ext))
            for si, s in enumerate(self.scale):
                # LR files follow the "X<scale>/<name>x<scale>" convention.
                lr_name = 'X{}/{}x{}{}'.format(s, stem, s, self.ext)
                lr_paths[si].append(os.path.join(self.dir_lr, lr_name))

        # os.scandir order is arbitrary; sort so HR and LR lists align.
        hr_paths.sort()
        for per_scale in lr_paths:
            per_scale.sort()

        return hr_paths, lr_paths

    def _set_filesystem(self, dir_data):
        self.apath = os.path.join(dir_data, 'benchmark', self.args.data_test)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        self.ext = '.png'
--------------------------------------------------------------------------------
/TestCode/code/data/common.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import numpy as np
4 | import skimage.io as sio
5 | import skimage.color as sc
6 | import skimage.transform as st
7 |
8 | import torch
9 | from torchvision import transforms
10 |
def get_patch(img_in, img_tar, patch_size, scale, multi_scale=False):
    """Crop a random, spatially-aligned (LR, HR) patch pair.

    The target patch edge is ``patch_size`` (multiplied by ``scale`` when
    ``multi_scale``); the input patch is the corresponding LR window.
    """
    in_h, in_w = img_in.shape[:2]

    # Target-patch edge, then the matching input-patch edge.
    tp = (scale if multi_scale else 1) * patch_size
    ip = tp // scale

    # Random top-left corner in the input; scaled up for the target.
    ix = random.randrange(0, in_w - ip + 1)
    iy = random.randrange(0, in_h - ip + 1)
    tx, ty = scale * ix, scale * iy

    return (
        img_in[iy:iy + ip, ix:ix + ip, :],
        img_tar[ty:ty + tp, tx:tx + tp, :],
    )
26 |
def set_channel(l, n_channel):
    """Convert every image in `l` to `n_channel` channels (1 = Y, 3 = RGB)."""
    def _convert(img):
        if img.ndim == 2:
            # Promote grayscale to HxWx1 so the channel logic is uniform.
            img = np.expand_dims(img, axis=2)

        c = img.shape[2]
        if n_channel == 1 and c == 3:
            # Keep only the luma (Y) plane of the YCbCr conversion.
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channel == 3 and c == 1:
            # Replicate the single channel three times.
            img = np.concatenate([img] * n_channel, 2)

        return img

    return [_convert(img) for img in l]
41 |
def np2Tensor(l, rgb_range):
    """Convert HWC numpy images to CHW float tensors scaled to [0, rgb_range]."""
    def _to_tensor(img):
        # HWC -> CHW; ascontiguousarray gives from_numpy a dense buffer.
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
        t = torch.from_numpy(chw).float()
        # Map the [0, 255] pixel range onto [0, rgb_range] in place.
        return t.mul_(rgb_range / 255)

    return [_to_tensor(img) for img in l]
51 |
def add_noise(x, noise='.'):
    """Add synthetic noise to a uint8 image.

    `noise` is '.' for a no-op, or '<type><value>' where type 'G' adds
    rounded Gaussian noise with std `value`, and 'S' adds mean-removed
    Poisson (shot) noise with rate multiplier `value`.

    Returns `x` itself when no noise is requested, otherwise a new uint8
    array clipped to [0, 255].
    """
    # BUG FIX: the original used `noise is not '.'`, which compares string
    # *identity* (implementation-dependent, SyntaxWarning on modern
    # Python); equality is what is meant.
    if noise == '.':
        return x

    noise_type = noise[0]
    noise_value = int(noise[1:])
    if noise_type == 'G':
        noises = np.random.normal(scale=noise_value, size=x.shape)
        noises = noises.round()
    elif noise_type == 'S':
        noises = np.random.poisson(x * noise_value) / noise_value
        noises = noises - noises.mean(axis=0).mean(axis=0)
    else:
        # BUG FIX: an unknown type previously crashed later with an
        # unbound local `noises`; fail fast with a clear message instead.
        raise ValueError('unknown noise type: {}'.format(noise))

    # int16 intermediate avoids uint8 wrap-around before clipping.
    x_noise = x.astype(np.int16) + noises.astype(np.int16)
    return x_noise.clip(0, 255).astype(np.uint8)
68 |
def augment(l, hflip=True, rot=True):
    """Randomly flip/transpose every image in `l` with one shared draw."""
    # One random draw per transform, applied identically to all images so
    # LR/HR pairs stay aligned.  (Draw order matters for reproducibility.)
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            # Swap the two spatial axes (transpose = rotation + flip).
            img = img.transpose(1, 0, 2)
        return img

    return [_apply(img) for img in l]
82 |
--------------------------------------------------------------------------------
/TestCode/code/data/demo.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from data import common
4 |
5 | import numpy as np
6 | import scipy.misc as misc
7 |
8 | import torch
9 | import torch.utils.data as data
10 |
class Demo(data.Dataset):
    """Test-only dataset serving every .png/.jp* file in ``args.dir_demo``.

    Items are ``(lr_tensor, -1, filename)``; -1 stands in for the missing
    HR ground truth.
    """

    def __init__(self, args, train=False):
        self.args = args
        self.name = 'Demo'
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = False

        # Collect image files; '.jp' also matches .jpg/.jpeg.
        self.filelist = sorted(
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if '.png' in f or '.jp' in f
        )

    def __getitem__(self, idx):
        path = self.filelist[idx]
        name = os.path.splitext(os.path.split(path)[-1])[0]
        lr = misc.imread(path)
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return common.np2Tensor([lr], self.args.rgb_range)[0], -1, name

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
40 |
--------------------------------------------------------------------------------
/TestCode/code/data/div2k.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from data import common
4 | from data import srdata
5 |
6 | import numpy as np
7 | import scipy.misc as misc
8 |
9 | import torch
10 | import torch.utils.data as data
11 |
class DIV2K(srdata.SRData):
    """DIV2K train/val set with the standard HR / LR_bicubic/X<scale> layout."""

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        # Repeat the image list so one "epoch" spans args.test_every batches.
        self.repeat = args.test_every // (args.n_train // args.batch_size)

    def _scan(self):
        """Return HR paths and per-scale LR paths for this split's range."""
        if self.train:
            first, last = 0, self.args.n_train
        else:
            first = self.args.n_train
            last = self.args.offset_val + self.args.n_val

        hr_paths = []
        lr_paths = [[] for _ in self.scale]
        # DIV2K images are 1-based, zero-padded to four digits (0001.png...).
        for i in range(first + 1, last + 1):
            stem = '{:0>4}'.format(i)
            hr_paths.append(os.path.join(self.dir_hr, stem + self.ext))
            for si, s in enumerate(self.scale):
                lr_paths[si].append(os.path.join(
                    self.dir_lr,
                    'X{}/{}x{}{}'.format(s, stem, s, self.ext)
                ))

        return hr_paths, lr_paths

    def _set_filesystem(self, dir_data):
        self.apath = dir_data + '/DIV2K'
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic')
        self.ext = '.png'

    def _name_hrbin(self):
        return os.path.join(
            self.apath, 'bin', '{}_bin_HR.npy'.format(self.split)
        )

    def _name_lrbin(self, scale):
        return os.path.join(
            self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale)
        )

    def __len__(self):
        # Training epochs cycle through the images `repeat` times.
        if self.train:
            return len(self.images_hr) * self.repeat
        return len(self.images_hr)

    def _get_index(self, idx):
        # Map a repeated index back onto the real image list.
        return idx % len(self.images_hr) if self.train else idx
70 |
--------------------------------------------------------------------------------
/TestCode/code/data/myimage.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path
3 | import random
4 | import math
5 | import errno
6 |
7 | from data import common
8 |
9 | import numpy as np
10 | import scipy.misc as misc
11 |
12 | import torch
13 | import torch.utils.data as data
14 | from torchvision import transforms
15 |
class MyImage(data.Dataset):
    """Test-only dataset over the images in <testpath>/<testset>/x<scale>.

    Only files that ``misc.imread`` can decode are kept.  Items are
    ``(lr_tensor, -1, filename)``; -1 stands in for the missing HR
    ground truth.
    """

    def __init__(self, args, train=False):
        self.args = args
        self.train = False
        self.name = 'MyImage'
        self.scale = args.scale
        self.idx_scale = 0
        apath = args.testpath + '/' + args.testset + '/x' + str(args.scale[0])

        self.filelist = []
        self.imnamelist = []
        if not train:
            for f in os.listdir(apath):
                filename = os.path.join(apath, f)
                # Probe-decode to filter out non-image files (e.g. Del.txt).
                # BUG FIX: the original bare `except: pass` also swallowed
                # KeyboardInterrupt/SystemExit; catch Exception only.
                try:
                    misc.imread(filename)
                except Exception:
                    continue
                self.filelist.append(filename)
                self.imnamelist.append(f)

    def __getitem__(self, idx):
        filename = os.path.split(self.filelist[idx])[-1]
        filename, _ = os.path.splitext(filename)
        lr = misc.imread(self.filelist[idx])
        lr = common.set_channel([lr], self.args.n_colors)[0]

        return common.np2Tensor([lr], self.args.rgb_range)[0], -1, filename

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
50 |
--------------------------------------------------------------------------------
/TestCode/code/data/srdata.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from data import common
4 |
5 | import numpy as np
6 | import scipy.misc as misc
7 |
8 | import torch
9 | import torch.utils.data as data
10 |
class SRData(data.Dataset):
    """Base dataset for paired LR/HR super-resolution data.

    Subclasses implement ``_scan`` / ``_set_filesystem`` (and the
    ``_name_*bin`` helpers when binary caching is used).  ``args.ext``
    selects the storage strategy:

    * ``'img'`` (or any benchmark set) -- decode image files on the fly;
    * ``'sep'`` / ``'sep_reset'`` -- one ``.npy`` cache file per image;
    * ``'bin'`` / ``'bin_reset'`` -- a single ``.npy`` per split and scale.

    ``'reset'`` in the string forces the cache to be rebuilt.
    """

    def __init__(self, args, train=True, benchmark=False):
        self.args = args
        self.train = train
        self.split = 'train' if train else 'test'
        self.benchmark = benchmark
        self.scale = args.scale
        self.idx_scale = 0  # index into args.scale currently in use

        self._set_filesystem(args.dir_data)

        # Loads the whole-split binary caches into memory.  Defined as a
        # closure over self; only invoked on the 'bin' paths below.
        def _load_bin():
            self.images_hr = np.load(self._name_hrbin())
            self.images_lr = [
                np.load(self._name_lrbin(s)) for s in self.scale
            ]

        if args.ext == 'img' or benchmark:
            # Plain image files: keep path lists, decode in _load_file.
            self.images_hr, self.images_lr = self._scan()
        elif args.ext.find('sep') >= 0:
            self.images_hr, self.images_lr = self._scan()
            if args.ext.find('reset') >= 0:
                # Rebuild the per-image .npy caches from the source images.
                print('Preparing seperated binary files')
                for v in self.images_hr:
                    hr = misc.imread(v)
                    name_sep = v.replace(self.ext, '.npy')
                    np.save(name_sep, hr)
                for si, s in enumerate(self.scale):
                    for v in self.images_lr[si]:
                        lr = misc.imread(v)
                        name_sep = v.replace(self.ext, '.npy')
                        np.save(name_sep, lr)

            # From here on, reference the .npy caches instead of the images.
            self.images_hr = [
                v.replace(self.ext, '.npy') for v in self.images_hr
            ]
            self.images_lr = [
                [v.replace(self.ext, '.npy') for v in self.images_lr[i]]
                for i in range(len(self.scale))
            ]

        elif args.ext.find('bin') >= 0:
            try:
                if args.ext.find('reset') >= 0:
                    # Force regeneration by diverting to the except path.
                    raise IOError
                print('Loading a binary file')
                _load_bin()
            except:
                # Cache missing or reset requested: build it, then load.
                print('Preparing a binary file')
                bin_path = os.path.join(self.apath, 'bin')
                if not os.path.isdir(bin_path):
                    os.mkdir(bin_path)

                list_hr, list_lr = self._scan()
                hr = [misc.imread(f) for f in list_hr]
                np.save(self._name_hrbin(), hr)
                del hr
                for si, s in enumerate(self.scale):
                    lr_scale = [misc.imread(f) for f in list_lr[si]]
                    np.save(self._name_lrbin(s), lr_scale)
                    del lr_scale
                _load_bin()
        else:
            print('Please define data type')

    def _scan(self):
        """Return (list_hr, list_lr) path lists; implemented by subclasses."""
        raise NotImplementedError

    def _set_filesystem(self, dir_data):
        """Set self.apath/dir_hr/dir_lr/ext; implemented by subclasses."""
        raise NotImplementedError

    def _name_hrbin(self):
        """Path of the cached HR binary; implemented by subclasses."""
        raise NotImplementedError

    def _name_lrbin(self, scale):
        """Path of the cached LR binary for `scale`; implemented by subclasses."""
        raise NotImplementedError

    def __getitem__(self, idx):
        # Load -> crop/augment -> channel conversion -> tensor.
        lr, hr, filename = self._load_file(idx)
        lr, hr = self._get_patch(lr, hr)
        lr, hr = common.set_channel([lr, hr], self.args.n_colors)
        lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
        return lr_tensor, hr_tensor, filename

    def __len__(self):
        return len(self.images_hr)

    def _get_index(self, idx):
        # Hook for subclasses that repeat the data (e.g. DIV2K training).
        return idx

    def _load_file(self, idx):
        """Load one (lr, hr) pair and its base filename, per args.ext."""
        idx = self._get_index(idx)
        lr = self.images_lr[self.idx_scale][idx]
        hr = self.images_hr[idx]
        if self.args.ext == 'img' or self.benchmark:
            filename = hr
            lr = misc.imread(lr)
            hr = misc.imread(hr)
        elif self.args.ext.find('sep') >= 0:
            filename = hr
            lr = np.load(lr)
            hr = np.load(hr)
        else:
            # 'bin' mode: arrays are already in memory; synthesize a name.
            filename = str(idx + 1)

        filename = os.path.splitext(os.path.split(filename)[-1])[0]

        return lr, hr, filename

    def _get_patch(self, lr, hr):
        """Random aligned patch + augmentation when training; when testing,
        crop HR so it exactly matches the LR size times the scale."""
        patch_size = self.args.patch_size
        scale = self.scale[self.idx_scale]
        multi_scale = len(self.scale) > 1
        if self.train:
            lr, hr = common.get_patch(
                lr, hr, patch_size, scale, multi_scale=multi_scale
            )
            lr, hr = common.augment([lr, hr])
            lr = common.add_noise(lr, self.args.noise)
        else:
            ih, iw = lr.shape[0:2]
            hr = hr[0:ih * scale, 0:iw * scale]

        return lr, hr

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
139 |
--------------------------------------------------------------------------------
/TestCode/code/dataloader.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import random
3 |
4 | import torch
5 | import torch.multiprocessing as multiprocessing
6 | from torch.utils.data import DataLoader
7 | from torch.utils.data import SequentialSampler
8 | from torch.utils.data import RandomSampler
9 | from torch.utils.data import BatchSampler
10 | from torch.utils.data import _utils
11 | from torch.utils.data.dataloader import _DataLoaderIter
12 |
13 | from torch.utils.data._utils import collate
14 | from torch.utils.data._utils import signal_handling
15 | from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
16 | from torch.utils.data._utils import ExceptionWrapper
17 | from torch.utils.data._utils import IS_WINDOWS
18 | from torch.utils.data._utils.worker import ManagerWatchdog
19 |
20 | from torch._six import queue
21 |
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Worker loop: like torch's _worker_loop, but picks a random scale per
    batch (multi-scale training) before collating.

    Puts (idx, samples) -- or (idx, ExceptionWrapper) on failure -- onto
    `data_queue`; exits when `index_queue` yields the None sentinel.
    """
    # BUG FIX: the original called sys.exc_info() in the handler below, but
    # this module never imports sys -- the handler itself raised NameError.
    import sys

    try:
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()

        # Each worker is restricted to one thread and seeded deterministically.
        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)

        data_queue.cancel_join_thread()

        if init_fn is not None:
            init_fn(worker_id)

        watchdog = ManagerWatchdog()

        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue

            if r is None:
                # Shutdown sentinel from the main process.
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutting down: drain remaining work items without processing.
                continue

            idx, batch_indices = r
            try:
                # Multi-scale training: choose a random scale for this batch.
                idx_scale = 0
                if len(scale) > 1 and dataset.train:
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)

                samples = collate_fn([dataset[i] for i in batch_indices])
                samples.append(idx_scale)
            except Exception:
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples

    except KeyboardInterrupt:
        # The main process handles SIGINT; workers exit quietly.
        pass
67 |
class _MSDataLoaderIter(_DataLoaderIter):
    """DataLoader iterator that spawns ``_ms_loop`` workers so each batch
    can carry a randomly chosen scale index.

    Re-implements the setup of torch's legacy ``_DataLoaderIter`` (the
    parent ``__init__`` is deliberately NOT called — it would spawn
    workers with the default loop instead of ``_ms_loop``).
    """

    def __init__(self, loader):
        # Copy the loader's configuration onto the iterator.
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        # Pinning is pointless without CUDA, so gate it here.
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout

        self.sample_iter = iter(self.batch_sampler)

        # NOTE(review): this value is recomputed (as a tensor element)
        # inside the worker branch below, so it is unused when
        # num_workers > 0 — looks redundant; confirm before removing.
        base_seed = torch.LongTensor(1).random_().item()

        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0      # next batch index handed to a worker
            self.rcvd_idx = 0      # next batch index expected by the consumer
            self.reorder_dict = {} # out-of-order results awaiting rcvd_idx
            self.done_event = multiprocessing.Event()

            base_seed = torch.LongTensor(1).random_()[0]

            self.index_queues = []
            self.workers = []
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                # Each worker gets its own index queue and a distinct seed.
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)

            if self.pin_memory:
                # Extra thread copies worker results into pinned memory.
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue

            # Register worker PIDs so a dead worker raises instead of hanging.
            _utils.signal_handling._set_worker_pids(
                id(self), tuple(w.pid for w in self.workers)
            )
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True

            # Prime the pipeline with two batches per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()
146 |
147 |
class MSDataLoader(DataLoader):
    """DataLoader whose iterator can switch the dataset scale per batch.

    ``cfg`` supplies the worker count (``n_threads``) and the list of
    scales; all remaining arguments are forwarded to ``DataLoader``.
    """

    def __init__(self, cfg, *args, **kwargs):
        # Worker count comes from the config, not from the caller.
        super(MSDataLoader, self).__init__(
            *args, num_workers=cfg.n_threads, **kwargs
        )
        self.scale = cfg.scale

    def __iter__(self):
        # Use the multi-scale-aware iterator instead of torch's default.
        return _MSDataLoaderIter(self)
158 |
159 |
--------------------------------------------------------------------------------
/TestCode/code/loss/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from importlib import import_module
3 |
4 | import matplotlib
5 | matplotlib.use('Agg')
6 | import matplotlib.pyplot as plt
7 |
8 | import numpy as np
9 |
10 | import torch
11 | import torch.nn as nn
12 | import torch.nn.functional as F
13 |
class Loss(nn.modules.loss._Loss):
    """Weighted sum of loss terms parsed from ``args.loss``.

    The spec string looks like ``"1*L1+0.1*VGG54"``: ``weight*TYPE``
    terms joined by ``+``. Supported TYPEs: ``MSE``, ``L1``,
    ``VGG<layer>`` (perceptual) and ``*GAN`` flavors (adversarial).
    Per-term values are accumulated row-by-row into ``self.log`` (one
    row per epoch) for console display and loss-curve plotting.
    """

    def __init__(self, args, ckp):
        super(Loss, self).__init__()
        print('Preparing loss function:')

        self.n_GPUs = args.n_GPUs
        self.loss = []                      # bookkeeping: type/weight/function per term
        self.loss_module = nn.ModuleList()  # only real (trainable/movable) modules
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'MSE':
                loss_function = nn.MSELoss()
            elif loss_type == 'L1':
                loss_function = nn.L1Loss()
            elif loss_type.find('VGG') >= 0:
                # e.g. 'VGG54' -> perceptual loss on VGG layer '54'.
                module = import_module('loss.vgg')
                loss_function = getattr(module, 'VGG')(
                    loss_type[3:],
                    rgb_range=args.rgb_range
                )
            elif loss_type.find('GAN') >= 0:
                module = import_module('loss.adversarial')
                loss_function = getattr(module, 'Adversarial')(
                    args,
                    loss_type
                )

            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function}
            )
            if loss_type.find('GAN') >= 0:
                # Extra log-only slot for the discriminator loss.
                self.loss.append({'type': 'DIS', 'weight': 1, 'function': None})

        if len(self.loss) > 1:
            # Log-only slot for the combined total.
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])

        # Running log; start_log() appends one row per epoch.
        self.log = torch.Tensor()

        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.precision == 'half': self.loss_module.half()
        if not args.cpu and args.n_GPUs > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.n_GPUs)
            )

        if args.load != '.': self.load(ckp.dir, cpu=args.cpu)

    def forward(self, sr, hr):
        """Compute the weighted sum of all loss terms and log each one."""
        losses = []
        for i, l in enumerate(self.loss):
            if l['function'] is not None:
                loss = l['function'](sr, hr)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif l['type'] == 'DIS':
                # Discriminator loss is produced as a side effect of the
                # preceding adversarial term's forward pass.
                self.log[-1, i] += self.loss[i - 1]['function'].loss

        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()

        return loss_sum

    def step(self):
        """Advance LR schedulers of loss modules that have one (e.g. GAN)."""
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()

    def start_log(self):
        """Append a fresh all-zero row to the log (call at epoch start)."""
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))

    def end_log(self, n_batches):
        """Convert the last log row from a sum into a per-batch mean."""
        self.log[-1].div_(n_batches)

    def display_loss(self, batch):
        """Return a '[type: value]' string of running means for printing."""
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))

        return ''.join(log)

    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch PDF per term under ``apath``."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig('{}/loss_{}.pdf'.format(apath, l['type']))
            plt.close(fig)

    def get_loss_module(self):
        """Unwrap DataParallel (if used) and return the raw ModuleList."""
        if self.n_GPUs == 1:
            return self.loss_module
        else:
            return self.loss_module.module

    def save(self, apath):
        """Persist loss-module weights and the accumulated log."""
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))

    def load(self, apath, cpu=False):
        """Restore weights and log; fast-forward schedulers to the
        logged epoch count so resumed training keeps the right LR."""
        if cpu:
            # Remap CUDA-saved tensors onto the CPU.
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}

        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        for l in self.loss_module:
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
143 |
144 |
--------------------------------------------------------------------------------
/TestCode/code/loss/adversarial.py:
--------------------------------------------------------------------------------
1 | import utility
2 | from model import common
3 | from loss import discriminator
4 |
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torch.optim as optim
9 | from torch.autograd import Variable
10 |
class Adversarial(nn.Module):
    """Adversarial loss term.

    Each forward call trains the internal discriminator for ``gan_k``
    steps on (fake, real) pairs, then returns the generator loss.
    Supported flavors: 'GAN' (BCE), 'WGAN' (weight clipping) and
    'WGAN_GP' (gradient penalty). The averaged discriminator loss of
    the last call is kept in ``self.loss`` for logging by ``Loss``.
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if gan_type != 'WGAN_GP':
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # WGAN-GP uses its own Adam settings (beta1 = 0, small LR).
            self.optimizer = optim.Adam(
                self.discriminator.parameters(),
                betas=(0, 0.9), eps=1e-8, lr=1e-5
            )
        self.scheduler = utility.make_scheduler(args, self.optimizer)

    def forward(self, fake, real):
        """Train the discriminator ``gan_k`` steps, return generator loss."""
        # The discriminator must not backprop into the generator output.
        fake_detach = fake.detach()

        self.loss = 0
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            d_fake = self.discriminator(fake_detach)
            d_real = self.discriminator(real)
            if self.gan_type == 'GAN':
                label_fake = torch.zeros_like(d_fake)
                label_real = torch.ones_like(d_real)
                loss_d \
                    = F.binary_cross_entropy_with_logits(d_fake, label_fake) \
                    + F.binary_cross_entropy_with_logits(d_real, label_real)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # Bug fix: the original rand_like(fake).view(-1,1,1,1)
                    # produced a (B*C*H*W, 1, 1, 1) tensor that cannot
                    # broadcast against the images. WGAN-GP wants one
                    # mixing weight per sample.
                    epsilon = torch.rand(
                        fake.size(0), 1, 1, 1,
                        dtype=fake.dtype, device=fake.device
                    )
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.discriminator(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty

            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward()
            self.optimizer.step()

            if self.gan_type == 'WGAN':
                # Vanilla WGAN enforces the Lipschitz constraint by clipping.
                for p in self.discriminator.parameters():
                    p.data.clamp_(-1, 1)

        self.loss /= self.gan_k

        d_fake_for_g = self.discriminator(fake)
        if self.gan_type == 'GAN':
            # Bug fix: previously reused `label_real`, a variable leaked
            # from the discriminator loop's branch; build the all-ones
            # target explicitly from the generator-side logits.
            loss_g = F.binary_cross_entropy_with_logits(
                d_fake_for_g, torch.ones_like(d_fake_for_g)
            )
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_for_g.mean()

        # Generator loss
        return loss_g

    def state_dict(self, *args, **kwargs):
        # NOTE(review): merging these two dicts drops nothing today, but
        # overlapping keys would silently collide; kept for checkpoint
        # compatibility.
        state_discriminator = self.discriminator.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()

        return dict(**state_discriminator, **state_optimizer)
83 |
84 | # Some references
85 | # https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
86 | # OR
87 | # https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
88 |
--------------------------------------------------------------------------------
/TestCode/code/loss/discriminator.py:
--------------------------------------------------------------------------------
1 | from model import common
2 |
3 | import torch.nn as nn
4 |
class Discriminator(nn.Module):
    """Image discriminator: a strided conv feature stack followed by a
    two-layer linear head producing one real/fake logit per image.

    Expects inputs of spatial size ``args.patch_size``.
    """

    def __init__(self, args, gan_type='GAN'):
        super(Discriminator, self).__init__()

        out_channels = 64
        depth = 7
        bn = True
        act = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        layers = [
            common.BasicBlock(args.n_colors, out_channels, 3, bn=bn, act=act)
        ]
        for i in range(depth):
            in_channels = out_channels
            # Odd steps widen the channels at stride 1; even steps halve
            # the spatial resolution at stride 2.
            if i % 2 == 1:
                stride = 1
                out_channels *= 2
            else:
                stride = 2
            layers.append(common.BasicBlock(
                in_channels, out_channels, 3, stride=stride, bn=bn, act=act
            ))

        self.features = nn.Sequential(*layers)

        # Spatial size remaining after all stride-2 convolutions.
        patch_size = args.patch_size // (2**((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(out_channels * patch_size**2, 1024),
            act,
            nn.Linear(1024, 1)
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
45 |
46 |
--------------------------------------------------------------------------------
/TestCode/code/loss/vgg.py:
--------------------------------------------------------------------------------
1 | from model import common
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import torchvision.models as models
7 | from torch.autograd import Variable
8 |
class VGG(nn.Module):
    """Perceptual loss: MSE between VGG19 feature maps of SR and HR.

    ``conv_index`` selects the feature layer: '22' (relu2_2, first 8
    modules) or '54' (relu5_4 region, first 35 modules). Inputs in
    ``[0, rgb_range]`` are normalized with the ImageNet mean/std before
    being fed to VGG.
    """

    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if conv_index == '22':
            self.vgg = nn.Sequential(*modules[:8])
        elif conv_index == '54':
            self.vgg = nn.Sequential(*modules[:35])

        # ImageNet statistics, std scaled to the data's rgb_range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # Bug fix: `self.vgg.requires_grad = False` only set a plain
        # attribute on the Module and left every parameter trainable.
        # Freeze the actual parameters so the feature extractor is fixed.
        for p in self.vgg.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        """Return MSE between VGG features of ``sr`` and (detached) ``hr``."""
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x

        vgg_sr = _forward(sr)
        # HR is a fixed target; no gradient needs to flow through it.
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())

        loss = F.mse_loss(vgg_sr, vgg_hr)

        return loss
37 |
--------------------------------------------------------------------------------
/TestCode/code/main.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import utility
3 | import data
4 | import model
5 | import loss
6 | from option import args
7 | from trainer import Trainer
8 |
# Reproducibility: seed torch's RNG before any model/data construction.
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)

if checkpoint.ok:
    loader = data.Data(args)
    model = model.Model(args, checkpoint)
    # The loss is only needed when training; skip it in test-only mode.
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    # Alternate train/test epochs until the trainer signals completion
    # (in test-only mode terminate() runs a single test pass instead).
    while not t.terminate():
        t.train()
        t.test()

    checkpoint.done()
22 |
23 |
--------------------------------------------------------------------------------
/TestCode/code/model/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from importlib import import_module
3 |
4 | import torch
5 | import torch.nn as nn
6 | from torch.autograd import Variable
7 |
class Model(nn.Module):
    """Wrapper around the concrete SR network selected by ``args.model``.

    Handles device placement, optional half precision and DataParallel,
    checkpoint save/load, and two test-time enhancements: chopping large
    inputs into overlapping quadrants (``forward_chop``) and x8
    self-ensemble over flips/transposes (``forward_x8``).
    """

    def __init__(self, args, ckp):
        super(Model, self).__init__()
        print('Making model...')

        self.scale = args.scale
        self.idx_scale = 0
        self.self_ensemble = args.self_ensemble
        self.chop = args.chop
        self.precision = args.precision
        self.cpu = args.cpu
        self.device = torch.device('cpu' if args.cpu else 'cuda')
        self.n_GPUs = args.n_GPUs
        self.save_models = args.save_models

        # Dynamically import model/<name>.py and build the network.
        module = import_module('model.' + args.model.lower())
        self.model = module.make_model(args).to(self.device)
        if args.precision == 'half': self.model.half()

        if not args.cpu and args.n_GPUs > 1:
            self.model = nn.DataParallel(self.model, range(args.n_GPUs))

        self.load(
            ckp.dir,
            pre_train=args.pre_train,
            resume=args.resume,
            cpu=args.cpu
        )
        if args.print_model: print(self.model)

    def forward(self, x, idx_scale):
        """Run the network at scale index ``idx_scale``; at eval time
        apply self-ensemble and/or chopping if enabled."""
        self.idx_scale = idx_scale
        target = self.get_model()
        if hasattr(target, 'set_scale'):
            target.set_scale(idx_scale)

        if self.self_ensemble and not self.training:
            if self.chop:
                forward_function = self.forward_chop
            else:
                forward_function = self.model.forward

            return self.forward_x8(x, forward_function)
        elif self.chop and not self.training:
            return self.forward_chop(x)
        else:
            return self.model(x)

    def get_model(self):
        """Unwrap DataParallel (if used) and return the raw network."""
        if self.n_GPUs == 1:
            return self.model
        else:
            return self.model.module

    def state_dict(self, **kwargs):
        # Save the raw network's weights, not the DataParallel wrapper's.
        target = self.get_model()
        return target.state_dict(**kwargs)

    def save(self, apath, epoch, is_best=False):
        """Write model_latest.pt, plus model_best.pt and/or a per-epoch
        snapshot depending on flags."""
        target = self.get_model()
        torch.save(
            target.state_dict(),
            os.path.join(apath, 'model', 'model_latest.pt')
        )
        if is_best:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model', 'model_best.pt')
            )

        if self.save_models:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model', 'model_{}.pt'.format(epoch))
            )

    def load(self, apath, pre_train='.', resume=-1, cpu=False):
        """Restore weights: resume==-1 -> model_latest.pt; resume==0 ->
        external ``pre_train`` file (if given); else model_<resume>.pt."""
        if cpu:
            # Remap CUDA-saved tensors onto the CPU.
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}

        if resume == -1:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model', 'model_latest.pt'),
                    **kwargs
                ),
                strict=False
            )
        elif resume == 0:
            if pre_train != '.':
                print('Loading model from {}'.format(pre_train))
                self.get_model().load_state_dict(
                    torch.load(pre_train, **kwargs),
                    strict=False
                )
        else:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model', 'model_{}.pt'.format(resume)),
                    **kwargs
                ),
                strict=False
            )

    def forward_chop(self, x, shave=10, min_size=160000):
        """Memory-saving inference: split the input into 4 overlapping
        quadrants (overlap = ``shave`` px), recurse until each piece is
        under ``min_size`` pixels, run the network (batching up to 4
        pieces across GPUs), then stitch the SR quadrants back together.
        """
        scale = self.scale[self.idx_scale]
        n_GPUs = min(self.n_GPUs, 4)
        b, c, h, w = x.size()
        h_half, w_half = h // 2, w // 2
        h_size, w_size = h_half + shave, w_half + shave
        lr_list = [
            x[:, :, 0:h_size, 0:w_size],
            x[:, :, 0:h_size, (w - w_size):w],
            x[:, :, (h - h_size):h, 0:w_size],
            x[:, :, (h - h_size):h, (w - w_size):w]]

        if w_size * h_size < min_size:
            sr_list = []
            for i in range(0, 4, n_GPUs):
                lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0)
                sr_batch = self.model(lr_batch)
                sr_list.extend(sr_batch.chunk(n_GPUs, dim=0))
        else:
            # Still too big: recurse on each quadrant.
            sr_list = [
                self.forward_chop(patch, shave=shave, min_size=min_size) \
                for patch in lr_list
            ]

        # All geometry below is in SR (upscaled) coordinates.
        h, w = scale * h, scale * w
        h_half, w_half = scale * h_half, scale * w_half
        h_size, w_size = scale * h_size, scale * w_size
        shave *= scale

        # Paste each quadrant's non-overlapping region into the output.
        output = x.new(b, c, h, w)
        output[:, :, 0:h_half, 0:w_half] \
            = sr_list[0][:, :, 0:h_half, 0:w_half]
        output[:, :, 0:h_half, w_half:w] \
            = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
        output[:, :, h_half:h, 0:w_half] \
            = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
        output[:, :, h_half:h, w_half:w] \
            = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]

        return output

    def forward_x8(self, x, forward_function):
        """Self-ensemble: run all 8 flip/transpose variants of ``x``
        through ``forward_function``, undo each transform on its output,
        and average the results."""
        def _transform(v, op):
            if self.precision != 'single': v = v.float()

            # Transforms are done in numpy; 'v'/'h' are flips, 't' is a
            # transpose of the two spatial axes (each is its own inverse).
            v2np = v.data.cpu().numpy()
            if op == 'v':
                tfnp = v2np[:, :, :, ::-1].copy()
            elif op == 'h':
                tfnp = v2np[:, :, ::-1, :].copy()
            elif op == 't':
                tfnp = v2np.transpose((0, 1, 3, 2)).copy()

            ret = torch.Tensor(tfnp).to(self.device)
            if self.precision == 'half': ret = ret.half()

            return ret

        # Doubling list per op yields all 8 combinations of v/h/t.
        lr_list = [x]
        for tf in 'v', 'h', 't':
            lr_list.extend([_transform(t, tf) for t in lr_list])

        sr_list = [forward_function(aug) for aug in lr_list]
        for i in range(len(sr_list)):
            # Undo transforms in reverse order of application.
            if i > 3:
                sr_list[i] = _transform(sr_list[i], 't')
            if i % 4 > 1:
                sr_list[i] = _transform(sr_list[i], 'h')
            if (i % 4) % 2 == 1:
                sr_list[i] = _transform(sr_list[i], 'v')

        # NOTE(review): mean over dim 0 collapses the batch dimension,
        # which assumes batch size 1 at test time — confirm for batched use.
        output_cat = torch.cat(sr_list, dim=0)
        output = output_cat.mean(dim=0, keepdim=True)

        return output
189 |
190 |
--------------------------------------------------------------------------------
/TestCode/code/model/common.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | from torch.autograd import Variable
8 |
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Conv2d with 'same' padding for odd kernel sizes."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
13 |
class MeanShift(nn.Conv2d):
    """Frozen 1x1 conv that subtracts (sign=-1, default) or adds
    (sign=+1) the dataset RGB mean, scaled by ``rgb_range`` and divided
    by the per-channel std.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity kernel scaled by 1/std; the shift lives in the bias.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Bug fix: `self.requires_grad = False` merely created a plain
        # attribute on the module; weight and bias stayed trainable.
        # Freeze the actual parameters so the shift is never updated.
        for p in self.parameters():
            p.requires_grad = False
23 |
class BasicBlock(nn.Sequential):
    """Conv -> (optional BatchNorm) -> (optional activation)."""

    def __init__(
        self, in_channels, out_channels, kernel_size, stride=1, bias=False,
        bn=True, act=nn.ReLU(True)):

        layers = [nn.Conv2d(
            in_channels, out_channels, kernel_size,
            padding=(kernel_size // 2), stride=stride, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
36 |
class ResBlock(nn.Module):
    """Residual block: conv-act-conv (with optional BN after each conv)
    plus an identity skip; the residual branch is scaled by
    ``res_scale`` before the addition."""

    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):

        super(ResBlock, self).__init__()
        layers = []
        for step in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            # Activation only between the two convolutions.
            if step == 0:
                layers.append(act)

        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        scaled = self.body(x).mul(self.res_scale)
        return scaled + x
57 |
class Upsampler(nn.Sequential):
    """PixelShuffle upsampling head.

    Supports any power-of-two scale (chained x2 stages) or scale 3;
    anything else raises NotImplementedError. ``act``, when truthy, is a
    zero-argument activation factory (e.g. ``nn.ReLU``).
    """

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: stack log2(scale) conv+shuffle(2) stages.
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*layers)
77 |
--------------------------------------------------------------------------------
/TestCode/code/model/drln.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import model.ops as ops
4 | import torch.nn.functional as F
5 |
def make_model(args, parent=False):
    """Factory hook used by model.__init__ to instantiate the network."""
    return DRLN(args)
8 |
class CALayer(nn.Module):
    """Channel attention: global average pooling, three dilated-conv
    squeeze branches (dilation 3/5/7), and a sigmoid fusion producing
    per-channel gates in (0, 1) that rescale the input."""

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)

        # Parallel squeeze branches on the pooled 1x1 descriptor.
        self.c1 = ops.BasicBlock(channel, channel // reduction, 3, 1, 3, 3)
        self.c2 = ops.BasicBlock(channel, channel // reduction, 3, 1, 5, 5)
        self.c3 = ops.BasicBlock(channel, channel // reduction, 3, 1, 7, 7)
        # Fuse the concatenated branches back into `channel` gates.
        self.c4 = ops.BasicBlockSig((channel // reduction) * 3, channel, 3, 1, 1)

    def forward(self, x):
        pooled = self.avg_pool(x)
        branches = torch.cat(
            [self.c1(pooled), self.c2(pooled), self.c3(pooled)], dim=1
        )
        gates = self.c4(branches)
        return x * gates
28 |
class Block(nn.Module):
    """Densely connected residual unit: three residual blocks, each fed
    the running concatenation of all previous features, then a 1x1
    compression conv and channel attention."""

    def __init__(self, in_channels, out_channels, group=1):
        super(Block, self).__init__()

        # Widths grow with the dense concatenation: 1x, 2x, 4x channels.
        self.r1 = ops.ResidualBlock(in_channels, out_channels)
        self.r2 = ops.ResidualBlock(in_channels * 2, out_channels * 2)
        self.r3 = ops.ResidualBlock(in_channels * 4, out_channels * 4)
        # 1x1 conv squeezes the 8x-wide concatenation back down.
        self.g = ops.BasicBlock(in_channels * 8, out_channels, 1, 1, 0)
        self.ca = CALayer(in_channels)

    def forward(self, x):
        cat1 = torch.cat([x, self.r1(x)], dim=1)
        cat2 = torch.cat([cat1, self.r2(cat1)], dim=1)
        cat3 = torch.cat([cat2, self.r3(cat2)], dim=1)
        return self.ca(self.g(cat3))
54 |
55 |
class DRLN(nn.Module):
    """Densely Residual Laplacian Network for single-image SR.

    Twenty attention blocks are wired into cascades: each block's output
    is concatenated with all previous outputs in its cascade and fused
    by a 3x3 conv (c1..c20); medium skip connections (a1..a6) and a long
    skip over the whole body feed a PixelShuffle upsampler and a 3-channel
    tail conv. Input/output means are shifted with fixed DIV2K RGB means.
    """

    def __init__(self, args):
        super(DRLN, self).__init__()

        #n_resgroups = args.n_resgroups
        #n_resblocks = args.n_resblocks
        #n_feats = args.n_feats
        #kernel_size = 3
        #reduction = args.reduction
        #scale = args.scale[0]
        #act = nn.ReLU(True)

        self.scale = args.scale[0]
        chs=64  # feature width throughout the body

        # Fixed DIV2K RGB means, subtracted at input and re-added at output.
        self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=True)
        self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=False)

        self.head = nn.Conv2d(3, chs, 3, 1, 1)

        # Twenty dense residual attention blocks.
        self.b1 = Block(chs, chs)
        self.b2 = Block(chs, chs)
        self.b3 = Block(chs, chs)
        self.b4 = Block(chs, chs)
        self.b5 = Block(chs, chs)
        self.b6 = Block(chs, chs)
        self.b7 = Block(chs, chs)
        self.b8 = Block(chs, chs)
        self.b9 = Block(chs, chs)
        self.b10 = Block(chs, chs)
        self.b11 = Block(chs, chs)
        self.b12 = Block(chs, chs)
        self.b13 = Block(chs, chs)
        self.b14 = Block(chs, chs)
        self.b15 = Block(chs, chs)
        self.b16 = Block(chs, chs)
        self.b17 = Block(chs, chs)
        self.b18 = Block(chs, chs)
        self.b19 = Block(chs, chs)
        self.b20 = Block(chs, chs)

        # Fusion convs; input width grows with each cascade's concatenation.
        self.c1 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c2 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c3 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c4 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c5 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c6 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c7 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c8 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c9 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c10 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c11 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c12 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c13 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c14 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c15 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c16 = ops.BasicBlock(chs*5, chs, 3, 1, 1)
        self.c17 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
        self.c18 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
        self.c19 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
        self.c20 = ops.BasicBlock(chs*5, chs, 3, 1, 1)

        self.upsample = ops.UpsampleBlock(chs, self.scale , multi_scale=False)
        #self.convert = ops.ConvertBlock(chs, chs, 20)
        self.tail = nn.Conv2d(chs, 3, 3, 1, 1)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        c0 = o0 = x

        # Cascade 1 (b1-b3), closed by medium skip a1.
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)

        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)

        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        a1 = o3 + c0

        # Cascade 2 (b4-b6), closed by a2.
        b4 = self.b4(a1)
        c4 = torch.cat([o3, b4], dim=1)
        o4 = self.c4(c4)

        # NOTE(review): o4 is computed above but never used; b5 consumes
        # a1 instead of o4. This matches the released DRLN code (and its
        # pretrained weights) — confirm whether it is intentional before
        # changing the wiring.
        b5 = self.b5(a1)
        c5 = torch.cat([c4, b5], dim=1)
        o5 = self.c5(c5)

        b6 = self.b6(o5)
        c6 = torch.cat([c5, b6], dim=1)
        o6 = self.c6(c6)
        a2 = o6 + a1

        # Cascade 3 (b7-b9), closed by a3.
        b7 = self.b7(a2)
        c7 = torch.cat([o6, b7], dim=1)
        o7 = self.c7(c7)

        b8 = self.b8(o7)
        c8 = torch.cat([c7, b8], dim=1)
        o8 = self.c8(c8)

        b9 = self.b9(o8)
        c9 = torch.cat([c8, b9], dim=1)
        o9 = self.c9(c9)
        a3 = o9 + a2

        # Cascade 4 (b10-b12), closed by a4.
        b10 = self.b10(a3)
        c10 = torch.cat([o9, b10], dim=1)
        o10 = self.c10(c10)


        b11 = self.b11(o10)
        c11 = torch.cat([c10, b11], dim=1)
        o11 = self.c11(c11)

        b12 = self.b12(o11)
        c12 = torch.cat([c11, b12], dim=1)
        o12 = self.c12(c12)
        a4 = o12 + a3


        # Cascade 5 (b13-b16, four blocks), closed by a5.
        b13 = self.b13(a4)
        c13 = torch.cat([o12, b13], dim=1)
        o13 = self.c13(c13)

        b14 = self.b14(o13)
        c14 = torch.cat([c13, b14], dim=1)
        o14 = self.c14(c14)


        b15 = self.b15(o14)
        c15 = torch.cat([c14, b15], dim=1)
        o15 = self.c15(c15)

        b16 = self.b16(o15)
        c16 = torch.cat([c15, b16], dim=1)
        o16 = self.c16(c16)
        a5 = o16 + a4


        # Cascade 6 (b17-b20, four blocks), closed by a6.
        b17 = self.b17(a5)
        c17 = torch.cat([o16, b17], dim=1)
        o17 = self.c17(c17)

        b18 = self.b18(o17)
        c18 = torch.cat([c17, b18], dim=1)
        o18 = self.c18(c18)


        b19 = self.b19(o18)
        c19 = torch.cat([c18, b19], dim=1)
        o19 = self.c19(c19)

        b20 = self.b20(o19)
        c20 = torch.cat([c19, b20], dim=1)
        o20 = self.c20(c20)
        a6 = o20 + a5

        #c_out = torch.cat([b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20], dim=1)

        #b = self.convert(c_out)
        # Long skip over the whole body, then upsample and project to RGB.
        b_out = a6 + x
        out = self.upsample(b_out, scale=self.scale )

        out = self.tail(out)
        f_out = self.add_mean(out)

        return f_out

    def load_state_dict(self, state_dict, strict=False):
        """Tolerant checkpoint loading: copies matching tensors, allows
        a mismatched tail/upsampler (e.g. different scale), and only
        enforces exact key coverage when ``strict`` is True."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Size mismatch is acceptable only for scale-dependent
                    # layers, which are rebuilt for the new scale.
                    if name.find('tail') >= 0 or name.find('upsample') >= 0:
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))

        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            if len(missing) > 0:
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
254 |
255 |
256 |
--------------------------------------------------------------------------------
/TestCode/code/model/ops.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.init as init
5 | import torch.nn.functional as F
6 |
def init_weights(modules):
    """Intentional no-op: layers keep PyTorch's default initialization."""
    pass
9 |
10 |
class MeanShift(nn.Module):
    """Frozen 1x1 conv that subtracts (sub=True) or adds (sub=False) a
    fixed per-channel RGB mean from the input."""

    def __init__(self, mean_rgb, sub):
        super(MeanShift, self).__init__()

        sign = -1 if sub else 1

        self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
        # Identity kernel: the whole shift lives in the bias term.
        self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.shifter.bias.data = torch.Tensor(
            [mean_rgb[0] * sign, mean_rgb[1] * sign, mean_rgb[2] * sign]
        )

        # The shift is a fixed preprocessing step, never trained.
        for params in self.shifter.parameters():
            params.requires_grad = False

    def forward(self, x):
        return self.shifter(x)
31 |
class BasicBlock(nn.Module):
    """Conv2d followed by an in-place ReLU."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1, dilation=1):
        super(BasicBlock, self).__init__()

        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad, dilation)
        self.body = nn.Sequential(conv, nn.ReLU(inplace=True))

        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
48 |
49 |
class GBasicBlock(nn.Module):
    """Grouped (groups=4) Conv2d followed by an in-place ReLU."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1, dilation=1):
        super(GBasicBlock, self).__init__()

        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad,
                         dilation, groups=4)
        self.body = nn.Sequential(conv, nn.ReLU(inplace=True))

        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
66 |
class BasicBlockSig(nn.Module):
    """Single Conv2d followed by a Sigmoid (used as an attention gate)."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1):
        super(BasicBlockSig, self).__init__()

        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad)
        self.body = nn.Sequential(conv, nn.Sigmoid())

        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
83 |
class GBasicBlockSig(nn.Module):
    """Grouped (groups=4) Conv2d followed by a Sigmoid."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1):
        super(GBasicBlockSig, self).__init__()

        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad, groups=4)
        self.body = nn.Sequential(conv, nn.Sigmoid())

        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
100 |
class ResidualBlock(nn.Module):
    """Two 3x3 convs with a ReLU between, plus a post-activation skip."""

    def __init__(self,
                 in_channels, out_channels):
        super(ResidualBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )

        init_weights(self.modules)

    def forward(self, x):
        # ReLU applied after adding the identity branch.
        return F.relu(self.body(x) + x)
118 |
class GResidualBlock(nn.Module):
    """Grouped 3x3 conv, ReLU, then a 1x1 fuse conv, with a
    post-activation skip connection."""

    def __init__(self,
                 in_channels, out_channels):
        super(GResidualBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )

        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
136 |
class EResidualBlock(nn.Module):
    """Efficient residual block: two (optionally grouped) 3x3 convs,
    a 1x1 fuse conv, and a post-activation skip connection."""

    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        super(EResidualBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )

        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
157 |
class ConvertBlock(nn.Module):
    """Funnel a concatenation of *blocks* feature maps down to out_channels
    through two halving 3x3 conv stages."""

    def __init__(self,
                 in_channels, out_channels,
                 blocks):
        super(ConvertBlock, self).__init__()

        mid_half = out_channels * blocks // 2
        mid_quarter = out_channels * blocks // 4
        self.body = nn.Sequential(
            nn.Conv2d(in_channels * blocks, mid_half, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_half, mid_quarter, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_quarter, out_channels, 3, 1, 1),
        )

        init_weights(self.modules)

    def forward(self, x):
        # No residual connection here (input/output widths differ).
        return self.body(x)
178 |
class UpsampleBlock(nn.Module):
    """Wrapper selecting a pixel-shuffle upsampler per requested scale.

    In multi-scale mode, one sub-upsampler is built per scale (2/3/4) and
    forward() dispatches on its `scale` argument; otherwise a single
    upsampler for the fixed `scale` is used.
    """

    def __init__(self,
                 n_channels, scale, multi_scale,
                 group=1):
        super(UpsampleBlock, self).__init__()

        self.multi_scale = multi_scale
        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)

    def forward(self, x, scale):
        if not self.multi_scale:
            return self.up(x)
        if scale == 2:
            return self.up2(x)
        if scale == 3:
            return self.up3(x)
        if scale == 4:
            return self.up4(x)
        # Any other scale falls through and returns None, as before.
204 |
205 |
class _UpsampleBlock(nn.Module):
    """Pixel-shuffle upsampler for x2 / x3 / x4 / x8.

    Power-of-two scales stack log2(scale) stages of
    (conv to 4*C channels -> ReLU -> PixelShuffle(2)); x3 uses one
    (conv to 9*C -> ReLU -> PixelShuffle(3)) stage. Any other scale
    produces an empty Sequential (identity), matching the original
    behavior.

    Args:
        n_channels: feature-map width preserved across the upsampling.
        scale: upscaling factor (2, 3, 4, or 8).
        group: `groups` argument forwarded to the convs.
    """

    def __init__(self,
                 n_channels, scale,
                 group=1):
        super(_UpsampleBlock, self).__init__()

        modules = []
        if scale == 2 or scale == 4 or scale == 8:
            # scale.bit_length() - 1 == log2(scale) exactly for powers of
            # two; the original int(math.log(scale, 2)) trusted float log
            # to land on an exact integer.
            for _ in range(scale.bit_length() - 1):
                modules += [nn.Conv2d(n_channels, 4 * n_channels, 3, 1, 1, groups=group),
                            nn.ReLU(inplace=True)]
                modules += [nn.PixelShuffle(2)]
        elif scale == 3:
            modules += [nn.Conv2d(n_channels, 9 * n_channels, 3, 1, 1, groups=group),
                        nn.ReLU(inplace=True)]
            modules += [nn.PixelShuffle(3)]

        self.body = nn.Sequential(*modules)
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
227 |
--------------------------------------------------------------------------------
/TestCode/code/option.py:
--------------------------------------------------------------------------------
"""Command-line options shared by the training/testing entry points.

Parsing happens at import time: importing this module builds `parser`,
parses sys.argv into `args`, lets template.set_template() overwrite
defaults, and then normalizes a few fields (see the bottom of the file).
"""
import argparse
import template

parser = argparse.ArgumentParser(description='EDSR and MDSR')

parser.add_argument('--debug', action='store_true',
                    help='Enables debug mode')
parser.add_argument('--template', default='.',
                    help='You can set various templates in option.py')

# Hardware specifications
parser.add_argument('--n_threads', type=int, default=3,
                    help='number of threads for data loading')
parser.add_argument('--cpu', action='store_true',
                    help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
                    help='number of GPUs')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed')

# Data specifications
parser.add_argument('--dir_data', type=str, default='/home/yulun/data/PyTorch/npy',
                    help='dataset directory')
parser.add_argument('--dir_demo', type=str, default='../test',
                    help='demo image directory')
parser.add_argument('--data_train', type=str, default='DIV2K',
                    help='train dataset name')
parser.add_argument('--data_test', type=str, default='DIV2K',
                    help='test dataset name')
parser.add_argument('--benchmark_noise', action='store_true',
                    help='use noisy benchmark sets')
parser.add_argument('--n_train', type=int, default=800,
                    help='number of training set')
parser.add_argument('--n_val', type=int, default=10,
                    help='number of validation set')
parser.add_argument('--offset_val', type=int, default=800,
                    help='validation index offest')
parser.add_argument('--ext', type=str, default='sep',
                    help='dataset file extension')
# Kept as a string here: multi-scale runs pass e.g. '2+3+4', which is
# split into a list of ints after parsing (see bottom of this file).
parser.add_argument('--scale', default='4',
                    help='super resolution scale')
parser.add_argument('--patch_size', type=int, default=192,
                    help='output patch size')
parser.add_argument('--rgb_range', type=int, default=255,
                    help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
                    help='number of color channels to use')
parser.add_argument('--noise', type=str, default='.',
                    help='Gaussian noise std.')
parser.add_argument('--chop', action='store_true',
                    help='enable memory-efficient forward')

# Model specifications
parser.add_argument('--model', default='EDSR',
                    help='model name')

parser.add_argument('--act', type=str, default='relu',
                    help='activation function')
parser.add_argument('--pre_train', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--extend', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--n_resblocks', type=int, default=16,
                    help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
                    help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=1,
                    help='residual scaling')
parser.add_argument('--shift_mean', default=True,
                    help='subtract pixel mean from the input')
parser.add_argument('--precision', type=str, default='single',
                    choices=('single', 'half'),
                    help='FP precision for test (single | half)')

# Training specifications
parser.add_argument('--reset', action='store_true',
                    help='reset the training')
parser.add_argument('--test_every', type=int, default=1000,
                    help='do test per every N batches')
parser.add_argument('--epochs', type=int, default=3000,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='input batch size for training')
parser.add_argument('--split_batch', type=int, default=1,
                    help='split the batch into smaller chunks')
parser.add_argument('--self_ensemble', action='store_true',
                    help='use self-ensemble method for test')
parser.add_argument('--test_only', action='store_true',
                    help='set this option to test the model')
parser.add_argument('--gan_k', type=int, default=1,
                    help='k value for adversarial loss')

# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
                    help='learning rate')
parser.add_argument('--lr_decay', type=int, default=200,
                    help='learning rate decay per N epochs')
parser.add_argument('--decay_type', type=str, default='step',
                    help='learning rate decay type')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='learning rate decay factor for step decay')
parser.add_argument('--optimizer', default='ADAM',
                    choices=('SGD', 'ADAM', 'RMSprop'),
                    help='optimizer to use (SGD | ADAM | RMSprop)')
parser.add_argument('--momentum', type=float, default=0.9,
                    help='SGD momentum')
parser.add_argument('--beta1', type=float, default=0.9,
                    help='ADAM beta1')
parser.add_argument('--beta2', type=float, default=0.999,
                    help='ADAM beta2')
parser.add_argument('--epsilon', type=float, default=1e-8,
                    help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
                    help='weight decay')

# Loss specifications
parser.add_argument('--loss', type=str, default='1*L1',
                    help='loss function configuration')
# String default is intentional-looking but quirky: argparse applies
# type=float to string defaults, so this becomes the float 1e6.
parser.add_argument('--skip_threshold', type=float, default='1e6',
                    help='skipping batch that has large error')

# Log specifications
parser.add_argument('--save', type=str, default='RCAN',
                    help='file name to save')
parser.add_argument('--load', type=str, default='.',
                    help='file name to load')
parser.add_argument('--resume', type=int, default=0,
                    help='resume from specific checkpoint')
parser.add_argument('--print_model', action='store_true',
                    help='print model')
parser.add_argument('--save_models', action='store_true',
                    help='save all intermediate models')
parser.add_argument('--print_every', type=int, default=100,
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_results', action='store_true',
                    help='save output results')

# New options
parser.add_argument('--n_resgroups', type=int, default=10,
                    help='number of residual groups')
parser.add_argument('--reduction', type=int, default=16,
                    help='number of feature maps reduction')
parser.add_argument('--testpath', type=str, default='../test/DIV2K_val_LR_our',
                    help='dataset directory for testing')
parser.add_argument('--testset', type=str, default='Set5',
                    help='dataset name for testing')
parser.add_argument('--degradation', type=str, default='BI',
                    help='degradation model: BI, BD')
149 |
150 |
args = parser.parse_args()
template.set_template(args)

# '2+3+4'-style multi-scale specifications become a list of ints.
args.scale = [int(s) for s in args.scale.split('+')]

# epochs == 0 means "train effectively forever".
if args.epochs == 0:
    args.epochs = 1e8

# Normalize string booleans (e.g. from templates) into real bools.
for name, value in vars(args).items():
    if value == 'True':
        setattr(args, name, True)
    elif value == 'False':
        setattr(args, name, False)
164 |
165 |
--------------------------------------------------------------------------------
/TestCode/code/template.py:
--------------------------------------------------------------------------------
def set_template(args):
    """Overwrite parsed-argument defaults according to args.template.

    Several template names may match at once (substring test), in which
    case each matching group of overrides is applied in order.
    """
    name = args.template

    if 'jpeg' in name:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.lr_decay = 100

    if 'EDSR_paper' in name:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1

    if 'MDSR' in name:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 1650

    if 'DDBPN' in name:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.lr_decay = 500
        args.gamma = 0.1
        args.weight_decay = 1e-4
        args.loss = '1*MSE'

    if 'GAN' in name:
        args.epochs = 200
        args.lr = 5e-5
        args.lr_decay = 150
39 |
40 |
--------------------------------------------------------------------------------
/TestCode/code/trainer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import math
3 | from decimal import Decimal
4 |
5 | import utility
6 |
7 | import torch
8 | from torch.autograd import Variable
9 | from tqdm import tqdm
10 |
class Trainer():
    """Drives training and evaluation of a super-resolution model.

    Wires together the data loaders, model, loss, optimizer and scheduler,
    and reports progress/results through the `checkpoint` object `ckp`.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale  # list of SR scales, e.g. [2, 3, 4]

        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)

        # Resuming: restore optimizer state and fast-forward the scheduler
        # one step per epoch already recorded in the PSNR log.
        if self.args.load != '.':
            self.optimizer.load_state_dict(
                torch.load(os.path.join(ckp.dir, 'optimizer.pt'))
            )
            for _ in range(len(ckp.log)): self.scheduler.step()

        # Previous epoch's loss; used by the skip-threshold heuristic below.
        self.error_last = 1e8

    def train(self):
        """Run one training epoch over loader_train."""
        # NOTE(review): scheduler stepped at the start of the epoch — the
        # pre-PyTorch-1.1 ordering this code was written against.
        self.scheduler.step()
        self.loss.step()
        epoch = self.scheduler.last_epoch + 1
        lr = self.scheduler.get_lr()[0]

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # `lr` is reused below as the low-resolution batch, shadowing the
        # learning rate read above (intentional in the original code).
        for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
            lr, hr = self.prepare([lr, hr])
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, idx_scale)
            loss = self.loss(sr, hr)
            # Skip a batch whose loss exploded relative to last epoch's
            # loss (protects against divergent updates).
            if loss.item() < self.args.skip_threshold * self.error_last:
                loss.backward()
                self.optimizer.step()
            else:
                print('Skip this batch {}! (Loss: {})'.format(
                    batch + 1, loss.item()
                ))

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]

    def test(self):
        """Evaluate on loader_test at every configured scale; log PSNR and
        optionally save result images."""
        epoch = self.scheduler.last_epoch + 1
        self.ckp.write_log('\nEvaluation:')
        # New row in the PSNR log: one column per scale.
        self.ckp.add_log(torch.zeros(1, len(self.scale)))
        self.model.eval()

        timer_test = utility.timer()
        with torch.no_grad():
            for idx_scale, scale in enumerate(self.scale):
                eval_acc = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                    filename = filename[0]
                    # A 1-element HR tensor is the loader's "no ground
                    # truth" placeholder; skip PSNR in that case.
                    no_eval = (hr.nelement() == 1)
                    if not no_eval:
                        lr, hr = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]

                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    if not no_eval:
                        eval_acc += utility.calc_psnr(
                            sr, hr, scale, self.args.rgb_range,
                            benchmark=self.loader_test.dataset.benchmark
                        )
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        #self.ckp.save_results(filename, save_list, scale)
                        self.ckp.save_results_nopostfix(filename, save_list, scale)

                # Average PSNR over the test set for this scale.
                self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        self.args.data_test,
                        scale,
                        self.ckp.log[-1, idx_scale],
                        best[0][idx_scale],
                        best[1][idx_scale] + 1
                    )
                )

        self.ckp.write_log(
            'Total time: {:.2f}s, ave time: {:.2f}s\n'.format(timer_test.toc(), timer_test.toc()/len(self.loader_test)), refresh=True
        )
        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))

    def prepare(self, l, volatile=False):
        """Move each tensor in list `l` to the target device, casting to
        half precision if requested. `volatile` is a legacy, unused
        parameter kept for interface compatibility."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(_l) for _l in l]

    def terminate(self):
        """Return True when the run should stop. In test-only mode this
        also performs the evaluation before returning."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.scheduler.last_epoch + 1
            return epoch >= self.args.epochs
144 |
145 |
--------------------------------------------------------------------------------
/TestCode/code/utility.py:
--------------------------------------------------------------------------------
1 | import os
2 | import math
3 | import time
4 | import datetime
5 | from functools import reduce
6 |
7 | import matplotlib
8 | matplotlib.use('Agg')
9 | import matplotlib.pyplot as plt
10 |
11 | import numpy as np
12 | import scipy.misc as misc
13 |
14 | import torch
15 | import torch.optim as optim
16 | import torch.optim.lr_scheduler as lrs
17 |
class timer():
    """Stopwatch that can bank elapsed time across multiple intervals."""

    def __init__(self):
        self.acc = 0  # seconds accumulated via hold()
        self.tic()

    def tic(self):
        # (Re)start the current interval.
        self.t0 = time.time()

    def toc(self):
        # Seconds elapsed since the last tic().
        return time.time() - self.t0

    def hold(self):
        # Bank the current interval into the accumulator.
        self.acc += self.toc()

    def release(self):
        # Return the banked time and clear the accumulator.
        banked, self.acc = self.acc, 0
        return banked

    def reset(self):
        self.acc = 0
40 |
class checkpoint():
    """Manages the experiment directory: text logs, PSNR history, config
    dump, PSNR plots, and saved result images."""

    def __init__(self, args):
        self.args = args
        self.ok = True
        # PSNR history: one row per evaluated epoch, one column per scale.
        self.log = torch.Tensor()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

        if args.load == '.':
            # Fresh run: results go under ../SR/<degradation>/<save>.
            if args.save == '.': args.save = now
            self.dir = '../SR/' + args.degradation + '/' + args.save
        else:
            # Resume: reload the PSNR log if the experiment dir exists,
            # otherwise fall back to a fresh run.
            self.dir = '../experiment/' + args.load
            if not os.path.exists(self.dir):
                args.load = '.'
            else:
                self.log = torch.load(self.dir + '/psnr_log.pt')
                print('Continue from epoch {}...'.format(len(self.log)))

        if args.reset:
            # Wipe the whole experiment directory and start over.
            os.system('rm -rf ' + self.dir)
            args.load = '.'

        def _make_dir(path):
            if not os.path.exists(path): os.makedirs(path)

        _make_dir(self.dir)

        # Per-testset output directory, e.g. <dir>/Set5/x2.
        _make_dir(self.dir + '/' + args.testset + '/x' + str(args.scale[0]))

        # Append when continuing an existing run, otherwise start fresh.
        open_type = 'a' if os.path.exists(self.dir + '/log.txt') else 'w'
        self.log_file = open(self.dir + '/log.txt', open_type)
        with open(self.dir + '/config.txt', open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss state, PSNR log/plot and optimizer state."""
        trainer.model.save(self.dir, epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)

        self.plot_psnr(epoch)
        torch.save(self.log, os.path.join(self.dir, 'psnr_log.pt'))
        torch.save(
            trainer.optimizer.state_dict(),
            os.path.join(self.dir, 'optimizer.pt')
        )

    def add_log(self, log):
        """Append a new row (one epoch of PSNR values) to the history."""
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        """Print a line and append it to log.txt; reopen the file when
        `refresh` is set so the content is flushed to disk."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            self.log_file.close()
            self.log_file = open(self.dir + '/log.txt', 'a')

    def done(self):
        """Close the log file at the end of the run."""
        self.log_file.close()

    def plot_psnr(self, epoch):
        """Save a PSNR-vs-epoch curve (one line per scale) as a PDF."""
        axis = np.linspace(1, epoch, epoch)
        label = 'SR on {}'.format(self.args.data_test)
        fig = plt.figure()
        plt.title(label)
        for idx_scale, scale in enumerate(self.args.scale):
            plt.plot(
                axis,
                self.log[:, idx_scale].numpy(),
                label='Scale {}'.format(scale)
            )
        plt.legend()
        plt.xlabel('Epochs')
        plt.ylabel('PSNR')
        plt.grid(True)
        plt.savefig('{}/test_{}.pdf'.format(self.dir, self.args.data_test))
        plt.close(fig)

    def save_results(self, filename, save_list, scale):
        """Save SR/LR/HR images with postfixes under <dir>/results/.

        NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this
        requires SciPy < 1.2 (or a port to imageio/PIL) to run.
        """
        filename = '{}/results/{}_x{}_'.format(self.dir, filename, scale)
        postfix = ('SR', 'LR', 'HR')
        for v, p in zip(save_list, postfix):
            normalized = v[0].data.mul(255 / self.args.rgb_range)
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            misc.imsave('{}{}.png'.format(filename, p), ndarr)

    def save_results_nopostfix(self, filename, save_list, scale):
        """Save result images named after the input file (no SR/LR/HR
        postfix) under <dir>/<testset>/x<scale>/.

        NOTE(review): every entry of save_list is written to the SAME
        path, so when LR/HR are present the SR image is overwritten
        (last write wins) — verify this is intended.
        """
        #print(filename)
        if self.args.degradation == 'BI':
            filename = filename.replace("LRBI", self.args.save)
        elif self.args.degradation == 'BD':
            filename = filename.replace("LRBD", self.args.save)

        filename = '{}/{}/x{}/{}'.format(self.dir, self.args.testset, scale, filename)
        postfix = ('SR', 'LR', 'HR')
        for v, p in zip(save_list, postfix):
            normalized = v[0].data.mul(255 / self.args.rgb_range)
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            misc.imsave('{}.png'.format(filename), ndarr)
142 |
143 |
def quantize(img, rgb_range):
    """Snap *img* (values in [0, rgb_range]) onto the 255-level grid.

    The image is mapped to [0, 255], clamped, rounded to integers, and
    mapped back to its original range.
    """
    scale = 255 / rgb_range
    return img.mul(scale).clamp(0, 255).round().div(scale)
147 |
def calc_psnr(sr, hr, scale, rgb_range, benchmark=False):
    """Compute PSNR (dB) between a super-resolved and a reference image.

    Multi-channel inputs are reduced to the luma (Y) channel with the
    BT.601 weights before the MSE is taken, and a border of *scale*
    pixels is shaved off to ignore boundary artifacts.

    Args:
        sr, hr: 4-D tensors (batch, channel, height, width) in
            [0, rgb_range]. Single-channel inputs skip the Y conversion.
        scale: super-resolution factor; also the shave border width.
        rgb_range: maximum pixel value of the inputs.
        benchmark: unused; kept for interface compatibility.

    Returns:
        PSNR in dB, or float('inf') when the shaved regions are
        identical (the original raised a math domain error there).
    """
    diff = (sr - hr).data.div(rgb_range)

    shave = scale
    if diff.size(1) > 1:
        # BT.601 RGB->Y weights (scaled by 256), the standard choice for
        # SR benchmark evaluation.
        convert = diff.new(1, 3, 1, 1)
        convert[0, 0, 0, 0] = 65.738
        convert[0, 1, 0, 0] = 129.057
        convert[0, 2, 0, 0] = 25.064
        diff.mul_(convert).div_(256)
        diff = diff.sum(dim=1, keepdim=True)

    valid = diff[:, :, shave:-shave, shave:-shave]
    mse = valid.pow(2).mean()

    # Guard the log: identical images mean infinite PSNR.
    if mse == 0:
        return float('inf')

    return -10 * math.log10(mse)
176 |
def make_optimizer(args, my_model):
    """Build the optimizer named by args.optimizer over the model's
    trainable (requires_grad) parameters."""
    trainable = filter(lambda p: p.requires_grad, my_model.parameters())

    if args.optimizer == 'SGD':
        cls = optim.SGD
        kwargs = {'momentum': args.momentum}
    elif args.optimizer == 'ADAM':
        cls = optim.Adam
        kwargs = {
            'betas': (args.beta1, args.beta2),
            'eps': args.epsilon
        }
    elif args.optimizer == 'RMSprop':
        cls = optim.RMSprop
        kwargs = {'eps': args.epsilon}

    # Learning rate and weight decay are common to all three.
    kwargs.update(lr=args.lr, weight_decay=args.weight_decay)
    return cls(trainable, **kwargs)
197 |
def make_scheduler(args, my_optimizer):
    """Create the learning-rate scheduler described by args.decay_type.

    'step' gives StepLR with period args.lr_decay; 'step_M1_M2_...'
    (anything containing 'step' with trailing milestones) gives
    MultiStepLR at the listed epochs. Both decay by factor args.gamma.

    Raises:
        ValueError: for an unrecognized decay type (the original fell
            through to an UnboundLocalError on `return scheduler`).
    """
    if args.decay_type == 'step':
        return lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )

    if args.decay_type.find('step') >= 0:
        # e.g. 'step_200_400_600' -> milestones [200, 400, 600]
        milestones = [int(m) for m in args.decay_type.split('_')[1:]]
        return lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )

    raise ValueError('unknown decay type: {}'.format(args.decay_type))
216 |
217 |
--------------------------------------------------------------------------------